From 6986e90351bca4071ea61387bee1f52f1125aceb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Thu, 31 Aug 2023 14:35:10 +0200
Subject: [PATCH 001/116] [BUG] inside unit_tests workflow

---
 .github/workflows/unit_tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml
index 20f20ea3..d0097882 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/unit_tests.yml
@@ -34,7 +34,7 @@ jobs:
      - name: Checkout repository
        uses: actions/checkout@v3

-      - name: Load configuration for self-hosted runner 
+      - name: Load configuration for self-hosted runner
        run: cp /home/neuro/local_testing_config.toml narps_open/utils/configuration/testing_config.toml

      - name: Install dependencies

From 7fde1e17d754df35ae384148c7ee0c5708d7fe75 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Fri, 6 Oct 2023 11:46:50 +0200
Subject: [PATCH 002/116] [08MQ] Preprocessing script in progress

---
 narps_open/pipelines/team_08MQ.py | 723 ++++++++++++++++++++++++++++++
 1 file changed, 723 insertions(+)
 create mode 100644 narps_open/pipelines/team_08MQ.py

diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py
new file mode 100644
index 00000000..5db3004a
--- /dev/null
+++ b/narps_open/pipelines/team_08MQ.py
@@ -0,0 +1,723 @@
+#!/usr/bin/python
+# coding: utf-8
+
+""" Write the work of NARPS team 08MQ using Nipype """
+
+"""
+This template can be use to reproduce a pipeline using FSL as main software.
+
+- All lines starting with [INFO], are meant to help you during the reproduction, these can be removed
+eventually.
+- Also remove lines starting with [TODO], once you did what they suggested.
+"""
+
+# [TODO] Only import modules you use further in te code, remove others from the import section
+
+from os.path import join
+
+# [INFO] The import of base objects from Nipype, to create Workflows
+from nipype import Node, Workflow # , JoinNode, MapNode
+
+# [INFO] a list of interfaces used to manpulate data
+from nipype.interfaces.utility import IdentityInterface, Function
+from nipype.interfaces.io import SelectFiles, DataSink
+# from nipype.algorithms.misc import Gunzip
+
+# from nipype.algorithms.modelgen import SpecifyModel
+from nipype.interfaces.fsl import (
+    FAST, BET, Registration, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer
+    )
+
+
+"""
+    Info, ImageMaths, IsotropicSmooth, Threshold,
+    Level1Design, FEATModel, L2Model, Merge,
+    FLAMEO, ContrastMgr, FILMGLS, MultipleRegressDesign,
+    Cluster, BET, SmoothEstimate
+    )
+"""
+
+from nipype.interfaces.ants import Registration
+
+
+from narps_open.pipelines import Pipeline
+from narps_open.pipelines import TaskInformation
+
+class PipelineTeam08MQ(Pipeline):
+    """ A class that defines the pipeline of team 08MQ """
+
+    def __init__(self):
+        super().__init__()
+        self.fwhm = 6.0
+        self.team_id = '08MQ'
+        self.contrast_list = []
+
+    def get_preprocessing(self):
+        """ Return a Nipype workflow describing the preprocessing part of the pipeline """
+
+        # IdentityInterface node - allows iterating over subjects and runs
+        info_source = Node(IdentityInterface(), name='info_source')
+        info_source.inputs.fields=['subject_id', 'run_id']
+        info_source.iterables = [
+            ('subject_id', self.subject_list),
+            ('run_id', self.run_list),
+        ]
+
+        # SelectFiles node - to select necessary files
+        file_templates = {
+            'anat': join('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz'),
+            'func': join(
+                'sub-{subject_id}', 'func',
'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz'
+            ),
+            'magnitude': join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_magnitude1.nii.gz'),
+            'phasediff': join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_phasediff.nii.gz')
+        }
+        select_files = Node(SelectFiles(file_templates), name = 'select_files')
+        select_files.inputs.base_directory = self.directories.dataset_dir
+
+        # DataSink Node - store the wanted results in the wanted repository
+        data_sink = Node(DataSink(), name = 'data_sink')
+        data_sink.inputs.base_directory = self.directories.output_dir
+
+        # FAST Node - Bias field correction on anatomical images
+        bias_field_correction = Node(FAST(), name = 'bias_field_correction')
+        bias_field_correction.inputs.img_type = 1 # T1 image
+        bias_field_correction.inputs.output_biascorrected = True
+        #bias_field_correction.inputs.output_biasfield = True
+
+        # BET Node - Brain extraction for anatomical images
+        brain_extraction_anat = Node(BET(), name = 'brain_extraction_anat')
+        brain_extraction_anat.inputs.frac = 0.5
+
+        # FAST Node - Segmentation of anatomical images
+        segmentation_anat = Node(FAST(), name = 'segmentation_anat')
+        segmentation_anat.inputs.no_bias = True # Bias field was already removed
+        segmentation_anat.inputs.number_classes =
+        segmentation_anat.inputs.segments = True # One image per tissue class
+
+        # ANTs Node - Registration to T1 MNI152 space
+        registration_anat = Node(Registration(), name = 'registration_anat')
+        registration_anat.inputs.fixed_image = ''
+        registration_anat.inputs.moving_image = ''
+        registration_anat.inputs.initial_moving_transform = ''
+        registration_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN']
+        registration_anat.inputs.metric = ['MI', 'MI', 'CC']
+
+        # ErodeImage Node - Erode white-matter mask
+        erode_white_matter = Node(ErodeImage(), name = 'erode_white_matter')
+
+        # ErodeImage Node - Erode CSF mask
+        erode_csf = Node(ErodeImage(), name = 'erode_csf')
+
+        # BET Node - Brain extraction of magnitude images
+        brain_extraction_magnitude = Node(BET(), name = 'brain_extraction_magnitude')
+        brain_extraction_magnitude.inputs.frac = 0.5
+
+        # PrepareFieldmap Node - Convert phase and magnitude to fieldmap images
+        convert_to_fieldmap = Node(PrepareFieldmap(), name = 'convert_to_fieldmap')
+
+        # BET Node - Brain extraction for functional images
+        brain_extraction_func = Node(BET(), name = 'brain_extraction_func')
+        brain_extraction_func.inputs.frac = 0.3
+
+        # MCFLIRT Node - Motion correction of functional images
+        motion_correction = Node(MCFLIRT(), name = 'motion_correction')
+        motion_correction.inputs.cost = 'normcorr'
+        motion_correction.inputs.interpolation = 'trilinear'
+
+        # SliceTimer Node - Slice time correction
+        slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction')
+        slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime']
+
+        custom_order (a pathlike object or string representing an existing file) – Filename of single-column custom interleave order file (first slice is referred to as 1 not 0). Maps to a command-line argument: --ocustom=%s.
+        custom_timings (a pathlike object or string representing an existing file) – Slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tcustom=%s.
+        environ (a dictionary with keys which are a bytes or None or a value of class ‘str’ and with values which are a bytes or None or a value of class ‘str’) – Environment variables. 
(Nipype default value: {}) + global_shift (a float) – Shift in fraction of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tglobal. + index_dir (a boolean) – Slice indexing from top to bottom. Maps to a command-line argument: --down. + interleaved (a boolean) – Use interleaved acquisition. Maps to a command-line argument: --odd. + out_file (a pathlike object or string representing a file) – Filename of output timeseries. Maps to a command-line argument: --out=%s. + output_type (‘NIFTI’ or ‘NIFTI_PAIR’ or ‘NIFTI_GZ’ or ‘NIFTI_PAIR_GZ’) – FSL output type. + slice_direction (1 or 2 or 3) – Direction of slice acquisition (x=1, y=2, z=3) - default is z. Maps to a command-line argument: --direction=%d. + time_repetition (a float) – Specify TR of data - default is 3s. Maps to a command-line argument: --repeat=%f. + + # [INFO] The following part has to be modified with nodes of the pipeline + """ + Anatomical: + V Bias correction -> Bias field correction was applied to the anatomical images using FAST. + V Brain extraction -> BET was used for brain extraction for the anatomical, field map, and functional images. A fractional intensity threshold of 0.5 was used for the anatomical and field map images. One of 0.3 was used for the functional data. + V Segmentation -> Structural images were segmented with FAST. Bias correction was done first. + Alignment to MNI template -> + Data were converted to T1 MNI152 space with a 2mm resolution. + Alignment between T1 anatomical images and the T1 MNI template was calculated with ANTs. + T1 images had bias field correction applied prior to alignment. + Rigid (mutual information cost function), affine (mutual information cost function), + and SyN (cross correlation cost function) steps were applied, in that order. + The combined functional-to-anatomical plus distortion correction warps were applied to functional data and then + the anatomical-to-MNI warps applied to that data. + Creation of white matter and CSF masks from segmentation with threshold=1. Erode masks + + Field maps: + V Brain extraction of magnitude image -> BET was used for brain extraction for the anatomical, field map, and functional images. A fractional intensity threshold of 0.5 was used for the anatomical and field map images. One of 0.3 was used for the functional data. + V Conversion of phase and magnitude images to field maps + + High contrast functional volume: + Alignment to anatomical image including distortion correction with field map + Calculation of inverse warp (anatomical to functional) + + Functional: + V Brain extraction -> BET was used for brain extraction for the anatomical, field map, and functional images. A fractional intensity threshold of 0.5 was used for the anatomical and field map images. One of 0.3 was used for the functional data. + V Motion correction with high contrast image as reference -> MCFLIRT was used for motion correction. + The single volume, high contrast image was used as the reference scan. + Normalised correlation was used as the image similarity metric with trilinear interpolation. + Slice time correction -> Slicetimer was used and was applied after motion correction. + The middle slice was used as the reference slice. Sinc interpolation was used. 
+ Alignment of white matter and CSF masks to functional space with previously calculated warps + Calculate aCompCor components + """ + + preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') + preprocessing.connect([ + # Inputs + (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), + (select_files, node_name, [('func', 'node_input_name')]), + (node_name, data_sink, [('node_output_name', 'preprocessing.@sym_link')]), + + # Anatomical images + (select_files, bias_field_correction, [('anat', 'in_files')]), + (bias_field_correction, brain_extraction_anat, [('restored_image', 'in_file')]), + (brain_extraction_anat, segmentation_anat, [('out_file', 'in_file')]), + (segmentation_anat, registration_anat, [('?', 'in_file')]), + + (registration_anat, erode_white_matter, [('', '')]), + (registration_anat, erode_csf, [('', '')]), + + (erode_white_matter, , [('', '')]), + (erode_csf, , [('', '')]), + + # Field maps + (select_files, brain_extraction_magnitude, [('magnitude', 'in_file')]), + (brain_extraction_magnitude, convert_to_fieldmap, [('out_file', 'in_magnitude')]), + (select_files, convert_to_fieldmap, [('phasediff', 'in_phase')]), + + # High contrast functional volume + # Functional images + (select_files, brain_extraction_func, [('func', 'in_file')]), + (brain_extraction_func, motion_correction, [('out_file', 'in_file')]), + (, motion_correction, [('out_file', 'ref_file')]), # high contrast images + (motion_correction, slice_time_correction, [('out_file', 'in_file')]), + + ]) + + return preprocessing + + def get_run_level_analysis(self): + """ Return a Nipype workflow describing the run level analysis part of the pipeline """ + return None + + def get_session_infos(event_file: str): + """ + Create Bunchs for specifyModel. + + Parameters : + - event_file : file corresponding to the run and the subject to analyze + + Returns : + - subject_info : list of Bunch for 1st level analysis. + """ + + """ + Canonical double gamma HRF plus temporal derivative. + Model consisted of: + + Event regressor with 4 second ON duration. + Parametric modulation of events corresponding to gain magnitude. Mean centred. + Parametric modulation of events corresponding to loss magnitude. Mean centred. + Response regressor with 1 for accept and -1 for reject. Mean centred. + Six head motion parameters plus four aCompCor regressors. + Model and data had a 90s high-pass filter applied. + """ + + from nipype.interfaces.base import Bunch + + condition_names = ['trial', 'gain', 'loss'] + + onset = {} + duration = {} + amplitude = {} + + # Creates dictionary items with empty lists for each condition. 
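+        # e.g. after the loop below: onset == {'trial': [], 'gain': [], 'loss': []},
+        # and likewise for duration and amplitude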
+ for condition in condition_names: + onset.update({condition: []}) + duration.update({condition: []}) + amplitude.update({condition: []}) + + with open(event_file, 'rt') as file: + next(file) # skip the header + + for line in file: + info = line.strip().split() + # Creates list with onsets, duration and loss/gain for amplitude (FSL) + for condition in condition_names: + if condition == 'gain': + onset[condition].append(float(info[0])) + duration[condition].append(float(info[4])) + amplitude[condition].append(float(info[2])) + elif condition == 'loss': + onset[condition].append(float(info[0])) + duration[condition].append(float(info[4])) + amplitude[condition].append(float(info[3])) + elif condition == 'trial': + onset[condition].append(float(info[0])) + duration[condition].append(float(info[4])) + amplitude[condition].append(float(1)) + + subject_info = [] + subject_info.append( + Bunch( + conditions = condition_names, + onsets = [onset[k] for k in condition_names], + durations = [duration[k] for k in condition_names], + amplitudes = [amplitude[k] for k in condition_names], + regressor_names = None, + regressors = None, + ) + ) + + return subject_info + + # [INFO] This function creates the contrasts that will be analyzed in the first level analysis + # [TODO] Adapt this example to your specific pipeline + def get_contrasts(): + """ + Create the list of tuples that represents contrasts. + Each contrast is in the form : + (Name,Stat,[list of condition names],[weights on those conditions]) + + Returns: + - contrasts: list of tuples, list of contrasts to analyze + """ + # List of condition names + conditions = ['trial', 'trialxgain^1', 'trialxloss^1'] + + # Create contrasts + trial = ('trial', 'T', conditions, [1, 0, 0]) + effect_gain = ('effect_of_gain', 'T', conditions, [0, 1, 0]) + effect_loss = ('effect_of_loss', 'T', conditions, [0, 0, 1]) + + # Contrast list + return [trial, effect_gain, effect_loss] + + def get_subject_level_analysis(self): + """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ + + # [INFO] The following part stays the same for all pipelines + + # Infosource Node - To iterate on subjects + info_source = Node( + IdentityInterface( + fields = ['subject_id', 'dataset_dir', 'results_dir', 'working_dir', 'run_list'], + dataset_dir = self.directories.dataset_dir, + results_dir = self.directories.results_dir, + working_dir = self.directories.working_dir, + run_list = self.run_list + ), + name='info_source', + ) + info_source.iterables = [('subject_id', self.subject_list)] + + # Templates to select files node + # [TODO] Change the name of the files depending on the filenames of results of preprocessing + templates = { + 'func': join( + self.directories.results_dir, + 'preprocess', + '_run_id_*_subject_id_{subject_id}', + 'complete_filename_{subject_id}_complete_filename.nii', + ), + 'event': join( + self.directories.dataset_dir, + 'sub-{subject_id}', + 'func', + 'sub-{subject_id}_task-MGT_run-*_events.tsv', + ) + } + + # SelectFiles node - to select necessary files + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.dataset_dir + + # DataSink Node - store the wanted results in the wanted repository + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + + # [INFO] This is the node executing the get_subject_infos_spm function + # Subject Infos node - get subject specific condition information + subject_infos 
= Node( + Function( + function = self.get_subject_infos, + input_names = ['event_files', 'runs'], + output_names = ['subject_info'] + ), + name = 'subject_infos', + ) + subject_infos.inputs.runs = self.run_list + + # [INFO] This is the node executing the get_contrasts function + # Contrasts node - to get contrasts + contrasts = Node( + Function( + function = self.get_contrasts, + input_names = ['subject_id'], + output_names = ['contrasts'] + ), + name = 'contrasts', + ) + + # [INFO] The following part has to be modified with nodes of the pipeline + + # [TODO] For each node, replace 'node_name' by an explicit name, and use it for both: + # - the name of the variable in which you store the Node object + # - the 'name' attribute of the Node + # [TODO] The node_function refers to a NiPype interface that you must import + # at the beginning of the file. + node_name = Node( + node_function, + name = 'node_name' + ) + + # [TODO] Add other nodes with the different steps of the pipeline + + # [INFO] The following part defines the nipype workflow and the connections between nodes + + subject_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = 'subject_level_analysis' + ) + # [TODO] Add the connections the workflow needs + # [INFO] Input and output names can be found on NiPype documentation + subject_level_analysis.connect([ + ( + info_source, + select_files, + [('subject_id', 'subject_id')] + ), + ( + info_source, + contrasts, + [('subject_id', 'subject_id')] + ), + ( + select_files, + subject_infos, + [('event', 'event_files')] + ), + ( + select_files, + node_name, + [('func', 'node_input_name')] + ), + ( + node_name, data_sink, + [('node_output_name', 'preprocess.@sym_link')] + ), + ]) + + # [INFO] Here we simply return the created workflow + return subject_level_analysis + + # [INFO] This function returns the list of ids and files of each group of participants + # to do analyses for both groups, and one between the two groups. + def get_subgroups_contrasts( + copes, varcopes, subject_list: list, participants_file: str + ): + """ + This function return the file list containing only the files + belonging to subject in the wanted group. 
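+        Subject IDs are matched on their last three characters (e.g. 'sub-001' is
+        handled as '001'), which is how the filtering below works.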
+ + Parameters : + - copes: original file list selected by select_files node + - varcopes: original file list selected by select_files node + - subject_ids: list of subject IDs that are analyzed + - participants_file: file containing participants characteristics + + Returns : + - copes_equal_indifference : a subset of copes corresponding to subjects + in the equalIndifference group + - copes_equal_range : a subset of copes corresponding to subjects + in the equalRange group + - copes_global : a list of all copes + - varcopes_equal_indifference : a subset of varcopes corresponding to subjects + in the equalIndifference group + - varcopes_equal_range : a subset of varcopes corresponding to subjects + in the equalRange group + - equal_indifference_id : a list of subject ids in the equalIndifference group + - equal_range_id : a list of subject ids in the equalRange group + - varcopes_global : a list of all varcopes + """ + + equal_range_id = [] + equal_indifference_id = [] + + # Reading file containing participants IDs and groups + with open(participants_file, 'rt') as file: + next(file) # skip the header + + for line in file: + info = line.strip().split() + + # Checking for each participant if its ID was selected + # and separate people depending on their group + if info[0][-3:] in subject_list and info[1] == 'equalIndifference': + equal_indifference_id.append(info[0][-3:]) + elif info[0][-3:] in subject_list and info[1] == 'equalRange': + equal_range_id.append(info[0][-3:]) + + copes_equal_indifference = [] + copes_equal_range = [] + copes_global = [] + varcopes_equal_indifference = [] + varcopes_equal_range = [] + varcopes_global = [] + + # Checking for each selected file if the corresponding participant was selected + # and add the file to the list corresponding to its group + for cope, varcope in zip(copes, varcopes): + sub_id = cope.split('/') + if sub_id[-2][-3:] in equal_indifference_id: + copes_equal_indifference.append(cope) + elif sub_id[-2][-3:] in equal_range_id: + copes_equal_range.append(cope) + if sub_id[-2][-3:] in subject_list: + copes_global.append(cope) + + sub_id = varcope.split('/') + if sub_id[-2][-3:] in equal_indifference_id: + varcopes_equal_indifference.append(varcope) + elif sub_id[-2][-3:] in equal_range_id: + varcopes_equal_range.append(varcope) + if sub_id[-2][-3:] in subject_list: + varcopes_global.append(varcope) + + return copes_equal_indifference, copes_equal_range, + varcopes_equal_indifference, varcopes_equal_range, + equal_indifference_id, equal_range_id, + copes_global, varcopes_global + + + # [INFO] This function creates the dictionary of regressors used in FSL Nipype pipelines + def get_regressors( + equal_range_id: list, + equal_indifference_id: list, + method: str, + subject_list: list, + ) -> dict: + """ + Create dictionary of regressors for group analysis. 
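+        For 'equalRange' and 'equalIndifference', this is a single 'group_mean'
+        column of ones; for 'groupComp', it is two indicator columns, e.g. with a
+        hypothetical subject order: {'equalRange': [1, 0, 1, 0],
+        'equalIndifference': [0, 1, 0, 1]}.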
+ + Parameters: + - equal_range_id: ids of subjects in equal range group + - equal_indifference_id: ids of subjects in equal indifference group + - method: one of "equalRange", "equalIndifference" or "groupComp" + - subject_list: ids of subject for which to do the analysis + + Returns: + - regressors: regressors used to distinguish groups in FSL group analysis + """ + # For one sample t-test, creates a dictionary + # with a list of the size of the number of participants + if method == 'equalRange': + regressors = dict(group_mean = [1 for i in range(len(equal_range_id))]) + elif method == 'equalIndifference': + regressors = dict(group_mean = [1 for i in range(len(equal_indifference_id))]) + + # For two sample t-test, creates 2 lists: + # - one for equal range group, + # - one for equal indifference group + # Each list contains n_sub values with 0 and 1 depending on the group of the participant + # For equalRange_reg list --> participants with a 1 are in the equal range group + elif method == 'groupComp': + equalRange_reg = [ + 1 for i in range(len(equal_range_id) + len(equal_indifference_id)) + ] + equalIndifference_reg = [ + 0 for i in range(len(equal_range_id) + len(equal_indifference_id)) + ] + + for index, subject_id in enumerate(subject_list): + if subject_id in equal_indifference_id: + equalIndifference_reg[index] = 1 + equalRange_reg[index] = 0 + + regressors = dict( + equalRange = equalRange_reg, + equalIndifference = equalIndifference_reg + ) + + return regressors + + def get_group_level_analysis(self): + """ + Return all workflows for the group level analysis. + + Returns; + - a list of nipype.WorkFlow + """ + + methods = ['equalRange', 'equalIndifference', 'groupComp'] + return [self.get_group_level_analysis_sub_workflow(method) for method in methods] + + def get_group_level_analysis_sub_workflow(self, method): + """ + Return a workflow for the group level analysis. 
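+        One workflow is built per method: 'equalRange' and 'equalIndifference' are
+        within-group one-sample designs, while 'groupComp' compares the two groups.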
+ + Parameters: + - method: one of 'equalRange', 'equalIndifference' or 'groupComp' + + Returns: + - group_level_analysis: nipype.WorkFlow + """ + # [INFO] The following part stays the same for all preprocessing pipelines + + # Infosource node - iterate over the list of contrasts generated + # by the subject level analysis + info_source = Node( + IdentityInterface( + fields = ['contrast_id', 'subjects'], + subjects = self.subject_list + ), + name = 'info_source', + ) + info_source.iterables = [('contrast_id', self.contrast_list)] + + # Templates to select files node + # [TODO] Change the name of the files depending on the filenames + # of results of first level analysis + template = { + 'cope' : join( + self.directories.results_dir, + 'subject_level_analysis', + '_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'), + 'varcope' : join( + self.directories.results_dir, + 'subject_level_analysis', + '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz'), + 'participants' : join( + self.directories.dataset_dir, + 'participants.tsv') + } + select_files = Node( + SelectFiles( + templates, + base_directory = self.directories.results_dir, + force_list = True + ), + name = 'select_files', + ) + + # Datasink node - to save important files + data_sink = Node( + DataSink(base_directory = self.directories.output_dir), + name = 'data_sink', + ) + + contrasts = Node( + Function( + input_names=['copes', 'varcopes', 'subject_ids', 'participants_file'], + output_names=[ + 'copes_equalIndifference', + 'copes_equalRange', + 'varcopes_equalIndifference', + 'varcopes_equalRange', + 'equalIndifference_id', + 'equalRange_id', + 'copes_global', + 'varcopes_global' + ], + function = self.get_subgroups_contrasts, + ), + name = 'subgroups_contrasts', + ) + + regs = Node( + Function( + input_names = [ + 'equalRange_id', + 'equalIndifference_id', + 'method', + 'subject_list', + ], + output_names = ['regressors'], + function = self.get_regressors, + ), + name = 'regs', + ) + regs.inputs.method = method + regs.inputs.subject_list = subject_list + + # [INFO] The following part has to be modified with nodes of the pipeline + + # [TODO] For each node, replace 'node_name' by an explicit name, and use it for both: + # - the name of the variable in which you store the Node object + # - the 'name' attribute of the Node + # [TODO] The node_function refers to a NiPype interface that you must import + # at the beginning of the file. + node_name = Node( + node_function, + name = 'node_name' + ) + + # [INFO] The following part defines the nipype workflow and the connections between nodes + + # Compute the number of participants used to do the analysis + nb_subjects = len(self.subject_list) + + # Declare the workflow + group_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_{method}_nsub_{nb_subjects}' + ) + group_level_analysis.connect( + [ + ( + info_source, + select_files, + [('contrast_id', 'contrast_id')], + ), + ( + info_source, + subgroups_contrasts, + [('subject_list', 'subject_ids')], + ), + ( + select_files, + subgroups_contrasts, + [ + ('cope', 'copes'), + ('varcope', 'varcopes'), + ('participants', 'participants_file'), + ], + ), + ( + select_files, + node_name[('func', 'node_input_name')], + ), + ( + node_variable, + datasink_groupanalysis, + [('node_output_name', 'preprocess.@sym_link')], + ), + ] + ) # Complete with other links between nodes + + # [INFO] Here we define the contrasts used for the group level analysis, depending on the + # method used. 
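+        # [INFO] Each tuple is (name, stat, regressor names, weights), the format expected
+        # by FSL's MultipleRegressDesign (see the commented import block at the top of the
+        # file). Note that this draft builds the list but does not yet wire it to a node.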
+ if method in ('equalRange', 'equalIndifference'): + contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] + + elif method == 'groupComp': + contrasts = [ + ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) + ] + + # [INFO] Here we simply return the created workflow + return group_level_analysis From 2b07afcdd3be0ecf41996045923438b7c3c3cfda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 9 Oct 2023 11:10:42 +0200 Subject: [PATCH 003/116] Working on preprocessing --- narps_open/pipelines/team_08MQ.py | 327 +++++++++++++++++------------- 1 file changed, 188 insertions(+), 139 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 5db3004a..49326c97 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -25,7 +25,8 @@ # from nipype.algorithms.modelgen import SpecifyModel from nipype.interfaces.fsl import ( - FAST, BET, Registration, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer + FAST, BET, Registration, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, + Threshold, Info ) @@ -33,13 +34,12 @@ Info, ImageMaths, IsotropicSmooth, Threshold, Level1Design, FEATModel, L2Model, Merge, FLAMEO, ContrastMgr, FILMGLS, MultipleRegressDesign, - Cluster, BET, SmoothEstimate + Cluster, SmoothEstimate ) """ from nipype.interfaces.ants import Registration - from narps_open.pipelines import Pipeline from narps_open.pipelines import TaskInformation @@ -83,7 +83,6 @@ def get_preprocessing(self): bias_field_correction = Node(FAST(), name = 'bias_field_correction') bias_field_correction.inputs.img_type = 1 # T1 image bias_field_correction.inputs.output_biascorrected = True - #bias_field_correction.inputs.output_biasfield = True # BET Node - Brain extraction for anatomical images brain_extraction_anat = Node(BET(), name = 'brain_extraction_anat') @@ -92,17 +91,40 @@ def get_preprocessing(self): # FAST Node - Segmentation of anatomical images segmentation_anat = Node(FAST(), name = 'segmentation_anat') segmentation_anat.inputs.no_bias = True # Bias field was already removed - segmentation_anat.inputs.number_classes = + segmentation_anat.inputs.number_classes = 1 # ? 
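+        # (assumption: FAST's default is 3 tissue classes (GM, WM, CSF), so the value 1
+        # above looks like a placeholder to revisit, hence the trailing "?")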
        segmentation_anat.inputs.segments = True # One image per tissue class

        # ANTs Node - Registration to T1 MNI152 space
        registration_anat = Node(Registration(), name = 'registration_anat')
-        registration_anat.inputs.fixed_image = ''
-        registration_anat.inputs.moving_image = ''
-        registration_anat.inputs.initial_moving_transform = ''
+        """[
+            'MNI152_T1_2mm_eye_mask.nii.gz',
+            'MNI152_T1_2mm_edges.nii.gz',
+            'MNI152_T1_2mm_brain_mask_deweight_eyes.nii.gz',
+            'MNI152_T1_2mm_brain_mask_dil1.nii.gz',
+            'MNI152_T1_2mm_skull.nii.gz',
+            'MNI152_T1_2mm_LR-masked.nii.gz',
+            'MNI152_T1_2mm_brain.nii.gz',
+            'MNI152_T1_2mm_brain_mask.nii.gz',
+            'MNI152_T1_2mm_VentricleMask.nii.gz',
+            'MNI152_T1_2mm_strucseg_periph.nii.gz',
+            'MNI152_T1_2mm.nii.gz',
+            'MNI152_T1_2mm_b0.nii.gz',
+            'MNI152_T1_2mm_brain_mask_dil.nii.gz',
+            'MNI152_T1_2mm_strucseg.nii.gz'
+        ]
+        """
+        registration_anat.inputs.fixed_image = Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
        registration_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        registration_anat.inputs.metric = ['MI', 'MI', 'CC']

+        # Threshold Node - create white-matter mask
+        threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter')
+        threshold_white_matter.inputs.thresh = 1
+
+        # Threshold Node - create CSF mask
+        threshold_csf = Node(Threshold(), name = 'threshold_csf')
+        threshold_csf.inputs.thresh = 1
+
        # ErodeImage Node - Erode white-matter mask
        erode_white_matter = Node(ErodeImage(), name = 'erode_white_matter')

@@ -140,6 +162,16 @@
        slice_direction (1 or 2 or 3) – Direction of slice acquisition (x=1, y=2, z=3) - default is z. Maps to a command-line argument: --direction=%d.
        time_repetition (a float) – Specify TR of data - default is 3s. Maps to a command-line argument: --repeat=%f.
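+        # [INFO] Configuration sketch (an assumption, matching the team description):
+        # slicetimer aligns every slice to the middle of the TR by default
+        # (global_shift = 0.5) and interpolates with Hanning-windowed sinc, so
+        # "middle slice as reference, sinc interpolation" needs no extra options:
+        # slice_time_correction.inputs.global_shift = 0.5 # FSL default, made explicit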
+ # ApplyWarp Node - Alignment of white matter + alignment_white_matter = Node(ApplyWarp(), name = 'alignment_white_matter') + alignment_white_matter.inputs.in_file = '' + alignment_white_matter.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + + # ApplyWarp Node - Alignment of CSF + alignment_csf = Node(ApplyWarp(), name = 'alignment_csf') + alignment_csf.inputs.in_file = '' + alignment_csf.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + # [INFO] The following part has to be modified with nodes of the pipeline """ Anatomical: @@ -186,10 +218,13 @@ def get_preprocessing(self): (select_files, bias_field_correction, [('anat', 'in_files')]), (bias_field_correction, brain_extraction_anat, [('restored_image', 'in_file')]), (brain_extraction_anat, segmentation_anat, [('out_file', 'in_file')]), - (segmentation_anat, registration_anat, [('?', 'in_file')]), + (segmentation_anat, registration_anat, [('?', 'moving_image')]), + + (registration_anat, threshold_white_matter, [('', '')]), + (registration_anat, threshold_csf, [('', '')]), - (registration_anat, erode_white_matter, [('', '')]), - (registration_anat, erode_csf, [('', '')]), + (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), + (threshold_csf, erode_csf, [('out_file', 'in_file')]), (erode_white_matter, , [('', '')]), (erode_csf, , [('', '')]), @@ -200,12 +235,17 @@ def get_preprocessing(self): (select_files, convert_to_fieldmap, [('phasediff', 'in_phase')]), # High contrast functional volume + # Functional images (select_files, brain_extraction_func, [('func', 'in_file')]), (brain_extraction_func, motion_correction, [('out_file', 'in_file')]), (, motion_correction, [('out_file', 'ref_file')]), # high contrast images (motion_correction, slice_time_correction, [('out_file', 'in_file')]), + (, alignment_white_matter, [('', 'in_file')]), + (, alignment_white_matter, [('', 'field_file')]), + (, alignment_csf, [('', 'in_file')]), + (, alignment_csf, [('', 'field_file')]) ]) return preprocessing @@ -214,12 +254,12 @@ def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None - def get_session_infos(event_file: str): + def get_subject_information(event_file: str): """ - Create Bunchs for specifyModel. + Extact subject information from the event file, to create a Bunch with required data only. Parameters : - - event_file : file corresponding to the run and the subject to analyze + - event_file : event file corresponding to the run and the subject to analyze Returns : - subject_info : list of Bunch for 1st level analysis. @@ -233,17 +273,19 @@ def get_session_infos(event_file: str): Parametric modulation of events corresponding to gain magnitude. Mean centred. Parametric modulation of events corresponding to loss magnitude. Mean centred. Response regressor with 1 for accept and -1 for reject. Mean centred. - Six head motion parameters plus four aCompCor regressors. + + Six head motion parameters plus four aCompCor regressors. > + Model and data had a 90s high-pass filter applied. """ from nipype.interfaces.base import Bunch - condition_names = ['trial', 'gain', 'loss'] + condition_names = ['event', 'gain', 'loss', 'response'] - onset = {} - duration = {} - amplitude = {} + onsets = {} + durations = {} + amplitudes = {} # Creates dictionary items with empty lists for each condition. 
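+        # (NB: the dictionaries were renamed to onsets / durations / amplitudes above,
+        # but the loop and the parsing below still use onset / duration / amplitude,
+        # which would raise a NameError at run time)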
for condition in condition_names: @@ -251,6 +293,55 @@ def get_session_infos(event_file: str): duration.update({condition: []}) amplitude.update({condition: []}) + """ + onset = { + event: [], + gain: [], + loss: [], + response: [] + } + duration = { + event: [], + gain: [], + loss: [], + response: [] + } + amplitude = { + event: [], + gain: [], + loss: [], + response: [] + } + + + + [Mandatory] + conditions : list of names + onsets : lists of onsets corresponding to each condition + durations : lists of durations corresponding to each condition. Should be + left to a single 0 if all events are being modelled as impulses. + + [Optional] + regressor_names : list of str + list of names corresponding to each column. Should be None if + automatically assigned. + regressors : list of lists + values for each regressor - must correspond to the number of + volumes in the functional run + amplitudes : lists of amplitudes for each event. This will be ignored by + SPM's Level1Design. + + The following two (tmod, pmod) will be ignored by any Level1Design class + other than SPM: + + tmod : lists of conditions that should be temporally modulated. Should + default to None if not being used. + pmod : list of Bunch corresponding to conditions + - name : name of parametric modulator + - param : values of the modulator + - poly : degree of modulation + """ + with open(event_file, 'rt') as file: next(file) # skip the header @@ -258,26 +349,30 @@ def get_session_infos(event_file: str): info = line.strip().split() # Creates list with onsets, duration and loss/gain for amplitude (FSL) for condition in condition_names: + onset[condition].append(float(info[0])) + duration[condition].append(float(info[1])) + if condition == 'gain': - onset[condition].append(float(info[0])) - duration[condition].append(float(info[4])) amplitude[condition].append(float(info[2])) elif condition == 'loss': - onset[condition].append(float(info[0])) - duration[condition].append(float(info[4])) amplitude[condition].append(float(info[3])) - elif condition == 'trial': - onset[condition].append(float(info[0])) - duration[condition].append(float(info[4])) - amplitude[condition].append(float(1)) + elif condition == 'event': + amplitude[condition].append(1.0) + elif condition == 'response': + if 'reject' in info[5]: + amplitude[condition].append(-1.0) + elif 'accept' in info[5]: + amplitude[condition].append(1.0) + else: + amplitude[condition].append(0.0) # TODO : zeros for NoResp ??? subject_info = [] subject_info.append( Bunch( conditions = condition_names, - onsets = [onset[k] for k in condition_names], - durations = [duration[k] for k in condition_names], - amplitudes = [amplitude[k] for k in condition_names], + onsets = [onsets[c] for c in condition_names], + durations = [durations[c] for c in condition_names], + amplitudes = [amplitudes[c] for c in condition_names], regressor_names = None, regressors = None, ) @@ -295,49 +390,39 @@ def get_contrasts(): Returns: - contrasts: list of tuples, list of contrasts to analyze + + + First level + Positive parametric effect of gain; Positive parametric effect of loss; Negative parametric effect of loss. + Second level + Positive one-sample ttest over first level contrast estimates. 
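+
+        Note: with the four conditions declared below, each contrast weight vector
+        needs four entries, e.g. [0, 1, 0, 0] for the gain effect.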
""" # List of condition names - conditions = ['trial', 'trialxgain^1', 'trialxloss^1'] + conditions = ['event', 'gain', 'loss', 'response'] # Create contrasts - trial = ('trial', 'T', conditions, [1, 0, 0]) - effect_gain = ('effect_of_gain', 'T', conditions, [0, 1, 0]) - effect_loss = ('effect_of_loss', 'T', conditions, [0, 0, 1]) + positive_effect_gain = ('positive_effect_gain', 'T', conditions, [0, 1, 0]) + positive_effect_loss = ('positive_effect_loss', 'T', conditions, [0, 0, 1]) + negative_effect_loss = ('negative_effect_loss', 'T', conditions, [0, 0, -1]) # Contrast list - return [trial, effect_gain, effect_loss] + return [positive_effect_gain, positive_effect_loss, negative_effect_loss] def get_subject_level_analysis(self): """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ - # [INFO] The following part stays the same for all pipelines - # Infosource Node - To iterate on subjects - info_source = Node( - IdentityInterface( - fields = ['subject_id', 'dataset_dir', 'results_dir', 'working_dir', 'run_list'], - dataset_dir = self.directories.dataset_dir, - results_dir = self.directories.results_dir, - working_dir = self.directories.working_dir, - run_list = self.run_list - ), - name='info_source', - ) + info_source = Node(IdentityInterface(), name = 'info_source') + info_source.inputs.fields = ['subject_id', 'run_id'] info_source.iterables = [('subject_id', self.subject_list)] # Templates to select files node - # [TODO] Change the name of the files depending on the filenames of results of preprocessing templates = { - 'func': join( - self.directories.results_dir, - 'preprocess', + 'func': join(self.directories.output_dir, 'preprocessing', '_run_id_*_subject_id_{subject_id}', 'complete_filename_{subject_id}_complete_filename.nii', ), - 'event': join( - self.directories.dataset_dir, - 'sub-{subject_id}', - 'func', + 'event': join(self.directories.dataset_dir, 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_events.tsv', ) } @@ -350,20 +435,29 @@ def get_subject_level_analysis(self): data_sink = Node(DataSink(), name = 'data_sink') data_sink.inputs.base_directory = self.directories.output_dir - # [INFO] This is the node executing the get_subject_infos_spm function - # Subject Infos node - get subject specific condition information - subject_infos = Node( + # Subject information Node - get subject specific condition information + subject_information = Node( Function( - function = self.get_subject_infos, + function = self.get_subject_information, input_names = ['event_files', 'runs'], output_names = ['subject_info'] ), - name = 'subject_infos', + name = 'subject_information', ) subject_infos.inputs.runs = self.run_list - # [INFO] This is the node executing the get_contrasts function - # Contrasts node - to get contrasts + # Parameters Node - create files with parameters from subject session data + parameters = Node( + Function( + function = self.get_parameters_file, + input_names = ['event_files', 'runs'], + output_names = ['parameters_files'] + ), + name = 'parameters', + ) + parameters.inputs.runs = self.run_list + + # Contrasts node - get contrasts to compute from the model contrasts = Node( Function( function = self.get_contrasts, @@ -373,60 +467,20 @@ def get_subject_level_analysis(self): name = 'contrasts', ) - # [INFO] The following part has to be modified with nodes of the pipeline - - # [TODO] For each node, replace 'node_name' by an explicit name, and use it for both: - # - the name of the variable in which you store the Node 
object - # - the 'name' attribute of the Node - # [TODO] The node_function refers to a NiPype interface that you must import - # at the beginning of the file. - node_name = Node( - node_function, - name = 'node_name' - ) - - # [TODO] Add other nodes with the different steps of the pipeline - - # [INFO] The following part defines the nipype workflow and the connections between nodes - subject_level_analysis = Workflow( base_dir = self.directories.working_dir, name = 'subject_level_analysis' ) - # [TODO] Add the connections the workflow needs - # [INFO] Input and output names can be found on NiPype documentation subject_level_analysis.connect([ - ( - info_source, - select_files, - [('subject_id', 'subject_id')] - ), - ( - info_source, - contrasts, - [('subject_id', 'subject_id')] - ), - ( - select_files, - subject_infos, - [('event', 'event_files')] - ), - ( - select_files, - node_name, - [('func', 'node_input_name')] - ), - ( - node_name, data_sink, - [('node_output_name', 'preprocess.@sym_link')] - ), + (info_source, select_files, [('subject_id', 'subject_id')]), + (info_source, contrasts, [('subject_id', 'subject_id')]), + (select_files, subject_infos, [('event', 'event_files')]), + (select_files, node_name, [('func', 'node_input_name')]), + (node_name, data_sink, [('node_output_name', 'preprocess.@sym_link')]), ]) - # [INFO] Here we simply return the created workflow return subject_level_analysis - # [INFO] This function returns the list of ids and files of each group of participants - # to do analyses for both groups, and one between the two groups. def get_subgroups_contrasts( copes, varcopes, subject_list: list, participants_file: str ): @@ -503,8 +557,6 @@ def get_subgroups_contrasts( equal_indifference_id, equal_range_id, copes_global, varcopes_global - - # [INFO] This function creates the dictionary of regressors used in FSL Nipype pipelines def get_regressors( equal_range_id: list, equal_indifference_id: list, @@ -593,12 +645,11 @@ def get_group_level_analysis_sub_workflow(self, method): # [TODO] Change the name of the files depending on the filenames # of results of first level analysis template = { - 'cope' : join( - self.directories.results_dir, + 'cope' : join(self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'), 'varcope' : join( - self.directories.results_dir, + self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz'), 'participants' : join( @@ -608,7 +659,7 @@ def get_group_level_analysis_sub_workflow(self, method): select_files = Node( SelectFiles( templates, - base_directory = self.directories.results_dir, + base_directory = self.directories.output_dir, force_list = True ), name = 'select_files', @@ -620,39 +671,37 @@ def get_group_level_analysis_sub_workflow(self, method): name = 'data_sink', ) - contrasts = Node( - Function( - input_names=['copes', 'varcopes', 'subject_ids', 'participants_file'], - output_names=[ - 'copes_equalIndifference', - 'copes_equalRange', - 'varcopes_equalIndifference', - 'varcopes_equalRange', - 'equalIndifference_id', - 'equalRange_id', - 'copes_global', - 'varcopes_global' - ], - function = self.get_subgroups_contrasts, + contrasts = Node(Function( + function = self.get_subgroups_contrasts, + input_names=['copes', 'varcopes', 'subject_ids', 'participants_file'], + output_names=[ + 'copes_equalIndifference', + 'copes_equalRange', + 'varcopes_equalIndifference', + 'varcopes_equalRange', + 'equalIndifference_id', + 
'equalRange_id', + 'copes_global', + 'varcopes_global' + ] ), name = 'subgroups_contrasts', ) - regs = Node( - Function( - input_names = [ - 'equalRange_id', - 'equalIndifference_id', - 'method', - 'subject_list', + regressors = Node(Function( + function = self.get_regressors, + input_names = [ + 'equalRange_id', + 'equalIndifference_id', + 'method', + 'subject_list', ], - output_names = ['regressors'], - function = self.get_regressors, + output_names = ['regressors'] ), - name = 'regs', + name = 'regressors', ) - regs.inputs.method = method - regs.inputs.subject_list = subject_list + regressors.inputs.method = method + regressors.inputs.subject_list = subject_list # [INFO] The following part has to be modified with nodes of the pipeline From a454652a2131438073baebaf6c98bb67d697b9cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 9 Oct 2023 11:24:59 +0200 Subject: [PATCH 004/116] Codespell --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 49326c97..67d2b2c5 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -256,7 +256,7 @@ def get_run_level_analysis(self): def get_subject_information(event_file: str): """ - Extact subject information from the event file, to create a Bunch with required data only. + Extract subject information from the event file, to create a Bunch with required data only. Parameters : - event_file : event file corresponding to the run and the subject to analyze From aca4896a5d538f7c919d6327e5348d3e39013198 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 10 Oct 2023 10:59:34 +0200 Subject: [PATCH 005/116] Working on pipeline 08MQ --- narps_open/pipelines/team_08MQ.py | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 67d2b2c5..5b9ee521 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -26,7 +26,7 @@ # from nipype.algorithms.modelgen import SpecifyModel from nipype.interfaces.fsl import ( FAST, BET, Registration, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, - Threshold, Info + Threshold, Info, SUSAN ) @@ -87,6 +87,7 @@ def get_preprocessing(self): # BET Node - Brain extraction for anatomical images brain_extraction_anat = Node(BET(), name = 'brain_extraction_anat') brain_extraction_anat.inputs.frac = 0.5 + brain_extraction_anat.inputs.mask = True # ? # FAST Node - Segmentation of anatomical images segmentation_anat = Node(FAST(), name = 'segmentation_anat') @@ -127,9 +128,13 @@ def get_preprocessing(self): # ErodeImage Node - Erode white-matter mask erode_white_matter = Node(ErodeImage(), name = 'erode_white_matter') + erode_white_matter.inputs.kernel_shape = 'sphere' + erode_white_matter.inputs.kernel_size = 2.0 #mm # ErodeImage Node - Erode CSF mask erode_csf = Node(ErodeImage(), name = 'erode_csf') + erode_csf.inputs.kernel_shape = 'sphere' + erode_csf.inputs.kernel_size = 1.5 #mm # BET Node - Brain extraction of magnitude images brain_extraction_magnitude = Node(BET(), name = 'brain_extraction_magnitude') @@ -138,18 +143,27 @@ def get_preprocessing(self): # PrepareFieldmap Node - Convert phase and magnitude to fieldmap images convert_to_fieldmap = Node(PrepareFieldmap(), name = 'convert_to_fieldmap') + # FLIRT was used to align the high contrast functional image to anatomical. 
+ # The calculated transforms were then applied to the 4d functional images + # (which were aligned with the high contrast image in the motion correction step). + # A boundary-based registration cost function was used with trilinear interpolation. + # BET Node - Brain extraction for functional images brain_extraction_func = Node(BET(), name = 'brain_extraction_func') brain_extraction_func.inputs.frac = 0.3 + brain_extraction_func.inputs.mask = True # ? + brain_extraction_func.inputs.functional = True # MCFLIRT Node - Motion correction of functional images motion_correction = Node(MCFLIRT(), name = 'motion_correction') motion_correction.inputs.cost = 'normcorr' motion_correction.inputs.interpolation = 'trilinear' + # single volume, high contrast image was used as the reference scan # SliceTimer Node - Slice time correction slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction') slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime'] + # Slicetimer was used and was applied after motion correction. The middle slice was used as the reference slice. Sinc interpolation was used. custom_order (a pathlike object or string representing an existing file) – Filename of single-column custom interleave order file (first slice is referred to as 1 not 0). Maps to a command-line argument: --ocustom=%s. custom_timings (a pathlike object or string representing an existing file) – Slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tcustom=%s. @@ -162,6 +176,13 @@ def get_preprocessing(self): slice_direction (1 or 2 or 3) – Direction of slice acquisition (x=1, y=2, z=3) - default is z. Maps to a command-line argument: --direction=%d. time_repetition (a float) – Specify TR of data - default is 3s. Maps to a command-line argument: --repeat=%f. + + # SUSAN Node - smoothing of functional images + smoothing = Node(SUSAN(), name = 'smoothing') + smoothing.inputs.brightness_threshold = # ? + smoothing.inputs.fwhm = self.fwhm + smoothing.inputs.in_file + # ApplyWarp Node - Alignment of white matter alignment_white_matter = Node(ApplyWarp(), name = 'alignment_white_matter') alignment_white_matter.inputs.in_file = '' From 8b488233f5331c76770bf8378ecf802cc6b2efa8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 11 Oct 2023 14:49:48 +0200 Subject: [PATCH 006/116] Towards a running version of the pipeline --- narps_open/pipelines/team_08MQ.py | 174 +++++++++++------------------- 1 file changed, 65 insertions(+), 109 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 5b9ee521..974dec45 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -3,45 +3,20 @@ """ Write the work of NARPS team 08MQ using Nipype """ -""" -This template can be use to reproduce a pipeline using FSL as main software. - -- All lines starting with [INFO], are meant to help you during the reproduction, these can be removed -eventually. -- Also remove lines starting with [TODO], once you did what they suggested. 
-""" - -# [TODO] Only import modules you use further in te code, remove others from the import section - from os.path import join -# [INFO] The import of base objects from Nipype, to create Workflows from nipype import Node, Workflow # , JoinNode, MapNode - -# [INFO] a list of interfaces used to manpulate data from nipype.interfaces.utility import IdentityInterface, Function from nipype.interfaces.io import SelectFiles, DataSink -# from nipype.algorithms.misc import Gunzip - -# from nipype.algorithms.modelgen import SpecifyModel from nipype.interfaces.fsl import ( - FAST, BET, Registration, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, - Threshold, Info, SUSAN + FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, + Threshold, Info, SUSAN, FLIRT, ApplyWarp ) - -""" - Info, ImageMaths, IsotropicSmooth, Threshold, - Level1Design, FEATModel, L2Model, Merge, - FLAMEO, ContrastMgr, FILMGLS, MultipleRegressDesign, - Cluster, SmoothEstimate - ) -""" - from nipype.interfaces.ants import Registration from narps_open.pipelines import Pipeline -from narps_open.pipelines import TaskInformation +from narps_open.pipelines.task import TaskInformation class PipelineTeam08MQ(Pipeline): """ A class that defines the pipeline of team 08MQ """ @@ -69,6 +44,9 @@ def get_preprocessing(self): 'func': join( 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz' ), + 'sbref': join( + 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-{run_id}_sbref.nii.gz' + ), 'magnitude': join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_magnitude1.nii.gz'), 'phasediff': join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_phasediff.nii.gz') } @@ -143,6 +121,23 @@ def get_preprocessing(self): # PrepareFieldmap Node - Convert phase and magnitude to fieldmap images convert_to_fieldmap = Node(PrepareFieldmap(), name = 'convert_to_fieldmap') + # FLIRT Node - Align high contrast functional images to anatomical + # (i.e.: single-band reference images a.k.a. sbref) + registration_sbref = Node(FLIRT(), name = 'registration_sbref') + registration_sbref.inputs.interp = 'trilinear' + registration_sbref.inputs.cost = 'bbr' # boundary-based registration + #out_file + #out_matrix_file + # fieldmap (a pathlike object or string representing a file) – Fieldmap image in rads/s - must be already registered to the reference image. Maps to a command-line argument: -fieldmap %s. + # wm_seg (a pathlike object or string representing a file) – White matter segmentation volume needed by BBR cost function. Maps to a command-line argument: -wmseg %s. + # wmcoords (a pathlike object or string representing a file) – White matter boundary coordinates for BBR cost function. Maps to a command-line argument: -wmcoords %s. + # wmnorms (a pathlike object or string representing a file) – White matter boundary normals for BBR cost function. Maps to a command-line argument: -wmnorms %s. + + """ + High contrast functional volume: + Alignment to anatomical image including distortion correction with field map + Calculation of inverse warp (anatomical to functional) + """ # FLIRT was used to align the high contrast functional image to anatomical. # The calculated transforms were then applied to the 4d functional images # (which were aligned with the high contrast image in the motion correction step). 
@@ -164,7 +159,7 @@ def get_preprocessing(self): slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction') slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime'] # Slicetimer was used and was applied after motion correction. The middle slice was used as the reference slice. Sinc interpolation was used. - + """ custom_order (a pathlike object or string representing an existing file) – Filename of single-column custom interleave order file (first slice is referred to as 1 not 0). Maps to a command-line argument: --ocustom=%s. custom_timings (a pathlike object or string representing an existing file) – Slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tcustom=%s. environ (a dictionary with keys which are a bytes or None or a value of class ‘str’ and with values which are a bytes or None or a value of class ‘str’) – Environment variables. (Nipype default value: {}) @@ -175,13 +170,12 @@ def get_preprocessing(self): output_type (‘NIFTI’ or ‘NIFTI_PAIR’ or ‘NIFTI_GZ’ or ‘NIFTI_PAIR_GZ’) – FSL output type. slice_direction (1 or 2 or 3) – Direction of slice acquisition (x=1, y=2, z=3) - default is z. Maps to a command-line argument: --direction=%d. time_repetition (a float) – Specify TR of data - default is 3s. Maps to a command-line argument: --repeat=%f. - - + """ # SUSAN Node - smoothing of functional images smoothing = Node(SUSAN(), name = 'smoothing') - smoothing.inputs.brightness_threshold = # ? + #smoothing.inputs.brightness_threshold = # ? smoothing.inputs.fwhm = self.fwhm - smoothing.inputs.in_file + #smoothing.inputs.in_file # ApplyWarp Node - Alignment of white matter alignment_white_matter = Node(ApplyWarp(), name = 'alignment_white_matter') @@ -232,8 +226,6 @@ def get_preprocessing(self): preprocessing.connect([ # Inputs (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), - (select_files, node_name, [('func', 'node_input_name')]), - (node_name, data_sink, [('node_output_name', 'preprocessing.@sym_link')]), # Anatomical images (select_files, bias_field_correction, [('anat', 'in_files')]), @@ -247,8 +239,8 @@ def get_preprocessing(self): (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), (threshold_csf, erode_csf, [('out_file', 'in_file')]), - (erode_white_matter, , [('', '')]), - (erode_csf, , [('', '')]), + #(erode_white_matter, , [('', '')]), + #(erode_csf, , [('', '')]), # Field maps (select_files, brain_extraction_magnitude, [('magnitude', 'in_file')]), @@ -256,17 +248,20 @@ def get_preprocessing(self): (select_files, convert_to_fieldmap, [('phasediff', 'in_phase')]), # High contrast functional volume + (select_files, registration_sbref, [('sbref', 'in_files')]), + (convert_to_fieldmap, registration_sbref, [('', 'fieldmap')]), # ? 
+ (registration_anat, registration_sbref, [('', 'reference')]), # Functional images (select_files, brain_extraction_func, [('func', 'in_file')]), (brain_extraction_func, motion_correction, [('out_file', 'in_file')]), - (, motion_correction, [('out_file', 'ref_file')]), # high contrast images - (motion_correction, slice_time_correction, [('out_file', 'in_file')]), + #(, motion_correction, [('out_file', 'ref_file')]), # high contrast images + #(motion_correction, slice_time_correction, [('out_file', 'in_file')]), - (, alignment_white_matter, [('', 'in_file')]), - (, alignment_white_matter, [('', 'field_file')]), - (, alignment_csf, [('', 'in_file')]), - (, alignment_csf, [('', 'field_file')]) + #(, alignment_white_matter, [('', 'in_file')]), + #(, alignment_white_matter, [('', 'field_file')]), + #(, alignment_csf, [('', 'in_file')]), + #(, alignment_csf, [('', 'field_file')]) ]) return preprocessing @@ -295,7 +290,7 @@ def get_subject_information(event_file: str): Parametric modulation of events corresponding to loss magnitude. Mean centred. Response regressor with 1 for accept and -1 for reject. Mean centred. - Six head motion parameters plus four aCompCor regressors. > + Six head motion parameters plus four aCompCor regressors. > Model and data had a 90s high-pass filter applied. """ @@ -309,8 +304,8 @@ def get_subject_information(event_file: str): amplitudes = {} # Creates dictionary items with empty lists for each condition. - for condition in condition_names: - onset.update({condition: []}) + for condition in condition_names: + onset.update({condition: []}) duration.update({condition: []}) amplitude.update({condition: []}) @@ -461,7 +456,7 @@ def get_subject_level_analysis(self): Function( function = self.get_subject_information, input_names = ['event_files', 'runs'], - output_names = ['subject_info'] + output_names = ['subject_info'] ), name = 'subject_information', ) @@ -472,7 +467,7 @@ def get_subject_level_analysis(self): Function( function = self.get_parameters_file, input_names = ['event_files', 'runs'], - output_names = ['parameters_files'] + output_names = ['parameters_files'] ), name = 'parameters', ) @@ -483,7 +478,7 @@ def get_subject_level_analysis(self): Function( function = self.get_contrasts, input_names = ['subject_id'], - output_names = ['contrasts'] + output_names = ['contrasts'] ), name = 'contrasts', ) @@ -495,9 +490,7 @@ def get_subject_level_analysis(self): subject_level_analysis.connect([ (info_source, select_files, [('subject_id', 'subject_id')]), (info_source, contrasts, [('subject_id', 'subject_id')]), - (select_files, subject_infos, [('event', 'event_files')]), - (select_files, node_name, [('func', 'node_input_name')]), - (node_name, data_sink, [('node_output_name', 'preprocess.@sym_link')]), + (select_files, subject_information, [('event', 'event_files')]), ]) return subject_level_analysis @@ -573,10 +566,10 @@ def get_subgroups_contrasts( if sub_id[-2][-3:] in subject_list: varcopes_global.append(varcope) - return copes_equal_indifference, copes_equal_range, + return (copes_equal_indifference, copes_equal_range, varcopes_equal_indifference, varcopes_equal_range, equal_indifference_id, equal_range_id, - copes_global, varcopes_global + copes_global, varcopes_global) def get_regressors( equal_range_id: list, @@ -609,21 +602,21 @@ def get_regressors( # Each list contains n_sub values with 0 and 1 depending on the group of the participant # For equalRange_reg list --> participants with a 1 are in the equal range group elif method == 'groupComp': - equalRange_reg 
= [ + equal_range_regressors = [ 1 for i in range(len(equal_range_id) + len(equal_indifference_id)) ] - equalIndifference_reg = [ + equal_indifference_regressors = [ 0 for i in range(len(equal_range_id) + len(equal_indifference_id)) ] for index, subject_id in enumerate(subject_list): if subject_id in equal_indifference_id: - equalIndifference_reg[index] = 1 - equalRange_reg[index] = 0 + equal_indifference_regressors[index] = 1 + equal_range_regressors[index] = 0 regressors = dict( - equalRange = equalRange_reg, - equalIndifference = equalIndifference_reg + equalRange = equal_range_regressors, + equalIndifference = equal_indifference_regressors ) return regressors @@ -665,7 +658,7 @@ def get_group_level_analysis_sub_workflow(self, method): # Templates to select files node # [TODO] Change the name of the files depending on the filenames # of results of first level analysis - template = { + templates = { 'cope' : join(self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'), @@ -692,7 +685,7 @@ def get_group_level_analysis_sub_workflow(self, method): name = 'data_sink', ) - contrasts = Node(Function( + subgroups_contrasts = Node(Function( function = self.get_subgroups_contrasts, input_names=['copes', 'varcopes', 'subject_ids', 'participants_file'], output_names=[ @@ -722,21 +715,7 @@ def get_group_level_analysis_sub_workflow(self, method): name = 'regressors', ) regressors.inputs.method = method - regressors.inputs.subject_list = subject_list - - # [INFO] The following part has to be modified with nodes of the pipeline - - # [TODO] For each node, replace 'node_name' by an explicit name, and use it for both: - # - the name of the variable in which you store the Node object - # - the 'name' attribute of the Node - # [TODO] The node_function refers to a NiPype interface that you must import - # at the beginning of the file. - node_name = Node( - node_function, - name = 'node_name' - ) - - # [INFO] The following part defines the nipype workflow and the connections between nodes + regressors.inputs.subject_list = self.subject_list # Compute the number of participants used to do the analysis nb_subjects = len(self.subject_list) @@ -746,38 +725,15 @@ def get_group_level_analysis_sub_workflow(self, method): base_dir = self.directories.working_dir, name = f'group_level_analysis_{method}_nsub_{nb_subjects}' ) - group_level_analysis.connect( - [ - ( - info_source, - select_files, - [('contrast_id', 'contrast_id')], - ), - ( - info_source, - subgroups_contrasts, - [('subject_list', 'subject_ids')], - ), - ( - select_files, - subgroups_contrasts, - [ - ('cope', 'copes'), - ('varcope', 'varcopes'), - ('participants', 'participants_file'), - ], - ), - ( - select_files, - node_name[('func', 'node_input_name')], - ), - ( - node_variable, - datasink_groupanalysis, - [('node_output_name', 'preprocess.@sym_link')], - ), - ] - ) # Complete with other links between nodes + group_level_analysis.connect([ + (info_source, select_files, [('contrast_id', 'contrast_id')]), + (info_source, subgroups_contrasts, [('subject_list', 'subject_ids')]), + (select_files, subgroups_contrasts, [ + ('cope', 'copes'), + ('varcope', 'varcopes'), + ('participants', 'participants_file'), + ]) + ]) # [INFO] Here we define the contrasts used for the group level analysis, depending on the # method used. 
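The rename above makes the groupComp branch of get_regressors easier to follow. As a quick illustration with toy values (not from the patch), this is the shape of its output for a four-subject list split across the two groups:

# Toy illustration of the 'groupComp' branch: one indicator column per group,
# ordered like subject_list.
subject_list = ['001', '002', '003', '004']
equal_indifference_id = ['001', '003']
equal_range_id = ['002', '004']

equal_range_regressors = [1] * len(subject_list)
equal_indifference_regressors = [0] * len(subject_list)
for index, subject_id in enumerate(subject_list):
    if subject_id in equal_indifference_id:
        equal_indifference_regressors[index] = 1
        equal_range_regressors[index] = 0

regressors = dict(
    equalRange = equal_range_regressors,               # [0, 1, 0, 1]
    equalIndifference = equal_indifference_regressors  # [1, 0, 1, 0]
)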
From ecec8c22e73097003178fca89ca25b25084d8f60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 11 Oct 2023 14:56:03 +0200 Subject: [PATCH 007/116] Conforming to Pipeline class + init test module --- narps_open/pipelines/__init__.py | 2 +- narps_open/pipelines/team_08MQ.py | 20 +++++++++ tests/pipelines/test_team_08MQ.py | 68 +++++++++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 1 deletion(-) create mode 100644 tests/pipelines/test_team_08MQ.py diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py index 6c5239ca..e73fa697 100644 --- a/narps_open/pipelines/__init__.py +++ b/narps_open/pipelines/__init__.py @@ -8,7 +8,7 @@ # List all the available pipelines and the corresponding class for each implemented_pipelines = { - '08MQ': None, + '08MQ': 'PipelineTeam08MQ', '0C7Q': None, '0ED6': None, '0H5E': None, diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 974dec45..b489c830 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -266,10 +266,18 @@ def get_preprocessing(self): return preprocessing + def get_preprocessing_outputs(self): + """ Return a list of the files generated by the preprocessing """ + return [] + def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None + def get_run_level_outputs(self): + """ Return a list of the files generated by the run level analysis """ + return [] + def get_subject_information(event_file: str): """ Extract subject information from the event file, to create a Bunch with required data only. @@ -495,6 +503,10 @@ def get_subject_level_analysis(self): return subject_level_analysis + def get_subject_level_outputs(self): + """ Return a list of the files generated by the subject level analysis """ + return [] + def get_subgroups_contrasts( copes, varcopes, subject_list: list, participants_file: str ): @@ -747,3 +759,11 @@ def get_group_level_analysis_sub_workflow(self, method): # [INFO] Here we simply return the created workflow return group_level_analysis + + def get_group_level_outputs(self): + """ Return a list of the files generated by the group level analysis """ + return [] + + def get_hypotheses_outputs(self): + """ Return the names of the files used by the team to answer the hypotheses of NARPS. """ + return [] diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py new file mode 100644 index 00000000..d062b08c --- /dev/null +++ b/tests/pipelines/test_team_08MQ.py @@ -0,0 +1,68 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Tests of the 'narps_open.pipelines.team_08MQ' module. 
+ +Launch this test with PyTest + +Usage: +====== + pytest -q test_team_08MQ.py + pytest -q test_team_08MQ.py -k +""" + +from pytest import helpers, mark +from nipype import Workflow + +from narps_open.pipelines.team_08MQ import PipelineTeam08MQ + +class TestPipelinesTeam08MQ: + """ A class that contains all the unit tests for the PipelineTeam08MQ class.""" + + @staticmethod + @mark.unit_test + def test_create(): + """ Test the creation of a PipelineTeam08MQ object """ + + pipeline = PipelineTeam08MQ() + + # 1 - check the parameters + assert pipeline.fwhm == 6.0 + assert pipeline.team_id == '08MQ' + + # 2 - check workflows + assert isinstance(pipeline.get_preprocessing(), Workflow) + assert pipeline.get_run_level_analysis() is None + assert isinstance(pipeline.get_subject_level_analysis(), Workflow) + + group_level = pipeline.get_group_level_analysis() + assert len(group_level) == 3 + for sub_workflow in group_level: + assert isinstance(sub_workflow, Workflow) + + @staticmethod + @mark.unit_test + def test_outputs(): + """ Test the expected outputs of a PipelineTeam08MQ object """ + pipeline = PipelineTeam08MQ() + # 1 - 1 subject outputs + pipeline.subject_list = ['001'] + assert len(pipeline.get_preprocessing_outputs()) == 0 + assert len(pipeline.get_run_level_outputs()) == 0 + assert len(pipeline.get_subject_level_outputs()) == 0 + assert len(pipeline.get_group_level_outputs()) == 0 + assert len(pipeline.get_hypotheses_outputs()) == 0 + + # 2 - 4 subjects outputs + pipeline.subject_list = ['001', '002', '003', '004'] + assert len(pipeline.get_preprocessing_outputs()) == 0 + assert len(pipeline.get_run_level_outputs()) == 0 + assert len(pipeline.get_subject_level_outputs()) == 0 + assert len(pipeline.get_group_level_outputs()) == 0 + assert len(pipeline.get_hypotheses_outputs()) == 0 + + @staticmethod + @mark.pipeline_test + def test_execution(): + """ Test the execution of a PipelineTeam08MQ and compare results """ + helpers.test_pipeline_evaluation('08MQ') From 632f4efd91f1d9b3c4ef7e439a854e1b50f1a888 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 11 Oct 2023 15:39:15 +0200 Subject: [PATCH 008/116] Towards a running version of the pipeline --- narps_open/pipelines/team_08MQ.py | 53 ++++++++++--------- .../utils/configuration/testing_config.toml | 4 +- 2 files changed, 30 insertions(+), 27 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index b489c830..da7deb49 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -9,14 +9,17 @@ from nipype.interfaces.utility import IdentityInterface, Function from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( - FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, + FSLCommand, FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, Threshold, Info, SUSAN, FLIRT, ApplyWarp ) from nipype.interfaces.ants import Registration from narps_open.pipelines import Pipeline -from narps_open.pipelines.task import TaskInformation +from narps_open.data.task import TaskInformation + +# Setup FSL +FSLCommand.set_default_output_type('NIFTI_GZ') class PipelineTeam08MQ(Pipeline): """ A class that defines the pipeline of team 08MQ """ @@ -31,8 +34,9 @@ def get_preprocessing(self): """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ # IdentityInterface node - allows to iterate over subjects and runs - info_source = Node(IdentityInterface(), name='info_source') - 
info_source.inputs.fields=['subject_id', 'run_id'] + info_source = Node(IdentityInterface( + fields = ['subject_id', 'run_id']), + name='info_source') info_source.iterables = [ ('subject_id', self.subject_list), ('run_id', self.run_list), @@ -51,7 +55,7 @@ def get_preprocessing(self): 'phasediff': join('sub-{subject_id}', 'fmap', 'sub-{subject_id}_phasediff.nii.gz') } select_files = Node(SelectFiles(file_templates), name = 'select_files') - select_files.input.base_directory = self.directories.dataset_dir + select_files.inputs.base_directory = self.directories.dataset_dir # DataSink Node - store the wanted results in the wanted repository data_sink = Node(DataSink(), name = 'data_sink') @@ -65,12 +69,12 @@ def get_preprocessing(self): # BET Node - Brain extraction for anatomical images brain_extraction_anat = Node(BET(), name = 'brain_extraction_anat') brain_extraction_anat.inputs.frac = 0.5 - brain_extraction_anat.inputs.mask = True # ? + #brain_extraction_anat.inputs.mask = True # ? # FAST Node - Segmentation of anatomical images segmentation_anat = Node(FAST(), name = 'segmentation_anat') segmentation_anat.inputs.no_bias = True # Bias field was already removed - segmentation_anat.inputs.number_classes = 1 # ? + #segmentation_anat.inputs.number_classes = 1 # ? segmentation_anat.inputs.segments = True # One image per tissue class # ANTs Node - Registration to T1 MNI152 space @@ -152,7 +156,7 @@ def get_preprocessing(self): # MCFLIRT Node - Motion correction of functional images motion_correction = Node(MCFLIRT(), name = 'motion_correction') motion_correction.inputs.cost = 'normcorr' - motion_correction.inputs.interpolation = 'trilinear' + motion_correction.inputs.interpolation = 'spline' # should be 'trilinear' # single volume, high contrast image was used as the reference scan # SliceTimer Node - Slice time correction @@ -179,12 +183,10 @@ def get_preprocessing(self): # ApplyWarp Node - Alignment of white matter alignment_white_matter = Node(ApplyWarp(), name = 'alignment_white_matter') - alignment_white_matter.inputs.in_file = '' alignment_white_matter.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') # ApplyWarp Node - Alignment of CSF alignment_csf = Node(ApplyWarp(), name = 'alignment_csf') - alignment_csf.inputs.in_file = '' alignment_csf.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') # [INFO] The following part has to be modified with nodes of the pipeline @@ -230,17 +232,15 @@ def get_preprocessing(self): # Anatomical images (select_files, bias_field_correction, [('anat', 'in_files')]), (bias_field_correction, brain_extraction_anat, [('restored_image', 'in_file')]), - (brain_extraction_anat, segmentation_anat, [('out_file', 'in_file')]), - (segmentation_anat, registration_anat, [('?', 'moving_image')]), - - (registration_anat, threshold_white_matter, [('', '')]), - (registration_anat, threshold_csf, [('', '')]), - + (brain_extraction_anat, segmentation_anat, [('out_file', 'in_files')]), + (brain_extraction_anat, registration_anat, [('out_file', 'moving_image')]), + (brain_extraction_anat, threshold_white_matter, [('out_file', 'in_file')]), + (brain_extraction_anat, threshold_csf, [('out_file', 'in_file')]), (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), (threshold_csf, erode_csf, [('out_file', 'in_file')]), - #(erode_white_matter, , [('', '')]), - #(erode_csf, , [('', '')]), + #(erode_white_matter, alignment_white_matter, [('out_file', '')]), + #(erode_csf, alignment_csf, [('out_file', '')]), # Field maps 
(select_files, brain_extraction_magnitude, [('magnitude', 'in_file')]), @@ -248,9 +248,9 @@ def get_preprocessing(self): (select_files, convert_to_fieldmap, [('phasediff', 'in_phase')]), # High contrast functional volume - (select_files, registration_sbref, [('sbref', 'in_files')]), - (convert_to_fieldmap, registration_sbref, [('', 'fieldmap')]), # ? - (registration_anat, registration_sbref, [('', 'reference')]), + (select_files, registration_sbref, [('sbref', 'in_file')]), + #(convert_to_fieldmap, registration_sbref, [('', 'fieldmap')]), # ? + #(registration_anat, registration_sbref, [('', 'reference')]), # Functional images (select_files, brain_extraction_func, [('func', 'in_file')]), @@ -436,8 +436,10 @@ def get_subject_level_analysis(self): """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ # Infosource Node - To iterate on subjects - info_source = Node(IdentityInterface(), name = 'info_source') - info_source.inputs.fields = ['subject_id', 'run_id'] + info_source = Node(IdentityInterface( + fields = ['subject_id', 'run_id']), + name = 'info_source' + ) info_source.iterables = [('subject_id', self.subject_list)] # Templates to select files node @@ -468,10 +470,10 @@ def get_subject_level_analysis(self): ), name = 'subject_information', ) - subject_infos.inputs.runs = self.run_list + subject_information.inputs.runs = self.run_list # Parameters Node - create files with parameters from subject session data - parameters = Node( + """parameters = Node( Function( function = self.get_parameters_file, input_names = ['event_files', 'runs'], @@ -480,6 +482,7 @@ def get_subject_level_analysis(self): name = 'parameters', ) parameters.inputs.runs = self.run_list + """ # Contrasts node - get contrasts to compute from the model contrasts = Node( diff --git a/narps_open/utils/configuration/testing_config.toml b/narps_open/utils/configuration/testing_config.toml index b1fb28ba..40733c5a 100644 --- a/narps_open/utils/configuration/testing_config.toml +++ b/narps_open/utils/configuration/testing_config.toml @@ -3,9 +3,9 @@ title = "Testing configuration for the NARPS open pipelines project" config_type = "testing" [directories] -dataset = "run/data/ds001734/" +dataset = "data/original/ds001734/" reproduced_results = "run/data/reproduced/" -narps_results = "run/data/results/" +narps_results = "data/results/" test_data = "tests/test_data/" test_runs = "run/" From 0e97a61fa91d84e3b1dc485edbb75b7631e519f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 11 Oct 2023 16:09:32 +0200 Subject: [PATCH 009/116] Towards a running version of the pipeline --- narps_open/pipelines/team_08MQ.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index da7deb49..342c120e 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -249,8 +249,8 @@ def get_preprocessing(self): # High contrast functional volume (select_files, registration_sbref, [('sbref', 'in_file')]), - #(convert_to_fieldmap, registration_sbref, [('', 'fieldmap')]), # ? 
- #(registration_anat, registration_sbref, [('', 'reference')]), + (select_files, registration_sbref, [('anat', 'reference')]), + (convert_to_fieldmap, registration_sbref, [('out_fieldmap', 'fieldmap')]), # Functional images (select_files, brain_extraction_func, [('func', 'in_file')]), From d3dcc00b98fc97fd6ec037798a6021bd4150e384 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 13 Oct 2023 09:58:34 +0200 Subject: [PATCH 010/116] Towards a running version of the pipeline --- narps_open/pipelines/team_08MQ.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 342c120e..b0ac2562 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -10,7 +10,7 @@ from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( FSLCommand, FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, - Threshold, Info, SUSAN, FLIRT, ApplyWarp + Threshold, Info, SUSAN, FLIRT, ApplyWarp, EpiReg ) from nipype.interfaces.ants import Registration @@ -255,8 +255,8 @@ def get_preprocessing(self): # Functional images (select_files, brain_extraction_func, [('func', 'in_file')]), (brain_extraction_func, motion_correction, [('out_file', 'in_file')]), - #(, motion_correction, [('out_file', 'ref_file')]), # high contrast images - #(motion_correction, slice_time_correction, [('out_file', 'in_file')]), + (select_files, motion_correction, [('func', 'ref_file')]), + (motion_correction, slice_time_correction, [('out_file', 'in_file')]), #(, alignment_white_matter, [('', 'in_file')]), #(, alignment_white_matter, [('', 'field_file')]), From 988e53699970f3bc996f9fd0fce2589a0c6d2fe5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 13 Oct 2023 11:23:13 +0200 Subject: [PATCH 011/116] [TEST] runner test not using 08MQ as example anymore --- tests/test_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_runner.py b/tests/test_runner.py index 12a2059c..761347fc 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -204,7 +204,7 @@ def test_create(): # 5 - Modify team id for an existing runner (with a not implemented team id) with raises(NotImplementedError): - runner.team_id = '08MQ' + runner.team_id = '1K0E' @staticmethod @mark.unit_test From ffcdf6eb790118d657b6a29d566cc89d8cc715cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 13 Oct 2023 11:50:24 +0200 Subject: [PATCH 012/116] Issue with SelectFiles' base_directory --- narps_open/pipelines/team_08MQ.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index b0ac2562..98918e30 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -685,15 +685,10 @@ def get_group_level_analysis_sub_workflow(self, method): self.directories.dataset_dir, 'participants.tsv') } - select_files = Node( - SelectFiles( - templates, - base_directory = self.directories.output_dir, - force_list = True - ), - name = 'select_files', - ) - + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.dataset_dir + select_files.inputs.force_list = True + # Datasink node - to save important files data_sink = Node( DataSink(base_directory = self.directories.output_dir), From 04d835e7f2ec2fb67e807908e451fec1a8ec7113 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 13 Oct 2023 15:46:17 +0200 Subject: [PATCH 013/116] Towards a running version of the pipeline --- narps_open/pipelines/team_08MQ.py | 64 +++++++++++++++---------------- 1 file changed, 30 insertions(+), 34 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 98918e30..510236f0 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -77,28 +77,11 @@ def get_preprocessing(self): #segmentation_anat.inputs.number_classes = 1 # ? segmentation_anat.inputs.segments = True # One image per tissue class - # ANTs Node - Registration to T1 MNI152 space - registration_anat = Node(Registration(), name = 'registration_anat') - """[ - 'MNI152_T1_2mm_eye_mask.nii.gz', - 'MNI152_T1_2mm_edges.nii.gz', - 'MNI152_T1_2mm_brain_mask_deweight_eyes.nii.gz', - 'MNI152_T1_2mm_brain_mask_dil1.nii.gz', - 'MNI152_T1_2mm_skull.nii.gz', - 'MNI152_T1_2mm_LR-masked.nii.gz', - 'MNI152_T1_2mm_brain.nii.gz', - 'MNI152_T1_2mm_brain_mask.nii.gz', - 'MNI152_T1_2mm_VentricleMask.nii.gz', - 'MNI152_T1_2mm_strucseg_periph.nii.gz', - 'MNI152_T1_2mm.nii.gz', - 'MNI152_T1_2mm_b0.nii.gz', - 'MNI152_T1_2mm_brain_mask_dil.nii.gz', - 'MNI152_T1_2mm_strucseg.nii.gz' - ] - """ - registration_anat.inputs.fixed_image = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') - registration_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN'] - registration_anat.inputs.metric = ['MI', 'MI', 'CC'] + # ANTs Node - Normalization of anatomical images to T1 MNI152 space + normalization_anat = Node(Registration(), name = 'normalization_anat') + normalization_anat.inputs.fixed_image = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + normalization_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN'] + normalization_anat.inputs.metric = ['MI', 'MI', 'CC'] # Threshold Node - create white-matter mask threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter') @@ -127,9 +110,9 @@ def get_preprocessing(self): # FLIRT Node - Align high contrast functional images to anatomical # (i.e.: single-band reference images a.k.a. sbref) - registration_sbref = Node(FLIRT(), name = 'registration_sbref') - registration_sbref.inputs.interp = 'trilinear' - registration_sbref.inputs.cost = 'bbr' # boundary-based registration + coregistration_sbref = Node(FLIRT(), name = 'coregistration_sbref') + coregistration_sbref.inputs.interp = 'trilinear' + coregistration_sbref.inputs.cost = 'bbr' # boundary-based registration #out_file #out_matrix_file # fieldmap (a pathlike object or string representing a file) – Fieldmap image in rads/s - must be already registered to the reference image. Maps to a command-line argument: -fieldmap %s. @@ -137,6 +120,9 @@ def get_preprocessing(self): # wmcoords (a pathlike object or string representing a file) – White matter boundary coordinates for BBR cost function. Maps to a command-line argument: -wmcoords %s. # wmnorms (a pathlike object or string representing a file) – White matter boundary normals for BBR cost function. Maps to a command-line argument: -wmnorms %s. 
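As the FLIRT notes above say, the image handed to the fieldmap input must already be in rad/s, which is what PrepareFieldmap emits. A standalone sketch; the paths are placeholders, and the echo time difference is the interface's 2.46 ms default rather than a value checked against this dataset:

from nipype.interfaces.fsl import PrepareFieldmap

prepare = PrepareFieldmap()
prepare.inputs.in_magnitude = 'sub-001_magnitude1_brain.nii.gz'  # brain-extracted
prepare.inputs.in_phase = 'sub-001_phasediff.nii.gz'             # placeholder path
prepare.inputs.delta_TE = 2.46  # ms; interface default, not dataset-checked
result = prepare.run()
# result.outputs.out_fieldmap is the rad/s image that FLIRT (and FUGUE) expect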
+ # FLIRT Node - Inverse coregistration wrap, to get anatomical to functional warp + + """ High contrast functional volume: Alignment to anatomical image including distortion correction with field map @@ -181,13 +167,16 @@ def get_preprocessing(self): smoothing.inputs.fwhm = self.fwhm #smoothing.inputs.in_file - # ApplyWarp Node - Alignment of white matter + # ApplyWarp Node - Alignment of white matter to functional space alignment_white_matter = Node(ApplyWarp(), name = 'alignment_white_matter') - alignment_white_matter.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + #alignment_white_matter.inputs.ref_file = high contrast sbref ? + #field_file - # ApplyWarp Node - Alignment of CSF + # ApplyWarp Node - Alignment of CSF to functional space alignment_csf = Node(ApplyWarp(), name = 'alignment_csf') alignment_csf.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + #alignment_white_matter.inputs.ref_file = high contrast sbref ? + #field_file # [INFO] The following part has to be modified with nodes of the pipeline """ @@ -233,7 +222,7 @@ def get_preprocessing(self): (select_files, bias_field_correction, [('anat', 'in_files')]), (bias_field_correction, brain_extraction_anat, [('restored_image', 'in_file')]), (brain_extraction_anat, segmentation_anat, [('out_file', 'in_files')]), - (brain_extraction_anat, registration_anat, [('out_file', 'moving_image')]), + (brain_extraction_anat, normalization_anat, [('out_file', 'moving_image')]), (brain_extraction_anat, threshold_white_matter, [('out_file', 'in_file')]), (brain_extraction_anat, threshold_csf, [('out_file', 'in_file')]), (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), @@ -248,20 +237,27 @@ def get_preprocessing(self): (select_files, convert_to_fieldmap, [('phasediff', 'in_phase')]), # High contrast functional volume - (select_files, registration_sbref, [('sbref', 'in_file')]), - (select_files, registration_sbref, [('anat', 'reference')]), - (convert_to_fieldmap, registration_sbref, [('out_fieldmap', 'fieldmap')]), + (select_files, coregistration_sbref, [('sbref', 'in_file')]), + (select_files, coregistration_sbref, [('anat', 'reference')]), + (convert_to_fieldmap, coregistration_sbref, [('out_fieldmap', 'fieldmap')]), + + #(coregistration_sbref, , [('out_matrix_file', '')]), # Functional images (select_files, brain_extraction_func, [('func', 'in_file')]), (brain_extraction_func, motion_correction, [('out_file', 'in_file')]), - (select_files, motion_correction, [('func', 'ref_file')]), + (select_files, motion_correction, [('sbref', 'ref_file')]), (motion_correction, slice_time_correction, [('out_file', 'in_file')]), #(, alignment_white_matter, [('', 'in_file')]), #(, alignment_white_matter, [('', 'field_file')]), + #(, alignment_white_matter, [('', 'ref_file')]), #(, alignment_csf, [('', 'in_file')]), - #(, alignment_csf, [('', 'field_file')]) + #(, alignment_csf, [('', 'field_file')]), + #(, alignment_csf, [('', 'ref_file')]), + + # Outputs of preprocessing + (motion_correction, datasink, [('par_file', 'preprocessing.@par_file')]) ]) return preprocessing From 75253094d064bcbbdbec908688c811c036102d78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 13 Oct 2023 16:33:23 +0200 Subject: [PATCH 014/116] [TEST] stop using 08MQ in tests --- narps_open/pipelines/team_08MQ.py | 2 +- tests/test_runner.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 510236f0..4171682e 
100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -257,7 +257,7 @@ def get_preprocessing(self): #(, alignment_csf, [('', 'ref_file')]), # Outputs of preprocessing - (motion_correction, datasink, [('par_file', 'preprocessing.@par_file')]) + (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]) ]) return preprocessing diff --git a/tests/test_runner.py b/tests/test_runner.py index 761347fc..bb2a62c3 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -195,7 +195,7 @@ def test_create(): # 3 - Instantiate a runner with a not implemented team id with raises(NotImplementedError): - PipelineRunner('08MQ') + PipelineRunner('1K0E') # 4 - Instantiate a runner with an implemented team id runner = PipelineRunner('2T6S') From da82396dcc30e3b0188c3b3c54f0db828bc10af1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 16 Oct 2023 11:14:55 +0200 Subject: [PATCH 015/116] 08MQ preprocessing instantiable --- narps_open/pipelines/team_08MQ.py | 51 +++++++++++++------------------ 1 file changed, 21 insertions(+), 30 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 4171682e..0805daed 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -178,26 +178,18 @@ def get_preprocessing(self): #alignment_white_matter.inputs.ref_file = high contrast sbref ? #field_file + # ApplyWarp Node - Alignment of functional data to anatomical space + alignment_func_to_anat = Node(ApplyWarp(), name = 'alignment_func_to_anat') + #alignment_func_to_anat.inputs.ref_file = ? + #alignment_white_matter.inputs.ref_file = high contrast sbref ? + #field_file + + # ApplyWarp Node - Alignment of functional data to MNI space + alignment_func_to_mni = Node(ApplyWarp(), name = 'alignment_func_to_mni') + alignment_func_to_mni.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + # [INFO] The following part has to be modified with nodes of the pipeline """ - Anatomical: - V Bias correction -> Bias field correction was applied to the anatomical images using FAST. - V Brain extraction -> BET was used for brain extraction for the anatomical, field map, and functional images. A fractional intensity threshold of 0.5 was used for the anatomical and field map images. One of 0.3 was used for the functional data. - V Segmentation -> Structural images were segmented with FAST. Bias correction was done first. - Alignment to MNI template -> - Data were converted to T1 MNI152 space with a 2mm resolution. - Alignment between T1 anatomical images and the T1 MNI template was calculated with ANTs. - T1 images had bias field correction applied prior to alignment. - Rigid (mutual information cost function), affine (mutual information cost function), - and SyN (cross correlation cost function) steps were applied, in that order. - The combined functional-to-anatomical plus distortion correction warps were applied to functional data and then - the anatomical-to-MNI warps applied to that data. - Creation of white matter and CSF masks from segmentation with threshold=1. Erode masks - - Field maps: - V Brain extraction of magnitude image -> BET was used for brain extraction for the anatomical, field map, and functional images. A fractional intensity threshold of 0.5 was used for the anatomical and field map images. One of 0.3 was used for the functional data. 
- V Conversion of phase and magnitude images to field maps - High contrast functional volume: Alignment to anatomical image including distortion correction with field map Calculation of inverse warp (anatomical to functional) @@ -228,9 +220,6 @@ def get_preprocessing(self): (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), (threshold_csf, erode_csf, [('out_file', 'in_file')]), - #(erode_white_matter, alignment_white_matter, [('out_file', '')]), - #(erode_csf, alignment_csf, [('out_file', '')]), - # Field maps (select_files, brain_extraction_magnitude, [('magnitude', 'in_file')]), (brain_extraction_magnitude, convert_to_fieldmap, [('out_file', 'in_magnitude')]), @@ -241,20 +230,22 @@ def get_preprocessing(self): (select_files, coregistration_sbref, [('anat', 'reference')]), (convert_to_fieldmap, coregistration_sbref, [('out_fieldmap', 'fieldmap')]), - #(coregistration_sbref, , [('out_matrix_file', '')]), - # Functional images (select_files, brain_extraction_func, [('func', 'in_file')]), (brain_extraction_func, motion_correction, [('out_file', 'in_file')]), (select_files, motion_correction, [('sbref', 'ref_file')]), (motion_correction, slice_time_correction, [('out_file', 'in_file')]), - - #(, alignment_white_matter, [('', 'in_file')]), - #(, alignment_white_matter, [('', 'field_file')]), - #(, alignment_white_matter, [('', 'ref_file')]), - #(, alignment_csf, [('', 'in_file')]), - #(, alignment_csf, [('', 'field_file')]), - #(, alignment_csf, [('', 'ref_file')]), + (slice_time_correction, alignment_func_to_anat, [('slice_time_corrected_file', 'in_file')]), + (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'premat')]), + (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'ref_file')]), + (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'in_file')]), + (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'field_file')]), # TODO : will not work ? 
+ (erode_white_matter, alignment_white_matter, [('out_file', 'in_file')]), + #(inverse_warp, alignment_white_matter, [('out_file', 'field_file')]), + (select_files, alignment_white_matter, [('sbref', 'ref_file')]), + (erode_csf, alignment_csf, [('out_file', 'in_file')]), + #(inverse_warp, alignment_csf, [('out_file', 'field_file')]), + (select_files, alignment_csf, [('sbref', 'ref_file')]), # Outputs of preprocessing (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]) From 8fd2195ea576d00119c2a845650fab4f39780755 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Oct 2023 10:23:16 +0200 Subject: [PATCH 016/116] Compute confounds node --- narps_open/pipelines/team_08MQ.py | 43 ++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 15 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 0805daed..bbe8a5b1 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -6,13 +6,13 @@ from os.path import join from nipype import Node, Workflow # , JoinNode, MapNode -from nipype.interfaces.utility import IdentityInterface, Function +from nipype.interfaces.utility import IdentityInterface, Function, Merge from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( FSLCommand, FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, Threshold, Info, SUSAN, FLIRT, ApplyWarp, EpiReg ) - +from nipype.algorithms.confounds import CompCor from nipype.interfaces.ants import Registration from narps_open.pipelines import Pipeline @@ -150,16 +150,13 @@ def get_preprocessing(self): slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime'] # Slicetimer was used and was applied after motion correction. The middle slice was used as the reference slice. Sinc interpolation was used. """ - custom_order (a pathlike object or string representing an existing file) – Filename of single-column custom interleave order file (first slice is referred to as 1 not 0). Maps to a command-line argument: --ocustom=%s. - custom_timings (a pathlike object or string representing an existing file) – Slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tcustom=%s. - environ (a dictionary with keys which are a bytes or None or a value of class ‘str’ and with values which are a bytes or None or a value of class ‘str’) – Environment variables. (Nipype default value: {}) + custom_order (file) - Filename of single-column custom interleave order file (first slice is referred to as 1 not 0). Maps to a command-line argument: --ocustom=%s. + custom_timings (file) – Slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tcustom=%s. global_shift (a float) – Shift in fraction of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tglobal. index_dir (a boolean) – Slice indexing from top to bottom. Maps to a command-line argument: --down. interleaved (a boolean) – Use interleaved acquisition. Maps to a command-line argument: --odd. out_file (a pathlike object or string representing a file) – Filename of output timeseries. Maps to a command-line argument: --out=%s. - output_type (‘NIFTI’ or ‘NIFTI_PAIR’ or ‘NIFTI_GZ’ or ‘NIFTI_PAIR_GZ’) – FSL output type. slice_direction (1 or 2 or 3) – Direction of slice acquisition (x=1, y=2, z=3) - default is z. Maps to a command-line argument: --direction=%d. 
- time_repetition (a float) – Specify TR of data - default is 3s. Maps to a command-line argument: --repeat=%f. """ # SUSAN Node - smoothing of functional images smoothing = Node(SUSAN(), name = 'smoothing') @@ -188,6 +185,16 @@ def get_preprocessing(self): alignment_func_to_mni = Node(ApplyWarp(), name = 'alignment_func_to_mni') alignment_func_to_mni.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + # Merge Node - Merge the two masks (WM and CSF) in one input for the next node + merge_masks = Node(Merge(2), name = 'merge_masks') + + # CompCor Node - Compute anatomical confounds (regressors of no interest in the model) + # from the WM and CSF masks + compute_confounds = Node(CompCor(), name = 'compute_confounds') + compute_confounds.inputs.num_components = 1 # ? + compute_confounds.inputs.pre_filter = 'polynomial' # ? + compute_confounds.inputs.regress_poly_degree = 2 # ? + # [INFO] The following part has to be modified with nodes of the pipeline """ High contrast functional volume: @@ -219,6 +226,14 @@ def get_preprocessing(self): (brain_extraction_anat, threshold_csf, [('out_file', 'in_file')]), (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), (threshold_csf, erode_csf, [('out_file', 'in_file')]), + (erode_white_matter, alignment_white_matter, [('out_file', 'in_file')]), + #(inverse_warp, alignment_white_matter, [('out_file', 'field_file')]), + (select_files, alignment_white_matter, [('sbref', 'ref_file')]), + (erode_csf, alignment_csf, [('out_file', 'in_file')]), + #(inverse_warp, alignment_csf, [('out_file', 'field_file')]), + (select_files, alignment_csf, [('sbref', 'ref_file')]), + (alignment_csf, merge_masks, [('out_file', 'in1')]), + (alignment_white_matter, merge_masks, [('out_file', 'in2')]), # Field maps (select_files, brain_extraction_magnitude, [('magnitude', 'in_file')]), @@ -240,15 +255,12 @@ def get_preprocessing(self): (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'ref_file')]), (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'in_file')]), (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'field_file')]), # TODO : will not work ? 
- (erode_white_matter, alignment_white_matter, [('out_file', 'in_file')]), - #(inverse_warp, alignment_white_matter, [('out_file', 'field_file')]), - (select_files, alignment_white_matter, [('sbref', 'ref_file')]), - (erode_csf, alignment_csf, [('out_file', 'in_file')]), - #(inverse_warp, alignment_csf, [('out_file', 'field_file')]), - (select_files, alignment_csf, [('sbref', 'ref_file')]), + (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space + (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), # Outputs of preprocessing - (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]) + (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), + (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]) ]) return preprocessing @@ -743,7 +755,8 @@ def get_group_level_analysis_sub_workflow(self, method): ] # [INFO] Here we simply return the created workflow - return group_level_analysis + # return group_level_analysis + return None def get_group_level_outputs(self): """ Return a list of the files generated by the group level analysis """ From 9a63118e1b04641d1ce97ad09fe05548b15cbfcc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Oct 2023 10:55:08 +0200 Subject: [PATCH 017/116] Group analysis workflow bug --- narps_open/pipelines/team_08MQ.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index bbe8a5b1..1ba66c62 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -734,7 +734,7 @@ def get_group_level_analysis_sub_workflow(self, method): base_dir = self.directories.working_dir, name = f'group_level_analysis_{method}_nsub_{nb_subjects}' ) - group_level_analysis.connect([ + """group_level_analysis.connect([ (info_source, select_files, [('contrast_id', 'contrast_id')]), (info_source, subgroups_contrasts, [('subject_list', 'subject_ids')]), (select_files, subgroups_contrasts, [ @@ -742,7 +742,7 @@ def get_group_level_analysis_sub_workflow(self, method): ('varcope', 'varcopes'), ('participants', 'participants_file'), ]) - ]) + ])""" # [INFO] Here we define the contrasts used for the group level analysis, depending on the # method used. 
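The compute_confounds node introduced above is the aCompCor step from the team description ("four aCompCor regressors"). Run standalone it looks like the sketch below; the file names are placeholders, and num_components = 4 follows the description rather than the node's current value of 1:

from nipype.algorithms.confounds import CompCor

compcor = CompCor()
compcor.inputs.realigned_file = 'slice_time_corrected_bold.nii.gz'  # placeholder
compcor.inputs.mask_files = ['csf_mask_func_space.nii.gz',          # placeholders
                             'wm_mask_func_space.nii.gz']
compcor.inputs.num_components = 4        # team description: four aCompCor regressors
compcor.inputs.pre_filter = 'polynomial'
compcor.inputs.regress_poly_degree = 2
result = compcor.run()   # writes components_file, a TSV of nuisance regressors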
@@ -755,8 +755,7 @@ def get_group_level_analysis_sub_workflow(self, method): ] # [INFO] Here we simply return the created workflow - # return group_level_analysis - return None + return group_level_analysis def get_group_level_outputs(self): """ Return a list of the files generated by the group level analysis """ From 95fe7024c22c4781d71d7be5462d090e9d4d74ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Oct 2023 11:00:54 +0200 Subject: [PATCH 018/116] Group analysis workflow bug --- narps_open/pipelines/team_08MQ.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 1ba66c62..d5919a4f 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -734,15 +734,15 @@ def get_group_level_analysis_sub_workflow(self, method): base_dir = self.directories.working_dir, name = f'group_level_analysis_{method}_nsub_{nb_subjects}' ) - """group_level_analysis.connect([ - (info_source, select_files, [('contrast_id', 'contrast_id')]), - (info_source, subgroups_contrasts, [('subject_list', 'subject_ids')]), + group_level_analysis.connect([ + (info_source, select_files, [('contrast_id', 'contrast_id')]) + """(info_source, subgroups_contrasts, [('subject_list', 'subject_ids')]), (select_files, subgroups_contrasts, [ ('cope', 'copes'), ('varcope', 'varcopes'), ('participants', 'participants_file'), - ]) - ])""" + ])""" + ]) # [INFO] Here we define the contrasts used for the group level analysis, depending on the # method used. From 1ac7267fbd4e381488048bf4ae66d6ef5f64799d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Oct 2023 11:24:50 +0200 Subject: [PATCH 019/116] Group analysis workflow bug --- narps_open/pipelines/team_08MQ.py | 80 ++++++++++++++++++++++++++----- 1 file changed, 69 insertions(+), 11 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index d5919a4f..43dee1bd 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -267,7 +267,30 @@ def get_preprocessing(self): def get_preprocessing_outputs(self): """ Return a list of the files generated by the preprocessing """ - return [] + + # Contrat maps + templates = [join( + self.directories.output_dir, + 'l1_analysis', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ + for contrast_id in self.contrast_list] + + # SPM.mat file + templates += [join( + self.directories.output_dir, + 'l1_analysis', '_subject_id_{subject_id}', 'SPM.mat')] + + # spmT maps + templates += [join( + self.directories.output_dir, + 'l1_analysis', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ + for contrast_id in self.contrast_list] + + # Format with subject_ids + return_list = [] + for template in templates: + return_list += [template.format(subject_id = s) for s in self.subject_list] + + return return_list def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ @@ -275,7 +298,7 @@ def get_run_level_analysis(self): def get_run_level_outputs(self): """ Return a list of the files generated by the run level analysis """ - return [] + return ['fake_file'] def get_subject_information(event_file: str): """ @@ -507,7 +530,7 @@ def get_subject_level_analysis(self): def get_subject_level_outputs(self): """ Return a list of the files generated by the subject level analysis """ - return [] + return ['fake_file'] def get_subgroups_contrasts( 
copes, varcopes, subject_list: list, participants_file: str @@ -642,6 +665,7 @@ def get_group_level_analysis(self): Returns; - a list of nipype.WorkFlow """ + return None methods = ['equalRange', 'equalIndifference', 'groupComp'] return [self.get_group_level_analysis_sub_workflow(method) for method in methods] @@ -736,12 +760,6 @@ def get_group_level_analysis_sub_workflow(self, method): ) group_level_analysis.connect([ (info_source, select_files, [('contrast_id', 'contrast_id')]) - """(info_source, subgroups_contrasts, [('subject_list', 'subject_ids')]), - (select_files, subgroups_contrasts, [ - ('cope', 'copes'), - ('varcope', 'varcopes'), - ('participants', 'participants_file'), - ])""" ]) # [INFO] Here we define the contrasts used for the group level analysis, depending on the @@ -759,8 +777,48 @@ def get_group_level_analysis_sub_workflow(self, method): def get_group_level_outputs(self): """ Return a list of the files generated by the group level analysis """ - return [] + return ['fake_file'] def get_hypotheses_outputs(self): """ Return the names of the files used by the team to answer the hypotheses of NARPS. """ - return [] + + nb_sub = len(self.subject_list) + files = [ + join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_pgain', 'zstat1.nii.gz'), + join(f'l3_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + join(f'l3_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_pgain', 'zstat1.nii.gz'), + join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_pgain', 'zstat1.nii.gz'), + join(f'l3_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + join(f'l3_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_pgain', 'zstat1.nii.gz'), + join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'), + join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_ploss', 'zstat2.nii.gz'), + join(f'l3_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'), + join(f'l3_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_ploss', 'zstat2.nii.gz'), + join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_ploss', 'zstat1.nii.gz'), + join(f'l3_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + join(f'l3_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_ploss', 'zstat1.nii.gz'), + join(f'l3_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + join(f'l3_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_ploss', 'zstat1.nii.gz') + ] + return [join(self.directories.output_dir, f) for f in files] From ecc4977c90737eafde6fc659403aa309904cbe62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Oct 2023 17:38:43 +0200 Subject: [PATCH 020/116] Issue with Registration and Threshold --- narps_open/pipelines/team_08MQ.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 43dee1bd..d9e87d45 100644 --- 
a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -82,6 +82,9 @@ def get_preprocessing(self): normalization_anat.inputs.fixed_image = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') normalization_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN'] normalization_anat.inputs.metric = ['MI', 'MI', 'CC'] + normalization_anat.inputs.metric_weight = [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0] + normalization_anat.inputs.shrink_factors = [1, 1, 1] + normalization_anat.inputs.smoothing_sigmas = [0.0, 0.0, 0.0] # Threshold Node - create white-matter mask threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter') @@ -89,7 +92,7 @@ def get_preprocessing(self): # Threshold Node - create CSF mask threshold_csf = Node(Threshold(), name = 'threshold_csf') - threshold_white_matter.inputs.thresh = 1 + threshold_csf.inputs.thresh = 1 # ErodeImage Node - Erode white-matter mask erode_white_matter = Node(ErodeImage(), name = 'erode_white_matter') From d2cd3e4859709b3723d26736ade3db18734aec9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Oct 2023 17:41:02 +0200 Subject: [PATCH 021/116] Bug with shrink factors --- narps_open/pipelines/team_08MQ.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index d9e87d45..3d1d36a4 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -83,8 +83,12 @@ def get_preprocessing(self): normalization_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN'] normalization_anat.inputs.metric = ['MI', 'MI', 'CC'] normalization_anat.inputs.metric_weight = [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0] - normalization_anat.inputs.shrink_factors = [1, 1, 1] - normalization_anat.inputs.smoothing_sigmas = [0.0, 0.0, 0.0] + normalization_anat.inputs.shrink_factors = [[1,1,1], [1,1,1], [1,1,1]] + normalization_anat.inputs.smoothing_sigmas = [ + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0] + ] # Threshold Node - create white-matter mask threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter') From 0f186a8f5742a3188e245c5b13ba4b0178d68d30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Oct 2023 17:44:51 +0200 Subject: [PATCH 022/116] Bug with shrink factors --- narps_open/pipelines/team_08MQ.py | 1 + 1 file changed, 1 insertion(+) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 3d1d36a4..2878bc71 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -89,6 +89,7 @@ def get_preprocessing(self): [0.0, 0.0, 0.0], [0.0, 0.0, 0.0] ] + normalization_anat.inputs.number_of_iterations = [10000, 10000, 10000] # Threshold Node - create white-matter mask threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter') From 2c73712ddf6a4a8e3f53053666141be78ba79378 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Oct 2023 17:45:51 +0200 Subject: [PATCH 023/116] Bug with shrink factors --- narps_open/pipelines/team_08MQ.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 2878bc71..25095e16 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -89,7 +89,11 @@ def get_preprocessing(self): [0.0, 0.0, 0.0], [0.0, 0.0, 0.0] ] - normalization_anat.inputs.number_of_iterations = [10000, 10000, 10000] + 
normalization_anat.inputs.number_of_iterations = [ + [10000, 10000, 10000], + [10000, 10000, 10000], + [10000, 10000, 10000] + ] # Threshold Node - create white-matter mask threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter') From 8dba3d32b7095115b7be9e9be8190c1309c37574 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 18 Oct 2023 11:13:50 +0200 Subject: [PATCH 024/116] ANTs registration [skip ci] --- narps_open/pipelines/team_08MQ.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 25095e16..92a0f171 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -83,16 +83,20 @@ def get_preprocessing(self): normalization_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN'] normalization_anat.inputs.metric = ['MI', 'MI', 'CC'] normalization_anat.inputs.metric_weight = [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0] - normalization_anat.inputs.shrink_factors = [[1,1,1], [1,1,1], [1,1,1]] + normalization_anat.inputs.shrink_factors = [ + [1], + [2,1], + [3,2,1] + ] normalization_anat.inputs.smoothing_sigmas = [ - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0], - [0.0, 0.0, 0.0] + [0], + [1, 0], + [2, 1, 0] ] normalization_anat.inputs.number_of_iterations = [ - [10000, 10000, 10000], - [10000, 10000, 10000], - [10000, 10000, 10000] + [1500], + [1500, 200], + [100, 50, 30] ] # Threshold Node - create white-matter mask From 7371a95c3c511dfb1ce693d223f77857a92155d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 18 Oct 2023 17:34:53 +0200 Subject: [PATCH 025/116] Testing parameters for antsRegistration [skip ci] --- narps_open/pipelines/team_08MQ.py | 36 +++++++++++++++++++------------ 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 92a0f171..ad12e610 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -78,26 +78,34 @@ def get_preprocessing(self): segmentation_anat.inputs.segments = True # One image per tissue class # ANTs Node - Normalization of anatomical images to T1 MNI152 space + # https://github.com/ANTsX/ANTs/wiki/Anatomy-of-an-antsRegistration-call normalization_anat = Node(Registration(), name = 'normalization_anat') normalization_anat.inputs.fixed_image = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + normalization_anat.inputs.collapse_output_transforms = True + normalization_anat.inputs.convergence_threshold = [1e-06] + normalization_anat.inputs.convergence_window_size = [10] + normalization_anat.inputs.dimension = 3 + normalization_anat.inputs.initial_moving_transform_com = True + normalization_anat.inputs.radius_or_number_of_bins = [32, 32, 4] + normalization_anat.inputs.sampling_percentage = [0.25, 0.25, 1] + normalization_anat.inputs.sampling_strategy = ['Regular', 'Regular', 'None'] + normalization_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN'] normalization_anat.inputs.metric = ['MI', 'MI', 'CC'] - normalization_anat.inputs.metric_weight = [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0] - normalization_anat.inputs.shrink_factors = [ - [1], - [2,1], - [3,2,1] - ] - normalization_anat.inputs.smoothing_sigmas = [ - [0], - [1, 0], - [2, 1, 0] - ] + normalization_anat.inputs.transform_parameters = [(0.1,), (0.1,), (0.1, 3.0, 0.0)] + normalization_anat.inputs.metric_weight = [1.0]*3 + normalization_anat.inputs.shrink_factors = [[8, 4, 2, 1]]*3 + 
normalization_anat.inputs.smoothing_sigmas = [[3, 2, 1, 0]]*3 + normalization_anat.inputs.sigma_units = ['vox']*3 normalization_anat.inputs.number_of_iterations = [ - [1500], - [1500, 200], - [100, 50, 30] + [1000, 500, 250, 100], + [1000, 500, 250, 100], + [100, 70, 50, 20] ] + normalization_anat.inputs.use_histogram_matching = True + normalization_anat.inputs.winsorize_lower_quantile = 0.005 + normalization_anat.inputs.winsorize_upper_quantile = 0.995 + normalization_anat.inputs.write_composite_transform = True # Threshold Node - create white-matter mask threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter') From 82c4f0a5dacd7042f86176dd8645b5bf11853a61 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 20 Oct 2023 11:32:37 +0200 Subject: [PATCH 026/116] Inverse coregistration transform [skip ci] --- narps_open/pipelines/team_08MQ.py | 55 ++++++++++--------------------- 1 file changed, 18 insertions(+), 37 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ad12e610..be8e27e5 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -10,7 +10,7 @@ from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( FSLCommand, FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, - Threshold, Info, SUSAN, FLIRT, ApplyWarp, EpiReg + Threshold, Info, SUSAN, FLIRT, ApplyWarp, EpiReg, ApplyXFM, ConvertXFM ) from nipype.algorithms.confounds import CompCor from nipype.interfaces.ants import Registration @@ -137,25 +137,10 @@ def get_preprocessing(self): coregistration_sbref = Node(FLIRT(), name = 'coregistration_sbref') coregistration_sbref.inputs.interp = 'trilinear' coregistration_sbref.inputs.cost = 'bbr' # boundary-based registration - #out_file - #out_matrix_file - # fieldmap (a pathlike object or string representing a file) – Fieldmap image in rads/s - must be already registered to the reference image. Maps to a command-line argument: -fieldmap %s. - # wm_seg (a pathlike object or string representing a file) – White matter segmentation volume needed by BBR cost function. Maps to a command-line argument: -wmseg %s. - # wmcoords (a pathlike object or string representing a file) – White matter boundary coordinates for BBR cost function. Maps to a command-line argument: -wmcoords %s. - # wmnorms (a pathlike object or string representing a file) – White matter boundary normals for BBR cost function. Maps to a command-line argument: -wmnorms %s. - # FLIRT Node - Inverse coregistration wrap, to get anatomical to functional warp - - - """ - High contrast functional volume: - Alignment to anatomical image including distortion correction with field map - Calculation of inverse warp (anatomical to functional) - """ - # FLIRT was used to align the high contrast functional image to anatomical. - # The calculated transforms were then applied to the 4d functional images - # (which were aligned with the high contrast image in the motion correction step). - # A boundary-based registration cost function was used with trilinear interpolation. 
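# [INFO] Editor's note, not part of the patches: the commits above converge on
# the rule that nipype's ants.Registration expects one entry per transform
# stage, each holding one value per multi-resolution level. A minimal sketch
# of the final, consistent shape (values as in patch 025):
from nipype.interfaces.ants import Registration
registration = Registration()
registration.inputs.transforms = ['Rigid', 'Affine', 'SyN']
registration.inputs.number_of_iterations = [
    [1000, 500, 250, 100], [1000, 500, 250, 100], [100, 70, 50, 20]]
registration.inputs.shrink_factors = [[8, 4, 2, 1]] * 3    # four levels per stage
registration.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3  # must match the level count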
+ # ConvertXFM Node - Inverse coregistration transform, to get anat to func transform + inverse_func_to_anat = Node(ConvertXFM(), name = 'inverse_func_to_anat') + inverse_func_to_anat.inputs.invert_xfm = True # BET Node - Brain extraction for functional images brain_extraction_func = Node(BET(), name = 'brain_extraction_func') @@ -188,22 +173,17 @@ def get_preprocessing(self): smoothing.inputs.fwhm = self.fwhm #smoothing.inputs.in_file - # ApplyWarp Node - Alignment of white matter to functional space - alignment_white_matter = Node(ApplyWarp(), name = 'alignment_white_matter') - #alignment_white_matter.inputs.ref_file = high contrast sbref ? - #field_file + # ApplyXFM Node - Alignment of white matter to functional space + alignment_white_matter = Node(ApplyXFM(), name = 'alignment_white_matter') + alignment_white_matter.inputs.apply_xfm = True - # ApplyWarp Node - Alignment of CSF to functional space - alignment_csf = Node(ApplyWarp(), name = 'alignment_csf') - alignment_csf.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') - #alignment_white_matter.inputs.ref_file = high contrast sbref ? - #field_file + # ApplyXFM Node - Alignment of CSF to functional space + alignment_csf = Node(ApplyXFM(), name = 'alignment_csf') + alignment_csf.inputs.apply_xfm = True # ApplyWarp Node - Alignment of functional data to anatomical space - alignment_func_to_anat = Node(ApplyWarp(), name = 'alignment_func_to_anat') - #alignment_func_to_anat.inputs.ref_file = ? - #alignment_white_matter.inputs.ref_file = high contrast sbref ? - #field_file + alignment_func_to_anat = Node(ApplyXFM(), name = 'alignment_func_to_anat') + alignment_func_to_anat.inputs.apply_xfm = True # ApplyWarp Node - Alignment of functional data to MNI space alignment_func_to_mni = Node(ApplyWarp(), name = 'alignment_func_to_mni') @@ -251,11 +231,11 @@ def get_preprocessing(self): (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), (threshold_csf, erode_csf, [('out_file', 'in_file')]), (erode_white_matter, alignment_white_matter, [('out_file', 'in_file')]), - #(inverse_warp, alignment_white_matter, [('out_file', 'field_file')]), - (select_files, alignment_white_matter, [('sbref', 'ref_file')]), + (inverse_func_to_anat, alignment_white_matter, [('out_file', 'in_matrix_file')]), + (select_files, alignment_white_matter, [('sbref', 'reference')]), (erode_csf, alignment_csf, [('out_file', 'in_file')]), - #(inverse_warp, alignment_csf, [('out_file', 'field_file')]), - (select_files, alignment_csf, [('sbref', 'ref_file')]), + (inverse_func_to_anat, alignment_csf, [('out_file', 'in_matrix_file')]), + (select_files, alignment_csf, [('sbref', 'reference')]), (alignment_csf, merge_masks, [('out_file', 'in1')]), (alignment_white_matter, merge_masks, [('out_file', 'in2')]), @@ -268,6 +248,7 @@ def get_preprocessing(self): (select_files, coregistration_sbref, [('sbref', 'in_file')]), (select_files, coregistration_sbref, [('anat', 'reference')]), (convert_to_fieldmap, coregistration_sbref, [('out_fieldmap', 'fieldmap')]), + (coregistration_sbref, inverse_func_to_anat, [('out_matrix_file', 'in_file')]), # Functional images (select_files, brain_extraction_func, [('func', 'in_file')]), @@ -275,7 +256,7 @@ def get_preprocessing(self): (select_files, motion_correction, [('sbref', 'ref_file')]), (motion_correction, slice_time_correction, [('out_file', 'in_file')]), (slice_time_correction, alignment_func_to_anat, [('slice_time_corrected_file', 'in_file')]), - (coregistration_sbref, alignment_func_to_anat, 
[('out_matrix_file', 'premat')]), + (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'ref_file')]), (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'in_file')]), (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'field_file')]), # TODO : will not work ? From 6495767e3ab8cccf42511145de1cc216b08858d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 20 Oct 2023 11:35:38 +0200 Subject: [PATCH 027/116] Inverse coregistration transform [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index be8e27e5..df39d773 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -257,7 +257,7 @@ def get_preprocessing(self): (motion_correction, slice_time_correction, [('out_file', 'in_file')]), (slice_time_correction, alignment_func_to_anat, [('slice_time_corrected_file', 'in_file')]), (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), - (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'ref_file')]), + (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'in_file')]), (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'field_file')]), # TODO : will not work ? (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space From 818de15fd66d4961498c31e1f6c30d5af3e6d7b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 20 Oct 2023 11:42:38 +0200 Subject: [PATCH 028/116] Compute confounds parameterization [skip ci] --- narps_open/pipelines/team_08MQ.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index df39d773..f246612c 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -195,9 +195,9 @@ def get_preprocessing(self): # CompCor Node - Compute anatomical confounds (regressors of no interest in the model) # from the WM and CSF masks compute_confounds = Node(CompCor(), name = 'compute_confounds') - compute_confounds.inputs.num_components = 1 # ? - compute_confounds.inputs.pre_filter = 'polynomial' # ? - compute_confounds.inputs.regress_poly_degree = 2 # ? + compute_confounds.inputs.num_components = 4 + compute_confounds.inputs.merge_method = 'union' + compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] # [INFO] The following part has to be modified with nodes of the pipeline """ @@ -259,7 +259,7 @@ def get_preprocessing(self): (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'in_file')]), - (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'field_file')]), # TODO : will not work ? 
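# [INFO] Editor's sketch (illustration only, file names assumed) of the
# aCompCor configuration that patch 028 parameterizes here: CompCor extracts
# principal components from the union of the WM and CSF masks, for use as
# nuisance regressors in the run-level model.
from nipype.algorithms.confounds import CompCor
compcor = CompCor()
compcor.inputs.realigned_file = 'func_mcf_st.nii.gz'  # hypothetical 4D input
compcor.inputs.mask_files = ['wm_func.nii.gz', 'csf_func.nii.gz']  # hypothetical masks
compcor.inputs.merge_method = 'union'
compcor.inputs.num_components = 4
compcor.inputs.repetition_time = 1.0  # assumed TR in seconds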
+ (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'field_file')]), (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), From 735d2f7a915e5418f0b4affbc1ecd5ca230d126b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 20 Oct 2023 12:15:21 +0200 Subject: [PATCH 029/116] ANTs composite transform [skip ci] --- narps_open/pipelines/team_08MQ.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index f246612c..f84fbb24 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -89,7 +89,6 @@ def get_preprocessing(self): normalization_anat.inputs.radius_or_number_of_bins = [32, 32, 4] normalization_anat.inputs.sampling_percentage = [0.25, 0.25, 1] normalization_anat.inputs.sampling_strategy = ['Regular', 'Regular', 'None'] - normalization_anat.inputs.transforms = ['Rigid', 'Affine', 'SyN'] normalization_anat.inputs.metric = ['MI', 'MI', 'CC'] normalization_anat.inputs.transform_parameters = [(0.1,), (0.1,), (0.1, 3.0, 0.0)] @@ -259,7 +258,7 @@ def get_preprocessing(self): (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'in_file')]), - (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'field_file')]), + (normalization_anat, alignment_func_to_mni, [('composite_transform', 'field_file')]), (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), From a1eb02036620216a40ae7bdf95787599e25ed978 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 09:10:38 +0200 Subject: [PATCH 030/116] Trying ANTs' ApplyTransforms --- narps_open/pipelines/team_08MQ.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index f84fbb24..dc5c4aee 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -13,7 +13,7 @@ Threshold, Info, SUSAN, FLIRT, ApplyWarp, EpiReg, ApplyXFM, ConvertXFM ) from nipype.algorithms.confounds import CompCor -from nipype.interfaces.ants import Registration +from nipype.interfaces.ants import Registration, ApplyTransforms from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation @@ -104,7 +104,7 @@ def get_preprocessing(self): normalization_anat.inputs.use_histogram_matching = True normalization_anat.inputs.winsorize_lower_quantile = 0.005 normalization_anat.inputs.winsorize_upper_quantile = 0.995 - normalization_anat.inputs.write_composite_transform = True + #normalization_anat.inputs.write_composite_transform = True # Threshold Node - create white-matter mask threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter') @@ -185,8 +185,9 @@ def get_preprocessing(self): alignment_func_to_anat.inputs.apply_xfm = True # ApplyWarp Node - Alignment of functional data to MNI space - alignment_func_to_mni = Node(ApplyWarp(), name = 'alignment_func_to_mni') - alignment_func_to_mni.inputs.ref_file = Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + alignment_func_to_mni = 
Node(ApplyTransforms(), name = 'alignment_func_to_mni') + alignment_func_to_mni.inputs.reference_image = \ + Info.standard_image('MNI152_T1_2mm_brain.nii.gz') # Merge Node - Merge the two masks (WM and CSF) in one input for the next node merge_masks = Node(Merge(2), name = 'merge_masks') @@ -257,8 +258,8 @@ def get_preprocessing(self): (slice_time_correction, alignment_func_to_anat, [('slice_time_corrected_file', 'in_file')]), (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), - (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'in_file')]), - (normalization_anat, alignment_func_to_mni, [('composite_transform', 'field_file')]), + (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'input_image')]), + (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'transforms')]), (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), From 3f61b5d9cb60a0a5d518f46d1ad4643c282ecc44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 10:21:50 +0200 Subject: [PATCH 031/116] Preprocessing outputs [skip ci] --- narps_open/pipelines/team_08MQ.py | 43 +++++++++++++------------------ 1 file changed, 18 insertions(+), 25 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index dc5c4aee..ce40f3d8 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -38,8 +38,8 @@ def get_preprocessing(self): fields = ['subject_id', 'run_id']), name='info_source') info_source.iterables = [ - ('subject_id', self.subject_list), ('run_id', self.run_list), + ('subject_id', self.subject_list), ] # SelectFiles node - to select necessary files @@ -151,7 +151,7 @@ def get_preprocessing(self): motion_correction = Node(MCFLIRT(), name = 'motion_correction') motion_correction.inputs.cost = 'normcorr' motion_correction.inputs.interpolation = 'spline' # should be 'trilinear' - # single volume, high contrast image was used as the reference scan + motion_correction.inputs.save_rms = True # Save rms displacement parameters # SliceTimer Node - Slice time correction slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction') @@ -264,8 +264,9 @@ def get_preprocessing(self): (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), # Outputs of preprocessing - (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), - (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]) + (motion_correction, data_sink, [('rms_file', 'preprocessing.@rms_file')]), + (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]), + (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]) ]) return preprocessing @@ -273,29 +274,21 @@ def get_preprocessing(self): def get_preprocessing_outputs(self): """ Return a list of the files generated by the preprocessing """ - # Contrat maps - templates = [join( - self.directories.output_dir, - 'l1_analysis', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ - for contrast_id in self.contrast_list] - - # SPM.mat file - templates += [join( - self.directories.output_dir, - 'l1_analysis', '_subject_id_{subject_id}', 'SPM.mat')] - - # spmT maps - templates += [join( + parameters = { + 
'subject_id': self.subject_list, + 'run_id': self.run_list, + 'file': ['components_file.txt'] + } + parameter_sets = product(*parameters.values()) + template = join( self.directories.output_dir, - 'l1_analysis', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ - for contrast_id in self.contrast_list] - - # Format with subject_ids - return_list = [] - for template in templates: - return_list += [template.format(subject_id = s) for s in self.subject_list] + 'preprocessing', + '_subject_id_{subject_id}_run_id_{run_id}', + '{file}' + ) - return return_list + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ From 09111a5671ebbfe94b85b7715e5384ab6aa5c04a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 10:24:38 +0200 Subject: [PATCH 032/116] Preprocessing outputs [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ce40f3d8..fda019e8 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -264,7 +264,7 @@ def get_preprocessing(self): (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), # Outputs of preprocessing - (motion_correction, data_sink, [('rms_file', 'preprocessing.@rms_file')]), + (motion_correction, data_sink, [('rms_files', 'preprocessing.@rms_files')]), (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]), (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]) ]) From cde9f3798b6a63540df1c44c620af7f13c870206 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 10:52:27 +0200 Subject: [PATCH 033/116] Add smoothing [skip ci] --- narps_open/pipelines/team_08MQ.py | 37 +++++-------------------------- 1 file changed, 5 insertions(+), 32 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index fda019e8..6460b0f1 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -36,7 +36,7 @@ def get_preprocessing(self): # IdentityInterface node - allows to iterate over subjects and runs info_source = Node(IdentityInterface( fields = ['subject_id', 'run_id']), - name='info_source') + name = 'info_source') info_source.iterables = [ ('run_id', self.run_list), ('subject_id', self.subject_list), @@ -104,7 +104,6 @@ def get_preprocessing(self): normalization_anat.inputs.use_histogram_matching = True normalization_anat.inputs.winsorize_lower_quantile = 0.005 normalization_anat.inputs.winsorize_upper_quantile = 0.995 - #normalization_anat.inputs.write_composite_transform = True # Threshold Node - create white-matter mask threshold_white_matter = Node(Threshold(), name = 'threshold_white_matter') @@ -156,21 +155,11 @@ def get_preprocessing(self): # SliceTimer Node - Slice time correction slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction') slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime'] - # Slicetimer was used and was applied after motion correction. The middle slice was used as the reference slice. Sinc interpolation was used. 
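# [INFO] Editor's note on get_preprocessing_outputs as rewritten in patch 031
# above: the list comprehension is assigned to `return_list` but the function
# never returns it, so callers receive None; patch 038 further below replaces
# the assignment with a direct `return [...]`. A self-contained sketch of the
# templating idiom itself (fabricated parameter values):
from itertools import product
from os.path import join
parameters = {'subject_id': ['001'], 'run_id': ['01', '02'], 'file': ['components_file.txt']}
template = join('preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', '{file}')
outputs = [template.format(**dict(zip(parameters.keys(), values)))
    for values in product(*parameters.values())]
# outputs -> ['preprocessing/_run_id_01_subject_id_001/components_file.txt', ...]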
- """ - custom_order (file) - Filename of single-column custom interleave order file (first slice is referred to as 1 not 0). Maps to a command-line argument: --ocustom=%s. - custom_timings (file) – Slice timings, in fractions of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tcustom=%s. - global_shift (a float) – Shift in fraction of TR, range 0:1 (default is 0.5 = no shift). Maps to a command-line argument: --tglobal. - index_dir (a boolean) – Slice indexing from top to bottom. Maps to a command-line argument: --down. - interleaved (a boolean) – Use interleaved acquisition. Maps to a command-line argument: --odd. - out_file (a pathlike object or string representing a file) – Filename of output timeseries. Maps to a command-line argument: --out=%s. - slice_direction (1 or 2 or 3) – Direction of slice acquisition (x=1, y=2, z=3) - default is z. Maps to a command-line argument: --direction=%d. - """ + # SUSAN Node - smoothing of functional images smoothing = Node(SUSAN(), name = 'smoothing') - #smoothing.inputs.brightness_threshold = # ? + smoothing.inputs.brightness_threshold = 2000.0 # ? smoothing.inputs.fwhm = self.fwhm - #smoothing.inputs.in_file # ApplyXFM Node - Alignment of white matter to functional space alignment_white_matter = Node(ApplyXFM(), name = 'alignment_white_matter') @@ -199,23 +188,6 @@ def get_preprocessing(self): compute_confounds.inputs.merge_method = 'union' compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] - # [INFO] The following part has to be modified with nodes of the pipeline - """ - High contrast functional volume: - Alignment to anatomical image including distortion correction with field map - Calculation of inverse warp (anatomical to functional) - - Functional: - V Brain extraction -> BET was used for brain extraction for the anatomical, field map, and functional images. A fractional intensity threshold of 0.5 was used for the anatomical and field map images. One of 0.3 was used for the functional data. - V Motion correction with high contrast image as reference -> MCFLIRT was used for motion correction. - The single volume, high contrast image was used as the reference scan. - Normalised correlation was used as the image similarity metric with trilinear interpolation. - Slice time correction -> Slicetimer was used and was applied after motion correction. - The middle slice was used as the reference slice. Sinc interpolation was used. 
- Alignment of white matter and CSF masks to functional space with previously calculated warps - Calculate aCompCor components - """ - preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.connect([ # Inputs @@ -255,7 +227,8 @@ def get_preprocessing(self): (brain_extraction_func, motion_correction, [('out_file', 'in_file')]), (select_files, motion_correction, [('sbref', 'ref_file')]), (motion_correction, slice_time_correction, [('out_file', 'in_file')]), - (slice_time_correction, alignment_func_to_anat, [('slice_time_corrected_file', 'in_file')]), + (slice_time_correction, smoothing, [('slice_time_corrected_file', 'in_file')]), + (smoothing, alignment_func_to_anat, [('smoothed_file', 'in_file')]), (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'input_image')]), From 098de9f5f3d44d100cda360cce70f400bdc9b67a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 11:22:48 +0200 Subject: [PATCH 034/116] Motion correction outputs [skip ci] --- narps_open/pipelines/team_08MQ.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 6460b0f1..eee2ab44 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -150,7 +150,7 @@ def get_preprocessing(self): motion_correction = Node(MCFLIRT(), name = 'motion_correction') motion_correction.inputs.cost = 'normcorr' motion_correction.inputs.interpolation = 'spline' # should be 'trilinear' - motion_correction.inputs.save_rms = True # Save rms displacement parameters + motion_correction.inputs.save_plots = True # Save transformation parameters # SliceTimer Node - Slice time correction slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction') @@ -237,7 +237,7 @@ def get_preprocessing(self): (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), # Outputs of preprocessing - (motion_correction, data_sink, [('rms_files', 'preprocessing.@rms_files')]), + (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]), (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]) ]) From a335fd6c96e72df45b018e0dfc2cb788ffef44c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 14:11:44 +0200 Subject: [PATCH 035/116] Parse segmentation maps [skip ci] --- narps_open/pipelines/team_08MQ.py | 131 ++++++++++++++++++++++++++++-- 1 file changed, 124 insertions(+), 7 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index eee2ab44..7c2e2221 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -6,7 +6,7 @@ from os.path import join from nipype import Node, Workflow # , JoinNode, MapNode -from nipype.interfaces.utility import IdentityInterface, Function, Merge +from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( FSLCommand, FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, @@ -69,14 +69,17 @@ def get_preprocessing(self): # BET Node - Brain extraction for anatomical images 
brain_extraction_anat = Node(BET(), name = 'brain_extraction_anat') brain_extraction_anat.inputs.frac = 0.5 - #brain_extraction_anat.inputs.mask = True # ? + #brain_extraction_anat.inputs.mask = True # TODO ? # FAST Node - Segmentation of anatomical images segmentation_anat = Node(FAST(), name = 'segmentation_anat') segmentation_anat.inputs.no_bias = True # Bias field was already removed - #segmentation_anat.inputs.number_classes = 1 # ? segmentation_anat.inputs.segments = True # One image per tissue class + # Split Node - Split probability maps as they output from the segmentation node + split_segmentation_maps = Node(Split(), name = 'split_segmentation_maps') + split_segmentation_maps.inputs.splits = [1, 1, 1] + # ANTs Node - Normalization of anatomical images to T1 MNI152 space # https://github.com/ANTsX/ANTs/wiki/Anatomy-of-an-antsRegistration-call normalization_anat = Node(Registration(), name = 'normalization_anat') @@ -143,7 +146,7 @@ def get_preprocessing(self): # BET Node - Brain extraction for functional images brain_extraction_func = Node(BET(), name = 'brain_extraction_func') brain_extraction_func.inputs.frac = 0.3 - brain_extraction_func.inputs.mask = True # ? + brain_extraction_func.inputs.mask = True brain_extraction_func.inputs.functional = True # MCFLIRT Node - Motion correction of functional images @@ -158,7 +161,7 @@ def get_preprocessing(self): # SUSAN Node - smoothing of functional images smoothing = Node(SUSAN(), name = 'smoothing') - smoothing.inputs.brightness_threshold = 2000.0 # ? + smoothing.inputs.brightness_threshold = 2000.0 # TODO : which value ? smoothing.inputs.fwhm = self.fwhm # ApplyXFM Node - Alignment of white matter to functional space @@ -198,8 +201,9 @@ def get_preprocessing(self): (bias_field_correction, brain_extraction_anat, [('restored_image', 'in_file')]), (brain_extraction_anat, segmentation_anat, [('out_file', 'in_files')]), (brain_extraction_anat, normalization_anat, [('out_file', 'moving_image')]), - (brain_extraction_anat, threshold_white_matter, [('out_file', 'in_file')]), - (brain_extraction_anat, threshold_csf, [('out_file', 'in_file')]), + (segmentation_anat, split_segmentation_maps, [('probability_maps', 'inlist')]), + (split_segmentation_maps, threshold_white_matter, [('out2', 'in_file')]), + (split_segmentation_maps, threshold_csf, [('out1', 'in_file')]), (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), (threshold_csf, erode_csf, [('out_file', 'in_file')]), (erode_white_matter, alignment_white_matter, [('out_file', 'in_file')]), @@ -263,6 +267,119 @@ def get_preprocessing_outputs(self): return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] + def get_session_information(event_file): + """ + Extract information from an event file, to setup the model. 4 regressors are extracted : + - event: a regressor with 4 second ON duration + - gain : a parametric modulation of events corresponding to gain magnitude. Mean centred. + - loss : a parametric modulation of events corresponding to loss magnitude. Mean centred. + - response : a regressor with 1 for accept and -1 for reject. Mean centred. 
+ + Parameters : + - event_file : str, event file corresponding to the run and the subject to analyze + + Returns : + - subject_info : list of Bunch containing event information + """ + from nipype.interfaces.base import Bunch + + condition_names = ['event', 'gain', 'loss', 'response'] + onset = {} + duration = {} + amplitude = {} + + # Create dictionary items with empty lists + for condition in condition_names: + onset.update({condition : []}) + duration.update({condition : []}) + amplitude.update({condition : []}) + + # Parse information in the event_file + with open(event_file, 'rt') as file: + next(file) # skip the header + + for line in file: + info = line.strip().split() + + for condition in condition_names: + if condition == 'gain': + onset[condition].append(float(info[0])) + duration[condition].append(float(info[4])) # TODO : change to info[1] (= 4) ? + amplitude[condition].append(float(info[2])) + elif condition == 'loss': + onset[condition].append(float(info[0])) + duration[condition].append(float(info[4])) # TODO : change to info[1] (= 4) ? + amplitude[condition].append(float(info[3])) + elif condition == 'event': + onset[condition].append(float(info[0])) + duration[condition].append(float(info[1])) + amplitude[condition].append(1.0) + elif condition == 'response': + onset[condition].append(float(info[0])) + duration[condition].append(float(info[1])) # TODO : change to info[4] (= RT) ? + if 'accept' in info[5]: + amplitude[condition].append(1.0) + elif 'reject' in info[5]: + amplitude[condition].append(-1.0) + else: + amplitude[condition].append(0.0) + + return [ + Bunch( + conditions = condition_names, + onsets = [onset[k] for k in condition_names], + durations = [duration[k] for k in condition_names], + amplitudes = [amplitude[k] for k in condition_names], + regressor_names = None, + regressors = None) + ] + + def get_parameters_file(filepath, subject_id, run_id, working_dir): + """ + Create a tsv file with only desired parameters per subject per run. + + Parameters : + - filepath : path to subject parameters file (i.e. one per run) + - subject_id : subject for whom the 1st level analysis is made + - run_id: run for which the 1st level analysis is made + - working_dir: str, name of the directory for intermediate results + + Return : + - parameters_file : paths to new files containing only desired parameters. 
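# [INFO] Editor's note (illustration only): the event files parsed by
# get_session_information above are whitespace-separated with columns
# [onset, duration, gain, loss, RT, participant_response], which is why
# info[2]/info[3] carry the gain/loss modulations and info[5] the choice.
# A miniature of the per-row logic on one fabricated line:
row = '4.071\t4\t18\t12\t2.388\tweakly_accept'.split()
onset, duration = float(row[0]), float(row[1])
gain, loss = float(row[2]), float(row[3])
response = 1.0 if 'accept' in row[5] else -1.0 if 'reject' in row[5] else 0.0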
+ """ + from os import makedirs + from os.path import join, isdir + + from pandas import read_csv, DataFrame + from numpy import array, transpose + + data_frame = read_csv(filepath, sep = '\t', header=0) + if 'NonSteadyStateOutlier00' in data_frame.columns: + temp_list = array([ + data_frame['X'], data_frame['Y'], data_frame['Z'], + data_frame['RotX'], data_frame['RotY'], data_frame['RotZ'], + data_frame['NonSteadyStateOutlier00']]) + else: + temp_list = array([ + data_frame['X'], data_frame['Y'], data_frame['Z'], + data_frame['RotX'], data_frame['RotY'], data_frame['RotZ']]) + retained_parameters = DataFrame(transpose(temp_list)) + + parameters_file = join(working_dir, 'parameters_file', + f'parameters_file_sub-{subject_id}_run{run_id}.tsv') + + makedirs(join(working_dir, 'parameters_file'), exist_ok = True) + + with open(parameters_file, 'w') as writer: + writer.write(retained_parameters.to_csv( + sep = '\t', index = False, header = False, na_rep = '0.0')) + + return parameters_file + + + + + def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None From 7579d5302be788c2a0a4e0861f6df8306365a2d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 14:23:40 +0200 Subject: [PATCH 036/116] Use partial volume files from segmentation [skip ci] --- narps_open/pipelines/team_08MQ.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 7c2e2221..0aa53368 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -74,7 +74,8 @@ def get_preprocessing(self): # FAST Node - Segmentation of anatomical images segmentation_anat = Node(FAST(), name = 'segmentation_anat') segmentation_anat.inputs.no_bias = True # Bias field was already removed - segmentation_anat.inputs.segments = True # One image per tissue class + segmentation_anat.inputs.segments = False # Only output partial volume estimation + segmentation_anat.inputs.probability_maps = False # Only output partial volume estimation # Split Node - Split probability maps as they output from the segmentation node split_segmentation_maps = Node(Split(), name = 'split_segmentation_maps') @@ -201,7 +202,7 @@ def get_preprocessing(self): (bias_field_correction, brain_extraction_anat, [('restored_image', 'in_file')]), (brain_extraction_anat, segmentation_anat, [('out_file', 'in_files')]), (brain_extraction_anat, normalization_anat, [('out_file', 'moving_image')]), - (segmentation_anat, split_segmentation_maps, [('probability_maps', 'inlist')]), + (segmentation_anat, split_segmentation_maps, [('partial_volume_files', 'inlist')]), (split_segmentation_maps, threshold_white_matter, [('out2', 'in_file')]), (split_segmentation_maps, threshold_csf, [('out1', 'in_file')]), (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), From d26e1885930d1c3b38caacb2ecfcfd501110d93e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 14:28:48 +0200 Subject: [PATCH 037/116] Use partial volume files from segmentation [skip ci] --- narps_open/pipelines/team_08MQ.py | 1 + 1 file changed, 1 insertion(+) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 0aa53368..13de59b8 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -80,6 +80,7 @@ def get_preprocessing(self): # Split Node - Split probability maps as they output from the 
segmentation node split_segmentation_maps = Node(Split(), name = 'split_segmentation_maps') split_segmentation_maps.inputs.splits = [1, 1, 1] + split_segmentation_maps.inputs.squeeze = True # Unfold one-element splits removing the list # ANTs Node - Normalization of anatomical images to T1 MNI152 space # https://github.com/ANTsX/ANTs/wiki/Anatomy-of-an-antsRegistration-call From 23d3af9bd2a861c92520bc2d0bd8442f9f37fbf5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 15:43:54 +0200 Subject: [PATCH 038/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 233 +++++++++++++++++++++--------- 1 file changed, 167 insertions(+), 66 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 13de59b8..0f973403 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -9,8 +9,14 @@ from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( - FSLCommand, FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, - Threshold, Info, SUSAN, FLIRT, ApplyWarp, EpiReg, ApplyXFM, ConvertXFM + FSLCommand, + + FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, + Threshold, Info, SUSAN, FLIRT, EpiReg, ApplyXFM, ConvertXFM, + + Level1Design, FEATModel, L2Model + + # , Merge, FLAMEO, FILMGLS, Randomise, MultipleRegressDesign ) from nipype.algorithms.confounds import CompCor from nipype.interfaces.ants import Registration, ApplyTransforms @@ -57,7 +63,7 @@ def get_preprocessing(self): select_files = Node(SelectFiles(file_templates), name = 'select_files') select_files.inputs.base_directory = self.directories.dataset_dir - # DataSink Node - store the wanted results in the wanted repository + # DataSink Node - store the wanted results in the wanted directory data_sink = Node(DataSink(), name = 'data_sink') data_sink.inputs.base_directory = self.directories.output_dir @@ -256,13 +262,17 @@ def get_preprocessing_outputs(self): parameters = { 'subject_id': self.subject_list, 'run_id': self.run_list, - 'file': ['components_file.txt'] + 'file': [ + 'components_file.txt', + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par', + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_trans.nii.gz' + ] } parameter_sets = product(*parameters.values()) template = join( self.directories.output_dir, 'preprocessing', - '_subject_id_{subject_id}_run_id_{run_id}', + '_run_id_{run_id}_subject_id_{subject_id}', '{file}' ) @@ -286,15 +296,15 @@ def get_session_information(event_file): from nipype.interfaces.base import Bunch condition_names = ['event', 'gain', 'loss', 'response'] - onset = {} - duration = {} - amplitude = {} + onsets = {} + durations = {} + amplitudes = {} # Create dictionary items with empty lists for condition in condition_names: - onset.update({condition : []}) - duration.update({condition : []}) - amplitude.update({condition : []}) + onsets.update({condition : []}) + durations.update({condition : []}) + amplitudes.update({condition : []}) # Parse information in the event_file with open(event_file, 'rt') as file: @@ -305,90 +315,181 @@ def get_session_information(event_file): for condition in condition_names: if condition == 'gain': - onset[condition].append(float(info[0])) - duration[condition].append(float(info[4])) # TODO : change to info[1] (= 4) ? 
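# [INFO] A minimal sketch (editor's illustration, fabricated values) of the
# Bunch that the onsets/durations/amplitudes dictionaries being renamed in
# this hunk feed into; SpecifyModel aligns the three inner lists by
# condition index:
from nipype.interfaces.base import Bunch
subject_info = Bunch(
    conditions = ['event', 'gain', 'loss', 'response'],
    onsets = [[4.071, 10.895]] * 4,
    durations = [[4.0, 4.0]] * 4,
    amplitudes = [[1.0, 1.0], [18.0, 34.0], [12.0, 10.0], [1.0, -1.0]])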
- amplitude[condition].append(float(info[2])) + onsets[condition].append(float(info[0])) + durations[condition].append(float(info[4])) # TODO : change to info[1] (= 4) ? + amplitudes[condition].append(float(info[2])) elif condition == 'loss': - onset[condition].append(float(info[0])) - duration[condition].append(float(info[4])) # TODO : change to info[1] (= 4) ? - amplitude[condition].append(float(info[3])) + onsets[condition].append(float(info[0])) + durations[condition].append(float(info[4])) # TODO : change to info[1] (= 4) ? + amplitudes[condition].append(float(info[3])) elif condition == 'event': - onset[condition].append(float(info[0])) - duration[condition].append(float(info[1])) - amplitude[condition].append(1.0) + onsets[condition].append(float(info[0])) + durations[condition].append(float(info[1])) + amplitudes[condition].append(1.0) elif condition == 'response': - onset[condition].append(float(info[0])) - duration[condition].append(float(info[1])) # TODO : change to info[4] (= RT) ? + onsets[condition].append(float(info[0])) + durations[condition].append(float(info[1])) # TODO : change to info[4] (= RT) ? if 'accept' in info[5]: - amplitude[condition].append(1.0) + amplitudes[condition].append(1.0) elif 'reject' in info[5]: - amplitude[condition].append(-1.0) + amplitudes[condition].append(-1.0) else: - amplitude[condition].append(0.0) + amplitudes[condition].append(0.0) return [ Bunch( conditions = condition_names, - onsets = [onset[k] for k in condition_names], - durations = [duration[k] for k in condition_names], - amplitudes = [amplitude[k] for k in condition_names], + onsets = [onsets[k] for k in condition_names], + durations = [durations[k] for k in condition_names], + amplitudes = [amplitudes[k] for k in condition_names], regressor_names = None, regressors = None) ] - def get_parameters_file(filepath, subject_id, run_id, working_dir): + def get_run_level_contrasts(): """ - Create a tsv file with only desired parameters per subject per run. - - Parameters : - - filepath : path to subject parameters file (i.e. one per run) - - subject_id : subject for whom the 1st level analysis is made - - run_id: run for which the 1st level analysis is made - - working_dir: str, name of the directory for intermediate results + Create a list of tuples that represent contrasts. + Each contrast is in the form : + (Name,Stat,[list of condition names],[weights on those conditions]) - Return : - - parameters_file : paths to new files containing only desired parameters. + Returns: + - contrasts: list of tuples, list of contrasts to analyze """ - from os import makedirs - from os.path import join, isdir + # List of condition names + conditions = ['gain', 'loss'] - from pandas import read_csv, DataFrame - from numpy import array, transpose + # Return contrast list + return [ + # Positive parametric effect of gain + ('gain', 'T', conditions, [1, 0]), + # Positive parametric effect of loss + ('loss', 'T', conditions, [0, 1]), + # Negative parametric effect of loss. 
+ ('loss', 'T', conditions, [0, -1]) + ] + + def get_run_level_analysis(self): + """ Return a Nipype workflow describing the run level analysis part of the pipeline - data_frame = read_csv(filepath, sep = '\t', header=0) - if 'NonSteadyStateOutlier00' in data_frame.columns: - temp_list = array([ - data_frame['X'], data_frame['Y'], data_frame['Z'], - data_frame['RotX'], data_frame['RotY'], data_frame['RotZ'], - data_frame['NonSteadyStateOutlier00']]) - else: - temp_list = array([ - data_frame['X'], data_frame['Y'], data_frame['Z'], - data_frame['RotX'], data_frame['RotY'], data_frame['RotZ']]) - retained_parameters = DataFrame(transpose(temp_list)) + Returns: + - run_level_analysis : nipype.WorkFlow + """ - parameters_file = join(working_dir, 'parameters_file', - f'parameters_file_sub-{subject_id}_run{run_id}.tsv') + # IdentityInterface node - allows to iterate over subjects and runs + info_source = Node(IdentityInterface( + fields = ['subject_id', 'run_id']), + name = 'info_source') + info_source.iterables = [ + ('run_id', self.run_list), + ('subject_id', self.subject_list), + ] - makedirs(join(working_dir, 'parameters_file'), exist_ok = True) + # SelectFiles node - to select necessary files + templates = { + # Functional MRI + 'func' : join(self.directories.output_dir, 'preprocessing', + '_run_id_{run_id}_subject_id_{subject_id}', + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_trans.nii.gz' + ), + # Event file + 'event' : join('sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-{run_id}_events.tsv' + ), + # Motion parameters + 'motion' : join(self.directories.output_dir, 'preprocessing', + '_run_id_{run_id}_subject_id_{subject_id}', + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par', + ) + } + select_files = Node(SelectFiles(templates), name = 'selectfiles') + select_files.inputs.base_directory = self.directories.dataset_dir - with open(parameters_file, 'w') as writer: - writer.write(retained_parameters.to_csv( - sep = '\t', index = False, header = False, na_rep = '0.0')) + # DataSink Node - store the wanted results in the wanted directory + data_sink = Node(DataSink(), name='datasink') + data_sink.inputs.base_directory = self.directories.output_dir - return parameters_file + # Function Node get_session_information - Get subject information from event files + session_information = Node(Function(), name = 'session_information') + session_information.inputs.function = self.get_session_information + session_information.inputs.input_names = ['event_file'] + session_information.inputs.output_names = ['session_information'] + + # SpecifyModel - Generates a model + specify_model = Node(SpecifyModel(), name = 'specify_model') + specify_model.inputs.high_pass_filter_cutoff = 90 + specify_model.inputs.input_units = 'secs' + specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime'] + specify_model.inputs.parameter_source = 'FSL' # Source of motion parameters. 
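# [INFO] Editor's note: the session_information node above declares its only
# output as 'session_information', but the connect list below reads from a
# port named 'subject_info', which nipype will reject when the workflow is
# built; none of the visible follow-up patches (039-043) touch this. A
# corrected hook-up would read, for instance:
#     (session_information, specify_model, [('session_information', 'subject_info')]),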
+ + # Function Node get_contrasts - Get the list of contrasts + contrasts = Node(Function(), name = 'contrasts') + contrasts.inputs.function = self.get_contrasts + contrasts.inputs.input_names = [] + contrasts.inputs.output_names = ['contrasts'] + + # Level1Design Node - Generate files for first level computation + l1_design = Node(Level1Design(), 'l1_design') + l1_design.inputs.bases = { + 'dgamma':{'derivs' : True} # Canonical double gamma HRF plus temporal derivative + } + l1_design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] + l1_design.inputs.model_serial_correlations = True + + # FEATModel Node - Generate first level model + model_generation = Node(FEATModel(), name = 'model_generation') + + # FILMGLS Node - Estimate first level model + model_estimate = Node(FILMGLS(), name = 'model_estimate') + + # Create l1 analysis workflow and connect its nodes + run_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = 'run_level_analysis' + ) + run_level_analysis.connect([ + (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), + (select_files, session_information, [('event', 'event_file')]), + (session_information, specify_model, [('subject_info', 'subject_info')]), + (select_files, specify_model, [('motion', 'realignment_parameters')]), + (select_files, specify_model, [('func', 'functional_runs')]), + (contrasts, l1_design, [('contrasts', 'contrasts')]), + (specify_model, l1_design, [('session_info', 'session_info')]), + (l1_design, model_generation, [ + ('ev_files', 'ev_files'), + ('fsf_files', 'fsf_file')]), + (select_files, model_estimate, [('func', 'in_file')]), + (model_generation, model_estimate, [ + ('con_file', 'tcon_file'), + ('design_file', 'design_file')]), + (model_estimate, data_sink, [('results_dir', 'l1_analysis.@results')]), + (model_generation, data_sink, [ + ('design_file', 'l1_analysis.@design_file'), + ('design_image', 'l1_analysis.@design_img')]), + ]) + + return l1_analysis + def get_run_level_outputs(self): + """ Return a list of the files generated by the run level analysis """ + return ['fake_file'] + """ + Group level + Ordinary least squares. Pooled variance. + Second level + Positive one-sample ttest over first level contrast estimates. + Group level + Group effect for each first level contrast for each of the two groups. + Contrast of positive parametric effect of loss, testing for equal range group responses being greater than equal indifference group. - def get_run_level_analysis(self): - """ Return a Nipype workflow describing the run level analysis part of the pipeline """ - return None + TFCE - def get_run_level_outputs(self): - """ Return a list of the files generated by the run level analysis """ - return ['fake_file'] + pval_computation : Permutation testing implemented in randomise (10,000 permutations). + multiple_testing_correction : FWE permutation (10,000 permutations). 
+ comments_analysis : NA + """ def get_subject_information(event_file: str): """ From 6dfc51ff4ceb97b6527faff1b2aec02aee936f91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 15:46:32 +0200 Subject: [PATCH 039/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 0f973403..ffcef483 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -409,10 +409,11 @@ def get_run_level_analysis(self): data_sink.inputs.base_directory = self.directories.output_dir # Function Node get_session_information - Get subject information from event files - session_information = Node(Function(), name = 'session_information') + session_information = Node(Function( + input_names = ['event_file'], + output_names = ['session_information'] + ), name = 'session_information') session_information.inputs.function = self.get_session_information - session_information.inputs.input_names = ['event_file'] - session_information.inputs.output_names = ['session_information'] # SpecifyModel - Generates a model specify_model = Node(SpecifyModel(), name = 'specify_model') @@ -422,10 +423,11 @@ def get_run_level_analysis(self): specify_model.inputs.parameter_source = 'FSL' # Source of motion parameters. # Function Node get_contrasts - Get the list of contrasts - contrasts = Node(Function(), name = 'contrasts') + contrasts = Node(Function( + input_names = [], + output_names = ['contrasts'] + ), name = 'contrasts') contrasts.inputs.function = self.get_contrasts - contrasts.inputs.input_names = [] - contrasts.inputs.output_names = ['contrasts'] # Level1Design Node - Generate files for first level computation l1_design = Node(Level1Design(), 'l1_design') From 4adfe4343f2d166a1e7d46192d1b6fc3c1ff55b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 15:56:41 +0200 Subject: [PATCH 040/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 219 +++++++----------------------- 1 file changed, 51 insertions(+), 168 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ffcef483..aae989e9 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -4,8 +4,9 @@ """ Write the work of NARPS team 08MQ using Nipype """ from os.path import join +from itertools import product -from nipype import Node, Workflow # , JoinNode, MapNode +from nipype import Node, Workflow from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( @@ -14,13 +15,13 @@ FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, Threshold, Info, SUSAN, FLIRT, EpiReg, ApplyXFM, ConvertXFM, - Level1Design, FEATModel, L2Model + Level1Design, FEATModel, L2Model, FILMGLS - # , Merge, FLAMEO, FILMGLS, Randomise, MultipleRegressDesign + # , Merge, FLAMEO, Randomise, MultipleRegressDesign ) from nipype.algorithms.confounds import CompCor +from nipype.algorithms.modelgen import SpecifyModel from nipype.interfaces.ants import Registration, ApplyTransforms - from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation @@ -34,7 +35,7 @@ def __init__(self): super().__init__() self.fwhm = 6.0 self.team_id = '08MQ' - self.contrast_list = [] + 
self.contrast_list = ['1', '2', '3'] def get_preprocessing(self): """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ @@ -242,7 +243,7 @@ def get_preprocessing(self): (slice_time_correction, smoothing, [('slice_time_corrected_file', 'in_file')]), (smoothing, alignment_func_to_anat, [('smoothed_file', 'in_file')]), (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), - (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), + (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'input_image')]), (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'transforms')]), (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space @@ -276,7 +277,7 @@ def get_preprocessing_outputs(self): '{file}' ) - return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] def get_session_information(event_file): @@ -361,11 +362,11 @@ def get_run_level_contrasts(): # Return contrast list return [ # Positive parametric effect of gain - ('gain', 'T', conditions, [1, 0]), + ('positive_effect_gain', 'T', conditions, [1, 0]), # Positive parametric effect of loss - ('loss', 'T', conditions, [0, 1]), + ('positive_effect_loss', 'T', conditions, [0, 1]), # Negative parametric effect of loss. - ('loss', 'T', conditions, [0, -1]) + ('negative_effect_loss', 'T', conditions, [0, -1]) ] def get_run_level_analysis(self): @@ -427,7 +428,7 @@ def get_run_level_analysis(self): input_names = [], output_names = ['contrasts'] ), name = 'contrasts') - contrasts.inputs.function = self.get_contrasts + contrasts.inputs.function = self.get_run_level_contrasts # Level1Design Node - Generate files for first level computation l1_design = Node(Level1Design(), 'l1_design') @@ -473,8 +474,44 @@ def get_run_level_analysis(self): def get_run_level_outputs(self): """ Return a list of the files generated by the run level analysis """ - return ['fake_file'] + parameters = { + 'run_id' : self.run_list, + 'subject_id' : self.subject_list, + 'file' : [ + 'run0.mat', + 'run0.png' + ] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'l1_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}' + ) + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + parameters = { + 'run_id' : self.run_list, + 'subject_id' : self.subject_list, + 'contrast_id' : self.contrast_list, + 'file' : [ + join('results', 'cope{contrast_id}.nii.gz'), + join('results', 'tstat{contrast_id}.nii.gz'), + join('results', 'varcope{contrast_id}.nii.gz'), + join('results', 'zstat{contrast_id}.nii.gz'), + ] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'l1_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}' + ) + + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + + return return_list """ Group level Ordinary least squares. Pooled variance. 
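# [INFO] Two editor's notes on the state of the code at this point. First,
# get_run_level_outputs above still builds its expected paths under
# 'l1_analysis', although patch 041 below renames the DataSink keys to
# 'run_level_analysis', so the declared outputs will no longer match what is
# actually written. Second, a hedged sketch of the group-level testing the
# docstring describes (FSL randomise with TFCE and 10,000 permutations; the
# input file name is hypothetical):
from nipype.interfaces.fsl import Randomise
randomise = Randomise()
randomise.inputs.in_file = 'merged_copes.nii.gz'  # 4D stack of subject-level copes
randomise.inputs.one_sample_group_mean = True
randomise.inputs.tfce = True
randomise.inputs.num_perm = 10000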
@@ -493,160 +530,6 @@ def get_run_level_outputs(self): comments_analysis : NA """ - def get_subject_information(event_file: str): - """ - Extract subject information from the event file, to create a Bunch with required data only. - - Parameters : - - event_file : event file corresponding to the run and the subject to analyze - - Returns : - - subject_info : list of Bunch for 1st level analysis. - """ - - """ - Canonical double gamma HRF plus temporal derivative. - Model consisted of: - - Event regressor with 4 second ON duration. - Parametric modulation of events corresponding to gain magnitude. Mean centred. - Parametric modulation of events corresponding to loss magnitude. Mean centred. - Response regressor with 1 for accept and -1 for reject. Mean centred. - - Six head motion parameters plus four aCompCor regressors. > - - Model and data had a 90s high-pass filter applied. - """ - - from nipype.interfaces.base import Bunch - - condition_names = ['event', 'gain', 'loss', 'response'] - - onsets = {} - durations = {} - amplitudes = {} - - # Creates dictionary items with empty lists for each condition. - for condition in condition_names: - onset.update({condition: []}) - duration.update({condition: []}) - amplitude.update({condition: []}) - - """ - onset = { - event: [], - gain: [], - loss: [], - response: [] - } - duration = { - event: [], - gain: [], - loss: [], - response: [] - } - amplitude = { - event: [], - gain: [], - loss: [], - response: [] - } - - - - [Mandatory] - conditions : list of names - onsets : lists of onsets corresponding to each condition - durations : lists of durations corresponding to each condition. Should be - left to a single 0 if all events are being modelled as impulses. - - [Optional] - regressor_names : list of str - list of names corresponding to each column. Should be None if - automatically assigned. - regressors : list of lists - values for each regressor - must correspond to the number of - volumes in the functional run - amplitudes : lists of amplitudes for each event. This will be ignored by - SPM's Level1Design. - - The following two (tmod, pmod) will be ignored by any Level1Design class - other than SPM: - - tmod : lists of conditions that should be temporally modulated. Should - default to None if not being used. - pmod : list of Bunch corresponding to conditions - - name : name of parametric modulator - - param : values of the modulator - - poly : degree of modulation - """ - - with open(event_file, 'rt') as file: - next(file) # skip the header - - for line in file: - info = line.strip().split() - # Creates list with onsets, duration and loss/gain for amplitude (FSL) - for condition in condition_names: - onset[condition].append(float(info[0])) - duration[condition].append(float(info[1])) - - if condition == 'gain': - amplitude[condition].append(float(info[2])) - elif condition == 'loss': - amplitude[condition].append(float(info[3])) - elif condition == 'event': - amplitude[condition].append(1.0) - elif condition == 'response': - if 'reject' in info[5]: - amplitude[condition].append(-1.0) - elif 'accept' in info[5]: - amplitude[condition].append(1.0) - else: - amplitude[condition].append(0.0) # TODO : zeros for NoResp ??? 
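# [INFO] Editor's note on the branch deleted above: both this old version and
# its replacement introduced in patch 038 map trials whose
# participant_response contains neither 'accept' nor 'reject' (e.g. 'NoResp')
# to an amplitude of 0.0, so missed responses stay in the design with zero
# weight on the response regressor rather than being dropped from the model.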
- - subject_info = [] - subject_info.append( - Bunch( - conditions = condition_names, - onsets = [onsets[c] for c in condition_names], - durations = [durations[c] for c in condition_names], - amplitudes = [amplitudes[c] for c in condition_names], - regressor_names = None, - regressors = None, - ) - ) - - return subject_info - - # [INFO] This function creates the contrasts that will be analyzed in the first level analysis - # [TODO] Adapt this example to your specific pipeline - def get_contrasts(): - """ - Create the list of tuples that represents contrasts. - Each contrast is in the form : - (Name,Stat,[list of condition names],[weights on those conditions]) - - Returns: - - contrasts: list of tuples, list of contrasts to analyze - - - First level - Positive parametric effect of gain; Positive parametric effect of loss; Negative parametric effect of loss. - Second level - Positive one-sample ttest over first level contrast estimates. - """ - # List of condition names - conditions = ['event', 'gain', 'loss', 'response'] - - # Create contrasts - positive_effect_gain = ('positive_effect_gain', 'T', conditions, [0, 1, 0]) - positive_effect_loss = ('positive_effect_loss', 'T', conditions, [0, 0, 1]) - negative_effect_loss = ('negative_effect_loss', 'T', conditions, [0, 0, -1]) - - # Contrast list - return [positive_effect_gain, positive_effect_loss, negative_effect_loss] - def get_subject_level_analysis(self): """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ @@ -679,7 +562,7 @@ def get_subject_level_analysis(self): # Subject information Node - get subject specific condition information subject_information = Node( Function( - function = self.get_subject_information, + function = self.get_run_level_contrasts, input_names = ['event_files', 'runs'], output_names = ['subject_info'] ), @@ -904,7 +787,7 @@ def get_group_level_analysis_sub_workflow(self, method): select_files = Node(SelectFiles(templates), name = 'select_files') select_files.inputs.base_directory = self.directories.dataset_dir select_files.inputs.force_list = True - + # Datasink node - to save important files data_sink = Node( DataSink(base_directory = self.directories.output_dir), From af08110b12d35cd906b674e85606626f3f746a9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 15:57:33 +0200 Subject: [PATCH 041/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index aae989e9..89b30d3f 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -464,13 +464,13 @@ def get_run_level_analysis(self): (model_generation, model_estimate, [ ('con_file', 'tcon_file'), ('design_file', 'design_file')]), - (model_estimate, data_sink, [('results_dir', 'l1_analysis.@results')]), + (model_estimate, data_sink, [('results_dir', 'run_level_analysis.@results')]), (model_generation, data_sink, [ - ('design_file', 'l1_analysis.@design_file'), - ('design_image', 'l1_analysis.@design_img')]), + ('design_file', 'run_level_analysis.@design_file'), + ('design_image', 'run_level_analysis.@design_img')]), ]) - return l1_analysis + return run_level_analysis def get_run_level_outputs(self): """ Return a list of the files generated by the run level analysis """ From dbfed24900ec542eaae929cf6e8342f3b56a462d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 
Oct 2023 15:59:20 +0200 Subject: [PATCH 042/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 65 ------------------------------- 1 file changed, 65 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 89b30d3f..5f8dca3a 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -533,75 +533,10 @@ def get_run_level_outputs(self): def get_subject_level_analysis(self): """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ - # Infosource Node - To iterate on subjects - info_source = Node(IdentityInterface( - fields = ['subject_id', 'run_id']), - name = 'info_source' - ) - info_source.iterables = [('subject_id', self.subject_list)] - - # Templates to select files node - templates = { - 'func': join(self.directories.output_dir, 'preprocessing', - '_run_id_*_subject_id_{subject_id}', - 'complete_filename_{subject_id}_complete_filename.nii', - ), - 'event': join(self.directories.dataset_dir, 'sub-{subject_id}', 'func', - 'sub-{subject_id}_task-MGT_run-*_events.tsv', - ) - } - - # SelectFiles node - to select necessary files - select_files = Node(SelectFiles(templates), name = 'select_files') - select_files.inputs.base_directory = self.directories.dataset_dir - - # DataSink Node - store the wanted results in the wanted repository - data_sink = Node(DataSink(), name = 'data_sink') - data_sink.inputs.base_directory = self.directories.output_dir - - # Subject information Node - get subject specific condition information - subject_information = Node( - Function( - function = self.get_run_level_contrasts, - input_names = ['event_files', 'runs'], - output_names = ['subject_info'] - ), - name = 'subject_information', - ) - subject_information.inputs.runs = self.run_list - - # Parameters Node - create files with parameters from subject session data - """parameters = Node( - Function( - function = self.get_parameters_file, - input_names = ['event_files', 'runs'], - output_names = ['parameters_files'] - ), - name = 'parameters', - ) - parameters.inputs.runs = self.run_list - """ - - # Contrasts node - get contrasts to compute from the model - contrasts = Node( - Function( - function = self.get_contrasts, - input_names = ['subject_id'], - output_names = ['contrasts'] - ), - name = 'contrasts', - ) - subject_level_analysis = Workflow( base_dir = self.directories.working_dir, name = 'subject_level_analysis' ) - subject_level_analysis.connect([ - (info_source, select_files, [('subject_id', 'subject_id')]), - (info_source, contrasts, [('subject_id', 'subject_id')]), - (select_files, subject_information, [('event', 'event_files')]), - ]) - return subject_level_analysis def get_subject_level_outputs(self): From d2951b3f3bcaf1f31ef531c2c56181d6090135ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 16:03:31 +0200 Subject: [PATCH 043/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 5f8dca3a..89f55314 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -402,19 +402,19 @@ def get_run_level_analysis(self): 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par', ) } - select_files = Node(SelectFiles(templates), name = 'selectfiles') + select_files = Node(SelectFiles(templates), name = 
'select_files') select_files.inputs.base_directory = self.directories.dataset_dir # DataSink Node - store the wanted results in the wanted directory - data_sink = Node(DataSink(), name='datasink') + data_sink = Node(DataSink(), name = 'data_sink') data_sink.inputs.base_directory = self.directories.output_dir # Function Node get_session_information - Get subject information from event files session_information = Node(Function( + function = self.get_session_information, input_names = ['event_file'], output_names = ['session_information'] ), name = 'session_information') - session_information.inputs.function = self.get_session_information # SpecifyModel - Generates a model specify_model = Node(SpecifyModel(), name = 'specify_model') @@ -425,10 +425,10 @@ def get_run_level_analysis(self): # Function Node get_contrasts - Get the list of contrasts contrasts = Node(Function( + function = self.get_run_level_contrasts, input_names = [], output_names = ['contrasts'] ), name = 'contrasts') - contrasts.inputs.function = self.get_run_level_contrasts # Level1Design Node - Generate files for first level computation l1_design = Node(Level1Design(), 'l1_design') From 1e46ec015b467502ac2791f88c13e0d41408a800 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 16:06:27 +0200 Subject: [PATCH 044/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 89f55314..650b3341 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -280,7 +280,7 @@ def get_preprocessing_outputs(self): return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] - def get_session_information(event_file): + def get_subject_information(event_file): """ Extract information from an event file, to setup the model. 
4 regressors are extracted : - event: a regressor with 4 second ON duration @@ -409,12 +409,12 @@ def get_run_level_analysis(self): data_sink = Node(DataSink(), name = 'data_sink') data_sink.inputs.base_directory = self.directories.output_dir - # Function Node get_session_information - Get subject information from event files - session_information = Node(Function( - function = self.get_session_information, + # Function Node get_subject_information - Get subject information from event files + subject_information = Node(Function( + function = self.get_subject_information, input_names = ['event_file'], - output_names = ['session_information'] - ), name = 'session_information') + output_names = ['subject_information'] + ), name = 'subject_information') # SpecifyModel - Generates a model specify_model = Node(SpecifyModel(), name = 'specify_model') @@ -451,8 +451,8 @@ def get_run_level_analysis(self): ) run_level_analysis.connect([ (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), - (select_files, session_information, [('event', 'event_file')]), - (session_information, specify_model, [('subject_info', 'subject_info')]), + (select_files, subject_information, [('event', 'event_file')]), + (subject_information, specify_model, [('subject_info', 'subject_info')]), (select_files, specify_model, [('motion', 'realignment_parameters')]), (select_files, specify_model, [('func', 'functional_runs')]), (contrasts, l1_design, [('contrasts', 'contrasts')]), From 3fe4ca2592c3efbf06e36ca1cdf97cc4c28ec320 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 16:09:12 +0200 Subject: [PATCH 045/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 650b3341..4804de36 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -413,7 +413,7 @@ def get_run_level_analysis(self): subject_information = Node(Function( function = self.get_subject_information, input_names = ['event_file'], - output_names = ['subject_information'] + output_names = ['subject_info'] ), name = 'subject_information') # SpecifyModel - Generates a model From 72635b1efd0f4327dce45e3eda7dec71559722ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 17:01:27 +0200 Subject: [PATCH 046/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 4804de36..4b187f1b 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -387,16 +387,16 @@ def get_run_level_analysis(self): # SelectFiles node - to select necessary files templates = { - # Functional MRI + # Functional MRI - computed by preprocessing 'func' : join(self.directories.output_dir, 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_trans.nii.gz' ), - # Event file + # Event file - from the original dataset 'event' : join('sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-{run_id}_events.tsv' ), - # Motion parameters + # Motion parameters - computed by preprocessing's motion_correction Node 'motion' : join(self.directories.output_dir, 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', 
'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par', From 4c62bf2f7f4d5d5e376034d1e687e887910aca43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 17:33:23 +0200 Subject: [PATCH 047/116] ANT's apply transform bug [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 4b187f1b..1a5f854e 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -189,6 +189,8 @@ def get_preprocessing(self): alignment_func_to_mni = Node(ApplyTransforms(), name = 'alignment_func_to_mni') alignment_func_to_mni.inputs.reference_image = \ Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + alignment_func_to_mni.inputs.dimension = 4 + alignment_func_to_mni.inputs.input_image_type = 3 # time series # Merge Node - Merge the two masks (WM and CSF) in one input for the next node merge_masks = Node(Merge(2), name = 'merge_masks') From e7217dac1d95842c63a731313332267d68221393 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 23 Oct 2023 17:36:17 +0200 Subject: [PATCH 048/116] ANT's apply transform bug [skip ci] --- narps_open/pipelines/team_08MQ.py | 1 - 1 file changed, 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 1a5f854e..ba43b8d7 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -190,7 +190,6 @@ def get_preprocessing(self): alignment_func_to_mni.inputs.reference_image = \ Info.standard_image('MNI152_T1_2mm_brain.nii.gz') alignment_func_to_mni.inputs.dimension = 4 - alignment_func_to_mni.inputs.input_image_type = 3 # time series # Merge Node - Merge the two masks (WM and CSF) in one input for the next node merge_masks = Node(Merge(2), name = 'merge_masks') From e029f371afdb6b76d4bb2592e929e4d94fff2b4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 24 Oct 2023 09:01:46 +0200 Subject: [PATCH 049/116] ANT's apply transform bug [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ba43b8d7..87c6cda8 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -189,7 +189,7 @@ def get_preprocessing(self): alignment_func_to_mni = Node(ApplyTransforms(), name = 'alignment_func_to_mni') alignment_func_to_mni.inputs.reference_image = \ Info.standard_image('MNI152_T1_2mm_brain.nii.gz') - alignment_func_to_mni.inputs.dimension = 4 + alignment_func_to_mni.inputs.dimension = 3 # Merge Node - Merge the two masks (WM and CSF) in one input for the next node merge_masks = Node(Merge(2), name = 'merge_masks') From e8f475dc36e7ecd909563183bf7f45c6e592a95c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 24 Oct 2023 09:19:44 +0200 Subject: [PATCH 050/116] ANT's apply transform bug [skip ci] --- narps_open/pipelines/team_08MQ.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 87c6cda8..ac7e3091 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -7,7 +7,7 @@ from itertools import product from nipype import Node, Workflow -from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split +from nipype.interfaces.utility import IdentityInterface, 
Function, Merge, Split, Select
 from nipype.interfaces.io import SelectFiles, DataSink
 from nipype.interfaces.fsl import (
     FSLCommand,
@@ -185,11 +185,15 @@ def get_preprocessing(self):
         alignment_func_to_anat = Node(ApplyXFM(), name = 'alignment_func_to_anat')
         alignment_func_to_anat.inputs.apply_xfm = True

+        # Select Node - Change the order of transforms coming from ANTs Registration
+        reverse_transform_order = Node(Select(), name = 'reverse_transform_order')
+        reverse_transform_order.inputs.index = [1, 0]
+
         # ApplyWarp Node - Alignment of functional data to MNI space
         alignment_func_to_mni = Node(ApplyTransforms(), name = 'alignment_func_to_mni')
         alignment_func_to_mni.inputs.reference_image = \
             Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
-        alignment_func_to_mni.inputs.dimension = 3
+        alignment_func_to_mni.inputs.dimension = 4

         # Merge Node - Merge the two masks (WM and CSF) in one input for the next node
         merge_masks = Node(Merge(2), name = 'merge_masks')
@@ -246,7 +250,8 @@ def get_preprocessing(self):
             (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]),
             (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]),
             (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'input_image')]),
-            (normalization_anat, alignment_func_to_mni, [('forward_transforms', 'transforms')]),
+            (normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]),
+            (reverse_transform_order, alignment_func_to_mni, [('out', 'transforms')]),
             (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space
             (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]),

From 04bdebacfb43f181d6312e8f6174faae7691e7d5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Tue, 24 Oct 2023 09:30:51 +0200
Subject: [PATCH 051/116] ANT's apply transform bug [skip ci]

---
 narps_open/pipelines/team_08MQ.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py
index ac7e3091..8b15de49 100644
--- a/narps_open/pipelines/team_08MQ.py
+++ b/narps_open/pipelines/team_08MQ.py
@@ -21,7 +21,7 @@
 )
 from nipype.algorithms.confounds import CompCor
 from nipype.algorithms.modelgen import SpecifyModel
-from nipype.interfaces.ants import Registration, ApplyTransforms
+from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform

 from narps_open.pipelines import Pipeline
 from narps_open.data.task import TaskInformation
@@ -190,10 +190,10 @@ def get_preprocessing(self):
         reverse_transform_order.inputs.index = [1, 0]

         # ApplyWarp Node - Alignment of functional data to MNI space
-        alignment_func_to_mni = Node(ApplyTransforms(), name = 'alignment_func_to_mni')
+        alignment_func_to_mni = Node(WarpTimeSeriesImageMultiTransform(),
+            name = 'alignment_func_to_mni')
         alignment_func_to_mni.inputs.reference_image = \
             Info.standard_image('MNI152_T1_2mm_brain.nii.gz')
-        alignment_func_to_mni.inputs.dimension = 4

         # Merge Node - Merge the two masks (WM and CSF) in one input for the next node
         merge_masks = Node(Merge(2), name = 'merge_masks')
@@ -251,7 +251,7 @@ def get_preprocessing(self):
             (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'input_image')]),
             (normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]),
-            (reverse_transform_order, alignment_func_to_mni, [('out', 'transforms')]),
+
(reverse_transform_order, alignment_func_to_mni, [('out', 'transformation_series')]), (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), From 858004af97f32f51e5a703cb44337444fc3553a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 24 Oct 2023 10:15:11 +0200 Subject: [PATCH 052/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 8b15de49..f5b26752 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -272,7 +272,7 @@ def get_preprocessing_outputs(self): 'file': [ 'components_file.txt', 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par', - 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_trans.nii.gz' + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz' ] } parameter_sets = product(*parameters.values()) From b63a052ccf697bc5bc6f260f1dc8815ea98d1c1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 24 Oct 2023 10:24:36 +0200 Subject: [PATCH 053/116] First try of run level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index f5b26752..130da240 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -396,7 +396,7 @@ def get_run_level_analysis(self): # Functional MRI - computed by preprocessing 'func' : join(self.directories.output_dir, 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', - 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_trans.nii.gz' + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz' ), # Event file - from the original dataset 'event' : join('sub-{subject_id}', 'func', From 8ba967f07e392551e07190a58158add4f47db539 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 24 Oct 2023 14:00:40 +0200 Subject: [PATCH 054/116] First try of subject level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 377 +++++++++++++++++++++++------- 1 file changed, 288 insertions(+), 89 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 130da240..059d5d96 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -422,7 +422,7 @@ def get_run_level_analysis(self): output_names = ['subject_info'] ), name = 'subject_information') - # SpecifyModel - Generates a model + # SpecifyModel Node - Generates a model specify_model = Node(SpecifyModel(), name = 'specify_model') specify_model.inputs.high_pass_filter_cutoff = 90 specify_model.inputs.input_units = 'secs' @@ -492,7 +492,7 @@ def get_run_level_outputs(self): parameter_sets = product(*parameters.values()) template = join( self.directories.output_dir, - 'l1_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}' + 'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}' ) return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] @@ -511,13 +511,100 @@ def get_run_level_outputs(self): parameter_sets = product(*parameters.values()) template = join( 
self.directories.output_dir, - 'l1_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}' + 'run_level_analysis', '_run_id_{run_id}_subject_id_{subject_id}','{file}' ) return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] return return_list + + def get_subject_level_analysis(self): + """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ + + # IdentityInterface node - allows to iterate over subjects and contrasts + info_source = Node(IdentityInterface( + fields = ['subject_id', 'contrast_id']), + name = 'info_source') + info_source.iterables = [ + ('subject_id', self.subject_list), + ('contrast_id', self.contrast_list) + ] + + # SelectFiles Node - select necessary files + templates = { + 'cope' : join('run_level_analysis', '_run_id_*_subject_id_{subject_id}', + 'results', 'cope{contrast_id}.nii.gz'), + 'varcope' : join('run_level_analysis', '_run_id_*_subject_id_{subject_id}', + 'results', 'varcope{contrast_id}.nii.gz'), + 'mask': join('preprocessing', '_run_id_*_subject_id_{subject_id}', + '') + } + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.output_dir + + # DataSink Node - store the wanted results in the wanted directory + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + + # L2Model Node - Generate subject specific second level model + generate_model = Node(L2Model(), name = 'generate_model') + generate_model.inputs.num_copes = len(self.run_list) + + # Merge Node - Merge copes files for each subject + merge_copes = Node(Merge(), name = 'merge_copes') + merge_copes.inputs.dimension = 't' + + # Merge Node - Merge varcopes files for each subject + merge_varcopes = Node(Merge(), name = 'merge_varcopes') + merge_varcopes.inputs.dimension = 't' + + # FLAMEO Node - Estimate model + estimate_model = Node(FLAMEO(), name = 'estimate_model') + estimate_model.inputs.run_mode = 'fe' # Fixed effect + + # Second level (single-subject, mean of all four scans) analyses: Fixed effects analysis. 
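    # Sketch (not part of the patch): L2Model builds the design files that the
    # fixed-effects FLAMEO step below consumes; num_copes = 4 stands in for the
    # four runs per subject. The interface writes plain-text design files and
    # should not need an FSL installation to run.
    from nipype.interfaces.fsl import L2Model

    model = L2Model(num_copes = 4)
    result = model.run()

    # design.mat : one 'group mean' regressor, a column of four ones
    # design.con : a single T contrast with weight 1 on that mean
    # design.grp : all four copes assigned to one variance group
    print(result.outputs.design_mat, result.outputs.design_con, result.outputs.design_grp)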
+ subject_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = 'subject_level_analysis') + subject_level_analysis.connect([ + (infosource_sub_level, select_files, [ + ('subject_id', 'subject_id'), + ('contrast_id', 'contrast_id')]), + (select_files, merge_copes, [('cope', 'in_files')]), + (select_files, merge_varcopes, [('varcope', 'in_files')]), + (select_files, estimate_model, [('mask', 'mask_file')]), + (merge_copes, estimate_model, [('merged_file', 'cope_file')]), + (merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]), + (generate_model, estimate_model, [ + ('design_mat', 'design_file'), + ('design_con', 't_con_file'), + ('design_grp', 'cov_split_file')]), + (estimate_model, data_sink, [ + ('zstats', 'subject_level_analysis.@stats'), + ('tstats', 'subject_level_analysis.@tstats'), + ('copes', 'subject_level_analysis.@copes'), + ('var_copes', 'subject_level_analysis.@varcopes')])]) + + return subject_level_analysis + + def get_subject_level_outputs(self): + """ Return a list of the files generated by the subject level analysis """ + + parameters = { + 'contrast_id' : self.contrast_list, + 'subject_id' : self.subject_list, + 'file' : ['cope1.nii.gz', 'tstat1.nii.gz', 'varcope1.nii.gz', 'zstat1.nii.gz'] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_{subject_id}','{file}' + ) + + return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] + """ Group level Ordinary least squares. Pooled variance. @@ -527,41 +614,25 @@ def get_run_level_outputs(self): Group level Group effect for each first level contrast for each of the two groups. - Contrast of positive parametric effect of loss, testing for equal range group responses being greater than equal indifference group. + Contrast of positive parametric effect of loss, + testing for equal range group responses being greater than equal indifference group. TFCE pval_computation : Permutation testing implemented in randomise (10,000 permutations). multiple_testing_correction : FWE permutation (10,000 permutations). - comments_analysis : NA """ - def get_subject_level_analysis(self): - """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ - - subject_level_analysis = Workflow( - base_dir = self.directories.working_dir, - name = 'subject_level_analysis' - ) - return subject_level_analysis - - def get_subject_level_outputs(self): - """ Return a list of the files generated by the subject level analysis """ - return ['fake_file'] - - def get_subgroups_contrasts( - copes, varcopes, subject_list: list, participants_file: str - ): + def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_file: str): """ - This function return the file list containing only the files - belonging to subject in the wanted group. - + Return the file list containing only the files belonging to subject in the wanted group. 
+ Parameters : - copes: original file list selected by select_files node - varcopes: original file list selected by select_files node - subject_ids: list of subject IDs that are analyzed - participants_file: file containing participants characteristics - + Returns : - copes_equal_indifference : a subset of copes corresponding to subjects in the equalIndifference group @@ -620,10 +691,7 @@ def get_subgroups_contrasts( if sub_id[-2][-3:] in subject_list: varcopes_global.append(varcope) - return (copes_equal_indifference, copes_equal_range, - varcopes_equal_indifference, varcopes_equal_range, - equal_indifference_id, equal_range_id, - copes_global, varcopes_global) + return copes_equal_indifference, copes_equal_range, varcopes_equal_indifference, varcopes_equal_range,equal_indifference_id, equal_range_id,copes_global, varcopes_global def get_regressors( equal_range_id: list, @@ -643,6 +711,7 @@ def get_regressors( Returns: - regressors: regressors used to distinguish groups in FSL group analysis """ + # For one sample t-test, creates a dictionary # with a list of the size of the number of participants if method == 'equalRange': @@ -656,33 +725,27 @@ def get_regressors( # Each list contains n_sub values with 0 and 1 depending on the group of the participant # For equalRange_reg list --> participants with a 1 are in the equal range group elif method == 'groupComp': - equal_range_regressors = [ + equalRange_reg = [ 1 for i in range(len(equal_range_id) + len(equal_indifference_id)) ] - equal_indifference_regressors = [ + equalIndifference_reg = [ 0 for i in range(len(equal_range_id) + len(equal_indifference_id)) ] for index, subject_id in enumerate(subject_list): if subject_id in equal_indifference_id: - equal_indifference_regressors[index] = 1 - equal_range_regressors[index] = 0 + equalIndifference_reg[index] = 1 + equalRange_reg[index] = 0 regressors = dict( - equalRange = equal_range_regressors, - equalIndifference = equal_indifference_regressors + equalRange = equalRange_reg, + equalIndifference = equalIndifference_reg ) return regressors def get_group_level_analysis(self): - """ - Return all workflows for the group level analysis. - - Returns; - - a list of nipype.WorkFlow - """ - return None + """ Return all workflows for the group level analysis. """ methods = ['equalRange', 'equalIndifference', 'groupComp'] return [self.get_group_level_analysis_sub_workflow(method) for method in methods] @@ -690,17 +753,14 @@ def get_group_level_analysis(self): def get_group_level_analysis_sub_workflow(self, method): """ Return a workflow for the group level analysis. 
- + Parameters: - method: one of 'equalRange', 'equalIndifference' or 'groupComp' - + Returns: - group_level_analysis: nipype.WorkFlow """ - # [INFO] The following part stays the same for all preprocessing pipelines - - # Infosource node - iterate over the list of contrasts generated - # by the subject level analysis + # Infosource Node - iterate over the contrasts generated by the subject level analysis info_source = Node( IdentityInterface( fields = ['contrast_id', 'subjects'], @@ -710,15 +770,12 @@ def get_group_level_analysis_sub_workflow(self, method): ) info_source.iterables = [('contrast_id', self.contrast_list)] - # Templates to select files node - # [TODO] Change the name of the files depending on the filenames - # of results of first level analysis - templates = { - 'cope' : join(self.directories.output_dir, + # SelectFiles Node - select necessary files + template = { + 'cope' : join( 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'), 'varcope' : join( - self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz'), 'participants' : join( @@ -726,47 +783,75 @@ def get_group_level_analysis_sub_workflow(self, method): 'participants.tsv') } select_files = Node(SelectFiles(templates), name = 'select_files') - select_files.inputs.base_directory = self.directories.dataset_dir + select_files.inputs.base_directory = self.directories.output_dir select_files.inputs.force_list = True - # Datasink node - to save important files - data_sink = Node( - DataSink(base_directory = self.directories.output_dir), - name = 'data_sink', - ) + # Datasink Node - save important files + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir - subgroups_contrasts = Node(Function( - function = self.get_subgroups_contrasts, - input_names=['copes', 'varcopes', 'subject_ids', 'participants_file'], - output_names=[ - 'copes_equalIndifference', - 'copes_equalRange', - 'varcopes_equalIndifference', - 'varcopes_equalRange', - 'equalIndifference_id', - 'equalRange_id', - 'copes_global', - 'varcopes_global' + # Function Node get_subgroups_contrasts - Get the contrast files for each subgroup + contrasts = Node( + Function( + function = self.get_subgroups_contrasts, + input_names = ['copes', 'varcopes', 'subject_ids', 'participants_file'], + output_names = [ + 'copes_equalIndifference', + 'copes_equalRange', + 'varcopes_equalIndifference', + 'varcopes_equalRange', + 'equalIndifference_id', + 'equalRange_id', + 'copes_global', + 'varcopes_global' ] ), name = 'subgroups_contrasts', ) - regressors = Node(Function( - function = self.get_regressors, - input_names = [ - 'equalRange_id', - 'equalIndifference_id', - 'method', - 'subject_list', + # Function Node get_regressors - Get regressors + regressors = Node( + Function( + function = self.get_regressors, + input_names = [ + 'equalRange_id', + 'equalIndifference_id', + 'method', + 'subject_list', ], - output_names = ['regressors'] + output_names = ['regressors'] ), name = 'regressors', ) regressors.inputs.method = method regressors.inputs.subject_list = self.subject_list + # Merge Node - Merge cope files + merge_copes = Node(Merge(), name = 'merge_copes') + merge_copes.inputs.dimension = 't' + + # Merge Node - Merge cope files + merge_varcopes = Node(Merge(), name = 'merge_varcopes') + merge_varcopes.inputs.dimension = 't' + + # MultipleRegressDesign Node - Specify model + specify_model = Node(MultipleRegressDesign(), name = 
'specify_model') + + # FLAMEO Node - Estimate model + estimate_model = Node(FLAMEO(), name = 'estimate_model') + estimate_model.inputs.run_mode = 'ols', # Ordinary least squares + estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz') + + # Cluster Node - + cluster = MapNode(Cluster( + threshold = 3.1, + out_threshold_file = True + ), + name = 'cluster', + iterfield = ['in_file', 'cope_file'], + synchronize = True + ) + # Compute the number of participants used to do the analysis nb_subjects = len(self.subject_list) @@ -775,27 +860,141 @@ def get_group_level_analysis_sub_workflow(self, method): base_dir = self.directories.working_dir, name = f'group_level_analysis_{method}_nsub_{nb_subjects}' ) - group_level_analysis.connect([ - (info_source, select_files, [('contrast_id', 'contrast_id')]) - ]) + group_level_analysis.connect( + [ + ( + info_source, + select_files, + [('contrast_id', 'contrast_id')], + ), + ( + info_source, + subgroups_contrasts, + [('subject_list', 'subject_ids')], + ), + ( + select_files, + subgroups_contrasts, + [ + ('cope', 'copes'), + ('varcope', 'varcopes'), + ('participants', 'participants_file'), + ], + ), + ( + subgroups_contrasts, + regs, + [ + ('equalRange_id', 'equalRange_id'), + ('equalIndifference_id', 'equalIndifference_id') + ] + ), + ( + regs, + specify_model, + [('regressors', 'regressors')] + ) + ] + ) + - # [INFO] Here we define the contrasts used for the group level analysis, depending on the - # method used. if method in ('equalRange', 'equalIndifference'): contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] + if method == 'equalIndifference': + group_level_analysis.connect([ + ( + subgroups_contrasts, + merge_copes, + [('copes_equalIndifference', 'in_files')] + ), + ( + subgroups_contrasts, + merge_varcopes, + [('varcopes_equalIndifference', 'in_files')] + ) + ]) + + elif method == 'equalRange': + group_level_analysis.connect([ + ( + subgroups_contrasts, + merge_copes_3rdlevel, + [('copes_equalRange', 'in_files')] + ), + ( + subgroups_contrasts, + merge_varcopes_3rdlevel, + [('varcopes_equalRange', 'in_files')] + ) + ]) + + elif method == 'groupComp': contrasts = [ ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) ] + group_level_analysis.connect([ + ( + select_files, + merge_copes, + [('cope', 'in_files')] + ), + ( + select_files, + merge_varcopes, + [('varcope', 'in_files')] + ) + ]) + + group_level_analysis.connect([ + ( + merge_copes, + flame, + [('merged_file', 'cope_file')] + ), + ( + merge_varcopes, + flame, + [('merged_file', 'var_cope_file')] + ), + ( + specify_model, + flame, + [ + ('design_mat', 'design_file'), + ('design_con', 't_con_file'), + ('design_grp', 'cov_split_file') + ] + ), + ( + flame, + cluster, + [ + ('zstats', 'in_file'), + ('copes', 'cope_file') + ] + ), + ( + flame, + data_sink, + [ + ('zstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@zstats"), + ('tstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@tstats") + ] + ), + ( + cluster, + data_sink, + [('threshold_file', f"group_level_analysis_{method}_nsub_{nb_subjects}.@thresh")] + ) + ]) + + # [INFO] Here we simply return the created workflow return group_level_analysis - def get_group_level_outputs(self): - """ Return a list of the files generated by the group level analysis """ - return ['fake_file'] - def get_hypotheses_outputs(self): """ Return the names of the files used by the team to answer the hypotheses of NARPS. 
""" From d39fc44a1f9728dacd82b5553016a6dd63158f4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 24 Oct 2023 14:06:57 +0200 Subject: [PATCH 055/116] First try of subject level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 103 +++++++++++++++--------------- 1 file changed, 51 insertions(+), 52 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 059d5d96..b3065a26 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -6,7 +6,7 @@ from os.path import join from itertools import product -from nipype import Node, Workflow +from nipype import Node, Workflow, MapNode from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split, Select from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( @@ -15,9 +15,8 @@ FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, Threshold, Info, SUSAN, FLIRT, EpiReg, ApplyXFM, ConvertXFM, - Level1Design, FEATModel, L2Model, FILMGLS - - # , Merge, FLAMEO, Randomise, MultipleRegressDesign + Level1Design, FEATModel, L2Model, FILMGLS, + Merge, FLAMEO, Randomise, MultipleRegressDesign, Cluster ) from nipype.algorithms.confounds import CompCor from nipype.algorithms.modelgen import SpecifyModel @@ -568,7 +567,7 @@ def get_subject_level_analysis(self): base_dir = self.directories.working_dir, name = 'subject_level_analysis') subject_level_analysis.connect([ - (infosource_sub_level, select_files, [ + (info_source, select_files, [ ('subject_id', 'subject_id'), ('contrast_id', 'contrast_id')]), (select_files, merge_copes, [('cope', 'in_files')]), @@ -626,13 +625,13 @@ def get_subject_level_outputs(self): def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_file: str): """ Return the file list containing only the files belonging to subject in the wanted group. - + Parameters : - copes: original file list selected by select_files node - varcopes: original file list selected by select_files node - subject_ids: list of subject IDs that are analyzed - participants_file: file containing participants characteristics - + Returns : - copes_equal_indifference : a subset of copes corresponding to subjects in the equalIndifference group @@ -753,10 +752,10 @@ def get_group_level_analysis(self): def get_group_level_analysis_sub_workflow(self, method): """ Return a workflow for the group level analysis. 
- + Parameters: - method: one of 'equalRange', 'equalIndifference' or 'groupComp' - + Returns: - group_level_analysis: nipype.WorkFlow """ @@ -771,7 +770,7 @@ def get_group_level_analysis_sub_workflow(self, method): info_source.iterables = [('contrast_id', self.contrast_list)] # SelectFiles Node - select necessary files - template = { + templates = { 'cope' : join( 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'), @@ -806,7 +805,7 @@ def get_group_level_analysis_sub_workflow(self, method): 'varcopes_global' ] ), - name = 'subgroups_contrasts', + name = 'contrasts', ) # Function Node get_regressors - Get regressors @@ -839,16 +838,16 @@ def get_group_level_analysis_sub_workflow(self, method): # FLAMEO Node - Estimate model estimate_model = Node(FLAMEO(), name = 'estimate_model') - estimate_model.inputs.run_mode = 'ols', # Ordinary least squares + estimate_model.inputs.run_mode = 'ols' # Ordinary least squares estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz') - # Cluster Node - + # Cluster Node - cluster = MapNode(Cluster( - threshold = 3.1, + threshold = 3.1, out_threshold_file = True - ), - name = 'cluster', - iterfield = ['in_file', 'cope_file'], + ), + name = 'cluster', + iterfield = ['in_file', 'cope_file'], synchronize = True ) @@ -869,12 +868,12 @@ def get_group_level_analysis_sub_workflow(self, method): ), ( info_source, - subgroups_contrasts, + contrasts, [('subject_list', 'subject_ids')], ), ( select_files, - subgroups_contrasts, + contrasts, [ ('cope', 'copes'), ('varcope', 'varcopes'), @@ -882,16 +881,16 @@ def get_group_level_analysis_sub_workflow(self, method): ], ), ( - subgroups_contrasts, - regs, + contrasts, + regressors, [ ('equalRange_id', 'equalRange_id'), ('equalIndifference_id', 'equalIndifference_id') ] ), ( - regs, - specify_model, + regressors, + specify_model, [('regressors', 'regressors')] ) ] @@ -904,13 +903,13 @@ def get_group_level_analysis_sub_workflow(self, method): if method == 'equalIndifference': group_level_analysis.connect([ ( - subgroups_contrasts, - merge_copes, + contrasts, + merge_copes, [('copes_equalIndifference', 'in_files')] - ), + ), ( - subgroups_contrasts, - merge_varcopes, + contrasts, + merge_varcopes, [('varcopes_equalIndifference', 'in_files')] ) ]) @@ -918,13 +917,13 @@ def get_group_level_analysis_sub_workflow(self, method): elif method == 'equalRange': group_level_analysis.connect([ ( - subgroups_contrasts, - merge_copes_3rdlevel, + contrasts, + merge_copes, [('copes_equalRange', 'in_files')] ), ( - subgroups_contrasts, - merge_varcopes_3rdlevel, + contrasts, + merge_varcopes, [('varcopes_equalRange', 'in_files')] ) ]) @@ -937,56 +936,56 @@ def get_group_level_analysis_sub_workflow(self, method): group_level_analysis.connect([ ( - select_files, - merge_copes, + select_files, + merge_copes, [('cope', 'in_files')] ), ( - select_files, - merge_varcopes, + select_files, + merge_varcopes, [('varcope', 'in_files')] ) ]) group_level_analysis.connect([ ( - merge_copes, - flame, + merge_copes, + estimate_model, [('merged_file', 'cope_file')] ), ( - merge_varcopes, - flame, + merge_varcopes, + estimate_model, [('merged_file', 'var_cope_file')] ), ( - specify_model, - flame, + specify_model, + estimate_model, [ ('design_mat', 'design_file'), - ('design_con', 't_con_file'), + ('design_con', 't_con_file'), ('design_grp', 'cov_split_file') ] ), ( - flame, - cluster, + estimate_model, + cluster, [ - ('zstats', 'in_file'), + ('zstats', 'in_file'), ('copes', 'cope_file') ] ), ( - 
flame,
-                    data_sink,
+                    estimate_model,
+                    data_sink,
                     [
-                        ('zstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@zstats"),
+                        ('zstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@zstats"),
                         ('tstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@tstats")
                     ]
-                ),
+                ),
                 (
-                    cluster,
-                    data_sink,
+                    cluster,
+                    data_sink,
                     [('threshold_file', f"group_level_analysis_{method}_nsub_{nb_subjects}.@thresh")]
                 )
             ])

From 98abcfcce5c0c5cf69bf3b42742ad5161c4f92ba Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Tue, 24 Oct 2023 14:14:56 +0200
Subject: [PATCH 056/116] FSL's Merge disambiguation [skip ci]

---
 narps_open/pipelines/team_08MQ.py | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py
index b3065a26..2b0ce8c3 100644
--- a/narps_open/pipelines/team_08MQ.py
+++ b/narps_open/pipelines/team_08MQ.py
@@ -16,8 +16,9 @@
     Threshold, Info, SUSAN, FLIRT, EpiReg, ApplyXFM, ConvertXFM,
     Level1Design, FEATModel, L2Model, FILMGLS,
-    Merge, FLAMEO, Randomise, MultipleRegressDesign, Cluster
+    FLAMEO, Randomise, MultipleRegressDesign, Cluster
 )
+from nipype.interfaces.fsl.utils import Merge as MergeImages
 from nipype.algorithms.confounds import CompCor
 from nipype.algorithms.modelgen import SpecifyModel
 from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform
@@ -551,11 +552,11 @@ def get_subject_level_analysis(self):
         generate_model.inputs.num_copes = len(self.run_list)

         # Merge Node - Merge copes files for each subject
-        merge_copes = Node(Merge(), name = 'merge_copes')
+        merge_copes = Node(MergeImages(), name = 'merge_copes')
         merge_copes.inputs.dimension = 't'

         # Merge Node - Merge varcopes files for each subject
-        merge_varcopes = Node(Merge(), name = 'merge_varcopes')
+        merge_varcopes = Node(MergeImages(), name = 'merge_varcopes')
         merge_varcopes.inputs.dimension = 't'

         # FLAMEO Node - Estimate model
@@ -826,11 +827,11 @@ def get_group_level_analysis_sub_workflow(self, method):
         regressors.inputs.subject_list = self.subject_list

         # Merge Node - Merge cope files
-        merge_copes = Node(Merge(), name = 'merge_copes')
+        merge_copes = Node(MergeImages(), name = 'merge_copes')
         merge_copes.inputs.dimension = 't'

         # Merge Node - Merge cope files
-        merge_varcopes = Node(Merge(), name = 'merge_varcopes')
+        merge_varcopes = Node(MergeImages(), name = 'merge_varcopes')
         merge_varcopes.inputs.dimension = 't'

         # MultipleRegressDesign Node - Specify model

From 77515c08afb6b85a7715774bc696cdb6f0382560 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Tue, 24 Oct 2023 14:37:00 +0200
Subject: [PATCH 057/116] Use MNI masks [skip ci]

---
 narps_open/pipelines/team_08MQ.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py
index 2b0ce8c3..d8ebc0ec 100644
--- a/narps_open/pipelines/team_08MQ.py
+++ b/narps_open/pipelines/team_08MQ.py
@@ -536,9 +536,7 @@ def get_subject_level_analysis(self):
             'cope' : join('run_level_analysis', '_run_id_*_subject_id_{subject_id}',
                 'results', 'cope{contrast_id}.nii.gz'),
             'varcope' : join('run_level_analysis', '_run_id_*_subject_id_{subject_id}',
-                'results', 'varcope{contrast_id}.nii.gz'),
-            'mask': join('preprocessing', '_run_id_*_subject_id_{subject_id}',
-                '')
+                'results', 'varcope{contrast_id}.nii.gz')
         }
         select_files = Node(SelectFiles(templates), name = 'select_files')
         select_files.inputs.base_directory = self.directories.output_dir
@@ -562,6 +560,7 @@ def get_subject_level_analysis(self):
         # FLAMEO Node - Estimate model
         estimate_model = Node(FLAMEO(), name = 'estimate_model')
         estimate_model.inputs.run_mode = 'fe' # Fixed effect
+        estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz')

         # Second level (single-subject, mean of all four scans) analyses: Fixed effects analysis.
         subject_level_analysis = Workflow(
@@ -573,7 +572,6 @@ def get_subject_level_analysis(self):
                 ('contrast_id', 'contrast_id')]),
             (select_files, merge_copes, [('cope', 'in_files')]),
             (select_files, merge_varcopes, [('varcope', 'in_files')]),
-            (select_files, estimate_model, [('mask', 'mask_file')]),
             (merge_copes, estimate_model, [('merged_file', 'cope_file')]),
             (merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]),
             (generate_model, estimate_model, [

From 8d907cdc99c911ea500e9ac5f7904582dbb11896 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Tue, 24 Oct 2023 17:14:18 +0200
Subject: [PATCH 058/116] Remove sizeable preprocessing files

---
 narps_open/pipelines/team_08MQ.py | 280 +++++++++++++++---------------
 1 file changed, 136 insertions(+), 144 deletions(-)

diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py
index d8ebc0ec..a367b2fe 100644
--- a/narps_open/pipelines/team_08MQ.py
+++ b/narps_open/pipelines/team_08MQ.py
@@ -6,22 +6,24 @@
 from os.path import join
 from itertools import product

-from nipype import Node, Workflow, MapNode
+from nipype import Node, Workflow
 from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split, Select
 from nipype.interfaces.io import SelectFiles, DataSink
 from nipype.interfaces.fsl import (
+    # General usage
     FSLCommand,
-
+    # Preprocessing
     FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer,
-    Threshold, Info, SUSAN, FLIRT, EpiReg, ApplyXFM, ConvertXFM,
-
+    Threshold, Info, SUSAN, FLIRT, ApplyXFM, ConvertXFM,
+    # Analyses
     Level1Design, FEATModel, L2Model, FILMGLS,
-    FLAMEO, Randomise, MultipleRegressDesign, Cluster
+    FLAMEO, Randomise, MultipleRegressDesign
 )
 from nipype.interfaces.fsl.utils import Merge as MergeImages
 from nipype.algorithms.confounds import CompCor
 from nipype.algorithms.modelgen import SpecifyModel
 from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform
+
 from narps_open.pipelines import Pipeline
 from narps_open.data.task import TaskInformation
@@ -37,6 +39,29 @@ def __init__(self):
         self.team_id = '08MQ'
         self.contrast_list = ['1', '2', '3']

+    def remove_files(_, files):
+        """
+        This method is used in a Function node to fully remove
+        files generated by a Node, once they aren't needed anymore.
+ + Parameters: + - _: Node input only used for triggering the Node + - files: str or list, a single filename or a list of filenames to remove + """ + from os import remove + + if isinstance(files, str): + files = [files] + + try: + for file in files: + remove(file) + except OSError as error: + print(error) + else: + print('The following files were successfully deleted.') + print(files) + def get_preprocessing(self): """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ @@ -205,6 +230,31 @@ def get_preprocessing(self): compute_confounds.inputs.merge_method = 'union' compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] + # Function Nodes remove_files - Remove sizeable files once they aren't needed + remove_func_0 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_0') + + remove_func_1 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_1') + + remove_func_2 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_2') + + remove_func_3 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_3') + preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.connect([ # Inputs @@ -253,12 +303,24 @@ def get_preprocessing(self): (normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]), (reverse_transform_order, alignment_func_to_mni, [('out', 'transformation_series')]), (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space - (slice_time_correction, compute_confounds, [('slice_time_corrected_file', 'realigned_file')]), + (slice_time_correction, compute_confounds, [ + ('slice_time_corrected_file', 'realigned_file') + ]), # Outputs of preprocessing (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]), - (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]) + (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), + + # File removals + (motion_correction, remove_func_0, [('out_file', 'files')]), + (slice_time_correction, remove_func_0, [('slice_time_corrected_file', '_')]), + (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'files')]), + (smoothing, remove_func_1, [('smoothed_file', '_')]), + (smoothing, remove_func_2, [('smoothed_file', 'files')]), + (alignment_func_to_anat, remove_func_2, [('out_file', '_')]), + (alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), + (alignment_func_to_mni, remove_func_3, [('out', '_')]) ]) return preprocessing @@ -840,15 +902,14 @@ def get_group_level_analysis_sub_workflow(self, method): estimate_model.inputs.run_mode = 'ols' # Ordinary least squares estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz') - # Cluster Node - - cluster = MapNode(Cluster( - threshold = 3.1, - out_threshold_file = True - ), - name = 'cluster', - iterfield = ['in_file', 'cope_file'], - synchronize = True - ) + # Randomise Node - + randomise = Node(Randomise(), name = 'randomise') + randomise.inputs.num_perm = 10000 + randomise.inputs.tfce = True + randomise.inputs.vox_p_values = True + randomise.inputs.c_thresh = 
0.05 + randomise.inputs.tfce_E = 0.01 + randomise.inputs.mask = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz') # Compute the number of participants used to do the analysis nb_subjects = len(self.subject_list) @@ -858,139 +919,70 @@ def get_group_level_analysis_sub_workflow(self, method): base_dir = self.directories.working_dir, name = f'group_level_analysis_{method}_nsub_{nb_subjects}' ) - group_level_analysis.connect( - [ - ( - info_source, - select_files, - [('contrast_id', 'contrast_id')], - ), - ( - info_source, - contrasts, - [('subject_list', 'subject_ids')], - ), - ( - select_files, - contrasts, - [ - ('cope', 'copes'), - ('varcope', 'varcopes'), - ('participants', 'participants_file'), - ], - ), - ( - contrasts, - regressors, - [ - ('equalRange_id', 'equalRange_id'), - ('equalIndifference_id', 'equalIndifference_id') - ] - ), - ( - regressors, - specify_model, - [('regressors', 'regressors')] - ) - ] - ) - + group_level_analysis.connect([ + (info_source, select_files, [('contrast_id', 'contrast_id')]), + (info_source, contrasts, [('subject_list', 'subject_ids')]), + (select_files, contrasts, [ + ('cope', 'copes'), + ('varcope', 'varcopes'), + ('participants', 'participants_file'), + ]), + (contrasts, regressors, [ + ('equalRange_id', 'equalRange_id'), + ('equalIndifference_id', 'equalIndifference_id') + ]), + (regressors, specify_model, [('regressors', 'regressors')]) + ]) if method in ('equalRange', 'equalIndifference'): contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] if method == 'equalIndifference': group_level_analysis.connect([ - ( - contrasts, - merge_copes, - [('copes_equalIndifference', 'in_files')] - ), - ( - contrasts, - merge_varcopes, - [('varcopes_equalIndifference', 'in_files')] - ) + (contrasts, merge_copes, [('copes_equalIndifference', 'in_files')]), + (contrasts, merge_varcopes, [('varcopes_equalIndifference', 'in_files')]) ]) elif method == 'equalRange': group_level_analysis.connect([ - ( - contrasts, - merge_copes, - [('copes_equalRange', 'in_files')] - ), - ( - contrasts, - merge_varcopes, - [('varcopes_equalRange', 'in_files')] - ) + (contrasts, merge_copes, [('copes_equalRange', 'in_files')]), + (contrasts, merge_varcopes, [('varcopes_equalRange', 'in_files')]) ]) - elif method == 'groupComp': contrasts = [ ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) ] group_level_analysis.connect([ - ( - select_files, - merge_copes, - [('cope', 'in_files')] - ), - ( - select_files, - merge_varcopes, - [('varcope', 'in_files')] - ) + (select_files, merge_copes, [('cope', 'in_files')]), + (select_files, merge_varcopes, [('varcope', 'in_files')]) ]) group_level_analysis.connect([ - ( - merge_copes, - estimate_model, - [('merged_file', 'cope_file')] - ), - ( - merge_varcopes, - estimate_model, - [('merged_file', 'var_cope_file')] - ), - ( - specify_model, - estimate_model, - [ - ('design_mat', 'design_file'), - ('design_con', 't_con_file'), - ('design_grp', 'cov_split_file') - ] - ), - ( - estimate_model, - cluster, - [ - ('zstats', 'in_file'), - ('copes', 'cope_file') - ] - ), - ( - estimate_model, - data_sink, - [ - ('zstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@zstats"), - ('tstats', f"group_level_analysis_{method}_nsub_{nb_subjects}.@tstats") - ] - ), - ( - cluster, - data_sink, - [('threshold_file', f"group_level_analysis_{method}_nsub_{nb_subjects}.@thresh")] - ) + (merge_copes, estimate_model, [('merged_file', 'cope_file')]), + (merge_varcopes, estimate_model, [('merged_file', 
'var_cope_file')]), + (specify_model, estimate_model, [ + ('design_mat', 'design_file'), + ('design_con', 't_con_file'), + ('design_grp', 'cov_split_file') + ]), + (merge_copes, randomise, [('merged_file', 'in_file')]), + (specify_model, randomise, [ + ('design_mat', 'design_mat'), + ('design_con', 'tcon') + ]), + (randomise, data_sink, [ + ('t_corrected_p_files', + f'group_level_analysis_{method}_nsub_{nb_subjects}.@tcorpfile'), + ('tstat_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstat') + ]), + (estimate_model, data_sink, [ + ('zstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats'), + ('tstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats') + ]) ]) - - # [INFO] Here we simply return the created workflow return group_level_analysis def get_hypotheses_outputs(self): @@ -998,41 +990,41 @@ def get_hypotheses_outputs(self): nb_sub = len(self.subject_list) files = [ - join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), - join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_pgain', 'zstat1.nii.gz'), - join(f'l3_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), - join(f'l3_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_pgain', 'zstat1.nii.gz'), - join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), - join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_pgain', 'zstat1.nii.gz'), - join(f'l3_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), - join(f'l3_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_pgain', 'zstat1.nii.gz'), - join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'), - join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_ploss', 'zstat2.nii.gz'), - join(f'l3_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'), - join(f'l3_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_ploss', 'zstat2.nii.gz'), - join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), - join(f'l3_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_ploss', 'zstat1.nii.gz'), - join(f'l3_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), - join(f'l3_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_ploss', 'zstat1.nii.gz'), - 
join(f'l3_analysis_groupComp_nsub_{nb_sub}', + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), - join(f'l3_analysis_groupComp_nsub_{nb_sub}', + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', '_contrast_id_ploss', 'zstat1.nii.gz') ] return [join(self.directories.output_dir, f) for f in files] From 1ea7e12e8acd581627ff7d3bd8990833c3ee75eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 24 Oct 2023 17:40:54 +0200 Subject: [PATCH 059/116] Bug with contrasts Node naming --- narps_open/pipelines/team_08MQ.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index a367b2fe..29fb0301 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -320,7 +320,7 @@ def get_preprocessing(self): (smoothing, remove_func_2, [('smoothed_file', 'files')]), (alignment_func_to_anat, remove_func_2, [('out_file', '_')]), (alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), - (alignment_func_to_mni, remove_func_3, [('out', '_')]) + (alignment_func_to_mni, remove_func_3, [('output_image', '_')]) ]) return preprocessing @@ -851,7 +851,7 @@ def get_group_level_analysis_sub_workflow(self, method): data_sink.inputs.base_directory = self.directories.output_dir # Function Node get_subgroups_contrasts - Get the contrast files for each subgroup - contrasts = Node( + get_contrasts = Node( Function( function = self.get_subgroups_contrasts, input_names = ['copes', 'varcopes', 'subject_ids', 'participants_file'], @@ -866,7 +866,7 @@ def get_group_level_analysis_sub_workflow(self, method): 'varcopes_global' ] ), - name = 'contrasts', + name = 'get_contrasts', ) # Function Node get_regressors - Get regressors @@ -921,13 +921,13 @@ def get_group_level_analysis_sub_workflow(self, method): ) group_level_analysis.connect([ (info_source, select_files, [('contrast_id', 'contrast_id')]), - (info_source, contrasts, [('subject_list', 'subject_ids')]), - (select_files, contrasts, [ + (info_source, get_contrasts, [('subject_list', 'subject_ids')]), + (select_files, get_contrasts, [ ('cope', 'copes'), ('varcope', 'varcopes'), ('participants', 'participants_file'), ]), - (contrasts, regressors, [ + (get_contrasts, regressors, [ ('equalRange_id', 'equalRange_id'), ('equalIndifference_id', 'equalIndifference_id') ]), @@ -939,14 +939,14 @@ def get_group_level_analysis_sub_workflow(self, method): if method == 'equalIndifference': group_level_analysis.connect([ - (contrasts, merge_copes, [('copes_equalIndifference', 'in_files')]), - (contrasts, merge_varcopes, [('varcopes_equalIndifference', 'in_files')]) + (get_contrasts, merge_copes, [('copes_equalIndifference', 'in_files')]), + (get_contrasts, merge_varcopes, [('varcopes_equalIndifference', 'in_files')]) ]) elif method == 'equalRange': group_level_analysis.connect([ - (contrasts, merge_copes, [('copes_equalRange', 'in_files')]), - (contrasts, merge_varcopes, [('varcopes_equalRange', 'in_files')]) + (get_contrasts, merge_copes, [('copes_equalRange', 'in_files')]), + (get_contrasts, merge_varcopes, [('varcopes_equalRange', 'in_files')]) ]) elif method == 'groupComp': From e79239f858dd0ec828ce4eca5bf5789b4561fb28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 25 Oct 2023 10:10:09 +0200 Subject: [PATCH 060/116] Base directories --- narps_open/pipelines/team_08MQ.py | 24 ++++++++++-------------- 1 file changed, 10 
insertions(+), 14 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 29fb0301..c0d9ef9e 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -210,7 +210,7 @@ def get_preprocessing(self): alignment_func_to_anat = Node(ApplyXFM(), name = 'alignment_func_to_anat') alignment_func_to_anat.inputs.apply_xfm = True - # Select Node - Change the order of transforms comming from ANTs Registration + # Select Node - Change the order of transforms coming from ANTs Registration reverse_transform_order = Node(Select(), name = 'reverse_transform_order') reverse_transform_order.inputs.index = [1, 0] @@ -595,13 +595,13 @@ def get_subject_level_analysis(self): # SelectFiles Node - select necessary files templates = { - 'cope' : join('run_level_analysis', '_run_id_*_subject_id_{subject_id}', - 'results', 'cope{contrast_id}.nii.gz'), - 'varcope' : join('run_level_analysis', '_run_id_*_subject_id_{subject_id}', - 'results', 'varcope{contrast_id}.nii.gz') + 'cope' : join(self.directories.output_dir, 'run_level_analysis', + '_run_id_*_subject_id_{subject_id}', 'results', 'cope{contrast_id}.nii.gz'), + 'varcope' : join(self.directories.output_dir, 'run_level_analysis', + '_run_id_*_subject_id_{subject_id}', 'results', 'varcope{contrast_id}.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') - select_files.inputs.base_directory = self.directories.output_dir + select_files.inputs.base_directory = self.directories.dataset_dir # DataSink Node - store the wanted results in the wanted directory data_sink = Node(DataSink(), name = 'data_sink') @@ -832,18 +832,14 @@ def get_group_level_analysis_sub_workflow(self, method): # SelectFiles Node - select necessary files templates = { - 'cope' : join( - 'subject_level_analysis', + 'cope' : join(self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'), - 'varcope' : join( - 'subject_level_analysis', + 'varcope' : join(self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz'), - 'participants' : join( - self.directories.dataset_dir, - 'participants.tsv') + 'participants' : 'participants.tsv' } select_files = Node(SelectFiles(templates), name = 'select_files') - select_files.inputs.base_directory = self.directories.output_dir + select_files.inputs.base_directory = self.directories.dataset_dir select_files.inputs.force_list = True # Datasink Node - save important files From 4dad12914bdabeefa6b87d2c8d1e7db13c4dba96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 25 Oct 2023 10:58:36 +0200 Subject: [PATCH 061/116] Remove alignment_func_to_mni's output [skip ci] --- narps_open/pipelines/team_08MQ.py | 8 ++++++++ tests/pipelines/test_team_08MQ.py | 18 +++++++++--------- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index c0d9ef9e..c66b8440 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -255,6 +255,12 @@ def get_preprocessing(self): output_names = [] ), name = 'remove_func_3') + remove_func_4 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_4') + preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.connect([ # Inputs @@ -321,6 +327,8 @@ def get_preprocessing(self): 
(alignment_func_to_anat, remove_func_2, [('out_file', '_')]), (alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), (alignment_func_to_mni, remove_func_3, [('output_image', '_')]) + (alignment_func_to_mni, remove_func_4, [('output_image', 'files')]), + (data_sink, remove_func_4, [('out_file', '_')]) ]) return preprocessing diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index d062b08c..d3f3d551 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -32,7 +32,7 @@ def test_create(): # 2 - check workflows assert isinstance(pipeline.get_preprocessing(), Workflow) - assert pipeline.get_run_level_analysis() is None + assert isinstance(pipeline.get_run_level_analysis(), Workflow) assert isinstance(pipeline.get_subject_level_analysis(), Workflow) group_level = pipeline.get_group_level_analysis() @@ -47,19 +47,19 @@ def test_outputs(): pipeline = PipelineTeam08MQ() # 1 - 1 subject outputs pipeline.subject_list = ['001'] - assert len(pipeline.get_preprocessing_outputs()) == 0 - assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) == 0 + assert len(pipeline.get_preprocessing_outputs()) == 3*4 + assert len(pipeline.get_run_level_outputs()) == 8+4*3*4 + assert len(pipeline.get_subject_level_outputs()) == 4*3 assert len(pipeline.get_group_level_outputs()) == 0 - assert len(pipeline.get_hypotheses_outputs()) == 0 + assert len(pipeline.get_hypotheses_outputs()) == 18 # 2 - 4 subjects outputs pipeline.subject_list = ['001', '002', '003', '004'] - assert len(pipeline.get_preprocessing_outputs()) == 0 - assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) == 0 + assert len(pipeline.get_preprocessing_outputs()) == 3*4*4 + assert len(pipeline.get_run_level_outputs()) == (8+4*3*4)*4 + assert len(pipeline.get_subject_level_outputs()) == 4*3*4 assert len(pipeline.get_group_level_outputs()) == 0 - assert len(pipeline.get_hypotheses_outputs()) == 0 + assert len(pipeline.get_hypotheses_outputs()) == 18 @staticmethod @mark.pipeline_test From 26ddc3a02831a4209caa05a34a31cb11969c4178 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 25 Oct 2023 13:33:26 +0200 Subject: [PATCH 062/116] Autoremove intermediate results [skip ci] --- narps_open/pipelines/team_08MQ.py | 47 +++---------------------------- 1 file changed, 4 insertions(+), 43 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index c66b8440..907a1a5d 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -230,38 +230,11 @@ def get_preprocessing(self): compute_confounds.inputs.merge_method = 'union' compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] - # Function Nodes remove_files - Remove sizeable files once they aren't needed - remove_func_0 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], - output_names = [] - ), name = 'remove_func_0') - - remove_func_1 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], - output_names = [] - ), name = 'remove_func_1') - - remove_func_2 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], - output_names = [] - ), name = 'remove_func_2') - - remove_func_3 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], - output_names = [] - ), name = 'remove_func_3') - - remove_func_4 = Node(Function( - function = 
self.remove_files, - input_names = ['_', 'files'], - output_names = [] - ), name = 'remove_func_4') - preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') + preprocessing.config['execution'] = { + 'remove_node_directories': 'True', + 'stop_on_first_crash': 'True' + } preprocessing.connect([ # Inputs (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), @@ -317,18 +290,6 @@ def get_preprocessing(self): (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]), (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), - - # File removals - (motion_correction, remove_func_0, [('out_file', 'files')]), - (slice_time_correction, remove_func_0, [('slice_time_corrected_file', '_')]), - (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'files')]), - (smoothing, remove_func_1, [('smoothed_file', '_')]), - (smoothing, remove_func_2, [('smoothed_file', 'files')]), - (alignment_func_to_anat, remove_func_2, [('out_file', '_')]), - (alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), - (alignment_func_to_mni, remove_func_3, [('output_image', '_')]) - (alignment_func_to_mni, remove_func_4, [('output_image', 'files')]), - (data_sink, remove_func_4, [('out_file', '_')]) ]) return preprocessing From bedc2b54766fb52d4fec6a978428bcb36cb241f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 25 Oct 2023 14:17:30 +0200 Subject: [PATCH 063/116] Autoremove intermediate results [skip ci] --- narps_open/pipelines/team_08MQ.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 907a1a5d..1789dc7f 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -233,7 +233,8 @@ def get_preprocessing(self): preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution'] = { 'remove_node_directories': 'True', - 'stop_on_first_crash': 'True' + 'stop_on_first_crash': 'True', + 'remove_unnecessary_outputs': 'False' } preprocessing.connect([ # Inputs From fa2ebd76f79dd9fb4fe9d3b48746dc85ff8db5f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 25 Oct 2023 15:28:23 +0200 Subject: [PATCH 064/116] Revert "Remove alignment_func_to_mni's output [skip ci]" This reverts commit 4dad12914bdabeefa6b87d2c8d1e7db13c4dba96. 
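This also restores the remove_func_* Function nodes of get_preprocessing, which delete sizeable intermediate files once they are no longer needed, in place of the workflow-level automatic removal. For reference, the pattern is roughly the following (condensed from the pipeline itself; 'producer', 'consumer' and 'remove_node' are illustrative names):

    from nipype import Node
    from nipype.interfaces.utility import Function

    def remove_files(_, files):
        # '_' is a trigger-only input: it carries no data, it just forces
        # this node to wait for the connected upstream node
        from os import remove
        if isinstance(files, str):
            files = [files]
        for file in files:
            remove(file)

    remove_node = Node(Function(
        function = remove_files,
        input_names = ['_', 'files'],
        output_names = []
        ), name = 'remove_node')

    # The producer's output feeds 'files', while the last consumer's output
    # feeds '_', so deletion only happens after the consumer has run.
    preprocessing.connect([
        (producer, remove_node, [('out_file', 'files')]),
        (consumer, remove_node, [('out_file', '_')])
    ])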
--- narps_open/pipelines/team_08MQ.py | 50 +++++++++++++++++++++++++++---- tests/pipelines/test_team_08MQ.py | 18 +++++------ 2 files changed, 54 insertions(+), 14 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 1789dc7f..ebf0133f 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -230,12 +230,40 @@ def get_preprocessing(self): compute_confounds.inputs.merge_method = 'union' compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] +<<<<<<< HEAD +======= + # Function Nodes remove_files - Remove sizeable files once they aren't needed + remove_func_0 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_0') + + remove_func_1 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_1') + + remove_func_2 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_2') + + remove_func_3 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_3') + + remove_func_4 = Node(Function( + function = self.remove_files, + input_names = ['_', 'files'], + output_names = [] + ), name = 'remove_func_4') + preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') - preprocessing.config['execution'] = { - 'remove_node_directories': 'True', - 'stop_on_first_crash': 'True', - 'remove_unnecessary_outputs': 'False' - } preprocessing.connect([ # Inputs (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), @@ -291,6 +319,18 @@ def get_preprocessing(self): (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]), (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), + + # File removals + (motion_correction, remove_func_0, [('out_file', 'files')]), + (slice_time_correction, remove_func_0, [('slice_time_corrected_file', '_')]), + (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'files')]), + (smoothing, remove_func_1, [('smoothed_file', '_')]), + (smoothing, remove_func_2, [('smoothed_file', 'files')]), + (alignment_func_to_anat, remove_func_2, [('out_file', '_')]), + (alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), + (alignment_func_to_mni, remove_func_3, [('output_image', '_')]) + (alignment_func_to_mni, remove_func_4, [('output_image', 'files')]), + (data_sink, remove_func_4, [('out_file', '_')]) ]) return preprocessing diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index d3f3d551..d062b08c 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -32,7 +32,7 @@ def test_create(): # 2 - check workflows assert isinstance(pipeline.get_preprocessing(), Workflow) - assert isinstance(pipeline.get_run_level_analysis(), Workflow) + assert pipeline.get_run_level_analysis() is None assert isinstance(pipeline.get_subject_level_analysis(), Workflow) group_level = pipeline.get_group_level_analysis() @@ -47,19 +47,19 @@ def test_outputs(): pipeline = PipelineTeam08MQ() # 1 - 1 subject outputs pipeline.subject_list = ['001'] - assert len(pipeline.get_preprocessing_outputs()) == 3*4 - assert len(pipeline.get_run_level_outputs()) == 8+4*3*4 - 
assert len(pipeline.get_subject_level_outputs()) == 4*3 + assert len(pipeline.get_preprocessing_outputs()) == 0 + assert len(pipeline.get_run_level_outputs()) == 0 + assert len(pipeline.get_subject_level_outputs()) == 0 assert len(pipeline.get_group_level_outputs()) == 0 - assert len(pipeline.get_hypotheses_outputs()) == 18 + assert len(pipeline.get_hypotheses_outputs()) == 0 # 2 - 4 subjects outputs pipeline.subject_list = ['001', '002', '003', '004'] - assert len(pipeline.get_preprocessing_outputs()) == 3*4*4 - assert len(pipeline.get_run_level_outputs()) == (8+4*3*4)*4 - assert len(pipeline.get_subject_level_outputs()) == 4*3*4 + assert len(pipeline.get_preprocessing_outputs()) == 0 + assert len(pipeline.get_run_level_outputs()) == 0 + assert len(pipeline.get_subject_level_outputs()) == 0 assert len(pipeline.get_group_level_outputs()) == 0 - assert len(pipeline.get_hypotheses_outputs()) == 18 + assert len(pipeline.get_hypotheses_outputs()) == 0 @staticmethod @mark.pipeline_test From 7832af2491f6e4028835db87a635890939240b3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 25 Oct 2023 15:30:53 +0200 Subject: [PATCH 065/116] Bug after revert [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ebf0133f..c66b8440 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -230,8 +230,6 @@ def get_preprocessing(self): compute_confounds.inputs.merge_method = 'union' compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] -<<<<<<< HEAD -======= # Function Nodes remove_files - Remove sizeable files once they aren't needed remove_func_0 = Node(Function( function = self.remove_files, From c575158ccb5fadf54f955ffa0394e81d63598789 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 25 Oct 2023 15:31:40 +0200 Subject: [PATCH 066/116] Bug after revert [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index c66b8440..b31627b1 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -326,7 +326,7 @@ def get_preprocessing(self): (smoothing, remove_func_2, [('smoothed_file', 'files')]), (alignment_func_to_anat, remove_func_2, [('out_file', '_')]), (alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), - (alignment_func_to_mni, remove_func_3, [('output_image', '_')]) + (alignment_func_to_mni, remove_func_3, [('output_image', '_')]), (alignment_func_to_mni, remove_func_4, [('output_image', 'files')]), (data_sink, remove_func_4, [('out_file', '_')]) ]) From f6844a5838888b634be8265dabc775769595678b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 26 Oct 2023 14:27:32 +0200 Subject: [PATCH 067/116] [TEST] tests updates --- narps_open/pipelines/team_08MQ.py | 1 + tests/pipelines/test_team_08MQ.py | 18 +++++++++--------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index b31627b1..6d61adf9 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -262,6 +262,7 @@ def get_preprocessing(self): ), name = 'remove_func_4') preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') + preprocessing.config['execution']['stop_on_first_crash'] = 'true' 
preprocessing.connect([ # Inputs (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index d062b08c..d3f3d551 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -32,7 +32,7 @@ def test_create(): # 2 - check workflows assert isinstance(pipeline.get_preprocessing(), Workflow) - assert pipeline.get_run_level_analysis() is None + assert isinstance(pipeline.get_run_level_analysis(), Workflow) assert isinstance(pipeline.get_subject_level_analysis(), Workflow) group_level = pipeline.get_group_level_analysis() @@ -47,19 +47,19 @@ def test_outputs(): pipeline = PipelineTeam08MQ() # 1 - 1 subject outputs pipeline.subject_list = ['001'] - assert len(pipeline.get_preprocessing_outputs()) == 0 - assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) == 0 + assert len(pipeline.get_preprocessing_outputs()) == 3*4 + assert len(pipeline.get_run_level_outputs()) == 8+4*3*4 + assert len(pipeline.get_subject_level_outputs()) == 4*3 assert len(pipeline.get_group_level_outputs()) == 0 - assert len(pipeline.get_hypotheses_outputs()) == 0 + assert len(pipeline.get_hypotheses_outputs()) == 18 # 2 - 4 subjects outputs pipeline.subject_list = ['001', '002', '003', '004'] - assert len(pipeline.get_preprocessing_outputs()) == 0 - assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) == 0 + assert len(pipeline.get_preprocessing_outputs()) == 3*4*4 + assert len(pipeline.get_run_level_outputs()) == (8+4*3*4)*4 + assert len(pipeline.get_subject_level_outputs()) == 4*3*4 assert len(pipeline.get_group_level_outputs()) == 0 - assert len(pipeline.get_hypotheses_outputs()) == 0 + assert len(pipeline.get_hypotheses_outputs()) == 18 @staticmethod @mark.pipeline_test From ed4f11e30613744d1b20a8d32aacf6c660cb6b6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 26 Oct 2023 17:59:58 +0200 Subject: [PATCH 068/116] Bug with big files removal --- narps_open/pipelines/team_08MQ.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 6d61adf9..3337a0fd 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -261,6 +261,10 @@ def get_preprocessing(self): output_names = [] ), name = 'remove_func_4') + # Merge Node - Merge the output triggers for remove_func_1 Node + # i.e: slice_time_corrected_file is needed by two Nodes before being removed + merge_removal_triggers = Node(Merge(2), name = 'merge_removal_triggers') + preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution']['stop_on_first_crash'] = 'true' preprocessing.connect([ @@ -323,7 +327,9 @@ def get_preprocessing(self): (motion_correction, remove_func_0, [('out_file', 'files')]), (slice_time_correction, remove_func_0, [('slice_time_corrected_file', '_')]), (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'files')]), - (smoothing, remove_func_1, [('smoothed_file', '_')]), + (smoothing, merge_removal_triggers, [('smoothed_file', 'in1')]), + (compute_confounds, merge_removal_triggers, [('components_file', 'in2')]), + (merge_removal_triggers, remove_func_1, [('out', '_')]), (smoothing, remove_func_2, [('smoothed_file', 'files')]), (alignment_func_to_anat, remove_func_2, [('out_file', '_')]), 
(alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), From 8770d81f75b45935168c3539159d1a014dd89c06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 27 Oct 2023 09:36:35 +0200 Subject: [PATCH 069/116] Deleting large files after datasink --- narps_open/pipelines/team_08MQ.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 3337a0fd..cabea294 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -261,10 +261,6 @@ def get_preprocessing(self): output_names = [] ), name = 'remove_func_4') - # Merge Node - Merge the output triggers for remove_func_1 Node - # i.e: slice_time_corrected_file is needed by two Nodes before being removed - merge_removal_triggers = Node(Merge(2), name = 'merge_removal_triggers') - preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution']['stop_on_first_crash'] = 'true' preprocessing.connect([ @@ -325,15 +321,13 @@ def get_preprocessing(self): # File removals (motion_correction, remove_func_0, [('out_file', 'files')]), - (slice_time_correction, remove_func_0, [('slice_time_corrected_file', '_')]), + (data_sink, remove_func_0, [('out_file', '_')]), (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'files')]), - (smoothing, merge_removal_triggers, [('smoothed_file', 'in1')]), - (compute_confounds, merge_removal_triggers, [('components_file', 'in2')]), - (merge_removal_triggers, remove_func_1, [('out', '_')]), + (data_sink, remove_func_1, [('out_file', '_')]), (smoothing, remove_func_2, [('smoothed_file', 'files')]), - (alignment_func_to_anat, remove_func_2, [('out_file', '_')]), + (data_sink, remove_func_2, [('out_file', '_')]), (alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), - (alignment_func_to_mni, remove_func_3, [('output_image', '_')]), + (data_sink, remove_func_3, [('out_file', '_')]), (alignment_func_to_mni, remove_func_4, [('output_image', 'files')]), (data_sink, remove_func_4, [('out_file', '_')]) ]) From 899218413a2ad13ba576dc85f8e5ec656d337c3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 11:23:18 +0100 Subject: [PATCH 070/116] [BUG] group level workflow connections [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index cabea294..a5e043ac 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -926,7 +926,7 @@ def get_group_level_analysis_sub_workflow(self, method): ) group_level_analysis.connect([ (info_source, select_files, [('contrast_id', 'contrast_id')]), - (info_source, get_contrasts, [('subject_list', 'subject_ids')]), + (info_source, get_contrasts, [('subjects', 'subject_ids')]), (select_files, get_contrasts, [ ('cope', 'copes'), ('varcope', 'varcopes'), ('participants', 'participants_file'), From f488a7bc718a3b681fcfe21e114b36d19c56f221 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 11:26:46 +0100 Subject: [PATCH 071/116] [BUG] group level [skip ci] --- narps_open/pipelines/team_08MQ.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index a5e043ac..fec26dc0 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -699,7 +699,7 @@ def
get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi Parameters : - copes: original file list selected by select_files node - varcopes: original file list selected by select_files node - - subject_ids: list of subject IDs that are analyzed + - subject_list: list of subject IDs that are analyzed - participants_file: file containing participants characteristics Returns : @@ -859,7 +859,7 @@ def get_group_level_analysis_sub_workflow(self, method): get_contrasts = Node( Function( function = self.get_subgroups_contrasts, - input_names = ['copes', 'varcopes', 'subject_ids', 'participants_file'], + input_names = ['copes', 'varcopes', 'subject_list', 'participants_file'], output_names = [ 'copes_equalIndifference', 'copes_equalRange', @@ -926,7 +926,7 @@ def get_group_level_analysis_sub_workflow(self, method): ) group_level_analysis.connect([ (info_source, select_files, [('contrast_id', 'contrast_id')]), - (info_source, get_contrasts, [('subjects', 'subject_ids')]), + (info_source, get_contrasts, [('subjects', 'subject_list')]), (select_files, get_contrasts, [ ('cope', 'copes'), ('varcope', 'varcopes'), From d93e3a67a69cdaf04f67c91ac87a882e423a9470 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 11:32:51 +0100 Subject: [PATCH 072/116] [REFAC] equalRange > equal_range [skip ci] --- narps_open/pipelines/team_08MQ.py | 47 ++++++++++++++++--------------- 1 file changed, 25 insertions(+), 22 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index fec26dc0..f47ffd30 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -760,7 +760,10 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi if sub_id[-2][-3:] in subject_list: varcopes_global.append(varcope) - return copes_equal_indifference, copes_equal_range, varcopes_equal_indifference, varcopes_equal_range,equal_indifference_id, equal_range_id,copes_global, varcopes_global + return copes_equal_indifference, copes_equal_range,\ + varcopes_equal_indifference, varcopes_equal_range,\ + equal_indifference_id, equal_range_id,\ + copes_global, varcopes_global def get_regressors( equal_range_id: list, @@ -792,23 +795,23 @@ def get_regressors( # - one for equal range group, # - one for equal indifference group # Each list contains n_sub values with 0 and 1 depending on the group of the participant - # For equalRange_reg list --> participants with a 1 are in the equal range group + # For equal_range_reg list --> participants with a 1 are in the equal range group elif method == 'groupComp': - equalRange_reg = [ + equal_range_reg = [ 1 for i in range(len(equal_range_id) + len(equal_indifference_id)) ] - equalIndifference_reg = [ + equal_indifference_reg = [ 0 for i in range(len(equal_range_id) + len(equal_indifference_id)) ] for index, subject_id in enumerate(subject_list): if subject_id in equal_indifference_id: - equalIndifference_reg[index] = 1 - equalRange_reg[index] = 0 + equal_indifference_reg[index] = 1 + equal_range_reg[index] = 0 regressors = dict( - equalRange = equalRange_reg, - equalIndifference = equalIndifference_reg + equalRange = equal_range_reg, + equalIndifference = equal_indifference_reg ) return regressors @@ -861,12 +864,12 @@ def get_group_level_analysis_sub_workflow(self, method): function = self.get_subgroups_contrasts, input_names = ['copes', 'varcopes', 'subject_list', 'participants_file'], output_names = [ - 'copes_equalIndifference', - 'copes_equalRange', - 
'varcopes_equalIndifference', - 'varcopes_equalRange', - 'equalIndifference_id', - 'equalRange_id', + 'copes_equal_indifference', + 'copes_equal_range', + 'varcopes_equal_indifference', + 'varcopes_equal_range', + 'equal_indifference_id', + 'equal_range_id', 'copes_global', 'varcopes_global' ] @@ -879,8 +882,8 @@ def get_group_level_analysis_sub_workflow(self, method): Function( function = self.get_regressors, input_names = [ - 'equalRange_id', - 'equalIndifference_id', + 'equal_range_id', + 'equal_indifference_id', 'method', 'subject_list', ], @@ -933,8 +936,8 @@ def get_group_level_analysis_sub_workflow(self, method): ('participants', 'participants_file'), ]), (get_contrasts, regressors, [ - ('equalRange_id', 'equalRange_id'), - ('equalIndifference_id', 'equalIndifference_id') + ('equal_range_id', 'equal_range_id'), + ('equal_indifference_id', 'equal_indifference_id') ]), (regressors, specify_model, [('regressors', 'regressors')]) ]) @@ -944,14 +947,14 @@ def get_group_level_analysis_sub_workflow(self, method): if method == 'equalIndifference': group_level_analysis.connect([ - (get_contrasts, merge_copes, [('copes_equalIndifference', 'in_files')]), - (get_contrasts, merge_varcopes, [('varcopes_equalIndifference', 'in_files')]) + (get_contrasts, merge_copes, [('copes_equal_indifference', 'in_files')]), + (get_contrasts, merge_varcopes, [('varcopes_equal_indifference', 'in_files')]) ]) elif method == 'equalRange': group_level_analysis.connect([ - (get_contrasts, merge_copes, [('copes_equalRange', 'in_files')]), - (get_contrasts, merge_varcopes, [('varcopes_equalRange', 'in_files')]) + (get_contrasts, merge_copes, [('copes_equal_range', 'in_files')]), + (get_contrasts, merge_varcopes, [('varcopes_equal_range', 'in_files')]) ]) elif method == 'groupComp': From bd13c53f38dc1a8c63638aa5d9627080b046f33e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 12:08:17 +0100 Subject: [PATCH 073/116] Regressors function + group output [skip ci] --- narps_open/pipelines/team_08MQ.py | 138 +++++++++++++++--------------- 1 file changed, 68 insertions(+), 70 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index f47ffd30..df61df5a 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -712,13 +712,13 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi in the equalIndifference group - varcopes_equal_range : a subset of varcopes corresponding to subjects in the equalRange group - - equal_indifference_id : a list of subject ids in the equalIndifference group - - equal_range_id : a list of subject ids in the equalRange group + - equal_indifference_ids : a list of subject ids in the equalIndifference group + - equal_range_ids : a list of subject ids in the equalRange group - varcopes_global : a list of all varcopes """ - equal_range_id = [] - equal_indifference_id = [] + equal_range_ids = [] + equal_indifference_ids = [] # Reading file containing participants IDs and groups with open(participants_file, 'rt') as file: @@ -730,9 +730,9 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi # Checking for each participant if its ID was selected # and separate people depending on their group if info[0][-3:] in subject_list and info[1] == 'equalIndifference': - equal_indifference_id.append(info[0][-3:]) + equal_indifference_ids.append(info[0][-3:]) elif info[0][-3:] in subject_list and info[1] == 'equalRange': - 
equal_range_id.append(info[0][-3:]) + equal_range_ids.append(info[0][-3:]) copes_equal_indifference = [] copes_equal_range = [] @@ -745,76 +745,62 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi # and add the file to the list corresponding to its group for cope, varcope in zip(copes, varcopes): sub_id = cope.split('/') - if sub_id[-2][-3:] in equal_indifference_id: + if sub_id[-2][-3:] in equal_indifference_ids: copes_equal_indifference.append(cope) - elif sub_id[-2][-3:] in equal_range_id: + elif sub_id[-2][-3:] in equal_range_ids: copes_equal_range.append(cope) if sub_id[-2][-3:] in subject_list: copes_global.append(cope) sub_id = varcope.split('/') - if sub_id[-2][-3:] in equal_indifference_id: + if sub_id[-2][-3:] in equal_indifference_ids: varcopes_equal_indifference.append(varcope) - elif sub_id[-2][-3:] in equal_range_id: + elif sub_id[-2][-3:] in equal_range_ids: varcopes_equal_range.append(varcope) if sub_id[-2][-3:] in subject_list: varcopes_global.append(varcope) return copes_equal_indifference, copes_equal_range,\ varcopes_equal_indifference, varcopes_equal_range,\ - equal_indifference_id, equal_range_id,\ + equal_indifference_ids, equal_range_ids,\ copes_global, varcopes_global - def get_regressors( - equal_range_id: list, - equal_indifference_id: list, - method: str, + def get_two_sample_t_test_regressors( + equal_range_ids: list, + equal_indifference_ids: list, subject_list: list, ) -> dict: """ - Create dictionary of regressors for group analysis. + Create dictionary of regressors for two sample t-test group analysis. Parameters: - - equal_range_id: ids of subjects in equal range group - - equal_indifference_id: ids of subjects in equal indifference group - - method: one of "equalRange", "equalIndifference" or "groupComp" + - equal_range_ids: ids of subjects in equal range group + - equal_indifference_ids: ids of subjects in equal indifference group - subject_list: ids of subject for which to do the analysis Returns: - - regressors: regressors used to distinguish groups in FSL group analysis + - regressors, dict: containing named lists of regressors. + - groups, list: group identifiers to distinguish groups in FSL analysis. 
""" - # For one sample t-test, creates a dictionary - # with a list of the size of the number of participants - if method == 'equalRange': - regressors = dict(group_mean = [1 for i in range(len(equal_range_id))]) - elif method == 'equalIndifference': - regressors = dict(group_mean = [1 for i in range(len(equal_indifference_id))]) - - # For two sample t-test, creates 2 lists: - # - one for equal range group, - # - one for equal indifference group - # Each list contains n_sub values with 0 and 1 depending on the group of the participant - # For equal_range_reg list --> participants with a 1 are in the equal range group - elif method == 'groupComp': - equal_range_reg = [ - 1 for i in range(len(equal_range_id) + len(equal_indifference_id)) - ] - equal_indifference_reg = [ - 0 for i in range(len(equal_range_id) + len(equal_indifference_id)) + # Create 2 lists containing n_sub values which are + # * 1 if the participant is on the group + # * 0 otherwise + equal_range_regressors = [1 if i in equal_range_ids else 0 for i in subject_list] + equal_indifference_regressors = [ + 1 if i in equal_indifference_ids else 0 for i in subject_list ] - for index, subject_id in enumerate(subject_list): - if subject_id in equal_indifference_id: - equal_indifference_reg[index] = 1 - equal_range_reg[index] = 0 + # Create regressors output : a dict with the two list + regressors = dict( + equalRange = equal_range_reg, + equalIndifference = equal_indifference_reg + ) - regressors = dict( - equalRange = equal_range_reg, - equalIndifference = equal_indifference_reg - ) + # Create groups outputs : a list with 1 for equalRange subjects and 2 for equalIndifference + groups = [1 if i == 1 else 2 for i in equal_range_regressors] - return regressors + return regressors, groups def get_group_level_analysis(self): """ Return all workflows for the group level analysis. 
""" @@ -868,8 +854,8 @@ def get_group_level_analysis_sub_workflow(self, method): 'copes_equal_range', 'varcopes_equal_indifference', 'varcopes_equal_range', - 'equal_indifference_id', - 'equal_range_id', + 'equal_indifference_ids', + 'equal_range_ids', 'copes_global', 'varcopes_global' ] @@ -877,22 +863,21 @@ def get_group_level_analysis_sub_workflow(self, method): name = 'get_contrasts', ) - # Function Node get_regressors - Get regressors - regressors = Node( + # Function Node get_two_sample_t_test_regressors + # Get regressors in the groupComp method case + regressors_two_sample = Node( Function( - function = self.get_regressors, + function = self.get_two_sample_t_test_regressors, input_names = [ - 'equal_range_id', - 'equal_indifference_id', - 'method', + 'equal_range_ids', + 'equal_indifference_ids', 'subject_list', ], - output_names = ['regressors'] + output_names = ['regressors', 'groups'] ), - name = 'regressors', + name = 'regressors_two_sample', ) - regressors.inputs.method = method - regressors.inputs.subject_list = self.subject_list + regressors_two_sample.inputs.subject_list = self.subject_list # Merge Node - Merge cope files merge_copes = Node(MergeImages(), name = 'merge_copes') @@ -934,16 +919,18 @@ def get_group_level_analysis_sub_workflow(self, method): ('cope', 'copes'), ('varcope', 'varcopes'), ('participants', 'participants_file'), - ]), - (get_contrasts, regressors, [ - ('equal_range_id', 'equal_range_id'), - ('equal_indifference_id', 'equal_indifference_id') - ]), - (regressors, specify_model, [('regressors', 'regressors')]) + ]) ]) if method in ('equalRange', 'equalIndifference'): - contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] + specify_model.inputs.contrasts = [ + ('Group', 'T', ['mean'], [1]), + ('Group', 'T', ['mean'], [-1]) + ] + specify_model.inputs.regressors = dict( + group_mean = [1 for _ in self.subject_list] + ) + specify_model.inputs.groups = [1 for _ in self.subject_list] if method == 'equalIndifference': group_level_analysis.connect([ @@ -958,13 +945,24 @@ def get_group_level_analysis_sub_workflow(self, method): ]) elif method == 'groupComp': - contrasts = [ - ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) - ] + specify_model.inputs.contrasts = [( + 'Eq range vs Eq indiff in loss', + 'T', + ['equalRange', 'equalIndifference'], + [1, -1] + )] group_level_analysis.connect([ (select_files, merge_copes, [('cope', 'in_files')]), - (select_files, merge_varcopes, [('varcope', 'in_files')]) + (select_files, merge_varcopes, [('varcope', 'in_files')]), + (get_contrasts, regressors_two_sample, [ + ('equal_range_ids', 'equal_range_ids'), + ('equal_indifference_ids', 'equal_indifference_ids') + ]), + (regressors_two_sample, specify_model, [ + ('regressors', 'regressors'), + ('groups', 'groups') + ]) ]) group_level_analysis.connect([ From 0f8f35e70be9021db31eac84a2b4a7b3eeca6691 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 12:10:00 +0100 Subject: [PATCH 074/116] Typo in regressors [skip ci] --- narps_open/pipelines/team_08MQ.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index df61df5a..a4762807 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -924,8 +924,8 @@ def get_group_level_analysis_sub_workflow(self, method): if method in ('equalRange', 'equalIndifference'): specify_model.inputs.contrasts = [ - ('Group', 'T', ['mean'], [1]), - 
('Group', 'T', ['mean'], [-1]) + ('Group', 'T', ['group_mean'], [1]), + ('Group', 'T', ['group_mean'], [-1]) ] specify_model.inputs.regressors = dict( group_mean = [1 for _ in self.subject_list] From 3025305fe532eb5373a9068791775c0cb93fe324 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 12:23:41 +0100 Subject: [PATCH 075/116] Issue with list of regressors [skip ci] --- narps_open/pipelines/team_08MQ.py | 40 ++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index a4762807..f0b69ce0 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -765,6 +765,19 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi equal_indifference_ids, equal_range_ids,\ copes_global, varcopes_global + def get_one_sample_t_test_regressors(subject_ids: list) -> dict: + """ + Create dictionary of regressors for one sample t-test group analysis. + + Parameters: + - subject_ids: ids of subject in the group for which to do the analysis + + Returns: + - dict containing named lists of regressors. + """ + + return dict(group_mean = [1 for _ in self.subject_ids]) + def get_two_sample_t_test_regressors( equal_range_ids: list, equal_indifference_ids: list, @@ -863,6 +876,17 @@ def get_group_level_analysis_sub_workflow(self, method): name = 'get_contrasts', ) + # Function Node get_one_sample_t_test_regressors + # Get regressors in the equalRange and equalIndifference method case + regressors_one_sample = Node( + Function( + function = self.get_one_sample_t_test_regressors, + input_names = ['subject_ids'], + output_names = ['regressors'] + ), + name = 'regressors_one_sample', + ) + # Function Node get_two_sample_t_test_regressors # Get regressors in the groupComp method case regressors_two_sample = Node( @@ -927,21 +951,25 @@ def get_group_level_analysis_sub_workflow(self, method): ('Group', 'T', ['group_mean'], [1]), ('Group', 'T', ['group_mean'], [-1]) ] - specify_model.inputs.regressors = dict( - group_mean = [1 for _ in self.subject_list] - ) - specify_model.inputs.groups = [1 for _ in self.subject_list] + + group_level_analysis.connect([ + (regressors_one_sample, specify_model, [('regressors', 'regressors')]) + ]) if method == 'equalIndifference': group_level_analysis.connect([ (get_contrasts, merge_copes, [('copes_equal_indifference', 'in_files')]), - (get_contrasts, merge_varcopes, [('varcopes_equal_indifference', 'in_files')]) + (get_contrasts, merge_varcopes, [('varcopes_equal_indifference', 'in_files')]), + (get_contrasts, regressors_one_sample, [('equal_range_ids', 'subject_ids')]) ]) elif method == 'equalRange': group_level_analysis.connect([ (get_contrasts, merge_copes, [('copes_equal_range', 'in_files')]), - (get_contrasts, merge_varcopes, [('varcopes_equal_range', 'in_files')]) + (get_contrasts, merge_varcopes, [('varcopes_equal_range', 'in_files')]), + (get_contrasts, regressors_one_sample, [ + ('equal_indifference_ids', 'subject_ids') + ]) ]) elif method == 'groupComp': From 8d14472c8422d76671af8966631d64c185962840 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 12:25:52 +0100 Subject: [PATCH 076/116] Issue with list of regressors [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index f0b69ce0..978c5e5a 100644 --- 
a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -776,7 +776,7 @@ def get_one_sample_t_test_regressors(subject_ids: list) -> dict: - dict containing named lists of regressors. """ - return dict(group_mean = [1 for _ in self.subject_ids]) + return dict(group_mean = [1 for _ in subject_ids]) def get_two_sample_t_test_regressors( equal_range_ids: list, From 817ec3103e05bbe55ec12f2fa7cb53ca0622e441 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 13:23:48 +0100 Subject: [PATCH 077/116] Issue with list of regressors [skip ci] --- narps_open/pipelines/team_08MQ.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 978c5e5a..71e58545 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -806,8 +806,8 @@ def get_two_sample_t_test_regressors( # Create regressors output : a dict with the two list regressors = dict( - equalRange = equal_range_reg, - equalIndifference = equal_indifference_reg + equalRange = equal_range_regressors, + equalIndifference = equal_indifference_regressors ) # Create groups outputs : a list with 1 for equalRange subjects and 2 for equalIndifference From e092bc560110b1825f7c464ca1cb2ae51c5e326f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 14:15:01 +0100 Subject: [PATCH 078/116] [TEST] adding unit tests to 08MQ [skip ci] --- narps_open/pipelines/team_08MQ.py | 28 ++++-------------- tests/pipelines/test_team_08MQ.py | 47 +++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 23 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 71e58545..aa51f417 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -674,24 +674,6 @@ def get_subject_level_outputs(self): return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] - """ - Group level - Ordinary least squares. Pooled variance. - - Second level - Positive one-sample ttest over first level contrast estimates. - - Group level - Group effect for each first level contrast for each of the two groups. - Contrast of positive parametric effect of loss, - testing for equal range group responses being greater than equal indifference group. - - TFCE - - pval_computation : Permutation testing implemented in randomise (10,000 permutations). - multiple_testing_correction : FWE permutation (10,000 permutations). - """ - def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_file: str): """ Return the file list containing only the files belonging to subject in the wanted group. 
@@ -704,14 +686,14 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi Returns : - copes_equal_indifference : a subset of copes corresponding to subjects - in the equalIndifference group + in the equalIndifference group - copes_equal_range : a subset of copes corresponding to subjects - in the equalRange group + in the equalRange group - copes_global : a list of all copes - varcopes_equal_indifference : a subset of varcopes corresponding to subjects - in the equalIndifference group + in the equalIndifference group - varcopes_equal_range : a subset of varcopes corresponding to subjects - in the equalRange group + in the equalRange group - equal_indifference_ids : a list of subject ids in the equalIndifference group - equal_range_ids : a list of subject ids in the equalRange group - varcopes_global : a list of all varcopes @@ -782,7 +764,7 @@ def get_two_sample_t_test_regressors( equal_range_ids: list, equal_indifference_ids: list, subject_list: list, - ) -> dict: + ) -> dict: """ Create dictionary of regressors for two sample t-test group analysis. diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index d3f3d551..589d49f0 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -61,6 +61,53 @@ def test_outputs(): assert len(pipeline.get_group_level_outputs()) == 0 assert len(pipeline.get_hypotheses_outputs()) == 18 + @staticmethod + @mark.unit_test + def test_subject_information(): + """ Test the get_subject_information method """ + + @staticmethod + @mark.unit_test + def test_run_level_contrasts(): + """ Test the get_run_level_contrasts method """ + contrasts = PipelineTeam08MQ().get_run_level_contrasts() + + assert contrasts[0] == ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]), + assert contrasts[0] == ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]), + assert contrasts[0] == ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]), + + @staticmethod + @mark.unit_test + def test_subgroups_contrasts(): + """ Test the get_subgroups_contrasts method """ + + #contrasts = PipelineTeam08MQ().get_subgroups_contrasts() + #copes, varcopes, subject_list: list, participants_file: str + + @staticmethod + @mark.unit_test + def test_one_sample_t_test_regressors(): + """ Test the get_one_sample_t_test_regressors method """ + + regressors = PipelineTeam08MQ().get_one_sample_t_test_regressors(['001', '002']) + assert regressors == [1, 1] + + @staticmethod + @mark.unit_test + def test_two_sample_t_test_regressors(): + """ Test the get_two_sample_t_test_regressors method """ + + regressors, groups = PipelineTeam08MQ().get_two_sample_t_test_regressors( + ['001', '003'], # equalRange group + ['002', '004'], # equalIndifference group + ['001', '002', '003', '004'] # all subjects + ) + assert regressors == dict( + equalRange = [1, 0, 1, 0], + equalIndifference = [0, 1, 0, 1] + ) + assert groups == [1, 2, 1, 2] + @staticmethod @mark.pipeline_test def test_execution(): From 4892f5cdb47dfc579f4c78e6f2015d6a519de944 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 14:16:39 +0100 Subject: [PATCH 079/116] [TEST] typo [skip ci] --- tests/pipelines/test_team_08MQ.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index 589d49f0..25b12670 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -72,9 +72,9 @@ def test_run_level_contrasts(): """ 
Test the get_run_level_contrasts method """ contrasts = PipelineTeam08MQ().get_run_level_contrasts() - assert contrasts[0] == ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]), - assert contrasts[0] == ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]), - assert contrasts[0] == ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]), + assert contrasts[0] == ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]) + assert contrasts[0] == ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]) + assert contrasts[0] == ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]) @staticmethod @mark.unit_test From 517fc41ba6084cc17fee218430f2d9bab46648f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 14:18:09 +0100 Subject: [PATCH 080/116] [TEST] static methods testing [skip ci] --- tests/pipelines/test_team_08MQ.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index 25b12670..d5feff70 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -70,7 +70,7 @@ def test_subject_information(): @mark.unit_test def test_run_level_contrasts(): """ Test the get_run_level_contrasts method """ - contrasts = PipelineTeam08MQ().get_run_level_contrasts() + contrasts = PipelineTeam08MQ.get_run_level_contrasts() assert contrasts[0] == ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]) assert contrasts[0] == ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]) @@ -89,7 +89,7 @@ def test_subgroups_contrasts(): def test_one_sample_t_test_regressors(): """ Test the get_one_sample_t_test_regressors method """ - regressors = PipelineTeam08MQ().get_one_sample_t_test_regressors(['001', '002']) + regressors = PipelineTeam08MQ.get_one_sample_t_test_regressors(['001', '002']) assert regressors == [1, 1] @staticmethod @@ -97,7 +97,7 @@ def test_one_sample_t_test_regressors(): def test_two_sample_t_test_regressors(): """ Test the get_two_sample_t_test_regressors method """ - regressors, groups = PipelineTeam08MQ().get_two_sample_t_test_regressors( + regressors, groups = PipelineTeam08MQ.get_two_sample_t_test_regressors( ['001', '003'], # equalRange group ['002', '004'], # equalIndifference group ['001', '002', '003', '004'] # all subjects From fd9bbf38117ab0687a1c71d7b44ec1b8d328f606 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 16:37:08 +0100 Subject: [PATCH 081/116] [TEST] adding unit tests for 08MQ --- narps_open/pipelines/team_08MQ.py | 122 +++++++++++------------------- tests/pipelines/__init__.py | 31 ++++++++ tests/pipelines/test_team_08MQ.py | 42 ++++++++-- 3 files changed, 111 insertions(+), 84 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index aa51f417..b672367c 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -390,29 +390,23 @@ def get_subject_information(event_file): for line in file: info = line.strip().split() - - for condition in condition_names: - if condition == 'gain': - onsets[condition].append(float(info[0])) - durations[condition].append(float(info[4])) # TODO : change to info[1] (= 4) ? - amplitudes[condition].append(float(info[2])) - elif condition == 'loss': - onsets[condition].append(float(info[0])) - durations[condition].append(float(info[4])) # TODO : change to info[1] (= 4) ? 
- amplitudes[condition].append(float(info[3])) - elif condition == 'event': - onsets[condition].append(float(info[0])) - durations[condition].append(float(info[1])) - amplitudes[condition].append(1.0) - elif condition == 'response': - onsets[condition].append(float(info[0])) - durations[condition].append(float(info[1])) # TODO : change to info[4] (= RT) ? - if 'accept' in info[5]: - amplitudes[condition].append(1.0) - elif 'reject' in info[5]: - amplitudes[condition].append(-1.0) - else: - amplitudes[condition].append(0.0) + onsets['event'].append(float(info[0])) + durations['event'].append(float(info[1])) + amplitudes['event'].append(1.0) + onsets['gain'].append(float(info[0])) + durations['gain'].append(float(info[4])) # TODO : change to info[1] (= 4) ? + amplitudes['gain'].append(float(info[2])) + onsets['loss'].append(float(info[0])) + durations['loss'].append(float(info[4])) # TODO : change to info[1] (= 4) ? + amplitudes['loss'].append(float(info[3])) + onsets['response'].append(float(info[0])) + durations['response'].append(float(info[1])) # TODO : change to info[4] (= RT) ? + if 'accept' in info[5]: + amplitudes['response'].append(1.0) + elif 'reject' in info[5]: + amplitudes['response'].append(-1.0) + else: + amplitudes['response'].append(0.0) return [ Bunch( @@ -433,17 +427,12 @@ def get_run_level_contrasts(): Returns: - contrasts: list of tuples, list of contrasts to analyze """ - # List of condition names conditions = ['gain', 'loss'] - # Return contrast list return [ - # Positive parametric effect of gain - ('positive_effect_gain', 'T', conditions, [1, 0]), - # Positive parametric effect of loss - ('positive_effect_loss', 'T', conditions, [0, 1]), - # Negative parametric effect of loss. - ('negative_effect_loss', 'T', conditions, [0, -1]) + ('positive_effect_gain', 'T', conditions, [1, 0]), # Positive parametric effect of gain + ('positive_effect_loss', 'T', conditions, [0, 1]), # Positive parametric effect of loss + ('negative_effect_loss', 'T', conditions, [0, -1]) # Negative parametric effect of loss ] def get_run_level_analysis(self): @@ -689,18 +678,19 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi in the equalIndifference group - copes_equal_range : a subset of copes corresponding to subjects in the equalRange group - - copes_global : a list of all copes - varcopes_equal_indifference : a subset of varcopes corresponding to subjects in the equalIndifference group - varcopes_equal_range : a subset of varcopes corresponding to subjects in the equalRange group - equal_indifference_ids : a list of subject ids in the equalIndifference group - equal_range_ids : a list of subject ids in the equalRange group - - varcopes_global : a list of all varcopes """ - equal_range_ids = [] - equal_indifference_ids = [] + subject_list_sub_ids = [] # ids as written in the participants file + equal_range_ids = [] # ids as 3-digit string + equal_indifference_ids = [] # ids as 3-digit string + equal_range_sub_ids = [] # ids as written in the participants file + equal_indifference_sub_ids = [] # ids as written in the participants file # Reading file containing participants IDs and groups with open(participants_file, 'rt') as file: @@ -708,44 +698,26 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi for line in file: info = line.strip().split() - - # Checking for each participant if its ID was selected - # and separate people depending on their group - if info[0][-3:] in subject_list and info[1] == 'equalIndifference': - 
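The rewrite above flattens the per-condition `if/elif` chain into direct appends, one pass per row of the event file. A condensed, self-contained sketch of that row logic follows; the sample row and the column layout (onset, duration, gain, loss, RT, participant_response) are taken from the events.tsv fixture added later in this series:

```python
# Sketch of the per-row parsing performed by the refactored get_subject_information.
row = '4.071\t4\t14\t6\t2.388\tweakly_accept'
info = row.strip().split()

onset, duration, reaction_time = float(info[0]), float(info[1]), float(info[4])
event = dict(onset = onset, duration = duration, amplitude = 1.0)
gain = dict(onset = onset, duration = reaction_time, amplitude = float(info[2]))
loss = dict(onset = onset, duration = reaction_time, amplitude = float(info[3]))
response = dict(
    onset = onset,
    duration = duration,
    # accept -> 1.0, reject -> -1.0, anything else (e.g. NoResp) -> 0.0
    amplitude = 1.0 if 'accept' in info[5] else -1.0 if 'reject' in info[5] else 0.0
)

assert gain['amplitude'] == 14.0 and loss['amplitude'] == 6.0
```

Note that the gain and loss durations still carry the TODO from the original code: they use the reaction-time column rather than the fixed 4 s event duration.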
equal_indifference_ids.append(info[0][-3:]) - elif info[0][-3:] in subject_list and info[1] == 'equalRange': - equal_range_ids.append(info[0][-3:]) - - copes_equal_indifference = [] - copes_equal_range = [] - copes_global = [] - varcopes_equal_indifference = [] - varcopes_equal_range = [] - varcopes_global = [] - - # Checking for each selected file if the corresponding participant was selected - # and add the file to the list corresponding to its group - for cope, varcope in zip(copes, varcopes): - sub_id = cope.split('/') - if sub_id[-2][-3:] in equal_indifference_ids: - copes_equal_indifference.append(cope) - elif sub_id[-2][-3:] in equal_range_ids: - copes_equal_range.append(cope) - if sub_id[-2][-3:] in subject_list: - copes_global.append(cope) - - sub_id = varcope.split('/') - if sub_id[-2][-3:] in equal_indifference_ids: - varcopes_equal_indifference.append(varcope) - elif sub_id[-2][-3:] in equal_range_ids: - varcopes_equal_range.append(varcope) - if sub_id[-2][-3:] in subject_list: - varcopes_global.append(varcope) - - return copes_equal_indifference, copes_equal_range,\ - varcopes_equal_indifference, varcopes_equal_range,\ - equal_indifference_ids, equal_range_ids,\ - copes_global, varcopes_global + subject_id = info[0][-3:] + subject_group = info[1] + + # Check if the participant ID was selected and sort depending on group + if subject_id in subject_list: + subject_list_sub_ids.append(info[0]) + if subject_group == 'equalIndifference': + equal_indifference_ids.append(subject_id) + equal_indifference_sub_ids.append(info[0]) + elif subject_group == 'equalRange': + equal_range_ids.append(subject_id) + equal_range_sub_ids.append(info[0]) + + # Reurn sorted selected copes and varcopes by group, and corresponding ids + return \ + [c for c in copes if any(i in c for i in equal_indifference_sub_ids)],\ + [c for c in copes if any(i in c for i in equal_range_sub_ids)],\ + [v for v in varcopes if any(i in v for i in equal_indifference_sub_ids)],\ + [v for v in varcopes if any(i in v for i in equal_range_sub_ids)],\ + equal_indifference_ids, equal_range_ids def get_one_sample_t_test_regressors(subject_ids: list) -> dict: """ @@ -850,9 +822,7 @@ def get_group_level_analysis_sub_workflow(self, method): 'varcopes_equal_indifference', 'varcopes_equal_range', 'equal_indifference_ids', - 'equal_range_ids', - 'copes_global', - 'varcopes_global' + 'equal_range_ids' ] ), name = 'get_contrasts', diff --git a/tests/pipelines/__init__.py b/tests/pipelines/__init__.py index e69de29b..d6ad4fdc 100644 --- a/tests/pipelines/__init__.py +++ b/tests/pipelines/__init__.py @@ -0,0 +1,31 @@ +#!/usr/bin/python +# coding: utf-8 + +""" +Configuration for testing of the narps_open.pipelines modules. 
+""" + +from pytest import helpers + +@helpers.register +def mock_event_data(mocker): + """ Mocks the retrun of the open function with the contents of a fake event file """ + + fake_event_data = 'onset duration\tgain\tloss\tRT\tparticipant_response\n' + fake_event_data += '4.071\t4\t14\t6\t2.388\tweakly_accept\n' + fake_event_data += '11.834\t4\t34\t14\t2.289\tstrongly_accept\n' + + mocker.patch('builtins.open', mocker.mock_open(read_data = fake_event_data)) + + +@helpers.register +def mock_participants_data(mocker): + """ Mocks the retrun of the open function with the contents of a fake participants file """ + + fake_participants_data = 'participant_id\tgroup\tgender\tage\n' + fake_participants_data += 'sub-001\tequalIndifference\tM\t24\n' + fake_participants_data += 'sub-002\tequalRange\tM\t25\n' + fake_participants_data += 'sub-003\tequalIndifference\tF\t27\n' + fake_participants_data += 'sub-004\tequalRange\tM\t25\n' + + mocker.patch('builtins.open', mocker.mock_open(read_data = fake_participants_data)) diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index d5feff70..298107b1 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -13,6 +13,7 @@ from pytest import helpers, mark from nipype import Workflow +from nipype.interfaces.base import Bunch from narps_open.pipelines.team_08MQ import PipelineTeam08MQ @@ -63,26 +64,51 @@ def test_outputs(): @staticmethod @mark.unit_test - def test_subject_information(): + def test_subject_information(mocker): """ Test the get_subject_information method """ + helpers.mock_event_data(mocker) + + information = PipelineTeam08MQ.get_subject_information('fake_event_file_path')[0] + + assert isinstance(information, Bunch) + assert information.amplitudes == [[1.0, 1.0], [14.0, 34.0], [6.0, 14.0], [1.0, 1.0]] + assert information.durations == [[4.0, 4.0], [2.388, 2.289], [2.388, 2.289], [4.0, 4.0]] + assert information.conditions == ['event', 'gain', 'loss', 'response'] + assert information.onsets == [ + [4.071, 11.834], [4.071, 11.834], [4.071, 11.834], [4.071, 11.834] + ] + @staticmethod @mark.unit_test def test_run_level_contrasts(): """ Test the get_run_level_contrasts method """ - contrasts = PipelineTeam08MQ.get_run_level_contrasts() + contrasts = PipelineTeam08MQ.get_run_level_contrasts() assert contrasts[0] == ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]) - assert contrasts[0] == ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]) - assert contrasts[0] == ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]) + assert contrasts[1] == ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]) + assert contrasts[2] == ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]) @staticmethod @mark.unit_test - def test_subgroups_contrasts(): + def test_subgroups_contrasts(mocker): """ Test the get_subgroups_contrasts method """ - #contrasts = PipelineTeam08MQ().get_subgroups_contrasts() - #copes, varcopes, subject_list: list, participants_file: str + helpers.mock_participants_data(mocker) + + cei, cer, vei, ver, eii, eri = PipelineTeam08MQ.get_subgroups_contrasts( + ['sub-001/_contrast_id_1/cope1.nii.gz', 'sub-001/_contrast_id_2/cope1.nii.gz', 'sub-002/_contrast_id_1/cope1.nii.gz', 'sub-002/_contrast_id_2/cope1.nii.gz', 'sub-003/_contrast_id_1/cope1.nii.gz', 'sub-003/_contrast_id_2/cope1.nii.gz', 'sub-004/_contrast_id_1/cope1.nii.gz', 'sub-004/_contrast_id_2/cope1.nii.gz'], # copes + ['sub-001/_contrast_id_1/varcope1.nii.gz', 'sub-001/_contrast_id_2/varcope1.nii.gz', 
'sub-002/_contrast_id_1/varcope1.nii.gz', 'sub-002/_contrast_id_2/varcope1.nii.gz', 'sub-003/_contrast_id_1/varcope1.nii.gz', 'sub-003/_contrast_id_2/varcope1.nii.gz', 'sub-004/_contrast_id_1/varcope1.nii.gz', 'sub-004/_contrast_id_2/varcope1.nii.gz'], # varcopes + ['001', '002', '003', '004'], # subject_list + ['fake_participants_file_path'] # participants file + ) + + assert cei == ['sub-001/_contrast_id_1/cope1.nii.gz', 'sub-001/_contrast_id_2/cope1.nii.gz', 'sub-003/_contrast_id_1/cope1.nii.gz', 'sub-003/_contrast_id_2/cope1.nii.gz'] + assert cer == ['sub-002/_contrast_id_1/cope1.nii.gz', 'sub-002/_contrast_id_2/cope1.nii.gz', 'sub-004/_contrast_id_1/cope1.nii.gz', 'sub-004/_contrast_id_2/cope1.nii.gz'] + assert vei == ['sub-001/_contrast_id_1/varcope1.nii.gz', 'sub-001/_contrast_id_2/varcope1.nii.gz', 'sub-003/_contrast_id_1/varcope1.nii.gz', 'sub-003/_contrast_id_2/varcope1.nii.gz'] + assert ver == ['sub-002/_contrast_id_1/varcope1.nii.gz', 'sub-002/_contrast_id_2/varcope1.nii.gz', 'sub-004/_contrast_id_1/varcope1.nii.gz', 'sub-004/_contrast_id_2/varcope1.nii.gz'] + assert eii == ['001', '003'] + assert eri == ['002', '004'] @staticmethod @mark.unit_test @@ -90,7 +116,7 @@ def test_one_sample_t_test_regressors(): """ Test the get_one_sample_t_test_regressors method """ regressors = PipelineTeam08MQ.get_one_sample_t_test_regressors(['001', '002']) - assert regressors == [1, 1] + assert regressors == {'group_mean': [1, 1]} @staticmethod @mark.unit_test From 72528647361c9402886b8285d9ec7e76e1735952 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 15 Nov 2023 16:44:41 +0100 Subject: [PATCH 082/116] [CODESPELL] typos --- narps_open/pipelines/team_08MQ.py | 2 +- tests/pipelines/__init__.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index b672367c..ef1c2fb1 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -711,7 +711,7 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi equal_range_ids.append(subject_id) equal_range_sub_ids.append(info[0]) - # Reurn sorted selected copes and varcopes by group, and corresponding ids + # Return sorted selected copes and varcopes by group, and corresponding ids return \ [c for c in copes if any(i in c for i in equal_indifference_sub_ids)],\ [c for c in copes if any(i in c for i in equal_range_sub_ids)],\ diff --git a/tests/pipelines/__init__.py b/tests/pipelines/__init__.py index d6ad4fdc..2325a9b9 100644 --- a/tests/pipelines/__init__.py +++ b/tests/pipelines/__init__.py @@ -9,7 +9,7 @@ @helpers.register def mock_event_data(mocker): - """ Mocks the retrun of the open function with the contents of a fake event file """ + """ Mocks the return of the open function with the contents of a fake event file """ fake_event_data = 'onset duration\tgain\tloss\tRT\tparticipant_response\n' fake_event_data += '4.071\t4\t14\t6\t2.388\tweakly_accept\n' @@ -20,7 +20,7 @@ def mock_event_data(mocker): @helpers.register def mock_participants_data(mocker): - """ Mocks the retrun of the open function with the contents of a fake participants file """ + """ Mocks the return of the open function with the contents of a fake participants file """ fake_participants_data = 'participant_id\tgroup\tgender\tage\n' fake_participants_data += 'sub-001\tequalIndifference\tM\t24\n' From 68adef484e60cc67e0323fb20700dd577b55f9d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 16 
Nov 2023 12:15:11 +0100 Subject: [PATCH 083/116] [BUG] inversion between groups [skip ci] --- narps_open/pipelines/team_08MQ.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ef1c2fb1..8b351ebd 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -719,18 +719,18 @@ def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_fi [v for v in varcopes if any(i in v for i in equal_range_sub_ids)],\ equal_indifference_ids, equal_range_ids - def get_one_sample_t_test_regressors(subject_ids: list) -> dict: + def get_one_sample_t_test_regressors(subject_list: list) -> dict: """ Create dictionary of regressors for one sample t-test group analysis. Parameters: - - subject_ids: ids of subject in the group for which to do the analysis + - subject_list: ids of subject in the group for which to do the analysis Returns: - dict containing named lists of regressors. """ - return dict(group_mean = [1 for _ in subject_ids]) + return dict(group_mean = [1 for _ in subject_list]) def get_two_sample_t_test_regressors( equal_range_ids: list, @@ -833,7 +833,7 @@ def get_group_level_analysis_sub_workflow(self, method): regressors_one_sample = Node( Function( function = self.get_one_sample_t_test_regressors, - input_names = ['subject_ids'], + input_names = ['subject_list'], output_names = ['regressors'] ), name = 'regressors_one_sample', @@ -912,16 +912,16 @@ def get_group_level_analysis_sub_workflow(self, method): group_level_analysis.connect([ (get_contrasts, merge_copes, [('copes_equal_indifference', 'in_files')]), (get_contrasts, merge_varcopes, [('varcopes_equal_indifference', 'in_files')]), - (get_contrasts, regressors_one_sample, [('equal_range_ids', 'subject_ids')]) + (get_contrasts, regressors_one_sample, [ + ('equal_indifference_ids', 'subject_list') + ]) ]) elif method == 'equalRange': group_level_analysis.connect([ (get_contrasts, merge_copes, [('copes_equal_range', 'in_files')]), (get_contrasts, merge_varcopes, [('varcopes_equal_range', 'in_files')]), - (get_contrasts, regressors_one_sample, [ - ('equal_indifference_ids', 'subject_ids') - ]) + (get_contrasts, regressors_one_sample, [('equal_range_ids', 'subject_list')]) ]) elif method == 'groupComp': From c6ae5216e04b23260189eae95ae294517fd7bae6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 20 Nov 2023 16:50:13 +0100 Subject: [PATCH 084/116] [REFAC] Using narps_open.core functions --- narps_open/pipelines/team_08MQ.py | 389 +++++++++++++----------------- tests/pipelines/test_team_08MQ.py | 37 +-- 2 files changed, 180 insertions(+), 246 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 8b351ebd..3bf5dd65 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -6,7 +6,7 @@ from os.path import join from itertools import product -from nipype import Node, Workflow +from nipype import Node, Workflow, MapNode from nipype.interfaces.utility import IdentityInterface, Function, Merge, Split, Select from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( @@ -26,7 +26,8 @@ from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation - +from narps_open.data.participants import get_group +from narps_open.core.common import remove_file, list_intersection, elements_in_string, clean_list # Setup FSL 
FSLCommand.set_default_output_type('NIFTI_GZ') @@ -38,6 +39,11 @@ def __init__(self): self.fwhm = 6.0 self.team_id = '08MQ' self.contrast_list = ['1', '2', '3'] + self.run_level_contasts = [ + ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]), + ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]), + ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]) + ] def remove_files(_, files): """ @@ -66,10 +72,10 @@ def get_preprocessing(self): """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ # IdentityInterface node - allows to iterate over subjects and runs - info_source = Node(IdentityInterface( + information_source = Node(IdentityInterface( fields = ['subject_id', 'run_id']), - name = 'info_source') - info_source.iterables = [ + name = 'information_source') + information_source.iterables = [ ('run_id', self.run_list), ('subject_id', self.subject_list), ] @@ -265,7 +271,7 @@ def get_preprocessing(self): preprocessing.config['execution']['stop_on_first_crash'] = 'true' preprocessing.connect([ # Inputs - (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), + (information_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), # Anatomical images (select_files, bias_field_correction, [('anat', 'in_files')]), @@ -418,23 +424,6 @@ def get_subject_information(event_file): regressors = None) ] - def get_run_level_contrasts(): - """ - Create a list of tuples that represent contrasts. - Each contrast is in the form : - (Name,Stat,[list of condition names],[weights on those conditions]) - - Returns: - - contrasts: list of tuples, list of contrasts to analyze - """ - conditions = ['gain', 'loss'] - - return [ - ('positive_effect_gain', 'T', conditions, [1, 0]), # Positive parametric effect of gain - ('positive_effect_loss', 'T', conditions, [0, 1]), # Positive parametric effect of loss - ('negative_effect_loss', 'T', conditions, [0, -1]) # Negative parametric effect of loss - ] - def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline @@ -443,10 +432,10 @@ def get_run_level_analysis(self): """ # IdentityInterface node - allows to iterate over subjects and runs - info_source = Node(IdentityInterface( + information_source = Node(IdentityInterface( fields = ['subject_id', 'run_id']), - name = 'info_source') - info_source.iterables = [ + name = 'information_source') + information_source.iterables = [ ('run_id', self.run_list), ('subject_id', self.subject_list), ] @@ -489,20 +478,14 @@ def get_run_level_analysis(self): specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime'] specify_model.inputs.parameter_source = 'FSL' # Source of motion parameters. 
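For context, the run-level hunks above converge on a compact modelling chain in which the contrasts live directly on `Level1Design` instead of coming from a dedicated Function node. A sketch of that chain, with a placeholder repetition time and high-pass cutoff standing in for the values the pipeline actually reads:

```python
from nipype import Node, Workflow
from nipype.algorithms.modelgen import SpecifyModel
from nipype.interfaces.fsl import Level1Design, FEATModel

TR = 1.0  # placeholder, normally TaskInformation()['RepetitionTime']

specify_model = Node(SpecifyModel(), name = 'specify_model')
specify_model.inputs.input_units = 'secs'
specify_model.inputs.high_pass_filter_cutoff = 100  # placeholder cutoff
specify_model.inputs.time_repetition = TR
specify_model.inputs.parameter_source = 'FSL'

model_design = Node(Level1Design(), name = 'model_design')
model_design.inputs.bases = {'dgamma': {'derivs': True}}  # double gamma HRF + derivative
model_design.inputs.interscan_interval = TR
model_design.inputs.model_serial_correlations = True
model_design.inputs.contrasts = [
    ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]),
    ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]),
    ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1])
]

model_generation = Node(FEATModel(), name = 'model_generation')

run_level = Workflow(name = 'run_level_sketch')
run_level.connect([
    (specify_model, model_design, [('session_info', 'session_info')]),
    (model_design, model_generation, [
        ('ev_files', 'ev_files'),
        ('fsf_files', 'fsf_file')])
])
```

Storing the contrast list on the pipeline object (`self.run_level_contasts`) also lets the unit tests check it without building the workflow.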
- # Function Node get_contrasts - Get the list of contrasts - contrasts = Node(Function( - function = self.get_run_level_contrasts, - input_names = [], - output_names = ['contrasts'] - ), name = 'contrasts') - # Level1Design Node - Generate files for first level computation - l1_design = Node(Level1Design(), 'l1_design') - l1_design.inputs.bases = { + model_design = Node(Level1Design(), 'model_design') + model_design.inputs.bases = { 'dgamma':{'derivs' : True} # Canonical double gamma HRF plus temporal derivative } - l1_design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] - l1_design.inputs.model_serial_correlations = True + model_design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] + model_design.inputs.model_serial_correlations = True + model_design.inputs.contrasts = self.run_level_contasts # FEATModel Node - Generate first level model model_generation = Node(FEATModel(), name = 'model_generation') @@ -516,14 +499,13 @@ def get_run_level_analysis(self): name = 'run_level_analysis' ) run_level_analysis.connect([ - (info_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), + (information_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]), (select_files, subject_information, [('event', 'event_file')]), (subject_information, specify_model, [('subject_info', 'subject_info')]), (select_files, specify_model, [('motion', 'realignment_parameters')]), (select_files, specify_model, [('func', 'functional_runs')]), - (contrasts, l1_design, [('contrasts', 'contrasts')]), - (specify_model, l1_design, [('session_info', 'session_info')]), - (l1_design, model_generation, [ + (specify_model, model_design, [('session_info', 'session_info')]), + (model_design, model_generation, [ ('ev_files', 'ev_files'), ('fsf_files', 'fsf_file')]), (select_files, model_estimate, [('func', 'in_file')]), @@ -583,10 +565,10 @@ def get_subject_level_analysis(self): """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ # IdentityInterface node - allows to iterate over subjects and contrasts - info_source = Node(IdentityInterface( + information_source = Node(IdentityInterface( fields = ['subject_id', 'contrast_id']), - name = 'info_source') - info_source.iterables = [ + name = 'information_source') + information_source.iterables = [ ('subject_id', self.subject_list), ('contrast_id', self.contrast_list) ] @@ -627,7 +609,7 @@ def get_subject_level_analysis(self): base_dir = self.directories.working_dir, name = 'subject_level_analysis') subject_level_analysis.connect([ - (info_source, select_files, [ + (information_source, select_files, [ ('subject_id', 'subject_id'), ('contrast_id', 'contrast_id')]), (select_files, merge_copes, [('cope', 'in_files')]), @@ -663,62 +645,6 @@ def get_subject_level_outputs(self): return [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] - def get_subgroups_contrasts(copes, varcopes, subject_list: list, participants_file: str): - """ - Return the file list containing only the files belonging to subject in the wanted group. 
- - Parameters : - - copes: original file list selected by select_files node - - varcopes: original file list selected by select_files node - - subject_list: list of subject IDs that are analyzed - - participants_file: file containing participants characteristics - - Returns : - - copes_equal_indifference : a subset of copes corresponding to subjects - in the equalIndifference group - - copes_equal_range : a subset of copes corresponding to subjects - in the equalRange group - - varcopes_equal_indifference : a subset of varcopes corresponding to subjects - in the equalIndifference group - - varcopes_equal_range : a subset of varcopes corresponding to subjects - in the equalRange group - - equal_indifference_ids : a list of subject ids in the equalIndifference group - - equal_range_ids : a list of subject ids in the equalRange group - """ - - subject_list_sub_ids = [] # ids as written in the participants file - equal_range_ids = [] # ids as 3-digit string - equal_indifference_ids = [] # ids as 3-digit string - equal_range_sub_ids = [] # ids as written in the participants file - equal_indifference_sub_ids = [] # ids as written in the participants file - - # Reading file containing participants IDs and groups - with open(participants_file, 'rt') as file: - next(file) # skip the header - - for line in file: - info = line.strip().split() - subject_id = info[0][-3:] - subject_group = info[1] - - # Check if the participant ID was selected and sort depending on group - if subject_id in subject_list: - subject_list_sub_ids.append(info[0]) - if subject_group == 'equalIndifference': - equal_indifference_ids.append(subject_id) - equal_indifference_sub_ids.append(info[0]) - elif subject_group == 'equalRange': - equal_range_ids.append(subject_id) - equal_range_sub_ids.append(info[0]) - - # Return sorted selected copes and varcopes by group, and corresponding ids - return \ - [c for c in copes if any(i in c for i in equal_indifference_sub_ids)],\ - [c for c in copes if any(i in c for i in equal_range_sub_ids)],\ - [v for v in varcopes if any(i in v for i in equal_indifference_sub_ids)],\ - [v for v in varcopes if any(i in v for i in equal_range_sub_ids)],\ - equal_indifference_ids, equal_range_ids - def get_one_sample_t_test_regressors(subject_list: list) -> dict: """ Create dictionary of regressors for one sample t-test group analysis. 
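The one-sample group model being rebuilt around this point reduces to a single column of ones. A sketch of how that regressors dictionary feeds FSL's `MultipleRegressDesign`, using the two opposite-sign group contrasts that the equalRange and equalIndifference branches set further down (the subject ids here are placeholders):

```python
from nipype import Node
from nipype.interfaces.fsl import MultipleRegressDesign

subject_list = ['001', '002', '003']  # placeholder ids

# get_one_sample_t_test_regressors boils down to this dictionary
regressors = dict(group_mean = [1 for _ in subject_list])

specify_model = Node(MultipleRegressDesign(), name = 'specify_model')
specify_model.inputs.regressors = regressors
specify_model.inputs.contrasts = [
    ('Group', 'T', ['group_mean'], [1]),
    ('Group', 'T', ['group_mean'], [-1])
]
```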
@@ -786,14 +712,13 @@ def get_group_level_analysis_sub_workflow(self, method): - group_level_analysis: nipype.WorkFlow """ # Infosource Node - iterate over the contrasts generated by the subject level analysis - info_source = Node( + information_source = Node( IdentityInterface( - fields = ['contrast_id', 'subjects'], - subjects = self.subject_list + fields = ['contrast_id'] ), - name = 'info_source', + name = 'information_source', ) - info_source.iterables = [('contrast_id', self.contrast_list)] + information_source.iterables = [('contrast_id', self.contrast_list)] # SelectFiles Node - select necessary files templates = { @@ -811,49 +736,29 @@ def get_group_level_analysis_sub_workflow(self, method): data_sink = Node(DataSink(), name = 'data_sink') data_sink.inputs.base_directory = self.directories.output_dir - # Function Node get_subgroups_contrasts - Get the contrast files for each subgroup - get_contrasts = Node( - Function( - function = self.get_subgroups_contrasts, - input_names = ['copes', 'varcopes', 'subject_list', 'participants_file'], - output_names = [ - 'copes_equal_indifference', - 'copes_equal_range', - 'varcopes_equal_indifference', - 'varcopes_equal_range', - 'equal_indifference_ids', - 'equal_range_ids' - ] - ), - name = 'get_contrasts', - ) - - # Function Node get_one_sample_t_test_regressors - # Get regressors in the equalRange and equalIndifference method case - regressors_one_sample = Node( - Function( - function = self.get_one_sample_t_test_regressors, - input_names = ['subject_list'], - output_names = ['regressors'] + # Function Node elements_in_string + # Get contrast of parameter estimates (cope) for these subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_copes = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] ), - name = 'regressors_one_sample', + name = 'get_copes', iterfield = 'input_str' ) - # Function Node get_two_sample_t_test_regressors - # Get regressors in the groupComp method case - regressors_two_sample = Node( - Function( - function = self.get_two_sample_t_test_regressors, - input_names = [ - 'equal_range_ids', - 'equal_indifference_ids', - 'subject_list', - ], - output_names = ['regressors', 'groups'] + # Function Node elements_in_string + # Get variance of the estimated copes (varcope) for these subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_varcopes = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] ), - name = 'regressors_two_sample', + name = 'get_varcopes', iterfield = 'input_str' ) - regressors_two_sample.inputs.subject_list = self.subject_list # Merge Node - Merge cope files merge_copes = Node(MergeImages(), name = 'merge_copes') @@ -889,42 +794,80 @@ def get_group_level_analysis_sub_workflow(self, method): name = f'group_level_analysis_{method}_nsub_{nb_subjects}' ) group_level_analysis.connect([ - (info_source, select_files, [('contrast_id', 'contrast_id')]), - (info_source, get_contrasts, [('subjects', 'subject_list')]), - (select_files, get_contrasts, [ - ('cope', 'copes'), - ('varcope', 'varcopes'), - ('participants', 'participants_file'), + (information_source, select_files, [('contrast_id', 'contrast_id')]), + (select_files, get_copes, [('cope', 'input_str')]), + (select_files, get_varcopes, [('varcope', 
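The `get_copes` / `get_varcopes` MapNodes introduced in this hunk filter the globbed file lists down to the selected subjects. A plain-Python sketch of the pattern the comments describe, assuming `elements_in_string` returns its input string when it mentions one of the elements (and `None` otherwise) and `clean_list` drops those `None` values:

```python
def elements_in_string(input_str, elements):
    # Keep the string only if it mentions one of the given elements
    return input_str if any(element in input_str for element in elements) else None

def clean_list(input_list, element = None):
    # Remove the placeholder values a MapNode of elements_in_string leaves behind
    return [e for e in input_list if e != element]

copes = ['sub-001/cope1.nii.gz', 'sub-002/cope1.nii.gz', 'sub-003/cope1.nii.gz']
group = ['sub-001', 'sub-003']

kept = clean_list([elements_in_string(cope, group) for cope in copes])
assert kept == ['sub-001/cope1.nii.gz', 'sub-003/cope1.nii.gz']
```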
'input_str')]), + (get_copes, merge_copes, [(('out_list', clean_list), 'in_files')]), + (get_varcopes, merge_varcopes,[(('out_list', clean_list), 'in_files')]), + (merge_copes, estimate_model, [('merged_file', 'cope_file')]), + (merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]), + (specify_model, estimate_model, [ + ('design_mat', 'design_file'), + ('design_con', 't_con_file'), + ('design_grp', 'cov_split_file') + ]), + (merge_copes, randomise, [('merged_file', 'in_file')]), + (specify_model, randomise, [ + ('design_mat', 'design_mat'), + ('design_con', 'tcon') + ]), + (randomise, data_sink, [ + ('t_corrected_p_files', + f'group_level_analysis_{method}_nsub_{nb_subjects}.@tcorpfile'), + ('tstat_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstat') + ]), + (estimate_model, data_sink, [ + ('zstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats'), + ('tstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats') ]) ]) if method in ('equalRange', 'equalIndifference'): + + # Setup a one sample t-test specify_model.inputs.contrasts = [ ('Group', 'T', ['group_mean'], [1]), ('Group', 'T', ['group_mean'], [-1]) ] + # Function Node get_group_subjects - Get subjects in the group and in the subject_list + get_group_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_group_subjects' + ) + get_group_subjects.inputs.list_1 = get_group(method) + get_group_subjects.inputs.list_2 = self.subject_list + + # Function Node get_one_sample_t_test_regressors + # Get regressors in the equalRange and equalIndifference method case + regressors_one_sample = Node( + Function( + function = self.get_one_sample_t_test_regressors, + input_names = ['subject_list'], + output_names = ['regressors'] + ), + name = 'regressors_one_sample', + ) + + # Add missing connections group_level_analysis.connect([ + (get_group_subjects, get_copes, [('out_list', 'elements')]), + (get_group_subjects, get_varcopes, [('out_list', 'elements')]), + (get_group_subjects, regressors_one_sample, [('out_list', 'subject_list')]), (regressors_one_sample, specify_model, [('regressors', 'regressors')]) - ]) + ]) - if method == 'equalIndifference': - group_level_analysis.connect([ - (get_contrasts, merge_copes, [('copes_equal_indifference', 'in_files')]), - (get_contrasts, merge_varcopes, [('varcopes_equal_indifference', 'in_files')]), - (get_contrasts, regressors_one_sample, [ - ('equal_indifference_ids', 'subject_list') - ]) - ]) + elif method == 'groupComp': - elif method == 'equalRange': - group_level_analysis.connect([ - (get_contrasts, merge_copes, [('copes_equal_range', 'in_files')]), - (get_contrasts, merge_varcopes, [('varcopes_equal_range', 'in_files')]), - (get_contrasts, regressors_one_sample, [('equal_range_ids', 'subject_list')]) - ]) + # Select copes and varcopes corresponding to the selected subjects + # Indeed the SelectFiles node asks for all (*) subjects available + get_copes.inputs.elements = self.subject_list + get_varcopes.inputs.elements = self.subject_list - elif method == 'groupComp': + # Setup a two sample t-test specify_model.inputs.contrasts = [( 'Eq range vs Eq indiff in loss', 'T', @@ -932,43 +875,59 @@ def get_group_level_analysis_sub_workflow(self, method): [1, -1] )] + # Function Node get_equal_range_subjects + # Get subjects in the equalRange group and in the subject_list + get_equal_range_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 
'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_range_subjects' + ) + get_equal_range_subjects.inputs.list_1 = get_group('equalRange') + get_equal_range_subjects.inputs.list_2 = self.subject_list + + # Function Node get_equal_indifference_subjects + # Get subjects in the equalIndifference group and in the subject_list + get_equal_indifference_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_indifference_subjects' + ) + get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') + get_equal_indifference_subjects.inputs.list_2 = self.subject_list + + # Function Node get_two_sample_t_test_regressors + # Get regressors in the groupComp method case + regressors_two_sample = Node( + Function( + function = self.get_two_sample_t_test_regressors, + input_names = [ + 'equal_range_ids', + 'equal_indifference_ids', + 'subject_list', + ], + output_names = ['regressors', 'groups'] + ), + name = 'regressors_two_sample', + ) + regressors_two_sample.inputs.subject_list = self.subject_list + + # Add missing connections group_level_analysis.connect([ - (select_files, merge_copes, [('cope', 'in_files')]), - (select_files, merge_varcopes, [('varcope', 'in_files')]), - (get_contrasts, regressors_two_sample, [ - ('equal_range_ids', 'equal_range_ids'), - ('equal_indifference_ids', 'equal_indifference_ids') + (get_equal_range_subjects, regressors_two_sample, [ + ('out_list', 'equal_range_id') + ]), + (get_equal_indifference_subjects, regressors_two_sample, [ + ('out_list', 'equal_indifference_id') ]), (regressors_two_sample, specify_model, [ ('regressors', 'regressors'), - ('groups', 'groups') - ]) + ('groups', 'groups')]) ]) - group_level_analysis.connect([ - (merge_copes, estimate_model, [('merged_file', 'cope_file')]), - (merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]), - (specify_model, estimate_model, [ - ('design_mat', 'design_file'), - ('design_con', 't_con_file'), - ('design_grp', 'cov_split_file') - ]), - (merge_copes, randomise, [('merged_file', 'in_file')]), - (specify_model, randomise, [ - ('design_mat', 'design_mat'), - ('design_con', 'tcon') - ]), - (randomise, data_sink, [ - ('t_corrected_p_files', - f'group_level_analysis_{method}_nsub_{nb_subjects}.@tcorpfile'), - ('tstat_files', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstat') - ]), - (estimate_model, data_sink, [ - ('zstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@zstats'), - ('tstats', f'group_level_analysis_{method}_nsub_{nb_subjects}.@tstats') - ]) - ]) - return group_level_analysis def get_hypotheses_outputs(self): @@ -977,40 +936,40 @@ def get_hypotheses_outputs(self): nb_sub = len(self.subject_list) files = [ join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_pgain', 'zstat1.nii.gz'), + '_contrast_id_1', 'zstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_pgain', 'zstat1.nii.gz'), + '_contrast_id_1', 'zstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + 
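For the `groupComp` branch, the regressors node built here has an expected output that is pinned by the unit test earlier in the series. A plain-Python sketch consistent with that test (the project's implementation may differ in detail): one indicator column per group, plus the group vector FSL uses for its variance structure:

```python
def get_two_sample_t_test_regressors(equal_range_ids, equal_indifference_ids, subject_list):
    # One 0/1 indicator regressor per group, ordered like subject_list
    regressors = dict(
        equalRange = [1 if s in equal_range_ids else 0 for s in subject_list],
        equalIndifference = [1 if s in equal_indifference_ids else 0 for s in subject_list]
    )
    # Group membership vector (1 = equalRange, 2 = equalIndifference)
    groups = [1 if s in equal_range_ids else 2 for s in subject_list]
    return regressors, groups

regressors, groups = get_two_sample_t_test_regressors(
    ['001', '003'], ['002', '004'], ['001', '002', '003', '004'])
assert regressors == dict(equalRange = [1, 0, 1, 0], equalIndifference = [0, 1, 0, 1])
assert groups == [1, 2, 1, 2]
```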
'_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_pgain', 'zstat1.nii.gz'), + '_contrast_id_1', 'zstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_pgain', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_1', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_pgain', 'zstat1.nii.gz'), + '_contrast_id_1', 'zstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat2.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat2.nii.gz'), + '_contrast_id_2', 'zstat2.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat2.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat2.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat2.nii.gz'), + '_contrast_id_2', 'zstat2.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat1.nii.gz'), + '_contrast_id_2', 'zstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_equalRange_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat1.nii.gz'), + '_contrast_id_2', 'zstat1.nii.gz'), join(f'group_level_analysis_groupComp_nsub_{nb_sub}', - '_contrast_id_ploss', 'randomise_tfce_corrp_tstat1.nii.gz'), + '_contrast_id_2', 'randomise_tfce_corrp_tstat1.nii.gz'), join(f'group_level_analysis_groupComp_nsub_{nb_sub}', - '_contrast_id_ploss', 'zstat1.nii.gz') + '_contrast_id_2', 'zstat1.nii.gz') ] return [join(self.directories.output_dir, f) for f in files] diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index 298107b1..28ad2659 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -30,6 +30,12 @@ def test_create(): # 1 - check the parameters assert pipeline.fwhm == 6.0 assert pipeline.team_id == '08MQ' + assert pipeline.contrast_list == ['1', '2', '3'] + assert pipeline.run_level_contasts == [ + ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]), + ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]), + ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]) + ] # 2 - check workflows assert isinstance(pipeline.get_preprocessing(), Workflow) @@ -79,37 +85,6 @@ def test_subject_information(mocker): [4.071, 11.834], [4.071, 11.834], [4.071, 11.834], [4.071, 11.834] ] - @staticmethod - @mark.unit_test - def test_run_level_contrasts(): - """ Test the get_run_level_contrasts method """ - - contrasts = PipelineTeam08MQ.get_run_level_contrasts() - assert contrasts[0] == ('positive_effect_gain', 'T', ['gain', 'loss'], [1, 0]) - assert contrasts[1] == ('positive_effect_loss', 'T', ['gain', 'loss'], [0, 1]) - assert contrasts[2] == ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]) - - @staticmethod - @mark.unit_test - def test_subgroups_contrasts(mocker): - """ Test the get_subgroups_contrasts method """ - - helpers.mock_participants_data(mocker) - - 
cei, cer, vei, ver, eii, eri = PipelineTeam08MQ.get_subgroups_contrasts( - ['sub-001/_contrast_id_1/cope1.nii.gz', 'sub-001/_contrast_id_2/cope1.nii.gz', 'sub-002/_contrast_id_1/cope1.nii.gz', 'sub-002/_contrast_id_2/cope1.nii.gz', 'sub-003/_contrast_id_1/cope1.nii.gz', 'sub-003/_contrast_id_2/cope1.nii.gz', 'sub-004/_contrast_id_1/cope1.nii.gz', 'sub-004/_contrast_id_2/cope1.nii.gz'], # copes - ['sub-001/_contrast_id_1/varcope1.nii.gz', 'sub-001/_contrast_id_2/varcope1.nii.gz', 'sub-002/_contrast_id_1/varcope1.nii.gz', 'sub-002/_contrast_id_2/varcope1.nii.gz', 'sub-003/_contrast_id_1/varcope1.nii.gz', 'sub-003/_contrast_id_2/varcope1.nii.gz', 'sub-004/_contrast_id_1/varcope1.nii.gz', 'sub-004/_contrast_id_2/varcope1.nii.gz'], # varcopes - ['001', '002', '003', '004'], # subject_list - ['fake_participants_file_path'] # participants file - ) - - assert cei == ['sub-001/_contrast_id_1/cope1.nii.gz', 'sub-001/_contrast_id_2/cope1.nii.gz', 'sub-003/_contrast_id_1/cope1.nii.gz', 'sub-003/_contrast_id_2/cope1.nii.gz'] - assert cer == ['sub-002/_contrast_id_1/cope1.nii.gz', 'sub-002/_contrast_id_2/cope1.nii.gz', 'sub-004/_contrast_id_1/cope1.nii.gz', 'sub-004/_contrast_id_2/cope1.nii.gz'] - assert vei == ['sub-001/_contrast_id_1/varcope1.nii.gz', 'sub-001/_contrast_id_2/varcope1.nii.gz', 'sub-003/_contrast_id_1/varcope1.nii.gz', 'sub-003/_contrast_id_2/varcope1.nii.gz'] - assert ver == ['sub-002/_contrast_id_1/varcope1.nii.gz', 'sub-002/_contrast_id_2/varcope1.nii.gz', 'sub-004/_contrast_id_1/varcope1.nii.gz', 'sub-004/_contrast_id_2/varcope1.nii.gz'] - assert eii == ['001', '003'] - assert eri == ['002', '004'] - @staticmethod @mark.unit_test def test_one_sample_t_test_regressors(): From 74331069ed6ec0b708c7735692766f283b1e01cd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 20 Nov 2023 17:12:01 +0100 Subject: [PATCH 085/116] [REFAC] using narps_open.core functions [TEST] events data in a file --- narps_open/pipelines/team_08MQ.py | 74 ++++++++++------------------ tests/pipelines/__init__.py | 31 ------------ tests/pipelines/test_team_08MQ.py | 43 +++++++++++++--- tests/test_data/pipelines/events.tsv | 6 +++ 4 files changed, 67 insertions(+), 87 deletions(-) delete mode 100644 tests/pipelines/__init__.py create mode 100644 tests/test_data/pipelines/events.tsv diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 3bf5dd65..ba0dfe85 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -28,6 +28,7 @@ from narps_open.data.task import TaskInformation from narps_open.data.participants import get_group from narps_open.core.common import remove_file, list_intersection, elements_in_string, clean_list + # Setup FSL FSLCommand.set_default_output_type('NIFTI_GZ') @@ -45,29 +46,6 @@ def __init__(self): ('negative_effect_loss', 'T', ['gain', 'loss'], [0, -1]) ] - def remove_files(_, files): - """ - This method is used in a Function node to fully remove - files generated by a Node, once they aren't needed anymore. 
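The `remove_files` method deleted by this patch is replaced with the shared `narps_open.core.common.remove_file` function. The interesting part of that design is the unused `_` input: it exists only so the deletion node can be wired after the `DataSink`, turning a data connection into an execution-order constraint. A sketch of the function, based on the `input_names = ['_', 'file_name']` signature used in the diff:

```python
from os import remove

def remove_file(_, file_name):
    """ Delete file_name once the node providing '_' has run.
        The '_' argument is a dummy trigger, not real data. """
    try:
        remove(file_name)
    except OSError as error:
        print(error)
```

Connecting `(data_sink, remove_func_0, [('out_file', '_')])`, as the preprocessing workflow does, guarantees an intermediate file is only deleted after it has been sunk.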
- - Parameters: - - _: Node input only used for triggering the Node - - files: str or list, a single filename or a list of filenames to remove - """ - from os import remove - - if isinstance(files, str): - files = [files] - - try: - for file in files: - remove(file) - except OSError as error: - print(error) - else: - print('The following files were successfully deleted.') - print(files) - def get_preprocessing(self): """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ @@ -237,35 +215,35 @@ def get_preprocessing(self): compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] # Function Nodes remove_files - Remove sizeable files once they aren't needed - remove_func_0 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], + remove_func_0 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file'], output_names = [] - ), name = 'remove_func_0') + ), name = 'remove_func_0', iterfield = 'file') - remove_func_1 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], + remove_func_1 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file'], output_names = [] - ), name = 'remove_func_1') + ), name = 'remove_func_1', iterfield = 'file') - remove_func_2 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], + remove_func_2 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file'], output_names = [] - ), name = 'remove_func_2') + ), name = 'remove_func_2', iterfield = 'file') - remove_func_3 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], + remove_func_3 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file'], output_names = [] - ), name = 'remove_func_3') + ), name = 'remove_func_3', iterfield = 'file') - remove_func_4 = Node(Function( - function = self.remove_files, - input_names = ['_', 'files'], + remove_func_4 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file'], output_names = [] - ), name = 'remove_func_4') + ), name = 'remove_func_4', iterfield = 'file') preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution']['stop_on_first_crash'] = 'true' @@ -326,15 +304,15 @@ def get_preprocessing(self): (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), # File removals - (motion_correction, remove_func_0, [('out_file', 'files')]), + (motion_correction, remove_func_0, [('out_file', 'file')]), (data_sink, remove_func_0, [('out_file', '_')]), - (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'files')]), + (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'file')]), (data_sink, remove_func_1, [('out_file', '_')]), - (smoothing, remove_func_2, [('smoothed_file', 'files')]), + (smoothing, remove_func_2, [('smoothed_file', 'file')]), (data_sink, remove_func_2, [('out_file', '_')]), - (alignment_func_to_anat, remove_func_3, [('out_file', 'files')]), + (alignment_func_to_anat, remove_func_3, [('out_file', 'file')]), (data_sink, remove_func_3, [('out_file', '_')]), - (alignment_func_to_mni, remove_func_4, [('output_image', 'files')]), + (alignment_func_to_mni, remove_func_4, [('output_image', 'file')]), (data_sink, remove_func_4, [('out_file', '_')]) ]) diff --git a/tests/pipelines/__init__.py b/tests/pipelines/__init__.py deleted file mode 100644 index 2325a9b9..00000000 --- a/tests/pipelines/__init__.py +++ /dev/null 
@@ -1,31 +0,0 @@ -#!/usr/bin/python -# coding: utf-8 - -""" -Configuration for testing of the narps_open.pipelines modules. -""" - -from pytest import helpers - -@helpers.register -def mock_event_data(mocker): - """ Mocks the return of the open function with the contents of a fake event file """ - - fake_event_data = 'onset duration\tgain\tloss\tRT\tparticipant_response\n' - fake_event_data += '4.071\t4\t14\t6\t2.388\tweakly_accept\n' - fake_event_data += '11.834\t4\t34\t14\t2.289\tstrongly_accept\n' - - mocker.patch('builtins.open', mocker.mock_open(read_data = fake_event_data)) - - -@helpers.register -def mock_participants_data(mocker): - """ Mocks the return of the open function with the contents of a fake participants file """ - - fake_participants_data = 'participant_id\tgroup\tgender\tage\n' - fake_participants_data += 'sub-001\tequalIndifference\tM\t24\n' - fake_participants_data += 'sub-002\tequalRange\tM\t25\n' - fake_participants_data += 'sub-003\tequalIndifference\tF\t27\n' - fake_participants_data += 'sub-004\tequalRange\tM\t25\n' - - mocker.patch('builtins.open', mocker.mock_open(read_data = fake_participants_data)) diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index 28ad2659..ecca139f 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -10,11 +10,14 @@ pytest -q test_team_08MQ.py pytest -q test_team_08MQ.py -k """ +from os.path import join from pytest import helpers, mark +from numpy import isclose from nipype import Workflow from nipype.interfaces.base import Bunch +from narps_open.utils.configuration import Configuration from narps_open.pipelines.team_08MQ import PipelineTeam08MQ class TestPipelinesTeam08MQ: @@ -70,20 +73,44 @@ def test_outputs(): @staticmethod @mark.unit_test - def test_subject_information(mocker): + def test_subject_information(): """ Test the get_subject_information method """ - helpers.mock_event_data(mocker) - - information = PipelineTeam08MQ.get_subject_information('fake_event_file_path')[0] + information = PipelineTeam08MQ.get_subject_information(join( + Configuration()['directories']['test_data'], + 'pipelines', + 'events.tsv' + ))[0] assert isinstance(information, Bunch) - assert information.amplitudes == [[1.0, 1.0], [14.0, 34.0], [6.0, 14.0], [1.0, 1.0]] - assert information.durations == [[4.0, 4.0], [2.388, 2.289], [2.388, 2.289], [4.0, 4.0]] assert information.conditions == ['event', 'gain', 'loss', 'response'] - assert information.onsets == [ - [4.071, 11.834], [4.071, 11.834], [4.071, 11.834], [4.071, 11.834] + + reference_amplitudes = [ + [1.0, 1.0, 1.0, 1.0, 1.0], + [14.0, 34.0, 38.0, 10.0, 16.0], + [6.0, 14.0, 19.0, 15.0, 17.0], + [1.0, 1.0, 0.0, -1.0, -1.0] + ] + for reference_array, test_array in zip(reference_amplitudes, information.amplitudes): + assert isclose(reference_array, test_array).all() + + reference_durations = [ + [4.0, 4.0, 4.0, 4.0, 4.0], + [2.388, 2.289, 0.0, 2.08, 2.288], + [2.388, 2.289, 0.0, 2.08, 2.288], + [4.0, 4.0, 4.0, 4.0, 4.0] + ] + for reference_array, test_array in zip(reference_durations, information.durations): + assert isclose(reference_array, test_array).all() + + reference_onsets = [ + [4.071, 11.834, 19.535, 27.535, 36.435], + [4.071, 11.834, 19.535, 27.535, 36.435], + [4.071, 11.834, 19.535, 27.535, 36.435], + [4.071, 11.834, 19.535, 27.535, 36.435] ] + for reference_array, test_array in zip(reference_onsets, information.onsets): + assert isclose(reference_array, test_array).all() @staticmethod @mark.unit_test diff --git 
a/tests/test_data/pipelines/events.tsv b/tests/test_data/pipelines/events.tsv new file mode 100644 index 00000000..4b8f04e6 --- /dev/null +++ b/tests/test_data/pipelines/events.tsv @@ -0,0 +1,6 @@ +onset duration gain loss RT participant_response +4.071 4 14 6 2.388 weakly_accept +11.834 4 34 14 2.289 strongly_accept +19.535 4 38 19 0 NoResp +27.535 4 10 15 2.08 strongly_reject +36.435 4 16 17 2.288 weakly_reject \ No newline at end of file From 053d72cdef357bf02e9e72f8e20d4bf05c25caf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 21 Nov 2023 11:52:14 +0100 Subject: [PATCH 086/116] [BUG] remove_file input naming [skip ci] --- narps_open/pipelines/team_08MQ.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ba0dfe85..0e55856f 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -217,33 +217,33 @@ def get_preprocessing(self): # Function Nodes remove_files - Remove sizeable files once they aren't needed remove_func_0 = MapNode(Function( function = remove_file, - input_names = ['_', 'file'], + input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_func_0', iterfield = 'file') + ), name = 'remove_func_0', iterfield = 'file_name') remove_func_1 = MapNode(Function( function = remove_file, - input_names = ['_', 'file'], + input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_func_1', iterfield = 'file') + ), name = 'remove_func_1', iterfield = 'file_name') remove_func_2 = MapNode(Function( function = remove_file, - input_names = ['_', 'file'], + input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_func_2', iterfield = 'file') + ), name = 'remove_func_2', iterfield = 'file_name') remove_func_3 = MapNode(Function( function = remove_file, - input_names = ['_', 'file'], + input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_func_3', iterfield = 'file') + ), name = 'remove_func_3', iterfield = 'file_name') remove_func_4 = MapNode(Function( function = remove_file, - input_names = ['_', 'file'], + input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_func_4', iterfield = 'file') + ), name = 'remove_func_4', iterfield = 'file_name') preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution']['stop_on_first_crash'] = 'true' @@ -304,15 +304,15 @@ def get_preprocessing(self): (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), # File removals - (motion_correction, remove_func_0, [('out_file', 'file')]), + (motion_correction, remove_func_0, [('out_file', 'file_name')]), (data_sink, remove_func_0, [('out_file', '_')]), - (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'file')]), + (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'file_name')]), (data_sink, remove_func_1, [('out_file', '_')]), - (smoothing, remove_func_2, [('smoothed_file', 'file')]), + (smoothing, remove_func_2, [('smoothed_file', 'file_name')]), (data_sink, remove_func_2, [('out_file', '_')]), - (alignment_func_to_anat, remove_func_3, [('out_file', 'file')]), + (alignment_func_to_anat, remove_func_3, [('out_file', 'file_name')]), (data_sink, remove_func_3, [('out_file', '_')]), - (alignment_func_to_mni, remove_func_4, [('output_image', 'file')]), + (alignment_func_to_mni, remove_func_4, [('output_image', 'file_name')]), 
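The renaming in this patch (`'file'` to `'file_name'`) matters because, for a Function node, the strings passed as `input_names` are the only inputs the node exposes; a `connect()` call targeting any other name fails when the workflow is assembled. A small sketch of the constraint:

```python
from nipype import Node, Workflow
from nipype.interfaces.utility import Function, IdentityInterface

def remove_file(_, file_name):
    from os import remove
    remove(file_name)

source = Node(IdentityInterface(fields = ['out_file']), name = 'source')
remove_func = Node(Function(
    function = remove_file,
    input_names = ['_', 'file_name'],  # these strings define the node's inputs
    output_names = []
), name = 'remove_func')

workflow = Workflow(name = 'naming_sketch')
workflow.connect(source, 'out_file', remove_func, 'file_name')  # matches input_names
# workflow.connect(source, 'out_file', remove_func, 'file')  # would fail: not an input
```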
(data_sink, remove_func_4, [('out_file', '_')]) ]) From 34b649e7c2be7281f3e830f6316e466a3e0a9be3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 22 Nov 2023 11:03:55 +0100 Subject: [PATCH 087/116] [BUG] remove sub- from participants id [skip ci] --- narps_open/data/participants.py | 8 ++++---- narps_open/pipelines/team_08MQ.py | 3 +-- tests/data/test_participants.py | 4 ++-- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/narps_open/data/participants.py b/narps_open/data/participants.py index 835e834f..b0d6213e 100644 --- a/narps_open/data/participants.py +++ b/narps_open/data/participants.py @@ -51,9 +51,9 @@ def get_participants_subset(nb_participants: int = 108) -> list: return get_all_participants()[0:nb_participants] def get_group(group_name: str) -> list: - """ Return a list containing all the participants inside the group_name group + """ Return a list containing all the participants inside the group_name group """ - Warning : the subject ids are return as written in the participants file (i.e.: 'sub-*') - """ participants = get_participants_information() - return participants.loc[participants['group'] == group_name]['participant_id'].values.tolist() + group = participants.loc[participants['group'] == group_name]['participant_id'].values.tolist() + + return [p.replace('sub-', '') for p in group] diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 0e55856f..bd71a119 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -703,8 +703,7 @@ def get_group_level_analysis_sub_workflow(self, method): 'cope' : join(self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'), 'varcope' : join(self.directories.output_dir, 'subject_level_analysis', - '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz'), - 'participants' : 'participants.tsv' + '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') select_files.inputs.base_directory = self.directories.dataset_dir diff --git a/tests/data/test_participants.py b/tests/data/test_participants.py index f36f0a05..eaf313fb 100644 --- a/tests/data/test_participants.py +++ b/tests/data/test_participants.py @@ -112,5 +112,5 @@ def test_get_group(mock_participants_data): """ Test the get_group function """ assert part.get_group('') == [] - assert part.get_group('equalRange') == ['sub-002', 'sub-004'] - assert part.get_group('equalIndifference') == ['sub-001', 'sub-003'] + assert part.get_group('equalRange') == ['002', '004'] + assert part.get_group('equalIndifference') == ['001', '003'] From 97001667ad6a2fb1616f1ce2479f7cdb8cdfa632 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 22 Nov 2023 11:19:49 +0100 Subject: [PATCH 088/116] [BUG] typo in get_two_sample_t_test_regressors inputs [skip ci] --- narps_open/pipelines/team_08MQ.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index bd71a119..6d53c93f 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -895,10 +895,10 @@ def get_group_level_analysis_sub_workflow(self, method): # Add missing connections group_level_analysis.connect([ (get_equal_range_subjects, regressors_two_sample, [ - ('out_list', 'equal_range_id') + ('out_list', 'equal_range_ids') ]), (get_equal_indifference_subjects, regressors_two_sample, 
[ - ('out_list', 'equal_indifference_id') + ('out_list', 'equal_indifference_ids') ]), (regressors_two_sample, specify_model, [ ('regressors', 'regressors'), From 8c73773c8c72987bf320710fb4a16da24e80565d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 28 Nov 2023 16:32:44 +0100 Subject: [PATCH 089/116] Compute brightness threshold for SUSAN [skip ci] --- narps_open/pipelines/team_08MQ.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 6d53c93f..e95e1fde 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -11,7 +11,7 @@ from nipype.interfaces.io import SelectFiles, DataSink from nipype.interfaces.fsl import ( # General usage - FSLCommand, + FSLCommand, ImageStats, # Preprocessing FAST, BET, ErodeImage, PrepareFieldmap, MCFLIRT, SliceTimer, Threshold, Info, SUSAN, FLIRT, ApplyXFM, ConvertXFM, @@ -177,9 +177,13 @@ def get_preprocessing(self): slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction') slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime'] + # ImageStats Node - Compute median of voxel values to derive SUSAN's brightness_threshold + compute_median = Node(ImageStats(), name = 'compute_median') + compute_median.inputs.op_string = '-p 50' # Median is calculated as the 50th percentile + # SUSAN Node - smoothing of functional images + # we set brightness_threshold to .75x median of the input file, as performed by fMRIprep smoothing = Node(SUSAN(), name = 'smoothing') - smoothing.inputs.brightness_threshold = 2000.0 # TODO : which value ? smoothing.inputs.fwhm = self.fwhm # ApplyXFM Node - Alignment of white matter to functional space @@ -287,6 +291,10 @@ def get_preprocessing(self): (select_files, motion_correction, [('sbref', 'ref_file')]), (motion_correction, slice_time_correction, [('out_file', 'in_file')]), (slice_time_correction, smoothing, [('slice_time_corrected_file', 'in_file')]), + (slice_time_correction, compute_median, [('slice_time_corrected_file', 'in_file')]), + (compute_median, smoothing, [( + ('out_stat', lambda x : .75 * x), 'brightness_threshold') + ]), (smoothing, alignment_func_to_anat, [('smoothed_file', 'in_file')]), (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), From 8e736f7369421122cec1f9da203ff584e98377f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 28 Nov 2023 17:02:02 +0100 Subject: [PATCH 090/116] Use ANTs ApplyTransforms for alignment_func_to_anat Node [skip ci] --- narps_open/pipelines/team_08MQ.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index e95e1fde..5a37f657 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -22,7 +22,7 @@ from nipype.interfaces.fsl.utils import Merge as MergeImages from nipype.algorithms.confounds import CompCor from nipype.algorithms.modelgen import SpecifyModel -from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform +from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform, ApplyTransforms from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation @@ -178,6 +178,7 @@ def get_preprocessing(self): 
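The hunk starting here derives SUSAN's brightness threshold from the data instead of hard-coding it: the within-mask median intensity is computed with `ImageStats`, then rescaled to 0.75 times the median through an inline lambda in the `connect()` call, following the fMRIPrep convention mentioned in the diff. A sketch of just that sub-graph (the input image and mask are left unset here):

```python
from nipype import Node, Workflow
from nipype.interfaces.fsl import ImageStats, SUSAN

compute_median = Node(ImageStats(), name = 'compute_median')
compute_median.inputs.op_string = '-p 50'  # 50th percentile, i.e. the median

smoothing = Node(SUSAN(), name = 'smoothing')
smoothing.inputs.fwhm = 6.0

workflow = Workflow(name = 'smoothing_sketch')
workflow.connect([
    # The tuple-with-function form rescales out_stat before it reaches SUSAN
    (compute_median, smoothing, [
        (('out_stat', lambda x: .75 * x), 'brightness_threshold')
    ])
])
```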
slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime'] # ImageStats Node - Compute median of voxel values to derive SUSAN's brightness_threshold + # we do not need to filter on not-zero values (option -P) because a mask is passed compute_median = Node(ImageStats(), name = 'compute_median') compute_median.inputs.op_string = '-p 50' # Median is calculated as the 50th percentile @@ -194,9 +195,9 @@ def get_preprocessing(self): alignment_csf = Node(ApplyXFM(), name = 'alignment_csf') alignment_csf.inputs.apply_xfm = True - # ApplyWarp Node - Alignment of functional data to anatomical space - alignment_func_to_anat = Node(ApplyXFM(), name = 'alignment_func_to_anat') - alignment_func_to_anat.inputs.apply_xfm = True + # ApplyTransforms Node - Alignment of functional data to anatomical space + # warning : ApplyTransforms only accepts a list as transforms input + alignment_func_to_anat = Node(ApplyTransforms(), name = 'alignment_func_to_anat') # Select Node - Change the order of transforms coming from ANTs Registration reverse_transform_order = Node(Select(), name = 'reverse_transform_order') @@ -292,19 +293,22 @@ def get_preprocessing(self): (motion_correction, slice_time_correction, [('out_file', 'in_file')]), (slice_time_correction, smoothing, [('slice_time_corrected_file', 'in_file')]), (slice_time_correction, compute_median, [('slice_time_corrected_file', 'in_file')]), + (brain_extraction_func, compute_median, [('mask_file', 'mask_file')]), (compute_median, smoothing, [( ('out_stat', lambda x : .75 * x), 'brightness_threshold') ]), - (smoothing, alignment_func_to_anat, [('smoothed_file', 'in_file')]), - (coregistration_sbref, alignment_func_to_anat, [('out_matrix_file', 'in_matrix_file')]), - (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), - (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'input_image')]), + (smoothing, alignment_func_to_anat, [('smoothed_file', 'input_image')]), + (coregistration_sbref, alignment_func_to_anat, [( + ('out_matrix_file', lambda x : [x]), 'transforms') + ]), + (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference_image')]), + (alignment_func_to_anat, alignment_func_to_mni, [('output_image', 'input_image')]), (normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]), (reverse_transform_order, alignment_func_to_mni, [('out', 'transformation_series')]), (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space (slice_time_correction, compute_confounds, [ ('slice_time_corrected_file', 'realigned_file') - ]), + ]), # Outputs of preprocessing (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), @@ -318,7 +322,7 @@ def get_preprocessing(self): (data_sink, remove_func_1, [('out_file', '_')]), (smoothing, remove_func_2, [('smoothed_file', 'file_name')]), (data_sink, remove_func_2, [('out_file', '_')]), - (alignment_func_to_anat, remove_func_3, [('out_file', 'file_name')]), + (alignment_func_to_anat, remove_func_3, [('output_image', 'file_name')]), (data_sink, remove_func_3, [('out_file', '_')]), (alignment_func_to_mni, remove_func_4, [('output_image', 'file_name')]), (data_sink, remove_func_4, [('out_file', '_')]) From 36e8fdda9fca8f21016a17a902cbaf347d7c0d8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 29 Nov 2023 14:55:43 +0100 Subject: [PATCH 091/116] [narps_open.core] adding list_to_file function [skip ci] --- narps_open/core/common.py | 21 +++++++++++++ 
tests/core/test_common.py | 63 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) diff --git a/narps_open/core/common.py b/narps_open/core/common.py index e40d4e9a..c40f2907 100644 --- a/narps_open/core/common.py +++ b/narps_open/core/common.py @@ -63,3 +63,24 @@ def list_intersection(list_1: list, list_2: list) -> list: - list, the intersection of list_1 and list_2 """ return [e for e in list_1 if e in list_2] + +def list_to_file(input_list: list, file_name: str = 'elements.tsv') -> str: + """ + Create a tsv file containing elements of the input list. + This function is meant to be used in a Nipype Function Node. + + Parameters : + - input_list: list + + Returns: + - output_file: path to the created file + """ + from os.path import abspath + output_file = abspath(file_name) + + # Write one element per line + with open(output_file, 'w') as writer: + for element in input_list: + writer.write(f'{element}\n') + + return output_file diff --git a/tests/core/test_common.py b/tests/core/test_common.py index 3e00fd1b..64c385e9 100644 --- a/tests/core/test_common.py +++ b/tests/core/test_common.py @@ -317,3 +317,66 @@ def test_connect_list_intersection(remove_test_dir): test_file_2 = join(TEMPORARY_DIR, 'test_workflow', 'node_2', '_report', 'report.rst') with open(test_file_2, 'r', encoding = 'utf-8') as file: assert f'* out_value : {output_list_2}' in file.read() + + @staticmethod + @mark.unit_test + def test_node_list_to_file_1(): + """ Test the list_to_file function as a nipype.Node """ + + # Inputs + input_list = ['001', 23.560, 'azerty', False, None] + + # Create a Nipype Node using list_to_file + test_node = Node(Function( + function = co.list_to_file, + input_names = ['input_list'], + output_names = ['out_file'] + ), name = 'test_node') + test_node.inputs.input_list = input_list + test_node.run() + + # Expected output (in the Node's working directory) + out_file = join(test_node.output_dir(), 'elements.tsv') + out_list = [str(a) for a in input_list] + + # Check file was created + assert exists(out_file) + + # Check file content + with open(out_file, 'r', encoding = 'utf-8') as file: + for list_element, file_element in zip(out_list, file.read().split('\n')): + assert list_element == file_element + + @staticmethod + @mark.unit_test + def test_node_list_to_file_2(): + """ Test the list_to_file function as a nipype.Node + Test changing name of output file + """ + + # Inputs + input_list = ['001', 23.560, [2.0, 1, 53, True], False, None] + file_name = 'custom_filename.txt' + + # Create a Nipype Node using list_to_file + test_node = Node(Function( + function = co.list_to_file, + input_names = ['input_list', 'file_name'], + output_names = ['out_file'] + ), name = 'test_node') + test_node.inputs.input_list = input_list + test_node.inputs.file_name = file_name + test_node.run() + + # Expected output + out_file = join(test_node.output_dir(), file_name) + out_list = [str(a) for a in input_list] + + # Check file was created + assert exists(out_file) + + # Check file content + with open(out_file, 'r', encoding = 'utf-8') as file: + for list_element, file_element in zip(out_list, file.read().split('\n')): + assert list_element == file_element + From b979f1cbe845dd4ffa7d2384317b3fa5e8b1d238 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 29 Nov 2023 14:56:39 +0100 Subject: [PATCH 092/116] [08MQ] custom timings as input of SliceTimer [skip ci] --- narps_open/pipelines/team_08MQ.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4
deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 5a37f657..ce6c7984 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -27,7 +27,9 @@ from narps_open.pipelines import Pipeline from narps_open.data.task import TaskInformation from narps_open.data.participants import get_group -from narps_open.core.common import remove_file, list_intersection, elements_in_string, clean_list +from narps_open.core.common import ( + remove_file, list_intersection, elements_in_string, clean_list, list_to_file + ) # Setup FSL FSLCommand.set_default_output_type('NIFTI_GZ') @@ -85,7 +87,6 @@ def get_preprocessing(self): # BET Node - Brain extraction for anatomical images brain_extraction_anat = Node(BET(), name = 'brain_extraction_anat') brain_extraction_anat.inputs.frac = 0.5 - #brain_extraction_anat.inputs.mask = True # TODO ? # FAST Node - Segmentation of anatomical images segmentation_anat = Node(FAST(), name = 'segmentation_anat') @@ -173,6 +174,15 @@ def get_preprocessing(self): motion_correction.inputs.interpolation = 'spline' # should be 'trilinear' motion_correction.inputs.save_plots = True # Save transformation parameters + # Function Nodes get_slice_timings - Create a file with acquisition timing for each slice + slice_timings = Node(Function( + function = list_to_file, + input_names = ['input_list', 'file_name'], + output_names = ['output_file'] + ), name = 'slice_timings') + slice_timings.inputs.input_list = TaskInformation()['SliceTiming'] + slice_timings.inputs.file_name = 'slice_timings.tsv' + # SliceTimer Node - Slice time correction slice_time_correction = Node(SliceTimer(), name = 'slice_time_correction') slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime'] @@ -186,6 +196,7 @@ def get_preprocessing(self): # we set brightness_threshold to .75x median of the input file, as performed by fMRIprep smoothing = Node(SUSAN(), name = 'smoothing') smoothing.inputs.fwhm = self.fwhm + compute_brightness_threshold = lambda x : .75 * x # ApplyXFM Node - Alignment of white matter to functional space alignment_white_matter = Node(ApplyXFM(), name = 'alignment_white_matter') @@ -198,6 +209,7 @@ def get_preprocessing(self): # ApplyTransforms Node - Alignment of functional data to anatomical space # warning : ApplyTransforms only accepts a list as transforms input alignment_func_to_anat = Node(ApplyTransforms(), name = 'alignment_func_to_anat') + transform_as_list = lambda x : [x] # Select Node - Change the order of transforms coming from ANTs Registration reverse_transform_order = Node(Select(), name = 'reverse_transform_order') @@ -290,16 +302,17 @@ def get_preprocessing(self): (select_files, brain_extraction_func, [('func', 'in_file')]), (brain_extraction_func, motion_correction, [('out_file', 'in_file')]), (select_files, motion_correction, [('sbref', 'ref_file')]), + (slice_timings, slice_time_correction, [('output_file', 'custom_timings')]), (motion_correction, slice_time_correction, [('out_file', 'in_file')]), (slice_time_correction, smoothing, [('slice_time_corrected_file', 'in_file')]), (slice_time_correction, compute_median, [('slice_time_corrected_file', 'in_file')]), (brain_extraction_func, compute_median, [('mask_file', 'mask_file')]), (compute_median, smoothing, [( - ('out_stat', lambda x : .75 * x), 'brightness_threshold') + ('out_stat', compute_brightness_threshold), 'brightness_threshold') ]), (smoothing, alignment_func_to_anat, [('smoothed_file', 'input_image')]),
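# [INFO] Editorial aside (not part of the patches; file names and TR are hypothetical): a
# minimal sketch of the custom slice-timing pattern from PATCH 092 above. list_to_file
# materialises the SliceTiming metadata as a one-value-per-line text file, which SliceTimer
# passes to FSL's slicetimer through its --tcustom option:
from nipype.interfaces.fsl import SliceTimer

slice_time_correction = SliceTimer()
slice_time_correction.inputs.in_file = 'bold_mcf.nii.gz'           # hypothetical input image
slice_time_correction.inputs.custom_timings = 'slice_timings.tsv'  # file written by list_to_file
slice_time_correction.inputs.time_repetition = 1.0                 # hypothetical TR, in seconds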
(coregistration_sbref, alignment_func_to_anat, [( - ('out_matrix_file', lambda x : [x]), 'transforms') + ('out_matrix_file', transform_as_list), 'transforms') ]), (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference_image')]), (alignment_func_to_anat, alignment_func_to_mni, [('output_image', 'input_image')]), From 839e53d31fd842f8a0dbd5a0e4164e08c1872c83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 30 Nov 2023 15:27:43 +0100 Subject: [PATCH 093/116] Typo preprocessing --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ce6c7984..208f09e2 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -49,7 +49,7 @@ def __init__(self): ] def get_preprocessing(self): - """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ + """ Return a Nipype workflow describing the preprocessing part of the pipeline """ # IdentityInterface node - allows to iterate over subjects and runs information_source = Node(IdentityInterface( From 153e7471fdb16173e9218bdcc32cd814d65e5c44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Dec 2023 10:04:26 +0100 Subject: [PATCH 094/116] Computing masks : output of preprocessing [skip ci] --- narps_open/pipelines/team_08MQ.py | 64 ++++++++++++++++++++++++++----- 1 file changed, 55 insertions(+), 9 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 208f09e2..ee19c276 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -20,6 +20,7 @@ FLAMEO, Randomise, MultipleRegressDesign ) from nipype.interfaces.fsl.utils import Merge as MergeImages +from nipype.interfaces.fsl.maths import MultiImageMaths from nipype.algorithms.confounds import CompCor from nipype.algorithms.modelgen import SpecifyModel from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform, ApplyTransforms @@ -211,6 +212,10 @@ def get_preprocessing(self): alignment_func_to_anat = Node(ApplyTransforms(), name = 'alignment_func_to_anat') transform_as_list = lambda x : [x] + # ApplyTransforms Node - Alignment of functional brain mask to anatomical space + # warning : ApplyTransforms only accepts a list as transforms input + alignment_func_mask_to_anat = Node(ApplyTransforms(), name = 'alignment_func_mask_to_anat') + # Select Node - Change the order of transforms coming from ANTs Registration reverse_transform_order = Node(Select(), name = 'reverse_transform_order') reverse_transform_order.inputs.index = [1, 0] @@ -221,6 +226,12 @@ def get_preprocessing(self): alignment_func_to_mni.inputs.reference_image = \ Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + # ApplyWarp Node - Alignment of functional data to MNI space + alignment_func_mask_to_mni = Node(WarpTimeSeriesImageMultiTransform(), + name = 'alignment_func_mask_to_mni') + alignment_func_mask_to_mni.inputs.reference_image = \ + Info.standard_image('MNI152_T1_2mm_brain.nii.gz') + # Merge Node - Merge the two masks (WM and CSF) in one input for the next node merge_masks = Node(Merge(2), name = 'merge_masks') @@ -315,18 +326,30 @@ def get_preprocessing(self): ('out_matrix_file', transform_as_list), 'transforms') ]), (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference_image')]), + (brain_extraction_func, alignment_func_mask_to_anat, [('mask_file', 'input_image')]), + 
(coregistration_sbref, alignment_func_mask_to_anat, [( + ('out_matrix_file', transform_as_list), 'transforms') + ]), + (brain_extraction_anat, alignment_func_mask_to_anat, [ + ('out_file', 'reference_image') + ]), (alignment_func_to_anat, alignment_func_to_mni, [('output_image', 'input_image')]), + (alignment_func_mask_to_anat, alignment_func_mask_to_mni, [('output_image', 'input_image')]), (normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]), (reverse_transform_order, alignment_func_to_mni, [('out', 'transformation_series')]), - (merge_masks, compute_confounds, [('out', 'mask_files')]), # Masks are in the func space + (reverse_transform_order, alignment_func_mask_to_mni, [('out', 'transformation_series')]), + (merge_masks, compute_confounds, [('out', 'mask_files')]), #Masks are in the func space (slice_time_correction, compute_confounds, [ ('slice_time_corrected_file', 'realigned_file') ]), # Outputs of preprocessing (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), - (compute_confounds, data_sink, [('components_file', 'preprocessing.@components_file')]), + (compute_confounds, data_sink, [ + ('components_file', 'preprocessing.@components_file')]), (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), + (alignment_func_mask_to_mni, data_sink, [ + ('output_image', 'preprocessing.@output_mask')]), # File removals (motion_correction, remove_func_0, [('out_file', 'file_name')]), @@ -352,7 +375,8 @@ def get_preprocessing_outputs(self): 'file': [ 'components_file.txt', 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par', - 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz' + 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz', + # mask file 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz' ] } parameter_sets = product(*parameters.values()) @@ -403,13 +427,13 @@ def get_subject_information(event_file): durations['event'].append(float(info[1])) amplitudes['event'].append(1.0) onsets['gain'].append(float(info[0])) - durations['gain'].append(float(info[4])) # TODO : change to info[1] (= 4) ? + durations['gain'].append(float(info[1])) amplitudes['gain'].append(float(info[2])) onsets['loss'].append(float(info[0])) - durations['loss'].append(float(info[4])) # TODO : change to info[1] (= 4) ? + durations['loss'].append(float(info[1])) amplitudes['loss'].append(float(info[3])) onsets['response'].append(float(info[0])) - durations['response'].append(float(info[1])) # TODO : change to info[4] (= RT) ? 
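# [INFO] Editorial note (not part of the patches): each data row of a NARPS events.tsv file reads
#     onset  duration  gain  loss  RT  participant_response
# so in the parsing above info[0] is the onset, info[1] the fixed trial duration, info[2] and
# info[3] the gain and loss amounts, info[4] the reaction time, and info[5] the response.
# A hypothetical row such as
#     4.071  4.0  14  6  2.388  weakly_accept
# therefore yields onset 4.071 and duration 4.0 for all four conditions, with amplitudes
# 1.0 (event), 14.0 (gain), 6.0 (loss) and 1.0 (response, because 'accept' is contained in
# the response string).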
+ durations['response'].append(float(info[1])) if 'accept' in info[5]: amplitudes['response'].append(1.0) elif 'reject' in info[5]: @@ -581,6 +605,8 @@ def get_subject_level_analysis(self): 'cope' : join(self.directories.output_dir, 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', 'cope{contrast_id}.nii.gz'), 'varcope' : join(self.directories.output_dir, 'run_level_analysis', + '_run_id_*_subject_id_{subject_id}', 'results', 'varcope{contrast_id}.nii.gz'), + 'masks' : join(self.directories.output_dir, 'preprocessing', '_run_id_*_subject_id_{subject_id}', 'results', 'varcope{contrast_id}.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') @@ -602,10 +628,19 @@ def get_subject_level_analysis(self): merge_varcopes = Node(MergeImages(), name = 'merge_varcopes') merge_varcopes.inputs.dimension = 't' + # Split Node - Split mask list to serve them as inputs of the MultiImageMaths node. + split_masks = Node(Split(), name = 'split_masks') + split_masks.inputs.splits = [1, len(self.run_list) - 1] + split_masks.inputs.squeeze = True # Unfold one-element splits removing the list + + # MultiImageMaths Node - Create a subject mask by + # computing the intersection of all run masks. + mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') + mask_intersection.op_string = '-mul %s ' * (len(self.run_list) - 1) + # FLAMEO Node - Estimate model estimate_model = Node(FLAMEO(), name = 'estimate_model') estimate_model.inputs.run_mode = 'fe' # Fixed effect - estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz') # Second level (single-subject, mean of all four scans) analyses: Fixed effects analysis. subject_level_analysis = Workflow( @@ -617,8 +652,12 @@ def get_subject_level_analysis(self): ('contrast_id', 'contrast_id')]), (select_files, merge_copes, [('cope', 'in_files')]), (select_files, merge_varcopes, [('varcope', 'in_files')]), + (select_files, split_masks, [('masks', 'inlist')]), + (split_masks, mask_intersection, [('out1', 'in_file')]), + (split_masks, mask_intersection, [('out2', 'operand_files')]), (merge_copes, estimate_model, [('merged_file', 'cope_file')]), (merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]), + (mask_intersection, estimate_model, [('out_file', 'mask_file')]), (generate_model, estimate_model, [ ('design_mat', 'design_file'), ('design_con', 't_con_file'), @@ -770,13 +809,20 @@ def get_group_level_analysis_sub_workflow(self, method): merge_varcopes = Node(MergeImages(), name = 'merge_varcopes') merge_varcopes.inputs.dimension = 't' + # MultiImageMaths Node - Create a group mask by + # computing the intersection of all subject masks. 
+ mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') + mask_intersection.inputs.in_file = + mask_intersection.op_string = '-mul %s' + maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] + maths.inputs.out_file = "functional4.nii" + # MultipleRegressDesign Node - Specify model specify_model = Node(MultipleRegressDesign(), name = 'specify_model') # FLAMEO Node - Estimate model estimate_model = Node(FLAMEO(), name = 'estimate_model') estimate_model.inputs.run_mode = 'ols' # Ordinary least squares - estimate_model.inputs.mask_file = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz') # Randomise Node - randomise = Node(Randomise(), name = 'randomise') @@ -785,7 +831,6 @@ def get_group_level_analysis_sub_workflow(self, method): randomise.inputs.vox_p_values = True randomise.inputs.c_thresh = 0.05 randomise.inputs.tfce_E = 0.01 - randomise.inputs.mask = Info.standard_image('MNI152_T1_2mm_brain_mask.nii.gz') # Compute the number of participants used to do the analysis nb_subjects = len(self.subject_list) @@ -803,6 +848,7 @@ def get_group_level_analysis_sub_workflow(self, method): (get_varcopes, merge_varcopes,[(('out_list', clean_list), 'in_files')]), (merge_copes, estimate_model, [('merged_file', 'cope_file')]), (merge_varcopes, estimate_model, [('merged_file', 'var_cope_file')]), + (mask_intersection, estimate_model, [('out_file', 'mask_file')]), (specify_model, estimate_model, [ ('design_mat', 'design_file'), ('design_con', 't_con_file'), From 01bfb8ed271faacff00ba58af35ed471b26ed1b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Dec 2023 10:09:50 +0100 Subject: [PATCH 095/116] Computing masks : output of preprocessing [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index ee19c276..19e7a64b 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -812,7 +812,7 @@ def get_group_level_analysis_sub_workflow(self, method): # MultiImageMaths Node - Create a group mask by # computing the intersection of all subject masks. 
mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') - mask_intersection.inputs.in_file = + #TODO mask_intersection.inputs.in_file = mask_intersection.op_string = '-mul %s' maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] maths.inputs.out_file = "functional4.nii" From 9a16b06096e46ab70bf463392aaf38b5c4ff7b04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Dec 2023 15:03:35 +0100 Subject: [PATCH 096/116] Back to ApplyXFM, with no_resample option [skip ci] --- narps_open/pipelines/team_08MQ.py | 55 +++++++++++++++++-------------- 1 file changed, 30 insertions(+), 25 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 19e7a64b..7bc19e8a 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -202,19 +202,22 @@ def get_preprocessing(self): # ApplyXFM Node - Alignment of white matter to functional space alignment_white_matter = Node(ApplyXFM(), name = 'alignment_white_matter') alignment_white_matter.inputs.apply_xfm = True + alignment_white_matter.inputs.no_resample = True # ApplyXFM Node - Alignment of CSF to functional space alignment_csf = Node(ApplyXFM(), name = 'alignment_csf') alignment_csf.inputs.apply_xfm = True + alignment_csf.inputs.no_resample = True - # ApplyTransforms Node - Alignment of functional data to anatomical space - # warning : ApplyTransforms only accepts a list as transforms input - alignment_func_to_anat = Node(ApplyTransforms(), name = 'alignment_func_to_anat') - transform_as_list = lambda x : [x] + # ApplyXFM Node - Alignment of functional data to anatomical space + alignment_func_to_anat = Node(ApplyXFM(), name = 'alignment_func_to_anat') + alignment_func_to_anat.inputs.apply_xfm = True + alignment_func_to_anat.inputs.no_resample = True # ApplyTransforms Node - Alignment of functional brain mask to anatomical space - # warning : ApplyTransforms only accepts a list as transforms input - alignment_func_mask_to_anat = Node(ApplyTransforms(), name = 'alignment_func_mask_to_anat') + alignment_func_mask_to_anat = Node(ApplyXFM(), name = 'alignment_func_mask_to_anat') + alignment_func_mask_to_anat.inputs.apply_xfm = True + alignment_func_mask_to_anat.inputs.no_resample = True # Select Node - Change the order of transforms coming from ANTs Registration reverse_transform_order = Node(Select(), name = 'reverse_transform_order') @@ -318,30 +321,32 @@ def get_preprocessing(self): (slice_time_correction, smoothing, [('slice_time_corrected_file', 'in_file')]), (slice_time_correction, compute_median, [('slice_time_corrected_file', 'in_file')]), (brain_extraction_func, compute_median, [('mask_file', 'mask_file')]), - (compute_median, smoothing, [( - ('out_stat', compute_brightness_threshold), 'brightness_threshold') - ]), - (smoothing, alignment_func_to_anat, [('smoothed_file', 'input_image')]), - (coregistration_sbref, alignment_func_to_anat, [( - ('out_matrix_file', transform_as_list), 'transforms') - ]), - (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference_image')]), - (brain_extraction_func, alignment_func_mask_to_anat, [('mask_file', 'input_image')]), - (coregistration_sbref, alignment_func_mask_to_anat, [( - ('out_matrix_file', transform_as_list), 'transforms') - ]), - (brain_extraction_anat, alignment_func_mask_to_anat, [ - ('out_file', 'reference_image') - ]), - (alignment_func_to_anat, alignment_func_to_mni, [('output_image', 'input_image')]), - (alignment_func_mask_to_anat, alignment_func_mask_to_mni, 
[('output_image', 'input_image')]), + (compute_median, smoothing, [ + (('out_stat', compute_brightness_threshold), 'brightness_threshold') + ]), + (smoothing, alignment_func_to_anat, [('smoothed_file', 'in_file')]), + (coregistration_sbref, alignment_func_to_anat, [ + ('out_matrix_file', 'in_matrix_file') + ]), + (brain_extraction_anat, alignment_func_to_anat, [('out_file', 'reference')]), + (brain_extraction_func, alignment_func_mask_to_anat, [('mask_file', 'in_file')]), + (coregistration_sbref, alignment_func_mask_to_anat, [ + ('out_matrix_file', 'in_matrix_file') + ]), + (brain_extraction_anat, alignment_func_mask_to_anat, [('out_file', 'reference')]), + (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'input_image')]), + (alignment_func_mask_to_anat, alignment_func_mask_to_mni, [ + ('output_image', 'input_image') + ]), (normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]), (reverse_transform_order, alignment_func_to_mni, [('out', 'transformation_series')]), - (reverse_transform_order, alignment_func_mask_to_mni, [('out', 'transformation_series')]), + (reverse_transform_order, alignment_func_mask_to_mni, [ + ('out', 'transformation_series') + ]), (merge_masks, compute_confounds, [('out', 'mask_files')]), #Masks are in the func space (slice_time_correction, compute_confounds, [ ('slice_time_corrected_file', 'realigned_file') - ]), + ]), # Outputs of preprocessing (motion_correction, data_sink, [('par_file', 'preprocessing.@par_file')]), From 502c206531804a0cc338ea14bd674a7d7a83ae0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Dec 2023 15:04:49 +0100 Subject: [PATCH 097/116] Back to ApplyXFM, with no_resample option [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 7bc19e8a..3ecbb21c 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -363,7 +363,7 @@ def get_preprocessing(self): (data_sink, remove_func_1, [('out_file', '_')]), (smoothing, remove_func_2, [('smoothed_file', 'file_name')]), (data_sink, remove_func_2, [('out_file', '_')]), - (alignment_func_to_anat, remove_func_3, [('output_image', 'file_name')]), + (alignment_func_to_anat, remove_func_3, [('out_file', 'file_name')]), (data_sink, remove_func_3, [('out_file', '_')]), (alignment_func_to_mni, remove_func_4, [('output_image', 'file_name')]), (data_sink, remove_func_4, [('out_file', '_')]) From 8adcf865e1e49315dd87e3e4ce4c4647524ec11a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Dec 2023 15:12:20 +0100 Subject: [PATCH 098/116] Back to ApplyXFM, with no_resample option [skip ci] --- narps_open/pipelines/team_08MQ.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 3ecbb21c..04a89b0a 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -336,7 +336,7 @@ def get_preprocessing(self): (brain_extraction_anat, alignment_func_mask_to_anat, [('out_file', 'reference')]), (alignment_func_to_anat, alignment_func_to_mni, [('out_file', 'input_image')]), (alignment_func_mask_to_anat, alignment_func_mask_to_mni, [ - ('output_image', 'input_image') + ('out_file', 'input_image') ]), (normalization_anat, reverse_transform_order, [('forward_transforms', 'inlist')]), (reverse_transform_order, alignment_func_to_mni, [('out', 
'transformation_series')]), @@ -819,8 +819,8 @@ def get_group_level_analysis_sub_workflow(self, method): mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') #TODO mask_intersection.inputs.in_file = mask_intersection.op_string = '-mul %s' - maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] - maths.inputs.out_file = "functional4.nii" + #maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] + #maths.inputs.out_file = "functional4.nii" # MultipleRegressDesign Node - Specify model specify_model = Node(MultipleRegressDesign(), name = 'specify_model') From eb8ae1cb7b70dcc178b66b33c546b6fe1e25d7eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Dec 2023 15:37:24 +0100 Subject: [PATCH 099/116] Mask intersection of func as input of subject_level estimate model [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 04a89b0a..7e457532 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -612,7 +612,7 @@ def get_subject_level_analysis(self): 'varcope' : join(self.directories.output_dir, 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', 'varcope{contrast_id}.nii.gz'), 'masks' : join(self.directories.output_dir, 'preprocessing', - '_run_id_*_subject_id_{subject_id}', 'results', 'varcope{contrast_id}.nii.gz') + '_run_id_*_subject_id_{subject_id}', 'sub-{subject_id}_task-MGT_run-*_bold_brain_mask_flirt_wtsimt.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') select_files.inputs.base_directory = self.directories.dataset_dir From 2789d4471a76657fd027f6b6cc38de4ad0dbbc23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Dec 2023 16:09:54 +0100 Subject: [PATCH 100/116] Mask intersection of func as input of subject_level estimate model [skip ci] --- narps_open/pipelines/team_08MQ.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 7e457532..033c953b 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -612,7 +612,8 @@ def get_subject_level_analysis(self): 'varcope' : join(self.directories.output_dir, 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', 'varcope{contrast_id}.nii.gz'), 'masks' : join(self.directories.output_dir, 'preprocessing', - '_run_id_*_subject_id_{subject_id}', 'sub-{subject_id}_task-MGT_run-*_bold_brain_mask_flirt_wtsimt.nii.gz') + '_run_id_*_subject_id_{subject_id}', + 'sub-{subject_id}_task-MGT_run-*_bold_brain_mask_flirt_wtsimt.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') select_files.inputs.base_directory = self.directories.dataset_dir @@ -641,7 +642,7 @@ def get_subject_level_analysis(self): # MultiImageMaths Node - Create a subject mask by # computing the intersection of all run masks. 
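# [INFO] Editorial aside (not part of the patches; file names are hypothetical): a minimal
# sketch of the mask-intersection pattern used above. Multiplying binary masks keeps only the
# voxels present in every mask, and the op_string repeats '-mul %s' once per operand file:
from nipype.interfaces.fsl.maths import MultiImageMaths

mask_intersection = MultiImageMaths()
mask_intersection.inputs.in_file = 'run-01_mask.nii.gz'  # first mask
mask_intersection.inputs.operand_files = [               # remaining masks
    'run-02_mask.nii.gz', 'run-03_mask.nii.gz', 'run-04_mask.nii.gz']
mask_intersection.inputs.op_string = '-mul %s ' * 3      # one '-mul' per operand file
# Equivalent command line:
#   fslmaths run-01_mask.nii.gz -mul run-02_mask.nii.gz \
#       -mul run-03_mask.nii.gz -mul run-04_mask.nii.gz run-01_mask_maths.nii.gz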
mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') - mask_intersection.op_string = '-mul %s ' * (len(self.run_list) - 1) + mask_intersection.inputs.op_string = '-mul %s ' * (len(self.run_list) - 1) # FLAMEO Node - Estimate model estimate_model = Node(FLAMEO(), name = 'estimate_model') From 5e02694f38e9e15d5b504b108f3c3a3b75c6e94a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 5 Dec 2023 16:57:29 +0100 Subject: [PATCH 101/116] Mask intersection in group_level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 34 +++++++++++++++++++------------ tests/pipelines/test_team_08MQ.py | 4 ++-- 2 files changed, 23 insertions(+), 15 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 033c953b..8fa6023d 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -607,9 +607,9 @@ def get_subject_level_analysis(self): # SelectFiles Node - select necessary files templates = { - 'cope' : join(self.directories.output_dir, 'run_level_analysis', + 'copes' : join(self.directories.output_dir, 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', 'cope{contrast_id}.nii.gz'), - 'varcope' : join(self.directories.output_dir, 'run_level_analysis', + 'varcopes' : join(self.directories.output_dir, 'run_level_analysis', '_run_id_*_subject_id_{subject_id}', 'results', 'varcope{contrast_id}.nii.gz'), 'masks' : join(self.directories.output_dir, 'preprocessing', '_run_id_*_subject_id_{subject_id}', @@ -656,8 +656,8 @@ def get_subject_level_analysis(self): (information_source, select_files, [ ('subject_id', 'subject_id'), ('contrast_id', 'contrast_id')]), - (select_files, merge_copes, [('cope', 'in_files')]), - (select_files, merge_varcopes, [('varcope', 'in_files')]), + (select_files, merge_copes, [('copes', 'in_files')]), + (select_files, merge_varcopes, [('varcopes', 'in_files')]), (select_files, split_masks, [('masks', 'inlist')]), (split_masks, mask_intersection, [('out1', 'in_file')]), (split_masks, mask_intersection, [('out2', 'operand_files')]), @@ -668,6 +668,7 @@ def get_subject_level_analysis(self): ('design_mat', 'design_file'), ('design_con', 't_con_file'), ('design_grp', 'cov_split_file')]), + (mask_intersection, data_sink, [('out_file', 'subject_level_analysis.@mask')]), (estimate_model, data_sink, [ ('zstats', 'subject_level_analysis.@stats'), ('tstats', 'subject_level_analysis.@tstats'), @@ -770,10 +771,12 @@ def get_group_level_analysis_sub_workflow(self, method): # SelectFiles Node - select necessary files templates = { - 'cope' : join(self.directories.output_dir, 'subject_level_analysis', + 'copes' : join(self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'cope1.nii.gz'), - 'varcope' : join(self.directories.output_dir, 'subject_level_analysis', - '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz') + 'varcopes' : join(self.directories.output_dir, 'subject_level_analysis', + '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz'), + 'masks' : join(self.directories.output_dir, 'subject_level_analysis', + '_contrast_id_{contrast_id}_subject_id_*', 'mask') } select_files = Node(SelectFiles(templates), name = 'select_files') select_files.inputs.base_directory = self.directories.dataset_dir @@ -815,13 +818,15 @@ def get_group_level_analysis_sub_workflow(self, method): merge_varcopes = Node(MergeImages(), name = 'merge_varcopes') merge_varcopes.inputs.dimension = 't' + # Split Node - Split mask list 
to serve them as inputs of the MultiImageMaths node. + split_masks = Node(Split(), name = 'split_masks') + split_masks.inputs.splits = [1, len(self.subject_list) - 1] + split_masks.inputs.squeeze = True + # MultiImageMaths Node - Create a group mask by # computing the intersection of all subject masks. mask_intersection = Node(MultiImageMaths(), name = 'mask_intersection') - #TODO mask_intersection.inputs.in_file = - mask_intersection.op_string = '-mul %s' - #maths.inputs.operand_files = ["functional2.nii", "functional3.nii"] - #maths.inputs.out_file = "functional4.nii" + mask_intersection.inputs.op_string = '-mul %s ' * (len(self.subject_list) - 1) # MultipleRegressDesign Node - Specify model specify_model = Node(MultipleRegressDesign(), name = 'specify_model') @@ -848,8 +853,11 @@ def get_group_level_analysis_sub_workflow(self, method): ) group_level_analysis.connect([ (information_source, select_files, [('contrast_id', 'contrast_id')]), - (select_files, get_copes, [('cope', 'input_str')]), - (select_files, get_varcopes, [('varcope', 'input_str')]), + (select_files, get_copes, [('copes', 'input_str')]), + (select_files, get_varcopes, [('varcopes', 'input_str')]), + (select_files, split_masks, [('masks', 'inlist')]), + (split_masks, mask_intersection, [('out1', 'in_file')]), + (split_masks, mask_intersection, [('out2', 'operand_files')]), (get_copes, merge_copes, [(('out_list', clean_list), 'in_files')]), (get_varcopes, merge_varcopes,[(('out_list', clean_list), 'in_files')]), (merge_copes, estimate_model, [('merged_file', 'cope_file')]), diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py index ecca139f..c9b701fc 100644 --- a/tests/pipelines/test_team_08MQ.py +++ b/tests/pipelines/test_team_08MQ.py @@ -96,8 +96,8 @@ def test_subject_information(): reference_durations = [ [4.0, 4.0, 4.0, 4.0, 4.0], - [2.388, 2.289, 0.0, 2.08, 2.288], - [2.388, 2.289, 0.0, 2.08, 2.288], + [4.0, 4.0, 4.0, 4.0, 4.0], + [4.0, 4.0, 4.0, 4.0, 4.0], [4.0, 4.0, 4.0, 4.0, 4.0] ] for reference_array, test_array in zip(reference_durations, information.durations): From 26f0e090ce9d0ed839b81e4e36f8d64083c5c438 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 6 Dec 2023 10:23:58 +0100 Subject: [PATCH 102/116] Mask input + mask intersection inside group level analysis [skip ci] --- narps_open/pipelines/team_08MQ.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 8fa6023d..0a3565ee 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -776,7 +776,8 @@ def get_group_level_analysis_sub_workflow(self, method): 'varcopes' : join(self.directories.output_dir, 'subject_level_analysis', '_contrast_id_{contrast_id}_subject_id_*', 'varcope1.nii.gz'), 'masks' : join(self.directories.output_dir, 'subject_level_analysis', - '_contrast_id_{contrast_id}_subject_id_*', 'mask') + '_contrast_id_{contrast_id}_subject_id_*', + 'sub-*_task-MGT_run-*_bold_brain_mask_flirt_wtsimt_maths.nii.gz') } select_files = Node(SelectFiles(templates), name = 'select_files') select_files.inputs.base_directory = self.directories.dataset_dir From 8ac81d1710ba84faf038bbe80066a8485d65fa70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 6 Dec 2023 15:04:42 +0100 Subject: [PATCH 103/116] Adding mask to randomise Node [skip ci] --- narps_open/pipelines/team_08MQ.py | 1 + 1 file changed, 1 insertion(+) diff --git a/narps_open/pipelines/team_08MQ.py 
b/narps_open/pipelines/team_08MQ.py index 0a3565ee..b59d97c4 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -870,6 +870,7 @@ def get_group_level_analysis_sub_workflow(self, method): ('design_grp', 'cov_split_file') ]), (merge_copes, randomise, [('merged_file', 'in_file')]), + (mask_intersection, randomise, [('out_file', 'mask')]), (specify_model, randomise, [ ('design_mat', 'design_mat'), ('design_con', 'tcon') From ad0b24f1ec80bc98bef8fb847ae5f1b3bc3ac3e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 11 Dec 2023 14:29:55 +0100 Subject: [PATCH 104/116] Actually passing mask to compute median node [skip ci] --- narps_open/pipelines/team_08MQ.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index b59d97c4..2e6ee48e 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -191,7 +191,9 @@ def get_preprocessing(self): # ImageStats Node - Compute median of voxel values to derive SUSAN's brightness_threshold # we do not need to filter on not-zero values (option -P) because a mask is passed compute_median = Node(ImageStats(), name = 'compute_median') - compute_median.inputs.op_string = '-p 50' # Median is calculated as the 50th percentile + compute_median.inputs.op_string = '-p 50 -k %s' + # Median is calculated as the 50th percentile + # -k option adds a mask # SUSAN Node - smoothing of functional images # we set brightness_threshold to .75x median of the input file, as performed by fMRIprep From cb99973e4a2a3423489c0beec3bf2a176b483dd4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 11 Dec 2023 14:35:15 +0100 Subject: [PATCH 105/116] Actually passing mask to compute median node [skip ci] --- narps_open/pipelines/team_08MQ.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 2e6ee48e..408f4f7c 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -248,7 +248,7 @@ def get_preprocessing(self): compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] # Function Nodes remove_files - Remove sizeable files once they aren't needed - remove_func_0 = MapNode(Function( + """remove_func_0 = MapNode(Function( function = remove_file, input_names = ['_', 'file_name'], output_names = [] @@ -276,7 +276,7 @@ def get_preprocessing(self): function = remove_file, input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_func_4', iterfield = 'file_name') + ), name = 'remove_func_4', iterfield = 'file_name')""" preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution']['stop_on_first_crash'] = 'true' @@ -356,7 +356,7 @@ def get_preprocessing(self): ('components_file', 'preprocessing.@components_file')]), (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), (alignment_func_mask_to_mni, data_sink, [ - ('output_image', 'preprocessing.@output_mask')]), + ('output_image', 'preprocessing.@output_mask')])""", # File removals (motion_correction, remove_func_0, [('out_file', 'file_name')]), @@ -368,7 +368,7 @@ def get_preprocessing(self): (alignment_func_to_anat, remove_func_3, [('out_file', 'file_name')]), (data_sink, remove_func_3, [('out_file', '_')]), (alignment_func_to_mni, remove_func_4, [('output_image', 'file_name')]), - (data_sink,
remove_func_4, [('out_file', '_')]) + (data_sink, remove_func_4, [('out_file', '_')])""" ]) return preprocessing From 09cfe2f79768f6929ca78c7244a4021f6b07a352 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 11 Dec 2023 14:36:51 +0100 Subject: [PATCH 106/116] Actually passing mask to compute median node [skip ci] --- narps_open/pipelines/team_08MQ.py | 45 +------------------------------ 1 file changed, 1 insertion(+), 44 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 408f4f7c..470332f8 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -247,37 +247,6 @@ def get_preprocessing(self): compute_confounds.inputs.merge_method = 'union' compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] - # Function Nodes remove_files - Remove sizeable files once they aren't needed - """remove_func_0 = MapNode(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = 'remove_func_0', iterfield = 'file_name') - - remove_func_1 = MapNode(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = 'remove_func_1', iterfield = 'file_name') - - remove_func_2 = MapNode(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = 'remove_func_2', iterfield = 'file_name') - - remove_func_3 = MapNode(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = 'remove_func_3', iterfield = 'file_name') - - remove_func_4 = MapNode(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = 'remove_func_4', iterfield = 'file_name')""" - preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution']['stop_on_first_crash'] = 'true' @@ -356,7 +325,7 @@ def get_preprocessing(self): ('components_file', 'preprocessing.@components_file')]), (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), (alignment_func_mask_to_mni, data_sink, [ - ('output_image', 'preprocessing.@output_mask')])""", - - # File removals - (motion_correction, remove_func_0, [('out_file', 'file_name')]), - (data_sink, remove_func_0, [('out_file', '_')]), - (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'file_name')]), - (data_sink, remove_func_1, [('out_file', '_')]), - (smoothing, remove_func_2, [('smoothed_file', 'file_name')]), - (data_sink, remove_func_2, [('out_file', '_')]), - (alignment_func_to_anat, remove_func_3, [('out_file', 'file_name')]), - (data_sink, remove_func_3, [('out_file', '_')]), - (alignment_func_to_mni, remove_func_4, [('output_image', 'file_name')]), - (data_sink, remove_func_4, [('out_file', '_')])""" + ('output_image', 'preprocessing.@output_mask')]) ]) return preprocessing From bfd163edfe53c0c4385577687d746bfebeef3c9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 11 Dec 2023 16:38:04 +0100 Subject: [PATCH 107/116] Adding mask to compute median node [skip ci] --- narps_open/pipelines/team_08MQ.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 470332f8..f70ea30e 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -189,11 +189,13 @@
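# [INFO] Editorial aside (not part of the patches; file names are hypothetical): a minimal
# sketch of the masked-median computation that PATCH 107 introduces here. fslstats applies
# its options from left to right, so the mask (-k) has to be set before the percentile (-p)
# is computed:
from nipype.interfaces.fsl import ImageStats

compute_median = ImageStats()
compute_median.inputs.in_file = 'bold_mcf_st.nii.gz'   # hypothetical functional image
compute_median.inputs.mask_file = 'brain_mask.nii.gz'  # substituted for %s in op_string
compute_median.inputs.op_string = '-k %s -p 50'
# Equivalent command line:
#   fslstats bold_mcf_st.nii.gz -k brain_mask.nii.gz -p 50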
slice_time_correction.inputs.time_repetition = TaskInformation()['RepetitionTime'] # ImageStats Node - Compute median of voxel values to derive SUSAN's brightness_threshold + # -k option adds a mask + # -p computes the 50th percentile (= median) # we do not need to filter on not-zero values (option -P) because a mask is passed + # Warning : these options must be passed in the right order + # (i.e.: apply mask then compute stat) compute_median = Node(ImageStats(), name = 'compute_median') - compute_median.inputs.op_string = '-p 50 -k %s' - # Median is calculated as the 50th percentile - # -k option adds a mask + compute_median.inputs.op_string = '-k %s -p 50' # SUSAN Node - smoothing of functional images From 7f5191cb973ff07f652539f3a83e02be6dd2f1de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 12 Dec 2023 10:30:26 +0100 Subject: [PATCH 108/116] Adding wm segmentation to coregistration_sbref node working with bbr cost function [skip ci] --- narps_open/pipelines/team_08MQ.py | 1 + 1 file changed, 1 insertion(+) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index f70ea30e..43cdcf7b 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -282,6 +282,7 @@ def get_preprocessing(self): # High contrast functional volume (select_files, coregistration_sbref, [('sbref', 'in_file')]), (select_files, coregistration_sbref, [('anat', 'reference')]), + (threshold_white_matter, coregistration_sbref, [('out_file', 'wm_seg')]), (convert_to_fieldmap, coregistration_sbref, [('out_fieldmap', 'fieldmap')]), (coregistration_sbref, inverse_func_to_anat, [('out_matrix_file', 'in_file')]), From d83d0662d8b47f81ce7b811fa9028e2080803500 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 12 Dec 2023 14:10:01 +0100 Subject: [PATCH 109/116] Adding brain extraction of sbrefs [skip ci] --- narps_open/pipelines/team_08MQ.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 43cdcf7b..b854480a 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -153,6 +153,12 @@ def get_preprocessing(self): # PrepareFieldmap Node - Convert phase and magnitude to fieldmap images convert_to_fieldmap = Node(PrepareFieldmap(), name = 'convert_to_fieldmap') + # BET Node - Brain extraction for high contrast functional images + brain_extraction_sbref = Node(BET(), name = 'brain_extraction_sbref') + brain_extraction_sbref.inputs.frac = 0.3 + brain_extraction_sbref.inputs.mask = True + brain_extraction_sbref.inputs.functional = False # 3D data + # FLIRT Node - Align high contrast functional images to anatomical # (i.e.: single-band reference images a.k.a.
sbref) coregistration_sbref = Node(FLIRT(), name = 'coregistration_sbref') @@ -280,8 +286,9 @@ def get_preprocessing(self): (select_files, convert_to_fieldmap, [('phasediff', 'in_phase')]), # High contrast functional volume - (select_files, coregistration_sbref, [('sbref', 'in_file')]), - (select_files, coregistration_sbref, [('anat', 'reference')]), + (select_files, brain_extraction_sbref, [('sbref', 'in_file')]), + (brain_extraction_sbref, coregistration_sbref, [('out_file', 'in_file')]), + (brain_extraction_anat, coregistration_sbref, [('out_file', 'reference')]), (threshold_white_matter, coregistration_sbref, [('out_file', 'wm_seg')]), (convert_to_fieldmap, coregistration_sbref, [('out_fieldmap', 'fieldmap')]), (coregistration_sbref, inverse_func_to_anat, [('out_matrix_file', 'in_file')]), From 86da1a1100db9f0e6a52790f875ba130dc65ff3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 12 Dec 2023 15:57:39 +0100 Subject: [PATCH 110/116] Using unthresholded wm for coregistration_sbref node [skip ci] --- narps_open/pipelines/team_08MQ.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index b854480a..c46500d3 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -289,7 +289,7 @@ def get_preprocessing(self): (select_files, brain_extraction_sbref, [('sbref', 'in_file')]), (brain_extraction_sbref, coregistration_sbref, [('out_file', 'in_file')]), (brain_extraction_anat, coregistration_sbref, [('out_file', 'reference')]), - (threshold_white_matter, coregistration_sbref, [('out_file', 'wm_seg')]), + (split_segmentation_maps, coregistration_sbref, [('out2', 'wm_seg')]), (convert_to_fieldmap, coregistration_sbref, [('out_fieldmap', 'fieldmap')]), (coregistration_sbref, inverse_func_to_anat, [('out_matrix_file', 'in_file')]), From 483a4d07c7dd21d19c53b6baca1b52bc397dcc7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 12 Dec 2023 17:13:01 +0100 Subject: [PATCH 111/116] Bug with WM selection [skip ci] --- narps_open/pipelines/team_08MQ.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index c46500d3..913d6c8e 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -96,6 +96,9 @@ def get_preprocessing(self): segmentation_anat.inputs.probability_maps = False # Only output partial volume estimation # Split Node - Split probability maps as they output from the segmentation node + # outputs.out1 is CSF + # outputs.out2 is grey matter + # outputs.out3 is white matter split_segmentation_maps = Node(Split(), name = 'split_segmentation_maps') split_segmentation_maps.inputs.splits = [1, 1, 1] split_segmentation_maps.inputs.squeeze = True # Unfold one-element splits removing the list @@ -267,7 +270,7 @@ def get_preprocessing(self): (brain_extraction_anat, segmentation_anat, [('out_file', 'in_files')]), (brain_extraction_anat, normalization_anat, [('out_file', 'moving_image')]), (segmentation_anat, split_segmentation_maps, [('partial_volume_files', 'inlist')]), - (split_segmentation_maps, threshold_white_matter, [('out2', 'in_file')]), + (split_segmentation_maps, threshold_white_matter, [('out3', 'in_file')]), (split_segmentation_maps, threshold_csf, [('out1', 'in_file')]), (threshold_white_matter, erode_white_matter, [('out_file', 'in_file')]), (threshold_csf, erode_csf, [('out_file', 'in_file')]), @@ -289,7 
+292,7 @@ def get_preprocessing(self): (select_files, brain_extraction_sbref, [('sbref', 'in_file')]), (brain_extraction_sbref, coregistration_sbref, [('out_file', 'in_file')]), (brain_extraction_anat, coregistration_sbref, [('out_file', 'reference')]), - (split_segmentation_maps, coregistration_sbref, [('out2', 'wm_seg')]), + (split_segmentation_maps, coregistration_sbref, [('out3', 'wm_seg')]), (convert_to_fieldmap, coregistration_sbref, [('out_fieldmap', 'fieldmap')]), (coregistration_sbref, inverse_func_to_anat, [('out_matrix_file', 'in_file')]), From 843eb1f8772c2b65ca5d24256b03df7a6f90133f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 12 Dec 2023 18:52:51 +0100 Subject: [PATCH 112/116] Adding back remove functions for testing on 20 subjects [skip ci] --- narps_open/pipelines/team_08MQ.py | 41 ++++++++++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 913d6c8e..76ea664a 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -258,6 +258,33 @@ def get_preprocessing(self): compute_confounds.inputs.merge_method = 'union' compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] + # Function Nodes remove_files - Remove sizeable files once they aren't needed + remove_func_0 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'remove_func_0', iterfield = 'file_name') + remove_func_1 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'remove_func_1', iterfield = 'file_name') + remove_func_2 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'remove_func_2', iterfield = 'file_name') + remove_func_3 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'remove_func_3', iterfield = 'file_name') + remove_func_4 = MapNode(Function( + function = remove_file, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'remove_func_4', iterfield = 'file_name') + preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution']['stop_on_first_crash'] = 'true' preprocessing.connect([ @@ -338,7 +365,19 @@ def get_preprocessing(self): ('components_file', 'preprocessing.@components_file')]), (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]), (alignment_func_mask_to_mni, data_sink, [ - ('output_image', 'preprocessing.@output_mask')]) + ('output_image', 'preprocessing.@output_mask')]), + + # File removals + (motion_correction, remove_func_0, [('out_file', 'file_name')]), + (data_sink, remove_func_0, [('out_file', '_')]), + (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'file_name')]), + (data_sink, remove_func_1, [('out_file', '_')]), + (smoothing, remove_func_2, [('smoothed_file', 'file_name')]), + (data_sink, remove_func_2, [('out_file', '_')]), + (alignment_func_to_anat, remove_func_3, [('out_file', 'file_name')]), + (data_sink, remove_func_3, [('out_file', '_')]), + (alignment_func_to_mni, remove_func_4, [('output_image', 'file_name')]), + (data_sink, remove_func_4, [('out_file', '_')]) ]) return preprocessing From e0f599fa8ff8c06657a7150846b885cb55f6e7e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Mon, 18 Dec 2023 
16:06:42 +0100 Subject: [PATCH 113/116] Removing more intermediate results [skip ci] --- narps_open/pipelines/team_08MQ.py | 50 +++++++++++++------------------ 1 file changed, 21 insertions(+), 29 deletions(-) diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py index 76ea664a..3c62ec1d 100644 --- a/narps_open/pipelines/team_08MQ.py +++ b/narps_open/pipelines/team_08MQ.py @@ -258,32 +258,21 @@ def get_preprocessing(self): compute_confounds.inputs.merge_method = 'union' compute_confounds.inputs.repetition_time = TaskInformation()['RepetitionTime'] + # Merge Node - Merge file names to be removed after datasink node is performed + merge_removable_files = Node(Merge(8), name = 'merge_removable_files') + merge_removable_files.inputs.ravel_inputs = True + # Function Nodes remove_files - Remove sizeable files once they aren't needed - remove_func_0 = MapNode(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = 'remove_func_0', iterfield = 'file_name') - remove_func_1 = MapNode(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = 'remove_func_1', iterfield = 'file_name') - remove_func_2 = MapNode(Function( - function = remove_file, - input_names = ['_', 'file_name'], - output_names = [] - ), name = 'remove_func_2', iterfield = 'file_name') - remove_func_3 = MapNode(Function( + remove_after_datasink = MapNode(Function( function = remove_file, input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_func_3', iterfield = 'file_name') - remove_func_4 = MapNode(Function( + ), name = 'remove_after_datasink', iterfield = 'file_name') + remove_func = MapNode(Function( function = remove_file, input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_func_4', iterfield = 'file_name') + ), name = 'remove_func', iterfield = 'file_name') preprocessing = Workflow(base_dir = self.directories.working_dir, name = 'preprocessing') preprocessing.config['execution']['stop_on_first_crash'] = 'true' @@ -368,16 +357,19 @@ def get_preprocessing(self): ('output_image', 'preprocessing.@output_mask')]), # File removals - (motion_correction, remove_func_0, [('out_file', 'file_name')]), - (data_sink, remove_func_0, [('out_file', '_')]), - (slice_time_correction, remove_func_1, [('slice_time_corrected_file', 'file_name')]), - (data_sink, remove_func_1, [('out_file', '_')]), - (smoothing, remove_func_2, [('smoothed_file', 'file_name')]), - (data_sink, remove_func_2, [('out_file', '_')]), - (alignment_func_to_anat, remove_func_3, [('out_file', 'file_name')]), - (data_sink, remove_func_3, [('out_file', '_')]), - (alignment_func_to_mni, remove_func_4, [('output_image', 'file_name')]), - (data_sink, remove_func_4, [('out_file', '_')]) + (alignment_func_to_anat, remove_func, [('out_file', 'file_name')]), + (alignment_func_to_mni, remove_func, [('output_image', '_')]), + + (motion_correction, merge_removable_files, [('out_file', 'in1')]), + (slice_time_correction, merge_removable_files, [('slice_time_corrected_file', 'in2')]), + (smoothing, merge_removable_files, [('smoothed_file', 'in3')]), + (alignment_func_to_mni, merge_removable_files, [('output_image', 'in4')]), + (brain_extraction_func, merge_removable_files, [('out_file', 'in5')]), + (brain_extraction_anat, merge_removable_files, [('out_file', 'in6')]), + (bias_field_correction, merge_removable_files, [('restored_image', 'in7')]), + (normalization_anat, merge_removable_files, [('forward_transforms', 'in8')]), + 
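# [INFO] Editorial aside (not part of the patches; the output directory is hypothetical): a
# minimal sketch of the cleanup pattern from PATCH 113 above. Connecting DataSink's 'out_file'
# to the dummy '_' input only creates an execution dependency, so the removal node waits until
# the sink has stored its results before deleting the intermediate files:
from nipype import MapNode, Node, Workflow, Function
from nipype.interfaces.io import DataSink
from narps_open.core.common import remove_file

data_sink = Node(DataSink(base_directory = '/tmp/output'), name = 'data_sink')
remove_after_datasink = MapNode(Function(
    function = remove_file,
    input_names = ['_', 'file_name'],
    output_names = []
    ), name = 'remove_after_datasink', iterfield = 'file_name')

sketch = Workflow(name = 'cleanup_sketch')
sketch.connect(data_sink, 'out_file', remove_after_datasink, '_')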
From 6c228f972aec3de4f758f651c58e75d21a987788 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Wed, 20 Dec 2023 11:00:48 +0100
Subject: [PATCH 114/116] Force 2.0mm resampling for alignment_func_to_anat node [skip ci]

---
 narps_open/pipelines/team_08MQ.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py
index 3c62ec1d..61075eb8 100644
--- a/narps_open/pipelines/team_08MQ.py
+++ b/narps_open/pipelines/team_08MQ.py
@@ -223,8 +223,10 @@ def get_preprocessing(self):
         alignment_csf.inputs.no_resample = True
 
         # ApplyXFM Node - Alignment of functional data to anatomical space
+        # To save disk space we force isotropic resampling with 2.0 mm voxel dimension
+        # instead of 1.0 mm as reference file would suggest
         alignment_func_to_anat = Node(ApplyXFM(), name = 'alignment_func_to_anat')
-        alignment_func_to_anat.inputs.apply_xfm = True
+        alignment_func_to_anat.inputs.apply_isoxfm = 2.0
         alignment_func_to_anat.inputs.no_resample = True
 
         # ApplyTransforms Node - Alignment of functional brain mask to anatomical space

From 40a67e1abe9470d969b7b0275abbb090dfd17eea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Wed, 20 Dec 2023 11:02:06 +0100
Subject: [PATCH 115/116] Force 2.0mm resampling for alignment_func_to_anat node [skip ci]

---
 narps_open/pipelines/team_08MQ.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py
index 61075eb8..c924e507 100644
--- a/narps_open/pipelines/team_08MQ.py
+++ b/narps_open/pipelines/team_08MQ.py
@@ -226,6 +226,7 @@ def get_preprocessing(self):
         # To save disk space we force isotropic resampling with 2.0 mm voxel dimension
         # instead of 1.0 mm as reference file would suggest
         alignment_func_to_anat = Node(ApplyXFM(), name = 'alignment_func_to_anat')
+        alignment_func_to_anat.inputs.apply_xfm = False
         alignment_func_to_anat.inputs.apply_isoxfm = 2.0
         alignment_func_to_anat.inputs.no_resample = True

From 3250a6d069d8374c8d5d5f8a8f9ea556136f4e69 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Boris=20Cl=C3=A9net?=
Date: Wed, 20 Dec 2023 11:19:19 +0100
Subject: [PATCH 116/116] [TEST][PEP8] [skip ci]

---
 narps_open/pipelines/team_08MQ.py | 23 ++++++++++++++---------
 tests/pipelines/test_team_08MQ.py |  4 ++--
 2 files changed, 16 insertions(+), 11 deletions(-)

diff --git a/narps_open/pipelines/team_08MQ.py b/narps_open/pipelines/team_08MQ.py
index c924e507..9766c3ce 100644
--- a/narps_open/pipelines/team_08MQ.py
+++ b/narps_open/pipelines/team_08MQ.py
@@ -23,7 +23,7 @@
 from nipype.interfaces.fsl.maths import MultiImageMaths
 from nipype.algorithms.confounds import CompCor
 from nipype.algorithms.modelgen import SpecifyModel
-from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform, ApplyTransforms
+from nipype.interfaces.ants import Registration, WarpTimeSeriesImageMultiTransform
 
 from narps_open.pipelines import Pipeline
 from narps_open.data.task import TaskInformation
@@ -222,11 +222,12 @@ def get_preprocessing(self):
         alignment_csf.inputs.apply_xfm = True
         alignment_csf.inputs.no_resample = True
 
-        # ApplyXFM Node - Alignment of functional data to anatomical space
+        # FLIRT Node - Alignment of functional data to anatomical space
         # To save disk space we force isotropic resampling with 2.0 mm voxel dimension
-        # instead of 1.0 mm as reference file would suggest
-        alignment_func_to_anat = Node(ApplyXFM(), name = 'alignment_func_to_anat')
-        alignment_func_to_anat.inputs.apply_xfm = False
+        # instead of 1.0 mm as reference file would suggest.
+        # We have to use FLIRT instead of ApplyXFM because there is a bug with
+        # apply_isoxfm and the latter.
+        alignment_func_to_anat = Node(FLIRT(), name = 'alignment_func_to_anat')
         alignment_func_to_anat.inputs.apply_isoxfm = 2.0
         alignment_func_to_anat.inputs.no_resample = True
@@ -281,7 +282,9 @@ def get_preprocessing(self):
         preprocessing.config['execution']['stop_on_first_crash'] = 'true'
         preprocessing.connect([
             # Inputs
-            (information_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]),
+            (information_source, select_files, [
+                ('subject_id', 'subject_id'), ('run_id', 'run_id')
+            ]),
 
             # Anatomical images
             (select_files, bias_field_correction, [('anat', 'in_files')]),
@@ -358,7 +361,7 @@ def get_preprocessing(self):
             (alignment_func_to_mni, data_sink, [('output_image', 'preprocessing.@output_image')]),
             (alignment_func_mask_to_mni, data_sink, [
                 ('output_image', 'preprocessing.@output_mask')]),
-
+
             # File removals
             (alignment_func_to_anat, remove_func, [('out_file', 'file_name')]),
             (alignment_func_to_mni, remove_func, [('output_image', '_')]),
@@ -387,7 +390,7 @@ def get_preprocessing_outputs(self):
             'components_file.txt',
             'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf.nii.gz.par',
             'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz',
-            # mask file 'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mcf_st_smooth_flirt_wtsimt.nii.gz'
+            'sub-{subject_id}_task-MGT_run-{run_id}_bold_brain_mask_flirt_wtsimt.nii.gz'
         ]
     }
     parameter_sets = product(*parameters.values())
@@ -537,7 +540,9 @@ def get_run_level_analysis(self):
             name = 'run_level_analysis'
         )
         run_level_analysis.connect([
-            (information_source, select_files, [('subject_id', 'subject_id'), ('run_id', 'run_id')]),
+            (information_source, select_files, [
+                ('subject_id', 'subject_id'), ('run_id', 'run_id')
+            ]),
             (select_files, subject_information, [('event', 'event_file')]),
             (subject_information, specify_model, [('subject_info', 'subject_info')]),
             (select_files, specify_model, [('motion', 'realignment_parameters')]),
diff --git a/tests/pipelines/test_team_08MQ.py b/tests/pipelines/test_team_08MQ.py
index c9b701fc..b962557f 100644
--- a/tests/pipelines/test_team_08MQ.py
+++ b/tests/pipelines/test_team_08MQ.py
@@ -57,7 +57,7 @@ def test_outputs():
     pipeline = PipelineTeam08MQ()
     # 1 - 1 subject outputs
     pipeline.subject_list = ['001']
-    assert len(pipeline.get_preprocessing_outputs()) == 3*4
+    assert len(pipeline.get_preprocessing_outputs()) == 4*4
     assert len(pipeline.get_run_level_outputs()) == 8+4*3*4
     assert len(pipeline.get_subject_level_outputs()) == 4*3
     assert len(pipeline.get_group_level_outputs()) == 0
@@ -65,7 +65,7 @@ def test_outputs():
 
     # 2 - 4 subjects outputs
     pipeline.subject_list = ['001', '002', '003', '004']
-    assert len(pipeline.get_preprocessing_outputs()) == 3*4*4
+    assert len(pipeline.get_preprocessing_outputs()) == 4*4*4
     assert len(pipeline.get_run_level_outputs()) == (8+4*3*4)*4
     assert len(pipeline.get_subject_level_outputs()) == 4*3*4
     assert len(pipeline.get_group_level_outputs()) == 0
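[INFO] Some context on patches 114 to 116: in Nipype, ApplyXFM is a FLIRT subclass
whose input spec sets apply_xfm = True by default, while FLIRT's apply_isoxfm trait
is declared as mutually exclusive (xor) with apply_xfm. This is presumably the bug
patch 116 refers to: even apply_xfm = False counts as a defined value, so combining
it with apply_isoxfm still trips the xor check, and using plain FLIRT avoids the
conflict. A minimal sketch of the node as configured by these three patches:

    # Final state of the alignment node after patches 114-116
    from nipype import Node
    from nipype.interfaces.fsl import FLIRT

    alignment_func_to_anat = Node(FLIRT(), name = 'alignment_func_to_anat')
    alignment_func_to_anat.inputs.apply_isoxfm = 2.0  # isotropic 2.0 mm resampling
    alignment_func_to_anat.inputs.no_resample = True  # apply the transform without extra resampling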
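[INFO] The updated assertions in test_team_08MQ.py follow from the change in
get_preprocessing_outputs(): the brain-mask template, previously a comment, is now a
fourth real entry in the per-run output list. Assuming the 4 runs of the MGT task,
this gives 4 files x 4 runs = 16 outputs for 1 subject (written 4*4) and
4*4*4 = 64 outputs for 4 subjects.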