From 831b7efec54bae5aa5f84dd0c47b93946f492f73 Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Thu, 21 Sep 2023 14:11:15 -0400 Subject: [PATCH 01/24] [REPRO] Pre-processing pipeline implemented and tested. --- narps_open/pipelines/team_3C6G.py | 815 ++++++++++++++++++++++++++++++ tests/pipelines/test_team_3C6G.py | 79 +++ 2 files changed, 894 insertions(+) create mode 100644 narps_open/pipelines/team_3C6G.py create mode 100644 tests/pipelines/test_team_3C6G.py diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py new file mode 100644 index 00000000..210d468a --- /dev/null +++ b/narps_open/pipelines/team_3C6G.py @@ -0,0 +1,815 @@ +#!/usr/bin/python +# coding: utf-8 + +""" +This template can be use to reproduce a pipeline using SPM as main software. + +- Replace all occurrences of 3C6G by the actual id of the team. +- All lines starting with [INFO], are meant to help you during the reproduction, these can be removed +eventually. +- Also remove lines starting with [TODO], once you did what they suggested. +""" + +# [TODO] Only import modules you use further in te code, remove others from the import section + +from os.path import join + +# [INFO] The import of base objects from Nipype, to create Workflows +from nipype import Node, Workflow # , JoinNode, MapNode + +# [INFO] a list of interfaces used to manpulate data +from nipype.interfaces.utility import IdentityInterface, Function +from nipype.interfaces.io import SelectFiles, DataSink +from nipype.algorithms.misc import Gunzip + +# [INFO] a list of SPM-specific interfaces +from nipype.algorithms.modelgen import SpecifySPMModel +from nipype.interfaces.spm import ( + Realign, Coregister, NewSegment, Normalize12, Smooth, + Level1Design, OneSampleTTestDesign, TwoSampleTTestDesign, + EstimateModel, EstimateContrast, Threshold + ) +from nipype.interfaces.fsl import ( + ExtractROI + ) + +# [INFO] In order to inherit from Pipeline +from narps_open.pipelines import Pipeline + +class PipelineTeam3C6G(Pipeline): + """ A class that defines the pipeline of team 3C6G """ + + def __init__(self): + super().__init__() + # [INFO] Remove the init method completely if unused + # [TODO] Init the attributes of the pipeline, if any other than the ones defined + # in the pipeline class + + self.fwhm = 6.0 + self.team_id = '3C6G' + self.contrast_list = ['0001', '0002', '0003'] + + def get_vox_dims(volume : list) -> list: + ''' + Function that gives the voxel dimension of an image. + Not used here but if we use it, modify the connection to : + (?, normalize_func, [('?', 'apply_to_files'), + (('?', get_vox_dims), + 'write_voxel_sizes')]) + Args: + volume: list | str + List of str or str that represent a path to a Nifti image. + Returns: + list: + size of the voxels in the volume or in the first volume of the list. 
+ ''' + import nibabel as nb + if isinstance(volume, list): + volume = volume[0] + nii = nb.load(volume) + hdr = nii.header + voxdims = hdr.get_zooms() + return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] + + def get_preprocessing(self): + """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ + + # [INFO] The following part stays the same for all preprocessing pipelines + + # IdentityInterface node - allows to iterate over subjects and runs + info_source = Node( + IdentityInterface(fields=['subject_id', 'run_id']), + name='info_source' + ) + info_source.iterables = [ + ('subject_id', self.subject_list), + ('run_id', self.run_list), + ] + + # Templates to select files node + file_templates = { + 'anat': join( + 'sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz' + ), + 'func': join( + 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz' + ) + } + + # SelectFiles node - to select necessary files + select_files = Node( + SelectFiles( + file_templates, + base_directory = self.directories.dataset_dir + ), + name='select_files' + ) + + # DataSink Node - store the wanted results in the wanted repository + data_sink = Node( + DataSink( + base_directory = self.directories.output_dir + ), + name='data_sink', + ) + + # [INFO] The following part has to be modified with nodes of the pipeline + gunzip_func = Node ( + Gunzip(), + name='gunzip_func' + ) + + gunzip_anat = Node ( + Gunzip(), + name='gunzip_anat' + ) + + # 1 - Rigid-body realignment in SPM12 using 1st scan as referenced scan and normalized mutual information. + realign = Node( + Realign( + register_to_mean=False + ), + name='realign' + ) + + # Extract 1st image + extract_first = Node( + ExtractROI( + t_min = 1, + t_size = 1, + output_type='NIFTI' + ), + name = 'extract_first' + ) + + # 2 - Co-registration in SPM12 using default parameters. + coregister = Node( + Coregister( + cost_function='nmi' + ), + name = 'coregister' + ) + + # 3 - Unified segmentation using tissue probability maps in SPM12. + # Unified segmentation in SPM12 to MNI space (the MNI-space tissue probability maps used in segmentation) using default parameters. + # Bias-field correction in the context of unified segmentation in SPM12. 
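+        # [INFO] Each tissue entry below follows Nipype's NewSegment convention:
+        #   ((tissue probability map file, 1-based tissue index), number of Gaussians,
+        #    (write native-space map, write DARTEL-imported map),
+        #    (write unmodulated normalised map, write modulated normalised map))
+        # In SPM12's TPM.nii, tissues 1 to 6 are GM, WM, CSF, bone, soft tissue and air.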
+ tissue1 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 1), 1, (True,False), (True, False)] + tissue2 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 2), 1, (True,False), (True, False)] + tissue3 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 3), 2, (True,False), (True, False)] + tissue4 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 4), 3, (True,False), (True, False)] + tissue5 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 5), 4, (True,False), (True, False)] + tissue6 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 6), 2, (True,False), (True, False)] + tissue_list = [tissue1, tissue2, tissue3, tissue4, tissue5, tissue6] + + segment = Node( + NewSegment( + write_deformation_fields = [True, True], + tissues = tissue_list + ), + name = 'segment' + ) + + # 4 - Spatial normalization of functional images + normalize = Node( + Normalize12( + jobtype = 'write' + ), + name = 'normalize' + ) + + # 5 - 6 mm fixed FWHM smoothing in MNI volume + smooth = Node( + Smooth( + fwhm=self.fwhm), + name = 'smooth' + ) + + # [INFO] The following part defines the nipype workflow and the connections between nodes + + preprocessing = Workflow( + base_dir = self.directories.working_dir, + name = 'preprocessing' + ) + + # [TODO] Add the connections the workflow needs + # [INFO] Input and output names can be found on NiPype documentation + preprocessing.connect( + [ + ( + info_source, + select_files, + [('subject_id', 'subject_id'), ('run_id', 'run_id')], + ), + ( + select_files, + gunzip_anat, + [('anat', 'in_file')] + ), + ( + select_files, + gunzip_func, + [('func', 'in_file')] + ), + ( + gunzip_func, + realign, + [('out_file', 'in_files')], + ), + ( + realign, + extract_first, + [('realigned_files', 'in_file')], + ), + ( + extract_first, + coregister, + [('roi_file', 'source')], + ), + ( + realign, + coregister, + [('realigned_files', 'apply_to_files')], + ), + ( + gunzip_anat, + coregister, + [('out_file', 'target')], + ), + ( + gunzip_anat, + segment, + [('out_file', 'channel_files')], + ), + ( + segment, + normalize, + [('forward_deformation_field', 'deformation_file')], + ), + ( + coregister, + normalize, + [('coregistered_files', 'apply_to_files')], + ), + ( + normalize, + smooth, + [('normalized_files', 'in_files')], + ), + ( + smooth, + data_sink, + [('smoothed_files', 'preprocessing.@smoothed')], + ), + ( + realign, + data_sink, + [('realignment_parameters', 'preprocessing.@motion_parameters')], + ), + ( + segment, + data_sink, + [('native_class_images', 'preprocessing.@segmented'), + ('normalized_class_images', 'preprocessing.@segmented_normalized')], + ), + + ] + ) + + # [INFO] Here we simply return the created workflow + return preprocessing + + def get_preprocessing_outputs(self): + """ Return the names of the files the preprocessing analysis is supposed to generate. 
""" + + # Smoothed maps + templates = [join( + self.directories.output_dir, + 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + 'swrrsub-{subject_id}_task-MGT_run-{run_id}_bold.nii')] + + # Motion parameters file + templates += [join( + self.directories.output_dir, + 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + 'rp_sub-{subject_id}_task-MGT_run-{run_id}_bold.txt')] + + # Segmentation maps + templates += [join( + self.directories.output_dir, + 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + f'c{i}'+'sub-{subject_id}_T1w.nii')\ + for i in range(1,7)] + + templates += [join( + self.directories.output_dir, + 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + f'wc{i}'+'sub-{subject_id}_T1w.nii')\ + for i in range(1,7)] + + # Format with subject_ids + return_list = [] + for template in templates: + return_list += [template.format(subject_id = s, run_id = r) for r in self.run_list for s in self.subject_list] + + return return_list + + + # [INFO] There was no run level analysis for the pipelines using SPM + def get_run_level_analysis(self): + """ Return a Nipype workflow describing the run level analysis part of the pipeline """ + return None + + # [INFO] This function is used in the subject level analysis pipelines using SPM + # [TODO] Adapt this example to your specific pipeline + def get_subject_infos(event_files: list, runs: list): + """ + -MGT task (taken from .tsv files, duration = 4) with canonical HRF (no derivatives) + -Parametric modulator gain (from "gain" column in event .tsv file) + - Parametric modulator loss (from "loss" column in event .tsv file) + -highpass DCT filtering in SPM (using default period of 1/128 s) + -6 movement regressors from realignment + + Create Bunchs for specifySPMModel. + + Parameters : + - event_files: list of events files (one per run) for the subject + - runs: list of runs to use + + Returns : + - subject_info : list of Bunch for 1st level analysis. + """ + from nipype.interfaces.base import Bunch + + condition_names = ['trial'] + onset = {} + duration = {} + weights_gain = {} + weights_loss = {} + + # Loop over number of runs + for run_id in range(len(runs)): + + # Create dictionary items with empty lists + onset.update({s + '_run' + str(run_id + 1): [] for s in condition_names}) + duration.update({s + '_run' + str(run_id + 1): [] for s in condition_names}) + weights_gain.update({'gain_run' + str(run_id + 1): []}) + weights_loss.update({'loss_run' + str(run_id + 1): []}) + + with open(event_files[run_id], 'rt') as event_file: + next(event_file) # skip the header + + for line in event_file: + info = line.strip().split() + + for condition in condition_names: + val = condition + '_run' + str(run_id + 1) # trial_run1 or accepting_run1 + val_gain = 'gain_run' + str(run_id + 1) # gain_run1 + val_loss = 'loss_run' + str(run_id + 1) # loss_run1 + if condition == 'trial': + onset[val].append(float(info[0])) # onsets for trial_run1 + duration[val].append(float(4)) + weights_gain[val_gain].append(float(info[2])) + weights_loss[val_loss].append(float(info[3])) + + # Bunching is done per run, i.e. trial_run1, trial_run2, etc. 
+ # But names must not have '_run1' etc because we concatenate runs + subject_info = [] + for run_id in range(len(runs)): + + conditions = [s + '_run' + str(run_id + 1) for s in condition_names] + gain = 'gain_run' + str(run_id + 1) + loss = 'loss_run' + str(run_id + 1) + + subject_info.insert( + run_id, + Bunch( + conditions = condition_names, + onsets = [onset[c] for c in conditions], + durations = [duration[c] for c in conditions], + amplitudes = None, + tmod = None, + pmod = [ + Bunch( + name = ['gain', 'loss'], + poly = [1, 1], + param = [weights_gain[gain], weights_loss[loss]], + ), + None, + ], + regressor_names = None, + regressors = None, + ), + ) + + return subject_info + + # [INFO] This function creates the contrasts that will be analyzed in the first level analysis + # [TODO] Adapt this example to your specific pipeline + def get_contrasts(): + """ + Create the list of tuples that represents contrasts. + Each contrast is in the form : + (Name,Stat,[list of condition names],[weights on those conditions]) + + Returns: + - contrasts: list of tuples, list of contrasts to analyze + """ + # List of condition names + conditions = ['trial', 'trialxgain^1', 'trialxloss^1'] + + # Create contrasts + trial = ('trial', 'T', conditions, [1, 0, 0]) + effect_gain = ('effect_of_gain', 'T', conditions, [0, 1, 0]) + neg_effect_gain = ('neg_effect_of_gain', 'T', conditions, [0, -1, 0]) + effect_loss = ('effect_of_loss', 'T', conditions, [0, 0, 1]) + neg_effect_loss = ('neg_effect_of_loss', 'T', conditions, [0, 0, -1]) + + contrasts = [trial, effect_gain, effect_loss] + + return contrasts + + def get_subject_level_analysis(self): + """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ + + # [INFO] The following part stays the same for all pipelines + + # Infosource Node - To iterate on subjects + info_source = Node( + IdentityInterface( + fields = ['subject_id', 'dataset_dir', 'results_dir', 'working_dir', 'run_list'], + dataset_dir = self.directories.dataset_dir, + results_dir = self.directories.results_dir, + working_dir = self.directories.working_dir, + run_list = self.run_list + ), + name='info_source', + ) + info_source.iterables = [('subject_id', self.subject_list)] + + # Templates to select files node + # [TODO] Change the name of the files depending on the filenames of results of preprocessing + templates = { + 'func': join( + self.directories.results_dir, + 'preprocess', + '_run_id_*_subject_id_{subject_id}', + 'complete_filename_{subject_id}_complete_filename.nii', + ), + 'event': join( + self.directories.dataset_dir, + 'sub-{subject_id}', + 'func', + 'sub-{subject_id}_task-MGT_run-*_events.tsv', + ), + 'parameters': join( + self.directories.results_dir, + 'preprocess', + '_run_id_*_subject_id_{subject_id}', + 'complete_filename_{subject_id}_complete_filename.txt', + ) + + } + + # SelectFiles node - to select necessary files + select_files = Node( + SelectFiles(templates, base_directory = self.directories.dataset_dir), + name = 'select_files' + ) + + # DataSink Node - store the wanted results in the wanted repository + data_sink = Node( + DataSink(base_directory = self.directories.output_dir), + name = 'data_sink' + ) + + # [INFO] This is the node executing the get_subject_infos_spm function + # Subject Infos node - get subject specific condition information + subject_infos = Node( + Function( + input_names = ['event_files', 'runs'], + output_names = ['subject_info'], + function = self.get_subject_infos, + ), + name = 'subject_infos', + ) + 
subject_infos.inputs.runs = self.run_list + + # [INFO] This is the node executing the get_contrasts function + # Contrasts node - to get contrasts + contrasts = Node( + Function( + output_names = ['contrasts'], + function = self.get_contrasts, + ), + name = 'contrasts', + ) + + # [INFO] The following part has to be modified with nodes of the pipeline + + # [TODO] For each node, replace 'node_name' by an explicit name, and use it for both: + # - the name of the variable in which you store the Node object + # - the 'name' attribute of the Node + # [TODO] The node_function refers to a NiPype interface that you must import + # at the beginning of the file. + # SpecifyModel - generates SPM-specific Model + specify_model = Node( + SpecifySPMModel( + concatenate_runs = True, + input_units = 'secs', + output_units = 'secs', + time_repetition = self.tr, + high_pass_filter_cutoff = 128), + name = 'specify_model' + ) + + # Level1Design - generates an SPM design matrix + l1_design = Node( + Level1Design( + bases = {'hrf': {'derivs': [0, 0]}}, + timing_units = 'secs', + interscan_interval = self.tr), + name = 'l1_design' + ) + + # EstimateModel - estimate the parameters of the model + l1_estimate = Node( + EstimateModel( + estimation_method = {'Classical': 1}), + name = 'l1_estimate' + ) + + # EstimateContrast - estimates contrasts + contrast_estimate = Node( + EstimateContrast(), + name = 'contrast_estimate' + ) + + # [INFO] The following part defines the nipype workflow and the connections between nodes + + subject_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = 'subject_level_analysis' + ) + # [TODO] Add the connections the workflow needs + # [INFO] Input and output names can be found on NiPype documentation + subject_level_analysis.connect([ + ( + info_source, + select_files, + [('subject_id', 'subject_id')] + ), + ( + select_files, + subject_infos, + [('event', 'event_files')] + ), + ( + subject_infos, + specify_model, + [('subject_info', 'subject_info')] + ), + ( + contrasts, + contrast_estimate, + [('contrasts', 'contrasts')] + ), + ( + select_files, + specify_model, + [('func', 'functional_runs'), ('parameters', 'realignment_parameters')] + ), + ( + specify_model, + l1_design, + [('session_info', 'session_info')] + ), + ( + l1_design, + l1_estimate, + [('spm_mat_file', 'spm_mat_file')] + ), + ( + l1_estimate, + contrast_estimate, + [('spm_mat_file', 'spm_mat_file'), + ('beta_images', 'beta_images'), + ('residual_image', 'residual_image')] + ), + ( + contrast_estimate, + datasink, + [('con_images', 'l1_analysis.@con_images'), + ('spmT_images', 'l1_analysis.@spmT_images'), + ('spm_mat_file', 'l1_analysis.@spm_mat_file')] + ), + ]) + + # [INFO] Here we simply return the created workflow + return subject_level_analysis + + def get_subject_level_outputs(self): + """ Return the names of the files the subject level analysis is supposed to generate. 
""" + + # Contrat maps + templates = [join( + self.directories.output_dir, + 'l1_analysis', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ + for contrast_id in self.contrast_list] + + # SPM.mat file + templates += [join( + self.directories.output_dir, + 'l1_analysis', '_subject_id_{subject_id}', 'SPM.mat')] + + # spmT maps + templates += [join( + self.directories.output_dir, + 'l1_analysis', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ + for contrast_id in self.contrast_list] + + # Format with subject_ids + return_list = [] + for template in templates: + return_list += [template.format(subject_id = s) for s in self.subject_list] + + return return_list + + # [INFO] This function returns the list of ids and files of each group of participants + # to do analyses for both groups, and one between the two groups. + def get_subset_contrasts( + file_list, subject_list: list, participants_file: str + ): + """ + This function return the file list containing only the files belonging + to the subjects in the wanted group. + + Parameters : + - file_list : original file list selected by selectfiles node + - subject_list : list of subject IDs that are in the wanted group for the analysis + - participants_file: str, file containing participants characteristics + + Returns : + - equal_indifference_id : a list of subject ids in the equalIndifference group + - equal_range_id : a list of subject ids in the equalRange group + - equal_indifference_files : a subset of file_list corresponding to subjects + in the equalIndifference group + - equal_range_files : a subset of file_list corresponding to subjects + in the equalRange group + """ + equal_indifference_id = [] + equal_range_id = [] + equal_indifference_files = [] + equal_range_files = [] + + # Reading file containing participants IDs and groups + with open(participants_file, 'rt') as file: + next(file) # skip the header + for line in file: + info = line.strip().split() + if info[0][-3:] in subject_list and info[1] == 'equalIndifference': + equal_indifference_id.append(info[0][-3:]) + elif info[0][-3:] in subject_list and info[1] == 'equalRange': + equal_range_id.append(info[0][-3:]) + + for file in file_list: + sub_id = file.split('/') + if sub_id[-2][-3:] in equal_indifference_id: + equal_indifference_files.append(file) + elif sub_id[-2][-3:] in equal_range_id: + equal_range_files.append(file) + + return equal_indifference_id, equal_range_id, equal_indifference_files, equal_range_files + + def get_group_level_analysis(self): + """ + Return all workflows for the group level analysis. + + Returns; + - a list of nipype.WorkFlow + """ + + methods = ['equalRange', 'equalIndifference', 'groupComp'] + return [self.get_group_level_analysis_sub_workflow(method) for method in methods] + + def get_group_level_analysis_sub_workflow(self, method): + """ + Return a workflow for the group level analysis. 
+ + Parameters: + - method: one of 'equalRange', 'equalIndifference' or 'groupComp' + + Returns: + - group_level_analysis: nipype.WorkFlow + """ + # [INFO] The following part stays the same for all preprocessing pipelines + + # Infosource node - iterate over the list of contrasts generated + # by the subject level analysis + info_source = Node( + IdentityInterface( + fields = ['contrast_id', 'subjects'], + subjects = self.subject_list + ), + name = 'info_source', + ) + info_source.iterables = [('contrast_id', self.contrast_list)] + + # Templates to select files node + # [TODO] Change the name of the files depending on the filenames + # of results of first level analysis + templates = { + 'contrast': join( + self.directories.results_dir, + 'subject_level_analysis', + '_subject_id_*', + 'complete_filename_{contrast_id}_complete_filename.nii', + ), + 'participants': join( + self.directories.dataset_dir, + 'participants.tsv' + ) + } + select_files = Node( + SelectFiles( + templates, + base_directory = self.directories.results_dir, + force_list = True + ), + name = 'select_files', + ) + + # Datasink node - to save important files + data_sink = Node( + DataSink(base_directory = self.directories.output_dir), + name = 'data_sink', + ) + + # Contrasts node - to select subset of contrasts + sub_contrasts = Node( + Function( + input_names = ['file_list', 'method', 'subject_list', 'participants_file'], + output_names = [ + 'equalIndifference_id', + 'equalRange_id', + 'equalIndifference_files', + 'equalRange_files', + ], + function = self.get_subset_contrasts, + ), + name = 'sub_contrasts', + ) + sub_contrasts.inputs.method = method + + # [INFO] The following part has to be modified with nodes of the pipeline + + # [TODO] For each node, replace 'node_name' by an explicit name, and use it for both: + # - the name of the variable in which you store the Node object + # - the 'name' attribute of the Node + # [TODO] The node_function refers to a NiPype interface that you must import + # at the beginning of the file. + node_name = Node( + node_function, + name = 'node_name' + ) + + # [INFO] The following part defines the nipype workflow and the connections between nodes + + # Compute the number of participants used to do the analysis + nb_subjects = len(self.subject_list) + + # Declare the workflow + group_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_{method}_nsub_{nb_subjects}' + ) + group_level_analysis.connect( + [ + ( + info_source, + select_files, + [('contrast_id', 'contrast_id')], + ), + (info_source, sub_contrasts, [('subjects', 'subject_list')]), + ( + select_files, + sub_contrasts, + [('contrast', 'file_list'), ('participants', 'participants_file')], + ), # Complete with other links between nodes + ] + ) + + # [INFO] Here we define the contrasts used for the group level analysis, depending on the + # method used. 
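+        # [INFO] With the one-sample t-test designs, the design matrix holds a single
+        # column (the group mean), hence the 'mean' regressor tested with +1 / -1 weights.
+        # With the two-sample t-test design, 'Group_{1}' and 'Group_{2}' refer to the first
+        # and second lists of scans passed to TwoSampleTTestDesign.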
+ if method in ('equalRange', 'equalIndifference'): + contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] + + elif method == 'groupComp': + contrasts = [ + ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) + ] + + # [INFO] Here we simply return the created workflow + return group_level_analysis + + def get_hypotheses_outputs(self): + pass diff --git a/tests/pipelines/test_team_3C6G.py b/tests/pipelines/test_team_3C6G.py new file mode 100644 index 00000000..5917949c --- /dev/null +++ b/tests/pipelines/test_team_3C6G.py @@ -0,0 +1,79 @@ +#!/usr/bin/python +# coding: utf-8 + +""" Tests of the 'narps_open.pipelines.team_3C6G' module. + +Launch this test with PyTest + +Usage: +====== + pytest -q test_team_3C6G.py + pytest -q test_team_3C6G.py -k +""" + +from pytest import helpers, mark +from nipype import Workflow + +from narps_open.pipelines.team_3C6G import PipelineTeam3C6G + +class TestPipelinesTeam3C6G: + """ A class that contains all the unit tests for the PipelineTeam3C6G class.""" + + @staticmethod + @mark.unit_test + def test_create(): + """ Test the creation of a PipelineTeam3C6G object """ + + pipeline = PipelineTeam3C6G() + + # 1 - check the parameters + assert pipeline.fwhm == 6.0 + assert pipeline.team_id == '3C6G' + + # 2 - check workflows + assert isinstance(pipeline.get_preprocessing(), Workflow) + assert pipeline.get_run_level_analysis() is None + assert isinstance(pipeline.get_subject_level_analysis(), Workflow) + group_level = pipeline.get_group_level_analysis() + + assert len(group_level) == 3 + for sub_workflow in group_level: + assert isinstance(sub_workflow, Workflow) + + @staticmethod + @mark.unit_test + def test_outputs(): + """ Test the expected outputs of a PipelineTeam3C6G object """ + pipeline = PipelineTeam3C6G() + # 1 - 1 subject - 1 run outputs + pipeline.subject_list = ['001'] + pipeline.run_list = ['01'] + assert len(pipeline.get_preprocessing_outputs()) == 14 + assert len(pipeline.get_run_level_outputs()) == 0 + #assert len(pipeline.get_subject_level_outputs()) == 7 + #assert len(pipeline.get_group_level_outputs()) == 63 + #assert len(pipeline.get_hypotheses_outputs()) == 18 + + # 2 - 1 subject - 4 runs outputs + pipeline.subject_list = ['001'] + pipeline.run_list = ['01', '02', '03', '04'] + assert len(pipeline.get_preprocessing_outputs()) == 56 + assert len(pipeline.get_run_level_outputs()) == 0 + #assert len(pipeline.get_subject_level_outputs()) == 7 + #assert len(pipeline.get_group_level_outputs()) == 63 + #assert len(pipeline.get_hypotheses_outputs()) == 18 + + # 2 - 4 subjects outputs + pipeline.subject_list = ['001', '002', '003', '004'] + pipeline.run_list = ['01', '02', '03', '04'] + assert len(pipeline.get_preprocessing_outputs()) == 224 + assert len(pipeline.get_run_level_outputs()) == 0 + #assert len(pipeline.get_subject_level_outputs()) == 28 + #assert len(pipeline.get_group_level_outputs()) == 63 + #assert len(pipeline.get_hypotheses_outputs()) == 18 + + @staticmethod + @mark.pipeline_test + def test_execution(): + """ Test the execution of a PipelineTeam3C6G and compare results """ + helpers.test_pipeline_evaluation('3C6G') From 742f967800e88abc3a202f5f8bd70ee529abd713 Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Thu, 21 Sep 2023 14:24:42 -0400 Subject: [PATCH 02/24] [REPRO] correction of bugs for tests. 
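
The subject-level SelectFiles templates now point at the files actually produced by the
preprocessing workflow, and the leftover 'datasink' variable is renamed to 'data_sink'.
The 'swrr' prefix is the stack of prefixes added by the SPM nodes, e.g. for one run:

    subject_id, run_id = '001', '01'
    smoothed = f'swrrsub-{subject_id}_task-MGT_run-{run_id}_bold.nii'
    # s = Smooth, w = Normalize12 (write), r = Coregister (reslice), r = Realign (reslice),
    # read right to left in the order the nodes are applied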
--- narps_open/pipelines/team_3C6G.py | 11 +++++------ tests/pipelines/test_team_3C6G.py | 6 +++--- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 210d468a..46ecae08 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -451,9 +451,9 @@ def get_subject_level_analysis(self): templates = { 'func': join( self.directories.results_dir, - 'preprocess', + 'preprocessing', '_run_id_*_subject_id_{subject_id}', - 'complete_filename_{subject_id}_complete_filename.nii', + 'swrrsub-{subject_id}_task-MGT_run-*_bold.nii', ), 'event': join( self.directories.dataset_dir, @@ -463,11 +463,10 @@ def get_subject_level_analysis(self): ), 'parameters': join( self.directories.results_dir, - 'preprocess', + 'preprocessing', '_run_id_*_subject_id_{subject_id}', - 'complete_filename_{subject_id}_complete_filename.txt', + 'rp_sub-{subject_id}_task-MGT_run-*_bold.txt', ) - } # SelectFiles node - to select necessary files @@ -597,7 +596,7 @@ def get_subject_level_analysis(self): ), ( contrast_estimate, - datasink, + data_sink, [('con_images', 'l1_analysis.@con_images'), ('spmT_images', 'l1_analysis.@spmT_images'), ('spm_mat_file', 'l1_analysis.@spm_mat_file')] diff --git a/tests/pipelines/test_team_3C6G.py b/tests/pipelines/test_team_3C6G.py index 5917949c..db3f268d 100644 --- a/tests/pipelines/test_team_3C6G.py +++ b/tests/pipelines/test_team_3C6G.py @@ -50,7 +50,7 @@ def test_outputs(): pipeline.run_list = ['01'] assert len(pipeline.get_preprocessing_outputs()) == 14 assert len(pipeline.get_run_level_outputs()) == 0 - #assert len(pipeline.get_subject_level_outputs()) == 7 + assert len(pipeline.get_subject_level_outputs()) == 7 #assert len(pipeline.get_group_level_outputs()) == 63 #assert len(pipeline.get_hypotheses_outputs()) == 18 @@ -59,7 +59,7 @@ def test_outputs(): pipeline.run_list = ['01', '02', '03', '04'] assert len(pipeline.get_preprocessing_outputs()) == 56 assert len(pipeline.get_run_level_outputs()) == 0 - #assert len(pipeline.get_subject_level_outputs()) == 7 + assert len(pipeline.get_subject_level_outputs()) == 7 #assert len(pipeline.get_group_level_outputs()) == 63 #assert len(pipeline.get_hypotheses_outputs()) == 18 @@ -68,7 +68,7 @@ def test_outputs(): pipeline.run_list = ['01', '02', '03', '04'] assert len(pipeline.get_preprocessing_outputs()) == 224 assert len(pipeline.get_run_level_outputs()) == 0 - #assert len(pipeline.get_subject_level_outputs()) == 28 + assert len(pipeline.get_subject_level_outputs()) == 28 #assert len(pipeline.get_group_level_outputs()) == 63 #assert len(pipeline.get_hypotheses_outputs()) == 18 From 0e5b38493a7c3d1b2735af62b93a6ae1ad385bd6 Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Thu, 21 Sep 2023 14:54:01 -0400 Subject: [PATCH 03/24] [REPRO] Add group-level pipeline. Implement tests for outputs. 
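
The group-level analysis is implemented as one sub-workflow per method (equalRange,
equalIndifference, groupComp). A minimal sketch of the intended usage, assuming the
package is installed (the MapNode and itertools imports this relies on are added in a
later commit of this series):

    from narps_open.pipelines.team_3C6G import PipelineTeam3C6G

    pipeline = PipelineTeam3C6G()
    pipeline.subject_list = ['001', '002', '003', '004']
    for workflow in pipeline.get_group_level_analysis():
        print(workflow.name)  # l2_analysis_<method>_nsub_4, one per method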
--- narps_open/pipelines/team_3C6G.py | 305 ++++++++++++++++++++---------- tests/pipelines/test_team_3C6G.py | 18 +- 2 files changed, 215 insertions(+), 108 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 46ecae08..071ad80c 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -47,7 +47,7 @@ def __init__(self): self.fwhm = 6.0 self.team_id = '3C6G' - self.contrast_list = ['0001', '0002', '0003'] + self.contrast_list = ['0001', '0002', '0003', '0004', '0005'] def get_vox_dims(volume : list) -> list: ''' @@ -424,7 +424,7 @@ def get_contrasts(): effect_loss = ('effect_of_loss', 'T', conditions, [0, 0, 1]) neg_effect_loss = ('neg_effect_of_loss', 'T', conditions, [0, 0, -1]) - contrasts = [trial, effect_gain, effect_loss] + contrasts = [trial, effect_gain, effect_loss, neg_effect_gain, neg_effect_loss] return contrasts @@ -526,7 +526,8 @@ def get_subject_level_analysis(self): Level1Design( bases = {'hrf': {'derivs': [0, 0]}}, timing_units = 'secs', - interscan_interval = self.tr), + interscan_interval = self.tr, + model_serial_correlations='AR(1)'), name = 'l1_design' ) @@ -700,115 +701,221 @@ def get_group_level_analysis_sub_workflow(self, method): Returns: - group_level_analysis: nipype.WorkFlow """ - # [INFO] The following part stays the same for all preprocessing pipelines + # Compute the number of participants used to do the analysis + nb_subjects = len(self.subject_list) - # Infosource node - iterate over the list of contrasts generated - # by the subject level analysis - info_source = Node( + # Infosource - iterate over the list of contrasts + infosource_groupanalysis = Node( IdentityInterface( - fields = ['contrast_id', 'subjects'], - subjects = self.subject_list - ), - name = 'info_source', - ) - info_source.iterables = [('contrast_id', self.contrast_list)] + fields = ['contrast_id', 'subjects']), + name = 'infosource_groupanalysis') + infosource_groupanalysis.iterables = [('contrast_id', self.contrast_list)] - # Templates to select files node - # [TODO] Change the name of the files depending on the filenames - # of results of first level analysis + # SelectFiles templates = { - 'contrast': join( - self.directories.results_dir, - 'subject_level_analysis', - '_subject_id_*', - 'complete_filename_{contrast_id}_complete_filename.nii', + # Contrast for all participants + 'contrast' : join(self.directories.output_dir, + 'l1_analysis', '_subject_id_*', 'con_{contrast_id}.nii'), + # Participants file + 'participants' : join(self.directories.dataset_dir, 'participants.tsv') + } + + selectfiles_groupanalysis = Node(SelectFiles( + templates, base_directory = self.directories.results_dir, force_list = True), + name = 'selectfiles_groupanalysis') + + # Datasink - save important files + datasink_groupanalysis = Node(DataSink( + base_directory = str(self.directories.output_dir) ), - 'participants': join( - self.directories.dataset_dir, - 'participants.tsv' - ) - } - select_files = Node( - SelectFiles( - templates, - base_directory = self.directories.results_dir, - force_list = True - ), - name = 'select_files', - ) + name = 'datasink_groupanalysis') + + # Function node get_subset_contrasts - select subset of contrasts + sub_contrasts = Node(Function( + function = self.get_subset_contrasts, + input_names = ['file_list', 'subject_list', 'participants_file'], + output_names = [ + 'equalIndifference_id', + 'equalRange_id', + 'equalIndifference_files', + 'equalRange_files']), + name = 'sub_contrasts') + 
sub_contrasts.inputs.subject_list = self.subject_list + + # Estimate model + estimate_model = Node(EstimateModel( + estimation_method = {'Classical':1}), + name = 'estimate_model') + + # Estimate contrasts + estimate_contrast = Node(EstimateContrast( + group_contrast = True), + name = 'estimate_contrast') + + # Create thresholded maps + threshold = MapNode(Threshold( + height_threshold = 0.001, height_threshold_type = 'p-value', + extent_fdr_p_threshold = 0.05, + force_activation = True), + name = 'threshold', + iterfield = ['stat_image', 'contrast_index']) + + l2_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'l2_analysis_{method}_nsub_{nb_subjects}') + l2_analysis.connect([ + (infosource_groupanalysis, selectfiles_groupanalysis, [ + ('contrast_id', 'contrast_id')]), + (selectfiles_groupanalysis, sub_contrasts, [ + ('contrast', 'file_list'), + ('participants', 'participants_file')]), + (estimate_model, estimate_contrast, [('spm_mat_file', 'spm_mat_file'), + ('residual_image', 'residual_image'), + ('beta_images', 'beta_images')]), + (estimate_contrast, threshold, [('spm_mat_file', 'spm_mat_file'), + ('spmT_images', 'stat_image')]), + (estimate_model, datasink_groupanalysis, [ + ('mask_image', f'l2_analysis_{method}_nsub_{nb_subjects}.@mask')]), + (estimate_contrast, datasink_groupanalysis, [ + ('spm_mat_file', f'l2_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), + ('spmT_images', f'l2_analysis_{method}_nsub_{nb_subjects}.@T'), + ('con_images', f'l2_analysis_{method}_nsub_{nb_subjects}.@con')]), + (threshold, datasink_groupanalysis, [ + ('thresholded_map', f'l2_analysis_{method}_nsub_{nb_subjects}.@thresh')])]) - # Datasink node - to save important files - data_sink = Node( - DataSink(base_directory = self.directories.output_dir), - name = 'data_sink', - ) + if method in ('equalRange', 'equalIndifference'): + contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] - # Contrasts node - to select subset of contrasts - sub_contrasts = Node( - Function( - input_names = ['file_list', 'method', 'subject_list', 'participants_file'], - output_names = [ - 'equalIndifference_id', - 'equalRange_id', - 'equalIndifference_files', - 'equalRange_files', - ], - function = self.get_subset_contrasts, - ), - name = 'sub_contrasts', - ) - sub_contrasts.inputs.method = method + threshold.inputs.contrast_index = [1, 2] + threshold.synchronize = True - # [INFO] The following part has to be modified with nodes of the pipeline + ## Specify design matrix + one_sample_t_test_design = Node(OneSampleTTestDesign(), + name = 'one_sample_t_test_design') - # [TODO] For each node, replace 'node_name' by an explicit name, and use it for both: - # - the name of the variable in which you store the Node object - # - the 'name' attribute of the Node - # [TODO] The node_function refers to a NiPype interface that you must import - # at the beginning of the file. 
- node_name = Node( - node_function, - name = 'node_name' - ) + l2_analysis.connect([ + (sub_contrasts, one_sample_t_test_design, [(f'{method}_files', 'in_files')]), + (one_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')])]) - # [INFO] The following part defines the nipype workflow and the connections between nodes - - # Compute the number of participants used to do the analysis - nb_subjects = len(self.subject_list) + elif method == 'groupComp': + contrasts = [ + ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [-1, 1])] + + threshold.inputs.contrast_index = [1] + threshold.synchronize = True + + # Node for the design matrix + two_sample_t_test_design = Node(TwoSampleTTestDesign(), + name = 'two_sample_t_test_design') + + l2_analysis.connect([ + (sub_contrasts, two_sample_t_test_design, [ + ('equalRange_files', 'group1_files'), + ('equalIndifference_files', 'group2_files')]), + (two_sample_t_test_design, estimate_model, [ + ('spm_mat_file', 'spm_mat_file')]) + ]) + + estimate_contrast.inputs.contrasts = contrasts + + return l2_analysis + + def get_group_level_outputs(self): + """ Return all names for the files the group level analysis is supposed to generate. """ + + # Handle equalRange and equalIndifference + parameters = { + 'contrast_id': self.contrast_list, + 'method': ['equalRange', 'equalIndifference'], + 'file': [ + 'con_0001.nii', 'con_0002.nii', 'mask.nii', 'SPM.mat', + 'spmT_0001.nii', 'spmT_0002.nii', + join('_threshold0', 'spmT_0001_thr.nii'), join('_threshold1', 'spmT_0002_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'l2_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_{contrast_id}', + '{file}' + ) - # Declare the workflow - group_level_analysis = Workflow( - base_dir = self.directories.working_dir, - name = f'group_level_analysis_{method}_nsub_{nb_subjects}' - ) - group_level_analysis.connect( - [ - ( - info_source, - select_files, - [('contrast_id', 'contrast_id')], - ), - (info_source, sub_contrasts, [('subjects', 'subject_list')]), - ( - select_files, - sub_contrasts, - [('contrast', 'file_list'), ('participants', 'participants_file')], - ), # Complete with other links between nodes - ] - ) + return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] - # [INFO] Here we define the contrasts used for the group level analysis, depending on the - # method used. 
- if method in ('equalRange', 'equalIndifference'): - contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] + # Handle groupComp + parameters = { + 'contrast_id': self.contrast_list, + 'method': ['groupComp'], + 'file': [ + 'con_0001.nii', 'mask.nii', 'SPM.mat', 'spmT_0001.nii', + join('_threshold0', 'spmT_0001_thr.nii') + ], + 'nb_subjects' : [str(len(self.subject_list))] + } + parameter_sets = product(*parameters.values()) + template = join( + self.directories.output_dir, + 'l2_analysis_{method}_nsub_{nb_subjects}', + '_contrast_id_{contrast_id}', + '{file}' + ) - elif method == 'groupComp': - contrasts = [ - ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [1, -1]) - ] + return_list += [template.format(**dict(zip(parameters.keys(), parameter_values)))\ + for parameter_values in parameter_sets] - # [INFO] Here we simply return the created workflow - return group_level_analysis + return return_list def get_hypotheses_outputs(self): - pass + """ Return all hypotheses output file names. """ + nb_sub = len(self.subject_list) + files = [ + # Hypothesis 1 + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + # Hypothesis 2 + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + # Hypothesis 3 + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + # Hypothesis 4 + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0002', 'spmT_0001.nii'), + # Hypothesis 5 + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0005', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0005', 'spmT_0001.nii'), + # Hypothesis 6 + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0005', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0005', 'spmT_0001.nii'), + # Hypothesis 7 + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + # Hypothesis 8 + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_equalRange_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii'), + # Hypothesis 9 + join(f'l2_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), + join(f'l2_analysis_groupComp_nsub_{nb_sub}', + '_contrast_id_0003', 'spmT_0001.nii') + ] + return [join(self.directories.output_dir, f) for f in files] diff --git a/tests/pipelines/test_team_3C6G.py b/tests/pipelines/test_team_3C6G.py index db3f268d..9ecc554e 100644 --- a/tests/pipelines/test_team_3C6G.py +++ b/tests/pipelines/test_team_3C6G.py @@ -50,27 +50,27 @@ def test_outputs(): pipeline.run_list = ['01'] assert len(pipeline.get_preprocessing_outputs()) == 14 assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) 
== 7 - #assert len(pipeline.get_group_level_outputs()) == 63 - #assert len(pipeline.get_hypotheses_outputs()) == 18 + assert len(pipeline.get_subject_level_outputs()) == 11 + assert len(pipeline.get_group_level_outputs()) == 105 + assert len(pipeline.get_hypotheses_outputs()) == 18 # 2 - 1 subject - 4 runs outputs pipeline.subject_list = ['001'] pipeline.run_list = ['01', '02', '03', '04'] assert len(pipeline.get_preprocessing_outputs()) == 56 assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) == 7 - #assert len(pipeline.get_group_level_outputs()) == 63 - #assert len(pipeline.get_hypotheses_outputs()) == 18 + assert len(pipeline.get_subject_level_outputs()) == 11 + assert len(pipeline.get_group_level_outputs()) == 63 + assert len(pipeline.get_hypotheses_outputs()) == 18 # 2 - 4 subjects outputs pipeline.subject_list = ['001', '002', '003', '004'] pipeline.run_list = ['01', '02', '03', '04'] assert len(pipeline.get_preprocessing_outputs()) == 224 assert len(pipeline.get_run_level_outputs()) == 0 - assert len(pipeline.get_subject_level_outputs()) == 28 - #assert len(pipeline.get_group_level_outputs()) == 63 - #assert len(pipeline.get_hypotheses_outputs()) == 18 + assert len(pipeline.get_subject_level_outputs()) == 44 + assert len(pipeline.get_group_level_outputs()) == 105 + assert len(pipeline.get_hypotheses_outputs()) == 18 @staticmethod @mark.pipeline_test From 6de8fe4d762290a2145d6df092544790e813f6ba Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Thu, 21 Sep 2023 14:59:26 -0400 Subject: [PATCH 04/24] [REPRO] Fix bug imports bugs --- narps_open/pipelines/team_3C6G.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 071ad80c..dc134001 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -13,9 +13,10 @@ # [TODO] Only import modules you use further in te code, remove others from the import section from os.path import join +from itertools import product # [INFO] The import of base objects from Nipype, to create Workflows -from nipype import Node, Workflow # , JoinNode, MapNode +from nipype import Node, Workflow, MapNode # [INFO] a list of interfaces used to manpulate data from nipype.interfaces.utility import IdentityInterface, Function From 822d3e9f37023dfdfc314a146e1d23cda6cee3fe Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Thu, 21 Sep 2023 15:04:41 -0400 Subject: [PATCH 05/24] [REPRO] Errors in test file. --- tests/pipelines/test_team_3C6G.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/pipelines/test_team_3C6G.py b/tests/pipelines/test_team_3C6G.py index 9ecc554e..fe0ee0f5 100644 --- a/tests/pipelines/test_team_3C6G.py +++ b/tests/pipelines/test_team_3C6G.py @@ -60,7 +60,7 @@ def test_outputs(): assert len(pipeline.get_preprocessing_outputs()) == 56 assert len(pipeline.get_run_level_outputs()) == 0 assert len(pipeline.get_subject_level_outputs()) == 11 - assert len(pipeline.get_group_level_outputs()) == 63 + assert len(pipeline.get_group_level_outputs()) == 105 assert len(pipeline.get_hypotheses_outputs()) == 18 # 2 - 4 subjects outputs From 934f4559c6b57dc33fc5372420c1207d87236f82 Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Thu, 21 Sep 2023 15:09:41 -0400 Subject: [PATCH 06/24] [REPRO] Add 3C6G to pipeline reproduced. 
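
Registering the class name lets the rest of the project resolve the pipeline from its
team id. A minimal check, assuming the mapping edited below is the module-level
dictionary named implemented_pipelines (its name lies outside the hunk shown here):

    from narps_open.pipelines import implemented_pipelines
    assert implemented_pipelines['3C6G'] == 'PipelineTeam3C6G'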
--- narps_open/pipelines/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py index c3834fb6..4af20d7d 100644 --- a/narps_open/pipelines/__init__.py +++ b/narps_open/pipelines/__init__.py @@ -21,7 +21,7 @@ '27SS': None, '2T6S': 'PipelineTeam2T6S', '2T7P': None, - '3C6G': None, + '3C6G': 'PipelineTeam3C6G', '3PQ2': None, '3TR7': None, '43FJ': None, From a83379e1edd77f16162ad7e65a4fd4bb98928cd1 Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Thu, 21 Sep 2023 15:18:37 -0400 Subject: [PATCH 07/24] [REPRO] Remove pipeline 3C6G from the list of the reproduced pipeline. Check with Boris to see what needs to be modified to add to pass the pipeline_test --- narps_open/pipelines/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py index 4af20d7d..c3834fb6 100644 --- a/narps_open/pipelines/__init__.py +++ b/narps_open/pipelines/__init__.py @@ -21,7 +21,7 @@ '27SS': None, '2T6S': 'PipelineTeam2T6S', '2T7P': None, - '3C6G': 'PipelineTeam3C6G', + '3C6G': None, '3PQ2': None, '3TR7': None, '43FJ': None, From e1464778758a619ff2bebb9275678a603326f4b4 Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Thu, 21 Sep 2023 16:15:19 -0400 Subject: [PATCH 08/24] [REPRO] Fix some bugs on the subject-level pipeline. --- narps_open/pipelines/team_3C6G.py | 226 +++++++++++++++--------------- 1 file changed, 114 insertions(+), 112 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index dc134001..7d3db658 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -37,6 +37,7 @@ # [INFO] In order to inherit from Pipeline from narps_open.pipelines import Pipeline + class PipelineTeam3C6G(Pipeline): """ A class that defines the pipeline of team 3C6G """ @@ -50,6 +51,116 @@ def __init__(self): self.team_id = '3C6G' self.contrast_list = ['0001', '0002', '0003', '0004', '0005'] + # [INFO] This function is used in the subject level analysis pipelines using SPM + # [TODO] Adapt this example to your specific pipeline + + def get_subject_infos(event_files: list, runs: list): + """ + -MGT task (taken from .tsv files, duration = 4) with canonical HRF (no derivatives) + -Parametric modulator gain (from "gain" column in event .tsv file) + - Parametric modulator loss (from "loss" column in event .tsv file) + -highpass DCT filtering in SPM (using default period of 1/128 s) + -6 movement regressors from realignment + + Create Bunchs for specifySPMModel. + + Parameters : + - event_files: list of events files (one per run) for the subject + - runs: list of runs to use + + Returns : + - subject_info : list of Bunch for 1st level analysis. 
+ """ + from nipype.interfaces.base import Bunch + + condition_names = ['trial'] + onset = {} + duration = {} + weights_gain = {} + weights_loss = {} + + # Loop over number of runs + for run_id in range(len(runs)): + + # Create dictionary items with empty lists + onset.update({s + '_run' + str(run_id + 1): [] for s in condition_names}) + duration.update({s + '_run' + str(run_id + 1): [] for s in condition_names}) + weights_gain.update({'gain_run' + str(run_id + 1): []}) + weights_loss.update({'loss_run' + str(run_id + 1): []}) + + with open(event_files[run_id], 'rt') as event_file: + next(event_file) # skip the header + + for line in event_file: + info = line.strip().split() + + for condition in condition_names: + val = condition + '_run' + str(run_id + 1) # trial_run1 or accepting_run1 + val_gain = 'gain_run' + str(run_id + 1) # gain_run1 + val_loss = 'loss_run' + str(run_id + 1) # loss_run1 + if condition == 'trial': + onset[val].append(float(info[0])) # onsets for trial_run1 + duration[val].append(float(4)) + weights_gain[val_gain].append(float(info[2])) + weights_loss[val_loss].append(float(info[3])) + + # Bunching is done per run, i.e. trial_run1, trial_run2, etc. + # But names must not have '_run1' etc because we concatenate runs + subject_info = [] + for run_id in range(len(runs)): + + conditions = [s + '_run' + str(run_id + 1) for s in condition_names] + gain = 'gain_run' + str(run_id + 1) + loss = 'loss_run' + str(run_id + 1) + + subject_info.insert( + run_id, + Bunch( + conditions = condition_names, + onsets = [onset[c] for c in conditions], + durations = [duration[c] for c in conditions], + amplitudes = None, + tmod = None, + pmod = [ + Bunch( + name = ['gain', 'loss'], + poly = [1, 1], + param = [weights_gain[gain], weights_loss[loss]], + ), + None, + ], + regressor_names = None, + regressors = None, + ), + ) + + return subject_info + + # [INFO] This function creates the contrasts that will be analyzed in the first level analysis + # [TODO] Adapt this example to your specific pipeline + def get_contrasts(): + """ + Create the list of tuples that represents contrasts. + Each contrast is in the form : + (Name,Stat,[list of condition names],[weights on those conditions]) + + Returns: + - contrasts: list of tuples, list of contrasts to analyze + """ + # List of condition names + conditions = ['trial', 'trialxgain^1', 'trialxloss^1'] + + # Create contrasts + trial = ('trial', 'T', conditions, [1, 0, 0]) + effect_gain = ('effect_of_gain', 'T', conditions, [0, 1, 0]) + neg_effect_gain = ('neg_effect_of_gain', 'T', conditions, [0, -1, 0]) + effect_loss = ('effect_of_loss', 'T', conditions, [0, 0, 1]) + neg_effect_loss = ('neg_effect_of_loss', 'T', conditions, [0, 0, -1]) + + contrasts = [trial, effect_gain, effect_loss, neg_effect_gain, neg_effect_loss] + + return contrasts + def get_vox_dims(volume : list) -> list: ''' Function that gives the voxel dimension of an image. 
@@ -320,115 +431,6 @@ def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None - # [INFO] This function is used in the subject level analysis pipelines using SPM - # [TODO] Adapt this example to your specific pipeline - def get_subject_infos(event_files: list, runs: list): - """ - -MGT task (taken from .tsv files, duration = 4) with canonical HRF (no derivatives) - -Parametric modulator gain (from "gain" column in event .tsv file) - - Parametric modulator loss (from "loss" column in event .tsv file) - -highpass DCT filtering in SPM (using default period of 1/128 s) - -6 movement regressors from realignment - - Create Bunchs for specifySPMModel. - - Parameters : - - event_files: list of events files (one per run) for the subject - - runs: list of runs to use - - Returns : - - subject_info : list of Bunch for 1st level analysis. - """ - from nipype.interfaces.base import Bunch - - condition_names = ['trial'] - onset = {} - duration = {} - weights_gain = {} - weights_loss = {} - - # Loop over number of runs - for run_id in range(len(runs)): - - # Create dictionary items with empty lists - onset.update({s + '_run' + str(run_id + 1): [] for s in condition_names}) - duration.update({s + '_run' + str(run_id + 1): [] for s in condition_names}) - weights_gain.update({'gain_run' + str(run_id + 1): []}) - weights_loss.update({'loss_run' + str(run_id + 1): []}) - - with open(event_files[run_id], 'rt') as event_file: - next(event_file) # skip the header - - for line in event_file: - info = line.strip().split() - - for condition in condition_names: - val = condition + '_run' + str(run_id + 1) # trial_run1 or accepting_run1 - val_gain = 'gain_run' + str(run_id + 1) # gain_run1 - val_loss = 'loss_run' + str(run_id + 1) # loss_run1 - if condition == 'trial': - onset[val].append(float(info[0])) # onsets for trial_run1 - duration[val].append(float(4)) - weights_gain[val_gain].append(float(info[2])) - weights_loss[val_loss].append(float(info[3])) - - # Bunching is done per run, i.e. trial_run1, trial_run2, etc. - # But names must not have '_run1' etc because we concatenate runs - subject_info = [] - for run_id in range(len(runs)): - - conditions = [s + '_run' + str(run_id + 1) for s in condition_names] - gain = 'gain_run' + str(run_id + 1) - loss = 'loss_run' + str(run_id + 1) - - subject_info.insert( - run_id, - Bunch( - conditions = condition_names, - onsets = [onset[c] for c in conditions], - durations = [duration[c] for c in conditions], - amplitudes = None, - tmod = None, - pmod = [ - Bunch( - name = ['gain', 'loss'], - poly = [1, 1], - param = [weights_gain[gain], weights_loss[loss]], - ), - None, - ], - regressor_names = None, - regressors = None, - ), - ) - - return subject_info - - # [INFO] This function creates the contrasts that will be analyzed in the first level analysis - # [TODO] Adapt this example to your specific pipeline - def get_contrasts(): - """ - Create the list of tuples that represents contrasts. 
- Each contrast is in the form : - (Name,Stat,[list of condition names],[weights on those conditions]) - - Returns: - - contrasts: list of tuples, list of contrasts to analyze - """ - # List of condition names - conditions = ['trial', 'trialxgain^1', 'trialxloss^1'] - - # Create contrasts - trial = ('trial', 'T', conditions, [1, 0, 0]) - effect_gain = ('effect_of_gain', 'T', conditions, [0, 1, 0]) - neg_effect_gain = ('neg_effect_of_gain', 'T', conditions, [0, -1, 0]) - effect_loss = ('effect_of_loss', 'T', conditions, [0, 0, 1]) - neg_effect_loss = ('neg_effect_of_loss', 'T', conditions, [0, 0, -1]) - - contrasts = [trial, effect_gain, effect_loss, neg_effect_gain, neg_effect_loss] - - return contrasts - def get_subject_level_analysis(self): """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ @@ -451,7 +453,7 @@ def get_subject_level_analysis(self): # [TODO] Change the name of the files depending on the filenames of results of preprocessing templates = { 'func': join( - self.directories.results_dir, + self.directories.output_dir, 'preprocessing', '_run_id_*_subject_id_{subject_id}', 'swrrsub-{subject_id}_task-MGT_run-*_bold.nii', @@ -463,7 +465,7 @@ def get_subject_level_analysis(self): 'sub-{subject_id}_task-MGT_run-*_events.tsv', ), 'parameters': join( - self.directories.results_dir, + self.directories.output_dir, 'preprocessing', '_run_id_*_subject_id_{subject_id}', 'rp_sub-{subject_id}_task-MGT_run-*_bold.txt', @@ -472,7 +474,7 @@ def get_subject_level_analysis(self): # SelectFiles node - to select necessary files select_files = Node( - SelectFiles(templates, base_directory = self.directories.dataset_dir), + SelectFiles(templates, base_directory = self.directories.output_dir), name = 'select_files' ) From 8f85cf28c85cfc22677f68377c9c9d65d32f9a81 Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Mon, 12 Feb 2024 10:41:00 +0100 Subject: [PATCH 09/24] [Test] --- narps_open/pipelines/team_3C6G.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 7d3db658..ccebc659 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python # coding: utf-8 """ From 91f1291247aa4bd013a51b25aa5b6b91a71eaf8b Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Mon, 12 Feb 2024 10:48:16 +0100 Subject: [PATCH 10/24] Add team name to init.py --- narps_open/pipelines/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/narps_open/pipelines/__init__.py b/narps_open/pipelines/__init__.py index c3834fb6..4af20d7d 100644 --- a/narps_open/pipelines/__init__.py +++ b/narps_open/pipelines/__init__.py @@ -21,7 +21,7 @@ '27SS': None, '2T6S': 'PipelineTeam2T6S', '2T7P': None, - '3C6G': None, + '3C6G': 'PipelineTeam3C6G', '3PQ2': None, '3TR7': None, '43FJ': None, From 04d66caa5a9a24d9d5d0f93bac92c6182d56513a Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Mon, 12 Feb 2024 11:02:30 +0100 Subject: [PATCH 11/24] [REPRO] Change path to SPM due to changes in versions --- narps_open/pipelines/team_3C6G.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index ccebc659..b141188d 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -30,6 +30,7 @@ Level1Design, OneSampleTTestDesign, TwoSampleTTestDesign, EstimateModel, EstimateContrast, Threshold ) +from 
nipype.interfaces.spm.base import Info as SPMInfo from nipype.interfaces.fsl import ( ExtractROI ) @@ -265,12 +266,15 @@ def get_preprocessing(self): # 3 - Unified segmentation using tissue probability maps in SPM12. # Unified segmentation in SPM12 to MNI space (the MNI-space tissue probability maps used in segmentation) using default parameters. # Bias-field correction in the context of unified segmentation in SPM12. - tissue1 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 1), 1, (True,False), (True, False)] - tissue2 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 2), 1, (True,False), (True, False)] - tissue3 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 3), 2, (True,False), (True, False)] - tissue4 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 4), 3, (True,False), (True, False)] - tissue5 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 5), 4, (True,False), (True, False)] - tissue6 = [('/opt/spm12-r7771/spm12_mcr/spm12/tpm/TPM.nii', 6), 2, (True,False), (True, False)] + # Get SPM Tissue Probability Maps file + spm_tissues_file = join(SPMInfo.getinfo()['path'], 'tpm', 'TPM.nii') + + tissue1 = [(spm_tissues_file, 1), 1, (True,False), (True, False)] + tissue2 = [(spm_tissues_file, 2), 1, (True,False), (True, False)] + tissue3 = [(spm_tissues_file, 3), 2, (True,False), (True, False)] + tissue4 = [(spm_tissues_file, 4), 3, (True,False), (True, False)] + tissue5 = [(spm_tissues_file, 5), 4, (True,False), (True, False)] + tissue6 = [(spm_tissues_file, 6), 2, (True,False), (True, False)] tissue_list = [tissue1, tissue2, tissue3, tissue4, tissue5, tissue6] segment = Node( From c31742e6d3c5fb8b2bb7d8a8d6b6520e3e007bbd Mon Sep 17 00:00:00 2001 From: elodiegermani1 Date: Mon, 12 Feb 2024 11:20:10 +0100 Subject: [PATCH 12/24] Change base directory for selectfiles --- narps_open/pipelines/team_3C6G.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index b141188d..0aefd77f 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -478,7 +478,7 @@ def get_subject_level_analysis(self): # SelectFiles node - to select necessary files select_files = Node( - SelectFiles(templates, base_directory = self.directories.output_dir), + SelectFiles(templates, base_directory = self.directories.dataset_dir), name = 'select_files' ) @@ -728,7 +728,7 @@ def get_group_level_analysis_sub_workflow(self, method): } selectfiles_groupanalysis = Node(SelectFiles( - templates, base_directory = self.directories.results_dir, force_list = True), + templates, base_directory = self.directories.dataset_dir, force_list = True), name = 'selectfiles_groupanalysis') # Datasink - save important files From fce373b8602ac356c38d7d2858f495b04169cf0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 13 Dec 2024 14:48:54 +0100 Subject: [PATCH 13/24] Refactoring preprocessing + first level [skip ci] --- narps_open/pipelines/team_3C6G.py | 598 +++++++++--------------------- 1 file changed, 168 insertions(+), 430 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 0aefd77f..c56487ff 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -1,29 +1,15 @@ #!/usr/bin/python # coding: utf-8 -""" -This template can be use to reproduce a pipeline using SPM as main software. - -- Replace all occurrences of 3C6G by the actual id of the team. 
-- All lines starting with [INFO], are meant to help you during the reproduction, these can be removed -eventually. -- Also remove lines starting with [TODO], once you did what they suggested. -""" - -# [TODO] Only import modules you use further in te code, remove others from the import section +""" Write the work of NARPS team 3C6G using Nipype """ from os.path import join from itertools import product -# [INFO] The import of base objects from Nipype, to create Workflows from nipype import Node, Workflow, MapNode - -# [INFO] a list of interfaces used to manpulate data from nipype.interfaces.utility import IdentityInterface, Function from nipype.interfaces.io import SelectFiles, DataSink from nipype.algorithms.misc import Gunzip - -# [INFO] a list of SPM-specific interfaces from nipype.algorithms.modelgen import SpecifySPMModel from nipype.interfaces.spm import ( Realign, Coregister, NewSegment, Normalize12, Smooth, @@ -35,27 +21,30 @@ ExtractROI ) -# [INFO] In order to inherit from Pipeline from narps_open.pipelines import Pipeline - +from narps_open.core.common import get_voxel_dimensions +from narps_open.data.task import TaskInformation class PipelineTeam3C6G(Pipeline): """ A class that defines the pipeline of team 3C6G """ def __init__(self): super().__init__() - # [INFO] Remove the init method completely if unused - # [TODO] Init the attributes of the pipeline, if any other than the ones defined - # in the pipeline class - self.fwhm = 6.0 self.team_id = '3C6G' self.contrast_list = ['0001', '0002', '0003', '0004', '0005'] - # [INFO] This function is used in the subject level analysis pipelines using SPM - # [TODO] Adapt this example to your specific pipeline + # Create contrasts + conditions = ['trial', 'trialxgain^1', 'trialxloss^1'] + self.subject_level_contrasts = [ + ['trial', 'T', conditions, [1, 0, 0]], + ['effect_of_gain', 'T', conditions, [0, 1, 0]], + ['neg_effect_of_gain', 'T', conditions, [0, -1, 0]], + ['effect_of_loss', 'T', conditions, [0, 0, 1]], + ['neg_effect_of_loss', 'T', conditions, [0, 0, -1]] + ] - def get_subject_infos(event_files: list, runs: list): + def get_subject_information(event_files: list, runs: list): """ -MGT task (taken from .tsv files, duration = 4) with canonical HRF (no derivatives) -Parametric modulator gain (from "gain" column in event .tsv file) @@ -137,261 +126,104 @@ def get_subject_infos(event_files: list, runs: list): return subject_info - # [INFO] This function creates the contrasts that will be analyzed in the first level analysis - # [TODO] Adapt this example to your specific pipeline - def get_contrasts(): - """ - Create the list of tuples that represents contrasts. - Each contrast is in the form : - (Name,Stat,[list of condition names],[weights on those conditions]) - - Returns: - - contrasts: list of tuples, list of contrasts to analyze - """ - # List of condition names - conditions = ['trial', 'trialxgain^1', 'trialxloss^1'] - - # Create contrasts - trial = ('trial', 'T', conditions, [1, 0, 0]) - effect_gain = ('effect_of_gain', 'T', conditions, [0, 1, 0]) - neg_effect_gain = ('neg_effect_of_gain', 'T', conditions, [0, -1, 0]) - effect_loss = ('effect_of_loss', 'T', conditions, [0, 0, 1]) - neg_effect_loss = ('neg_effect_of_loss', 'T', conditions, [0, 0, -1]) - - contrasts = [trial, effect_gain, effect_loss, neg_effect_gain, neg_effect_loss] - - return contrasts - - def get_vox_dims(volume : list) -> list: - ''' - Function that gives the voxel dimension of an image. 
- Not used here but if we use it, modify the connection to : - (?, normalize_func, [('?', 'apply_to_files'), - (('?', get_vox_dims), - 'write_voxel_sizes')]) - Args: - volume: list | str - List of str or str that represent a path to a Nifti image. - Returns: - list: - size of the voxels in the volume or in the first volume of the list. - ''' - import nibabel as nb - if isinstance(volume, list): - volume = volume[0] - nii = nb.load(volume) - hdr = nii.header - voxdims = hdr.get_zooms() - return [float(voxdims[0]), float(voxdims[1]), float(voxdims[2])] - def get_preprocessing(self): """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ - # [INFO] The following part stays the same for all preprocessing pipelines + # Workflow initialization + preprocessing = Workflow( + base_dir = self.directories.working_dir, + name = 'preprocessing' + ) - # IdentityInterface node - allows to iterate over subjects and runs - info_source = Node( - IdentityInterface(fields=['subject_id', 'run_id']), - name='info_source' + # IDENTITY INTERFACE - allows to iterate over subjects and runs + information_source = Node(IdentityInterface( + fields = ['subject_id', 'run_id']), + name = 'information_source' ) - info_source.iterables = [ + information_source.iterables = [ ('subject_id', self.subject_list), ('run_id', self.run_list), ] - # Templates to select files node + # SELECT FILES - to select necessary files file_templates = { - 'anat': join( - 'sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz' - ), - 'func': join( - 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz' - ) + 'anat': join('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz'), + 'func': join('sub-{subject_id}', 'func', + 'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz') } + select_files = Node(SelectFiles(file_templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.dataset_dir + preprocessing.connect(information_source, 'subject_id', select_files, 'subject_id') + preprocessing.connect(information_source, 'run_id', select_files, 'run_id') + + # GUNZIP input files + gunzip_func = Node(Gunzip(), name = 'gunzip_func') + gunzip_anat = Node(Gunzip(), name = 'gunzip_anat') + preprocessing.connect(select_files, 'func', gunzip_func, 'in_file') + preprocessing.connect(select_files, 'anat', gunzip_anat, 'in_file') + + # REALIGN - rigid-body realignment in SPM12 using 1st scan as referenced scan + # and normalized mutual information. + realign = Node(Realign(), name = 'realign') + realign.inputs.register_to_mean = False + preprocessing.connect(gunzip_func, 'out_file', realign, 'in_files') + + # EXTRACTROI - extracting the first image of func + extract_first_image = Node(ExtractROI(), name = 'extract_first_image') + extract_first_image.inputs.t_min = 1 + extract_first_image.inputs.t_size = 1 + extract_first_image.inputs.output_type='NIFTI' + preprocessing.connect(realign, 'realigned_files', extract_first_image, 'in_file') + + # COREGISTER - Co-registration in SPM12 using default parameters. 
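        # Note on the connections below: in SPM's Coregister interface, 'target' is the
        # fixed reference image (here the subject's unzipped T1w), 'source' is the image
        # moved to match it (here the single volume extracted from the realigned series
        # just above), and the estimated rigid-body transform is also applied to every
        # file in 'apply_to_files' (here the full realigned time series). 'nmi' selects
        # normalised mutual information as the cost function.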
+ coregister = Node(Coregister(), name = 'coregister') + coregister.inputs.cost_function='nmi' + preprocessing.connect(extract_first_image, 'roi_file', coregister, 'source') + preprocessing.connect(gunzip_anat, 'out_file', coregister, 'target') + preprocessing.connect(realign, 'realigned_files', coregister, 'apply_to_files') - # SelectFiles node - to select necessary files - select_files = Node( - SelectFiles( - file_templates, - base_directory = self.directories.dataset_dir - ), - name='select_files' - ) - - # DataSink Node - store the wanted results in the wanted repository - data_sink = Node( - DataSink( - base_directory = self.directories.output_dir - ), - name='data_sink', - ) - - # [INFO] The following part has to be modified with nodes of the pipeline - gunzip_func = Node ( - Gunzip(), - name='gunzip_func' - ) - - gunzip_anat = Node ( - Gunzip(), - name='gunzip_anat' - ) - - # 1 - Rigid-body realignment in SPM12 using 1st scan as referenced scan and normalized mutual information. - realign = Node( - Realign( - register_to_mean=False - ), - name='realign' - ) - - # Extract 1st image - extract_first = Node( - ExtractROI( - t_min = 1, - t_size = 1, - output_type='NIFTI' - ), - name = 'extract_first' - ) - - # 2 - Co-registration in SPM12 using default parameters. - coregister = Node( - Coregister( - cost_function='nmi' - ), - name = 'coregister' - ) - - # 3 - Unified segmentation using tissue probability maps in SPM12. - # Unified segmentation in SPM12 to MNI space (the MNI-space tissue probability maps used in segmentation) using default parameters. - # Bias-field correction in the context of unified segmentation in SPM12. # Get SPM Tissue Probability Maps file spm_tissues_file = join(SPMInfo.getinfo()['path'], 'tpm', 'TPM.nii') - tissue1 = [(spm_tissues_file, 1), 1, (True,False), (True, False)] - tissue2 = [(spm_tissues_file, 2), 1, (True,False), (True, False)] - tissue3 = [(spm_tissues_file, 3), 2, (True,False), (True, False)] - tissue4 = [(spm_tissues_file, 4), 3, (True,False), (True, False)] - tissue5 = [(spm_tissues_file, 5), 4, (True,False), (True, False)] - tissue6 = [(spm_tissues_file, 6), 2, (True,False), (True, False)] - tissue_list = [tissue1, tissue2, tissue3, tissue4, tissue5, tissue6] - - segment = Node( - NewSegment( - write_deformation_fields = [True, True], - tissues = tissue_list - ), - name = 'segment' - ) - - # 4 - Spatial normalization of functional images - normalize = Node( - Normalize12( - jobtype = 'write' - ), - name = 'normalize' - ) - - # 5 - 6 mm fixed FWHM smoothing in MNI volume - smooth = Node( - Smooth( - fwhm=self.fwhm), - name = 'smooth' - ) - - # [INFO] The following part defines the nipype workflow and the connections between nodes - - preprocessing = Workflow( - base_dir = self.directories.working_dir, - name = 'preprocessing' - ) - - # [TODO] Add the connections the workflow needs - # [INFO] Input and output names can be found on NiPype documentation + # NEW SEGMENT - Unified segmentation using tissue probability maps in SPM12. + # Unified segmentation in SPM12 to MNI space + # (the MNI-space tissue probability maps used in segmentation) using default parameters. + # Bias-field correction in the context of unified segmentation in SPM12. 
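        # Each entry of 'tissues' below follows Nipype's NewSegment convention:
        # ((TPM file, 1-based tissue index), number of Gaussians,
        #  (save native-space map, save DARTEL-imported map),
        #  (save unmodulated normalised map, save modulated normalised map)).
        # 'write_deformation_fields = [True, True]' requests both the inverse and the
        # forward deformation field; the forward field feeds Normalize12 further down.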
+ segmentation = Node(NewSegment(), name = 'segmentation') + segmentation.inputs.write_deformation_fields = [True, True] + segmentation.inputs.tissues = [ + [(spm_tissues_file, 1), 1, (True,False), (True, False)], + [(spm_tissues_file, 2), 1, (True,False), (True, False)], + [(spm_tissues_file, 3), 2, (True,False), (True, False)], + [(spm_tissues_file, 4), 3, (True,False), (True, False)], + [(spm_tissues_file, 5), 4, (True,False), (True, False)], + [(spm_tissues_file, 6), 2, (True,False), (True, False)] + ] + preprocessing.connect(gunzip_anat, 'out_file', segmentation, 'channel_files') + + # NORMALIZE12 - Spatial normalization of functional images + normalize = Node(Normalize12(), name = 'normalize') + normalize.inputs.jobtype = 'write' + preprocessing.connect(segment, 'forward_deformation_field', normalize, 'deformation_file') + preprocessing.connect(coregister, 'coregistered_files', normalize, 'apply_to_files') + + # SMOOTHING - 6 mm fixed FWHM smoothing in MNI volume + smoothing = Node(Smooth(), name = 'smoothing') + smoothing.inputs.fwhm = self.fwhm + preprocessing.connect(normalize, 'normalized_files', smoothing, 'in_files') + + # DATASINK - store the wanted results in the wanted repository + data_sink = Node(DataSink(), name='data_sink') + data_sink.inputs.base_directory = self.directories.output_dir preprocessing.connect( - [ - ( - info_source, - select_files, - [('subject_id', 'subject_id'), ('run_id', 'run_id')], - ), - ( - select_files, - gunzip_anat, - [('anat', 'in_file')] - ), - ( - select_files, - gunzip_func, - [('func', 'in_file')] - ), - ( - gunzip_func, - realign, - [('out_file', 'in_files')], - ), - ( - realign, - extract_first, - [('realigned_files', 'in_file')], - ), - ( - extract_first, - coregister, - [('roi_file', 'source')], - ), - ( - realign, - coregister, - [('realigned_files', 'apply_to_files')], - ), - ( - gunzip_anat, - coregister, - [('out_file', 'target')], - ), - ( - gunzip_anat, - segment, - [('out_file', 'channel_files')], - ), - ( - segment, - normalize, - [('forward_deformation_field', 'deformation_file')], - ), - ( - coregister, - normalize, - [('coregistered_files', 'apply_to_files')], - ), - ( - normalize, - smooth, - [('normalized_files', 'in_files')], - ), - ( - smooth, - data_sink, - [('smoothed_files', 'preprocessing.@smoothed')], - ), - ( - realign, - data_sink, - [('realignment_parameters', 'preprocessing.@motion_parameters')], - ), - ( - segment, - data_sink, - [('native_class_images', 'preprocessing.@segmented'), - ('normalized_class_images', 'preprocessing.@segmented_normalized')], - ), - - ] - ) + segmentation, 'native_class_images', data_sink, 'preprocessing.@segmented') + preprocessing.connect( + segmentation, 'normalized_class_images', + data_sink, 'preprocessing.@segmented_normalized') + preprocessing.connect( + realign, 'realignment_parameters', data_sink, 'preprocessing.@motion_parameters') + preprocessing.connect(smoothing, 'smoothed_files', data_sink, 'preprocessing.@smoothed') - # [INFO] Here we simply return the created workflow return preprocessing def get_preprocessing_outputs(self): @@ -429,8 +261,6 @@ def get_preprocessing_outputs(self): return return_list - - # [INFO] There was no run level analysis for the pipelines using SPM def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None @@ -438,200 +268,110 @@ def get_run_level_analysis(self): def get_subject_level_analysis(self): """ Return a Nipype workflow describing the subject level analysis part of 
the pipeline """ - # [INFO] The following part stays the same for all pipelines - - # Infosource Node - To iterate on subjects - info_source = Node( - IdentityInterface( - fields = ['subject_id', 'dataset_dir', 'results_dir', 'working_dir', 'run_list'], - dataset_dir = self.directories.dataset_dir, - results_dir = self.directories.results_dir, - working_dir = self.directories.working_dir, - run_list = self.run_list - ), - name='info_source', + # Workflow initialization + subject_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = 'subject_level_analysis' ) - info_source.iterables = [('subject_id', self.subject_list)] - # Templates to select files node - # [TODO] Change the name of the files depending on the filenames of results of preprocessing + # IDENTITY INTERFACE - Allows to iterate on subjects + information_source = Node(IdentityInterface(fields = ['subject_id']), name = 'information_source') + information_source.iterables = [('subject_id', self.subject_list)] + + # SELECTFILES - to select necessary files templates = { - 'func': join( - self.directories.output_dir, - 'preprocessing', + 'func': join(self.directories.output_dir, 'preprocessing', '_run_id_*_subject_id_{subject_id}', 'swrrsub-{subject_id}_task-MGT_run-*_bold.nii', ), - 'event': join( - self.directories.dataset_dir, - 'sub-{subject_id}', - 'func', + 'event': join(self.directories.dataset_dir, 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_events.tsv', ), - 'parameters': join( - self.directories.output_dir, - 'preprocessing', + 'parameters': join(self.directories.output_dir, 'preprocessing', '_run_id_*_subject_id_{subject_id}', 'rp_sub-{subject_id}_task-MGT_run-*_bold.txt', ) } + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.dataset_dir + subject_level_analysis.connect(information_source, 'subject_id', select_files, 'subject_id') - # SelectFiles node - to select necessary files - select_files = Node( - SelectFiles(templates, base_directory = self.directories.dataset_dir), - name = 'select_files' - ) - - # DataSink Node - store the wanted results in the wanted repository - data_sink = Node( - DataSink(base_directory = self.directories.output_dir), - name = 'data_sink' - ) - - # [INFO] This is the node executing the get_subject_infos_spm function - # Subject Infos node - get subject specific condition information - subject_infos = Node( + # FUNCTION node get_subject_information - get subject specific condition information + subject_information = Node( Function( input_names = ['event_files', 'runs'], output_names = ['subject_info'], - function = self.get_subject_infos, - ), - name = 'subject_infos', - ) - subject_infos.inputs.runs = self.run_list - - # [INFO] This is the node executing the get_contrasts function - # Contrasts node - to get contrasts - contrasts = Node( - Function( - output_names = ['contrasts'], - function = self.get_contrasts, + function = self.get_subject_information, ), - name = 'contrasts', - ) - - # [INFO] The following part has to be modified with nodes of the pipeline - - # [TODO] For each node, replace 'node_name' by an explicit name, and use it for both: - # - the name of the variable in which you store the Node object - # - the 'name' attribute of the Node - # [TODO] The node_function refers to a NiPype interface that you must import - # at the beginning of the file. 
- # SpecifyModel - generates SPM-specific Model - specify_model = Node( - SpecifySPMModel( - concatenate_runs = True, - input_units = 'secs', - output_units = 'secs', - time_repetition = self.tr, - high_pass_filter_cutoff = 128), - name = 'specify_model' - ) - - # Level1Design - generates an SPM design matrix - l1_design = Node( - Level1Design( - bases = {'hrf': {'derivs': [0, 0]}}, - timing_units = 'secs', - interscan_interval = self.tr, - model_serial_correlations='AR(1)'), - name = 'l1_design' - ) - - # EstimateModel - estimate the parameters of the model - l1_estimate = Node( - EstimateModel( - estimation_method = {'Classical': 1}), - name = 'l1_estimate' + name = 'subject_information', ) + subject_information.inputs.runs = self.run_list + subject_level_analysis.connect(select_files, 'event', subject_information, 'event_files') + + # SPECIFY MODEL - generates SPM-specific Model + specify_model = Node(SpecifySPMModel(), name = 'specify_model') + specify_model.inputs.concatenate_runs = True + specify_model.inputs.input_units = 'secs' + specify_model.inputs.output_units = 'secs' + specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime'] + specify_model.inputs.high_pass_filter_cutoff = 128 + subject_level_analysis.connect( + subject_information, 'subject_info', specify_model, 'subject_info') + subject_level_analysis.connect(select_files, 'func', specify_model, 'functional_runs') + subject_level_analysis.connect( + select_files, 'parameters', specify_model, 'realignment_parameters') + + # LEVEL1 DESIGN - generates an SPM design matrix + model_design = Node(Level1Design(), name = 'model_design') + model_design.inputs.bases = {'hrf': {'derivs': [0, 0]}} + model_design.inputs.timing_units = 'secs' + model_design.inputs.interscan_interval = TaskInformation()['RepetitionTime'] + model_design.inputs.model_serial_correlations = 'AR(1)' + subject_level_analysis.connect(specify_model, 'session_info', model_design, 'session_info') + + # ESTIMATE MODEL - estimate the parameters of the model + model_estimate = Node(EstimateModel(), name = 'model_estimate') + model_estimate.inputs.estimation_method = {'Classical': 1} + subject_level_analysis.connect( + model_design, 'spm_mat_file', model_estimate, 'spm_mat_file') + + # ESTIMATE CONTRAST - estimates contrasts + contrast_estimate = Node(EstimateContrast(), name = 'contrast_estimate') + contrast_estimate.inputs.contrasts = self.subject_level_contrasts + subject_level_analysis.connect( + model_estimate, 'spm_mat_file', contrast_estimate, 'spm_mat_file') + subject_level_analysis.connect( + model_estimate, 'beta_images', contrast_estimate, 'beta_images') + subject_level_analysis.connect( + model_estimate, 'residual_image', contrast_estimate, 'residual_image') - # EstimateContrast - estimates contrasts - contrast_estimate = Node( - EstimateContrast(), - name = 'contrast_estimate' - ) - - # [INFO] The following part defines the nipype workflow and the connections between nodes - - subject_level_analysis = Workflow( - base_dir = self.directories.working_dir, - name = 'subject_level_analysis' - ) - # [TODO] Add the connections the workflow needs - # [INFO] Input and output names can be found on NiPype documentation - subject_level_analysis.connect([ - ( - info_source, - select_files, - [('subject_id', 'subject_id')] - ), - ( - select_files, - subject_infos, - [('event', 'event_files')] - ), - ( - subject_infos, - specify_model, - [('subject_info', 'subject_info')] - ), - ( - contrasts, - contrast_estimate, - [('contrasts', 'contrasts')] - ), - ( 
- select_files, - specify_model, - [('func', 'functional_runs'), ('parameters', 'realignment_parameters')] - ), - ( - specify_model, - l1_design, - [('session_info', 'session_info')] - ), - ( - l1_design, - l1_estimate, - [('spm_mat_file', 'spm_mat_file')] - ), - ( - l1_estimate, - contrast_estimate, - [('spm_mat_file', 'spm_mat_file'), - ('beta_images', 'beta_images'), - ('residual_image', 'residual_image')] - ), - ( - contrast_estimate, - data_sink, - [('con_images', 'l1_analysis.@con_images'), - ('spmT_images', 'l1_analysis.@spmT_images'), - ('spm_mat_file', 'l1_analysis.@spm_mat_file')] - ), - ]) + # DataSink Node - store the wanted results in the wanted repository + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + subject_level_analysis.connect( + contrast_estimate, 'con_images', data_sink, 'subject_level_analysis.@con_images') + subject_level_analysis.connect( + contrast_estimate, 'spmT_images', data_sink, 'subject_level_analysis.@spmT_images') + subject_level_analysis.connect( + contrast_estimate, 'spm_mat_file', data_sink, 'subject_level_analysis.@spm_mat_file') - # [INFO] Here we simply return the created workflow return subject_level_analysis def get_subject_level_outputs(self): """ Return the names of the files the subject level analysis is supposed to generate. """ # Contrat maps - templates = [join( - self.directories.output_dir, - 'l1_analysis', '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ + templates = [join(self.directories.output_dir, 'subject_level_analysis', + '_subject_id_{subject_id}', f'con_{contrast_id}.nii')\ for contrast_id in self.contrast_list] # SPM.mat file - templates += [join( - self.directories.output_dir, - 'l1_analysis', '_subject_id_{subject_id}', 'SPM.mat')] + templates += [join(self.directories.output_dir, 'subject_level_analysis', + '_subject_id_{subject_id}', 'SPM.mat')] # spmT maps - templates += [join( - self.directories.output_dir, - 'l1_analysis', '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ + templates += [join(self.directories.output_dir, 'subject_level_analysis', + '_subject_id_{subject_id}', f'spmT_{contrast_id}.nii')\ for contrast_id in self.contrast_list] # Format with subject_ids @@ -641,8 +381,6 @@ def get_subject_level_outputs(self): return return_list - # [INFO] This function returns the list of ids and files of each group of participants - # to do analyses for both groups, and one between the two groups. 
def get_subset_contrasts( file_list, subject_list: list, participants_file: str ): @@ -722,7 +460,7 @@ def get_group_level_analysis_sub_workflow(self, method): templates = { # Contrast for all participants 'contrast' : join(self.directories.output_dir, - 'l1_analysis', '_subject_id_*', 'con_{contrast_id}.nii'), + 'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii'), # Participants file 'participants' : join(self.directories.dataset_dir, 'participants.tsv') } From 69a51bb52f0711cf4156ad137bfd6ce529ee440a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 13 Dec 2024 16:02:30 +0100 Subject: [PATCH 14/24] Refac group level [skip ci] --- narps_open/pipelines/team_3C6G.py | 385 +++++++++++++++++------------- 1 file changed, 213 insertions(+), 172 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index c56487ff..094e18c1 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python # coding: utf-8 """ Write the work of NARPS team 3C6G using Nipype """ @@ -7,7 +7,7 @@ from itertools import product from nipype import Node, Workflow, MapNode -from nipype.interfaces.utility import IdentityInterface, Function +from nipype.interfaces.utility import IdentityInterface, Function, Merge from nipype.interfaces.io import SelectFiles, DataSink from nipype.algorithms.misc import Gunzip from nipype.algorithms.modelgen import SpecifySPMModel @@ -22,8 +22,12 @@ ) from narps_open.pipelines import Pipeline -from narps_open.core.common import get_voxel_dimensions from narps_open.data.task import TaskInformation +from narps_open.data.participants import get_group +from narps_open.core.common import ( + remove_parent_directory, list_intersection, elements_in_string, clean_list + ) +from narps_open.utils.configuration import Configuration class PipelineTeam3C6G(Pipeline): """ A class that defines the pipeline of team 3C6G """ @@ -163,7 +167,7 @@ def get_preprocessing(self): preprocessing.connect(select_files, 'anat', gunzip_anat, 'in_file') # REALIGN - rigid-body realignment in SPM12 using 1st scan as referenced scan - # and normalized mutual information. + # and normalized mutual information. realign = Node(Realign(), name = 'realign') realign.inputs.register_to_mean = False preprocessing.connect(gunzip_func, 'out_file', realign, 'in_files') @@ -185,7 +189,7 @@ def get_preprocessing(self): # Get SPM Tissue Probability Maps file spm_tissues_file = join(SPMInfo.getinfo()['path'], 'tpm', 'TPM.nii') - # NEW SEGMENT - Unified segmentation using tissue probability maps in SPM12. + # NEW SEGMENT - Unified segmentation using tissue probability maps in SPM12. # Unified segmentation in SPM12 to MNI space # (the MNI-space tissue probability maps used in segmentation) using default parameters. # Bias-field correction in the context of unified segmentation in SPM12. 
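For reference, the TPM path assembled above from SPMInfo can be checked outside the
workflow with a few lines; this assumes a local SPM12 installation (MATLAB or
standalone) that Nipype is able to query, and the resolved path is site-specific:

    from os.path import join, isfile
    from nipype.interfaces.spm.base import Info as SPMInfo

    spm_tissues_file = join(SPMInfo.getinfo()['path'], 'tpm', 'TPM.nii')
    print(spm_tissues_file, isfile(spm_tissues_file))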
@@ -204,7 +208,8 @@ def get_preprocessing(self): # NORMALIZE12 - Spatial normalization of functional images normalize = Node(Normalize12(), name = 'normalize') normalize.inputs.jobtype = 'write' - preprocessing.connect(segment, 'forward_deformation_field', normalize, 'deformation_file') + preprocessing.connect( + segmentation, 'forward_deformation_field', normalize, 'deformation_file') preprocessing.connect(coregister, 'coregistered_files', normalize, 'apply_to_files') # SMOOTHING - 6 mm fixed FWHM smoothing in MNI volume @@ -215,15 +220,37 @@ def get_preprocessing(self): # DATASINK - store the wanted results in the wanted repository data_sink = Node(DataSink(), name='data_sink') data_sink.inputs.base_directory = self.directories.output_dir - preprocessing.connect( - segmentation, 'native_class_images', data_sink, 'preprocessing.@segmented') - preprocessing.connect( - segmentation, 'normalized_class_images', - data_sink, 'preprocessing.@segmented_normalized') preprocessing.connect( realign, 'realignment_parameters', data_sink, 'preprocessing.@motion_parameters') preprocessing.connect(smoothing, 'smoothed_files', data_sink, 'preprocessing.@smoothed') + # Remove large files, if requested + if Configuration()['pipelines']['remove_unused_data']: + + # Merge Node - Merge file names to be removed after datasink node is performed + merge_removable_files = Node(Merge(7), name = 'merge_removable_files') + merge_removable_files.inputs.ravel_inputs = True + + # Function Nodes remove_files - Remove sizeable files once they aren't needed + remove_after_datasink = MapNode(Function( + function = remove_parent_directory, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'remove_after_datasink', iterfield = 'file_name') + + # Add connections + preprocessing.connect([ + (gunzip_func, merge_removable_files, [('out_file', 'in1')]), + (gunzip_anat, merge_removable_files, [('out_file', 'in2')]), + (realign, merge_removable_files, [('realigned_files', 'in3')]), + (extract_first_image, merge_removable_files, [('realigned_files', 'in4')]), + (coregister, merge_removable_files, [('realigned_files', 'in5')]), + (normalize, merge_removable_files, [('realigned_files', 'in6')]), + (smoothing, merge_removable_files, [('smoothed_files', 'in7')]), + (merge_removable_files, remove_after_datasink, [('out', 'file_name')]), + (data_sink, remove_after_datasink, [('out_file', '_')]) + ]) + return preprocessing def get_preprocessing_outputs(self): @@ -232,32 +259,33 @@ def get_preprocessing_outputs(self): # Smoothed maps templates = [join( self.directories.output_dir, - 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', 'swrrsub-{subject_id}_task-MGT_run-{run_id}_bold.nii')] # Motion parameters file templates += [join( self.directories.output_dir, - 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', 'rp_sub-{subject_id}_task-MGT_run-{run_id}_bold.txt')] # Segmentation maps templates += [join( self.directories.output_dir, - 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', f'c{i}'+'sub-{subject_id}_T1w.nii')\ for i in range(1,7)] templates += [join( self.directories.output_dir, - 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', f'wc{i}'+'sub-{subject_id}_T1w.nii')\ for i in range(1,7)] # Format with subject_ids return_list 
= [] for template in templates: - return_list += [template.format(subject_id = s, run_id = r) for r in self.run_list for s in self.subject_list] + return_list += [template.format(subject_id = s, run_id = r)\ + for r in self.run_list for s in self.subject_list] return return_list @@ -275,7 +303,8 @@ def get_subject_level_analysis(self): ) # IDENTITY INTERFACE - Allows to iterate on subjects - information_source = Node(IdentityInterface(fields = ['subject_id']), name = 'information_source') + information_source = Node(IdentityInterface(fields = ['subject_id']), + name = 'information_source') information_source.iterables = [('subject_id', self.subject_list)] # SELECTFILES - to select necessary files @@ -311,7 +340,7 @@ def get_subject_level_analysis(self): # SPECIFY MODEL - generates SPM-specific Model specify_model = Node(SpecifySPMModel(), name = 'specify_model') specify_model.inputs.concatenate_runs = True - specify_model.inputs.input_units = 'secs' + specify_model.inputs.input_units = 'secs' specify_model.inputs.output_units = 'secs' specify_model.inputs.time_repetition = TaskInformation()['RepetitionTime'] specify_model.inputs.high_pass_filter_cutoff = 128 @@ -381,50 +410,6 @@ def get_subject_level_outputs(self): return return_list - def get_subset_contrasts( - file_list, subject_list: list, participants_file: str - ): - """ - This function return the file list containing only the files belonging - to the subjects in the wanted group. - - Parameters : - - file_list : original file list selected by selectfiles node - - subject_list : list of subject IDs that are in the wanted group for the analysis - - participants_file: str, file containing participants characteristics - - Returns : - - equal_indifference_id : a list of subject ids in the equalIndifference group - - equal_range_id : a list of subject ids in the equalRange group - - equal_indifference_files : a subset of file_list corresponding to subjects - in the equalIndifference group - - equal_range_files : a subset of file_list corresponding to subjects - in the equalRange group - """ - equal_indifference_id = [] - equal_range_id = [] - equal_indifference_files = [] - equal_range_files = [] - - # Reading file containing participants IDs and groups - with open(participants_file, 'rt') as file: - next(file) # skip the header - for line in file: - info = line.strip().split() - if info[0][-3:] in subject_list and info[1] == 'equalIndifference': - equal_indifference_id.append(info[0][-3:]) - elif info[0][-3:] in subject_list and info[1] == 'equalRange': - equal_range_id.append(info[0][-3:]) - - for file in file_list: - sub_id = file.split('/') - if sub_id[-2][-3:] in equal_indifference_id: - equal_indifference_files.append(file) - elif sub_id[-2][-3:] in equal_range_id: - equal_range_files.append(file) - - return equal_indifference_id, equal_range_id, equal_indifference_files, equal_range_files - def get_group_level_analysis(self): """ Return all workflows for the group level analysis. 
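The reworked group-level workflow in the next hunk selects participants with two
helpers instead of parsing participants.tsv by hand. A minimal sketch of the same
chain outside a workflow, using made-up subject ids and the helpers imported at the
top of the file:

    from narps_open.data.participants import get_group
    from narps_open.core.common import list_intersection

    subject_list = ['001', '002', '020']  # hypothetical subset under analysis
    equal_range_subjects = list_intersection(get_group('equalRange'), subject_list)
    equal_indifference_subjects = list_intersection(
        get_group('equalIndifference'), subject_list)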
@@ -449,121 +434,180 @@ def get_group_level_analysis_sub_workflow(self, method): # Compute the number of participants used to do the analysis nb_subjects = len(self.subject_list) - # Infosource - iterate over the list of contrasts - infosource_groupanalysis = Node( + # Initialize workflow + group_level_analysis = Workflow( + base_dir = self.directories.working_dir, + name = f'group_level_analysis_{method}_nsub_{nb_subjects}') + + # IDENTITY INTERFACE - iterate over the list of contrasts + information_source = Node( IdentityInterface( fields = ['contrast_id', 'subjects']), - name = 'infosource_groupanalysis') - infosource_groupanalysis.iterables = [('contrast_id', self.contrast_list)] + name = 'information_source') + information_source.iterables = [('contrast_id', self.contrast_list)] - # SelectFiles + # SELECT FILES - select contrasts for all subjects templates = { - # Contrast for all participants - 'contrast' : join(self.directories.output_dir, - 'subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii'), - # Participants file - 'participants' : join(self.directories.dataset_dir, 'participants.tsv') + 'contrast' : join('subject_level_analysis', '_subject_id_*', 'con_{contrast_id}.nii') } - - selectfiles_groupanalysis = Node(SelectFiles( - templates, base_directory = self.directories.dataset_dir, force_list = True), - name = 'selectfiles_groupanalysis') - - # Datasink - save important files - datasink_groupanalysis = Node(DataSink( - base_directory = str(self.directories.output_dir) + select_files = Node(SelectFiles(templates), name = 'select_files') + select_files.inputs.base_directory = self.directories.output_dir + select_files.inputs.force_list = True + group_level_analysis.connect( + information_source, 'contrast_id', select_files, 'contrast_id') + + # Function Node get_equal_range_subjects + # Get subjects in the equalRange group and in the subject_list + get_equal_range_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_range_subjects' + ) + get_equal_range_subjects.inputs.list_1 = get_group('equalRange') + get_equal_range_subjects.inputs.list_2 = self.subject_list + + # Function Node get_equal_indifference_subjects + # Get subjects in the equalIndifference group and in the subject_list + get_equal_indifference_subjects = Node(Function( + function = list_intersection, + input_names = ['list_1', 'list_2'], + output_names = ['out_list'] + ), + name = 'get_equal_indifference_subjects' + ) + get_equal_indifference_subjects.inputs.list_1 = get_group('equalIndifference') + get_equal_indifference_subjects.inputs.list_2 = self.subject_list + + # Create a function to complete the subject ids out from the get_equal_*_subjects nodes + # If not complete, subject id '001' in search patterns + # would match all contrast files with 'con_0001.nii'. 
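        # For example, ['001', '003'] becomes ['_subject_id_001', '_subject_id_003'],
        # so the filter matches the per-subject output folders and can no longer
        # collide with the contrast number in the file name.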
+ complete_subject_ids = lambda l : [f'_subject_id_{a}' for a in l] + + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] ), - name = 'datasink_groupanalysis') - - # Function node get_subset_contrasts - select subset of contrasts - sub_contrasts = Node(Function( - function = self.get_subset_contrasts, - input_names = ['file_list', 'subject_list', 'participants_file'], - output_names = [ - 'equalIndifference_id', - 'equalRange_id', - 'equalIndifference_files', - 'equalRange_files']), - name = 'sub_contrasts') - sub_contrasts.inputs.subject_list = self.subject_list - - # Estimate model - estimate_model = Node(EstimateModel( - estimation_method = {'Classical':1}), - name = 'estimate_model') + name = 'get_contrasts', iterfield = 'input_str' + ) + group_level_analysis.connect(select_files, 'contrasts', get_contrasts, 'input_str') + + # ESTIMATE MODEL - (inputs are set below, depending on the method used) + estimate_model = Node(EstimateModel(), name = 'estimate_model') + estimate_model.inputs.estimation_method = {'Classical':1} # Estimate contrasts - estimate_contrast = Node(EstimateContrast( - group_contrast = True), - name = 'estimate_contrast') + estimate_contrast = Node(EstimateContrast(), name = 'estimate_contrast') + estimate_contrast.inputs.group_contrast = True + group_level_analysis.connect( + estimate_model, 'spm_mat_file', estimate_contrast, 'spm_mat_file') + group_level_analysis.connect( + estimate_model, 'residual_image', estimate_contrast, 'residual_image') + group_level_analysis.connect( + estimate_model, 'beta_images', estimate_contrast, 'beta_images') # Create thresholded maps - threshold = MapNode(Threshold( - height_threshold = 0.001, height_threshold_type = 'p-value', - extent_fdr_p_threshold = 0.05, - force_activation = True), - name = 'threshold', - iterfield = ['stat_image', 'contrast_index']) - - l2_analysis = Workflow( - base_dir = self.directories.working_dir, - name = f'l2_analysis_{method}_nsub_{nb_subjects}') - l2_analysis.connect([ - (infosource_groupanalysis, selectfiles_groupanalysis, [ - ('contrast_id', 'contrast_id')]), - (selectfiles_groupanalysis, sub_contrasts, [ - ('contrast', 'file_list'), - ('participants', 'participants_file')]), - (estimate_model, estimate_contrast, [('spm_mat_file', 'spm_mat_file'), - ('residual_image', 'residual_image'), - ('beta_images', 'beta_images')]), - (estimate_contrast, threshold, [('spm_mat_file', 'spm_mat_file'), - ('spmT_images', 'stat_image')]), - (estimate_model, datasink_groupanalysis, [ - ('mask_image', f'l2_analysis_{method}_nsub_{nb_subjects}.@mask')]), - (estimate_contrast, datasink_groupanalysis, [ - ('spm_mat_file', f'l2_analysis_{method}_nsub_{nb_subjects}.@spm_mat'), - ('spmT_images', f'l2_analysis_{method}_nsub_{nb_subjects}.@T'), - ('con_images', f'l2_analysis_{method}_nsub_{nb_subjects}.@con')]), - (threshold, datasink_groupanalysis, [ - ('thresholded_map', f'l2_analysis_{method}_nsub_{nb_subjects}.@thresh')])]) + threshold = MapNode(Threshold(), + name = 'threshold', iterfield = ['stat_image', 'contrast_index']) + threshold.inputs.height_threshold = 0.001 + threshold.inputs.height_threshold_type = 'p-value' + threshold.inputs.extent_fdr_p_threshold = 0.05 + threshold.inputs.force_activation = True + 
group_level_analysis.connect( + estimate_contrast, 'spm_mat_file', threshold, 'spm_mat_file') + group_level_analysis.connect( + estimate_contrast, 'spmT_images', threshold, 'stat_image') if method in ('equalRange', 'equalIndifference'): - contrasts = [('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1])] + estimate_contrast.inputs = [ + ('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1]) + ] threshold.inputs.contrast_index = [1, 2] threshold.synchronize = True - ## Specify design matrix + # Specify design matrix one_sample_t_test_design = Node(OneSampleTTestDesign(), name = 'one_sample_t_test_design') - - l2_analysis.connect([ - (sub_contrasts, one_sample_t_test_design, [(f'{method}_files', 'in_files')]), - (one_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')])]) + group_level_analysis.connect( + one_sample_t_test_design, 'spm_mat_file', estimate_model, 'spm_mat_file') + group_level_analysis.connect( + get_contrasts, ('out_list', clean_list), one_sample_t_test_design, 'in_files') + + if method == 'equalRange': + group_level_analysis.connect( + get_equal_range_subjects, ('out_list', complete_subject_ids), + get_contrasts, 'elements' + ) + + elif method == 'equalIndifference': + group_level_analysis.connect( + get_equal_indifference_subjects, ('out_list', complete_subject_ids), + get_contrasts, 'elements' + ) elif method == 'groupComp': - contrasts = [ - ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [-1, 1])] + estimate_contrast.inputs.contrasts = [ + ('Eq range vs Eq indiff in loss', 'T', ['Group_{1}', 'Group_{2}'], [-1, 1]) + ] threshold.inputs.contrast_index = [1] threshold.synchronize = True + # Function Node elements_in_string + # Get contrast files for required subjects + # Note : using a MapNode with elements_in_string requires using clean_list to remove + # None values from the out_list + get_contrasts_2 = MapNode(Function( + function = elements_in_string, + input_names = ['input_str', 'elements'], + output_names = ['out_list'] + ), + name = 'get_contrasts_2', iterfield = 'input_str' + ) + # Node for the design matrix two_sample_t_test_design = Node(TwoSampleTTestDesign(), name = 'two_sample_t_test_design') - l2_analysis.connect([ - (sub_contrasts, two_sample_t_test_design, [ - ('equalRange_files', 'group1_files'), - ('equalIndifference_files', 'group2_files')]), - (two_sample_t_test_design, estimate_model, [ - ('spm_mat_file', 'spm_mat_file')]) + group_level_analysis.connect([ + (select_files, get_contrasts_2, [('contrasts', 'input_str')]), + (get_equal_range_subjects, get_contrasts, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_equal_indifference_subjects, get_contrasts_2, [ + (('out_list', complete_subject_ids), 'elements') + ]), + (get_contrasts, two_sample_t_test_design, [ + (('out_list', clean_list), 'group1_files') + ]), + (get_contrasts_2, two_sample_t_test_design, [ + (('out_list', clean_list), 'group2_files') + ]), + (two_sample_t_test_design, estimate_model, [('spm_mat_file', 'spm_mat_file')]) ]) - estimate_contrast.inputs.contrasts = contrasts - - return l2_analysis + # Datasink - save important files + data_sink = Node(DataSink(), name = 'data_sink') + data_sink.inputs.base_directory = self.directories.output_dir + group_level_analysis.connect(estimate_model, 'mask_image', + data_sink, f'group_level_analysis_{method}_nsub_{nb_subjects}.@mask') + group_level_analysis.connect(estimate_contrast, 'spm_mat_file', + data_sink, f'group_level_analysis_{method}_nsub_{nb_subjects}.@spm_mat') + 
group_level_analysis.connect(estimate_contrast, 'spmT_images', + data_sink, f'group_level_analysis_{method}_nsub_{nb_subjects}.@T') + group_level_analysis.connect(estimate_contrast, 'con_images', + data_sink, f'group_level_analysis_{method}_nsub_{nb_subjects}.@con') + group_level_analysis.connect(threshold, 'thresholded_map', + data_sink, f'group_level_analysis_{method}_nsub_{nb_subjects}.@thresh') + + return group_level_analysis def get_group_level_outputs(self): """ Return all names for the files the group level analysis is supposed to generate. """ @@ -580,12 +624,9 @@ def get_group_level_outputs(self): 'nb_subjects' : [str(len(self.subject_list))] } parameter_sets = product(*parameters.values()) - template = join( - self.directories.output_dir, - 'l2_analysis_{method}_nsub_{nb_subjects}', - '_contrast_id_{contrast_id}', - '{file}' - ) + template = join(self.directories.output_dir, + 'group_level_analysis_{method}_nsub_{nb_subjects}', '_contrast_id_{contrast_id}', + '{file}') return_list = [template.format(**dict(zip(parameters.keys(), parameter_values)))\ for parameter_values in parameter_sets] @@ -603,7 +644,7 @@ def get_group_level_outputs(self): parameter_sets = product(*parameters.values()) template = join( self.directories.output_dir, - 'l2_analysis_{method}_nsub_{nb_subjects}', + 'group_level_analysis_{method}_nsub_{nb_subjects}', '_contrast_id_{contrast_id}', '{file}' ) @@ -618,49 +659,49 @@ def get_hypotheses_outputs(self): nb_sub = len(self.subject_list) files = [ # Hypothesis 1 - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 2 - join(f'l2_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 3 - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 4 - join(f'l2_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0002', 'spmT_0001.nii'), # Hypothesis 5 - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0005', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0005', 'spmT_0001.nii'), # Hypothesis 6 - join(f'l2_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0005', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0005', 'spmT_0001.nii'), # Hypothesis 7 - 
join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalIndifference_nsub_{nb_sub}', + join(f'group_level_analysis_equalIndifference_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0001.nii'), # Hypothesis 8 - join(f'l2_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_equalRange_nsub_{nb_sub}', + join(f'group_level_analysis_equalRange_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0001.nii'), # Hypothesis 9 - join(f'l2_analysis_groupComp_nsub_{nb_sub}', + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', '_contrast_id_0003', '_threshold0', 'spmT_0001_thr.nii'), - join(f'l2_analysis_groupComp_nsub_{nb_sub}', + join(f'group_level_analysis_groupComp_nsub_{nb_sub}', '_contrast_id_0003', 'spmT_0001.nii') ] return [join(self.directories.output_dir, f) for f in files] From 2dc13337befec392412ce504cf5c3cb174b26c52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 13 Dec 2024 16:21:41 +0100 Subject: [PATCH 15/24] Adding tests [skip ci] --- narps_open/pipelines/team_3C6G.py | 124 +++++++++++------------------- tests/pipelines/test_team_3C6G.py | 30 ++++++++ 2 files changed, 74 insertions(+), 80 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 094e18c1..b2b61173 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -48,87 +48,54 @@ def __init__(self): ['neg_effect_of_loss', 'T', conditions, [0, 0, -1]] ] - def get_subject_information(event_files: list, runs: list): + def get_subject_information(event_file: str, short_run_id: int): """ - -MGT task (taken from .tsv files, duration = 4) with canonical HRF (no derivatives) - -Parametric modulator gain (from "gain" column in event .tsv file) - - Parametric modulator loss (from "loss" column in event .tsv file) - -highpass DCT filtering in SPM (using default period of 1/128 s) - -6 movement regressors from realignment - - Create Bunchs for specifySPMModel. + Create Bunchs of subject event information for specifySPMModel. Parameters : - - event_files: list of events files (one per run) for the subject - - runs: list of runs to use + - event_file: str, events file for a run of a subject + - short_run_id: str, an identifier for the run corresponding to the event_file + must be '1' for the first run, '2' for the second run, etc. Returns : - - subject_info : list of Bunch for 1st level analysis. 
+ - subject_info : Bunch corresponding to the event file """ from nipype.interfaces.base import Bunch - condition_names = ['trial'] - onset = {} - duration = {} - weights_gain = {} - weights_loss = {} - - # Loop over number of runs - for run_id in range(len(runs)): - - # Create dictionary items with empty lists - onset.update({s + '_run' + str(run_id + 1): [] for s in condition_names}) - duration.update({s + '_run' + str(run_id + 1): [] for s in condition_names}) - weights_gain.update({'gain_run' + str(run_id + 1): []}) - weights_loss.update({'loss_run' + str(run_id + 1): []}) - - with open(event_files[run_id], 'rt') as event_file: - next(event_file) # skip the header - - for line in event_file: - info = line.strip().split() - - for condition in condition_names: - val = condition + '_run' + str(run_id + 1) # trial_run1 or accepting_run1 - val_gain = 'gain_run' + str(run_id + 1) # gain_run1 - val_loss = 'loss_run' + str(run_id + 1) # loss_run1 - if condition == 'trial': - onset[val].append(float(info[0])) # onsets for trial_run1 - duration[val].append(float(4)) - weights_gain[val_gain].append(float(info[2])) - weights_loss[val_loss].append(float(info[3])) - - # Bunching is done per run, i.e. trial_run1, trial_run2, etc. - # But names must not have '_run1' etc because we concatenate runs - subject_info = [] - for run_id in range(len(runs)): - - conditions = [s + '_run' + str(run_id + 1) for s in condition_names] - gain = 'gain_run' + str(run_id + 1) - loss = 'loss_run' + str(run_id + 1) - - subject_info.insert( - run_id, + onsets = [] + durations = [] + weights_gain = [] + weights_loss = [] + + # Parse event file + with open(event_file[short_run_id], 'rt') as file: + next(file) # skip the header + + for line in file: + info = line.strip().split() + + onsets.append(float(info[0])) + durations.append(4.0) + weights_gain.append(float(info[2])) + weights_loss.append(float(info[3])) + + # Create bunch + return Bunch( + conditions = [f'trial_run{short_run_id}'], + onsets = [onsets], + durations = [durations], + amplitudes = None, + tmod = None, + pmod = [ Bunch( - conditions = condition_names, - onsets = [onset[c] for c in conditions], - durations = [duration[c] for c in conditions], - amplitudes = None, - tmod = None, - pmod = [ - Bunch( - name = ['gain', 'loss'], - poly = [1, 1], - param = [weights_gain[gain], weights_loss[loss]], - ), - None, - ], - regressor_names = None, - regressors = None, - ), - ) - - return subject_info + name = [f'gain_run{short_run_id}', f'loss_run{short_run_id}'], + poly = [1, 1], + param = [weights_gain, weights_loss] + ) + ], + regressor_names = None, + regressors = None + ) def get_preprocessing(self): """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ @@ -326,15 +293,12 @@ def get_subject_level_analysis(self): subject_level_analysis.connect(information_source, 'subject_id', select_files, 'subject_id') # FUNCTION node get_subject_information - get subject specific condition information - subject_information = Node( - Function( - input_names = ['event_files', 'runs'], - output_names = ['subject_info'], + subject_information = MapNode(Function( function = self.get_subject_information, - ), - name = 'subject_information', - ) - subject_information.inputs.runs = self.run_list + input_names = ['event_files', 'runs'], + output_names = ['subject_info']), + name = 'subject_information', iterfield = ['event_file', 'short_run_id']) + subject_information.inputs.short_run_id = list(range(1, len(self.run_list) + 1)) 
subject_level_analysis.connect(select_files, 'event', subject_information, 'event_files') # SPECIFY MODEL - generates SPM-specific Model diff --git a/tests/pipelines/test_team_3C6G.py b/tests/pipelines/test_team_3C6G.py index fe0ee0f5..353d250c 100644 --- a/tests/pipelines/test_team_3C6G.py +++ b/tests/pipelines/test_team_3C6G.py @@ -72,6 +72,36 @@ def test_outputs(): assert len(pipeline.get_group_level_outputs()) == 105 assert len(pipeline.get_hypotheses_outputs()) == 18 + @staticmethod + @mark.unit_test + def test_subject_information(): + """ Test the get_subject_information method """ + + # Get test files + test_file = join(Configuration()['directories']['test_data'], 'pipelines', 'events.tsv') + + bunch = PipelineTeam98BT.get_subject_information(test_file, 1) + + # Compare bunches to expected + assert isinstance(bunch, Bunch) + assert bunch.conditions == ['trial_run1'] + helpers.compare_float_2d_arrays(bunch.onsets, [ + [4.071, 11.834, 19.535, 27.535, 36.435]]) + helpers.compare_float_2d_arrays(bunch.durations, [ + [4.0, 4.0, 4.0, 4.0, 4.0]]) + assert bunch.amplitudes is None + assert bunch.tmod is None + assert bunch.regressor_names is None + assert bunch.regressors is None + pmod = bunch.pmod[0] + assert isinstance(pmod, Bunch) + assert pmod.name == ['gain_run1', 'loss_run1'] + assert pmod.poly == [1, 1] + helpers.compare_float_2d_arrays(pmod.param, [ + [14.0, 34.0, 38.0, 10.0, 16.0], + [6.0, 14.0, 19.0, 15.0, 17.0] + ]) + @staticmethod @mark.pipeline_test def test_execution(): From 3af88542c8cc1a34c88ac0371107cac0478ad7e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Dec 2024 11:41:12 +0100 Subject: [PATCH 16/24] Issues after refactoring [skip ci] --- narps_open/pipelines/team_3C6G.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index b2b61173..548a14e4 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -210,9 +210,9 @@ def get_preprocessing(self): (gunzip_func, merge_removable_files, [('out_file', 'in1')]), (gunzip_anat, merge_removable_files, [('out_file', 'in2')]), (realign, merge_removable_files, [('realigned_files', 'in3')]), - (extract_first_image, merge_removable_files, [('realigned_files', 'in4')]), - (coregister, merge_removable_files, [('realigned_files', 'in5')]), - (normalize, merge_removable_files, [('realigned_files', 'in6')]), + (extract_first_image, merge_removable_files, [('roi_file', 'in4')]), + (coregister, merge_removable_files, [('coregistered_files', 'in5')]), + (normalize, merge_removable_files, [('normalized_files', 'in6')]), (smoothing, merge_removable_files, [('smoothed_files', 'in7')]), (merge_removable_files, remove_after_datasink, [('out', 'file_name')]), (data_sink, remove_after_datasink, [('out_file', '_')]) From d84be4fa08081a9994cc349fefcc416ca715bdc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Dec 2024 12:02:20 +0100 Subject: [PATCH 17/24] Issues after refactoring [skip ci] --- narps_open/pipelines/team_3C6G.py | 102 +++++++++++++++--------------- tests/pipelines/test_team_3C6G.py | 5 +- 2 files changed, 55 insertions(+), 52 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 548a14e4..42411dc1 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -48,55 +48,6 @@ def __init__(self): ['neg_effect_of_loss', 'T', conditions, [0, 0, -1]] ] - def 
get_subject_information(event_file: str, short_run_id: int): - """ - Create Bunchs of subject event information for specifySPMModel. - - Parameters : - - event_file: str, events file for a run of a subject - - short_run_id: str, an identifier for the run corresponding to the event_file - must be '1' for the first run, '2' for the second run, etc. - - Returns : - - subject_info : Bunch corresponding to the event file - """ - from nipype.interfaces.base import Bunch - - onsets = [] - durations = [] - weights_gain = [] - weights_loss = [] - - # Parse event file - with open(event_file[short_run_id], 'rt') as file: - next(file) # skip the header - - for line in file: - info = line.strip().split() - - onsets.append(float(info[0])) - durations.append(4.0) - weights_gain.append(float(info[2])) - weights_loss.append(float(info[3])) - - # Create bunch - return Bunch( - conditions = [f'trial_run{short_run_id}'], - onsets = [onsets], - durations = [durations], - amplitudes = None, - tmod = None, - pmod = [ - Bunch( - name = [f'gain_run{short_run_id}', f'loss_run{short_run_id}'], - poly = [1, 1], - param = [weights_gain, weights_loss] - ) - ], - regressor_names = None, - regressors = None - ) - def get_preprocessing(self): """ Return a Nipype workflow describing the prerpocessing part of the pipeline """ @@ -260,6 +211,55 @@ def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None + def get_subject_information(event_file: str, short_run_id: int): + """ + Create Bunchs of subject event information for specifySPMModel. + + Parameters : + - event_file: str, events file for a run of a subject + - short_run_id: str, an identifier for the run corresponding to the event_file + must be '1' for the first run, '2' for the second run, etc. 
+ + Returns : + - subject_info : Bunch corresponding to the event file + """ + from nipype.interfaces.base import Bunch + + onsets = [] + durations = [] + weights_gain = [] + weights_loss = [] + + # Parse event file + with open(event_file, 'rt') as file: + next(file) # skip the header + + for line in file: + info = line.strip().split() + + onsets.append(float(info[0])) + durations.append(4.0) + weights_gain.append(float(info[2])) + weights_loss.append(float(info[3])) + + # Create bunch + return Bunch( + conditions = [f'trial_run{short_run_id}'], + onsets = [onsets], + durations = [durations], + amplitudes = None, + tmod = None, + pmod = [ + Bunch( + name = [f'gain_run{short_run_id}', f'loss_run{short_run_id}'], + poly = [1, 1], + param = [weights_gain, weights_loss] + ) + ], + regressor_names = None, + regressors = None + ) + def get_subject_level_analysis(self): """ Return a Nipype workflow describing the subject level analysis part of the pipeline """ @@ -295,7 +295,7 @@ def get_subject_level_analysis(self): # FUNCTION node get_subject_information - get subject specific condition information subject_information = MapNode(Function( function = self.get_subject_information, - input_names = ['event_files', 'runs'], + input_names = ['event_files', 'short_run_id'], output_names = ['subject_info']), name = 'subject_information', iterfield = ['event_file', 'short_run_id']) subject_information.inputs.short_run_id = list(range(1, len(self.run_list) + 1)) @@ -489,7 +489,7 @@ def get_group_level_analysis_sub_workflow(self, method): estimate_contrast, 'spmT_images', threshold, 'stat_image') if method in ('equalRange', 'equalIndifference'): - estimate_contrast.inputs = [ + estimate_contrast.inputs.contrasts = [ ('Group', 'T', ['mean'], [1]), ('Group', 'T', ['mean'], [-1]) ] diff --git a/tests/pipelines/test_team_3C6G.py b/tests/pipelines/test_team_3C6G.py index 353d250c..ffd0ffd0 100644 --- a/tests/pipelines/test_team_3C6G.py +++ b/tests/pipelines/test_team_3C6G.py @@ -10,10 +10,13 @@ pytest -q test_team_3C6G.py pytest -q test_team_3C6G.py -k """ +from os.path import join from pytest import helpers, mark from nipype import Workflow +from nipype.interfaces.base import Bunch +from narps_open.utils.configuration import Configuration from narps_open.pipelines.team_3C6G import PipelineTeam3C6G class TestPipelinesTeam3C6G: @@ -80,7 +83,7 @@ def test_subject_information(): # Get test files test_file = join(Configuration()['directories']['test_data'], 'pipelines', 'events.tsv') - bunch = PipelineTeam98BT.get_subject_information(test_file, 1) + bunch = PipelineTeam3C6G.get_subject_information(test_file, 1) # Compare bunches to expected assert isinstance(bunch, Bunch) From de365f9d2201481d3629aa3654d4c6768a02f352 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Tue, 17 Dec 2024 15:49:58 +0100 Subject: [PATCH 18/24] Split subject and run parts in preprocessing --- narps_open/pipelines/team_3C6G.py | 42 ++++++++++++++++++++----------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 42411dc1..bdf69ea8 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -56,33 +56,45 @@ def get_preprocessing(self): base_dir = self.directories.working_dir, name = 'preprocessing' ) + # IDENTITY INTERFACE - allows to iterate over subjects + information_source_subject = Node(IdentityInterface( + fields = ['subject_id']), + name = 'information_source_subject' + ) + 
information_source_subject.iterables = ('subject_id', self.subject_list) - # IDENTITY INTERFACE - allows to iterate over subjects and runs - information_source = Node(IdentityInterface( + # IDENTITY INTERFACE - allows to iterate over runs + information_source_runs = Node(IdentityInterface( fields = ['subject_id', 'run_id']), - name = 'information_source' + name = 'information_source_runs' ) - information_source.iterables = [ - ('subject_id', self.subject_list), - ('run_id', self.run_list), - ] + information_source_runs.iterables = ('run_id', self.run_list) + preprocessing.connect( + information_source_subject, 'subject_id', information_source_runs, 'subject_id') - # SELECT FILES - to select necessary files + # SELECT FILES - to select subject files + file_templates = {'anat': join('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz')} + select_subject_files = Node(SelectFiles(file_templates), name = 'select_subject_files') + select_subject_files.inputs.base_directory = self.directories.dataset_dir + preprocessing.connect( + information_source_subject, 'subject_id', select_subject_files, 'subject_id') + + # SELECT FILES - to select run files file_templates = { - 'anat': join('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz'), 'func': join('sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-{run_id}_bold.nii.gz') } - select_files = Node(SelectFiles(file_templates), name = 'select_files') - select_files.inputs.base_directory = self.directories.dataset_dir - preprocessing.connect(information_source, 'subject_id', select_files, 'subject_id') - preprocessing.connect(information_source, 'run_id', select_files, 'run_id') + select_run_files = Node(SelectFiles(file_templates), name = 'select_run_files') + select_run_files.inputs.base_directory = self.directories.dataset_dir + preprocessing.connect( + information_source_runs, 'subject_id', select_run_files, 'subject_id') + preprocessing.connect(information_source_runs, 'run_id', select_run_files, 'run_id') # GUNZIP input files gunzip_func = Node(Gunzip(), name = 'gunzip_func') gunzip_anat = Node(Gunzip(), name = 'gunzip_anat') - preprocessing.connect(select_files, 'func', gunzip_func, 'in_file') - preprocessing.connect(select_files, 'anat', gunzip_anat, 'in_file') + preprocessing.connect(select_run_files, 'func', gunzip_func, 'in_file') + preprocessing.connect(select_subject_files, 'anat', gunzip_anat, 'in_file') # REALIGN - rigid-body realignment in SPM12 using 1st scan as referenced scan # and normalized mutual information. 
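[Editor's note, not part of the patch] The two chained IdentityInterface nodes introduced above split the iteration so that anatomical steps run once per subject while functional steps run once per run. Below is a minimal, self-contained sketch of that nested-iterables pattern; it is illustrative only, and the working directory, subject ids and run ids are placeholder values.

from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface

# Hypothetical working directory, used only for this illustration
example = Workflow(name = 'nested_iterables_example', base_dir = '/tmp/narps_example')

# Outer iterable: one sub-graph per subject (anatomical nodes hang off this source)
subject_source = Node(IdentityInterface(fields = ['subject_id']), name = 'subject_source')
subject_source.iterables = ('subject_id', ['001', '002'])  # placeholder subject ids

# Inner iterable: expanded once per run inside each subject sub-graph
run_source = Node(IdentityInterface(fields = ['subject_id', 'run_id']), name = 'run_source')
run_source.iterables = ('run_id', ['01', '02'])  # placeholder run ids

# Chaining the two sources yields the subject x run expansion used by the pipeline
example.connect(subject_source, 'subject_id', run_source, 'subject_id')

With this layout, outputs of anatomical nodes are organised per subject and outputs of functional nodes per subject and run, which is what the '_subject_id_{subject_id}', '_run_id_{run_id}' output templates introduced later in the series rely on.
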
From ff8819a15edc6dc51eb0a328fd15b0da006634d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Wed, 18 Dec 2024 10:13:25 +0100 Subject: [PATCH 19/24] Typo with subject information [skip ci] --- narps_open/pipelines/team_3C6G.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index bdf69ea8..041b93f1 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -307,11 +307,11 @@ def get_subject_level_analysis(self): # FUNCTION node get_subject_information - get subject specific condition information subject_information = MapNode(Function( function = self.get_subject_information, - input_names = ['event_files', 'short_run_id'], + input_names = ['event_file', 'short_run_id'], output_names = ['subject_info']), name = 'subject_information', iterfield = ['event_file', 'short_run_id']) subject_information.inputs.short_run_id = list(range(1, len(self.run_list) + 1)) - subject_level_analysis.connect(select_files, 'event', subject_information, 'event_files') + subject_level_analysis.connect(select_files, 'event', subject_information, 'event_file') # SPECIFY MODEL - generates SPM-specific Model specify_model = Node(SpecifySPMModel(), name = 'specify_model') From f8a3c8ad903664627c75ac85495d126cfb65ae5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Thu, 19 Dec 2024 17:08:10 +0100 Subject: [PATCH 20/24] Removing segmentation maps from preprocessing outputs [skip ci] --- narps_open/pipelines/team_3C6G.py | 13 ------------- tests/pipelines/test_team_3C6G.py | 6 +++--- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 041b93f1..618941c0 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -198,19 +198,6 @@ def get_preprocessing_outputs(self): 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', 'rp_sub-{subject_id}_task-MGT_run-{run_id}_bold.txt')] - # Segmentation maps - templates += [join( - self.directories.output_dir, - 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', - f'c{i}'+'sub-{subject_id}_T1w.nii')\ - for i in range(1,7)] - - templates += [join( - self.directories.output_dir, - 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', - f'wc{i}'+'sub-{subject_id}_T1w.nii')\ - for i in range(1,7)] - # Format with subject_ids return_list = [] for template in templates: diff --git a/tests/pipelines/test_team_3C6G.py b/tests/pipelines/test_team_3C6G.py index ffd0ffd0..c1d488e0 100644 --- a/tests/pipelines/test_team_3C6G.py +++ b/tests/pipelines/test_team_3C6G.py @@ -51,7 +51,7 @@ def test_outputs(): # 1 - 1 subject - 1 run outputs pipeline.subject_list = ['001'] pipeline.run_list = ['01'] - assert len(pipeline.get_preprocessing_outputs()) == 14 + assert len(pipeline.get_preprocessing_outputs()) == 2 assert len(pipeline.get_run_level_outputs()) == 0 assert len(pipeline.get_subject_level_outputs()) == 11 assert len(pipeline.get_group_level_outputs()) == 105 @@ -60,7 +60,7 @@ def test_outputs(): # 2 - 1 subject - 4 runs outputs pipeline.subject_list = ['001'] pipeline.run_list = ['01', '02', '03', '04'] - assert len(pipeline.get_preprocessing_outputs()) == 56 + assert len(pipeline.get_preprocessing_outputs()) == 2 * 4 assert len(pipeline.get_run_level_outputs()) == 0 assert len(pipeline.get_subject_level_outputs()) == 11 assert len(pipeline.get_group_level_outputs()) == 105 @@ -69,7 +69,7 @@ 
def test_outputs(): # 2 - 4 subjects outputs pipeline.subject_list = ['001', '002', '003', '004'] pipeline.run_list = ['01', '02', '03', '04'] - assert len(pipeline.get_preprocessing_outputs()) == 224 + assert len(pipeline.get_preprocessing_outputs()) == 2 * 4 * 4 assert len(pipeline.get_run_level_outputs()) == 0 assert len(pipeline.get_subject_level_outputs()) == 44 assert len(pipeline.get_group_level_outputs()) == 105 From bb30c91dd13143ccbb8110ad954884993b26c092 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 20 Dec 2024 08:55:03 +0100 Subject: [PATCH 21/24] Preprocessing outputs [skip ci] --- narps_open/pipelines/team_3C6G.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 618941c0..3c338d83 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -189,13 +189,13 @@ def get_preprocessing_outputs(self): # Smoothed maps templates = [join( self.directories.output_dir, - 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + 'preprocessing', '_subject_id_{subject_id}', '_run_id_{run_id}', 'swrrsub-{subject_id}_task-MGT_run-{run_id}_bold.nii')] # Motion parameters file templates += [join( self.directories.output_dir, - 'preprocessing', '_run_id_{run_id}_subject_id_{subject_id}', + 'preprocessing', '_subject_id_{subject_id}', '_run_id_{run_id}', 'rp_sub-{subject_id}_task-MGT_run-{run_id}_bold.txt')] # Format with subject_ids @@ -276,14 +276,14 @@ def get_subject_level_analysis(self): # SELECTFILES - to select necessary files templates = { 'func': join(self.directories.output_dir, 'preprocessing', - '_run_id_*_subject_id_{subject_id}', + '_subject_id_{subject_id}', '_run_id_*', 'swrrsub-{subject_id}_task-MGT_run-*_bold.nii', ), 'event': join(self.directories.dataset_dir, 'sub-{subject_id}', 'func', 'sub-{subject_id}_task-MGT_run-*_events.tsv', ), 'parameters': join(self.directories.output_dir, 'preprocessing', - '_run_id_*_subject_id_{subject_id}', + '_subject_id_{subject_id}', '_run_id_*', 'rp_sub-{subject_id}_task-MGT_run-*_bold.txt', ) } From 3b8a0f824c1183ef6535d5bc0c8889c75713dbde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 20 Dec 2024 14:35:14 +0100 Subject: [PATCH 22/24] Remove preprocessing files [skip ci] --- narps_open/pipelines/team_3C6G.py | 37 ++++++++++++++++++------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 3c338d83..841b8d0d 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -156,29 +156,36 @@ def get_preprocessing(self): # Remove large files, if requested if Configuration()['pipelines']['remove_unused_data']: + # Function Nodes remove_files - Remove sizeable anat files once they aren't needed + remove_anat_after_datasink = Node(Function( + function = remove_parent_directory, + input_names = ['_', 'file_name'], + output_names = [] + ), name = 'remove_anat_after_datasink') + preprocessing.connect([ + (gunzip_anat, remove_anat_after_datasink, [('out_file', 'file_name')]), + (data_sink, remove_anat_after_datasink, [('out_file', '_')]) + ]) # Merge Node - Merge file names to be removed after datasink node is performed - merge_removable_files = Node(Merge(7), name = 'merge_removable_files') - merge_removable_files.inputs.ravel_inputs = True + merge_removable_func_files = Node(Merge(6), name = 'merge_removable_func_files') + 
merge_removable_func_files.inputs.ravel_inputs = True # Function Nodes remove_files - Remove sizeable files once they aren't needed - remove_after_datasink = MapNode(Function( + remove_func_after_datasink = MapNode(Function( function = remove_parent_directory, input_names = ['_', 'file_name'], output_names = [] - ), name = 'remove_after_datasink', iterfield = 'file_name') - - # Add connections + ), name = 'remove_func_after_datasink', iterfield = 'file_name') preprocessing.connect([ - (gunzip_func, merge_removable_files, [('out_file', 'in1')]), - (gunzip_anat, merge_removable_files, [('out_file', 'in2')]), - (realign, merge_removable_files, [('realigned_files', 'in3')]), - (extract_first_image, merge_removable_files, [('roi_file', 'in4')]), - (coregister, merge_removable_files, [('coregistered_files', 'in5')]), - (normalize, merge_removable_files, [('normalized_files', 'in6')]), - (smoothing, merge_removable_files, [('smoothed_files', 'in7')]), - (merge_removable_files, remove_after_datasink, [('out', 'file_name')]), - (data_sink, remove_after_datasink, [('out_file', '_')]) + (gunzip_func, merge_removable_func_files, [('out_file', 'in1')]), + (realign, merge_removable_func_files, [('realigned_files', 'in2')]), + (extract_first_image, merge_removable_func_files, [('roi_file', 'in3')]), + (coregister, merge_removable_func_files, [('coregistered_files', 'in4')]), + (normalize, merge_removable_func_files, [('normalized_files', 'in5')]), + (smoothing, merge_removable_func_files, [('smoothed_files', 'in6')]), + (merge_removable_func_files, remove_func_after_datasink, [('out', 'file_name')]), + (data_sink, remove_func_after_datasink, [('out_file', '_')]) ]) return preprocessing From 7fafd706f53bd205aa80d5b5ca8a81fd9f894490 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 20 Dec 2024 14:43:13 +0100 Subject: [PATCH 23/24] Regressors and contrast naming for first level [skip ci] --- narps_open/pipelines/team_3C6G.py | 8 +++----- tests/pipelines/test_team_3C6G.py | 6 +++--- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index 841b8d0d..c76c5133 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -217,14 +217,12 @@ def get_run_level_analysis(self): """ Return a Nipype workflow describing the run level analysis part of the pipeline """ return None - def get_subject_information(event_file: str, short_run_id: int): + def get_subject_information(event_file: str): """ Create Bunchs of subject event information for specifySPMModel. Parameters : - event_file: str, events file for a run of a subject - - short_run_id: str, an identifier for the run corresponding to the event_file - must be '1' for the first run, '2' for the second run, etc. 
Returns : - subject_info : Bunch corresponding to the event file @@ -250,14 +248,14 @@ def get_subject_information(event_file: str, short_run_id: int): # Create bunch return Bunch( - conditions = [f'trial_run{short_run_id}'], + conditions = [f'trial'], onsets = [onsets], durations = [durations], amplitudes = None, tmod = None, pmod = [ Bunch( - name = [f'gain_run{short_run_id}', f'loss_run{short_run_id}'], + name = [f'gain', f'loss'], poly = [1, 1], param = [weights_gain, weights_loss] ) diff --git a/tests/pipelines/test_team_3C6G.py b/tests/pipelines/test_team_3C6G.py index c1d488e0..c21586ea 100644 --- a/tests/pipelines/test_team_3C6G.py +++ b/tests/pipelines/test_team_3C6G.py @@ -83,11 +83,11 @@ def test_subject_information(): # Get test files test_file = join(Configuration()['directories']['test_data'], 'pipelines', 'events.tsv') - bunch = PipelineTeam3C6G.get_subject_information(test_file, 1) + bunch = PipelineTeam3C6G.get_subject_information(test_file) # Compare bunches to expected assert isinstance(bunch, Bunch) - assert bunch.conditions == ['trial_run1'] + assert bunch.conditions == ['trial'] helpers.compare_float_2d_arrays(bunch.onsets, [ [4.071, 11.834, 19.535, 27.535, 36.435]]) helpers.compare_float_2d_arrays(bunch.durations, [ @@ -98,7 +98,7 @@ def test_subject_information(): assert bunch.regressors is None pmod = bunch.pmod[0] assert isinstance(pmod, Bunch) - assert pmod.name == ['gain_run1', 'loss_run1'] + assert pmod.name == ['gain', 'loss'] assert pmod.poly == [1, 1] helpers.compare_float_2d_arrays(pmod.param, [ [14.0, 34.0, 38.0, 10.0, 16.0], From 417ab24530e5582bd4b717c220412c9fd7efe03c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Boris=20Cl=C3=A9net?= Date: Fri, 20 Dec 2024 16:53:07 +0100 Subject: [PATCH 24/24] Issue with run_id after get_subject_information change [skip ci] --- narps_open/pipelines/team_3C6G.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/narps_open/pipelines/team_3C6G.py b/narps_open/pipelines/team_3C6G.py index c76c5133..25afffe5 100644 --- a/narps_open/pipelines/team_3C6G.py +++ b/narps_open/pipelines/team_3C6G.py @@ -299,10 +299,9 @@ def get_subject_level_analysis(self): # FUNCTION node get_subject_information - get subject specific condition information subject_information = MapNode(Function( function = self.get_subject_information, - input_names = ['event_file', 'short_run_id'], + input_names = ['event_file'], output_names = ['subject_info']), - name = 'subject_information', iterfield = ['event_file', 'short_run_id']) - subject_information.inputs.short_run_id = list(range(1, len(self.run_list) + 1)) + name = 'subject_information', iterfield = ['event_file']) subject_level_analysis.connect(select_files, 'event', subject_information, 'event_file') # SPECIFY MODEL - generates SPM-specific Model
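
[Editor's note, not part of the patch] With short_run_id removed, get_subject_information depends only on the events file, so the MapNode simply fans out over the list of event files selected for a subject. The sketch below shows that fan-out pattern with a trivial stand-in parser; it is an illustration under assumptions, not project code, and the file paths are hypothetical.

from nipype import MapNode
from nipype.interfaces.utility import Function

def count_events(event_file):
    """Toy stand-in for get_subject_information: count the events in one run."""
    with open(event_file, 'rt') as file:
        next(file)  # skip the header
        return sum(1 for _ in file)

# One copy of the function runs per element of 'event_file' thanks to iterfield
count_node = MapNode(
    Function(function = count_events,
             input_names = ['event_file'],
             output_names = ['n_events']),
    name = 'count_events', iterfield = ['event_file'])

# Hypothetical paths; in the pipeline these come from the SelectFiles 'event' output
count_node.inputs.event_file = ['sub-001_run-01_events.tsv', 'sub-001_run-02_events.tsv']

Because each MapNode element returns its own Bunch, SpecifySPMModel receives one subject_info entry per run, matching the per-run 'trial' condition with gain and loss parametric modulators defined in the previous patch.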