-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
#16 Start thinking on tiny morphologist use case implementation
- Loading branch information
Showing
2 changed files
with
192 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,89 @@ | ||
from soma.controller import field, file | ||
from capsul.api import Process, Pipeline | ||
|
||
|
||
class BiasCorrection(Process):
    """Toy bias-correction process: appends a trace line to a text 'image'.

    Reads the text file at ``input``, appends a line recording the
    correction ``strength``, and writes the result to ``output``.
    """
    input: field(type_=file())
    strength: float = 0.8
    output: field(type_=file(), output=True)

    def execute(self):
        # Bug fix: original called self.read(), which does not exist on
        # Process — the open file handle must be read instead.
        with open(self.input) as f:
            content = f.read()
        content = f'{content}\nBias correction with strength={self.strength}'
        with open(self.output, 'w') as f:
            f.write(content)
|
||
class SPMNormalization(Process):
    """Toy SPM normalization process: appends a trace line to a text 'image'.

    Reads the text file at ``input``, appends a line recording the
    ``template`` used, and writes the result to ``output``.
    """
    input: field(type_=file())
    template: field(type_=file())
    output: field(type_=file(), output=True)

    def execute(self):
        # Bug fix: original called self.read(), which does not exist on
        # Process — the open file handle must be read instead.
        with open(self.input) as f:
            content = f.read()
        content = f'{content}\nSPM normalization with template "{self.template}"'
        with open(self.output, 'w') as f:
            f.write(content)
|
||
class AimsNormalization(Process):
    """Toy AIMS normalization process: appends a trace line to a text 'image'.

    Reads the text file at ``input``, appends a line recording the
    ``origin`` used, and writes the result to ``output``.
    """
    input: field(type_=file())
    origin: field(type_=list[float], default_factory=lambda: [1.2, 3.4, 5.6])
    output: field(type_=file(), output=True)

    def execute(self):
        # Bug fix: original called self.read(), which does not exist on
        # Process — the open file handle must be read instead.
        with open(self.input) as f:
            content = f.read()
        # Bug fix: the original message said "SPM normalization" — a
        # copy-paste error from SPMNormalization; this is the AIMS process.
        content = f'{content}\nAims normalization with origin={self.origin}'
        with open(self.output, 'w') as f:
            f.write(content)
|
||
class SplitBrain(Process):
    """Toy brain-splitting process producing one file per hemisphere.

    Reads the text file at ``input`` and writes two outputs,
    ``right_output`` and ``left_output``, each tagged with its side.
    """
    input: field(type_=file())
    right_output: field(type_=file(), output=True)
    left_output: field(type_=file(), output=True)

    def execute(self):
        # Bug fix: the original body was copy-pasted from BiasCorrection —
        # it referenced self.strength and self.output, neither of which
        # exists on this class, and never wrote the two declared outputs.
        with open(self.input) as f:
            content = f.read()
        for side, output in (('right', self.right_output),
                             ('left', self.left_output)):
            with open(output, 'w') as f:
                f.write(f'{content}\nSplit brain: {side} hemisphere')
|
||
|
||
class ProcessHemisphere(Process):
    """Toy hemisphere-processing step: appends a trace line to a text 'image'.

    Reads the text file at ``input``, appends a marker line, and writes
    the result to ``output``.
    """
    input: field(type_=file())
    output: field(type_=file(), output=True)

    def execute(self):
        # Bug fix: original called self.read(), which does not exist on
        # Process — the open file handle must be read instead.
        with open(self.input) as f:
            content = f.read()
        content = f'{content}\nProcess hemisphere'
        with open(self.output, 'w') as f:
            f.write(content)
|
||
class TinyMorphologist(Pipeline):
    """Toy Morphologist-like pipeline.

    Chains bias correction, a selectable normalization step
    ('none', 'spm' or 'aims'), brain splitting, and one processing
    branch per hemisphere, exporting one output file per hemisphere.
    """

    def pipeline_definition(self):
        # Declare all processing nodes of the pipeline.
        self.add_process('nobias', BiasCorrection)
        # Switch node selecting which normalization branch feeds its
        # single 'output' plug: 'none', 'spm' or 'aims'.
        self.add_switch('normalization', ['none', 'spm', 'aims'], ['output'])
        self.add_process('spm_normalization', SPMNormalization)
        self.add_process('aims_normalization', AimsNormalization)
        self.add_process('split', SplitBrain)
        self.add_process('right_hemi', ProcessHemisphere)
        self.add_process('left_hemi', ProcessHemisphere)

        # 'none' branch: bias-corrected image is routed straight to the switch.
        self.add_link('nobias.output->normalization.none_switch_output')

        # 'spm' branch.
        self.add_link('nobias.output->spm_normalization.input')
        self.add_link('spm_normalization.output->normalization.spm_switch_output')

        # 'aims' branch.
        self.add_link('nobias.output->aims_normalization.input')
        self.add_link('aims_normalization.output->normalization.aims_switch_output')

        # The selected normalization result is split into hemispheres;
        # each hemisphere is processed independently and its result is
        # exported as a top-level pipeline output.
        self.add_link('normalization.output->split.input')
        self.add_link('split.right_output->right_hemi.input')
        self.export_parameter('right_hemi', 'output', 'right_hemisphere')
        self.add_link('split.left_output->left_hemi.input')
        self.export_parameter('left_hemi', 'output', 'left_hemisphere')
|
||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,103 @@ | ||
import json
import os
import shutil
import tempfile

from bv_use_cases import tiny_morphologist
from capsul.api import Capsul
|
||
# Fake subject identifiers used to populate the generated BIDS dataset.
subjects = (
    'aleksander', 'casimiro', 'christophorus', 'christy', 'conchobhar',
    'cornelia', 'dakila', 'demosthenes', 'devin', 'ferit',
    'gautam', 'hikmat', 'isbel', 'ivona', 'jordana',
    'justyn', 'katrina', 'lyda', 'melite', 'mina',
    'odalric', 'rainbow', 'rashn', 'shufen', 'simona',
    'svanhildur', 'thilini', 'til', 'vanessza', 'victoria',
)
|
||
# Create a temporary directory for the whole use case; it is removed in
# the finally clause below even if anything fails.
tmp = tempfile.mkdtemp()
try:
    # Create BIDS directory
    bids = f'{tmp}/bids'
    os.mkdir(bids)
    # Write Capsul specific information (declares the paths layout so
    # Capsul returns a BIDS-aware dataset object)
    with open(f'{bids}/capsul.json', 'w') as f:
        json.dump({
            'paths_layout': 'bids-1.6'
        }, f)

    # Create BrainVISA directory
    brainvisa = f'{tmp}/brainvisa'
    os.mkdir(brainvisa)
    # Write Capsul specific information
    with open(f'{brainvisa}/capsul.json', 'w') as f:
        json.dump({
            'paths_layout': 'brainvisa-6.0'
        }, f)

    # Generate fake T1 and T2 data in bids directory
    for subject in subjects:
        for session in ('m0', 'm12', 'm24'):
            for data_type in ('T1w', 'T2w'):
                # Renamed from `file` to avoid shadowing the builtin.
                path = (f'{bids}/rawdata/sub-{subject}/ses-{session}/anat/'
                        f'sub-{subject}_ses-{session}_{data_type}.nii')
                # exist_ok avoids the explicit os.path.exists check.
                os.makedirs(os.path.dirname(path), exist_ok=True)
                with open(path, 'w') as f:
                    print(f'{data_type} acquisition for subject {subject} acquired in session {session}', file=f)

    capsul = Capsul()
    # Input dataset is declared as following BIDS organization in capsul.json
    # therefore a BIDS specific object is returned
    input_dataset = capsul.dataset(bids)
    # Output dataset is declared as following BrainVISA organization in capsul.json
    # therefore a BrainVISA specific object is returned
    output_dataset = capsul.dataset(brainvisa)
    # Create a main pipeline that will contain all the morphologist pipelines
    # we want to execute
    processing_pipeline = capsul.custom_pipeline()
    # Parse the dataset with BIDS-specific query (here "suffix" is part
    # of BIDS specification). The object returned contains info for main
    # BIDS fields (sub, ses, acq, etc.)
    # Bug fix: the original iterated over the undefined name `dataset`;
    # the BIDS dataset created above is bound to `input_dataset`.
    for t1_mri in input_dataset.find(suffix='T1w'):
        # Create a TinyMorphologist pipeline
        tiny_morphologist = capsul.executable('bv_use_cases.tiny_morphologist.TinyMorphologist')
        # Set the input data
        tiny_morphologist.input = t1_mri.path
        # Complete outputs following BrainVISA organization
        # Make the link between BIDS metadata and BrainVISA metadata
        output_dataset.set_output_paths(tiny_morphologist,
            subject=t1_mri.sub,
            acquisition=t1_mri.acq,
        )
        # Add the current TinyMorphologist pipeline to the main
        # pipeline that will be executed.
        # Bug fix: the original called the undefined name
        # `custom_pipeline`; the main pipeline is `processing_pipeline`.
        processing_pipeline.add_executable(tiny_morphologist)
    # Finally execute all the TinyMorphologist instances
    capsul.run(processing_pipeline)
finally:
    # Always clean up the temporary directory, even on failure.
    shutil.rmtree(tmp)