Merge remote-tracking branch 'upstream/main' into AddGPExperimentScripts
oesteban committed Oct 1, 2024
2 parents ea93c4a + b1d9928 commit 3b326f7
Showing 14 changed files with 891 additions and 161 deletions.
77 changes: 48 additions & 29 deletions .github/workflows/test.yml
@@ -1,4 +1,4 @@
name: Tests
name: Unit and integration tests

on:
push:
@@ -19,6 +19,8 @@ defaults:
# Force tox and pytest to use color
env:
FORCE_COLOR: true
TEST_DATA_HOME: /home/runner/eddymotion-tests/
ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS: 4

concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@@ -28,42 +30,59 @@ permissions:
contents: read

jobs:
stable:
# Check each OS, all supported Python, minimum versions and latest releases
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: ['ubuntu-latest']
python-version: ['3.10', '3.11', '3.12']
dependencies: ['full', 'pre']
include:
- os: ubuntu-latest
python-version: '3.10'
dependencies: 'min'

env:
DEPENDS: ${{ matrix.dependencies }}
test:
runs-on: 'ubuntu-latest'

steps:
- uses: actions/checkout@v4
with:
submodules: recursive
fetch-depth: 0
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5

- uses: mamba-org/setup-micromamba
with:
environment-file: env.yml
init-shell: bash
cache-environment: true
cache-environment-key: environment-v1
cache-downloads: false
post-cleanup: 'none'
generate-run-shell: true
# https://github.com/mamba-org/setup-micromamba/issues/225
micromamba-version: 1.5.10-0
micromamba-binary-path: /home/runner/micromamba-bin-versioned/micromamba

- uses: actions/cache/restore@v4
with:
python-version: ${{ matrix.python-version }}
allow-prereleases: true
- name: Display Python version
run: python -c "import sys; print(sys.version)"
- name: Install tox
path: /home/runner/eddymotion-tests/
key: data-v0

- name: Get test data with DataLad
shell: micromamba-shell {0}
run: |
if [[ ! -d "${TEST_DATA_HOME}" ]]; then
datalad install -rg --source=https://gin.g-node.org/nipreps-data/tests-eddymotion.git ${TEST_DATA_HOME}
else
cd ${TEST_DATA_HOME}
datalad update --merge -r .
datalad get -r -J4 *
fi
- uses: actions/cache/save@v4
with:
path: /home/runner/eddymotion-tests/
key: data-v0

- name: Install editable
shell: micromamba-shell {0}
run: |
pip install -e .
- name: Run tests
shell: micromamba-shell {0}
run: |
python -m pip install --upgrade pip
python -m pip install tox tox-gh-actions
- name: Show tox config
run: tox c
- name: Run tox
run: tox -v --exit-and-dump-after 1200
pytest --doctest-modules --cov eddymotion -n auto -x --cov-report xml \
--junitxml=test-results.xml -v src test
- uses: codecov/codecov-action@v4
if: ${{ always() }}
with:
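The new job can be approximated outside CI with the commands it runs; a rough local sketch (not part of the workflow file), assuming DataLad, git-annex, and the env.yml environment are available:

# Fetch the test data once (same DataLad source as the workflow)
export TEST_DATA_HOME=$HOME/eddymotion-tests
datalad install -rg --source=https://gin.g-node.org/nipreps-data/tests-eddymotion.git "$TEST_DATA_HOME"

# Editable install and the same pytest invocation as the "Run tests" step
pip install -e .
pytest --doctest-modules --cov eddymotion -n auto -x --cov-report xml \
    --junitxml=test-results.xml -v src test
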
37 changes: 37 additions & 0 deletions env.yml
@@ -0,0 +1,37 @@
name: eddymotion
channels:
- conda-forge
# Update this ~yearly; last updated Jan 2024
dependencies:
- python=3.12
# Intel Math Kernel Library for numpy
- mkl=2023.2.0
- mkl-service=2.4.0
# git-annex for templateflow users with DataLad superdatasets
- git-annex=*=alldep*
# Workflow dependencies: ANTs
- ants=2.5
- pip
- pip:
- build
- coverage
- datalad
- dipy >= 1.3.0
- hatch
- hatchling
- h5py
- joblib
- nest-asyncio >= 1.5.1
- nipype >= 1.5.1, < 2.0
- nireports
- nitransforms >= 21, < 24
- numpy >= 1.17.3
- pytest
- pytest-cov
- pytest-env
- pytest-xdist
- scikit_learn >= 0.18
- scikit-image >= 0.14.2
- scipy >= 1.8.0
variables:
FSLOUTPUTTYPE: NIFTI_GZ
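
For local development, the same environment can be created from this file; a minimal sketch assuming micromamba is installed (conda/mamba work analogously):

# Create and activate the "eddymotion" environment declared in env.yml
micromamba create -y -f env.yml
micromamba activate eddymotion
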
11 changes: 7 additions & 4 deletions pyproject.toml
@@ -66,8 +66,6 @@ plotting = ["nilearn"]

resmon = ["psutil >=5.4"]

popylar = ["popylar >= 0.2"]

test = [
"coverage",
"pytest >= 4.4",
@@ -76,10 +74,15 @@ test = [
"pytest-xdist >= 1.28"
]

antsopt = [
"ConfigSpace",
"smac",
]

# Aliases
docs = ["eddymotion[doc]"]
tests = ["eddymotion[test]"]
all = ["eddymotion[doc,test,dev,plotting,resmon,popylar]"]
all = ["eddymotion[doc,test,dev,plotting,resmon,antsopt]"]

[project.scripts]
eddymotion = "eddymotion.cli.run:main"
@@ -164,7 +167,7 @@ known-first-party=["eddymotion"]
pythonpath = "src/ test/"
norecursedirs = [".*", "_*"]
addopts = "-v --doctest-modules"
doctest_optionflags = "ALLOW_UNICODE NORMALIZE_WHITESPACE"
doctest_optionflags = "ALLOW_UNICODE NORMALIZE_WHITESPACE ELLIPSIS"
env = "PYTHONHASHSEED=0"
filterwarnings = ["ignore::DeprecationWarning"]

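The new antsopt extra groups the dependencies of the optimization script added below; an illustrative install command for a development checkout:

# Pull in ConfigSpace and smac through the "antsopt" extra
pip install -e ".[antsopt]"
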
222 changes: 222 additions & 0 deletions scripts/optimize_registration.py
@@ -0,0 +1,222 @@
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2024 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Optimize ANTs' configurations."""

import asyncio
import logging
from os import getenv
from pathlib import Path
from shutil import rmtree

import nibabel as nb
import nitransforms as nt
import numpy as np
from ConfigSpace import Configuration, ConfigurationSpace
from smac import HyperparameterOptimizationFacade, Scenario
from smac.utils.configspace import get_config_hash

from eddymotion.registration import ants as erants
from eddymotion.registration import utils

logger = logging.getLogger("ants-optimization")

# When inside ipython / jupyter
# import nest_asyncio
# nest_asyncio.apply()

TIME_PENALTY_WEIGHT = 0.1
SEED = 2390232

## Generate config dictionary
configdict = {
# "convergence_threshold": (1e-5, 1e-6),
# "winsorize_lower_quantile": (0.001, 0.2),
# "winsorize_upper_quantile": (0.9, 0.999),
"transform_parameters": (0.01, 2.0),
"smoothing_sigmas": (0.0, 1.0),
"shrink_factors": (1, 2),
"radius_or_number_of_bins": (3, 5),
"sampling_percentage": (0.1, 0.4),
# "metric": ["GC"],
"sampling_strategy": ["Random", "Regular"],
}
paramspace = ConfigurationSpace(configdict)


async def ants(cmd, cwd, stdout, stderr, semaphore):
async with semaphore:
proc = await asyncio.create_subprocess_shell(
cmd,
cwd=cwd,
stdout=stdout,
stderr=stderr,
)
returncode = await proc.wait()
return returncode


DATASET_PATH = Path(getenv("TEST_DATA_HOME", f"{getenv('HOME')}/.cache/eddymotion-tests"))

WORKDIR = Path.home() / "tmp" / "eddymotiondev"
WORKDIR.mkdir(parents=True, exist_ok=True)

EXPERIMENTDIR = WORKDIR / "smac"
if EXPERIMENTDIR.exists():
rmtree(EXPERIMENTDIR, ignore_errors=True)

EXPERIMENTDIR.mkdir(parents=True, exist_ok=True)

rng = np.random.default_rng(SEED)
MOTION_PARAMETERS = np.hstack(
(rng.uniform(-0.4, 0.4, size=(60, 3)), rng.uniform(-2.0, 2.0, size=(60, 3)))
)
CONVERSIONS = [
nb.affines.from_matvec(nb.eulerangles.euler2mat(*parameters[:3]), parameters[3:])
for parameters in MOTION_PARAMETERS
]

REFERENCES = (
DATASET_PATH / "dwi-b0_desc-avg.nii.gz",
DATASET_PATH / "hcph-b0_desc-avg.nii.gz",
)


async def train_coro(
config: Configuration,
seed: int = 0,
verbose: bool = False,
) -> float:
tmp_folder = EXPERIMENTDIR / get_config_hash(config)
tmp_folder.mkdir(parents=True, exist_ok=True)
align_kwargs = {k: config[k] for k in configdict.keys()}

ref_xfms = []
tasks = []
semaphore = asyncio.Semaphore(18)
nconv = len(CONVERSIONS)
for i, T in enumerate(CONVERSIONS):
for j in (0, 1):
fixed_path = REFERENCES[j]
brainmask_path = DATASET_PATH / fixed_path.name.replace("desc-avg", "desc-brain_mask")
refnii = nb.load(fixed_path)
xfm = nt.linear.Affine(T, reference=refnii)
ref_xfms.append(xfm)

index = i * len(REFERENCES) + j
moving_path = tmp_folder / f"test-{index:04d}.nii.gz"
(~xfm).apply(refnii, reference=refnii).to_filename(moving_path)

cmdline = erants.generate_command(
fixed_path,
moving_path,
fixedmask_path=brainmask_path,
output_transform_prefix=f"conversion-{index:04d}",
**align_kwargs,
)

tasks.append(
ants(
cmdline,
cwd=str(tmp_folder),
stdout=Path(tmp_folder / f"ants-{index:04d}.out").open("w+"),
stderr=Path(tmp_folder / f"ants-{index:04d}.err").open("w+"),
semaphore=semaphore,
)
)

results = await asyncio.gather(*tasks, return_exceptions=True)

diff = []
times = []
start = []
for i, r in enumerate(results):
if r:
return 1e6

j = i % 2
fixed_path = REFERENCES[j]
brainmask_path = DATASET_PATH / fixed_path.name.replace("desc-avg", "desc-brain_mask")

fixednii = nb.load(fixed_path)
movingnii = nb.load(tmp_folder / f"test-{i:04d}.nii.gz")
xform = nt.linear.Affine(
nt.io.itk.ITKLinearTransform.from_filename(
tmp_folder / f"conversion-{i:04d}0GenericAffine.mat"
).to_ras(
reference=fixednii,
moving=movingnii,
),
)

masknii = nb.load(brainmask_path)
initial_error = utils.displacements_within_mask(
masknii,
ref_xfms[i],
)

disps = utils.displacements_within_mask(
masknii,
xform,
ref_xfms[i],
)
diff.append(np.percentile(disps, 95))
start.append(np.percentile(initial_error, 95))

# Parse log -- Total elapsed time: 1.0047e+00
for line in reversed(Path(tmp_folder / f"ants-{i:04d}.out").read_text().splitlines()):
if line.strip().startswith("Total elapsed time:"):
times.append(float(line.strip().split(" ")[-1]))

meandiff = np.mean(diff)
meantime = np.mean(times)
error = ((1.0 - TIME_PENALTY_WEIGHT) * meandiff + TIME_PENALTY_WEIGHT * meantime) / np.mean(
start
)

logger.info(
f"Normalized objective ({nconv} it.): {error:0.3f} "
f"({meandiff:0.2f} mm | {meantime:0.2f} s). "
f"Avg. p95 initial error: {np.mean(start):0.2f} mm."
)
if verbose:
logger.info(f"\n\nParameters:\n{align_kwargs}" f"\n\nConversions folder: {tmp_folder}.")

return error


def train(config: Configuration, seed: int = 0) -> float:
loop = asyncio.get_event_loop()
return loop.run_until_complete(train_coro(config, seed))


# Scenario object specifying the optimization environment
scenario = Scenario(paramspace, n_trials=200)

# Use SMAC to find the best configuration/hyperparameters
smac = HyperparameterOptimizationFacade(scenario, train)
incumbent = smac.optimize()

print(incumbent)

loop = asyncio.get_event_loop()
loop.run_until_complete(train_coro(incumbent, verbose=True))
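
The script defines no command-line interface; it reads the dataset location from TEST_DATA_HOME (defaulting to ~/.cache/eddymotion-tests) and launches the 200 SMAC trials at module level, so an illustrative invocation would be:

# Point the optimization at the DataLad test data and run it
export TEST_DATA_HOME=$HOME/.cache/eddymotion-tests
python scripts/optimize_registration.py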