From b0e624bf1d953a9eb1c652c910d26eb72ea8bd34 Mon Sep 17 00:00:00 2001 From: NiPreps Bot Date: Wed, 30 Oct 2024 02:09:04 +0000 Subject: [PATCH] docs(main): Update docs of development line --- main/.buildinfo | 2 +- main/_modules/eddymotion/cli/parser.html | 11 +- main/_modules/eddymotion/cli/run.html | 11 +- main/_modules/eddymotion/data/dmri.html | 11 +- main/_modules/eddymotion/data/filtering.html | 11 +- main/_modules/eddymotion/data/pet.html | 11 +- main/_modules/eddymotion/data/splitting.html | 11 +- main/_modules/eddymotion/data/utils.html | 11 +- main/_modules/eddymotion/estimator.html | 13 +- main/_modules/eddymotion/exceptions.html | 11 +- main/_modules/eddymotion/math/utils.html | 11 +- main/_modules/eddymotion/model/base.html | 11 +- main/_modules/eddymotion/model/dmri.html | 11 +- main/_modules/eddymotion/model/gpr.html | 955 ++++++++++++++++++ main/_modules/eddymotion/model/pet.html | 11 +- .../eddymotion/registration/ants.html | 11 +- .../eddymotion/registration/utils.html | 11 +- .../eddymotion/testing/simulations.html | 11 +- main/_modules/eddymotion/utils.html | 11 +- main/_modules/eddymotion/viz/signals.html | 11 +- main/_modules/index.html | 12 +- .../_sources/api/eddymotion.model.gpr.rst.txt | 7 + main/_sources/api/eddymotion.model.rst.txt | 1 + main/_static/documentation_options.js | 2 +- main/api/eddymotion.cli.html | 11 +- main/api/eddymotion.cli.parser.html | 11 +- main/api/eddymotion.cli.run.html | 11 +- main/api/eddymotion.data.dmri.html | 11 +- main/api/eddymotion.data.filtering.html | 11 +- main/api/eddymotion.data.html | 11 +- main/api/eddymotion.data.pet.html | 11 +- main/api/eddymotion.data.splitting.html | 11 +- main/api/eddymotion.data.utils.html | 11 +- main/api/eddymotion.estimator.html | 11 +- main/api/eddymotion.exceptions.html | 11 +- main/api/eddymotion.math.html | 11 +- main/api/eddymotion.math.utils.html | 11 +- main/api/eddymotion.model.base.html | 11 +- main/api/eddymotion.model.dmri.html | 17 +- main/api/eddymotion.model.gpr.html | 741 ++++++++++++++ main/api/eddymotion.model.html | 12 +- main/api/eddymotion.model.pet.html | 17 +- main/api/eddymotion.registration.ants.html | 11 +- main/api/eddymotion.registration.html | 11 +- main/api/eddymotion.registration.utils.html | 11 +- main/api/eddymotion.testing.html | 11 +- main/api/eddymotion.testing.simulations.html | 23 +- main/api/eddymotion.utils.html | 11 +- main/api/eddymotion.viz.html | 11 +- main/api/eddymotion.viz.signals.html | 13 +- main/changes.html | 11 +- main/developers.html | 39 +- main/genindex.html | 114 ++- main/index.html | 11 +- main/installation.html | 11 +- main/links.html | 11 +- main/objects.inv | Bin 2151 -> 2421 bytes main/py-modindex.html | 17 +- main/running.html | 11 +- main/search.html | 11 +- main/searchindex.js | 2 +- main/usage.html | 11 +- 62 files changed, 2171 insertions(+), 300 deletions(-) create mode 100644 main/_modules/eddymotion/model/gpr.html create mode 100644 main/_sources/api/eddymotion.model.gpr.rst.txt create mode 100644 main/api/eddymotion.model.gpr.html diff --git a/main/.buildinfo b/main/.buildinfo index 590a8b6f..c2f187f0 100644 --- a/main/.buildinfo +++ b/main/.buildinfo @@ -1,4 +1,4 @@ # Sphinx build info version 1 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
-config: b653a8fbb9dbb05add28ed0cd449ad69 +config: 05dbe88b606d77b21e3f378bdfa07057 tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/main/_modules/eddymotion/cli/parser.html b/main/_modules/eddymotion/cli/parser.html index e3b5e8a1..a10edc26 100644 --- a/main/_modules/eddymotion/cli/parser.html +++ b/main/_modules/eddymotion/cli/parser.html @@ -5,7 +5,7 @@ - eddymotion.cli.parser - eddymotion 24.0.0.dev207 documentation + eddymotion.cli.parser - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
-
eddymotion 24.0.0.dev207 documentation
+
eddymotion 24.0.0.dev211 documentation
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/cli/run.html b/main/_modules/eddymotion/cli/run.html index c4ec9ff1..d157ca65 100644 --- a/main/_modules/eddymotion/cli/run.html +++ b/main/_modules/eddymotion/cli/run.html @@ -5,7 +5,7 @@ - eddymotion.cli.run - eddymotion 24.0.0.dev207 documentation + eddymotion.cli.run - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/data/dmri.html b/main/_modules/eddymotion/data/dmri.html index efe9bedf..833d4c63 100644 --- a/main/_modules/eddymotion/data/dmri.html +++ b/main/_modules/eddymotion/data/dmri.html @@ -5,7 +5,7 @@ - eddymotion.data.dmri - eddymotion 24.0.0.dev207 documentation + eddymotion.data.dmri - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/data/filtering.html b/main/_modules/eddymotion/data/filtering.html index 8442738f..707745ca 100644 --- a/main/_modules/eddymotion/data/filtering.html +++ b/main/_modules/eddymotion/data/filtering.html @@ -5,7 +5,7 @@ - eddymotion.data.filtering - eddymotion 24.0.0.dev207 documentation + eddymotion.data.filtering - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/data/pet.html b/main/_modules/eddymotion/data/pet.html index 22740745..cd323acd 100644 --- a/main/_modules/eddymotion/data/pet.html +++ b/main/_modules/eddymotion/data/pet.html @@ -5,7 +5,7 @@ - eddymotion.data.pet - eddymotion 24.0.0.dev207 documentation + eddymotion.data.pet - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/data/splitting.html b/main/_modules/eddymotion/data/splitting.html index 8d438f8b..d77a448b 100644 --- a/main/_modules/eddymotion/data/splitting.html +++ b/main/_modules/eddymotion/data/splitting.html @@ -5,7 +5,7 @@ - eddymotion.data.splitting - eddymotion 24.0.0.dev207 documentation + eddymotion.data.splitting - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/data/utils.html b/main/_modules/eddymotion/data/utils.html index b1c59c3f..f290927c 100644 --- a/main/_modules/eddymotion/data/utils.html +++ b/main/_modules/eddymotion/data/utils.html @@ -5,7 +5,7 @@ - eddymotion.data.utils - eddymotion 24.0.0.dev207 documentation + eddymotion.data.utils - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/estimator.html b/main/_modules/eddymotion/estimator.html index 56a7d998..b2ac900b 100644 --- a/main/_modules/eddymotion/estimator.html +++ b/main/_modules/eddymotion/estimator.html @@ -5,7 +5,7 @@ - eddymotion.estimator - eddymotion 24.0.0.dev207 documentation + eddymotion.estimator - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/math/utils.html b/main/_modules/eddymotion/math/utils.html index 49ff16d2..aafe20a4 100644 --- a/main/_modules/eddymotion/math/utils.html +++ b/main/_modules/eddymotion/math/utils.html @@ -5,7 +5,7 @@ - eddymotion.math.utils - eddymotion 24.0.0.dev207 documentation + eddymotion.math.utils - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/model/base.html b/main/_modules/eddymotion/model/base.html index 3a3f294d..43fcf6a3 100644 --- a/main/_modules/eddymotion/model/base.html +++ b/main/_modules/eddymotion/model/base.html @@ -5,7 +5,7 @@ - eddymotion.model.base - eddymotion 24.0.0.dev207 documentation + eddymotion.model.base - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/model/dmri.html b/main/_modules/eddymotion/model/dmri.html index 0c04b08c..3180dee9 100644 --- a/main/_modules/eddymotion/model/dmri.html +++ b/main/_modules/eddymotion/model/dmri.html @@ -5,7 +5,7 @@ - eddymotion.model.dmri - eddymotion 24.0.0.dev207 documentation + eddymotion.model.dmri - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/model/gpr.html b/main/_modules/eddymotion/model/gpr.html new file mode 100644 index 00000000..e4049eeb --- /dev/null +++ b/main/_modules/eddymotion/model/gpr.html @@ -0,0 +1,955 @@ + + + + + + + + eddymotion.model.gpr - eddymotion 24.0.0.dev211 documentation + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+
+ +
+ +
+
+

Source code for eddymotion.model.gpr

+# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
+# vi: set ft=python sts=4 ts=4 sw=4 et:
+#
+# Copyright The NiPreps Developers <nipreps@gmail.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# We support and encourage derived works from this project, please read
+# about our expectations at
+#
+#     https://www.nipreps.org/community/licensing/
+#
+"""Derivations from scikit-learn for Gaussian Processes."""
+
+from __future__ import annotations
+
+from numbers import Integral, Real
+from typing import Callable, Mapping, Sequence
+
+import numpy as np
+from scipy import optimize
+from scipy.optimize._minimize import Bounds
+from sklearn.gaussian_process import GaussianProcessRegressor
+from sklearn.gaussian_process.kernels import (
+    Hyperparameter,
+    Kernel,
+)
+from sklearn.metrics.pairwise import cosine_similarity
+from sklearn.utils._param_validation import Interval, StrOptions
+
+BOUNDS_A: tuple[float, float] = (0.1, 2.35)
+"""The limits for the parameter *a* (angular distance in rad)."""
+BOUNDS_LAMBDA: tuple[float, float] = (1e-3, 1000)
+"""The limits for the parameter λ (signal scaling factor)."""
+THETA_EPSILON: float = 1e-5
+"""Minimum nonzero angle."""
+LBFGS_CONFIGURABLE_OPTIONS = {"disp", "maxiter", "ftol", "gtol"}
+"""The set of extended options that can be set on the default BFGS."""
+CONFIGURABLE_OPTIONS: Mapping[str, set] = {
+    "Nelder-Mead": {"disp", "maxiter", "adaptive", "fatol"},
+    "CG": {"disp", "maxiter", "gtol"},
+}
+"""
+A mapping from optimizer names to the option set they allow.
+
+Add new optimizers to this mapping, including what options may be
+configured.
+"""
+NONGRADIENT_METHODS = {"Nelder-Mead"}
+"""A set of gradients that do not allow analytical gradients."""
+SUPPORTED_OPTIMIZERS = set(CONFIGURABLE_OPTIONS.keys()) | {"fmin_l_bfgs_b"}
+"""A set of supported optimizers (automatically created)."""
+
+
+
+[docs] +class EddyMotionGPR(GaussianProcessRegressor): + r""" + A GP regressor specialized for eddymotion. + + This specialization of the default GP regressor is created to allow + the following extended behaviors: + + * Pacify Scikit-learn's estimator parameter checker to allow optimizers + given by name (as a string) other than the default BFGS. + * Enable custom options of optimizers. + See :obj:`~scipy.optimize.minimize` for the available options. + Please note that only a few of them are currently supported. + + In the future, this specialization would be the right place for hyperparameter + optimization using cross-validation and such. + + In principle, Scikit-Learn's implementation normalizes the training data + as in [Andersson15]_ (see + `FSL's souce code <https://git.fmrib.ox.ac.uk/fsl/eddy/-/blob/2480dda293d4cec83014454db3a193b87921f6b0/DiffusionGP.cpp#L218>`__). + From their paper (p. 167, end of first column): + + Typically one just substracts the mean (:math:`\bar{\mathbf{f}}`) + from :math:`\mathbf{f}` and then add it back to + :math:`f^{*}`, which is analogous to what is often done in + "traditional" regression. + + Finally, the parameter :math:`\sigma^2` maps on to Scikit-learn's ``alpha`` + of the regressor. + Because it is not a parameter of the kernel, hyperparameter selection + through gradient-descent with analytical gradient calculations + would not work (the derivative of the kernel w.r.t. alpha is zero). + + I believe this is overlooked in [Andersson15]_, or they actually did not + use analytical gradient-descent: + + *A note on optimisation* + + It is suggested, for example in Rasmussen and Williams (2006), that + an optimisation method that uses derivative information should be + used when finding the hyperparameters that maximise Eq. (12). + The reason for that is that such methods typically use fewer steps, and + when the cost of calculating the derivatives is small/moderate compared + to calculating the functions itself (as is the case for Eq. (12)) then + execution time can be much shorter. + However, we found that for the multi-shell case a heuristic optimisation + method such as the Nelder-Mead simplex method (Nelder and Mead, 1965) was + frequently better at avoiding local maxima. + Hence, that was the method we used for all optimisations in the present + paper. + + **Multi-shell regression (TODO).** + For multi-shell modeling, the kernel :math:`k(\textbf{x}, \textbf{x'})` + is updated following Eq. (14) in [Andersson15]_. + + .. math:: + k(\textbf{x}, \textbf{x'}) = C_{\theta}(\mathbf{g}, \mathbf{g'}; a) C_{b}(|b - b'|; \ell) + + and :math:`C_{b}` is based the log of the b-values ratio, a measure of distance along the + b-direction, according to Eq. (15) given by: + + .. math:: + C_{b}(b, b'; \ell) = \exp\left( - \frac{(\log b - \log b')^2}{2 \ell^2} \right), + + :math:`b` and :math:`b'` being the b-values, and :math:`\mathbf{g}` and + :math:`\mathbf{g'}` the unit diffusion-encoding gradient unit vectors of the + shells; and :math:`{a, \ell}` some hyperparameters. + + The full GP regression kernel :math:`\mathbf{K}` is then updated for a 2-shell case as + follows (Eq. (16) in [Andersson15]_): + + .. 
math:: + \begin{equation} + \mathbf{K} = \left[ + \begin{matrix} + \lambda C_{\theta}(\theta (\mathbf{G}_{1}); a) + \sigma_{1}^{2} \mathbf{I} & + \lambda C_{\theta}(\theta (\mathbf{G}_{2}, \mathbf{G}_{1}); a) C_{b}(b_{2}, b_{1}; \ell) \\ + \lambda C_{\theta}(\theta (\mathbf{G}_{1}, \mathbf{G}_{2}); a) C_{b}(b_{1}, b_{2}; \ell) & + \lambda C_{\theta}(\theta (\mathbf{G}_{2}); a) + \sigma_{2}^{2} \mathbf{I} \\ + \end{matrix} + \right] + \end{equation} + + References + ---------- + .. [Andersson15] J. L. R. Andersson. et al., An integrated approach to + correction for off-resonance effects and subject movement in diffusion MR + imaging, NeuroImage 125 (2016) 1063-11078 + + """ + + _parameter_constraints: dict = { + "kernel": [None, Kernel], + "alpha": [Interval(Real, 0, None, closed="left"), np.ndarray], + "optimizer": [StrOptions(SUPPORTED_OPTIMIZERS), callable, None], + "n_restarts_optimizer": [Interval(Integral, 0, None, closed="left")], + "copy_X_train": ["boolean"], + "normalize_y": ["boolean"], + "n_targets": [Interval(Integral, 1, None, closed="left"), None], + "random_state": ["random_state"], + } + + def __init__( + self, + kernel: Kernel | None = None, + *, + alpha: float = 0.5, + optimizer: str | Callable | None = "fmin_l_bfgs_b", + n_restarts_optimizer: int = 0, + copy_X_train: bool = True, + normalize_y: bool = True, + n_targets: int | None = None, + random_state: int | None = None, + eval_gradient: bool = True, + tol: float | None = None, + disp: bool | int | None = None, + maxiter: int | None = None, + ftol: float | None = None, + gtol: float | None = None, + adaptive: bool | int | None = None, + fatol: float | None = None, + ): + super().__init__( + kernel, + alpha=alpha, + optimizer=optimizer, + n_restarts_optimizer=n_restarts_optimizer, + normalize_y=normalize_y, + copy_X_train=copy_X_train, + n_targets=n_targets, + random_state=random_state, + ) + + self.tol = tol + self.eval_gradient = eval_gradient if optimizer not in NONGRADIENT_METHODS else False + self.maxiter = maxiter + self.disp = disp + self.ftol = ftol + self.gtol = gtol + self.adaptive = adaptive + self.fatol = fatol + + def _constrained_optimization( + self, + obj_func: Callable, + initial_theta: np.ndarray, + bounds: Sequence[tuple[float, float]] | Bounds, + ) -> tuple[float, float]: + options = {} + if self.optimizer == "fmin_l_bfgs_b": + from sklearn.utils.optimize import _check_optimize_result + + for name in LBFGS_CONFIGURABLE_OPTIONS: + if (value := getattr(self, name, None)) is not None: + options[name] = value + + opt_res = optimize.minimize( + obj_func, + initial_theta, + method="L-BFGS-B", + bounds=bounds, + jac=self.eval_gradient, + options=options, + args=(self.eval_gradient,), + tol=self.tol, + ) + _check_optimize_result("lbfgs", opt_res) + return opt_res.x, opt_res.fun + + if isinstance(self.optimizer, str) and self.optimizer in CONFIGURABLE_OPTIONS: + for name in CONFIGURABLE_OPTIONS[self.optimizer]: + if (value := getattr(self, name, None)) is not None: + options[name] = value + + opt_res = optimize.minimize( + obj_func, + initial_theta, + method=self.optimizer, + bounds=bounds, + jac=self.eval_gradient, + options=options, + args=(self.eval_gradient,), + tol=self.tol, + ) + return opt_res.x, opt_res.fun + + if callable(self.optimizer): + return self.optimizer(obj_func, initial_theta, bounds=bounds) + + raise ValueError(f"Unknown optimizer {self.optimizer}.")
+ + + +
+[docs] +class ExponentialKriging(Kernel): + """A scikit-learn's kernel for DWI signals.""" + + def __init__( + self, + beta_a: float = 0.01, + beta_l: float = 2.0, + a_bounds: tuple[float, float] = BOUNDS_A, + l_bounds: tuple[float, float] = BOUNDS_LAMBDA, + ): + r""" + Initialize an exponential Kriging kernel. + + Parameters + ---------- + beta_a : :obj:`float`, optional + Minimum angle in rads. + beta_l : :obj:`float`, optional + The :math:`\lambda` hyperparameter. + a_bounds : :obj:`tuple`, optional + Bounds for the a parameter. + l_bounds : :obj:`tuple`, optional + Bounds for the :math:`\lambda` hyperparameter. + + """ + self.beta_a = beta_a + self.beta_l = beta_l + self.a_bounds = a_bounds + self.l_bounds = l_bounds + + @property + def hyperparameter_a(self) -> Hyperparameter: + return Hyperparameter("beta_a", "numeric", self.a_bounds) + + @property + def hyperparameter_beta_l(self) -> Hyperparameter: + return Hyperparameter("beta_l", "numeric", self.l_bounds) + + def __call__( + self, X: np.ndarray, Y: np.ndarray | None = None, eval_gradient: bool = False + ) -> np.ndarray | tuple[np.ndarray, np.ndarray]: + """ + Return the kernel K(X, Y) and optionally its gradient. + + Parameters + ---------- + X : :obj:`~numpy.ndarray` + Gradient table (X) + Y : :obj:`~numpy.ndarray`, optional + Gradient table (Y, optional) + eval_gradient : :obj:`bool`, optional + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is ``None``. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when `eval_gradient` + is True. + + """ + thetas = compute_pairwise_angles(X, Y) + C_theta = exponential_covariance(thetas, self.beta_a) + + if not eval_gradient: + return self.beta_l * C_theta + + K_gradient = np.zeros((*thetas.shape, 2)) + K_gradient[..., 0] = self.beta_l * C_theta * thetas / self.beta_a**2 # Derivative w.r.t. a + K_gradient[..., 1] = C_theta + + return self.beta_l * C_theta, K_gradient + +
+[docs] + def diag(self, X: np.ndarray) -> np.ndarray: + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return self.beta_l * np.ones(X.shape[0])
+ + +
+[docs] + def is_stationary(self) -> bool: + """Returns whether the kernel is stationary.""" + return True
+ + + def __repr__(self) -> str: + return f"ExponentialKriging (a={self.beta_a}, λ={self.beta_l})"
+ + + +
+[docs] +class SphericalKriging(Kernel): + """A scikit-learn's kernel for DWI signals.""" + + def __init__( + self, + beta_a: float = 1.38, + beta_l: float = 0.5, + a_bounds: tuple[float, float] = BOUNDS_A, + l_bounds: tuple[float, float] = BOUNDS_LAMBDA, + ): + r""" + Initialize a spherical Kriging kernel. + + Parameters + ---------- + beta_a : :obj:`float`, optional + Minimum angle in rads. + beta_l : :obj:`float`, optional + The :math:`\lambda` hyperparameter. + a_bounds : :obj:`tuple`, optional + Bounds for the ``a`` parameter. + l_bounds : :obj:`tuple`, optional + Bounds for the :math:`\lambda` hyperparameter. + + """ + self.beta_a = beta_a + self.beta_l = beta_l + self.a_bounds = a_bounds + self.l_bounds = l_bounds + + @property + def hyperparameter_a(self) -> Hyperparameter: + return Hyperparameter("beta_a", "numeric", self.a_bounds) + + @property + def hyperparameter_beta_l(self) -> Hyperparameter: + return Hyperparameter("beta_l", "numeric", self.l_bounds) + + def __call__( + self, X: np.ndarray, Y: np.ndarray | None = None, eval_gradient: bool = False + ) -> np.ndarray | tuple[np.ndarray, np.ndarray]: + """ + Return the kernel K(X, Y) and optionally its gradient. + + Parameters + ---------- + X : :obj:`~numpy.ndarray` + Gradient table (X) + Y : :obj:`~numpy.ndarray`, optional + Gradient table (Y, optional) + eval_gradient : :obj:`bool`, optional + Determines whether the gradient with respect to the log of + the kernel hyperparameter is computed. + Only supported when Y is ``None``. + + Returns + ------- + K : ndarray of shape (n_samples_X, n_samples_Y) + Kernel k(X, Y) + + K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + optional + The gradient of the kernel k(X, X) with respect to the log of the + hyperparameter of the kernel. Only returned when ``eval_gradient`` + is True. + + """ + thetas = compute_pairwise_angles(X, Y) + C_theta = spherical_covariance(thetas, self.beta_a) + + if not eval_gradient: + return self.beta_l * C_theta + + deriv_a = np.zeros_like(thetas) + nonzero = thetas <= self.beta_a + deriv_a[nonzero] = ( + 1.5 + * self.beta_l + * (thetas[nonzero] / self.beta_a**2 - thetas[nonzero] ** 3 / self.beta_a**4) + ) + K_gradient = np.dstack((deriv_a, C_theta)) + + return self.beta_l * C_theta, K_gradient + +
+[docs] + def diag(self, X: np.ndarray) -> np.ndarray: + """Returns the diagonal of the kernel k(X, X). + + The result of this method is identical to np.diag(self(X)); however, + it can be evaluated more efficiently since only the diagonal is + evaluated. + + Parameters + ---------- + X : ndarray of shape (n_samples_X, n_features) + Left argument of the returned kernel k(X, Y) + + Returns + ------- + K_diag : ndarray of shape (n_samples_X,) + Diagonal of kernel k(X, X) + """ + return self.beta_l * np.ones(X.shape[0])
+ + +
+[docs] + def is_stationary(self) -> bool: + """Returns whether the kernel is stationary.""" + return True
+ + + def __repr__(self) -> str: + return f"SphericalKriging (a={self.beta_a}, λ={self.beta_l})"
+ + + +
+[docs] +def exponential_covariance(theta: np.ndarray, a: float) -> np.ndarray: + r""" + Compute the exponential covariance for given distances and scale parameter. + + Implements :math:`C_{\theta}`, following Eq. (9) in [Andersson15]_: + + .. math:: + \begin{equation} + C(\theta) = e^{-\theta/a} \,\, \text{for} \, 0 \leq \theta \leq \pi, + \end{equation} + + :math:`\theta` being computed as: + + .. math:: + \theta(\mathbf{g}, \mathbf{g'}) = \arccos(|\langle \mathbf{g}, \mathbf{g'} \rangle|) + + Parameters + ---------- + theta : :obj:`~numpy.ndarray` + Array of distances between points. + a : :obj:`float` + Scale parameter that controls the range of the covariance function. + + Returns + ------- + :obj:`~numpy.ndarray` + Exponential covariance values for the input distances. + + """ + return np.exp(-theta / a)
+ + + +
+[docs] +def spherical_covariance(theta: np.ndarray, a: float) -> np.ndarray: + r""" + Compute the spherical covariance for given distances and scale parameter. + + Implements :math:`C_{\theta}`, following Eq. (10) in [Andersson15]_: + + .. math:: + \begin{equation} + C(\theta) = + \begin{cases} + 1 - \frac{3 \theta}{2 a} + \frac{\theta^3}{2 a^3} & \textnormal{if} \; \theta \leq a \\ + 0 & \textnormal{if} \; \theta > a + \end{cases} + \end{equation} + + :math:`\theta` being computed as: + + .. math:: + \theta(\mathbf{g}, \mathbf{g'}) = \arccos(|\langle \mathbf{g}, \mathbf{g'} \rangle|) + + Parameters + ---------- + theta : :obj:`~numpy.ndarray` + Array of distances between points. + a : :obj:`float` + Scale parameter that controls the range of the covariance function. + + Returns + ------- + :obj:`~numpy.ndarray` + Spherical covariance values for the input distances. + + """ + return np.where(theta <= a, 1 - 1.5 * theta / a + 0.5 * (theta**3) / (a**3), 0.0)
+ + + +
+[docs] +def compute_pairwise_angles( + X: np.ndarray, + Y: np.ndarray | None = None, + closest_polarity: bool = True, + dense_output: bool = True, +) -> np.ndarray: + r"""Compute pairwise angles across diffusion gradient encoding directions. + + Following [Andersson15]_, it computes the smallest of the angles between + each pair if ``closest_polarity`` is ``True``, i.e., + + .. math:: + + \theta(\mathbf{g}, \mathbf{g'}) = \arccos(|\langle \mathbf{g}, \mathbf{g'} \rangle|) + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples_X, n_features) + Input data. + Y : {array-like, sparse matrix} of shape (n_samples_Y, n_features), optional + Input data. If ``None``, the output will be the pairwise + similarities between all samples in ``X``. + dense_output : :obj:`bool`, optional + Whether to return dense output even when the input is sparse. If + ``False``, the output is sparse if both input arrays are sparse. + closest_polarity : :obj:`bool`, optional + ``True`` to consider the smallest of the two angles between the crossing + lines resulting from reversing each vector pair. + + Returns + ------- + :obj:`~numpy.ndarray` + Pairwise angles across diffusion gradient encoding directions. + + Examples + -------- + >>> X = np.asarray([(1.0, -1.0), (0.0, 0.0), (0.0, 0.0)]).T + >>> compute_pairwise_angles(X, closest_polarity=False)[0, 1] # doctest: +ELLIPSIS + 3.1415... + >>> X = np.asarray([(1.0, -1.0), (0.0, 0.0), (0.0, 0.0)]).T + >>> Y = np.asarray([(1.0, -1.0), (0.0, 0.0), (0.0, 0.0)]).T + >>> compute_pairwise_angles(X, Y, closest_polarity=False)[0, 1] # doctest: +ELLIPSIS + 3.1415... + >>> X = np.asarray([(1.0, -1.0), (0.0, 0.0), (0.0, 0.0)]).T + >>> compute_pairwise_angles(X)[0, 1] + 0.0 + + """ + + cosines = np.clip(cosine_similarity(X, Y, dense_output=dense_output), -1.0, 1.0) + thetas = np.arccos(np.abs(cosines)) if closest_polarity else np.arccos(cosines) + thetas[np.abs(thetas) < THETA_EPSILON] = 0.0 + return thetas
+ +
+
+
+
+ + +
+
+ + Made with Sphinx and @pradyunsg's + + Furo + +
+
+ +
+
+ +
+
+ +
+
+ + + + + + \ No newline at end of file diff --git a/main/_modules/eddymotion/model/pet.html b/main/_modules/eddymotion/model/pet.html index a7f8dbcb..2ccb7b1a 100644 --- a/main/_modules/eddymotion/model/pet.html +++ b/main/_modules/eddymotion/model/pet.html @@ -5,7 +5,7 @@ - eddymotion.model.pet - eddymotion 24.0.0.dev207 documentation + eddymotion.model.pet - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/registration/ants.html b/main/_modules/eddymotion/registration/ants.html index 92907e49..546f0b57 100644 --- a/main/_modules/eddymotion/registration/ants.html +++ b/main/_modules/eddymotion/registration/ants.html @@ -5,7 +5,7 @@ - eddymotion.registration.ants - eddymotion 24.0.0.dev207 documentation + eddymotion.registration.ants - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/registration/utils.html b/main/_modules/eddymotion/registration/utils.html index fbb6949f..a877cd4f 100644 --- a/main/_modules/eddymotion/registration/utils.html +++ b/main/_modules/eddymotion/registration/utils.html @@ -5,7 +5,7 @@ - eddymotion.registration.utils - eddymotion 24.0.0.dev207 documentation + eddymotion.registration.utils - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/testing/simulations.html b/main/_modules/eddymotion/testing/simulations.html index 1c7de10d..3924ef8a 100644 --- a/main/_modules/eddymotion/testing/simulations.html +++ b/main/_modules/eddymotion/testing/simulations.html @@ -5,7 +5,7 @@ - eddymotion.testing.simulations - eddymotion 24.0.0.dev207 documentation + eddymotion.testing.simulations - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/utils.html b/main/_modules/eddymotion/utils.html index 0616ccf0..db68ea54 100644 --- a/main/_modules/eddymotion/utils.html +++ b/main/_modules/eddymotion/utils.html @@ -5,7 +5,7 @@ - eddymotion.utils - eddymotion 24.0.0.dev207 documentation + eddymotion.utils - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/eddymotion/viz/signals.html b/main/_modules/eddymotion/viz/signals.html index 5af5253e..bb7c8ed0 100644 --- a/main/_modules/eddymotion/viz/signals.html +++ b/main/_modules/eddymotion/viz/signals.html @@ -5,7 +5,7 @@ - eddymotion.viz.signals - eddymotion 24.0.0.dev207 documentation + eddymotion.viz.signals - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ diff --git a/main/_modules/index.html b/main/_modules/index.html index 449639a2..a4b7c1a4 100644 --- a/main/_modules/index.html +++ b/main/_modules/index.html @@ -5,7 +5,7 @@ - Overview: module code - eddymotion 24.0.0.dev207 documentation + Overview: module code - eddymotion 24.0.0.dev211 documentation @@ -166,7 +166,7 @@
@@ -189,14 +189,14 @@ +
diff --git a/main/_sources/api/eddymotion.model.gpr.rst.txt b/main/_sources/api/eddymotion.model.gpr.rst.txt new file mode 100644 index 00000000..c1d92c2c --- /dev/null +++ b/main/_sources/api/eddymotion.model.gpr.rst.txt @@ -0,0 +1,7 @@ +eddymotion.model.gpr module +=========================== + +.. automodule:: eddymotion.model.gpr + :members: + :undoc-members: + :show-inheritance: diff --git a/main/_sources/api/eddymotion.model.rst.txt b/main/_sources/api/eddymotion.model.rst.txt index cc3c6fae..b759fb58 100644 --- a/main/_sources/api/eddymotion.model.rst.txt +++ b/main/_sources/api/eddymotion.model.rst.txt @@ -14,4 +14,5 @@ Submodules eddymotion.model.base eddymotion.model.dmri + eddymotion.model.gpr eddymotion.model.pet diff --git a/main/_static/documentation_options.js b/main/_static/documentation_options.js index 4ccc2535..df920f96 100644 --- a/main/_static/documentation_options.js +++ b/main/_static/documentation_options.js @@ -1,5 +1,5 @@ const DOCUMENTATION_OPTIONS = { - VERSION: '24.0.0.dev207', + VERSION: '24.0.0.dev211', LANGUAGE: 'en', COLLAPSE_INDEX: false, BUILDER: 'html', diff --git a/main/api/eddymotion.cli.html b/main/api/eddymotion.cli.html index d6fde614..30fec92c 100644 --- a/main/api/eddymotion.cli.html +++ b/main/api/eddymotion.cli.html @@ -6,7 +6,7 @@ - eddymotion.cli package - eddymotion 24.0.0.dev207 documentation + eddymotion.cli package - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.cli.run.html b/main/api/eddymotion.cli.run.html index 9611c947..011266d7 100644 --- a/main/api/eddymotion.cli.run.html +++ b/main/api/eddymotion.cli.run.html @@ -6,7 +6,7 @@ - eddymotion.cli.run module - eddymotion 24.0.0.dev207 documentation + eddymotion.cli.run module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.data.dmri.html b/main/api/eddymotion.data.dmri.html index 04a544b3..53175faf 100644 --- a/main/api/eddymotion.data.dmri.html +++ b/main/api/eddymotion.data.dmri.html @@ -6,7 +6,7 @@ - eddymotion.data.dmri module - eddymotion 24.0.0.dev207 documentation + eddymotion.data.dmri module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.data.filtering.html b/main/api/eddymotion.data.filtering.html index 3d3bd740..fc34489a 100644 --- a/main/api/eddymotion.data.filtering.html +++ b/main/api/eddymotion.data.filtering.html @@ -6,7 +6,7 @@ - eddymotion.data.filtering module - eddymotion 24.0.0.dev207 documentation + eddymotion.data.filtering module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.data.html b/main/api/eddymotion.data.html index 924dcaaa..6d4499f3 100644 --- a/main/api/eddymotion.data.html +++ b/main/api/eddymotion.data.html @@ -6,7 +6,7 @@ - eddymotion.data package - eddymotion 24.0.0.dev207 documentation + eddymotion.data package - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.data.splitting.html b/main/api/eddymotion.data.splitting.html index 57cb9ab9..3c64950c 100644 --- a/main/api/eddymotion.data.splitting.html +++ b/main/api/eddymotion.data.splitting.html @@ -6,7 +6,7 @@ - eddymotion.data.splitting module - eddymotion 24.0.0.dev207 documentation + eddymotion.data.splitting module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.data.utils.html b/main/api/eddymotion.data.utils.html index 5e84073a..506de1ff 100644 --- a/main/api/eddymotion.data.utils.html +++ b/main/api/eddymotion.data.utils.html @@ -6,7 +6,7 @@ - eddymotion.data.utils module - eddymotion 24.0.0.dev207 documentation + eddymotion.data.utils module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.estimator.html b/main/api/eddymotion.estimator.html index 50b71f59..2559ba4a 100644 --- a/main/api/eddymotion.estimator.html +++ b/main/api/eddymotion.estimator.html @@ -6,7 +6,7 @@ - eddymotion.estimator module - eddymotion 24.0.0.dev207 documentation + eddymotion.estimator module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.exceptions.html b/main/api/eddymotion.exceptions.html index b5a737d5..55899d5f 100644 --- a/main/api/eddymotion.exceptions.html +++ b/main/api/eddymotion.exceptions.html @@ -6,7 +6,7 @@ - eddymotion.exceptions module - eddymotion 24.0.0.dev207 documentation + eddymotion.exceptions module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.math.html b/main/api/eddymotion.math.html index 0f806739..872c6792 100644 --- a/main/api/eddymotion.math.html +++ b/main/api/eddymotion.math.html @@ -6,7 +6,7 @@ - eddymotion.math package - eddymotion 24.0.0.dev207 documentation + eddymotion.math package - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.model.base.html b/main/api/eddymotion.model.base.html index b96dffbd..8a198835 100644 --- a/main/api/eddymotion.model.base.html +++ b/main/api/eddymotion.model.base.html @@ -6,7 +6,7 @@ - eddymotion.model.base module - eddymotion 24.0.0.dev207 documentation + eddymotion.model.base module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.model.dmri.html b/main/api/eddymotion.model.dmri.html index 357fccec..c44bfe4a 100644 --- a/main/api/eddymotion.model.dmri.html +++ b/main/api/eddymotion.model.dmri.html @@ -3,10 +3,10 @@ - + - eddymotion.model.dmri module - eddymotion 24.0.0.dev207 documentation + eddymotion.model.dmri module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ diff --git a/main/api/eddymotion.model.gpr.html b/main/api/eddymotion.model.gpr.html new file mode 100644 index 00000000..fa392792 --- /dev/null +++ b/main/api/eddymotion.model.gpr.html @@ -0,0 +1,741 @@ + + + + + + + + + eddymotion.model.gpr module - eddymotion 24.0.0.dev211 documentation + + + + + + + + + + + + + + + + + Contents + + + + + + Menu + + + + + + + + Expand + + + + + + Light mode + + + + + + + + + + + + + + Dark mode + + + + + + + Auto light/dark, in light mode + + + + + + + + + + + + + + + Auto light/dark, in dark mode + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +Skip to content + + + +
+
+
+ +
+ +
+
+ +
+ +
+
+ +
+
+
+ +
+ +
+ +
+
+
+

eddymotion.model.gpr module

+

Derivations from scikit-learn for Gaussian Processes.

+
+
+eddymotion.model.gpr.BOUNDS_A: tuple[float, float] = (0.1, 2.35)
+

The limits for the parameter a (angular distance in rad).

+
+ +
+
+eddymotion.model.gpr.BOUNDS_LAMBDA: tuple[float, float] = (0.001, 1000)
+

The limits for the parameter λ (signal scaling factor).

+
+ +
+
+eddymotion.model.gpr.CONFIGURABLE_OPTIONS: Mapping[str, set] = {'CG': {'disp', 'gtol', 'maxiter'}, 'Nelder-Mead': {'adaptive', 'disp', 'fatol', 'maxiter'}}
+

A mapping from optimizer names to the option set they allow.

+

Add new optimizers to this mapping, including what options may be +configured (an illustrative sketch follows below).

+
+ +
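For illustration only, an extended mapping could look as follows (the ``Powell`` entry and its option names are hypothetical and not part of the module; since ``SUPPORTED_OPTIMIZERS`` is derived from these keys at import time, a real addition belongs in the module source rather than a runtime patch):

>>> CONFIGURABLE_OPTIONS = {
...     "Nelder-Mead": {"disp", "maxiter", "adaptive", "fatol"},
...     "CG": {"disp", "maxiter", "gtol"},
...     "Powell": {"disp", "maxiter", "xtol"},  # hypothetical new entry
... }
>>> sorted(CONFIGURABLE_OPTIONS)
['CG', 'Nelder-Mead', 'Powell']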
+
+class eddymotion.model.gpr.EddyMotionGPR(*args: Any, **kwargs: Any)[source]
+

Bases: GaussianProcessRegressor

+

A GP regressor specialized for eddymotion.

+

This specialization of the default GP regressor is created to allow +the following extended behaviors:

+
    +
  • Pacify Scikit-learn’s estimator parameter checker to allow optimizers +given by name (as a string) other than the default BFGS.

  • +
  • Enable custom options of optimizers. +See minimize for the available options. +Please note that only a few of them are currently supported.

  • +
+

In the future, this specialization would be the right place for hyperparameter +optimization using cross-validation and such.

+

In principle, Scikit-Learn’s implementation normalizes the training data +as in [Andersson15] (see +FSL’s source code). +From their paper (p. 167, end of first column):

+
+

Typically one just subtracts the mean (\(\bar{\mathbf{f}}\)) +from \(\mathbf{f}\) and then adds it back to +\(f^{*}\), which is analogous to what is often done in +“traditional” regression.

+
+

Finally, the parameter \(\sigma^2\) maps on to Scikit-learn’s alpha +of the regressor. +Because it is not a parameter of the kernel, hyperparameter selection +through gradient-descent with analytical gradient calculations +would not work (the derivative of the kernel w.r.t. alpha is zero).

+

I believe this is overlooked in [Andersson15], or they actually did not +use analytical gradient-descent:

+
+

A note on optimisation

+

It is suggested, for example in Rasmussen and Williams (2006), that +an optimisation method that uses derivative information should be +used when finding the hyperparameters that maximise Eq. (12). +The reason for that is that such methods typically use fewer steps, and +when the cost of calculating the derivatives is small/moderate compared +to calculating the functions itself (as is the case for Eq. (12)) then +execution time can be much shorter. +However, we found that for the multi-shell case a heuristic optimisation +method such as the Nelder-Mead simplex method (Nelder and Mead, 1965) was +frequently better at avoiding local maxima. +Hence, that was the method we used for all optimisations in the present +paper.

+
+

Multi-shell regression (TODO). +For multi-shell modeling, the kernel \(k(\textbf{x}, \textbf{x'})\) +is updated following Eq. (14) in [Andersson15].

+
+
+\[k(\textbf{x}, \textbf{x'}) = C_{\theta}(\mathbf{g}, \mathbf{g'}; a) C_{b}(|b - b'|; \ell)\]
+
+

and \(C_{b}\) is based on the log of the b-values ratio, a measure of distance along the +b-direction, according to Eq. (15) given by:

+
+
+\[C_{b}(b, b'; \ell) = \exp\left( - \frac{(\log b - \log b')^2}{2 \ell^2} \right),\]
+
+

\(b\) and \(b'\) being the b-values, and \(\mathbf{g}\) and +\(\mathbf{g'}\) the unit diffusion-encoding gradient vectors of the +shells; and \({a, \ell}\) some hyperparameters.

+

The full GP regression kernel \(\mathbf{K}\) is then updated for a 2-shell case as +follows (Eq. (16) in [Andersson15]):

+
+
+\[\begin{split}\begin{equation} +\mathbf{K} = \left[ +\begin{matrix} + \lambda C_{\theta}(\theta (\mathbf{G}_{1}); a) + \sigma_{1}^{2} \mathbf{I} & + \lambda C_{\theta}(\theta (\mathbf{G}_{2}, \mathbf{G}_{1}); a) C_{b}(b_{2}, b_{1}; \ell) \\ + \lambda C_{\theta}(\theta (\mathbf{G}_{1}, \mathbf{G}_{2}); a) C_{b}(b_{1}, b_{2}; \ell) & + \lambda C_{\theta}(\theta (\mathbf{G}_{2}); a) + \sigma_{2}^{2} \mathbf{I} \\ +\end{matrix} +\right] +\end{equation}\end{split}\]
+
+

References

+
+
+[Andersson15] +(1,2,3,4,5,6,7) +

J. L. R. Andersson et al., An integrated approach to +correction for off-resonance effects and subject movement in diffusion MR +imaging, NeuroImage 125 (2016) 1063-1078

+
+
+
+ +
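A minimal fitting sketch, using synthetic gradient directions and signal values (made up for illustration; the call pattern otherwise follows scikit-learn's ``fit``/``predict`` API, and uses the SphericalKriging kernel documented further below):

>>> import numpy as np
>>> from eddymotion.model.gpr import EddyMotionGPR, SphericalKriging
>>> rng = np.random.default_rng(1234)
>>> bvecs = rng.standard_normal((30, 3))
>>> bvecs /= np.linalg.norm(bvecs, axis=1, keepdims=True)  # unit gradient directions
>>> signal = rng.uniform(0.3, 1.0, size=30)  # synthetic DWI signal, one value per direction
>>> gpr = EddyMotionGPR(kernel=SphericalKriging(), alpha=0.5, optimizer="Nelder-Mead")
>>> predicted = gpr.fit(bvecs, signal).predict(bvecs[:1])
>>> predicted.shape
(1,)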
+
+class eddymotion.model.gpr.ExponentialKriging(*args: Any, **kwargs: Any)[source]
+

Bases: Kernel

+

A scikit-learn kernel for DWI signals, based on the exponential covariance of angular distances between diffusion gradient directions.

+
+
+diag(X: numpy.ndarray) numpy.ndarray[source]
+

Returns the diagonal of the kernel k(X, X).

+

The result of this method is identical to np.diag(self(X)); however, +it can be evaluated more efficiently since only the diagonal is +evaluated.

+
+
Parameters:
+

X (ndarray of shape (n_samples_X, n_features)) – Left argument of the returned kernel k(X, Y)

+
+
Returns:
+

K_diag – Diagonal of kernel k(X, X)

+
+
Return type:
+

ndarray of shape (n_samples_X,)

+
+
+
+ +
+
+property hyperparameter_a: sklearn.gaussian_process.kernels.Hyperparameter
+
+ +
+
+property hyperparameter_beta_l: sklearn.gaussian_process.kernels.Hyperparameter
+
+ +
+
+is_stationary() bool[source]
+

Returns whether the kernel is stationary.

+
+ +
+ +
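A quick sketch of evaluating the kernel directly on a set of gradient directions (the three orthogonal unit vectors below are synthetic, chosen only for illustration):

>>> import numpy as np
>>> from eddymotion.model.gpr import ExponentialKriging
>>> directions = np.eye(3)  # three orthogonal unit gradient directions
>>> kernel = ExponentialKriging(beta_a=1.0, beta_l=2.0)
>>> K = kernel(directions)
>>> K.shape
(3, 3)
>>> bool(np.allclose(np.diag(K), kernel.diag(directions)))
True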
+
+eddymotion.model.gpr.LBFGS_CONFIGURABLE_OPTIONS = {'disp', 'ftol', 'gtol', 'maxiter'}
+

The set of extended options that can be set on the default L-BFGS-B optimizer.

+
+ +
+
+eddymotion.model.gpr.NONGRADIENT_METHODS = {'Nelder-Mead'}
+

The set of optimization methods that do not support analytical gradients.

+
+ +
+
+eddymotion.model.gpr.SUPPORTED_OPTIMIZERS = {'CG', 'Nelder-Mead', 'fmin_l_bfgs_b'}
+

A set of supported optimizers (automatically created).

+
+ +
+
+class eddymotion.model.gpr.SphericalKriging(*args: Any, **kwargs: Any)[source]
+

Bases: Kernel

+

A scikit-learn kernel for DWI signals, based on the spherical covariance of angular distances between diffusion gradient directions.

+
+
+diag(X: numpy.ndarray) numpy.ndarray[source]
+

Returns the diagonal of the kernel k(X, X).

+

The result of this method is identical to np.diag(self(X)); however, +it can be evaluated more efficiently since only the diagonal is +evaluated.

+
+
Parameters:
+

X (ndarray of shape (n_samples_X, n_features)) – Left argument of the returned kernel k(X, Y)

+
+
Returns:
+

K_diag – Diagonal of kernel k(X, X)

+
+
Return type:
+

ndarray of shape (n_samples_X,)

+
+
+
+ +
+
+property hyperparameter_a: sklearn.gaussian_process.kernels.Hyperparameter
+
+ +
+
+property hyperparameter_beta_l: sklearn.gaussian_process.kernels.Hyperparameter
+
+ +
+
+is_stationary() bool[source]
+

Returns whether the kernel is stationary.

+
+ +
+ +
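A small sketch (synthetic directions) of the compact support of the spherical kernel: pairs of directions separated by more than ``beta_a`` radians contribute zero covariance, while the diagonal equals ``beta_l``:

>>> import numpy as np
>>> from eddymotion.model.gpr import SphericalKriging
>>> directions = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])  # 90 degrees apart
>>> kernel = SphericalKriging(beta_a=1.0, beta_l=0.5)  # support narrower than pi/2
>>> K = kernel(directions)
>>> float(K[0, 1]), float(K[0, 0])
(0.0, 0.5)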
+
+eddymotion.model.gpr.THETA_EPSILON: float = 1e-05
+

Minimum nonzero angle.

+
+ +
+
+eddymotion.model.gpr.compute_pairwise_angles(X: np.ndarray, Y: np.ndarray | None = None, closest_polarity: bool = True, dense_output: bool = True) np.ndarray[source]
+

Compute pairwise angles across diffusion gradient encoding directions.

+

Following [Andersson15], it computes the smallest of the angles between +each pair if closest_polarity is True, i.e.,

+
+
+\[\theta(\mathbf{g}, \mathbf{g'}) = \arccos(|\langle \mathbf{g}, \mathbf{g'} \rangle|)\]
+
+
+
Parameters:
+
    +
  • X ({array-like, sparse matrix} of shape (n_samples_X, n_features)) – Input data.

  • +
  • Y ({array-like, sparse matrix} of shape (n_samples_Y, n_features), optional) – Input data. If None, the output will be the pairwise +angles between all samples in X.

  • +
  • dense_output (bool, optional) – Whether to return dense output even when the input is sparse. If +False, the output is sparse if both input arrays are sparse.

  • +
  • closest_polarity (bool, optional) – True to consider the smallest of the two angles between the crossing lines resulting from reversing each vector pair.

  • +
+
+
Returns:
+

Pairwise angles across diffusion gradient encoding directions.

+
+
Return type:
+

ndarray

+
+
+

Examples

+
>>> X = np.asarray([(1.0, -1.0), (0.0, 0.0), (0.0, 0.0)]).T
+>>> compute_pairwise_angles(X, closest_polarity=False)[0, 1]  
+3.1415...
+>>> X = np.asarray([(1.0, -1.0), (0.0, 0.0), (0.0, 0.0)]).T
+>>> Y = np.asarray([(1.0, -1.0), (0.0, 0.0), (0.0, 0.0)]).T
+>>> compute_pairwise_angles(X, Y, closest_polarity=False)[0, 1]  
+3.1415...
+>>> X = np.asarray([(1.0, -1.0), (0.0, 0.0), (0.0, 0.0)]).T
+>>> compute_pairwise_angles(X)[0, 1]
+0.0
+
+
+
+ +
+
+eddymotion.model.gpr.exponential_covariance(theta: numpy.ndarray, a: float) numpy.ndarray[source]
+

Compute the exponential covariance for given distances and scale parameter.

+

Implements \(C_{\theta}\), following Eq. (9) in [Andersson15]:

+
+
+\[\begin{equation} +C(\theta) = e^{-\theta/a} \,\, \text{for} \, 0 \leq \theta \leq \pi, +\end{equation}\]
+
+

\(\theta\) being computed as:

+
+
+\[\theta(\mathbf{g}, \mathbf{g'}) = \arccos(|\langle \mathbf{g}, \mathbf{g'} \rangle|)\]
+
+
+
Parameters:
+
    +
  • theta (ndarray) – Array of distances between points.

  • +
  • a (float) – Scale parameter that controls the range of the covariance function.

  • +
+
+
Returns:
+

Exponential covariance values for the input distances.

+
+
Return type:
+

ndarray

+
+
+
+ +
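A quick numerical check with arbitrary angles (values rounded for readability):

>>> import numpy as np
>>> from eddymotion.model.gpr import exponential_covariance
>>> C = exponential_covariance(np.array([0.0, 1.0]), 1.0)
>>> float(C[0]), round(float(C[1]), 4)
(1.0, 0.3679)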
+
+eddymotion.model.gpr.spherical_covariance(theta: numpy.ndarray, a: float) numpy.ndarray[source]
+

Compute the spherical covariance for given distances and scale parameter.

+

Implements \(C_{\theta}\), following Eq. (10) in [Andersson15]:

+
+
+\[\begin{split}\begin{equation} +C(\theta) = +\begin{cases} +1 - \frac{3 \theta}{2 a} + \frac{\theta^3}{2 a^3} & \textnormal{if} \; \theta \leq a \\ +0 & \textnormal{if} \; \theta > a +\end{cases} +\end{equation}\end{split}\]
+
+

\(\theta\) being computed as:

+
+
+\[\theta(\mathbf{g}, \mathbf{g'}) = \arccos(|\langle \mathbf{g}, \mathbf{g'} \rangle|)\]
+
+
+
Parameters:
+
    +
  • theta (ndarray) – Array of distances between points.

  • +
  • a (float) – Scale parameter that controls the range of the covariance function.

  • +
+
+
Returns:
+

Spherical covariance values for the input distances.

+
+
Return type:
+

ndarray

+
+
+
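And the spherical counterpart, again with arbitrary angles (any angle beyond the scale parameter ``a`` yields zero covariance):

>>> import numpy as np
>>> from eddymotion.model.gpr import spherical_covariance
>>> C = spherical_covariance(np.array([0.0, 0.5, 2.0]), 1.0)
>>> [round(float(c), 4) for c in C]
[1.0, 0.3125, 0.0]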
+ +
+ +
+
+ +
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/main/api/eddymotion.model.html b/main/api/eddymotion.model.html index f917baef..f3d23b59 100644 --- a/main/api/eddymotion.model.html +++ b/main/api/eddymotion.model.html @@ -6,7 +6,7 @@ - eddymotion.model package - eddymotion 24.0.0.dev207 documentation + eddymotion.model package - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ -
+
diff --git a/main/api/eddymotion.registration.ants.html b/main/api/eddymotion.registration.ants.html index 21de1877..652d6f85 100644 --- a/main/api/eddymotion.registration.ants.html +++ b/main/api/eddymotion.registration.ants.html @@ -6,7 +6,7 @@ - eddymotion.registration.ants module - eddymotion 24.0.0.dev207 documentation + eddymotion.registration.ants module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.registration.html b/main/api/eddymotion.registration.html index a53f5f75..0634004e 100644 --- a/main/api/eddymotion.registration.html +++ b/main/api/eddymotion.registration.html @@ -6,7 +6,7 @@ - eddymotion.registration package - eddymotion 24.0.0.dev207 documentation + eddymotion.registration package - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.testing.html b/main/api/eddymotion.testing.html index 397bbf23..d62076ab 100644 --- a/main/api/eddymotion.testing.html +++ b/main/api/eddymotion.testing.html @@ -6,7 +6,7 @@ - eddymotion.testing package - eddymotion 24.0.0.dev207 documentation + eddymotion.testing package - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.utils.html b/main/api/eddymotion.utils.html index 220dd7cf..3107227c 100644 --- a/main/api/eddymotion.utils.html +++ b/main/api/eddymotion.utils.html @@ -6,7 +6,7 @@ - eddymotion.utils module - eddymotion 24.0.0.dev207 documentation + eddymotion.utils module - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/api/eddymotion.viz.html b/main/api/eddymotion.viz.html index f1617ec3..a029c625 100644 --- a/main/api/eddymotion.viz.html +++ b/main/api/eddymotion.viz.html @@ -6,7 +6,7 @@ - eddymotion.viz package - eddymotion 24.0.0.dev207 documentation + eddymotion.viz package - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@
@@ -190,14 +190,14 @@ +
diff --git a/main/changes.html b/main/changes.html index 3ccb4a04..f0d447aa 100644 --- a/main/changes.html +++ b/main/changes.html @@ -6,7 +6,7 @@ - What’s new? - eddymotion 24.0.0.dev207 documentation + What’s new? - eddymotion 24.0.0.dev211 documentation @@ -167,7 +167,7 @@
@@ -190,14 +190,14 @@
@@ -190,14 +190,14 @@
@@ -188,14 +188,14 @@