From b89edf5a62c24f8858289dffdd26860cb1036450 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Sat, 7 Dec 2024 18:56:06 -0500 Subject: [PATCH 1/8] DOC: Fix typos in GPR model documentation Fix typos in GPR model documentation. --- src/eddymotion/model/gpr.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eddymotion/model/gpr.py b/src/eddymotion/model/gpr.py index 6798d4bd..f738b9dd 100644 --- a/src/eddymotion/model/gpr.py +++ b/src/eddymotion/model/gpr.py @@ -80,10 +80,10 @@ class EddyMotionGPR(GaussianProcessRegressor): In principle, Scikit-Learn's implementation normalizes the training data as in [Andersson15]_ (see - `FSL's souce code `__). + `FSL's source code `__). From their paper (p. 167, end of first column): - Typically one just substracts the mean (:math:`\bar{\mathbf{f}}`) + Typically one just subtracts the mean (:math:`\bar{\mathbf{f}}`) from :math:`\mathbf{f}` and then add it back to :math:`f^{*}`, which is analogous to what is often done in "traditional" regression. From 57e2eb60800f9dd4644855ae0bcbfcc7b62b12be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Sat, 7 Dec 2024 18:56:58 -0500 Subject: [PATCH 2/8] DOC: Expand GP acronym when introduced for the first time in GPR model Expand GP acronym when introduced for the first time in GPR model module. --- src/eddymotion/model/gpr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eddymotion/model/gpr.py b/src/eddymotion/model/gpr.py index f738b9dd..16949cfd 100644 --- a/src/eddymotion/model/gpr.py +++ b/src/eddymotion/model/gpr.py @@ -64,7 +64,7 @@ class EddyMotionGPR(GaussianProcessRegressor): r""" - A GP regressor specialized for eddymotion. + A Gaussian process (GP) regressor specialized for eddymotion. 
This specialization of the default GP regressor is created to allow the following extended behaviors: From bb212ab737d01f441bb190b601d6974346d1b1c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Sat, 7 Dec 2024 18:58:07 -0500 Subject: [PATCH 3/8] DOC: Prefer impersonal style in `EddyMotionGPR` hpm optimization comment Prefer impersonal style in `EddyMotionGPR` hyperparameter optimization comment. --- src/eddymotion/model/gpr.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/eddymotion/model/gpr.py b/src/eddymotion/model/gpr.py index 16949cfd..34174b57 100644 --- a/src/eddymotion/model/gpr.py +++ b/src/eddymotion/model/gpr.py @@ -94,8 +94,8 @@ class EddyMotionGPR(GaussianProcessRegressor): through gradient-descent with analytical gradient calculations would not work (the derivative of the kernel w.r.t. alpha is zero). - I believe this is overlooked in [Andersson15]_, or they actually did not - use analytical gradient-descent: + This might have been overlooked in [Andersson15]_, or else they actually did + not use analytical gradient-descent: *A note on optimisation* From 34d1ff0e2021f76f359ac94c43202f381bf8d0ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Sat, 7 Dec 2024 19:03:19 -0500 Subject: [PATCH 4/8] DOC: Do not introduce unnecessary line breaks within paragraph Do not introduce unnecessary line breaks within paragraph. Take advantage of the commit to use literal highlighting syntax for the `alpha` parameter for the sake of consistency. --- src/eddymotion/model/gpr.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/src/eddymotion/model/gpr.py b/src/eddymotion/model/gpr.py index 34174b57..b189dcbf 100644 --- a/src/eddymotion/model/gpr.py +++ b/src/eddymotion/model/gpr.py @@ -89,10 +89,9 @@ class EddyMotionGPR(GaussianProcessRegressor): "traditional" regression. 
Finally, the parameter :math:`\sigma^2` maps on to Scikit-learn's ``alpha`` - of the regressor. - Because it is not a parameter of the kernel, hyperparameter selection - through gradient-descent with analytical gradient calculations - would not work (the derivative of the kernel w.r.t. alpha is zero). + of the regressor. Because it is not a parameter of the kernel, hyperparameter + selection through gradient-descent with analytical gradient calculations + would not work (the derivative of the kernel w.r.t. ``alpha`` is zero). This might have been overlooked in [Andersson15]_, or else they actually did not use analytical gradient-descent: @@ -105,13 +104,12 @@ class EddyMotionGPR(GaussianProcessRegressor): The reason for that is that such methods typically use fewer steps, and when the cost of calculating the derivatives is small/moderate compared to calculating the functions itself (as is the case for Eq. (12)) then - execution time can be much shorter. - However, we found that for the multi-shell case a heuristic optimisation - method such as the Nelder-Mead simplex method (Nelder and Mead, 1965) was - frequently better at avoiding local maxima. - Hence, that was the method we used for all optimisations in the present - paper. - + execution time can be much shorter. However, we found that for the + multi-shell case a heuristic optimisation method such as the Nelder-Mead + simplex method (Nelder and Mead, 1965) was frequently better at avoiding + local maxima. Hence, that was the method we used for all optimisations + in the present paper. + **Multi-shell regression (TODO).** For multi-shell modeling, the kernel :math:`k(\textbf{x}, \textbf{x'})` is updated following Eq. (14) in [Andersson15]_. 
From 6b9a1fa3854a31721b703fb78fa9d1f6b2fffe58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Sat, 7 Dec 2024 19:06:33 -0500 Subject: [PATCH 5/8] DOC: Use literal highlighting syntax for `a` parameter Use literal highlighting syntax for the `a` parameter in `gpr` module `ExponentialKriging` initialization method. --- src/eddymotion/model/gpr.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/eddymotion/model/gpr.py b/src/eddymotion/model/gpr.py index b189dcbf..3888602d 100644 --- a/src/eddymotion/model/gpr.py +++ b/src/eddymotion/model/gpr.py @@ -271,7 +271,7 @@ def __init__( beta_l : :obj:`float`, optional The :math:`\lambda` hyperparameter. a_bounds : :obj:`tuple`, optional - Bounds for the a parameter. + Bounds for the ``a`` parameter. l_bounds : :obj:`tuple`, optional Bounds for the :math:`\lambda` hyperparameter. From a5ccc0fc6c1c7bd4927d0408494a85df61d88f3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Sat, 7 Dec 2024 19:09:22 -0500 Subject: [PATCH 6/8] DOC: Use `obj` Sphinx role to reference `NumPy` arrays consistently Use `obj` Sphinx role to reference `NumPy` arrays consistently in `gpr` module docstrings. --- src/eddymotion/model/gpr.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/eddymotion/model/gpr.py b/src/eddymotion/model/gpr.py index 3888602d..d225d1e6 100644 --- a/src/eddymotion/model/gpr.py +++ b/src/eddymotion/model/gpr.py @@ -308,10 +308,10 @@ def __call__( Returns ------- - K : ndarray of shape (n_samples_X, n_samples_Y) + K : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) - K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + K_gradient : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_X, n_dims),\ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. 
Only returned when `eval_gradient` @@ -339,12 +339,12 @@ def diag(self, X: np.ndarray) -> np.ndarray: Parameters ---------- - X : ndarray of shape (n_samples_X, n_features) + X : :obj:`~numpy.ndarray` of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- - K_diag : ndarray of shape (n_samples_X,) + K_diag : :obj:`~numpy.ndarray` of shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.beta_l * np.ones(X.shape[0]) @@ -414,10 +414,10 @@ def __call__( Returns ------- - K : ndarray of shape (n_samples_X, n_samples_Y) + K : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) - K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims),\ + K_gradient : :obj:`~numpy.ndarray` of shape (n_samples_X, n_samples_X, n_dims),\ optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when ``eval_gradient`` @@ -450,12 +450,12 @@ def diag(self, X: np.ndarray) -> np.ndarray: Parameters ---------- - X : ndarray of shape (n_samples_X, n_features) + X : :obj:`~numpy.ndarray` of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- - K_diag : ndarray of shape (n_samples_X,) + K_diag : :obj:`~numpy.ndarray` of shape (n_samples_X,) Diagonal of kernel k(X, X) """ return self.beta_l * np.ones(X.shape[0]) From 5d10d90414618dbeabc30d2e22de55048515ea40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Sat, 7 Dec 2024 21:14:05 -0500 Subject: [PATCH 7/8] DOC: Add `__init__` documentation to class documentation Add `__init__` documentation to class documentation: - Add a function to the documentation configuration file that processes the signatures and adds the `__init__` method signature to the class signatures. - Make the class documentation contain the documentation from both the class and the `__init__` method setting the `autoclass_content` option to `both`. 
Remove the short description of the `__init__` methods so that the class documentation reads seamlessly. --- docs/conf.py | 27 +++++++++++++++++++++++++++ src/eddymotion/model/gpr.py | 2 -- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 6826697d..e9cd3542 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -234,6 +234,11 @@ apidoc_separate_modules = True apidoc_extra_args = ["--module-first", "-d 1", "-T"] + +# -- Options for autodoc extension ------------------------------------------- +autoclass_content = "both" + + # -- Options for intersphinx extension --------------------------------------- # Example configuration for intersphinx: refer to the Python standard library. @@ -253,3 +258,25 @@ # -- Options for versioning extension ---------------------------------------- scv_show_banner = True + + +# -- Special functions ------------------------------------------------------- +import inspect + + +def autodoc_process_signature(app, what, name, obj, options, signature, return_annotation): + """Replace the class signature by the signature from cls.__init__""" + + if what == "class" and hasattr(obj, "__init__"): + try: + init_signature = inspect.signature(obj.__init__) + # Convert the Signature object to a string + return str(init_signature), return_annotation + except ValueError: + # Handle cases where `inspect.signature` fails + return signature, return_annotation + return signature, return_annotation + + +def setup(app): + app.connect("autodoc-process-signature", autodoc_process_signature) diff --git a/src/eddymotion/model/gpr.py b/src/eddymotion/model/gpr.py index 6798d4bd..1d1c4da6 100644 --- a/src/eddymotion/model/gpr.py +++ b/src/eddymotion/model/gpr.py @@ -264,7 +264,6 @@ def __init__( l_bounds: tuple[float, float] = BOUNDS_LAMBDA, ): r""" - Initialize an exponential Kriging kernel. 
Parameters ---------- @@ -370,7 +369,6 @@ def __init__( l_bounds: tuple[float, float] = BOUNDS_LAMBDA, ): r""" - Initialize a spherical Kriging kernel. Parameters ---------- From e26943b5cdf3d1d61077edfe7bfd75c36cfad715 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?= Date: Wed, 27 Nov 2024 08:46:57 -0500 Subject: [PATCH 8/8] DOC: Fix cut passages from `README` in documentation index file Fix cut passages from `README` in documentation index file: add comments around the flowchart image in the `README` file and change the `start-line` and `end-line` directive pair for the `end-before` and `start-after` pair to skip the flowchart image only. The `start-line` and `end-line` directives were making such that relevant passages of the `README` file providing context for the tool were not being included. Thus the text shown in the documentation index file had some missing parts, and was not making sense. --- README.rst | 4 ++++ docs/index.rst | 5 ++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index a90f0d37..56aad44c 100644 --- a/README.rst +++ b/README.rst @@ -47,9 +47,13 @@ More recently, Cieslak et al. [#r3]_ integrated both approaches in *SHORELine*, the work of ``eddy`` and *SHORELine*, while generalizing these methods to multiple acquisition schemes (single-shell, multi-shell, and diffusion spectrum imaging) using diffusion models available with DIPY [#r5]_. +.. BEGIN FLOWCHART + .. image:: https://raw.githubusercontent.com/nipreps/eddymotion/507fc9bab86696d5330fd6a86c3870968243aea8/docs/_static/eddymotion-flowchart.svg :alt: The eddymotion flowchart +.. END FLOWCHART + .. [#r1] S. Ben-Amitay et al., Motion correction and registration of high b-value diffusion weighted images, Magnetic Resonance in Medicine 67:1694–1702 (2012) .. [#r2] J. L. R. Andersson. 
et al., An integrated approach to correction for off-resonance effects and subject movement diff --git a/docs/index.rst b/docs/index.rst index 8f3de245..0467a8e4 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,9 +1,8 @@ .. include:: links.rst .. include:: ../README.rst - :end-line: 29 + :end-before: BEGIN FLOWCHART .. include:: ../README.rst - :start-line: 34 - + :start-after: END FLOWCHART .. image:: _static/eddymotion-flowchart.svg :alt: The eddymotion flowchart