From 585f71acb25995bf7ea69cc5bd593399bb197cbb Mon Sep 17 00:00:00 2001
From: Ronny Bergmann
Date: Tue, 20 Aug 2024 14:52:57 +0200
Subject: [PATCH] First base for a glossary.

---
 ext/ManoptLRUCacheExt.jl                      |   2 +-
 src/Manopt.jl                                 |   2 +
 src/{plans => }/documentation_glossary.jl     | 108 ++++++++++--------
 src/helpers/checks.jl                         |   4 +-
 src/plans/augmented_lagrangian_plan.jl        |   6 +-
 src/plans/cache.jl                            |   2 +-
 src/plans/conjugate_residual_plan.jl          |  26 ++---
 src/plans/gradient_plan.jl                    |  34 +++---
 src/plans/hessian_plan.jl                     |   2 +-
 src/plans/higher_order_primal_dual_plan.jl    |   8 +-
 src/plans/interior_point_Newton_plan.jl       |   2 +-
 src/plans/manifold_default_factory.jl         |   2 +-
 src/plans/nonlinear_least_squares_plan.jl     |   4 +-
 src/plans/plan.jl                             |   1 -
 src/plans/stepsize.jl                         |   2 +-
 src/solvers/ChambollePock.jl                  |  22 ++--
 src/solvers/DouglasRachford.jl                |   4 +-
 src/solvers/LevenbergMarquardt.jl             |   4 +-
 src/solvers/NelderMead.jl                     |   6 +-
 src/solvers/cma_es.jl                         |   6 +-
 src/solvers/conjugate_residual.jl             |  18 +--
 src/solvers/convex_bundle_method.jl           |   4 +-
 src/solvers/cyclic_proximal_point.jl          |   2 +-
 .../difference-of-convex-proximal-point.jl    |   2 +-
 src/solvers/difference_of_convex_algorithm.jl |   6 +-
 src/solvers/exact_penalty_method.jl           |   2 +-
 src/solvers/interior_point_Newton.jl          |  16 +--
 src/solvers/primal_dual_semismooth_Newton.jl  |   4 +-
 src/solvers/proximal_bundle_method.jl         |   2 +-
 src/solvers/quasi_Newton.jl                   |   4 +-
 src/solvers/stochastic_gradient_descent.jl    |   4 +-
 src/solvers/subgradient.jl                    |   2 +-
 .../truncated_conjugate_gradient_descent.jl   |   8 +-
 33 files changed, 166 insertions(+), 155 deletions(-)
 rename src/{plans => }/documentation_glossary.jl (86%)

diff --git a/ext/ManoptLRUCacheExt.jl b/ext/ManoptLRUCacheExt.jl
index 6e2274f3cb..f416f50f42 100644
--- a/ext/ManoptLRUCacheExt.jl
+++ b/ext/ManoptLRUCacheExt.jl
@@ -27,7 +27,7 @@ Given a vector of symbols `caches`, this function sets up the

# Keyword arguments

-* `p=`$(Manopt._link_rand()): a point on a manifold, to both infer its type for keys and initialize caches
+* `p=`$(Manopt._link(:rand)): a point on a manifold, to both infer its type for keys and initialize caches
* `value=0.0`:
  a value for both typing and initialising number-caches; the default is for (Float) values like the cost.
* `X=zero_vector(M, p)`:
diff --git a/src/Manopt.jl b/src/Manopt.jl
index 9b3b724141..ad8f4d7af1 100644
--- a/src/Manopt.jl
+++ b/src/Manopt.jl
@@ -149,6 +149,8 @@ using Requires
using SparseArrays
using Statistics

+include("documentation_glossary.jl")
+
"""
    Rn(args; kwargs...)
    Rn(s::Symbol=:Manifolds, args; kwargs...)
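For orientation before the glossary file itself, here is a minimal sketch of the mechanics this patch introduces. The `define!`/`glossary` signatures and the `:Cal` LaTeX entry are taken from the calls visible in the hunks below; only `my_example` is illustrative.

```julia
# Minimal sketch of the glossary mechanics, based on the calls in this patch;
# the :Cal entry is assumed to be defined inside documentation_glossary.jl.
define!(:Link, :Manopt, "[`Manopt.jl`](https://manoptjl.org)") # store a plain string
define!(:Math, :M, _tex(:Cal, "M"))                            # store a rendered symbol

_link(:Manopt)       # strings are returned as-is
_math(:M)            # -> "\\mathcal M"
_link(:rand; M="N")  # stored functions are called, keyword arguments are passed on
```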
diff --git a/src/plans/documentation_glossary.jl b/src/documentation_glossary.jl
similarity index 86%
rename from src/plans/documentation_glossary.jl
rename to src/documentation_glossary.jl
index a5c7e0bf98..e2515bae31 100644
--- a/src/plans/documentation_glossary.jl
+++ b/src/documentation_glossary.jl
@@ -28,6 +28,7 @@ if that entry is
* a function, it is called with `args...` and `kwargs...` passed
* a dictionary, then the arguments and keyword arguments are passed to this dictionary, assuming `args[1]` is a symbol
"""
+#do not document for now, until we have an internals section
glossary(s::Symbol, args...; kwargs...) = glossary(_manopt_glossary, s, args...; kwargs...)
function glossary(g::_MANOPT_DOC_TYPE, s::Symbol, args...; kwargs...)
    return glossary(g[s], args...; kwargs...)
end
@@ -58,6 +59,9 @@ _tex(args...; kwargs...) = glossary(:LaTeX, args...; kwargs...)
# Mathematics and semantic symbols
# :symbol the symbol,
# :descr the description
+define!(:Math, :M, _tex(:Cal, "M"))
+define!(:Math, :Manifold, :symbol, _tex(:Cal, "M"))
+define!(:Math, :Manifold, :description, "the Riemannian manifold")
define!(
    :Math, :vector_transport, :symbol, (a="⋅", b="⋅") -> raw"\mathcal T_{" * "$a←$b" * "}"
)
@@ -67,7 +71,7 @@ _math(args...; kwargs...) = glossary(:Math, args...; kwargs...)
# Links
# Collect short forms for links, especially Interdocs ones.
_manopt_glossary[:Link] = _MANOPT_DOC_TYPE()
-_link(args, kwargs...) = glossary(:Link, args...; kwargs...)
+_link(args...; kwargs...) = glossary(:Link, args...; kwargs...)
define!(:Link, :Manopt, "[`Manopt.jl`](https://manoptjl.org)")
define!(
    :Link,
    :rand,
    (; M="M") ->
        "[`rand`](@extref Base.rand-Tuple{AbstractManifold})$(length(M) > 0 ? "`($M)`" : "")",
)
-
+define!(
+    :Link,
+    :zero_vector,
+    (; M="M", p="p") ->
+        "[`zero_vector`](@extref `ManifoldsBase.zero_vector-Tuple{AbstractManifold, Any}`)$(length(M) > 0 ? "`($M, $p)`" : "")",
+)
+define!(
+    :Link,
+    :manifold_dimension,
+    (; M="M") ->
+        "[`manifold_dimension`](@extref `ManifoldsBase.manifold_dimension-Tuple{AbstractManifold}`)$(length(M) > 0 ? "`($M)`" : "")",
+)
# ---
# Variables
# in fields, keyword arguments, parameters
@@ -113,39 +128,51 @@ define!(
)
define!(:Variable, :p, :type, "P")
define!(:Variable, :p, :default, (; M="M") -> _link(:rand; M=M))
-#= Old ones
-_var[:vector_transport_method] = Dict(
-    :description =>
-        (; M="M", p="p") ->
-            "a vector transport ``$(_math[:vector_transport][:symbol]())`` to use, see [the section on vector transports](@extref ManifoldsBase :doc:`vector_transports`)",
-    :type => "AbstractVectorTransportMethod",
-    :default =>
-        (; M="M", p="p") ->
-            "[`default_vector_transport_method`](@extref `ManifoldsBase.default_vector_transport_method-Tuple{AbstractManifold}`)`($M, typeof($p))`",
+
+define!(
+    :Variable,
+    :vector_transport_method,
+    :description,
+    (; M="M", p="p") ->
+        "a vector transport ``$(_math(:vector_transport, :symbol))`` to use, see [the section on vector transports](@extref ManifoldsBase :doc:`vector_transports`)",
+)
+define!(:Variable, :vector_transport_method, :type, "AbstractVectorTransportMethodP")
+define!(
+    :Variable,
+    :vector_transport_method,
+    :default,
+    (; M="M", p="p") ->
+        "[`default_vector_transport_method`](@extref `ManifoldsBase.default_vector_transport_method-Tuple{AbstractManifold}`)`($M, typeof($p))`",
)
-_var[:X] = Dict(
-    :description =>
-        (; M="M", p="p") ->
-            "a tangent bector at the point `$p` on the manifold ``$(_l[:Cal]("M"))``",
-    :type => "T",
-    :default => (; M="M", p="p") -> "`zero_vector($M,$p)`", # TODO Fix when the Links dictionary exists
-) =#
-# ---
-# Problems
-# ---
-# Notes
-_manopt_glossary[:Note] = _MANOPT_DOC_TYPE()
-_note = _manopt_glossary[:Note]
-_note[:ManifoldDefaultFactory] =
-    (type::String,) -> """
+define!(
+    :Variable,
+    :X,
+    :description,
+    (; M="M", p="p") ->
+        "a tangent vector at the point ``$p`` on the manifold ``$(_tex(:Cal, M))``",
+)
+define!(:Variable, :X, :type, "X")
+define!(:Variable, :X, :default, (; M="M", p="p") -> _link(:zero_vector; M=M, p=p))
+
+#
+#
+# Notes / Remarks
+_note(args...; kwargs...) = glossary(:Note, args...; kwargs...)
+
+define!(
+    :Note,
+    :ManifoldDefaultFactory,
+    (type::String) -> """
!!! info
    This function generates a [`ManifoldDefaultsFactory`](@ref) for [`$(type)`](@ref).
    If you do not provide a manifold, the manifold `M` later provided to (usually) generate
    the corresponding [`AbstractManoptSolverState`](@ref) will be used.
    This affects all arguments and keyword arguments with defaults that depend on the manifold,
    unless provided with a value here.
-"""
+""",
+)
+
# ---
# Old strings

_l_norm(v, i="") = raw"\lVert" * "$v" * raw"\rVert" * "_{$i}"
_l_Manifold(M="M") = _tex(:Cal, "M")
_l_M = "$(_l_Manifold())"
_l_TpM(p="p") = "T_{$p}$_l_M"
-_l_DΛ = "DΛ: T_{m}$(_l_M) → T_{Λ(m)}$(_l_Manifold("N"))"
+_l_DΛ = "DΛ: T_{m}$(_math(:M)) → T_{Λ(m)}$(_l_Manifold("N"))"
_l_grad_long = raw"\operatorname{grad} f: \mathcal M → T\mathcal M"
_l_Hess_long = "$_l_Hess f(p)[⋅]: $(_l_TpM()) → $(_l_TpM())"
_l_retr = raw"\operatorname{retr}"
@@ -188,23 +215,6 @@ function _math_sequence(name, index, i_start=1, i_end="n")
    return "\\{$(name)_{$index}\\}_{i=$(i_start)}^{$i_end}"
end

-#
-#
-# Links
-
-function _link_zero_vector(M="M", p="p")
-    arg = length(M) > 0 ? "`($M, $p)`" : ""
-    return "[`zero_vector`](@extref `ManifoldsBase.zero_vector-Tuple{AbstractManifold, Any}`)$arg"
-end
-function _link_manifold_dimension(M="M")
-    arg = length(M) > 0 ? "`($M)`" : ""
-    return "[`manifold_dimension`](@extref `ManifoldsBase.manifold_dimension-Tuple{AbstractManifold}`)$arg"
-end
-function _link_rand(M="M")
-    arg = length(M) > 0 ? "`($M)`" : ""
-    return "[`rand`](@extref Base.rand-Tuple{AbstractManifold})$arg"
-end
-
#
#
# Problems
@@ -265,8 +275,8 @@ _sc_all = "[` & `](@ref StopWhenAll)"

# Fields
_field_at_iteration = "`at_iteration`: an integer indicating at which the stopping criterion last indicated to stop, which might also be before the solver started (`0`). Any negative value indicates that this was not yet the case; "
-_field_iterate = "`p`: the current iterate ``p=p^{(k)} ∈ $(_l_M)``"
-_field_gradient = "`X`: the current gradient ``$(_l_grad)f(p^{(k)}) ∈ T_p$(_l_M)``"
+_field_iterate = "`p`: the current iterate ``p=p^{(k)} ∈ $(_math(:M))``"
+_field_gradient = "`X`: the current gradient ``$(_l_grad)f(p^{(k)}) ∈ T_p$(_math(:M))``"
_field_subgradient = "`X` : the current subgradient ``$(_l_subgrad)f(p^{(k)}) ∈ T_p$_l_M``"
_field_inv_retr = "`inverse_retraction_method::`[`AbstractInverseRetractionMethod`](@extref `ManifoldsBase.AbstractInverseRetractionMethod`) : an inverse retraction ``$(_l_retr)^{-1}``"
_field_p = raw"`p`, an initial value `p` ``= p^{(0)} ∈ \mathcal M``"
@@ -293,7 +303,7 @@ All other keyword arguments are passed to [`decorate_state!`](@ref) for state de
[`decorate_objective!`](@ref) for objective, respectively.
"""

-_kw_p_default = "`p=`$(_link_rand())"
+_kw_p_default = "`p=`$(Manopt._link(:rand))"
_kw_p = raw"specify an initial value for the point `p`."

_kw_retraction_method_default = raw"`retraction_method=`[`default_retraction_method`](@extref `ManifoldsBase.default_retraction_method-Tuple{AbstractManifold}`)`(M, typeof(p))`"
@@ -315,7 +325,7 @@ end
_kw_vector_transport_method_default = "`vector_transport_method=`[`default_vector_transport_method`](@extref `ManifoldsBase.default_vector_transport_method-Tuple{AbstractManifold}`)`(M, typeof(p))`"
_kw_vector_transport_method = "a vector transport ``$(_math(:vector_transport, :symbol))`` to use, see [the section on vector transports](@extref ManifoldsBase :doc:`vector_transports`)."

-_kw_X_default = "`X=`$(_link_zero_vector())"
+_kw_X_default = "`X=`$(_link(:zero_vector))"
_kw_X = raw"specify a memory internally to store a tangent vector"
_kw_X_init = raw"specify an initial value for the tangent vector"
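To see how these entries are meant to replace the old `_kw_*`/`_l_*` strings, here is a hedged interpolation sketch. `example_solver` is purely illustrative, and the `_var(:Keyword, ...)` accessor is assumed from its uses later in this patch (`gradient_plan.jl`); its definition is not part of this excerpt.

```julia
# Hedged sketch of interpolating glossary entries into a docstring; only the
# glossary calls are from this patch, example_solver is hypothetical.
@doc """
    example_solver(M, f; kwargs...)

Minimize ``f`` on the manifold ``$(_math(:M))``, starting at `p=`$(Manopt._link(:rand)).

# Keyword arguments

$(_var(:Keyword, :p))
$(_var(:Keyword, :X))
$(_var(:Keyword, :vector_transport_method))
"""
example_solver(M, f; kwargs...) = nothing
```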
diff --git a/src/helpers/checks.jl b/src/helpers/checks.jl
index 07c9b8ee41..22e73ce674 100644
--- a/src/helpers/checks.jl
+++ b/src/helpers/checks.jl
@@ -82,7 +82,7 @@ Verify numerically whether the gradient `grad_f(M,p)` of `f(M,p)` is correct, th
$_doc_check_gradient_formula

or in other words, that the error between the function ``f`` and its first order Taylor
-behaves in error ``$_l[:Cal]("O") O(t^2)``, which indicates that the gradient is correct,
+behaves in error ``$(_tex(:Cal, "O"))(t^2)``, which indicates that the gradient is correct,
cf. also [Boumal:2023; Section 4.8](@cite).

Note that if the errors are below the given tolerance and the method is exact,
@@ -161,7 +161,7 @@ The approximation is then

$_doc_check_Hess_formula

or in other words, that the error between the function ``f`` and its second order Taylor
-behaves in error ``$_l[:Cal]("O") (t^3)``, which indicates that the Hessian is correct,
+behaves in error ``$(_tex(:Cal, "O"))(t^3)``, which indicates that the Hessian is correct,
cf. also [Boumal:2023; Section 6.8](@cite).

Note that if the errors are below the given tolerance and the method is exact,
diff --git a/src/plans/augmented_lagrangian_plan.jl b/src/plans/augmented_lagrangian_plan.jl
index 35d944728e..62c7ef4325 100644
--- a/src/plans/augmented_lagrangian_plan.jl
+++ b/src/plans/augmented_lagrangian_plan.jl
@@ -1,5 +1,5 @@
-#_doc_al_Cost() = "$(_l[:Cal]("L"))_\\rho(p, μ, λ)"
-_doc_al_Cost(iter) = "$(_l[:Cal]("L"))_{ρ^{($iter)}}(p, μ^{($iter)}, λ^{($iter)})"
+#_doc_al_Cost() = "$(_tex(:Cal, "L"))_\\rho(p, μ, λ)"
+_doc_al_Cost(iter) = "$(_tex(:Cal, "L"))_{ρ^{($iter)}}(p, μ^{($iter)}, λ^{($iter)})"
_doc_AL_Cost_long = raw"""
```math
\mathcal L_\rho(p, μ, λ)
@@ -69,7 +69,7 @@ additionally this gradient does accept a positional last argument to specify the
for the internal gradient call of the constrained objective.

based on the internal [`ConstrainedManifoldObjective`](@ref) and computes the gradient
-`$_l_grad $(_l[:Cal]("L"))_{ρ}(p, μ, λ)``, see also [`AugmentedLagrangianCost`](@ref).
+``$_l_grad $(_tex(:Cal, "L"))_{ρ}(p, μ, λ)``, see also [`AugmentedLagrangianCost`](@ref).

## Fields
diff --git a/src/plans/cache.jl b/src/plans/cache.jl
index 0b58720637..586e6167a2 100644
--- a/src/plans/cache.jl
+++ b/src/plans/cache.jl
@@ -15,7 +15,7 @@ Both `X` and `c` are accompanied by booleans to keep track of their validity.

## Keyword arguments

-* `p=`$(_link_rand()): a point on the manifold to initialize the cache with
+* `p=`$(Manopt._link(:rand)): a point on the manifold to initialize the cache with
* `X=get_gradient(M, obj, p)` or `zero_vector(M,p)`: a tangent vector to store the gradient in,
  see also `initialize=`
* `c=[`get_cost`](@ref)`(M, obj, p)` or `0.0`: a value to store the cost function in `initialize`
diff --git a/src/plans/conjugate_residual_plan.jl b/src/plans/conjugate_residual_plan.jl
index 6dda153326..fdf1d9ded0 100644
--- a/src/plans/conjugate_residual_plan.jl
+++ b/src/plans/conjugate_residual_plan.jl
@@ -3,7 +3,7 @@
# Objective.
_doc_CR_cost = """
```math
f(X) = $(_tex(:frac, 1,2)) $(_l_norm(_tex(:Cal, "A")*"[X] + b","p"))^2,\\qquad X ∈ $(_l_TpM()),
```
"""
@doc """

Model the objective

$(_doc_CR_cost)

defined on the tangent space ``$(_l_TpM())`` at ``p`` on the manifold ``$(_math(:M))``.

In other words this is an objective to solve ``$(_tex(:Cal, "A"))(p)[X] = -b(p)``
for some linear symmetric operator and a vector function.
Note the minus on the right hand side, which makes this objective
especially tailored for (iteratively) solving Newton-like equations.
@@ -84,7 +84,7 @@ end
@doc """
    get_b(TpM::TangentSpace, slso::SymmetricLinearSystemObjective)

evaluate the stored value for computing the right hand side ``b`` in ``$(_tex(:Cal, "A"))(p)[X] = -b(p)``.
"""
function get_b(
    TpM::TangentSpace, slso::SymmetricLinearSystemObjective{AllocatingEvaluation}
@@ -106,7 +106,7 @@ evaluate the gradient of

$(_doc_CR_cost)

Which is ``$(_l_grad) f(X) = $(_tex(:Cal, "A"))[X]+b``. This can be computed in-place of `Y`.
"""
function get_gradient(TpM::TangentSpace, slso::SymmetricLinearSystemObjective, X)
    p = base_point(TpM)
@@ -141,7 +141,7 @@ evaluate the Hessian of

$(_doc_CR_cost)

Which is ``$(_l_Hess) f(X)[V] = $(_tex(:Cal, "A"))[V]``. This can be computed in-place of `W`.
"""
function get_hessian(
    TpM::TangentSpace, slso::SymmetricLinearSystemObjective{AllocatingEvaluation}, X, V
@@ -179,10 +179,10 @@ A state for the [`conjugate_residual`](@ref) solver.

# Fields

* `X::T`: the iterate
* `r::T`: the residual ``r = -b(p) - $(_tex(:Cal, "A"))(p)[X]``
* `d::T`: the conjugate direction
* `Ar::T`, `Ad::T`: storages for ``$(_tex(:Cal, "A"))(p)[d]``, ``$(_tex(:Cal, "A"))(p)[r]``
* `rAr::R`: internal field for storing ``⟨ r, $(_tex(:Cal, "A"))(p)[r] ⟩``
* `α::R`: a step length
* `β::R`: the conjugate coefficient
* `stop::TStop`: a [`StoppingCriterion`](@ref) for the solver

Initialise the state with default values.

## Keyword arguments

* `X=`$(_link(:zero_vector))
* `r=-get_gradient(TpM, slso, X)`
* `d=copy(TpM, r)`
* `Ar=get_hessian(TpM, slso, X, r)`
* `Ad=copy(TpM, Ar)`
* `α::R=0.0`
* `β::R=0.0`
* `stopping_criterion=`[`StopAfterIteration`](@ref)`($(_link(:manifold_dimension)))`$(_sc_any)[`StopWhenGradientNormLess`](@ref)`(1e-8)`

# See also

[`conjugate_residual`](@ref)

Stop when the relative residual in the [`conjugate_residual`](@ref)
is below a certain threshold, i.e.
```math
$(_l_ds)$(_tex(:frac, _l_norm("r^{(k)}"),"c")) ≤ ε,
```

where ``c = $(_l_norm("b"))`` is the norm of the initial vector field ``b`` in ``$(_tex(:Cal, "A"))(p)[X] + b(p) = 0_p``
from the [`conjugate_residual`](@ref)

# Fields
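Before the solver-facing files, a hedged usage sketch for this objective: solve ``\mathcal A(p)[X] + b(p) = 0_p`` on a tangent space. The calling conventions of `A` and `b` and the positional initial vector are assumptions based on the signatures quoted in this patch; `Sphere` requires Manifolds.jl.

```julia
# Hedged sketch; A and b calling conventions are assumptions from this patch.
using Manifolds, Manopt
M = Sphere(2)
p = [1.0, 0.0, 0.0]
TpM = TangentSpace(M, p)
A(M, p, X) = X                    # identity operator: linear and symmetric
b(M, p) = [0.0, 1.0, -1.0]        # a tangent vector at p
X0 = zero_vector(M, p)
X = conjugate_residual(TpM, A, b, X0)  # expect X ≈ -b(M, p)
```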
diff --git a/src/plans/gradient_plan.jl b/src/plans/gradient_plan.jl
index 1cd5a571dd..abd2b9dca9 100644
--- a/src/plans/gradient_plan.jl
+++ b/src/plans/gradient_plan.jl
@@ -288,12 +288,12 @@ direction update.

# Fields

$(_var(:Field, :p, "p_old"))
* `momentum::Real`: factor for the momentum
* `direction`: internal [`DirectionUpdateRule`](@ref) to determine directions to
  add the momentum to.
$(_var(:Field, :vector_transport_method))
$(_var(:Field, :X, "X_old"))

# Constructors

Initialize a momentum gradient rule to `s`, where `p` and `X` are memory for interim values.

## Keyword arguments

$(_var(:Keyword, :p))
* `s=`[`IdentityUpdateRule`](@ref)`()`
* `momentum=0.2`
$(_var(:Keyword, :vector_transport_method))
$(_var(:Keyword, :X))

# See also
@@ -362,13 +362,13 @@ last direction multiplied by momentum ``m``.

# Keyword arguments

-* $(_kw_p_default)
+$(_var(:Keyword, :p))
* `direction=`[`IdentityUpdateRule`](@ref) preprocess the actual gradient before adding momentum
-* $(_kw_X_default)`
+$(_var(:Keyword, :X))
* `momentum=0.2` amount of momentum to use
-* $(_kw_vector_transport_method_default): $(_kw_vector_transport_method)
+$(_var(:Keyword, :vector_transport_method))

-$(_note[:ManifoldDefaultFactory]("MomentumGradientRule"))
+$(_note(:ManifoldDefaultFactory, "MomentumGradientRule"))
"""
MomentumGradient(args...; kwargs...) =
    ManifoldDefaultsFactory(Manopt.MomentumGradientRule, args...; kwargs...)
@@ -464,7 +464,7 @@ them to the current iterate's tangent space.

* $(_kw_X_default)
* `vector_transport_method=default_vector_transport_method(M, typeof(p))`,

-$(_note[:ManifoldDefaultFactory]("AverageGradientRule"))
+$(_note(:ManifoldDefaultFactory, "AverageGradientRule"))
"""
AverageGradient(args...; kwargs...) =
    ManifoldDefaultsFactory(Manopt.AverageGradientRule, args...; kwargs...)
@@ -548,19 +548,19 @@ end

Assume ``f`` is ``L``-Lipschitz and ``μ``-strongly convex. Given

* a step size ``h_k<$(_tex(:frac, "1", "L"))`` (from the [`GradientDescentState`](@ref))
* a `shrinkage` parameter ``β_k``
* and a current iterate ``p_k``
* as well as the interim values ``γ_k`` and ``v_k`` from the previous iterate.

This computes a Nesterov-type update using the following steps, see [ZhangSra:2018](@cite)

1. Compute the positive root ``α_k∈(0,1)`` of ``α^2 = h_k$(_tex(:bigl))((1-α_k)γ_k+α_k μ$(_tex(:bigr)))``.
2. Set ``$(_tex(:bar, "γ"))_{k+1} = (1-α_k)γ_k + α_kμ``
3. ``y_k = $(_l_retr)_{p_k}\\Bigl(\\frac{α_kγ_k}{γ_k + α_kμ}$(_l_retr)^{-1}_{p_k}v_k \\Bigr)``
4. ``x_{k+1} = $(_l_retr)_{y_k}(-h_k $(_l_grad)f(y_k))``
5. ``v_{k+1} = $(_l_retr)_{y_k}\\Bigl(\\frac{(1-α_k)γ_k}{$(_tex(:bar, "γ"))_k}$(_l_retr)_{y_k}^{-1}(v_k) - \\frac{α_k}{$(_tex(:bar, "γ"))_{k+1}}$(_l_grad)f(y_k) \\Bigr)``
6. ``γ_{k+1} = \\frac{1}{1+β_k}$(_tex(:bar, "γ"))_{k+1}``

Then the direction from ``p_k`` to ``p_{k+1}`` is given by ``d = $(_l_retr)^{-1}_{p_k}p_{k+1}``, which is returned.

@@ -576,7 +576,7 @@ Then the direction from ``p_k`` to ``p_k+1`` by ``d = $(_l_retr)^{-1}_{p_k}p_{k+

* `shrinkage = k -> 0.8`
* $(_kw_inverse_retraction_method_default): $(_kw_inverse_retraction_method)

-$(_note[:ManifoldDefaultFactory]("NesterovRule"))
+$(_note(:ManifoldDefaultFactory, "NesterovRule"))
"""
function Nesterov(args...; kwargs...)
    return ManifoldDefaultsFactory(Manopt.NesterovRule, args...; kwargs...)
end
diff --git a/src/plans/hessian_plan.jl b/src/plans/hessian_plan.jl
index 3900e29424..30c105430c 100644
--- a/src/plans/hessian_plan.jl
+++ b/src/plans/hessian_plan.jl
@@ -221,7 +221,7 @@ of a function ``f`` the Hessian is approximated as follows:
let ``c`` be a stepsize, ``X ∈ $(_l_TpM())`` a tangent vector and ``q = $_doc_ApproxHessian_step``
be a step in direction ``X`` of length ``c`` following a retraction.
Then the Hessian is approximated by the finite difference of the gradients,
-where ``$(_math[:vector_transport][:symbol]())`` is a vector transport.
+where ``$(_math(:vector_transport, :symbol))`` is a vector transport.

$_doc_ApproxHessian_formula
diff --git a/src/plans/higher_order_primal_dual_plan.jl b/src/plans/higher_order_primal_dual_plan.jl
index eee0e84f97..4161684a9d 100644
--- a/src/plans/higher_order_primal_dual_plan.jl
+++ b/src/plans/higher_order_primal_dual_plan.jl
@@ -95,10 +95,10 @@ Generate a state for the [`primal_dual_semismooth_Newton`](@ref).

## Keyword arguments

* `m=`$(Manopt._link(:rand))
* `n=`$(Manopt._link(:rand; M="N"))
* `p=`$(Manopt._link(:rand))
* `X=`$(Manopt._link(:zero_vector))
* `primal_stepsize=1/sqrt(8)`
* `dual_stepsize=1/sqrt(8)`
* `reg_param=1e-5`
diff --git a/src/plans/interior_point_Newton_plan.jl b/src/plans/interior_point_Newton_plan.jl
index f3bb0584a9..1ed098755a 100644
--- a/src/plans/interior_point_Newton_plan.jl
+++ b/src/plans/interior_point_Newton_plan.jl
@@ -89,7 +89,7 @@ Let `m` and `n` denote the number of inequality and equality constraints, respec

* `retraction_method=default_retraction_method(M, typeof(p))`
* `step_objective=`[`ManifoldGradientObjective`](@ref)`(`[`KKTVectorFieldNormSq`](@ref)`(cmo)`, [`KKTVectorFieldNormSqGradient`](@ref)`(cmo)`; evaluation=[`InplaceEvaluation`](@ref)`())`
* `vector_space=`[`Rn`](@ref Manopt.Rn): a function that, given an integer, returns the manifold to be used for the vector space components ``ℝ^m,ℝ^n``
* `step_problem`: wrap the manifold ``$(_math(:M)) × ℝ^m × ℝ^n × ℝ^m``
* `step_state`: the [`StepsizeState`](@ref) with point and search direction
* `stepsize`: an [`ArmijoLinesearch`](@ref) with the [`InteriorPointCentralityCondition`](@ref) as
  additional condition to accept a step.
  Note that this step size operates on its own `step_problem` and `step_state`
diff --git a/src/plans/manifold_default_factory.jl b/src/plans/manifold_default_factory.jl
index d665f2ec11..89f4493907 100644
--- a/src/plans/manifold_default_factory.jl
+++ b/src/plans/manifold_default_factory.jl
@@ -1,7 +1,7 @@
"""
    ManifoldDefaultsFactory{M,T,A,K}

-A generic factory to postpone the instantiation of certain types from within $(_link[:Manopt]),
+A generic factory to postpone the instantiation of certain types from within $(_link(:Manopt)),
in order to be able to adapt it to defaults from different manifolds and/or postpone the
decision on which manifold to use to a later point
diff --git a/src/plans/nonlinear_least_squares_plan.jl b/src/plans/nonlinear_least_squares_plan.jl
index 83dbcae6cc..9425e1ee1c 100644
--- a/src/plans/nonlinear_least_squares_plan.jl
+++ b/src/plans/nonlinear_least_squares_plan.jl
@@ -8,7 +8,7 @@ A type for nonlinear least squares problems.
Specify a nonlinear least squares problem

# Fields
-* `f` a function ``f: $(_l_M) → ℝ^d`` to minimize
+* `f` a function ``f: $(_math(:M)) → ℝ^d`` to minimize
* `jacobian!!` Jacobian of the function ``f``
* `jacobian_tangent_basis` the basis of tangent space used for computing the Jacobian.
* `num_components` number of values returned by `f` (equal to `d`).
@@ -170,7 +170,7 @@ The following fields are keyword arguments

* `damping_term_min=0.1`
* `η=0.2`,
* `expect_zero_residual=false`
-* `initial_gradient=`$(_link_zero_vector())
+* `initial_gradient=`$(_link(:zero_vector))
* $_kw_retraction_method_default
* `stopping_criterion=`[`StopAfterIteration`](@ref)`(200)`$_sc_any[`StopWhenGradientNormLess`](@ref)`(1e-12)`$_sc_any[`StopWhenStepsizeLess`](@ref)`(1e-12)`
diff --git a/src/plans/plan.jl b/src/plans/plan.jl
index b0cecd01b6..9a5465654c 100644
--- a/src/plans/plan.jl
+++ b/src/plans/plan.jl
@@ -101,7 +101,6 @@ by `set_parameter!(:Mode, "")`.
"""
is_tutorial_mode() = (get_parameter(:Mode) == "Tutorial")

-include("documentation_glossary.jl")
include("manifold_default_factory.jl")
include("objective.jl")
include("problem.jl")
diff --git a/src/plans/stepsize.jl b/src/plans/stepsize.jl
index f2c8db816f..e60ad010f7 100644
--- a/src/plans/stepsize.jl
+++ b/src/plans/stepsize.jl
@@ -551,7 +551,7 @@ and

$_doc_NM_linesearch2

-where ``α_{k-1}`` is the step size computed in the last iteration and ``$(_math[:vector_transport][:symbol]())`` is a vector transport.
+where ``α_{k-1}`` is the step size computed in the last iteration and ``$(_math(:vector_transport, :symbol))`` is a vector transport.
Then the Barzilai—Borwein step size is

$_doc_NM_BB
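A hedged sketch of the factory pattern this patch documents: with `ManifoldDefaultsFactory`, a direction rule such as `MomentumGradient` no longer needs the manifold up front; the manifold-dependent defaults are filled in once the solver passes `M` along. The cost function here is illustrative and `Sphere` requires Manifolds.jl.

```julia
# Hedged sketch of using a factory-built direction update, per this patch.
using Manifolds, Manopt
M = Sphere(2)
f(M, p) = p[3]
grad_f(M, p) = project(M, p, [0.0, 0.0, 1.0])  # Riemannian gradient via projection
q = gradient_descent(M, f, grad_f, [1.0, 0.0, 0.0];
    direction=MomentumGradient(; momentum=0.2),
)
# q should approach [0.0, 0.0, -1.0], the minimizer of p[3] on the sphere
```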
diff --git a/src/solvers/ChambollePock.jl b/src/solvers/ChambollePock.jl
index a5e9b94589..911cec717d 100644
--- a/src/solvers/ChambollePock.jl
+++ b/src/solvers/ChambollePock.jl
@@ -10,12 +10,12 @@ stores all options and variables within a linearized or exact Chambolle Pock.

* $(_field_inv_retr)
* `inverse_retraction_method_dual::`[`AbstractInverseRetractionMethod`](@extref `ManifoldsBase.AbstractInverseRetractionMethod`):
  an inverse retraction ``$(_l_retr)^{-1}`` on ``$(_l_Manifold("N"))``
* `m::P`: base point on ``$(_math(:M))``
* `n::Q`: base point on ``$(_l_Manifold("N"))``
* `p::P`: an initial point on ``p^{(0)} ∈ $(_math(:M))``
* `pbar::P`: the relaxed iterate used in the next dual update step (when using `:primal` relaxation)
* `primal_stepsize::R`: proximal parameter of the primal prox
* `X::T`: an initial tangent vector ``X^{(0)} ∈ T_{p^{(0)}}$(_math(:M))``
* `Xbar::T`: the relaxed iterate used in the next primal update step (when using `:dual` relaxation)
* `relaxation::R`: relaxation in the primal relaxation step (to compute `pbar`)
* `relax::Symbol`: which variable to relax (`:primal` or `:dual`)
* `update_dual_base`: function `(pr, st, k) -> n` to update the dual base
* $(_field_vector_transp)
* `vector_transport_method_dual::`[`AbstractVectorTransportMethod`](@extref `ManifoldsBase.AbstractVectorTransportMethod`):
  a vector transport ``$(_math(:vector_transport, :symbol))`` on ``$(_l_Manifold("N"))``

Here, `P` is a point type on ``$(_math(:M))``, `T` its tangent vector type, `Q` a point type on ``$(_l_Manifold("N"))``,
and `R<:Real` is a real number type

where for the last two the functions a [`AbstractManoptProblem`](@ref)` p`,
@@ -44,10 +44,10 @@ If you activate these to be different from the default identity, you have to pro

# Keyword arguments

* `n=`$(Manopt._link(:rand; M="N"))
* `p=`$(Manopt._link(:rand))
* `m=`$(Manopt._link(:rand))
* `X=`$(Manopt._link(:zero_vector))
* `acceleration=0.0`
* `dual_stepsize=1/sqrt(8)`
* `primal_stepsize=1/sqrt(8)`
* `update_dual_base=missing`
* $_kw_vector_transport_method_default: $_kw_vector_transport_method
* `vector_transport_method=`[`default_vector_transport_method`](@extref `ManifoldsBase.default_vector_transport_method-Tuple{AbstractManifold}`)`(N, typeof(n))`:
  a vector transport ``$(_math(:vector_transport, :symbol))`` to use on ``$(_l_Manifold("N"))``, see [the section on vector transports](@extref ManifoldsBase :doc:`vector_transports`).

if `Manifolds.jl` is loaded, `N` is also a keyword argument and set to `TangentBundle(M)` by default.
""" @@ -253,7 +253,7 @@ For more details on the algorithm, see [BergmannHerzogSilvaLouzeiroTenbrinckVida * $_kw_retraction_method_default: $_kw_retraction_method * $_kw_vector_transport_method_default: $_kw_vector_transport_method * `vector_transport_method_dual=`[`default_vector_transport_method`](@extref `ManifoldsBase.default_vector_transport_method-Tuple{AbstractManifold}`)`(N, typeof(n))`: - a vector transport ``$(_math[:vector_transport][:symbol]())`` to use on $(_l_Manifold("N")), see [the section on vector transports](@extref ManifoldsBase :doc:`vector_transports`). + a vector transport ``$(_math(:vector_transport, :symbol))`` to use on $(_l_Manifold("N")), see [the section on vector transports](@extref ManifoldsBase :doc:`vector_transports`). $_doc_sec_output """ diff --git a/src/solvers/DouglasRachford.jl b/src/solvers/DouglasRachford.jl index fa624019fa..ef959d9d61 100644 --- a/src/solvers/DouglasRachford.jl +++ b/src/solvers/DouglasRachford.jl @@ -147,11 +147,11 @@ _doc_Douglas_Rachford = """ DouglasRachford!(M, f, proxes_f, p) DouglasRachford!(M, mpo, p) -Compute the Douglas-Rachford algorithm on the manifold ``$(_l_M)``, starting from `p`` +Compute the Douglas-Rachford algorithm on the manifold ``$(_math(:M))``, starting from `p`` given the (two) proximal maps `proxes_f`, see [ BergmannPerschSteidl:2016](@cite). For ``k>2`` proximal maps, the problem is reformulated using the parallel Douglas Rachford: -a vectorial proximal map on the power manifold ``$(_l_M)^k`` is introduced as the first +a vectorial proximal map on the power manifold ``$(_math(:M))^k`` is introduced as the first proximal map and the second proximal map of the is set to the [`mean`](@extref Statistics.mean-Tuple{AbstractManifold, Vararg{Any}}) (Riemannian center of mass). This hence also boils down to two proximal maps, though each evaluates proximal maps in parallel, that is, component wise in a vector. diff --git a/src/solvers/LevenbergMarquardt.jl b/src/solvers/LevenbergMarquardt.jl index 0130d42608..47351a32bc 100644 --- a/src/solvers/LevenbergMarquardt.jl +++ b/src/solvers/LevenbergMarquardt.jl @@ -11,7 +11,7 @@ Solve an optimization problem of the form $(_doc_LM_formula) -where ``f: $(_l_M) → ℝ^d`` is a continuously differentiable function, +where ``f: $(_math(:M)) → ℝ^d`` is a continuously differentiable function, using the Riemannian Levenberg-Marquardt algorithm [Peeters:1993](@cite). The implementation follows Algorithm 1 [AdachiOkunoTakeda:2022](@cite). The second signature performs the optimization in-place of `p`. @@ -19,7 +19,7 @@ The second signature performs the optimization in-place of `p`. # Input $(_arg_M) -* `f`: a cost function ``f: $(_l_M) M→ℝ^d`` +* `f`: a cost function ``f: $(_math(:M)) M→ℝ^d`` * `jacobian_f`: the Jacobian of ``f``. The Jacobian is supposed to accept a keyword argument `basis_domain` which specifies basis of the tangent space at a given point in which the Jacobian is to be calculated. By default it should be the `DefaultOrthonormalBasis`. diff --git a/src/solvers/NelderMead.jl b/src/solvers/NelderMead.jl index d89833d643..143983b6a5 100644 --- a/src/solvers/NelderMead.jl +++ b/src/solvers/NelderMead.jl @@ -9,7 +9,7 @@ A simplex for the Nelder-Mead algorithm. NelderMeadSimplex(M::AbstractManifold) Construct a simplex using ``d+1`` random points from manifold `M`, -where ``d`` is the $(_link_manifold_dimension("")) of `M`. +where ``d`` is the $(_link(:manifold_dimension; M="")) of `M`. NelderMeadSimplex( M::AbstractManifold, @@ -61,7 +61,7 @@ of the Euclidean case. 
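A hedged usage sketch matching the `LevenbergMarquardt` docstring above: the trailing positional number of components and the `basis_domain` keyword on the Jacobian follow the description in this hunk, and the residual function is illustrative.

```julia
# Hedged sketch; the solver call shape is assumed from the docstring above.
using Manifolds, Manopt, ManifoldsBase
M = Euclidean(2)
f(M, p) = [p[1] - 1.0, p[2] + 0.5]  # f: M → ℝ², zero residual at (1, -0.5)
jacobian_f(M, p; basis_domain=DefaultOrthonormalBasis()) = [1.0 0.0; 0.0 1.0]
q = LevenbergMarquardt(M, f, jacobian_f, [0.0, 0.0], 2)  # 2 = number of components
# q ≈ [1.0, -0.5]
```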
diff --git a/src/solvers/NelderMead.jl b/src/solvers/NelderMead.jl
index d89833d643..143983b6a5 100644
--- a/src/solvers/NelderMead.jl
+++ b/src/solvers/NelderMead.jl
@@ -9,7 +9,7 @@ A simplex for the Nelder-Mead algorithm.

    NelderMeadSimplex(M::AbstractManifold)

Construct a simplex using ``d+1`` random points from manifold `M`,
-where ``d`` is the $(_link_manifold_dimension("")) of `M`.
+where ``d`` is the $(_link(:manifold_dimension; M="")) of `M`.

    NelderMeadSimplex(
        M::AbstractManifold,
@@ -61,7 +61,7 @@ of the Euclidean case.
The default is given in brackets, the required value range after the description

* `population::`[`NelderMeadSimplex`](@ref): a population (set) of ``d+1`` points ``x_i``, ``i=1,…,n+1``, where ``d``
  is the $(_link(:manifold_dimension; M="")) of `M`.
* $_field_step
* `α`: the reflection parameter ``α > 0``
* `γ`: the expansion parameter ``γ > 0``
@@ -216,7 +216,7 @@ or Algorithm 4.1 in [http://www.optimization-online.org/DB_FILE/2007/08/1742.pdf

$_arg_M
$_arg_f
* `population::`[`NelderMeadSimplex`](@ref)`=`[`NelderMeadSimplex`](@ref)`(M)`: an initial simplex of ``d+1`` points, where ``d``
  is the $(_link(:manifold_dimension; M="")) of `M`.

# Keyword arguments
diff --git a/src/solvers/cma_es.jl b/src/solvers/cma_es.jl
index 2b97d8fe99..e5d1fd959d 100644
--- a/src/solvers/cma_es.jl
+++ b/src/solvers/cma_es.jl
@@ -347,12 +347,12 @@ setting.

# Input

-* `M`: a manifold ``$(_l_M) M``
-* `f`: a cost function ``f: $(_l_M)→ℝ`` to find a minimizer ``p^*`` for
+* `M`: a manifold ``$(_math(:M))``
+* `f`: a cost function ``f: $(_math(:M))→ℝ`` to find a minimizer ``p^*`` for

# Optional

-* `p_m=`$(_link_rand()): an initial point `p`
+* `p_m=`$(Manopt._link(:rand)): an initial point `p`
* `σ=1.0`: initial standard deviation
* `λ`: (`4 + Int(floor(3 * log(manifold_dimension(M))))`) population size (can be increased
  for a more thorough global search but decreasing is not recommended)
diff --git a/src/solvers/conjugate_residual.jl b/src/solvers/conjugate_residual.jl
index f140573c66..9dddc0b1e7 100644
--- a/src/solvers/conjugate_residual.jl
+++ b/src/solvers/conjugate_residual.jl
@@ -4,9 +4,9 @@
    conjugate_residual!(TpM::TangentSpace, A, b, p)
    conjugate_residual!(TpM::TangentSpace, slso::SymmetricLinearSystemObjective, p)

Compute the solution of ``$(_tex(:Cal, "A"))(p)[X] + b(p) = 0_p ``, where

* ``$(_tex(:Cal, "A"))`` is a linear, symmetric operator on ``$(_l_TpM())``
* ``b`` is a vector field on the manifold
* ``X ∈ $(_l_TpM())`` is a tangent vector
* ``0_p`` is the zero vector in ``$(_l_TpM())``.

This implementation follows Algorithm 3 in [LaiYoshise:2024](@cite) and
is initialised with ``X^{(0)}`` as the zero vector and

* the initial residual ``r^{(0)} = -b(p) - $(_tex(:Cal, "A"))(p)[X^{(0)}]``
* the initial conjugate direction ``d^{(0)} = r^{(0)}``
* initialize ``Y^{(0)} = $(_tex(:Cal, "A"))(p)[X^{(0)}]``

It then performs the following steps at iteration ``k=0,…`` until the `stopping_criterion` is fulfilled.

1. compute a step size ``α_k = $(_l_ds)$(_tex(:frac, "⟨ r^{(k)}, $(_tex(:Cal, "A"))(p)[r^{(k)}] ⟩_p","⟨ $(_tex(:Cal, "A"))(p)[d^{(k)}], $(_tex(:Cal, "A"))(p)[d^{(k)}] ⟩_p"))``
2. do a step ``X^{(k+1)} = X^{(k)} + α_kd^{(k)}``
3. update the residual ``r^{(k+1)} = r^{(k)} + α_k Y^{(k)}``
4. compute ``Z = $(_tex(:Cal, "A"))(p)[r^{(k+1)}]``
5. Update the conjugate coefficient ``β_k = $(_l_ds)$(_tex(:frac, "⟨ r^{(k+1)}, $(_tex(:Cal, "A"))(p)[r^{(k+1)}] ⟩_p", "⟨ r^{(k)}, $(_tex(:Cal, "A"))(p)[r^{(k)}] ⟩_p"))``
6. Update the conjugate direction ``d^{(k+1)} = r^{(k+1)} + β_kd^{(k)}``
7. Update ``Y^{(k+1)} = -Z + β_k Y^{(k)}``

Note that the right hand side of Step 7 is the same as evaluating ``$(_tex(:Cal, "A"))[d^{(k+1)}]``, but avoids the actual evaluation.

# Input

# Keyword arguments

* `evaluation=`[`AllocatingEvaluation`](@ref) specify whether `A` and `b` are implemented allocating or in-place
* `stopping_criterion::`[`StoppingCriterion`](@ref)`=`[`StopAfterIteration`](@ref)`(`$(_link(:manifold_dimension))`)`$_sc_any[`StopWhenRelativeResidualLess`](@ref)`(c,1e-8)`,
  where ``c = $(_l_norm("b"))``.

# Output
diff --git a/src/solvers/convex_bundle_method.jl b/src/solvers/convex_bundle_method.jl
index 577e250cb2..dd61f1dafa 100644
--- a/src/solvers/convex_bundle_method.jl
+++ b/src/solvers/convex_bundle_method.jl
@@ -62,7 +62,7 @@ Most of the following keyword arguments set default values for the fields mentio

* $(_kw_inverse_retraction_method_default): $(_kw_inverse_retraction_method)
* $(_kw_retraction_method_default): $(_kw_retraction_method)
* `stopping_criterion=`[`StopWhenLagrangeMultiplierLess`](@ref)`(1e-8)`$(_sc_any)[`StopAfterIteration`](@ref)`(5000)`
-* `X=`$(_link_zero_vector()) specify the type of tangent vector to use.
+* `X=`$(_link(:zero_vector)): specify the type of tangent vector to use.
* $(_kw_vector_transport_method_default): $(_kw_vector_transport_method)
* `sub_problem=`[`convex_bundle_method_subsolver`](@ref)
* `sub_state=`[`AllocatingEvaluation`](@ref)
@@ -317,7 +317,7 @@ $(_arg_p)

* $(_kw_retraction_method_default): $(_kw_retraction_method)
* `stopping_criterion=`[`StopWhenLagrangeMultiplierLess`](@ref)`(1e-8)`$(_sc_any)[`StopAfterIteration`](@ref)`(5000)`:
  $(_kw_stopping_criterion)
-* `X=`$(_link_zero_vector()) specify the type of tangent vector to use.
+* `X=`$(_link(:zero_vector)): specify the type of tangent vector to use.
* $(_kw_vector_transport_method_default): $(_kw_vector_transport_method)
* `sub_problem=`[`convex_bundle_method_subsolver`](@ref): a Manopt problem or a closed form solution as a function for the sub problem
* `sub_state=`[`AllocatingEvaluation`](@ref): specify a solver for the sub problem or how the closed form solution function is evaluated.
diff --git a/src/solvers/cyclic_proximal_point.jl b/src/solvers/cyclic_proximal_point.jl
index 78431900e8..4fad95b5ec 100644
--- a/src/solvers/cyclic_proximal_point.jl
+++ b/src/solvers/cyclic_proximal_point.jl
@@ -26,7 +26,7 @@ perform a cyclic proximal point algorithm. This can be done in-place of `p`.
# Input

$(_arg_M)
* `f`: a cost function ``f: $(_math(:M)) → ℝ`` to minimize
* `proxes_f`: an Array of proximal maps (`Function`s) `(M,λ,p) -> q` or `(M, q, λ, p) -> q` for the summands of ``f`` (see `evaluation`)

where `f` and the proximal maps `proxes_f` can also be given directly as a [`ManifoldProximalMapObjective`](@ref) `mpo`
diff --git a/src/solvers/difference-of-convex-proximal-point.jl b/src/solvers/difference-of-convex-proximal-point.jl
index 311218b32d..66767b09d6 100644
--- a/src/solvers/difference-of-convex-proximal-point.jl
+++ b/src/solvers/difference-of-convex-proximal-point.jl
@@ -161,7 +161,7 @@ _doc_DCPPA = """
Compute the difference of convex proximal point algorithm [SouzaOliveira:2015](@cite) to minimize

```math
    $(_l_argmin)_{p∈$(_math(:M))} g(p) - h(p)
```

where you have to provide the subgradient ``∂h`` of ``h`` and either
diff --git a/src/solvers/difference_of_convex_algorithm.jl b/src/solvers/difference_of_convex_algorithm.jl
index 4ed33c9ab5..c2bc671c07 100644
--- a/src/solvers/difference_of_convex_algorithm.jl
+++ b/src/solvers/difference_of_convex_algorithm.jl
@@ -17,7 +17,7 @@ It comes in two forms, depending on the realisation of the `subproblem`.
For the sub task, a method to solve

```math
    $(_l_argmin)_{q∈$(_math(:M))}\\ g(p) - ⟨X, $(_l_log)_p q⟩
```

is needed. Besides a problem and a state, one can also provide a function and
@@ -116,7 +116,7 @@ _doc_DoC = """
Compute the difference of convex algorithm [BergmannFerreiraSantosSouza:2023](@cite) to minimize

```math
    $(_l_argmin)_{p∈$(_math(:M))}\\ g(p) - h(p)
```

where you need to provide ``f(p) = g(p) - h(p)``, ``g`` and the subdifferential ``∂h`` of ``h``.

This algorithm performs the following steps given a start point `p`= ``p^{(0)}``.
Then repeat for ``k=0,1,…``

1. Take ``X^{(k)} ∈ ∂h(p^{(k)})``
2. Set the next iterate to the solution of the subproblem
```math
  p^{(k+1)} ∈ $(_l_argmin)_{q ∈ $(_math(:M))} g(q) - ⟨X^{(k)}, $(_l_log)_{p^{(k)}}q⟩
```

until the stopping criterion (see the `stopping_criterion` keyword) is fulfilled.
diff --git a/src/solvers/exact_penalty_method.jl b/src/solvers/exact_penalty_method.jl
index 5aa06c2fdf..2d793a2faf 100644
--- a/src/solvers/exact_penalty_method.jl
+++ b/src/solvers/exact_penalty_method.jl
@@ -199,7 +199,7 @@ $(_doc_EPM_penalty)

Since this is non-smooth, a [`SmoothingTechnique`](@ref) with parameter `u` is applied,
see the [`ExactPenaltyCost`](@ref).

In every step ``k`` of the exact penalty method, the smoothed objective is then minimized over all ``p ∈ $(_math(:M))``.
Then, the accuracy tolerance ``ϵ`` and the smoothing parameter ``u`` are updated by setting

$(_doc_EMP_ϵ_update)
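A hedged sketch for the difference-of-convex solver documented above: with ``f = g - h``, ``g(p) = p^4`` and ``h(p) = p^2``, the minimizers are ``p = ±1/\sqrt{2}``. The `grad_g=` keyword for the default subsolver is an assumption here; the positional `f, g, ∂h` arguments follow the docstring.

```julia
# Hedged sketch; grad_g= for the subsolver is an assumption.
using Manifolds, Manopt
M = Euclidean(1)
g(M, p) = p[1]^4
h(M, p) = p[1]^2
f(M, p) = g(M, p) - h(M, p)
∂h(M, p) = [2.0 * p[1]]
q = difference_of_convex_algorithm(M, f, g, ∂h, [2.0]; grad_g=(M, p) -> [4.0 * p[1]^3])
```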
diff --git a/src/solvers/interior_point_Newton.jl b/src/solvers/interior_point_Newton.jl
index e45f2434f2..7d726e47a5 100644
--- a/src/solvers/interior_point_Newton.jl
+++ b/src/solvers/interior_point_Newton.jl
@@ -37,11 +37,11 @@ the constraints are further fulfilled.

# Input

* `M`: a manifold ``$(_math(:M))``
* `f`: a cost function ``f : $(_math(:M)) → ℝ`` to minimize
* `grad_f`: the gradient ``$(_l_grad) f : $(_math(:M)) → T $(_math(:M))`` of ``f``
* `Hess_f`: the Hessian ``$(_l_Hess)f(p): T_p$(_math(:M)) → T_p$(_math(:M))``, ``X ↦ $(_l_Hess)f(p)[X] = ∇_X$(_l_grad)f(p)``
$(_var(:Field, :p))

or a [`ConstrainedManifoldObjective`](@ref) `cmo` containing `f`, `grad_f`, `Hess_f`, and the constraints
@@ -74,7 +74,7 @@ pass a [`ConstrainedManifoldObjective`](@ref) `cmo`

* `s=copy(μ)`: initial value for the slack variables
* `σ=`[`calculate_σ`](@ref)`(M, cmo, p, μ, λ, s)`: scaling factor for the barrier parameter `β` in the sub problem, which is updated during the iterations
* `step_objective`: a [`ManifoldGradientObjective`](@ref) of the norm of the KKT vector field [`KKTVectorFieldNormSq`](@ref) and its gradient [`KKTVectorFieldNormSqGradient`](@ref)
* `step_problem`: the manifold ``$(_math(:M)) × ℝ^m × ℝ^n × ℝ^m`` together with the `step_objective`
  as the problem the linesearch `stepsize=` employs for determining a step size
* `step_state`: the [`StepsizeState`](@ref) with point and search direction
* `stepsize`: an [`ArmijoLinesearch`](@ref) with the [`InteriorPointCentralityCondition`](@ref) as
  additional criterion to accept a step
* `stopping_criterion=`[`StopAfterIteration`](@ref)`(200)`[` | `](@ref StopWhenAny)[`StopWhenKKTResidualLess`](@ref)`(1e-8)`:
  a stopping criterion, by default depending on the residual of the KKT vector field or a maximal number of steps, whichever hits first.
* `sub_kwargs=(;)`: keyword arguments to decorate the sub options, for example debug, that automatically respects the main solver's debug options (like sub-sampling) as well
* `sub_objective`: The [`SymmetricLinearSystemObjective`](@ref) modelling the system of equations to use in the sub solver,
  includes the [`CondensedKKTVectorFieldJacobian`](@ref) ``$(_tex(:Cal, "A"))(X)`` and the [`CondensedKKTVectorField`](@ref) ``b`` in ``$(_tex(:Cal, "A"))(X) + b = 0`` we aim to solve.
  $(_kw_used_in("sub_problem"))
* `sub_stopping_criterion=`[`StopAfterIteration`](@ref)`(manifold_dimension(M))`[` | `](@ref StopWhenAny)[`StopWhenRelativeResidualLess`](@ref)`(c,1e-8)`, where ``c = $(_l_norm("b"))`` from the system to solve.
  $(_kw_used_in("sub_state"))
* `sub_problem`: combining the `sub_objective` and the tangent space at ``(p,λ)`` on the manifold ``$(_math(:M)) × ℝ^n`` to a manopt problem.
  This is the manifold and objective for the sub solver.
* `sub_state=`[`ConjugateResidualState`](@ref): a state specifying the subsolver. This default is also decorated with the `sub_kwargs...`.
* `vector_space=`[`Rn`](@ref Manopt.Rn): a function that, given an integer, returns the manifold to be used for the vector space components ``ℝ^m,ℝ^n``
diff --git a/src/solvers/primal_dual_semismooth_Newton.jl b/src/solvers/primal_dual_semismooth_Newton.jl
index 6b02e54128..aca5d28562 100644
--- a/src/solvers/primal_dual_semismooth_Newton.jl
+++ b/src/solvers/primal_dual_semismooth_Newton.jl
@@ -14,8 +14,8 @@ Perform the Primal-Dual Riemannian semismooth Newton algorithm.

$(_doc_PDSN_formula)

* `p, X`: primal and dual start points ``p∈$(_math(:M))`` and ``X ∈ T_n$(_l_Manifold("N"))``
* `m,n`: base points on ``$(_math(:M))`` and ``$(_l_Manifold("N"))``, respectively.
* `linearized_forward_operator`: the linearization ``DΛ(⋅)[⋅]`` of the operator ``Λ(⋅)``.
* `adjoint_linearized_operator`: the adjoint ``DΛ^*`` of the linearized operator ``DΛ(m): $(_l_TpM("m")) → T_{Λ(m)}$(_l_Manifold("N"))``
* `prox_F, prox_G_Dual`: the proximal maps of ``F`` and ``G^\\ast_n``
diff --git a/src/solvers/proximal_bundle_method.jl b/src/solvers/proximal_bundle_method.jl
index fc51fac4fc..457c0377a2 100644
--- a/src/solvers/proximal_bundle_method.jl
+++ b/src/solvers/proximal_bundle_method.jl
@@ -52,7 +52,7 @@ Generate the state for the [`proximal_bundle_method`](@ref) on the manifold `M`

* `sub_problem=`[`proximal_bundle_method_subsolver`](@ref)
* `sub_state=`[`AllocatingEvaluation`](@ref)
* $(_kw_vector_transport_method_default): $(_kw_vector_transport_method)
-* `X=`$(_link_zero_vector()) specify the type of tangent vector to use.
+* `X=`$(_link(:zero_vector)): specify the type of tangent vector to use.
"""
mutable struct ProximalBundleMethodState{
    P,
diff --git a/src/solvers/quasi_Newton.jl b/src/solvers/quasi_Newton.jl
index ce6051b44e..2f5ea0f64f 100644
--- a/src/solvers/quasi_Newton.jl
+++ b/src/solvers/quasi_Newton.jl
@@ -186,10 +186,10 @@ $(_problem_default)

with start point `p`. The iterations can be done in-place of `p` ``= p^{(0)}``.
The ``k``th iteration consists of

1. Compute the search direction ``η^{(k)} = -$(_tex(:Cal, "B"))_k [$(_l_grad)f (p^{(k)})]`` or solve ``$(_tex(:Cal, "H"))_k [η^{(k)}] = -$(_l_grad)f (p^{(k)})``.
2. Determine a suitable stepsize ``α_k`` along the curve ``γ(α) = R_{p^{(k)}}(α η^{(k)})``, usually by using [`WolfePowellLinesearch`](@ref).
3. Compute ``p^{(k+1)} = R_{p^{(k)}}(α_k η^{(k)})``.
4. Define ``s_k = $(_tex(:Cal, "T"))_{p^{(k)}, α_k η^{(k)}}(α_k η^{(k)})`` and ``y_k = $(_l_grad)f(p^{(k+1)}) - $(_tex(:Cal, "T"))_{p^{(k)}, α_k η^{(k)}}($(_l_grad)f(p^{(k)}))``, where ``$(_tex(:Cal, "T"))`` denotes a vector transport.
5. Compute the new approximate Hessian ``H_{k+1}`` or its inverse ``B_{k+1}``.

# Input
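A hedged sketch for the quasi-Newton iteration documented above: minimizing a Rayleigh quotient on the sphere finds an eigenvector of the smallest eigenvalue; `quasi_Newton(M, f, grad_f, p)` is the basic call, and `Sphere` requires Manifolds.jl.

```julia
# Hedged sketch of the basic quasi-Newton call from the docstring above.
using LinearAlgebra, Manifolds, Manopt
A = Symmetric([2.0 1.0 0.0; 1.0 3.0 0.5; 0.0 0.5 1.0])
M = Sphere(2)
f(M, p) = p' * A * p
grad_f(M, p) = project(M, p, 2 * A * p)  # project the Euclidean gradient
q = quasi_Newton(M, f, grad_f, [1.0, 0.0, 0.0])
# q approximates the eigenvector of the smallest eigenvalue of A
```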
diff --git a/src/solvers/stochastic_gradient_descent.jl b/src/solvers/stochastic_gradient_descent.jl
index 240ddb9359..4a1620ad59 100644
--- a/src/solvers/stochastic_gradient_descent.jl
+++ b/src/solvers/stochastic_gradient_descent.jl
@@ -23,7 +23,7 @@ Create a `StochasticGradientDescentState` with start point `p`.

# Keyword arguments

-* `direction=`[`StochasticGradient`](@ref)`($(_link_zero_vector()))
+* `direction=`[`StochasticGradient`](@ref)`(`$(_link(:zero_vector))`)`
* `order_type=:RandomOrder`
* `order=Int[]`: specify how to store the order of indices for the next epoch
* $(_kw_p_default): $(_kw_p)
@@ -161,7 +161,7 @@ then using the `cost=` keyword does not have any effect since if so, the cost is

# Keyword arguments

* `cost=missing`: you can provide a cost function for example to track the function value
-* `direction=`[`StochasticGradient`](@ref)`($(_link_zero_vector()))
+* `direction=`[`StochasticGradient`](@ref)`(`$(_link(:zero_vector))`)`
* $(_kw_evaluation_default): $(_kw_evaluation)
* `evaluation_order=:Random`: specify whether to use a randomly permuted sequence (`:FixedRandom`),
  a per-cycle permuted sequence (`:Linear`), or the default `:Random` one.
diff --git a/src/solvers/subgradient.jl b/src/solvers/subgradient.jl
index 4128716460..e3c075f620 100644
--- a/src/solvers/subgradient.jl
+++ b/src/solvers/subgradient.jl
@@ -103,7 +103,7 @@ For more details see [FerreiraOliveira:1998](@cite).

$(_arg_M)
$(_arg_f)
* `∂f`: the (sub)gradient ``∂ f: $(_math(:M)) → T$(_math(:M))`` of ``f``
$(_arg_p)

alternatively to `f` and `∂f` a [`ManifoldSubgradientObjective`](@ref) `sgo` can be provided.
diff --git a/src/solvers/truncated_conjugate_gradient_descent.jl b/src/solvers/truncated_conjugate_gradient_descent.jl
index 6a3e3390ec..5721a63b25 100644
--- a/src/solvers/truncated_conjugate_gradient_descent.jl
+++ b/src/solvers/truncated_conjugate_gradient_descent.jl
@@ -35,13 +35,13 @@ Initialise the TCG state.

## Keyword arguments

* `X=`$(_link(:zero_vector)): specify the type of tangent vector to use.
* `κ=0.1`
* `project!::F=copyto!`: initialise the numerical stabilisation to just copy the result
* `randomize=false`
* `θ=1.0`
* `trust_region_radius=`[`injectivity_radius`](@extref `ManifoldsBase.injectivity_radius-Tuple{AbstractManifold}`)`(base_manifold(TpM)) / 4`
* `stopping_criterion=`[`StopAfterIteration`](@ref)`(`$(_link(:manifold_dimension; M="base_manifold(TpM)"))`)`
  $(_sc_any)[`StopWhenResidualIsReducedByFactorOrPower`](@ref)`(; κ=κ, θ=θ)`$(_sc_any)[`StopWhenTrustRegionIsExceeded`](@ref)`()`
  $(_sc_any)[`StopWhenCurvatureIsNegative`](@ref)`()`$(_sc_any)[`StopWhenModelIncreased`](@ref)`()`:
  $(_kw_stopping_criterion)
@@ -421,7 +421,7 @@ solve the trust-region subproblem

$(_doc_TCG_subproblem)

-on a manifold ``$(_l_M)`` by using the Steihaug-Toint truncated conjugate-gradient (tCG) method.
+on a manifold ``$(_math(:M))`` by using the Steihaug-Toint truncated conjugate-gradient (tCG) method.
This can be done in-place of `X`.

For a description of the algorithm and theorems offering convergence guarantees,
@@ -453,7 +453,7 @@ directly.

* `randomize=false`: indicate whether `X` is initialised to a random vector or not.
  This disables preconditioning.
* $(_kw_retraction_method_default): $(_kw_retraction_method)
-* `stopping_criterion=`[`StopAfterIteration`](@ref)`(`$(_link_manifold_dimension("base_manifold(Tpm)"))`)`
+* `stopping_criterion=`[`StopAfterIteration`](@ref)`(`$(_link(:manifold_dimension; M="base_manifold(TpM)"))`)`
  $(_sc_any)[`StopWhenResidualIsReducedByFactorOrPower`](@ref)`(; κ=κ, θ=θ)`$(_sc_any)[`StopWhenTrustRegionIsExceeded`](@ref)`()`
  $(_sc_any)[`StopWhenCurvatureIsNegative`](@ref)`()`$(_sc_any)[`StopWhenModelIncreased`](@ref)`()`:
  $(_kw_stopping_criterion)
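Finally, a hedged end-to-end check that the glossary interpolations in this patch resolve once the docstrings are evaluated. The inspected substrings are illustrative; any rendered solver docstring would do.

```julia
# Hedged sanity check: glossary calls should be fully interpolated at load time.
using Manopt
s = string(@doc Manopt.conjugate_residual)
@assert occursin("StopAfterIteration", s)  # stopping criterion rendered
@assert !occursin("_link", s)              # no leftover glossary calls in the text
```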