From 12b8ef45b34cb3510a77752a4179a8c81369660c Mon Sep 17 00:00:00 2001 From: nHackel Date: Fri, 23 Aug 2024 11:20:46 +0200 Subject: [PATCH] Fix smaller formatting issues in docs --- docs/src/literate/examples/compressed_sensing.jl | 2 +- docs/src/literate/examples/computed_tomography.jl | 4 ++-- docs/src/literate/examples/getting_started.jl | 6 +++--- docs/src/literate/howto/weighting.jl | 2 +- docs/src/solvers.md | 3 ++- 5 files changed, 9 insertions(+), 8 deletions(-) diff --git a/docs/src/literate/examples/compressed_sensing.jl b/docs/src/literate/examples/compressed_sensing.jl index 4e81d0f..41ed15f 100644 --- a/docs/src/literate/examples/compressed_sensing.jl +++ b/docs/src/literate/examples/compressed_sensing.jl @@ -46,7 +46,7 @@ fig # To recover the image from the measurement vector, we solve the TV-regularized least squares problem: # ```math # \begin{equation} -# \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_2^2 + \vert\vert\mathbf{x}\vert\vert_{\lambda\text{TV}} . +# \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_2^2 + \lambda\vert\vert\mathbf{x}\vert\vert_{\text{TV}} . # \end{equation} # ``` diff --git a/docs/src/literate/examples/computed_tomography.jl b/docs/src/literate/examples/computed_tomography.jl index 2d1fca2..1159dc8 100644 --- a/docs/src/literate/examples/computed_tomography.jl +++ b/docs/src/literate/examples/computed_tomography.jl @@ -13,7 +13,7 @@ N = 256 image = shepp_logan(N, SheppLoganToft()) size(image) -# This produces a 64x64 image of a Shepp-Logan phantom. +# This produces a 256x256 image of a Shepp-Logan phantom. 
using RadonKA, LinearOperatorCollection angles = collect(range(0, π, 256)) @@ -43,7 +43,7 @@ fig # To recover the image from the measurement vector, we solve the $l^2_2$-regularized least squares problem # ```math # \begin{equation} -# \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_2^2 + \vert\vert\mathbf{x}\vert\vert^2_2 . +# \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_2^2 + \lambda\vert\vert\mathbf{x}\vert\vert^2_2 . # \end{equation} # ``` diff --git a/docs/src/literate/examples/getting_started.jl b/docs/src/literate/examples/getting_started.jl index 750d334..d4050d9 100644 --- a/docs/src/literate/examples/getting_started.jl +++ b/docs/src/literate/examples/getting_started.jl @@ -23,7 +23,7 @@ using RegularizedLeastSquares # \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_2^2 + \mathbf{R(x)} . # \end{equation} # ``` -# where $\mathbf{A}$ is a linear operator, $\mathbf{y}$ is the measurement vector, and $\mathbf{R(x)}$ is an (optional) regularization term. +# where $\mathbf{A}$ is a linear operator, $\mathbf{b}$ is the measurement vector, and $\mathbf{R(x)}$ is an (optional) regularization term. # The goal is to retrieve an approximation of the unknown vector $\mathbf{x}$. In this first exampel we will just work with simple random arrays. For more advanced examples, please refer to the examples. A = rand(32, 16) @@ -41,11 +41,11 @@ isapprox(x, x_approx, rtol = 0.001) # The CGNR algorithm can solve optimzation problems of the form: # ```math # \begin{equation} -# \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_2^2 + \vert\vert\mathbf{x}\vert\vert^2_2 . +# \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_2^2 + \lambda\vert\vert\mathbf{x}\vert\vert^2_2 . 
# \end{equation} # ``` -# The corresponding solver can be built with the L2 regularization term: +# The corresponding solver can be built with the $l^2_2$-regularization term: solver = createLinearSolver(CGNR, A; reg = L2Regularization(0.0001), iterations=32); x_approx = solve!(solver, b) isapprox(x, x_approx, rtol = 0.001) diff --git a/docs/src/literate/howto/weighting.jl b/docs/src/literate/howto/weighting.jl index 54fc6eb..d5682d4 100644 --- a/docs/src/literate/howto/weighting.jl +++ b/docs/src/literate/howto/weighting.jl @@ -11,7 +11,7 @@ # In the following, we will solve a weighted least squares problem of the form: # ```math # \begin{equation} -# \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_\mathbf{W}^2 + \vert\vert\mathbf{x}\vert\vert^2_2 . +# \underset{\mathbf{x}}{argmin} \frac{1}{2}\vert\vert \mathbf{A}\mathbf{x}-\mathbf{b} \vert\vert_\mathbf{W}^2 + \lambda\vert\vert\mathbf{x}\vert\vert^2_2 . # \end{equation} # ``` using RegularizedLeastSquares, LinearOperatorCollection, LinearAlgebra diff --git a/docs/src/solvers.md b/docs/src/solvers.md index 84a96de..dc6294a 100644 --- a/docs/src/solvers.md +++ b/docs/src/solvers.md @@ -82,4 +82,5 @@ SolverVariant(A; kwargs...) = Solver(A, VariantState(kwargs...)) function iterate(solver::Solver, state::VarianteState) # Custom iteration -end \ No newline at end of file +end +``` \ No newline at end of file