From a9cff87c0e5ae0646a1c9121fcc26c10bcb7435e Mon Sep 17 00:00:00 2001
From: nHackel
Date: Tue, 28 May 2024 16:02:41 +0200
Subject: [PATCH] Test different arrayTypes for prox maps

---
 test/testProxMaps.jl | 70 +++++++++++++++++++++++---------------------
 1 file changed, 37 insertions(+), 33 deletions(-)

diff --git a/test/testProxMaps.jl b/test/testProxMaps.jl
index 788f1148..eaf1522b 100644
--- a/test/testProxMaps.jl
+++ b/test/testProxMaps.jl
@@ -1,5 +1,5 @@
 # check Tikhonov proximal map
-function testL2Prox(N=1024; numPeaks=5, λ=0.01)
+function testL2Prox(N=256; numPeaks=5, λ=0.01, arrayType = Array)
   @info "test L2-regularization"
   Random.seed!(1234)
   x = zeros(N)
@@ -9,14 +9,14 @@
   # x_l2 = 1. / (1. + 2. *λ)*x
   x_l2 = copy(x)
-  prox!(L2Regularization, x_l2, λ)
+  x_l2 = Array(prox!(L2Regularization, arrayType(x_l2), λ))
   @test norm(x_l2 - 1.0/(1.0+2.0*λ)*x) / norm(1.0/(1.0+2.0*λ)*x) ≈ 0 atol=0.001

   # check decrease of objective function
   @test 0.5*norm(x-x_l2)^2 + norm(L2Regularization, x_l2, λ) <= norm(L2Regularization, x, λ)
 end

 # denoise a signal consisting of a number of delta peaks
-function testL1Prox(N=1024; numPeaks=5, σ=0.03)
+function testL1Prox(N=256; numPeaks=5, σ=0.03, arrayType = Array)
   @info "test L1-regularization"
   Random.seed!(1234)
   x = zeros(N)
@@ -28,7 +28,7 @@
   xNoisy = x .+ σ/sqrt(2.0)*(randn(N)+1im*randn(N))

   x_l1 = copy(xNoisy)
-  prox!(L1Regularization, x_l1, 2*σ)
+  x_l1 = Array(prox!(L1Regularization, arrayType(x_l1), 2*σ))

   # solution should be better than without denoising
   @info "rel. L1 error : $(norm(x - x_l1)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@@ -41,7 +41,7 @@ end
 # denoise a signal consisting of multiple slices with delta peaks at the same locations
 # only the last slices are noisy.
 # Thus, the first slices serve as a reference to enhance denoising
-function testL21Prox(N=1024; numPeaks=5, numSlices=8, noisySlices=2, σ=0.05)
+function testL21Prox(N=256; numPeaks=5, numSlices=8, noisySlices=2, σ=0.05, arrayType = Array)
   @info "test L21-regularization"
   Random.seed!(1234)
   x = zeros(ComplexF64,N,numSlices)
@@ -59,7 +59,7 @@
   prox!(L1Regularization, x_l1, 2*σ)

   x_l21 = copy(xNoisy)
-  prox!(L21Regularization, x_l21, 2*σ,slices=numSlices)
+  x_l21 = Array(prox!(L21Regularization, arrayType(x_l21), 2*σ,slices=numSlices))

   # solution should be better than without denoising and with l1-denoising
   @info "rel. L21 error : $(norm(x - x_l21)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@@ -72,7 +72,7 @@
 end

 # denoise a piece-wise constant signal using TV regularization
-function testTVprox(N=1024; numEdges=5, σ=0.05)
+function testTVprox(N=256; numEdges=5, σ=0.05, arrayType = Array)
   @info "test TV-regularization"
   Random.seed!(1234)
   x = zeros(ComplexF64,N,N)
@@ -91,7 +91,7 @@
   prox!(L1Regularization, x_l1, 2*σ)

   x_tv = copy(xNoisy)
-  prox!(TVRegularization, x_tv, 2*σ, shape=(N,N), dims=1:2)
+  x_tv = Array(prox!(TVRegularization, arrayType(x_tv), 2*σ, shape=(N,N), dims=1:2))

   @info "rel. TV error : $(norm(x - x_tv)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
   @test norm(x - x_tv) <= norm(x - xNoisy)
@@ -103,7 +103,7 @@
 end

 # denoise a signal that is piecewise constant along a given direction
-function testDirectionalTVprox(N=256; numEdges=5, σ=0.05, T=ComplexF64)
+function testDirectionalTVprox(N=256; numEdges=5, σ=0.05, T=ComplexF64, arrayType = Array)
   x = zeros(T,N,N)
   for i=1:numEdges
     idx = rand(0:N-1)
@@ -115,7 +115,7 @@
   xNoisy .+= (σ/sqrt(2)) .* randn(T, N, N)

   x_tv = copy(xNoisy)
-  prox!(TVRegularization, vec(x_tv), 2*σ, shape=(N,N), dims=1)
+  x_tv = Array(reshape(prox!(TVRegularization, arrayType(vec(x_tv)), 2*σ, shape=(N,N), dims=1), N, N))

   x_tv2 = copy(xNoisy)
   for i=1:N
@@ -131,19 +131,19 @@
   ## cf. Condat and gradient based algorithm
   x_tv3 = copy(xNoisy)
-  prox!(TVRegularization, vec(x_tv3), 2*σ, shape=(N,N), dims=(1,))
+  x_tv3 = Array(reshape(prox!(TVRegularization, vec(x_tv3), 2*σ, shape=(N,N), dims=(1,)), N, N))
   @test norm(x_tv-x_tv3) / norm(x) ≈ 0 atol=1e-2
 end

 # test enforcement of positivity constraint
-function testPositive(N=1024)
+function testPositive(N=256; arrayType = Array)
   @info "test positivity-constraint"
   Random.seed!(1234)
   x = randn(N) .+ 1im*randn(N)
   xPos = real.(x)
   xPos[findall(x->x<0,xPos)] .= 0
   xProj = copy(x)
-  prox!(PositiveRegularization, xProj)
+  xProj = Array(prox!(PositiveRegularization, arrayType(xProj)))
   @test norm(xProj-xPos)/norm(xPos) ≈ 0 atol=1.e-4

   # check decrease of objective function
@@ -151,20 +151,20 @@
 end

 # test enforcement of "realness"-constraint
-function testProj(N=1012)
+function testProj(N=1012; arrayType = Array)
   @info "test realness-constraint"
   Random.seed!(1234)
   x = randn(N) .+ 1im*randn(N)
   xReal = real.(x)
   xProj = copy(x)
-  prox!(ProjectionRegularization, xProj, projFunc=x->real(x))
+  xProj = Array(prox!(ProjectionRegularization, arrayType(xProj), projFunc=x->real(x)))
   @test norm(xProj-xReal)/norm(xReal) ≈ 0 atol=1.e-4
   # check decrease of objective function
   @test 0.5*norm(x-xProj)^2+norm(ProjectionRegularization, xProj,projFunc=x->real(x)) <= norm(ProjectionRegularization, x,projFunc=x->real(x))
 end

 # test denoising of a low-rank matrix
-function testNuclear(N=32,rank=2;σ=0.05)
+function testNuclear(N=32,rank=2;σ=0.05, arrayType = Array)
   @info "test nuclear norm regularization"
   Random.seed!(1234)
   x = zeros(ComplexF64,N,N);
@@ -183,7 +183,7 @@
   xNoisy[:] += σ/sqrt(2.0)*(randn(N*N)+1im*randn(N*N))

   x_lr = copy(xNoisy)
-  prox!(NuclearRegularization, x_lr,5*σ,svtShape=(32,32))
+  x_lr = Array(prox!(NuclearRegularization, arrayType(x_lr),5*σ,svtShape=(N,N)))
   @test norm(x - x_lr) <= norm(x - xNoisy)
   @test norm(x - x_lr) / norm(x) ≈ 0 atol=0.05
   @info "rel. LR error : $(norm(x - x_lr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@@ -191,7 +191,7 @@
   @test 0.5*norm(xNoisy-x_lr)^2+norm(NuclearRegularization, x_lr,5*σ,svtShape=(N,N)) <= norm(NuclearRegularization, xNoisy,5*σ,svtShape=(N,N))
 end

-function testLLR(shape=(32,32,80),blockSize=(4,4);σ=0.05)
+function testLLR(shape=(32,32,80),blockSize=(4,4);σ=0.05, arrayType = Array)
   @info "test LLR regularization"
   Random.seed!(1234)
   x = zeros(ComplexF64,shape);
@@ -211,7 +211,7 @@
   xNoisy[:] += σ/sqrt(2.0)*(randn(prod(shape))+1im*randn(prod(shape)))

   x_llr = copy(xNoisy)
-  prox!(LLRRegularization, x_llr,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false)
+  x_llr = Array(prox!(LLRRegularization, arrayType(x_llr),10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false))
   @test norm(x - x_llr) <= norm(x - xNoisy)
   @test norm(x - x_llr) / norm(x) ≈ 0 atol=0.05
   @info "rel. LLR error : $(norm(x - x_llr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@@ -219,7 +219,7 @@
   @test 0.5*norm(xNoisy-x_llr)^2+norm(LLRRegularization, x_llr,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false) <= norm(LLRRegularization, xNoisy,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false)
 end

-function testLLROverlapping(shape=(32,32,80),blockSize=(4,4);σ=0.05)
+function testLLROverlapping(shape=(32,32,80),blockSize=(4,4);σ=0.05, arrayType = Array)
   @info "test Overlapping LLR regularization"
   Random.seed!(1234)
   x = zeros(ComplexF64,shape);
@@ -247,7 +247,7 @@
   @test 0.5*norm(xNoisy-x_llr)^2+normLLR(x_llr,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false) <= normLLR(xNoisy,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false)
 end

-function testLLR_3D(shape=(32,32,32,80),blockSize=(4,4,4);σ=0.05)
+function testLLR_3D(shape=(32,32,32,80),blockSize=(4,4,4);σ=0.05, arrayType = Array)
   @info "test LLR 3D regularization"
   Random.seed!(1234)
   x = zeros(ComplexF64,shape)
@@ -269,7 +269,7 @@
   xNoisy[:] += σ/sqrt(2.0)*(randn(prod(shape))+1im*randn(prod(shape)))

   x_llr = copy(xNoisy)
-  prox!(LLRRegularization, x_llr,10*σ,shape=shape[1:end-1],blockSize=blockSize,randshift=false)
+  x_llr = Array(prox!(LLRRegularization, arrayType(x_llr),10*σ,shape=shape[1:end-1],blockSize=blockSize,randshift=false))
   @test norm(x - x_llr) <= norm(x - xNoisy)
   @test norm(x - x_llr) / norm(x) ≈ 0 atol=0.05
   @info "rel. LLR 3D error : $(norm(x - x_llr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@@ -296,16 +296,20 @@
 end

 @testset "Proximal Maps" begin
-  @testset "L2 Prox" testL2Prox()
-  @testset "L1 Prox" testL1Prox()
-  @testset "L21 Prox" testL21Prox()
-  @testset "TV Prox" testTVprox()
-  @testset "TV Prox Directional" testDirectionalTVprox()
-  @testset "Positive Prox" testPositive()
-  @testset "Projection Prox" testProj()
-  @testset "Nuclear Prox" testNuclear()
-  #@testset "LLR Prox" testLLR()
-  #@testset "LLR Prox Overlapping" #testLLROverlapping()
-  #@testset "LLR Prox 3D" testLLR_3D()
+  for arrayType in arrayTypes
+    @testset "$arrayType" begin
+      @testset "L2 Prox" testL2Prox(;arrayType)
+      @testset "L1 Prox" testL1Prox(;arrayType)
+      @testset "L21 Prox" testL21Prox(;arrayType)
+      @testset "TV Prox" testTVprox(;arrayType)
+      @testset "TV Prox Directional" testDirectionalTVprox(;arrayType)
+      @testset "Positive Prox" testPositive(;arrayType)
+      @testset "Projection Prox" testProj(;arrayType)
+      @testset "Nuclear Prox" testNuclear(;arrayType)
+      #@testset "LLR Prox: $arrayType" testLLR(;arrayType)
+      #@testset "LLR Prox Overlapping: $arrayType" testLLROverlapping(;arrayType)
+      #@testset "LLR Prox 3D: $arrayType" testLLR_3D(;arrayType)
+    end
+  end
   @testset "Prox Lambda Conversion" testConversion()
 end
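Note on the test harness (editor's sketch, not part of the patch): the new loop iterates a collection `arrayTypes` that this file never defines, so it must be supplied by the surrounding test setup, e.g. in test/runtests.jl. A minimal sketch of such a definition, assuming JLArrays.jl as a test dependency (its JLArray is a CPU-backed reference GPU array type, so the device code paths can run in CI without GPU hardware):

    using JLArrays                 # assumed test dependency, not confirmed by this patch
    arrayTypes = [Array, JLArray]  # collection the testset loop iterates over

Every converted test follows the same round-trip pattern: convert the input to the device array type, apply prox! there, and convert the result back to a host Array so the norm-based checks stay on the CPU. For example, with the hypothetical setup above:

    x = randn(16)
    x_denoised = Array(prox!(L2Regularization, JLArray(x), 0.01))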