Test different arrayTypes for prox maps
nHackel committed May 28, 2024
1 parent ba840ec commit a9cff87
Showing 1 changed file with 37 additions and 33 deletions.
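Every test now follows the same round trip: the data is built on the CPU, converted with `arrayType(...)`, run through `prox!` on that array type, and converted back with `Array(...)` so the accuracy checks run on plain arrays. A minimal sketch of that pattern, assuming the `prox!`/`L2Regularization` API used in the diff below and using `JLArrays.JLArray` as a hypothetical stand-in for a GPU-like array type (neither JLArrays nor the package import appears in this commit):

    using RegularizedLeastSquares  # assumed package under test; provides prox! and L2Regularization
    using JLArrays                 # assumed: CPU-backed array type with a GPU-style API

    # Build data on the CPU, move it to the array type under test,
    # apply the proximal map there, and convert back for the checks.
    function proxRoundTrip(x::Vector{Float64}, λ; arrayType = JLArray)
        x_l2 = arrayType(copy(x))                       # e.g. Array, JLArray, CuArray, ...
        return Array(prox!(L2Regularization, x_l2, λ))  # prox! returns the mutated array
    end

The default problem sizes also shrink from N=1024 to N=256, presumably to keep the suite fast now that each test runs once per array type.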
test/testProxMaps.jl (70 changes: 37 additions & 33 deletions)
@@ -1,5 +1,5 @@
# check Tikhonov proximal map
-function testL2Prox(N=1024; numPeaks=5, λ=0.01)
+function testL2Prox(N=256; numPeaks=5, λ=0.01, arrayType = Array)
@info "test L2-regularization"
Random.seed!(1234)
x = zeros(N)
@@ -9,14 +9,14 @@ function testL2Prox(N=1024; numPeaks=5, λ=0.01)

# x_l2 = 1. / (1. + 2. *λ)*x
x_l2 = copy(x)
-prox!(L2Regularization, x_l2, λ)
+x_l2 = Array(prox!(L2Regularization, arrayType(x_l2), λ))
@test norm(x_l2 - 1.0/(1.0+2.0*λ)*x) / norm(1.0/(1.0+2.0*λ)*x) ≈ 0 atol=0.001
# check decrease of objective function
@test 0.5*norm(x-x_l2)^2 + norm(L2Regularization, x_l2, λ) <= norm(L2Regularization, x, λ)
end

# denoise a signal consisting of a number of delta peaks
-function testL1Prox(N=1024; numPeaks=5, σ=0.03)
+function testL1Prox(N=256; numPeaks=5, σ=0.03, arrayType = Array)
@info "test L1-regularization"
Random.seed!(1234)
x = zeros(N)
@@ -28,7 +28,7 @@ function testL1Prox(N=1024; numPeaks=5, σ=0.03)
xNoisy = x .+ σ/sqrt(2.0)*(randn(N)+1im*randn(N))

x_l1 = copy(xNoisy)
-prox!(L1Regularization, x_l1, 2*σ)
+x_l1 = Array(prox!(L1Regularization, arrayType(x_l1), 2*σ))

# solution should be better than without denoising
@info "rel. L1 error : $(norm(x - x_l1)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@@ -41,7 +41,7 @@ end
# denoise a signal consisting of multiple slices with delta peaks at the same locations
# only the last slices are noisy.
# Thus, the first slices serve as a reference to enhance denoising
-function testL21Prox(N=1024; numPeaks=5, numSlices=8, noisySlices=2, σ=0.05)
+function testL21Prox(N=256; numPeaks=5, numSlices=8, noisySlices=2, σ=0.05, arrayType = Array)
@info "test L21-regularization"
Random.seed!(1234)
x = zeros(ComplexF64,N,numSlices)
@@ -59,7 +59,7 @@ function testL21Prox(N=1024; numPeaks=5, numSlices=8, noisySlices=2, σ=0.05)
prox!(L1Regularization, x_l1, 2*σ)

x_l21 = copy(xNoisy)
-prox!(L21Regularization, x_l21, 2*σ,slices=numSlices)
+x_l21 = Array(prox!(L21Regularization, arrayType(x_l21), 2*σ,slices=numSlices))

# solution should be better than without denoising and with l1-denoising
@info "rel. L21 error : $(norm(x - x_l21)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@@ -72,7 +72,7 @@ function testL21Prox(N=1024; numPeaks=5, numSlices=8, noisySlices=2, σ=0.05)
end

# denoise a piece-wise constant signal using TV regularization
-function testTVprox(N=1024; numEdges=5, σ=0.05)
+function testTVprox(N=256; numEdges=5, σ=0.05, arrayType = Array)
@info "test TV-regularization"
Random.seed!(1234)
x = zeros(ComplexF64,N,N)
@@ -91,7 +91,7 @@ function testTVprox(N=1024; numEdges=5, σ=0.05)
prox!(L1Regularization, x_l1, 2*σ)

x_tv = copy(xNoisy)
-prox!(TVRegularization, x_tv, 2*σ, shape=(N,N), dims=1:2)
+x_tv = Array(prox!(TVRegularization, arrayType(x_tv), 2*σ, shape=(N,N), dims=1:2))

@info "rel. TV error : $(norm(x - x_tv)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@test norm(x - x_tv) <= norm(x - xNoisy)
@@ -103,7 +103,7 @@ function testTVprox(N=1024; numEdges=5, σ=0.05)
end

# denoise a signal that is piecewise constant along a given direction
-function testDirectionalTVprox(N=256; numEdges=5, σ=0.05, T=ComplexF64)
+function testDirectionalTVprox(N=256; numEdges=5, σ=0.05, T=ComplexF64, arrayType = Array)
x = zeros(T,N,N)
for i=1:numEdges
idx = rand(0:N-1)
@@ -115,7 +115,7 @@ function testDirectionalTVprox(N=256; numEdges=5, σ=0.05, T=ComplexF64)
xNoisy .+= (σ/sqrt(2)) .* randn(T, N, N)

x_tv = copy(xNoisy)
-prox!(TVRegularization, vec(x_tv), 2*σ, shape=(N,N), dims=1)
+x_tv = Array(reshape(prox!(TVRegularization, arrayType(vec(x_tv)), 2*σ, shape=(N,N), dims=1), N, N))

x_tv2 = copy(xNoisy)
for i=1:N
@@ -131,40 +131,40 @@ function testDirectionalTVprox(N=256; numEdges=5, σ=0.05, T=ComplexF64)

## cf. Condat and gradient based algorithm
x_tv3 = copy(xNoisy)
-prox!(TVRegularization, vec(x_tv3), 2*σ, shape=(N,N), dims=(1,))
+x_tv3 = Array(reshape(prox!(TVRegularization, vec(x_tv3), 2*σ, shape=(N,N), dims=(1,)), N, N))
@test norm(x_tv-x_tv3) / norm(x) ≈ 0 atol=1e-2
end

# test enforcement of positivity constraint
-function testPositive(N=1024)
+function testPositive(N=256; arrayType = Array)
@info "test positivity-constraint"
Random.seed!(1234)
x = randn(N) .+ 1im*randn(N)
xPos = real.(x)
xPos[findall(x->x<0,xPos)] .= 0
xProj = copy(x)
-prox!(PositiveRegularization, xProj)
+xProj = Array(prox!(PositiveRegularization, arrayType(xProj)))

@test norm(xProj-xPos)/norm(xPos) ≈ 0 atol=1.e-4
# check decrease of objective function
@test 0.5*norm(x-xProj)^2+norm(PositiveRegularization, xProj) <= norm(PositiveRegularization, x)
end

# test enforcement of "realness"-constraint
-function testProj(N=1012)
+function testProj(N=1012; arrayType = Array)
@info "test realness-constraint"
Random.seed!(1234)
x = randn(N) .+ 1im*randn(N)
xReal = real.(x)
xProj = copy(x)
-prox!(ProjectionRegularization, xProj, projFunc=x->real(x))
+xProj = Array(prox!(ProjectionRegularization, arrayType(xProj), projFunc=x->real(x)))
@test norm(xProj-xReal)/norm(xReal) ≈ 0 atol=1.e-4
# check decrease of objective function
@test 0.5*norm(x-xProj)^2+norm(ProjectionRegularization, xProj,projFunc=x->real(x)) <= norm(ProjectionRegularization, x,projFunc=x->real(x))
end

# test denoising of a low-rank matrix
-function testNuclear(N=32,rank=2;σ=0.05)
+function testNuclear(N=32,rank=2;σ=0.05, arrayType = Array)
@info "test nuclear norm regularization"
Random.seed!(1234)
x = zeros(ComplexF64,N,N);
@@ -183,15 +183,15 @@ function testNuclear(N=32,rank=2;σ=0.05)
xNoisy[:] += σ/sqrt(2.0)*(randn(N*N)+1im*randn(N*N))

x_lr = copy(xNoisy)
-prox!(NuclearRegularization, x_lr,5*σ,svtShape=(32,32))
+x_lr = Array(prox!(NuclearRegularization, arrayType(x_lr),5*σ,svtShape=(32,32)))
@test norm(x - x_lr) <= norm(x - xNoisy)
@test norm(x - x_lr) / norm(x) ≈ 0 atol=0.05
@info "rel. LR error : $(norm(x - x_lr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
# check decrease of objective function
@test 0.5*norm(xNoisy-x_lr)^2+norm(NuclearRegularization, x_lr,5*σ,svtShape=(N,N)) <= norm(NuclearRegularization, xNoisy,5*σ,svtShape=(N,N))
end

-function testLLR(shape=(32,32,80),blockSize=(4,4);σ=0.05)
+function testLLR(shape=(32,32,80),blockSize=(4,4);σ=0.05, arrayType = Array)
@info "test LLR regularization"
Random.seed!(1234)
x = zeros(ComplexF64,shape);
@@ -211,15 +211,15 @@ function testLLR(shape=(32,32,80),blockSize=(4,4);σ=0.05)
xNoisy[:] += σ/sqrt(2.0)*(randn(prod(shape))+1im*randn(prod(shape)))

x_llr = copy(xNoisy)
-prox!(LLRRegularization, x_llr,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false)
+x_llr = Array(prox!(LLRRegularization, arrayType(x_llr),10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false))
@test norm(x - x_llr) <= norm(x - xNoisy)
@test norm(x - x_llr) / norm(x) ≈ 0 atol=0.05
@info "rel. LLR error : $(norm(x - x_llr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
# check decrease of objective function
@test 0.5*norm(xNoisy-x_llr)^2+norm(LLRRegularization, x_llr,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false) <= norm(LLRRegularization, xNoisy,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false)
end

-function testLLROverlapping(shape=(32,32,80),blockSize=(4,4);σ=0.05)
+function testLLROverlapping(shape=(32,32,80),blockSize=(4,4);σ=0.05, arrayType = Array)
@info "test Overlapping LLR regularization"
Random.seed!(1234)
x = zeros(ComplexF64,shape);
@@ -247,7 +247,7 @@ function testLLROverlapping(shape=(32,32,80),blockSize=(4,4);σ=0.05)
@test 0.5*norm(xNoisy-x_llr)^2+normLLR(x_llr,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false) <= normLLR(xNoisy,10*σ,shape=shape[1:2],blockSize=blockSize,randshift=false)
end

-function testLLR_3D(shape=(32,32,32,80),blockSize=(4,4,4);σ=0.05)
+function testLLR_3D(shape=(32,32,32,80),blockSize=(4,4,4);σ=0.05, arrayType = Array)
@info "test LLR 3D regularization"
Random.seed!(1234)
x = zeros(ComplexF64,shape)
@@ -269,7 +269,7 @@ function testLLR_3D(shape=(32,32,32,80),blockSize=(4,4,4);σ=0.05)
xNoisy[:] += σ/sqrt(2.0)*(randn(prod(shape))+1im*randn(prod(shape)))

x_llr = copy(xNoisy)
-prox!(LLRRegularization, x_llr,10*σ,shape=shape[1:end-1],blockSize=blockSize,randshift=false)
+x_llr = Array(prox!(LLRRegularization, arrayType(x_llr),10*σ,shape=shape[1:end-1],blockSize=blockSize,randshift=false))
@test norm(x - x_llr) <= norm(x - xNoisy)
@test norm(x - x_llr) / norm(x) ≈ 0 atol=0.05
@info "rel. LLR 3D error : $(norm(x - x_llr)/ norm(x)) vs $(norm(x - xNoisy)/ norm(x))"
@@ -296,16 +296,20 @@ function testConversion()
end

@testset "Proximal Maps" begin
@testset "L2 Prox" testL2Prox()
@testset "L1 Prox" testL1Prox()
@testset "L21 Prox" testL21Prox()
@testset "TV Prox" testTVprox()
@testset "TV Prox Directional" testDirectionalTVprox()
@testset "Positive Prox" testPositive()
@testset "Projection Prox" testProj()
@testset "Nuclear Prox" testNuclear()
#@testset "LLR Prox" testLLR()
#@testset "LLR Prox Overlapping" #testLLROverlapping()
#@testset "LLR Prox 3D" testLLR_3D()
for arrayType in arrayTypes
@testset "$arrayType" begin
@testset "L2 Prox" testL2Prox(;arrayType)
@testset "L1 Prox" testL1Prox(;arrayType)
@testset "L21 Prox" testL21Prox(;arrayType)
@testset "TV Prox" testTVprox(;arrayType)
@testset "TV Prox Directional" testDirectionalTVprox(;arrayType)
@testset "Positive Prox" testPositive(;arrayType)
@testset "Projection Prox" testProj(;arrayType)
@testset "Nuclear Prox" testNuclear(;arrayType)
#@testset "LLR Prox: $arrayType" testLLR(;arrayType)
#@testset "LLR Prox Overlapping: $arrayType" testLLROverlapping(;arrayType)
#@testset "LLR Prox 3D: $arrayType" testLLR_3D(;arrayType)
end
end
@testset "Prox Lambda Conversion" testConversion()
end
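The loop iterates over `arrayTypes`, which this file never defines; it is presumably provided by the surrounding test setup (e.g. in runtests.jl). A purely illustrative sketch of such a definition, with every name here an assumption rather than part of this commit:

    # Hypothetical setup outside this file: the array types to exercise.
    arrayTypes = [Array]

    # A GPU-style type could be appended so device code paths are covered
    # without real GPU hardware, e.g. the CPU-backed JLArray:
    using JLArrays
    push!(arrayTypes, JLArray)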
