diff --git a/src/SparseIR.jl b/src/SparseIR.jl
index dbab101..4246c67 100644
--- a/src/SparseIR.jl
+++ b/src/SparseIR.jl
@@ -11,20 +11,20 @@ export overlap
 export LogisticKernel, RegularizedBoseKernel
 export AugmentedBasis, TauConst, TauLinear, MatsubaraConst
 export TauSampling, MatsubaraSampling, evaluate, fit, evaluate!, fit!,
-       MatsubaraSampling64F, MatsubaraSampling64B, TauSampling64, sampling_points
+    MatsubaraSampling64F, MatsubaraSampling64B, TauSampling64, sampling_points
 
 using MultiFloats: MultiFloats, Float64x2, _call_big
 # If we call MultiFloats.use_bigfloat_transcendentals() like MultiFloats
 # recommends, we get an error during precompilation:
 for name in (:exp, :sinh, :cosh)
     eval(:(function Base.$name(x::Float64x2)
-        Float64x2(_call_big($name, x, precision(Float64x2) + 20))
+        Float64x2(_call_big($name, x, 107 + 20)) # 107 == precision(Float64x2)
     end))
 end
 
-using LinearAlgebra: LinearAlgebra, cond, dot, svd, SVD, QRIteration, mul!
+using LinearAlgebra: LinearAlgebra, cond, dot, svd, SVD, QRIteration, mul!, norm
 using QuadGK: gauss, quadgk
 using Bessels: sphericalbesselj
-using PrecompileTools
+using PrecompileTools: PrecompileTools, @compile_workload
 
 include("_linalg.jl")
 include("_roots.jl")
diff --git a/src/augment.jl b/src/augment.jl
index cada295..ef90ad4 100644
--- a/src/augment.jl
+++ b/src/augment.jl
@@ -147,11 +147,11 @@ augmentedfunction(aτ::AugmentedTauFunction) = aτ.a
 
 AugmentedTauFunction(fbasis, faug) = AugmentedTauFunction(AugmentedFunction(fbasis, faug))
 
-xmin(aτ::AugmentedTauFunction) = fbasis(aτ).xmin
-xmax(aτ::AugmentedTauFunction) = fbasis(aτ).xmax
+xmin(aτ::AugmentedTauFunction) = xmin(fbasis(aτ))
+xmax(aτ::AugmentedTauFunction) = xmax(fbasis(aτ))
 
 function deriv(aτ::AugmentedTauFunction, n=Val(1))
-    dbasis = deriv.(fbasis(aτ), n)
+    dbasis = PiecewiseLegendrePolyVector(deriv.(fbasis(aτ), n))
     daug = deriv.(faug(aτ), n)
     return AugmentedTauFunction(dbasis, daug)
 end
diff --git a/src/basis.jl b/src/basis.jl
index af04ecf..5f8fed0 100644
--- a/src/basis.jl
+++ b/src/basis.jl
@@ -59,14 +59,14 @@ Construct a finite temperature basis suitable for the given `S` (`Fermionic`
 or `Bosonic`) and cutoffs `β` and `ωmax`.
 """
 function FiniteTempBasis{S}(β::Real, ωmax::Real, ε=nothing; max_size=nothing,
-                            kernel=LogisticKernel(β * ωmax),
-                            sve_result=SVEResult(kernel; ε)) where {S}
+        kernel=LogisticKernel(β * ωmax),
+        sve_result=SVEResult(kernel; ε)) where {S}
     FiniteTempBasis(S(), β, ωmax, ε; max_size, kernel, sve_result)
 end
 
 function FiniteTempBasis(statistics::Statistics, β::Real, ωmax::Real, ε=nothing;
-                         max_size=nothing, kernel=LogisticKernel(β * ωmax),
-                         sve_result=SVEResult(kernel; ε))
+        max_size=nothing, kernel=LogisticKernel(β * ωmax),
+        sve_result=SVEResult(kernel; ε))
     β > 0 || throw(DomainError(β, "Inverse temperature β must be positive"))
     ωmax ≥ 0 || throw(DomainError(ωmax, "Frequency cutoff ωmax must be non-negative"))
@@ -82,10 +82,10 @@ function FiniteTempBasis(statistics::Statistics, β::Real, ωmax::Real, ε=nothi
     # knots according to: tau = β/2 * (x + 1), w = ωmax * y. Scaling
     # the data is not necessary as the normalization is inferred.
     ωmax = Λ(kernel) / β
-    u_knots = (β / 2) .* (u_.knots .+ 1)
-    v_knots = ωmax .* v_.knots
-    u = PiecewiseLegendrePolyVector(u_, u_knots; Δx=(β / 2) .* u_.Δx, symm=u_.symm)
-    v = PiecewiseLegendrePolyVector(v_, v_knots; Δx=ωmax .* v_.Δx, symm=v_.symm)
+    u_knots = (β / 2) .* (knots(u_) .+ 1)
+    v_knots = ωmax .* knots(v_)
+    u = PiecewiseLegendrePolyVector(u_, u_knots; Δx=(β / 2) .* Δx(u_), symm=symm(u_))
+    v = PiecewiseLegendrePolyVector(v_, v_knots; Δx=ωmax .* Δx(v_), symm=symm(v_))
 
     # The singular values are scaled to match the change of variables, with
     # the additional complexity that the kernel may have an additional
@@ -94,9 +94,9 @@ function FiniteTempBasis(statistics::Statistics, β::Real, ωmax::Real, ε=nothi
 
     # HACK: as we don't yet support Fourier transforms on anything but the
     # unit interval, we need to scale the underlying data.
-    û_base_full = PiecewiseLegendrePolyVector(sqrt(β) .* sve_result.u.data, sve_result.u)
+    û_base_full = PiecewiseLegendrePolyVector(sqrt(β) .* data(sve_result.u), sve_result.u)
     û_full = PiecewiseLegendreFTVector(û_base_full, statistics;
-                                       n_asymp=conv_radius(kernel))
+        n_asymp=conv_radius(kernel))
     û = û_full[1:length(s)]
 
     return FiniteTempBasis(kernel, sve_result, accuracy, float(β), u, v, s, û, û_full)
@@ -113,8 +113,8 @@ end
 
 function Base.getindex(basis::FiniteTempBasis, i::AbstractRange)
     FiniteTempBasis(statistics(basis), β(basis), ωmax(basis), nothing;
-                    max_size=range_to_length(i), kernel=basis.kernel,
-                    sve_result=basis.sve_result)
+        max_size=range_to_length(i), kernel=basis.kernel,
+        sve_result=basis.sve_result)
 end
 
 significance(basis::FiniteTempBasis) = basis.s ./ first(basis.s)
@@ -148,8 +148,8 @@ since ``Λ == β * ωmax`` stays constant.
 function rescale(basis::FiniteTempBasis, new_β)
     new_ωmax = Λ(kernel(basis)) / new_β
     return FiniteTempBasis(statistics(basis), new_β, new_ωmax, nothing;
-                           max_size=length(basis), kernel=kernel(basis),
-                           sve_result=sve_result(basis))
+        max_size=length(basis), kernel=kernel(basis),
+        sve_result=sve_result(basis))
 end
 
 """
@@ -160,15 +160,15 @@ Construct `FiniteTempBasis` objects for fermion and bosons using the same
 `LogisticKernel` instance.
 """
 function finite_temp_bases(β::Real, ωmax::Real, ε=nothing;
-                           kernel=LogisticKernel(β * ωmax),
-                           sve_result=SVEResult(kernel; ε))
+        kernel=LogisticKernel(β * ωmax),
+        sve_result=SVEResult(kernel; ε))
     basis_f = FiniteTempBasis{Fermionic}(β, ωmax, ε; sve_result, kernel)
     basis_b = FiniteTempBasis{Bosonic}(β, ωmax, ε; sve_result, kernel)
     return basis_f, basis_b
 end
 
 function default_sampling_points(u::PiecewiseLegendrePolyVector, L::Integer)
-    (u.xmin, u.xmax) == (-1, 1) || error("Expecting unscaled functions here.")
+    (xmin(u), xmax(u)) == (-1, 1) || error("Expecting unscaled functions here.")
 
     # For orthogonal polynomials (the high-T limit of IR), we know that the
     # ideal sampling points for a basis of size L are the roots of the L-th
@@ -188,8 +188,8 @@ function default_sampling_points(u::PiecewiseLegendrePolyVector, L::Integer)
         # local extrema, is slightly worse conditioned than putting it in the
         # middel. This can be understood by the fact that the roots never
         # occur right at the border.
-        left = (first(maxima) + poly.xmin) / 2
-        right = (last(maxima) + poly.xmax) / 2
+        left = (first(maxima) + xmin(poly)) / 2
+        right = (last(maxima) + xmax(poly)) / 2
         x₀ = [left; maxima; right]
     end
 
@@ -202,7 +202,7 @@ function default_sampling_points(u::PiecewiseLegendrePolyVector, L::Integer)
 end
 
 function default_matsubara_sampling_points(û::PiecewiseLegendreFTVector, L::Integer;
-                                           fence=false, positive_only=false)
+        fence=false, positive_only=false)
     l_requested = L
 
     # The number of sign changes is always odd for bosonic basis and even for fermionic
diff --git a/src/poly.jl b/src/poly.jl
index 0c7ce65..6d9a5a4 100644
--- a/src/poly.jl
+++ b/src/poly.jl
@@ -20,47 +20,57 @@ struct PiecewiseLegendrePoly <: Function
     xm        :: Vector{Float64}
     inv_xs    :: Vector{Float64}
-    norm      :: Vector{Float64}
+    norms     :: Vector{Float64}
 
     function PiecewiseLegendrePoly(polyorder::Integer, xmin::Real,
-                                   xmax::Real, knots::AbstractVector,
-                                   Δx::AbstractVector, data::AbstractMatrix, symm::Integer,
-                                   l::Integer, xm::AbstractVector, inv_xs::AbstractVector,
-                                   norm::AbstractVector)
+            xmax::Real, knots::AbstractVector,
+            Δx::AbstractVector, data::AbstractMatrix, symm::Integer,
+            l::Integer, xm::AbstractVector, inv_xs::AbstractVector,
+            norms::AbstractVector)
         !any(isnan, data) || error("data contains NaN")
         issorted(knots) || error("knots must be monotonically increasing")
         @inbounds for i in eachindex(Δx)
             Δx[i] ≈ knots[i + 1] - knots[i] || error("Δx must work with knots")
         end
-        return new(polyorder, xmin, xmax, knots, Δx, data, symm, l, xm, inv_xs, norm)
+        return new(polyorder, xmin, xmax, knots, Δx, data, symm, l, xm, inv_xs, norms)
     end
 end
 
-function PiecewiseLegendrePoly(data, p::PiecewiseLegendrePoly; symm=p.symm)
-    return PiecewiseLegendrePoly(p.polyorder, p.xmin, p.xmax, copy(p.knots), copy(p.Δx),
-                                 data, symm, p.l, copy(p.xm), copy(p.inv_xs), copy(p.norm))
+function PiecewiseLegendrePoly(data, p::PiecewiseLegendrePoly; symm=symm(p))
+    return PiecewiseLegendrePoly(
+        polyorder(p), xmin(p), xmax(p), copy(knots(p)), copy(Δx(p)),
+        data, symm, p.l, copy(p.xm), copy(p.inv_xs), copy(norms(p)))
 end
 
 function PiecewiseLegendrePoly(data::Matrix, knots::Vector, l::Integer;
-                               Δx=diff(knots), symm=0)
+        Δx=diff(knots), symm=0)
     polyorder, nsegments = size(data)
     length(knots) == nsegments + 1 || error("Invalid knots array")
     xm = @views @. (knots[begin:(end - 1)] + knots[(begin + 1):end]) / 2
     inv_xs = 2 ./ Δx
-    norm = sqrt.(inv_xs)
+    norms = sqrt.(inv_xs)
     return PiecewiseLegendrePoly(polyorder, first(knots), last(knots), knots,
-                                 Δx, data, symm, l, xm, inv_xs, norm)
+        Δx, data, symm, l, xm, inv_xs, norms)
 end
 
 Base.size(::PiecewiseLegendrePoly) = ()
 
+xmin(p::PiecewiseLegendrePoly) = p.xmin
+xmax(p::PiecewiseLegendrePoly) = p.xmax
+knots(p::PiecewiseLegendrePoly) = p.knots
+Δx(p::PiecewiseLegendrePoly) = p.Δx
+symm(p::PiecewiseLegendrePoly) = p.symm
+data(p::PiecewiseLegendrePoly) = p.data
+norms(p::PiecewiseLegendrePoly) = p.norms
+polyorder(p::PiecewiseLegendrePoly) = p.polyorder
+
 function Base.show(io::IO, ::MIME"text/plain", p::PiecewiseLegendrePoly)
-    print(io, "PiecewiseLegendrePoly on [$(p.xmin), $(p.xmax)], order=$(p.polyorder)")
+    print(io, "PiecewiseLegendrePoly on [$(xmin(p)), $(xmax(p))], order=$(polyorder(p))")
 end
 
 @inline function (poly::PiecewiseLegendrePoly)(x::Real)
     i, x̃ = split(poly, x)
-    return @inbounds legval(x̃, @view poly.data[:, i]) * poly.norm[i]
+    return @inbounds legval(x̃, @view data(poly)[:, i]) * norms(poly)[i]
 end
 
 (poly::PiecewiseLegendrePoly)(xs::AbstractVector) = poly.(xs)
@@ -80,10 +90,10 @@ using adaptive Gauss-Legendre quadrature.
 difficulties of the integrand may occur (e.g. singularities, discontinuities).
 """
 function overlap(poly::PiecewiseLegendrePoly, f::F;
-                 rtol=eps(), return_error=false, maxevals=10^4, points=Float64[]) where {F}
+        rtol=eps(), return_error=false, maxevals=10^4, points=Float64[]) where {F}
     int_result, int_error = quadgk(x -> poly(x) * f(x),
-                                   unique!(sort!([poly.knots; points]))...;
-                                   rtol, order=10, maxevals)
+        unique!(sort!([knots(poly); points]))...;
+        rtol, order=10, maxevals)
     if return_error
         return int_result, int_error
     else
@@ -102,7 +112,7 @@ function deriv(poly::PiecewiseLegendrePoly, ::Val{n}=Val(1)) where {n}
     @views @inbounds for i in axes(ddata, 2)
         ddata[:, i] .*= poly.inv_xs[i]^n
     end
-    return PiecewiseLegendrePoly(ddata, poly; symm=(-1)^n * poly.symm)
+    return PiecewiseLegendrePoly(ddata, poly; symm=(-1)^n * symm(poly))
 end
 
 """
@@ -111,18 +121,18 @@ end
 Find all roots of the piecewise polynomial `poly`.
 """
 function roots(poly::PiecewiseLegendrePoly; tol=1e-10, alpha=Val(2))
-    grid = poly.knots
+    grid = knots(poly)
     grid = refine_grid(grid, alpha)
     return find_all(poly, grid)
 end
 
 function Base.checkbounds(::Type{Bool}, poly::PiecewiseLegendrePoly, x::Real)
-    poly.xmin ≤ x ≤ poly.xmax
+    xmin(poly) ≤ x ≤ xmax(poly)
 end
 
 function Base.checkbounds(poly::PiecewiseLegendrePoly, x::Real)
     checkbounds(Bool, poly, x) ||
-        throw(DomainError(x, "The domain is [$(poly.xmin), $(poly.xmax)]"))
+        throw(DomainError(x, "The domain is [$(xmin(poly)), $(xmax(poly))]"))
 end
 
 """
@@ -135,25 +145,25 @@ Find segment of poly's domain that covers `x`.
 @inline function split(poly, x::Real)
     @boundscheck checkbounds(poly, x)
-    i = max(searchsortedlast(poly.knots, x; lt=≤), 1)
+    i = max(searchsortedlast(knots(poly), x; lt=≤), 1)
     x̃ = x - poly.xm[i]
     x̃ *= poly.inv_xs[i]
     return i, x̃
 end
 
 function Base.:*(poly::PiecewiseLegendrePoly, factor::Number)
-    return PiecewiseLegendrePoly(poly.data * factor, poly.knots, poly.l;
-                                 Δx=poly.Δx, symm=poly.symm)
+    return PiecewiseLegendrePoly(data(poly) * factor, knots(poly), poly.l;
+        Δx=Δx(poly), symm=symm(poly))
 end
 
 Base.:*(factor::Number, poly::PiecewiseLegendrePoly) = poly * factor
 
 function Base.:+(p1::PiecewiseLegendrePoly, p2::PiecewiseLegendrePoly)
-    p1.knots == p2.knots || error("knots must be the same")
-    return PiecewiseLegendrePoly(p1.data + p2.data, p1.knots, -1;
-                                 Δx=p1.Δx, symm=p1.symm == p2.symm ? p1.symm : 0)
+    knots(p1) == knots(p2) || error("knots must be the same")
+    return PiecewiseLegendrePoly(data(p1) + data(p2), knots(p1), -1;
+        Δx=Δx(p1), symm=symm(p1) == symm(p2) ? symm(p1) : 0)
 end
 
 function Base.:-(poly::PiecewiseLegendrePoly)
-    return PiecewiseLegendrePoly(-poly.data, poly.knots, -1;
-                                 Δx=poly.Δx, symm=poly.symm)
+    return PiecewiseLegendrePoly(-data(poly), knots(poly), -1;
+        Δx=Δx(poly), symm=symm(poly))
 end
 
 Base.:-(p1::PiecewiseLegendrePoly, p2::PiecewiseLegendrePoly) = p1 + (-p2)
@@ -164,62 +174,81 @@
 """
     PiecewiseLegendrePolyVector
 
-Alias for `Vector{PiecewiseLegendrePoly}`.
+Contains a `Vector{PiecewiseLegendrePoly}`.
 """
-const PiecewiseLegendrePolyVector = Vector{PiecewiseLegendrePoly}
+struct PiecewiseLegendrePolyVector <: AbstractVector{PiecewiseLegendrePoly}
+    polyvec::Vector{PiecewiseLegendrePoly}
+end
+
+Base.size(polys::PiecewiseLegendrePolyVector) = size(polys.polyvec)
+
+Base.IndexStyle(::Type{PiecewiseLegendrePolyVector}) = IndexLinear()
+
+Base.getindex(polys::PiecewiseLegendrePolyVector, i) = getindex(polys.polyvec, i)
+function Base.getindex(polys::PiecewiseLegendrePolyVector, i::AbstractVector)
+    PiecewiseLegendrePolyVector(getindex(polys.polyvec, i))
+end
+
+Base.setindex!(polys::PiecewiseLegendrePolyVector, p, i) = setindex!(polys.polyvec, p, i)
+
+function Base.similar(polys::PiecewiseLegendrePolyVector)
+    PiecewiseLegendrePolyVector(similar(polys.polyvec))
+end
 
 function Base.show(io::IO, ::MIME"text/plain", polys::PiecewiseLegendrePolyVector)
     print(io, "$(length(polys))-element PiecewiseLegendrePolyVector ")
-    print(io, "on [$(polys.xmin), $(polys.xmax)]")
+    print(io, "on [$(xmin(polys)), $(xmax(polys))]")
 end
 
-function Vector{PiecewiseLegendrePoly}(data::AbstractArray{T,3}, knots::Vector{T};
-                                       symm=zeros(Int, size(data, 3))) where {T<:Real}
-    [PiecewiseLegendrePoly(data[:, :, i], knots, i - 1; symm=symm[i]) for i in axes(data, 3)]
+function PiecewiseLegendrePolyVector(data::AbstractArray{T,3}, knots::Vector{T};
+        symm=zeros(Int, size(data, 3))) where {T<:Real}
+    PiecewiseLegendrePolyVector(map(axes(data, 3)) do i
+        PiecewiseLegendrePoly(data[:, :, i], knots, i - 1; symm=symm[i])
+    end)
 end
 
-function Vector{PiecewiseLegendrePoly}(polys::PiecewiseLegendrePolyVector,
-                                       knots::AbstractVector; Δx=diff(knots), symm=0)
+function PiecewiseLegendrePolyVector(polys::PiecewiseLegendrePolyVector,
+        knots::AbstractVector; Δx=diff(knots), symm=0)
     length(polys) == length(symm) ||
         throw(DimensionMismatch("Sizes of polys and symm don't match"))
-    return map(zip(polys, symm)) do (poly, sym)
+    PiecewiseLegendrePolyVector(map(zip(polys, symm)) do (poly, sym)
         PiecewiseLegendrePoly(poly.data, knots, poly.l; Δx, symm=sym)
-    end
+    end)
 end
 
-function Vector{PiecewiseLegendrePoly}(data::AbstractArray{T,3},
-                                       polys::PiecewiseLegendrePolyVector) where {T}
+function PiecewiseLegendrePolyVector(data::AbstractArray{T,3},
+        polys::PiecewiseLegendrePolyVector) where {T}
    size(data, 3) == length(polys) ||
        throw(DimensionMismatch("Sizes of data and polys don't match"))
-    [PiecewiseLegendrePoly(data[:, :, i], polys[i]) for i in eachindex(polys)]
+    PiecewiseLegendrePolyVector(map(eachindex(polys)) do i
+        PiecewiseLegendrePoly(data[:, :, i], polys[i])
+    end)
 end
 
-(polys::PiecewiseLegendrePolyVector)(x) = [poly(x) for poly in polys]
-function (polys::PiecewiseLegendrePolyVector)(x::AbstractArray)
-    reshape(mapreduce(polys, vcat, x; init=Float64[]), (length(polys), size(x)...))
+for name in (:xmin, :xmax, :knots, :Δx, :polyorder, :norms)
+    eval(:($name(polys::PiecewiseLegendrePolyVector) = $name(first(polys.polyvec))))
 end
 
-function Base.getproperty(polys::PiecewiseLegendrePolyVector, sym::Symbol)
-    if sym === :symm
-        return map(poly -> poly.symm, polys)
-    elseif sym === :data
-        data = Array{Float64,3}(undef, size(first(polys).data)..., length(polys))
-        @inbounds for i in eachindex(polys)
-            data[:, :, i] .= polys[i].data
-        end
-        return data
-    elseif (sym === :xmin || sym === :xmax || sym === :knots || sym === :Δx ||
-            sym === :polyorder || sym === :xm || sym === :inv_xs || sym === :norm)
-        return getproperty(first(polys), sym)
+symm(polys::PiecewiseLegendrePolyVector) = map(symm, polys)
+
+function data(polys::PiecewiseLegendrePolyVector)
+    data = Array{Float64,3}(undef, size(first(polys).data)..., length(polys))
+    @inbounds for i in eachindex(polys)
+        data[:, :, i] .= polys[i].data
     end
+    return data
+end
+
+(polys::PiecewiseLegendrePolyVector)(x) = [poly(x) for poly in polys]
+function (polys::PiecewiseLegendrePolyVector)(x::AbstractArray)
+    reshape(mapreduce(polys, vcat, x; init=Float64[]), (length(polys), size(x)...))
 end
 
 # Backward compatibility
 function overlap(polys::PiecewiseLegendrePolyVector, f::F; rtol=eps(),
-                 return_error=false) where {F}
+        return_error=false) where {F}
     overlap.(polys, f; rtol, return_error)
 end
@@ -259,37 +288,52 @@ case `n` must be even, or antiperiodically (`freq=:odd`), in which
 case `n` must be odd.
 """
 struct PiecewiseLegendreFT{S<:Statistics} <: Function
-    poly       :: PiecewiseLegendrePoly
-    statistics :: S
-    n_asymp    :: Float64
-    model      :: PowerModel{Float64}
+    poly    :: PiecewiseLegendrePoly
+    n_asymp :: Float64
+    model   :: PowerModel{Float64}
 end
 
 function PiecewiseLegendreFT(poly::PiecewiseLegendrePoly, stat::Statistics; n_asymp=Inf)
-    (poly.xmin, poly.xmax) == (-1, 1) || error("Only interval [-1, 1] is supported")
+    (xmin(poly), xmax(poly)) == (-1, 1) || error("Only interval [-1, 1] is supported")
     model = power_model(stat, poly)
-    PiecewiseLegendreFT(poly, stat, Float64(n_asymp), model)
+    PiecewiseLegendreFT{typeof(stat)}(poly, Float64(n_asymp), model)
 end
 
-const PiecewiseLegendreFTVector{S} = Vector{PiecewiseLegendreFT{S}}
+n_asymp(polyFT::PiecewiseLegendreFT) = polyFT.n_asymp
+statistics(::PiecewiseLegendreFT{S}) where {S} = S()
+zeta(polyFT::PiecewiseLegendreFT) = zeta(statistics(polyFT))
+poly(polyFT::PiecewiseLegendreFT) = polyFT.poly
 
-function PiecewiseLegendreFTVector(polys::PiecewiseLegendrePolyVector,
-                                   stat::Statistics; n_asymp=Inf)
-    [PiecewiseLegendreFT(poly, stat; n_asymp) for poly in polys]
+struct PiecewiseLegendreFTVector{S} <: AbstractVector{PiecewiseLegendreFT{S}}
+    polyvec::Vector{PiecewiseLegendreFT{S}}
 end
 
-function Base.getproperty(polyFTs::PiecewiseLegendreFTVector, sym::Symbol)
-    return if sym === :stat || sym === :n_asymp
-        getproperty(first(polyFTs), sym)
-    elseif sym === :poly
-        map(p -> p.poly, polyFTs)
-    end
+Base.size(polys::PiecewiseLegendreFTVector) = size(polys.polyvec)
+
+Base.IndexStyle(::Type{PiecewiseLegendreFTVector}) = IndexLinear()
+
+Base.getindex(polys::PiecewiseLegendreFTVector, i) = getindex(polys.polyvec, i)
+function Base.getindex(polys::PiecewiseLegendreFTVector, i::AbstractVector)
+    PiecewiseLegendreFTVector(getindex(polys.polyvec, i))
+end
+
+Base.setindex!(polys::PiecewiseLegendreFTVector, p, i) = setindex!(polys.polyvec, p, i)
+
+function Base.similar(polys::PiecewiseLegendreFTVector)
+    PiecewiseLegendreFTVector(similar(polys.polyvec))
+end
+
+function PiecewiseLegendreFTVector(polys::PiecewiseLegendrePolyVector,
+        stat::Statistics; n_asymp=Inf)
+    PiecewiseLegendreFTVector(map(polys) do poly
+        PiecewiseLegendreFT(poly, stat; n_asymp)
+    end)
 end
 
-statistics(polyFT::PiecewiseLegendreFT) = polyFT.statistics
+n_asymp(polyFTs::PiecewiseLegendreFTVector) = n_asymp(first(polyFTs))
 statistics(polyFTs::PiecewiseLegendreFTVector) = statistics(first(polyFTs))
-zeta(polyFT::PiecewiseLegendreFT) = zeta(statistics(polyFT))
 zeta(polyFTs::PiecewiseLegendreFTVector) = zeta(first(polyFTs))
+poly(polyFTs::PiecewiseLegendreFTVector) = PiecewiseLegendrePolyVector(map(poly, polyFTs))
 
 """
     (polyFT::PiecewiseLegendreFT)(ω)
@@ -297,10 +341,10 @@ zeta(polyFTs::PiecewiseLegendreFTVector) = zeta(first(polyFTs))
 Obtain Fourier transform of polynomial for given `MatsubaraFreq` `ω`.
 """
 function (polyFT::Union{PiecewiseLegendreFT{S},
-                        PiecewiseLegendreFTVector{S}})(ω::MatsubaraFreq{S}) where {S}
+        PiecewiseLegendreFTVector{S}})(ω::MatsubaraFreq{S}) where {S}
     n = Int(ω)
-    return if abs(n) < polyFT.n_asymp
-        compute_unl_inner(polyFT.poly, n)
+    return if abs(n) < n_asymp(polyFT)
+        compute_unl_inner(poly(polyFT), n)
     else
         giw(polyFT, n)
     end
@@ -337,7 +381,7 @@ end
 Obtain extrema of Fourier-transformed polynomial.
 """
 function find_extrema(û::PiecewiseLegendreFT; part=nothing, grid=DEFAULT_GRID,
-                      positive_only=false)
+        positive_only=false)
     f = func_for_part(û, part)
     x₀ = discrete_extrema(f, grid)
     x₀ .= 2x₀ .+ zeta(statistics(û))
@@ -346,7 +390,7 @@ function find_extrema(û::PiecewiseLegendreFT; part=nothing, grid=DEFAULT_GRID,
 end
 
 function sign_changes(û::PiecewiseLegendreFT; part=nothing, grid=DEFAULT_GRID,
-                      positive_only=false)
+        positive_only=false)
     f = func_for_part(û, part)
     x₀ = find_all(f, grid)
     x₀ .= 2x₀ .+ zeta(statistics(û))
@@ -356,7 +400,7 @@ end
 
 function func_for_part(polyFT::PiecewiseLegendreFT{S}, part=nothing) where {S}
     if isnothing(part)
-        parity = polyFT.poly.symm
+        parity = symm(poly(polyFT))
         if parity == 1
             part = statistics(polyFT) isa Bosonic ? real : imag
         elseif parity == -1
@@ -380,7 +424,7 @@ end
 
 function derivs(ppoly, x)
     res = [ppoly(x)]
-    for _ in 2:(ppoly.polyorder)
+    for _ in 2:(polyorder(ppoly))
         ppoly = deriv(ppoly)
         push!(res, ppoly(x))
     end
@@ -414,17 +458,18 @@ function compute_unl_inner(poly::PiecewiseLegendrePoly, wn)
     wred = π / 4 * wn
     phase_wi = phase_stable(poly, wn)
     res = zero(ComplexF64)
-    @inbounds for order in axes(poly.data, 1), j in axes(poly.data, 2)
-        res += poly.data[order, j] * get_tnl(order - 1, wred * poly.Δx[j]) * phase_wi[j] /
-               poly.norm[j]
+    @inbounds for order in axes(data(poly), 1), j in axes(data(poly), 2)
+        res += data(poly)[order, j] * get_tnl(order - 1, wred * Δx(poly)[j]) * phase_wi[j] /
+               norms(poly)[j]
     end
     return res / sqrt(2)
 end
 
 function compute_unl_inner(polys::PiecewiseLegendrePolyVector, wn)
-    p = reshape(range(0; length=polys.polyorder), (1, :))
+    p = reshape(range(0; length=polyorder(polys)), (1, :))
     wred = π / 4 * wn
     phase_wi = phase_stable(polys, wn)
-    t_pin = permutedims(@. get_tnl(p, wred * polys.Δx) * phase_wi / (sqrt(2) * polys.norm))
+    t_pin = permutedims(get_tnl.(p, wred .* Δx(polys)) .* phase_wi ./
+                        (sqrt(2) .* norms(polys)))
     return [dot(poly.data, t_pin) for poly in polys]
 end
@@ -474,15 +519,15 @@ Phase factor for the piecewise Legendre to Matsubara transform.
 
 Compute the following phase factor in a stable way:
 
-    exp.(iπ/2 * wn * cumsum(poly.Δx))
+    exp.(iπ/2 * wn * cumsum(Δx(poly)))
 """
 function phase_stable(poly, wn::Integer)
-    xmid_diff, extra_shift = shift_xmid(poly.knots, poly.Δx)
+    xmid_diff, extra_shift = shift_xmid(knots(poly), Δx(poly))
     @. im^mod(wn * (extra_shift + 1), 4) * cispi(wn * xmid_diff / 2)
 end
 
 function phase_stable(poly, wn)
-    xmid_diff, extra_shift = shift_xmid(poly.knots, poly.Δx)
+    xmid_diff, extra_shift = shift_xmid(knots(poly), Δx(poly))
     delta_wn, wn = modf(wn)
     wn = trunc(Int, wn)
diff --git a/src/sve.jl b/src/sve.jl
index f3ab858..34aafab 100644
--- a/src/sve.jl
+++ b/src/sve.jl
@@ -43,7 +43,7 @@ function SamplingSVE(kernel, ε, ::Type{T}=Float64; n_gauss=nothing) where {T}
     gauss_x, gauss_y = piecewise(rule, segs_x), piecewise(rule, segs_y)
 
     return SamplingSVE(kernel, ε, n_gauss, nsvals(sve_hints_),
-                       rule, segs_x, segs_y, gauss_x, gauss_y)
+        rule, segs_x, segs_y, gauss_x, gauss_y)
 end
 
 """
@@ -81,7 +81,7 @@ struct CentrosymmSVE{K<:AbstractKernel,SVEEVEN<:AbstractSVE,SVEODD<:AbstractSVE}
 end
 
 function CentrosymmSVE(kernel, ε, ::Type{T}; InnerSVE=SamplingSVE,
-                       n_gauss=nothing) where {T}
+        n_gauss=nothing) where {T}
     even = InnerSVE(get_symmetrized(kernel, +1), ε, T; n_gauss)
     odd = InnerSVE(get_symmetrized(kernel, -1), ε, T; n_gauss)
     return CentrosymmSVE(kernel, ε, even, odd, max(even.nsvals_hint, odd.nsvals_hint))
@@ -151,9 +151,9 @@ Returns:
 An `SVEResult` containing the truncated singular value expansion.
 """
 function SVEResult(kernel::AbstractKernel;
-                   Twork=nothing, cutoff=nothing, ε=nothing, lmax=typemax(Int),
-                   n_gauss=nothing, svd_strat=:auto,
-                   SVEstrat=iscentrosymmetric(kernel) ? CentrosymmSVE : SamplingSVE)
+        Twork=nothing, cutoff=nothing, ε=nothing, lmax=typemax(Int),
+        n_gauss=nothing, svd_strat=:auto,
+        SVEstrat=iscentrosymmetric(kernel) ? CentrosymmSVE : SamplingSVE)
     safe_ε, Twork_actual, svd_strat = choose_accuracy(ε, Twork, svd_strat)
     sve = SVEstrat(kernel, safe_ε, Twork_actual; n_gauss)
@@ -203,9 +203,9 @@ function postprocess(sve::SamplingSVE, (u,), (s,), (v,))
     cmat = legendre_collocation(sve.rule)
     u_data = reshape(cmat * reshape(u_x, (size(u_x, 1), :)),
-                     (:, size(u_x, 2), size(u_x, 3)))
+        (:, size(u_x, 2), size(u_x, 3)))
     v_data = reshape(cmat * reshape(v_y, (size(v_y, 1), :)),
-                     (:, size(v_y, 2), size(v_y, 3)))
+        (:, size(v_y, 2), size(v_y, 3)))
 
     dsegs_x = diff(sve.segs_x)
     dsegs_y = diff(sve.segs_y)
@@ -224,8 +224,8 @@ function postprocess(sve::CentrosymmSVE, u, s, v)
     u_odd, s_odd, v_odd = postprocess(sve.odd, u[2:2], s[2:2], v[2:2])
 
     # Merge two sets
-    u = [u_even; u_odd]
-    v = [v_even; v_odd]
+    u = PiecewiseLegendrePolyVector([u_even; u_odd])
+    v = PiecewiseLegendrePolyVector([v_even; v_odd])
     s = [s_even; s_odd]
     signs = [fill(1, length(s_even)); fill(-1, length(s_odd))]
diff --git a/test/Project.toml b/test/Project.toml
index d680726..1e148a0 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -2,6 +2,7 @@ name = "SparseIR Tests"
 
 [deps]
 Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
+ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7"
 Logging = "56ddb016-857b-54e1-b83d-db4d58db5568"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
diff --git a/test/augment.jl b/test/augment.jl
index b76e016..02af1e6 100644
--- a/test/augment.jl
+++ b/test/augment.jl
@@ -87,7 +87,7 @@ using SparseIR.LinearAlgebra
 
         @test SparseIR.xmax(basis_aug.u) == β
 
        @test SparseIR.deriv(basis_aug.u)(0.8)[3:end] ==
-              SparseIR.deriv.(SparseIR.fbasis(basis_aug.u))(0.8)
+              SparseIR.PiecewiseLegendrePolyVector(SparseIR.deriv.(SparseIR.fbasis(basis_aug.u)))(0.8)
 
         @test SparseIR.zeta(basis_aug.uhat) == 0
diff --git a/test/poly.jl b/test/poly.jl
index 12e5141..a5b1655 100644
--- a/test/poly.jl
+++ b/test/poly.jl
@@ -80,8 +80,8 @@ isdefined(Main, :sve_logistic) || include("_conftest.jl")
 
     @test size(u[1](rand(30))) == (30,)
 
-    @test_throws DomainError u(u.xmax + 123)
-    @test_throws DomainError u(u.xmin - 123)
+    @test_throws DomainError u(SparseIR.xmax(u) + 123)
+    @test_throws DomainError u(SparseIR.xmin(u) - 123)
 
     int_result, int_error = SparseIR.overlap(u[1], u[1]; return_error=true)
@@ -92,10 +92,10 @@ isdefined(Main, :sve_logistic) || include("_conftest.jl")
 
     @test size(u(rand(2, 3, 4))) == (length(u), 2, 3, 4)
 
-    @test (u.xmin, u.xmax) === (-1.0, 1.0)
-    @test u.knots == u[end].knots
-    @test u.Δx == u[1].Δx
-    @test u.symm[2] == u[2].symm
+    @test (SparseIR.xmin(u), SparseIR.xmax(u)) === (-1.0, 1.0)
+    @test SparseIR.knots(u) == u[end].knots
+    @test SparseIR.Δx(u) == u[1].Δx
+    @test SparseIR.symm(u)[2] == SparseIR.symm(u[2])
 
     @test all(<(eps()), SparseIR.overlap(u, sin)[1:2:end])
     @test all(<(eps()), SparseIR.overlap(u, cos)[2:2:end])
diff --git a/test/runtests.jl b/test/runtests.jl
index 47a91c8..f73b2af 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -1,23 +1,34 @@
+using SparseIR
 using Test
 using Aqua
+using ExplicitImports
 
-include("_conftest.jl")
+@testset verbose=true "SparseIR tests" begin
+    @testset verbose=true "Aqua" begin
+        Aqua.test_all(SparseIR; ambiguities=false, piracies=false)
+    end
 
-Aqua.test_all(SparseIR; ambiguities=false, piracies=false)
+    @testset verbose=true "No implicit imports" begin
+        @test check_no_implicit_imports(SparseIR) === nothing
+        @test check_no_stale_explicit_imports(SparseIR) === nothing
+    end
 
-include("freq.jl")
-include("gauss.jl")
-include("kernel.jl")
-include("poly.jl")
-include("sve.jl")
-include("svd.jl")
-include("basis.jl")
-include("sampling.jl")
-include("augment.jl")
-include("dlr.jl")
-include("_linalg.jl")
-include("_roots.jl")
-include("_specfuncs.jl")
-include("_multifloat_funcs.jl")
+    @testset verbose=true "Actual code" begin
+        include("_conftest.jl")
 
-nothing # without this we get messy output from the testset printed in the REPL
+        include("freq.jl")
+        include("gauss.jl")
+        include("kernel.jl")
+        include("poly.jl")
+        include("sve.jl")
+        include("svd.jl")
+        include("basis.jl")
+        include("sampling.jl")
+        include("augment.jl")
+        include("dlr.jl")
+        include("_linalg.jl")
+        include("_roots.jl")
+        include("_specfuncs.jl")
+        include("_multifloat_funcs.jl")
+    end
+end
diff --git a/test/sve.jl b/test/sve.jl
index 0befd17..d8a8766 100644
--- a/test/sve.jl
+++ b/test/sve.jl
@@ -7,7 +7,7 @@ isdefined(Main, :sve_logistic) || include("_conftest.jl")
 
 function check_smooth(u, s, uscale, fudge_factor)
     ε = eps(eltype(s))
-    x = u.knots[(begin + 1):(end - 1)]
+    x = SparseIR.knots(u)[(begin + 1):(end - 1)]
 
     jump = abs.(u(x .+ ε) - u(x .- ε))
     compare_below = abs.(u(x .- ε) - u(x .- 3ε))
@@ -39,7 +39,7 @@ a ⪅ b = leaq(a, b)
 end
 
 @testset "num roots û with stat = $stat, Λ = $Λ" for stat in (Fermionic(), Bosonic()),
-                                                      Λ in (10, 42, 10_000)
+    Λ in (10, 42, 10_000)
 
     basis = FiniteTempBasis(stat, 1, Λ; sve_result=sve_logistic[Λ])
     for i in [1, 2, 8, 11]
@@ -49,7 +49,7 @@ a ⪅ b = leaq(a, b)
 end
 
 @testset "accuracy with stat = $stat, Λ = $Λ" for stat in (Fermionic(), Bosonic()),
-                                                  Λ in (10, 42, 10_000)
+    Λ in (10, 42, 10_000)
 
     basis = FiniteTempBasis(stat, 4, Λ; sve_result=sve_logistic[Λ])
     @test 0 < SparseIR.accuracy(basis) ⪅ last(SparseIR.significance(basis))
@@ -57,34 +57,37 @@ a ⪅ b = leaq(a, b)
     @test SparseIR.accuracy(basis) ⪅ last(basis.s) / first(basis.s)
 end
 
-    @testset "choose_accuracy" begin with_logger(NullLogger()) do # suppress output of warnings
-        @test SparseIR.choose_accuracy(nothing, nothing) ==
-              (2.2204460492503131e-16, Float64x2, :default)
-        @test SparseIR.choose_accuracy(nothing, Float64) ==
-              (1.4901161193847656e-8, Float64, :default)
-        @test SparseIR.choose_accuracy(nothing, Float64x2) ==
-              (2.2204460492503131e-16, Float64x2, :default)
-        @test SparseIR.choose_accuracy(1e-6, nothing) == (1.0e-6, Float64, :default)
-        @test SparseIR.choose_accuracy(1e-8, nothing) == (1.0e-8, Float64x2, :default)
-
-        @test SparseIR.choose_accuracy(1e-20, nothing) == (1.0e-20, Float64x2, :default)
-        @test_logs (:warn,
-                    """Basis cutoff is 1.0e-20, which is below √ε with ε = 4.9303806576313238e-32.
-                    Expect singular values and basis functions for large l to have lower precision
-                    than the cutoff.""") SparseIR.choose_accuracy(1e-20, nothing)
-
-        @test SparseIR.choose_accuracy(1e-10, Float64) == (1.0e-10, Float64, :accurate)
-        @test_logs (:warn,
-                    """Basis cutoff is 1.0e-10, which is below √ε with ε = 2.220446049250313e-16.
-                    Expect singular values and basis functions for large l to have lower precision
-                    than the cutoff.""") SparseIR.choose_accuracy(1e-10, Float64)
-
-        @test SparseIR.choose_accuracy(1e-6, Float64) == (1.0e-6, Float64, :default)
-
-        @test SparseIR.choose_accuracy(1e-6, Float64, :auto) == (1.0e-6, Float64, :default)
-        @test SparseIR.choose_accuracy(1e-6, Float64, :accurate) ==
-              (1.0e-6, Float64, :accurate)
-    end end
+    @testset "choose_accuracy" begin
+        with_logger(NullLogger()) do # suppress output of warnings
+            @test SparseIR.choose_accuracy(nothing, nothing) ==
+                  (2.2204460492503131e-16, Float64x2, :default)
+            @test SparseIR.choose_accuracy(nothing, Float64) ==
+                  (1.4901161193847656e-8, Float64, :default)
+            @test SparseIR.choose_accuracy(nothing, Float64x2) ==
+                  (2.2204460492503131e-16, Float64x2, :default)
+            @test SparseIR.choose_accuracy(1e-6, nothing) == (1.0e-6, Float64, :default)
+            @test SparseIR.choose_accuracy(1e-8, nothing) == (1.0e-8, Float64x2, :default)
+
+            @test SparseIR.choose_accuracy(1e-20, nothing) == (1.0e-20, Float64x2, :default)
+            @test_logs (:warn,
+                """Basis cutoff is 1.0e-20, which is below √ε with ε = 4.9303806576313238e-32.
+Expect singular values and basis functions for large l to have lower precision
+than the cutoff.""") SparseIR.choose_accuracy(1e-20, nothing)
+
+            @test SparseIR.choose_accuracy(1e-10, Float64) == (1.0e-10, Float64, :accurate)
+            @test_logs (:warn,
+                """Basis cutoff is 1.0e-10, which is below √ε with ε = 2.220446049250313e-16.
+Expect singular values and basis functions for large l to have lower precision
+than the cutoff.""") SparseIR.choose_accuracy(1e-10, Float64)
+
+            @test SparseIR.choose_accuracy(1e-6, Float64) == (1.0e-6, Float64, :default)
+
+            @test SparseIR.choose_accuracy(1e-6, Float64, :auto) ==
+                  (1.0e-6, Float64, :default)
+            @test SparseIR.choose_accuracy(1e-6, Float64, :accurate) ==
+                  (1.0e-6, Float64, :accurate)
+        end
+    end
 
     @testset "truncate" begin
         sve = SparseIR.CentrosymmSVE(LogisticKernel(5), 1e-6, Float64)
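
Usage sketch (reviewer note, not part of the patch): the hunks above replace direct field access on the basis functions with accessor functions and turn PiecewiseLegendrePolyVector into a wrapper struct instead of a Vector alias. The snippet below only uses the API shown in this diff; the numeric values of β, ωmax and ε are arbitrary illustration.

    using SparseIR

    # Illustrative parameters only; any valid β, ωmax, ε work the same way.
    β, ωmax, ε = 10.0, 4.0, 1e-6
    basis = FiniteTempBasis{Fermionic}(β, ωmax, ε)

    u = basis.u                          # PiecewiseLegendrePolyVector (wrapper struct, not a Vector alias)
    SparseIR.xmin(u), SparseIR.xmax(u)   # domain endpoints, replacing u.xmin / u.xmax
    SparseIR.knots(u)                    # knots shared by all polynomials, replacing u.knots
    SparseIR.Δx(u)                       # segment widths, replacing u.Δx
    SparseIR.symm(u)                     # per-polynomial symmetry flags, replacing u.symm
    SparseIR.data(u)                     # 3-d coefficient array, replacing u.data

The analogous accessors for the Fourier-transformed functions (n_asymp, statistics, zeta, poly on PiecewiseLegendreFTVector) are defined in the src/poly.jl hunks above and are used the same way.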