From 9e07d9100b3259c20cad99b952daaa6138eeaf3e Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Sat, 31 Aug 2024 13:57:42 -0400 Subject: [PATCH 001/537] Move lets to testsets and add codeunits tests --- test/strings/types.jl | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/test/strings/types.jl b/test/strings/types.jl index dbcf65b1d843b..d1e89e0e85196 100644 --- a/test/strings/types.jl +++ b/test/strings/types.jl @@ -302,8 +302,7 @@ end ## Cstring tests ## -# issue #13974: comparison against pointers -let +@testset "issue #13974: comparison against pointers" begin str = String("foobar") ptr = pointer(str) cstring = Cstring(ptr) @@ -324,10 +323,15 @@ let @test C_NULL != cstring end -# issue #31381: eltype(Cstring) != Cchar -let +@testset "issue #31381: eltype(Cstring) != Cchar" begin s = Cstring(C_NULL) @test eltype(Cstring) == Cchar @test eltype(s) == Cchar @test pointer(s) isa Ptr{Cchar} end + +@testset "Codeunits" begin + s = "I'm a string!" + @test codeunit(s) == UInt8 + @test codeunit(s, Int8(1)) == codeunit(s, 1) +end From 66349d411efd36211524da1741df129fd504b658 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Sat, 31 Aug 2024 14:17:04 -0400 Subject: [PATCH 002/537] Few more missing AnnotatedStrings tests --- test/strings/annotated.jl | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/test/strings/annotated.jl b/test/strings/annotated.jl index 90aaadd6ede24..ee53c3d5846eb 100644 --- a/test/strings/annotated.jl +++ b/test/strings/annotated.jl @@ -5,14 +5,22 @@ @test str == Base.AnnotatedString(str.string, Tuple{UnitRange{Int}, Pair{Symbol, Any}}[]) @test length(str) == 11 @test ncodeunits(str) == 11 + @test codeunits(str) == codeunits("some string") + @test codeunit(str) == UInt8 + @test codeunit(str, 1) == codeunit("some string", 1) + @test firstindex(str) == firstindex("some string") @test convert(Base.AnnotatedString, str) === str @test eltype(str) == Base.AnnotatedChar{eltype(str.string)} @test first(str) == Base.AnnotatedChar(first(str.string), Pair{Symbol, Any}[]) @test str[1:4] isa SubString{typeof(str)} @test str[1:4] == Base.AnnotatedString("some") + big_byte_str = Base.AnnotatedString("आख") + @test_throws StringIndexError big_byte_str[5] @test "a" * str == Base.AnnotatedString("asome string") @test str * "a" == Base.AnnotatedString("some stringa") @test str * str == Base.AnnotatedString("some stringsome string") + @test cmp(str, "some stringy thingy") == -1 + @test cmp("some stringy thingy", str) == 1 @test str[3:4] == SubString("me") @test SubString("me") == str[3:4] Base.annotate!(str, 1:4, :thing => 0x01) From 7a645dd23af4491d1d03f4f1cba5d46d8268677d Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 10 Sep 2024 12:09:54 +0530 Subject: [PATCH 003/537] Avoid materializing arrays in bidiag matmul (#55450) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, small `Bidiagonal`/`Tridiagonal` matrices are materialized in matrix multiplications, but this is wasteful and unnecessary. This PR changes this to use a naive matrix multiplication for small matrices, and fall back to the banded multiplication for larger ones. Multiplication by a `Bidiagonal` falls back to a banded matrix multiplication for all sizes in the current implementation, and iterates in a cache-friendly manner for the non-`Bidiagonal` matrix. In certain cases, the matrices were being materialized if the non-structured matrix was small, even if the structured matrix was large. 
This is changed as well in this PR. Some improvements in performance: ```julia julia> B = Bidiagonal(rand(3), rand(2), :U); A = rand(size(B)...); C = similar(A); julia> @btime mul!($C, $A, $B); 193.152 ns (6 allocations: 352 bytes) # nightly v"1.12.0-DEV.1034" 18.826 ns (0 allocations: 0 bytes) # This PR julia> T = Tridiagonal(rand(99), rand(100), rand(99)); A = rand(2, size(T,2)); C = similar(A); julia> @btime mul!($C, $A, $T); 9.398 μs (8 allocations: 79.94 KiB) # nightly 416.407 ns (0 allocations: 0 bytes) # This PR julia> B = Bidiagonal(rand(300), rand(299), :U); A = rand(20000, size(B,2)); C = similar(A); julia> @btime mul!($C, $A, $B); 33.395 ms (0 allocations: 0 bytes) # nightly 6.695 ms (0 allocations: 0 bytes) # This PR (cache-friendly) ``` Closes https://github.com/JuliaLang/julia/pull/55414 --------- Co-authored-by: Daniel Karrasch --- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 4 +- stdlib/LinearAlgebra/src/bidiag.jl | 330 +++++++++++++++++++--- stdlib/LinearAlgebra/test/bidiag.jl | 85 ++++-- stdlib/LinearAlgebra/test/tridiag.jl | 71 +++++ 4 files changed, 422 insertions(+), 68 deletions(-) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 27d4255fb656b..17216845b350c 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -673,7 +673,9 @@ matprod_dest(A::Diagonal, B::Diagonal, TS) = _matprod_dest_diag(B, TS) _matprod_dest_diag(A, TS) = similar(A, TS) function _matprod_dest_diag(A::SymTridiagonal, TS) n = size(A, 1) - Tridiagonal(similar(A, TS, n-1), similar(A, TS, n), similar(A, TS, n-1)) + ev = similar(A, TS, max(0, n-1)) + dv = similar(A, TS, n) + Tridiagonal(ev, dv, similar(ev)) end # Special handling for adj/trans vec diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index d86bad7e41435..8bc5b1c47f366 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -557,7 +557,8 @@ end # function to get the internally stored vectors for Bidiagonal and [Sym]Tridiagonal # to avoid allocations in _mul! below (#24324, #24578) _diag(A::Tridiagonal, k) = k == -1 ? A.dl : k == 0 ? A.d : A.du -_diag(A::SymTridiagonal, k) = k == 0 ? A.dv : A.ev +_diag(A::SymTridiagonal{<:Number}, k) = k == 0 ? A.dv : A.ev +_diag(A::SymTridiagonal, k) = k == 0 ? view(A, diagind(A, IndexStyle(A))) : view(A, diagind(A, 1, IndexStyle(A))) function _diag(A::Bidiagonal, k) if k == 0 return A.dv @@ -577,12 +578,45 @@ function _bibimul!(C, A, B, _add) check_A_mul_B!_sizes(size(C), size(A), size(B)) n = size(A,1) iszero(n) && return C - n <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) + if n <= 3 + # naive multiplication + for I in CartesianIndices(C) + _modify!(_add, sum(A[I[1], k] * B[k, I[2]] for k in axes(A,2)), C, I) + end + return C + end # We use `_rmul_or_fill!` instead of `_modify!` here since using # `_modify!` in the following loop will not update the # off-diagonal elements for non-zero beta. 
_rmul_or_fill!(C, _add.beta) iszero(_add.alpha) && return C + @inbounds begin + # first column of C + C[1,1] += _add(A[1,1]*B[1,1] + A[1, 2]*B[2,1]) + C[2,1] += _add(A[2,1]*B[1,1] + A[2,2]*B[2,1]) + C[3,1] += _add(A[3,2]*B[2,1]) + # second column of C + C[1,2] += _add(A[1,1]*B[1,2] + A[1,2]*B[2,2]) + C[2,2] += _add(A[2,1]*B[1,2] + A[2,2]*B[2,2] + A[2,3]*B[3,2]) + C[3,2] += _add(A[3,2]*B[2,2] + A[3,3]*B[3,2]) + C[4,2] += _add(A[4,3]*B[3,2]) + end # inbounds + # middle columns + __bibimul!(C, A, B, _add) + @inbounds begin + C[n-3,n-1] += _add(A[n-3,n-2]*B[n-2,n-1]) + C[n-2,n-1] += _add(A[n-2,n-2]*B[n-2,n-1] + A[n-2,n-1]*B[n-1,n-1]) + C[n-1,n-1] += _add(A[n-1,n-2]*B[n-2,n-1] + A[n-1,n-1]*B[n-1,n-1] + A[n-1,n]*B[n,n-1]) + C[n, n-1] += _add(A[n,n-1]*B[n-1,n-1] + A[n,n]*B[n,n-1]) + # last column of C + C[n-2, n] += _add(A[n-2,n-1]*B[n-1,n]) + C[n-1, n] += _add(A[n-1,n-1]*B[n-1,n ] + A[n-1,n]*B[n,n ]) + C[n, n] += _add(A[n,n-1]*B[n-1,n ] + A[n,n]*B[n,n ]) + end # inbounds + C +end +function __bibimul!(C, A, B, _add) + n = size(A,1) Al = _diag(A, -1) Ad = _diag(A, 0) Au = _diag(A, 1) @@ -590,44 +624,198 @@ function _bibimul!(C, A, B, _add) Bd = _diag(B, 0) Bu = _diag(B, 1) @inbounds begin - # first row of C - C[1,1] += _add(A[1,1]*B[1,1] + A[1, 2]*B[2, 1]) - C[1,2] += _add(A[1,1]*B[1,2] + A[1,2]*B[2,2]) - C[1,3] += _add(A[1,2]*B[2,3]) - # second row of C - C[2,1] += _add(A[2,1]*B[1,1] + A[2,2]*B[2,1]) - C[2,2] += _add(A[2,1]*B[1,2] + A[2,2]*B[2,2] + A[2,3]*B[3,2]) - C[2,3] += _add(A[2,2]*B[2,3] + A[2,3]*B[3,3]) - C[2,4] += _add(A[2,3]*B[3,4]) for j in 3:n-2 - Ajj₋1 = Al[j-1] - Ajj = Ad[j] + Aj₋2j₋1 = Au[j-2] + Aj₋1j = Au[j-1] Ajj₊1 = Au[j] - Bj₋1j₋2 = Bl[j-2] - Bj₋1j₋1 = Bd[j-1] + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Ajj₋1 = Al[j-1] + Aj₊1j = Al[j] + Aj₊2j₊1 = Al[j+1] Bj₋1j = Bu[j-1] - Bjj₋1 = Bl[j-1] Bjj = Bd[j] - Bjj₊1 = Bu[j] Bj₊1j = Bl[j] - Bj₊1j₊1 = Bd[j+1] - Bj₊1j₊2 = Bu[j+1] - C[j,j-2] += _add( Ajj₋1*Bj₋1j₋2) - C[j, j-1] += _add(Ajj₋1*Bj₋1j₋1 + Ajj*Bjj₋1) - C[j, j ] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j, j+1] += _add(Ajj *Bjj₊1 + Ajj₊1*Bj₊1j₊1) - C[j, j+2] += _add(Ajj₊1*Bj₊1j₊2) + + C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) + C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) + C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) end - # row before last of C - C[n-1,n-3] += _add(A[n-1,n-2]*B[n-2,n-3]) - C[n-1,n-2] += _add(A[n-1,n-1]*B[n-1,n-2] + A[n-1,n-2]*B[n-2,n-2]) - C[n-1,n-1] += _add(A[n-1,n-2]*B[n-2,n-1] + A[n-1,n-1]*B[n-1,n-1] + A[n-1,n]*B[n,n-1]) - C[n-1,n ] += _add(A[n-1,n-1]*B[n-1,n ] + A[n-1, n]*B[n ,n ]) - # last row of C - C[n,n-2] += _add(A[n,n-1]*B[n-1,n-2]) - C[n,n-1] += _add(A[n,n-1]*B[n-1,n-1] + A[n,n]*B[n,n-1]) - C[n,n ] += _add(A[n,n-1]*B[n-1,n ] + A[n,n]*B[n,n ]) - end # inbounds + end + C +end +function __bibimul!(C, A, B::Bidiagonal, _add) + n = size(A,1) + Al = _diag(A, -1) + Ad = _diag(A, 0) + Au = _diag(A, 1) + Bd = _diag(B, 0) + if B.uplo == 'U' + Bu = _diag(B, 1) + @inbounds begin + for j in 3:n-2 + Aj₋2j₋1 = Au[j-2] + Aj₋1j = Au[j-1] + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Ajj₋1 = Al[j-1] + Aj₊1j = Al[j] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + + C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) + C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) + C[j+1, j] += _add(Aj₊1j*Bjj) + end + end + else # B.uplo == 'L' + Bl = _diag(B, -1) + @inbounds begin + for j in 3:n-2 + Aj₋1j = Au[j-1] + Ajj₊1 = Au[j] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Aj₊1j = Al[j] + Aj₊2j₊1 = 
Al[j+1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j-1, j] += _add(Aj₋1j*Bjj) + C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) + C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) + end + end + end + C +end +function __bibimul!(C, A::Bidiagonal, B, _add) + n = size(A,1) + Bl = _diag(B, -1) + Bd = _diag(B, 0) + Bu = _diag(B, 1) + Ad = _diag(A, 0) + if A.uplo == 'U' + Au = _diag(A, 1) + @inbounds begin + for j in 3:n-2 + Aj₋2j₋1 = Au[j-2] + Aj₋1j = Au[j-1] + Ajj₊1 = Au[j] + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) + C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j+1, j] += _add(Aj₊1j₊1*Bj₊1j) + end + end + else # A.uplo == 'L' + Al = _diag(A, -1) + @inbounds begin + for j in 3:n-2 + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Ajj₋1 = Al[j-1] + Aj₊1j = Al[j] + Aj₊2j₊1 = Al[j+1] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j) + C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) + C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) + C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) + end + end + end + C +end +function __bibimul!(C, A::Bidiagonal, B::Bidiagonal, _add) + n = size(A,1) + Ad = _diag(A, 0) + Bd = _diag(B, 0) + if A.uplo == 'U' && B.uplo == 'U' + Au = _diag(A, 1) + Bu = _diag(B, 1) + @inbounds begin + for j in 3:n-2 + Aj₋2j₋1 = Au[j-2] + Aj₋1j = Au[j-1] + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + + C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) + C[j, j] += _add(Ajj*Bjj) + end + end + elseif A.uplo == 'U' && B.uplo == 'L' + Au = _diag(A, 1) + Bl = _diag(B, -1) + @inbounds begin + for j in 3:n-2 + Aj₋1j = Au[j-1] + Ajj₊1 = Au[j] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j-1, j] += _add(Aj₋1j*Bjj) + C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j+1, j] += _add(Aj₊1j₊1*Bj₊1j) + end + end + elseif A.uplo == 'L' && B.uplo == 'U' + Al = _diag(A, -1) + Bu = _diag(B, 1) + @inbounds begin + for j in 3:n-2 + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Ajj₋1 = Al[j-1] + Aj₊1j = Al[j] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j) + C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) + C[j+1, j] += _add(Aj₊1j*Bjj) + end + end + else # A.uplo == 'L' && B.uplo == 'L' + Al = _diag(A, -1) + Bl = _diag(B, -1) + @inbounds begin + for j in 3:n-2 + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Aj₊1j = Al[j] + Aj₊2j₊1 = Al[j+1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j, j] += _add(Ajj*Bjj) + C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) + C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) + end + end + end C end @@ -744,7 +932,52 @@ function _mul!(C::AbstractVecOrMat, A::BiTriSym, B::AbstractVecOrMat, _add::MulA nB = size(B,2) (iszero(nA) || iszero(nB)) && return C iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - nA <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) + if nA <= 3 + # naive multiplication + for I in CartesianIndices(C) + col = Base.tail(Tuple(I)) + _modify!(_add, sum(A[I[1], k] * B[k, col...] 
for k in axes(A,2)), C, I) + end + return C + end + _mul_bitrisym!(C, A, B, _add) +end +function _mul_bitrisym!(C::AbstractVecOrMat, A::Bidiagonal, B::AbstractVecOrMat, _add::MulAddMul) + nA = size(A,1) + nB = size(B,2) + d = A.dv + if A.uplo == 'U' + u = A.ev + @inbounds begin + for j = 1:nB + b₀, b₊ = B[1, j], B[2, j] + _modify!(_add, d[1]*b₀ + u[1]*b₊, C, (1, j)) + for i = 2:nA - 1 + b₀, b₊ = b₊, B[i + 1, j] + _modify!(_add, d[i]*b₀ + u[i]*b₊, C, (i, j)) + end + _modify!(_add, d[nA]*b₊, C, (nA, j)) + end + end + else + l = A.ev + @inbounds begin + for j = 1:nB + b₀, b₊ = B[1, j], B[2, j] + _modify!(_add, d[1]*b₀, C, (1, j)) + for i = 2:nA - 1 + b₋, b₀, b₊ = b₀, b₊, B[i + 1, j] + _modify!(_add, l[i - 1]*b₋ + d[i]*b₀, C, (i, j)) + end + _modify!(_add, l[nA - 1]*b₀ + d[nA]*b₊, C, (nA, j)) + end + end + end + C +end +function _mul_bitrisym!(C::AbstractVecOrMat, A::TriSym, B::AbstractVecOrMat, _add::MulAddMul) + nA = size(A,1) + nB = size(B,2) l = _diag(A, -1) d = _diag(A, 0) u = _diag(A, 1) @@ -769,8 +1002,9 @@ function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::TriSym, _add::MulAddMul) m = size(B,2) (iszero(m) || iszero(n)) && return C iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - if n <= 3 || m <= 1 - return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) + if m == 1 + B11 = B[1,1] + return mul!(C, A, B11, _add.alpha, _add.beta) end Bl = _diag(B, -1) Bd = _diag(B, 0) @@ -804,21 +1038,18 @@ function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::Bidiagonal, _add::MulAdd m, n = size(A) (iszero(m) || iszero(n)) && return C iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - if size(A, 1) <= 3 || size(B, 2) <= 1 - return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) - end @inbounds if B.uplo == 'U' + for j in n:-1:2, i in 1:m + _modify!(_add, A[i,j] * B.dv[j] + A[i,j-1] * B.ev[j-1], C, (i, j)) + end for i in 1:m - for j in n:-1:2 - _modify!(_add, A[i,j] * B.dv[j] + A[i,j-1] * B.ev[j-1], C, (i, j)) - end _modify!(_add, A[i,1] * B.dv[1], C, (i, 1)) end else # uplo == 'L' + for j in 1:n-1, i in 1:m + _modify!(_add, A[i,j] * B.dv[j] + A[i,j+1] * B.ev[j], C, (i, j)) + end for i in 1:m - for j in 1:n-1 - _modify!(_add, A[i,j] * B.dv[j] + A[i,j+1] * B.ev[j], C, (i, j)) - end _modify!(_add, A[i,n] * B.dv[n], C, (i, n)) end end @@ -834,7 +1065,12 @@ function _dibimul!(C, A, B, _add) check_A_mul_B!_sizes(size(C), size(A), size(B)) n = size(A,1) iszero(n) && return C - n <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) + if n <= 3 + for I in CartesianIndices(C) + _modify!(_add, A.diag[I[1]] * B[I[1], I[2]], C, I) + end + return C + end _rmul_or_fill!(C, _add.beta) # see the same use above iszero(_add.alpha) && return C Ad = A.diag diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index ef50658a642fb..58c228e39e226 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -1026,26 +1026,71 @@ end @test_throws "cannot set entry" B[1,2] = 4 end -@testset "mul with empty arrays" begin - A = zeros(5,0) - B = Bidiagonal(zeros(0), zeros(0), :U) - BL = Bidiagonal(zeros(5), zeros(4), :U) - @test size(A * B) == size(A) - @test size(BL * A) == size(A) - @test size(B * B) == size(B) - C = similar(A) - @test mul!(C, A, B) == A * B - @test mul!(C, BL, A) == BL * A - @test mul!(similar(B), B, B) == B * B - @test mul!(similar(B, size(B)), B, B) == B * B - - v = zeros(size(B,2)) - @test size(B * v) == size(v) - @test mul!(similar(v), B, v) == B * v - - D = Diagonal(zeros(size(B,2))) - @test size(B 
* D) == size(D * B) == size(D) - @test mul!(similar(D), B, D) == mul!(similar(D), D, B) == B * D +@testset "mul for small matrices" begin + @testset for n in 0:6 + D = Diagonal(rand(n)) + v = rand(n) + @testset for uplo in (:L, :U) + B = Bidiagonal(rand(n), rand(max(n-1,0)), uplo) + M = Matrix(B) + + @test B * v ≈ M * v + @test mul!(similar(v), B, v) ≈ M * v + @test mul!(ones(size(v)), B, v, 2, 3) ≈ M * v * 2 .+ 3 + + @test B * B ≈ M * M + @test mul!(similar(B, size(B)), B, B) ≈ M * M + @test mul!(ones(size(B)), B, B, 2, 4) ≈ M * M * 2 .+ 4 + + for m in 0:6 + AL = rand(m,n) + AR = rand(n,m) + @test AL * B ≈ AL * M + @test B * AR ≈ M * AR + @test mul!(similar(AL), AL, B) ≈ AL * M + @test mul!(similar(AR), B, AR) ≈ M * AR + @test mul!(ones(size(AL)), AL, B, 2, 4) ≈ AL * M * 2 .+ 4 + @test mul!(ones(size(AR)), B, AR, 2, 4) ≈ M * AR * 2 .+ 4 + end + + @test B * D ≈ M * D + @test D * B ≈ D * M + @test mul!(similar(B), B, D) ≈ M * D + @test mul!(similar(B), B, D) ≈ M * D + @test mul!(similar(B, size(B)), D, B) ≈ D * M + @test mul!(similar(B, size(B)), B, D) ≈ M * D + @test mul!(ones(size(B)), D, B, 2, 4) ≈ D * M * 2 .+ 4 + @test mul!(ones(size(B)), B, D, 2, 4) ≈ M * D * 2 .+ 4 + end + BL = Bidiagonal(rand(n), rand(max(0, n-1)), :L) + ML = Matrix(BL) + BU = Bidiagonal(rand(n), rand(max(0, n-1)), :U) + MU = Matrix(BU) + T = Tridiagonal(zeros(max(0, n-1)), zeros(n), zeros(max(0, n-1))) + @test mul!(T, BL, BU) ≈ ML * MU + @test mul!(T, BU, BL) ≈ MU * ML + T = Tridiagonal(ones(max(0, n-1)), ones(n), ones(max(0, n-1))) + @test mul!(copy(T), BL, BU, 2, 3) ≈ ML * MU * 2 + T * 3 + @test mul!(copy(T), BU, BL, 2, 3) ≈ MU * ML * 2 + T * 3 + end + + n = 4 + arr = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) + for B in ( + Bidiagonal(fill(arr,n), fill(arr,n-1), :L), + Bidiagonal(fill(arr,n), fill(arr,n-1), :U), + ) + @test B * B ≈ Matrix(B) * Matrix(B) + BL = Bidiagonal(fill(arr,n), fill(arr,n-1), :L) + BU = Bidiagonal(fill(arr,n), fill(arr,n-1), :U) + @test BL * B ≈ Matrix(BL) * Matrix(B) + @test BU * B ≈ Matrix(BU) * Matrix(B) + @test B * BL ≈ Matrix(B) * Matrix(BL) + @test B * BU ≈ Matrix(B) * Matrix(BU) + D = Diagonal(fill(arr,n)) + @test D * B ≈ Matrix(D) * Matrix(B) + @test B * D ≈ Matrix(B) * Matrix(D) + end end end # module TestBidiagonal diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index 3330fa682fe5e..15ac7f9f2147f 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -970,4 +970,75 @@ end @test sprint(show, S) == "SymTridiagonal($(repr(diag(S))), $(repr(diag(S,1))))" end +@testset "mul for small matrices" begin + @testset for n in 0:6 + for T in ( + Tridiagonal(rand(max(n-1,0)), rand(n), rand(max(n-1,0))), + SymTridiagonal(rand(n), rand(max(n-1,0))), + ) + M = Matrix(T) + @test T * T ≈ M * M + @test mul!(similar(T, size(T)), T, T) ≈ M * M + @test mul!(ones(size(T)), T, T, 2, 4) ≈ M * M * 2 .+ 4 + + for m in 0:6 + AR = rand(n,m) + AL = rand(m,n) + @test AL * T ≈ AL * M + @test T * AR ≈ M * AR + @test mul!(similar(AL), AL, T) ≈ AL * M + @test mul!(similar(AR), T, AR) ≈ M * AR + @test mul!(ones(size(AL)), AL, T, 2, 4) ≈ AL * M * 2 .+ 4 + @test mul!(ones(size(AR)), T, AR, 2, 4) ≈ M * AR * 2 .+ 4 + end + + v = rand(n) + @test T * v ≈ M * v + @test mul!(similar(v), T, v) ≈ M * v + + D = Diagonal(rand(n)) + @test T * D ≈ M * D + @test D * T ≈ D * M + @test mul!(Tridiagonal(similar(T)), D, T) ≈ D * M + @test mul!(Tridiagonal(similar(T)), T, D) ≈ M * D + @test mul!(similar(T, size(T)), D, T) ≈ D * M + @test 
mul!(similar(T, size(T)), T, D) ≈ M * D + @test mul!(ones(size(T)), D, T, 2, 4) ≈ D * M * 2 .+ 4 + @test mul!(ones(size(T)), T, D, 2, 4) ≈ M * D * 2 .+ 4 + + for uplo in (:U, :L) + B = Bidiagonal(rand(n), rand(max(0, n-1)), uplo) + @test T * B ≈ M * B + @test B * T ≈ B * M + if n <= 2 + @test mul!(Tridiagonal(similar(T)), B, T) ≈ B * M + @test mul!(Tridiagonal(similar(T)), T, B) ≈ M * B + end + @test mul!(similar(T, size(T)), B, T) ≈ B * M + @test mul!(similar(T, size(T)), T, B) ≈ M * B + @test mul!(ones(size(T)), B, T, 2, 4) ≈ B * M * 2 .+ 4 + @test mul!(ones(size(T)), T, B, 2, 4) ≈ M * B * 2 .+ 4 + end + end + end + + n = 4 + arr = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) + for T in ( + SymTridiagonal(fill(arr,n), fill(arr,n-1)), + Tridiagonal(fill(arr,n-1), fill(arr,n), fill(arr,n-1)), + ) + @test T * T ≈ Matrix(T) * Matrix(T) + BL = Bidiagonal(fill(arr,n), fill(arr,n-1), :L) + BU = Bidiagonal(fill(arr,n), fill(arr,n-1), :U) + @test BL * T ≈ Matrix(BL) * Matrix(T) + @test BU * T ≈ Matrix(BU) * Matrix(T) + @test T * BL ≈ Matrix(T) * Matrix(BL) + @test T * BU ≈ Matrix(T) * Matrix(BU) + D = Diagonal(fill(arr,n)) + @test D * T ≈ Matrix(D) * Matrix(T) + @test T * D ≈ Matrix(T) * Matrix(D) + end +end + end # module TestTridiagonal From d2807927656efe2bfbfe4402f83e2ea417306db2 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 10 Sep 2024 07:33:56 -0400 Subject: [PATCH 004/537] Fix `@time_imports` extension recognition (#55718) --- base/loading.jl | 41 ++++++++++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index fe86a8c198461..4e70d2bc257ea 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1203,7 +1203,7 @@ const TIMING_IMPORTS = Threads.Atomic{Int}(0) # these return either the array of modules loaded from the path / content given # or an Exception that describes why it couldn't be loaded # and it reconnects the Base.Docs.META -function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{Nothing, String}, depmods::Vector{Any}, ignore_native::Union{Nothing,Bool}=nothing) +function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{Nothing, String}, depmods::Vector{Any}, ignore_native::Union{Nothing,Bool}=nothing; register::Bool=true) if isnothing(ignore_native) if JLOptions().code_coverage == 0 && JLOptions().malloc_log == 0 ignore_native = false @@ -1252,13 +1252,14 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No for M in restored M = M::Module if parentmodule(M) === M && PkgId(M) == pkg + register && register_root_module(M) if timing_imports elapsed = round((time_ns() - t_before) / 1e6, digits = 1) comp_time, recomp_time = cumulative_compile_time_ns() .- t_comp_before print(lpad(elapsed, 9), " ms ") - parentid = get(EXT_PRIMED, pkg, nothing) - if parentid !== nothing - print(parentid.name, " → ") + ext_parent = extension_parent_name(M) + if ext_parent !== nothing + print(ext_parent::String, " → ") end print(pkg.name) if comp_time > 0 @@ -1280,6 +1281,27 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No end end +# if M is an extension, return the string name of the parent. 
Otherwise return nothing +function extension_parent_name(M::Module) + rootmodule = moduleroot(M) + src_path = pathof(rootmodule) + src_path === nothing && return nothing + pkgdir_parts = splitpath(src_path) + ext_pos = findlast(==("ext"), pkgdir_parts) + if ext_pos !== nothing && ext_pos >= length(pkgdir_parts) - 2 + parent_package_root = joinpath(pkgdir_parts[1:ext_pos-1]...) + parent_package_project_file = locate_project_file(parent_package_root) + if parent_package_project_file isa String + d = parsed_toml(parent_package_project_file) + name = get(d, "name", nothing) + if name !== nothing + return name + end + end + end + return nothing +end + function register_restored_modules(sv::SimpleVector, pkg::PkgId, path::String) # This function is also used by PkgCacheInspector.jl restored = sv[1]::Vector{Any} @@ -1461,7 +1483,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} triggers = triggers::Union{String, Vector{String}} triggers isa String && (triggers = [triggers]) id = PkgId(uuid5(parent.uuid::UUID, ext), ext) - if id in keys(EXT_PRIMED) || haskey(Base.loaded_modules, id) + if haskey(EXT_PRIMED, id) || haskey(Base.loaded_modules, id) continue # extension is already primed or loaded, don't add it again end EXT_PRIMED[id] = parent @@ -1890,8 +1912,7 @@ function _tryrequire_from_serialized(pkg::PkgId, path::String, ocachepath::Union depmods[i] = dep end # then load the file - loaded = _include_from_serialized(pkg, path, ocachepath, depmods, ignore_native) - loaded isa Module && register_root_module(loaded) + loaded = _include_from_serialized(pkg, path, ocachepath, depmods, ignore_native; register = true) return loaded end @@ -1958,8 +1979,7 @@ end if dep === nothing try set_pkgorigin_version_path(modkey, modpath) - dep = _include_from_serialized(modkey, modcachepath, modocachepath, modstaledeps) - dep isa Module && stalecheck && register_root_module(dep) + dep = _include_from_serialized(modkey, modcachepath, modocachepath, modstaledeps; register = stalecheck) finally end_loading(modkey, dep) end @@ -1975,9 +1995,8 @@ end end restored = get(loaded_precompiles, pkg => newbuild_id, nothing) if !isa(restored, Module) - restored = _include_from_serialized(pkg, path_to_try, ocachefile, staledeps) + restored = _include_from_serialized(pkg, path_to_try, ocachefile, staledeps; register = stalecheck) end - isa(restored, Module) && stalecheck && register_root_module(restored) isa(restored, Module) && return restored @debug "Deserialization checks failed while attempting to load cache from $path_to_try" exception=restored @label check_next_path From 3653b3898647d4c2528afde1f54bd3e65e3aa8ee Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 10 Sep 2024 12:51:55 -0400 Subject: [PATCH 005/537] drop typed GEP calls (#55708) Now that we use LLVM 18, and almost have LLVM 19 support, do cleanup to remove LLVM 15/16 type pointer support. LLVM now slightly prefers that we rewrite our complex GEP to use a simple emit_ptrgep call instead, which is also much simpler for julia to emit also. 
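For illustration only (an editor's sketch, not part of the patch): with opaque pointers, the typed struct GEPs the old code built can be replaced by a single byte-offset GEP on `i8`, which is what the new `emit_ptrgep` helper in `codegen.cpp` wraps. A minimal stand-alone version of that pattern, assuming only an LLVM `IRBuilder<>` and a base pointer (the function and variable names here are hypothetical, not from the patch):

```c++
#include <llvm/IR/IRBuilder.h>

// Sketch of the pattern this patch adopts: instead of a typed GEP such as
//   builder.CreateConstInBoundsGEP2_32(StructTy, base, 0, field_idx)
// address the field directly by its byte offset on an i8 base, as emit_ptrgep does.
static llvm::Value *byte_offset_gep(llvm::IRBuilder<> &builder, llvm::Value *base,
                                    unsigned byte_offset) {
    llvm::Type *i8 = llvm::Type::getInt8Ty(builder.getContext());
    return builder.CreateConstInBoundsGEP1_32(i8, base, byte_offset);
}
```

With opaque pointers the GEP's element type carries no extra information, so the byte-offset form is equivalent for the optimizer and simpler to emit.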
--- src/ccall.cpp | 11 +- src/cgutils.cpp | 126 ++++++-------------- src/codegen.cpp | 123 ++++++++----------- src/intrinsics.cpp | 4 +- src/llvm-alloc-opt.cpp | 21 +--- src/llvm-codegen-shared.h | 45 +++---- src/llvm-late-gc-lowering.cpp | 72 +---------- src/llvm-ptls.cpp | 4 +- test/llvmpasses/alloc-opt-gcframe.ll | 2 +- test/llvmpasses/late-lower-gc-addrspaces.ll | 56 ++++----- test/llvmpasses/late-lower-gc.ll | 70 +++++------ test/llvmpasses/names.jl | 3 +- 12 files changed, 187 insertions(+), 350 deletions(-) diff --git a/src/ccall.cpp b/src/ccall.cpp index 7ab8cfa974d6f..eac130ea43189 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -1854,8 +1854,8 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs) ctx.builder.SetInsertPoint(checkBB); auto signal_page_load = ctx.builder.CreateLoad( ctx.types().T_size, - ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_size, - get_current_signal_page_from_ptls(ctx.builder, ctx.types().T_size, get_current_ptls(ctx), ctx.tbaa().tbaa_const), -1), + emit_ptrgep(ctx, get_current_signal_page_from_ptls(ctx.builder, get_current_ptls(ctx), ctx.tbaa().tbaa_const), + -sizeof(size_t)), true); setName(ctx.emission_context, signal_page_load, "signal_page_load"); ctx.builder.CreateBr(contBB); @@ -1870,8 +1870,7 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs) auto obj = emit_pointer_from_objref(ctx, boxed(ctx, argv[0])); // T_pprjlvalue // The inbounds gep makes it more clear to LLVM that the resulting value is not // a null pointer. - auto strp = ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, obj, 1); - setName(ctx.emission_context, strp, "string_ptr"); + auto strp = emit_ptrgep(ctx, obj, ctx.types().sizeof_ptr, "string_ptr"); JL_GC_POP(); return mark_or_box_ccall_result(ctx, strp, retboxed, rt, unionall, static_rt); } @@ -1882,9 +1881,7 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs) auto obj = emit_pointer_from_objref(ctx, boxed(ctx, argv[0])); // T_pprjlvalue // The inbounds gep makes it more clear to LLVM that the resulting value is not // a null pointer. 
- auto strp = ctx.builder.CreateConstInBoundsGEP1_32( - ctx.types().T_prjlvalue, obj, (sizeof(jl_sym_t) + sizeof(void*) - 1) / sizeof(void*)); - setName(ctx.emission_context, strp, "symbol_name"); + auto strp = emit_ptrgep(ctx, obj, sizeof(jl_sym_t), "symbol_name"); JL_GC_POP(); return mark_or_box_ccall_result(ctx, strp, retboxed, rt, unionall, static_rt); } diff --git a/src/cgutils.cpp b/src/cgutils.cpp index bec84d9901279..2a234f399f5c1 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -130,14 +130,8 @@ static Value *stringConstPtr( } // Doesn't need to be aligned, we shouldn't operate on these like julia objects GlobalVariable *gv = get_pointer_to_constant(emission_context, Data, Align(1), "_j_str_" + StringRef(ctxt.data(), ctxt.size()), *M); - Value *zero = ConstantInt::get(Type::getInt32Ty(irbuilder.getContext()), 0); - Value *Args[] = { zero, zero }; - auto gep = irbuilder.CreateInBoundsGEP(gv->getValueType(), - // AddrSpaceCast in case globals are in non-0 AS - irbuilder.CreateAddrSpaceCast(gv, gv->getValueType()->getPointerTo(0)), - Args); - setName(emission_context, gep, "string_const_ptr"); - return gep; + // AddrSpaceCast in case globals are in non-0 AS + return irbuilder.CreateAddrSpaceCast(gv, gv->getValueType()->getPointerTo(0)); } @@ -621,12 +615,6 @@ static unsigned convert_struct_offset(jl_codectx_t &ctx, Type *lty, unsigned byt return convert_struct_offset(ctx.builder.GetInsertBlock()->getModule()->getDataLayout(), lty, byte_offset); } -static Value *emit_struct_gep(jl_codectx_t &ctx, Type *lty, Value *base, unsigned byte_offset) -{ - unsigned idx = convert_struct_offset(ctx, lty, byte_offset); - return ctx.builder.CreateConstInBoundsGEP2_32(lty, base, 0, idx); -} - static Type *_julia_struct_to_llvm(jl_codegen_params_t *ctx, LLVMContext &ctxt, jl_value_t *jt, bool *isboxed, bool llvmcall=false); static Type *_julia_type_to_llvm(jl_codegen_params_t *ctx, LLVMContext &ctxt, jl_value_t *jt, bool *isboxed) @@ -1200,10 +1188,10 @@ static Value *emit_typeof(jl_codectx_t &ctx, const jl_cgval_t &p, bool maybenull static Value *emit_datatype_types(jl_codectx_t &ctx, Value *dt) { Value *Ptr = decay_derived(ctx, dt); - Value *Idx = ConstantInt::get(ctx.types().T_size, offsetof(jl_datatype_t, types) / sizeof(void*)); + unsigned Idx = offsetof(jl_datatype_t, types); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); auto types = ai.decorateInst(ctx.builder.CreateAlignedLoad( - ctx.types().T_pjlvalue, ctx.builder.CreateInBoundsGEP(ctx.types().T_pjlvalue, Ptr, Idx), Align(sizeof(void*)))); + ctx.types().T_pjlvalue, emit_ptrgep(ctx, Ptr, Idx), Align(sizeof(void*)))); setName(ctx.emission_context, types, "datatype_types"); return types; } @@ -1222,16 +1210,13 @@ static Value *emit_datatype_size(jl_codectx_t &ctx, Value *dt, bool add_isunion= { jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); Value *Ptr = decay_derived(ctx, dt); - Value *Idx = ConstantInt::get(ctx.types().T_size, offsetof(jl_datatype_t, layout) / sizeof(int32_t*)); - Ptr = ctx.builder.CreateInBoundsGEP(getPointerTy(ctx.builder.getContext()), Ptr, Idx); + Ptr = emit_ptrgep(ctx, Ptr, offsetof(jl_datatype_t, layout)); Ptr = ai.decorateInst(ctx.builder.CreateAlignedLoad(getPointerTy(ctx.builder.getContext()), Ptr, Align(sizeof(int32_t*)))); - Idx = ConstantInt::get(ctx.types().T_size, offsetof(jl_datatype_layout_t, size) / sizeof(int32_t)); - Value *SizePtr = ctx.builder.CreateInBoundsGEP(getInt32Ty(ctx.builder.getContext()), Ptr, Idx); + Value *SizePtr = emit_ptrgep(ctx, 
Ptr, offsetof(jl_datatype_layout_t, size)); Value *Size = ai.decorateInst(ctx.builder.CreateAlignedLoad(getInt32Ty(ctx.builder.getContext()), SizePtr, Align(sizeof(int32_t)))); setName(ctx.emission_context, Size, "datatype_size"); if (add_isunion) { - Idx = ConstantInt::get(ctx.types().T_size, offsetof(jl_datatype_layout_t, flags) / sizeof(int8_t)); - Value *FlagPtr = ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), Ptr, Idx); + Value *FlagPtr = emit_ptrgep(ctx, Ptr, offsetof(jl_datatype_layout_t, flags)); Value *Flag = ai.decorateInst(ctx.builder.CreateAlignedLoad(getInt16Ty(ctx.builder.getContext()), FlagPtr, Align(sizeof(int16_t)))); Flag = ctx.builder.CreateLShr(Flag, 4); Flag = ctx.builder.CreateAnd(Flag, ConstantInt::get(Flag->getType(), 1)); @@ -1308,7 +1293,7 @@ static Value *emit_datatype_mutabl(jl_codectx_t &ctx, Value *dt) static Value *emit_datatype_isprimitivetype(jl_codectx_t &ctx, Value *typ) { Value *isprimitive; - isprimitive = ctx.builder.CreateConstInBoundsGEP1_32(getInt8Ty(ctx.builder.getContext()), decay_derived(ctx, typ), offsetof(jl_datatype_t, hash) + sizeof(((jl_datatype_t*)nullptr)->hash)); + isprimitive = emit_ptrgep(ctx, decay_derived(ctx, typ), offsetof(jl_datatype_t, hash) + sizeof(((jl_datatype_t*)nullptr)->hash)); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); isprimitive = ai.decorateInst(ctx.builder.CreateAlignedLoad(getInt8Ty(ctx.builder.getContext()), isprimitive, Align(1))); isprimitive = ctx.builder.CreateLShr(isprimitive, 7); @@ -1320,10 +1305,7 @@ static Value *emit_datatype_isprimitivetype(jl_codectx_t &ctx, Value *typ) static Value *emit_datatype_name(jl_codectx_t &ctx, Value *dt) { unsigned n = offsetof(jl_datatype_t, name) / sizeof(char*); - Value *vptr = ctx.builder.CreateInBoundsGEP( - ctx.types().T_pjlvalue, - maybe_decay_tracked(ctx, dt), - ConstantInt::get(ctx.types().T_size, n)); + Value *vptr = emit_ptrgep(ctx, maybe_decay_tracked(ctx, dt), n * sizeof(jl_value_t*)); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); auto name = ai.decorateInst(ctx.builder.CreateAlignedLoad(ctx.types().T_pjlvalue, vptr, Align(sizeof(void*)))); setName(ctx.emission_context, name, "datatype_name"); @@ -1522,7 +1504,7 @@ static Value *emit_typeof(jl_codectx_t &ctx, Value *v, bool maybenull, bool just // we lied a bit: this wasn't really an object (though it was valid for GC rooting) // and we need to use it as an index to get the real object now Module *M = jl_Module; - Value *smallp = ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), prepare_global_in(M, jl_small_typeof_var), tag); + Value *smallp = emit_ptrgep(ctx, prepare_global_in(M, jl_small_typeof_var), tag); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); auto small = ctx.builder.CreateAlignedLoad(typetag->getType(), smallp, M->getDataLayout().getPointerABIAlignment(0)); small->setMetadata(LLVMContext::MD_nonnull, MDNode::get(M->getContext(), None)); @@ -1802,7 +1784,7 @@ static void emit_typecheck(jl_codectx_t &ctx, const jl_cgval_t &x, jl_value_t *t static Value *emit_isconcrete(jl_codectx_t &ctx, Value *typ) { Value *isconcrete; - isconcrete = ctx.builder.CreateConstInBoundsGEP1_32(getInt8Ty(ctx.builder.getContext()), decay_derived(ctx, typ), offsetof(jl_datatype_t, hash) + sizeof(((jl_datatype_t*)nullptr)->hash)); + isconcrete = emit_ptrgep(ctx, decay_derived(ctx, typ), offsetof(jl_datatype_t, hash) + sizeof(((jl_datatype_t*)nullptr)->hash)); jl_aliasinfo_t ai = 
jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); isconcrete = ai.decorateInst(ctx.builder.CreateAlignedLoad(getInt8Ty(ctx.builder.getContext()), isconcrete, Align(1))); isconcrete = ctx.builder.CreateLShr(isconcrete, 1); @@ -2848,36 +2830,14 @@ static jl_cgval_t emit_getfield_knownidx(jl_codectx_t &ctx, const jl_cgval_t &st if (strct.ispointer()) { auto tbaa = best_field_tbaa(ctx, strct, jt, idx, byte_offset); Value *staddr = data_pointer(ctx, strct); - bool isboxed; - Type *lt = julia_type_to_llvm(ctx, (jl_value_t*)jt, &isboxed); Value *addr; - if (isboxed) { - // byte_offset == 0 is an important special case here, e.g. - // for single field wrapper types. Introducing the bitcast - // can pessimize mem2reg - if (byte_offset > 0) { - addr = ctx.builder.CreateInBoundsGEP( - getInt8Ty(ctx.builder.getContext()), - staddr, - ConstantInt::get(ctx.types().T_size, byte_offset)); - } - else { - addr = staddr; - } - } - else { - if (jl_is_vecelement_type((jl_value_t*)jt)) - addr = staddr; // VecElement types are unwrapped in LLVM. - else if (isa(lt)) - addr = emit_struct_gep(ctx, lt, staddr, byte_offset); - else - addr = ctx.builder.CreateConstInBoundsGEP2_32(lt, staddr, 0, idx); - if (addr != staddr) { - setNameWithField(ctx.emission_context, addr, get_objname, jt, idx, Twine("_ptr")); - } - } - if (jl_field_isptr(jt, idx)) { + if (jl_is_vecelement_type((jl_value_t*)jt) || byte_offset == 0) + addr = staddr; // VecElement types are unwrapped in LLVM. + else + addr = emit_ptrgep(ctx, staddr, byte_offset); + if (addr != staddr) setNameWithField(ctx.emission_context, addr, get_objname, jt, idx, Twine("_ptr")); + if (jl_field_isptr(jt, idx)) { LoadInst *Load = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, addr, Align(sizeof(void*))); setNameWithField(ctx.emission_context, Load, get_objname, jt, idx, Twine()); Load->setOrdering(order <= jl_memory_order_notatomic ? 
AtomicOrdering::Unordered : get_llvm_atomic_order(order)); @@ -2894,14 +2854,7 @@ static jl_cgval_t emit_getfield_knownidx(jl_codectx_t &ctx, const jl_cgval_t &st bool isptr = (union_max == 0); assert(!isptr && fsz < jl_field_size(jt, idx)); (void)isptr; size_t fsz1 = jl_field_size(jt, idx) - 1; - Value *ptindex; - if (isboxed) { - ptindex = ctx.builder.CreateConstInBoundsGEP1_32( - getInt8Ty(ctx.builder.getContext()), staddr, byte_offset + fsz1); - } - else { - ptindex = emit_struct_gep(ctx, cast(lt), staddr, byte_offset + fsz1); - } + Value *ptindex = emit_ptrgep(ctx, staddr, byte_offset + fsz1); auto val = emit_unionload(ctx, addr, ptindex, jfty, fsz, al, tbaa, !jl_field_isconst(jt, idx), union_max, strct.tbaa); if (val.V && val.V != addr) { setNameWithField(ctx.emission_context, val.V, get_objname, jt, idx, Twine()); @@ -2957,15 +2910,15 @@ static jl_cgval_t emit_getfield_knownidx(jl_codectx_t &ctx, const jl_cgval_t &st for (; i < fsz / align; i++) { unsigned fld = st_idx + i; Value *fldv = ctx.builder.CreateExtractValue(obj, ArrayRef(fld)); - Value *fldp = ctx.builder.CreateConstInBoundsGEP1_32(ET, lv, i); + Value *fldp = emit_ptrgep(ctx, lv, i * align); ctx.builder.CreateAlignedStore(fldv, fldp, Align(align)); } // emit remaining bytes up to tindex if (i < ptindex - st_idx) { - Value *staddr = ctx.builder.CreateConstInBoundsGEP1_32(ET, lv, i); + Value *staddr = emit_ptrgep(ctx, lv, i * align); for (; i < ptindex - st_idx; i++) { Value *fldv = ctx.builder.CreateExtractValue(obj, ArrayRef(st_idx + i)); - Value *fldp = ctx.builder.CreateConstInBoundsGEP1_32(getInt8Ty(ctx.builder.getContext()), staddr, i); + Value *fldp = emit_ptrgep(ctx, staddr, i); ctx.builder.CreateAlignedStore(fldv, fldp, Align(1)); } } @@ -3105,7 +3058,7 @@ static Value *emit_genericmemoryowner(jl_codectx_t &ctx, Value *t) LI->setMetadata(LLVMContext::MD_nonnull, MDNode::get(ctx.builder.getContext(), None)); jl_aliasinfo_t aliasinfo_mem = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_memoryown); aliasinfo_mem.decorateInst(LI); - addr = ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, m, JL_SMALL_BYTE_ALIGNMENT / sizeof(void*)); + addr = emit_ptrgep(ctx, m, JL_SMALL_BYTE_ALIGNMENT); Value *foreign = ctx.builder.CreateICmpNE(addr, decay_derived(ctx, LI)); return emit_guarded_test(ctx, foreign, t, [&] { addr = ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_jlgenericmemory, m, 1); @@ -3867,19 +3820,14 @@ static jl_cgval_t emit_setfield(jl_codectx_t &ctx, auto tbaa = best_field_tbaa(ctx, strct, sty, idx0, byte_offset); Value *addr = data_pointer(ctx, strct); if (byte_offset > 0) { - addr = ctx.builder.CreateInBoundsGEP( - getInt8Ty(ctx.builder.getContext()), - addr, - ConstantInt::get(ctx.types().T_size, byte_offset)); + addr = emit_ptrgep(ctx, addr, byte_offset); setNameWithField(ctx.emission_context, addr, get_objname, sty, idx0, Twine("_ptr")); } jl_value_t *jfty = jl_field_type(sty, idx0); bool isboxed = jl_field_isptr(sty, idx0); if (!isboxed && jl_is_uniontype(jfty)) { size_t fsz1 = jl_field_size(sty, idx0) - 1; - Value *ptindex = ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), - addr, - ConstantInt::get(ctx.types().T_size, fsz1)); + Value *ptindex = emit_ptrgep(ctx, addr, fsz1); setNameWithField(ctx.emission_context, ptindex, get_objname, sty, idx0, Twine(".tindex_ptr")); return union_store(ctx, addr, ptindex, rhs, cmp, jfty, tbaa, ctx.tbaa().tbaa_unionselbyte, Order, FailOrder, @@ -3971,8 +3919,8 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t 
narg if (!init_as_value) { // avoid unboxing the argument explicitly // and use memcpy instead - Instruction *inst; - dest = inst = cast(ctx.builder.CreateConstInBoundsGEP2_32(lt, strct, 0, llvm_idx)); + Instruction *inst = cast(emit_ptrgep(ctx, strct, offs)); + dest = inst; // Our promotion point needs to come before // A) All of our arguments' promotion points // B) Any instructions we insert at any of our arguments' promotion points @@ -4025,16 +3973,16 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg // emit all of the align-sized words unsigned i = 0; for (; i < fsz1 / al; i++) { - Value *fldp = ctx.builder.CreateConstInBoundsGEP1_32(ET, lv, i); + Value *fldp = emit_ptrgep(ctx, lv, i * al); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); Value *fldv = ai.decorateInst(ctx.builder.CreateAlignedLoad(ET, fldp, Align(al))); strct = ctx.builder.CreateInsertValue(strct, fldv, ArrayRef(llvm_idx + i)); } // emit remaining bytes up to tindex if (i < ptindex - llvm_idx) { - Value *staddr = ctx.builder.CreateConstInBoundsGEP1_32(ET, lv, i); + Value *staddr = emit_ptrgep(ctx, lv, i * al); for (; i < ptindex - llvm_idx; i++) { - Value *fldp = ctx.builder.CreateConstInBoundsGEP1_32(getInt8Ty(ctx.builder.getContext()), staddr, i); + Value *fldp = emit_ptrgep(ctx, staddr, i); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); Value *fldv = ai.decorateInst(ctx.builder.CreateAlignedLoad(getInt8Ty(ctx.builder.getContext()), fldp, Align(1))); strct = ctx.builder.CreateInsertValue(strct, fldv, ArrayRef(llvm_idx + i)); @@ -4047,7 +3995,7 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg fval = ctx.builder.CreateInsertValue(strct, fval, ArrayRef(llvm_idx)); } else { - Value *ptindex = emit_struct_gep(ctx, lt, strct, offs + fsz1); + Value *ptindex = emit_ptrgep(ctx, strct, offs + fsz1); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_unionselbyte); ai.decorateInst(ctx.builder.CreateAlignedStore(tindex, ptindex, Align(1))); if (!rhs_union.isghost) @@ -4083,14 +4031,15 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg if (!jl_field_isptr(sty, i) && jl_is_uniontype(jl_field_type(sty, i))) { unsigned offs = jl_field_offset(sty, i); int fsz = jl_field_size(sty, i) - 1; - unsigned llvm_idx = convert_struct_offset(ctx, cast(lt), offs + fsz); - if (init_as_value) + if (init_as_value) { + unsigned llvm_idx = convert_struct_offset(ctx, cast(lt), offs + fsz); strct = ctx.builder.CreateInsertValue(strct, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0), ArrayRef(llvm_idx)); + } else { jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_unionselbyte); ai.decorateInst(ctx.builder.CreateAlignedStore( ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0), - ctx.builder.CreateConstInBoundsGEP2_32(lt, strct, 0, llvm_idx), + emit_ptrgep(ctx, strct, offs + fsz), Align(1))); } } @@ -4126,8 +4075,7 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_unionselbyte); ai.decorateInst(ctx.builder.CreateAlignedStore( ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0), - ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), strct, - ConstantInt::get(ctx.types().T_size, jl_field_offset(sty, i) + jl_field_size(sty, i) - 1)), + emit_ptrgep(ctx, strct, jl_field_offset(sty, i) + jl_field_size(sty, i) - 1), Align(1))); } } @@ -4169,9 
+4117,7 @@ static Value *emit_defer_signal(jl_codectx_t &ctx) { ++EmittedDeferSignal; Value *ptls = get_current_ptls(ctx); - Constant *offset = ConstantInt::getSigned(getInt32Ty(ctx.builder.getContext()), - offsetof(jl_tls_states_t, defer_signal) / sizeof(sig_atomic_t)); - return ctx.builder.CreateInBoundsGEP(ctx.types().T_sigatomic, ptls, ArrayRef(offset), "jl_defer_signal"); + return emit_ptrgep(ctx, ptls, offsetof(jl_tls_states_t, defer_signal)); } #ifndef JL_NDEBUG diff --git a/src/codegen.cpp b/src/codegen.cpp index 9f80791f2882d..9184e4895ab6d 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2161,6 +2161,20 @@ static inline GlobalVariable *prepare_global_in(Module *M, GlobalVariable *G) return cast(local); } +static Value *emit_ptrgep(jl_codectx_t &ctx, Value *base, size_t byte_offset, const Twine &Name="") +{ + auto *gep = ctx.builder.CreateConstInBoundsGEP1_32(getInt8Ty(ctx.builder.getContext()), base, byte_offset); + setName(ctx.emission_context, gep, Name); + return gep; +} + +static Value *emit_ptrgep(jl_codectx_t &ctx, Value *base, Value *byte_offset, const Twine &Name="") +{ + auto *gep = ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), base, byte_offset, Name); + setName(ctx.emission_context, gep, Name); + return gep; +} + // --- convenience functions for tagging llvm values with julia types --- @@ -2211,7 +2225,7 @@ static void undef_derived_strct(jl_codectx_t &ctx, Value *ptr, jl_datatype_t *st size_t i, np = sty->layout->npointers; auto T_prjlvalue = JuliaType::get_prjlvalue_ty(ctx.builder.getContext()); for (i = 0; i < np; i++) { - Value *fld = ctx.builder.CreateConstInBoundsGEP1_32(T_prjlvalue, ptr, jl_ptr_offset(sty, i)); + Value *fld = emit_ptrgep(ctx, ptr, jl_ptr_offset(sty, i) * sizeof(jl_value_t*)); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); ai.decorateInst(ctx.builder.CreateStore(Constant::getNullValue(T_prjlvalue), fld)); } @@ -3542,8 +3556,6 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a return ctx.builder.CreateICmpEQ(answer, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), 0)); } else if (sz > 512 && jl_struct_try_layout(sty) && sty->layout->flags.isbitsegal) { - Type *TInt8 = getInt8Ty(ctx.builder.getContext()); - Type *TInt1 = getInt1Ty(ctx.builder.getContext()); Value *varg1 = arg1.ispointer() ? data_pointer(ctx, arg1) : value_to_pointer(ctx, arg1).V; Value *varg2 = arg2.ispointer() ? 
data_pointer(ctx, arg2) : @@ -3562,8 +3574,8 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a Value *ptr1 = varg1; Value *ptr2 = varg2; if (desc.offset != 0) { - ptr1 = ctx.builder.CreateConstInBoundsGEP1_32(TInt8, ptr1, desc.offset); - ptr2 = ctx.builder.CreateConstInBoundsGEP1_32(TInt8, ptr2, desc.offset); + ptr1 = emit_ptrgep(ctx, ptr1, desc.offset); + ptr2 = emit_ptrgep(ctx, ptr2, desc.offset); } Value *new_ptr1 = ptr1; @@ -3573,7 +3585,7 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a PHINode *answerphi = nullptr; if (desc.nrepeats != 1) { // Set up loop - endptr1 = ctx.builder.CreateConstInBoundsGEP1_32(TInt8, ptr1, desc.nrepeats * (desc.data_bytes + desc.padding_bytes));; + endptr1 = emit_ptrgep(ctx, ptr1, desc.nrepeats * (desc.data_bytes + desc.padding_bytes));; BasicBlock *currBB = ctx.builder.GetInsertBlock(); loopBB = BasicBlock::Create(ctx.builder.getContext(), "egal_loop", ctx.f); @@ -3581,6 +3593,7 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a ctx.builder.CreateBr(loopBB); ctx.builder.SetInsertPoint(loopBB); + Type *TInt1 = getInt1Ty(ctx.builder.getContext()); answerphi = ctx.builder.CreatePHI(TInt1, 2); answerphi->addIncoming(answer ? answer : ConstantInt::get(TInt1, 1), currBB); answer = answerphi; @@ -3588,11 +3601,11 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a PHINode *itr1 = ctx.builder.CreatePHI(ptr1->getType(), 2); PHINode *itr2 = ctx.builder.CreatePHI(ptr2->getType(), 2); - new_ptr1 = ctx.builder.CreateConstInBoundsGEP1_32(TInt8, itr1, desc.data_bytes + desc.padding_bytes); + new_ptr1 = emit_ptrgep(ctx, itr1, desc.data_bytes + desc.padding_bytes); itr1->addIncoming(ptr1, currBB); itr1->addIncoming(new_ptr1, loopBB); - Value *new_ptr2 = ctx.builder.CreateConstInBoundsGEP1_32(TInt8, itr2, desc.data_bytes + desc.padding_bytes); + Value *new_ptr2 = emit_ptrgep(ctx, itr2, desc.data_bytes + desc.padding_bytes); itr2->addIncoming(ptr2, currBB); itr2->addIncoming(new_ptr2, loopBB); @@ -4074,7 +4087,7 @@ static bool emit_f_opmemory(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, ptindex = ctx.builder.CreateInBoundsGEP(AT, data, mlen); data = ctx.builder.CreateInBoundsGEP(AT, data, idx0); } - ptindex = ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), ptindex, idx0); + ptindex = emit_ptrgep(ctx, ptindex, idx0); *ret = union_store(ctx, data, ptindex, val, cmp, ety, ctx.tbaa().tbaa_arraybuf, ctx.tbaa().tbaa_arrayselbyte, Order, FailOrder, @@ -4089,7 +4102,7 @@ static bool emit_f_opmemory(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, assert(ptr); lock = ptr; // ptr += sizeof(lock); - ptr = ctx.builder.CreateConstInBoundsGEP1_32(getInt8Ty(ctx.builder.getContext()), ptr, LLT_ALIGN(sizeof(jl_mutex_t), JL_SMALL_BYTE_ALIGNMENT)); + ptr = emit_ptrgep(ctx, ptr, LLT_ALIGN(sizeof(jl_mutex_t), JL_SMALL_BYTE_ALIGNMENT)); } Value *data_owner = NULL; // owner object against which the write barrier must check if (isboxed || layout->first_ptr >= 0) { // if elements are just bits, don't need a write barrier @@ -4204,7 +4217,7 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, #ifdef _P64 nva = ctx.builder.CreateTrunc(nva, getInt32Ty(ctx.builder.getContext())); #endif - Value *theArgs = ctx.builder.CreateInBoundsGEP(ctx.types().T_prjlvalue, ctx.argArray, ConstantInt::get(ctx.types().T_size, ctx.nReqArgs)); + Value *theArgs = emit_ptrgep(ctx, ctx.argArray, ctx.nReqArgs * sizeof(jl_value_t*)); 
Value *r = ctx.builder.CreateCall(prepare_call(jlapplygeneric_func), { theF, theArgs, nva }); *ret = mark_julia_type(ctx, r, true, jl_any_type); return true; @@ -4354,7 +4367,7 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, ptindex = ctx.builder.CreateInBoundsGEP(AT, data, mlen); data = ctx.builder.CreateInBoundsGEP(AT, data, idx0); } - ptindex = ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), ptindex, idx0); + ptindex = emit_ptrgep(ctx, ptindex, idx0); size_t elsz_c = 0, al_c = 0; int union_max = jl_islayout_inline(ety, &elsz_c, &al_c); assert(union_max && LLT_ALIGN(elsz_c, al_c) == elsz && al_c == al); @@ -4367,7 +4380,7 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, assert(ptr); lock = ptr; // ptr += sizeof(lock); - ptr = ctx.builder.CreateConstInBoundsGEP1_32(getInt8Ty(ctx.builder.getContext()), ptr, LLT_ALIGN(sizeof(jl_mutex_t), JL_SMALL_BYTE_ALIGNMENT)); + ptr = emit_ptrgep(ctx, ptr, LLT_ALIGN(sizeof(jl_mutex_t), JL_SMALL_BYTE_ALIGNMENT)); emit_lockstate_value(ctx, lock, true); } *ret = typed_load(ctx, ptr, nullptr, ety, @@ -4458,10 +4471,10 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, if (needlock) { // n.b. no actual lock acquire needed, as the check itself only needs to load a single pointer and check for null // elem += sizeof(lock); - elem = ctx.builder.CreateConstInBoundsGEP1_32(getInt8Ty(ctx.builder.getContext()), elem, LLT_ALIGN(sizeof(jl_mutex_t), JL_SMALL_BYTE_ALIGNMENT)); + elem = emit_ptrgep(ctx, elem, LLT_ALIGN(sizeof(jl_mutex_t), JL_SMALL_BYTE_ALIGNMENT)); } if (!isboxed) - elem = ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, elem, layout->first_ptr); + elem = emit_ptrgep(ctx, elem, layout->first_ptr * sizeof(void*)); // emit this using the same type as jl_builtin_memoryrefget // so that LLVM may be able to load-load forward them and fold the result auto tbaa = isboxed ? 
ctx.tbaa().tbaa_ptrarraybuf : ctx.tbaa().tbaa_arraybuf; @@ -4549,7 +4562,7 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, if (load->getPointerOperand() == ctx.slots[ctx.vaSlot].boxroot && ctx.argArray) { Value *valen = emit_n_varargs(ctx); jl_cgval_t va_ary( // fake instantiation of a cgval, in order to call emit_bounds_check (it only checks the `.V` field) - ctx.builder.CreateInBoundsGEP(ctx.types().T_prjlvalue, ctx.argArray, ConstantInt::get(ctx.types().T_size, ctx.nReqArgs)), + emit_ptrgep(ctx, ctx.argArray, ctx.nReqArgs * sizeof(jl_value_t*)), NULL, NULL); Value *idx = emit_unbox(ctx, ctx.types().T_size, fld, (jl_value_t*)jl_long_type); idx = emit_bounds_check(ctx, va_ary, NULL, idx, valen, boundscheck); @@ -4899,7 +4912,7 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, if (!jl_field_isptr(stt, fieldidx)) offs += ((jl_datatype_t*)jl_field_type(stt, fieldidx))->layout->first_ptr; Value *ptr = data_pointer(ctx, obj); - Value *addr = ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, ptr, offs); + Value *addr = emit_ptrgep(ctx, ptr, offs * sizeof(jl_value_t*)); // emit this using the same type as emit_getfield_knownidx // so that LLVM may be able to load-load forward them and fold the result jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); @@ -5583,10 +5596,7 @@ static jl_cgval_t emit_sparam(jl_codectx_t &ctx, size_t i) } } assert(ctx.spvals_ptr != NULL); - Value *bp = ctx.builder.CreateConstInBoundsGEP1_32( - ctx.types().T_prjlvalue, - ctx.spvals_ptr, - i + sizeof(jl_svec_t) / sizeof(jl_value_t*)); + Value *bp = emit_ptrgep(ctx, ctx.spvals_ptr, i * sizeof(jl_value_t*) + sizeof(jl_svec_t)); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); Value *sp = ai.decorateInst(ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, bp, Align(sizeof(void*)))); setName(ctx.emission_context, sp, "sparam"); @@ -5639,10 +5649,7 @@ static jl_cgval_t emit_isdefined(jl_codectx_t &ctx, jl_value_t *sym, int allow_i } } assert(ctx.spvals_ptr != NULL); - Value *bp = ctx.builder.CreateConstInBoundsGEP1_32( - ctx.types().T_prjlvalue, - ctx.spvals_ptr, - i + sizeof(jl_svec_t) / sizeof(jl_value_t*)); + Value *bp = emit_ptrgep(ctx, ctx.spvals_ptr, i * sizeof(jl_value_t*) + sizeof(jl_svec_t)); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); Value *sp = ai.decorateInst(ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, bp, Align(sizeof(void*)))); isnull = ctx.builder.CreateICmpNE(emit_typeof(ctx, sp, false, true), emit_tagfrom(ctx, jl_tvar_type)); @@ -6753,34 +6760,26 @@ static void allocate_gc_frame(jl_codectx_t &ctx, BasicBlock *b0, bool or_new=fal static Value *get_current_task(jl_codectx_t &ctx) { - return get_current_task_from_pgcstack(ctx.builder, ctx.types().T_size, ctx.pgcstack); + return get_current_task_from_pgcstack(ctx.builder, ctx.pgcstack); } // Get PTLS through current task. 
static Value *get_current_ptls(jl_codectx_t &ctx) { - return get_current_ptls_from_task(ctx.builder, ctx.types().T_size, get_current_task(ctx), ctx.tbaa().tbaa_gcframe); + return get_current_ptls_from_task(ctx.builder, get_current_task(ctx), ctx.tbaa().tbaa_gcframe); } // Get the address of the world age of the current task static Value *get_tls_world_age_field(jl_codectx_t &ctx) { Value *ct = get_current_task(ctx); - return ctx.builder.CreateInBoundsGEP( - ctx.types().T_size, - ct, - ConstantInt::get(ctx.types().T_size, offsetof(jl_task_t, world_age) / ctx.types().sizeof_ptr), - "world_age"); + return emit_ptrgep(ctx, ct, offsetof(jl_task_t, world_age), "world_age"); } static Value *get_scope_field(jl_codectx_t &ctx) { Value *ct = get_current_task(ctx); - return ctx.builder.CreateInBoundsGEP( - ctx.types().T_prjlvalue, - ct, - ConstantInt::get(ctx.types().T_size, offsetof(jl_task_t, scope) / ctx.types().sizeof_ptr), - "current_scope"); + return emit_ptrgep(ctx, ct, offsetof(jl_task_t, scope), "current_scope"); } static Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, Module *M, jl_codegen_params_t ¶ms) @@ -6912,10 +6911,7 @@ static void emit_cfunc_invalidate( case jl_returninfo_t::SRet: { if (return_roots) { Value *root1 = gf_thunk->arg_begin() + 1; // root1 has type [n x {}*]* - #if JL_LLVM_VERSION < 170000 - assert(cast(root1->getType())->isOpaqueOrPointeeTypeMatches(get_returnroots_type(ctx, return_roots))); - #endif - root1 = ctx.builder.CreateConstInBoundsGEP2_32(get_returnroots_type(ctx, return_roots), root1, 0, 0); + // store the whole object in the first slot ctx.builder.CreateStore(gf_ret, root1); } Align alignment(julia_alignment(rettype)); @@ -7094,10 +7090,7 @@ static Function* gen_cfun_wrapper( if (calltype) { LoadInst *lam_max = ctx.builder.CreateAlignedLoad( ctx.types().T_size, - ctx.builder.CreateConstInBoundsGEP1_32( - ctx.types().T_size, - literal_pointer_val(ctx, (jl_value_t*)codeinst), - offsetof(jl_code_instance_t, max_world) / ctx.types().sizeof_ptr), + emit_ptrgep(ctx, literal_pointer_val(ctx, (jl_value_t*)codeinst), offsetof(jl_code_instance_t, max_world)), ctx.types().alignof_ptr); age_ok = ctx.builder.CreateICmpUGE(lam_max, world_v); } @@ -7178,7 +7171,7 @@ static Function* gen_cfun_wrapper( *closure_types = jl_alloc_vec_any(0); jl_array_ptr_1d_push(*closure_types, jargty); Value *runtime_dt = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, - ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, nestPtr, jl_array_nrows(*closure_types)), + emit_ptrgep(ctx, nestPtr, jl_array_nrows(*closure_types) * ctx.types().sizeof_ptr), Align(sizeof(void*))); BasicBlock *boxedBB = BasicBlock::Create(ctx.builder.getContext(), "isboxed", cw); BasicBlock *loadBB = BasicBlock::Create(ctx.builder.getContext(), "need-load", cw); @@ -7244,7 +7237,7 @@ static Function* gen_cfun_wrapper( *closure_types = jl_alloc_vec_any(0); jl_array_ptr_1d_push(*closure_types, jargty); Value *runtime_dt = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, - ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, nestPtr, jl_array_nrows(*closure_types)), + emit_ptrgep(ctx, nestPtr, jl_array_nrows(*closure_types) * ctx.types().sizeof_ptr), Align(sizeof(void*))); Value *strct = box_ccall_result(ctx, val, runtime_dt, jargty); inputarg = mark_julia_type(ctx, strct, true, jargty_proper); @@ -7823,7 +7816,7 @@ static Function *gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlret theArg = funcArg; } else { - Value *argPtr = 
ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, argArray, i - 1); + Value *argPtr = emit_ptrgep(ctx, argArray, (i - 1) * ctx.types().sizeof_ptr); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); theArg = ai.decorateInst(maybe_mark_load_dereferenceable( ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, argPtr, Align(sizeof(void*))), @@ -7850,7 +7843,7 @@ static Function *gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlret theArg = funcArg; else theArg = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, - ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, argArray, retarg - 1), + emit_ptrgep(ctx, argArray, (retarg - 1) * ctx.types().sizeof_ptr), Align(sizeof(void*))); retval = mark_julia_type(ctx, theArg, true, jl_any_type); } @@ -7969,7 +7962,7 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value param.addAttribute(Attribute::NoCapture); param.addAttribute(Attribute::NoUndef); attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param)); - fsig.push_back(get_returnroots_type(ctx, props.return_roots)->getPointerTo(0)); + fsig.push_back(ctx.types().T_ptr); argnames.push_back("return_roots"); } @@ -8069,9 +8062,9 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value return props; } -static void emit_sret_roots(jl_codectx_t &ctx, bool isptr, Value *Src, Type *T, Value *Shadow, Type *ShadowT, unsigned count) +static void emit_sret_roots(jl_codectx_t &ctx, bool isptr, Value *Src, Type *T, Value *Shadow, unsigned count) { - unsigned emitted = TrackWithShadow(Src, T, isptr, Shadow, ShadowT, ctx.builder); //This comes from Late-GC-Lowering?? + unsigned emitted = TrackWithShadow(Src, T, isptr, Shadow, ctx.builder); //This comes from Late-GC-Lowering?? 
assert(emitted == count); (void)emitted; (void)count; } @@ -8773,9 +8766,7 @@ static jl_llvm_functions_t // Load closure world Value *oc_this = decay_derived(ctx, &*AI++); Value *argaddr = oc_this; - Value *worldaddr = ctx.builder.CreateInBoundsGEP( - getInt8Ty(ctx.builder.getContext()), argaddr, - ConstantInt::get(ctx.types().T_size, offsetof(jl_opaque_closure_t, world))); + Value *worldaddr = emit_ptrgep(ctx, argaddr, offsetof(jl_opaque_closure_t, world)); jl_cgval_t closure_world = typed_load(ctx, worldaddr, NULL, (jl_value_t*)jl_long_type, nullptr, nullptr, false, AtomicOrdering::NotAtomic, false, ctx.types().alignof_ptr.value()); @@ -8783,9 +8774,7 @@ static jl_llvm_functions_t emit_unbox_store(ctx, closure_world, world_age_field, ctx.tbaa().tbaa_gcframe, ctx.types().alignof_ptr); // Load closure env - Value *envaddr = ctx.builder.CreateInBoundsGEP( - getInt8Ty(ctx.builder.getContext()), argaddr, - ConstantInt::get(ctx.types().T_size, offsetof(jl_opaque_closure_t, captures))); + Value *envaddr = emit_ptrgep(ctx, argaddr, offsetof(jl_opaque_closure_t, captures)); jl_cgval_t closure_env = typed_load(ctx, envaddr, NULL, (jl_value_t*)jl_any_type, nullptr, nullptr, true, AtomicOrdering::NotAtomic, false, sizeof(void*)); @@ -8800,7 +8789,7 @@ static jl_llvm_functions_t theArg = mark_julia_type(ctx, fArg, true, vi.value.typ); } else { - Value *argPtr = ctx.builder.CreateConstInBoundsGEP1_32(ctx.types().T_prjlvalue, argArray, i - 1); + Value *argPtr = emit_ptrgep(ctx, argArray, (i - 1) * ctx.types().sizeof_ptr); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); Value *load = ai.decorateInst(maybe_mark_load_dereferenceable( ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, argPtr, Align(sizeof(void*))), @@ -8876,10 +8865,8 @@ static jl_llvm_functions_t restTuple = ctx.builder.CreateCall(F, { Constant::getNullValue(ctx.types().T_prjlvalue), - ctx.builder.CreateInBoundsGEP(ctx.types().T_prjlvalue, argArray, - ConstantInt::get(ctx.types().T_size, nreq - 1)), - ctx.builder.CreateSub(argCount, - ConstantInt::get(getInt32Ty(ctx.builder.getContext()), nreq - 1)) }); + emit_ptrgep(ctx, argArray, (nreq - 1) * sizeof(jl_value_t*)), + ctx.builder.CreateSub(argCount, ctx.builder.getInt32(nreq - 1)) }); restTuple->setAttributes(F->getAttributes()); ctx.builder.CreateStore(restTuple, vi.boxroot); } @@ -9319,7 +9306,7 @@ static jl_llvm_functions_t if (retvalinfo.ispointer()) { if (returninfo.return_roots) { Type *store_ty = julia_type_to_llvm(ctx, retvalinfo.typ); - emit_sret_roots(ctx, true, data_pointer(ctx, retvalinfo), store_ty, f->arg_begin() + 1, get_returnroots_type(ctx, returninfo.return_roots), returninfo.return_roots); + emit_sret_roots(ctx, true, data_pointer(ctx, retvalinfo), store_ty, f->arg_begin() + 1, returninfo.return_roots); } if (returninfo.cc == jl_returninfo_t::SRet) { assert(jl_is_concrete_type(jlrettype)); @@ -9336,7 +9323,7 @@ static jl_llvm_functions_t Value *Val = retvalinfo.V; if (returninfo.return_roots) { assert(julia_type_to_llvm(ctx, retvalinfo.typ) == store_ty); - emit_sret_roots(ctx, false, Val, store_ty, f->arg_begin() + 1, get_returnroots_type(ctx, returninfo.return_roots), returninfo.return_roots); + emit_sret_roots(ctx, false, Val, store_ty, f->arg_begin() + 1, returninfo.return_roots); } ctx.builder.CreateAlignedStore(Val, sret, Align(julia_alignment(retvalinfo.typ))); assert(retvalinfo.TIndex == NULL && "unreachable"); // unimplemented representation @@ -9447,11 +9434,7 @@ static jl_llvm_functions_t ctx.builder.CreateBr(handlr); } 
ctx.builder.SetInsertPoint(tryblk); - auto ehptr = ctx.builder.CreateInBoundsGEP( - ctx.types().T_ptr, - ct, - ConstantInt::get(ctx.types().T_size, offsetof(jl_task_t, eh) / ctx.types().sizeof_ptr), - "eh"); + auto ehptr = emit_ptrgep(ctx, ct, offsetof(jl_task_t, eh)); ctx.builder.CreateAlignedStore(ehbuf, ehptr, ctx.types().alignof_ptr); } } diff --git a/src/intrinsics.cpp b/src/intrinsics.cpp index 4bfe3f184d24b..194b45886bb0d 100644 --- a/src/intrinsics.cpp +++ b/src/intrinsics.cpp @@ -767,7 +767,7 @@ static jl_cgval_t emit_pointerref(jl_codectx_t &ctx, ArrayRef argv) LLT_ALIGN(size, jl_datatype_align(ety)))); setName(ctx.emission_context, im1, "pointerref_offset"); Value *thePtr = emit_unbox(ctx, getPointerTy(ctx.builder.getContext()), e, e.typ); - thePtr = ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), thePtr, im1); + thePtr = emit_ptrgep(ctx, thePtr, im1); setName(ctx.emission_context, thePtr, "pointerref_src"); MDNode *tbaa = best_tbaa(ctx.tbaa(), ety); emit_memcpy(ctx, strct, jl_aliasinfo_t::fromTBAA(ctx, tbaa), thePtr, jl_aliasinfo_t::fromTBAA(ctx, nullptr), size, Align(sizeof(jl_value_t*)), Align(align_nb)); @@ -848,7 +848,7 @@ static jl_cgval_t emit_pointerset(jl_codectx_t &ctx, ArrayRef argv) im1 = ctx.builder.CreateMul(im1, ConstantInt::get(ctx.types().T_size, LLT_ALIGN(size, jl_datatype_align(ety)))); setName(ctx.emission_context, im1, "pointerset_offset"); - auto gep = ctx.builder.CreateInBoundsGEP(getInt8Ty(ctx.builder.getContext()), thePtr, im1); + auto gep = emit_ptrgep(ctx, thePtr, im1); setName(ctx.emission_context, gep, "pointerset_ptr"); emit_memcpy(ctx, gep, jl_aliasinfo_t::fromTBAA(ctx, nullptr), x, size, Align(align_nb), Align(julia_alignment(ety))); } diff --git a/src/llvm-alloc-opt.cpp b/src/llvm-alloc-opt.cpp index 5984ad55d221c..188955fd50972 100644 --- a/src/llvm-alloc-opt.cpp +++ b/src/llvm-alloc-opt.cpp @@ -770,26 +770,7 @@ void Optimizer::moveToStack(CallInst *orig_inst, size_t sz, bool has_ref, AllocF user->replaceUsesOfWith(orig_i, replace); } else if (isa(user) || isa(user)) { - #if JL_LLVM_VERSION >= 170000 - #ifndef JL_NDEBUG - auto cast_t = PointerType::get(user->getType(), new_i->getType()->getPointerAddressSpace()); - Type *new_t = new_i->getType(); - assert(cast_t == new_t); - #endif - auto replace_i = new_i; - #else - auto cast_t = PointerType::getWithSamePointeeType(cast(user->getType()), new_i->getType()->getPointerAddressSpace()); - auto replace_i = new_i; - Type *new_t = new_i->getType(); - if (cast_t != new_t) { - // Shouldn't get here when using opaque pointers, so the new BitCastInst is fine - assert(cast_t->getContext().supportsTypedPointers()); - replace_i = new BitCastInst(replace_i, cast_t, "", user); - replace_i->setDebugLoc(user->getDebugLoc()); - replace_i->takeName(user); - } - #endif - push_frame(user, replace_i); + push_frame(user, new_i); } else if (auto gep = dyn_cast(user)) { SmallVector IdxOperands(gep->idx_begin(), gep->idx_end()); diff --git a/src/llvm-codegen-shared.h b/src/llvm-codegen-shared.h index 242dab021f101..956c04dbc7ded 100644 --- a/src/llvm-codegen-shared.h +++ b/src/llvm-codegen-shared.h @@ -125,7 +125,7 @@ struct CountTrackedPointers { CountTrackedPointers(llvm::Type *T, bool ignore_loaded=false); }; -unsigned TrackWithShadow(llvm::Value *Src, llvm::Type *T, bool isptr, llvm::Value *Dst, llvm::Type *DTy, llvm::IRBuilder<> &irbuilder); +unsigned TrackWithShadow(llvm::Value *Src, llvm::Type *T, bool isptr, llvm::Value *Dst, llvm::IRBuilder<> &irbuilder); llvm::SmallVector 
ExtractTrackedValues(llvm::Value *Src, llvm::Type *STy, bool isptr, llvm::IRBuilder<> &irbuilder, llvm::ArrayRef perm_offsets={}); static inline void llvm_dump(llvm::Value *v) @@ -187,45 +187,39 @@ static inline llvm::Instruction *tbaa_decorate(llvm::MDNode *md, llvm::Instructi } // Get PTLS through current task. -static inline llvm::Value *get_current_task_from_pgcstack(llvm::IRBuilder<> &builder, llvm::Type *T_size, llvm::Value *pgcstack) +static inline llvm::Value *get_current_task_from_pgcstack(llvm::IRBuilder<> &builder, llvm::Value *pgcstack) { using namespace llvm; - auto T_pjlvalue = JuliaType::get_pjlvalue_ty(builder.getContext()); + auto i8 = builder.getInt8Ty(); const int pgcstack_offset = offsetof(jl_task_t, gcstack); - return builder.CreateInBoundsGEP( - T_pjlvalue, pgcstack, - ConstantInt::get(T_size, -(pgcstack_offset / sizeof(void *))), - "current_task"); + return builder.CreateConstInBoundsGEP1_32(i8, pgcstack, -pgcstack_offset, "current_task"); } // Get PTLS through current task. -static inline llvm::Value *get_current_ptls_from_task(llvm::IRBuilder<> &builder, llvm::Type *T_size, llvm::Value *current_task, llvm::MDNode *tbaa) +static inline llvm::Value *get_current_ptls_from_task(llvm::IRBuilder<> &builder, llvm::Value *current_task, llvm::MDNode *tbaa) { using namespace llvm; - auto T_pjlvalue = JuliaType::get_pjlvalue_ty(builder.getContext()); + auto i8 = builder.getInt8Ty(); + auto T_ptr = builder.getPtrTy(); const int ptls_offset = offsetof(jl_task_t, ptls); - llvm::Value *pptls = builder.CreateInBoundsGEP( - T_pjlvalue, current_task, - ConstantInt::get(T_size, ptls_offset / sizeof(void *)), - "ptls_field"); - LoadInst *ptls_load = builder.CreateAlignedLoad(T_pjlvalue, - pptls, Align(sizeof(void *)), "ptls_load"); + llvm::Value *pptls = builder.CreateConstInBoundsGEP1_32(i8, current_task, ptls_offset, "ptls_field"); + LoadInst *ptls_load = builder.CreateAlignedLoad(T_ptr, pptls, Align(sizeof(void *)), "ptls_load"); // Note: Corresponding store (`t->ptls = ptls`) happens in `ctx_switch` of tasks.c. tbaa_decorate(tbaa, ptls_load); return ptls_load; } // Get signal page through current task. 
-static inline llvm::Value *get_current_signal_page_from_ptls(llvm::IRBuilder<> &builder, llvm::Type *T_size, llvm::Value *ptls, llvm::MDNode *tbaa) +static inline llvm::Value *get_current_signal_page_from_ptls(llvm::IRBuilder<> &builder, llvm::Value *ptls, llvm::MDNode *tbaa) { using namespace llvm; // return builder.CreateCall(prepare_call(reuse_signal_page_func)); - auto T_psize = T_size->getPointerTo(); - int nthfield = offsetof(jl_tls_states_t, safepoint) / sizeof(void *); - llvm::Value *psafepoint = builder.CreateInBoundsGEP( - T_psize, ptls, ConstantInt::get(T_size, nthfield)); + auto T_ptr = builder.getPtrTy(); + auto i8 = builder.getInt8Ty(); + int nthfield = offsetof(jl_tls_states_t, safepoint); + llvm::Value *psafepoint = builder.CreateConstInBoundsGEP1_32(i8, ptls, nthfield); LoadInst *ptls_load = builder.CreateAlignedLoad( - T_psize, psafepoint, Align(sizeof(void *)), "safepoint"); + T_ptr, psafepoint, Align(sizeof(void *)), "safepoint"); tbaa_decorate(tbaa, ptls_load); return ptls_load; } @@ -239,7 +233,7 @@ static inline void emit_signal_fence(llvm::IRBuilder<> &builder) static inline void emit_gc_safepoint(llvm::IRBuilder<> &builder, llvm::Type *T_size, llvm::Value *ptls, llvm::MDNode *tbaa, bool final = false) { using namespace llvm; - llvm::Value *signal_page = get_current_signal_page_from_ptls(builder, T_size, ptls, tbaa); + llvm::Value *signal_page = get_current_signal_page_from_ptls(builder, ptls, tbaa); emit_signal_fence(builder); Module *M = builder.GetInsertBlock()->getModule(); LLVMContext &C = builder.getContext(); @@ -250,8 +244,7 @@ static inline void emit_gc_safepoint(llvm::IRBuilder<> &builder, llvm::Type *T_s else { Function *F = M->getFunction("julia.safepoint"); if (!F) { - auto T_psize = T_size->getPointerTo(); - FunctionType *FT = FunctionType::get(Type::getVoidTy(C), {T_psize}, false); + FunctionType *FT = FunctionType::get(Type::getVoidTy(C), {T_size->getPointerTo()}, false); F = Function::Create(FT, Function::ExternalLinkage, "julia.safepoint", M); #if JL_LLVM_VERSION >= 160000 F->setMemoryEffects(MemoryEffects::inaccessibleOrArgMemOnly()); @@ -268,8 +261,8 @@ static inline llvm::Value *emit_gc_state_set(llvm::IRBuilder<> &builder, llvm::T { using namespace llvm; Type *T_int8 = state->getType(); - Constant *offset = ConstantInt::getSigned(builder.getInt32Ty(), offsetof(jl_tls_states_t, gc_state)); - Value *gc_state = builder.CreateInBoundsGEP(T_int8, ptls, ArrayRef(offset), "gc_state"); + unsigned offset = offsetof(jl_tls_states_t, gc_state); + Value *gc_state = builder.CreateConstInBoundsGEP1_32(T_int8, ptls, offset, "gc_state"); if (old_state == nullptr) { old_state = builder.CreateLoad(T_int8, gc_state); cast(old_state)->setOrdering(AtomicOrdering::Monotonic); diff --git a/src/llvm-late-gc-lowering.cpp b/src/llvm-late-gc-lowering.cpp index e08f08860dfaf..8d1d5ff73b261 100644 --- a/src/llvm-late-gc-lowering.cpp +++ b/src/llvm-late-gc-lowering.cpp @@ -350,15 +350,7 @@ void LateLowerGCFrame::LiftSelect(State &S, SelectInst *SI) { ConstantInt::get(Type::getInt32Ty(Cond->getContext()), i), "", SI); } - #if JL_LLVM_VERSION >= 170000 assert(FalseElem->getType() == TrueElem->getType()); - #else - if (FalseElem->getType() != TrueElem->getType()) { - // Shouldn't get here when using opaque pointers, so the new BitCastInst is fine - assert(FalseElem->getContext().supportsTypedPointers()); - FalseElem = new BitCastInst(FalseElem, TrueElem->getType(), "", SI); - } - #endif SelectInst *SelectBase = SelectInst::Create(Cond, TrueElem, FalseElem, "gclift", SI); int 
Number = ++S.MaxPtrNumber; S.AllPtrNumbering[SelectBase] = Number; @@ -427,33 +419,7 @@ void LateLowerGCFrame::LiftPhi(State &S, PHINode *Phi) { BaseElem = Base; else BaseElem = IncomingBases[i]; - #if JL_LLVM_VERSION >= 170000 assert(BaseElem->getType() == T_prjlvalue); - #else - if (BaseElem->getType() != T_prjlvalue) { - // Shouldn't get here when using opaque pointers, so the new BitCastInst is fine - assert(BaseElem->getContext().supportsTypedPointers()); - auto &remap = CastedRoots[i][BaseElem]; - if (!remap) { - if (auto constant = dyn_cast(BaseElem)) { - remap = ConstantExpr::getBitCast(constant, T_prjlvalue, ""); - } else { - Instruction *InsertBefore; - if (auto arg = dyn_cast(BaseElem)) { - InsertBefore = &*arg->getParent()->getEntryBlock().getFirstInsertionPt(); - } else { - assert(isa(BaseElem) && "Unknown value type detected!"); - InsertBefore = cast(BaseElem)->getNextNonDebugInstruction(); - } - while (isa(InsertBefore)) { - InsertBefore = InsertBefore->getNextNonDebugInstruction(); - } - remap = new BitCastInst(BaseElem, T_prjlvalue, "", InsertBefore); - } - } - BaseElem = remap; - } - #endif lift->addIncoming(BaseElem, IncomingBB); } } @@ -1528,14 +1494,11 @@ SmallVector ExtractTrackedValues(Value *Src, Type *STy, bool isptr, I return Ptrs; } -unsigned TrackWithShadow(Value *Src, Type *STy, bool isptr, Value *Dst, Type *DTy, IRBuilder<> &irbuilder) { +unsigned TrackWithShadow(Value *Src, Type *STy, bool isptr, Value *Dst, IRBuilder<> &irbuilder) { auto Ptrs = ExtractTrackedValues(Src, STy, isptr, irbuilder); for (unsigned i = 0; i < Ptrs.size(); ++i) { - Value *Elem = Ptrs[i];// Dst has type `[n x {}*]*` - Value *Slot = irbuilder.CreateConstInBoundsGEP2_32(DTy, Dst, 0, i); - #if JL_LLVM_VERSION < 170000 - assert(cast(Dst->getType())->isOpaqueOrPointeeTypeMatches(DTy)); - #endif + Value *Elem = Ptrs[i]; + Value *Slot = irbuilder.CreateConstInBoundsGEP1_32(irbuilder.getInt8Ty(), Dst, i * sizeof(void*)); StoreInst *shadowStore = irbuilder.CreateAlignedStore(Elem, Slot, Align(sizeof(void*))); shadowStore->setOrdering(AtomicOrdering::NotAtomic); // TODO: shadowStore->setMetadata(LLVMContext::MD_tbaa, tbaa_gcframe); @@ -2133,7 +2096,7 @@ bool LateLowerGCFrame::CleanupIR(Function &F, State *S, bool *CFGModified) { // the type tag. (Note that if the size is not a constant, it will call // gc_alloc_obj, and will redundantly set the tag.) auto allocBytesIntrinsic = getOrDeclare(jl_intrinsics::GCAllocBytes); - auto ptls = get_current_ptls_from_task(builder, T_size, CI->getArgOperand(0), tbaa_gcframe); + auto ptls = get_current_ptls_from_task(builder, CI->getArgOperand(0), tbaa_gcframe); auto newI = builder.CreateCall( allocBytesIntrinsic, { @@ -2319,15 +2282,7 @@ void LateLowerGCFrame::PlaceGCFrameStore(State &S, unsigned R, unsigned MinColor // Pointee types don't have semantics, so the optimizer is // free to rewrite them if convenient. We need to change // it back here for the store. 
- #if JL_LLVM_VERSION >= 170000 assert(Val->getType() == T_prjlvalue); - #else - if (Val->getType() != T_prjlvalue) { - // Shouldn't get here when using opaque pointers, so the new BitCastInst is fine - assert(Val->getContext().supportsTypedPointers()); - Val = new BitCastInst(Val, T_prjlvalue, "", InsertBefore); - } - #endif new StoreInst(Val, slotAddress, InsertBefore); } @@ -2407,18 +2362,7 @@ void LateLowerGCFrame::PlaceRootsAndUpdateCalls(SmallVectorImpl &Colors, St for (CallInst *II : ToDelete) { II->eraseFromParent(); } - #if JL_LLVM_VERSION >= 170000 assert(slotAddress->getType() == AI->getType()); - #else - if (slotAddress->getType() != AI->getType()) { - // If we're replacing an ArrayAlloca, the pointer element type may need to be fixed up - // Shouldn't get here when using opaque pointers, so the new BitCastInst is fine - assert(slotAddress->getContext().supportsTypedPointers()); - auto BCI = new BitCastInst(slotAddress, AI->getType()); - BCI->insertAfter(slotAddress); - slotAddress = BCI; - } - #endif AI->replaceAllUsesWith(slotAddress); AI->eraseFromParent(); AI = NULL; @@ -2443,15 +2387,7 @@ void LateLowerGCFrame::PlaceRootsAndUpdateCalls(SmallVectorImpl &Colors, St slotAddress->insertAfter(gcframe); auto ValExpr = std::make_pair(Base, isa(Base->getType()) ? -1 : i); auto Elem = MaybeExtractScalar(S, ValExpr, SI); - #if JL_LLVM_VERSION >= 170000 assert(Elem->getType() == T_prjlvalue); - #else - if (Elem->getType() != T_prjlvalue) { - // Shouldn't get here when using opaque pointers, so the new BitCastInst is fine - assert(Elem->getContext().supportsTypedPointers()); - Elem = new BitCastInst(Elem, T_prjlvalue, "", SI); - } - #endif //auto Idxs = ArrayRef(Tracked[i]); //Value *Elem = ExtractScalar(Base, true, Idxs, SI); Value *shadowStore = new StoreInst(Elem, slotAddress, SI); diff --git a/src/llvm-ptls.cpp b/src/llvm-ptls.cpp index 736c1acd9525a..488dd46cade21 100644 --- a/src/llvm-ptls.cpp +++ b/src/llvm-ptls.cpp @@ -191,7 +191,7 @@ void LowerPTLS::fix_pgcstack_use(CallInst *pgcstack, Function *pgcstack_getter, builder.SetInsertPoint(fastTerm->getParent()); fastTerm->removeFromParent(); MDNode *tbaa = tbaa_gcframe; - Value *prior = emit_gc_unsafe_enter(builder, T_size, get_current_ptls_from_task(builder, T_size, get_current_task_from_pgcstack(builder, T_size, pgcstack), tbaa), true); + Value *prior = emit_gc_unsafe_enter(builder, T_size, get_current_ptls_from_task(builder, get_current_task_from_pgcstack(builder, pgcstack), tbaa), true); builder.Insert(fastTerm); phi->addIncoming(pgcstack, fastTerm->getParent()); // emit pre-return cleanup @@ -203,7 +203,7 @@ void LowerPTLS::fix_pgcstack_use(CallInst *pgcstack, Function *pgcstack_getter, for (auto &BB : *pgcstack->getParent()->getParent()) { if (isa(BB.getTerminator())) { builder.SetInsertPoint(BB.getTerminator()); - emit_gc_unsafe_leave(builder, T_size, get_current_ptls_from_task(builder, T_size, get_current_task_from_pgcstack(builder, T_size, phi), tbaa), last_gc_state, true); + emit_gc_unsafe_leave(builder, T_size, get_current_ptls_from_task(builder, get_current_task_from_pgcstack(builder, phi), tbaa), last_gc_state, true); } } } diff --git a/test/llvmpasses/alloc-opt-gcframe.ll b/test/llvmpasses/alloc-opt-gcframe.ll index e8644899f0914..f53a4d5c01df7 100644 --- a/test/llvmpasses/alloc-opt-gcframe.ll +++ b/test/llvmpasses/alloc-opt-gcframe.ll @@ -10,7 +10,7 @@ target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" ; CHECK-NOT: @julia.gc_alloc_obj ; OPAQUE: %current_task = getelementptr inbounds ptr, ptr %gcstack, i64 
-12 -; OPAQUE: [[ptls_field:%.*]] = getelementptr inbounds ptr, ptr %current_task, i64 16 +; OPAQUE: [[ptls_field:%.*]] = getelementptr inbounds i8, ptr %current_task, ; OPAQUE-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, !tbaa !0 ; OPAQUE-NEXT: %v = call noalias nonnull align {{[0-9]+}} dereferenceable({{[0-9]+}}) ptr addrspace(10) @ijl_gc_small_alloc(ptr [[ptls_load]], i32 [[SIZE_T:[0-9]+]], i32 16, i64 {{.*}} @tag {{.*}}) ; OPAQUE: store atomic ptr addrspace(10) @tag, ptr addrspace(10) {{.*}} unordered, align 8, !tbaa !4 diff --git a/test/llvmpasses/late-lower-gc-addrspaces.ll b/test/llvmpasses/late-lower-gc-addrspaces.ll index 702e44b2b0e28..9c041664a9682 100644 --- a/test/llvmpasses/late-lower-gc-addrspaces.ll +++ b/test/llvmpasses/late-lower-gc-addrspaces.ll @@ -1,6 +1,6 @@ ; This file is a part of Julia. License is MIT: https://julialang.org/license -; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='function(LateLowerGCFrame)' -S %s | FileCheck %s --check-prefixes=CHECK,OPAQUE +; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='function(LateLowerGCFrame)' -S %s | FileCheck %s target triple = "amdgcn-amd-amdhsa" target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7-ni:10:11:12:13" @@ -19,28 +19,28 @@ define void @gc_frame_lowering(i64 %a, i64 %b) { top: ; CHECK-LABEL: @gc_frame_lowering -; OPAQUE: %gcframe = call ptr @julia.new_gc_frame(i32 2) -; OPAQUE: %pgcstack = call ptr @julia.get_pgcstack() +; CHECK: %gcframe = call ptr @julia.new_gc_frame(i32 2) +; CHECK: %pgcstack = call ptr @julia.get_pgcstack() %pgcstack = call {}*** @julia.get_pgcstack() -; OPAQUE-NEXT: call void @julia.push_gc_frame(ptr %gcframe, i32 2) -; OPAQUE-NEXT: call ptr addrspace(10) @jl_box_int64 +; CHECK-NEXT: call void @julia.push_gc_frame(ptr %gcframe, i32 2) +; CHECK-NEXT: call ptr addrspace(10) @jl_box_int64 %aboxed = call {} addrspace(10)* @jl_box_int64(i64 signext %a) -; OPAQUE: [[GEP0:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT0:[0-9]+]]) -; OPAQUE-NEXT: store ptr addrspace(10) %aboxed, ptr [[GEP0]] +; CHECK: [[GEP0:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT0:[0-9]+]]) +; CHECK-NEXT: store ptr addrspace(10) %aboxed, ptr [[GEP0]] %bboxed = call {} addrspace(10)* @jl_box_int64(i64 signext %b) ; CHECK-NEXT: %bboxed = ; Make sure the same gc slot isn't re-used -; OPAQUE-NOT: call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT0]]) -; OPAQUE: [[GEP1:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT1:[0-9]+]]) -; OPAQUE-NEXT: store ptr addrspace(10) %bboxed, ptr [[GEP1]] +; CHECK-NOT: call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT0]]) +; CHECK: [[GEP1:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT1:[0-9]+]]) +; CHECK-NEXT: store ptr addrspace(10) %bboxed, ptr [[GEP1]] ; CHECK-NEXT: call void @boxed_simple call void @boxed_simple({} addrspace(10)* %aboxed, {} addrspace(10)* %bboxed) -; OPAQUE-NEXT: call void @julia.pop_gc_frame(ptr %gcframe) +; CHECK-NEXT: call void @julia.pop_gc_frame(ptr %gcframe) ret void } @@ -51,14 +51,14 @@ top: %0 = bitcast {}*** %pgcstack to {}** %current_task = getelementptr inbounds {}*, {}** %0, i64 -12 -; OPAQUE: %current_task = getelementptr inbounds ptr, ptr %0, i64 -12 -; OPAQUE-NEXT: [[ptls_field:%.*]] = getelementptr inbounds ptr, ptr %current_task, i64 16 -; 
OPAQUE-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, !tbaa !0 -; OPAQUE-NEXT: %v = call noalias nonnull ptr addrspace(10) @julia.gc_alloc_bytes(ptr [[ptls_load]], [[SIZE_T:i.[0-9]+]] 8, i64 {{.*}} @tag {{.*}}) -; OPAQUE-NEXT: [[V_HEADROOM:%.*]] = getelementptr inbounds ptr addrspace(10), ptr addrspace(10) %v, i64 -1 -; OPAQUE-NEXT: store atomic ptr addrspace(10) @tag, ptr addrspace(10) [[V_HEADROOM]] unordered, align 8, !tbaa !4 +; CHECK: %current_task = getelementptr inbounds ptr, ptr %0, i64 -12 +; CHECK-NEXT: [[ptls_field:%.*]] = getelementptr inbounds i8, ptr %current_task, +; CHECK-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, !tbaa !0 +; CHECK-NEXT: %v = call noalias nonnull ptr addrspace(10) @julia.gc_alloc_bytes(ptr [[ptls_load]], [[SIZE_T:i.[0-9]+]] 8, i64 {{.*}} @tag {{.*}}) +; CHECK-NEXT: [[V_HEADROOM:%.*]] = getelementptr inbounds ptr addrspace(10), ptr addrspace(10) %v, i64 -1 +; CHECK-NEXT: store atomic ptr addrspace(10) @tag, ptr addrspace(10) [[V_HEADROOM]] unordered, align 8, !tbaa !4 %v = call noalias {} addrspace(10)* @julia.gc_alloc_obj({}** %current_task, i64 8, {} addrspace(10)* @tag) -; OPAQUE-NEXT: ret ptr addrspace(10) %v +; CHECK-NEXT: ret ptr addrspace(10) %v ret {} addrspace(10)* %v } @@ -74,20 +74,20 @@ top: %0 = bitcast {}*** %pgcstack to {}** %current_task = getelementptr inbounds {}*, {}** %0, i64 -12 -; OPAQUE: %current_task = getelementptr inbounds ptr, ptr %0, i64 -12 -; OPAQUE-NEXT: [[ptls_field:%.*]] = getelementptr inbounds ptr, ptr %current_task, i64 16 -; OPAQUE-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, !tbaa !0 -; OPAQUE-NEXT: %v = call noalias nonnull ptr addrspace(10) @julia.gc_alloc_bytes(ptr [[ptls_load]], [[SIZE_T:i.[0-9]+]] 8, i64 {{.*}} @tag {{.*}}) -; OPAQUE-NEXT: [[V_HEADROOM:%.*]] = getelementptr inbounds ptr addrspace(10), ptr addrspace(10) %v, i64 -1 -; OPAQUE-NEXT: store atomic ptr addrspace(10) @tag, ptr addrspace(10) [[V_HEADROOM]] unordered, align 8, !tbaa !4 +; CHECK: %current_task = getelementptr inbounds ptr, ptr %0, i64 -12 +; CHECK-NEXT: [[ptls_field:%.*]] = getelementptr inbounds i8, ptr %current_task, +; CHECK-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, !tbaa !0 +; CHECK-NEXT: %v = call noalias nonnull ptr addrspace(10) @julia.gc_alloc_bytes(ptr [[ptls_load]], [[SIZE_T:i.[0-9]+]] 8, i64 {{.*}} @tag {{.*}}) +; CHECK-NEXT: [[V_HEADROOM:%.*]] = getelementptr inbounds ptr addrspace(10), ptr addrspace(10) %v, i64 -1 +; CHECK-NEXT: store atomic ptr addrspace(10) @tag, ptr addrspace(10) [[V_HEADROOM]] unordered, align 8, !tbaa !4 %v = call noalias {} addrspace(10)* @julia.gc_alloc_obj({}** %current_task, i64 8, {} addrspace(10)* @tag) -; OPAQUE-NEXT: %v64 = bitcast ptr addrspace(10) %v to ptr addrspace(10) +; CHECK-NEXT: %v64 = bitcast ptr addrspace(10) %v to ptr addrspace(10) %v64 = bitcast {} addrspace(10)* %v to i64 addrspace(10)* -; OPAQUE-NEXT: %loadedval = load i64, ptr addrspace(10) %v64, align 8, !range !7 +; CHECK-NEXT: %loadedval = load i64, ptr addrspace(10) %v64, align 8, !range !7 %loadedval = load i64, i64 addrspace(10)* %v64, align 8, !range !0, !invariant.load !1 -; OPAQUE-NEXT: store i64 %loadedval, ptr addrspace(10) %v64, align 8, !noalias !8 +; CHECK-NEXT: store i64 %loadedval, ptr addrspace(10) %v64, align 8, !noalias !8 store i64 %loadedval, i64 addrspace(10)* %v64, align 8, !noalias !2 -; OPAQUE-NEXT: %lv2 = load i64, ptr addrspace(10) %v64, align 8, !tbaa !11, !range !7 +; CHECK-NEXT: %lv2 = load i64, ptr addrspace(10) %v64, align 
8, !tbaa !11, !range !7 %lv2 = load i64, i64 addrspace(10)* %v64, align 8, !range !0, !tbaa !4 ; CHECK-NEXT: ret void ret void diff --git a/test/llvmpasses/late-lower-gc.ll b/test/llvmpasses/late-lower-gc.ll index 093cab1358141..d294847db8f9d 100644 --- a/test/llvmpasses/late-lower-gc.ll +++ b/test/llvmpasses/late-lower-gc.ll @@ -1,6 +1,6 @@ ; This file is a part of Julia. License is MIT: https://julialang.org/license -; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='function(LateLowerGCFrame)' -S %s | FileCheck %s --check-prefixes=CHECK,OPAQUE +; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='function(LateLowerGCFrame)' -S %s | FileCheck %s @tag = external addrspace(10) global {}, align 16 @@ -16,28 +16,28 @@ define void @gc_frame_lowering(i64 %a, i64 %b) { top: ; CHECK-LABEL: @gc_frame_lowering -; OPAQUE: %gcframe = call ptr @julia.new_gc_frame(i32 2) -; OPAQUE: %pgcstack = call ptr @julia.get_pgcstack() +; CHECK: %gcframe = call ptr @julia.new_gc_frame(i32 2) +; CHECK: %pgcstack = call ptr @julia.get_pgcstack() %pgcstack = call {}*** @julia.get_pgcstack() -; OPAQUE-NEXT: call void @julia.push_gc_frame(ptr %gcframe, i32 2) -; OPAQUE-NEXT: call ptr addrspace(10) @jl_box_int64 +; CHECK-NEXT: call void @julia.push_gc_frame(ptr %gcframe, i32 2) +; CHECK-NEXT: call ptr addrspace(10) @jl_box_int64 %aboxed = call {} addrspace(10)* @jl_box_int64(i64 signext %a) -; OPAQUE: [[GEP0:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT0:[0-9]+]]) -; OPAQUE-NEXT: store ptr addrspace(10) %aboxed, ptr [[GEP0]] +; CHECK: [[GEP0:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT0:[0-9]+]]) +; CHECK-NEXT: store ptr addrspace(10) %aboxed, ptr [[GEP0]] %bboxed = call {} addrspace(10)* @jl_box_int64(i64 signext %b) ; CHECK-NEXT: %bboxed = ; Make sure the same gc slot isn't re-used -; OPAQUE-NOT: call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT0]]) -; OPAQUE: [[GEP1:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT1:[0-9]+]]) -; OPAQUE-NEXT: store ptr addrspace(10) %bboxed, ptr [[GEP1]] +; CHECK-NOT: call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT0]]) +; CHECK: [[GEP1:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 [[GEPSLOT1:[0-9]+]]) +; CHECK-NEXT: store ptr addrspace(10) %bboxed, ptr [[GEP1]] ; CHECK-NEXT: call void @boxed_simple call void @boxed_simple({} addrspace(10)* %aboxed, {} addrspace(10)* %bboxed) -; OPAQUE-NEXT: call void @julia.pop_gc_frame(ptr %gcframe) +; CHECK-NEXT: call void @julia.pop_gc_frame(ptr %gcframe) ret void } @@ -48,14 +48,14 @@ top: %0 = bitcast {}*** %pgcstack to {}** %current_task = getelementptr inbounds {}*, {}** %0, i64 -12 -; OPAQUE: %current_task = getelementptr inbounds ptr, ptr %0, i64 -12 -; OPAQUE-NEXT: [[ptls_field:%.*]] = getelementptr inbounds ptr, ptr %current_task, i64 16 -; OPAQUE-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, !tbaa !0 -; OPAQUE-NEXT: %v = call noalias nonnull ptr addrspace(10) @julia.gc_alloc_bytes(ptr [[ptls_load]], [[SIZE_T:i.[0-9]+]] 8, i64 {{.*}} @tag {{.*}}) -; OPAQUE-NEXT: [[V_HEADROOM:%.*]] = getelementptr inbounds ptr addrspace(10), ptr addrspace(10) %v, i64 -1 -; OPAQUE-NEXT: store atomic ptr addrspace(10) @tag, ptr addrspace(10) [[V_HEADROOM]] unordered, align 8, !tbaa !4 +; CHECK: %current_task = getelementptr inbounds ptr, ptr %0, i64 -12 +; CHECK-NEXT: [[ptls_field:%.*]] = getelementptr inbounds i8, ptr %current_task, +; CHECK-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, 
!tbaa !0 +; CHECK-NEXT: %v = call noalias nonnull ptr addrspace(10) @julia.gc_alloc_bytes(ptr [[ptls_load]], [[SIZE_T:i.[0-9]+]] 8, i64 {{.*}} @tag {{.*}}) +; CHECK-NEXT: [[V_HEADROOM:%.*]] = getelementptr inbounds ptr addrspace(10), ptr addrspace(10) %v, i64 -1 +; CHECK-NEXT: store atomic ptr addrspace(10) @tag, ptr addrspace(10) [[V_HEADROOM]] unordered, align 8, !tbaa !4 %v = call noalias {} addrspace(10)* @julia.gc_alloc_obj({}** %current_task, i64 8, {} addrspace(10)* @tag) -; OPAQUE-NEXT: ret ptr addrspace(10) %v +; CHECK-NEXT: ret ptr addrspace(10) %v ret {} addrspace(10)* %v } @@ -71,20 +71,20 @@ top: %0 = bitcast {}*** %pgcstack to {}** %current_task = getelementptr inbounds {}*, {}** %0, i64 -12 -; OPAQUE: %current_task = getelementptr inbounds ptr, ptr %0, i64 -12 -; OPAQUE-NEXT: [[ptls_field:%.*]] = getelementptr inbounds ptr, ptr %current_task, i64 16 -; OPAQUE-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, !tbaa !0 -; OPAQUE-NEXT: %v = call noalias nonnull ptr addrspace(10) @julia.gc_alloc_bytes(ptr [[ptls_load]], [[SIZE_T:i.[0-9]+]] 8, i64 {{.*}} @tag {{.*}}) -; OPAQUE-NEXT: [[V_HEADROOM:%.*]] = getelementptr inbounds ptr addrspace(10), ptr addrspace(10) %v, i64 -1 -; OPAQUE-NEXT: store atomic ptr addrspace(10) @tag, ptr addrspace(10) [[V_HEADROOM]] unordered, align 8, !tbaa !4 +; CHECK: %current_task = getelementptr inbounds ptr, ptr %0, i64 -12 +; CHECK-NEXT: [[ptls_field:%.*]] = getelementptr inbounds i8, ptr %current_task, +; CHECK-NEXT: [[ptls_load:%.*]] = load ptr, ptr [[ptls_field]], align 8, !tbaa !0 +; CHECK-NEXT: %v = call noalias nonnull ptr addrspace(10) @julia.gc_alloc_bytes(ptr [[ptls_load]], [[SIZE_T:i.[0-9]+]] 8, i64 {{.*}} @tag {{.*}}) +; CHECK-NEXT: [[V_HEADROOM:%.*]] = getelementptr inbounds ptr addrspace(10), ptr addrspace(10) %v, i64 -1 +; CHECK-NEXT: store atomic ptr addrspace(10) @tag, ptr addrspace(10) [[V_HEADROOM]] unordered, align 8, !tbaa !4 %v = call noalias {} addrspace(10)* @julia.gc_alloc_obj({}** %current_task, i64 8, {} addrspace(10)* @tag) -; OPAQUE-NEXT: %v64 = bitcast ptr addrspace(10) %v to ptr addrspace(10) +; CHECK-NEXT: %v64 = bitcast ptr addrspace(10) %v to ptr addrspace(10) %v64 = bitcast {} addrspace(10)* %v to i64 addrspace(10)* -; OPAQUE-NEXT: %loadedval = load i64, ptr addrspace(10) %v64, align 8, !range !7 +; CHECK-NEXT: %loadedval = load i64, ptr addrspace(10) %v64, align 8, !range !7 %loadedval = load i64, i64 addrspace(10)* %v64, align 8, !range !0, !invariant.load !1 -; OPAQUE-NEXT: store i64 %loadedval, ptr addrspace(10) %v64, align 8, !noalias !8 +; CHECK-NEXT: store i64 %loadedval, ptr addrspace(10) %v64, align 8, !noalias !8 store i64 %loadedval, i64 addrspace(10)* %v64, align 8, !noalias !2 -; OPAQUE-NEXT: %lv2 = load i64, ptr addrspace(10) %v64, align 8, !tbaa !11, !range !7 +; CHECK-NEXT: %lv2 = load i64, ptr addrspace(10) %v64, align 8, !tbaa !11, !range !7 %lv2 = load i64, i64 addrspace(10)* %v64, align 8, !range !0, !tbaa !4 ; CHECK-NEXT: ret void ret void @@ -162,13 +162,13 @@ define void @decayar([2 x {} addrspace(10)* addrspace(11)*] %ar) { ; CHECK-LABEL: @decayar -; OPAQUE: %gcframe = call ptr @julia.new_gc_frame(i32 2) -; OPAQUE: [[gc_slot_addr_:%.*]]1 = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 1) -; OPAQUE: store ptr addrspace(10) %l0, ptr [[gc_slot_addr_:%.*]], align 8 -; OPAQUE: [[gc_slot_addr_:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 0) -; OPAQUE: store ptr addrspace(10) %l1, ptr [[gc_slot_addr_:%.*]], align 8 -; OPAQUE: %r = call i32 @callee_root(ptr 
addrspace(10) %l0, ptr addrspace(10) %l1) -; OPAQUE: call void @julia.pop_gc_frame(ptr %gcframe) +; CHECK: %gcframe = call ptr @julia.new_gc_frame(i32 2) +; CHECK: [[gc_slot_addr_:%.*]]1 = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 1) +; CHECK: store ptr addrspace(10) %l0, ptr [[gc_slot_addr_:%.*]], align 8 +; CHECK: [[gc_slot_addr_:%.*]] = call ptr @julia.get_gc_frame_slot(ptr %gcframe, i32 0) +; CHECK: store ptr addrspace(10) %l1, ptr [[gc_slot_addr_:%.*]], align 8 +; CHECK: %r = call i32 @callee_root(ptr addrspace(10) %l0, ptr addrspace(10) %l1) +; CHECK: call void @julia.pop_gc_frame(ptr %gcframe) !0 = !{i64 0, i64 23} !1 = !{!1} diff --git a/test/llvmpasses/names.jl b/test/llvmpasses/names.jl index fe692d0fab787..1ab2204044804 100644 --- a/test/llvmpasses/names.jl +++ b/test/llvmpasses/names.jl @@ -135,7 +135,8 @@ emit(f2, Float64, Float64, Float64, Float64, Float64, Float64, Float64) # CHECK: define {{(swiftcc )?}}nonnull ptr @julia_f5 # CHECK-SAME: %"a::A" -# CHECK: %"a::A.b_ptr.c_ptr.d +# CHECK: %"a::A.d +# COM: this text check relies on our LLVM code emission being relatively poor, which is not always the case emit(f5, A) # CHECK: define {{(swiftcc )?}}nonnull ptr @julia_f6 From bcf41ba0cdd2aa8c1f21c8aa08b52b017ffbd014 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Wed, 11 Sep 2024 02:04:35 +0900 Subject: [PATCH 006/537] minor fixup for JuliaLang/julia#55705 (#55726) --- base/compiler/abstractinterpretation.jl | 3 ++- test/compiler/inference.jl | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 83a39ce10d891..bb5f2dd1ad180 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -2232,7 +2232,8 @@ function abstract_throw_methoderror(interp::AbstractInterpreter, argtypes::Vecto elseif !isvarargtype(argtypes[2]) MethodError else - tmerge(𝕃ᵢ, MethodError, ArgumentError) + ⊔ = join(typeinf_lattice(interp)) + MethodError ⊔ ArgumentError end return CallMeta(Union{}, exct, EFFECTS_THROWS, NoCallInfo()) end diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index f15df49d75745..9454c53a09fb7 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -6137,3 +6137,7 @@ end == TypeError @test Base.infer_exception_type((Char,)) do x invoke(f_invoke_exct, Tuple{Number}, x) end == TypeError + +@test Base.infer_exception_type((Vector{Any},)) do args + Core.throw_methoderror(args...) 
+end == Union{MethodError,ArgumentError} From a7c9235afe27fd34e31fcce387ade7f508d45003 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 10 Sep 2024 14:43:33 -0400 Subject: [PATCH 007/537] [REPL] prevent silent hang if precompile script async blocks fail (#55685) --- stdlib/REPL/src/Terminals.jl | 2 ++ stdlib/REPL/src/precompile.jl | 12 ++++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/stdlib/REPL/src/Terminals.jl b/stdlib/REPL/src/Terminals.jl index 4f3e99f1d206c..0cf6888d248e8 100644 --- a/stdlib/REPL/src/Terminals.jl +++ b/stdlib/REPL/src/Terminals.jl @@ -97,6 +97,7 @@ abstract type UnixTerminal <: TextTerminal end pipe_reader(t::UnixTerminal) = t.in_stream::IO pipe_writer(t::UnixTerminal) = t.out_stream::IO +@nospecialize mutable struct TerminalBuffer <: UnixTerminal out_stream::IO end @@ -107,6 +108,7 @@ mutable struct TTYTerminal <: UnixTerminal out_stream::IO err_stream::IO end +@specialize const CSI = "\x1b[" diff --git a/stdlib/REPL/src/precompile.jl b/stdlib/REPL/src/precompile.jl index a6effb9f013fc..82a1a0bb78ee8 100644 --- a/stdlib/REPL/src/precompile.jl +++ b/stdlib/REPL/src/precompile.jl @@ -96,7 +96,7 @@ let repltask = @task try Base.run_std_repl(REPL, false, :yes, true) finally - redirect_stderr(isopen(orig_stderr) ? orig_stderr : devnull) + redirect_stdin(isopen(orig_stdin) ? orig_stdin : devnull) redirect_stdout(isopen(orig_stdout) ? orig_stdout : devnull) close(pts) end @@ -106,14 +106,14 @@ let redirect_stdin(pts) redirect_stdout(pts) redirect_stderr(pts) - REPL.print_qualified_access_warning(Base.Iterators, Base, :minimum) # trigger the warning while stderr is suppressed try - schedule(repltask) - # wait for the definitive prompt before start writing to the TTY - readuntil(output_copy, JULIA_PROMPT) + REPL.print_qualified_access_warning(Base.Iterators, Base, :minimum) # trigger the warning while stderr is suppressed finally redirect_stderr(isopen(orig_stderr) ? orig_stderr : devnull) end + schedule(repltask) + # wait for the definitive prompt before start writing to the TTY + readuntil(output_copy, JULIA_PROMPT) write(debug_output, "\n#### REPL STARTED ####\n") sleep(0.1) readavailable(output_copy) @@ -148,9 +148,9 @@ let write(ptm, "$CTRL_D") wait(repltask) finally - close(pts) redirect_stdin(isopen(orig_stdin) ? orig_stdin : devnull) redirect_stdout(isopen(orig_stdout) ? orig_stdout : devnull) + close(pts) end wait(tee) end From 56451d8eb18f6a1f9339eeb656033b417ec615b6 Mon Sep 17 00:00:00 2001 From: Jakob Nybo Nissen Date: Tue, 10 Sep 2024 20:45:38 +0200 Subject: [PATCH 008/537] Various fixes to byte / bytearray search (#54579) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This was originally intended as a targeted fix to #54578, but I ran into a bunch of smaller issues with this code that also needed to be solved and it turned out to be difficult to fix them with small, trivial PRs. I would also like to refactor this whole file, but I want these correctness fixes to be merged first, because a larger refactoring has higher risk of getting stuck without getting reviewed and merged. ## Larger things that needs decisions * The internal union `Base.ByteArray` has been deleted. Instead, the unions `DenseInt8` and `DenseUInt8` have been added. These more comprehensively cover the types that was meant, e.g. `Memory{UInt8}` was incorrectly not covered by the former. 
As stated in the TODO, the concept of a "memory backed dense byte array" is needed throughout Julia, so this ideally needs to be implemented as a single type and used throughout Base. The fix here is a decent temporary solution. See #53178 #54581 * The `findall` docstring between two arrays was incorrectly not attached to the method - now it is. **Note that this change _changes_ the documentation** since it includes a docstring that was previously missed. Hence, it's an API addition. * Added a new minimal `testhelpers/OffsetDenseArrays.jl` which provide a `DenseVector` with offset axes for testing purposes. ## Trivial fixes * `findfirst(==(Int8(-1)), [0xff])` and similar findlast, findnext and findprev is no longer buggy, see #54578 * `findfirst([0x0ff], Int8[-1])` is similarly no longer buggy, see #54578 * `findnext(==('\xa6'), "æ", 1)` and `findprev(==('\xa6'), "æa", 2)` no longer incorrectly throws an error * The byte-oriented find* functions now work correctly with offset arrays * Fixed incorrect use of `GC.@preserve`, where the pointer was taken before the preserve block. * More of the optimised string methods now also apply to `SubString{String}` Closes #54578 Co-authored-by: Martin Holters --- base/char.jl | 1 + base/strings/search.jl | 136 +++++++++++++++++--------- test/strings/search.jl | 49 ++++++++++ test/testhelpers/OffsetDenseArrays.jl | 31 ++++++ 4 files changed, 170 insertions(+), 47 deletions(-) create mode 100644 test/testhelpers/OffsetDenseArrays.jl diff --git a/base/char.jl b/base/char.jl index bc68a672ce0ca..2e8410f6903e2 100644 --- a/base/char.jl +++ b/base/char.jl @@ -223,6 +223,7 @@ hash(x::Char, h::UInt) = hash_uint64(((bitcast(UInt32, x) + UInt64(0xd4d64234)) << 32) ⊻ UInt64(h)) first_utf8_byte(c::Char) = (bitcast(UInt32, c) >> 24) % UInt8 +first_utf8_byte(c::AbstractChar) = first_utf8_byte(Char(c)::Char) # fallbacks: isless(x::AbstractChar, y::AbstractChar) = isless(Char(x), Char(y)) diff --git a/base/strings/search.jl b/base/strings/search.jl index b9c14f06e0898..9bd69ae2f8a03 100644 --- a/base/strings/search.jl +++ b/base/strings/search.jl @@ -10,7 +10,29 @@ match strings with [`match`](@ref). """ abstract type AbstractPattern end -nothing_sentinel(i) = i == 0 ? nothing : i +# TODO: These unions represent bytes in memory that can be accessed via a pointer. +# this property is used throughout Julia, e.g. also in IO code. +# This deserves a better solution - see #53178. +# If such a better solution comes in place, these unions should be replaced. +const DenseInt8 = Union{ + DenseArray{Int8}, + FastContiguousSubArray{Int8,N,<:DenseArray} where N +} + +# Note: This union is different from that above in that it includes CodeUnits. 
+# Currently, this is redundant as CodeUnits <: DenseVector, but this subtyping +# is buggy and may be removed in the future, see #54002 +const DenseUInt8 = Union{ + DenseArray{UInt8}, + FastContiguousSubArray{UInt8,N,<:DenseArray} where N, + CodeUnits{UInt8, <:Union{String, SubString{String}}}, + FastContiguousSubArray{UInt8,N,<:CodeUnits{UInt8, <:Union{String, SubString{String}}}} where N, +} + +const DenseUInt8OrInt8 = Union{DenseUInt8, DenseInt8} + +last_byteindex(x::Union{String, SubString{String}}) = ncodeunits(x) +last_byteindex(x::DenseUInt8OrInt8) = lastindex(x) function last_utf8_byte(c::Char) u = reinterpret(UInt32, c) @@ -30,11 +52,11 @@ function findnext(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:AbstractChar} end @inbounds isvalid(s, i) || string_index_err(s, i) c = pred.x - c ≤ '\x7f' && return nothing_sentinel(_search(s, c % UInt8, i)) + c ≤ '\x7f' && return _search(s, first_utf8_byte(c), i) while true i = _search(s, first_utf8_byte(c), i) - i == 0 && return nothing - pred(s[i]) && return i + i === nothing && return nothing + isvalid(s, i) && pred(s[i]) && return i i = nextind(s, i) end end @@ -47,31 +69,41 @@ const DenseBytes = Union{ CodeUnits{UInt8, <:Union{String, SubString{String}}}, } -const ByteArray = Union{DenseBytes, DenseArrayType{Int8}} +function findfirst(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:Union{UInt8, Int8}}, a::Union{DenseInt8, DenseUInt8}) + findnext(pred, a, firstindex(a)) +end -findfirst(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:Union{Int8,UInt8}}, a::ByteArray) = - nothing_sentinel(_search(a, pred.x)) +function findnext(pred::Fix2{<:Union{typeof(isequal),typeof(==)},UInt8}, a::DenseUInt8, i::Integer) + _search(a, pred.x, i) +end -findnext(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:Union{Int8,UInt8}}, a::ByteArray, i::Integer) = - nothing_sentinel(_search(a, pred.x, i)) +function findnext(pred::Fix2{<:Union{typeof(isequal),typeof(==)},Int8}, a::DenseInt8, i::Integer) + _search(a, pred.x, i) +end -findfirst(::typeof(iszero), a::ByteArray) = nothing_sentinel(_search(a, zero(UInt8))) -findnext(::typeof(iszero), a::ByteArray, i::Integer) = nothing_sentinel(_search(a, zero(UInt8), i)) +# iszero is special, in that the bitpattern for zero for Int8 and UInt8 is the same, +# so we can use memchr even if we search for an Int8 in an UInt8 array or vice versa +findfirst(::typeof(iszero), a::DenseUInt8OrInt8) = _search(a, zero(UInt8)) +findnext(::typeof(iszero), a::DenseUInt8OrInt8, i::Integer) = _search(a, zero(UInt8), i) -function _search(a::Union{String,SubString{String},<:ByteArray}, b::Union{Int8,UInt8}, i::Integer = 1) - if i < 1 +function _search(a::Union{String,SubString{String},DenseUInt8OrInt8}, b::Union{Int8,UInt8}, i::Integer = firstindex(a)) + fst = firstindex(a) + lst = last_byteindex(a) + if i < fst throw(BoundsError(a, i)) end - n = sizeof(a) - if i > n - return i == n+1 ? 0 : throw(BoundsError(a, i)) + n_bytes = lst - i + 1 + if i > lst + return i == lst+1 ? nothing : throw(BoundsError(a, i)) end - p = pointer(a) - q = GC.@preserve a ccall(:memchr, Ptr{UInt8}, (Ptr{UInt8}, Int32, Csize_t), p+i-1, b, n-i+1) - return q == C_NULL ? 0 : Int(q-p+1) + GC.@preserve a begin + p = pointer(a) + q = ccall(:memchr, Ptr{UInt8}, (Ptr{UInt8}, Int32, Csize_t), p+i-fst, b, n_bytes) + end + return q == C_NULL ? 
nothing : (q-p+fst) % Int end -function _search(a::ByteArray, b::AbstractChar, i::Integer = 1) +function _search(a::DenseUInt8, b::AbstractChar, i::Integer = firstindex(a)) if isascii(b) _search(a,UInt8(b),i) else @@ -80,41 +112,51 @@ function _search(a::ByteArray, b::AbstractChar, i::Integer = 1) end function findprev(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:AbstractChar}, - s::String, i::Integer) + s::Union{String, SubString{String}}, i::Integer) c = pred.x - c ≤ '\x7f' && return nothing_sentinel(_rsearch(s, c % UInt8, i)) + c ≤ '\x7f' && return _rsearch(s, first_utf8_byte(c), i) b = first_utf8_byte(c) while true i = _rsearch(s, b, i) - i == 0 && return nothing - pred(s[i]) && return i + i == nothing && return nothing + isvalid(s, i) && pred(s[i]) && return i i = prevind(s, i) end end -findlast(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:Union{Int8,UInt8}}, a::ByteArray) = - nothing_sentinel(_rsearch(a, pred.x)) +function findlast(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:Union{Int8,UInt8}}, a::DenseUInt8OrInt8) + findprev(pred, a, lastindex(a)) +end -findprev(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:Union{Int8,UInt8}}, a::ByteArray, i::Integer) = - nothing_sentinel(_rsearch(a, pred.x, i)) +function findprev(pred::Fix2{<:Union{typeof(isequal),typeof(==)},Int8}, a::DenseInt8, i::Integer) + _rsearch(a, pred.x, i) +end -findlast(::typeof(iszero), a::ByteArray) = nothing_sentinel(_rsearch(a, zero(UInt8))) -findprev(::typeof(iszero), a::ByteArray, i::Integer) = nothing_sentinel(_rsearch(a, zero(UInt8), i)) +function findprev(pred::Fix2{<:Union{typeof(isequal),typeof(==)},UInt8}, a::DenseUInt8, i::Integer) + _rsearch(a, pred.x, i) +end -function _rsearch(a::Union{String,ByteArray}, b::Union{Int8,UInt8}, i::Integer = sizeof(a)) - if i < 1 - return i == 0 ? 0 : throw(BoundsError(a, i)) +# See comments above for findfirst(::typeof(iszero)) methods +findlast(::typeof(iszero), a::DenseUInt8OrInt8) = _rsearch(a, zero(UInt8)) +findprev(::typeof(iszero), a::DenseUInt8OrInt8, i::Integer) = _rsearch(a, zero(UInt8), i) + +function _rsearch(a::Union{String,SubString{String},DenseUInt8OrInt8}, b::Union{Int8,UInt8}, i::Integer = last_byteindex(a)) + fst = firstindex(a) + lst = last_byteindex(a) + if i < fst + return i == fst - 1 ? nothing : throw(BoundsError(a, i)) + end + if i > lst + return i == lst+1 ? nothing : throw(BoundsError(a, i)) end - n = sizeof(a) - if i > n - return i == n+1 ? 0 : throw(BoundsError(a, i)) + GC.@preserve a begin + p = pointer(a) + q = ccall(:memrchr, Ptr{UInt8}, (Ptr{UInt8}, Int32, Csize_t), p, b, i-fst+1) end - p = pointer(a) - q = GC.@preserve a ccall(:memrchr, Ptr{UInt8}, (Ptr{UInt8}, Int32, Csize_t), p, b, i) - return q == C_NULL ? 0 : Int(q-p+1) + return q == C_NULL ? nothing : (q-p+fst) % Int end -function _rsearch(a::ByteArray, b::AbstractChar, i::Integer = length(a)) +function _rsearch(a::DenseUInt8, b::AbstractChar, i::Integer = length(a)) if isascii(b) _rsearch(a,UInt8(b),i) else @@ -224,18 +266,19 @@ end in(c::AbstractChar, s::AbstractString) = (findfirst(isequal(c),s)!==nothing) -function _searchindex(s::Union{AbstractString,ByteArray}, +function _searchindex(s::Union{AbstractString,DenseUInt8OrInt8}, t::Union{AbstractString,AbstractChar,Int8,UInt8}, i::Integer) + sentinel = firstindex(s) - 1 x = Iterators.peel(t) if isnothing(x) - return 1 <= i <= nextind(s,lastindex(s))::Int ? i : + return firstindex(s) <= i <= nextind(s,lastindex(s))::Int ? 
i : throw(BoundsError(s, i)) end t1, trest = x while true i = findnext(isequal(t1),s,i) - if i === nothing return 0 end + if i === nothing return sentinel end ii = nextind(s, i)::Int a = Iterators.Stateful(trest) matched = all(splat(==), zip(SubString(s, ii), a)) @@ -509,9 +552,8 @@ julia> findall(UInt8[1,2], UInt8[1,2,3,1,2]) !!! compat "Julia 1.3" This method requires at least Julia 1.3. """ - -function findall(t::Union{AbstractString, AbstractPattern, AbstractVector{<:Union{Int8,UInt8}}}, - s::Union{AbstractString, AbstractPattern, AbstractVector{<:Union{Int8,UInt8}}}, +function findall(t::Union{AbstractString, AbstractPattern, AbstractVector{UInt8}}, + s::Union{AbstractString, AbstractPattern, AbstractVector{UInt8}}, ; overlap::Bool=false) found = UnitRange{Int}[] i, e = firstindex(s), lastindex(s) @@ -564,7 +606,7 @@ function _rsearchindex(s::AbstractString, end end -function _rsearchindex(s::String, t::String, i::Integer) +function _rsearchindex(s::Union{String, SubString{String}}, t::Union{String, SubString{String}}, i::Integer) # Check for fast case of a single byte if lastindex(t) == 1 return something(findprev(isequal(t[1]), s, i), 0) diff --git a/test/strings/search.jl b/test/strings/search.jl index 692286359868d..d8883bad24b48 100644 --- a/test/strings/search.jl +++ b/test/strings/search.jl @@ -155,6 +155,16 @@ for str in [u8str] @test findprev(isequal('ε'), str, 4) === nothing end +# See the comments in #54579 +@testset "Search for invalid chars" begin + @test findfirst(==('\xff'), "abc\xffde") == 4 + @test findprev(isequal('\xa6'), "abc\xa69", 5) == 4 + @test isnothing(findfirst(==('\xff'), "abcdeæd")) + + @test isnothing(findnext(==('\xa6'), "æ", 1)) + @test isnothing(findprev(==('\xa6'), "æa", 2)) +end + # string forward search with a single-char string @test findfirst("x", astr) === nothing @test findfirst("H", astr) == 1:1 @@ -445,6 +455,45 @@ end @test_throws BoundsError findprev(pattern, A, -3) end end + + @test findall([0x01, 0x02], [0x03, 0x01, 0x02, 0x01, 0x02, 0x06]) == [2:3, 4:5] + @test isempty(findall([0x04, 0x05], [0x03, 0x04, 0x06])) +end + +# Issue 54578 +@testset "No conflation of Int8 and UInt8" begin + # Work for mixed types if the values are the same + @test findfirst(==(Int8(1)), [0x01]) == 1 + @test findnext(iszero, Int8[0, -2, 0, -3], 2) == 3 + @test findfirst(Int8[1,4], UInt8[0, 2, 4, 1, 8, 1, 4, 2]) == 6:7 + @test findprev(UInt8[5, 6], Int8[1, 9, 2, 5, 6, 3], 6) == 4:5 + + # Returns nothing for the same methods if the values are different, + # even if the bitpatterns are the same + @test isnothing(findfirst(==(Int8(-1)), [0xff])) + @test isnothing(findnext(isequal(0xff), Int8[-1, -2, -1], 2)) + @test isnothing(findfirst(UInt8[0xff, 0xfe], Int8[0, -1, -2, 1, 8, 1, 4, 2])) + @test isnothing(findprev(UInt8[0xff, 0xfe], Int8[1, 9, 2, -1, -2, 3], 6)) +end + +@testset "DenseArray with offsets" begin + isdefined(Main, :OffsetDenseArrays) || @eval Main include("../testhelpers/OffsetDenseArrays.jl") + OffsetDenseArrays = Main.OffsetDenseArrays + + A = OffsetDenseArrays.OffsetDenseArray(collect(0x61:0x69), 100) + @test findfirst(==(0x61), A) == 101 + @test findlast(==(0x61), A) == 101 + @test findfirst(==(0x00), A) === nothing + + @test findfirst([0x62, 0x63, 0x64], A) == 102:104 + @test findlast([0x63, 0x64], A) == 103:104 + @test findall([0x62, 0x63], A) == [102:103] + + @test findfirst(iszero, A) === nothing + A = OffsetDenseArrays.OffsetDenseArray([0x01, 0x02, 0x00, 0x03], -100) + @test findfirst(iszero, A) == -97 + @test findnext(==(0x02), A, -99) == -98 + 
@test findnext(==(0x02), A, -97) === nothing end # issue 32568 diff --git a/test/testhelpers/OffsetDenseArrays.jl b/test/testhelpers/OffsetDenseArrays.jl new file mode 100644 index 0000000000000..44a1b8d627800 --- /dev/null +++ b/test/testhelpers/OffsetDenseArrays.jl @@ -0,0 +1,31 @@ +""" + module OffsetDenseArrays + +A minimal implementation of an offset array which is also <: DenseArray. +""" +module OffsetDenseArrays + +struct OffsetDenseArray{A <: DenseVector, T} <: DenseVector{T} + x::A + offset::Int +end +OffsetDenseArray(x::AbstractVector{T}, i::Integer) where {T} = OffsetDenseArray{typeof(x), T}(x, Int(i)) + +Base.size(x::OffsetDenseArray) = size(x.x) +Base.pointer(x::OffsetDenseArray) = pointer(x.x) + +function Base.getindex(x::OffsetDenseArray, i::Integer) + @boundscheck checkbounds(x.x, i - x.offset) + x.x[i - x.offset] +end + +function Base.setindex(x::OffsetDenseArray, v, i::Integer) + @boundscheck checkbounds(x.x, i - x.offset) + x.x[i - x.offset] = v +end + +IndexStyle(::Type{<:OffsetDenseArray}) = Base.IndexLinear() +Base.axes(x::OffsetDenseArray) = (x.offset + 1 : x.offset + length(x.x),) +Base.keys(x::OffsetDenseArray) = only(axes(x)) + +end # module From c6c449ccdb75fbd72700704ae669e1f98e512206 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 10 Sep 2024 16:13:52 -0400 Subject: [PATCH 009/537] codegen: deduplicate code for calling a specsig (#55728) I am tired of having 3 gratuitously different versions of this code to maintain. --- src/codegen.cpp | 296 +++++++++--------------------------------------- 1 file changed, 56 insertions(+), 240 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 9184e4895ab6d..a82056eb36e21 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2342,7 +2342,8 @@ static inline jl_cgval_t mark_julia_type(jl_codectx_t &ctx, Value *v, bool isbox // replace T::Type{T} with T return ghostValue(ctx, typ); } - } else if (jl_is_datatype(typ) && jl_is_datatype_singleton((jl_datatype_t*)typ)) { + } + else if (jl_is_datatype(typ) && jl_is_datatype_singleton((jl_datatype_t*)typ)) { // no need to explicitly load/store a constant/ghost value return ghostValue(ctx, typ); } @@ -5007,17 +5008,13 @@ static CallInst *emit_jlcall(jl_codectx_t &ctx, JuliaFunction<> *theFptr, Value return emit_jlcall(ctx, prepare_call(theFptr), theF, argv, nargs, trampoline); } -static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_closure, jl_value_t *specTypes, jl_value_t *jlretty, llvm::Value *callee, StringRef specFunctionObject, jl_code_instance_t *fromexternal, - ArrayRef argv, size_t nargs, jl_returninfo_t::CallingConv *cc, unsigned *return_roots, jl_value_t *inferred_retty) +static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_closure, jl_value_t *specTypes, jl_value_t *jlretty, jl_returninfo_t &returninfo, jl_code_instance_t *fromexternal, + ArrayRef argv, size_t nargs) { ++EmittedSpecfunCalls; // emit specialized call site bool gcstack_arg = JL_FEAT_TEST(ctx, gcstack_arg); - jl_returninfo_t returninfo = get_specsig_function(ctx, jl_Module, callee, specFunctionObject, specTypes, jlretty, is_opaque_closure, gcstack_arg); FunctionType *cft = returninfo.decl.getFunctionType(); - *cc = returninfo.cc; - *return_roots = returninfo.return_roots; - size_t nfargs = cft->getNumParams(); SmallVector argvals(nfargs); unsigned idx = 0; @@ -5059,16 +5056,17 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos // n.b.: specTypes is required to be a datatype by construction for specsig 
jl_cgval_t arg = argv[i]; if (is_opaque_closure && i == 0) { - // Special optimization for opaque closures: We know that specsig opaque - // closures don't look at their type tag (they are fairly quickly discarded - // for their environments). Therefore, we can just pass these as a pointer, - // rather than a boxed value. + // Special implementation for opaque closures: their jt and thus + // julia_type_to_llvm values are likely wrong, so override the + // behavior here to directly pass the expected pointer based instead + // just on passing arg as a pointer arg = value_to_pointer(ctx, arg); argvals[idx] = decay_derived(ctx, data_pointer(ctx, arg)); } else if (is_uniquerep_Type(jt)) { continue; - } else { + } + else { bool isboxed = deserves_argbox(jt); Type *et = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, jt); if (type_is_ghost(et)) @@ -5079,7 +5077,6 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos } else if (et->isAggregateType()) { arg = value_to_pointer(ctx, arg); - // can lazy load on demand, no copy needed argvals[idx] = decay_derived(ctx, data_pointer(ctx, arg)); } else { @@ -5135,7 +5132,7 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos ctx.builder.CreateICmpEQ( ctx.builder.CreateAnd(tindex, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), UNION_BOX_MARKER)), ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0)), - decay_derived(ctx, argvals[0]), + decay_derived(ctx, result), decay_derived(ctx, box) ); retval = mark_julia_slot(derived, @@ -5149,6 +5146,19 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos retval = mark_julia_slot(NULL, jlretty, call, ctx.tbaa().tbaa_stack); break; } + return retval; +} + +static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_closure, jl_value_t *specTypes, jl_value_t *jlretty, llvm::Value *callee, StringRef specFunctionObject, jl_code_instance_t *fromexternal, + ArrayRef argv, size_t nargs, jl_returninfo_t::CallingConv *cc, unsigned *nreturn_roots, jl_value_t *inferred_retty) +{ + ++EmittedSpecfunCalls; + // emit specialized call site + bool gcstack_arg = JL_FEAT_TEST(ctx, gcstack_arg); + jl_returninfo_t returninfo = get_specsig_function(ctx, jl_Module, callee, specFunctionObject, specTypes, jlretty, is_opaque_closure, gcstack_arg); + *cc = returninfo.cc; + *nreturn_roots = returninfo.return_roots; + jl_cgval_t retval = emit_call_specfun_other(ctx, is_opaque_closure, specTypes, jlretty, returninfo, fromexternal, argv, nargs); // see if inference has a different / better type for the call than the lambda return update_julia_type(ctx, retval, inferred_retty); } @@ -6248,7 +6258,8 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met if (closure_method->source) { mi = jl_specializations_get_linfo(closure_method, sigtype, jl_emptysvec); ci = (jl_code_instance_t*)jl_rettype_inferred_addr(mi, ctx.min_world, ctx.max_world); - } else { + } + else { mi = (jl_method_instance_t*)jl_atomic_load_relaxed(&closure_method->specializations); assert(jl_is_method_instance(mi)); ci = jl_atomic_load_relaxed(&mi->cache); @@ -6291,7 +6302,8 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met closure_decls.specFunctionObject; if (GlobalValue *V = jl_Module->getNamedValue(fname)) { F = cast(V); - } else { + } + else { F = Function::Create(get_func_sig(ctx.builder.getContext()), Function::ExternalLinkage, fname, jl_Module); @@ -6302,7 +6314,8 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met 
Function *specF = NULL; if (!isspecsig) { specF = F; - } else { + } + else { //emission context holds context lock so can get module specF = closure_m.getModuleUnlocked()->getFunction(closure_decls.specFunctionObject); if (specF) { @@ -6817,14 +6830,6 @@ static Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptr return f; } -static Type *get_returnroots_type(jl_codectx_t &ctx, unsigned rootcount) { - return ArrayType::get(ctx.types().T_prjlvalue, rootcount); -} - -static Type *get_unionbytes_type(LLVMContext &C, unsigned unionbytes) { - return ArrayType::get(getInt8Ty(C), unionbytes); -} - static void emit_cfunc_invalidate( Function *gf_thunk, jl_returninfo_t::CallingConv cc, unsigned return_roots, jl_value_t *calltype, jl_value_t *rettype, bool is_for_opaque_closure, @@ -6876,15 +6881,13 @@ static void emit_cfunc_invalidate( else { Value *arg_v = &*AI; ++AI; - Type *at = arg_v->getType(); if ((i == 0 && is_for_opaque_closure) || (!isboxed && et->isAggregateType())) { myargs[i] = mark_julia_slot(arg_v, jt, NULL, ctx.tbaa().tbaa_const); } else { - assert(at == et); + assert(arg_v->getType() == et); myargs[i] = mark_julia_type(ctx, arg_v, isboxed, jt); } - (void)at; } } assert(AI == gf_thunk->arg_end()); @@ -7306,77 +7309,9 @@ static Function* gen_cfun_wrapper( bool is_opaque_closure = jl_is_method(lam->def.value) && lam->def.method->is_for_opaque_closure; assert(calltype == 3); // emit a specsig call - bool gcstack_arg = JL_FEAT_TEST(ctx, gcstack_arg); StringRef protoname = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)callptr, invoke, codeinst); + bool gcstack_arg = JL_FEAT_TEST(ctx, gcstack_arg); jl_returninfo_t returninfo = get_specsig_function(ctx, M, NULL, protoname, lam->specTypes, astrt, is_opaque_closure, gcstack_arg); - FunctionType *cft = returninfo.decl.getFunctionType(); - jlfunc_sret = (returninfo.cc == jl_returninfo_t::SRet); - - // TODO: Can use use emit_call_specfun_other here? - SmallVector args; - Value *result = nullptr; - if (jlfunc_sret || returninfo.cc == jl_returninfo_t::Union) { - // fuse the two sret together, or emit an alloca to hold it - if (sig.sret && jlfunc_sret) { - result = emit_bitcast(ctx, sretPtr, cft->getParamType(0)); - } - else { - if (jlfunc_sret) { - result = emit_static_alloca(ctx, getAttributeAtIndex(returninfo.attrs, 1, Attribute::StructRet).getValueAsType()); - setName(ctx.emission_context, result, "sret"); - #if JL_LLVM_VERSION < 170000 - assert(cast(result->getType())->hasSameElementTypeAs(cast(cft->getParamType(0)))); - #endif - } else { - result = emit_static_alloca(ctx, get_unionbytes_type(ctx.builder.getContext(), returninfo.union_bytes)); - setName(ctx.emission_context, result, "result_union"); - #if JL_LLVM_VERSION < 170000 - assert(cast(result->getType())->hasSameElementTypeAs(cast(cft->getParamType(0)))); - #endif - } - } - args.push_back(result); - } - if (returninfo.return_roots) { - AllocaInst *return_roots = emit_static_alloca(ctx, get_returnroots_type(ctx, returninfo.return_roots)); - setName(ctx.emission_context, return_roots, "return_roots"); - args.push_back(return_roots); - } - if (gcstack_arg) - args.push_back(ctx.pgcstack); - for (size_t i = 0; i < nargs + 1; i++) { - // figure out how to repack the arguments - jl_cgval_t &inputarg = inputargs[i]; - Value *arg; - jl_value_t *spect = (i == 0 && is_opaque_closure) ? (jl_value_t*)jl_any_type : - jl_nth_slot_type(lam->specTypes, i); - // n.b. 
specTypes is required to be a datatype by construction for specsig - bool isboxed = deserves_argbox(spect); - Type *T = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, spect); - if (is_uniquerep_Type(spect)) { - continue; - } - else if (isboxed) { - arg = boxed(ctx, inputarg); - } - else if (type_is_ghost(T)) { - continue; // ghost types are skipped by the specsig method signature - } - else if (T->isAggregateType()) { - // aggregate types are passed by pointer - inputarg = value_to_pointer(ctx, inputarg); - arg = decay_derived(ctx, data_pointer(ctx, inputarg)); - } - else { - arg = emit_unbox(ctx, T, inputarg, spect); - assert(!isa(arg)); - } - - // add to argument list - args.push_back(arg); - } - Value *theFptr = returninfo.decl.getCallee(); - assert(theFptr); if (age_ok) { funcName += "_gfthunk"; Function *gf_thunk = Function::Create(returninfo.decl.getFunctionType(), @@ -7388,49 +7323,17 @@ static Function* gen_cfun_wrapper( // but which has the signature of a specsig emit_cfunc_invalidate(gf_thunk, returninfo.cc, returninfo.return_roots, lam->specTypes, codeinst->rettype, is_opaque_closure, nargs + 1, ctx.emission_context, min_world, max_world); - theFptr = ctx.builder.CreateSelect(age_ok, theFptr, gf_thunk); + returninfo.decl = FunctionCallee(returninfo.decl.getFunctionType(), ctx.builder.CreateSelect(age_ok, returninfo.decl.getCallee(), gf_thunk)); } - - #if JL_LLVM_VERSION < 170000 - assert(cast(theFptr->getType())->isOpaqueOrPointeeTypeMatches(returninfo.decl.getFunctionType())); - #endif - CallInst *call = ctx.builder.CreateCall( - returninfo.decl.getFunctionType(), - theFptr, ArrayRef(args)); - call->setAttributes(returninfo.attrs); - if (gcstack_arg) - call->setCallingConv(CallingConv::Swift); - - switch (returninfo.cc) { - case jl_returninfo_t::Boxed: - retval = mark_julia_type(ctx, call, true, astrt); - break; - case jl_returninfo_t::Register: - retval = mark_julia_type(ctx, call, false, astrt); - break; - case jl_returninfo_t::SRet: - retval = mark_julia_slot(result, astrt, NULL, ctx.tbaa().tbaa_stack); - break; - case jl_returninfo_t::Union: { - Value *box = ctx.builder.CreateExtractValue(call, 0); - Value *tindex = ctx.builder.CreateExtractValue(call, 1); - Value *derived = ctx.builder.CreateSelect( - ctx.builder.CreateICmpEQ( - ctx.builder.CreateAnd(tindex, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), UNION_BOX_MARKER)), - ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0)), - decay_derived(ctx, result), - decay_derived(ctx, box)); - retval = mark_julia_slot(derived, - astrt, - tindex, - ctx.tbaa().tbaa_stack); - assert(box->getType() == ctx.types().T_prjlvalue); - retval.Vboxed = box; - break; - } - case jl_returninfo_t::Ghosts: - retval = mark_julia_slot(NULL, astrt, call, ctx.tbaa().tbaa_stack); - break; + retval = emit_call_specfun_other(ctx, is_opaque_closure, lam->specTypes, codeinst->rettype, returninfo, nullptr, inputargs, nargs + 1); + jlfunc_sret = (returninfo.cc == jl_returninfo_t::SRet); + if (jlfunc_sret && sig.sret) { + // fuse the two sret together + assert(retval.ispointer()); + AllocaInst *result = cast(retval.V); + retval.V = sretPtr; + result->replaceAllUsesWith(sretPtr); + result->eraseFromParent(); } } @@ -7729,7 +7632,7 @@ const char *jl_generate_ccallable(LLVMOrcThreadSafeModuleRef llvmmod, void *sysi } // generate a julia-callable function that calls f (AKA lam) -static Function *gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlretty, const jl_returninfo_t &f, int retarg, StringRef funcName, +static void 
gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlretty, jl_returninfo_t &f, unsigned nargs, int retarg, StringRef funcName, Module *M, jl_codegen_params_t ¶ms) { ++GeneratedInvokeWrappers; @@ -7757,86 +7660,26 @@ static Function *gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlret ctx.builder.SetCurrentDebugLocation(noDbg); allocate_gc_frame(ctx, b0); - // TODO: replace this with emit_call_specfun_other? - FunctionType *ftype = const_cast(f.decl).getFunctionType(); - size_t nfargs = ftype->getNumParams(); - SmallVector args(nfargs); - unsigned idx = 0; - AllocaInst *result = NULL; - switch (f.cc) { - case jl_returninfo_t::Boxed: - case jl_returninfo_t::Register: - case jl_returninfo_t::Ghosts: - break; - case jl_returninfo_t::SRet: - #if JL_LLVM_VERSION < 170000 - assert(cast(ftype->getParamType(0))->isOpaqueOrPointeeTypeMatches(getAttributeAtIndex(f.attrs, 1, Attribute::StructRet).getValueAsType())); - #endif - result = ctx.builder.CreateAlloca(getAttributeAtIndex(f.attrs, 1, Attribute::StructRet).getValueAsType()); - setName(ctx.emission_context, result, "sret"); - args[idx] = result; - idx++; - break; - case jl_returninfo_t::Union: - result = ctx.builder.CreateAlloca(ArrayType::get(getInt8Ty(ctx.builder.getContext()), f.union_bytes)); - if (f.union_align > 1) - result->setAlignment(Align(f.union_align)); - args[idx] = result; - idx++; - setName(ctx.emission_context, result, "result_union"); - break; - } - if (f.return_roots) { - AllocaInst *return_roots = emit_static_alloca(ctx, ArrayType::get(ctx.types().T_prjlvalue, f.return_roots)); - setName(ctx.emission_context, return_roots, "return_roots"); - args[idx] = return_roots; - idx++; - } - bool gcstack_arg = JL_FEAT_TEST(ctx, gcstack_arg); - if (gcstack_arg) { - args[idx] = ctx.pgcstack; - idx++; - } + SmallVector argv(nargs); bool is_opaque_closure = jl_is_method(lam->def.value) && lam->def.method->is_for_opaque_closure; - for (size_t i = 0; i < jl_nparams(lam->specTypes) && idx < nfargs; ++i) { + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); + for (size_t i = 0; i < nargs; ++i) { jl_value_t *ty = ((i == 0) && is_opaque_closure) ? (jl_value_t*)jl_any_type : jl_nth_slot_type(lam->specTypes, i); - // n.b. specTypes is required to be a datatype by construction for specsig - bool isboxed = deserves_argbox(ty); - Type *lty = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, ty); - if (type_is_ghost(lty) || is_uniquerep_Type(ty)) - continue; Value *theArg; if (i == 0) { - // This function adapts from generic jlcall to OC specsig. Generic jlcall pointers - // come in as ::Tracked, but specsig expected ::Derived. 
- if (is_opaque_closure) - theArg = decay_derived(ctx, funcArg); - else - theArg = funcArg; + theArg = funcArg; } else { Value *argPtr = emit_ptrgep(ctx, argArray, (i - 1) * ctx.types().sizeof_ptr); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); theArg = ai.decorateInst(maybe_mark_load_dereferenceable( ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, argPtr, Align(sizeof(void*))), false, ty)); } - if (!isboxed) { - theArg = decay_derived(ctx, theArg); - if (!lty->isAggregateType()) // keep "aggregate" type values in place as pointers - theArg = ctx.builder.CreateAlignedLoad(lty, theArg, Align(julia_alignment(ty))); - } - assert(!isa(theArg)); - args[idx] = theArg; - idx++; + argv[i] = mark_julia_type(ctx, theArg, true, ty); } - CallInst *call = ctx.builder.CreateCall(f.decl, args); - call->setAttributes(f.attrs); - if (gcstack_arg) - call->setCallingConv(CallingConv::Swift); - jl_cgval_t retval; + jl_cgval_t retval = emit_call_specfun_other(ctx, is_opaque_closure, lam->specTypes, jlretty, f, nullptr, argv, nargs); if (retarg != -1) { Value *theArg; if (retarg == 0) @@ -7847,34 +7690,7 @@ static Function *gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlret Align(sizeof(void*))); retval = mark_julia_type(ctx, theArg, true, jl_any_type); } - else { - switch (f.cc) { - case jl_returninfo_t::Boxed: - retval = mark_julia_type(ctx, call, true, jlretty); - break; - case jl_returninfo_t::Register: - retval = mark_julia_type(ctx, call, false, jlretty); - break; - case jl_returninfo_t::SRet: - retval = mark_julia_slot(result, jlretty, NULL, ctx.tbaa().tbaa_stack); - break; - case jl_returninfo_t::Union: - // result is technically not right here, but `boxed` will only look at it - // for the unboxed values, so it's ok. - retval = mark_julia_slot(result, - jlretty, - ctx.builder.CreateExtractValue(call, 1), - ctx.tbaa().tbaa_stack); - retval.Vboxed = ctx.builder.CreateExtractValue(call, 0); - assert(retval.Vboxed->getType() == ctx.types().T_prjlvalue); - break; - case jl_returninfo_t::Ghosts: - retval = mark_julia_slot(NULL, jlretty, call, ctx.tbaa().tbaa_stack); - break; - } - } ctx.builder.CreateRet(boxed(ctx, retval)); - return w; } static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure, bool gcstack_arg, BitVector *used_arguments, size_t *arg_offset) @@ -7986,22 +7802,21 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value bool isboxed = false; Type *ty = NULL; if (i == 0 && is_opaque_closure) { - ty = PointerType::get(ctx.types().T_jlvalue, AddressSpace::Derived); - isboxed = true; // true-ish anyway - we might not have the type tag + ty = nullptr; // special token to avoid computing this unnecessarily } else { if (is_uniquerep_Type(jt)) continue; isboxed = deserves_argbox(jt); ty = isboxed ? 
ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, jt); + if (type_is_ghost(ty)) + continue; } - if (type_is_ghost(ty)) - continue; AttrBuilder param(ctx.builder.getContext()); - if (ty->isAggregateType()) { // aggregate types are passed by pointer + if (ty == nullptr || ty->isAggregateType()) { // aggregate types are passed by pointer param.addAttribute(Attribute::NoCapture); param.addAttribute(Attribute::ReadOnly); - ty = PointerType::get(ty, AddressSpace::Derived); + ty = ctx.builder.getPtrTy(AddressSpace::Derived); } else if (isboxed && jl_is_immutable_datatype(jt)) { param.addAttribute(Attribute::ReadOnly); @@ -8351,7 +8166,8 @@ static jl_llvm_functions_t std::string wrapName; raw_string_ostream(wrapName) << "jfptr_" << ctx.name << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1); declarations.functionObject = wrapName; - (void)gen_invoke_wrapper(lam, jlrettype, returninfo, retarg, declarations.functionObject, M, ctx.emission_context); + size_t nparams = jl_nparams(lam->specTypes); + gen_invoke_wrapper(lam, jlrettype, returninfo, nparams, retarg, declarations.functionObject, M, ctx.emission_context); // TODO: add attributes: maybe_mark_argument_dereferenceable(Arg, argType) // TODO: add attributes: dereferenceable // TODO: (if needsparams) add attributes: dereferenceable, readonly, nocapture From d7e417d6b8112cf90aa689ca6a00197729aeedb3 Mon Sep 17 00:00:00 2001 From: Nathan Zimmerberg <39104088+nhz2@users.noreply.github.com> Date: Tue, 10 Sep 2024 23:35:40 -0400 Subject: [PATCH 010/537] Fix "Various fixes to byte / bytearray search" (#55734) Fixes the conflict between #54593 and #54579 `_search` returns `nothing` instead of zero as a sentinal in #54579 --- base/strings/search.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/strings/search.jl b/base/strings/search.jl index 9bd69ae2f8a03..a481b3af775e0 100644 --- a/base/strings/search.jl +++ b/base/strings/search.jl @@ -178,7 +178,7 @@ function findall( i = firstindex(s) while true i = _search(s, byte, i) - iszero(i) && return result + isnothing(i) && return result i += 1 index = i - ncu # If the char is invalid, it's possible that its first byte is From bee75f73bbc4b295ff0899ac9aced852c6719364 Mon Sep 17 00:00:00 2001 From: Zentrik Date: Wed, 11 Sep 2024 11:52:14 +0100 Subject: [PATCH 011/537] Fix `make binary-dist` when using `USE_BINARYBUILDER_LLVM=0` (#55731) `make binary-dist` expects lld to be in usr/tools but it ends up in usr/bin so I copied it into usr/tools. Should fix the scheduled source tests which currently fail at linking. I think this is also broken with `USE_BINARYBUILDER_LLVM=0` and `BUILD_LLD=0`, maybe https://github.com/JuliaLang/julia/commit/ceaeb7b71bc76afaca2f3b80998164a47e30ce33 is the fix? 
--------- Co-authored-by: Zentrik --- deps/llvm.mk | 3 +++ 1 file changed, 3 insertions(+) diff --git a/deps/llvm.mk b/deps/llvm.mk index 08aff443dcff8..73697069a4fac 100644 --- a/deps/llvm.mk +++ b/deps/llvm.mk @@ -292,6 +292,9 @@ ifeq ($(OS),Darwin) # https://github.com/JuliaLang/julia/issues/29981 LLVM_INSTALL += && ln -s libLLVM.dylib $2$$(build_shlibdir)/libLLVM-$$(LLVM_VER_SHORT).dylib endif +ifeq ($(BUILD_LLD), 1) +LLVM_INSTALL += && cp $2$$(build_bindir)/lld$$(EXE) $2$$(build_depsbindir) +endif $(eval $(call staged-install, \ llvm,$$(LLVM_SRC_DIR)/build_$$(LLVM_BUILDTYPE), \ From 255162c7197e973d0427cc11d1e0117cdd76a1bf Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Wed, 11 Sep 2024 11:50:05 -0400 Subject: [PATCH 012/537] Precompile the `@time_imports` printing so it doesn't confuse reports (#55729) Makes functions for the report printing that can be precompiled into the sysimage. --- base/loading.jl | 94 +++++++++++++++++++++------------- contrib/generate_precompile.jl | 9 ++++ 2 files changed, 66 insertions(+), 37 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 4e70d2bc257ea..8d180845f942f 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1254,22 +1254,9 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No if parentmodule(M) === M && PkgId(M) == pkg register && register_root_module(M) if timing_imports - elapsed = round((time_ns() - t_before) / 1e6, digits = 1) + elapsed_time = time_ns() - t_before comp_time, recomp_time = cumulative_compile_time_ns() .- t_comp_before - print(lpad(elapsed, 9), " ms ") - ext_parent = extension_parent_name(M) - if ext_parent !== nothing - print(ext_parent::String, " → ") - end - print(pkg.name) - if comp_time > 0 - printstyled(" ", Ryu.writefixed(Float64(100 * comp_time / (elapsed * 1e6)), 2), "% compilation time", color = Base.info_color()) - end - if recomp_time > 0 - perc = Float64(100 * recomp_time / comp_time) - printstyled(" (", perc < 1 ? "<1" : Ryu.writefixed(perc, 0), "% recompilation)", color = Base.warn_color()) - end - println() + print_time_imports_report(M, elapsed_time, comp_time, recomp_time) end return M end @@ -1281,6 +1268,52 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No end end +# printing functions for @time_imports +# note that the time inputs are UInt64 on all platforms. Give default values here so that we don't have +# confusing UInt64 types in generate_precompile.jl +function print_time_imports_report( + mod::Module, + elapsed_time::UInt64=UInt64(1), + comp_time::UInt64=UInt64(1), + recomp_time::UInt64=UInt64(1) + ) + print(lpad(round(elapsed_time / 1e6, digits=1), 9), " ms ") + ext_parent = extension_parent_name(mod) + if ext_parent !== nothing + print(ext_parent::String, " → ") + end + print(string(mod)) + if comp_time > 0 + perc = Ryu.writefixed(Float64(100 * comp_time / (elapsed_time)), 2) + printstyled(" $perc% compilation time", color = Base.info_color()) + end + if recomp_time > 0 + perc = Float64(100 * recomp_time / comp_time) + perc_show = perc < 1 ? "<1" : Ryu.writefixed(perc, 0) + printstyled(" ($perc_show% recompilation)", color = Base.warn_color()) + end + println() +end +function print_time_imports_report_init( + mod::Module, i::Int=1, + elapsed_time::UInt64=UInt64(1), + comp_time::UInt64=UInt64(1), + recomp_time::UInt64=UInt64(1) + ) + connector = i > 1 ? 
"├" : "┌" + printstyled(" $connector ", color = :light_black) + print("$(round(elapsed_time / 1e6, digits=1)) ms $mod.__init__() ") + if comp_time > 0 + perc = Ryu.writefixed(Float64(100 * (comp_time) / elapsed_time), 2) + printstyled("$perc% compilation time", color = Base.info_color()) + end + if recomp_time > 0 + perc = Float64(100 * recomp_time / comp_time) + printstyled(" ($(perc < 1 ? "<1" : Ryu.writefixed(perc, 0))% recompilation)", color = Base.warn_color()) + end + println() +end + # if M is an extension, return the string name of the parent. Otherwise return nothing function extension_parent_name(M::Module) rootmodule = moduleroot(M) @@ -1338,31 +1371,18 @@ function run_module_init(mod::Module, i::Int=1) # `i` informs ordering for the `@time_imports` report formatting if TIMING_IMPORTS[] == 0 ccall(:jl_init_restored_module, Cvoid, (Any,), mod) - else - if isdefined(mod, :__init__) - connector = i > 1 ? "├" : "┌" - printstyled(" $connector ", color = :light_black) - - elapsedtime = time_ns() - cumulative_compile_timing(true) - compile_elapsedtimes = cumulative_compile_time_ns() + elseif isdefined(mod, :__init__) + elapsed_time = time_ns() + cumulative_compile_timing(true) + compile_elapsedtimes = cumulative_compile_time_ns() - ccall(:jl_init_restored_module, Cvoid, (Any,), mod) + ccall(:jl_init_restored_module, Cvoid, (Any,), mod) - elapsedtime = (time_ns() - elapsedtime) / 1e6 - cumulative_compile_timing(false); - comp_time, recomp_time = (cumulative_compile_time_ns() .- compile_elapsedtimes) ./ 1e6 + elapsed_time = time_ns() - elapsed_time + cumulative_compile_timing(false); + comp_time, recomp_time = cumulative_compile_time_ns() .- compile_elapsedtimes - print("$(round(elapsedtime, digits=1)) ms $mod.__init__() ") - if comp_time > 0 - printstyled(Ryu.writefixed(Float64(100 * comp_time / elapsedtime), 2), "% compilation time", color = Base.info_color()) - end - if recomp_time > 0 - perc = Float64(100 * recomp_time / comp_time) - printstyled(" ($(perc < 1 ? 
"<1" : Ryu.writefixed(perc, 0))% recompilation)", color = Base.warn_color()) - end - println() - end + print_time_imports_report_init(mod, i, elapsed_time, comp_time, recomp_time) end end diff --git a/contrib/generate_precompile.jl b/contrib/generate_precompile.jl index 2a0e4faff7f1c..50ae560e99401 100644 --- a/contrib/generate_precompile.jl +++ b/contrib/generate_precompile.jl @@ -39,6 +39,8 @@ precompile(Base.__require_prelocked, (Base.PkgId, Nothing)) precompile(Base._require, (Base.PkgId, Nothing)) precompile(Base.indexed_iterate, (Pair{Symbol, Union{Nothing, String}}, Int)) precompile(Base.indexed_iterate, (Pair{Symbol, Union{Nothing, String}}, Int, Int)) +precompile(Tuple{typeof(Base.Threads.atomic_add!), Base.Threads.Atomic{Int}, Int}) +precompile(Tuple{typeof(Base.Threads.atomic_sub!), Base.Threads.Atomic{Int}, Int}) # Pkg loading precompile(Tuple{typeof(Base.Filesystem.normpath), String, String, Vararg{String}}) @@ -161,6 +163,8 @@ for match = Base._methods(+, (Int, Int), -1, Base.get_world_counter()) push!(Expr[], Expr(:return, false)) vcat(String[], String[]) k, v = (:hello => nothing) + Base.print_time_imports_report(Base) + Base.print_time_imports_report_init(Base) # Preferences uses these get(Dict{String,Any}(), "missing", nothing) @@ -172,6 +176,11 @@ for match = Base._methods(+, (Int, Int), -1, Base.get_world_counter()) # interactive startup uses this write(IOBuffer(), "") + # not critical, but helps hide unrelated compilation from @time when using --trace-compile + foo() = rand(2,2) * rand(2,2) + @time foo() + @time foo() + break # only actually need to do this once end """ From 1eabe90fa054abc74f541798ae4dfcc9445bb564 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Wed, 11 Sep 2024 14:04:10 -0400 Subject: [PATCH 013/537] codegen: some cleanup of layout computations (#55730) Change Alloca to take an explicit alignment, rather than relying on LLVM to guess our intended alignment from the DataLayout. Eventually we should try to change this code to just get all layout data from julia queries (jl_field_offset, julia_alignment, etc.) instead of relying on creating an LLVM element type for memory and inspecting it (CountTrackedPointers, DataLayout, and so on). --- src/ccall.cpp | 14 +++++--------- src/cgutils.cpp | 36 ++++++++++++++++-------------------- src/codegen.cpp | 34 +++++++++++++++------------------- src/intrinsics.cpp | 14 ++++++++------ 4 files changed, 44 insertions(+), 54 deletions(-) diff --git a/src/ccall.cpp b/src/ccall.cpp index eac130ea43189..e336de8e3574f 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -446,15 +446,13 @@ static Value *llvm_type_rewrite( const DataLayout &DL = ctx.builder.GetInsertBlock()->getModule()->getDataLayout(); Align align = std::max(DL.getPrefTypeAlign(target_type), DL.getPrefTypeAlign(from_type)); if (DL.getTypeAllocSize(target_type) >= DL.getTypeAllocSize(from_type)) { - to = emit_static_alloca(ctx, target_type); + to = emit_static_alloca(ctx, target_type, align); setName(ctx.emission_context, to, "type_rewrite_buffer"); - cast(to)->setAlignment(align); from = to; } else { - from = emit_static_alloca(ctx, from_type); + from = emit_static_alloca(ctx, from_type, align); setName(ctx.emission_context, from, "type_rewrite_buffer"); - cast(from)->setAlignment(align); to = from; } ctx.builder.CreateAlignedStore(v, from, align); @@ -555,9 +553,8 @@ static Value *julia_to_native( // pass the address of an alloca'd thing, not a box // since those are immutable. 
- Value *slot = emit_static_alloca(ctx, to); Align align(julia_alignment(jlto)); - cast(slot)->setAlignment(align); + Value *slot = emit_static_alloca(ctx, to, align); setName(ctx.emission_context, slot, "native_convert_buffer"); if (!jvinfo.ispointer()) { jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, jvinfo.tbaa); @@ -2090,7 +2087,7 @@ jl_cgval_t function_sig_t::emit_a_ccall( if (sret) { assert(!retboxed && jl_is_datatype(rt) && "sret return type invalid"); if (jl_is_pointerfree(rt)) { - result = emit_static_alloca(ctx, lrt); + result = emit_static_alloca(ctx, lrt, Align(julia_alignment(rt))); setName(ctx.emission_context, result, "ccall_sret"); sretty = lrt; argvals[0] = result; @@ -2266,9 +2263,8 @@ jl_cgval_t function_sig_t::emit_a_ccall( if (DL.getTypeStoreSize(resultTy) > rtsz) { // ARM and AArch64 can use a LLVM type larger than the julia type. // When this happens, cast through memory. - auto slot = emit_static_alloca(ctx, resultTy); + auto slot = emit_static_alloca(ctx, resultTy, boxalign); setName(ctx.emission_context, slot, "type_pun_slot"); - slot->setAlignment(boxalign); ctx.builder.CreateAlignedStore(result, slot, boxalign); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); emit_memcpy(ctx, strct, ai, slot, ai, rtsz, boxalign, boxalign); diff --git a/src/cgutils.cpp b/src/cgutils.cpp index 2a234f399f5c1..bf5c67ae9f849 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -1937,18 +1937,22 @@ static jl_cgval_t typed_load(jl_codectx_t &ctx, Value *ptr, Value *idx_0based, j ctx.builder.CreateFence(Order); return ghostValue(ctx, jltype); } + if (isboxed) + alignment = sizeof(void*); + else if (!alignment) + alignment = julia_alignment(jltype); unsigned nb = isboxed ? sizeof(void*) : jl_datatype_size(jltype); // note that nb == jl_Module->getDataLayout().getTypeAllocSize(elty) or getTypeStoreSize, depending on whether it is a struct or primitive type AllocaInst *intcast = NULL; if (Order == AtomicOrdering::NotAtomic) { if (!isboxed && !aliasscope && elty->isAggregateType() && !CountTrackedPointers(elty).count) { - intcast = emit_static_alloca(ctx, elty); + intcast = emit_static_alloca(ctx, elty, Align(alignment)); setName(ctx.emission_context, intcast, "aggregate_load_box"); } } else { if (!isboxed && !elty->isIntOrPtrTy()) { - intcast = emit_static_alloca(ctx, elty); + intcast = emit_static_alloca(ctx, elty, Align(alignment)); setName(ctx.emission_context, intcast, "atomic_load_box"); elty = Type::getIntNTy(ctx.builder.getContext(), 8 * nb); } @@ -1963,10 +1967,6 @@ static jl_cgval_t typed_load(jl_codectx_t &ctx, Value *ptr, Value *idx_0based, j if (idx_0based) data = ctx.builder.CreateInBoundsGEP(elty, data, idx_0based); Value *instr = nullptr; - if (isboxed) - alignment = sizeof(void*); - else if (!alignment) - alignment = julia_alignment(jltype); if (intcast && Order == AtomicOrdering::NotAtomic) { emit_memcpy(ctx, intcast, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), data, jl_aliasinfo_t::fromTBAA(ctx, tbaa), nb, Align(alignment), intcast->getAlign()); } @@ -2053,6 +2053,10 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx, ret = update_julia_type(ctx, ret, jltype); return ret; }; + if (isboxed) + alignment = sizeof(void*); + else if (!alignment) + alignment = julia_alignment(jltype); Type *elty = isboxed ? 
ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, jltype); if (type_is_ghost(elty) || (issetfieldonce && !maybe_null_if_boxed) || @@ -2095,7 +2099,7 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx, intcast_eltyp = elty; elty = Type::getIntNTy(ctx.builder.getContext(), 8 * nb); if (!issetfield) { - intcast = emit_static_alloca(ctx, elty); + intcast = emit_static_alloca(ctx, elty, Align(alignment)); setName(ctx.emission_context, intcast, "atomic_store_box"); } } @@ -2121,10 +2125,6 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx, if (realelty != elty) r = ctx.builder.CreateZExt(r, elty); } - if (isboxed) - alignment = sizeof(void*); - else if (!alignment) - alignment = julia_alignment(jltype); Value *instr = nullptr; Value *Compare = nullptr; Value *Success = nullptr; @@ -2657,10 +2657,8 @@ static jl_cgval_t emit_unionload(jl_codectx_t &ctx, Value *addr, Value *ptindex, if (fsz > 0 && mutabl) { // move value to an immutable stack slot (excluding tindex) Type *AT = ArrayType::get(IntegerType::get(ctx.builder.getContext(), 8 * al), (fsz + al - 1) / al); - AllocaInst *lv = emit_static_alloca(ctx, AT); + AllocaInst *lv = emit_static_alloca(ctx, AT, Align(al)); setName(ctx.emission_context, lv, "immutable_union"); - if (al > 1) - lv->setAlignment(Align(al)); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); emit_memcpy(ctx, lv, ai, addr, ai, fsz, Align(al), Align(al)); addr = lv; @@ -2903,7 +2901,7 @@ static jl_cgval_t emit_getfield_knownidx(jl_codectx_t &ctx, const jl_cgval_t &st unsigned st_idx = convert_struct_offset(ctx, T, byte_offset); IntegerType *ET = cast(T->getStructElementType(st_idx)); unsigned align = (ET->getBitWidth() + 7) / 8; - lv = emit_static_alloca(ctx, ET); + lv = emit_static_alloca(ctx, ET, Align(align)); lv->setOperand(0, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), (fsz + align - 1) / align)); // emit all of the align-sized words unsigned i = 0; @@ -3324,10 +3322,8 @@ static AllocaInst *try_emit_union_alloca(jl_codectx_t &ctx, jl_uniontype_t *ut, // at least some of the values can live on the stack // try to pick an Integer type size such that SROA will emit reasonable code Type *AT = ArrayType::get(IntegerType::get(ctx.builder.getContext(), 8 * min_align), (nbytes + min_align - 1) / min_align); - AllocaInst *lv = emit_static_alloca(ctx, AT); + AllocaInst *lv = emit_static_alloca(ctx, AT, Align(align)); setName(ctx.emission_context, lv, "unionalloca"); - if (align > 1) - lv->setAlignment(Align(align)); return lv; } return NULL; @@ -3886,7 +3882,7 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg } } else { - strct = emit_static_alloca(ctx, lt); + strct = emit_static_alloca(ctx, lt, Align(julia_alignment(ty))); setName(ctx.emission_context, strct, arg_typename); if (tracked.count) undef_derived_strct(ctx, strct, sty, ctx.tbaa().tbaa_stack); @@ -3966,7 +3962,7 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg if (fsz1 > 0 && !fval_info.isghost) { Type *ET = IntegerType::get(ctx.builder.getContext(), 8 * al); assert(lt->getStructElementType(llvm_idx) == ET); - AllocaInst *lv = emit_static_alloca(ctx, ET); + AllocaInst *lv = emit_static_alloca(ctx, ET, Align(al)); setName(ctx.emission_context, lv, "unioninit"); lv->setOperand(0, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), (fsz1 + al - 1) / al)); emit_unionmove(ctx, lv, ctx.tbaa().tbaa_stack, fval_info, nullptr); diff --git a/src/codegen.cpp b/src/codegen.cpp index a82056eb36e21..73a5f844b31da 100644 --- a/src/codegen.cpp +++ 
b/src/codegen.cpp @@ -2208,10 +2208,10 @@ static GlobalVariable *get_pointer_to_constant(jl_codegen_params_t &emission_con return gv; } -static AllocaInst *emit_static_alloca(jl_codectx_t &ctx, Type *lty) +static AllocaInst *emit_static_alloca(jl_codectx_t &ctx, Type *lty, Align align) { ++EmittedAllocas; - return new AllocaInst(lty, ctx.topalloca->getModule()->getDataLayout().getAllocaAddrSpace(), "", /*InsertBefore=*/ctx.topalloca); + return new AllocaInst(lty, ctx.topalloca->getModule()->getDataLayout().getAllocaAddrSpace(), nullptr, align, "", /*InsertBefore=*/ctx.topalloca); } static void undef_derived_strct(jl_codectx_t &ctx, Value *ptr, jl_datatype_t *sty, MDNode *tbaa) @@ -2323,7 +2323,7 @@ static inline jl_cgval_t value_to_pointer(jl_codectx_t &ctx, Value *v, jl_value_ loc = get_pointer_to_constant(ctx.emission_context, cast(v), Align(julia_alignment(typ)), "_j_const", *jl_Module); } else { - loc = emit_static_alloca(ctx, v->getType()); + loc = emit_static_alloca(ctx, v->getType(), Align(julia_alignment(typ))); ctx.builder.CreateStore(v, loc); } return mark_julia_slot(loc, typ, tindex, ctx.tbaa().tbaa_stack); @@ -2435,7 +2435,7 @@ static void alloc_def_flag(jl_codectx_t &ctx, jl_varinfo_t& vi) { assert((!vi.boxroot || vi.pTIndex) && "undef check is null pointer for boxed things"); if (vi.usedUndef) { - vi.defFlag = emit_static_alloca(ctx, getInt1Ty(ctx.builder.getContext())); + vi.defFlag = emit_static_alloca(ctx, getInt1Ty(ctx.builder.getContext()), Align(1)); setName(ctx.emission_context, vi.defFlag, "isdefined"); store_def_flag(ctx, vi, false); } @@ -5025,25 +5025,20 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos case jl_returninfo_t::Ghosts: break; case jl_returninfo_t::SRet: - result = emit_static_alloca(ctx, getAttributeAtIndex(returninfo.attrs, 1, Attribute::StructRet).getValueAsType()); - #if JL_LLVM_VERSION < 170000 - assert(cast(result->getType())->hasSameElementTypeAs(cast(cft->getParamType(0)))); - #endif + result = emit_static_alloca(ctx, getAttributeAtIndex(returninfo.attrs, 1, Attribute::StructRet).getValueAsType(), Align(julia_alignment(jlretty))); argvals[idx] = result; idx++; break; case jl_returninfo_t::Union: - result = emit_static_alloca(ctx, ArrayType::get(getInt8Ty(ctx.builder.getContext()), returninfo.union_bytes)); + result = emit_static_alloca(ctx, ArrayType::get(getInt8Ty(ctx.builder.getContext()), returninfo.union_bytes), Align(returninfo.union_align)); setName(ctx.emission_context, result, "sret_box"); - if (returninfo.union_align > 1) - result->setAlignment(Align(returninfo.union_align)); argvals[idx] = result; idx++; break; } if (returninfo.return_roots) { - AllocaInst *return_roots = emit_static_alloca(ctx, ArrayType::get(ctx.types().T_prjlvalue, returninfo.return_roots)); + AllocaInst *return_roots = emit_static_alloca(ctx, ArrayType::get(ctx.types().T_prjlvalue, returninfo.return_roots), Align(alignof(jl_value_t*))); argvals[idx] = return_roots; idx++; } @@ -5922,11 +5917,10 @@ static void emit_phinode_assign(jl_codectx_t &ctx, ssize_t idx, jl_value_t *r) if (vtype->isAggregateType() && CountTrackedPointers(vtype).count == 0) { // the value will be moved into dest in the predecessor critical block. 
// here it's moved into phi in the successor (from dest) - dest = emit_static_alloca(ctx, vtype); - Value *phi = emit_static_alloca(ctx, vtype); - ctx.builder.CreateMemCpy(phi, Align(julia_alignment(phiType)), - dest, dest->getAlign(), - jl_datatype_size(phiType), false); + Align align(julia_alignment(phiType)); + dest = emit_static_alloca(ctx, vtype, align); + Value *phi = emit_static_alloca(ctx, vtype, align); + ctx.builder.CreateMemCpy(phi, align, dest, align, jl_datatype_size(phiType), false); ctx.builder.CreateLifetimeEnd(dest); slot = mark_julia_slot(phi, phiType, NULL, ctx.tbaa().tbaa_stack); } @@ -7737,6 +7731,8 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value if (tracked.count && !tracked.all) props.return_roots = tracked.count; props.cc = jl_returninfo_t::SRet; + props.union_bytes = jl_datatype_size(jlrettype); + props.union_align = props.union_minalign = jl_datatype_align(jlrettype); // sret is always passed from alloca assert(M); fsig.push_back(rt->getPointerTo(M->getDataLayout().getAllocaAddrSpace())); @@ -8365,13 +8361,13 @@ static jl_llvm_functions_t if (lv) { lv->setName(jl_symbol_name(s)); varinfo.value = mark_julia_slot(lv, jt, NULL, ctx.tbaa().tbaa_stack); - varinfo.pTIndex = emit_static_alloca(ctx, getInt8Ty(ctx.builder.getContext())); + varinfo.pTIndex = emit_static_alloca(ctx, getInt8Ty(ctx.builder.getContext()), Align(1)); setName(ctx.emission_context, varinfo.pTIndex, "tindex"); // TODO: attach debug metadata to this variable } else if (allunbox) { // all ghost values just need a selector allocated - AllocaInst *lv = emit_static_alloca(ctx, getInt8Ty(ctx.builder.getContext())); + AllocaInst *lv = emit_static_alloca(ctx, getInt8Ty(ctx.builder.getContext()), Align(1)); lv->setName(jl_symbol_name(s)); varinfo.pTIndex = lv; varinfo.value.tbaa = NULL; diff --git a/src/intrinsics.cpp b/src/intrinsics.cpp index 194b45886bb0d..c747edfeffe5f 100644 --- a/src/intrinsics.cpp +++ b/src/intrinsics.cpp @@ -405,10 +405,11 @@ static Value *emit_unboxed_coercion(jl_codectx_t &ctx, Type *to, Value *unboxed) } else if (!ty->isIntOrPtrTy() && !ty->isFloatingPointTy()) { assert(DL.getTypeSizeInBits(ty) == DL.getTypeSizeInBits(to)); - AllocaInst *cast = emit_static_alloca(ctx, ty); + Align align = std::max(DL.getPrefTypeAlign(ty), DL.getPrefTypeAlign(to)); + AllocaInst *cast = emit_static_alloca(ctx, ty, align); setName(ctx.emission_context, cast, "coercion"); - ctx.builder.CreateStore(unboxed, cast); - unboxed = ctx.builder.CreateLoad(to, cast); + ctx.builder.CreateAlignedStore(unboxed, cast, align); + unboxed = ctx.builder.CreateAlignedLoad(to, cast, align); } else if (frompointer) { Type *INTT_to = INTT(to, DL); @@ -692,10 +693,11 @@ static jl_cgval_t generic_cast( // understood that everything is implicitly rounded to 23 bits, // but if we start looking at more bits we need to actually do the // rounding first instead of carrying around incorrect low bits. 
- Value *jlfloattemp_var = emit_static_alloca(ctx, from->getType()); + Align align(julia_alignment((jl_value_t*)jlto)); + Value *jlfloattemp_var = emit_static_alloca(ctx, from->getType(), align); setName(ctx.emission_context, jlfloattemp_var, "rounding_slot"); - ctx.builder.CreateStore(from, jlfloattemp_var); - from = ctx.builder.CreateLoad(from->getType(), jlfloattemp_var, /*force this to load from the stack*/true); + ctx.builder.CreateAlignedStore(from, jlfloattemp_var, align); + from = ctx.builder.CreateAlignedLoad(from->getType(), jlfloattemp_var, align, /*force this to load from the stack*/true); setName(ctx.emission_context, from, "rounded"); } } From bf6962ca282831a29c6b4e8a2617e63fce623150 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Wed, 11 Sep 2024 19:25:01 -0400 Subject: [PATCH 014/537] Add some loading / LazyArtifacts precompiles to the sysimage (#55740) Fixes https://github.com/JuliaLang/julia/issues/55725 These help LazyArtifacts mainly but seem beneficial for the sysimage. --- contrib/generate_precompile.jl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/contrib/generate_precompile.jl b/contrib/generate_precompile.jl index 50ae560e99401..d3e73a1b1865a 100644 --- a/contrib/generate_precompile.jl +++ b/contrib/generate_precompile.jl @@ -42,6 +42,13 @@ precompile(Base.indexed_iterate, (Pair{Symbol, Union{Nothing, String}}, Int, Int precompile(Tuple{typeof(Base.Threads.atomic_add!), Base.Threads.Atomic{Int}, Int}) precompile(Tuple{typeof(Base.Threads.atomic_sub!), Base.Threads.Atomic{Int}, Int}) +# LazyArtifacts (but more generally helpful) +precompile(Tuple{Type{Base.Val{x} where x}, Module}) +precompile(Tuple{Type{NamedTuple{(:honor_overrides,), T} where T<:Tuple}, Tuple{Bool}}) +precompile(Tuple{typeof(Base.unique!), Array{String, 1}}) +precompile(Tuple{typeof(Base.invokelatest), Any}) +precompile(Tuple{typeof(Base.vcat), Array{String, 1}, Array{String, 1}}) + # Pkg loading precompile(Tuple{typeof(Base.Filesystem.normpath), String, String, Vararg{String}}) precompile(Tuple{typeof(Base.append!), Array{String, 1}, Array{String, 1}}) From 22eded8bbfeb6557c012e4b78c3c69c993d0d4e9 Mon Sep 17 00:00:00 2001 From: Gabriele Bozzola Date: Wed, 11 Sep 2024 21:14:37 -0700 Subject: [PATCH 015/537] Update stable version number in readme to v1.10.5 (#55742) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bd9e9b9c0bd02..465adcf049922 100644 --- a/README.md +++ b/README.md @@ -92,7 +92,7 @@ and then use the command prompt to change into the resulting julia directory. By Julia. However, most users should use the [most recent stable version](https://github.com/JuliaLang/julia/releases) of Julia. You can get this version by running: - git checkout v1.10.4 + git checkout v1.10.5 To build the `julia` executable, run `make` from within the julia directory. From 945517ba4e15f7470b8790a696ba5404ef047f2f Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Thu, 12 Sep 2024 03:41:49 -0400 Subject: [PATCH 016/537] Add `invokelatest` barrier to `string(...)` in `@assert` (#55739) This change protects `@assert` from invalidations to `Base.string(...)` by adding an `invokelatest` barrier. A common source of invalidations right now is `print(io, join(args...))`. The problem is: 1. Inference concludes that `join(::Any...)` returns `Union{String,AnnotatedString}` 2. The `print` call is union-split to `String` and `AnnotatedString` 3. 
This code is now invalidated when StyledStrings defines `print(io, ::AnnotatedString)` The invalidation chain for `@assert` is similar: ` @assert 1 == 1` calls into `string(::Expr)` which calls into `print(io, join(args::Any...))`. Unfortunately that leads to the invalidation of almost all `@assert`s without an explicit error message Similar to https://github.com/JuliaLang/julia/pull/55583#issuecomment-2308969806 --- base/error.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/error.jl b/base/error.jl index d169cdc8085ac..ee533cee0b57d 100644 --- a/base/error.jl +++ b/base/error.jl @@ -232,12 +232,12 @@ macro assert(ex, msgs...) msg = msg # pass-through elseif !isempty(msgs) && (isa(msg, Expr) || isa(msg, Symbol)) # message is an expression needing evaluating - msg = :(Main.Base.string($(esc(msg)))) + msg = :(Main.Base.invokelatest(Main.Base.string, $(esc(msg)))) elseif isdefined(Main, :Base) && isdefined(Main.Base, :string) && applicable(Main.Base.string, msg) msg = Main.Base.string(msg) else # string() might not be defined during bootstrap - msg = :(_assert_tostring($(Expr(:quote,msg)))) + msg = :(Main.Base.invokelatest(_assert_tostring, $(Expr(:quote,msg)))) end return :($(esc(ex)) ? $(nothing) : throw(AssertionError($msg))) end From 8a9f384d594878e58dd46ce30a42ba03e50ce824 Mon Sep 17 00:00:00 2001 From: Christian Guinard <28689358+christiangnrd@users.noreply.github.com> Date: Thu, 12 Sep 2024 12:59:20 -0300 Subject: [PATCH 017/537] Don't show string concatenation error hint with zero arg `+` (#55749) Closes #55745 --- base/errorshow.jl | 2 +- test/errorshow.jl | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/base/errorshow.jl b/base/errorshow.jl index a3bf464439d44..d805cb64fb81e 100644 --- a/base/errorshow.jl +++ b/base/errorshow.jl @@ -1067,7 +1067,7 @@ Experimental.register_error_hint(nonsetable_type_hint_handler, MethodError) # (probably attempting concatenation) function string_concatenation_hint_handler(io, ex, arg_types, kwargs) @nospecialize - if (ex.f === +) && all(i -> i <: AbstractString, arg_types) + if (ex.f === +) && !isempty(arg_types) && all(i -> i <: AbstractString, arg_types) print(io, "\nString concatenation is performed with ") printstyled(io, "*", color=:cyan) print(io, " (See also: https://docs.julialang.org/en/v1/manual/strings/#man-concatenation).") diff --git a/test/errorshow.jl b/test/errorshow.jl index 80352ddeaa9cf..a82ab7743dc5a 100644 --- a/test/errorshow.jl +++ b/test/errorshow.jl @@ -1079,6 +1079,12 @@ let err_str @test occursin("String concatenation is performed with *", err_str) end +# https://github.com/JuliaLang/julia/issues/55745 +let err_str + err_str = @except_str +() MethodError + @test !occursin("String concatenation is performed with *", err_str) +end + struct MissingLength; end struct MissingSize; end Base.IteratorSize(::Type{MissingSize}) = Base.HasShape{2}() From 76428563cdccf34aa030dffe1303bff8e12d742c Mon Sep 17 00:00:00 2001 From: Nick Robinson Date: Thu, 12 Sep 2024 20:17:17 +0100 Subject: [PATCH 018/537] Don't leave trailing whitespace when printing do-block expr (#55738) Before, when printing a `do`-block, we'd print a white-space after `do` even if no arguments follow. Now we don't print that space. 
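As a quick illustration (a hedged sketch, not part of the original patch: `foo`/`bar` are placeholder names, and it assumes a build that includes this change):

```julia
ex = :(foo() do; bar(); end)  # do-block whose anonymous function takes no arguments
s = sprint(show, ex)
@assert !contains(s, " \n")   # nothing is printed between `do` and the newline anymore
```

This mirrors the regression test added to `test/show.jl` in this patch.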
--------- Co-authored-by: Lilith Orion Hafner --- base/show.jl | 8 ++++++-- test/show.jl | 5 +++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/base/show.jl b/base/show.jl index 0a2976e7ebe42..ec6776d81f2d5 100644 --- a/base/show.jl +++ b/base/show.jl @@ -2196,8 +2196,12 @@ function show_unquoted(io::IO, ex::Expr, indent::Int, prec::Int, quote_level::In elseif head === :do && nargs == 2 iob = IOContext(io, beginsym=>false) show_unquoted(iob, args[1], indent, -1, quote_level) - print(io, " do ") - show_list(iob, (((args[2]::Expr).args[1])::Expr).args, ", ", 0, 0, quote_level) + print(io, " do") + do_args = (((args[2]::Expr).args[1])::Expr).args + if !isempty(do_args) + print(io, ' ') + show_list(iob, do_args, ", ", 0, 0, quote_level) + end for stmt in (((args[2]::Expr).args[2])::Expr).args print(io, '\n', " "^(indent + indent_width)) show_unquoted(iob, stmt, indent + indent_width, -1, quote_level) diff --git a/test/show.jl b/test/show.jl index 65c65111606c5..d9c3585b7c1df 100644 --- a/test/show.jl +++ b/test/show.jl @@ -2768,3 +2768,8 @@ let topmi = ccall(:jl_new_method_instance_uninit, Ref{Core.MethodInstance}, ()); topmi.def = Main @test contains(repr(topmi), "Toplevel MethodInstance") end + +@testset "show() no trailing whitespace" begin + do_expr1 = :(foo() do; bar(); end) + @test !contains(sprint(show, do_expr1), " \n") +end From 4079648edf487626cc56cc8e6a518e67343614a7 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Thu, 12 Sep 2024 16:52:47 -0300 Subject: [PATCH 019/537] Don't pass lSystem to the linker since macos always links it (#55722) This stops it complaing about duplicated libs. For libunwind there isn't much we can do because it's part of lsystem and we also need out own. --- cli/Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/cli/Makefile b/cli/Makefile index 7b8d3587f5386..3cc0af1a76afd 100644 --- a/cli/Makefile +++ b/cli/Makefile @@ -25,8 +25,6 @@ else ifeq ($(OS),FreeBSD) LOADER_LDFLAGS += -Wl,--no-as-needed -ldl -lpthread -rdynamic -lc -Wl,--as-needed else ifeq ($(OS),OpenBSD) LOADER_LDFLAGS += -Wl,--no-as-needed -lpthread -rdynamic -lc -Wl,--as-needed -else ifeq ($(OS),Darwin) -LOADER_LDFLAGS += -lSystem endif # Build list of dependent libraries that must be opened From e52a46c5c192bfe16853ae0b63ac33b280fba063 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Thu, 12 Sep 2024 23:02:46 +0200 Subject: [PATCH 020/537] define `numerator` and `denominator` for `Complex` (#55694) Fixes #55693 --- base/rational.jl | 11 +++++++++-- test/rational.jl | 17 +++++++++++++++++ 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/base/rational.jl b/base/rational.jl index fb1824acb6b31..b4e450fd73abc 100644 --- a/base/rational.jl +++ b/base/rational.jl @@ -293,8 +293,14 @@ julia> numerator(4) 4 ``` """ -numerator(x::Integer) = x +numerator(x::Union{Integer,Complex{<:Integer}}) = x numerator(x::Rational) = x.num +function numerator(z::Complex{<:Rational}) + den = denominator(z) + reim = (real(z), imag(z)) + result = checked_mul.(numerator.(reim), div.(den, denominator.(reim))) + complex(result...) 
+end """ denominator(x) @@ -310,8 +316,9 @@ julia> denominator(4) 1 ``` """ -denominator(x::Integer) = one(x) +denominator(x::Union{Integer,Complex{<:Integer}}) = one(x) denominator(x::Rational) = x.den +denominator(z::Complex{<:Rational}) = lcm(denominator(real(z)), denominator(imag(z))) sign(x::Rational) = oftype(x, sign(x.num)) signbit(x::Rational) = signbit(x.num) diff --git a/test/rational.jl b/test/rational.jl index c6f81372de0b9..20a0971068876 100644 --- a/test/rational.jl +++ b/test/rational.jl @@ -801,3 +801,20 @@ end @test rationalize(Int64, nextfloat(0.1) * im; tol=0) == precise_next * im @test rationalize(0.1im; tol=eps(0.1)) == rationalize(0.1im) end + +@testset "complex numerator, denominator" begin + z = complex(3*3, 2*3*5) + @test z === numerator(z) === numerator(z // 2) === numerator(z // 5) + @test complex(3, 2*5) === numerator(z // 3) + @test isone(denominator(z)) + @test 2 === denominator(z // 2) + @test 1 === denominator(z // 3) + @test 5 === denominator(z // 5) + for den ∈ 1:10 + q = z // den + @test q === (numerator(q)//denominator(q)) + end + @testset "do not overflow silently" begin + @test_throws OverflowError numerator(Int8(1)//Int8(31) + Int8(8)im//Int8(3)) + end +end From 94f8a3d6723c61df052a431769a1726fd27a8cc7 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Fri, 13 Sep 2024 08:17:21 -0400 Subject: [PATCH 021/537] More testsets for SubString and a few missing tests (#55656) Co-authored-by: Simeon David Schaub --- test/strings/types.jl | 527 ++++++++++++++++++++++-------------------- 1 file changed, 271 insertions(+), 256 deletions(-) diff --git a/test/strings/types.jl b/test/strings/types.jl index d1e89e0e85196..c09652c3a608d 100644 --- a/test/strings/types.jl +++ b/test/strings/types.jl @@ -2,196 +2,211 @@ ## SubString and Cstring tests ## -## SubString tests ## -u8str = "∀ ε > 0, ∃ δ > 0: |x-y| < δ ⇒ |f(x)-f(y)| < ε" -u8str2 = u8str^2 -len_u8str = length(u8str) -slen_u8str = length(u8str) -len_u8str2 = length(u8str2) -slen_u8str2 = length(u8str2) - -@test len_u8str2 == 2 * len_u8str -@test slen_u8str2 == 2 * slen_u8str - -u8str2plain = String(u8str2) - -for i1 = 1:length(u8str2) - if !isvalid(u8str2, i1); continue; end - for i2 = i1:length(u8str2) - if !isvalid(u8str2, i2); continue; end - @test length(u8str2[i1:i2]) == length(u8str2plain[i1:i2]) - @test length(u8str2[i1:i2]) == length(u8str2plain[i1:i2]) - @test u8str2[i1:i2] == u8str2plain[i1:i2] +@testset "SubString" begin + u8str = "∀ ε > 0, ∃ δ > 0: |x-y| < δ ⇒ |f(x)-f(y)| < ε" + u8str2 = u8str^2 + len_u8str = length(u8str) + slen_u8str = length(u8str) + len_u8str2 = length(u8str2) + slen_u8str2 = length(u8str2) + + @test len_u8str2 == 2 * len_u8str + @test slen_u8str2 == 2 * slen_u8str + + u8str2plain = String(u8str2) + @test !isascii(u8str2) + @test cmp(u8str2, u8str^3) == -1 + @test cmp(u8str2, u8str2) == 0 + @test cmp(u8str^3, u8str2) == 1 + @test codeunit(u8str2) == codeunit(u8str2plain) + + @test convert(Union{String, SubString{String}}, u8str2) === u8str2 + @test convert(Union{String, SubString{String}}, u8str2plain) === u8str2plain + + for i1 = 1:ncodeunits(u8str2) + if !isvalid(u8str2, i1); continue; end + for i2 = i1:ncodeunits(u8str2) + if !isvalid(u8str2, i2); continue; end + @test length(u8str2[i1:i2]) == length(u8str2plain[i1:i2]) + @test length(u8str2[i1:i2]) == length(u8str2plain[i1:i2]) + @test u8str2[i1:i2] == u8str2plain[i1:i2] + end end -end -# tests that SubString of a single multibyte `Char` string, like "∀" which takes 3 bytes -# gives the same result as `getindex` (except 
that it is a view not a copy) -for idx in 0:1 - @test SubString("∀", 1, idx) == "∀"[1:idx] -end + # tests that SubString of a single multibyte `Char` string, like "∀" which takes 3 bytes + # gives the same result as `getindex` (except that it is a view not a copy) + for idx in 0:1 + @test SubString("∀", 1, idx) == "∀"[1:idx] + end -# Substring provided with invalid end index throws BoundsError -@test_throws StringIndexError SubString("∀", 1, 2) -@test_throws StringIndexError SubString("∀", 1, 3) -@test_throws BoundsError SubString("∀", 1, 4) - -# Substring provided with invalid start index throws BoundsError -@test SubString("∀∀", 1:1) == "∀" -@test SubString("∀∀", 1:4) == "∀∀" -@test SubString("∀∀", 4:4) == "∀" -@test_throws StringIndexError SubString("∀∀", 1:2) -@test_throws StringIndexError SubString("∀∀", 1:5) -@test_throws StringIndexError SubString("∀∀", 2:4) -@test_throws BoundsError SubString("∀∀", 0:1) -@test_throws BoundsError SubString("∀∀", 0:4) -@test_throws BoundsError SubString("∀∀", 1:7) -@test_throws BoundsError SubString("∀∀", 4:7) - -# tests for SubString of more than one multibyte `Char` string -# we are consistent with `getindex` for `String` -for idx in [0, 1, 4] - @test SubString("∀∀", 1, idx) == "∀∀"[1:idx] - @test SubString("∀∀", 4, idx) == "∀∀"[4:idx] -end + @testset "invalid end index" begin + # Substring provided with invalid end index throws BoundsError + @test_throws StringIndexError SubString("∀", 1, 2) + @test_throws StringIndexError SubString("∀", 1, 3) + @test_throws BoundsError SubString("∀", 1, 4) + end -# index beyond lastindex("∀∀") -for idx in [2:3; 5:6] - @test_throws StringIndexError SubString("∀∀", 1, idx) -end -for idx in 7:8 - @test_throws BoundsError SubString("∀∀", 1, idx) -end + @testset "invalid start index" begin + # Substring provided with invalid start index throws BoundsError + @test SubString("∀∀", 1:1) == "∀" + @test SubString("∀∀", 1:4) == "∀∀" + @test SubString("∀∀", 4:4) == "∀" + @test_throws StringIndexError SubString("∀∀", 1:2) + @test_throws StringIndexError SubString("∀∀", 1:5) + @test_throws StringIndexError SubString("∀∀", 2:4) + @test_throws BoundsError SubString("∀∀", 0:1) + @test_throws BoundsError SubString("∀∀", 0:4) + @test_throws BoundsError SubString("∀∀", 1:7) + @test_throws BoundsError SubString("∀∀", 4:7) + end + + # tests for SubString of more than one multibyte `Char` string + # we are consistent with `getindex` for `String` + for idx in [0, 1, 4] + @test SubString("∀∀", 1, idx) == "∀∀"[1:idx] + @test SubString("∀∀", 4, idx) == "∀∀"[4:idx] + end -let str="tempus fugit" #length(str)==12 - ss=SubString(str,1,lastindex(str)) #match source string - @test length(ss)==length(str) + @testset "index beyond lastindex(\"∀∀\")" begin + for idx in [2:3; 5:6] + @test_throws StringIndexError SubString("∀∀", 1, idx) + end + for idx in 7:8 + @test_throws BoundsError SubString("∀∀", 1, idx) + end + end - ss=SubString(str,1:lastindex(str)) - @test length(ss)==length(str) + let str="tempus fugit" #length(str)==12 + ss=SubString(str,1,lastindex(str)) #match source string + @test length(ss)==length(str) - ss=SubString(str,1,0) #empty SubString - @test length(ss)==0 + ss=SubString(str,1:lastindex(str)) + @test length(ss)==length(str) - ss=SubString(str,1:0) - @test length(ss)==0 + ss=SubString(str,1,0) #empty SubString + @test length(ss)==0 - @test_throws BoundsError SubString(str, 14, 20) #start indexing beyond source string length - @test_throws BoundsError SubString(str, 10, 16) #end indexing beyond source string length + 
ss=SubString(str,1:0) + @test length(ss)==0 - @test_throws BoundsError SubString("", 1, 4) #empty source string - @test_throws BoundsError SubString("", 1, 1) #empty source string, identical start and end index - @test_throws BoundsError SubString("", 10, 12) - @test SubString("", 12, 10) == "" -end + @test_throws BoundsError SubString(str, 14, 20) #start indexing beyond source string length + @test_throws BoundsError SubString(str, 10, 16) #end indexing beyond source string length -@test SubString("foobar", big(1), big(3)) == "foo" - -let str = "aa\u2200\u2222bb" - u = SubString(str, 3, 6) - @test length(u) == 2 - b = IOBuffer() - write(b, u) - @test String(take!(b)) == "\u2200\u2222" - - @test_throws StringIndexError SubString(str, 4, 5) - @test_throws BoundsError iterate(u, 0) - @test_throws BoundsError iterate(u, 8) - @test_throws BoundsError getindex(u, 0) - @test_throws BoundsError getindex(u, 7) - @test_throws BoundsError getindex(u, 0:1) - @test_throws BoundsError getindex(u, 7:7) - @test reverseind(u, 1) == 4 - @test typeof(Base.cconvert(Ptr{Int8}, u)) == SubString{String} - @test Base.cconvert(Ptr{Int8}, u) == u -end + @test_throws BoundsError SubString("", 1, 4) #empty source string + @test_throws BoundsError SubString("", 1, 1) #empty source string, identical start and end index + @test_throws BoundsError SubString("", 10, 12) + @test SubString("", 12, 10) == "" + end -let str = "føøbar" - @test_throws BoundsError SubString(str, 10, 10) - u = SubString(str, 4, 3) - @test length(u) == 0 - b = IOBuffer() - write(b, u) - @test String(take!(b)) == "" -end + @test SubString("foobar", big(1), big(3)) == "foo" + + let str = "aa\u2200\u2222bb" + u = SubString(str, 3, 6) + @test length(u) == 2 + b = IOBuffer() + write(b, u) + @test String(take!(b)) == "\u2200\u2222" + + @test_throws StringIndexError SubString(str, 4, 5) + @test_throws BoundsError iterate(u, 0) + @test_throws BoundsError iterate(u, 8) + @test_throws BoundsError getindex(u, 0) + @test_throws BoundsError getindex(u, 7) + @test_throws BoundsError getindex(u, 0:1) + @test_throws BoundsError getindex(u, 7:7) + @test reverseind(u, 1) == 4 + @test typeof(Base.cconvert(Ptr{Int8}, u)) == SubString{String} + @test Base.cconvert(Ptr{Int8}, u) == u + end -# search and SubString (issue #5679) -let str = "Hello, world!" - u = SubString(str, 1, 5) - @test findlast("World", u) === nothing - @test findlast(isequal('z'), u) === nothing - @test findlast("ll", u) == 3:4 -end + let str = "føøbar" + @test_throws BoundsError SubString(str, 10, 10) + u = SubString(str, 4, 3) + @test length(u) == 0 + b = IOBuffer() + write(b, u) + @test String(take!(b)) == "" + end -# SubString created from SubString -let str = "Hello, world!" - u = SubString(str, 2, 5) - for idx in 1:4 - @test SubString(u, 2, idx) == u[2:idx] - @test SubString(u, 2:idx) == u[2:idx] + @testset "search and SubString (issue #5679)" begin + str = "Hello, world!" 
+ u = SubString(str, 1, 5) + @test findlast("World", u) === nothing + @test findlast(isequal('z'), u) === nothing + @test findlast("ll", u) == 3:4 end - @test_throws BoundsError SubString(u, 1, 10) - @test_throws BoundsError SubString(u, 1:10) - @test_throws BoundsError SubString(u, 20:30) - @test SubString(u, 20:15) == "" - @test_throws BoundsError SubString(u, -1:10) - @test SubString(u, -1, -10) == "" - @test SubString(SubString("123", 1, 2), -10, -20) == "" -end -# sizeof -@test sizeof(SubString("abc\u2222def",4,4)) == 3 - -# issue #3710 -@test prevind(SubString("{var}",2,4),4) == 3 - -# issue #4183 -@test split(SubString("x", 2, 0), "y") == [""] - -# issue #6772 -@test parse(Float64, SubString("10",1,1)) === 1.0 -@test parse(Float64, SubString("1 0",1,1)) === 1.0 -@test parse(Float32, SubString("10",1,1)) === 1.0f0 - -# issue #5870 -@test !occursin(Regex("aa"), SubString("",1,0)) -@test occursin(Regex(""), SubString("",1,0)) - -# isvalid, length, prevind, nextind for SubString{String} -let s = "lorem ipsum", sdict = Dict( - SubString(s, 1, 11) => "lorem ipsum", - SubString(s, 1, 6) => "lorem ", - SubString(s, 1, 0) => "", - SubString(s, 2, 4) => "ore", - SubString(s, 2, 11) => "orem ipsum", - SubString(s, 15, 14) => "", -) - for (ss, s) in sdict - @test ncodeunits(ss) == ncodeunits(s) - for i in -2:13 - @test isvalid(ss, i) == isvalid(s, i) - end - for i in 1:ncodeunits(ss), j = i-1:ncodeunits(ss) - @test length(ss, i, j) == length(s, i, j) + @testset "SubString created from SubString" begin + str = "Hello, world!" + u = SubString(str, 2, 5) + for idx in 1:4 + @test SubString(u, 2, idx) == u[2:idx] + @test SubString(u, 2:idx) == u[2:idx] end + @test_throws BoundsError SubString(u, 1, 10) + @test_throws BoundsError SubString(u, 1:10) + @test_throws BoundsError SubString(u, 20:30) + @test SubString(u, 20:15) == "" + @test_throws BoundsError SubString(u, -1:10) + @test SubString(u, -1, -10) == "" + @test SubString(SubString("123", 1, 2), -10, -20) == "" + end + + # sizeof + @test sizeof(SubString("abc\u2222def",4,4)) == 3 + + # issue #3710 + @test prevind(SubString("{var}",2,4),4) == 3 + + # issue #4183 + @test split(SubString("x", 2, 0), "y") == [""] + + @testset "issue #6772" begin + @test parse(Float64, SubString("10",1,1)) === 1.0 + @test parse(Float64, SubString("1 0",1,1)) === 1.0 + @test parse(Float32, SubString("10",1,1)) === 1.0f0 + end + + @testset "issue #5870" begin + @test !occursin(Regex("aa"), SubString("",1,0)) + @test occursin(Regex(""), SubString("",1,0)) end - for (ss, s) in sdict - @test length(ss) == length(s) - for i in 0:ncodeunits(ss), j = 0:length(ss)+1 - @test prevind(ss, i+1, j) == prevind(s, i+1, j) - @test nextind(ss, i, j) == nextind(s, i, j) + @testset" isvalid, length, prevind, nextind for SubString{String}" begin + s = "lorem ipsum" + sdict = Dict( + SubString(s, 1, 11) => "lorem ipsum", + SubString(s, 1, 6) => "lorem ", + SubString(s, 1, 0) => "", + SubString(s, 2, 4) => "ore", + SubString(s, 2, 11) => "orem ipsum", + SubString(s, 15, 14) => "", + ) + for (ss, s) in sdict + @test ncodeunits(ss) == ncodeunits(s) + for i in -2:13 + @test isvalid(ss, i) == isvalid(s, i) + end + for i in 1:ncodeunits(ss), j = i-1:ncodeunits(ss) + @test length(ss, i, j) == length(s, i, j) + end + end + for (ss, s) in sdict + @test length(ss) == length(s) + for i in 0:ncodeunits(ss), j = 0:length(ss)+1 + @test prevind(ss, i+1, j) == prevind(s, i+1, j) + @test nextind(ss, i, j) == nextind(s, i, j) + end + @test_throws BoundsError prevind(s, 0) + @test_throws BoundsError 
prevind(ss, 0) + @test_throws BoundsError nextind(s, ncodeunits(ss)+1) + @test_throws BoundsError nextind(ss, ncodeunits(ss)+1) end - @test_throws BoundsError prevind(s, 0) - @test_throws BoundsError prevind(ss, 0) - @test_throws BoundsError nextind(s, ncodeunits(ss)+1) - @test_throws BoundsError nextind(ss, ncodeunits(ss)+1) end -end -# proper nextind/prevind/thisind for SubString{String} -let rng = MersenneTwister(1), strs = ["∀∃∀"*String(rand(rng, UInt8, 40))*"∀∃∀", + rng = MersenneTwister(1) + strs = ["∀∃∀"*String(rand(rng, UInt8, 40))*"∀∃∀", String(rand(rng, UInt8, 50))] - for s in strs + @testset "proper nextind/prevind/thisind for SubString{String}: $(repr(s))" for s in strs a = 0 while a <= ncodeunits(s) a = nextind(s, a) @@ -223,111 +238,111 @@ let rng = MersenneTwister(1), strs = ["∀∃∀"*String(rand(rng, UInt8, 40))*" end end end -end -# for isvalid(SubString{String}) -let s = "Σx + βz - 2" - for i in -1:ncodeunits(s)+2 - if checkbounds(Bool, s, i) - if isvalid(s, i) - ss = SubString(s, 1, i) - for j = 1:ncodeunits(ss) - @test isvalid(ss, j) == isvalid(s, j) + # for isvalid(SubString{String}) + let s = "Σx + βz - 2" + for i in -1:ncodeunits(s)+2 + if checkbounds(Bool, s, i) + if isvalid(s, i) + ss = SubString(s, 1, i) + for j = 1:ncodeunits(ss) + @test isvalid(ss, j) == isvalid(s, j) + end + else + @test_throws StringIndexError SubString(s, 1, i) end + elseif i > 0 + @test_throws BoundsError SubString(s, 1, i) else - @test_throws StringIndexError SubString(s, 1, i) + @test SubString(s, 1, i) == "" end - elseif i > 0 - @test_throws BoundsError SubString(s, 1, i) - else - @test SubString(s, 1, i) == "" end end -end -let ss = SubString("hello", 1, 5) - @test length(ss, 1, 0) == 0 - @test_throws BoundsError length(ss, 1, -1) - @test_throws BoundsError length(ss, 1, 6) - @test_throws BoundsError length(ss, 1, 10) - @test_throws BoundsError prevind(ss, 0, 1) - @test prevind(ss, 1, 1) == 0 - @test prevind(ss, 6, 1) == 5 - @test_throws BoundsError prevind(ss, 7, 1) - @test_throws BoundsError nextind(ss, -1, 1) - @test nextind(ss, 0, 1) == 1 - @test nextind(ss, 5, 1) == 6 - @test_throws BoundsError nextind(ss, 6, 1) -end + let ss = SubString("hello", 1, 5) + @test length(ss, 1, 0) == 0 + @test_throws BoundsError length(ss, 1, -1) + @test_throws BoundsError length(ss, 1, 6) + @test_throws BoundsError length(ss, 1, 10) + @test_throws BoundsError prevind(ss, 0, 1) + @test prevind(ss, 1, 1) == 0 + @test prevind(ss, 6, 1) == 5 + @test_throws BoundsError prevind(ss, 7, 1) + @test_throws BoundsError nextind(ss, -1, 1) + @test nextind(ss, 0, 1) == 1 + @test nextind(ss, 5, 1) == 6 + @test_throws BoundsError nextind(ss, 6, 1) + end -# length(SubString{String}) performance specialization -let s = "|η(α)-ϕ(κ)| < ε" - @test length(SubString(s, 1, 0)) == length(s[1:0]) - @test length(SubString(s, 4, 4)) == length(s[4:4]) - @test length(SubString(s, 1, 7)) == length(s[1:7]) - @test length(SubString(s, 4, 11)) == length(s[4:11]) -end + # length(SubString{String}) performance specialization + let s = "|η(α)-ϕ(κ)| < ε" + @test length(SubString(s, 1, 0)) == length(s[1:0]) + @test length(SubString(s, 4, 4)) == length(s[4:4]) + @test length(SubString(s, 1, 7)) == length(s[1:7]) + @test length(SubString(s, 4, 11)) == length(s[4:11]) + end -@testset "reverseind" for T in (String, SubString, GenericString) - for prefix in ("", "abcd", "\U0001d6a4\U0001d4c1", "\U0001d6a4\U0001d4c1c", " \U0001d6a4\U0001d4c1") - for suffix in ("", "abcde", "\U0001d4c1β\U0001d6a4", "\U0001d4c1β\U0001d6a4c", " \U0001d4c1β\U0001d6a4") 
- for c in ('X', 'δ', '\U0001d6a5') - s = convert(T, string(prefix, c, suffix)) - r = reverse(s) - ri = findfirst(isequal(c), r) - @test c == s[reverseind(s, ri)] == r[ri] - s = convert(T, string(prefix, prefix, c, suffix, suffix)) - pre = convert(T, prefix) - sb = SubString(s, nextind(pre, lastindex(pre)), - lastindex(convert(T, string(prefix, prefix, c, suffix)))) - r = reverse(sb) - ri = findfirst(isequal(c), r) - @test c == sb[reverseind(sb, ri)] == r[ri] + @testset "reverseind" for T in (String, SubString, GenericString) + for prefix in ("", "abcd", "\U0001d6a4\U0001d4c1", "\U0001d6a4\U0001d4c1c", " \U0001d6a4\U0001d4c1") + for suffix in ("", "abcde", "\U0001d4c1β\U0001d6a4", "\U0001d4c1β\U0001d6a4c", " \U0001d4c1β\U0001d6a4") + for c in ('X', 'δ', '\U0001d6a5') + s = convert(T, string(prefix, c, suffix)) + r = reverse(s) + ri = findfirst(isequal(c), r) + @test c == s[reverseind(s, ri)] == r[ri] + s = convert(T, string(prefix, prefix, c, suffix, suffix)) + pre = convert(T, prefix) + sb = SubString(s, nextind(pre, lastindex(pre)), + lastindex(convert(T, string(prefix, prefix, c, suffix)))) + r = reverse(sb) + ri = findfirst(isequal(c), r) + @test c == sb[reverseind(sb, ri)] == r[ri] + end end end end -end -@testset "reverseind of empty strings" begin - for s in ("", - SubString("", 1, 0), - SubString("ab", 1, 0), - SubString("ab", 2, 1), - SubString("ab", 3, 2), - GenericString("")) - @test reverseind(s, 0) == 1 - @test reverseind(s, 1) == 0 + @testset "reverseind of empty strings" begin + for s in ("", + SubString("", 1, 0), + SubString("ab", 1, 0), + SubString("ab", 2, 1), + SubString("ab", 3, 2), + GenericString("")) + @test reverseind(s, 0) == 1 + @test reverseind(s, 1) == 0 + end end end -## Cstring tests ## - -@testset "issue #13974: comparison against pointers" begin - str = String("foobar") - ptr = pointer(str) - cstring = Cstring(ptr) - @test ptr == cstring - @test cstring == ptr - - # convenient NULL string creation from Ptr{Cvoid} - nullstr = Cstring(C_NULL) - - # Comparisons against NULL strings - @test ptr != nullstr - @test nullstr != ptr - - # Short-hand comparison against C_NULL - @test nullstr == C_NULL - @test C_NULL == nullstr - @test cstring != C_NULL - @test C_NULL != cstring -end +@testset "Cstring" begin + @testset "issue #13974: comparison against pointers" begin + str = String("foobar") + ptr = pointer(str) + cstring = Cstring(ptr) + @test ptr == cstring + @test cstring == ptr + + # convenient NULL string creation from Ptr{Cvoid} + nullstr = Cstring(C_NULL) + + # Comparisons against NULL strings + @test ptr != nullstr + @test nullstr != ptr + + # Short-hand comparison against C_NULL + @test nullstr == C_NULL + @test C_NULL == nullstr + @test cstring != C_NULL + @test C_NULL != cstring + end -@testset "issue #31381: eltype(Cstring) != Cchar" begin - s = Cstring(C_NULL) - @test eltype(Cstring) == Cchar - @test eltype(s) == Cchar - @test pointer(s) isa Ptr{Cchar} + @testset "issue #31381: eltype(Cstring) != Cchar" begin + s = Cstring(C_NULL) + @test eltype(Cstring) == Cchar + @test eltype(s) == Cchar + @test pointer(s) isa Ptr{Cchar} + end end @testset "Codeunits" begin From 467ab852bb7392843ce74a17de89c221e0f64df0 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Fri, 13 Sep 2024 08:18:03 -0400 Subject: [PATCH 022/537] Reorganize search tests into testsets (#55658) Some of these tests are nearly 10 years old! Organized some of them into testsets just in case one breaks in the future, should make it easier to find the problem. 
--------- Co-authored-by: Simeon David Schaub --- test/strings/search.jl | 506 +++++++++++++++++++++-------------------- 1 file changed, 258 insertions(+), 248 deletions(-) diff --git a/test/strings/search.jl b/test/strings/search.jl index d8883bad24b48..c43327fe2971b 100644 --- a/test/strings/search.jl +++ b/test/strings/search.jl @@ -4,26 +4,27 @@ astr = "Hello, world.\n" u8str = "∀ ε > 0, ∃ δ > 0: |x-y| < δ ⇒ |f(x)-f(y)| < ε" -# I think these should give error on 4 also, and "" is not treated -# consistently with SubString("",1,1), nor with Char[] -for ind in (0, 5) - @test_throws BoundsError findnext(SubString("",1,1), "foo", ind) - @test_throws BoundsError findprev(SubString("",1,1), "foo", ind) -end +@testset "BoundsError for findnext/findprev" begin + # I think these should give error on 4 also, and "" is not treated + # consistently with SubString("",1,1), nor with Char[] + for ind in (0, 5) + @test_throws BoundsError findnext(SubString("",1,1), "foo", ind) + @test_throws BoundsError findprev(SubString("",1,1), "foo", ind) + end -# Note: the commented out test will be enabled after fixes to make -# sure that findnext/findprev are consistent -# no matter what type of AbstractString the second argument is -@test_throws BoundsError findnext(isequal('a'), "foo", 0) -@test_throws BoundsError findnext(in(Char[]), "foo", 5) -# @test_throws BoundsError findprev(in(Char[]), "foo", 0) -@test_throws BoundsError findprev(in(Char[]), "foo", 5) + # Note: the commented out test will be enabled after fixes to make + # sure that findnext/findprev are consistent + # no matter what type of AbstractString the second argument is + @test_throws BoundsError findnext(isequal('a'), "foo", 0) + @test_throws BoundsError findnext(in(Char[]), "foo", 5) + # @test_throws BoundsError findprev(in(Char[]), "foo", 0) + @test_throws BoundsError findprev(in(Char[]), "foo", 5) -# @test_throws ErrorException in("foobar","bar") -@test_throws BoundsError findnext(isequal(0x1),b"\x1\x2",0) + # @test_throws ErrorException in("foobar","bar") + @test_throws BoundsError findnext(isequal(0x1),b"\x1\x2",0) +end -# ascii forward search -for str in [astr, GenericString(astr)] +@testset "ascii forward search $(typeof(str))" for str in [astr, GenericString(astr)] @test_throws BoundsError findnext(isequal('z'), str, 0) @test_throws BoundsError findnext(isequal('∀'), str, 0) @test findfirst(isequal('x'), str) === nothing @@ -41,9 +42,7 @@ for str in [astr, GenericString(astr)] @test findnext(isequal('\n'), str, 15) === nothing @test_throws BoundsError findnext(isequal('ε'), str, nextind(str,lastindex(str))+1) @test_throws BoundsError findnext(isequal('a'), str, nextind(str,lastindex(str))+1) -end -for str in [astr, GenericString(astr)] @test_throws BoundsError findnext('z', str, 0) @test_throws BoundsError findnext('∀', str, 0) @test findfirst('x', str) === nothing @@ -65,8 +64,8 @@ for str in [astr, GenericString(astr)] @test_throws BoundsError findnext('a', str, nextind(str,lastindex(str))+1) end -# ascii backward search -for str in [astr] +@testset "ascii backward search" begin + str = astr @test findlast(isequal('x'), str) === nothing @test findlast(isequal('\0'), str) === nothing @test findlast(isequal('\u80'), str) === nothing @@ -81,9 +80,7 @@ for str in [astr] @test findlast(isequal(','), str) == 6 @test findprev(isequal(','), str, 5) === nothing @test findlast(isequal('\n'), str) == 14 -end -for str in [astr] @test findlast('x', str) === nothing @test findlast('\0', str) === nothing @test findlast('\u80', str) === nothing @@ 
-102,8 +99,7 @@ for str in [astr] @test findlast('\n', str) == 14 end -# utf-8 forward search -for str in (u8str, GenericString(u8str)) +@testset "utf-8 forward search $(typeof(str))" for str in (u8str, GenericString(u8str)) @test_throws BoundsError findnext(isequal('z'), str, 0) @test_throws BoundsError findnext(isequal('∀'), str, 0) @test findfirst(isequal('z'), str) === nothing @@ -132,8 +128,8 @@ for str in (u8str, GenericString(u8str)) @test_throws BoundsError findnext(isequal('a'), str, nextind(str,lastindex(str))+1) end -# utf-8 backward search -for str in [u8str] +@testset "utf-8 backward search" begin + str = u8str @test findlast(isequal('z'), str) === nothing @test findlast(isequal('\0'), str) === nothing @test findlast(isequal('\u80'), str) === nothing @@ -155,6 +151,128 @@ for str in [u8str] @test findprev(isequal('ε'), str, 4) === nothing end +@testset "string forward search with a single-char string" begin + @test findfirst("x", astr) === nothing + @test findfirst("H", astr) == 1:1 + @test findnext("H", astr, 2) === nothing + @test findfirst("l", astr) == 3:3 + @test findnext("l", astr, 4) == 4:4 + @test findnext("l", astr, 5) == 11:11 + @test findnext("l", astr, 12) === nothing + @test findfirst("\n", astr) == 14:14 + @test findnext("\n", astr, 15) === nothing + + @test findfirst("z", u8str) === nothing + @test findfirst("∄", u8str) === nothing + @test findfirst("∀", u8str) == 1:1 + @test findnext("∀", u8str, 4) === nothing + @test findfirst("∃", u8str) == 13:13 + @test findnext("∃", u8str, 16) === nothing + @test findfirst("x", u8str) == 26:26 + @test findnext("x", u8str, 27) == 43:43 + @test findnext("x", u8str, 44) === nothing + @test findfirst("ε", u8str) == 5:5 + @test findnext("ε", u8str, 7) == 54:54 + @test findnext("ε", u8str, 56) === nothing +end + +@testset "findprev backward search with a single-char string" begin + @test findlast("x", astr) === nothing + @test findlast("H", astr) == 1:1 + @test findprev("H", astr, 2) == 1:1 + @test findprev("H", astr, 0) === nothing + @test findlast("l", astr) == 11:11 + @test findprev("l", astr, 10) == 4:4 + @test findprev("l", astr, 4) == 4:4 + @test findprev("l", astr, 3) == 3:3 + @test findprev("l", astr, 2) === nothing + @test findlast("\n", astr) == 14:14 + @test findprev("\n", astr, 13) === nothing + + @test findlast("z", u8str) === nothing + @test findlast("∄", u8str) === nothing + @test findlast("∀", u8str) == 1:1 + @test findprev("∀", u8str, 0) === nothing + #TODO: setting the limit in the middle of a wide char + # makes findnext fail but findprev succeed. + # Should findprev fail as well? 
+ #@test findprev("∀", u8str, 2) === nothing # gives 1:3 + @test findlast("∃", u8str) == 13:13 + @test findprev("∃", u8str, 12) === nothing + @test findlast("x", u8str) == 43:43 + @test findprev("x", u8str, 42) == 26:26 + @test findprev("x", u8str, 25) === nothing + @test findlast("ε", u8str) == 54:54 + @test findprev("ε", u8str, 53) == 5:5 + @test findprev("ε", u8str, 4) === nothing +end + +@testset "string forward search with a single-char regex" begin + @test findfirst(r"x", astr) === nothing + @test findfirst(r"H", astr) == 1:1 + @test findnext(r"H", astr, 2) === nothing + @test findfirst(r"l", astr) == 3:3 + @test findnext(r"l", astr, 4) == 4:4 + @test findnext(r"l", astr, 5) == 11:11 + @test findnext(r"l", astr, 12) === nothing + @test findfirst(r"\n", astr) == 14:14 + @test findnext(r"\n", astr, 15) === nothing + @test findfirst(r"z", u8str) === nothing + @test findfirst(r"∄", u8str) === nothing + @test findfirst(r"∀", u8str) == 1:1 + @test findnext(r"∀", u8str, 4) === nothing + @test findfirst(r"∀", u8str) == findfirst(r"\u2200", u8str) + @test findnext(r"∀", u8str, 4) == findnext(r"\u2200", u8str, 4) + @test findfirst(r"∃", u8str) == 13:13 + @test findnext(r"∃", u8str, 16) === nothing + @test findfirst(r"x", u8str) == 26:26 + @test findnext(r"x", u8str, 27) == 43:43 + @test findnext(r"x", u8str, 44) === nothing + @test findfirst(r"ε", u8str) == 5:5 + @test findnext(r"ε", u8str, 7) == 54:54 + @test findnext(r"ε", u8str, 56) === nothing + for i = 1:lastindex(astr) + @test findnext(r"."s, astr, i) == i:i + end + for i = 1:lastindex(u8str) + if isvalid(u8str,i) + @test findnext(r"."s, u8str, i) == i:i + end + end +end + +@testset "string forward search with a zero-char string" begin + for i = 1:lastindex(astr) + @test findnext("", astr, i) == i:i-1 + end + for i = 1:lastindex(u8str) + @test findnext("", u8str, i) == i:i-1 + end + @test findfirst("", "") === 1:0 +end + +@testset "string backward search with a zero-char string" begin + for i = 1:lastindex(astr) + @test findprev("", astr, i) == i:i-1 + end + for i = 1:lastindex(u8str) + @test findprev("", u8str, i) == i:i-1 + end + @test findlast("", "") === 1:0 +end + +@testset "string forward search with a zero-char regex" begin + for i = 1:lastindex(astr) + @test findnext(r"", astr, i) == i:i-1 + end + for i = 1:lastindex(u8str) + # TODO: should regex search fast-forward invalid indices? 
+ if isvalid(u8str,i) + @test findnext(r"", u8str, i) == i:i-1 + end + end +end + # See the comments in #54579 @testset "Search for invalid chars" begin @test findfirst(==('\xff'), "abc\xffde") == 4 @@ -165,238 +283,130 @@ end @test isnothing(findprev(==('\xa6'), "æa", 2)) end -# string forward search with a single-char string -@test findfirst("x", astr) === nothing -@test findfirst("H", astr) == 1:1 -@test findnext("H", astr, 2) === nothing -@test findfirst("l", astr) == 3:3 -@test findnext("l", astr, 4) == 4:4 -@test findnext("l", astr, 5) == 11:11 -@test findnext("l", astr, 12) === nothing -@test findfirst("\n", astr) == 14:14 -@test findnext("\n", astr, 15) === nothing - -@test findfirst("z", u8str) === nothing -@test findfirst("∄", u8str) === nothing -@test findfirst("∀", u8str) == 1:1 -@test findnext("∀", u8str, 4) === nothing -@test findfirst("∃", u8str) == 13:13 -@test findnext("∃", u8str, 16) === nothing -@test findfirst("x", u8str) == 26:26 -@test findnext("x", u8str, 27) == 43:43 -@test findnext("x", u8str, 44) === nothing -@test findfirst("ε", u8str) == 5:5 -@test findnext("ε", u8str, 7) == 54:54 -@test findnext("ε", u8str, 56) === nothing - -# strifindprev backward search with a single-char string -@test findlast("x", astr) === nothing -@test findlast("H", astr) == 1:1 -@test findprev("H", astr, 2) == 1:1 -@test findprev("H", astr, 0) === nothing -@test findlast("l", astr) == 11:11 -@test findprev("l", astr, 10) == 4:4 -@test findprev("l", astr, 4) == 4:4 -@test findprev("l", astr, 3) == 3:3 -@test findprev("l", astr, 2) === nothing -@test findlast("\n", astr) == 14:14 -@test findprev("\n", astr, 13) === nothing - -@test findlast("z", u8str) === nothing -@test findlast("∄", u8str) === nothing -@test findlast("∀", u8str) == 1:1 -@test findprev("∀", u8str, 0) === nothing -#TODO: setting the limit in the middle of a wide char -# makes findnext fail but findprev succeed. -# Should findprev fail as well? 
-#@test findprev("∀", u8str, 2) === nothing # gives 1:3 -@test findlast("∃", u8str) == 13:13 -@test findprev("∃", u8str, 12) === nothing -@test findlast("x", u8str) == 43:43 -@test findprev("x", u8str, 42) == 26:26 -@test findprev("x", u8str, 25) === nothing -@test findlast("ε", u8str) == 54:54 -@test findprev("ε", u8str, 53) == 5:5 -@test findprev("ε", u8str, 4) === nothing - -# string forward search with a single-char regex -@test findfirst(r"x", astr) === nothing -@test findfirst(r"H", astr) == 1:1 -@test findnext(r"H", astr, 2) === nothing -@test findfirst(r"l", astr) == 3:3 -@test findnext(r"l", astr, 4) == 4:4 -@test findnext(r"l", astr, 5) == 11:11 -@test findnext(r"l", astr, 12) === nothing -@test findfirst(r"\n", astr) == 14:14 -@test findnext(r"\n", astr, 15) === nothing -@test findfirst(r"z", u8str) === nothing -@test findfirst(r"∄", u8str) === nothing -@test findfirst(r"∀", u8str) == 1:1 -@test findnext(r"∀", u8str, 4) === nothing -@test findfirst(r"∀", u8str) == findfirst(r"\u2200", u8str) -@test findnext(r"∀", u8str, 4) == findnext(r"\u2200", u8str, 4) -@test findfirst(r"∃", u8str) == 13:13 -@test findnext(r"∃", u8str, 16) === nothing -@test findfirst(r"x", u8str) == 26:26 -@test findnext(r"x", u8str, 27) == 43:43 -@test findnext(r"x", u8str, 44) === nothing -@test findfirst(r"ε", u8str) == 5:5 -@test findnext(r"ε", u8str, 7) == 54:54 -@test findnext(r"ε", u8str, 56) === nothing -for i = 1:lastindex(astr) - @test findnext(r"."s, astr, i) == i:i -end -for i = 1:lastindex(u8str) - if isvalid(u8str,i) - @test findnext(r"."s, u8str, i) == i:i - end +@testset "string forward search with a two-char string literal" begin + @test findfirst("xx", "foo,bar,baz") === nothing + @test findfirst("fo", "foo,bar,baz") == 1:2 + @test findnext("fo", "foo,bar,baz", 3) === nothing + @test findfirst("oo", "foo,bar,baz") == 2:3 + @test findnext("oo", "foo,bar,baz", 4) === nothing + @test findfirst("o,", "foo,bar,baz") == 3:4 + @test findnext("o,", "foo,bar,baz", 5) === nothing + @test findfirst(",b", "foo,bar,baz") == 4:5 + @test findnext(",b", "foo,bar,baz", 6) == 8:9 + @test findnext(",b", "foo,bar,baz", 10) === nothing + @test findfirst("az", "foo,bar,baz") == 10:11 + @test findnext("az", "foo,bar,baz", 12) === nothing end -# string forward search with a zero-char string -for i = 1:lastindex(astr) - @test findnext("", astr, i) == i:i-1 +@testset "issue #9365" begin + # string forward search with a two-char UTF-8 (2 byte) string literal + @test findfirst("éé", "ééé") == 1:3 + @test findnext("éé", "ééé", 1) == 1:3 + # string forward search with a two-char UTF-8 (3 byte) string literal + @test findfirst("€€", "€€€") == 1:4 + @test findnext("€€", "€€€", 1) == 1:4 + # string forward search with a two-char UTF-8 (4 byte) string literal + @test findfirst("\U1f596\U1f596", "\U1f596\U1f596\U1f596") == 1:5 + @test findnext("\U1f596\U1f596", "\U1f596\U1f596\U1f596", 1) == 1:5 + + # string forward search with a two-char UTF-8 (2 byte) string literal + @test findfirst("éé", "éé") == 1:3 + @test findnext("éé", "éé", 1) == 1:3 + # string forward search with a two-char UTF-8 (3 byte) string literal + @test findfirst("€€", "€€") == 1:4 + @test findnext("€€", "€€", 1) == 1:4 + # string forward search with a two-char UTF-8 (4 byte) string literal + @test findfirst("\U1f596\U1f596", "\U1f596\U1f596") == 1:5 + @test findnext("\U1f596\U1f596", "\U1f596\U1f596", 1) == 1:5 + + # string backward search with a two-char UTF-8 (2 byte) string literal + @test findlast("éé", "ééé") == 3:5 + @test findprev("éé", "ééé", 
lastindex("ééé")) == 3:5 + # string backward search with a two-char UTF-8 (3 byte) string literal + @test findlast("€€", "€€€") == 4:7 + @test findprev("€€", "€€€", lastindex("€€€")) == 4:7 + # string backward search with a two-char UTF-8 (4 byte) string literal + @test findlast("\U1f596\U1f596", "\U1f596\U1f596\U1f596") == 5:9 + @test findprev("\U1f596\U1f596", "\U1f596\U1f596\U1f596", lastindex("\U1f596\U1f596\U1f596")) == 5:9 + + # string backward search with a two-char UTF-8 (2 byte) string literal + @test findlast("éé", "éé") == 1:3 # should really be 1:4! + @test findprev("éé", "éé", lastindex("ééé")) == 1:3 + # string backward search with a two-char UTF-8 (3 byte) string literal + @test findlast("€€", "€€") == 1:4 # should really be 1:6! + @test findprev("€€", "€€", lastindex("€€€")) == 1:4 + # string backward search with a two-char UTF-8 (4 byte) string literal + @test findlast("\U1f596\U1f596", "\U1f596\U1f596") == 1:5 # should really be 1:8! + @test findprev("\U1f596\U1f596", "\U1f596\U1f596", lastindex("\U1f596\U1f596\U1f596")) == 1:5 end -for i = 1:lastindex(u8str) - @test findnext("", u8str, i) == i:i-1 + +@testset "string backward search with a two-char string literal" begin + @test findlast("xx", "foo,bar,baz") === nothing + @test findlast("fo", "foo,bar,baz") == 1:2 + @test findprev("fo", "foo,bar,baz", 1) === nothing + @test findlast("oo", "foo,bar,baz") == 2:3 + @test findprev("oo", "foo,bar,baz", 2) === nothing + @test findlast("o,", "foo,bar,baz") == 3:4 + @test findprev("o,", "foo,bar,baz", 1) === nothing + @test findlast(",b", "foo,bar,baz") == 8:9 + @test findprev(",b", "foo,bar,baz", 6) == 4:5 + @test findprev(",b", "foo,bar,baz", 3) === nothing + @test findlast("az", "foo,bar,baz") == 10:11 + @test findprev("az", "foo,bar,baz", 10) === nothing end -@test findfirst("", "") === 1:0 -# string backward search with a zero-char string -for i = 1:lastindex(astr) - @test findprev("", astr, i) == i:i-1 +@testset "string search with a two-char regex" begin + @test findfirst(r"xx", "foo,bar,baz") === nothing + @test findfirst(r"fo", "foo,bar,baz") == 1:2 + @test findnext(r"fo", "foo,bar,baz", 3) === nothing + @test findfirst(r"oo", "foo,bar,baz") == 2:3 + @test findnext(r"oo", "foo,bar,baz", 4) === nothing + @test findfirst(r"o,", "foo,bar,baz") == 3:4 + @test findnext(r"o,", "foo,bar,baz", 5) === nothing + @test findfirst(r",b", "foo,bar,baz") == 4:5 + @test findnext(r",b", "foo,bar,baz", 6) == 8:9 + @test findnext(r",b", "foo,bar,baz", 10) === nothing + @test findfirst(r"az", "foo,bar,baz") == 10:11 + @test findnext(r"az", "foo,bar,baz", 12) === nothing end -for i = 1:lastindex(u8str) - @test findprev("", u8str, i) == i:i-1 + +@testset "occursin/contains" begin + # occursin with a String and Char needle + @test occursin("o", "foo") + @test occursin('o', "foo") + # occursin in curried form + @test occursin("foo")("o") + @test occursin("foo")('o') + + # contains + @test contains("foo", "o") + @test contains("foo", 'o') + # contains in curried form + @test contains("o")("foo") + @test contains('o')("foo") + + @test_throws ErrorException "ab" ∈ "abc" end -@test findlast("", "") === 1:0 -# string forward search with a zero-char regex -for i = 1:lastindex(astr) - @test findnext(r"", astr, i) == i:i-1 +@testset "issue #15723" begin + @test findfirst(isequal('('), "⨳(") == 4 + @test findnext(isequal('('), "(⨳(", 2) == 5 + @test findlast(isequal('('), "(⨳(") == 5 + @test findprev(isequal('('), "(⨳(", 2) == 1 + + @test @inferred findall(isequal('a'), "éa") == [3] + @test @inferred 
findall(isequal('€'), "€€") == [1, 4] + @test @inferred isempty(findall(isequal('é'), "")) end -for i = 1:lastindex(u8str) - # TODO: should regex search fast-forward invalid indices? - if isvalid(u8str,i) - @test findnext(r"", u8str, i) == i:i-1 - end + + +@testset "issue #18109" begin + s_18109 = "fooα🐨βcd3" + @test findlast(isequal('o'), s_18109) == 3 + @test findfirst(isequal('d'), s_18109) == 13 end -# string forward search with a two-char string literal -@test findfirst("xx", "foo,bar,baz") === nothing -@test findfirst("fo", "foo,bar,baz") == 1:2 -@test findnext("fo", "foo,bar,baz", 3) === nothing -@test findfirst("oo", "foo,bar,baz") == 2:3 -@test findnext("oo", "foo,bar,baz", 4) === nothing -@test findfirst("o,", "foo,bar,baz") == 3:4 -@test findnext("o,", "foo,bar,baz", 5) === nothing -@test findfirst(",b", "foo,bar,baz") == 4:5 -@test findnext(",b", "foo,bar,baz", 6) == 8:9 -@test findnext(",b", "foo,bar,baz", 10) === nothing -@test findfirst("az", "foo,bar,baz") == 10:11 -@test findnext("az", "foo,bar,baz", 12) === nothing - -# issue #9365 -# string forward search with a two-char UTF-8 (2 byte) string literal -@test findfirst("éé", "ééé") == 1:3 -@test findnext("éé", "ééé", 1) == 1:3 -# string forward search with a two-char UTF-8 (3 byte) string literal -@test findfirst("€€", "€€€") == 1:4 -@test findnext("€€", "€€€", 1) == 1:4 -# string forward search with a two-char UTF-8 (4 byte) string literal -@test findfirst("\U1f596\U1f596", "\U1f596\U1f596\U1f596") == 1:5 -@test findnext("\U1f596\U1f596", "\U1f596\U1f596\U1f596", 1) == 1:5 - -# string forward search with a two-char UTF-8 (2 byte) string literal -@test findfirst("éé", "éé") == 1:3 -@test findnext("éé", "éé", 1) == 1:3 -# string forward search with a two-char UTF-8 (3 byte) string literal -@test findfirst("€€", "€€") == 1:4 -@test findnext("€€", "€€", 1) == 1:4 -# string forward search with a two-char UTF-8 (4 byte) string literal -@test findfirst("\U1f596\U1f596", "\U1f596\U1f596") == 1:5 -@test findnext("\U1f596\U1f596", "\U1f596\U1f596", 1) == 1:5 - -# string backward search with a two-char UTF-8 (2 byte) string literal -@test findlast("éé", "ééé") == 3:5 -@test findprev("éé", "ééé", lastindex("ééé")) == 3:5 -# string backward search with a two-char UTF-8 (3 byte) string literal -@test findlast("€€", "€€€") == 4:7 -@test findprev("€€", "€€€", lastindex("€€€")) == 4:7 -# string backward search with a two-char UTF-8 (4 byte) string literal -@test findlast("\U1f596\U1f596", "\U1f596\U1f596\U1f596") == 5:9 -@test findprev("\U1f596\U1f596", "\U1f596\U1f596\U1f596", lastindex("\U1f596\U1f596\U1f596")) == 5:9 - -# string backward search with a two-char UTF-8 (2 byte) string literal -@test findlast("éé", "éé") == 1:3 # should really be 1:4! -@test findprev("éé", "éé", lastindex("ééé")) == 1:3 -# string backward search with a two-char UTF-8 (3 byte) string literal -@test findlast("€€", "€€") == 1:4 # should really be 1:6! -@test findprev("€€", "€€", lastindex("€€€")) == 1:4 -# string backward search with a two-char UTF-8 (4 byte) string literal -@test findlast("\U1f596\U1f596", "\U1f596\U1f596") == 1:5 # should really be 1:8! 
-@test findprev("\U1f596\U1f596", "\U1f596\U1f596", lastindex("\U1f596\U1f596\U1f596")) == 1:5 - -# string backward search with a two-char string literal -@test findlast("xx", "foo,bar,baz") === nothing -@test findlast("fo", "foo,bar,baz") == 1:2 -@test findprev("fo", "foo,bar,baz", 1) === nothing -@test findlast("oo", "foo,bar,baz") == 2:3 -@test findprev("oo", "foo,bar,baz", 2) === nothing -@test findlast("o,", "foo,bar,baz") == 3:4 -@test findprev("o,", "foo,bar,baz", 1) === nothing -@test findlast(",b", "foo,bar,baz") == 8:9 -@test findprev(",b", "foo,bar,baz", 6) == 4:5 -@test findprev(",b", "foo,bar,baz", 3) === nothing -@test findlast("az", "foo,bar,baz") == 10:11 -@test findprev("az", "foo,bar,baz", 10) === nothing - -# string search with a two-char regex -@test findfirst(r"xx", "foo,bar,baz") === nothing -@test findfirst(r"fo", "foo,bar,baz") == 1:2 -@test findnext(r"fo", "foo,bar,baz", 3) === nothing -@test findfirst(r"oo", "foo,bar,baz") == 2:3 -@test findnext(r"oo", "foo,bar,baz", 4) === nothing -@test findfirst(r"o,", "foo,bar,baz") == 3:4 -@test findnext(r"o,", "foo,bar,baz", 5) === nothing -@test findfirst(r",b", "foo,bar,baz") == 4:5 -@test findnext(r",b", "foo,bar,baz", 6) == 8:9 -@test findnext(r",b", "foo,bar,baz", 10) === nothing -@test findfirst(r"az", "foo,bar,baz") == 10:11 -@test findnext(r"az", "foo,bar,baz", 12) === nothing - -# occursin with a String and Char needle -@test occursin("o", "foo") -@test occursin('o', "foo") -# occursin in curried form -@test occursin("foo")("o") -@test occursin("foo")('o') - -# contains -@test contains("foo", "o") -@test contains("foo", 'o') -# contains in curried form -@test contains("o")("foo") -@test contains('o')("foo") - -@test_throws ErrorException "ab" ∈ "abc" - -# issue #15723 -@test findfirst(isequal('('), "⨳(") == 4 -@test findnext(isequal('('), "(⨳(", 2) == 5 -@test findlast(isequal('('), "(⨳(") == 5 -@test findprev(isequal('('), "(⨳(", 2) == 1 - -@test @inferred findall(isequal('a'), "éa") == [3] -@test @inferred findall(isequal('€'), "€€") == [1, 4] -@test @inferred isempty(findall(isequal('é'), "")) - -# issue #18109 -s_18109 = "fooα🐨βcd3" -@test findlast(isequal('o'), s_18109) == 3 -@test findfirst(isequal('d'), s_18109) == 13 - -# findall (issue #31788) -@testset "findall" begin +@testset "findall (issue #31788)" begin @test findall("fooo", "foo") == UnitRange{Int}[] @test findall("ing", "Spinning laughing dancing") == [6:8, 15:17, 23:25] @test all(findall("", "foo") .=== [1:0, 2:1, 3:2, 4:3]) # use === to compare empty ranges From 2616634a17fdd286e64d16d454bb8077c54d51c9 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson Date: Fri, 13 Sep 2024 15:57:19 -0400 Subject: [PATCH 023/537] fix #45494, error in ssa conversion with complex type decl (#55744) We were missing a call to `renumber-assigned-ssavalues` in the case where the declared type is used to assert the type of a value taken from a closure box. --- src/julia-syntax.scm | 5 ++++- test/syntax.jl | 10 ++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index 6815921375184..d6bc03091f37b 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -3977,7 +3977,10 @@ f(x) = yt(x) (val (if (equal? typ '(core Any)) val `(call (core typeassert) ,val - ,(cl-convert typ fname lam namemap defined toplevel interp opaq parsed-method-stack globals locals))))) + ,(let ((convt (cl-convert typ fname lam namemap defined toplevel interp opaq parsed-method-stack globals locals))) + (if (or (symbol-like? 
convt) (quoted? convt)) + convt + (renumber-assigned-ssavalues convt))))))) `(block ,@(if (eq? box access) '() `((= ,access ,box))) ,undefcheck diff --git a/test/syntax.jl b/test/syntax.jl index da69bd98dc010..1b630a56f84f8 100644 --- a/test/syntax.jl +++ b/test/syntax.jl @@ -3975,3 +3975,13 @@ module UsingFailedExplicit using .A: x as x @test x === 1 end + +# issue #45494 +begin + local b::Tuple{<:Any} = (0,) + function f45494() + b = b + b + end +end +@test f45494() === (0,) From 2ee655139d2ac9bae9e33e7af318e477aa40c2c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Fri, 13 Sep 2024 23:47:28 +0100 Subject: [PATCH 024/537] Revert "Avoid materializing arrays in bidiag matmul" (#55737) Reverts JuliaLang/julia#55450. @jishnub suggested reverting this PR to fix #55727. --- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 4 +- stdlib/LinearAlgebra/src/bidiag.jl | 330 +++------------------- stdlib/LinearAlgebra/test/bidiag.jl | 85 ++---- stdlib/LinearAlgebra/test/tridiag.jl | 71 ----- 4 files changed, 68 insertions(+), 422 deletions(-) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 17216845b350c..27d4255fb656b 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -673,9 +673,7 @@ matprod_dest(A::Diagonal, B::Diagonal, TS) = _matprod_dest_diag(B, TS) _matprod_dest_diag(A, TS) = similar(A, TS) function _matprod_dest_diag(A::SymTridiagonal, TS) n = size(A, 1) - ev = similar(A, TS, max(0, n-1)) - dv = similar(A, TS, n) - Tridiagonal(ev, dv, similar(ev)) + Tridiagonal(similar(A, TS, n-1), similar(A, TS, n), similar(A, TS, n-1)) end # Special handling for adj/trans vec diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 8bc5b1c47f366..d86bad7e41435 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -557,8 +557,7 @@ end # function to get the internally stored vectors for Bidiagonal and [Sym]Tridiagonal # to avoid allocations in _mul! below (#24324, #24578) _diag(A::Tridiagonal, k) = k == -1 ? A.dl : k == 0 ? A.d : A.du -_diag(A::SymTridiagonal{<:Number}, k) = k == 0 ? A.dv : A.ev -_diag(A::SymTridiagonal, k) = k == 0 ? view(A, diagind(A, IndexStyle(A))) : view(A, diagind(A, 1, IndexStyle(A))) +_diag(A::SymTridiagonal, k) = k == 0 ? A.dv : A.ev function _diag(A::Bidiagonal, k) if k == 0 return A.dv @@ -578,45 +577,12 @@ function _bibimul!(C, A, B, _add) check_A_mul_B!_sizes(size(C), size(A), size(B)) n = size(A,1) iszero(n) && return C - if n <= 3 - # naive multiplication - for I in CartesianIndices(C) - _modify!(_add, sum(A[I[1], k] * B[k, I[2]] for k in axes(A,2)), C, I) - end - return C - end + n <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) # We use `_rmul_or_fill!` instead of `_modify!` here since using # `_modify!` in the following loop will not update the # off-diagonal elements for non-zero beta. 
_rmul_or_fill!(C, _add.beta) iszero(_add.alpha) && return C - @inbounds begin - # first column of C - C[1,1] += _add(A[1,1]*B[1,1] + A[1, 2]*B[2,1]) - C[2,1] += _add(A[2,1]*B[1,1] + A[2,2]*B[2,1]) - C[3,1] += _add(A[3,2]*B[2,1]) - # second column of C - C[1,2] += _add(A[1,1]*B[1,2] + A[1,2]*B[2,2]) - C[2,2] += _add(A[2,1]*B[1,2] + A[2,2]*B[2,2] + A[2,3]*B[3,2]) - C[3,2] += _add(A[3,2]*B[2,2] + A[3,3]*B[3,2]) - C[4,2] += _add(A[4,3]*B[3,2]) - end # inbounds - # middle columns - __bibimul!(C, A, B, _add) - @inbounds begin - C[n-3,n-1] += _add(A[n-3,n-2]*B[n-2,n-1]) - C[n-2,n-1] += _add(A[n-2,n-2]*B[n-2,n-1] + A[n-2,n-1]*B[n-1,n-1]) - C[n-1,n-1] += _add(A[n-1,n-2]*B[n-2,n-1] + A[n-1,n-1]*B[n-1,n-1] + A[n-1,n]*B[n,n-1]) - C[n, n-1] += _add(A[n,n-1]*B[n-1,n-1] + A[n,n]*B[n,n-1]) - # last column of C - C[n-2, n] += _add(A[n-2,n-1]*B[n-1,n]) - C[n-1, n] += _add(A[n-1,n-1]*B[n-1,n ] + A[n-1,n]*B[n,n ]) - C[n, n] += _add(A[n,n-1]*B[n-1,n ] + A[n,n]*B[n,n ]) - end # inbounds - C -end -function __bibimul!(C, A, B, _add) - n = size(A,1) Al = _diag(A, -1) Ad = _diag(A, 0) Au = _diag(A, 1) @@ -624,198 +590,44 @@ function __bibimul!(C, A, B, _add) Bd = _diag(B, 0) Bu = _diag(B, 1) @inbounds begin + # first row of C + C[1,1] += _add(A[1,1]*B[1,1] + A[1, 2]*B[2, 1]) + C[1,2] += _add(A[1,1]*B[1,2] + A[1,2]*B[2,2]) + C[1,3] += _add(A[1,2]*B[2,3]) + # second row of C + C[2,1] += _add(A[2,1]*B[1,1] + A[2,2]*B[2,1]) + C[2,2] += _add(A[2,1]*B[1,2] + A[2,2]*B[2,2] + A[2,3]*B[3,2]) + C[2,3] += _add(A[2,2]*B[2,3] + A[2,3]*B[3,3]) + C[2,4] += _add(A[2,3]*B[3,4]) for j in 3:n-2 - Aj₋2j₋1 = Au[j-2] - Aj₋1j = Au[j-1] - Ajj₊1 = Au[j] - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] Ajj₋1 = Al[j-1] - Aj₊1j = Al[j] - Aj₊2j₊1 = Al[j+1] + Ajj = Ad[j] + Ajj₊1 = Au[j] + Bj₋1j₋2 = Bl[j-2] + Bj₋1j₋1 = Bd[j-1] Bj₋1j = Bu[j-1] + Bjj₋1 = Bl[j-1] Bjj = Bd[j] + Bjj₊1 = Bu[j] Bj₊1j = Bl[j] - - C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) - C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) - C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) - end - end - C -end -function __bibimul!(C, A, B::Bidiagonal, _add) - n = size(A,1) - Al = _diag(A, -1) - Ad = _diag(A, 0) - Au = _diag(A, 1) - Bd = _diag(B, 0) - if B.uplo == 'U' - Bu = _diag(B, 1) - @inbounds begin - for j in 3:n-2 - Aj₋2j₋1 = Au[j-2] - Aj₋1j = Au[j-1] - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Ajj₋1 = Al[j-1] - Aj₊1j = Al[j] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - - C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) - C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) - C[j+1, j] += _add(Aj₊1j*Bjj) - end - end - else # B.uplo == 'L' - Bl = _diag(B, -1) - @inbounds begin - for j in 3:n-2 - Aj₋1j = Au[j-1] - Ajj₊1 = Au[j] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Aj₊1j = Al[j] - Aj₊2j₊1 = Al[j+1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-1, j] += _add(Aj₋1j*Bjj) - C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) - C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) - end - end - end - C -end -function __bibimul!(C, A::Bidiagonal, B, _add) - n = size(A,1) - Bl = _diag(B, -1) - Bd = _diag(B, 0) - Bu = _diag(B, 1) - Ad = _diag(A, 0) - if A.uplo == 'U' - Au = _diag(A, 1) - @inbounds begin - for j in 3:n-2 - Aj₋2j₋1 = Au[j-2] - Aj₋1j = Au[j-1] - Ajj₊1 = Au[j] - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) - C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j+1, j] += 
_add(Aj₊1j₊1*Bj₊1j) - end + Bj₊1j₊1 = Bd[j+1] + Bj₊1j₊2 = Bu[j+1] + C[j,j-2] += _add( Ajj₋1*Bj₋1j₋2) + C[j, j-1] += _add(Ajj₋1*Bj₋1j₋1 + Ajj*Bjj₋1) + C[j, j ] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j, j+1] += _add(Ajj *Bjj₊1 + Ajj₊1*Bj₊1j₊1) + C[j, j+2] += _add(Ajj₊1*Bj₊1j₊2) end - else # A.uplo == 'L' - Al = _diag(A, -1) - @inbounds begin - for j in 3:n-2 - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Ajj₋1 = Al[j-1] - Aj₊1j = Al[j] - Aj₊2j₊1 = Al[j+1] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j) - C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) - C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) - C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) - end - end - end - C -end -function __bibimul!(C, A::Bidiagonal, B::Bidiagonal, _add) - n = size(A,1) - Ad = _diag(A, 0) - Bd = _diag(B, 0) - if A.uplo == 'U' && B.uplo == 'U' - Au = _diag(A, 1) - Bu = _diag(B, 1) - @inbounds begin - for j in 3:n-2 - Aj₋2j₋1 = Au[j-2] - Aj₋1j = Au[j-1] - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - - C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) - C[j, j] += _add(Ajj*Bjj) - end - end - elseif A.uplo == 'U' && B.uplo == 'L' - Au = _diag(A, 1) - Bl = _diag(B, -1) - @inbounds begin - for j in 3:n-2 - Aj₋1j = Au[j-1] - Ajj₊1 = Au[j] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-1, j] += _add(Aj₋1j*Bjj) - C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j+1, j] += _add(Aj₊1j₊1*Bj₊1j) - end - end - elseif A.uplo == 'L' && B.uplo == 'U' - Al = _diag(A, -1) - Bu = _diag(B, 1) - @inbounds begin - for j in 3:n-2 - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Ajj₋1 = Al[j-1] - Aj₊1j = Al[j] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j) - C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) - C[j+1, j] += _add(Aj₊1j*Bjj) - end - end - else # A.uplo == 'L' && B.uplo == 'L' - Al = _diag(A, -1) - Bl = _diag(B, -1) - @inbounds begin - for j in 3:n-2 - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Aj₊1j = Al[j] - Aj₊2j₊1 = Al[j+1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j, j] += _add(Ajj*Bjj) - C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) - C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) - end - end - end + # row before last of C + C[n-1,n-3] += _add(A[n-1,n-2]*B[n-2,n-3]) + C[n-1,n-2] += _add(A[n-1,n-1]*B[n-1,n-2] + A[n-1,n-2]*B[n-2,n-2]) + C[n-1,n-1] += _add(A[n-1,n-2]*B[n-2,n-1] + A[n-1,n-1]*B[n-1,n-1] + A[n-1,n]*B[n,n-1]) + C[n-1,n ] += _add(A[n-1,n-1]*B[n-1,n ] + A[n-1, n]*B[n ,n ]) + # last row of C + C[n,n-2] += _add(A[n,n-1]*B[n-1,n-2]) + C[n,n-1] += _add(A[n,n-1]*B[n-1,n-1] + A[n,n]*B[n,n-1]) + C[n,n ] += _add(A[n,n-1]*B[n-1,n ] + A[n,n]*B[n,n ]) + end # inbounds C end @@ -932,52 +744,7 @@ function _mul!(C::AbstractVecOrMat, A::BiTriSym, B::AbstractVecOrMat, _add::MulA nB = size(B,2) (iszero(nA) || iszero(nB)) && return C iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - if nA <= 3 - # naive multiplication - for I in CartesianIndices(C) - col = Base.tail(Tuple(I)) - _modify!(_add, sum(A[I[1], k] * B[k, col...] 
for k in axes(A,2)), C, I) - end - return C - end - _mul_bitrisym!(C, A, B, _add) -end -function _mul_bitrisym!(C::AbstractVecOrMat, A::Bidiagonal, B::AbstractVecOrMat, _add::MulAddMul) - nA = size(A,1) - nB = size(B,2) - d = A.dv - if A.uplo == 'U' - u = A.ev - @inbounds begin - for j = 1:nB - b₀, b₊ = B[1, j], B[2, j] - _modify!(_add, d[1]*b₀ + u[1]*b₊, C, (1, j)) - for i = 2:nA - 1 - b₀, b₊ = b₊, B[i + 1, j] - _modify!(_add, d[i]*b₀ + u[i]*b₊, C, (i, j)) - end - _modify!(_add, d[nA]*b₊, C, (nA, j)) - end - end - else - l = A.ev - @inbounds begin - for j = 1:nB - b₀, b₊ = B[1, j], B[2, j] - _modify!(_add, d[1]*b₀, C, (1, j)) - for i = 2:nA - 1 - b₋, b₀, b₊ = b₀, b₊, B[i + 1, j] - _modify!(_add, l[i - 1]*b₋ + d[i]*b₀, C, (i, j)) - end - _modify!(_add, l[nA - 1]*b₀ + d[nA]*b₊, C, (nA, j)) - end - end - end - C -end -function _mul_bitrisym!(C::AbstractVecOrMat, A::TriSym, B::AbstractVecOrMat, _add::MulAddMul) - nA = size(A,1) - nB = size(B,2) + nA <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) l = _diag(A, -1) d = _diag(A, 0) u = _diag(A, 1) @@ -1002,9 +769,8 @@ function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::TriSym, _add::MulAddMul) m = size(B,2) (iszero(m) || iszero(n)) && return C iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - if m == 1 - B11 = B[1,1] - return mul!(C, A, B11, _add.alpha, _add.beta) + if n <= 3 || m <= 1 + return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) end Bl = _diag(B, -1) Bd = _diag(B, 0) @@ -1038,18 +804,21 @@ function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::Bidiagonal, _add::MulAdd m, n = size(A) (iszero(m) || iszero(n)) && return C iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) + if size(A, 1) <= 3 || size(B, 2) <= 1 + return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) + end @inbounds if B.uplo == 'U' - for j in n:-1:2, i in 1:m - _modify!(_add, A[i,j] * B.dv[j] + A[i,j-1] * B.ev[j-1], C, (i, j)) - end for i in 1:m + for j in n:-1:2 + _modify!(_add, A[i,j] * B.dv[j] + A[i,j-1] * B.ev[j-1], C, (i, j)) + end _modify!(_add, A[i,1] * B.dv[1], C, (i, 1)) end else # uplo == 'L' - for j in 1:n-1, i in 1:m - _modify!(_add, A[i,j] * B.dv[j] + A[i,j+1] * B.ev[j], C, (i, j)) - end for i in 1:m + for j in 1:n-1 + _modify!(_add, A[i,j] * B.dv[j] + A[i,j+1] * B.ev[j], C, (i, j)) + end _modify!(_add, A[i,n] * B.dv[n], C, (i, n)) end end @@ -1065,12 +834,7 @@ function _dibimul!(C, A, B, _add) check_A_mul_B!_sizes(size(C), size(A), size(B)) n = size(A,1) iszero(n) && return C - if n <= 3 - for I in CartesianIndices(C) - _modify!(_add, A.diag[I[1]] * B[I[1], I[2]], C, I) - end - return C - end + n <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) _rmul_or_fill!(C, _add.beta) # see the same use above iszero(_add.alpha) && return C Ad = A.diag diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index 58c228e39e226..ef50658a642fb 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -1026,71 +1026,26 @@ end @test_throws "cannot set entry" B[1,2] = 4 end -@testset "mul for small matrices" begin - @testset for n in 0:6 - D = Diagonal(rand(n)) - v = rand(n) - @testset for uplo in (:L, :U) - B = Bidiagonal(rand(n), rand(max(n-1,0)), uplo) - M = Matrix(B) - - @test B * v ≈ M * v - @test mul!(similar(v), B, v) ≈ M * v - @test mul!(ones(size(v)), B, v, 2, 3) ≈ M * v * 2 .+ 3 - - @test B * B ≈ M * M - @test mul!(similar(B, size(B)), B, B) ≈ M * M - @test mul!(ones(size(B)), B, B, 2, 4) ≈ M * M * 2 .+ 4 - - for m in 0:6 - AL = 
rand(m,n) - AR = rand(n,m) - @test AL * B ≈ AL * M - @test B * AR ≈ M * AR - @test mul!(similar(AL), AL, B) ≈ AL * M - @test mul!(similar(AR), B, AR) ≈ M * AR - @test mul!(ones(size(AL)), AL, B, 2, 4) ≈ AL * M * 2 .+ 4 - @test mul!(ones(size(AR)), B, AR, 2, 4) ≈ M * AR * 2 .+ 4 - end - - @test B * D ≈ M * D - @test D * B ≈ D * M - @test mul!(similar(B), B, D) ≈ M * D - @test mul!(similar(B), B, D) ≈ M * D - @test mul!(similar(B, size(B)), D, B) ≈ D * M - @test mul!(similar(B, size(B)), B, D) ≈ M * D - @test mul!(ones(size(B)), D, B, 2, 4) ≈ D * M * 2 .+ 4 - @test mul!(ones(size(B)), B, D, 2, 4) ≈ M * D * 2 .+ 4 - end - BL = Bidiagonal(rand(n), rand(max(0, n-1)), :L) - ML = Matrix(BL) - BU = Bidiagonal(rand(n), rand(max(0, n-1)), :U) - MU = Matrix(BU) - T = Tridiagonal(zeros(max(0, n-1)), zeros(n), zeros(max(0, n-1))) - @test mul!(T, BL, BU) ≈ ML * MU - @test mul!(T, BU, BL) ≈ MU * ML - T = Tridiagonal(ones(max(0, n-1)), ones(n), ones(max(0, n-1))) - @test mul!(copy(T), BL, BU, 2, 3) ≈ ML * MU * 2 + T * 3 - @test mul!(copy(T), BU, BL, 2, 3) ≈ MU * ML * 2 + T * 3 - end - - n = 4 - arr = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) - for B in ( - Bidiagonal(fill(arr,n), fill(arr,n-1), :L), - Bidiagonal(fill(arr,n), fill(arr,n-1), :U), - ) - @test B * B ≈ Matrix(B) * Matrix(B) - BL = Bidiagonal(fill(arr,n), fill(arr,n-1), :L) - BU = Bidiagonal(fill(arr,n), fill(arr,n-1), :U) - @test BL * B ≈ Matrix(BL) * Matrix(B) - @test BU * B ≈ Matrix(BU) * Matrix(B) - @test B * BL ≈ Matrix(B) * Matrix(BL) - @test B * BU ≈ Matrix(B) * Matrix(BU) - D = Diagonal(fill(arr,n)) - @test D * B ≈ Matrix(D) * Matrix(B) - @test B * D ≈ Matrix(B) * Matrix(D) - end +@testset "mul with empty arrays" begin + A = zeros(5,0) + B = Bidiagonal(zeros(0), zeros(0), :U) + BL = Bidiagonal(zeros(5), zeros(4), :U) + @test size(A * B) == size(A) + @test size(BL * A) == size(A) + @test size(B * B) == size(B) + C = similar(A) + @test mul!(C, A, B) == A * B + @test mul!(C, BL, A) == BL * A + @test mul!(similar(B), B, B) == B * B + @test mul!(similar(B, size(B)), B, B) == B * B + + v = zeros(size(B,2)) + @test size(B * v) == size(v) + @test mul!(similar(v), B, v) == B * v + + D = Diagonal(zeros(size(B,2))) + @test size(B * D) == size(D * B) == size(D) + @test mul!(similar(D), B, D) == mul!(similar(D), D, B) == B * D end end # module TestBidiagonal diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index 15ac7f9f2147f..3330fa682fe5e 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -970,75 +970,4 @@ end @test sprint(show, S) == "SymTridiagonal($(repr(diag(S))), $(repr(diag(S,1))))" end -@testset "mul for small matrices" begin - @testset for n in 0:6 - for T in ( - Tridiagonal(rand(max(n-1,0)), rand(n), rand(max(n-1,0))), - SymTridiagonal(rand(n), rand(max(n-1,0))), - ) - M = Matrix(T) - @test T * T ≈ M * M - @test mul!(similar(T, size(T)), T, T) ≈ M * M - @test mul!(ones(size(T)), T, T, 2, 4) ≈ M * M * 2 .+ 4 - - for m in 0:6 - AR = rand(n,m) - AL = rand(m,n) - @test AL * T ≈ AL * M - @test T * AR ≈ M * AR - @test mul!(similar(AL), AL, T) ≈ AL * M - @test mul!(similar(AR), T, AR) ≈ M * AR - @test mul!(ones(size(AL)), AL, T, 2, 4) ≈ AL * M * 2 .+ 4 - @test mul!(ones(size(AR)), T, AR, 2, 4) ≈ M * AR * 2 .+ 4 - end - - v = rand(n) - @test T * v ≈ M * v - @test mul!(similar(v), T, v) ≈ M * v - - D = Diagonal(rand(n)) - @test T * D ≈ M * D - @test D * T ≈ D * M - @test mul!(Tridiagonal(similar(T)), D, T) ≈ D * M - @test mul!(Tridiagonal(similar(T)), T, 
D) ≈ M * D - @test mul!(similar(T, size(T)), D, T) ≈ D * M - @test mul!(similar(T, size(T)), T, D) ≈ M * D - @test mul!(ones(size(T)), D, T, 2, 4) ≈ D * M * 2 .+ 4 - @test mul!(ones(size(T)), T, D, 2, 4) ≈ M * D * 2 .+ 4 - - for uplo in (:U, :L) - B = Bidiagonal(rand(n), rand(max(0, n-1)), uplo) - @test T * B ≈ M * B - @test B * T ≈ B * M - if n <= 2 - @test mul!(Tridiagonal(similar(T)), B, T) ≈ B * M - @test mul!(Tridiagonal(similar(T)), T, B) ≈ M * B - end - @test mul!(similar(T, size(T)), B, T) ≈ B * M - @test mul!(similar(T, size(T)), T, B) ≈ M * B - @test mul!(ones(size(T)), B, T, 2, 4) ≈ B * M * 2 .+ 4 - @test mul!(ones(size(T)), T, B, 2, 4) ≈ M * B * 2 .+ 4 - end - end - end - - n = 4 - arr = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) - for T in ( - SymTridiagonal(fill(arr,n), fill(arr,n-1)), - Tridiagonal(fill(arr,n-1), fill(arr,n), fill(arr,n-1)), - ) - @test T * T ≈ Matrix(T) * Matrix(T) - BL = Bidiagonal(fill(arr,n), fill(arr,n-1), :L) - BU = Bidiagonal(fill(arr,n), fill(arr,n-1), :U) - @test BL * T ≈ Matrix(BL) * Matrix(T) - @test BU * T ≈ Matrix(BU) * Matrix(T) - @test T * BL ≈ Matrix(T) * Matrix(BL) - @test T * BU ≈ Matrix(T) * Matrix(BU) - D = Diagonal(fill(arr,n)) - @test D * T ≈ Matrix(D) * Matrix(T) - @test T * D ≈ Matrix(T) * Matrix(D) - end -end - end # module TestTridiagonal From 243bdede3d686b1eaa634db190f2bab3e5f4de72 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Sat, 14 Sep 2024 12:22:31 -0400 Subject: [PATCH 025/537] Add a docs section about loading/precomp/ttfx time tuning (#55569) --- doc/src/manual/performance-tips.md | 119 +++++++++++++++++++++++++++++ 1 file changed, 119 insertions(+) diff --git a/doc/src/manual/performance-tips.md b/doc/src/manual/performance-tips.md index 38e27476f0af8..436d58f54754a 100644 --- a/doc/src/manual/performance-tips.md +++ b/doc/src/manual/performance-tips.md @@ -1394,6 +1394,125 @@ Prominent examples include [MKL.jl](https://github.com/JuliaLinearAlgebra/MKL.jl These are external packages, so we will not discuss them in detail here. Please refer to their respective documentations (especially because they have different behaviors than OpenBLAS with respect to multithreading). +## Execution latency, package loading and package precompiling time + +### Reducing time to first plot etc. + +The first time a julia method is called it (and any methods it calls, or ones that can be statically determined) will be +compiled. The [`@time`](@ref) macro family illustrates this. + +``` +julia> foo() = rand(2,2) * rand(2,2) +foo (generic function with 1 method) + +julia> @time @eval foo(); + 0.252395 seconds (1.12 M allocations: 56.178 MiB, 2.93% gc time, 98.12% compilation time) + +julia> @time @eval foo(); + 0.000156 seconds (63 allocations: 2.453 KiB) +``` + +Note that `@time @eval` is better for measuring compilation time because without [`@eval`](@ref), some compilation may +already be done before timing starts. + +When developing a package, you may be able to improve the experience of your users with *precompilation* +so that when they use the package, the code they use is already compiled. To precompile package code effectively, it's +recommended to use [`PrecompileTools.jl`](https://julialang.github.io/PrecompileTools.jl/stable/) to run a +"precompile workload" during precompilation time that is representative of typical package usage, which will cache the +native compiled code into the package `pkgimage` cache, greatly reducing "time to first execution" (often referred to as +TTFX) for such usage. 
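+
+As a sketch of what such a workload can look like (assuming `PrecompileTools` has been added as a dependency;
+`MyPackage` and `do_work` are hypothetical placeholders for your own package and its typical entry points):
+
+```julia
+module MyPackage
+
+using PrecompileTools: @setup_workload, @compile_workload
+
+do_work(data) = sum(abs2, data)   # stand-in for the package's real functionality
+
+@setup_workload begin
+    # Code here runs while the package is being precompiled, but its own
+    # compilation is not cached.
+    data = rand(100)
+    @compile_workload begin
+        # Calls made here are compiled during precompilation and the native
+        # code is cached in the pkgimage, so first use at runtime is fast.
+        do_work(data)
+    end
+end
+
+end # module
+```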
+
+Note that [`PrecompileTools.jl`](https://julialang.github.io/PrecompileTools.jl/stable/) workloads can be
+disabled and sometimes configured via Preferences if you do not want to spend the extra time precompiling, which
+may be the case during development of a package.
+
+### Reducing package loading time
+
+Keeping the time taken to load the package down is usually helpful.
+General good practice for package developers includes:
+
+1. Reduce your dependencies to those you really need. Consider using [package extensions](@ref) to support interoperability with other packages without bloating your essential dependencies.
+2. Avoid use of [`__init__()`](@ref) functions unless there is no alternative, especially those which might trigger a lot
+   of compilation, or just take a long time to execute.
+3. Where possible, fix [invalidations](https://julialang.org/blog/2020/08/invalidations/) among your dependencies and from your package code.
+
+The tool [`@time_imports`](@ref) can be useful in the REPL to review the above factors.
+
+```julia-repl
+julia> @time @time_imports using Plots
+      0.5 ms  Printf
+     16.4 ms  Dates
+      0.7 ms  Statistics
+           ┌ 23.8 ms SuiteSparse_jll.__init__() 86.11% compilation time (100% recompilation)
+     90.1 ms  SuiteSparse_jll 91.57% compilation time (82% recompilation)
+      0.9 ms  Serialization
+           ┌ 39.8 ms SparseArrays.CHOLMOD.__init__() 99.47% compilation time (100% recompilation)
+    166.9 ms  SparseArrays 23.74% compilation time (100% recompilation)
+      0.4 ms  Statistics → SparseArraysExt
+      0.5 ms  TOML
+      8.0 ms  Preferences
+      0.3 ms  PrecompileTools
+      0.2 ms  Reexport
+... many deps omitted for example ...
+      1.4 ms  Tar
+           ┌ 73.8 ms p7zip_jll.__init__() 99.93% compilation time (100% recompilation)
+     79.4 ms  p7zip_jll 92.91% compilation time (100% recompilation)
+           ┌ 27.7 ms GR.GRPreferences.__init__() 99.77% compilation time (100% recompilation)
+     43.0 ms  GR 64.26% compilation time (100% recompilation)
+           ┌ 2.1 ms Plots.__init__() 91.80% compilation time (100% recompilation)
+    300.9 ms  Plots 0.65% compilation time (100% recompilation)
+  1.795602 seconds (3.33 M allocations: 190.153 MiB, 7.91% gc time, 39.45% compilation time: 97% of which was recompilation)
+
+```
+
+Notice that in this example multiple packages are loaded, some with `__init__()` functions, some of which cause
+compilation, and some of that compilation is recompilation. Recompilation is caused by earlier packages invalidating
+methods; in these cases, when the later packages run their `__init__()` functions, some hit recompilation before the
+code can be run.
+
+Further, note the `Statistics` extension `SparseArraysExt` has been activated because `SparseArrays` is in the dependency
+tree, i.e. see `0.4 ms  Statistics → SparseArraysExt`.
+
+This report gives a good opportunity to review whether the cost of dependency load time is worth the functionality it brings.
+Also, the `Pkg` utility `why` can be used to report why an indirect dependency exists.
+
+```
+(CustomPackage) pkg> why FFMPEG_jll
+  Plots → FFMPEG → FFMPEG_jll
+  Plots → GR → GR_jll → FFMPEG_jll
+```
+
+Or, to see the indirect dependencies that a package brings in, you can `pkg> rm` the package, see the deps that are removed
+from the manifest, and then revert the change with `pkg> undo`.
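+
+For instance, a hypothetical session (reusing the placeholder environment `CustomPackage` from the example above) might be:
+
+```
+(CustomPackage) pkg> rm Plots
+
+(CustomPackage) pkg> undo
+```
+
+After the `rm`, Pkg prints the project and manifest changes, which show every indirect dependency that was present only
+because of the removed package; `undo` then restores the previous state of the environment.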
+ +If loading time is dominated by slow `__init__()` methods having compilation, one verbose way to identify what is being +compiled is to use the julia args `--trace-compile=stderr --trace-compile-timing` which will report a [`precompile`](@ref) +statement each time a method is compiled, along with how long compilation took. For instance, the full setup would be: + +``` +$ julia --startup-file=no --trace-compile=stderr --trace-compile-timing +julia> @time @time_imports using CustomPackage +... +``` + +Note the `--startup-file=no` which helps isolate the test from packages you may have in your `startup.jl`. + +More analysis of the reasons for recompilation can be achieved with the +[`SnoopCompile`](https://github.com/timholy/SnoopCompile.jl) package. + +### Reducing precompilation time + +If package precompilation is taking a long time, one option is to set the following internal and then precompile. +``` +julia> Base.PRECOMPILE_TRACE_COMPILE[] = "stderr" + +pkg> precompile +``` + +This has the effect of setting `--trace-compile=stderr --trace-compile-timing` in the precompilation processes themselves, +so will show which methods are precompiled and how long they took to precompile. + +There are also profiling options such as [using the external profiler Tracy to profile the precompilation process](@ref Profiling-package-precompilation-with-Tracy). + ## Miscellaneous From 346f38bceabf3dab1d3912fe822a663735c91d4a Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Sat, 14 Sep 2024 14:40:10 -0500 Subject: [PATCH 026/537] Add compat entry for `Base.donotdelete` (#55773) --- base/docs/basedocs.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index e03d0db78f29f..0fc253bd73d1c 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -3712,6 +3712,9 @@ unused and delete the entire benchmark code). which the value of the arguments of this intrinsic were available (in a register, in memory, etc.). +!!! compat "Julia 1.8" + This method was added in Julia 1.8. + # Examples ```julia From f4fb87b0f9c3d8e5bcc9901701acd81534789293 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Sun, 15 Sep 2024 07:12:12 -0400 Subject: [PATCH 027/537] REPL: precompile in its own module because Main is closed. Add check for unexpected errors. (#55759) --- stdlib/REPL/src/REPL.jl | 4 ++-- stdlib/REPL/src/precompile.jl | 27 ++++++++++++++++++++++----- 2 files changed, 24 insertions(+), 7 deletions(-) diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl index ddf2f55d0b9f7..44fe0446240c6 100644 --- a/stdlib/REPL/src/REPL.jl +++ b/stdlib/REPL/src/REPL.jl @@ -758,11 +758,11 @@ setmodifiers!(c::REPLCompletionProvider, m::LineEdit.Modifiers) = c.modifiers = Set `mod` as the default contextual module in the REPL, both for evaluating expressions and printing them. 
""" -function activate(mod::Module=Main) +function activate(mod::Module=Main; interactive_utils::Bool=true) mistate = (Base.active_repl::LineEditREPL).mistate mistate === nothing && return nothing mistate.active_module = mod - Base.load_InteractiveUtils(mod) + interactive_utils && Base.load_InteractiveUtils(mod) return nothing end diff --git a/stdlib/REPL/src/precompile.jl b/stdlib/REPL/src/precompile.jl index 82a1a0bb78ee8..c42def9078759 100644 --- a/stdlib/REPL/src/precompile.jl +++ b/stdlib/REPL/src/precompile.jl @@ -14,6 +14,19 @@ finally end let + # these are intentionally triggered + allowed_errors = [ + "BoundsError: attempt to access 0-element Vector{Any} at index [1]", + "MethodError: no method matching f(::$Int, ::$Int)", + "Padding of type", # reinterpret docstring has ERROR examples + ] + function check_errors(out) + str = String(out) + if occursin("ERROR:", str) && !any(occursin(e, str) for e in allowed_errors) + @error "Unexpected error (Review REPL precompilation with debug_output on):\n$str" + exit(1) + end + end ## Debugging options # View the code sent to the repl by setting this to `stdout` debug_output = devnull # or stdout @@ -25,6 +38,8 @@ let DOWN_ARROW = "\e[B" repl_script = """ + import REPL + REPL.activate(REPL.Precompile; interactive_utils=false) # Main is closed so we can't evaluate in it 2+2 print("") printstyled("a", "b") @@ -47,6 +62,7 @@ let [][1] Base.Iterators.minimum cd("complete_path\t\t$CTRL_C + REPL.activate(; interactive_utils=false) println("done") """ @@ -113,10 +129,10 @@ let end schedule(repltask) # wait for the definitive prompt before start writing to the TTY - readuntil(output_copy, JULIA_PROMPT) + check_errors(readuntil(output_copy, JULIA_PROMPT)) write(debug_output, "\n#### REPL STARTED ####\n") sleep(0.1) - readavailable(output_copy) + check_errors(readavailable(output_copy)) # Input our script precompile_lines = split(repl_script::String, '\n'; keepempty=false) curr = 0 @@ -124,16 +140,16 @@ let sleep(0.1) curr += 1 # consume any other output - bytesavailable(output_copy) > 0 && readavailable(output_copy) + bytesavailable(output_copy) > 0 && check_errors(readavailable(output_copy)) # push our input write(debug_output, "\n#### inputting statement: ####\n$(repr(l))\n####\n") # If the line ends with a CTRL_C, don't write an extra newline, which would # cause a second empty prompt. Our code below expects one new prompt per # input line and can race out of sync with the unexpected second line. endswith(l, CTRL_C) ? write(ptm, l) : write(ptm, l, "\n") - readuntil(output_copy, "\n") + check_errors(readuntil(output_copy, "\n")) # wait for the next prompt-like to appear - readuntil(output_copy, "\n") + check_errors(readuntil(output_copy, "\n")) strbuf = "" while !eof(output_copy) strbuf *= String(readavailable(output_copy)) @@ -143,6 +159,7 @@ let occursin(HELP_PROMPT, strbuf) && break sleep(0.1) end + check_errors(strbuf) end write(debug_output, "\n#### COMPLETED - Closing REPL ####\n") write(ptm, "$CTRL_D") From 4633607ce9b9f077f32f89f09a136e04389bbac2 Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Sun, 15 Sep 2024 07:11:22 -0500 Subject: [PATCH 028/537] Try to put back previously flakey addmul tests (#55775) Partial revert of #50071, inspired by conversation in https://github.com/JuliaLang/julia/issues/49966#issuecomment-2350935477 Ran the tests 100 times to make sure we're not putting back something that's still flaky. 
Closes #49966 --- stdlib/LinearAlgebra/test/addmul.jl | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/stdlib/LinearAlgebra/test/addmul.jl b/stdlib/LinearAlgebra/test/addmul.jl index 3fff8289242f7..72fdf687bf5c3 100644 --- a/stdlib/LinearAlgebra/test/addmul.jl +++ b/stdlib/LinearAlgebra/test/addmul.jl @@ -164,8 +164,7 @@ end Bc = Matrix(B) returned_mat = mul!(C, A, B, α, β) @test returned_mat === C - # This test is skipped because it is flakey, but should be fixed and put back (see #49966) - @test_skip collect(returned_mat) ≈ α * Ac * Bc + β * Cc rtol=rtol + @test collect(returned_mat) ≈ α * Ac * Bc + β * Cc rtol=rtol y = C[:, 1] x = B[:, 1] @@ -190,8 +189,7 @@ end returned_mat = mul!(C, Af, Bf, α, β) @test returned_mat === C - # This test is skipped because it is flakey, but should be fixed and put back (see #49966) - @test_skip collect(returned_mat) ≈ α * Ac * Bc + β * Cc rtol=rtol + @test collect(returned_mat) ≈ α * Ac * Bc + β * Cc rtol=rtol end end end @@ -203,8 +201,7 @@ end Bc = Matrix(B) returned_mat = mul!(C, A, B, α, zero(eltype(C))) @test returned_mat === C - # This test is skipped because it is flakey, but should be fixed and put back (see #49966) - @test_skip collect(returned_mat) ≈ α * Ac * Bc rtol=rtol + @test collect(returned_mat) ≈ α * Ac * Bc rtol=rtol end end From a993cd8f81a6bc02a88ee5cf036f4e29c36d5580 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Sun, 15 Sep 2024 21:24:47 +0100 Subject: [PATCH 029/537] Print results of `runtests` with `printstyled` (#55780) This ensures escape characters are used only if `stdout` can accept them. --- test/runtests.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index c46472ac93fa8..e48e896f4069e 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -438,9 +438,9 @@ cd(@__DIR__) do # o_ts.verbose = true # set to true to show all timings when successful Test.print_test_results(o_ts, 1) if !o_ts.anynonpass - println(" \033[32;1mSUCCESS\033[0m") + printstyled(" SUCCESS\n"; bold=true, color=:green) else - println(" \033[31;1mFAILURE\033[0m\n") + printstyled(" FAILURE\n\n"; bold=true, color=:red) skipped > 0 && println("$skipped test", skipped > 1 ? "s were" : " was", " skipped due to failure.") println("The global RNG seed was 0x$(string(seed, base = 16)).\n") From 55c40ce52eb5c249efdff421101190b2a111d541 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson Date: Mon, 16 Sep 2024 13:25:20 -0400 Subject: [PATCH 030/537] move null check in `unsafe_convert` of RefValue (#55766) LLVM can optimize out this check but our optimizer can't, so this leads to smaller IR in most cases. --- base/refvalue.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/base/refvalue.jl b/base/refvalue.jl index 000088ff0ce76..7a0f2f84e2206 100644 --- a/base/refvalue.jl +++ b/base/refvalue.jl @@ -46,9 +46,9 @@ function unsafe_convert(P::Union{Type{Ptr{T}},Type{Ptr{Cvoid}}}, b::RefValue{T}) # Instead, explicitly load the pointer from the `RefValue`, # which also ensures this returns same pointer as the one rooted in the `RefValue` object. 
p = atomic_pointerref(Ptr{Ptr{Cvoid}}(pointer_from_objref(b)), :monotonic) - end - if p == C_NULL - throw(UndefRefError()) + if p == C_NULL + throw(UndefRefError()) + end end return p end From 753296e89ddc484e54937ce7195a3f152fd5a14a Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Mon, 16 Sep 2024 15:58:40 -0300 Subject: [PATCH 031/537] Fix hang in tmerge_types_slow (#55757) Fixes https://github.com/JuliaLang/julia/issues/55751 Co-authored-by: Jameson Nash --- base/compiler/typelimits.jl | 1 + test/compiler/inference.jl | 11 +++++++++++ 2 files changed, 12 insertions(+) diff --git a/base/compiler/typelimits.jl b/base/compiler/typelimits.jl index 91a44d3b117ab..3d0e5f3d0877d 100644 --- a/base/compiler/typelimits.jl +++ b/base/compiler/typelimits.jl @@ -831,6 +831,7 @@ end typenames[i] = Any.name simplify[i] = false types[j] = widen + typenames[j] = ijname break end end diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index 9454c53a09fb7..d1382d3c84b82 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -6141,3 +6141,14 @@ end == TypeError @test Base.infer_exception_type((Vector{Any},)) do args Core.throw_methoderror(args...) end == Union{MethodError,ArgumentError} + +# Issue https://github.com/JuliaLang/julia/issues/55751 + +abstract type AbstractGrid55751{T, N} <: AbstractArray{T, N} end +struct Grid55751{T, N, AT} <: AbstractGrid55751{T, N} + axes::AT +end + +t155751 = Union{AbstractArray{UInt8, 4}, Array{Float32, 4}, Grid55751{Float32, 3, _A} where _A} +t255751 = Array{Float32, 3} +@test Core.Compiler.tmerge_types_slow(t155751,t255751) == AbstractArray # shouldn't hang From 5aad7617c3ac49155d03748a451f56215b28dec4 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Mon, 16 Sep 2024 15:08:57 -0400 Subject: [PATCH 032/537] trace-compile: color recompilation yellow (#55763) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Marks recompilation of a method that produced a `precompile` statement as yellow, or if color isn't supported adds a trailing comment: `# recompilation`. The coloring matches the `@time_imports` coloring. i.e. an excerpt of ``` % ./julia --start=no --trace-compile=stderr --trace-compile-timing -e "using InteractiveUtils; @time @time_imports using Plots" ``` ![Screenshot 2024-09-13 at 5 04 24 PM](https://github.com/user-attachments/assets/85bd99e0-586e-4070-994f-2d845be0d9e7) --- NEWS.md | 1 + doc/man/julia.1 | 3 ++- doc/src/manual/command-line-interface.md | 2 +- src/gf.c | 21 ++++++++++++++++----- src/jloptions.c | 4 +++- 5 files changed, 23 insertions(+), 8 deletions(-) diff --git a/NEWS.md b/NEWS.md index c12cc3c64300c..9ecdd87f0c2bb 100644 --- a/NEWS.md +++ b/NEWS.md @@ -56,6 +56,7 @@ variables. ([#53742]). * `--project=@temp` starts Julia with a temporary environment. * New `--trace-compile-timing` option to report how long each method reported by `--trace-compile` took to compile, in ms. 
([#54662]) +* `--trace-compile` now prints recompiled methods in yellow or with a trailing comment if color is not supported ([#55763]) Multi-threading changes ----------------------- diff --git a/doc/man/julia.1 b/doc/man/julia.1 index ebac4362b39a6..536a23bd37894 100644 --- a/doc/man/julia.1 +++ b/doc/man/julia.1 @@ -283,7 +283,8 @@ Generate an incremental output file (rather than complete) .TP --trace-compile={stderr|name} -Print precompile statements for methods compiled during execution or save to a path +Print precompile statements for methods compiled during execution or save to stderr or a path. +Methods that were recompiled are printed in yellow or with a trailing comment if color is not supported .TP --trace-compile-timing= diff --git a/doc/src/manual/command-line-interface.md b/doc/src/manual/command-line-interface.md index d1ed576c42a4f..41c3eacd61d26 100644 --- a/doc/src/manual/command-line-interface.md +++ b/doc/src/manual/command-line-interface.md @@ -214,7 +214,7 @@ The following is a complete list of command-line switches available when launchi |`--output-bc ` |Generate LLVM bitcode (.bc)| |`--output-asm ` |Generate an assembly file (.s)| |`--output-incremental={yes\|no*}` |Generate an incremental output file (rather than complete)| -|`--trace-compile={stderr\|name}` |Print precompile statements for methods compiled during execution or save to a path| +|`--trace-compile={stderr\|name}` |Print precompile statements for methods compiled during execution or save to stderr or a path. Methods that were recompiled are printed in yellow or with a trailing comment if color is not supported| |`--trace-compile-timing` |If --trace-compile is enabled show how long each took to compile in ms| |`--image-codegen` |Force generate code in imaging mode| |`--permalloc-pkgimg={yes\|no*}` |Copy the data section of package images into memory| diff --git a/src/gf.c b/src/gf.c index 970cb62b8a862..e6f5b4ee007f7 100644 --- a/src/gf.c +++ b/src/gf.c @@ -2514,7 +2514,7 @@ jl_code_instance_t *jl_method_inferred_with_abi(jl_method_instance_t *mi JL_PROP jl_mutex_t precomp_statement_out_lock; -static void record_precompile_statement(jl_method_instance_t *mi, double compilation_time) +static void record_precompile_statement(jl_method_instance_t *mi, double compilation_time, int is_recompile) { static ios_t f_precompile; static JL_STREAM* s_precompile = NULL; @@ -2539,11 +2539,22 @@ static void record_precompile_statement(jl_method_instance_t *mi, double compila } } if (!jl_has_free_typevars(mi->specTypes)) { + if (is_recompile && s_precompile == JL_STDERR && jl_options.color != JL_OPTIONS_COLOR_OFF) + jl_printf(s_precompile, "\e[33m"); if (jl_options.trace_compile_timing) jl_printf(s_precompile, "#= %6.1f ms =# ", compilation_time / 1e6); jl_printf(s_precompile, "precompile("); jl_static_show(s_precompile, mi->specTypes); - jl_printf(s_precompile, ")\n"); + jl_printf(s_precompile, ")"); + if (is_recompile) { + if (s_precompile == JL_STDERR && jl_options.color != JL_OPTIONS_COLOR_OFF) { + jl_printf(s_precompile, "\e[0m"); + } + else { + jl_printf(s_precompile, " # recompile"); + } + } + jl_printf(s_precompile, "\n"); if (s_precompile != JL_STDERR) ios_flush(&f_precompile); } @@ -2674,7 +2685,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t // unspec is probably not specsig, but might be using specptr jl_atomic_store_relaxed(&codeinst->specsigflags, specsigflags & ~0b1); // clear specsig flag jl_mi_cache_insert(mi, codeinst); - record_precompile_statement(mi, 0); + 
record_precompile_statement(mi, 0, 0); return codeinst; } } @@ -2691,7 +2702,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL); jl_atomic_store_release(&codeinst->invoke, jl_fptr_interpret_call); jl_mi_cache_insert(mi, codeinst); - record_precompile_statement(mi, 0); + record_precompile_statement(mi, 0, 0); return codeinst; } if (compile_option == JL_OPTIONS_COMPILE_OFF) { @@ -2740,7 +2751,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t codeinst = NULL; } else if (did_compile && codeinst->owner == jl_nothing) { - record_precompile_statement(mi, compile_time); + record_precompile_statement(mi, compile_time, is_recompile); } JL_GC_POP(); } diff --git a/src/jloptions.c b/src/jloptions.c index 4cdec2c7b367f..f63f4de020e26 100644 --- a/src/jloptions.c +++ b/src/jloptions.c @@ -258,7 +258,9 @@ static const char opts_hidden[] = " --output-incremental={yes|no*} Generate an incremental output file (rather than\n" " complete)\n" " --trace-compile={stderr|name} Print precompile statements for methods compiled\n" - " during execution or save to a path\n" + " during execution or save to stderr or a path. Methods that\n" + " were recompiled are printed in yellow or with a trailing\n" + " comment if color is not supported\n" " --trace-compile-timing If --trace-compile is enabled show how long each took to\n" " compile in ms\n" " --image-codegen Force generate code in imaging mode\n" From 02549d5c54ec8ac8c7e62ea470803350bb3d1899 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 17 Sep 2024 04:05:24 -0400 Subject: [PATCH 033/537] Use PrecompileTools mechanics to compile REPL (#55782) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes https://github.com/JuliaLang/julia/issues/55778 Based on discussion here https://github.com/JuliaLang/julia/issues/55778#issuecomment-2352428043 With this `?reinterpret` feels instant, with only these precompiles at the start. 
![Screenshot 2024-09-16 at 9 49 39 AM](https://github.com/user-attachments/assets/20dc016d-c6f7-4870-acd7-0e795dcf541b) --- stdlib/REPL/src/precompile.jl | 39 ++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/stdlib/REPL/src/precompile.jl b/stdlib/REPL/src/precompile.jl index c42def9078759..6bcec6415ba96 100644 --- a/stdlib/REPL/src/precompile.jl +++ b/stdlib/REPL/src/precompile.jl @@ -13,7 +13,7 @@ finally Base._track_dependencies[] = true end -let +function repl_workload() # these are intentionally triggered allowed_errors = [ "BoundsError: attempt to access 0-element Vector{Any} at index [1]", @@ -175,9 +175,38 @@ let nothing end -precompile(Tuple{typeof(Base.setindex!), Base.Dict{Any, Any}, Any, Int}) -precompile(Tuple{typeof(Base.delete!), Base.Set{Any}, String}) -precompile(Tuple{typeof(Base.:(==)), Char, String}) -precompile(Tuple{typeof(Base.reseteof), Base.TTY}) +# Copied from PrecompileTools.jl +let + function check_edges(node) + parentmi = node.mi_info.mi + for child in node.children + childmi = child.mi_info.mi + if !(isdefined(childmi, :backedges) && parentmi ∈ childmi.backedges) + precompile(childmi.specTypes) + end + check_edges(child) + end + end + + if Base.generating_output() && Base.JLOptions().use_pkgimages != 0 + Core.Compiler.Timings.reset_timings() + Core.Compiler.__set_measure_typeinf(true) + try + repl_workload() + finally + Core.Compiler.__set_measure_typeinf(false) + Core.Compiler.Timings.close_current_timer() + end + roots = Core.Compiler.Timings._timings[1].children + for child in roots + precompile(child.mi_info.mi.specTypes) + check_edges(child) + end + precompile(Tuple{typeof(Base.setindex!), Base.Dict{Any, Any}, Any, Int}) + precompile(Tuple{typeof(Base.delete!), Base.Set{Any}, String}) + precompile(Tuple{typeof(Base.:(==)), Char, String}) + precompile(Tuple{typeof(Base.reseteof), Base.TTY}) + end +end end # Precompile From f8086062906d80ef56c0dcd595a13438ef028293 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Wed, 18 Sep 2024 00:41:52 +0900 Subject: [PATCH 034/537] use `inferencebarrier` instead of `invokelatest` for 1-arg `@assert` (#55783) This version would be better as per this comment: I confirmed this still allows us to avoid invalidations reported at JuliaLang/julia#55583. --- base/error.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/base/error.jl b/base/error.jl index ee533cee0b57d..c49ede624607d 100644 --- a/base/error.jl +++ b/base/error.jl @@ -232,12 +232,14 @@ macro assert(ex, msgs...) msg = msg # pass-through elseif !isempty(msgs) && (isa(msg, Expr) || isa(msg, Symbol)) # message is an expression needing evaluating - msg = :(Main.Base.invokelatest(Main.Base.string, $(esc(msg)))) + # N.B. To reduce the risk of invalidation caused by the complex callstack involved + # with `string`, use `inferencebarrier` here to hide this `string` from the compiler. + msg = :(Main.Base.inferencebarrier(Main.Base.string)($(esc(msg)))) elseif isdefined(Main, :Base) && isdefined(Main.Base, :string) && applicable(Main.Base.string, msg) msg = Main.Base.string(msg) else # string() might not be defined during bootstrap - msg = :(Main.Base.invokelatest(_assert_tostring, $(Expr(:quote,msg)))) + msg = :(Main.Base.inferencebarrier(_assert_tostring)($(Expr(:quote,msg)))) end return :($(esc(ex)) ? 
$(nothing) : throw(AssertionError($msg))) end From 61c044ca4fe35ed357c77fc50d9a8cf70f6724b0 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Tue, 17 Sep 2024 13:18:44 -0300 Subject: [PATCH 035/537] Inline statically known method errors. (#54972) This replaces the `Expr(:call, ...)` with a call of a new builtin `Core.throw_methoderror` This is useful because it makes very clear if something is a static method error or a plain dynamic dispatch that always errors. Tools such as AllocCheck or juliac can notice that this is not a genuine dynamic dispatch, and prevent it from becoming a false positive compile-time error. Dependent on https://github.com/JuliaLang/julia/pull/55705 --------- Co-authored-by: Cody Tapscott --- base/compiler/abstractinterpretation.jl | 41 ++++++++------- base/compiler/ssair/inlining.jl | 52 +++++++++++-------- base/compiler/stmtinfo.jl | 17 ++++-- base/compiler/tfuncs.jl | 7 ++- base/compiler/types.jl | 6 +++ test/compiler/AbstractInterpreter.jl | 9 ++++ .../compiler/EscapeAnalysis/EscapeAnalysis.jl | 4 +- test/compiler/inline.jl | 39 +++++++++----- test/threads_exec.jl | 1 + 9 files changed, 111 insertions(+), 65 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index bb5f2dd1ad180..f126389c42d2d 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -209,8 +209,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), rettype = exctype = Any all_effects = Effects() else - if (matches isa MethodMatches ? (!matches.fullmatch || any_ambig(matches)) : - (!all(matches.fullmatches) || any_ambig(matches))) + if !fully_covering(matches) || any_ambig(matches) # Account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. 
all_effects = Effects(all_effects; nothrow=false) exctype = exctype ⊔ₚ MethodError @@ -275,21 +274,23 @@ struct MethodMatches applicable::Vector{Any} info::MethodMatchInfo valid_worlds::WorldRange - mt::MethodTable - fullmatch::Bool end -any_ambig(info::MethodMatchInfo) = info.results.ambig +any_ambig(result::MethodLookupResult) = result.ambig +any_ambig(info::MethodMatchInfo) = any_ambig(info.results) any_ambig(m::MethodMatches) = any_ambig(m.info) +fully_covering(info::MethodMatchInfo) = info.fullmatch +fully_covering(m::MethodMatches) = fully_covering(m.info) struct UnionSplitMethodMatches applicable::Vector{Any} applicable_argtypes::Vector{Vector{Any}} info::UnionSplitInfo valid_worlds::WorldRange - mts::Vector{MethodTable} - fullmatches::Vector{Bool} end -any_ambig(m::UnionSplitMethodMatches) = any(any_ambig, m.info.matches) +any_ambig(info::UnionSplitInfo) = any(any_ambig, info.matches) +any_ambig(m::UnionSplitMethodMatches) = any_ambig(m.info) +fully_covering(info::UnionSplitInfo) = all(info.fullmatches) +fully_covering(m::UnionSplitMethodMatches) = fully_covering(m.info) function find_method_matches(interp::AbstractInterpreter, argtypes::Vector{Any}, @nospecialize(atype); max_union_splitting::Int = InferenceParams(interp).max_union_splitting, @@ -307,7 +308,7 @@ is_union_split_eligible(𝕃::AbstractLattice, argtypes::Vector{Any}, max_union_ function find_union_split_method_matches(interp::AbstractInterpreter, argtypes::Vector{Any}, @nospecialize(atype), max_methods::Int) split_argtypes = switchtupleunion(typeinf_lattice(interp), argtypes) - infos = MethodMatchInfo[] + infos = MethodLookupResult[] applicable = Any[] applicable_argtypes = Vector{Any}[] # arrays like `argtypes`, including constants, for each match valid_worlds = WorldRange() @@ -323,29 +324,29 @@ function find_union_split_method_matches(interp::AbstractInterpreter, argtypes:: if matches === nothing return FailedMethodMatch("For one of the union split cases, too many methods matched") end - push!(infos, MethodMatchInfo(matches)) + push!(infos, matches) for m in matches push!(applicable, m) push!(applicable_argtypes, arg_n) end valid_worlds = intersect(valid_worlds, matches.valid_worlds) thisfullmatch = any(match::MethodMatch->match.fully_covers, matches) - found = false + mt_found = false for (i, mt′) in enumerate(mts) if mt′ === mt fullmatches[i] &= thisfullmatch - found = true + mt_found = true break end end - if !found + if !mt_found push!(mts, mt) push!(fullmatches, thisfullmatch) end end - info = UnionSplitInfo(infos) + info = UnionSplitInfo(infos, mts, fullmatches) return UnionSplitMethodMatches( - applicable, applicable_argtypes, info, valid_worlds, mts, fullmatches) + applicable, applicable_argtypes, info, valid_worlds) end function find_simple_method_matches(interp::AbstractInterpreter, @nospecialize(atype), max_methods::Int) @@ -360,10 +361,9 @@ function find_simple_method_matches(interp::AbstractInterpreter, @nospecialize(a # (assume this will always be true, so we don't compute / update valid age in this case) return FailedMethodMatch("Too many methods matched") end - info = MethodMatchInfo(matches) fullmatch = any(match::MethodMatch->match.fully_covers, matches) - return MethodMatches( - matches.matches, info, matches.valid_worlds, mt, fullmatch) + info = MethodMatchInfo(matches, mt, fullmatch) + return MethodMatches(matches.matches, info, matches.valid_worlds) end """ @@ -584,9 +584,10 @@ function add_call_backedges!(interp::AbstractInterpreter, @nospecialize(rettype) # also need an edge to the method table 
in case something gets # added that did not intersect with any existing method if isa(matches, MethodMatches) - matches.fullmatch || add_mt_backedge!(sv, matches.mt, atype) + fully_covering(matches) || add_mt_backedge!(sv, matches.info.mt, atype) else - for (thisfullmatch, mt) in zip(matches.fullmatches, matches.mts) + matches::UnionSplitMethodMatches + for (thisfullmatch, mt) in zip(matches.info.fullmatches, matches.info.mts) thisfullmatch || add_mt_backedge!(sv, mt, atype) end end diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index 70318b9e1a979..727e015b67062 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -50,12 +50,13 @@ struct InliningCase end struct UnionSplit - fully_covered::Bool + handled_all_cases::Bool # All possible dispatches are included in the cases + fully_covered::Bool # All handled cases are fully covering atype::DataType cases::Vector{InliningCase} bbs::Vector{Int} - UnionSplit(fully_covered::Bool, atype::DataType, cases::Vector{InliningCase}) = - new(fully_covered, atype, cases, Int[]) + UnionSplit(handled_all_cases::Bool, fully_covered::Bool, atype::DataType, cases::Vector{InliningCase}) = + new(handled_all_cases, fully_covered, atype, cases, Int[]) end struct InliningEdgeTracker @@ -215,7 +216,7 @@ end function cfg_inline_unionsplit!(ir::IRCode, idx::Int, union_split::UnionSplit, state::CFGInliningState, params::OptimizationParams) - (; fully_covered, #=atype,=# cases, bbs) = union_split + (; handled_all_cases, fully_covered, #=atype,=# cases, bbs) = union_split inline_into_block!(state, block_for_inst(ir, idx)) from_bbs = Int[] delete!(state.split_targets, length(state.new_cfg_blocks)) @@ -235,7 +236,7 @@ function cfg_inline_unionsplit!(ir::IRCode, idx::Int, union_split::UnionSplit, end end push!(from_bbs, length(state.new_cfg_blocks)) - if !(i == length(cases) && fully_covered) + if !(i == length(cases) && (handled_all_cases && fully_covered)) # This block will have the next condition or the final else case push!(state.new_cfg_blocks, BasicBlock(StmtRange(idx, idx))) push!(state.new_cfg_blocks[cond_bb].succs, length(state.new_cfg_blocks)) @@ -244,7 +245,10 @@ function cfg_inline_unionsplit!(ir::IRCode, idx::Int, union_split::UnionSplit, end end # The edge from the fallback block. - fully_covered || push!(from_bbs, length(state.new_cfg_blocks)) + # NOTE This edge is only required for `!handled_all_cases` and not `!fully_covered`, + # since in the latter case we inline `Core.throw_methoderror` into the fallback + # block, which is must-throw, making the subsequent code path unreachable. + !handled_all_cases && push!(from_bbs, length(state.new_cfg_blocks)) # This block will be the block everyone returns to push!(state.new_cfg_blocks, BasicBlock(StmtRange(idx, idx), from_bbs, orig_succs)) join_bb = length(state.new_cfg_blocks) @@ -523,7 +527,7 @@ assuming their order stays the same post-discovery in `ml_matches`. 
function ir_inline_unionsplit!(compact::IncrementalCompact, idx::Int, argexprs::Vector{Any}, union_split::UnionSplit, boundscheck::Symbol, todo_bbs::Vector{Tuple{Int,Int}}, interp::AbstractInterpreter) - (; fully_covered, atype, cases, bbs) = union_split + (; handled_all_cases, fully_covered, atype, cases, bbs) = union_split stmt, typ, line = compact.result[idx][:stmt], compact.result[idx][:type], compact.result[idx][:line] join_bb = bbs[end] pn = PhiNode() @@ -538,7 +542,7 @@ function ir_inline_unionsplit!(compact::IncrementalCompact, idx::Int, argexprs:: cond = true nparams = fieldcount(atype) @assert nparams == fieldcount(mtype) - if !(i == ncases && fully_covered) + if !(i == ncases && fully_covered && handled_all_cases) for i = 1:nparams aft, mft = fieldtype(atype, i), fieldtype(mtype, i) # If this is always true, we don't need to check for it @@ -597,14 +601,18 @@ function ir_inline_unionsplit!(compact::IncrementalCompact, idx::Int, argexprs:: end bb += 1 # We're now in the fall through block, decide what to do - if !fully_covered + if !handled_all_cases ssa = insert_node_here!(compact, NewInstruction(stmt, typ, line)) push!(pn.edges, bb) push!(pn.values, ssa) insert_node_here!(compact, NewInstruction(GotoNode(join_bb), Any, line)) finish_current_bb!(compact, 0) + elseif !fully_covered + insert_node_here!(compact, NewInstruction(Expr(:call, GlobalRef(Core, :throw_methoderror), argexprs...), Union{}, line)) + insert_node_here!(compact, NewInstruction(ReturnNode(), Union{}, line)) + finish_current_bb!(compact, 0) + ncases == 0 && return insert_node_here!(compact, NewInstruction(nothing, Any, line)) end - # We're now in the join block. return insert_node_here!(compact, NewInstruction(pn, typ, line)) end @@ -1348,10 +1356,6 @@ function compute_inlining_cases(@nospecialize(info::CallInfo), flag::UInt32, sig # Too many applicable methods # Or there is a (partial?) 
ambiguity return nothing - elseif length(meth) == 0 - # No applicable methods; try next union split - handled_all_cases = false - continue end local split_fully_covered = false for (j, match) in enumerate(meth) @@ -1392,12 +1396,16 @@ function compute_inlining_cases(@nospecialize(info::CallInfo), flag::UInt32, sig handled_all_cases &= handle_any_const_result!(cases, result, match, argtypes, info, flag, state; allow_typevars=true) end + if !fully_covered + atype = argtypes_to_type(sig.argtypes) + # We will emit an inline MethodError so we need a backedge to the MethodTable + add_uncovered_edges!(state.edges, info, atype) + end elseif !isempty(cases) # if we've not seen all candidates, union split is valid only for dispatch tuples filter!(case::InliningCase->isdispatchtuple(case.sig), cases) end - - return cases, (handled_all_cases & fully_covered), joint_effects + return cases, handled_all_cases, fully_covered, joint_effects end function handle_call!(todo::Vector{Pair{Int,Any}}, @@ -1405,9 +1413,9 @@ function handle_call!(todo::Vector{Pair{Int,Any}}, state::InliningState) cases = compute_inlining_cases(info, flag, sig, state) cases === nothing && return nothing - cases, all_covered, joint_effects = cases + cases, handled_all_cases, fully_covered, joint_effects = cases atype = argtypes_to_type(sig.argtypes) - handle_cases!(todo, ir, idx, stmt, atype, cases, all_covered, joint_effects) + handle_cases!(todo, ir, idx, stmt, atype, cases, handled_all_cases, fully_covered, joint_effects) end function handle_match!(cases::Vector{InliningCase}, @@ -1496,19 +1504,19 @@ function concrete_result_item(result::ConcreteResult, @nospecialize(info::CallIn end function handle_cases!(todo::Vector{Pair{Int,Any}}, ir::IRCode, idx::Int, stmt::Expr, - @nospecialize(atype), cases::Vector{InliningCase}, all_covered::Bool, + @nospecialize(atype), cases::Vector{InliningCase}, handled_all_cases::Bool, fully_covered::Bool, joint_effects::Effects) # If we only have one case and that case is fully covered, we may either # be able to do the inlining now (for constant cases), or push it directly # onto the todo list - if all_covered && length(cases) == 1 + if fully_covered && handled_all_cases && length(cases) == 1 handle_single_case!(todo, ir, idx, stmt, cases[1].item) - elseif length(cases) > 0 + elseif length(cases) > 0 || handled_all_cases isa(atype, DataType) || return nothing for case in cases isa(case.sig, DataType) || return nothing end - push!(todo, idx=>UnionSplit(all_covered, atype, cases)) + push!(todo, idx=>UnionSplit(handled_all_cases, fully_covered, atype, cases)) else add_flag!(ir[SSAValue(idx)], flags_for_effects(joint_effects)) end diff --git a/base/compiler/stmtinfo.jl b/base/compiler/stmtinfo.jl index 69d2ac7ae45a0..33fca90b6261e 100644 --- a/base/compiler/stmtinfo.jl +++ b/base/compiler/stmtinfo.jl @@ -33,10 +33,13 @@ not a call to a generic function. """ struct MethodMatchInfo <: CallInfo results::MethodLookupResult + mt::MethodTable + fullmatch::Bool end nsplit_impl(info::MethodMatchInfo) = 1 getsplit_impl(info::MethodMatchInfo, idx::Int) = (@assert idx == 1; info.results) getresult_impl(::MethodMatchInfo, ::Int) = nothing +add_uncovered_edges_impl(edges::Vector{Any}, info::MethodMatchInfo, @nospecialize(atype)) = (!info.fullmatch && push!(edges, info.mt, atype); ) """ info::UnionSplitInfo <: CallInfo @@ -48,20 +51,27 @@ each partition (`info.matches::Vector{MethodMatchInfo}`). This info is illegal on any statement that is not a call to a generic function. 
""" struct UnionSplitInfo <: CallInfo - matches::Vector{MethodMatchInfo} + matches::Vector{MethodLookupResult} + mts::Vector{MethodTable} + fullmatches::Vector{Bool} end nmatches(info::MethodMatchInfo) = length(info.results) function nmatches(info::UnionSplitInfo) n = 0 for mminfo in info.matches - n += nmatches(mminfo) + n += length(mminfo) end return n end nsplit_impl(info::UnionSplitInfo) = length(info.matches) -getsplit_impl(info::UnionSplitInfo, idx::Int) = getsplit_impl(info.matches[idx], 1) +getsplit_impl(info::UnionSplitInfo, idx::Int) = info.matches[idx] getresult_impl(::UnionSplitInfo, ::Int) = nothing +function add_uncovered_edges_impl(edges::Vector{Any}, info::UnionSplitInfo, @nospecialize(atype)) + for (mt, fullmatch) in zip(info.mts, info.fullmatches) + !fullmatch && push!(edges, mt, atype) + end +end abstract type ConstResult end @@ -105,6 +115,7 @@ end nsplit_impl(info::ConstCallInfo) = nsplit(info.call) getsplit_impl(info::ConstCallInfo, idx::Int) = getsplit(info.call, idx) getresult_impl(info::ConstCallInfo, idx::Int) = info.results[idx] +add_uncovered_edges_impl(edges::Vector{Any}, info::ConstCallInfo, @nospecialize(atype)) = add_uncovered_edges!(edges, info.call, atype) """ info::MethodResultPure <: CallInfo diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index 64a93bd07c2fa..6bb73ded8660d 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -2983,9 +2983,9 @@ function abstract_applicable(interp::AbstractInterpreter, argtypes::Vector{Any}, # also need an edge to the method table in case something gets # added that did not intersect with any existing method if isa(matches, MethodMatches) - matches.fullmatch || add_mt_backedge!(sv, matches.mt, atype) + fully_covering(matches) || add_mt_backedge!(sv, matches.info.mt, atype) else - for (thisfullmatch, mt) in zip(matches.fullmatches, matches.mts) + for (thisfullmatch, mt) in zip(matches.info.fullmatches, matches.info.mts) thisfullmatch || add_mt_backedge!(sv, mt, atype) end end @@ -3001,8 +3001,7 @@ function abstract_applicable(interp::AbstractInterpreter, argtypes::Vector{Any}, add_backedge!(sv, edge) end - if isa(matches, MethodMatches) ? (!matches.fullmatch || any_ambig(matches)) : - (!all(matches.fullmatches) || any_ambig(matches)) + if !fully_covering(matches) || any_ambig(matches) # Account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. 
rt = Bool end diff --git a/base/compiler/types.jl b/base/compiler/types.jl index f315b7968fd9b..015b1dbc00a6f 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -450,10 +450,16 @@ abstract type CallInfo end nsplit(info::CallInfo) = nsplit_impl(info)::Union{Nothing,Int} getsplit(info::CallInfo, idx::Int) = getsplit_impl(info, idx)::MethodLookupResult +add_uncovered_edges!(edges::Vector{Any}, info::CallInfo, @nospecialize(atype)) = add_uncovered_edges_impl(edges, info, atype) + getresult(info::CallInfo, idx::Int) = getresult_impl(info, idx) +# must implement `nsplit`, `getsplit`, and `add_uncovered_edges!` to opt in to inlining nsplit_impl(::CallInfo) = nothing getsplit_impl(::CallInfo, ::Int) = error("unexpected call into `getsplit`") +add_uncovered_edges_impl(edges::Vector{Any}, info::CallInfo, @nospecialize(atype)) = error("unexpected call into `add_uncovered_edges!`") + +# must implement `getresult` to opt in to extended lattice return information getresult_impl(::CallInfo, ::Int) = nothing @specialize diff --git a/test/compiler/AbstractInterpreter.jl b/test/compiler/AbstractInterpreter.jl index d95354cefa80c..e92b67f980942 100644 --- a/test/compiler/AbstractInterpreter.jl +++ b/test/compiler/AbstractInterpreter.jl @@ -409,6 +409,7 @@ end CC.nsplit_impl(info::NoinlineCallInfo) = CC.nsplit(info.info) CC.getsplit_impl(info::NoinlineCallInfo, idx::Int) = CC.getsplit(info.info, idx) CC.getresult_impl(info::NoinlineCallInfo, idx::Int) = CC.getresult(info.info, idx) +CC.add_uncovered_edges_impl(edges::Vector{Any}, info::NoinlineCallInfo, @nospecialize(atype)) = CC.add_uncovered_edges!(edges, info.info, atype) function CC.abstract_call(interp::NoinlineInterpreter, arginfo::CC.ArgInfo, si::CC.StmtInfo, sv::CC.InferenceState, max_methods::Int) @@ -431,6 +432,8 @@ end @inline function inlined_usually(x, y, z) return x * y + z end +foo_split(x::Float64) = 1 +foo_split(x::Int) = 2 # check if the inlining algorithm works as expected let src = code_typed1((Float64,Float64,Float64)) do x, y, z @@ -444,6 +447,7 @@ let NoinlineModule = Module() main_func(x, y, z) = inlined_usually(x, y, z) @eval NoinlineModule noinline_func(x, y, z) = $inlined_usually(x, y, z) @eval OtherModule other_func(x, y, z) = $inlined_usually(x, y, z) + @eval NoinlineModule bar_split_error() = $foo_split(Core.compilerbarrier(:type, nothing)) interp = NoinlineInterpreter(Set((NoinlineModule,))) @@ -473,6 +477,11 @@ let NoinlineModule = Module() @test count(isinvoke(:inlined_usually), src.code) == 0 @test count(iscall((src, inlined_usually)), src.code) == 0 end + + let src = code_typed1(NoinlineModule.bar_split_error) + @test count(iscall((src, foo_split)), src.code) == 0 + @test count(iscall((src, Core.throw_methoderror)), src.code) > 0 + end end # Make sure that Core.Compiler has enough NamedTuple infrastructure diff --git a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl b/test/compiler/EscapeAnalysis/EscapeAnalysis.jl index 31c21f7228014..8c3e065818208 100644 --- a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl +++ b/test/compiler/EscapeAnalysis/EscapeAnalysis.jl @@ -2240,13 +2240,13 @@ end # accounts for ThrownEscape via potential MethodError # no method error -@noinline identity_if_string(x::SafeRef) = (println("preventing inlining"); nothing) +@noinline identity_if_string(x::SafeRef{<:AbstractString}) = (println("preventing inlining"); nothing) let result = code_escapes((SafeRef{String},)) do x identity_if_string(x) end @test has_no_escape(ignore_argescape(result.state[Argument(2)])) end -let result = 
code_escapes((Union{SafeRef{String},Nothing},)) do x +let result = code_escapes((SafeRef,)) do x identity_if_string(x) end i = only(findall(iscall((result.ir, identity_if_string)), result.ir.stmts.stmt)) diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl index a8b5fd66dcd0d..80c8ddbb08c69 100644 --- a/test/compiler/inline.jl +++ b/test/compiler/inline.jl @@ -876,7 +876,7 @@ let src = code_typed1((Any,)) do x abstract_unionsplit_fallback(x) end @test count(isinvoke(:abstract_unionsplit_fallback), src.code) == 2 - @test count(iscall((src, abstract_unionsplit_fallback)), src.code) == 1 # fallback dispatch + @test count(iscall((src, Core.throw_methoderror)), src.code) == 1 # fallback method error end let src = code_typed1((Union{Type,Number},)) do x abstract_unionsplit_fallback(x) @@ -912,7 +912,7 @@ let src = code_typed1((Any,)) do x @test count(iscall((src, typeof)), src.code) == 2 @test count(isinvoke(:println), src.code) == 0 @test count(iscall((src, println)), src.code) == 0 - @test count(iscall((src, abstract_unionsplit_fallback)), src.code) == 1 # fallback dispatch + @test count(iscall((src, Core.throw_methoderror)), src.code) == 1 # fallback method error end let src = code_typed1((Union{Type,Number},)) do x abstract_unionsplit_fallback(false, x) @@ -960,8 +960,8 @@ let # aggressive inlining of single, abstract method match end |> only |> first # both callsites should be inlined @test count(isinvoke(:has_free_typevars), src.code) == 2 - # `isGoodType(y::Any)` isn't fully covered, thus a runtime type check and fallback dynamic dispatch should be inserted - @test count(iscall((src,isGoodType)), src.code) == 1 + # `isGoodType(y::Any)` isn't fully covered, so the fallback is a method error + @test count(iscall((src, Core.throw_methoderror)), src.code) == 1 # fallback method error end @inline isGoodType2(cnd, @nospecialize x::Type) = @@ -973,8 +973,8 @@ let # aggressive inlining of single, abstract method match (with constant-prop'e # both callsite should be inlined with constant-prop'ed result @test count(isinvoke(:isType), src.code) == 2 @test count(isinvoke(:has_free_typevars), src.code) == 0 - # `isGoodType(y::Any)` isn't fully covered, thus a runtime type check and fallback dynamic dispatch should be inserted - @test count(iscall((src,isGoodType2)), src.code) == 1 + # `isGoodType(y::Any)` isn't fully covered, thus a MethodError gets inserted + @test count(iscall((src, Core.throw_methoderror)), src.code) == 1 # fallback method error end @noinline function checkBadType!(@nospecialize x::Type) @@ -989,8 +989,8 @@ let # aggressive static dispatch of single, abstract method match end |> only |> first # both callsites should be resolved statically @test count(isinvoke(:checkBadType!), src.code) == 2 - # `checkBadType!(y::Any)` isn't fully covered, thus a runtime type check and fallback dynamic dispatch should be inserted - @test count(iscall((src,checkBadType!)), src.code) == 1 + # `checkBadType!(y::Any)` isn't fully covered, thus a MethodError gets inserted + @test count(iscall((src, Core.throw_methoderror)), src.code) == 1 # fallback method error end @testset "late_inline_special_case!" 
begin @@ -2004,7 +2004,7 @@ f48397(::Tuple{String,String}) = :ok let src = code_typed1((Union{Bool,Tuple{String,Any}},)) do x f48397(x) end - @test any(iscall((src, f48397)), src.code) + @test any(iscall((src, Core.throw_methoderror)), src.code) # fallback method error) end g48397::Union{Bool,Tuple{String,Any}} = ("48397", 48397) let res = @test_throws MethodError let @@ -2175,11 +2175,6 @@ let src = code_typed1() do @test count(isinvoke(:iterate), src.code) == 0 end -# JuliaLang/julia#53062: proper `joint_effects` for call with empty method matches -let ir = first(only(Base.code_ircode(setproperty!, (Base.RefValue{Int},Symbol,Base.RefValue{Int})))) - i = findfirst(iscall((ir, convert)), ir.stmts.stmt)::Int - @test iszero(ir.stmts.flag[i] & Core.Compiler.IR_FLAG_NOTHROW) -end function issue53062(cond) x = Ref{Int}(0) if cond @@ -2214,3 +2209,19 @@ let ir = Base.code_ircode((Issue52644,); optimize_until="Inlining") do t @test irfunc(Issue52644(Tuple{})) === :DataType @test_throws MethodError irfunc(Issue52644(Tuple{<:Integer})) end + +foo_split(x::Float64) = 1 +foo_split(x::Int) = 2 +bar_inline_error() = foo_split(nothing) +bar_split_error() = foo_split(Core.compilerbarrier(:type,nothing)) + +let src = code_typed1(bar_inline_error, Tuple{}) + # Should inline method errors + @test count(iscall((src, foo_split)), src.code) == 0 + @test count(iscall((src, Core.throw_methoderror)), src.code) > 0 +end +let src = code_typed1(bar_split_error, Tuple{}) + # Should inline method errors + @test count(iscall((src, foo_split)), src.code) == 0 + @test count(iscall((src, Core.throw_methoderror)), src.code) > 0 +end diff --git a/test/threads_exec.jl b/test/threads_exec.jl index 595f8991d58d7..ac54dd009390c 100644 --- a/test/threads_exec.jl +++ b/test/threads_exec.jl @@ -1235,6 +1235,7 @@ end @testset "throw=true" begin tasks, event = create_tasks() push!(tasks, Threads.@spawn error("Error")) + wait(tasks[end]; throw=false) @test_throws CompositeException begin waitany(convert_tasks(tasks_type, tasks); throw=true) From 48ddd2dc2859f41fc1a82876cc5654fb31d7b2ba Mon Sep 17 00:00:00 2001 From: norci Date: Wed, 18 Sep 2024 01:14:35 +0800 Subject: [PATCH 036/537] Fix shell `cd` error when working dir has been deleted (#41244) root cause: if current dir has been deleted, then pwd() will throw an IOError: pwd(): no such file or directory (ENOENT) --------- Co-authored-by: Ian Butterworth --- base/client.jl | 13 +++++++++---- test/file.jl | 20 ++++++++++++++++++++ 2 files changed, 29 insertions(+), 4 deletions(-) diff --git a/base/client.jl b/base/client.jl index 2ca88c40aeb7e..a04556507d5dc 100644 --- a/base/client.jl +++ b/base/client.jl @@ -41,7 +41,6 @@ function repl_cmd(cmd, out) if isempty(cmd.exec) throw(ArgumentError("no cmd to execute")) elseif cmd.exec[1] == "cd" - new_oldpwd = pwd() if length(cmd.exec) > 2 throw(ArgumentError("cd method only takes one argument")) elseif length(cmd.exec) == 2 @@ -52,11 +51,17 @@ function repl_cmd(cmd, out) end dir = ENV["OLDPWD"] end - cd(dir) else - cd() + dir = homedir() end - ENV["OLDPWD"] = new_oldpwd + try + ENV["OLDPWD"] = pwd() + catch ex + ex isa IOError || rethrow() + # if current dir has been deleted, then pwd() will throw an IOError: pwd(): no such file or directory (ENOENT) + delete!(ENV, "OLDPWD") + end + cd(dir) println(out, pwd()) else @static if !Sys.iswindows() diff --git a/test/file.jl b/test/file.jl index de6d488056a02..de258c92e02bc 100644 --- a/test/file.jl +++ b/test/file.jl @@ -1908,6 +1908,26 @@ end end end +@testset "pwd tests" begin + mktempdir() 
do dir + cd(dir) do + withenv("OLDPWD" => nothing) do + io = IOBuffer() + Base.repl_cmd(@cmd("cd"), io) + Base.repl_cmd(@cmd("cd -"), io) + @test realpath(pwd()) == realpath(dir) + if !Sys.iswindows() + # Delete the working directory and check we can cd out of it + # Cannot delete the working directory on Windows + rm(dir) + @test_throws Base._UVError("pwd()", Base.UV_ENOENT) pwd() + Base.repl_cmd(@cmd("cd \\~"), io) + end + end + end + end +end + @testset "readdir tests" begin ≛(a, b) = sort(a) == sort(b) mktempdir() do dir From 00739173311983479fee3f5f826e4b763872860a Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 17 Sep 2024 19:47:48 -0400 Subject: [PATCH 037/537] codegen: fix bits compare for UnionAll (#55770) Fixes #55768 in two parts: one is making the type computation in emit_bits_compare agree with the parent function and two is not using the optimized egal code for UnionAll kinds, which is different from how the egal code itself works for kinds. --- src/codegen.cpp | 21 ++++++++++++--------- test/compiler/codegen.jl | 5 +++++ 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 73a5f844b31da..6ae4f56a53ee2 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3484,25 +3484,26 @@ static size_t emit_masked_bits_compare(callback &emit_desc, jl_datatype_t *aty, static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t arg2) { ++EmittedBitsCompares; + jl_value_t *argty = (arg1.constant ? jl_typeof(arg1.constant) : arg1.typ); bool isboxed; Type *at = julia_type_to_llvm(ctx, arg1.typ, &isboxed); - assert(jl_is_datatype(arg1.typ) && arg1.typ == arg2.typ && !isboxed); + assert(jl_is_datatype(arg1.typ) && arg1.typ == (arg2.constant ? jl_typeof(arg2.constant) : arg2.typ) && !isboxed); if (type_is_ghost(at)) return ConstantInt::get(getInt1Ty(ctx.builder.getContext()), 1); if (at->isIntegerTy() || at->isPointerTy() || at->isFloatingPointTy()) { Type *at_int = INTT(at, ctx.emission_context.DL); - Value *varg1 = emit_unbox(ctx, at_int, arg1, arg1.typ); - Value *varg2 = emit_unbox(ctx, at_int, arg2, arg2.typ); + Value *varg1 = emit_unbox(ctx, at_int, arg1, argty); + Value *varg2 = emit_unbox(ctx, at_int, arg2, argty); return ctx.builder.CreateICmpEQ(varg1, varg2); } if (at->isVectorTy()) { - jl_svec_t *types = ((jl_datatype_t*)arg1.typ)->types; + jl_svec_t *types = ((jl_datatype_t*)argty)->types; Value *answer = ConstantInt::get(getInt1Ty(ctx.builder.getContext()), 1); - Value *varg1 = emit_unbox(ctx, at, arg1, arg1.typ); - Value *varg2 = emit_unbox(ctx, at, arg2, arg2.typ); + Value *varg1 = emit_unbox(ctx, at, arg1, argty); + Value *varg2 = emit_unbox(ctx, at, arg2, argty); for (size_t i = 0, l = jl_svec_len(types); i < l; i++) { jl_value_t *fldty = jl_svecref(types, i); Value *subAns, *fld1, *fld2; @@ -3517,7 +3518,7 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a } if (at->isAggregateType()) { // Struct or Array - jl_datatype_t *sty = (jl_datatype_t*)arg1.typ; + jl_datatype_t *sty = (jl_datatype_t*)argty; size_t sz = jl_datatype_size(sty); if (sz > 512 && !sty->layout->flags.haspadding && sty->layout->flags.isbitsegal) { Value *varg1 = arg1.ispointer() ? 
data_pointer(ctx, arg1) : @@ -3721,8 +3722,10 @@ static Value *emit_f_is(jl_codectx_t &ctx, const jl_cgval_t &arg1, const jl_cgva if (jl_type_intersection(rt1, rt2) == (jl_value_t*)jl_bottom_type) // types are disjoint (exhaustive test) return ConstantInt::get(getInt1Ty(ctx.builder.getContext()), 0); - bool justbits1 = jl_is_concrete_immutable(rt1); - bool justbits2 = jl_is_concrete_immutable(rt2); + // can compare any concrete immutable by bits, except for UnionAll + // which has a special non-bits based egal + bool justbits1 = jl_is_concrete_immutable(rt1) && !jl_is_kind(rt1); + bool justbits2 = jl_is_concrete_immutable(rt2) && !jl_is_kind(rt2); if (justbits1 || justbits2) { // whether this type is unique'd by value return emit_nullcheck_guard2(ctx, nullcheck1, nullcheck2, [&] () -> Value* { jl_datatype_t *typ = (jl_datatype_t*)(justbits1 ? rt1 : rt2); diff --git a/test/compiler/codegen.jl b/test/compiler/codegen.jl index 0260113044a3b..07308713bb789 100644 --- a/test/compiler/codegen.jl +++ b/test/compiler/codegen.jl @@ -999,3 +999,8 @@ for (T, StructName) in ((Int128, :Issue55558), (UInt128, :UIssue55558)) @test sizeof($(StructName)) == 48 broken=broken_i128 end end + +@noinline Base.@nospecializeinfer f55768(@nospecialize z::UnionAll) = z === Vector +@test f55768(Vector) +@test f55768(Vector{T} where T) +@test !f55768(Vector{S} where S) From 95a32db42c3cca841006836ddd88c18aac3afb5a Mon Sep 17 00:00:00 2001 From: Diogo Netto <61364108+d-netto@users.noreply.github.com> Date: Wed, 18 Sep 2024 17:51:34 -0300 Subject: [PATCH 038/537] use libuv to measure maxrss (#55806) Libuv has a wrapper around rusage on Unix (and its equivalent on Windows). We should probably use it. --- src/sys.c | 25 +++++-------------------- 1 file changed, 5 insertions(+), 20 deletions(-) diff --git a/src/sys.c b/src/sys.c index 712d232da363a..b54edc32b32b6 100644 --- a/src/sys.c +++ b/src/sys.c @@ -772,26 +772,11 @@ JL_DLLEXPORT jl_sym_t *jl_get_ARCH(void) JL_NOTSAFEPOINT JL_DLLEXPORT size_t jl_maxrss(void) { -#if defined(_OS_WINDOWS_) - PROCESS_MEMORY_COUNTERS counter; - GetProcessMemoryInfo( GetCurrentProcess( ), &counter, sizeof(counter) ); - return (size_t)counter.PeakWorkingSetSize; - -// FIXME: `rusage` is available on OpenBSD, DragonFlyBSD and NetBSD as well. -// All of them return `ru_maxrss` in kilobytes. 
-#elif defined(_OS_LINUX_) || defined(_OS_DARWIN_) || defined (_OS_FREEBSD_) || defined (_OS_OPENBSD_) - struct rusage rusage; - getrusage( RUSAGE_SELF, &rusage ); - -#if defined(_OS_LINUX_) || defined(_OS_FREEBSD_) || defined (_OS_OPENBSD_) - return (size_t)(rusage.ru_maxrss * 1024); -#else - return (size_t)rusage.ru_maxrss; -#endif - -#else - return (size_t)0; -#endif + uv_rusage_t rusage; + if (uv_getrusage(&rusage) == 0) { + return rusage.ru_maxrss * 1024; + } + return 0; } // Simple `rand()` like function, with global seed and added thread-safety From e4c8d4f7976162dcf5eebc15d93d2408cb6d0666 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Wed, 18 Sep 2024 17:49:15 -0400 Subject: [PATCH 039/537] REPL: use atreplinit to change the active module during precompilation (#55805) --- stdlib/REPL/src/precompile.jl | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/stdlib/REPL/src/precompile.jl b/stdlib/REPL/src/precompile.jl index 6bcec6415ba96..f7961a205e0b1 100644 --- a/stdlib/REPL/src/precompile.jl +++ b/stdlib/REPL/src/precompile.jl @@ -37,9 +37,21 @@ function repl_workload() UP_ARROW = "\e[A" DOWN_ARROW = "\e[B" + # This is notified as soon as the first prompt appears + repl_init_event = Base.Event() + + atreplinit() do repl + # Main is closed so we can't evaluate in it, but atreplinit runs at + # a time that repl.mistate === nothing so REPL.activate fails. So do + # it async and wait for the first prompt to know its ready. + t = @async begin + wait(repl_init_event) + REPL.activate(REPL.Precompile; interactive_utils=false) + end + Base.errormonitor(t) + end + repl_script = """ - import REPL - REPL.activate(REPL.Precompile; interactive_utils=false) # Main is closed so we can't evaluate in it 2+2 print("") printstyled("a", "b") @@ -62,7 +74,6 @@ function repl_workload() [][1] Base.Iterators.minimum cd("complete_path\t\t$CTRL_C - REPL.activate(; interactive_utils=false) println("done") """ @@ -159,6 +170,7 @@ function repl_workload() occursin(HELP_PROMPT, strbuf) && break sleep(0.1) end + notify(repl_init_event) check_errors(strbuf) end write(debug_output, "\n#### COMPLETED - Closing REPL ####\n") From 441bcd05feb3cbb325bc169f6e9e9d1a989f5f9f Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Wed, 18 Sep 2024 19:23:39 -0400 Subject: [PATCH 040/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=20299a35610=20to=20308f9d32f=20(#55808)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../Pkg-299a356100f54215388502148979189aff760822.tar.gz/md5 | 1 - .../Pkg-299a356100f54215388502148979189aff760822.tar.gz/sha512 | 1 - .../Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 | 1 + .../Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 | 1 + stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/Pkg-299a356100f54215388502148979189aff760822.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-299a356100f54215388502148979189aff760822.tar.gz/sha512 create mode 100644 deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 create mode 100644 deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 diff --git a/deps/checksums/Pkg-299a356100f54215388502148979189aff760822.tar.gz/md5 b/deps/checksums/Pkg-299a356100f54215388502148979189aff760822.tar.gz/md5 deleted file mode 100644 index 3c112b99f88d9..0000000000000 --- 
a/deps/checksums/Pkg-299a356100f54215388502148979189aff760822.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -791c9ca37077fdc36b959a17904dd935 diff --git a/deps/checksums/Pkg-299a356100f54215388502148979189aff760822.tar.gz/sha512 b/deps/checksums/Pkg-299a356100f54215388502148979189aff760822.tar.gz/sha512 deleted file mode 100644 index c7c212047d2b0..0000000000000 --- a/deps/checksums/Pkg-299a356100f54215388502148979189aff760822.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -96520326931685d4300e825a302010f113e942aaa55aa4ff12caf3e9df314309df993c97753ae482c2198db67678423885bf5ea40c743c8e4b6ef96d7b8d4472 diff --git a/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 b/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 new file mode 100644 index 0000000000000..b59e1d8427b8b --- /dev/null +++ b/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 @@ -0,0 +1 @@ +b48c15e727d96a7525e0b800180d46f4 diff --git a/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 b/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 new file mode 100644 index 0000000000000..4f4bce61f1f0f --- /dev/null +++ b/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 @@ -0,0 +1 @@ +edc2c19bccf6b00e3ea7c4e0b1af36ca86c7e3f521d8c3c05a930ce3d961fb0259a98ae27be5c3e052418f9b4e7ca74cc4d3fee59dac12d47bd1ac5cd9e34fbe diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index 3d4a627d6e472..602fbcc648e59 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = 299a356100f54215388502148979189aff760822 +PKG_SHA1 = 308f9d32fcec769fbed8cf6c5a17d54753ca1f5b PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From 86c567f6d9039ba58036031e7218dffe08b1cc16 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Wed, 18 Sep 2024 20:04:04 -0400 Subject: [PATCH 041/537] Improve codegen for `Core.throw_methoderror` and `Core.current_scope` (#55803) This slightly improves our (LLVM) codegen for `Core.throw_methoderror` and `Core.current_scope` ```julia julia> foo() = Core.current_scope() julia> bar() = Core.throw_methoderror(+, nothing) ``` Before: ```llvm ; Function Signature: foo() define nonnull ptr @julia_foo_2488() #0 { top: %0 = call ptr @jl_get_builtin_fptr(ptr nonnull @"+Core.#current_scope#2491.jit") %Builtin_ret = call nonnull ptr %0(ptr nonnull @"jl_global#2492.jit", ptr null, i32 0) ret ptr %Builtin_ret } ; Function Signature: bar() define void @julia_bar_589() #0 { top: %jlcallframe1 = alloca [2 x ptr], align 8 %0 = call ptr @jl_get_builtin_fptr(ptr nonnull @"+Core.#throw_methoderror#591.jit") %jl_nothing = load ptr, ptr @jl_nothing, align 8 store ptr @"jl_global#593.jit", ptr %jlcallframe1, align 8 %1 = getelementptr inbounds ptr, ptr %jlcallframe1, i64 1 store ptr %jl_nothing, ptr %1, align 8 %Builtin_ret = call nonnull ptr %0(ptr nonnull @"jl_global#592.jit", ptr nonnull %jlcallframe1, i32 2) call void @llvm.trap() unreachable } ``` After: ```llvm ; Function Signature: foo() define nonnull ptr @julia_foo_713() #0 { top: %thread_ptr = call ptr asm "movq %fs:0, $0", "=r"() #5 %tls_ppgcstack = getelementptr inbounds i8, ptr %thread_ptr, i64 -8 %tls_pgcstack = load ptr, ptr %tls_ppgcstack, align 8 %current_scope = getelementptr inbounds i8, ptr %tls_pgcstack, i64 -72 %0 = load ptr, ptr %current_scope, align 8 ret ptr %0 } ; Function Signature: bar() define void 
@julia_bar_1581() #0 { top: %jlcallframe1 = alloca [2 x ptr], align 8 %jl_nothing = load ptr, ptr @jl_nothing, align 8 store ptr @"jl_global#1583.jit", ptr %jlcallframe1, align 8 %0 = getelementptr inbounds ptr, ptr %jlcallframe1, i64 1 store ptr %jl_nothing, ptr %0, align 8 %jl_f_throw_methoderror_ret = call nonnull ptr @jl_f_throw_methoderror(ptr null, ptr nonnull %jlcallframe1, i32 2) call void @llvm.trap() unreachable } ``` --- src/builtins.c | 2 +- src/codegen.cpp | 13 ++++++++++++- src/staticdata.c | 3 ++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/src/builtins.c b/src/builtins.c index 75c4d02c898b2..96c4cec0f5087 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -2444,7 +2444,7 @@ void jl_init_primitives(void) JL_GC_DISABLED add_builtin_func("finalizer", jl_f_finalizer); add_builtin_func("_compute_sparams", jl_f__compute_sparams); add_builtin_func("_svec_ref", jl_f__svec_ref); - add_builtin_func("current_scope", jl_f_current_scope); + jl_builtin_current_scope = add_builtin_func("current_scope", jl_f_current_scope); add_builtin_func("throw_methoderror", jl_f_throw_methoderror); // builtin types diff --git a/src/codegen.cpp b/src/codegen.cpp index 6ae4f56a53ee2..6d4ecc63e5ca1 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1619,6 +1619,7 @@ static const auto &builtin_func_map() { { jl_f__call_in_world_addr, new JuliaFunction<>{XSTR(jl_f__call_in_world), get_func_sig, get_func_attrs} }, { jl_f__call_in_world_total_addr, new JuliaFunction<>{XSTR(jl_f__call_in_world_total), get_func_sig, get_func_attrs} }, { jl_f_throw_addr, new JuliaFunction<>{XSTR(jl_f_throw), get_func_sig, get_func_attrs} }, + { jl_f_throw_methoderror_addr, new JuliaFunction<>{XSTR(jl_f_throw_methoderror), get_func_sig, get_func_attrs} }, { jl_f_tuple_addr, jltuple_func }, { jl_f_svec_addr, new JuliaFunction<>{XSTR(jl_f_svec), get_func_sig, get_func_attrs} }, { jl_f_applicable_addr, new JuliaFunction<>{XSTR(jl_f_applicable), get_func_sig, get_func_attrs} }, @@ -1644,7 +1645,8 @@ static const auto &builtin_func_map() { { jl_f_donotdelete_addr, new JuliaFunction<>{XSTR(jl_f_donotdelete), get_donotdelete_sig, get_donotdelete_func_attrs} }, { jl_f_compilerbarrier_addr, new JuliaFunction<>{XSTR(jl_f_compilerbarrier), get_func_sig, get_func_attrs} }, { jl_f_finalizer_addr, new JuliaFunction<>{XSTR(jl_f_finalizer), get_func_sig, get_func_attrs} }, - { jl_f__svec_ref_addr, new JuliaFunction<>{XSTR(jl_f__svec_ref), get_func_sig, get_func_attrs} } + { jl_f__svec_ref_addr, new JuliaFunction<>{XSTR(jl_f__svec_ref), get_func_sig, get_func_attrs} }, + { jl_f_current_scope_addr, new JuliaFunction<>{XSTR(jl_f_current_scope), get_func_sig, get_func_attrs} }, }; return builtins; } @@ -2117,6 +2119,7 @@ static jl_cgval_t emit_sparam(jl_codectx_t &ctx, size_t i); static Value *emit_condition(jl_codectx_t &ctx, const jl_cgval_t &condV, const Twine &msg); static Value *get_current_task(jl_codectx_t &ctx); static Value *get_current_ptls(jl_codectx_t &ctx); +static Value *get_scope_field(jl_codectx_t &ctx); static Value *get_tls_world_age_field(jl_codectx_t &ctx); static void CreateTrap(IRBuilder<> &irbuilder, bool create_new_block = true); static CallInst *emit_jlcall(jl_codectx_t &ctx, FunctionCallee theFptr, Value *theF, @@ -4944,6 +4947,14 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, return true; } + else if (f == jl_builtin_current_scope && (nargs == 0)) { + jl_aliasinfo_t scope_ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); + Instruction *v = 
scope_ai.decorateInst( + ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, get_scope_field(ctx), ctx.types().alignof_ptr)); + *ret = mark_julia_type(ctx, v, /*boxed*/ true, rt); + return true; + } + else if (f == jl_builtin_donotdelete) { // For now we emit this as a vararg call to the builtin // (which doesn't look at the arguments). In the future, diff --git a/src/staticdata.c b/src/staticdata.c index b991dfe8f37f3..363aa46b62221 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -100,7 +100,7 @@ extern "C" { // TODO: put WeakRefs on the weak_refs list during deserialization // TODO: handle finalizers -#define NUM_TAGS 192 +#define NUM_TAGS 193 // An array of references that need to be restored from the sysimg // This is a manually constructed dual of the gvars array, which would be produced by codegen for Julia code, for C. @@ -312,6 +312,7 @@ jl_value_t **const*const get_tags(void) { INSERT_TAG(jl_builtin_modifyglobal); INSERT_TAG(jl_builtin_replaceglobal); INSERT_TAG(jl_builtin_setglobalonce); + INSERT_TAG(jl_builtin_current_scope); // n.b. must update NUM_TAGS when you add something here #undef INSERT_TAG assert(i == NUM_TAGS - 1); From 58b239c5b8eaffec3b9b99e6d7c37e8ae6129d6d Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Thu, 19 Sep 2024 12:08:34 +0900 Subject: [PATCH 042/537] a minor improvement for EA-based `:effect_free`-ness refinement (#55796) --- base/compiler/optimize.jl | 6 ++++-- base/compiler/types.jl | 2 +- test/compiler/effects.jl | 8 ++++++++ 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index fb712b1c71b12..6b0cf981930ad 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -702,6 +702,8 @@ function check_all_args_noescape!(sv::PostOptAnalysisState, ir::IRCode, @nospeci else return false end + has_no_escape(x::EscapeAnalysis.EscapeInfo) = + EscapeAnalysis.has_no_escape(EscapeAnalysis.ignore_argescape(x)) for i = startidx:length(stmt.args) arg = stmt.args[i] argt = argextype(arg, ir) @@ -710,7 +712,7 @@ function check_all_args_noescape!(sv::PostOptAnalysisState, ir::IRCode, @nospeci end # See if we can find the allocation if isa(arg, Argument) - if EscapeAnalysis.has_no_escape(EscapeAnalysis.ignore_argescape(estate[arg])) + if has_no_escape(estate[arg]) # Even if we prove everything else effect_free, the best we can # say is :effect_free_if_argmem_only if sv.effect_free_if_argmem_only === nothing @@ -721,7 +723,7 @@ function check_all_args_noescape!(sv::PostOptAnalysisState, ir::IRCode, @nospeci end return false elseif isa(arg, SSAValue) - EscapeAnalysis.has_no_escape(estate[arg]) || return false + has_no_escape(estate[arg]) || return false check_all_args_noescape!(sv, ir, ir[arg][:stmt], estate) || return false else return false diff --git a/base/compiler/types.jl b/base/compiler/types.jl index 015b1dbc00a6f..b475e360dac02 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -457,7 +457,7 @@ getresult(info::CallInfo, idx::Int) = getresult_impl(info, idx) # must implement `nsplit`, `getsplit`, and `add_uncovered_edges!` to opt in to inlining nsplit_impl(::CallInfo) = nothing getsplit_impl(::CallInfo, ::Int) = error("unexpected call into `getsplit`") -add_uncovered_edges_impl(edges::Vector{Any}, info::CallInfo, @nospecialize(atype)) = error("unexpected call into `add_uncovered_edges!`") +add_uncovered_edges_impl(::Vector{Any}, ::CallInfo, _) = error("unexpected call into `add_uncovered_edges!`") # must 
implement `getresult` to opt in to extended lattice return information getresult_impl(::CallInfo, ::Int) = nothing diff --git a/test/compiler/effects.jl b/test/compiler/effects.jl index 11c30aad0b9a4..8bc5f27e31766 100644 --- a/test/compiler/effects.jl +++ b/test/compiler/effects.jl @@ -1141,6 +1141,14 @@ end @test_broken Core.Compiler.is_effect_free(Base.infer_effects(set_arr_with_unused_arg_2, (Vector{Int},))) @test_broken Core.Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(set_arg_arr!, (Vector{Int},))) +# EA-based refinement of :effect_free +function f_EA_refine(ax, b) + bx = Ref{Any}() + @noinline bx[] = b + return ax[] + b +end +@test Core.Compiler.is_effect_free(Base.infer_effects(f_EA_refine, (Base.RefValue{Int},Int))) + function issue51837(; openquotechar::Char, newlinechar::Char) ncodeunits(openquotechar) == 1 || throw(ArgumentError("`openquotechar` must be a single-byte character")) if !isnothing(newlinechar) From 4045e7baa496b2f14dfe562dbe3700d0c91006ff Mon Sep 17 00:00:00 2001 From: Jeff Bezanson Date: Thu, 19 Sep 2024 07:37:10 -0400 Subject: [PATCH 043/537] fix #52986, regression in `@doc` of macro without REPL loaded (#55795) fix #52986 --- base/docs/Docs.jl | 4 ++++ test/docs.jl | 1 + 2 files changed, 5 insertions(+) diff --git a/base/docs/Docs.jl b/base/docs/Docs.jl index 1327a1f795d4f..1a2403bbb8644 100644 --- a/base/docs/Docs.jl +++ b/base/docs/Docs.jl @@ -577,6 +577,10 @@ function _doc(binding::Binding, sig::Type = Union{}) for msig in multidoc.order sig <: msig && return multidoc.docs[msig] end + # if no matching signatures, return first + if !isempty(multidoc.docs) + return first(values(multidoc.docs)) + end end end return nothing diff --git a/test/docs.jl b/test/docs.jl index f62f7f8b63b2c..a6ef6afec5807 100644 --- a/test/docs.jl +++ b/test/docs.jl @@ -4,6 +4,7 @@ import Base.Docs: meta, @var, DocStr, parsedoc # check that @doc can work before REPL is loaded @test !startswith(read(`$(Base.julia_cmd()) -E '@doc sin'`, String), "nothing") +@test !startswith(read(`$(Base.julia_cmd()) -E '@doc @time'`, String), "nothing") using Markdown using REPL From a73ba3bab7ddbf087bb64ef8d236923d8d7f0051 Mon Sep 17 00:00:00 2001 From: Timothy Date: Thu, 19 Sep 2024 19:38:28 +0800 Subject: [PATCH 044/537] Assume that docstring code with no lang is julia (#55465) --- stdlib/Markdown/src/render/terminal/render.jl | 2 +- stdlib/REPL/src/docview.jl | 24 ++++++++++++++++++- test/docs.jl | 4 ++-- 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/stdlib/Markdown/src/render/terminal/render.jl b/stdlib/Markdown/src/render/terminal/render.jl index 619b2c8b8ef4a..a97d273131536 100644 --- a/stdlib/Markdown/src/render/terminal/render.jl +++ b/stdlib/Markdown/src/render/terminal/render.jl @@ -116,7 +116,7 @@ function term(io::AnnotIO, md::Header{l}, columns) where l end function term(io::IO, md::Code, columns) - code = if md.language ∈ ("", "julia") + code = if md.language == "julia" highlight(md.code) elseif md.language == "julia-repl" || Base.startswith(md.language, "jldoctest") hl = AnnotatedString(md.code) diff --git a/stdlib/REPL/src/docview.jl b/stdlib/REPL/src/docview.jl index 5086aa0c9485c..3c5e102bb657e 100644 --- a/stdlib/REPL/src/docview.jl +++ b/stdlib/REPL/src/docview.jl @@ -81,7 +81,8 @@ function formatdoc(d::DocStr) for part in d.text formatdoc(buffer, d, part) end - Markdown.MD(Any[Markdown.parse(seekstart(buffer))]) + md = Markdown.MD(Any[Markdown.parse(seekstart(buffer))]) + assume_julia_code!(md) end @noinline formatdoc(buffer, d, part) = 
print(buffer, part) @@ -95,6 +96,27 @@ function parsedoc(d::DocStr) d.object end +""" + assume_julia_code!(doc::Markdown.MD) -> doc + +Assume that code blocks with no language specified are Julia code. +""" +function assume_julia_code!(doc::Markdown.MD) + assume_julia_code!(doc.content) + doc +end + +function assume_julia_code!(blocks::Vector) + for (i, block) in enumerate(blocks) + if block isa Markdown.Code && block.language == "" + blocks[i] = Markdown.Code("julia", block.code) + elseif block isa Vector || block isa Markdown.MD + assume_julia_code!(block) + end + end + blocks +end + ## Trimming long help ("# Extended help") struct Message # For direct messages to the terminal diff --git a/test/docs.jl b/test/docs.jl index a6ef6afec5807..92d45fe05e397 100644 --- a/test/docs.jl +++ b/test/docs.jl @@ -575,8 +575,8 @@ end let T = meta(DocVars)[@var(DocVars.T)], S = meta(DocVars)[@var(DocVars.S)], - Tname = Markdown.parse("```\n$(curmod_prefix)DocVars.T\n```"), - Sname = Markdown.parse("```\n$(curmod_prefix)DocVars.S\n```") + Tname = Markdown.parse("```julia\n$(curmod_prefix)DocVars.T\n```"), + Sname = Markdown.parse("```julia\n$(curmod_prefix)DocVars.S\n```") # Splicing the expression directly doesn't work @test docstrings_equal(T.docs[Union{}], doc""" From b8093deefac19084646db9cfce0bf8ad7ea91ed8 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 19 Sep 2024 23:31:36 +0530 Subject: [PATCH 045/537] Broadcast binary ops involving strided triangular (#55798) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, we evaluate expressions like `(A::UpperTriangular) + (B::UpperTriangular)` using broadcasting if both `A` and `B` have strided parents, and forward the summation to the parents otherwise. This PR changes this to use broadcasting if either of the two has a strided parent. This avoids accessing the parent corresponding to the structural zero elements, as the index might not be initialized. Fixes https://github.com/JuliaLang/julia/issues/55590 This isn't a general fix, as we still sum the parents if neither is strided. However, it will address common cases. 
This also improves performance, as we only need to loop over one half:

```julia
julia> using LinearAlgebra

julia> U = UpperTriangular(zeros(100,100));

julia> B = Bidiagonal(zeros(100), zeros(99), :U);

julia> @btime $U + $B;
  35.530 μs (4 allocations: 78.22 KiB) # nightly
  13.441 μs (4 allocations: 78.22 KiB) # This PR
```
---
 stdlib/LinearAlgebra/src/symmetric.jl  |  8 +--
 stdlib/LinearAlgebra/src/triangular.jl | 91 ++++++++++++++++++--------
 stdlib/LinearAlgebra/test/symmetric.jl | 25 +++++++
 3 files changed, 94 insertions(+), 30 deletions(-)

diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl
index ab7b5ee031260..a7739596a73bb 100644
--- a/stdlib/LinearAlgebra/src/symmetric.jl
+++ b/stdlib/LinearAlgebra/src/symmetric.jl
@@ -687,10 +687,10 @@ for f in (:+, :-)
     @eval begin
         $f(A::Hermitian, B::Symmetric{<:Real}) = $f(A, Hermitian(parent(B), sym_uplo(B.uplo)))
         $f(A::Symmetric{<:Real}, B::Hermitian) = $f(Hermitian(parent(A), sym_uplo(A.uplo)), B)
-        $f(A::SymTridiagonal, B::Symmetric) = Symmetric($f(A, B.data), sym_uplo(B.uplo))
-        $f(A::Symmetric, B::SymTridiagonal) = Symmetric($f(A.data, B), sym_uplo(A.uplo))
-        $f(A::SymTridiagonal{<:Real}, B::Hermitian) = Hermitian($f(A, B.data), sym_uplo(B.uplo))
-        $f(A::Hermitian, B::SymTridiagonal{<:Real}) = Hermitian($f(A.data, B), sym_uplo(A.uplo))
+        $f(A::SymTridiagonal, B::Symmetric) = $f(Symmetric(A, sym_uplo(B.uplo)), B)
+        $f(A::Symmetric, B::SymTridiagonal) = $f(A, Symmetric(B, sym_uplo(A.uplo)))
+        $f(A::SymTridiagonal{<:Real}, B::Hermitian) = $f(Hermitian(A, sym_uplo(B.uplo)), B)
+        $f(A::Hermitian, B::SymTridiagonal{<:Real}) = $f(A, Hermitian(B, sym_uplo(A.uplo)))
     end
 end
 
diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl
index df0d0d4fd0d8b..74eab6a392723 100644
--- a/stdlib/LinearAlgebra/src/triangular.jl
+++ b/stdlib/LinearAlgebra/src/triangular.jl
@@ -850,35 +850,74 @@ fillstored!(A::UpperTriangular, x) = (fillband!(A.data, x, 0, size(A,2)-1);
 fillstored!(A::UnitUpperTriangular, x) = (fillband!(A.data, x, 1, size(A,2)-1); A)
 
 # Binary operations
-+(A::UpperTriangular, B::UpperTriangular) = UpperTriangular(A.data + B.data)
-+(A::LowerTriangular, B::LowerTriangular) = LowerTriangular(A.data + B.data)
-+(A::UpperTriangular, B::UnitUpperTriangular) = UpperTriangular(A.data + triu(B.data, 1) + I)
-+(A::LowerTriangular, B::UnitLowerTriangular) = LowerTriangular(A.data + tril(B.data, -1) + I)
-+(A::UnitUpperTriangular, B::UpperTriangular) = UpperTriangular(triu(A.data, 1) + B.data + I)
-+(A::UnitLowerTriangular, B::LowerTriangular) = LowerTriangular(tril(A.data, -1) + B.data + I)
-+(A::UnitUpperTriangular, B::UnitUpperTriangular) = UpperTriangular(triu(A.data, 1) + triu(B.data, 1) + 2I)
-+(A::UnitLowerTriangular, B::UnitLowerTriangular) = LowerTriangular(tril(A.data, -1) + tril(B.data, -1) + 2I)
+# use broadcasting if the parents are strided, where we loop only over the triangular part
+function +(A::UpperTriangular, B::UpperTriangular)
+    (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B
+    UpperTriangular(A.data + B.data)
+end
+function +(A::LowerTriangular, B::LowerTriangular)
+    (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B
+    LowerTriangular(A.data + B.data)
+end
+function +(A::UpperTriangular, B::UnitUpperTriangular)
+    (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B
+    UpperTriangular(A.data + triu(B.data, 1) + I)
+end
+function +(A::LowerTriangular, B::UnitLowerTriangular)
+    (parent(A)
isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B + LowerTriangular(A.data + tril(B.data, -1) + I) +end +function +(A::UnitUpperTriangular, B::UpperTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B + UpperTriangular(triu(A.data, 1) + B.data + I) +end +function +(A::UnitLowerTriangular, B::LowerTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B + LowerTriangular(tril(A.data, -1) + B.data + I) +end +function +(A::UnitUpperTriangular, B::UnitUpperTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B + UpperTriangular(triu(A.data, 1) + triu(B.data, 1) + 2I) +end +function +(A::UnitLowerTriangular, B::UnitLowerTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B + LowerTriangular(tril(A.data, -1) + tril(B.data, -1) + 2I) +end +(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A)), A) + copyto!(similar(parent(B)), B) --(A::UpperTriangular, B::UpperTriangular) = UpperTriangular(A.data - B.data) --(A::LowerTriangular, B::LowerTriangular) = LowerTriangular(A.data - B.data) --(A::UpperTriangular, B::UnitUpperTriangular) = UpperTriangular(A.data - triu(B.data, 1) - I) --(A::LowerTriangular, B::UnitLowerTriangular) = LowerTriangular(A.data - tril(B.data, -1) - I) --(A::UnitUpperTriangular, B::UpperTriangular) = UpperTriangular(triu(A.data, 1) - B.data + I) --(A::UnitLowerTriangular, B::LowerTriangular) = LowerTriangular(tril(A.data, -1) - B.data + I) --(A::UnitUpperTriangular, B::UnitUpperTriangular) = UpperTriangular(triu(A.data, 1) - triu(B.data, 1)) --(A::UnitLowerTriangular, B::UnitLowerTriangular) = LowerTriangular(tril(A.data, -1) - tril(B.data, -1)) --(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A)), A) - copyto!(similar(parent(B)), B) - -# use broadcasting if the parents are strided, where we loop only over the triangular part -for op in (:+, :-) - for TM1 in (:LowerTriangular, :UnitLowerTriangular), TM2 in (:LowerTriangular, :UnitLowerTriangular) - @eval $op(A::$TM1{<:Any, <:StridedMaybeAdjOrTransMat}, B::$TM2{<:Any, <:StridedMaybeAdjOrTransMat}) = broadcast($op, A, B) - end - for TM1 in (:UpperTriangular, :UnitUpperTriangular), TM2 in (:UpperTriangular, :UnitUpperTriangular) - @eval $op(A::$TM1{<:Any, <:StridedMaybeAdjOrTransMat}, B::$TM2{<:Any, <:StridedMaybeAdjOrTransMat}) = broadcast($op, A, B) - end +function -(A::UpperTriangular, B::UpperTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B + UpperTriangular(A.data - B.data) +end +function -(A::LowerTriangular, B::LowerTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B + LowerTriangular(A.data - B.data) +end +function -(A::UpperTriangular, B::UnitUpperTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B + UpperTriangular(A.data - triu(B.data, 1) - I) +end +function -(A::LowerTriangular, B::UnitLowerTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B + LowerTriangular(A.data - tril(B.data, -1) - I) end +function -(A::UnitUpperTriangular, B::UpperTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B + UpperTriangular(triu(A.data, 1) - B.data + I) +end +function -(A::UnitLowerTriangular, B::LowerTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B + LowerTriangular(tril(A.data, -1) - 
B.data + I) +end +function -(A::UnitUpperTriangular, B::UnitUpperTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B + UpperTriangular(triu(A.data, 1) - triu(B.data, 1)) +end +function -(A::UnitLowerTriangular, B::UnitLowerTriangular) + (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B + LowerTriangular(tril(A.data, -1) - tril(B.data, -1)) +end +-(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A)), A) - copyto!(similar(parent(B)), B) function kron(A::UpperTriangular{<:Number,<:StridedMaybeAdjOrTransMat}, B::UpperTriangular{<:Number,<:StridedMaybeAdjOrTransMat}) C = UpperTriangular(Matrix{promote_op(*, eltype(A), eltype(B))}(undef, _kronsize(A, B))) diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 939e677039dc7..7a51ab9d454af 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -1135,4 +1135,29 @@ end end end +@testset "partly iniitalized matrices" begin + a = Matrix{BigFloat}(undef, 2,2) + a[1] = 1; a[3] = 1; a[4] = 1 + h = Hermitian(a) + s = Symmetric(a) + d = Diagonal([1,1]) + symT = SymTridiagonal([1 1;1 1]) + @test h+d == Array(h) + Array(d) + @test h+symT == Array(h) + Array(symT) + @test s+d == Array(s) + Array(d) + @test s+symT == Array(s) + Array(symT) + @test h-d == Array(h) - Array(d) + @test h-symT == Array(h) - Array(symT) + @test s-d == Array(s) - Array(d) + @test s-symT == Array(s) - Array(symT) + @test d+h == Array(d) + Array(h) + @test symT+h == Array(symT) + Array(h) + @test d+s == Array(d) + Array(s) + @test symT+s == Array(symT) + Array(s) + @test d-h == Array(d) - Array(h) + @test symT-h == Array(symT) - Array(h) + @test d-s == Array(d) - Array(s) + @test symT-s == Array(symT) - Array(s) +end + end # module TestSymmetric From a31a880d9b5da6fd55e9afba9fab3715d86379c6 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 19 Sep 2024 23:34:17 +0530 Subject: [PATCH 046/537] Reland " Avoid materializing arrays in bidiag matmul #55450" (#55777) This relands #55450 and adds tests for the failing case noted in https://github.com/JuliaLang/julia/issues/55727. The `addmul` tests that were failing earlier pass with this change. The issue in the earlier PR was that we were not exiting quickly for `iszero(alpha)` in `_bibimul!` for small matrices, and were computing the result as `C .= A * B * alpha + C * beta`. The problem with this is that if `A * B` contains `NaN`s, this propagates to `C` even if `alpha === 0.0`. This is fixed now, and the result is only computed if `!iszero(alpha)`. 
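
As a purely illustrative sketch of that point (the helper name `scaled_addmul_sketch!` is invented for this example and is not part of this patch or of LinearAlgebra), the early exit has to happen before `A * B` is ever formed, because scaling a `NaN` by zero still gives `NaN`:

```julia
using LinearAlgebra

# Hypothetical stand-in for the 5-argument mul! contract C = A*B*alpha + C*beta.
# The early exit is the whole point: when alpha is zero, A*B is never evaluated,
# so NaNs in the product cannot leak into C.
function scaled_addmul_sketch!(C, A, B, alpha, beta)
    if iszero(alpha)
        # only scale the destination; never touch A*B
        return iszero(beta) ? fill!(C, zero(eltype(C))) : rmul!(C, beta)
    end
    C .= (A * B) .* alpha .+ C .* beta
    return C
end

A = Diagonal([NaN])
B = Diagonal([1.0])
C = zeros(1, 1)
scaled_addmul_sketch!(C, A, B, 0.0, false)  # C stays 0.0
# Without the early exit, (A * B) .* 0.0 evaluates to NaN and C would become
# NaN even though alpha is zero, which is exactly the failure mode fixed here.
```

The `test/addmul.jl` testset added below checks exactly this `mul!(C, A, B, 0.0, false)` combination for `Diagonal`, `Bidiagonal`, `SymTridiagonal`, and `Tridiagonal` arguments containing `NaN`s.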
--- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 4 +- stdlib/LinearAlgebra/src/bidiag.jl | 341 ++++++++++++++++++---- stdlib/LinearAlgebra/test/addmul.jl | 22 ++ stdlib/LinearAlgebra/test/bidiag.jl | 67 +++++ stdlib/LinearAlgebra/test/tridiag.jl | 71 +++++ 5 files changed, 453 insertions(+), 52 deletions(-) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 27d4255fb656b..17216845b350c 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -673,7 +673,9 @@ matprod_dest(A::Diagonal, B::Diagonal, TS) = _matprod_dest_diag(B, TS) _matprod_dest_diag(A, TS) = similar(A, TS) function _matprod_dest_diag(A::SymTridiagonal, TS) n = size(A, 1) - Tridiagonal(similar(A, TS, n-1), similar(A, TS, n), similar(A, TS, n-1)) + ev = similar(A, TS, max(0, n-1)) + dv = similar(A, TS, n) + Tridiagonal(ev, dv, similar(ev)) end # Special handling for adj/trans vec diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index d86bad7e41435..12d638f52add6 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -557,7 +557,8 @@ end # function to get the internally stored vectors for Bidiagonal and [Sym]Tridiagonal # to avoid allocations in _mul! below (#24324, #24578) _diag(A::Tridiagonal, k) = k == -1 ? A.dl : k == 0 ? A.d : A.du -_diag(A::SymTridiagonal, k) = k == 0 ? A.dv : A.ev +_diag(A::SymTridiagonal{<:Number}, k) = k == 0 ? A.dv : A.ev +_diag(A::SymTridiagonal, k) = k == 0 ? view(A, diagind(A, IndexStyle(A))) : view(A, diagind(A, 1, IndexStyle(A))) function _diag(A::Bidiagonal, k) if k == 0 return A.dv @@ -577,12 +578,45 @@ function _bibimul!(C, A, B, _add) check_A_mul_B!_sizes(size(C), size(A), size(B)) n = size(A,1) iszero(n) && return C - n <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) # We use `_rmul_or_fill!` instead of `_modify!` here since using # `_modify!` in the following loop will not update the # off-diagonal elements for non-zero beta. 
_rmul_or_fill!(C, _add.beta) iszero(_add.alpha) && return C + if n <= 3 + # naive multiplication + for I in CartesianIndices(C) + C[I] += _add(sum(A[I[1], k] * B[k, I[2]] for k in axes(A,2))) + end + return C + end + @inbounds begin + # first column of C + C[1,1] += _add(A[1,1]*B[1,1] + A[1, 2]*B[2,1]) + C[2,1] += _add(A[2,1]*B[1,1] + A[2,2]*B[2,1]) + C[3,1] += _add(A[3,2]*B[2,1]) + # second column of C + C[1,2] += _add(A[1,1]*B[1,2] + A[1,2]*B[2,2]) + C[2,2] += _add(A[2,1]*B[1,2] + A[2,2]*B[2,2] + A[2,3]*B[3,2]) + C[3,2] += _add(A[3,2]*B[2,2] + A[3,3]*B[3,2]) + C[4,2] += _add(A[4,3]*B[3,2]) + end # inbounds + # middle columns + __bibimul!(C, A, B, _add) + @inbounds begin + C[n-3,n-1] += _add(A[n-3,n-2]*B[n-2,n-1]) + C[n-2,n-1] += _add(A[n-2,n-2]*B[n-2,n-1] + A[n-2,n-1]*B[n-1,n-1]) + C[n-1,n-1] += _add(A[n-1,n-2]*B[n-2,n-1] + A[n-1,n-1]*B[n-1,n-1] + A[n-1,n]*B[n,n-1]) + C[n, n-1] += _add(A[n,n-1]*B[n-1,n-1] + A[n,n]*B[n,n-1]) + # last column of C + C[n-2, n] += _add(A[n-2,n-1]*B[n-1,n]) + C[n-1, n] += _add(A[n-1,n-1]*B[n-1,n ] + A[n-1,n]*B[n,n ]) + C[n, n] += _add(A[n,n-1]*B[n-1,n ] + A[n,n]*B[n,n ]) + end # inbounds + C +end +function __bibimul!(C, A, B, _add) + n = size(A,1) Al = _diag(A, -1) Ad = _diag(A, 0) Au = _diag(A, 1) @@ -590,44 +624,198 @@ function _bibimul!(C, A, B, _add) Bd = _diag(B, 0) Bu = _diag(B, 1) @inbounds begin - # first row of C - C[1,1] += _add(A[1,1]*B[1,1] + A[1, 2]*B[2, 1]) - C[1,2] += _add(A[1,1]*B[1,2] + A[1,2]*B[2,2]) - C[1,3] += _add(A[1,2]*B[2,3]) - # second row of C - C[2,1] += _add(A[2,1]*B[1,1] + A[2,2]*B[2,1]) - C[2,2] += _add(A[2,1]*B[1,2] + A[2,2]*B[2,2] + A[2,3]*B[3,2]) - C[2,3] += _add(A[2,2]*B[2,3] + A[2,3]*B[3,3]) - C[2,4] += _add(A[2,3]*B[3,4]) for j in 3:n-2 - Ajj₋1 = Al[j-1] - Ajj = Ad[j] + Aj₋2j₋1 = Au[j-2] + Aj₋1j = Au[j-1] Ajj₊1 = Au[j] - Bj₋1j₋2 = Bl[j-2] - Bj₋1j₋1 = Bd[j-1] + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Ajj₋1 = Al[j-1] + Aj₊1j = Al[j] + Aj₊2j₊1 = Al[j+1] Bj₋1j = Bu[j-1] - Bjj₋1 = Bl[j-1] Bjj = Bd[j] - Bjj₊1 = Bu[j] Bj₊1j = Bl[j] - Bj₊1j₊1 = Bd[j+1] - Bj₊1j₊2 = Bu[j+1] - C[j,j-2] += _add( Ajj₋1*Bj₋1j₋2) - C[j, j-1] += _add(Ajj₋1*Bj₋1j₋1 + Ajj*Bjj₋1) - C[j, j ] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j, j+1] += _add(Ajj *Bjj₊1 + Ajj₊1*Bj₊1j₊1) - C[j, j+2] += _add(Ajj₊1*Bj₊1j₊2) + + C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) + C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) + C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) end - # row before last of C - C[n-1,n-3] += _add(A[n-1,n-2]*B[n-2,n-3]) - C[n-1,n-2] += _add(A[n-1,n-1]*B[n-1,n-2] + A[n-1,n-2]*B[n-2,n-2]) - C[n-1,n-1] += _add(A[n-1,n-2]*B[n-2,n-1] + A[n-1,n-1]*B[n-1,n-1] + A[n-1,n]*B[n,n-1]) - C[n-1,n ] += _add(A[n-1,n-1]*B[n-1,n ] + A[n-1, n]*B[n ,n ]) - # last row of C - C[n,n-2] += _add(A[n,n-1]*B[n-1,n-2]) - C[n,n-1] += _add(A[n,n-1]*B[n-1,n-1] + A[n,n]*B[n,n-1]) - C[n,n ] += _add(A[n,n-1]*B[n-1,n ] + A[n,n]*B[n,n ]) - end # inbounds + end + C +end +function __bibimul!(C, A, B::Bidiagonal, _add) + n = size(A,1) + Al = _diag(A, -1) + Ad = _diag(A, 0) + Au = _diag(A, 1) + Bd = _diag(B, 0) + if B.uplo == 'U' + Bu = _diag(B, 1) + @inbounds begin + for j in 3:n-2 + Aj₋2j₋1 = Au[j-2] + Aj₋1j = Au[j-1] + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Ajj₋1 = Al[j-1] + Aj₊1j = Al[j] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + + C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) + C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) + C[j+1, j] += _add(Aj₊1j*Bjj) + end + end + else # B.uplo == 
'L' + Bl = _diag(B, -1) + @inbounds begin + for j in 3:n-2 + Aj₋1j = Au[j-1] + Ajj₊1 = Au[j] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Aj₊1j = Al[j] + Aj₊2j₊1 = Al[j+1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j-1, j] += _add(Aj₋1j*Bjj) + C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) + C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) + end + end + end + C +end +function __bibimul!(C, A::Bidiagonal, B, _add) + n = size(A,1) + Bl = _diag(B, -1) + Bd = _diag(B, 0) + Bu = _diag(B, 1) + Ad = _diag(A, 0) + if A.uplo == 'U' + Au = _diag(A, 1) + @inbounds begin + for j in 3:n-2 + Aj₋2j₋1 = Au[j-2] + Aj₋1j = Au[j-1] + Ajj₊1 = Au[j] + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) + C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j+1, j] += _add(Aj₊1j₊1*Bj₊1j) + end + end + else # A.uplo == 'L' + Al = _diag(A, -1) + @inbounds begin + for j in 3:n-2 + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Ajj₋1 = Al[j-1] + Aj₊1j = Al[j] + Aj₊2j₊1 = Al[j+1] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j) + C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) + C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) + C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) + end + end + end + C +end +function __bibimul!(C, A::Bidiagonal, B::Bidiagonal, _add) + n = size(A,1) + Ad = _diag(A, 0) + Bd = _diag(B, 0) + if A.uplo == 'U' && B.uplo == 'U' + Au = _diag(A, 1) + Bu = _diag(B, 1) + @inbounds begin + for j in 3:n-2 + Aj₋2j₋1 = Au[j-2] + Aj₋1j = Au[j-1] + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + + C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) + C[j, j] += _add(Ajj*Bjj) + end + end + elseif A.uplo == 'U' && B.uplo == 'L' + Au = _diag(A, 1) + Bl = _diag(B, -1) + @inbounds begin + for j in 3:n-2 + Aj₋1j = Au[j-1] + Ajj₊1 = Au[j] + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j-1, j] += _add(Aj₋1j*Bjj) + C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) + C[j+1, j] += _add(Aj₊1j₊1*Bj₊1j) + end + end + elseif A.uplo == 'L' && B.uplo == 'U' + Al = _diag(A, -1) + Bu = _diag(B, 1) + @inbounds begin + for j in 3:n-2 + Aj₋1j₋1 = Ad[j-1] + Ajj = Ad[j] + Ajj₋1 = Al[j-1] + Aj₊1j = Al[j] + Bj₋1j = Bu[j-1] + Bjj = Bd[j] + + C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j) + C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) + C[j+1, j] += _add(Aj₊1j*Bjj) + end + end + else # A.uplo == 'L' && B.uplo == 'L' + Al = _diag(A, -1) + Bl = _diag(B, -1) + @inbounds begin + for j in 3:n-2 + Ajj = Ad[j] + Aj₊1j₊1 = Ad[j+1] + Aj₊1j = Al[j] + Aj₊2j₊1 = Al[j+1] + Bjj = Bd[j] + Bj₊1j = Bl[j] + + C[j, j] += _add(Ajj*Bjj) + C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) + C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) + end + end + end C end @@ -744,7 +932,52 @@ function _mul!(C::AbstractVecOrMat, A::BiTriSym, B::AbstractVecOrMat, _add::MulA nB = size(B,2) (iszero(nA) || iszero(nB)) && return C iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - nA <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) + if nA <= 3 + # naive multiplication + for I in CartesianIndices(C) + col = Base.tail(Tuple(I)) + _modify!(_add, sum(A[I[1], k] * B[k, col...] 
for k in axes(A,2)), C, I) + end + return C + end + _mul_bitrisym!(C, A, B, _add) +end +function _mul_bitrisym!(C::AbstractVecOrMat, A::Bidiagonal, B::AbstractVecOrMat, _add::MulAddMul) + nA = size(A,1) + nB = size(B,2) + d = A.dv + if A.uplo == 'U' + u = A.ev + @inbounds begin + for j = 1:nB + b₀, b₊ = B[1, j], B[2, j] + _modify!(_add, d[1]*b₀ + u[1]*b₊, C, (1, j)) + for i = 2:nA - 1 + b₀, b₊ = b₊, B[i + 1, j] + _modify!(_add, d[i]*b₀ + u[i]*b₊, C, (i, j)) + end + _modify!(_add, d[nA]*b₊, C, (nA, j)) + end + end + else + l = A.ev + @inbounds begin + for j = 1:nB + b₀, b₊ = B[1, j], B[2, j] + _modify!(_add, d[1]*b₀, C, (1, j)) + for i = 2:nA - 1 + b₋, b₀, b₊ = b₀, b₊, B[i + 1, j] + _modify!(_add, l[i - 1]*b₋ + d[i]*b₀, C, (i, j)) + end + _modify!(_add, l[nA - 1]*b₀ + d[nA]*b₊, C, (nA, j)) + end + end + end + C +end +function _mul_bitrisym!(C::AbstractVecOrMat, A::TriSym, B::AbstractVecOrMat, _add::MulAddMul) + nA = size(A,1) + nB = size(B,2) l = _diag(A, -1) d = _diag(A, 0) u = _diag(A, 1) @@ -767,10 +1000,10 @@ function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::TriSym, _add::MulAddMul) check_A_mul_B!_sizes(size(C), size(A), size(B)) n = size(A,1) m = size(B,2) - (iszero(m) || iszero(n)) && return C - iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - if n <= 3 || m <= 1 - return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) + (iszero(_add.alpha) || iszero(m)) && return _rmul_or_fill!(C, _add.beta) + if m == 1 + B11 = B[1,1] + return mul!(C, A, B11, _add.alpha, _add.beta) end Bl = _diag(B, -1) Bd = _diag(B, 0) @@ -804,21 +1037,18 @@ function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::Bidiagonal, _add::MulAdd m, n = size(A) (iszero(m) || iszero(n)) && return C iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - if size(A, 1) <= 3 || size(B, 2) <= 1 - return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) - end @inbounds if B.uplo == 'U' + for j in n:-1:2, i in 1:m + _modify!(_add, A[i,j] * B.dv[j] + A[i,j-1] * B.ev[j-1], C, (i, j)) + end for i in 1:m - for j in n:-1:2 - _modify!(_add, A[i,j] * B.dv[j] + A[i,j-1] * B.ev[j-1], C, (i, j)) - end _modify!(_add, A[i,1] * B.dv[1], C, (i, 1)) end else # uplo == 'L' + for j in 1:n-1, i in 1:m + _modify!(_add, A[i,j] * B.dv[j] + A[i,j+1] * B.ev[j], C, (i, j)) + end for i in 1:m - for j in 1:n-1 - _modify!(_add, A[i,j] * B.dv[j] + A[i,j+1] * B.ev[j], C, (i, j)) - end _modify!(_add, A[i,n] * B.dv[n], C, (i, n)) end end @@ -834,9 +1064,17 @@ function _dibimul!(C, A, B, _add) check_A_mul_B!_sizes(size(C), size(A), size(B)) n = size(A,1) iszero(n) && return C - n <= 3 && return mul!(C, Array(A), Array(B), _add.alpha, _add.beta) - _rmul_or_fill!(C, _add.beta) # see the same use above + # ensure that we fill off-band elements in the destination + _rmul_or_fill!(C, _add.beta) iszero(_add.alpha) && return C + if n <= 3 + # For simplicity, use a naive multiplication for small matrices + # that loops over all elements. 
+ for I in CartesianIndices(C) + C[I] += _add(A.diag[I[1]] * B[I[1], I[2]]) + end + return C + end Ad = A.diag Bl = _diag(B, -1) Bd = _diag(B, 0) @@ -870,7 +1108,8 @@ function _dibimul!(C::AbstractMatrix, A::Diagonal, B::Bidiagonal, _add) check_A_mul_B!_sizes(size(C), size(A), size(B)) n = size(A,1) iszero(n) && return C - _rmul_or_fill!(C, _add.beta) # see the same use above + # ensure that we fill off-band elements in the destination + _rmul_or_fill!(C, _add.beta) iszero(_add.alpha) && return C Ad = A.diag Bdv, Bev = B.dv, B.ev diff --git a/stdlib/LinearAlgebra/test/addmul.jl b/stdlib/LinearAlgebra/test/addmul.jl index 72fdf687bf5c3..208fa930e8ee1 100644 --- a/stdlib/LinearAlgebra/test/addmul.jl +++ b/stdlib/LinearAlgebra/test/addmul.jl @@ -217,4 +217,26 @@ end end end +@testset "issue #55727" begin + C = zeros(1,1) + @testset "$(nameof(typeof(A)))" for A in Any[Diagonal([NaN]), + Bidiagonal([NaN], Float64[], :U), + Bidiagonal([NaN], Float64[], :L), + SymTridiagonal([NaN], Float64[]), + Tridiagonal(Float64[], [NaN], Float64[]), + ] + @testset "$(nameof(typeof(B)))" for B in Any[ + Diagonal([1.0]), + Bidiagonal([1.0], Float64[], :U), + Bidiagonal([1.0], Float64[], :L), + SymTridiagonal([1.0], Float64[]), + Tridiagonal(Float64[], [1.0], Float64[]), + ] + C .= 0 + @test mul!(C, A, B, 0.0, false)[] === 0.0 + @test mul!(C, B, A, 0.0, false)[] === 0.0 + end + end +end + end # module diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index ef50658a642fb..edad29d4ec180 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -1048,4 +1048,71 @@ end @test mul!(similar(D), B, D) == mul!(similar(D), D, B) == B * D end +@testset "mul for small matrices" begin + @testset for n in 0:6 + D = Diagonal(rand(n)) + v = rand(n) + @testset for uplo in (:L, :U) + B = Bidiagonal(rand(n), rand(max(n-1,0)), uplo) + M = Matrix(B) + + @test B * v ≈ M * v + @test mul!(similar(v), B, v) ≈ M * v + @test mul!(ones(size(v)), B, v, 2, 3) ≈ M * v * 2 .+ 3 + + @test B * B ≈ M * M + @test mul!(similar(B, size(B)), B, B) ≈ M * M + @test mul!(ones(size(B)), B, B, 2, 4) ≈ M * M * 2 .+ 4 + + for m in 0:6 + AL = rand(m,n) + AR = rand(n,m) + @test AL * B ≈ AL * M + @test B * AR ≈ M * AR + @test mul!(similar(AL), AL, B) ≈ AL * M + @test mul!(similar(AR), B, AR) ≈ M * AR + @test mul!(ones(size(AL)), AL, B, 2, 4) ≈ AL * M * 2 .+ 4 + @test mul!(ones(size(AR)), B, AR, 2, 4) ≈ M * AR * 2 .+ 4 + end + + @test B * D ≈ M * D + @test D * B ≈ D * M + @test mul!(similar(B), B, D) ≈ M * D + @test mul!(similar(B), B, D) ≈ M * D + @test mul!(similar(B, size(B)), D, B) ≈ D * M + @test mul!(similar(B, size(B)), B, D) ≈ M * D + @test mul!(ones(size(B)), D, B, 2, 4) ≈ D * M * 2 .+ 4 + @test mul!(ones(size(B)), B, D, 2, 4) ≈ M * D * 2 .+ 4 + end + BL = Bidiagonal(rand(n), rand(max(0, n-1)), :L) + ML = Matrix(BL) + BU = Bidiagonal(rand(n), rand(max(0, n-1)), :U) + MU = Matrix(BU) + T = Tridiagonal(zeros(max(0, n-1)), zeros(n), zeros(max(0, n-1))) + @test mul!(T, BL, BU) ≈ ML * MU + @test mul!(T, BU, BL) ≈ MU * ML + T = Tridiagonal(ones(max(0, n-1)), ones(n), ones(max(0, n-1))) + @test mul!(copy(T), BL, BU, 2, 3) ≈ ML * MU * 2 + T * 3 + @test mul!(copy(T), BU, BL, 2, 3) ≈ MU * ML * 2 + T * 3 + end + + n = 4 + arr = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) + for B in ( + Bidiagonal(fill(arr,n), fill(arr,n-1), :L), + Bidiagonal(fill(arr,n), fill(arr,n-1), :U), + ) + @test B * B ≈ Matrix(B) * Matrix(B) + BL = Bidiagonal(fill(arr,n), fill(arr,n-1), :L) + BU = 
Bidiagonal(fill(arr,n), fill(arr,n-1), :U) + @test BL * B ≈ Matrix(BL) * Matrix(B) + @test BU * B ≈ Matrix(BU) * Matrix(B) + @test B * BL ≈ Matrix(B) * Matrix(BL) + @test B * BU ≈ Matrix(B) * Matrix(BU) + D = Diagonal(fill(arr,n)) + @test D * B ≈ Matrix(D) * Matrix(B) + @test B * D ≈ Matrix(B) * Matrix(D) + end +end + end # module TestBidiagonal diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index 3330fa682fe5e..15ac7f9f2147f 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -970,4 +970,75 @@ end @test sprint(show, S) == "SymTridiagonal($(repr(diag(S))), $(repr(diag(S,1))))" end +@testset "mul for small matrices" begin + @testset for n in 0:6 + for T in ( + Tridiagonal(rand(max(n-1,0)), rand(n), rand(max(n-1,0))), + SymTridiagonal(rand(n), rand(max(n-1,0))), + ) + M = Matrix(T) + @test T * T ≈ M * M + @test mul!(similar(T, size(T)), T, T) ≈ M * M + @test mul!(ones(size(T)), T, T, 2, 4) ≈ M * M * 2 .+ 4 + + for m in 0:6 + AR = rand(n,m) + AL = rand(m,n) + @test AL * T ≈ AL * M + @test T * AR ≈ M * AR + @test mul!(similar(AL), AL, T) ≈ AL * M + @test mul!(similar(AR), T, AR) ≈ M * AR + @test mul!(ones(size(AL)), AL, T, 2, 4) ≈ AL * M * 2 .+ 4 + @test mul!(ones(size(AR)), T, AR, 2, 4) ≈ M * AR * 2 .+ 4 + end + + v = rand(n) + @test T * v ≈ M * v + @test mul!(similar(v), T, v) ≈ M * v + + D = Diagonal(rand(n)) + @test T * D ≈ M * D + @test D * T ≈ D * M + @test mul!(Tridiagonal(similar(T)), D, T) ≈ D * M + @test mul!(Tridiagonal(similar(T)), T, D) ≈ M * D + @test mul!(similar(T, size(T)), D, T) ≈ D * M + @test mul!(similar(T, size(T)), T, D) ≈ M * D + @test mul!(ones(size(T)), D, T, 2, 4) ≈ D * M * 2 .+ 4 + @test mul!(ones(size(T)), T, D, 2, 4) ≈ M * D * 2 .+ 4 + + for uplo in (:U, :L) + B = Bidiagonal(rand(n), rand(max(0, n-1)), uplo) + @test T * B ≈ M * B + @test B * T ≈ B * M + if n <= 2 + @test mul!(Tridiagonal(similar(T)), B, T) ≈ B * M + @test mul!(Tridiagonal(similar(T)), T, B) ≈ M * B + end + @test mul!(similar(T, size(T)), B, T) ≈ B * M + @test mul!(similar(T, size(T)), T, B) ≈ M * B + @test mul!(ones(size(T)), B, T, 2, 4) ≈ B * M * 2 .+ 4 + @test mul!(ones(size(T)), T, B, 2, 4) ≈ M * B * 2 .+ 4 + end + end + end + + n = 4 + arr = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) + for T in ( + SymTridiagonal(fill(arr,n), fill(arr,n-1)), + Tridiagonal(fill(arr,n-1), fill(arr,n), fill(arr,n-1)), + ) + @test T * T ≈ Matrix(T) * Matrix(T) + BL = Bidiagonal(fill(arr,n), fill(arr,n-1), :L) + BU = Bidiagonal(fill(arr,n), fill(arr,n-1), :U) + @test BL * T ≈ Matrix(BL) * Matrix(T) + @test BU * T ≈ Matrix(BU) * Matrix(T) + @test T * BL ≈ Matrix(T) * Matrix(BL) + @test T * BU ≈ Matrix(T) * Matrix(BU) + D = Diagonal(fill(arr,n)) + @test D * T ≈ Matrix(D) * Matrix(T) + @test T * D ≈ Matrix(T) * Matrix(D) + end +end + end # module TestTridiagonal From 550f3215654906a0446f3a723abf09e85a298beb Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 20 Sep 2024 13:59:38 +0900 Subject: [PATCH 047/537] move the test case added in #50174 to test/core.jl (#55811) Also renames the name of the test function to avoid name collision. 
--- test/compiler/AbstractInterpreter.jl | 5 ----- test/core.jl | 5 +++++ 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/test/compiler/AbstractInterpreter.jl b/test/compiler/AbstractInterpreter.jl index e92b67f980942..bab4fe02a5168 100644 --- a/test/compiler/AbstractInterpreter.jl +++ b/test/compiler/AbstractInterpreter.jl @@ -484,11 +484,6 @@ let NoinlineModule = Module() end end -# Make sure that Core.Compiler has enough NamedTuple infrastructure -# to properly give error messages for basic kwargs... -Core.eval(Core.Compiler, quote f(;a=1) = a end) -@test_throws MethodError Core.Compiler.f(;b=2) - # custom inferred data # ==================== diff --git a/test/core.jl b/test/core.jl index 4db7f0e401fa0..d41a58a7ccb2e 100644 --- a/test/core.jl +++ b/test/core.jl @@ -8288,3 +8288,8 @@ end @test_broken (Tuple{Vararg{T}} where T) === Union{Tuple{T, T, Vararg{T}} where T, Tuple{}, Tuple{T} where T} @test sizeof(Pair{Union{typeof(Union{}),Nothing}, Union{Type{Union{}},Nothing}}(Union{}, Union{})) == 2 + +# Make sure that Core.Compiler has enough NamedTuple infrastructure +# to properly give error messages for basic kwargs... +Core.eval(Core.Compiler, quote issue50174(;a=1) = a end) +@test_throws MethodError Core.Compiler.issue50174(;b=2) From b30f80d99738161f0477832ab62da7da4d1bfe82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Fri, 20 Sep 2024 10:18:53 +0100 Subject: [PATCH 048/537] [Random] Avoid conversion to `Float32` in `Float16` sampler (#55819) --- stdlib/Random/src/Xoshiro.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stdlib/Random/src/Xoshiro.jl b/stdlib/Random/src/Xoshiro.jl index 5569d6d5c1da5..1909effbbc9e6 100644 --- a/stdlib/Random/src/Xoshiro.jl +++ b/stdlib/Random/src/Xoshiro.jl @@ -294,7 +294,7 @@ rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{UInt52{UInt64}}) = ran rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{UInt104{UInt128}}) = rand(r, UInt104Raw()) rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{CloseOpen01{Float16}}) = - Float16(Float32(rand(r, UInt16) >>> 5) * Float32(0x1.0p-11)) + Float16(rand(r, UInt16) >>> 5) * Float16(0x1.0p-11) rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{CloseOpen01{Float32}}) = Float32(rand(r, UInt32) >>> 8) * Float32(0x1.0p-24) From 7f7a472168f65043013b6b0692ac6b450ca07ae5 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 20 Sep 2024 18:31:32 +0900 Subject: [PATCH 049/537] simplify the fields of `UnionSplitInfo` (#55815) xref: --- base/compiler/abstractinterpretation.jl | 68 +++++++++++++------------ base/compiler/stmtinfo.jl | 23 +++++---- base/compiler/tfuncs.jl | 32 ++++-------- 3 files changed, 60 insertions(+), 63 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index f126389c42d2d..68b8394b72c3d 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -280,6 +280,12 @@ any_ambig(info::MethodMatchInfo) = any_ambig(info.results) any_ambig(m::MethodMatches) = any_ambig(m.info) fully_covering(info::MethodMatchInfo) = info.fullmatch fully_covering(m::MethodMatches) = fully_covering(m.info) +function add_uncovered_edges!(sv::AbsIntState, info::MethodMatchInfo, @nospecialize(atype)) + fully_covering(info) || add_mt_backedge!(sv, info.mt, atype) + nothing +end +add_uncovered_edges!(sv::AbsIntState, matches::MethodMatches, @nospecialize(atype)) = + 
add_uncovered_edges!(sv, matches.info, atype) struct UnionSplitMethodMatches applicable::Vector{Any} @@ -287,10 +293,28 @@ struct UnionSplitMethodMatches info::UnionSplitInfo valid_worlds::WorldRange end -any_ambig(info::UnionSplitInfo) = any(any_ambig, info.matches) +any_ambig(info::UnionSplitInfo) = any(any_ambig, info.split) any_ambig(m::UnionSplitMethodMatches) = any_ambig(m.info) -fully_covering(info::UnionSplitInfo) = all(info.fullmatches) +fully_covering(info::UnionSplitInfo) = all(fully_covering, info.split) fully_covering(m::UnionSplitMethodMatches) = fully_covering(m.info) +function add_uncovered_edges!(sv::AbsIntState, info::UnionSplitInfo, @nospecialize(atype)) + all(fully_covering, info.split) && return nothing + # add mt backedges with removing duplications + for mt in uncovered_method_tables(info) + add_mt_backedge!(sv, mt, atype) + end +end +add_uncovered_edges!(sv::AbsIntState, matches::UnionSplitMethodMatches, @nospecialize(atype)) = + add_uncovered_edges!(sv, matches.info, atype) +function uncovered_method_tables(info::UnionSplitInfo) + mts = MethodTable[] + for mminfo in info.split + fully_covering(mminfo) && continue + any(mt′::MethodTable->mt′===mminfo.mt, mts) && continue + push!(mts, mminfo.mt) + end + return mts +end function find_method_matches(interp::AbstractInterpreter, argtypes::Vector{Any}, @nospecialize(atype); max_union_splitting::Int = InferenceParams(interp).max_union_splitting, @@ -308,43 +332,30 @@ is_union_split_eligible(𝕃::AbstractLattice, argtypes::Vector{Any}, max_union_ function find_union_split_method_matches(interp::AbstractInterpreter, argtypes::Vector{Any}, @nospecialize(atype), max_methods::Int) split_argtypes = switchtupleunion(typeinf_lattice(interp), argtypes) - infos = MethodLookupResult[] + infos = MethodMatchInfo[] applicable = Any[] applicable_argtypes = Vector{Any}[] # arrays like `argtypes`, including constants, for each match valid_worlds = WorldRange() - mts = MethodTable[] - fullmatches = Bool[] for i in 1:length(split_argtypes) arg_n = split_argtypes[i]::Vector{Any} sig_n = argtypes_to_type(arg_n) mt = ccall(:jl_method_table_for, Any, (Any,), sig_n) mt === nothing && return FailedMethodMatch("Could not identify method table for call") mt = mt::MethodTable - matches = findall(sig_n, method_table(interp); limit = max_methods) - if matches === nothing + thismatches = findall(sig_n, method_table(interp); limit = max_methods) + if thismatches === nothing return FailedMethodMatch("For one of the union split cases, too many methods matched") end - push!(infos, matches) - for m in matches + for m in thismatches push!(applicable, m) push!(applicable_argtypes, arg_n) end - valid_worlds = intersect(valid_worlds, matches.valid_worlds) - thisfullmatch = any(match::MethodMatch->match.fully_covers, matches) - mt_found = false - for (i, mt′) in enumerate(mts) - if mt′ === mt - fullmatches[i] &= thisfullmatch - mt_found = true - break - end - end - if !mt_found - push!(mts, mt) - push!(fullmatches, thisfullmatch) - end + valid_worlds = intersect(valid_worlds, thismatches.valid_worlds) + thisfullmatch = any(match::MethodMatch->match.fully_covers, thismatches) + thisinfo = MethodMatchInfo(thismatches, mt, thisfullmatch) + push!(infos, thisinfo) end - info = UnionSplitInfo(infos, mts, fullmatches) + info = UnionSplitInfo(infos) return UnionSplitMethodMatches( applicable, applicable_argtypes, info, valid_worlds) end @@ -583,14 +594,7 @@ function add_call_backedges!(interp::AbstractInterpreter, @nospecialize(rettype) end # also need an edge to the 
method table in case something gets # added that did not intersect with any existing method - if isa(matches, MethodMatches) - fully_covering(matches) || add_mt_backedge!(sv, matches.info.mt, atype) - else - matches::UnionSplitMethodMatches - for (thisfullmatch, mt) in zip(matches.info.fullmatches, matches.info.mts) - thisfullmatch || add_mt_backedge!(sv, mt, atype) - end - end + add_uncovered_edges!(sv, matches, atype) return nothing end diff --git a/base/compiler/stmtinfo.jl b/base/compiler/stmtinfo.jl index 33fca90b6261e..ac5ffbdd5d76d 100644 --- a/base/compiler/stmtinfo.jl +++ b/base/compiler/stmtinfo.jl @@ -39,7 +39,10 @@ end nsplit_impl(info::MethodMatchInfo) = 1 getsplit_impl(info::MethodMatchInfo, idx::Int) = (@assert idx == 1; info.results) getresult_impl(::MethodMatchInfo, ::Int) = nothing -add_uncovered_edges_impl(edges::Vector{Any}, info::MethodMatchInfo, @nospecialize(atype)) = (!info.fullmatch && push!(edges, info.mt, atype); ) +function add_uncovered_edges_impl(edges::Vector{Any}, info::MethodMatchInfo, @nospecialize(atype)) + fully_covering(info) || push!(edges, info.mt, atype) + nothing +end """ info::UnionSplitInfo <: CallInfo @@ -51,25 +54,25 @@ each partition (`info.matches::Vector{MethodMatchInfo}`). This info is illegal on any statement that is not a call to a generic function. """ struct UnionSplitInfo <: CallInfo - matches::Vector{MethodLookupResult} - mts::Vector{MethodTable} - fullmatches::Vector{Bool} + split::Vector{MethodMatchInfo} end nmatches(info::MethodMatchInfo) = length(info.results) function nmatches(info::UnionSplitInfo) n = 0 - for mminfo in info.matches - n += length(mminfo) + for mminfo in info.split + n += nmatches(mminfo) end return n end -nsplit_impl(info::UnionSplitInfo) = length(info.matches) -getsplit_impl(info::UnionSplitInfo, idx::Int) = info.matches[idx] +nsplit_impl(info::UnionSplitInfo) = length(info.split) +getsplit_impl(info::UnionSplitInfo, idx::Int) = getsplit(info.split[idx], 1) getresult_impl(::UnionSplitInfo, ::Int) = nothing function add_uncovered_edges_impl(edges::Vector{Any}, info::UnionSplitInfo, @nospecialize(atype)) - for (mt, fullmatch) in zip(info.mts, info.fullmatches) - !fullmatch && push!(edges, mt, atype) + all(fully_covering, info.split) && return nothing + # add mt backedges with removing duplications + for mt in uncovered_method_tables(info) + push!(edges, mt, atype) end end diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index 6bb73ded8660d..ab3b50763deec 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -2979,33 +2979,23 @@ function abstract_applicable(interp::AbstractInterpreter, argtypes::Vector{Any}, else (; valid_worlds, applicable) = matches update_valid_age!(sv, valid_worlds) - - # also need an edge to the method table in case something gets - # added that did not intersect with any existing method - if isa(matches, MethodMatches) - fully_covering(matches) || add_mt_backedge!(sv, matches.info.mt, atype) - else - for (thisfullmatch, mt) in zip(matches.info.fullmatches, matches.info.mts) - thisfullmatch || add_mt_backedge!(sv, mt, atype) - end - end - napplicable = length(applicable) if napplicable == 0 rt = Const(false) # never any matches + elseif !fully_covering(matches) || any_ambig(matches) + # Account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. 
+ rt = Bool else rt = Const(true) # has applicable matches - for i in 1:napplicable - match = applicable[i]::MethodMatch - edge = specialize_method(match)::MethodInstance - add_backedge!(sv, edge) - end - - if !fully_covering(matches) || any_ambig(matches) - # Account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. - rt = Bool - end end + for i in 1:napplicable + match = applicable[i]::MethodMatch + edge = specialize_method(match)::MethodInstance + add_backedge!(sv, edge) + end + # also need an edge to the method table in case something gets + # added that did not intersect with any existing method + add_uncovered_edges!(sv, matches, atype) end return CallMeta(rt, Union{}, EFFECTS_TOTAL, NoCallInfo()) end From 220742d6194acba995eda822c82fdf647d6896ee Mon Sep 17 00:00:00 2001 From: Alexander Plavin Date: Fri, 20 Sep 2024 08:45:54 -0400 Subject: [PATCH 050/537] Add errorhint for nonexisting fields and properties (#55165) I played a bit with error hints and crafted this: ```julia julia> (1+2im).real ERROR: FieldError: type Complex has no field real, available fields: `re`, `im` julia> nothing.xy ERROR: FieldError: type Nothing has no field xy; Nothing has no fields at all. julia> svd(rand(2,2)).VV ERROR: FieldError: type SVD has no field VV, available fields: `U`, `S`, `Vt` Available properties: `V` ``` --------- Co-authored-by: Lilith Orion Hafner --- base/docs/basedocs.jl | 2 +- base/errorshow.jl | 31 ++++++++++++++++++++++++++++--- base/reflection.jl | 2 +- test/errorshow.jl | 14 +++++++++----- 4 files changed, 39 insertions(+), 10 deletions(-) diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index 0fc253bd73d1c..e28b3a21659a8 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -1694,7 +1694,7 @@ julia> ab = AB(1, 3) AB(1.0f0, 3.0) julia> ab.c # field `c` doesn't exist -ERROR: FieldError: type AB has no field c +ERROR: FieldError: type AB has no field `c`, available fields: `a`, `b` Stacktrace: [...] 
``` diff --git a/base/errorshow.jl b/base/errorshow.jl index d805cb64fb81e..9c8aad8b6ee2c 100644 --- a/base/errorshow.jl +++ b/base/errorshow.jl @@ -378,7 +378,7 @@ end function showerror(io::IO, exc::FieldError) @nospecialize - print(io, "FieldError: type $(exc.type |> nameof) has no field $(exc.field)") + print(io, "FieldError: type $(exc.type |> nameof) has no field `$(exc.field)`") Base.Experimental.show_error_hints(io, exc) end @@ -1102,7 +1102,7 @@ end Experimental.register_error_hint(methods_on_iterable, MethodError) # Display a hint in case the user tries to access non-member fields of container type datastructures -function fielderror_hint_handler(io, exc) +function fielderror_dict_hint_handler(io, exc) @nospecialize field = exc.field type = exc.type @@ -1113,7 +1113,32 @@ function fielderror_hint_handler(io, exc) end end -Experimental.register_error_hint(fielderror_hint_handler, FieldError) +Experimental.register_error_hint(fielderror_dict_hint_handler, FieldError) + +function fielderror_listfields_hint_handler(io, exc) + fields = fieldnames(exc.type) + if isempty(fields) + print(io, "; $(nameof(exc.type)) has no fields at all.") + else + print(io, ", available fields: $(join(map(k -> "`$k`", fields), ", "))") + end + props = _propertynames_bytype(exc.type) + isnothing(props) && return + props = setdiff(props, fields) + isempty(props) && return + print(io, "\nAvailable properties: $(join(map(k -> "`$k`", props), ", "))") +end + +function _propertynames_bytype(T::Type) + which(propertynames, (T,)) === which(propertynames, (Any,)) && return nothing + inferred_names = promote_op(Val∘propertynames, T) + inferred_names isa DataType && inferred_names <: Val || return nothing + inferred_names = inferred_names.parameters[1] + inferred_names isa NTuple{<:Any, Symbol} || return nothing + return Symbol[inferred_names[i] for i in 1:length(inferred_names)] +end + +Experimental.register_error_hint(fielderror_listfields_hint_handler, FieldError) # ExceptionStack implementation size(s::ExceptionStack) = size(s.stack) diff --git a/base/reflection.jl b/base/reflection.jl index 2ddd34b0f73c1..5b395efc58190 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -1017,7 +1017,7 @@ julia> struct Foo end julia> Base.fieldindex(Foo, :z) -ERROR: FieldError: type Foo has no field z +ERROR: FieldError: type Foo has no field `z`, available fields: `x`, `y` Stacktrace: [...] 
diff --git a/test/errorshow.jl b/test/errorshow.jl index a82ab7743dc5a..3ede370553212 100644 --- a/test/errorshow.jl +++ b/test/errorshow.jl @@ -10,7 +10,8 @@ Base.Experimental.register_error_hint(Base.noncallable_number_hint_handler, Meth Base.Experimental.register_error_hint(Base.string_concatenation_hint_handler, MethodError) Base.Experimental.register_error_hint(Base.methods_on_iterable, MethodError) Base.Experimental.register_error_hint(Base.nonsetable_type_hint_handler, MethodError) -Base.Experimental.register_error_hint(Base.fielderror_hint_handler, FieldError) +Base.Experimental.register_error_hint(Base.fielderror_listfields_hint_handler, FieldError) +Base.Experimental.register_error_hint(Base.fielderror_dict_hint_handler, FieldError) @testset "SystemError" begin err = try; systemerror("reason", Cint(0)); false; catch ex; ex; end::SystemError @@ -808,12 +809,13 @@ end @test_throws ArgumentError("invalid index: \"foo\" of type String") [1]["foo"] @test_throws ArgumentError("invalid index: nothing of type Nothing") [1][nothing] -# issue #53618 -@testset "FieldErrorHint" begin +# issue #53618, pr #55165 +@testset "FieldErrorHints" begin struct FieldFoo a::Float32 b::Int end + Base.propertynames(foo::FieldFoo) = (:a, :x, :y) s = FieldFoo(1, 2) @@ -823,7 +825,9 @@ end # Check error message first errorMsg = sprint(Base.showerror, ex) - @test occursin("FieldError: type FieldFoo has no field c", errorMsg) + @test occursin("FieldError: type FieldFoo has no field `c`", errorMsg) + @test occursin("available fields: `a`, `b`", errorMsg) + @test occursin("Available properties: `x`, `y`", errorMsg) d = Dict(s => 1) @@ -840,7 +844,7 @@ end ex = test.value::FieldError errorMsg = sprint(Base.showerror, ex) - @test occursin("FieldError: type Dict has no field c", errorMsg) + @test occursin("FieldError: type Dict has no field `c`", errorMsg) # Check hint message hintExpected = "Did you mean to access dict values using key: `:c` ? Consider using indexing syntax dict[:c]\n" @test occursin(hintExpected, errorMsg) From 44bef0df7a115334c10abac88aeba333b12cce2d Mon Sep 17 00:00:00 2001 From: Guillaume Dalle <22795598+gdalle@users.noreply.github.com> Date: Fri, 20 Sep 2024 17:59:39 +0200 Subject: [PATCH 051/537] Improve printing of several arguments (#55754) Following a discussion on [Discourse](https://discourse.julialang.org/t/string-optimisation-in-julia/119301/10?u=gdalle), this PR tries to improve `print` (and variants) for more than one argument. The idea is that `for` is type-unstable over the tuple `args`, while `foreach` unrolls. --------- Co-authored-by: Steven G. Johnson --- base/strings/io.jl | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) diff --git a/base/strings/io.jl b/base/strings/io.jl index 754e058cd2f54..c78e3e2e043b6 100644 --- a/base/strings/io.jl +++ b/base/strings/io.jl @@ -42,9 +42,7 @@ end function print(io::IO, xs...) lock(io) try - for x in xs - print(io, x) - end + foreach(Fix1(print, io), xs) finally unlock(io) end @@ -138,15 +136,9 @@ function print_to_string(xs...) if isempty(xs) return "" end - siz::Int = 0 - for x in xs - siz += _str_sizehint(x) - end - # specialized for performance reasons + siz = sum(_str_sizehint, xs; init = 0) s = IOBuffer(sizehint=siz) - for x in xs - print(s, x) - end + print(s, xs...) String(_unsafe_take!(s)) end @@ -154,16 +146,10 @@ function string_with_env(env, xs...) 
if isempty(xs) return "" end - siz::Int = 0 - for x in xs - siz += _str_sizehint(x) - end - # specialized for performance reasons + siz = sum(_str_sizehint, xs; init = 0) s = IOBuffer(sizehint=siz) env_io = IOContext(s, env) - for x in xs - print(env_io, x) - end + print(env_io, xs...) String(_unsafe_take!(s)) end From bce8f0441edae17d31abc7e7e7541659d4658704 Mon Sep 17 00:00:00 2001 From: Denis Barucic Date: Fri, 20 Sep 2024 22:23:06 +0200 Subject: [PATCH 052/537] Markdown: support `parse(::AbstractString)` (#55747) `Markdown.parse` is documented to accept `AbstractString` but it was implemented by calling `IOBuffer` on the string argument. `IOBuffer`, however, is documented only for `String` arguments. This commit changes the current `parse(::AbstractString)` to `parse(::String)` and implements `parse(::AbstractString)` by converting the argument to `String`. Now, even `LazyString`s can be parsed to Markdown representation. Fixes #55732 --- stdlib/Markdown/src/Markdown.jl | 3 ++- stdlib/Markdown/test/runtests.jl | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/stdlib/Markdown/src/Markdown.jl b/stdlib/Markdown/src/Markdown.jl index 1832e3a6a6956..0d45d9e534df2 100644 --- a/stdlib/Markdown/src/Markdown.jl +++ b/stdlib/Markdown/src/Markdown.jl @@ -56,7 +56,8 @@ const MARKDOWN_FACES = [ __init__() = foreach(addface!, MARKDOWN_FACES) -parse(markdown::AbstractString; flavor = julia) = parse(IOBuffer(markdown), flavor = flavor) +parse(markdown::String; flavor = julia) = parse(IOBuffer(markdown), flavor = flavor) +parse(markdown::AbstractString; flavor = julia) = parse(String(markdown), flavor = flavor) parse_file(file::AbstractString; flavor = julia) = parse(read(file, String), flavor = flavor) function mdexpr(s, flavor = :julia) diff --git a/stdlib/Markdown/test/runtests.jl b/stdlib/Markdown/test/runtests.jl index ffdb735f3b7cd..35608f75b2426 100644 --- a/stdlib/Markdown/test/runtests.jl +++ b/stdlib/Markdown/test/runtests.jl @@ -1308,3 +1308,7 @@ end # https://github.com/JuliaLang/julia/issues/37757 @test insert_hlines(nothing) === nothing end + +@testset "Lazy Strings" begin + @test Markdown.parse(lazy"foo") == Markdown.parse("foo") +end From 911e02558d0c145a192facd28808b68e157aa5af Mon Sep 17 00:00:00 2001 From: Simeon David Schaub Date: Sat, 21 Sep 2024 09:58:15 +0200 Subject: [PATCH 053/537] better error for esc outside of macro expansion (#55797) fixes #55788 --------- Co-authored-by: Jeff Bezanson --- src/julia-syntax.scm | 4 ++++ test/syntax.jl | 2 ++ 2 files changed, 6 insertions(+) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index d6bc03091f37b..f1acb9c3250e1 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -5001,6 +5001,10 @@ f(x) = yt(x) ((≔ ⩴ ≕ :=) (error (string "unsupported assignment operator \"" (deparse (car e)) "\""))) + ;; bare :escape + ((escape) + (error (string "\"esc(...)\" used outside of macro expansion"))) + ((error) (error (cadr e))) (else diff --git a/test/syntax.jl b/test/syntax.jl index 1b630a56f84f8..c19721b5c54b3 100644 --- a/test/syntax.jl +++ b/test/syntax.jl @@ -3985,3 +3985,5 @@ begin end end @test f45494() === (0,) + +@test_throws "\"esc(...)\" used outside of macro expansion" eval(esc(:(const x=1))) From d9555c6e7286d121b8625013548f945bc0bffc58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateus=20Ara=C3=BAjo?= Date: Sun, 22 Sep 2024 08:33:25 +0200 Subject: [PATCH 054/537] allow kronecker product between recursive triangular matrices (#55527) Using the recently introduced recursive `zero` I can remove the 
specialization to `<:Number` as @dkarrasch wanted to do in #54413. --------- Co-authored-by: Jishnu Bhattacharya --- stdlib/LinearAlgebra/src/triangular.jl | 16 ++++++++-------- stdlib/LinearAlgebra/test/triangular.jl | 16 ++++++++++++++++ 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 74eab6a392723..03634aa7d68e1 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -919,21 +919,21 @@ function -(A::UnitLowerTriangular, B::UnitLowerTriangular) end -(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A)), A) - copyto!(similar(parent(B)), B) -function kron(A::UpperTriangular{<:Number,<:StridedMaybeAdjOrTransMat}, B::UpperTriangular{<:Number,<:StridedMaybeAdjOrTransMat}) - C = UpperTriangular(Matrix{promote_op(*, eltype(A), eltype(B))}(undef, _kronsize(A, B))) +function kron(A::UpperTriangular{T,<:StridedMaybeAdjOrTransMat}, B::UpperTriangular{S,<:StridedMaybeAdjOrTransMat}) where {T,S} + C = UpperTriangular(Matrix{promote_op(*, T, S)}(undef, _kronsize(A, B))) return kron!(C, A, B) end -function kron(A::LowerTriangular{<:Number,<:StridedMaybeAdjOrTransMat}, B::LowerTriangular{<:Number,<:StridedMaybeAdjOrTransMat}) - C = LowerTriangular(Matrix{promote_op(*, eltype(A), eltype(B))}(undef, _kronsize(A, B))) +function kron(A::LowerTriangular{T,<:StridedMaybeAdjOrTransMat}, B::LowerTriangular{S,<:StridedMaybeAdjOrTransMat}) where {T,S} + C = LowerTriangular(Matrix{promote_op(*, T, S)}(undef, _kronsize(A, B))) return kron!(C, A, B) end -function kron!(C::UpperTriangular{<:Number,<:StridedMaybeAdjOrTransMat}, A::UpperTriangular{<:Number,<:StridedMaybeAdjOrTransMat}, B::UpperTriangular{<:Number,<:StridedMaybeAdjOrTransMat}) +function kron!(C::UpperTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, A::UpperTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, B::UpperTriangular{<:Any,<:StridedMaybeAdjOrTransMat}) size(C) == _kronsize(A, B) || throw(DimensionMismatch("kron!")) _triukron!(C.data, A.data, B.data) return C end -function kron!(C::LowerTriangular{<:Number,<:StridedMaybeAdjOrTransMat}, A::LowerTriangular{<:Number,<:StridedMaybeAdjOrTransMat}, B::LowerTriangular{<:Number,<:StridedMaybeAdjOrTransMat}) +function kron!(C::LowerTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, A::LowerTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, B::LowerTriangular{<:Any,<:StridedMaybeAdjOrTransMat}) size(C) == _kronsize(A, B) || throw(DimensionMismatch("kron!")) _trilkron!(C.data, A.data, B.data) return C @@ -952,7 +952,7 @@ function _triukron!(C, A, B) C[inB+k, jnB+l] = Aij * B[k, l] end for k = 1:(l-1) - C[inB+l, jnB+k] = zero(eltype(C)) + C[inB+l, jnB+k] = zero(C[inB+k, jnB+l]) end end end @@ -984,7 +984,7 @@ function _trilkron!(C, A, B) C[inB+k, jnB+l] = Aij * B[k, l] end for k = (l+1):n_B - C[inB+l, jnB+k] = zero(eltype(C)) + C[inB+l, jnB+k] = zero(C[inB+k, jnB+l]) end end end diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index a09d0092e9f39..42c5494f73e6f 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -1048,6 +1048,9 @@ end @test 2\L == 2\B @test real(L) == real(B) @test imag(L) == imag(B) + if MT == LowerTriangular + @test isa(kron(L,L), MT) + end @test kron(L,L) == kron(B,B) @test transpose!(MT(copy(A))) == transpose(L) broken=!(A isa Matrix) @test adjoint!(MT(copy(A))) == adjoint(L) broken=!(A isa Matrix) @@ -1070,6 +1073,9 @@ end @test 2\U == 2\B 
@test real(U) == real(B) @test imag(U) == imag(B) + if MT == UpperTriangular + @test isa(kron(U,U), MT) + end @test kron(U,U) == kron(B,B) @test transpose!(MT(copy(A))) == transpose(U) broken=!(A isa Matrix) @test adjoint!(MT(copy(A))) == adjoint(U) broken=!(A isa Matrix) @@ -1081,10 +1087,20 @@ end for T in (UpperTriangular, LowerTriangular) t = T(fill(ones(2,2), 2, 2)) m = Matrix(t) + @test isa(kron(t,t), T) @test kron(t, t) ≈ kron(m, m) end end +@testset "kron with triangular matrices of mixed eltypes" begin + for T in (UpperTriangular, LowerTriangular) + U = T(Matrix{Union{Missing,Int}}(fill(2, 2, 2))) + U[1, 1] = missing + @test kron(U, U)[2, 3] == 0 + @test kron(U, U)[3, 2] == 0 + end +end + @testset "copyto! tests" begin @testset "copyto! with aliasing (#39460)" begin M = Matrix(reshape(1:36, 6, 6)) From d6fa66ff6a983c08fce478346241879c7db31dce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Sun, 22 Sep 2024 13:08:59 +0100 Subject: [PATCH 055/537] [Dates] Make test more robust against non-UTC timezones (#55829) `%M` is the format specifier for the minutes, not the month (which should be `%m`), and it was used twice. Also, on macOS `Libc.strptime` internally calls `mktime` which depends on the local timezone. We now temporarily set `TZ=UTC` to avoid depending on the local timezone. Fix #55827. --- stdlib/Dates/test/types.jl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/stdlib/Dates/test/types.jl b/stdlib/Dates/test/types.jl index f5284b376ca4a..29395ccf3a271 100644 --- a/stdlib/Dates/test/types.jl +++ b/stdlib/Dates/test/types.jl @@ -263,7 +263,11 @@ end end @testset "issue #31524" begin - dt1 = Libc.strptime("%Y-%M-%dT%H:%M:%SZ", "2018-11-16T10:26:14Z") + # Ensure the result doesn't depend on local timezone, especially on macOS + # where an extra internal call to `mktime` is affected by timezone settings. 
+ dt1 = withenv("TZ" => "UTC") do + Libc.strptime("%Y-%m-%dT%H:%M:%SZ", "2018-11-16T10:26:14Z") + end dt2 = Libc.TmStruct(14, 30, 5, 10, 1, 99, 3, 40, 0) time = Time(dt1) From 4964c9789c571d86884078de81040847e1e3d21d Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Sun, 22 Sep 2024 10:59:09 -0400 Subject: [PATCH 056/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=20308f9d32f=20to=20ef9f76c17=20(#55838)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 | 1 - .../Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 | 1 - .../Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 | 1 + .../Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 | 1 + stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 create mode 100644 deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 create mode 100644 deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 diff --git a/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 b/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 deleted file mode 100644 index b59e1d8427b8b..0000000000000 --- a/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -b48c15e727d96a7525e0b800180d46f4 diff --git a/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 b/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 deleted file mode 100644 index 4f4bce61f1f0f..0000000000000 --- a/deps/checksums/Pkg-308f9d32fcec769fbed8cf6c5a17d54753ca1f5b.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -edc2c19bccf6b00e3ea7c4e0b1af36ca86c7e3f521d8c3c05a930ce3d961fb0259a98ae27be5c3e052418f9b4e7ca74cc4d3fee59dac12d47bd1ac5cd9e34fbe diff --git a/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 b/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 new file mode 100644 index 0000000000000..39dbb56dbaf53 --- /dev/null +++ b/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 @@ -0,0 +1 @@ +080b5cb82d208245cba014f1dfcb8033 diff --git a/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 b/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 new file mode 100644 index 0000000000000..2f95d4a0e28da --- /dev/null +++ b/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 @@ -0,0 +1 @@ +1b91505c78d2608afa89ceea16f645bb41c0737815aec1853ad72c9751e7299b264135c9a40a6319f68b973073a151619b925d7a9655c46526bccf501b116113 diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index 602fbcc648e59..f5ca169a775c6 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = 308f9d32fcec769fbed8cf6c5a17d54753ca1f5b +PKG_SHA1 = ef9f76c175872bab6803da4a5fa3fd99bce3d03a PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From 9136bddb6c36050e03529e2db456e6ea2e380557 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sun, 22 Sep 2024 22:10:08 +0530 Subject: [PATCH 057/537] lmul!/rmul! 
for banded matrices (#55823) This adds fast methods for `lmul!` and `rmul!` between banded matrices and numbers. Performance impact: ```julia julia> T = Tridiagonal(rand(999), rand(1000), rand(999)); julia> @btime rmul!($T, 0.2); 4.686 ms (0 allocations: 0 bytes) # nightly v"1.12.0-DEV.1225" 669.355 ns (0 allocations: 0 bytes) # this PR ``` --- stdlib/LinearAlgebra/src/bidiag.jl | 26 +++++++++++++++ stdlib/LinearAlgebra/src/diagonal.jl | 20 ++++++++++++ stdlib/LinearAlgebra/src/tridiag.jl | 47 +++++++++++++++++++++++++++ stdlib/LinearAlgebra/test/bidiag.jl | 13 ++++++++ stdlib/LinearAlgebra/test/diagonal.jl | 11 +++++++ stdlib/LinearAlgebra/test/tridiag.jl | 13 ++++++++ 6 files changed, 130 insertions(+) diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 12d638f52add6..0aab9ceeca6b9 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -441,6 +441,32 @@ end -(A::Bidiagonal)=Bidiagonal(-A.dv,-A.ev,A.uplo) *(A::Bidiagonal, B::Number) = Bidiagonal(A.dv*B, A.ev*B, A.uplo) *(B::Number, A::Bidiagonal) = Bidiagonal(B*A.dv, B*A.ev, A.uplo) +function rmul!(B::Bidiagonal, x::Number) + if size(B,1) > 1 + isupper = B.uplo == 'U' + row, col = 1 + isupper, 1 + !isupper + # ensure that zeros are preserved on scaling + y = B[row,col] * x + iszero(y) || throw(ArgumentError(LazyString(lazy"cannot set index ($row, $col) off ", + lazy"the tridiagonal band to a nonzero value ($y)"))) + end + @. B.dv *= x + @. B.ev *= x + return B +end +function lmul!(x::Number, B::Bidiagonal) + if size(B,1) > 1 + isupper = B.uplo == 'U' + row, col = 1 + isupper, 1 + !isupper + # ensure that zeros are preserved on scaling + y = x * B[row,col] + iszero(y) || throw(ArgumentError(LazyString(lazy"cannot set index ($row, $col) off ", + lazy"the tridiagonal band to a nonzero value ($y)"))) + end + @. B.dv = x * B.dv + @. B.ev = x * B.ev + return B +end /(A::Bidiagonal, B::Number) = Bidiagonal(A.dv/B, A.ev/B, A.uplo) \(B::Number, A::Bidiagonal) = Bidiagonal(B\A.dv, B\A.ev, A.uplo) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 23d2422d13654..d762549a2b228 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -274,6 +274,26 @@ end (*)(x::Number, D::Diagonal) = Diagonal(x * D.diag) (*)(D::Diagonal, x::Number) = Diagonal(D.diag * x) +function lmul!(x::Number, D::Diagonal) + if size(D,1) > 1 + # ensure that zeros are preserved on scaling + y = D[2,1] * x + iszero(y) || throw(ArgumentError(LazyString("cannot set index (2, 1) off ", + lazy"the tridiagonal band to a nonzero value ($y)"))) + end + @. D.diag = x * D.diag + return D +end +function rmul!(D::Diagonal, x::Number) + if size(D,1) > 1 + # ensure that zeros are preserved on scaling + y = x * D[2,1] + iszero(y) || throw(ArgumentError(LazyString("cannot set index (2, 1) off ", + lazy"the tridiagonal band to a nonzero value ($y)"))) + end + @. 
D.diag *= x + return D +end (/)(D::Diagonal, x::Number) = Diagonal(D.diag / x) (\)(x::Number, D::Diagonal) = Diagonal(x \ D.diag) (^)(D::Diagonal, a::Number) = Diagonal(D.diag .^ a) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index 84c79f57debc7..e755ce63e9b2a 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -228,6 +228,29 @@ end -(A::SymTridiagonal) = SymTridiagonal(-A.dv, -A.ev) *(A::SymTridiagonal, B::Number) = SymTridiagonal(A.dv*B, A.ev*B) *(B::Number, A::SymTridiagonal) = SymTridiagonal(B*A.dv, B*A.ev) +function rmul!(A::SymTridiagonal, x::Number) + if size(A,1) > 2 + # ensure that zeros are preserved on scaling + y = A[3,1] * x + iszero(y) || throw(ArgumentError(LazyString("cannot set index (3, 1) off ", + lazy"the tridiagonal band to a nonzero value ($y)"))) + end + A.dv .*= x + _evview(A) .*= x + return A +end +function lmul!(x::Number, B::SymTridiagonal) + if size(B,1) > 2 + # ensure that zeros are preserved on scaling + y = x * B[3,1] + iszero(y) || throw(ArgumentError(LazyString("cannot set index (3, 1) off ", + lazy"the tridiagonal band to a nonzero value ($y)"))) + end + @. B.dv = x * B.dv + ev = _evview(B) + @. ev = x * ev + return B +end /(A::SymTridiagonal, B::Number) = SymTridiagonal(A.dv/B, A.ev/B) \(B::Number, A::SymTridiagonal) = SymTridiagonal(B\A.dv, B\A.ev) ==(A::SymTridiagonal{<:Number}, B::SymTridiagonal{<:Number}) = @@ -836,6 +859,30 @@ tr(M::Tridiagonal) = sum(M.d) -(A::Tridiagonal) = Tridiagonal(-A.dl, -A.d, -A.du) *(A::Tridiagonal, B::Number) = Tridiagonal(A.dl*B, A.d*B, A.du*B) *(B::Number, A::Tridiagonal) = Tridiagonal(B*A.dl, B*A.d, B*A.du) +function rmul!(T::Tridiagonal, x::Number) + if size(T,1) > 2 + # ensure that zeros are preserved on scaling + y = T[3,1] * x + iszero(y) || throw(ArgumentError(LazyString("cannot set index (3, 1) off ", + lazy"the tridiagonal band to a nonzero value ($y)"))) + end + T.dl .*= x + T.d .*= x + T.du .*= x + return T +end +function lmul!(x::Number, T::Tridiagonal) + if size(T,1) > 2 + # ensure that zeros are preserved on scaling + y = x * T[3,1] + iszero(y) || throw(ArgumentError(LazyString("cannot set index (3, 1) off ", + lazy"the tridiagonal band to a nonzero value ($y)"))) + end + @. T.dl = x * T.dl + @. T.d = x * T.d + @. T.du = x * T.du + return T +end /(A::Tridiagonal, B::Number) = Tridiagonal(A.dl/B, A.d/B, A.du/B) \(B::Number, A::Tridiagonal) = Tridiagonal(B\A.dl, B\A.d, B\A.du) diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index edad29d4ec180..d633a99a2390e 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -969,6 +969,19 @@ end end end +@testset "rmul!/lmul! 
with numbers" begin + for T in (Bidiagonal(rand(4), rand(3), :U), Bidiagonal(rand(4), rand(3), :L)) + @test rmul!(copy(T), 0.2) ≈ rmul!(Array(T), 0.2) + @test lmul!(0.2, copy(T)) ≈ lmul!(0.2, Array(T)) + @test_throws ArgumentError rmul!(T, NaN) + @test_throws ArgumentError lmul!(NaN, T) + end + for T in (Bidiagonal(rand(1), rand(0), :U), Bidiagonal(rand(1), rand(0), :L)) + @test all(isnan, rmul!(copy(T), NaN)) + @test all(isnan, lmul!(NaN, copy(T))) + end +end + @testset "mul with Diagonal" begin for n in 0:4 dv, ev = rand(n), rand(max(n-1,0)) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 83d5e4fcdf170..dfb901908ba69 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -1345,6 +1345,17 @@ end end end +@testset "rmul!/lmul! with numbers" begin + D = Diagonal(rand(4)) + @test rmul!(copy(D), 0.2) ≈ rmul!(Array(D), 0.2) + @test lmul!(0.2, copy(D)) ≈ lmul!(0.2, Array(D)) + @test_throws ArgumentError rmul!(D, NaN) + @test_throws ArgumentError lmul!(NaN, D) + D = Diagonal(rand(1)) + @test all(isnan, rmul!(copy(D), NaN)) + @test all(isnan, lmul!(NaN, copy(D))) +end + @testset "+/- with block Symmetric/Hermitian" begin for p in ([1 2; 3 4], [1 2+im; 2-im 4+2im]) m = SizedArrays.SizedArray{(2,2)}(p) diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index 15ac7f9f2147f..826a6e62355d0 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -935,6 +935,19 @@ end end end +@testset "rmul!/lmul! with numbers" begin + for T in (SymTridiagonal(rand(4), rand(3)), Tridiagonal(rand(3), rand(4), rand(3))) + @test rmul!(copy(T), 0.2) ≈ rmul!(Array(T), 0.2) + @test lmul!(0.2, copy(T)) ≈ lmul!(0.2, Array(T)) + @test_throws ArgumentError rmul!(T, NaN) + @test_throws ArgumentError lmul!(NaN, T) + end + for T in (SymTridiagonal(rand(2), rand(1)), Tridiagonal(rand(1), rand(2), rand(1))) + @test all(isnan, rmul!(copy(T), NaN)) + @test all(isnan, lmul!(NaN, copy(T))) + end +end + @testset "mul with empty arrays" begin A = zeros(5,0) T = Tridiagonal(zeros(0), zeros(0), zeros(0)) From f62a380368484913dd022c99055056a027268134 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 23 Sep 2024 18:32:04 +0530 Subject: [PATCH 058/537] Specialize indexing triangular matrices with BandIndex (#55644) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With this, certain indexing operations involving a `BandIndex` may be evaluated as constants. This isn't used directly presently, but might allow for more performant broadcasting in the future. With this, ```julia julia> n = 3; T = Tridiagonal(rand(n-1), rand(n), rand(n-1)); julia> @code_warntype ((T,j) -> UpperTriangular(T)[LinearAlgebra.BandIndex(2,j)])(T, 1) MethodInstance for (::var"#17#18")(::Tridiagonal{Float64, Vector{Float64}}, ::Int64) from (::var"#17#18")(T, j) @ Main REPL[12]:1 Arguments #self#::Core.Const(var"#17#18"()) T::Tridiagonal{Float64, Vector{Float64}} j::Int64 Body::Float64 1 ─ %1 = Main.UpperTriangular(T)::UpperTriangular{Float64, Tridiagonal{Float64, Vector{Float64}}} │ %2 = LinearAlgebra.BandIndex::Core.Const(LinearAlgebra.BandIndex) │ %3 = (%2)(2, j)::Core.PartialStruct(LinearAlgebra.BandIndex, Any[Core.Const(2), Int64]) │ %4 = Base.getindex(%1, %3)::Core.Const(0.0) └── return %4 ``` The indexing operation may be evaluated at compile-time, as the band index is constant-propagated. 
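A minimal way to observe this from user code, mirroring the tests added below (this relies on the internal, unexported `LinearAlgebra.BandIndex` and a throwaway helper `f`, so it is an illustration rather than public API):

```julia
using LinearAlgebra, Test
using LinearAlgebra: BandIndex

# Wrapping the band in a Val and the result in a Val forces inference to
# prove that the indexed value is a compile-time constant.
f(A, j, ::Val{band}) where {band} = Val(A[BandIndex(band, j)])

M = rand(Int, 2, 2)
@inferred f(UpperTriangular(M), 1, Val(-1))     # strictly below the diagonal: inferred as Val(0)
@inferred f(UnitUpperTriangular(M), 1, Val(0))  # unit diagonal: inferred as Val(1)
```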
--- stdlib/LinearAlgebra/src/bidiag.jl | 5 ++-- stdlib/LinearAlgebra/src/dense.jl | 2 +- stdlib/LinearAlgebra/src/triangular.jl | 14 +++++++++ stdlib/LinearAlgebra/test/triangular.jl | 38 ++++++++++++++++++++++++- 4 files changed, 55 insertions(+), 4 deletions(-) diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 0aab9ceeca6b9..e5482cbba5595 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -166,10 +166,11 @@ end end @inline function getindex(A::Bidiagonal{T}, b::BandIndex) where T - @boundscheck checkbounds(A, _cartinds(b)) + @boundscheck checkbounds(A, b) if b.band == 0 return @inbounds A.dv[b.index] - elseif b.band == _offdiagind(A.uplo) + elseif b.band ∈ (-1,1) && b.band == _offdiagind(A.uplo) + # we explicitly compare the possible bands as b.band may be constant-propagated return @inbounds A.ev[b.index] else return bidiagzero(A, Tuple(_cartinds(b))...) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 62096cbb172f2..aacc5479bfa9d 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -110,7 +110,7 @@ norm2(x::Union{Array{T},StridedVector{T}}) where {T<:BlasFloat} = # Conservative assessment of types that have zero(T) defined for themselves haszero(::Type) = false haszero(::Type{T}) where {T<:Number} = isconcretetype(T) -@propagate_inbounds _zero(M::AbstractArray{T}, i, j) where {T} = haszero(T) ? zero(T) : zero(M[i,j]) +@propagate_inbounds _zero(M::AbstractArray{T}, inds...) where {T} = haszero(T) ? zero(T) : zero(M[inds...]) """ triu!(M, k::Integer) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 03634aa7d68e1..e1d61e4035966 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -236,6 +236,20 @@ Base.isstored(A::UpperTriangular, i::Int, j::Int) = @propagate_inbounds getindex(A::UpperTriangular, i::Int, j::Int) = i <= j ? A.data[i,j] : _zero(A.data,j,i) +# these specialized getindex methods enable constant-propagation of the band +Base.@constprop :aggressive @propagate_inbounds function getindex(A::UnitLowerTriangular{T}, b::BandIndex) where {T} + b.band < 0 ? A.data[b] : ifelse(b.band == 0, oneunit(T), zero(T)) +end +Base.@constprop :aggressive @propagate_inbounds function getindex(A::LowerTriangular, b::BandIndex) + b.band <= 0 ? A.data[b] : _zero(A.data, b) +end +Base.@constprop :aggressive @propagate_inbounds function getindex(A::UnitUpperTriangular{T}, b::BandIndex) where {T} + b.band > 0 ? A.data[b] : ifelse(b.band == 0, oneunit(T), zero(T)) +end +Base.@constprop :aggressive @propagate_inbounds function getindex(A::UpperTriangular, b::BandIndex) + b.band >= 0 ? A.data[b] : _zero(A.data, b) +end + _zero_triangular_half_str(::Type{<:UpperOrUnitUpperTriangular}) = "lower" _zero_triangular_half_str(::Type{<:LowerOrUnitLowerTriangular}) = "upper" diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 42c5494f73e6f..ec9a3079e2643 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -6,7 +6,7 @@ debug = false using Test, LinearAlgebra, Random using LinearAlgebra: BlasFloat, errorbounds, full!, transpose!, UnitUpperTriangular, UnitLowerTriangular, - mul!, rdiv!, rmul!, lmul! 
+ mul!, rdiv!, rmul!, lmul!, BandIndex const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") @@ -1286,4 +1286,40 @@ end end end +@testset "indexing with a BandIndex" begin + # these tests should succeed even if the linear index along + # the band isn't a constant, or type-inferred at all + M = rand(Int,2,2) + f(A,j, v::Val{n}) where {n} = Val(A[BandIndex(n,j)]) + function common_tests(M, ind) + j = ind[] + @test @inferred(f(UpperTriangular(M), j, Val(-1))) == Val(0) + @test @inferred(f(UnitUpperTriangular(M), j, Val(-1))) == Val(0) + @test @inferred(f(UnitUpperTriangular(M), j, Val(0))) == Val(1) + @test @inferred(f(LowerTriangular(M), j, Val(1))) == Val(0) + @test @inferred(f(UnitLowerTriangular(M), j, Val(1))) == Val(0) + @test @inferred(f(UnitLowerTriangular(M), j, Val(0))) == Val(1) + end + common_tests(M, Any[1]) + + M = Diagonal([1,2]) + common_tests(M, Any[1]) + # extra tests for banded structure of the parent + for T in (UpperTriangular, UnitUpperTriangular) + @test @inferred(f(T(M), 1, Val(1))) == Val(0) + end + for T in (LowerTriangular, UnitLowerTriangular) + @test @inferred(f(T(M), 1, Val(-1))) == Val(0) + end + + M = Tridiagonal([1,2], [1,2,3], [1,2]) + common_tests(M, Any[1]) + for T in (UpperTriangular, UnitUpperTriangular) + @test @inferred(f(T(M), 1, Val(2))) == Val(0) + end + for T in (LowerTriangular, UnitLowerTriangular) + @test @inferred(f(T(M), 1, Val(-2))) == Val(0) + end +end + end # module TestTriangular From 0fade450a183470b01c656a9001512ef2f1aae47 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Mon, 23 Sep 2024 11:39:00 -0400 Subject: [PATCH 059/537] Replace regex package module checks with actual code checks (#55824) Fixes https://github.com/JuliaLang/julia/issues/55792 Replaces https://github.com/JuliaLang/julia/pull/55822 Improves what https://github.com/JuliaLang/julia/pull/51635 was trying to do i.e. ``` ERROR: LoadError: `using/import Printf` outside of a Module detected. Importing a package outside of a module is not allowed during package precompilation. ``` --- base/loading.jl | 49 ++++++++------------- test/loading.jl | 106 --------------------------------------------- test/precompile.jl | 74 +++++++++++++++++++++++++++++++ 3 files changed, 92 insertions(+), 137 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 8d180845f942f..2c4a7a16ec7c0 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1524,6 +1524,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} end end +precompiling_package::Bool = false loading_extension::Bool = false precompiling_extension::Bool = false function run_extension_callbacks(extid::ExtensionId) @@ -2215,6 +2216,11 @@ For more details regarding code loading, see the manual sections on [modules](@r [parallel computing](@ref code-availability). """ function require(into::Module, mod::Symbol) + if into === Base.__toplevel__ && precompiling_package + # this error type needs to match the error type compilecache throws for non-125 errors. + error("`using/import $mod` outside of a Module detected. Importing a package outside of a module \ + is not allowed during package precompilation.") + end if _require_world_age[] != typemax(UInt) Base.invoke_in_world(_require_world_age[], __require, into, mod) else @@ -2792,41 +2798,10 @@ function load_path_setup_code(load_path::Bool=true) return code end -""" - check_src_module_wrap(srcpath::String) - -Checks that a package entry file `srcpath` has a module declaration, and that it is before any using/import statements. 
-""" -function check_src_module_wrap(pkg::PkgId, srcpath::String) - module_rgx = r"^(|end |\"\"\" )\s*(?:@)*(?:bare)?module\s" - load_rgx = r"\b(?:using|import)\s" - load_seen = false - inside_string = false - for s in eachline(srcpath) - if count("\"\"\"", s) == 1 - # ignore module docstrings - inside_string = !inside_string - end - inside_string && continue - if contains(s, module_rgx) - if load_seen - throw(ErrorException("Package $(repr("text/plain", pkg)) source file $srcpath has a using/import before a module declaration.")) - end - return true - end - if startswith(s, load_rgx) - load_seen = true - end - end - throw(ErrorException("Package $(repr("text/plain", pkg)) source file $srcpath does not contain a module declaration.")) -end - # this is called in the external process that generates precompiled package files function include_package_for_output(pkg::PkgId, input::String, depot_path::Vector{String}, dl_load_path::Vector{String}, load_path::Vector{String}, concrete_deps::typeof(_concrete_dependencies), source::Union{Nothing,String}) - check_src_module_wrap(pkg, input) - append!(empty!(Base.DEPOT_PATH), depot_path) append!(empty!(Base.DL_LOAD_PATH), dl_load_path) append!(empty!(Base.LOAD_PATH), load_path) @@ -2853,6 +2828,17 @@ function include_package_for_output(pkg::PkgId, input::String, depot_path::Vecto finally Core.Compiler.track_newly_inferred.x = false end + # check that the package defined the expected module so we can give a nice error message if not + Base.check_package_module_loaded(pkg) +end + +function check_package_module_loaded(pkg::PkgId) + if !haskey(Base.loaded_modules, pkg) + # match compilecache error type for non-125 errors + error("$(repr("text/plain", pkg)) did not define the expected module `$(pkg.name)`, \ + check for typos in package module name") + end + return nothing end const PRECOMPILE_TRACE_COMPILE = Ref{String}() @@ -2927,6 +2913,7 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o:: empty!(Base.EXT_DORMITORY) # If we have a custom sysimage with `EXT_DORMITORY` prepopulated Base.track_nested_precomp($precomp_stack) Base.precompiling_extension = $(loading_extension) + Base.precompiling_package = true Base.include_package_for_output($(pkg_str(pkg)), $(repr(abspath(input))), $(repr(depot_path)), $(repr(dl_load_path)), $(repr(load_path)), $deps, $(repr(source_path(nothing)))) """) diff --git a/test/loading.jl b/test/loading.jl index 8db8405ef2a83..fb200bf7a0a93 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -855,22 +855,6 @@ end end end -@testset "error message loading pkg bad module name" begin - mktempdir() do tmp - old_loadpath = copy(LOAD_PATH) - try - push!(LOAD_PATH, tmp) - write(joinpath(tmp, "BadCase.jl"), "module badcase end") - @test_logs (:warn, r"The call to compilecache failed.*") match_mode=:any begin - @test_throws ErrorException("package `BadCase` did not define the expected module `BadCase`, \ - check for typos in package module name") (@eval using BadCase) - end - finally - copy!(LOAD_PATH, old_loadpath) - end - end -end - @testset "Preferences loading" begin mktempdir() do dir this_uuid = uuid4() @@ -1268,96 +1252,6 @@ end @test success(`$(Base.julia_cmd()) --startup-file=no -e 'using Statistics'`) end -@testset "checking srcpath modules" begin - p = Base.PkgId("Dummy") - fpath, _ = mktemp() - @testset "valid" begin - write(fpath, """ - module Foo - using Bar - end - """) - @test Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - baremodule Foo - using Bar - end - """) - @test 
Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - \"\"\" - Foo - using Foo - \"\"\" - module Foo - using Bar - end - """) - @test Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - \"\"\" Foo \"\"\" - module Foo - using Bar - end - """) - @test Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - \"\"\" - Foo - \"\"\" module Foo - using Bar - end - """) - @test Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - @doc let x = 1 - x - end module Foo - using Bar - end - """) - @test Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - # using foo - module Foo - using Bar - end - """) - @test Base.check_src_module_wrap(p, fpath) - end - @testset "invalid" begin - write(fpath, """ - # module Foo - using Bar - # end - """) - @test_throws ErrorException Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - using Bar - module Foo - end - """) - @test_throws ErrorException Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - using Bar - """) - @test_throws ErrorException Base.check_src_module_wrap(p, fpath) - - write(fpath, """ - x = 1 - """) - @test_throws ErrorException Base.check_src_module_wrap(p, fpath) - end -end - @testset "relocatable upgrades #51989" begin mktempdir() do depot # realpath is needed because Pkg is used for one of the precompile paths below, and Pkg calls realpath on the diff --git a/test/precompile.jl b/test/precompile.jl index bc738e557bb51..7a6e41061f9b1 100644 --- a/test/precompile.jl +++ b/test/precompile.jl @@ -2093,4 +2093,78 @@ precompile_test_harness("Binding Unique") do load_path @test UniqueBinding2.thebinding2 === ccall(:jl_get_module_binding, Ref{Core.Binding}, (Any, Any, Cint), UniqueBinding2, :thebinding, true) end +precompile_test_harness("Detecting importing outside of a package module") do load_path + io = IOBuffer() + write(joinpath(load_path, "ImportBeforeMod.jl"), + """ + import Printf + module ImportBeforeMod + end #module + """) + @test_throws r"Failed to precompile ImportBeforeMod" Base.compilecache(Base.identify_package("ImportBeforeMod"), io, io) + @test occursin( + "`using/import Printf` outside of a Module detected. Importing a package outside of a module is not allowed during package precompilation.", + String(take!(io))) + + + write(joinpath(load_path, "HarmlessComments.jl"), + """ + # import Printf + #= + import Printf + =# + module HarmlessComments + end #module + # import Printf + #= + import Printf + =# + """) + Base.compilecache(Base.identify_package("HarmlessComments")) + + + write(joinpath(load_path, "ImportAfterMod.jl"), """ + module ImportAfterMod + end #module + import Printf + """) + @test_throws r"Failed to precompile ImportAfterMod" Base.compilecache(Base.identify_package("ImportAfterMod"), io, io) + @test occursin( + "`using/import Printf` outside of a Module detected. 
Importing a package outside of a module is not allowed during package precompilation.", + String(take!(io))) +end + +precompile_test_harness("No package module") do load_path + io = IOBuffer() + write(joinpath(load_path, "NoModule.jl"), + """ + 1 + """) + @test_throws r"Failed to precompile NoModule" Base.compilecache(Base.identify_package("NoModule"), io, io) + @test occursin( + "NoModule [top-level] did not define the expected module `NoModule`, check for typos in package module name", + String(take!(io))) + + + write(joinpath(load_path, "WrongModuleName.jl"), + """ + module DifferentName + x = 1 + end #module + """) + @test_throws r"Failed to precompile WrongModuleName" Base.compilecache(Base.identify_package("WrongModuleName"), io, io) + @test occursin( + "WrongModuleName [top-level] did not define the expected module `WrongModuleName`, check for typos in package module name", + String(take!(io))) + + + write(joinpath(load_path, "NoModuleWithImport.jl"), """ + import Printf + """) + @test_throws r"Failed to precompile NoModuleWithImport" Base.compilecache(Base.identify_package("NoModuleWithImport"), io, io) + @test occursin( + "`using/import Printf` outside of a Module detected. Importing a package outside of a module is not allowed during package precompilation.", + String(take!(io))) +end + finish_precompile_test!() From fc9f1470458ad6bdeb1f56c1150d651814a0a164 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Mon, 23 Sep 2024 18:48:18 -0400 Subject: [PATCH 060/537] fall back to slower stat filesize if optimized filesize fails (#55641) --- base/iostream.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/iostream.jl b/base/iostream.jl index 762f881cfbecb..74908344e078e 100644 --- a/base/iostream.jl +++ b/base/iostream.jl @@ -230,8 +230,8 @@ end function filesize(s::IOStream) sz = @_lock_ios s ccall(:ios_filesize, Int64, (Ptr{Cvoid},), s.ios) if sz == -1 - err = Libc.errno() - throw(IOError(string("filesize: ", Libc.strerror(err), " for ", s.name), err)) + # if `s` is not seekable `ios_filesize` can fail, so fall back to slower stat method + sz = filesize(stat(s)) end return sz end From 9f1c0686405173a0ebd065e3322e44a3e7ec0a26 Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Mon, 23 Sep 2024 21:53:46 -0500 Subject: [PATCH 061/537] Use "index" instead of "subscript" to refer to indexing in performance tips (#55846) --- doc/src/manual/performance-tips.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/manual/performance-tips.md b/doc/src/manual/performance-tips.md index 436d58f54754a..417d5ac7a4ca1 100644 --- a/doc/src/manual/performance-tips.md +++ b/doc/src/manual/performance-tips.md @@ -1537,7 +1537,7 @@ be modified as suggested by the warnings. Sometimes you can enable better optimization by promising certain program properties. * Use [`@inbounds`](@ref) to eliminate array bounds checking within expressions. Be certain before doing - this. If the subscripts are ever out of bounds, you may suffer crashes or silent corruption. + this. If the indices are ever out of bounds, you may suffer crashes or silent corruption. * Use [`@fastmath`](@ref) to allow floating point optimizations that are correct for real numbers, but lead to differences for IEEE numbers. Be careful when doing this, as this may change numerical results. This corresponds to the `-ffast-math` option of clang. 
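For reference, a minimal sketch of the `@inbounds` tip in the manual text above (the function name is illustrative, and the annotation is only safe here because the indices come from `eachindex`):

```julia
function inbounds_sum(x::Vector{Float64})
    s = 0.0
    # eachindex(x) yields only valid indices for x, so eliding the bounds
    # check cannot read out of bounds.
    @inbounds for i in eachindex(x)
        s += x[i]
    end
    return s
end

inbounds_sum(rand(1000))
```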
From db6d277f6226daa5739940a4642277cfe0a884ea Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Tue, 24 Sep 2024 10:02:47 +0200 Subject: [PATCH 062/537] privatize annotated string API, take two (#55845) https://github.com/JuliaLang/julia/pull/55453 is stuck on StyledStrings and Base documentation being entangled and there isn't a good way to have the documentation of Base types / methods live in an stdlib. This is a stop gap solution to finally be able to move forwards with 1.11. --- base/public.jl | 7 ------- base/strings/annotated.jl | 14 -------------- doc/src/base/strings.md | 19 ++++++++++++++----- doc/src/manual/strings.md | 4 ++++ 4 files changed, 18 insertions(+), 26 deletions(-) diff --git a/base/public.jl b/base/public.jl index 803766a0cec1b..2e8e777d2f91d 100644 --- a/base/public.jl +++ b/base/public.jl @@ -21,15 +21,8 @@ public ImmutableDict, OneTo, LogRange, - AnnotatedString, - AnnotatedChar, UUID, -# Annotated strings - annotatedstring, - annotate!, - annotations, - # Semaphores Semaphore, acquire, diff --git a/base/strings/annotated.jl b/base/strings/annotated.jl index be4c6887d4a6d..9a0b4b2825436 100644 --- a/base/strings/annotated.jl +++ b/base/strings/annotated.jl @@ -39,13 +39,6 @@ the combined range. See also [`AnnotatedChar`](@ref), [`annotatedstring`](@ref), [`annotations`](@ref), and [`annotate!`](@ref). -!!! warning - While the constructors are part of the Base public API, the fields - of `AnnotatedString` are not. This is to allow for potential future - changes in the implementation of this type. Instead use the - [`annotations`](@ref), and [`annotate!`](@ref) getter/setter - functions. - # Constructors ```julia @@ -81,13 +74,6 @@ More specifically, this is a simple wrapper around any other See also: [`AnnotatedString`](@ref), [`annotatedstring`](@ref), `annotations`, and `annotate!`. -!!! warning - While the constructors are part of the Base public API, the fields - of `AnnotatedChar` are not. This it to allow for potential future - changes in the implementation of this type. Instead use the - [`annotations`](@ref), and [`annotate!`](@ref) getter/setter - functions. - # Constructors ```julia diff --git a/doc/src/base/strings.md b/doc/src/base/strings.md index b7d16ffc7d487..a9637a1a7be3a 100644 --- a/doc/src/base/strings.md +++ b/doc/src/base/strings.md @@ -17,11 +17,6 @@ Core.String(::AbstractString) Base.SubString Base.LazyString Base.@lazy_str -Base.AnnotatedString -Base.AnnotatedChar -Base.annotatedstring -Base.annotations -Base.annotate! Base.transcode Base.unsafe_string Base.ncodeunits(::AbstractString) @@ -101,3 +96,17 @@ Base.escape_string Base.escape_raw_string Base.unescape_string ``` + +## `AnnotatedString`s + +!!! note + The API for AnnotatedStrings is considered experimental and is subject to change between + Julia versions. + +```@docs +Base.AnnotatedString +Base.AnnotatedChar +Base.annotatedstring +Base.annotations +Base.annotate! +``` diff --git a/doc/src/manual/strings.md b/doc/src/manual/strings.md index 5ba27b3921cec..c04e5e6d6760e 100644 --- a/doc/src/manual/strings.md +++ b/doc/src/manual/strings.md @@ -1207,6 +1207,10 @@ last backslash escapes a quote, since these backslashes appear before a quote. ## [Annotated Strings](@id man-annotated-strings) +!!! note + The API for AnnotatedStrings is considered experimental and is subject to change between + Julia versions. + It is sometimes useful to be able to hold metadata relating to regions of a string. 
A [`AnnotatedString`](@ref Base.AnnotatedString) wraps another string and allows for regions of it to be annotated with labelled values (`:label => value`). From c3af4fc24564c3ecda59a26648abe919f090929a Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Tue, 24 Sep 2024 05:48:28 -0400 Subject: [PATCH 063/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Downloads=20stdlib=20from=201061ecc=20to=2089d3c7d=20(#55854)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stdlib: Downloads URL: https://github.com/JuliaLang/Downloads.jl.git Stdlib branch: master Julia branch: master Old commit: 1061ecc New commit: 89d3c7d Julia version: 1.12.0-DEV Downloads version: 1.6.0(It's okay that it doesn't match) Bump invoked by: @KristofferC Powered by: [BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl) Diff: https://github.com/JuliaLang/Downloads.jl/compare/1061ecc377a053fce0df94e1a19e5260f7c030f5...89d3c7dded535a77551e763a437a6d31e4d9bf84 ``` $ git log --oneline 1061ecc..89d3c7d 89d3c7d fix cancelling upload requests (#259) df33406 gracefully cancel a request (#256) ``` Co-authored-by: Dilum Aluthge --- .../md5 | 1 - .../sha512 | 1 - .../md5 | 1 + .../sha512 | 1 + stdlib/Downloads.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/Downloads-1061ecc377a053fce0df94e1a19e5260f7c030f5.tar.gz/md5 delete mode 100644 deps/checksums/Downloads-1061ecc377a053fce0df94e1a19e5260f7c030f5.tar.gz/sha512 create mode 100644 deps/checksums/Downloads-89d3c7dded535a77551e763a437a6d31e4d9bf84.tar.gz/md5 create mode 100644 deps/checksums/Downloads-89d3c7dded535a77551e763a437a6d31e4d9bf84.tar.gz/sha512 diff --git a/deps/checksums/Downloads-1061ecc377a053fce0df94e1a19e5260f7c030f5.tar.gz/md5 b/deps/checksums/Downloads-1061ecc377a053fce0df94e1a19e5260f7c030f5.tar.gz/md5 deleted file mode 100644 index f42bbedb6d415..0000000000000 --- a/deps/checksums/Downloads-1061ecc377a053fce0df94e1a19e5260f7c030f5.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -70878dd96911d6960537dfee2a820d98 diff --git a/deps/checksums/Downloads-1061ecc377a053fce0df94e1a19e5260f7c030f5.tar.gz/sha512 b/deps/checksums/Downloads-1061ecc377a053fce0df94e1a19e5260f7c030f5.tar.gz/sha512 deleted file mode 100644 index 83164cad9a89d..0000000000000 --- a/deps/checksums/Downloads-1061ecc377a053fce0df94e1a19e5260f7c030f5.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -87d2bdc6c85cbbce5302aab8ffe92fc542c9c71a396844fcc04c0416be059b00298b4816ab5e5491dbf865660a3a6152f1c245875a1ec75fb49b4c7ba0d303d8 diff --git a/deps/checksums/Downloads-89d3c7dded535a77551e763a437a6d31e4d9bf84.tar.gz/md5 b/deps/checksums/Downloads-89d3c7dded535a77551e763a437a6d31e4d9bf84.tar.gz/md5 new file mode 100644 index 0000000000000..611f3dd448d98 --- /dev/null +++ b/deps/checksums/Downloads-89d3c7dded535a77551e763a437a6d31e4d9bf84.tar.gz/md5 @@ -0,0 +1 @@ +2472bd6434d21c4b3e3199437e6fdcf7 diff --git a/deps/checksums/Downloads-89d3c7dded535a77551e763a437a6d31e4d9bf84.tar.gz/sha512 b/deps/checksums/Downloads-89d3c7dded535a77551e763a437a6d31e4d9bf84.tar.gz/sha512 new file mode 100644 index 0000000000000..6937982e838f3 --- /dev/null +++ b/deps/checksums/Downloads-89d3c7dded535a77551e763a437a6d31e4d9bf84.tar.gz/sha512 @@ -0,0 +1 @@ +0a3fa9a09de81aa9676dbc7448408c7503f45e42519a2667540ad890316c7da089c95de5464a2032171f963c6f3cba73d6b3c246f1c7ac6ede283fc8132d5209 diff --git a/stdlib/Downloads.version b/stdlib/Downloads.version index 
cb041d86d7f66..b539771fbdb47 100644 --- a/stdlib/Downloads.version +++ b/stdlib/Downloads.version @@ -1,4 +1,4 @@ DOWNLOADS_BRANCH = master -DOWNLOADS_SHA1 = 1061ecc377a053fce0df94e1a19e5260f7c030f5 +DOWNLOADS_SHA1 = 89d3c7dded535a77551e763a437a6d31e4d9bf84 DOWNLOADS_GIT_URL := https://github.com/JuliaLang/Downloads.jl.git DOWNLOADS_TAR_URL = https://api.github.com/repos/JuliaLang/Downloads.jl/tarball/$1 From 2943833bea8a8c05f47de1edc154bbb6888547a1 Mon Sep 17 00:00:00 2001 From: Richard Littauer Date: Wed, 25 Sep 2024 00:00:13 +1200 Subject: [PATCH 064/537] docs: Small edits to noteworthy differences (#55852) - The first line edit changes it so that the Julia example goes first, not the Python example, keeping with the general flow of the lines above. - The second adds a "the" that is missing. --- doc/src/manual/noteworthy-differences.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/manual/noteworthy-differences.md b/doc/src/manual/noteworthy-differences.md index 181fe0a30eb38..33285bde8a066 100644 --- a/doc/src/manual/noteworthy-differences.md +++ b/doc/src/manual/noteworthy-differences.md @@ -220,8 +220,8 @@ For users coming to Julia from R, these are some noteworthy differences: * Unlike Python, Julia allows [AbstractArrays with arbitrary indexes](https://julialang.org/blog/2017/04/offset-arrays/). Python's special interpretation of negative indexing, `a[-1]` and `a[-2]`, should be written `a[end]` and `a[end-1]` in Julia. - * Julia requires `end` for indexing until the last element. `x[1:]` in Python is equivalent to `x[2:end]` in Julia. - * In Julia, `:` before any object creates a [`Symbol`](@ref) or *quotes* an expression; so, `x[:5]` is same as `x[5]`. If you want to get the first `n` elements of an array, then use range indexing. + * Julia requires `end` for indexing until the last element. `x[2:end]` in Julia is equivalent to `x[1:]` in Python. + * In Julia, `:` before any object creates a [`Symbol`](@ref) or *quotes* an expression; so, `x[:5]` is the same as `x[5]`. If you want to get the first `n` elements of an array, then use range indexing. * Julia's range indexing has the format of `x[start:step:stop]`, whereas Python's format is `x[start:(stop+1):step]`. Hence, `x[0:10:2]` in Python is equivalent to `x[1:2:10]` in Julia. Similarly, `x[::-1]` in Python, which refers to the reversed array, is equivalent to `x[end:-1:1]` in Julia. * In Julia, ranges can be constructed independently as `start:step:stop`, the same syntax it uses in array-indexing. The `range` function is also supported. From a06a80162bb9bdf6f7e91dc18e7ccf5c12673ca4 Mon Sep 17 00:00:00 2001 From: Timothy Date: Wed, 25 Sep 2024 03:15:48 +0800 Subject: [PATCH 065/537] Add filesystem func to transform a path to a URI (#55454) In a few places across Base and the stdlib, we emit paths that we like people to be able to click on in their terminal and editor. Up to this point, we have relied on auto-filepath detection, but this does not allow for alternative link text, such as contracted paths. Doing so (via OSC 8 terminal links for example) requires filepath URI encoding. This functionality was previously part of a PR modifying stacktrace printing (#51816), but after that became held up for unrelated reasons and another PR appeared that would benefit from this utility (#55335), I've split out this functionality so it can be used before the stacktrace printing PR is resolved. 
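A short usage sketch based on the docstring and tests added below; `uripath` is an internal helper (the tests reach it as `Base.Filesystem.uripath`), and the host component of the result is platform dependent (empty on Windows, `gethostname()` on Unix), so the hostname shown here is a placeholder:

```julia
# Reserved characters such as spaces are percent-encoded per RFC 3986:
Base.Filesystem.uripath("/home/user/example file.jl")
# e.g. "file://myhost/home/user/example%20file.jl" on a Unix machine

# On Windows the drive letter is kept and backslashes become '/':
# Base.Filesystem.uripath("C:\\Users\\user\\example file.jl")
# "file:///C:/Users/user/example%20file.jl"
```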
--- base/path.jl | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++ test/path.jl | 13 ++++++++++++ 2 files changed, 69 insertions(+) diff --git a/base/path.jl b/base/path.jl index 3b8124f34f174..f6d3266d9738c 100644 --- a/base/path.jl +++ b/base/path.jl @@ -613,3 +613,59 @@ relpath(path::AbstractString, startpath::AbstractString) = for f in (:isdirpath, :splitdir, :splitdrive, :splitext, :normpath, :abspath) @eval $f(path::AbstractString) = $f(String(path)) end + +""" + uripath(path::AbstractString) + +Encode `path` as a URI as per [RFC8089: The "file" URI +Scheme](https://www.rfc-editor.org/rfc/rfc8089), [RFC3986: Uniform Resource +Identifier (URI): Generic Syntax](https://www.rfc-editor.org/rfc/rfc3986), and +the [Freedesktop File URI spec](https://www.freedesktop.org/wiki/Specifications/file-uri-spec/). + +## Examples + +```julia-repl +julia> uripath("/home/user/example file.jl") # On a unix machine +"file:///home/user/example%20file.jl" + +juila> uripath("C:\\Users\\user\\example file.jl") # On a windows machine +"file:///C:/Users/user/example%20file.jl" +``` +""" +function uripath end + +@static if Sys.iswindows() + function uripath(path::String) + percent_escape(s) = # RFC3986 Section 2.1 + '%' * join(map(b -> uppercase(string(b, base=16)), codeunits(s)), '%') + encode_uri_component(s) = # RFC3986 Section 2.3 + replace(s, r"[^A-Za-z0-9\-_.~/]+" => percent_escape) + path = abspath(path) + if startswith(path, "\\\\") # UNC path, RFC8089 Appendix E.3 + unixpath = join(eachsplit(path, path_separator_re, keepempty=false), '/') + string("file://", encode_uri_component(unixpath)) # RFC8089 Section 2 + else + drive, localpath = splitdrive(path) # Assuming that non-UNC absolute paths on Windows always have a drive component + unixpath = join(eachsplit(localpath, path_separator_re, keepempty=false), '/') + encdrive = replace(encode_uri_component(drive), "%3A" => ':', "%7C" => '|') # RFC8089 Appendices D.2, E.2.1, and E.2.2 + string("file:///", encdrive, '/', encode_uri_component(unixpath)) # RFC8089 Section 2 + end + end +else + function uripath(path::String) + percent_escape(s) = # RFC3986 Section 2.1 + '%' * join(map(b -> uppercase(string(b, base=16)), codeunits(s)), '%') + encode_uri_component(s) = # RFC3986 Section 2.3 + replace(s, r"[^A-Za-z0-9\-_.~/]+" => percent_escape) + localpath = join(eachsplit(abspath(path), path_separator_re, keepempty=false), '/') + host = if ispath("/proc/sys/fs/binfmt_misc/WSLInterop") # WSL sigil + distro = get(ENV, "WSL_DISTRO_NAME", "") # See + "wsl\$/$distro" # See and + else + gethostname() # Freedesktop File URI Spec, Hostnames section + end + string("file://", encode_uri_component(host), '/', encode_uri_component(localpath)) # RFC8089 Section 2 + end +end + +uripath(path::AbstractString) = uripath(String(path)) diff --git a/test/path.jl b/test/path.jl index 2f4f2d0983a58..4c2c7034577d5 100644 --- a/test/path.jl +++ b/test/path.jl @@ -311,6 +311,19 @@ test_relpath() end + @testset "uripath" begin + host = if Sys.iswindows() "" else gethostname() end + sysdrive, uridrive = if Sys.iswindows() "C:\\", "C:/" else "/", "" end + @test Base.Filesystem.uripath("$(sysdrive)some$(sep)file.txt") == "file://$host/$(uridrive)some/file.txt" + @test Base.Filesystem.uripath("$(sysdrive)another$(sep)$(sep)folder$(sep)file.md") == "file://$host/$(uridrive)another/folder/file.md" + @test Base.Filesystem.uripath("$(sysdrive)some file with ^odd% chars") == "file://$host/$(uridrive)some%20file%20with%20%5Eodd%25%20chars" + @test Base.Filesystem.uripath("$(sysdrive)weird 
chars like @#&()[]{}") == "file://$host/$(uridrive)weird%20chars%20like%20%40%23%26%28%29%5B%5D%7B%7D" + @test Base.Filesystem.uripath("$sysdrive") == "file://$host/$uridrive" + @test Base.Filesystem.uripath(".") == Base.Filesystem.uripath(pwd()) + @test Base.Filesystem.uripath("$(sysdrive)unicode$(sep)Δεδομένα") == "file://$host/$(uridrive)unicode/%CE%94%CE%B5%CE%B4%CE%BF%CE%BC%CE%AD%CE%BD%CE%B1" + @test Base.Filesystem.uripath("$(sysdrive)unicode$(sep)🧮🐛🔨") == "file://$host/$(uridrive)unicode/%F0%9F%A7%AE%F0%9F%90%9B%F0%9F%94%A8" + end + if Sys.iswindows() @testset "issue #23646" begin @test lowercase(relpath("E:\\a\\b", "C:\\c")) == "e:\\a\\b" From 060035d1eaec95a8c3f138896a5e42dc871381fe Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Tue, 24 Sep 2024 22:22:13 +0200 Subject: [PATCH 066/537] constrain the path argument of `include` functions to `AbstractString` (#55466) Each `Module` defined with `module` automatically gets an `include` function with two methods. Each of those two methods takes a file path as its last argument. Even though the path argument is unconstrained by dispatch, it's documented as constrained with `::AbstractString`: https://docs.julialang.org/en/v1.11-dev/base/base/#include Furthermore, I think that any invocation of `include` with a non-`AbstractString` path will necessarily throw a `MethodError` eventually. Thus this change should be harmless. Adding the type constraint to the path argument is an improvement because any possible exception would be thrown earlier than before. Apart from modules defined with `module`, the same issue is present with the anonymous modules created by `evalfile`, which is also addressed. Sidenote: `evalfile` seems to be completely untested apart from the test added here. Co-authored-by: Florian --- base/loading.jl | 4 ++-- src/jlfrontend.scm | 4 ++-- test/loading.jl | 11 +++++++++++ test/testhelpers/just_module.jl | 1 + 4 files changed, 16 insertions(+), 4 deletions(-) create mode 100644 test/testhelpers/just_module.jl diff --git a/base/loading.jl b/base/loading.jl index 2c4a7a16ec7c0..cf7e41a0b5b2b 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -2773,8 +2773,8 @@ function evalfile(path::AbstractString, args::Vector{String}=String[]) Expr(:toplevel, :(const ARGS = $args), :(eval(x) = $(Expr(:core, :eval))(__anon__, x)), - :(include(x) = $(Expr(:top, :include))(__anon__, x)), - :(include(mapexpr::Function, x) = $(Expr(:top, :include))(mapexpr, __anon__, x)), + :(include(x::AbstractString) = $(Expr(:top, :include))(__anon__, x)), + :(include(mapexpr::Function, x::AbstractString) = $(Expr(:top, :include))(mapexpr, __anon__, x)), :(include($path)))) end evalfile(path::AbstractString, args::Vector) = evalfile(path, String[args...]) diff --git a/src/jlfrontend.scm b/src/jlfrontend.scm index 2c5f42eda5ce8..463e39c41d00a 100644 --- a/src/jlfrontend.scm +++ b/src/jlfrontend.scm @@ -211,11 +211,11 @@ (block ,@loc (call (core eval) ,name ,x))) - (= (call include ,x) + (= (call include (:: ,x (top AbstractString))) (block ,@loc (call (core _call_latest) (top include) ,name ,x))) - (= (call include (:: ,mex (top Function)) ,x) + (= (call include (:: ,mex (top Function)) (:: ,x (top AbstractString))) (block ,@loc (call (core _call_latest) (top include) ,mex ,name ,x))))) diff --git a/test/loading.jl b/test/loading.jl index fb200bf7a0a93..bdaca7f9dc69e 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -793,6 +793,17 @@ import .Foo28190.Libdl; import Libdl end end +@testset "`::AbstractString` constraint on the path argument to 
`include`" begin + for m ∈ (NotPkgModule, evalfile("testhelpers/just_module.jl")) + let i = m.include + @test !applicable(i, (nothing,)) + @test !applicable(i, (identity, nothing,)) + @test !hasmethod(i, Tuple{Nothing}) + @test !hasmethod(i, Tuple{Function,Nothing}) + end + end +end + @testset "`Base.project_names` and friends" begin # Some functions in Pkg assumes that these tuples have the same length n = length(Base.project_names) diff --git a/test/testhelpers/just_module.jl b/test/testhelpers/just_module.jl new file mode 100644 index 0000000000000..71bd87e660eae --- /dev/null +++ b/test/testhelpers/just_module.jl @@ -0,0 +1 @@ +@__MODULE__ From b0db75d7be83aa1a019e0801b30bb647d1d3e01e Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 24 Sep 2024 16:42:36 -0400 Subject: [PATCH 067/537] Mmap: fix grow! for non file IOs (#55849) Fixes https://github.com/JuliaLang/julia/issues/54203 Requires #55641 Based on https://github.com/JuliaLang/julia/pull/55641#issuecomment-2334162489 cc. @JakeZw @ronisbr --------- Co-authored-by: Jameson Nash --- stdlib/Mmap/src/Mmap.jl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/stdlib/Mmap/src/Mmap.jl b/stdlib/Mmap/src/Mmap.jl index e6987582bf511..df2f4f1a19991 100644 --- a/stdlib/Mmap/src/Mmap.jl +++ b/stdlib/Mmap/src/Mmap.jl @@ -86,6 +86,8 @@ grow!(::Anonymous,o::Integer,l::Integer) = return function grow!(io::IO, offset::Integer, len::Integer) pos = position(io) filelen = filesize(io) + # If non-regular file skip trying to grow since we know that will fail the ftruncate syscall + filelen == 0 && !isfile(io) && return if filelen < offset + len failure = ccall(:jl_ftruncate, Cint, (Cint, Int64), fd(io), offset+len) Base.systemerror(:ftruncate, failure != 0) @@ -218,7 +220,7 @@ function mmap(io::IO, # platform-specific mmapping @static if Sys.isunix() prot, flags, iswrite = settings(file_desc, shared) - if requestedSizeLarger + if requestedSizeLarger && isfile(io) # add a condition to this line to ensure it only checks files if iswrite if grow grow!(io, offset, len) From 25cbe006f3a610c204d8f2f67f1200a13a8ce349 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 24 Sep 2024 18:11:43 -0400 Subject: [PATCH 068/537] codegen: split gc roots from other bits on stack (#55767) In order to help avoid memory provenance issues, and better utilize stack space (somewhat), and use FCA less, change the preferred representation of an immutable object to be a pair of `` values. This packing requires some care at the boundaries and if the expected field alignment exceeds that of a pointer. The change is expected to eventually make codegen more flexible at representing unions of values with both bits and pointer regions. Eventually we can also have someone improve the late-gc-lowering pass to take advantage of this increased information accuracy, but currently it will not be any better than before at laying out the frame. 
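To make the new representation concrete, here is a small, hypothetical Julia sketch (the `MixedBits` struct and `demo` function are illustrative only and are not part of this patch): an immutable value that combines plain bits with a GC-tracked pointer is exactly the kind of value whose unboxed stack slot is now conceptually kept as a bits buffer plus a separate set of roots.

```julia
using InteractiveUtils  # for code_llvm

# Hypothetical example: an immutable struct whose unboxed representation
# contains both raw bits (`count`) and a GC-tracked pointer (`label`),
# i.e. the case this change splits into a bits region plus gc roots.
struct MixedBits
    count::Int
    label::String
end

demo(n) = MixedBits(n, string(n))

# Inspect the generated IR to see how such a value is kept on the stack.
code_llvm(demo, (Int,); debuginfo=:none)
```

Under the split scheme the `label` pointer can be tracked as a stack root of its own, while the `count` bits stay in an ordinary, untracked stack allocation, as described above.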
--- src/ccall.cpp | 39 +- src/cgutils.cpp | 718 ++++++++++++++++++++------- src/codegen.cpp | 877 +++++++++++++++++++-------------- src/intrinsics.cpp | 54 +- src/llvm-codegen-shared.h | 1 - src/llvm-final-gc-lowering.cpp | 2 +- src/llvm-gc-interface-passes.h | 9 +- src/llvm-late-gc-lowering.cpp | 84 +++- test/compiler/codegen.jl | 5 +- 9 files changed, 1146 insertions(+), 643 deletions(-) diff --git a/src/ccall.cpp b/src/ccall.cpp index e336de8e3574f..2de5be6906e7c 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -441,22 +441,13 @@ static Value *llvm_type_rewrite( // we need to use this alloca copy trick instead // On ARM and AArch64, the ABI requires casting through memory to different // sizes. - Value *from; - Value *to; const DataLayout &DL = ctx.builder.GetInsertBlock()->getModule()->getDataLayout(); Align align = std::max(DL.getPrefTypeAlign(target_type), DL.getPrefTypeAlign(from_type)); - if (DL.getTypeAllocSize(target_type) >= DL.getTypeAllocSize(from_type)) { - to = emit_static_alloca(ctx, target_type, align); - setName(ctx.emission_context, to, "type_rewrite_buffer"); - from = to; - } - else { - from = emit_static_alloca(ctx, from_type, align); - setName(ctx.emission_context, from, "type_rewrite_buffer"); - to = from; - } - ctx.builder.CreateAlignedStore(v, from, align); - auto pun = ctx.builder.CreateAlignedLoad(target_type, to, align); + size_t nb = std::max(DL.getTypeAllocSize(target_type), DL.getTypeAllocSize(from_type)); + AllocaInst *cast = emit_static_alloca(ctx, nb, align); + setName(ctx.emission_context, cast, "type_rewrite_buffer"); + ctx.builder.CreateAlignedStore(v, cast, align); + auto pun = ctx.builder.CreateAlignedLoad(target_type, cast, align); setName(ctx.emission_context, pun, "type_rewrite"); return pun; } @@ -494,7 +485,7 @@ static const std::string make_errmsg(const char *fname, int n, const char *err) return msg.str(); } -static void typeassert_input(jl_codectx_t &ctx, const jl_cgval_t &jvinfo, jl_value_t *jlto, jl_unionall_t *jlto_env, int argn) +static jl_cgval_t typeassert_input(jl_codectx_t &ctx, const jl_cgval_t &jvinfo, jl_value_t *jlto, jl_unionall_t *jlto_env, int argn) { if (jlto != (jl_value_t*)jl_any_type && !jl_subtype(jvinfo.typ, jlto)) { if (jlto == (jl_value_t*)jl_voidpointer_type) { @@ -502,6 +493,7 @@ static void typeassert_input(jl_codectx_t &ctx, const jl_cgval_t &jvinfo, jl_val if (!jl_is_cpointer_type(jvinfo.typ)) { // emit a typecheck, if not statically known to be correct emit_cpointercheck(ctx, jvinfo, make_errmsg("ccall", argn + 1, "")); + return update_julia_type(ctx, jvinfo, (jl_value_t*)jl_pointer_type); } } else { @@ -526,8 +518,10 @@ static void typeassert_input(jl_codectx_t &ctx, const jl_cgval_t &jvinfo, jl_val ctx.builder.CreateUnreachable(); ctx.builder.SetInsertPoint(passBB); } + return update_julia_type(ctx, jvinfo, jlto); } } + return jvinfo; } // Emit code to convert argument to form expected by C ABI @@ -537,7 +531,7 @@ static void typeassert_input(jl_codectx_t &ctx, const jl_cgval_t &jvinfo, jl_val static Value *julia_to_native( jl_codectx_t &ctx, Type *to, bool toboxed, jl_value_t *jlto, jl_unionall_t *jlto_env, - const jl_cgval_t &jvinfo, + jl_cgval_t jvinfo, bool byRef, int argn) { // We're passing Any @@ -547,7 +541,7 @@ static Value *julia_to_native( } assert(jl_is_datatype(jlto) && jl_struct_try_layout((jl_datatype_t*)jlto)); - typeassert_input(ctx, jvinfo, jlto, jlto_env, argn); + jvinfo = typeassert_input(ctx, jvinfo, jlto, jlto_env, argn); if (!byRef) return emit_unbox(ctx, to, jvinfo, jlto); @@ -556,14 
+550,7 @@ static Value *julia_to_native( Align align(julia_alignment(jlto)); Value *slot = emit_static_alloca(ctx, to, align); setName(ctx.emission_context, slot, "native_convert_buffer"); - if (!jvinfo.ispointer()) { - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, jvinfo.tbaa); - ai.decorateInst(ctx.builder.CreateStore(emit_unbox(ctx, to, jvinfo, jlto), slot)); - } - else { - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, jvinfo.tbaa); - emit_memcpy(ctx, slot, ai, jvinfo, jl_datatype_size(jlto), align, align); - } + emit_unbox_store(ctx, jvinfo, slot, ctx.tbaa().tbaa_stack, align); return slot; } @@ -1991,7 +1978,7 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs) // If the value is not boxed, try to compute the object id without // reboxing it. auto T_p_derived = PointerType::get(ctx.builder.getContext(), AddressSpace::Derived); - if (!val.isghost && !val.ispointer()) + if (!val.isghost) val = value_to_pointer(ctx, val); Value *args[] = { emit_typeof(ctx, val, false, true), diff --git a/src/cgutils.cpp b/src/cgutils.cpp index bf5c67ae9f849..9124638ce7446 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -323,6 +323,8 @@ static bool type_is_permalloc(jl_value_t *typ) } +// find the offset of pointer fields which never need a write barrier since their type-analysis +// shows they are permanently rooted static void find_perm_offsets(jl_datatype_t *typ, SmallVectorImpl &res, unsigned offset) { // This is a inlined field at `offset`. @@ -346,14 +348,37 @@ static void find_perm_offsets(jl_datatype_t *typ, SmallVectorImpl &res } } -static llvm::SmallVector get_gc_roots_for(jl_codectx_t &ctx, const jl_cgval_t &x) +// load a pointer to N inlined_roots into registers (as a SmallVector) +static llvm::SmallVector load_gc_roots(jl_codectx_t &ctx, Value *inline_roots_ptr, size_t npointers, bool isVolatile=false) +{ + SmallVector gcroots(npointers); + Type *T_prjlvalue = ctx.types().T_prjlvalue; + auto roots_ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); + for (size_t i = 0; i < npointers; i++) { + auto *ptr = ctx.builder.CreateAlignedLoad(T_prjlvalue, emit_ptrgep(ctx, inline_roots_ptr, i * sizeof(jl_value_t*)), Align(sizeof(void*)), isVolatile); + roots_ai.decorateInst(ptr); + gcroots[i] = ptr; + } + return gcroots; +} + +// inlined bool indicates whether this must return the inlined roots inside x separately, or whether x itself may be used as the root (if x is already isboxed) +static llvm::SmallVector get_gc_roots_for(jl_codectx_t &ctx, const jl_cgval_t &x, bool inlined=false) { if (x.constant || x.typ == jl_bottom_type) return {}; - if (x.Vboxed) // superset of x.isboxed + if (!inlined && x.Vboxed) // superset of x.isboxed return {x.Vboxed}; - assert(!x.isboxed); - if (x.ispointer()) { + assert(!x.isboxed || !inlined); + if (!x.inline_roots.empty()) { + // if (!inlined) { // TODO: implement this filter operation + // SmallVector perm_offsets; + // find_perm_offsets(typ, perm_offsets, 0); + // return filter(!in(perm_offsets), x.inline_roots) + // } + return x.inline_roots; + } + if (!inlined && x.ispointer()) { assert(x.V); assert(x.V->getType()->getPointerAddressSpace() != AddressSpace::Tracked); return {x.V}; @@ -363,8 +388,7 @@ static llvm::SmallVector get_gc_roots_for(jl_codectx_t &ctx, co Type *T = julia_type_to_llvm(ctx, jltype); Value *agg = emit_unbox(ctx, T, x, jltype); SmallVector perm_offsets; - if (jltype && jl_is_datatype(jltype) && ((jl_datatype_t*)jltype)->layout) - find_perm_offsets((jl_datatype_t*)jltype, perm_offsets, 0); 
+ find_perm_offsets((jl_datatype_t*)jltype, perm_offsets, 0); return ExtractTrackedValues(agg, agg->getType(), false, ctx.builder, perm_offsets); } // nothing here to root, move along @@ -1078,6 +1102,247 @@ static void emit_memcpy(jl_codectx_t &ctx, Value *dst, jl_aliasinfo_t const &dst emit_memcpy_llvm(ctx, dst, dst_ai, data_pointer(ctx, src), src_ai, sz, align_dst, align_src, is_volatile); } +static bool allpointers(jl_datatype_t *typ) +{ + return jl_datatype_size(typ) == typ->layout->npointers * sizeof(void*); +} + +// compute the space required by split_value_into, by simulating it +// returns (sizeof(split_value), n_pointers) +static std::pair split_value_size(jl_datatype_t *typ) +{ + assert(jl_is_datatype(typ)); + size_t dst_off = 0; + bool hasptr = typ->layout->first_ptr >= 0; + size_t npointers = hasptr ? typ->layout->npointers : 0; + // drop the data pointer if the entire structure is just pointers + // TODO: eventually we could drop the slots for the pointers from inside the + // types to pack it together, but this can change the alignment of the bits + // in the fields inside, even if those bits have no pointers themselves. So + // we would actually need to compute, for each pointer, whether any + // subsequent field needed the extra alignment (for example, we can + // drop space for any runs of two/four pointer). Some of these + // functions are already written in a way to support that, but not + // fully implemented yet. + bool nodata = allpointers(typ); + if (nodata) + dst_off = 0; + else + dst_off = jl_datatype_size(typ); + return std::make_pair(dst_off, npointers); +} + +// take a value `x` and split its bits into dst and the roots into inline_roots +static void split_value_into(jl_codectx_t &ctx, const jl_cgval_t &x, Align align_src, Value *dst, Align align_dst, jl_aliasinfo_t const &dst_ai, Value *inline_roots_ptr, jl_aliasinfo_t const &roots_ai, bool isVolatileStore=false) +{ + jl_datatype_t *typ = (jl_datatype_t*)x.typ; + assert(jl_is_concrete_type(x.typ)); + auto src_ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa); + Type *T_prjlvalue = ctx.types().T_prjlvalue; + if (!x.inline_roots.empty()) { + auto sizes = split_value_size(typ); + if (sizes.first > 0) + emit_memcpy(ctx, dst, dst_ai, x.V, src_ai, sizes.first, align_dst, align_src, isVolatileStore); + for (size_t i = 0; i < sizes.second; i++) { + Value *unbox = x.inline_roots[i]; + roots_ai.decorateInst(ctx.builder.CreateAlignedStore(unbox, emit_ptrgep(ctx, inline_roots_ptr, i * sizeof(void*)), Align(sizeof(void*)), isVolatileStore)); + } + return; + } + if (inline_roots_ptr == nullptr) { + emit_unbox_store(ctx, x, dst, ctx.tbaa().tbaa_stack, align_dst, isVolatileStore); + return; + } + Value *src = data_pointer(ctx, value_to_pointer(ctx, x)); + bool isstack = isa(src->stripInBoundsOffsets()) || src_ai.tbaa == ctx.tbaa().tbaa_stack; + size_t dst_off = 0; + size_t src_off = 0; + bool hasptr = typ->layout->first_ptr >= 0; + size_t npointers = hasptr ? typ->layout->npointers : 0; + bool nodata = allpointers(typ); + for (size_t i = 0; true; i++) { + bool last = i == npointers; + size_t ptr = last ? 
jl_datatype_size(typ) : (jl_ptr_offset(typ, i) * sizeof(void*)); + if (ptr > src_off) { + emit_memcpy(ctx, + emit_ptrgep(ctx, dst, dst_off), + dst_ai, + emit_ptrgep(ctx, src, src_off), + src_ai, + ptr - src_off, + align_dst, + align_src, + isVolatileStore); + dst_off += ptr - src_off; + } + if (last) + break; + auto *load = ctx.builder.CreateAlignedLoad(T_prjlvalue, emit_ptrgep(ctx, src, ptr), Align(sizeof(void*))); + if (!isstack) + load->setOrdering(AtomicOrdering::Unordered); + src_ai.decorateInst(load); + roots_ai.decorateInst(ctx.builder.CreateAlignedStore(load, emit_ptrgep(ctx, inline_roots_ptr, i * sizeof(void*)), Align(sizeof(void*)), isVolatileStore)); + align_src = align_dst = Align(sizeof(void*)); + src_off = ptr + sizeof(void*); + if (!nodata) { + // store an undef pointer here, to make sure nobody looks at this + dst_ai.decorateInst(ctx.builder.CreateAlignedStore( + ctx.builder.getIntN(sizeof(void*) * 8, (uint64_t)-1), + emit_ptrgep(ctx, dst, dst_off), + align_src, + isVolatileStore)); + dst_off += sizeof(void*); + assert(dst_off == src_off); + } + } +} + +static void split_value_into(jl_codectx_t &ctx, const jl_cgval_t &x, Align align_src, Value *dst, Align align_dst, jl_aliasinfo_t const &dst_ai, MutableArrayRef inline_roots) +{ + jl_datatype_t *typ = (jl_datatype_t*)x.typ; + assert(jl_is_concrete_type(x.typ)); + auto src_ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa); + Type *T_prjlvalue = ctx.types().T_prjlvalue; + if (!x.inline_roots.empty()) { + auto sizes = split_value_size(typ); + if (sizes.first > 0) + emit_memcpy(ctx, dst, dst_ai, x.V, src_ai, sizes.first, align_dst, align_src); + for (size_t i = 0; i < sizes.second; i++) + inline_roots[i] = x.inline_roots[i]; + return; + } + if (inline_roots.empty()) { + emit_unbox_store(ctx, x, dst, ctx.tbaa().tbaa_stack, align_dst); + return; + } + Value *src = data_pointer(ctx, value_to_pointer(ctx, x)); + bool isstack = isa(src->stripInBoundsOffsets()) || src_ai.tbaa == ctx.tbaa().tbaa_stack; + size_t dst_off = 0; + size_t src_off = 0; + bool hasptr = typ->layout->first_ptr >= 0; + size_t npointers = hasptr ? typ->layout->npointers : 0; + bool nodata = allpointers(typ); + for (size_t i = 0; true; i++) { + bool last = i == npointers; + size_t ptr = last ? jl_datatype_size(typ) : (jl_ptr_offset(typ, i) * sizeof(void*)); + if (ptr > src_off) { + emit_memcpy(ctx, + emit_ptrgep(ctx, dst, dst_off), + dst_ai, + emit_ptrgep(ctx, src, src_off), + src_ai, + ptr - src_off, + align_dst, + align_src); + dst_off += ptr - src_off; + } + if (last) + break; + auto *load = ctx.builder.CreateAlignedLoad(T_prjlvalue, emit_ptrgep(ctx, src, ptr), Align(sizeof(void*))); + if (!isstack) + load->setOrdering(AtomicOrdering::Unordered); + src_ai.decorateInst(load); + inline_roots[i] = load; + align_src = align_dst = Align(sizeof(void*)); + src_off = ptr + sizeof(void*); + if (!nodata) { + // store an undef pointer here, to make sure nobody looks at this + dst_ai.decorateInst(ctx.builder.CreateAlignedStore( + ctx.builder.getIntN(sizeof(void*) * 8, (uint64_t)-1), + emit_ptrgep(ctx, dst, dst_off), + align_src)); + dst_off += sizeof(void*); + assert(dst_off == src_off); + } + } +} + +static std::pair> split_value(jl_codectx_t &ctx, const jl_cgval_t &x, Align x_alignment) +{ + jl_datatype_t *typ = (jl_datatype_t*)x.typ; + auto sizes = split_value_size(typ); + Align align_dst(julia_alignment((jl_value_t*)typ)); + AllocaInst *bits = sizes.first > 0 ? 
emit_static_alloca(ctx, sizes.first, align_dst) : nullptr; + SmallVector roots(sizes.second); + auto stack_ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); + split_value_into(ctx, x, x_alignment, bits, align_dst, stack_ai, MutableArrayRef(roots)); + return std::make_pair(bits, roots); +} + +// Return the offset values corresponding to jl_field_offset, but into the two buffers for a split value (or -1) +static std::pair split_value_field(jl_datatype_t *typ, unsigned idx) +{ + size_t fldoff = jl_field_offset(typ, idx); + size_t src_off = 0; + size_t dst_off = 0; + assert(typ->layout->first_ptr >= 0); + size_t npointers = typ->layout->npointers; + bool nodata = allpointers(typ); + for (size_t i = 0; i < npointers; i++) { + size_t ptr = jl_ptr_offset(typ, i) * sizeof(void*); + if (ptr >= fldoff) { + if (ptr >= fldoff + jl_field_size(typ, idx)) + break; + bool onlyptr = jl_field_isptr(typ, idx) || allpointers((jl_datatype_t*)jl_field_type(typ, idx)); + return std::make_pair(onlyptr ? -1 : dst_off + fldoff - src_off, i); + } + dst_off += ptr - src_off; + src_off = ptr + sizeof(void*); + if (!nodata) { + assert(dst_off + sizeof(void*) == src_off); + dst_off = src_off; + } + } + return std::make_pair(dst_off + fldoff - src_off, -1); +} + +// Copy `x` to `dst`, where `x` was a split value and dst needs to have a native layout, copying any inlined roots back into their native location. +// This does not respect roots, so you must call emit_write_multibarrier afterwards. +static void recombine_value(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dst, jl_aliasinfo_t const &dst_ai, Align alignment, bool isVolatileStore) +{ + jl_datatype_t *typ = (jl_datatype_t*)x.typ; + assert(jl_is_concrete_type(x.typ)); + assert(typ->layout->first_ptr >= 0 && !x.inline_roots.empty()); + Align align_dst = alignment; + Align align_src(julia_alignment(x.typ)); + Value *src = x.V; + auto src_ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa); + size_t dst_off = 0; + size_t src_off = 0; + size_t npointers = typ->layout->npointers; + bool nodata = allpointers(typ); + bool isstack = isa(dst->stripInBoundsOffsets()) || dst_ai.tbaa == ctx.tbaa().tbaa_stack; + for (size_t i = 0; true; i++) { + bool last = i == npointers; + size_t ptr = last ? 
jl_datatype_size(typ) : (jl_ptr_offset(typ, i) * sizeof(void*)); + if (ptr > dst_off) { + emit_memcpy(ctx, + emit_ptrgep(ctx, dst, dst_off), + dst_ai, + emit_ptrgep(ctx, src, src_off), + src_ai, + ptr - dst_off, + align_dst, + align_src, + isVolatileStore); + src_off += ptr - dst_off; + } + if (last) + break; + auto *root = x.inline_roots[i]; + auto *store = ctx.builder.CreateAlignedStore(root, emit_ptrgep(ctx, dst, ptr), Align(sizeof(void*)), isVolatileStore); + if (!isstack) + store->setOrdering(AtomicOrdering::Unordered); + dst_ai.decorateInst(store); + align_dst = align_src = Align(sizeof(void*)); + dst_off = ptr + sizeof(void*); + if (!nodata) { + assert(src_off + sizeof(void*) == dst_off); + src_off = dst_off; + } + } +} + static Value *emit_tagfrom(jl_codectx_t &ctx, jl_datatype_t *dt) { if (dt->smalltag) @@ -1421,15 +1686,23 @@ static void null_load_check(jl_codectx_t &ctx, Value *v, jl_module_t *scope, jl_ } template -static Value *emit_guarded_test(jl_codectx_t &ctx, Value *ifnot, Value *defval, Func &&func) +static void emit_guarded_test(jl_codectx_t &ctx, Value *ifnot, MutableArrayRef defval, Func &&func) { - if (!ifnot) { - return func(); + if (ifnot == nullptr) { + auto res = func(); + assert(res.size() == defval.size()); + for (size_t i = 0; i < defval.size(); i++) + defval[i] = res[i]; + return; } if (auto Cond = dyn_cast(ifnot)) { if (Cond->isZero()) - return defval; - return func(); + return; + auto res = func(); + assert(res.size() == defval.size()); + for (size_t i = 0; i < defval.size(); i++) + defval[i] = res[i]; + return; } ++EmittedGuards; BasicBlock *currBB = ctx.builder.GetInsertBlock(); @@ -1438,16 +1711,33 @@ static Value *emit_guarded_test(jl_codectx_t &ctx, Value *ifnot, Value *defval, ctx.builder.CreateCondBr(ifnot, passBB, exitBB); ctx.builder.SetInsertPoint(passBB); auto res = func(); + assert(res.size() == defval.size()); passBB = ctx.builder.GetInsertBlock(); ctx.builder.CreateBr(exitBB); ctx.builder.SetInsertPoint(exitBB); - if (defval == nullptr) + for (size_t i = 0; i < defval.size(); i++) { + PHINode *phi = ctx.builder.CreatePHI(defval[i]->getType(), 2); + phi->addIncoming(defval[i], currBB); + phi->addIncoming(res[i], passBB); + setName(ctx.emission_context, phi, "guard_res"); + defval[i] = phi; + } +} + +template +static Value *emit_guarded_test(jl_codectx_t &ctx, Value *ifnot, Value *defval, Func &&func) +{ + MutableArrayRef res(&defval, defval == nullptr ? 0 : 1); + auto funcwrap = [&func] () -> SmallVector { + auto res = func(); + if (res == nullptr) + return {}; + return {res}; + }; + emit_guarded_test(ctx, ifnot, res, funcwrap); + if (res.empty()) return nullptr; - PHINode *phi = ctx.builder.CreatePHI(defval->getType(), 2); - phi->addIncoming(defval, currBB); - phi->addIncoming(res, passBB); - setName(ctx.emission_context, phi, "guard_res"); - return phi; + return res[0]; } template @@ -1755,7 +2045,7 @@ static std::pair emit_isa(jl_codectx_t &ctx, const jl_cgval_t &x, // declare that the pointer is legal (for zero bytes) even though it might be undef. static Value *emit_isa_and_defined(jl_codectx_t &ctx, const jl_cgval_t &val, jl_value_t *typ) { - return emit_nullcheck_guard(ctx, val.ispointer() ? val.V : nullptr, [&] { + return emit_nullcheck_guard(ctx, val.inline_roots.empty() && val.ispointer() ? 
val.V : nullptr, [&] { return emit_isa(ctx, val, typ, Twine()).first; }); } @@ -1838,6 +2128,9 @@ static Value *emit_bounds_check(jl_codectx_t &ctx, const jl_cgval_t &ainfo, jl_v if (ainfo.isghost) { a = Constant::getNullValue(getPointerTy(ctx.builder.getContext())); } + else if (!ainfo.inline_roots.empty()) { + a = value_to_pointer(ctx, ainfo).V; + } else if (!ainfo.ispointer()) { // CreateAlloca is OK here since we are on an error branch Value *tempSpace = ctx.builder.CreateAlloca(a->getType()); @@ -1869,6 +2162,7 @@ static Value *CreateSimplifiedExtractValue(jl_codectx_t &ctx, Value *Agg, ArrayR static void emit_write_barrier(jl_codectx_t&, Value*, ArrayRef); static void emit_write_barrier(jl_codectx_t&, Value*, Value*); static void emit_write_multibarrier(jl_codectx_t&, Value*, Value*, jl_value_t*); +static void emit_write_multibarrier(jl_codectx_t &ctx, Value *parent, const jl_cgval_t &x); SmallVector first_ptr(Type *T) { @@ -1930,7 +2224,6 @@ static jl_cgval_t typed_load(jl_codectx_t &ctx, Value *ptr, Value *idx_0based, j bool maybe_null_if_boxed = true, unsigned alignment = 0, Value **nullcheck = nullptr) { - // TODO: we should use unordered loads for anything with CountTrackedPointers(elty).count > 0 (if not otherwise locked) Type *elty = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, jltype); if (type_is_ghost(elty)) { if (isStrongerThanMonotonic(Order)) @@ -1941,74 +2234,71 @@ static jl_cgval_t typed_load(jl_codectx_t &ctx, Value *ptr, Value *idx_0based, j alignment = sizeof(void*); else if (!alignment) alignment = julia_alignment(jltype); + if (idx_0based) + ptr = ctx.builder.CreateInBoundsGEP(elty, ptr, idx_0based); unsigned nb = isboxed ? sizeof(void*) : jl_datatype_size(jltype); // note that nb == jl_Module->getDataLayout().getTypeAllocSize(elty) or getTypeStoreSize, depending on whether it is a struct or primitive type AllocaInst *intcast = NULL; - if (Order == AtomicOrdering::NotAtomic) { - if (!isboxed && !aliasscope && elty->isAggregateType() && !CountTrackedPointers(elty).count) { - intcast = emit_static_alloca(ctx, elty, Align(alignment)); - setName(ctx.emission_context, intcast, "aggregate_load_box"); + if (Order == AtomicOrdering::NotAtomic && !isboxed && !aliasscope && elty->isAggregateType() && !jl_is_genericmemoryref_type(jltype)) { + // use split_value to do this load + auto src = mark_julia_slot(ptr, jltype, NULL, tbaa); + auto copy = split_value(ctx, src, Align(alignment)); + if (maybe_null_if_boxed && !copy.second.empty()) { + null_pointer_check(ctx, copy.second[0], nullcheck); } + return mark_julia_slot(copy.first, jltype, NULL, ctx.tbaa().tbaa_stack, copy.second); } - else { + Type *realelty = elty; + if (Order != AtomicOrdering::NotAtomic) { if (!isboxed && !elty->isIntOrPtrTy()) { intcast = emit_static_alloca(ctx, elty, Align(alignment)); setName(ctx.emission_context, intcast, "atomic_load_box"); - elty = Type::getIntNTy(ctx.builder.getContext(), 8 * nb); + realelty = elty = Type::getIntNTy(ctx.builder.getContext(), 8 * nb); + } + if (isa(elty)) { + unsigned nb2 = PowerOf2Ceil(nb); + if (nb != nb2) + elty = Type::getIntNTy(ctx.builder.getContext(), 8 * nb2); } } - Type *realelty = elty; - if (Order != AtomicOrdering::NotAtomic && isa(elty)) { - unsigned nb2 = PowerOf2Ceil(nb); - if (nb != nb2) - elty = Type::getIntNTy(ctx.builder.getContext(), 8 * nb2); - } - Value *data = ptr; - if (idx_0based) - data = ctx.builder.CreateInBoundsGEP(elty, data, idx_0based); Value *instr = nullptr; - if (intcast && Order == AtomicOrdering::NotAtomic) { - 
emit_memcpy(ctx, intcast, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), data, jl_aliasinfo_t::fromTBAA(ctx, tbaa), nb, Align(alignment), intcast->getAlign()); + if (!isboxed && jl_is_genericmemoryref_type(jltype)) { + // load these FCA as individual fields, so LLVM does not need to split them later + Value *fld0 = ctx.builder.CreateStructGEP(elty, ptr, 0); + LoadInst *load0 = ctx.builder.CreateAlignedLoad(elty->getStructElementType(0), fld0, Align(alignment), false); + load0->setOrdering(Order); + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); + ai.scope = MDNode::concatenate(aliasscope, ai.scope); + ai.decorateInst(load0); + Value *fld1 = ctx.builder.CreateStructGEP(elty, ptr, 1); + LoadInst *load1 = ctx.builder.CreateAlignedLoad(elty->getStructElementType(1), fld1, Align(alignment), false); + static_assert(offsetof(jl_genericmemoryref_t, ptr_or_offset) == 0, "wrong field order"); + maybe_mark_load_dereferenceable(load1, true, sizeof(void*)*2, alignof(void*)); + load1->setOrdering(Order); + ai.decorateInst(load1); + instr = Constant::getNullValue(elty); + instr = ctx.builder.CreateInsertValue(instr, load0, 0); + instr = ctx.builder.CreateInsertValue(instr, load1, 1); } else { - if (!isboxed && jl_is_genericmemoryref_type(jltype)) { - // load these FCA as individual fields, so LLVM does not need to split them later - Value *fld0 = ctx.builder.CreateStructGEP(elty, data, 0); - LoadInst *load0 = ctx.builder.CreateAlignedLoad(elty->getStructElementType(0), fld0, Align(alignment), false); - load0->setOrdering(Order); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); - ai.scope = MDNode::concatenate(aliasscope, ai.scope); - ai.decorateInst(load0); - Value *fld1 = ctx.builder.CreateStructGEP(elty, data, 1); - LoadInst *load1 = ctx.builder.CreateAlignedLoad(elty->getStructElementType(1), fld1, Align(alignment), false); - static_assert(offsetof(jl_genericmemoryref_t, ptr_or_offset) == 0, "wrong field order"); - maybe_mark_load_dereferenceable(load1, true, sizeof(void*)*2, alignof(void*)); - load1->setOrdering(Order); - ai.decorateInst(load1); - instr = Constant::getNullValue(elty); - instr = ctx.builder.CreateInsertValue(instr, load0, 0); - instr = ctx.builder.CreateInsertValue(instr, load1, 1); - } - else { - LoadInst *load = ctx.builder.CreateAlignedLoad(elty, data, Align(alignment), false); - load->setOrdering(Order); - if (isboxed) - maybe_mark_load_dereferenceable(load, true, jltype); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); - ai.scope = MDNode::concatenate(aliasscope, ai.scope); - ai.decorateInst(load); - instr = load; - } - if (elty != realelty) - instr = ctx.builder.CreateTrunc(instr, realelty); - if (intcast) { - ctx.builder.CreateStore(instr, intcast); - instr = nullptr; - } + LoadInst *load = ctx.builder.CreateAlignedLoad(elty, ptr, Align(alignment), false); + load->setOrdering(Order); + if (isboxed) + maybe_mark_load_dereferenceable(load, true, jltype); + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); + ai.scope = MDNode::concatenate(aliasscope, ai.scope); + ai.decorateInst(load); + instr = load; + } + if (elty != realelty) + instr = ctx.builder.CreateTrunc(instr, realelty); + if (intcast) { + ctx.builder.CreateAlignedStore(instr, intcast, Align(alignment)); + instr = nullptr; } if (maybe_null_if_boxed) { if (intcast) - instr = ctx.builder.CreateLoad(intcast->getAllocatedType(), intcast); + instr = ctx.builder.CreateAlignedLoad(intcast->getAllocatedType(), intcast, Align(alignment)); Value *first_ptr = isboxed ? 
instr : extract_first_ptr(ctx, instr); if (first_ptr) null_pointer_check(ctx, first_ptr, nullcheck); @@ -2021,7 +2311,7 @@ static jl_cgval_t typed_load(jl_codectx_t &ctx, Value *ptr, Value *idx_0based, j // ConstantAsMetadata::get(ConstantInt::get(T_int8, 0)), // ConstantAsMetadata::get(ConstantInt::get(T_int8, 2)) })); if (intcast) - instr = ctx.builder.CreateLoad(intcast->getAllocatedType(), intcast); + instr = ctx.builder.CreateAlignedLoad(intcast->getAllocatedType(), intcast, Align(alignment)); instr = ctx.builder.CreateTrunc(instr, getInt1Ty(ctx.builder.getContext())); } if (instr) @@ -2119,7 +2409,7 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx, emit_unbox_store(ctx, rhs, intcast, ctx.tbaa().tbaa_stack, intcast->getAlign()); r = ctx.builder.CreateLoad(realelty, intcast); } - else if (aliasscope || Order != AtomicOrdering::NotAtomic || tracked_pointers) { + else if (aliasscope || Order != AtomicOrdering::NotAtomic || (tracked_pointers && rhs.inline_roots.empty())) { r = emit_unbox(ctx, realelty, rhs, jltype); } if (realelty != elty) @@ -2279,8 +2569,7 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx, if (!tracked_pointers) // oldval is a slot, so put the oldval back ctx.builder.CreateStore(realCompare, intcast); } - else if (Order != AtomicOrdering::NotAtomic) { - assert(!tracked_pointers); + else if (Order != AtomicOrdering::NotAtomic || (tracked_pointers && rhs.inline_roots.empty())) { r = emit_unbox(ctx, realelty, rhs, jltype); } if (realelty != elty) @@ -2393,23 +2682,30 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx, ctx.builder.SetInsertPoint(DoneBB); if (needlock) emit_lockstate_value(ctx, needlock, false); - if (parent != NULL && r && tracked_pointers && (!isboxed || !type_is_permalloc(rhs.typ))) { + if (parent != NULL && tracked_pointers && (!isboxed || !type_is_permalloc(rhs.typ))) { if (isreplacefield || issetfieldonce) { BasicBlock *BB = BasicBlock::Create(ctx.builder.getContext(), "xchg_wb", ctx.f); DoneBB = BasicBlock::Create(ctx.builder.getContext(), "done_xchg_wb", ctx.f); ctx.builder.CreateCondBr(Success, BB, DoneBB); ctx.builder.SetInsertPoint(BB); } - if (realelty != elty) - r = ctx.builder.Insert(CastInst::Create(Instruction::Trunc, r, realelty)); - if (intcast) { - ctx.builder.CreateStore(r, intcast); - r = ctx.builder.CreateLoad(intcast_eltyp, intcast); + if (r) { + if (realelty != elty) + r = ctx.builder.Insert(CastInst::Create(Instruction::Trunc, r, realelty)); + if (intcast) { + ctx.builder.CreateStore(r, intcast); + r = ctx.builder.CreateLoad(intcast_eltyp, intcast); + } + if (!isboxed) + emit_write_multibarrier(ctx, parent, r, rhs.typ); + else + emit_write_barrier(ctx, parent, r); + } + else { + assert(!isboxed); + assert(!rhs.inline_roots.empty()); + emit_write_multibarrier(ctx, parent, rhs); } - if (!isboxed) - emit_write_multibarrier(ctx, parent, r, rhs.typ); - else if (!type_is_permalloc(rhs.typ)) - emit_write_barrier(ctx, parent, r); if (isreplacefield || issetfieldonce) { ctx.builder.CreateBr(DoneBB); ctx.builder.SetInsertPoint(DoneBB); @@ -2524,7 +2820,7 @@ static bool emit_getfield_unknownidx(jl_codectx_t &ctx, } assert(!jl_is_vecelement_type((jl_value_t*)stt)); - if (!strct.ispointer()) { // unboxed + if (strct.inline_roots.empty() && !strct.ispointer()) { // unboxed assert(jl_is_concrete_immutable((jl_value_t*)stt)); bool isboxed = is_datatype_all_pointers(stt); jl_svec_t *types = stt->types; @@ -2580,7 +2876,8 @@ static bool emit_getfield_unknownidx(jl_codectx_t &ctx, } bool maybeatomic = stt->name->atomicfields != NULL; - if 
(strct.ispointer() && !maybeatomic) { // boxed or stack + if ((strct.inline_roots.empty() && strct.ispointer()) && !maybeatomic) { // boxed or stack + // COMBAK: inline_roots support could be implemented for this if (order != jl_memory_order_notatomic && order != jl_memory_order_unspecified) { emit_atomic_error(ctx, "getfield: non-atomic field cannot be accessed atomically"); *ret = jl_cgval_t(); // unreachable @@ -2656,8 +2953,7 @@ static jl_cgval_t emit_unionload(jl_codectx_t &ctx, Value *addr, Value *ptindex, Value *tindex = ctx.builder.CreateNUWAdd(ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 1), tindex0); if (fsz > 0 && mutabl) { // move value to an immutable stack slot (excluding tindex) - Type *AT = ArrayType::get(IntegerType::get(ctx.builder.getContext(), 8 * al), (fsz + al - 1) / al); - AllocaInst *lv = emit_static_alloca(ctx, AT, Align(al)); + AllocaInst *lv = emit_static_alloca(ctx, fsz, Align(al)); setName(ctx.emission_context, lv, "immutable_union"); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); emit_memcpy(ctx, lv, ai, addr, ai, fsz, Align(al), Align(al)); @@ -2825,7 +3121,41 @@ static jl_cgval_t emit_getfield_knownidx(jl_codectx_t &ctx, const jl_cgval_t &st } bool maybe_null = field_may_be_null(strct, jt, idx); size_t byte_offset = jl_field_offset(jt, idx); - if (strct.ispointer()) { + if (!strct.inline_roots.empty()) { + assert(!isatomic && !needlock); + auto tbaa = best_field_tbaa(ctx, strct, jt, idx, byte_offset); + auto offsets = split_value_field(jt, idx); + bool hasptr = offsets.second >= 0; + assert(hasptr == jl_field_isptr(jt, idx) || jl_type_hasptr(jfty)); + ArrayRef roots; + if (hasptr) { + roots = ArrayRef(strct.inline_roots).slice(offsets.second, jl_field_isptr(jt, idx) ? 1 : ((jl_datatype_t*)jfty)->layout->npointers); + if (maybe_null) + null_pointer_check(ctx, roots[0], nullcheck); + } + if (jl_field_isptr(jt, idx)) { + return mark_julia_type(ctx, roots[0], true, jfty); + } + Value *addr = offsets.first < 0 ? nullptr : offsets.first == 0 ? 
strct.V : emit_ptrgep(ctx, strct.V, offsets.first); + if (jl_is_uniontype(jfty)) { + size_t fsz = 0, al = 0; + int union_max = jl_islayout_inline(jfty, &fsz, &al); + size_t fsz1 = jl_field_size(jt, idx) - 1; + bool isptr = (union_max == 0); + assert(!isptr && fsz < jl_field_size(jt, idx)); (void)isptr; + Value *ptindex = emit_ptrgep(ctx, addr, fsz1); + return emit_unionload(ctx, addr, ptindex, jfty, fsz, al, tbaa, false, union_max, strct.tbaa); + } + else if (jfty == (jl_value_t*)jl_bool_type) { + unsigned align = jl_field_align(jt, idx); + return typed_load(ctx, addr, NULL, jfty, tbaa, nullptr, false, + AtomicOrdering::NotAtomic, maybe_null, align, nullcheck); + } + else { + return mark_julia_slot(addr, jfty, nullptr, tbaa, roots); + } + } + else if (strct.ispointer()) { auto tbaa = best_field_tbaa(ctx, strct, jt, idx, byte_offset); Value *staddr = data_pointer(ctx, strct); Value *addr; @@ -2901,8 +3231,7 @@ static jl_cgval_t emit_getfield_knownidx(jl_codectx_t &ctx, const jl_cgval_t &st unsigned st_idx = convert_struct_offset(ctx, T, byte_offset); IntegerType *ET = cast(T->getStructElementType(st_idx)); unsigned align = (ET->getBitWidth() + 7) / 8; - lv = emit_static_alloca(ctx, ET, Align(align)); - lv->setOperand(0, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), (fsz + align - 1) / align)); + lv = emit_static_alloca(ctx, fsz, Align(align)); // emit all of the align-sized words unsigned i = 0; for (; i < fsz / align; i++) { @@ -3079,16 +3408,12 @@ static void init_bits_value(jl_codectx_t &ctx, Value *newv, Value *v, MDNode *tb ai.decorateInst(ctx.builder.CreateAlignedStore(v, newv, alignment)); } -static void init_bits_cgval(jl_codectx_t &ctx, Value *newv, const jl_cgval_t& v, MDNode *tbaa) +static void init_bits_cgval(jl_codectx_t &ctx, Value *newv, const jl_cgval_t &v) { - // newv should already be tagged - if (v.ispointer()) { - unsigned align = std::max(julia_alignment(v.typ), (unsigned)sizeof(void*)); - emit_memcpy(ctx, newv, jl_aliasinfo_t::fromTBAA(ctx, tbaa), v, jl_datatype_size(v.typ), Align(align), Align(julia_alignment(v.typ))); - } - else { - init_bits_value(ctx, newv, v.V, tbaa); - } + MDNode *tbaa = jl_is_mutable(v.typ) ? 
ctx.tbaa().tbaa_mutab : ctx.tbaa().tbaa_immut; + Align newv_align{std::max(julia_alignment(v.typ), (unsigned)sizeof(void*))}; + newv = maybe_decay_tracked(ctx, newv); + emit_unbox_store(ctx, v, newv, tbaa, newv_align); } static jl_value_t *static_constant_instance(const llvm::DataLayout &DL, Constant *constant, jl_value_t *jt) @@ -3205,7 +3530,7 @@ static Value *_boxed_special(jl_codectx_t &ctx, const jl_cgval_t &vinfo, Type *t if (t == getInt1Ty(ctx.builder.getContext())) return track_pjlvalue(ctx, julia_bool(ctx, as_value(ctx, t, vinfo))); - if (ctx.linfo && jl_is_method(ctx.linfo->def.method) && !vinfo.ispointer()) { // don't bother codegen pre-boxing for toplevel + if (ctx.linfo && jl_is_method(ctx.linfo->def.method) && vinfo.inline_roots.empty() && !vinfo.ispointer()) { // don't bother codegen pre-boxing for toplevel if (Constant *c = dyn_cast(vinfo.V)) { jl_value_t *s = static_constant_instance(jl_Module->getDataLayout(), c, jt); if (s) { @@ -3320,9 +3645,8 @@ static AllocaInst *try_emit_union_alloca(jl_codectx_t &ctx, jl_uniontype_t *ut, union_alloca_type(ut, allunbox, nbytes, align, min_align); if (nbytes > 0) { // at least some of the values can live on the stack - // try to pick an Integer type size such that SROA will emit reasonable code - Type *AT = ArrayType::get(IntegerType::get(ctx.builder.getContext(), 8 * min_align), (nbytes + min_align - 1) / min_align); - AllocaInst *lv = emit_static_alloca(ctx, AT, Align(align)); + assert(align % min_align == 0); + AllocaInst *lv = emit_static_alloca(ctx, nbytes, Align(align)); setName(ctx.emission_context, lv, "unionalloca"); return lv; } @@ -3379,7 +3703,7 @@ static Value *box_union(jl_codectx_t &ctx, const jl_cgval_t &vinfo, const SmallB if (!box) { box = emit_allocobj(ctx, jt, true); setName(ctx.emission_context, box, "unionbox"); - init_bits_cgval(ctx, box, vinfo_r, jl_is_mutable(jt) ? ctx.tbaa().tbaa_mutab : ctx.tbaa().tbaa_immut); + init_bits_cgval(ctx, box, vinfo_r); } } tempBB = ctx.builder.GetInsertBlock(); // could have changed @@ -3502,14 +3826,14 @@ static Value *boxed(jl_codectx_t &ctx, const jl_cgval_t &vinfo, bool is_promotab box = box_union(ctx, vinfo, skip_none); } else { - assert(vinfo.V && "Missing data for unboxed value."); + assert((vinfo.V || !vinfo.inline_roots.empty()) && "Missing data for unboxed value."); assert(jl_is_concrete_immutable(jt) && "This type shouldn't have been unboxed."); Type *t = julia_type_to_llvm(ctx, jt); assert(!type_is_ghost(t)); // ghost values should have been handled by vinfo.constant above! box = _boxed_special(ctx, vinfo, t); if (!box) { bool do_promote = vinfo.promotion_point; - if (do_promote && is_promotable) { + if (do_promote && is_promotable && vinfo.inline_roots.empty()) { auto IP = ctx.builder.saveIP(); ctx.builder.SetInsertPoint(vinfo.promotion_point); box = emit_allocobj(ctx, (jl_datatype_t*)jt, true); @@ -3523,13 +3847,14 @@ static Value *boxed(jl_codectx_t &ctx, const jl_cgval_t &vinfo, bool is_promotab // end illegal IR originalAlloca->eraseFromParent(); ctx.builder.restoreIP(IP); - } else { + } + else { auto arg_typename = [&] JL_NOTSAFEPOINT { return "box::" + std::string(jl_symbol_name(((jl_datatype_t*)(jt))->name->name)); }; box = emit_allocobj(ctx, (jl_datatype_t*)jt, true); setName(ctx.emission_context, box, arg_typename); - init_bits_cgval(ctx, box, vinfo, jl_is_mutable(jt) ? 
ctx.tbaa().tbaa_mutab : ctx.tbaa().tbaa_immut); + init_bits_cgval(ctx, box, vinfo); } } } @@ -3542,30 +3867,25 @@ static void emit_unionmove(jl_codectx_t &ctx, Value *dest, MDNode *tbaa_dst, con if (AllocaInst *ai = dyn_cast(dest)) // TODO: make this a lifetime_end & dereferenceable annotation? ctx.builder.CreateAlignedStore(UndefValue::get(ai->getAllocatedType()), ai, ai->getAlign()); - if (jl_is_concrete_type(src.typ) || src.constant) { - jl_value_t *typ = src.constant ? jl_typeof(src.constant) : src.typ; + if (src.constant) { + jl_value_t *typ = jl_typeof(src.constant); assert(skip || jl_is_pointerfree(typ)); if (jl_is_pointerfree(typ)) { - unsigned alignment = julia_alignment(typ); - if (!src.ispointer() || src.constant) { + emit_guarded_test(ctx, skip, nullptr, [&] { + unsigned alignment = julia_alignment(typ); + emit_unbox_store(ctx, mark_julia_const(ctx, src.constant), dest, tbaa_dst, Align(alignment), isVolatile); + return nullptr; + }); + } + } + else if (jl_is_concrete_type(src.typ)) { + assert(skip || jl_is_pointerfree(src.typ)); + if (jl_is_pointerfree(src.typ)) { + emit_guarded_test(ctx, skip, nullptr, [&] { + unsigned alignment = julia_alignment(src.typ); emit_unbox_store(ctx, src, dest, tbaa_dst, Align(alignment), isVolatile); - } - else { - Value *src_ptr = data_pointer(ctx, src); - unsigned nb = jl_datatype_size(typ); - // TODO: this branch may be bad for performance, but is necessary to work around LLVM bugs with the undef option that we want to use: - // select copy dest -> dest to simulate an undef value / conditional copy - // if (skip) src_ptr = ctx.builder.CreateSelect(skip, dest, src_ptr); - auto f = [&] { - (void)emit_memcpy(ctx, dest, jl_aliasinfo_t::fromTBAA(ctx, tbaa_dst), src_ptr, - jl_aliasinfo_t::fromTBAA(ctx, src.tbaa), nb, Align(alignment), Align(alignment), isVolatile); - return nullptr; - }; - if (skip) - emit_guarded_test(ctx, skip, nullptr, f); - else - f(); - } + return nullptr; + }); } } else if (src.TIndex) { @@ -3615,17 +3935,13 @@ static void emit_unionmove(jl_codectx_t &ctx, Value *dest, MDNode *tbaa_dst, con } else { assert(src.isboxed && "expected boxed value for sizeof/alignment computation"); - auto f = [&] { + emit_guarded_test(ctx, skip, nullptr, [&] { Value *datatype = emit_typeof(ctx, src, false, false); Value *copy_bytes = emit_datatype_size(ctx, datatype); - (void)emit_memcpy(ctx, dest, jl_aliasinfo_t::fromTBAA(ctx, tbaa_dst), data_pointer(ctx, src), - jl_aliasinfo_t::fromTBAA(ctx, src.tbaa), copy_bytes, Align(1), Align(1), isVolatile); + emit_memcpy(ctx, dest, jl_aliasinfo_t::fromTBAA(ctx, tbaa_dst), data_pointer(ctx, src), + jl_aliasinfo_t::fromTBAA(ctx, src.tbaa), copy_bytes, Align(1), Align(1), isVolatile); return nullptr; - }; - if (skip) - emit_guarded_test(ctx, skip, nullptr, f); - else - f(); + }); } } @@ -3714,6 +4030,12 @@ static void emit_write_multibarrier(jl_codectx_t &ctx, Value *parent, Value *agg emit_write_barrier(ctx, parent, ptrs); } +static void emit_write_multibarrier(jl_codectx_t &ctx, Value *parent, const jl_cgval_t &x) +{ + auto ptrs = get_gc_roots_for(ctx, x, true); + emit_write_barrier(ctx, parent, ptrs); +} + static jl_cgval_t union_store(jl_codectx_t &ctx, Value *ptr, Value *ptindex, jl_cgval_t rhs, jl_cgval_t cmp, jl_value_t *jltype, MDNode *tbaa, MDNode *tbaa_tindex, @@ -3854,25 +4176,24 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg Type *lt = julia_type_to_llvm(ctx, ty); unsigned na = nargs < nf ? 
nargs : nf; - // whether we should perform the initialization with the struct as a IR value - // or instead initialize the stack buffer with stores - auto tracked = CountTrackedPointers(lt); + // choose whether we should perform the initialization with the struct as a IR value + // or instead initialize the stack buffer with stores (the later is nearly always better) + auto tracked = split_value_size(sty); + assert(CountTrackedPointers(lt).count == tracked.second); bool init_as_value = false; if (lt->isVectorTy() || jl_is_vecelement_type(ty)) { // maybe also check the size ? init_as_value = true; } - else if (tracked.count) { - init_as_value = true; - } Instruction *promotion_point = nullptr; ssize_t promotion_ssa = -1; Value *strct; + SmallVector inline_roots; if (type_is_ghost(lt)) { - strct = NULL; + strct = nullptr; } else if (init_as_value) { - if (tracked.count) { + if (tracked.second) { strct = Constant::getNullValue(lt); } else { @@ -3881,11 +4202,19 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg strct = ctx.builder.CreateFreeze(strct); } } + else if (tracked.second) { + inline_roots.resize(tracked.second, Constant::getNullValue(ctx.types().T_prjlvalue)); + strct = nullptr; + if (tracked.first) { + AllocaInst *bits = emit_static_alloca(ctx, tracked.first, Align(julia_alignment(ty))); + strct = bits; + setName(ctx.emission_context, bits, arg_typename); + is_promotable = false; // wrong layout for promotion + } + } else { strct = emit_static_alloca(ctx, lt, Align(julia_alignment(ty))); setName(ctx.emission_context, strct, arg_typename); - if (tracked.count) - undef_derived_strct(ctx, strct, sty, ctx.tbaa().tbaa_stack); } for (unsigned i = 0; i < na; i++) { @@ -3897,25 +4226,32 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg fval_info = update_julia_type(ctx, fval_info, jtype); if (fval_info.typ == jl_bottom_type) return jl_cgval_t(); + if (type_is_ghost(lt)) + continue; + Type *fty = julia_type_to_llvm(ctx, jtype); + if (type_is_ghost(fty)) + continue; + Instruction *dest = nullptr; + MutableArrayRef roots; + ssize_t offs = jl_field_offset(sty, i); + ssize_t ptrsoffs = -1; + if (!inline_roots.empty()) + std::tie(offs, ptrsoffs) = split_value_field(sty, i); + unsigned llvm_idx = init_as_value ? ((i > 0 && isa(lt)) ? convert_struct_offset(ctx, lt, offs) : i) : -1u; // TODO: Use (post-)domination instead. bool field_promotable = !jl_is_uniontype(jtype) && !init_as_value && fval_info.promotion_ssa != -1 && + fval_info.inline_roots.empty() && inline_roots.empty() && // these need to be compatible, if they were to be implemented fval_info.promotion_point && fval_info.promotion_point->getParent() == ctx.builder.GetInsertBlock(); if (field_promotable) { savedIP = ctx.builder.saveIP(); ctx.builder.SetInsertPoint(fval_info.promotion_point); } - if (type_is_ghost(lt)) - continue; - Type *fty = julia_type_to_llvm(ctx, jtype); - if (type_is_ghost(fty)) - continue; - Value *dest = NULL; - unsigned offs = jl_field_offset(sty, i); - unsigned llvm_idx = (i > 0 && isa(lt)) ? convert_struct_offset(ctx, lt, offs) : i; if (!init_as_value) { // avoid unboxing the argument explicitly // and use memcpy instead - Instruction *inst = cast(emit_ptrgep(ctx, strct, offs)); + Instruction *inst = strct && offs >= 0 ? cast(emit_ptrgep(ctx, strct, offs)) : nullptr; + if (!inline_roots.empty() && ptrsoffs >= 0) + roots = MutableArrayRef(inline_roots).slice(ptrsoffs, jl_field_isptr(sty, i) ? 
1 : ((jl_datatype_t*)jtype)->layout->npointers); dest = inst; // Our promotion point needs to come before // A) All of our arguments' promotion points @@ -3936,10 +4272,13 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg if (jl_field_isptr(sty, i)) { fval = boxed(ctx, fval_info, field_promotable); if (!init_as_value) { - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); - StoreInst *SI = cast(ai.decorateInst( - ctx.builder.CreateAlignedStore(fval, dest, Align(jl_field_align(sty, i))))); - SI->setOrdering(AtomicOrdering::Unordered); + if (dest) { + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); + ai.decorateInst(ctx.builder.CreateAlignedStore(fval, dest, Align(jl_field_align(sty, i)))); + } + else { + roots[0] = fval; + } } } else if (jl_is_uniontype(jtype)) { @@ -3962,9 +4301,8 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg if (fsz1 > 0 && !fval_info.isghost) { Type *ET = IntegerType::get(ctx.builder.getContext(), 8 * al); assert(lt->getStructElementType(llvm_idx) == ET); - AllocaInst *lv = emit_static_alloca(ctx, ET, Align(al)); + AllocaInst *lv = emit_static_alloca(ctx, fsz1, Align(al)); setName(ctx.emission_context, lv, "unioninit"); - lv->setOperand(0, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), (fsz1 + al - 1) / al)); emit_unionmove(ctx, lv, ctx.tbaa().tbaa_stack, fval_info, nullptr); // emit all of the align-sized words unsigned i = 0; @@ -4002,9 +4340,14 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg if (field_promotable) { fval_info.V->replaceAllUsesWith(dest); cast(fval_info.V)->eraseFromParent(); - } else if (init_as_value) { + } + else if (init_as_value) { fval = emit_unbox(ctx, fty, fval_info, jtype); - } else { + } + else if (!roots.empty()) { + split_value_into(ctx, fval_info, Align(julia_alignment(jtype)), dest, Align(jl_field_align(sty, i)), jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), roots); + } + else { emit_unbox_store(ctx, fval_info, dest, ctx.tbaa().tbaa_stack, Align(jl_field_align(sty, i))); } } @@ -4025,7 +4368,11 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg } for (size_t i = nargs; i < nf; i++) { if (!jl_field_isptr(sty, i) && jl_is_uniontype(jl_field_type(sty, i))) { - unsigned offs = jl_field_offset(sty, i); + ssize_t offs = jl_field_offset(sty, i); + ssize_t ptrsoffs = -1; + if (!inline_roots.empty()) + std::tie(offs, ptrsoffs) = split_value_field(sty, i); + assert(ptrsoffs < 0 && offs >= 0); int fsz = jl_field_size(sty, i) - 1; if (init_as_value) { unsigned llvm_idx = convert_struct_offset(ctx, cast(lt), offs + fsz); @@ -4033,19 +4380,23 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg } else { jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_unionselbyte); - ai.decorateInst(ctx.builder.CreateAlignedStore( - ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0), - emit_ptrgep(ctx, strct, offs + fsz), - Align(1))); + Instruction *dest = cast(emit_ptrgep(ctx, strct, offs + fsz)); + if (promotion_point == nullptr) + promotion_point = dest; + ai.decorateInst(ctx.builder.CreateAlignedStore(ctx.builder.getInt8(0), dest, Align(1))); } } } - if (promotion_point && nargs < nf) { + if (nargs < nf) { assert(!init_as_value); IRBuilderBase::InsertPoint savedIP = ctx.builder.saveIP(); - ctx.builder.SetInsertPoint(promotion_point); - promotion_point = cast(ctx.builder.CreateFreeze(UndefValue::get(lt))); - 
ctx.builder.CreateStore(promotion_point, strct);
+        if (promotion_point)
+            ctx.builder.SetInsertPoint(promotion_point);
+        if (strct) {
+            promotion_point = cast(ctx.builder.CreateFreeze(UndefValue::get(lt)));
+            jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack);
+            ai.decorateInst(ctx.builder.CreateStore(promotion_point, strct));
+        }
         ctx.builder.restoreIP(savedIP);
     }
     if (type_is_ghost(lt))
@@ -4053,7 +4404,7 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg
     else if (init_as_value)
         return mark_julia_type(ctx, strct, false, ty);
     else {
-        jl_cgval_t ret = mark_julia_slot(strct, ty, NULL, ctx.tbaa().tbaa_stack);
+        jl_cgval_t ret = mark_julia_slot(strct, ty, NULL, ctx.tbaa().tbaa_stack, inline_roots);
         if (is_promotable && promotion_point) {
             ret.promotion_point = promotion_point;
             ret.promotion_ssa = promotion_ssa;
@@ -4157,7 +4508,20 @@ static jl_cgval_t _emit_memoryref(jl_codectx_t &ctx, const jl_cgval_t &mem, cons
 
 static Value *emit_memoryref_FCA(jl_codectx_t &ctx, const jl_cgval_t &ref, const jl_datatype_layout_t *layout)
 {
-    if (ref.ispointer()) {
+    if (!ref.inline_roots.empty()) {
+        LLVMContext &C = ctx.builder.getContext();
+        StructType *type = get_memoryref_type(C, ctx.types().T_size, layout, 0);
+        LoadInst *load0 = ctx.builder.CreateLoad(type->getElementType(0), ref.V);
+        jl_aliasinfo_t ai0 = jl_aliasinfo_t::fromTBAA(ctx, ref.tbaa);
+        ai0.decorateInst(load0);
+        setName(ctx.emission_context, load0, "memory_ref_FCA0");
+        Value *root = ctx.builder.CreateBitCast(ref.inline_roots[0], type->getElementType(1));
+        Value *load = Constant::getNullValue(type);
+        load = ctx.builder.CreateInsertValue(load, load0, 0);
+        load = ctx.builder.CreateInsertValue(load, root, 1);
+        return load;
+    }
+    else if (ref.ispointer()) {
         LLVMContext &C = ctx.builder.getContext();
         Type *type = get_memoryref_type(C, ctx.types().T_size, layout, 0);
         LoadInst *load = ctx.builder.CreateLoad(type, data_pointer(ctx, ref));
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 6d4ecc63e5ca1..c719f4ff54078 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -1816,11 +1816,12 @@ struct jl_cgval_t {
     Value *Vboxed;
     Value *TIndex; // if `V` is an unboxed (tagged) Union described by `typ`, this gives the DataType index (1-based, small int) as an i8
+    SmallVector inline_roots; // if present, `V` is a pointer, but not in canonical layout
     jl_value_t *constant; // constant value (rooted in linfo.def.roots)
-    jl_value_t *typ; // the original type of V, never NULL
+    jl_value_t *typ; // the original type of V, never nullptr
     bool isboxed; // whether this value is a jl_value_t* allocated on the heap with the right type tag
     bool isghost; // whether this value is "ghost"
-    MDNode *tbaa; // The related tbaa node. Non-NULL iff this holds an address.
+    MDNode *tbaa; // The related tbaa node. Non-nullptr iff this holds an address.
     // If non-null, this memory location may be promoted on use, by hoisting the
     // destination memory above the promotion point.
     Instruction *promotion_point;
@@ -1831,13 +1832,15 @@ struct jl_cgval_t {
     bool ispointer() const
     {
         // whether this value is compatible with `data_pointer`
+        assert(inline_roots.empty());
         return tbaa != nullptr;
     }
     jl_cgval_t(Value *Vval, jl_value_t *typ, Value *tindex) : // general value constructor
-        V(Vval), // V is allowed to be NULL in a jl_varinfo_t context, but not during codegen contexts
+        V(Vval), // V is allowed to be nullptr in a jl_varinfo_t context, but not during codegen contexts
         Vboxed(nullptr),
         TIndex(tindex),
-        constant(NULL),
+        inline_roots(),
+        constant(nullptr),
         typ(typ),
         isboxed(false),
         isghost(false),
@@ -1845,13 +1848,15 @@ struct jl_cgval_t {
         promotion_point(nullptr),
         promotion_ssa(-1)
     {
-        assert(TIndex == NULL || TIndex->getType() == getInt8Ty(TIndex->getContext()));
+        assert(TIndex == nullptr || TIndex->getType() == getInt8Ty(TIndex->getContext()));
     }
-    jl_cgval_t(Value *Vptr, bool isboxed, jl_value_t *typ, Value *tindex, MDNode *tbaa) : // general pointer constructor
+    jl_cgval_t(Value *Vptr, bool isboxed, jl_value_t *typ, Value *tindex, MDNode *tbaa, Value* inline_roots) = delete;
+    jl_cgval_t(Value *Vptr, bool isboxed, jl_value_t *typ, Value *tindex, MDNode *tbaa, ArrayRef inline_roots) : // general pointer constructor
         V(Vptr),
         Vboxed(isboxed ? Vptr : nullptr),
         TIndex(tindex),
-        constant(NULL),
+        inline_roots(inline_roots),
+        constant(nullptr),
         typ(typ),
         isboxed(isboxed),
         isghost(false),
@@ -1861,15 +1866,16 @@ struct jl_cgval_t {
     {
         if (Vboxed)
             assert(Vboxed->getType() == JuliaType::get_prjlvalue_ty(Vboxed->getContext()));
-        assert(tbaa != NULL);
-        assert(!(isboxed && TIndex != NULL));
-        assert(TIndex == NULL || TIndex->getType() == getInt8Ty(TIndex->getContext()));
+        assert(tbaa != nullptr);
+        assert(!(isboxed && TIndex != nullptr));
+        assert(TIndex == nullptr || TIndex->getType() == getInt8Ty(TIndex->getContext()));
     }
     explicit jl_cgval_t(jl_value_t *typ) : // ghost value constructor
-        // mark explicit to avoid being used implicitly for conversion from NULL (use jl_cgval_t() instead)
-        V(NULL),
-        Vboxed(NULL),
-        TIndex(NULL),
+        // mark explicit to avoid being used implicitly for conversion from nullptr (use jl_cgval_t() instead)
+        V(nullptr),
+        Vboxed(nullptr),
+        TIndex(nullptr),
+        inline_roots(),
         constant(((jl_datatype_t*)typ)->instance),
         typ(typ),
         isboxed(false),
@@ -1885,6 +1891,7 @@ struct jl_cgval_t {
         V(v.V),
         Vboxed(v.Vboxed),
         TIndex(tindex),
+        inline_roots(v.inline_roots),
         constant(v.constant),
         typ(typ),
         isboxed(v.isboxed),
@@ -1898,17 +1905,18 @@ struct jl_cgval_t {
         // this constructor expects we had a badly or equivalently typed version
         // make sure we aren't discarding the actual type information
         if (v.TIndex) {
-            assert((TIndex == NULL) == jl_is_concrete_type(typ));
+            assert((TIndex == nullptr) == jl_is_concrete_type(typ));
         }
         else {
            assert(isboxed || v.typ == typ || tindex);
         }
     }
     explicit jl_cgval_t() : // undef / unreachable constructor
-        V(NULL),
-        Vboxed(NULL),
-        TIndex(NULL),
-        constant(NULL),
+        V(nullptr),
+        Vboxed(nullptr),
+        TIndex(nullptr),
+        inline_roots(),
+        constant(nullptr),
         typ(jl_bottom_type),
         isboxed(false),
         isghost(true),
@@ -1924,6 +1932,7 @@ struct jl_varinfo_t {
     Instruction *boxroot; // an address, if the var might be in a jl_value_t** stack slot (marked ctx.tbaa().tbaa_const, if appropriate)
     jl_cgval_t value; // a stack slot or constant value
     Value *pTIndex; // i8* stack slot for the value.TIndex tag describing `value.V`
+    AllocaInst *inline_roots; // stack roots for the inline_roots array, if needed
     DILocalVariable *dinfo;
     // if the variable might be used undefined and is not boxed
     // this i1 flag is true when it is defined
@@ -1934,11 +1943,12 @@ struct jl_varinfo_t {
     bool usedUndef;
     bool used;
 
-    jl_varinfo_t(LLVMContext &ctxt) : boxroot(NULL),
+    jl_varinfo_t(LLVMContext &ctxt) : boxroot(nullptr),
                              value(jl_cgval_t()),
-                             pTIndex(NULL),
-                             dinfo(NULL),
-                             defFlag(NULL),
+                             pTIndex(nullptr),
+                             inline_roots(nullptr),
+                             dinfo(nullptr),
+                             defFlag(nullptr),
                              isSA(false),
                              isVolatile(false),
                              isArgument(false),
@@ -1962,7 +1972,7 @@ class jl_codectx_t {
     std::map phic_slots;
     std::map > scope_restore;
     SmallVector SAvalues;
-    SmallVector, 0> PhiNodes;
+    SmallVector, jl_value_t *>, 0> PhiNodes;
     SmallVector ssavalue_assigned;
     SmallVector ssavalue_usecount;
     jl_module_t *module = NULL;
@@ -2110,7 +2120,8 @@ jl_aliasinfo_t jl_aliasinfo_t::fromTBAA(jl_codectx_t &ctx, MDNode *tbaa) {
 }
 
 static Type *julia_type_to_llvm(jl_codectx_t &ctx, jl_value_t *jt, bool *isboxed = NULL);
-static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure, bool gcstack_arg, BitVector *used_arguments=nullptr, size_t *args_begin=nullptr);
+static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure, bool gcstack_arg,
+                                            ArrayRef ArgNames=None, unsigned nreq=0);
 static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaval = -1);
 static Value *global_binding_pointer(jl_codectx_t &ctx, jl_module_t *m, jl_sym_t *s, jl_binding_t **pbnd, bool assign, bool alloc);
@@ -2133,6 +2144,7 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR
 static Value *literal_pointer_val(jl_codectx_t &ctx, jl_value_t *p);
 static unsigned julia_alignment(jl_value_t *jt);
+static void recombine_value(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dst, jl_aliasinfo_t const &dst_ai, Align alignment, bool isVolatile);
 
 static GlobalVariable *prepare_global_in(Module *M, JuliaVariable *G)
 {
@@ -2217,6 +2229,28 @@ static AllocaInst *emit_static_alloca(jl_codectx_t &ctx, Type *lty, Align align)
     return new AllocaInst(lty, ctx.topalloca->getModule()->getDataLayout().getAllocaAddrSpace(), nullptr, align, "", /*InsertBefore=*/ctx.topalloca);
 }
 
+static AllocaInst *emit_static_alloca(jl_codectx_t &ctx, unsigned nb, Align align)
+{
+    // Stupid hack: SROA takes hints from the element type, and will happily split this allocation into lots of unaligned bits
+    // if it cannot find something better to do, which is terrible for performance.
+    // However, if we emit this with an element size equal to the alignment, it will instead split it into aligned chunks
+    // which is great for performance and vectorization.
+    if (alignTo(nb, align) == align.value()) // don't bother with making an array of length 1
+        return emit_static_alloca(ctx, ctx.builder.getIntNTy(align.value() * 8), align);
+    return emit_static_alloca(ctx, ArrayType::get(ctx.builder.getIntNTy(align.value() * 8), alignTo(nb, align) / align.value()), align);
+}
+
+static AllocaInst *emit_static_roots(jl_codectx_t &ctx, unsigned nroots)
+{
+    AllocaInst *staticroots = emit_static_alloca(ctx, ctx.types().T_prjlvalue, Align(sizeof(void*)));
+    staticroots->setOperand(0, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), nroots));
+    IRBuilder<> builder(ctx.topalloca);
+    jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe);
+    // make sure these are nullptr early from LLVM's perspective, in case it decides to SROA it
+    ai.decorateInst(builder.CreateMemSet(staticroots, builder.getInt8(0), nroots * sizeof(void*), staticroots->getAlign()))->moveAfter(ctx.topalloca);
+    return staticroots;
+}
+
 static void undef_derived_strct(jl_codectx_t &ctx, Value *ptr, jl_datatype_t *sty, MDNode *tbaa)
 {
     assert(ptr->getType()->getPointerAddressSpace() != AddressSpace::Tracked);
@@ -2264,7 +2298,7 @@ static inline jl_cgval_t ghostValue(jl_codectx_t &ctx, jl_value_t *typ)
     if (jl_is_type_type(typ)) {
         assert(is_uniquerep_Type(typ));
         // replace T::Type{T} with T, by assuming that T must be a leaftype of some sort
-        jl_cgval_t constant(NULL, true, typ, NULL, best_tbaa(ctx.tbaa(), typ));
+        jl_cgval_t constant(NULL, true, typ, NULL, best_tbaa(ctx.tbaa(), typ), None);
         constant.constant = jl_tparam0(typ);
         if (typ == (jl_value_t*)jl_typeofbottom_type->super)
             constant.isghost = true;
@@ -2288,16 +2322,16 @@ static inline jl_cgval_t mark_julia_const(jl_codectx_t &ctx, jl_value_t *jv)
         if (jl_is_datatype_singleton((jl_datatype_t*)typ))
             return ghostValue(ctx, typ);
     }
-    jl_cgval_t constant(NULL, true, typ, NULL, best_tbaa(ctx.tbaa(), typ));
+    jl_cgval_t constant(NULL, true, typ, NULL, best_tbaa(ctx.tbaa(), typ), None);
     constant.constant = jv;
     return constant;
 }
 
-static inline jl_cgval_t mark_julia_slot(Value *v, jl_value_t *typ, Value *tindex, MDNode *tbaa)
+static inline jl_cgval_t mark_julia_slot(Value *v, jl_value_t *typ, Value *tindex, MDNode *tbaa, ArrayRef inline_roots=None)
 {
     // this enables lazy-copying of immutable values and stack or argument slots
-    jl_cgval_t tagval(v, false, typ, tindex, tbaa);
+    jl_cgval_t tagval(v, false, typ, tindex, tbaa, inline_roots);
     return tagval;
 }
 
@@ -2317,22 +2351,41 @@ static bool valid_as_globalinit(const Value *v) {
 static Value *zext_struct(jl_codectx_t &ctx, Value *V);
 
+// TODO: in the future, assume all callers will handle the interior pointers separately, and have
+// have zext_struct strip them out, so we aren't saving those to the stack here causing shadow stores
+// to be necessary too
 static inline jl_cgval_t value_to_pointer(jl_codectx_t &ctx, Value *v, jl_value_t *typ, Value *tindex)
 {
     Value *loc;
     v = zext_struct(ctx, v);
+    Align align(julia_alignment(typ));
     if (valid_as_globalinit(v)) { // llvm can't handle all the things that could be inside a ConstantExpr
         assert(jl_is_concrete_type(typ)); // not legal to have an unboxed abstract type
-        loc = get_pointer_to_constant(ctx.emission_context, cast(v), Align(julia_alignment(typ)), "_j_const", *jl_Module);
+        loc = get_pointer_to_constant(ctx.emission_context, cast(v), align, "_j_const", *jl_Module);
    }
    else {
-        loc = emit_static_alloca(ctx,
v->getType(), align); + ctx.builder.CreateAlignedStore(v, loc, align); } return mark_julia_slot(loc, typ, tindex, ctx.tbaa().tbaa_stack); } static inline jl_cgval_t value_to_pointer(jl_codectx_t &ctx, const jl_cgval_t &v) { + if (!v.inline_roots.empty()) { + //if (v.V == nullptr) { + // AllocaInst *loc = emit_static_roots(ctx, v.inline_roots.size()); + // for (size_t i = 0; i < v.inline_roots.counts(); i++) + // ctx.builder.CreateAlignedStore(v.inline_roots[i], emit_ptrgep(ctx, loc, i * sizeof(void*)), Align(sizeof(void*))); + // return mark_julia_slot(loc, v.typ, v.TIndex, ctx.tbaa().tbaa_gcframe); + //} + Align align(julia_alignment(v.typ)); + Type *ty = julia_type_to_llvm(ctx, v.typ); + AllocaInst *loc = emit_static_alloca(ctx, ty, align); + auto tbaa = v.V == nullptr ? ctx.tbaa().tbaa_gcframe : ctx.tbaa().tbaa_stack; + auto stack_ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa); + recombine_value(ctx, v, loc, stack_ai, align, false); + return mark_julia_slot(loc, v.typ, v.TIndex, tbaa); + } if (v.ispointer()) return v; return value_to_pointer(ctx, v.V, v.typ, v.TIndex); @@ -2354,13 +2407,14 @@ static inline jl_cgval_t mark_julia_type(jl_codectx_t &ctx, Value *v, bool isbox if (type_is_ghost(T)) { return ghostValue(ctx, typ); } - if (v && !isboxed && v->getType()->isAggregateType() && CountTrackedPointers(v->getType()).count == 0) { + if (v && !isboxed && v->getType()->isAggregateType()) { // eagerly put this back onto the stack // llvm mem2reg pass will remove this if unneeded - return value_to_pointer(ctx, v, typ, NULL); + if (CountTrackedPointers(v->getType()).count == 0) + return value_to_pointer(ctx, v, typ, NULL); } if (isboxed) - return jl_cgval_t(v, isboxed, typ, NULL, best_tbaa(ctx.tbaa(), typ)); + return jl_cgval_t(v, isboxed, typ, NULL, best_tbaa(ctx.tbaa(), typ), None); return jl_cgval_t(v, typ, NULL); } @@ -2395,7 +2449,7 @@ static inline jl_cgval_t update_julia_type(jl_codectx_t &ctx, const jl_cgval_t & if (alwaysboxed) { // discovered that this union-split type must actually be isboxed if (v.Vboxed) { - return jl_cgval_t(v.Vboxed, true, typ, NULL, best_tbaa(ctx.tbaa(), typ)); + return jl_cgval_t(v.Vboxed, true, typ, NULL, best_tbaa(ctx.tbaa(), typ), v.inline_roots); } else { // type mismatch (there weren't any boxed values in the union) @@ -2624,14 +2678,14 @@ static jl_cgval_t convert_julia_type_union(jl_codectx_t &ctx, const jl_cgval_t & decay_derived(ctx, boxv), decay_derived(ctx, slotv)); } - jl_cgval_t newv = jl_cgval_t(slotv, false, typ, new_tindex, tbaa); + jl_cgval_t newv = jl_cgval_t(slotv, false, typ, new_tindex, tbaa, v.inline_roots); assert(boxv->getType() == ctx.types().T_prjlvalue); newv.Vboxed = boxv; return newv; } } else { - return jl_cgval_t(boxed(ctx, v), true, typ, NULL, best_tbaa(ctx.tbaa(), typ)); + return jl_cgval_t(boxed(ctx, v), true, typ, NULL, best_tbaa(ctx.tbaa(), typ), None); } return jl_cgval_t(v, typ, new_tindex); } @@ -2662,7 +2716,7 @@ static jl_cgval_t convert_julia_type(jl_codectx_t &ctx, const jl_cgval_t &v, jl_ if (skip) { *skip = ctx.builder.CreateNot(emit_exactly_isa(ctx, v, (jl_datatype_t*)typ, true)); } - return jl_cgval_t(v.Vboxed, true, typ, NULL, best_tbaa(ctx.tbaa(), typ)); + return jl_cgval_t(v.Vboxed, true, typ, NULL, best_tbaa(ctx.tbaa(), typ), v.inline_roots); } if (mustbox_union) { // type mismatch: there weren't any boxed values in the union @@ -2684,7 +2738,7 @@ static jl_cgval_t convert_julia_type(jl_codectx_t &ctx, const jl_cgval_t &v, jl_ unsigned new_idx = get_box_tindex((jl_datatype_t*)v.typ, typ); if (new_idx) { new_tindex 
= ConstantInt::get(getInt8Ty(ctx.builder.getContext()), new_idx); - if (v.V && !v.ispointer()) { + if (v.V && v.inline_roots.empty() && !v.ispointer()) { // TODO: remove this branch once all consumers of v.TIndex understand how to handle a non-ispointer value return jl_cgval_t(value_to_pointer(ctx, v), typ, new_tindex); } @@ -2708,7 +2762,7 @@ static jl_cgval_t convert_julia_type(jl_codectx_t &ctx, const jl_cgval_t &v, jl_ } if (makeboxed) { // convert to a simple isboxed value - return jl_cgval_t(boxed(ctx, v), true, typ, NULL, best_tbaa(ctx.tbaa(), typ)); + return mark_julia_type(ctx, boxed(ctx, v), true, typ); } } return jl_cgval_t(v, typ, new_tindex); @@ -3524,9 +3578,9 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a jl_datatype_t *sty = (jl_datatype_t*)argty; size_t sz = jl_datatype_size(sty); if (sz > 512 && !sty->layout->flags.haspadding && sty->layout->flags.isbitsegal) { - Value *varg1 = arg1.ispointer() ? data_pointer(ctx, arg1) : + Value *varg1 = arg1.inline_roots.empty() && arg1.ispointer() ? data_pointer(ctx, arg1) : value_to_pointer(ctx, arg1).V; - Value *varg2 = arg2.ispointer() ? data_pointer(ctx, arg2) : + Value *varg2 = arg2.inline_roots.empty() && arg2.ispointer() ? data_pointer(ctx, arg2) : value_to_pointer(ctx, arg2).V; varg1 = emit_pointer_from_objref(ctx, varg1); varg2 = emit_pointer_from_objref(ctx, varg2); @@ -3561,9 +3615,9 @@ static Value *emit_bits_compare(jl_codectx_t &ctx, jl_cgval_t arg1, jl_cgval_t a return ctx.builder.CreateICmpEQ(answer, ConstantInt::get(getInt32Ty(ctx.builder.getContext()), 0)); } else if (sz > 512 && jl_struct_try_layout(sty) && sty->layout->flags.isbitsegal) { - Value *varg1 = arg1.ispointer() ? data_pointer(ctx, arg1) : + Value *varg1 = arg1.inline_roots.empty() && arg1.ispointer() ? data_pointer(ctx, arg1) : value_to_pointer(ctx, arg1).V; - Value *varg2 = arg2.ispointer() ? data_pointer(ctx, arg2) : + Value *varg2 = arg2.inline_roots.empty() && arg2.ispointer() ? data_pointer(ctx, arg2) : value_to_pointer(ctx, arg2).V; varg1 = emit_pointer_from_objref(ctx, varg1); varg2 = emit_pointer_from_objref(ctx, varg2); @@ -4610,34 +4664,33 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, // For tuples, we can emit code even if we don't know the exact // type (e.g. because we don't know the length). This is possible // as long as we know that all elements are of the same (leaf) type. - if (obj.ispointer()) { - if (order != jl_memory_order_notatomic && order != jl_memory_order_unspecified) { - emit_atomic_error(ctx, "getfield: non-atomic field cannot be accessed atomically"); - *ret = jl_cgval_t(); // unreachable - return true; - } - // Determine which was the type that was homogeneous - jl_value_t *jt = jl_tparam0(utt); - if (jl_is_vararg(jt)) - jt = jl_unwrap_vararg(jt); - assert(jl_is_datatype(jt)); - // This is not necessary for correctness, but allows to omit - // the extra code for getting the length of the tuple - if (!bounds_check_enabled(ctx, boundscheck)) { - vidx = ctx.builder.CreateSub(vidx, ConstantInt::get(ctx.types().T_size, 1)); - } - else { - vidx = emit_bounds_check(ctx, obj, (jl_value_t*)obj.typ, vidx, - emit_datatype_nfields(ctx, emit_typeof(ctx, obj, false, false)), - jl_true); - } - bool isboxed = !jl_datatype_isinlinealloc((jl_datatype_t*)jt, 0); - Value *ptr = data_pointer(ctx, obj); - *ret = typed_load(ctx, ptr, vidx, - isboxed ? 
(jl_value_t*)jl_any_type : jt, - obj.tbaa, nullptr, isboxed, AtomicOrdering::NotAtomic, false); + jl_cgval_t ptrobj = obj.isboxed ? obj : value_to_pointer(ctx, obj); + if (order != jl_memory_order_notatomic && order != jl_memory_order_unspecified) { + emit_atomic_error(ctx, "getfield: non-atomic field cannot be accessed atomically"); + *ret = jl_cgval_t(); // unreachable return true; } + // Determine which was the type that was homogeneous + jl_value_t *jt = jl_tparam0(utt); + if (jl_is_vararg(jt)) + jt = jl_unwrap_vararg(jt); + assert(jl_is_datatype(jt)); + // This is not necessary for correctness, but allows to omit + // the extra code for getting the length of the tuple + if (!bounds_check_enabled(ctx, boundscheck)) { + vidx = ctx.builder.CreateSub(vidx, ConstantInt::get(ctx.types().T_size, 1)); + } + else { + vidx = emit_bounds_check(ctx, ptrobj, (jl_value_t*)ptrobj.typ, vidx, + emit_datatype_nfields(ctx, emit_typeof(ctx, ptrobj, false, false)), + jl_true); + } + bool isboxed = !jl_datatype_isinlinealloc((jl_datatype_t*)jt, 0); + Value *ptr = data_pointer(ctx, ptrobj); + *ret = typed_load(ctx, ptr, vidx, + isboxed ? (jl_value_t*)jl_any_type : jt, + ptrobj.tbaa, nullptr, isboxed, AtomicOrdering::NotAtomic, false); + return true; } // Unknown object, but field known to be integer @@ -4914,7 +4967,12 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, else if (jl_field_isptr(stt, fieldidx) || jl_type_hasptr(jl_field_type(stt, fieldidx))) { Value *fldv; size_t offs = jl_field_offset(stt, fieldidx) / sizeof(jl_value_t*); - if (obj.ispointer()) { + if (!obj.inline_roots.empty()) { + auto offsets = split_value_field(stt, fieldidx); + assert(offsets.second >= 0); + fldv = obj.inline_roots[offsets.second]; + } + else if (obj.ispointer()) { auto tbaa = best_field_tbaa(ctx, obj, stt, fieldidx, offs); if (!jl_field_isptr(stt, fieldidx)) offs += ((jl_datatype_t*)jl_field_type(stt, fieldidx))->layout->first_ptr; @@ -5033,26 +5091,18 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos SmallVector argvals(nfargs); unsigned idx = 0; AllocaInst *result = nullptr; - switch (returninfo.cc) { - case jl_returninfo_t::Boxed: - case jl_returninfo_t::Register: - case jl_returninfo_t::Ghosts: - break; - case jl_returninfo_t::SRet: - result = emit_static_alloca(ctx, getAttributeAtIndex(returninfo.attrs, 1, Attribute::StructRet).getValueAsType(), Align(julia_alignment(jlretty))); - argvals[idx] = result; - idx++; - break; - case jl_returninfo_t::Union: - result = emit_static_alloca(ctx, ArrayType::get(getInt8Ty(ctx.builder.getContext()), returninfo.union_bytes), Align(returninfo.union_align)); + + if (returninfo.cc == jl_returninfo_t::SRet || returninfo.cc == jl_returninfo_t::Union) { + result = emit_static_alloca(ctx, returninfo.union_bytes, Align(returninfo.union_align)); setName(ctx.emission_context, result, "sret_box"); argvals[idx] = result; idx++; - break; } + AllocaInst *return_roots = nullptr; if (returninfo.return_roots) { - AllocaInst *return_roots = emit_static_alloca(ctx, ArrayType::get(ctx.types().T_prjlvalue, returninfo.return_roots), Align(alignof(jl_value_t*))); + assert(returninfo.cc == jl_returninfo_t::SRet); + return_roots = emit_static_roots(ctx, returninfo.return_roots); argvals[idx] = return_roots; idx++; } @@ -5063,16 +5113,27 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos for (size_t i = 0; i < nargs; i++) { jl_value_t *jt = jl_nth_slot_type(specTypes, i); // n.b.: specTypes is required to 
be a datatype by construction for specsig - jl_cgval_t arg = argv[i]; if (is_opaque_closure && i == 0) { // Special implementation for opaque closures: their jt and thus // julia_type_to_llvm values are likely wrong, so override the // behavior here to directly pass the expected pointer based instead // just on passing arg as a pointer - arg = value_to_pointer(ctx, arg); - argvals[idx] = decay_derived(ctx, data_pointer(ctx, arg)); + jl_cgval_t arg = argv[i]; + if (arg.isghost) { + argvals[idx] = Constant::getNullValue(ctx.builder.getPtrTy(AddressSpace::Derived)); + } + else { + if (!arg.isboxed) + arg = value_to_pointer(ctx, arg); + argvals[idx] = decay_derived(ctx, data_pointer(ctx, arg)); + } + idx++; + continue; } - else if (is_uniquerep_Type(jt)) { + jl_cgval_t arg = update_julia_type(ctx, argv[i], jt); + if (arg.typ == jl_bottom_type) + return jl_cgval_t(); + if (is_uniquerep_Type(jt)) { continue; } else { @@ -5085,8 +5146,24 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos argvals[idx] = boxed(ctx, arg); } else if (et->isAggregateType()) { - arg = value_to_pointer(ctx, arg); - argvals[idx] = decay_derived(ctx, data_pointer(ctx, arg)); + auto tracked = CountTrackedPointers(et); + if (tracked.count && !tracked.all) { + Value *val = arg.V; + SmallVector roots(arg.inline_roots); + if (roots.empty()) + std::tie(val, roots) = split_value(ctx, arg, Align(jl_datatype_align(jt))); + AllocaInst *proots = emit_static_roots(ctx, roots.size()); + for (size_t i = 0; i < roots.size(); i++) + ctx.builder.CreateAlignedStore(roots[i], emit_ptrgep(ctx, proots, i * sizeof(void*)), Align(sizeof(void*))); + assert(val); + argvals[idx++] = decay_derived(ctx, val); + argvals[idx] = proots; + } + else { + if (!arg.isboxed) + arg = value_to_pointer(ctx, arg); + argvals[idx] = decay_derived(ctx, data_pointer(ctx, arg)); + } } else { Value *val = emit_unbox(ctx, et, arg, jt); @@ -5132,7 +5209,7 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos break; case jl_returninfo_t::SRet: assert(result); - retval = mark_julia_slot(result, jlretty, NULL, ctx.tbaa().tbaa_stack); + retval = mark_julia_slot(result, jlretty, NULL, ctx.tbaa().tbaa_gcframe, load_gc_roots(ctx, return_roots, returninfo.return_roots)); break; case jl_returninfo_t::Union: { Value *box = ctx.builder.CreateExtractValue(call, 0); @@ -5460,7 +5537,7 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo JuliaFunction<> *cc; if (f.typ == (jl_value_t*)jl_intrinsic_type) { fptr = prepare_call(jlintrinsic_func); - F = f.ispointer() ? data_pointer(ctx, f) : value_to_pointer(ctx, f).V; + F = f.inline_roots.empty() && f.ispointer() ? 
data_pointer(ctx, f) : value_to_pointer(ctx, f).V; F = decay_derived(ctx, F); cc = julia_call3; } @@ -5712,42 +5789,53 @@ static jl_cgval_t emit_isdefined(jl_codectx_t &ctx, jl_value_t *sym, int allow_i } static jl_cgval_t emit_varinfo(jl_codectx_t &ctx, jl_varinfo_t &vi, jl_sym_t *varname) { - jl_value_t *typ = vi.value.typ; jl_cgval_t v; Value *isnull = NULL; if (vi.boxroot == NULL || vi.pTIndex != NULL) { - if ((!vi.isVolatile && vi.isSA) || vi.isArgument || vi.value.constant || !vi.value.V) { + if ((!vi.isVolatile && vi.isSA) || vi.isArgument || vi.value.constant || !(vi.value.V || vi.inline_roots)) { v = vi.value; if (vi.pTIndex) v.TIndex = ctx.builder.CreateAlignedLoad(getInt8Ty(ctx.builder.getContext()), vi.pTIndex, Align(1)); } else { // copy value to a non-mutable (non-volatile SSA) location - AllocaInst *varslot = cast(vi.value.V); - setName(ctx.emission_context, varslot, jl_symbol_name(varname)); - Type *T = varslot->getAllocatedType(); - assert(!varslot->isArrayAllocation() && "variables not expected to be VLA"); - AllocaInst *ssaslot = cast(varslot->clone()); - setName(ctx.emission_context, ssaslot, jl_symbol_name(varname) + StringRef(".ssa")); - ssaslot->insertAfter(varslot); - if (vi.isVolatile) { - Value *unbox = ctx.builder.CreateAlignedLoad(ssaslot->getAllocatedType(), varslot, - varslot->getAlign(), - true); - ctx.builder.CreateAlignedStore(unbox, ssaslot, ssaslot->getAlign()); - } - else { - const DataLayout &DL = jl_Module->getDataLayout(); - uint64_t sz = DL.getTypeStoreSize(T); - emit_memcpy(ctx, ssaslot, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), vi.value, sz, ssaslot->getAlign(), varslot->getAlign()); + // since this might be a union slot, the most convenient approach to copying + // is to move the whole alloca chunk + AllocaInst *ssaslot = nullptr; + if (vi.value.V) { + auto stack_ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); + AllocaInst *varslot = cast(vi.value.V); + Type *T = varslot->getAllocatedType(); + assert(!varslot->isArrayAllocation() && "variables not expected to be VLA"); + ssaslot = cast(varslot->clone()); + setName(ctx.emission_context, ssaslot, varslot->getName() + StringRef(".ssa")); + ssaslot->insertAfter(varslot); + if (vi.isVolatile) { + Value *unbox = ctx.builder.CreateAlignedLoad(ssaslot->getAllocatedType(), varslot, varslot->getAlign(), true); + stack_ai.decorateInst(ctx.builder.CreateAlignedStore(unbox, ssaslot, ssaslot->getAlign())); + } + else { + const DataLayout &DL = jl_Module->getDataLayout(); + uint64_t sz = DL.getTypeStoreSize(T); + emit_memcpy(ctx, ssaslot, stack_ai, vi.value, sz, ssaslot->getAlign(), varslot->getAlign()); + } } Value *tindex = NULL; if (vi.pTIndex) tindex = ctx.builder.CreateAlignedLoad(getInt8Ty(ctx.builder.getContext()), vi.pTIndex, Align(1), vi.isVolatile); - v = mark_julia_slot(ssaslot, vi.value.typ, tindex, ctx.tbaa().tbaa_stack); + v = mark_julia_slot(ssaslot, vi.value.typ, tindex, ctx.tbaa().tbaa_stack, None); + } + if (vi.inline_roots) { + AllocaInst *varslot = vi.inline_roots; + size_t nroots = cast(varslot->getArraySize())->getZExtValue(); + auto T_prjlvalue = varslot->getAllocatedType(); + if (auto AT = dyn_cast(T_prjlvalue)) { + nroots *= AT->getNumElements(); + T_prjlvalue = AT->getElementType(); + } + assert(T_prjlvalue == ctx.types().T_prjlvalue); + v.inline_roots = load_gc_roots(ctx, varslot, nroots, vi.isVolatile); } - if (vi.boxroot == NULL) - v = update_julia_type(ctx, v, typ); if (vi.usedUndef) { assert(vi.defFlag); isnull = 
ctx.builder.CreateAlignedLoad(getInt1Ty(ctx.builder.getContext()), vi.defFlag, Align(1), vi.isVolatile); @@ -5758,7 +5846,7 @@ static jl_cgval_t emit_varinfo(jl_codectx_t &ctx, jl_varinfo_t &vi, jl_sym_t *va Value *box_isnull = NULL; if (vi.usedUndef) box_isnull = ctx.builder.CreateICmpNE(boxed, Constant::getNullValue(ctx.types().T_prjlvalue)); - maybe_mark_load_dereferenceable(boxed, vi.usedUndef || vi.pTIndex, typ); + maybe_mark_load_dereferenceable(boxed, vi.usedUndef || vi.pTIndex, vi.value.typ); if (vi.pTIndex) { // value is either boxed in the stack slot, or unboxed in value // as indicated by testing (pTIndex & UNION_BOX_MARKER) @@ -5767,15 +5855,14 @@ static jl_cgval_t emit_varinfo(jl_codectx_t &ctx, jl_varinfo_t &vi, jl_sym_t *va ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0)); if (vi.usedUndef) isnull = ctx.builder.CreateSelect(load_unbox, isnull, box_isnull); - if (v.V) { // v.V will be null if it is a union of all ghost values + if (v.V) // v.V will be null if it is a union of all ghost values v.V = ctx.builder.CreateSelect(load_unbox, decay_derived(ctx, v.V), decay_derived(ctx, boxed)); - } else + else v.V = boxed; v.Vboxed = boxed; - v = update_julia_type(ctx, v, typ); } else { - v = mark_julia_type(ctx, boxed, true, typ); + v = mark_julia_type(ctx, boxed, true, vi.value.typ); if (vi.usedUndef) isnull = box_isnull; } @@ -5807,49 +5894,27 @@ static void emit_vi_assignment_unboxed(jl_codectx_t &ctx, jl_varinfo_t &vi, Valu store_def_flag(ctx, vi, true); if (!vi.value.constant) { // check that this is not a virtual store - assert(vi.value.ispointer() || (vi.pTIndex && vi.value.V == NULL)); + assert(vi.inline_roots || vi.value.ispointer() || (vi.pTIndex && vi.value.V == NULL)); // store value - if (vi.value.V == NULL) { - // all ghost values in destination - nothing to copy or store - } - else if (rval_info.constant || !rval_info.ispointer()) { - if (rval_info.isghost) { - // all ghost values in source - nothing to copy or store - } - else { - if (rval_info.typ != vi.value.typ && !vi.pTIndex && !rval_info.TIndex) { - // isbits cast-on-assignment is invalid. this branch should be dead-code. - CreateTrap(ctx.builder); - } - else { - Value *dest = vi.value.V; - if (vi.pTIndex) // TODO: use lifetime-end here instead - ctx.builder.CreateStore(UndefValue::get(cast(vi.value.V)->getAllocatedType()), vi.value.V); - Type *store_ty = julia_type_to_llvm(ctx, rval_info.constant ? jl_typeof(rval_info.constant) : rval_info.typ); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); - ai.decorateInst(ctx.builder.CreateStore( - emit_unbox(ctx, store_ty, rval_info, rval_info.typ), - dest, - vi.isVolatile)); - } - } - } - else { - if (vi.pTIndex == NULL) { - assert(jl_is_concrete_type(vi.value.typ)); - // Sometimes we can get into situations where the LHS and RHS - // are the same slot. We're not allowed to memcpy in that case - // due to LLVM bugs. - // This check should probably mostly catch the relevant situations. 
- if (vi.value.V != rval_info.V) { - Value *copy_bytes = ConstantInt::get(getInt32Ty(ctx.builder.getContext()), jl_datatype_size(vi.value.typ)); - Align alignment(julia_alignment(rval_info.typ)); - emit_memcpy(ctx, vi.value.V, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), rval_info, copy_bytes, - alignment, alignment, vi.isVolatile); - } - } + rval_info = update_julia_type(ctx, rval_info, vi.value.typ); + if (rval_info.typ == jl_bottom_type) + return; + if (vi.pTIndex && vi.value.V) // TODO: use lifetime-end here instead + ctx.builder.CreateStore(UndefValue::get(cast(vi.value.V)->getAllocatedType()), vi.value.V); + // Sometimes we can get into situations where the LHS and RHS + // are the same slot. We're not allowed to memcpy in that case + // due to LLVM bugs. + // This check should probably mostly catch the relevant situations. + if (vi.value.V != nullptr ? vi.value.V != rval_info.V : vi.inline_roots != nullptr) { + MDNode *tbaa = ctx.tbaa().tbaa_stack; // Use vi.value.tbaa ? + if (rval_info.TIndex) + emit_unionmove(ctx, vi.value.V, tbaa, rval_info, /*skip*/isboxed, vi.isVolatile); else { - emit_unionmove(ctx, vi.value.V, ctx.tbaa().tbaa_stack, rval_info, /*skip*/isboxed, vi.isVolatile); + Align align(julia_alignment(rval_info.typ)); + if (vi.inline_roots) + split_value_into(ctx, rval_info, align, vi.value.V, align, jl_aliasinfo_t::fromTBAA(ctx, tbaa), vi.inline_roots, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe), vi.isVolatile); + else + emit_unbox_store(ctx, rval_info, vi.value.V, tbaa, align, vi.isVolatile); } } } @@ -5864,7 +5929,8 @@ static void emit_phinode_assign(jl_codectx_t &ctx, ssize_t idx, jl_value_t *r) jl_value_t *phiType = NULL; if (jl_is_array(ssavalue_types)) { phiType = jl_array_ptr_ref(ssavalue_types, idx); - } else { + } + else { phiType = (jl_value_t*)jl_any_type; } jl_array_t *edges = (jl_array_t*)jl_fieldref_noalloc(r, 0); @@ -5874,6 +5940,7 @@ static void emit_phinode_assign(jl_codectx_t &ctx, ssize_t idx, jl_value_t *r) return; } AllocaInst *dest = nullptr; + SmallVector roots; // N.B.: For any memory space, used as a phi, // we need to emit space twice here. 
The reason for this is that // phi nodes may be arguments of other phi nodes, so if we don't @@ -5884,7 +5951,7 @@ static void emit_phinode_assign(jl_codectx_t &ctx, ssize_t idx, jl_value_t *r) size_t min_align, nbytes; dest = try_emit_union_alloca(ctx, ((jl_uniontype_t*)phiType), allunbox, min_align, nbytes); if (dest) { - Instruction *phi = dest->clone(); + AllocaInst *phi = cast(dest->clone()); phi->insertAfter(dest); PHINode *Tindex_phi = PHINode::Create(getInt8Ty(ctx.builder.getContext()), jl_array_nrows(edges), "tindex_phi"); Tindex_phi->insertInto(BB, InsertPt); @@ -5893,14 +5960,14 @@ static void emit_phinode_assign(jl_codectx_t &ctx, ssize_t idx, jl_value_t *r) Value *isboxed = ctx.builder.CreateICmpNE( ctx.builder.CreateAnd(Tindex_phi, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), UNION_BOX_MARKER)), ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0)); - ctx.builder.CreateMemCpy(phi, MaybeAlign(min_align), dest, dest->getAlign(), nbytes, false); + ctx.builder.CreateMemCpy(phi, Align(min_align), dest, dest->getAlign(), nbytes, false); ctx.builder.CreateLifetimeEnd(dest); Value *ptr = ctx.builder.CreateSelect(isboxed, decay_derived(ctx, ptr_phi), decay_derived(ctx, phi)); jl_cgval_t val = mark_julia_slot(ptr, phiType, Tindex_phi, best_tbaa(ctx.tbaa(), phiType)); val.Vboxed = ptr_phi; - ctx.PhiNodes.push_back(std::make_tuple(val, BB, dest, ptr_phi, r)); + ctx.PhiNodes.push_back(std::make_tuple(val, BB, dest, ptr_phi, roots, r)); ctx.SAvalues[idx] = val; ctx.ssavalue_assigned[idx] = true; return; @@ -5909,7 +5976,7 @@ static void emit_phinode_assign(jl_codectx_t &ctx, ssize_t idx, jl_value_t *r) PHINode *Tindex_phi = PHINode::Create(getInt8Ty(ctx.builder.getContext()), jl_array_nrows(edges), "tindex_phi"); Tindex_phi->insertInto(BB, InsertPt); jl_cgval_t val = mark_julia_slot(NULL, phiType, Tindex_phi, ctx.tbaa().tbaa_stack); - ctx.PhiNodes.push_back(std::make_tuple(val, BB, dest, (PHINode*)NULL, r)); + ctx.PhiNodes.push_back(std::make_tuple(val, BB, dest, (PHINode*)nullptr, roots, r)); ctx.SAvalues[idx] = val; ctx.ssavalue_assigned[idx] = true; return; @@ -5928,22 +5995,38 @@ static void emit_phinode_assign(jl_codectx_t &ctx, ssize_t idx, jl_value_t *r) } jl_cgval_t slot; PHINode *value_phi = NULL; - if (vtype->isAggregateType() && CountTrackedPointers(vtype).count == 0) { + if (!isboxed && vtype->isAggregateType()) { // the value will be moved into dest in the predecessor critical block. 
// here it's moved into phi in the successor (from dest) - Align align(julia_alignment(phiType)); - dest = emit_static_alloca(ctx, vtype, align); - Value *phi = emit_static_alloca(ctx, vtype, align); - ctx.builder.CreateMemCpy(phi, align, dest, align, jl_datatype_size(phiType), false); - ctx.builder.CreateLifetimeEnd(dest); - slot = mark_julia_slot(phi, phiType, NULL, ctx.tbaa().tbaa_stack); + auto tracked = CountTrackedPointers(vtype); + if (tracked.count) { + roots.resize(tracked.count); + assert(tracked.count == split_value_size((jl_datatype_t*)phiType).second); + for (size_t nr = 0; nr < tracked.count; nr++) { + auto root_phi = PHINode::Create(ctx.types().T_prjlvalue, jl_array_nrows(edges), "root_phi"); + root_phi->insertInto(BB, InsertPt); + roots[nr] = root_phi; + } + } + AllocaInst *phi = nullptr; + if (!tracked.all) { + Align align(julia_alignment(phiType)); + unsigned nb = jl_datatype_size(phiType); + dest = emit_static_alloca(ctx, nb, align); + phi = cast(dest->clone()); + phi->insertBefore(dest); + ctx.builder.CreateMemCpy(phi, align, dest, align, nb, false); + ctx.builder.CreateLifetimeEnd(dest); + } + slot = mark_julia_slot(phi, phiType, NULL, ctx.tbaa().tbaa_stack, + roots.empty() ? ArrayRef() : ArrayRef((Value *const *)&roots.front(), roots.size())); } else { value_phi = PHINode::Create(vtype, jl_array_nrows(edges), "value_phi"); value_phi->insertInto(BB, InsertPt); slot = mark_julia_type(ctx, value_phi, isboxed, phiType); } - ctx.PhiNodes.push_back(std::make_tuple(slot, BB, dest, value_phi, r)); + ctx.PhiNodes.push_back(std::make_tuple(slot, BB, dest, value_phi, roots, r)); ctx.SAvalues[idx] = slot; ctx.ssavalue_assigned[idx] = true; return; @@ -5963,8 +6046,9 @@ static void emit_ssaval_assign(jl_codectx_t &ctx, ssize_t ssaidx_0based, jl_valu it = ctx.phic_slots.emplace(ssaidx_0based, jl_varinfo_t(ctx.builder.getContext())).first; } slot = emit_varinfo(ctx, it->second, jl_symbol("phic")); - } else { - slot = emit_expr(ctx, r, ssaidx_0based); // slot could be a jl_value_t (unboxed) or jl_value_t* (ispointer) + } + else { + slot = emit_expr(ctx, r, ssaidx_0based); } if (slot.isboxed || slot.TIndex) { // see if inference suggested a different type for the ssavalue than the expression @@ -6123,11 +6207,22 @@ static void emit_upsilonnode(jl_codectx_t &ctx, ssize_t phic, jl_value_t *val) vi.pTIndex, Align(1), true); } else if (vi.value.V && !vi.value.constant && vi.value.typ != jl_bottom_type) { - assert(vi.value.ispointer()); - Type *T = cast(vi.value.V)->getAllocatedType(); - if (CountTrackedPointers(T).count) { - // make sure gc pointers (including ptr_phi of union-split) are initialized to NULL - ctx.builder.CreateStore(Constant::getNullValue(T), vi.value.V, true); + assert(vi.inline_roots || vi.value.ispointer()); + if (vi.inline_roots) { + // memory optimization: make gc pointers re-initialized to NULL + AllocaInst *ssaroots = vi.inline_roots; + size_t nroots = cast(ssaroots->getArraySize())->getZExtValue(); + auto T_prjlvalue = ssaroots->getAllocatedType(); + if (auto AT = dyn_cast(T_prjlvalue)) { + nroots *= AT->getNumElements(); + T_prjlvalue = AT->getElementType(); + } + assert(T_prjlvalue == ctx.types().T_prjlvalue); + Value *nullval = Constant::getNullValue(T_prjlvalue); + auto stack_ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); + for (size_t i = 0; i < nroots; i++) { + stack_ai.decorateInst(ctx.builder.CreateAlignedStore(nullval, emit_ptrgep(ctx, ssaroots, i * sizeof(void*)), ssaroots->getAlign(), true)); + } } } } @@ -6865,14 +6960,17 @@ static void 
emit_cfunc_invalidate( ++AI; // gcstack_arg } for (size_t i = 0; i < nargs; i++) { + // n.b. calltype is required to be a datatype by construction for specsig jl_value_t *jt = jl_nth_slot_type(calltype, i); - // n.b. specTypes is required to be a datatype by construction for specsig - bool isboxed = false; - Type *et; if (i == 0 && is_for_opaque_closure) { - et = PointerType::get(ctx.types().T_jlvalue, AddressSpace::Derived); + Value *arg_v = &*AI; + ++AI; + myargs[i] = mark_julia_slot(arg_v, jt, NULL, ctx.tbaa().tbaa_const); + continue; } - else if (deserves_argbox(jt)) { + bool isboxed = false; + Type *et; + if (deserves_argbox(jt)) { et = ctx.types().T_prjlvalue; isboxed = true; } @@ -6889,8 +6987,14 @@ static void emit_cfunc_invalidate( else { Value *arg_v = &*AI; ++AI; - if ((i == 0 && is_for_opaque_closure) || (!isboxed && et->isAggregateType())) { - myargs[i] = mark_julia_slot(arg_v, jt, NULL, ctx.tbaa().tbaa_const); + if (!isboxed && et->isAggregateType()) { + auto tracked = CountTrackedPointers(et); + SmallVector roots; + if (tracked.count && !tracked.all) { + roots = load_gc_roots(ctx, &*AI, tracked.count); + ++AI; + } + myargs[i] = mark_julia_slot(arg_v, jt, NULL, ctx.tbaa().tbaa_const, roots); } else { assert(arg_v->getType() == et); @@ -6903,6 +7007,7 @@ static void emit_cfunc_invalidate( jl_cgval_t gf_retbox = mark_julia_type(ctx, gf_ret, true, jl_any_type); if (cc != jl_returninfo_t::Boxed) { emit_typecheck(ctx, gf_retbox, rettype, "cfunction"); + gf_retbox = update_julia_type(ctx, gf_retbox, rettype); } switch (cc) { @@ -6920,14 +7025,15 @@ static void emit_cfunc_invalidate( break; } case jl_returninfo_t::SRet: { + Value *sret = &*gf_thunk->arg_begin(); + Align align(julia_alignment(rettype)); if (return_roots) { - Value *root1 = gf_thunk->arg_begin() + 1; // root1 has type [n x {}*]* - // store the whole object in the first slot - ctx.builder.CreateStore(gf_ret, root1); + Value *roots = gf_thunk->arg_begin() + 1; // root1 has type [n x {}*]* + split_value_into(ctx, gf_retbox, align, sret, align, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), roots, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe)); + } + else { + emit_unbox_store(ctx, gf_retbox, sret, ctx.tbaa().tbaa_stack, align); } - Align alignment(julia_alignment(rettype)); - emit_memcpy(ctx, &*gf_thunk->arg_begin(), jl_aliasinfo_t::fromTBAA(ctx, nullptr), gf_ret, - jl_aliasinfo_t::fromTBAA(ctx, nullptr), jl_datatype_size(rettype), Align(alignment), Align(alignment)); ctx.builder.CreateRetVoid(); break; } @@ -7698,14 +7804,18 @@ static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlretty, j Align(sizeof(void*))); retval = mark_julia_type(ctx, theArg, true, jl_any_type); } - ctx.builder.CreateRet(boxed(ctx, retval)); + if (retval.typ == jl_bottom_type) + CreateTrap(ctx.builder, false); + else + ctx.builder.CreateRet(boxed(ctx, retval)); } -static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure, bool gcstack_arg, BitVector *used_arguments, size_t *arg_offset) +static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value *fval, StringRef name, jl_value_t *sig, jl_value_t *jlrettype, bool is_opaque_closure, bool gcstack_arg, + ArrayRef ArgNames, unsigned nreq) { jl_returninfo_t props = {}; - SmallVector fsig; - SmallVector argnames; + SmallVector fsig; + SmallVector argnames; Type *rt = NULL; Type *srt = NULL; if (jlrettype == (jl_value_t*)jl_bottom_type) { @@ 
-7742,8 +7852,10 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value if (rt != getVoidTy(ctx.builder.getContext()) && deserves_sret(jlrettype, rt)) { auto tracked = CountTrackedPointers(rt, true); assert(!tracked.derived); - if (tracked.count && !tracked.all) + if (tracked.count && !tracked.all) { props.return_roots = tracked.count; + assert(props.return_roots == ((jl_datatype_t*)jlrettype)->layout->npointers); + } props.cc = jl_returninfo_t::SRet; props.union_bytes = jl_datatype_size(jlrettype); props.union_align = props.union_minalign = jl_datatype_align(jlrettype); @@ -7801,29 +7913,22 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value argnames.push_back("pgcstack_arg"); } - if (arg_offset) - *arg_offset = fsig.size(); size_t nparams = jl_nparams(sig); - if (used_arguments) - used_arguments->resize(nparams); - for (size_t i = 0; i < nparams; i++) { jl_value_t *jt = jl_tparam(sig, i); bool isboxed = false; - Type *ty = NULL; - if (i == 0 && is_opaque_closure) { - ty = nullptr; // special token to avoid computing this unnecessarily - } - else { + Type *et = nullptr; + if (i != 0 || !is_opaque_closure) { // special token for OC argument if (is_uniquerep_Type(jt)) continue; isboxed = deserves_argbox(jt); - ty = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, jt); - if (type_is_ghost(ty)) + et = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, jt); + if (type_is_ghost(et)) continue; } AttrBuilder param(ctx.builder.getContext()); - if (ty == nullptr || ty->isAggregateType()) { // aggregate types are passed by pointer + Type *ty = et; + if (et == nullptr || et->isAggregateType()) { // aggregate types are passed by pointer param.addAttribute(Attribute::NoCapture); param.addAttribute(Attribute::ReadOnly); ty = ctx.builder.getPtrTy(AddressSpace::Derived); @@ -7838,8 +7943,26 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value } attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param)); fsig.push_back(ty); - if (used_arguments) - used_arguments->set(i); + size_t argno = i < nreq ? i : nreq; + std::string genname; + if (!ArgNames.empty()) { + genname = ArgNames[argno]; + if (genname.empty()) + genname = (StringRef("#") + Twine(argno + 1)).str(); + if (i >= nreq) + genname += (StringRef("[") + Twine(i - nreq + 1) + StringRef("]")).str(); + const char *arg_typename = jl_is_datatype(jt) ? jl_symbol_name(((jl_datatype_t*)jt)->name->name) : ""; + argnames.push_back((genname + StringRef("::") + arg_typename).str()); + } + if (et && et->isAggregateType()) { + auto tracked = CountTrackedPointers(et); + if (tracked.count && !tracked.all) { + attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param)); + fsig.push_back(ctx.builder.getPtrTy(M->getDataLayout().getAllocaAddrSpace())); + if (!genname.empty()) + argnames.push_back((Twine(".roots.") + genname).str()); + } + } } AttributeSet FnAttrs; @@ -7887,12 +8010,6 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value return props; } -static void emit_sret_roots(jl_codectx_t &ctx, bool isptr, Value *Src, Type *T, Value *Shadow, unsigned count) -{ - unsigned emitted = TrackWithShadow(Src, T, isptr, Shadow, ctx.builder); //This comes from Late-GC-Lowering?? 
- assert(emitted == count); (void)emitted; (void)count; -} - static DISubroutineType * get_specsig_di(jl_codectx_t &ctx, jl_debugcache_t &debuginfo, jl_value_t *rt, jl_value_t *sig, DIBuilder &dbuilder) { @@ -8105,49 +8222,26 @@ static jl_llvm_functions_t Function *f = NULL; bool has_sret = false; if (specsig) { // assumes !va and !needsparams - BitVector used_args; - size_t args_begin; - returninfo = get_specsig_function(ctx, M, NULL, declarations.specFunctionObject, lam->specTypes, - jlrettype, ctx.is_opaque_closure, JL_FEAT_TEST(ctx,gcstack_arg), &used_args, &args_begin); - f = cast(returninfo.decl.getCallee()); - has_sret = (returninfo.cc == jl_returninfo_t::SRet || returninfo.cc == jl_returninfo_t::Union); - jl_init_function(f, ctx.emission_context.TargetTriple); + SmallVector ArgNames(0); if (ctx.emission_context.debug_level >= 2) { - auto arg_typename = [&](size_t i) JL_NOTSAFEPOINT { - auto tp = jl_tparam(lam->specTypes, i); - return jl_is_datatype(tp) ? jl_symbol_name(((jl_datatype_t*)tp)->name->name) : ""; - }; - size_t nreal = 0; - for (size_t i = 0; i < std::min(nreq, static_cast(used_args.size())); i++) { + ArgNames.resize(ctx.nargs, ""); + for (int i = 0; i < ctx.nargs; i++) { jl_sym_t *argname = slot_symbol(ctx, i); if (argname == jl_unused_sym) continue; - if (used_args.test(i)) { - auto &arg = *f->getArg(args_begin++); - nreal++; - auto name = jl_symbol_name(argname); - if (!name[0]) { - arg.setName(StringRef("#") + Twine(nreal) + StringRef("::") + arg_typename(i)); - } else { - arg.setName(name + StringRef("::") + arg_typename(i)); - } - } - } - if (va && ctx.vaSlot != -1) { - size_t vidx = 0; - for (size_t i = nreq; i < used_args.size(); i++) { - if (used_args.test(i)) { - auto &arg = *f->getArg(args_begin++); - auto type = arg_typename(i); - const char *name = jl_symbol_name(slot_symbol(ctx, ctx.vaSlot)); - if (!name[0]) - name = "..."; - vidx++; - arg.setName(name + StringRef("[") + Twine(vidx) + StringRef("]::") + type); - } - } + const char *name = jl_symbol_name(argname); + if (name[0] == '\0' && ctx.vaSlot == i) + ArgNames[i] = "..."; + else + ArgNames[i] = name; } } + returninfo = get_specsig_function(ctx, M, NULL, declarations.specFunctionObject, lam->specTypes, + jlrettype, ctx.is_opaque_closure, JL_FEAT_TEST(ctx,gcstack_arg), + ArgNames, nreq); + f = cast(returninfo.decl.getCallee()); + has_sret = (returninfo.cc == jl_returninfo_t::SRet || returninfo.cc == jl_returninfo_t::Union); + jl_init_function(f, ctx.emission_context.TargetTriple); // common pattern: see if all return statements are an argument in that // case the apply-generic call can re-use the original box for the return @@ -8348,14 +8442,16 @@ static jl_llvm_functions_t allocate_gc_frame(ctx, b0); Value *last_age = NULL; auto world_age_field = get_tls_world_age_field(ctx); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); - last_age = ai.decorateInst(ctx.builder.CreateAlignedLoad( - ctx.types().T_size, world_age_field, ctx.types().alignof_ptr)); - ctx.world_age_at_entry = last_age; // Load world age for use in get_tls_world_age + { // scope + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); + last_age = ai.decorateInst(ctx.builder.CreateAlignedLoad( + ctx.types().T_size, world_age_field, ctx.types().alignof_ptr)); + ctx.world_age_at_entry = last_age; // Load world age for use in get_tls_world_age + } // step 7. 
allocate local variables slots // must be in the first basic block for the llvm mem2reg pass to work - auto allocate_local = [&ctx, &dbuilder, &debugcache, topdebugloc, va, debug_enabled, M](jl_varinfo_t &varinfo, jl_sym_t *s, int i) { + auto allocate_local = [&ctx, &dbuilder, &debugcache, topdebugloc, va, debug_enabled](jl_varinfo_t &varinfo, jl_sym_t *s, int i) { jl_value_t *jt = varinfo.value.typ; assert(!varinfo.boxroot); // variables shouldn't have memory locs already if (varinfo.value.constant) { @@ -8375,13 +8471,13 @@ static jl_llvm_functions_t if (lv) { lv->setName(jl_symbol_name(s)); varinfo.value = mark_julia_slot(lv, jt, NULL, ctx.tbaa().tbaa_stack); - varinfo.pTIndex = emit_static_alloca(ctx, getInt8Ty(ctx.builder.getContext()), Align(1)); + varinfo.pTIndex = emit_static_alloca(ctx, 1, Align(1)); setName(ctx.emission_context, varinfo.pTIndex, "tindex"); // TODO: attach debug metadata to this variable } else if (allunbox) { // all ghost values just need a selector allocated - AllocaInst *lv = emit_static_alloca(ctx, getInt8Ty(ctx.builder.getContext()), Align(1)); + AllocaInst *lv = emit_static_alloca(ctx, 1, Align(1)); lv->setName(jl_symbol_name(s)); varinfo.pTIndex = lv; varinfo.value.tbaa = NULL; @@ -8394,30 +8490,25 @@ static jl_llvm_functions_t return; } else if (deserves_stack(jt)) { - bool isboxed; - Type *vtype = julia_type_to_llvm(ctx, jt, &isboxed); - assert(!isboxed); - assert(!type_is_ghost(vtype) && "constants should already be handled"); - Value *lv = new AllocaInst(vtype, M->getDataLayout().getAllocaAddrSpace(), nullptr, Align(jl_datatype_align(jt)), jl_symbol_name(s), /*InsertBefore*/ctx.topalloca); - if (CountTrackedPointers(vtype).count) { - StoreInst *SI = new StoreInst(Constant::getNullValue(vtype), lv, false, Align(sizeof(void*))); - SI->insertAfter(ctx.topalloca); - } - varinfo.value = mark_julia_slot(lv, jt, NULL, ctx.tbaa().tbaa_stack); + auto sizes = split_value_size((jl_datatype_t*)jt); + AllocaInst *bits = sizes.first > 0 ? emit_static_alloca(ctx, sizes.first, Align(julia_alignment(jt))) : nullptr; + AllocaInst *roots = sizes.second > 0 ? emit_static_roots(ctx, sizes.second) : nullptr; + if (bits) bits->setName(jl_symbol_name(s)); + if (roots) roots->setName(StringRef(".roots.") + jl_symbol_name(s)); + varinfo.value = mark_julia_slot(bits, jt, NULL, ctx.tbaa().tbaa_stack, None); + varinfo.inline_roots = roots; alloc_def_flag(ctx, varinfo); if (debug_enabled && varinfo.dinfo) { assert((Metadata*)varinfo.dinfo->getType() != debugcache.jl_pvalue_dillvmt); - dbuilder.insertDeclare(lv, varinfo.dinfo, dbuilder.createExpression(), + dbuilder.insertDeclare(bits ? 
bits : roots, varinfo.dinfo, dbuilder.createExpression(), topdebugloc, ctx.builder.GetInsertBlock()); } return; } // otherwise give it a boxroot in this function - AllocaInst *av = new AllocaInst(ctx.types().T_prjlvalue, M->getDataLayout().getAllocaAddrSpace(), - nullptr, Align(sizeof(jl_value_t*)), jl_symbol_name(s), /*InsertBefore*/ctx.topalloca); - StoreInst *SI = new StoreInst(Constant::getNullValue(ctx.types().T_prjlvalue), av, false, Align(sizeof(void*))); - SI->insertAfter(ctx.topalloca); + AllocaInst *av = emit_static_roots(ctx, 1); + av->setName(jl_symbol_name(s)); varinfo.boxroot = av; if (debug_enabled && varinfo.dinfo) { SmallVector addr; @@ -8504,12 +8595,18 @@ static jl_llvm_functions_t ++AI; AttrBuilder param(ctx.builder.getContext(), f->getAttributes().getParamAttrs(Arg->getArgNo())); jl_cgval_t theArg; - if (llvmArgType->isAggregateType()) { + if (!isboxed && llvmArgType->isAggregateType()) { maybe_mark_argument_dereferenceable(param, argType); - theArg = mark_julia_slot(Arg, argType, NULL, ctx.tbaa().tbaa_const); // this argument is by-pointer + SmallVector roots; + auto tracked = CountTrackedPointers(llvmArgType); + if (tracked.count && !tracked.all) { + roots = load_gc_roots(ctx, &*AI, tracked.count); + ++AI; + } + theArg = mark_julia_slot(Arg, argType, NULL, ctx.tbaa().tbaa_const, roots); // this argument is by-pointer } else { - if (isboxed) // e.g. is-pointer + if (isboxed) maybe_mark_argument_dereferenceable(param, argType); theArg = mark_julia_type(ctx, Arg, isboxed, argType); if (theArg.tbaa == ctx.tbaa().tbaa_immut) @@ -8566,95 +8663,92 @@ static jl_llvm_functions_t bool isboxed = deserves_argbox(argType); Type *llvmArgType = NULL; if (i == 0 && ctx.is_opaque_closure) { - isboxed = true; - llvmArgType = PointerType::get(ctx.types().T_jlvalue, AddressSpace::Derived); + isboxed = false; + llvmArgType = ctx.builder.getPtrTy(AddressSpace::Derived); argType = (jl_value_t*)jl_any_type; } else { llvmArgType = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, argType); } - if (s == jl_unused_sym) { - if (specsig && !type_is_ghost(llvmArgType) && !is_uniquerep_Type(argType)) - ++AI; - continue; - } jl_varinfo_t &vi = ctx.slots[i]; - jl_cgval_t theArg; if (s == jl_unused_sym || vi.value.constant) { assert(vi.boxroot == NULL); - if (specsig && !type_is_ghost(llvmArgType) && !is_uniquerep_Type(argType)) + if (specsig && !type_is_ghost(llvmArgType) && !is_uniquerep_Type(argType)) { ++AI; + auto tracked = CountTrackedPointers(llvmArgType); + if (tracked.count && !tracked.all) + ++AI; + } + continue; } - else { - // If this is an opaque closure, implicitly load the env and switch - // the world age. - if (i == 0 && ctx.is_opaque_closure) { - // Load closure world - Value *oc_this = decay_derived(ctx, &*AI++); - Value *argaddr = oc_this; - Value *worldaddr = emit_ptrgep(ctx, argaddr, offsetof(jl_opaque_closure_t, world)); + jl_cgval_t theArg; + // If this is an opaque closure, implicitly load the env and switch + // the world age. 
+ if (i == 0 && ctx.is_opaque_closure) { + // Load closure world + Value *oc_this = decay_derived(ctx, &*AI++); + Value *argaddr = oc_this; + Value *worldaddr = emit_ptrgep(ctx, argaddr, offsetof(jl_opaque_closure_t, world)); - jl_cgval_t closure_world = typed_load(ctx, worldaddr, NULL, (jl_value_t*)jl_long_type, - nullptr, nullptr, false, AtomicOrdering::NotAtomic, false, ctx.types().alignof_ptr.value()); - ctx.world_age_at_entry = closure_world.V; // The tls world in a OC is the world of the closure - emit_unbox_store(ctx, closure_world, world_age_field, ctx.tbaa().tbaa_gcframe, ctx.types().alignof_ptr); + jl_cgval_t closure_world = typed_load(ctx, worldaddr, NULL, (jl_value_t*)jl_long_type, + nullptr, nullptr, false, AtomicOrdering::NotAtomic, false, ctx.types().alignof_ptr.value()); + ctx.world_age_at_entry = closure_world.V; // The tls world in a OC is the world of the closure + emit_unbox_store(ctx, closure_world, world_age_field, ctx.tbaa().tbaa_gcframe, ctx.types().alignof_ptr); - // Load closure env - Value *envaddr = emit_ptrgep(ctx, argaddr, offsetof(jl_opaque_closure_t, captures)); + // Load closure env + Value *envaddr = emit_ptrgep(ctx, argaddr, offsetof(jl_opaque_closure_t, captures)); - jl_cgval_t closure_env = typed_load(ctx, envaddr, NULL, (jl_value_t*)jl_any_type, - nullptr, nullptr, true, AtomicOrdering::NotAtomic, false, sizeof(void*)); - theArg = update_julia_type(ctx, closure_env, vi.value.typ); - } - else if (specsig) { - theArg = get_specsig_arg(argType, llvmArgType, isboxed); + jl_cgval_t closure_env = typed_load(ctx, envaddr, NULL, (jl_value_t*)jl_any_type, + nullptr, nullptr, true, AtomicOrdering::NotAtomic, false, sizeof(void*)); + theArg = update_julia_type(ctx, closure_env, vi.value.typ); + } + else if (specsig) { + theArg = get_specsig_arg(argType, llvmArgType, isboxed); + } + else { + if (i == 0) { + // first (function) arg is separate in jlcall + theArg = mark_julia_type(ctx, fArg, true, vi.value.typ); } else { - if (i == 0) { - // first (function) arg is separate in jlcall - theArg = mark_julia_type(ctx, fArg, true, vi.value.typ); - } - else { - Value *argPtr = emit_ptrgep(ctx, argArray, (i - 1) * ctx.types().sizeof_ptr); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); - Value *load = ai.decorateInst(maybe_mark_load_dereferenceable( - ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, argPtr, Align(sizeof(void*))), - false, vi.value.typ)); - theArg = mark_julia_type(ctx, load, true, vi.value.typ); - if (debug_enabled && vi.dinfo && !vi.boxroot) { - SmallVector addr; + Value *argPtr = emit_ptrgep(ctx, argArray, (i - 1) * ctx.types().sizeof_ptr); + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); + Value *load = ai.decorateInst(maybe_mark_load_dereferenceable( + ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, argPtr, Align(sizeof(void*))), + false, vi.value.typ)); + theArg = mark_julia_type(ctx, load, true, vi.value.typ); + if (debug_enabled && vi.dinfo && !vi.boxroot) { + SmallVector addr; + addr.push_back(llvm::dwarf::DW_OP_deref); + addr.push_back(llvm::dwarf::DW_OP_plus_uconst); + addr.push_back((i - 1) * sizeof(void*)); + if ((Metadata*)vi.dinfo->getType() != debugcache.jl_pvalue_dillvmt) addr.push_back(llvm::dwarf::DW_OP_deref); - addr.push_back(llvm::dwarf::DW_OP_plus_uconst); - addr.push_back((i - 1) * sizeof(void*)); - if ((Metadata*)vi.dinfo->getType() != debugcache.jl_pvalue_dillvmt) - addr.push_back(llvm::dwarf::DW_OP_deref); - dbuilder.insertDeclare(pargArray, vi.dinfo, 
dbuilder.createExpression(addr), - topdebugloc, - ctx.builder.GetInsertBlock()); - } + dbuilder.insertDeclare(pargArray, vi.dinfo, dbuilder.createExpression(addr), + topdebugloc, + ctx.builder.GetInsertBlock()); } } + } - - if (vi.boxroot == NULL) { - assert(vi.value.V == NULL && "unexpected variable slot created for argument"); - // keep track of original (possibly boxed) value to avoid re-boxing or moving - vi.value = theArg; - if (debug_enabled && vi.dinfo && theArg.V) { - if (theArg.ispointer()) { - dbuilder.insertDeclare(theArg.V, vi.dinfo, dbuilder.createExpression(), - topdebugloc, ctx.builder.GetInsertBlock()); - } - else { - dbuilder.insertDbgValueIntrinsic(theArg.V, vi.dinfo, dbuilder.createExpression(), - topdebugloc, ctx.builder.GetInsertBlock()); - } + if (vi.boxroot == nullptr) { + assert(vi.value.V == nullptr && vi.inline_roots == nullptr && "unexpected variable slot created for argument"); + // keep track of original (possibly boxed) value to avoid re-boxing or moving + vi.value = theArg; + if (debug_enabled && vi.dinfo && theArg.V) { + if (!theArg.inline_roots.empty() || theArg.ispointer()) { + dbuilder.insertDeclare(theArg.V, vi.dinfo, dbuilder.createExpression(), + topdebugloc, ctx.builder.GetInsertBlock()); + } + else { + dbuilder.insertDbgValueIntrinsic(theArg.V, vi.dinfo, dbuilder.createExpression(), + topdebugloc, ctx.builder.GetInsertBlock()); } } - else { - Value *argp = boxed(ctx, theArg); - ctx.builder.CreateStore(argp, vi.boxroot); - } + } + else { + Value *argp = boxed(ctx, theArg); + ctx.builder.CreateStore(argp, vi.boxroot); } } // step 9. allocate rest argument @@ -9129,29 +9223,31 @@ static jl_llvm_functions_t break; } if (sret) { - if (retvalinfo.ispointer()) { - if (returninfo.return_roots) { - Type *store_ty = julia_type_to_llvm(ctx, retvalinfo.typ); - emit_sret_roots(ctx, true, data_pointer(ctx, retvalinfo), store_ty, f->arg_begin() + 1, returninfo.return_roots); - } + Align align(returninfo.union_align); + if (!returninfo.return_roots && !retvalinfo.inline_roots.empty()) { + assert(retvalinfo.V == nullptr); + assert(returninfo.cc == jl_returninfo_t::SRet); + split_value_into(ctx, retvalinfo, align, nullptr, align, + jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), sret, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe)); + } + else if (returninfo.return_roots) { + assert(returninfo.cc == jl_returninfo_t::SRet); + Value *return_roots = f->arg_begin() + 1; + split_value_into(ctx, retvalinfo, align, sret, align, + jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), return_roots, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe)); + } + else if (retvalinfo.ispointer()) { if (returninfo.cc == jl_returninfo_t::SRet) { assert(jl_is_concrete_type(jlrettype)); - Align alignment(julia_alignment(jlrettype)); emit_memcpy(ctx, sret, jl_aliasinfo_t::fromTBAA(ctx, nullptr), retvalinfo, - jl_datatype_size(jlrettype), alignment, alignment); + jl_datatype_size(jlrettype), align, align); } else { // must be jl_returninfo_t::Union emit_unionmove(ctx, sret, nullptr, retvalinfo, /*skip*/isboxed_union); } } else { - Type *store_ty = retvalinfo.V->getType(); - Value *Val = retvalinfo.V; - if (returninfo.return_roots) { - assert(julia_type_to_llvm(ctx, retvalinfo.typ) == store_ty); - emit_sret_roots(ctx, false, Val, store_ty, f->arg_begin() + 1, returninfo.return_roots); - } - ctx.builder.CreateAlignedStore(Val, sret, Align(julia_alignment(retvalinfo.typ))); + ctx.builder.CreateAlignedStore(retvalinfo.V, sret, align); assert(retvalinfo.TIndex == NULL && 
"unreachable"); // unimplemented representation } } @@ -9288,8 +9384,9 @@ static jl_llvm_functions_t PHINode *VN; jl_value_t *r; AllocaInst *dest; + SmallVector roots; BasicBlock *PhiBB; - std::tie(phi_result, PhiBB, dest, VN, r) = tup; + std::tie(phi_result, PhiBB, dest, VN, roots, r) = tup; jl_value_t *phiType = phi_result.typ; jl_array_t *edges = (jl_array_t*)jl_fieldref_noalloc(r, 0); jl_array_t *values = (jl_array_t*)jl_fieldref_noalloc(r, 1); @@ -9347,6 +9444,7 @@ static jl_llvm_functions_t val = mark_julia_const(ctx, val.constant); // be over-conservative at making sure `.typ` is set concretely, not tindex if (!jl_is_uniontype(phiType) || !TindexN) { if (VN) { + assert(roots.empty() && !dest); Value *V; if (val.typ == (jl_value_t*)jl_bottom_type) { V = undef_value_for_type(VN->getType()); @@ -9367,14 +9465,34 @@ static jl_llvm_functions_t VN->addIncoming(V, ctx.builder.GetInsertBlock()); assert(!TindexN); } - else if (dest && val.typ != (jl_value_t*)jl_bottom_type) { + else if ((dest || !roots.empty()) && val.typ != (jl_value_t*)jl_bottom_type) { // must be careful to emit undef here (rather than a bitcast or // load of val) if the runtime type of val isn't phiType + auto tracked = split_value_size((jl_datatype_t*)phiType).second; Value *isvalid = emit_isa_and_defined(ctx, val, phiType); - emit_guarded_test(ctx, isvalid, nullptr, [&] { - emit_unbox_store(ctx, update_julia_type(ctx, val, phiType), dest, ctx.tbaa().tbaa_stack, Align(julia_alignment(phiType))); - return nullptr; + assert(roots.size() == tracked && isvalid != nullptr); + SmallVector incomingroots(0); + if (tracked) + incomingroots.resize(tracked, Constant::getNullValue(ctx.types().T_prjlvalue)); + emit_guarded_test(ctx, isvalid, incomingroots, [&] { + jl_cgval_t typedval = update_julia_type(ctx, val, phiType); + SmallVector mayberoots(tracked, Constant::getNullValue(ctx.types().T_prjlvalue)); + if (typedval.typ != jl_bottom_type) { + Align align(julia_alignment(phiType)); + if (tracked) + split_value_into(ctx, typedval, align, dest, align, jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack), mayberoots); + else + emit_unbox_store(ctx, typedval, dest, ctx.tbaa().tbaa_stack, align); + } + return mayberoots; }); + for (size_t nr = 0; nr < tracked; nr++) + roots[nr]->addIncoming(incomingroots[nr], ctx.builder.GetInsertBlock()); + } + else if (!roots.empty()) { + Value *V = Constant::getNullValue(ctx.types().T_prjlvalue); + for (size_t nr = 0; nr < roots.size(); nr++) + roots[nr]->addIncoming(V, ctx.builder.GetInsertBlock()); } } else { @@ -9383,6 +9501,7 @@ static jl_llvm_functions_t // `V` is always initialized when it is used. 
// Ref https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96629 Value *V = nullptr; + assert(roots.empty()); if (val.typ == (jl_value_t*)jl_bottom_type) { if (VN) V = undef_value_for_type(VN->getType()); @@ -9473,11 +9592,10 @@ static jl_llvm_functions_t } if (TindexN) TindexN->addIncoming(RTindex, FromBB); - if (dest) { + if (dest) ctx.builder.CreateLifetimeStart(dest); - if (CountTrackedPointers(dest->getAllocatedType()).count) - ctx.builder.CreateStore(Constant::getNullValue(dest->getAllocatedType()), dest); - } + for (size_t nr = 0; nr < roots.size(); nr++) + roots[nr]->addIncoming(Constant::getNullValue(ctx.types().T_prjlvalue), FromBB); ctx.builder.ClearInsertionPoint(); } } @@ -9524,15 +9642,19 @@ static jl_llvm_functions_t if (ctx.vaSlot > 0) { // remove VA allocation if we never referenced it + assert(ctx.slots[ctx.vaSlot].isSA && ctx.slots[ctx.vaSlot].isArgument); Instruction *root = cast_or_null(ctx.slots[ctx.vaSlot].boxroot); if (root) { - Instruction *store_value = NULL; bool have_real_use = false; for (Use &U : root->uses()) { User *RU = U.getUser(); if (StoreInst *SRU = dyn_cast(RU)) { - if (!store_value) - store_value = dyn_cast(SRU->getValueOperand()); + assert(isa(SRU->getValueOperand()) || SRU->getValueOperand() == restTuple); + (void)SRU; + } + else if (MemSetInst *MSI = dyn_cast(RU)) { + assert(MSI->getValue() == ctx.builder.getInt8(0)); + (void)MSI; } else if (isa(RU)) { } @@ -9554,7 +9676,6 @@ static jl_llvm_functions_t if (use) use->eraseFromParent(); root->eraseFromParent(); - assert(!store_value || store_value == restTuple); restTuple->eraseFromParent(); } } diff --git a/src/intrinsics.cpp b/src/intrinsics.cpp index c747edfeffe5f..09916297e16ff 100644 --- a/src/intrinsics.cpp +++ b/src/intrinsics.cpp @@ -441,14 +441,14 @@ static Value *emit_unbox(jl_codectx_t &ctx, Type *to, const jl_cgval_t &x, jl_va // up being dead code, and type inference knows that the other // branch's type is the only one that matters. if (type_is_ghost(to)) { - return NULL; + return nullptr; } CreateTrap(ctx.builder); return UndefValue::get(to); // type mismatch error } - Constant *c = x.constant ? julia_const_to_llvm(ctx, x.constant) : NULL; - if (!x.ispointer() || c) { // already unboxed, but sometimes need conversion + Constant *c = x.constant ? julia_const_to_llvm(ctx, x.constant) : nullptr; + if ((x.inline_roots.empty() && !x.ispointer()) || c != nullptr) { // already unboxed, but sometimes need conversion Value *unboxed = c ? c : x.V; return emit_unboxed_coercion(ctx, to, unboxed); } @@ -473,28 +473,17 @@ static Value *emit_unbox(jl_codectx_t &ctx, Type *to, const jl_cgval_t &x, jl_va } unsigned alignment = julia_alignment(jt); - Type *ptype = to->getPointerTo(); - if (p->getType() != ptype && isa(p)) { - // LLVM's mem2reg can't handle coercion if the load/store type does - // not match the type of the alloca. As such, it is better to - // perform the load using the alloca's type and then perform the - // appropriate coercion manually. 
- AllocaInst *AI = cast(p); - Type *AllocType = AI->getAllocatedType(); - const DataLayout &DL = jl_Module->getDataLayout(); - if (!AI->isArrayAllocation() && - (AllocType->isFloatingPointTy() || AllocType->isIntegerTy() || AllocType->isPointerTy()) && - (to->isFloatingPointTy() || to->isIntegerTy() || to->isPointerTy()) && - DL.getTypeSizeInBits(AllocType) == DL.getTypeSizeInBits(to)) { - Instruction *load = ctx.builder.CreateAlignedLoad(AllocType, p, Align(alignment)); - setName(ctx.emission_context, load, p->getName() + ".unbox"); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa); - return emit_unboxed_coercion(ctx, to, ai.decorateInst(load)); - } + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa); + if (!x.inline_roots.empty()) { + assert(x.typ == jt); + AllocaInst *combined = emit_static_alloca(ctx, to, Align(alignment)); + auto combined_ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); + recombine_value(ctx, x, combined, combined_ai, Align(alignment), false); + p = combined; + ai = combined_ai; } Instruction *load = ctx.builder.CreateAlignedLoad(to, p, Align(alignment)); setName(ctx.emission_context, load, p->getName() + ".unbox"); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa); return ai.decorateInst(load); } @@ -508,18 +497,25 @@ static void emit_unbox_store(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dest return; } + auto dest_ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa_dest); + + if (!x.inline_roots.empty()) { + recombine_value(ctx, x, dest, dest_ai, alignment, isVolatile); + return; + } + if (!x.ispointer()) { // already unboxed, but sometimes need conversion (e.g. f32 -> i32) assert(x.V); Value *unboxed = zext_struct(ctx, x.V); StoreInst *store = ctx.builder.CreateAlignedStore(unboxed, dest, alignment); store->setVolatile(isVolatile); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa_dest); - ai.decorateInst(store); + dest_ai.decorateInst(store); return; } Value *src = data_pointer(ctx, x); - emit_memcpy(ctx, dest, jl_aliasinfo_t::fromTBAA(ctx, tbaa_dest), src, jl_aliasinfo_t::fromTBAA(ctx, x.tbaa), jl_datatype_size(x.typ), Align(alignment), Align(julia_alignment(x.typ)), isVolatile); + auto src_ai = jl_aliasinfo_t::fromTBAA(ctx, x.tbaa); + emit_memcpy(ctx, dest, dest_ai, src, src_ai, jl_datatype_size(x.typ), Align(alignment), Align(julia_alignment(x.typ)), isVolatile); } static jl_datatype_t *staticeval_bitstype(const jl_cgval_t &targ) @@ -832,10 +828,9 @@ static jl_cgval_t emit_pointerset(jl_codectx_t &ctx, ArrayRef argv) Value *im1 = ctx.builder.CreateSub(idx, ConstantInt::get(ctx.types().T_size, 1)); setName(ctx.emission_context, im1, "pointerset_idx"); - Value *thePtr; + Value *thePtr = emit_unbox(ctx, getPointerTy(ctx.builder.getContext()), e, e.typ); if (ety == (jl_value_t*)jl_any_type) { // unsafe_store to Ptr{Any} is allowed to implicitly drop GC roots. 
- thePtr = emit_unbox(ctx, ctx.types().T_size->getPointerTo(), e, e.typ); auto gep = ctx.builder.CreateInBoundsGEP(ctx.types().T_size, thePtr, im1); setName(ctx.emission_context, gep, "pointerset_ptr"); auto val = ctx.builder.CreatePtrToInt(emit_pointer_from_objref(ctx, boxed(ctx, x)), ctx.types().T_size); @@ -844,8 +839,10 @@ static jl_cgval_t emit_pointerset(jl_codectx_t &ctx, ArrayRef argv) jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_data); ai.decorateInst(store); } + else if (!x.inline_roots.empty()) { + recombine_value(ctx, e, thePtr, jl_aliasinfo_t(), Align(align_nb), false); + } else if (x.ispointer()) { - thePtr = emit_unbox(ctx, getPointerTy(ctx.builder.getContext()), e, e.typ); uint64_t size = jl_datatype_size(ety); im1 = ctx.builder.CreateMul(im1, ConstantInt::get(ctx.types().T_size, LLT_ALIGN(size, jl_datatype_align(ety)))); @@ -859,7 +856,6 @@ static jl_cgval_t emit_pointerset(jl_codectx_t &ctx, ArrayRef argv) Type *ptrty = julia_type_to_llvm(ctx, ety, &isboxed); assert(!isboxed); if (!type_is_ghost(ptrty)) { - thePtr = emit_unbox(ctx, ptrty->getPointerTo(), e, e.typ); thePtr = ctx.builder.CreateInBoundsGEP(ptrty, thePtr, im1); typed_store(ctx, thePtr, x, jl_cgval_t(), ety, ctx.tbaa().tbaa_data, nullptr, nullptr, isboxed, AtomicOrdering::NotAtomic, AtomicOrdering::NotAtomic, align_nb, nullptr, true, false, false, false, false, false, nullptr, "atomic_pointerset", nullptr, nullptr); diff --git a/src/llvm-codegen-shared.h b/src/llvm-codegen-shared.h index 956c04dbc7ded..a99e18f3e3762 100644 --- a/src/llvm-codegen-shared.h +++ b/src/llvm-codegen-shared.h @@ -125,7 +125,6 @@ struct CountTrackedPointers { CountTrackedPointers(llvm::Type *T, bool ignore_loaded=false); }; -unsigned TrackWithShadow(llvm::Value *Src, llvm::Type *T, bool isptr, llvm::Value *Dst, llvm::IRBuilder<> &irbuilder); llvm::SmallVector ExtractTrackedValues(llvm::Value *Src, llvm::Type *STy, bool isptr, llvm::IRBuilder<> &irbuilder, llvm::ArrayRef perm_offsets={}); static inline void llvm_dump(llvm::Value *v) diff --git a/src/llvm-final-gc-lowering.cpp b/src/llvm-final-gc-lowering.cpp index 0605098bec361..76dcd944890ab 100644 --- a/src/llvm-final-gc-lowering.cpp +++ b/src/llvm-final-gc-lowering.cpp @@ -202,9 +202,9 @@ bool FinalLowerGC::runOnFunction(Function &F) } while (0) LOWER_INTRINSIC(newGCFrame, lowerNewGCFrame); + LOWER_INTRINSIC(getGCFrameSlot, lowerGetGCFrameSlot); LOWER_INTRINSIC(pushGCFrame, lowerPushGCFrame); LOWER_INTRINSIC(popGCFrame, lowerPopGCFrame); - LOWER_INTRINSIC(getGCFrameSlot, lowerGetGCFrameSlot); LOWER_INTRINSIC(GCAllocBytes, lowerGCAllocBytes); LOWER_INTRINSIC(queueGCRoot, lowerQueueGCRoot); LOWER_INTRINSIC(safepoint, lowerSafepoint); diff --git a/src/llvm-gc-interface-passes.h b/src/llvm-gc-interface-passes.h index cb485751d407b..d33567e887118 100644 --- a/src/llvm-gc-interface-passes.h +++ b/src/llvm-gc-interface-passes.h @@ -312,7 +312,6 @@ struct State { SmallVector> CalleeRoots; // We don't bother doing liveness on Allocas that were not mem2reg'ed. // they just get directly sunk into the root array. 
- SmallVector Allocas; DenseMap ArrayAllocas; DenseMap ShadowAllocas; SmallVector, 0> TrackedStores; @@ -332,9 +331,9 @@ struct LateLowerGCFrame: private JuliaPassContext { void MaybeNoteDef(State &S, BBState &BBS, Value *Def, const ArrayRef &SafepointsSoFar, SmallVector &&RefinedPtr = SmallVector()); - void NoteUse(State &S, BBState &BBS, Value *V, LargeSparseBitVector &Uses); - void NoteUse(State &S, BBState &BBS, Value *V) { - NoteUse(S, BBS, V, BBS.UpExposedUses); + void NoteUse(State &S, BBState &BBS, Value *V, LargeSparseBitVector &Uses, Function &F); + void NoteUse(State &S, BBState &BBS, Value *V, Function &F) { + NoteUse(S, BBS, V, BBS.UpExposedUses, F); } void LiftPhi(State &S, PHINode *Phi); @@ -348,7 +347,7 @@ struct LateLowerGCFrame: private JuliaPassContext { SmallVector NumberAll(State &S, Value *V); SmallVector NumberAllBase(State &S, Value *Base); - void NoteOperandUses(State &S, BBState &BBS, User &UI); + void NoteOperandUses(State &S, BBState &BBS, Instruction &UI); void MaybeTrackDst(State &S, MemTransferInst *MI); void MaybeTrackStore(State &S, StoreInst *I); State LocalScan(Function &F); diff --git a/src/llvm-late-gc-lowering.cpp b/src/llvm-late-gc-lowering.cpp index 8d1d5ff73b261..1d390a5115207 100644 --- a/src/llvm-late-gc-lowering.cpp +++ b/src/llvm-late-gc-lowering.cpp @@ -695,8 +695,15 @@ static int NoteSafepoint(State &S, BBState &BBS, CallInst *CI, SmallVectorImpl(V->getType())) { +// if (isSpecialPtr(V->getType())) +// if (isa(V) && !isa(V)) +// F.dump(); +// } +//#endif if (isa(V)) return; if (isa(V->getType())) { @@ -718,9 +725,9 @@ void LateLowerGCFrame::NoteUse(State &S, BBState &BBS, Value *V, LargeSparseBitV } } -void LateLowerGCFrame::NoteOperandUses(State &S, BBState &BBS, User &UI) { +void LateLowerGCFrame::NoteOperandUses(State &S, BBState &BBS, Instruction &UI) { for (Use &U : UI.operands()) { - NoteUse(S, BBS, U); + NoteUse(S, BBS, U, *UI.getFunction()); } } @@ -1377,7 +1384,7 @@ State LateLowerGCFrame::LocalScan(Function &F) { unsigned nIncoming = Phi->getNumIncomingValues(); for (unsigned i = 0; i < nIncoming; ++i) { BBState &IncomingBBS = S.BBStates[Phi->getIncomingBlock(i)]; - NoteUse(S, IncomingBBS, Phi->getIncomingValue(i), IncomingBBS.PhiOuts); + NoteUse(S, IncomingBBS, Phi->getIncomingValue(i), IncomingBBS.PhiOuts, F); } } else if (tracked.count) { // We need to insert extra phis for the GC roots @@ -1403,7 +1410,7 @@ State LateLowerGCFrame::LocalScan(Function &F) { } else if (auto *AI = dyn_cast(&I)) { Type *ElT = AI->getAllocatedType(); if (AI->isStaticAlloca() && isa(ElT) && ElT->getPointerAddressSpace() == AddressSpace::Tracked) { - S.Allocas.push_back(AI); + S.ArrayAllocas[AI] = cast(AI->getArraySize())->getZExtValue(); } } } @@ -1494,18 +1501,17 @@ SmallVector ExtractTrackedValues(Value *Src, Type *STy, bool isptr, I return Ptrs; } -unsigned TrackWithShadow(Value *Src, Type *STy, bool isptr, Value *Dst, IRBuilder<> &irbuilder) { - auto Ptrs = ExtractTrackedValues(Src, STy, isptr, irbuilder); - for (unsigned i = 0; i < Ptrs.size(); ++i) { - Value *Elem = Ptrs[i]; - Value *Slot = irbuilder.CreateConstInBoundsGEP1_32(irbuilder.getInt8Ty(), Dst, i * sizeof(void*)); - StoreInst *shadowStore = irbuilder.CreateAlignedStore(Elem, Slot, Align(sizeof(void*))); - shadowStore->setOrdering(AtomicOrdering::NotAtomic); - // TODO: shadowStore->setMetadata(LLVMContext::MD_tbaa, tbaa_gcframe); - } - return Ptrs.size(); -} - +//static unsigned TrackWithShadow(Value *Src, Type *STy, bool isptr, Value *Dst, IRBuilder<> &irbuilder) { +// auto Ptrs = 
ExtractTrackedValues(Src, STy, isptr, irbuilder); +// for (unsigned i = 0; i < Ptrs.size(); ++i) { +// Value *Elem = Ptrs[i]; +// Value *Slot = irbuilder.CreateConstInBoundsGEP1_32(irbuilder.getInt8Ty(), Dst, i * sizeof(void*)); +// StoreInst *shadowStore = irbuilder.CreateAlignedStore(Elem, Slot, Align(sizeof(void*))); +// shadowStore->setOrdering(AtomicOrdering::NotAtomic); +// // TODO: shadowStore->setMetadata(LLVMContext::MD_tbaa, tbaa_gcframe); +// } +// return Ptrs.size(); +//} // turn a memcpy into a set of loads void LateLowerGCFrame::MaybeTrackDst(State &S, MemTransferInst *MI) { @@ -2321,7 +2327,7 @@ void LateLowerGCFrame::PlaceRootsAndUpdateCalls(SmallVectorImpl &Colors, St MaxColor = C; // Insert instructions for the actual gc frame - if (MaxColor != -1 || !S.Allocas.empty() || !S.ArrayAllocas.empty() || !S.TrackedStores.empty()) { + if (MaxColor != -1 || !S.ArrayAllocas.empty() || !S.TrackedStores.empty()) { // Create and push a GC frame. auto gcframe = CallInst::Create( getOrDeclare(jl_intrinsics::newGCFrame), @@ -2334,6 +2340,43 @@ void LateLowerGCFrame::PlaceRootsAndUpdateCalls(SmallVectorImpl &Colors, St {gcframe, ConstantInt::get(T_int32, 0)}); pushGcframe->insertAfter(pgcstack); + // we don't run memsetopt after this, so run a basic approximation of it + // that removes any redundant memset calls in the prologue since getGCFrameSlot already includes the null store + Instruction *toerase = nullptr; + for (auto &I : F->getEntryBlock()) { + if (toerase) + toerase->eraseFromParent(); + toerase = nullptr; + Value *ptr; + Value *value; + bool isvolatile; + if (auto *SI = dyn_cast(&I)) { + ptr = SI->getPointerOperand(); + value = SI->getValueOperand(); + isvolatile = SI->isVolatile(); + } + else if (auto *MSI = dyn_cast(&I)) { + ptr = MSI->getDest(); + value = MSI->getValue(); + isvolatile = MSI->isVolatile(); + } + else { + continue; + } + ptr = ptr->stripInBoundsOffsets(); + AllocaInst *AI = dyn_cast(ptr); + if (isa(ptr)) + break; + if (!S.ArrayAllocas.count(AI)) + continue; + if (isvolatile || !isa(value) || !cast(value)->isNullValue()) + break; // stop once we reach a pointer operation that couldn't be analyzed or isn't a null store + toerase = &I; + } + if (toerase) + toerase->eraseFromParent(); + toerase = nullptr; + // Replace Allocas unsigned AllocaSlot = 2; // first two words are metadata auto replace_alloca = [this, gcframe, &AllocaSlot, T_int32](AllocaInst *&AI) { @@ -2367,11 +2410,6 @@ void LateLowerGCFrame::PlaceRootsAndUpdateCalls(SmallVectorImpl &Colors, St AI->eraseFromParent(); AI = NULL; }; - for (AllocaInst *AI : S.Allocas) { - auto ns = cast(AI->getArraySize())->getZExtValue(); - replace_alloca(AI); - AllocaSlot += ns; - } for (auto AI : S.ArrayAllocas) { replace_alloca(AI.first); AllocaSlot += AI.second; diff --git a/test/compiler/codegen.jl b/test/compiler/codegen.jl index 07308713bb789..26ae965b35319 100644 --- a/test/compiler/codegen.jl +++ b/test/compiler/codegen.jl @@ -501,10 +501,9 @@ function f37262(x) end end @testset "#37262" begin - str = "store volatile { i8, {}*, {}*, {}*, {}* } zeroinitializer, { i8, {}*, {}*, {}*, {}* }* %phic" - str_opaque = "store volatile { i8, ptr, ptr, ptr, ptr } zeroinitializer, ptr %phic" + str_opaque = "getelementptr inbounds i8, ptr %.roots.phic, i32 8\n store volatile ptr null" llvmstr = get_llvm(f37262, (Bool,), false, false, false) - @test (contains(llvmstr, str) || contains(llvmstr, str_opaque)) || llvmstr + @test contains(llvmstr, str_opaque) @test f37262(Base.inferencebarrier(true)) === nothing end From 
c601b113be8f81a00711455349a3dfab7755de9d Mon Sep 17 00:00:00 2001 From: Daniel Wennberg Date: Wed, 25 Sep 2024 05:26:48 -0700 Subject: [PATCH 069/537] Update TaskLocalRNG docstring according to #49110 (#55863) Since #49110, which is included in 1.10 and 1.11, spawning a task no longer advances the parent task's RNG state, so this statement in the docs was incorrect. --- stdlib/Random/src/Xoshiro.jl | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/stdlib/Random/src/Xoshiro.jl b/stdlib/Random/src/Xoshiro.jl index 1909effbbc9e6..09a3e386e9a2b 100644 --- a/stdlib/Random/src/Xoshiro.jl +++ b/stdlib/Random/src/Xoshiro.jl @@ -185,8 +185,8 @@ end TaskLocalRNG The `TaskLocalRNG` has state that is local to its task, not its thread. -It is seeded upon task creation, from the state of its parent task. -Therefore, task creation is an event that changes the parent's RNG state. +It is seeded upon task creation, from the state of its parent task, but without +advancing the state of the parent's RNG. As an upside, the `TaskLocalRNG` is pretty fast, and permits reproducible multithreaded simulations (barring race conditions), independent of scheduler @@ -203,6 +203,9 @@ may be any integer. !!! compat "Julia 1.11" Seeding `TaskLocalRNG()` with a negative integer seed requires at least Julia 1.11. + +!!! compat "Julia 1.10" + Task creation no longer advances the parent task's RNG state as of Julia 1.10. """ struct TaskLocalRNG <: AbstractRNG end TaskLocalRNG(::Nothing) = TaskLocalRNG() From 6e5e87b2cafda840b90347c2e74202d2608d7c29 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Wed, 25 Sep 2024 09:40:06 -0300 Subject: [PATCH 070/537] Root globals in toplevel exprs (#54433) This fixes #54422, the code here assumes that top level exprs are always rooted, but I don't see that referenced anywhere else, or guaranteed, so conservatively always root objects that show up in code. --- src/codegen.cpp | 5 +++-- test/gc.jl | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index c719f4ff54078..abb21fcbca27e 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6473,8 +6473,9 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_ jl_value_t *val = expr; if (jl_is_quotenode(expr)) val = jl_fieldref_noalloc(expr, 0); - if (jl_is_method(ctx.linfo->def.method)) // toplevel exprs are already rooted - val = jl_ensure_rooted(ctx, val); + // Toplevel exprs are rooted but because codegen assumes this is constant, it removes the write barriers for this code. + // This means we have to globally root the value here. 
(The other option would be to change how we optimize toplevel code) + val = jl_ensure_rooted(ctx, val); return mark_julia_const(ctx, val); } diff --git a/test/gc.jl b/test/gc.jl index f924f4952cfb0..e46ff0ed73fd9 100644 --- a/test/gc.jl +++ b/test/gc.jl @@ -72,3 +72,17 @@ end @testset "Base.GC docstrings" begin @test isempty(Docs.undocumented_names(GC)) end + +#testset doesn't work here because this needs to run in top level +#Check that we ensure objects in toplevel exprs are rooted +global dims54422 = [] # allocate the Binding +GC.gc(); GC.gc(); # force the binding to be old +GC.enable(false); # prevent new objects from being old +@eval begin + Base.Experimental.@force_compile # use the compiler + dims54422 = $([]) + nothing +end +GC.enable(true); GC.gc(false) # incremental collection +@test typeof(dims54422) == Vector{Any} +@test isempty(dims54422) From 7a76e32c0e28133c3e229df7009c1eb7a6cc86d5 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Wed, 25 Sep 2024 19:35:03 -0400 Subject: [PATCH 071/537] codegen: fix alignment typos (#55880) So easy to type jl_datatype_align to get the natural alignment instead of julia_alignment to get the actual alignment. This should fix the Revise workload. Change is visible with ``` julia> code_llvm(Random.XoshiroSimd.forkRand, (Random.TaskLocalRNG, Base.Val{8})) ``` --- src/cgutils.cpp | 2 ++ src/codegen.cpp | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/cgutils.cpp b/src/cgutils.cpp index 9124638ce7446..7f96bb1047abc 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -3637,6 +3637,8 @@ static void union_alloca_type(jl_uniontype_t *ut, }, (jl_value_t*)ut, counter); + if (align > JL_HEAP_ALIGNMENT) + align = JL_HEAP_ALIGNMENT; } static AllocaInst *try_emit_union_alloca(jl_codectx_t &ctx, jl_uniontype_t *ut, bool &allunbox, size_t &min_align, size_t &nbytes) diff --git a/src/codegen.cpp b/src/codegen.cpp index abb21fcbca27e..a452e0fccd0c5 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5151,7 +5151,7 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos Value *val = arg.V; SmallVector roots(arg.inline_roots); if (roots.empty()) - std::tie(val, roots) = split_value(ctx, arg, Align(jl_datatype_align(jt))); + std::tie(val, roots) = split_value(ctx, arg, Align(julia_alignment(jt))); AllocaInst *proots = emit_static_roots(ctx, roots.size()); for (size_t i = 0; i < roots.size(); i++) ctx.builder.CreateAlignedStore(roots[i], emit_ptrgep(ctx, proots, i * sizeof(void*)), Align(sizeof(void*))); @@ -7859,7 +7859,7 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value } props.cc = jl_returninfo_t::SRet; props.union_bytes = jl_datatype_size(jlrettype); - props.union_align = props.union_minalign = jl_datatype_align(jlrettype); + props.union_align = props.union_minalign = julia_alignment(jlrettype); // sret is always passed from alloca assert(M); fsig.push_back(rt->getPointerTo(M->getDataLayout().getAllocaAddrSpace())); From e4b29f71e7ca0e033ff3510b06d7534e4045e068 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Thu, 26 Sep 2024 15:34:03 +0100 Subject: [PATCH 072/537] Fix some corner cases of `isapprox` with unsigned integers (#55828) --- base/floatfuncs.jl | 4 +++- test/floatfuncs.jl | 29 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 1 deletion(-) diff --git a/base/floatfuncs.jl b/base/floatfuncs.jl index 67e7899b4107c..2c26f7cff1133 100644 --- a/base/floatfuncs.jl +++ b/base/floatfuncs.jl 
@@ -232,7 +232,9 @@ function isapprox(x::Integer, y::Integer; if norm === abs && atol < 1 && rtol == 0 return x == y else - return norm(x - y) <= max(atol, rtol*max(norm(x), norm(y))) + # We need to take the difference `max` - `min` when comparing unsigned integers. + _x, _y = x < y ? (x, y) : (y, x) + return norm(_y - _x) <= max(atol, rtol*max(norm(_x), norm(_y))) end end diff --git a/test/floatfuncs.jl b/test/floatfuncs.jl index f33ec75b58322..d5d697634bcfa 100644 --- a/test/floatfuncs.jl +++ b/test/floatfuncs.jl @@ -257,6 +257,35 @@ end end end +@testset "isapprox and unsigned integers" begin + for T in Base.BitUnsigned_types + # Test also combinations of different integer types + W = widen(T) + # The order of the operands for difference between unsigned integers is + # very important, test both combinations. + @test isapprox(T(42), T(42); rtol=T(0), atol=0.5) + @test isapprox(T(42), W(42); rtol=T(0), atol=0.5) + @test !isapprox(T(0), T(1); rtol=T(0), atol=0.5) + @test !isapprox(T(1), T(0); rtol=T(0), atol=0.5) + @test isapprox(T(1), T(3); atol=T(2)) + @test isapprox(T(4), T(2); atol=T(2)) + @test isapprox(T(1), W(3); atol=T(2)) + @test isapprox(T(4), W(2); atol=T(2)) + @test isapprox(T(5), T(7); atol=typemax(T)) + @test isapprox(T(8), T(6); atol=typemax(T)) + @test isapprox(T(1), T(2); rtol=1) + @test isapprox(T(6), T(3); rtol=1) + @test isapprox(T(1), W(2); rtol=1) + @test isapprox(T(6), W(3); rtol=1) + @test !isapprox(typemin(T), typemax(T)) + @test !isapprox(typemax(T), typemin(T)) + @test !isapprox(typemin(T), typemax(T); atol=typemax(T)-T(1)) + @test !isapprox(typemax(T), typemin(T); atol=typemax(T)-T(1)) + @test isapprox(typemin(T), typemax(T); atol=typemax(T)) + @test isapprox(typemax(T), typemin(T); atol=typemax(T)) + end +end + @testset "Conversion from floating point to unsigned integer near extremes (#51063)" begin @test_throws InexactError UInt32(4.2949673f9) @test_throws InexactError UInt64(1.8446744f19) From a5178a7c71d2253dd4b714dd2257f6d721e08534 Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Thu, 26 Sep 2024 20:19:51 -0400 Subject: [PATCH 073/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=20ef9f76c17=20to=2051d4910c1=20(#55896)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 | 1 + .../Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 | 1 + .../Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 | 1 - .../Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 | 1 - stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 create mode 100644 deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 delete mode 100644 deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 diff --git a/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 b/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 new file mode 100644 index 0000000000000..b5b82565470c0 --- /dev/null +++ b/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 @@ -0,0 +1 @@ +88b8a25a8d465ac8cc94d13bc5f51707 diff --git a/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 
b/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 new file mode 100644 index 0000000000000..a746b269d91f0 --- /dev/null +++ b/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 @@ -0,0 +1 @@ +22262687f3bf75292ab0170e19a9c4a494022a653b2811443b8c52bc099dee0fddd09f6632ae42b3193adf3b0693ddcb6679b5d91e50a500f65261df5b7ced7d diff --git a/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 b/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 deleted file mode 100644 index 39dbb56dbaf53..0000000000000 --- a/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -080b5cb82d208245cba014f1dfcb8033 diff --git a/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 b/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 deleted file mode 100644 index 2f95d4a0e28da..0000000000000 --- a/deps/checksums/Pkg-ef9f76c175872bab6803da4a5fa3fd99bce3d03a.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -1b91505c78d2608afa89ceea16f645bb41c0737815aec1853ad72c9751e7299b264135c9a40a6319f68b973073a151619b925d7a9655c46526bccf501b116113 diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index f5ca169a775c6..34233c58702b4 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = ef9f76c175872bab6803da4a5fa3fd99bce3d03a +PKG_SHA1 = 51d4910c114a863d888659cb8962c1e161b2a421 PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From 32b9e1ac9fa31019aa3779b3c401a80bc94cb61f Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Thu, 26 Sep 2024 21:06:46 -0400 Subject: [PATCH 074/537] Profile: fix order of fields in heapsnapshot & improve formatting (#55890) --- src/gc-heap-snapshot.cpp | 42 ++++++++----------- stdlib/Profile/src/heapsnapshot_reassemble.jl | 10 ++++- 2 files changed, 27 insertions(+), 25 deletions(-) diff --git a/src/gc-heap-snapshot.cpp b/src/gc-heap-snapshot.cpp index fcda11dad4f8a..72eb17115f4c7 100644 --- a/src/gc-heap-snapshot.cpp +++ b/src/gc-heap-snapshot.cpp @@ -618,38 +618,32 @@ void final_serialize_heap_snapshot(ios_t *json, ios_t *strings, HeapSnapshot &sn { // mimicking https://github.com/nodejs/node/blob/5fd7a72e1c4fbaf37d3723c4c81dce35c149dc84/deps/v8/src/profiler/heap-snapshot-generator.cc#L2567-L2567 // also https://github.com/microsoft/vscode-v8-heap-tools/blob/c5b34396392397925ecbb4ecb904a27a2754f2c1/v8-heap-parser/src/decoder.rs#L43-L51 - ios_printf(json, "{\"snapshot\":{"); + ios_printf(json, "{\"snapshot\":{\n"); - ios_printf(json, "\"meta\":{"); - ios_printf(json, "\"node_fields\":[\"type\",\"name\",\"id\",\"self_size\",\"edge_count\",\"trace_node_id\",\"detachedness\"],"); - ios_printf(json, "\"node_types\":["); + ios_printf(json, " \"meta\":{\n"); + ios_printf(json, " \"node_fields\":[\"type\",\"name\",\"id\",\"self_size\",\"edge_count\",\"trace_node_id\",\"detachedness\"],\n"); + ios_printf(json, " \"node_types\":["); snapshot.node_types.print_json_array(json, false); ios_printf(json, ","); - ios_printf(json, "\"string\", \"number\", \"number\", \"number\", \"number\", \"number\"],"); - ios_printf(json, "\"edge_fields\":[\"type\",\"name_or_index\",\"to_node\"],"); - ios_printf(json, "\"edge_types\":["); + ios_printf(json, "\"string\", \"number\", \"number\", \"number\", \"number\", \"number\"],\n"); + ios_printf(json, " \"edge_fields\":[\"type\",\"name_or_index\",\"to_node\"],\n"); + ios_printf(json, " 
\"edge_types\":["); snapshot.edge_types.print_json_array(json, false); ios_printf(json, ","); - ios_printf(json, "\"string_or_number\",\"from_node\"],"); + ios_printf(json, "\"string_or_number\",\"from_node\"],\n"); // not used. Required by microsoft/vscode-v8-heap-tools - ios_printf(json, "\"trace_function_info_fields\":[\"function_id\",\"name\",\"script_name\",\"script_id\",\"line\",\"column\"],"); - ios_printf(json, "\"trace_node_fields\":[\"id\",\"function_info_index\",\"count\",\"size\",\"children\"],"); - ios_printf(json, "\"sample_fields\":[\"timestamp_us\",\"last_assigned_id\"],"); - ios_printf(json, "\"location_fields\":[\"object_index\",\"script_id\",\"line\",\"column\"]"); + ios_printf(json, " \"trace_function_info_fields\":[\"function_id\",\"name\",\"script_name\",\"script_id\",\"line\",\"column\"],\n"); + ios_printf(json, " \"trace_node_fields\":[\"id\",\"function_info_index\",\"count\",\"size\",\"children\"],\n"); + ios_printf(json, " \"sample_fields\":[\"timestamp_us\",\"last_assigned_id\"],\n"); + ios_printf(json, " \"location_fields\":[\"object_index\",\"script_id\",\"line\",\"column\"]\n"); // end not used - ios_printf(json, "},\n"); // end "meta" + ios_printf(json, " },\n"); // end "meta" - ios_printf(json, "\"node_count\":%zu,", snapshot.num_nodes); - ios_printf(json, "\"edge_count\":%zu,", snapshot.num_edges); - ios_printf(json, "\"trace_function_count\":0"); // not used. Required by microsoft/vscode-v8-heap-tools - ios_printf(json, "},\n"); // end "snapshot" - - // not used. Required by microsoft/vscode-v8-heap-tools - ios_printf(json, "\"trace_function_infos\":[],"); - ios_printf(json, "\"trace_tree\":[],"); - ios_printf(json, "\"samples\":[],"); - ios_printf(json, "\"locations\":[]"); - // end not used + ios_printf(json, " \"node_count\":%zu,\n", snapshot.num_nodes); + ios_printf(json, " \"edge_count\":%zu,\n", snapshot.num_edges); + ios_printf(json, " \"trace_function_count\":0\n"); // not used. Required by microsoft/vscode-v8-heap-tools + ios_printf(json, "}\n"); // end "snapshot" + // this } is removed by the julia reassembler in Profile ios_printf(json, "}"); } diff --git a/stdlib/Profile/src/heapsnapshot_reassemble.jl b/stdlib/Profile/src/heapsnapshot_reassemble.jl index 2413ae538b8ac..b2d86ee1f27b6 100644 --- a/stdlib/Profile/src/heapsnapshot_reassemble.jl +++ b/stdlib/Profile/src/heapsnapshot_reassemble.jl @@ -155,7 +155,8 @@ function assemble_snapshot(in_prefix, io::IO) _write_decimal_number(io, nodes.edge_count[i], _digits_buf) print(io, ",0,0") end - print(io, "],\"edges\":[") + print(io, "],\n") + print(io, "\"edges\":[") e = 1 for n in 1:length(nodes) count = nodes.edge_count[n] @@ -177,6 +178,13 @@ function assemble_snapshot(in_prefix, io::IO) end println(io, "],") + # not used. 
Required by microsoft/vscode-v8-heap-tools + # This order of these fields is required by chrome dev tools otherwise loading fails + println(io, "\"trace_function_infos\":[],") + println(io, "\"trace_tree\":[],") + println(io, "\"samples\":[],") + println(io, "\"locations\":[],") + println(io, "\"strings\":[") open(string(in_prefix, ".strings"), "r") do strings_io first = true From 60be4094fb4eb4d4e4780b920a96e027522cd692 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Thu, 26 Sep 2024 21:07:47 -0400 Subject: [PATCH 075/537] Profile: Improve generation of clickable terminal links (#55857) --- base/path.jl | 13 ++++----- stdlib/Profile/src/Profile.jl | 53 +++++++++++++++++++++-------------- 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/base/path.jl b/base/path.jl index f6d3266d9738c..69c8d22c63c54 100644 --- a/base/path.jl +++ b/base/path.jl @@ -614,6 +614,11 @@ for f in (:isdirpath, :splitdir, :splitdrive, :splitext, :normpath, :abspath) @eval $f(path::AbstractString) = $f(String(path)) end +# RFC3986 Section 2.1 +percent_escape(s) = '%' * join(map(b -> uppercase(string(b, base=16)), codeunits(s)), '%') +# RFC3986 Section 2.3 +encode_uri_component(s) = replace(s, r"[^A-Za-z0-9\-_.~/]+" => percent_escape) + """ uripath(path::AbstractString) @@ -636,10 +641,6 @@ function uripath end @static if Sys.iswindows() function uripath(path::String) - percent_escape(s) = # RFC3986 Section 2.1 - '%' * join(map(b -> uppercase(string(b, base=16)), codeunits(s)), '%') - encode_uri_component(s) = # RFC3986 Section 2.3 - replace(s, r"[^A-Za-z0-9\-_.~/]+" => percent_escape) path = abspath(path) if startswith(path, "\\\\") # UNC path, RFC8089 Appendix E.3 unixpath = join(eachsplit(path, path_separator_re, keepempty=false), '/') @@ -653,10 +654,6 @@ function uripath end end else function uripath(path::String) - percent_escape(s) = # RFC3986 Section 2.1 - '%' * join(map(b -> uppercase(string(b, base=16)), codeunits(s)), '%') - encode_uri_component(s) = # RFC3986 Section 2.3 - replace(s, r"[^A-Za-z0-9\-_.~/]+" => percent_escape) localpath = join(eachsplit(abspath(path), path_separator_re, keepempty=false), '/') host = if ispath("/proc/sys/fs/binfmt_misc/WSLInterop") # WSL sigil distro = get(ENV, "WSL_DISTRO_NAME", "") # See diff --git a/stdlib/Profile/src/Profile.jl b/stdlib/Profile/src/Profile.jl index c7ef1efb35945..b753c9ca88f24 100644 --- a/stdlib/Profile/src/Profile.jl +++ b/stdlib/Profile/src/Profile.jl @@ -7,7 +7,7 @@ Profiling support. ## CPU profiling - `@profile foo()` to profile a specific call. -- `Profile.print()` to print the report. +- `Profile.print()` to print the report. Paths are clickable links in supported terminals and specialized for JULIA_EDITOR etc. - `Profile.clear()` to clear the buffer. - Send a $(Sys.isbsd() ? "SIGINFO (ctrl-t)" : "SIGUSR1") signal to the process to automatically trigger a profile and print. @@ -198,7 +198,9 @@ const META_OFFSET_THREADID = 5 Prints profiling results to `io` (by default, `stdout`). If you do not supply a `data` vector, the internal buffer of accumulated backtraces -will be used. +will be used. Paths are clickable links in supported terminals and +specialized for [`JULIA_EDITOR`](@ref) with line numbers, or just file +links if no editor is set. The keyword arguments can be any combination of: @@ -807,26 +809,35 @@ end # make a terminal-clickable link to the file and linenum. 
# Similar to `define_default_editors` in `Base.Filesystem` but for creating URIs not commands function editor_link(path::String, linenum::Int) - editor = get(ENV, "JULIA_EDITOR", "") - - if editor == "code" - return "vscode://file/$path:$linenum" - elseif editor == "subl" || editor == "sublime_text" - return "subl://$path:$linenum" - elseif editor == "idea" || occursin("idea", editor) - return "idea://open?file=$path&line=$linenum" - elseif editor == "pycharm" - return "pycharm://open?file=$path&line=$linenum" - elseif editor == "atom" - return "atom://core/open/file?filename=$path&line=$linenum" - elseif editor == "emacsclient" - return "emacs://open?file=$path&line=$linenum" - elseif editor == "vim" || editor == "nvim" - return "vim://open?file=$path&line=$linenum" - else - # TODO: convert the path to a generic URI (line numbers are not supported by generic URI) - return path + # Note: the editor path can include spaces (if escaped) and flags. + editor = nothing + for var in ["JULIA_EDITOR", "VISUAL", "EDITOR"] + str = get(ENV, var, nothing) + str isa String || continue + editor = str + break + end + path_encoded = Base.Filesystem.encode_uri_component(path) + if editor !== nothing + if editor == "code" + return "vscode://file/$path_encoded:$linenum" + elseif editor == "subl" || editor == "sublime_text" + return "subl://open?url=file://$path_encoded&line=$linenum" + elseif editor == "idea" || occursin("idea", editor) + return "idea://open?file=$path_encoded&line=$linenum" + elseif editor == "pycharm" + return "pycharm://open?file=$path_encoded&line=$linenum" + elseif editor == "atom" + return "atom://core/open/file?filename=$path_encoded&line=$linenum" + elseif editor == "emacsclient" || editor == "emacs" + return "emacs://open?file=$path_encoded&line=$linenum" + elseif editor == "vim" || editor == "nvim" + # Note: Vim/Nvim may not support standard URI schemes without specific plugins + return "vim://open?file=$path_encoded&line=$linenum" + end end + # fallback to generic URI, but line numbers are not supported by generic URI + return Base.Filesystem.uripath(path) end function print_flat(io::IO, lilist::Vector{StackFrame}, From 4b27a169bda6ac970fc677962c30af51a6a9ca74 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 27 Sep 2024 11:34:58 +0900 Subject: [PATCH 076/537] inference: add missing `TypeVar` handling for `instanceof_tfunc` (#55884) I thought these sort of problems had been addressed by d60f92c, but it seems some were missed. Specifically, `t.a` and `t.b` from `t::Union` could be `TypeVar`, and if they are passed to a subroutine or recursed without being unwrapped or rewrapped, errors like JuliaLang/julia#55882 could occur. This commit resolves the issue by calling `unwraptv` in the `Union` handling within `instanceof_tfunc`. I also found a similar issue inside `nfields_tfunc`, so that has also been fixed, and test cases have been added. While I haven't been able to make up a test case specifically for the fix in `instanceof_tfunc`, I have confirmed that this commit certainly fixes the issue reported in JuliaLang/julia#55882. 
- fixes JuliaLang/julia#55882 --- base/compiler/tfuncs.jl | 8 ++++---- test/compiler/inference.jl | 3 +++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index ab3b50763deec..cc8ba227bd088 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -135,8 +135,8 @@ function instanceof_tfunc(@nospecialize(t), astag::Bool=false, @nospecialize(tro end return tr, isexact, isconcrete, istype elseif isa(t, Union) - ta, isexact_a, isconcrete_a, istype_a = instanceof_tfunc(t.a, astag, troot) - tb, isexact_b, isconcrete_b, istype_b = instanceof_tfunc(t.b, astag, troot) + ta, isexact_a, isconcrete_a, istype_a = instanceof_tfunc(unwraptv(t.a), astag, troot) + tb, isexact_b, isconcrete_b, istype_b = instanceof_tfunc(unwraptv(t.b), astag, troot) isconcrete = isconcrete_a && isconcrete_b istype = istype_a && istype_b # most users already handle the Union case, so here we assume that @@ -563,9 +563,9 @@ add_tfunc(Core.sizeof, 1, 1, sizeof_tfunc, 1) end end if isa(x, Union) - na = nfields_tfunc(𝕃, x.a) + na = nfields_tfunc(𝕃, unwraptv(x.a)) na === Int && return Int - return tmerge(na, nfields_tfunc(𝕃, x.b)) + return tmerge(𝕃, na, nfields_tfunc(𝕃, unwraptv(x.b))) end return Int end diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index d1382d3c84b82..46009e0790942 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -6152,3 +6152,6 @@ end t155751 = Union{AbstractArray{UInt8, 4}, Array{Float32, 4}, Grid55751{Float32, 3, _A} where _A} t255751 = Array{Float32, 3} @test Core.Compiler.tmerge_types_slow(t155751,t255751) == AbstractArray # shouldn't hang + +issue55882_nfields(x::Union{T,Nothing}) where T<:Number = nfields(x) +@test Base.infer_return_type(issue55882_nfields) <: Int From 0dbb6eb679c1c124c212ae9ce399004873041cf1 Mon Sep 17 00:00:00 2001 From: Milan Bouchet-Valat Date: Fri, 27 Sep 2024 10:29:32 +0200 Subject: [PATCH 077/537] Install terminfo data under /usr/share/julia (#55881) Just like all other libraries, we don't want internal Julia files to mess with system files. Introduced by https://github.com/JuliaLang/julia/pull/55411. 
--- Makefile | 2 +- base/terminfo.jl | 2 +- deps/terminfo.mk | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 735d342a79eb5..e440f243d876e 100644 --- a/Makefile +++ b/Makefile @@ -410,7 +410,7 @@ endif $(INSTALL_F) $(JULIAHOME)/contrib/julia.appdata.xml $(DESTDIR)$(datarootdir)/metainfo/ # Install terminal info database ifneq ($(WITH_TERMINFO),0) - cp -R -L $(build_datarootdir)/terminfo $(DESTDIR)$(datarootdir) + cp -R -L $(build_datarootdir)/julia/terminfo $(DESTDIR)$(datarootdir)/julia/ endif # Update RPATH entries and JL_SYSTEM_IMAGE_PATH if $(private_libdir_rel) != $(build_private_libdir_rel) diff --git a/base/terminfo.jl b/base/terminfo.jl index 79713f4a86aa3..8ea8387077d36 100644 --- a/base/terminfo.jl +++ b/base/terminfo.jl @@ -262,7 +262,7 @@ function find_terminfo_file(term::String) append!(terminfo_dirs, replace(split(ENV["TERMINFO_DIRS"], ':'), "" => "/usr/share/terminfo")) - push!(terminfo_dirs, normpath(Sys.BINDIR, DATAROOTDIR, "terminfo")) + push!(terminfo_dirs, normpath(Sys.BINDIR, DATAROOTDIR, "julia", "terminfo")) Sys.isunix() && push!(terminfo_dirs, "/etc/terminfo", "/lib/terminfo", "/usr/share/terminfo") for dir in terminfo_dirs diff --git a/deps/terminfo.mk b/deps/terminfo.mk index 63194f786f566..60865838a813e 100644 --- a/deps/terminfo.mk +++ b/deps/terminfo.mk @@ -22,8 +22,8 @@ $(BUILDDIR)/TermInfoDB-v$(TERMINFO_VER)/build-checked: $(BUILDDIR)/TermInfoDB-v$ echo 1 > $@ define TERMINFO_INSTALL - mkdir -p $2/$$(build_datarootdir) - cp -R $1/terminfo $2/$$(build_datarootdir) + mkdir -p $2/$$(build_datarootdir)/julia + cp -R $1/terminfo $2/$$(build_datarootdir)/julia/ endef $(eval $(call staged-install, \ terminfo,TermInfoDB-v$(TERMINFO_VER), \ From 6e33dfb202e5a0adce02fd29220f6314101edc1c Mon Sep 17 00:00:00 2001 From: Diogo Netto <61364108+d-netto@users.noreply.github.com> Date: Fri, 27 Sep 2024 08:37:07 -0300 Subject: [PATCH 078/537] expose metric to report reasons why full GCs were triggered (#55826) Additional GC observability tool. This will help us to diagnose why some of our servers are triggering so many full GCs in certain circumstances. --- base/timing.jl | 27 +++++++++++++++++++++++++++ src/gc-stock.c | 15 +++++++++++++-- src/gc-stock.h | 14 ++++++++++++++ test/gc.jl | 11 +++++++++++ 4 files changed, 65 insertions(+), 2 deletions(-) diff --git a/base/timing.jl b/base/timing.jl index 80ebb74abee26..6d97d70d2f04c 100644 --- a/base/timing.jl +++ b/base/timing.jl @@ -104,6 +104,33 @@ function gc_page_utilization_data() return Base.unsafe_wrap(Array, page_utilization_raw, JL_GC_N_MAX_POOLS, own=false) end +# must be kept in sync with `src/gc-stock.h`` +const FULL_SWEEP_REASONS = [:FULL_SWEEP_REASON_SWEEP_ALWAYS_FULL, :FULL_SWEEP_REASON_FORCED_FULL_SWEEP, + :FULL_SWEEP_REASON_USER_MAX_EXCEEDED, :FULL_SWEEP_REASON_LARGE_PROMOTION_RATE] + +""" + Base.full_sweep_reasons() + +Return a dictionary of the number of times each full sweep reason has occurred. + +The reasons are: +- `:FULL_SWEEP_REASON_SWEEP_ALWAYS_FULL`: Full sweep was caused due to `always_full` being set in the GC debug environment +- `:FULL_SWEEP_REASON_FORCED_FULL_SWEEP`: Full sweep was forced by `GC.gc(true)` +- `:FULL_SWEEP_REASON_USER_MAX_EXCEEDED`: Full sweep was forced due to the system reaching the heap soft size limit +- `:FULL_SWEEP_REASON_LARGE_PROMOTION_RATE`: Full sweep was forced by a large promotion rate across GC generations + +Note that the set of reasons is not guaranteed to be stable across minor versions of Julia. 
+""" +function full_sweep_reasons() + reason = cglobal(:jl_full_sweep_reasons, UInt64) + reasons_as_array = Base.unsafe_wrap(Vector{UInt64}, reason, length(FULL_SWEEP_REASONS), own=false) + d = Dict{Symbol, Int64}() + for (i, r) in enumerate(FULL_SWEEP_REASONS) + d[r] = reasons_as_array[i] + end + return d +end + """ Base.jit_total_bytes() diff --git a/src/gc-stock.c b/src/gc-stock.c index d25f8917f302d..6b97881909bbd 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -40,6 +40,8 @@ uv_sem_t gc_sweep_assists_needed; uv_mutex_t gc_queue_observer_lock; // Tag for sentinel nodes in bigval list uintptr_t gc_bigval_sentinel_tag; +// Table recording number of full GCs due to each reason +JL_DLLEXPORT uint64_t jl_full_sweep_reasons[FULL_SWEEP_NUM_REASONS]; // Flag that tells us whether we need to support conservative marking // of objects. @@ -3043,10 +3045,12 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection) // we either free some space or get an OOM error. if (gc_sweep_always_full) { sweep_full = 1; + gc_count_full_sweep_reason(FULL_SWEEP_REASON_SWEEP_ALWAYS_FULL); } if (collection == JL_GC_FULL && !prev_sweep_full) { sweep_full = 1; recollect = 1; + gc_count_full_sweep_reason(FULL_SWEEP_REASON_FORCED_FULL_SWEEP); } if (sweep_full) { // these are the difference between the number of gc-perm bytes scanned @@ -3182,10 +3186,17 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection) } double old_ratio = (double)promoted_bytes/(double)heap_size; - if (heap_size > user_max || old_ratio > 0.15) + if (heap_size > user_max) { next_sweep_full = 1; - else + gc_count_full_sweep_reason(FULL_SWEEP_REASON_USER_MAX_EXCEEDED); + } + else if (old_ratio > 0.15) { + next_sweep_full = 1; + gc_count_full_sweep_reason(FULL_SWEEP_REASON_LARGE_PROMOTION_RATE); + } + else { next_sweep_full = 0; + } if (heap_size > user_max || thrashing) under_pressure = 1; // sweeping is over diff --git a/src/gc-stock.h b/src/gc-stock.h index 45c93bf4289ae..46f7d3e11e105 100644 --- a/src/gc-stock.h +++ b/src/gc-stock.h @@ -505,6 +505,20 @@ FORCE_INLINE void gc_big_object_link(bigval_t *sentinel_node, bigval_t *node) JL sentinel_node->next = node; } +// Must be kept in sync with `base/timing.jl` +#define FULL_SWEEP_REASON_SWEEP_ALWAYS_FULL (0) +#define FULL_SWEEP_REASON_FORCED_FULL_SWEEP (1) +#define FULL_SWEEP_REASON_USER_MAX_EXCEEDED (2) +#define FULL_SWEEP_REASON_LARGE_PROMOTION_RATE (3) +#define FULL_SWEEP_NUM_REASONS (4) + +extern JL_DLLEXPORT uint64_t jl_full_sweep_reasons[FULL_SWEEP_NUM_REASONS]; +STATIC_INLINE void gc_count_full_sweep_reason(int reason) JL_NOTSAFEPOINT +{ + assert(reason >= 0 && reason < FULL_SWEEP_NUM_REASONS); + jl_full_sweep_reasons[reason]++; +} + extern uv_mutex_t gc_perm_lock; extern uv_mutex_t gc_threads_lock; extern uv_cond_t gc_threads_cond; diff --git a/test/gc.jl b/test/gc.jl index e46ff0ed73fd9..c532f17f04eb5 100644 --- a/test/gc.jl +++ b/test/gc.jl @@ -49,6 +49,13 @@ function issue_54275_test() @test !live_bytes_has_grown_too_much end +function full_sweep_reasons_test() + GC.gc() + reasons = Base.full_sweep_reasons() + @test reasons[:FULL_SWEEP_REASON_FORCED_FULL_SWEEP] >= 1 + @test keys(reasons) == Set(Base.FULL_SWEEP_REASONS) +end + # !!! note: # Since we run our tests on 32bit OS as well we confine ourselves # to parameters that allocate about 512MB of objects. 
Max RSS is lower @@ -73,6 +80,10 @@ end @test isempty(Docs.undocumented_names(GC)) end +@testset "Full GC reasons" begin + full_sweep_reasons_test() +end + #testset doesn't work here because this needs to run in top level #Check that we ensure objects in toplevel exprs are rooted global dims54422 = [] # allocate the Binding From 3aad027fc5631f2b5ca81e0133518f134b2b6c03 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 27 Sep 2024 10:15:24 -0400 Subject: [PATCH 079/537] Revert "Improve printing of several arguments" (#55894) Reverts JuliaLang/julia#55754 as it overrode some performance heuristics which appeared to be giving a significant gain/loss in performance: Closes https://github.com/JuliaLang/julia/issues/55893 --- base/strings/io.jl | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/base/strings/io.jl b/base/strings/io.jl index c78e3e2e043b6..754e058cd2f54 100644 --- a/base/strings/io.jl +++ b/base/strings/io.jl @@ -42,7 +42,9 @@ end function print(io::IO, xs...) lock(io) try - foreach(Fix1(print, io), xs) + for x in xs + print(io, x) + end finally unlock(io) end @@ -136,9 +138,15 @@ function print_to_string(xs...) if isempty(xs) return "" end - siz = sum(_str_sizehint, xs; init = 0) + siz::Int = 0 + for x in xs + siz += _str_sizehint(x) + end + # specialized for performance reasons s = IOBuffer(sizehint=siz) - print(s, xs...) + for x in xs + print(s, x) + end String(_unsafe_take!(s)) end @@ -146,10 +154,16 @@ function string_with_env(env, xs...) if isempty(xs) return "" end - siz = sum(_str_sizehint, xs; init = 0) + siz::Int = 0 + for x in xs + siz += _str_sizehint(x) + end + # specialized for performance reasons s = IOBuffer(sizehint=siz) env_io = IOContext(s, env) - print(env_io, xs...) + for x in xs + print(env_io, x) + end String(_unsafe_take!(s)) end From 00f0a6c63c1e5ce996fba5ef187522f4990ee9b4 Mon Sep 17 00:00:00 2001 From: David Widmann Date: Sat, 28 Sep 2024 02:21:21 +0200 Subject: [PATCH 080/537] Do not trigger deprecation warnings in `Test.detect_ambiguities` and `Test.detect_unbound_args` (#55869) #55868 --- stdlib/Test/src/Test.jl | 4 ++-- test/ambiguous.jl | 16 ++++++++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/stdlib/Test/src/Test.jl b/stdlib/Test/src/Test.jl index b4ada2ce3a9cf..46bc2d8790cec 100644 --- a/stdlib/Test/src/Test.jl +++ b/stdlib/Test/src/Test.jl @@ -2087,7 +2087,7 @@ function detect_ambiguities(mods::Module...; while !isempty(work) mod = pop!(work) for n in names(mod, all = true) - Base.isdeprecated(mod, n) && continue + (!Base.isbindingresolved(mod, n) || Base.isdeprecated(mod, n)) && continue if !isdefined(mod, n) if is_in_mods(mod, recursive, mods) if allowed_undefineds === nothing || GlobalRef(mod, n) ∉ allowed_undefineds @@ -2158,7 +2158,7 @@ function detect_unbound_args(mods...; while !isempty(work) mod = pop!(work) for n in names(mod, all = true) - Base.isdeprecated(mod, n) && continue + (!Base.isbindingresolved(mod, n) || Base.isdeprecated(mod, n)) && continue if !isdefined(mod, n) if is_in_mods(mod, recursive, mods) if allowed_undefineds === nothing || GlobalRef(mod, n) ∉ allowed_undefineds diff --git a/test/ambiguous.jl b/test/ambiguous.jl index acdfdc70ba30c..2f8a4193cf592 100644 --- a/test/ambiguous.jl +++ b/test/ambiguous.jl @@ -162,6 +162,22 @@ end ambs = detect_ambiguities(Ambig48312) @test length(ambs) == 4 +module UnboundAmbig55868 + module B + struct C end + export C + Base.@deprecate_binding D C + end + using .B + export C, D +end +@test 
!Base.isbindingresolved(UnboundAmbig55868, :C) +@test !Base.isbindingresolved(UnboundAmbig55868, :D) +@test isempty(detect_unbound_args(UnboundAmbig55868)) +@test isempty(detect_ambiguities(UnboundAmbig55868)) +@test !Base.isbindingresolved(UnboundAmbig55868, :C) +@test !Base.isbindingresolved(UnboundAmbig55868, :D) + # Test that Core and Base are free of ambiguities # not using isempty so this prints more information when it fails @testset "detect_ambiguities" begin From 4a4ca9c815207a80ea81b884b196dfeafc3cb877 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 27 Sep 2024 21:49:29 -0400 Subject: [PATCH 081/537] do not intentionally suppress errors in precompile script from being reported or failing the result (#55909) I was slightly annoying that the build was set up to succeed if this step failed, so I removed the error suppression and fixed up the script slightly --- contrib/generate_precompile.jl | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/contrib/generate_precompile.jl b/contrib/generate_precompile.jl index d3e73a1b1865a..60f7290c7a0ac 100644 --- a/contrib/generate_precompile.jl +++ b/contrib/generate_precompile.jl @@ -347,8 +347,7 @@ generate_precompile_statements() = try # Make sure `ansi_enablecursor` is printe print_state("step1" => "F$n_step1") return :ok end - Base.errormonitor(step1) - !PARALLEL_PRECOMPILATION && wait(step1) + PARALLEL_PRECOMPILATION ? bind(statements_step1, step1) : wait(step1) # Create a staging area where all the loaded packages are available PrecompileStagingArea = Module() @@ -362,7 +361,7 @@ generate_precompile_statements() = try # Make sure `ansi_enablecursor` is printe # Make statements unique statements = Set{String}() # Execute the precompile statements - for sts in [statements_step1,], statement in sts + for statement in statements_step1 # Main should be completely clean occursin("Main.", statement) && continue Base.in!(statement, statements) && continue @@ -398,6 +397,7 @@ generate_precompile_statements() = try # Make sure `ansi_enablecursor` is printe println() # Seems like a reasonable number right now, adjust as needed # comment out if debugging script + have_repl = false n_succeeded > (have_repl ? 
650 : 90) || @warn "Only $n_succeeded precompile statements" fetch(step1) == :ok || throw("Step 1 of collecting precompiles failed.") @@ -408,7 +408,6 @@ generate_precompile_statements() = try # Make sure `ansi_enablecursor` is printe finally fancyprint && print(ansi_enablecursor) GC.gc(true); GC.gc(false); # reduce memory footprint - return end generate_precompile_statements() From ff0a1befb9cff915abb06c551e1bed4ab7331790 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sat, 28 Sep 2024 15:45:14 +0530 Subject: [PATCH 082/537] Remove eigvecs method for SymTridiagonal (#55903) The fallback method does the same, so this specialized method isn't necessary --- stdlib/LinearAlgebra/src/tridiag.jl | 2 -- 1 file changed, 2 deletions(-) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index e755ce63e9b2a..ca61eb8519d42 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -319,8 +319,6 @@ eigmax(A::SymTridiagonal) = eigvals(A, size(A, 1):size(A, 1))[1] eigmin(A::SymTridiagonal) = eigvals(A, 1:1)[1] #Compute selected eigenvectors only corresponding to particular eigenvalues -eigvecs(A::SymTridiagonal) = eigen(A).vectors - """ eigvecs(A::SymTridiagonal[, eigvals]) -> Matrix From 97ecdb8595c4a1fbe68ba6f39b3244e8cdabc2c6 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson Date: Sat, 28 Sep 2024 19:02:49 -0400 Subject: [PATCH 083/537] add --trim option for generating smaller binaries (#55047) This adds a command line option `--trim` that builds images where code is only included if it is statically reachable from methods marked using the new function `entrypoint`. Compile-time errors are given for call sites that are too dynamic to allow trimming the call graph (however there is an `unsafe` option if you want to try building anyway to see what happens). The PR has two other components. One is changes to Base that generally allow more code to be compiled in this mode. These changes will either be merged in separate PRs or moved to a separate part of the workflow (where we will build a custom system image for this purpose). The branch is set up this way to make it easy to check out and try the functionality. The other component is everything in the `juliac/` directory, which implements a compiler driver script based on this new option, along with some examples and tests. This will eventually become a package "app" that depends on PackageCompiler and provides a CLI for all of this stuff, so it will not be merged here. To try an example: ``` julia contrib/juliac.jl --output-exe hello --trim test/trimming/hello.jl ``` When stripped the resulting executable is currently about 900kb on my machine. 
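For context, the input to that command is just an ordinary script whose `main` function the
build script registers as an entry point. The real contents of `test/trimming/hello.jl` are not
reproduced in this message; the following is only an illustrative sketch of that shape (the
zero-argument `main` and the use of `Core.stdout` follow the build script and the trimming docs
added in this PR, and are not necessarily the exact file):

```julia
# Hypothetical stand-in for a trimmable entry-point script: juliac-buildscript.jl
# looks for a `main` function in the loaded module and registers it via
# Base.Experimental.entrypoint(mod.main, ()) when building an executable.
function main()
    # Print through Core.stdout, whose type is known, so the call resolves statically.
    println(Core.stdout, "Hello, world!")
    return 0
end
```

For library-style entry points, methods can instead be marked explicitly with
`Base.Experimental.entrypoint(f, (ArgTypes...,))`.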
Also includes a lot of work by @topolarity --------- Co-authored-by: Gabriel Baraldi Co-authored-by: Tim Holy Co-authored-by: Cody Tapscott --- Makefile | 4 +- NEWS.md | 2 + base/experimental.jl | 14 + base/libuv.jl | 5 +- base/options.jl | 1 + base/reflection.jl | 11 +- base/strings/io.jl | 4 + contrib/julia-config.jl | 2 +- contrib/juliac-buildscript.jl | 277 ++++++++++++++++++ contrib/juliac.jl | 110 +++++++ doc/src/devdocs/sysimg.md | 77 +++++ doc/src/manual/command-line-interface.md | 2 +- src/aotcompile.cpp | 69 ++++- src/cgutils.cpp | 12 + src/codegen-stubs.c | 1 + src/codegen.cpp | 355 +++++++++++++++++++++-- src/gf.c | 26 +- src/init.c | 11 +- src/jitlayers.h | 6 +- src/jl_exported_funcs.inc | 3 + src/jloptions.c | 26 +- src/jloptions.h | 1 + src/julia.expmap.in | 4 +- src/julia.h | 7 + src/julia_internal.h | 7 +- src/module.c | 2 +- src/precompile.c | 10 +- src/precompile_utils.c | 80 +++++ src/staticdata.c | 190 ++++++++++-- src/support/arraylist.h | 17 +- stdlib/LinearAlgebra/src/blas.jl | 2 +- stdlib/LinearAlgebra/src/lbt.jl | 4 +- test/Makefile | 8 +- test/trimming/Makefile | 55 ++++ test/trimming/hello.jl | 6 + test/trimming/init.c | 9 + test/trimming/trimming.jl | 7 + 37 files changed, 1338 insertions(+), 89 deletions(-) create mode 100644 contrib/juliac-buildscript.jl create mode 100644 contrib/juliac.jl create mode 100644 test/trimming/Makefile create mode 100644 test/trimming/hello.jl create mode 100644 test/trimming/init.c create mode 100644 test/trimming/trimming.jl diff --git a/Makefile b/Makefile index e440f243d876e..4fd8b878c5d1f 100644 --- a/Makefile +++ b/Makefile @@ -82,7 +82,7 @@ julia-deps: | $(DIRS) $(build_datarootdir)/julia/base $(build_datarootdir)/julia julia-stdlib: | $(DIRS) julia-deps @$(MAKE) $(QUIET_MAKE) -C $(BUILDROOT)/stdlib -julia-base: julia-deps $(build_sysconfdir)/julia/startup.jl $(build_man1dir)/julia.1 $(build_datarootdir)/julia/julia-config.jl +julia-base: julia-deps $(build_sysconfdir)/julia/startup.jl $(build_man1dir)/julia.1 $(build_datarootdir)/julia/julia-config.jl $(build_datarootdir)/julia/juliac.jl $(build_datarootdir)/julia/juliac-buildscript.jl @$(MAKE) $(QUIET_MAKE) -C $(BUILDROOT)/base julia-libccalltest: julia-deps @@ -181,7 +181,7 @@ $(build_sysconfdir)/julia/startup.jl: $(JULIAHOME)/etc/startup.jl | $(build_sysc @echo Creating usr/etc/julia/startup.jl @cp $< $@ -$(build_datarootdir)/julia/julia-config.jl: $(JULIAHOME)/contrib/julia-config.jl | $(build_datarootdir)/julia +$(build_datarootdir)/julia/%: $(JULIAHOME)/contrib/% | $(build_datarootdir)/julia $(INSTALL_M) $< $(dir $@) $(build_depsbindir)/stringreplace: $(JULIAHOME)/contrib/stringreplace.c | $(build_depsbindir) diff --git a/NEWS.md b/NEWS.md index 9ecdd87f0c2bb..ca2bf1f615012 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,6 +4,8 @@ Julia v1.12 Release Notes New language features --------------------- +- New option `--trim` for building "trimmed" binaries, where code not provably reachable from entry points + is removed. Entry points can be marked using `Base.Experimental.entrypoint` ([#55047]). - A new keyword argument `usings::Bool` has been added to `names`. By using this, we can now find all the names available in module `A` by `names(A; all=true, imported=true, usings=true)`. ([#54609]) - the `@atomic(...)` macro family supports now the reference assignment syntax, e.g. 
diff --git a/base/experimental.jl b/base/experimental.jl index 58c7258120f3f..6e757e9fa0e5f 100644 --- a/base/experimental.jl +++ b/base/experimental.jl @@ -457,4 +457,18 @@ without adding them to the global method table. """ :@MethodTable +""" + Base.Experimental.entrypoint(f, argtypes::Tuple) + +Mark a method for inclusion when the `--trim` option is specified. +""" +function entrypoint(@nospecialize(f), @nospecialize(argtypes::Tuple)) + entrypoint(Tuple{Core.Typeof(f), argtypes...}) +end + +function entrypoint(@nospecialize(argt::Type)) + ccall(:jl_add_entrypoint, Int32, (Any,), argt) + nothing +end + end diff --git a/base/libuv.jl b/base/libuv.jl index 143201598fde0..3c9f79dfa7b2c 100644 --- a/base/libuv.jl +++ b/base/libuv.jl @@ -133,7 +133,10 @@ function uv_return_spawn end function uv_asynccb end function uv_timercb end -function reinit_stdio() +reinit_stdio() = _reinit_stdio() +# we need this so it can be called by codegen to print errors, even after +# reinit_stdio has been redefined by the juliac build script. +function _reinit_stdio() global stdin = init_stdio(ccall(:jl_stdin_stream, Ptr{Cvoid}, ()))::IO global stdout = init_stdio(ccall(:jl_stdout_stream, Ptr{Cvoid}, ()))::IO global stderr = init_stdio(ccall(:jl_stderr_stream, Ptr{Cvoid}, ()))::IO diff --git a/base/options.jl b/base/options.jl index 41ce3c9e20909..1de7a2acb1e06 100644 --- a/base/options.jl +++ b/base/options.jl @@ -58,6 +58,7 @@ struct JLOptions permalloc_pkgimg::Int8 heap_size_hint::UInt64 trace_compile_timing::Int8 + trim::Int8 end # This runs early in the sysimage != is not defined yet diff --git a/base/reflection.jl b/base/reflection.jl index 5b395efc58190..fe48b6f9aa6b9 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -1504,6 +1504,13 @@ struct CodegenParams """ use_jlplt::Cint + """ + If enabled, only provably reachable code (from functions marked with `entrypoint`) is included + in the output system image. Errors or warnings can be given for call sites too dynamic to handle. + The option is disabled by default. (0=>disabled, 1=>safe (static errors), 2=>unsafe, 3=>unsafe plus warnings) + """ + trim::Cint + """ A pointer of type @@ -1519,14 +1526,14 @@ struct CodegenParams prefer_specsig::Bool=false, gnu_pubnames::Bool=true, debug_info_kind::Cint = default_debug_info_kind(), debug_info_level::Cint = Cint(JLOptions().debug_level), safepoint_on_entry::Bool=true, - gcstack_arg::Bool=true, use_jlplt::Bool=true, + gcstack_arg::Bool=true, use_jlplt::Bool=true, trim::Cint=Cint(0), lookup::Ptr{Cvoid}=unsafe_load(cglobal(:jl_rettype_inferred_addr, Ptr{Cvoid}))) return new( Cint(track_allocations), Cint(code_coverage), Cint(prefer_specsig), Cint(gnu_pubnames), debug_info_kind, debug_info_level, Cint(safepoint_on_entry), - Cint(gcstack_arg), Cint(use_jlplt), + Cint(gcstack_arg), Cint(use_jlplt), Cint(trim), lookup) end end diff --git a/base/strings/io.jl b/base/strings/io.jl index 754e058cd2f54..df34712b519d5 100644 --- a/base/strings/io.jl +++ b/base/strings/io.jl @@ -51,6 +51,8 @@ function print(io::IO, xs...) return nothing end +setfield!(typeof(print).name.mt, :max_args, 10, :monotonic) + """ println([io::IO], xs...) @@ -74,6 +76,7 @@ julia> String(take!(io)) """ println(io::IO, xs...) = print(io, xs..., "\n") +setfield!(typeof(println).name.mt, :max_args, 10, :monotonic) ## conversion of general objects to strings ## """ @@ -149,6 +152,7 @@ function print_to_string(xs...) 
end String(_unsafe_take!(s)) end +setfield!(typeof(print_to_string).name.mt, :max_args, 10, :monotonic) function string_with_env(env, xs...) if isempty(xs) diff --git a/contrib/julia-config.jl b/contrib/julia-config.jl index c692b3f522fb2..8b1eb55cbe4f4 100755 --- a/contrib/julia-config.jl +++ b/contrib/julia-config.jl @@ -67,7 +67,7 @@ function ldlibs(doframework) "julia" end if Sys.isunix() - return "-Wl,-rpath,$(shell_escape(libDir())) -Wl,-rpath,$(shell_escape(private_libDir())) -l$libname" + return "-L$(shell_escape(private_libDir())) -Wl,-rpath,$(shell_escape(libDir())) -Wl,-rpath,$(shell_escape(private_libDir())) -l$libname" else return "-l$libname -lopenlibm" end diff --git a/contrib/juliac-buildscript.jl b/contrib/juliac-buildscript.jl new file mode 100644 index 0000000000000..50f96198c416b --- /dev/null +++ b/contrib/juliac-buildscript.jl @@ -0,0 +1,277 @@ +# Script to run in the process that generates juliac's object file output + +inputfile = ARGS[1] +output_type = ARGS[2] +add_ccallables = ARGS[3] == "true" + +# Initialize some things not usually initialized when output is requested +Sys.__init__() +Base.init_depot_path() +Base.init_load_path() +Base.init_active_project() +task = current_task() +task.rngState0 = 0x5156087469e170ab +task.rngState1 = 0x7431eaead385992c +task.rngState2 = 0x503e1d32781c2608 +task.rngState3 = 0x3a77f7189200c20b +task.rngState4 = 0x5502376d099035ae +uuid_tuple = (UInt64(0), UInt64(0)) +ccall(:jl_set_module_uuid, Cvoid, (Any, NTuple{2, UInt64}), Base.__toplevel__, uuid_tuple) +ccall(:jl_set_newly_inferred, Cvoid, (Any,), Core.Compiler.newly_inferred) + +# Patch methods in Core and Base + +@eval Core begin + DomainError(@nospecialize(val), @nospecialize(msg::AbstractString)) = (@noinline; $(Expr(:new, :DomainError, :val, :msg))) +end + +(f::Base.RedirectStdStream)(io::Core.CoreSTDOUT) = Base._redirect_io_global(io, f.unix_fd) + +@eval Base begin + _assert_tostring(msg) = "" + reinit_stdio() = nothing + JuliaSyntax.enable_in_core!() = nothing + init_active_project() = ACTIVE_PROJECT[] = nothing + set_active_project(projfile::Union{AbstractString,Nothing}) = ACTIVE_PROJECT[] = projfile + disable_library_threading() = nothing + start_profile_listener() = nothing + @inline function invokelatest(f::F, args...; kwargs...) where F + return f(args...; kwargs...) + end + function sprint(f::F, args::Vararg{Any,N}; context=nothing, sizehint::Integer=0) where {F<:Function,N} + s = IOBuffer(sizehint=sizehint) + if context isa Tuple + f(IOContext(s, context...), args...) + elseif context !== nothing + f(IOContext(s, context), args...) + else + f(s, args...) 
+ end + String(_unsafe_take!(s)) + end + function show_typeish(io::IO, @nospecialize(T)) + if T isa Type + show(io, T) + elseif T isa TypeVar + print(io, (T::TypeVar).name) + else + print(io, "?") + end + end + function show(io::IO, T::Type) + if T isa DataType + print(io, T.name.name) + if T !== T.name.wrapper && length(T.parameters) > 0 + print(io, "{") + first = true + for p in T.parameters + if !first + print(io, ", ") + end + first = false + if p isa Int + show(io, p) + elseif p isa Type + show(io, p) + elseif p isa Symbol + print(io, ":") + print(io, p) + elseif p isa TypeVar + print(io, p.name) + else + print(io, "?") + end + end + print(io, "}") + end + elseif T isa Union + print(io, "Union{") + show_typeish(io, T.a) + print(io, ", ") + show_typeish(io, T.b) + print(io, "}") + elseif T isa UnionAll + print(io, T.body::Type) + print(io, " where ") + print(io, T.var.name) + end + end + show_type_name(io::IO, tn::Core.TypeName) = print(io, tn.name) + + mapreduce(f::F, op::F2, A::AbstractArrayOrBroadcasted; dims=:, init=_InitialValue()) where {F, F2} = + _mapreduce_dim(f, op, init, A, dims) + mapreduce(f::F, op::F2, A::AbstractArrayOrBroadcasted...; kw...) where {F, F2} = + reduce(op, map(f, A...); kw...) + + _mapreduce_dim(f::F, op::F2, nt, A::AbstractArrayOrBroadcasted, ::Colon) where {F, F2} = + mapfoldl_impl(f, op, nt, A) + + _mapreduce_dim(f::F, op::F2, ::_InitialValue, A::AbstractArrayOrBroadcasted, ::Colon) where {F, F2} = + _mapreduce(f, op, IndexStyle(A), A) + + _mapreduce_dim(f::F, op::F2, nt, A::AbstractArrayOrBroadcasted, dims) where {F, F2} = + mapreducedim!(f, op, reducedim_initarray(A, dims, nt), A) + + _mapreduce_dim(f::F, op::F2, ::_InitialValue, A::AbstractArrayOrBroadcasted, dims) where {F,F2} = + mapreducedim!(f, op, reducedim_init(f, op, A, dims), A) + + mapreduce_empty_iter(f::F, op::F2, itr, ItrEltype) where {F, F2} = + reduce_empty_iter(MappingRF(f, op), itr, ItrEltype) + mapreduce_first(f::F, op::F2, x) where {F,F2} = reduce_first(op, f(x)) + + _mapreduce(f::F, op::F2, A::AbstractArrayOrBroadcasted) where {F,F2} = _mapreduce(f, op, IndexStyle(A), A) + mapreduce_empty(::typeof(identity), op::F, T) where {F} = reduce_empty(op, T) + mapreduce_empty(::typeof(abs), op::F, T) where {F} = abs(reduce_empty(op, T)) + mapreduce_empty(::typeof(abs2), op::F, T) where {F} = abs2(reduce_empty(op, T)) +end +@eval Base.Unicode begin + function utf8proc_map(str::Union{String,SubString{String}}, options::Integer, chartransform::F = identity) where F + nwords = utf8proc_decompose(str, options, C_NULL, 0, chartransform) + buffer = Base.StringVector(nwords*4) + nwords = utf8proc_decompose(str, options, buffer, nwords, chartransform) + nbytes = ccall(:utf8proc_reencode, Int, (Ptr{UInt8}, Int, Cint), buffer, nwords, options) + nbytes < 0 && utf8proc_error(nbytes) + return String(resize!(buffer, nbytes)) + end +end +@eval Base.GMP begin + function __init__() + try + ccall((:__gmp_set_memory_functions, libgmp), Cvoid, + (Ptr{Cvoid},Ptr{Cvoid},Ptr{Cvoid}), + cglobal(:jl_gc_counted_malloc), + cglobal(:jl_gc_counted_realloc_with_old_size), + cglobal(:jl_gc_counted_free_with_size)) + ZERO.alloc, ZERO.size, ZERO.d = 0, 0, C_NULL + ONE.alloc, ONE.size, ONE.d = 1, 1, pointer(_ONE) + catch ex + Base.showerror_nostdio(ex, "WARNING: Error during initialization of module GMP") + end + # This only works with a patched version of GMP, ignore otherwise + try + ccall((:__gmp_set_alloc_overflow_function, libgmp), Cvoid, + (Ptr{Cvoid},), + cglobal(:jl_throw_out_of_memory_error)) + 
ALLOC_OVERFLOW_FUNCTION[] = true + catch ex + # ErrorException("ccall: could not find function...") + if typeof(ex) != ErrorException + rethrow() + end + end + end +end +@eval Base.Sort begin + issorted(itr; + lt::T=isless, by::F=identity, rev::Union{Bool,Nothing}=nothing, order::Ordering=Forward) where {T,F} = + issorted(itr, ord(lt,by,rev,order)) +end +@eval Base.TOML begin + function try_return_datetime(p, year, month, day, h, m, s, ms) + return DateTime(year, month, day, h, m, s, ms) + end + function try_return_date(p, year, month, day) + return Date(year, month, day) + end + function parse_local_time(l::Parser) + h = @try parse_int(l, false) + h in 0:23 || return ParserError(ErrParsingDateTime) + _, m, s, ms = @try _parse_local_time(l, true) + # TODO: Could potentially parse greater accuracy for the + # fractional seconds here. + return try_return_time(l, h, m, s, ms) + end + function try_return_time(p, h, m, s, ms) + return Time(h, m, s, ms) + end +end + +# Load user code + +import Base.Experimental.entrypoint + +let mod = Base.include(Base.__toplevel__, inputfile) + if !isa(mod, Module) + mod = Main + end + if output_type == "--output-exe" && isdefined(mod, :main) && !add_ccallables + entrypoint(mod.main, ()) + end + #entrypoint(join, (Base.GenericIOBuffer{Memory{UInt8}}, Array{Base.SubString{String}, 1}, String)) + #entrypoint(join, (Base.GenericIOBuffer{Memory{UInt8}}, Array{String, 1}, Char)) + entrypoint(Base.task_done_hook, (Task,)) + entrypoint(Base.wait, ()) + entrypoint(Base.trypoptask, (Base.StickyWorkqueue,)) + entrypoint(Base.checktaskempty, ()) + if add_ccallables + ccall(:jl_add_ccallable_entrypoints, Cvoid, ()) + end +end + +# Additional method patches depending on whether user code loads certain stdlibs + +let loaded = Symbol.(Base.loaded_modules_array()) # TODO better way to do this + if :SparseArrays in loaded + using SparseArrays + @eval SparseArrays.CHOLMOD begin + function __init__() + ccall((:SuiteSparse_config_malloc_func_set, :libsuitesparseconfig), + Cvoid, (Ptr{Cvoid},), cglobal(:jl_malloc, Ptr{Cvoid})) + ccall((:SuiteSparse_config_calloc_func_set, :libsuitesparseconfig), + Cvoid, (Ptr{Cvoid},), cglobal(:jl_calloc, Ptr{Cvoid})) + ccall((:SuiteSparse_config_realloc_func_set, :libsuitesparseconfig), + Cvoid, (Ptr{Cvoid},), cglobal(:jl_realloc, Ptr{Cvoid})) + ccall((:SuiteSparse_config_free_func_set, :libsuitesparseconfig), + Cvoid, (Ptr{Cvoid},), cglobal(:jl_free, Ptr{Cvoid})) + end + end + end + if :Artifacts in loaded + using Artifacts + @eval Artifacts begin + function _artifact_str(__module__, artifacts_toml, name, path_tail, artifact_dict, hash, platform, _::Val{lazyartifacts}) where lazyartifacts + moduleroot = Base.moduleroot(__module__) + if haskey(Base.module_keys, moduleroot) + # Process overrides for this UUID, if we know what it is + process_overrides(artifact_dict, Base.module_keys[moduleroot].uuid) + end + + # If the artifact exists, we're in the happy path and we can immediately + # return the path to the artifact: + dirs = artifact_paths(hash; honor_overrides=true) + for dir in dirs + if isdir(dir) + return jointail(dir, path_tail) + end + end + end + end + end + if :Pkg in loaded + using Pkg + @eval Pkg begin + __init__() = rand() #TODO, methods that do nothing don't get codegened + end + end + if :StyledStrings in loaded + using StyledStrings + @eval StyledStrings begin + __init__() = rand() + end + end +end + +empty!(Core.ARGS) +empty!(Base.ARGS) +empty!(LOAD_PATH) +empty!(DEPOT_PATH) +empty!(Base.TOML_CACHE.d) 
+Base.TOML.reinit!(Base.TOML_CACHE.p, "") +Base.ACTIVE_PROJECT[] = nothing +@eval Base begin + PROGRAM_FILE = "" +end +@eval Sys begin + BINDIR = "" + STDLIB = "" +end diff --git a/contrib/juliac.jl b/contrib/juliac.jl new file mode 100644 index 0000000000000..61e0e91958667 --- /dev/null +++ b/contrib/juliac.jl @@ -0,0 +1,110 @@ +# Julia compiler wrapper script +# NOTE: The interface and location of this script are considered unstable/experimental + +cmd = Base.julia_cmd() +cmd = `$cmd --startup-file=no --history-file=no` +output_type = nothing # exe, sharedlib, sysimage +trim = nothing +outname = nothing +file = nothing +add_ccallables = false + +help = findfirst(x->x == "--help", ARGS) +if help !== nothing + println( + """ + Usage: julia juliac.jl [--output-exe | --output-lib | --output-sysimage] [options] + --trim= Only output code statically determined to be reachable + --compile-ccallable Include all methods marked `@ccallable` in output + --verbose Request verbose output + """) + exit(0) +end + +let i = 1 + while i <= length(ARGS) + arg = ARGS[i] + if arg == "--output-exe" || arg == "--output-lib" || arg == "--output-sysimage" + isnothing(output_type) || error("Multiple output types specified") + global output_type = arg + i == length(ARGS) && error("Output specifier requires an argument") + global outname = ARGS[i+1] + i += 1 + elseif startswith(arg, "--trim") + arg = split(arg, '=') + if length(arg) == 1 + global trim = "safe" + else + global trim = arg[2] + end + elseif arg == "--compile-ccallable" + global add_ccallables = true + else + if arg[1] == '-' || !isnothing(file) + println("Unexpected argument `$arg`") + exit(1) + end + global file = arg + end + i += 1 + end +end + +isnothing(outname) && error("No output file specified") +isnothing(file) && error("No input file specified") + +absfile = abspath(file) +cflags = readchomp(`$(cmd) $(joinpath(Sys.BINDIR, Base.DATAROOTDIR,"julia", "julia-config.jl")) --cflags `) +cflags = Base.shell_split(cflags) +allflags = readchomp(`$(cmd) $(joinpath(Sys.BINDIR, Base.DATAROOTDIR,"julia", "julia-config.jl")) --allflags`) +allflags = Base.shell_split(allflags) +tmpdir = mktempdir(cleanup=false) +initsrc_path = joinpath(tmpdir, "init.c") +init_path = joinpath(tmpdir, "init.a") +img_path = joinpath(tmpdir, "img.a") +bc_path = joinpath(tmpdir, "img-bc.a") + +open(initsrc_path, "w") do io + print(io, """ + #include + __attribute__((constructor)) void static_init(void) { + if (jl_is_initialized()) + return; + julia_init(JL_IMAGE_IN_MEMORY); + jl_exception_clear(); + } + """) +end + +static_call_graph_arg() = isnothing(trim) ? `` : `--trim=$(trim)` +is_verbose() = verbose ? `--verbose-compilation=yes` : `` +cmd = addenv(`$cmd --project=$(Base.active_project()) --output-o $img_path --output-incremental=no --strip-ir --strip-metadata $(static_call_graph_arg()) $(joinpath(@__DIR__,"juliac-buildscript.jl")) $absfile $output_type $add_ccallables`, "OPENBLAS_NUM_THREADS" => 1, "JULIA_NUM_THREADS" => 1) + +if !success(pipeline(cmd; stdout, stderr)) + println(stderr, "\nFailed to compile $file") + exit(1) +end + +run(`cc $(cflags) -g -c -o $init_path $initsrc_path`) + +if output_type == "--output-lib" || output_type == "--output-sysimage" + of, ext = splitext(outname) + soext = "." * Base.BinaryPlatforms.platform_dlext() + if ext == "" + outname = of * soext + end +end + +julia_libs = Base.shell_split(Base.isdebugbuild() ? 
"-ljulia-debug -ljulia-internal-debug" : "-ljulia -ljulia-internal") +try + if output_type == "--output-lib" + run(`cc $(allflags) -o $outname -shared -Wl,$(Base.Linking.WHOLE_ARCHIVE) $img_path -Wl,$(Base.Linking.NO_WHOLE_ARCHIVE) $init_path $(julia_libs)`) + elseif output_type == "--output-sysimage" + run(`cc $(allflags) -o $outname -shared -Wl,$(Base.Linking.WHOLE_ARCHIVE) $img_path -Wl,$(Base.Linking.NO_WHOLE_ARCHIVE) $(julia_libs)`) + else + run(`cc $(allflags) -o $outname -Wl,$(Base.Linking.WHOLE_ARCHIVE) $img_path -Wl,$(Base.Linking.NO_WHOLE_ARCHIVE) $init_path $(julia_libs)`) + end +catch + println("\nCompilation failed.") + exit(1) +end diff --git a/doc/src/devdocs/sysimg.md b/doc/src/devdocs/sysimg.md index 7d4f7afdbb86a..64c309e1fb02a 100644 --- a/doc/src/devdocs/sysimg.md +++ b/doc/src/devdocs/sysimg.md @@ -117,3 +117,80 @@ See code comments for each components for more implementation details. depending on the ISA. The target selection will prefer exact CPU name match, larger vector register size, and larger number of features. An overview of this process is in `src/processor.cpp`. + +## Trimming + +System images are typically quite large, since Base includes a lot of functionality, and by +default system images also include several packages such as LinearAlgebra for convenience +and backwards compatibility. Most programs will use only a fraction of the functions in +these packages. Therefore it makes sense to build binaries that exclude unused functions +to save space, referred to as "trimming". + +While the basic idea of trimming is sound, Julia has dynamic and reflective features that make it +difficult (or impossible) to know in general which functions are unused. As an extreme example, +consider code like + +``` +getglobal(Base, Symbol(readchomp(stdin)))(1) +``` + +This code reads a function name from `stdin` and calls the named function from Base on the value +`1`. In this case it is impossible to predict which function will be called, so no functions +can reliably be considered "unused". With some noteworthy exceptions (Julia's own REPL being +one of them), most real-world programs do not do things like this. + +Less extreme cases occur, for example, when there are type instabilities that make it impossible +for the compiler to predict which method will be called. However, if code is well-typed and does +not use reflection, a complete and (hopefully) relatively small set of needed methods can be +determined, and the rest can be removed. The `--trim` command-line option requests this kind of +compilation. + +When `--trim` is specified in a command used to build a system image, the compiler begins +tracing calls starting at methods marked using `Base.Experimental.entrypoint`. If a call is too +dynamic to reasonably narrow down the possible call targets, an error is given at compile +time showing the location of the call. For testing purposes, it is possible to skip these +errors by specifying `--trim=unsafe` or `--trim=unsafe-warn`. Then you will get a system +image built, but it may crash at run time if needed code is not present. + +It typically makes sense to specify `--strip-ir` along with `--trim`, since trimmed binaries +are fully compiled and therefore don't need Julia IR. At some point we may make `--trim` imply +`--strip-ir`, but for now we have kept them orthogonal. + +To get the smallest possible binary, it will also help to specify `--strip-metadata` and +run the Unix `strip` utility. 
However, those steps remove Julia-specific and native (DWARF format) +debug info, respectively, and so will make debugging more difficult. + +### Common problems + +- The Base global variables `stdin`, `stdout`, and `stderr` are non-constant and so their + types are not known. All printing should use a specific IO object with a known type. + The easiest substitution is to use `print(Core.stdout, x)` instead of `print(x)` or + `print(stdout, x)`. +- Use tools like `JET`, `Cthulhu`, and/or `SnoopCompile` to identify failures of type-inference, and + follow our [Performance Tips](@ref) to fix them. + +### Compatibility concerns + +We have identified many small changes to Base that significantly increase the set of programs +that can be reliably trimmed. Unfortunately some of those changes would be considered breaking, +and so are only applied when trimming is requested (this is done by an external build script, +currently maintained inside the test suite as `test/trimming/buildscript.jl`). +Therefore in many cases trimming will require you to opt in to new variants of Base and some +standard libraries. + +If you want to use trimming, it is important to set up continuous integration testing that +performs a trimmed build and fully tests the resulting program. +Fortunately, if your program successfully compiles with `--trim` then it is very likely to work +the same as it did before. However, CI is needed to ensure that your program continues to build +with trimming as you develop it. + +Package authors may wish to test that their package is "trimming safe", however this is impossible +in general. Trimming is only expected to work given concrete entry points such as `main()` and +library entry points meant to be called from outside Julia. For generic packages, existing tests +for type stability like `@inferred` and `JET` are about as close as you can get to checking +trim compatibility. + +Trimming also introduces new compatibility issues between minor versions of Julia. At this time, +we are not able to guarantee that a program that can be trimmed in one version of Julia +can also be trimmed in all future versions of Julia. However, breakage of that kind is expected +to be rare. We also plan to try to *increase* the set of programs that can be trimmed over time. diff --git a/doc/src/manual/command-line-interface.md b/doc/src/manual/command-line-interface.md index 41c3eacd61d26..ef20e51ea6e4e 100644 --- a/doc/src/manual/command-line-interface.md +++ b/doc/src/manual/command-line-interface.md @@ -218,7 +218,7 @@ The following is a complete list of command-line switches available when launchi |`--trace-compile-timing` |If --trace-compile is enabled show how long each took to compile in ms| |`--image-codegen` |Force generate code in imaging mode| |`--permalloc-pkgimg={yes\|no*}` |Copy the data section of package images into memory| - +|`--trim={no*|safe|unsafe|unsafe-warn}` |Build a sysimage including only code provably reachable from methods marked by calling `entrypoint`. The three non-default options differ in how they handle dynamic call sites. In safe mode, such sites result in compile-time errors. In unsafe mode, such sites are allowed but the resulting binary might be missing needed code and can throw runtime errors. With unsafe-warn, such sites will trigger warnings at compile-time and might error at runtime.| !!! 
compat "Julia 1.1" In Julia 1.0, the default `--project=@.` option did not search up from the root diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index b4c8ef6095a55..c2f112f9c9d5c 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -95,6 +95,17 @@ void jl_get_function_id_impl(void *native_code, jl_code_instance_t *codeinst, } } +extern "C" JL_DLLEXPORT_CODEGEN +void jl_get_llvm_mis_impl(void *native_code, arraylist_t* MIs) +{ + jl_native_code_desc_t *data = (jl_native_code_desc_t*)native_code; + auto map = data->jl_fvar_map; + for (auto &ci : map) { + jl_method_instance_t *mi = ci.first->def; + arraylist_push(MIs, mi); + } +} + extern "C" JL_DLLEXPORT_CODEGEN void jl_get_llvm_gvs_impl(void *native_code, arraylist_t *gvs) { @@ -284,6 +295,7 @@ jl_code_instance_t *jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_ jl_value_t *ci = cgparams.lookup(mi, world, world); JL_GC_PROMISE_ROOTED(ci); jl_code_instance_t *codeinst = NULL; + JL_GC_PUSH1(&codeinst); if (ci != jl_nothing && jl_atomic_load_relaxed(&((jl_code_instance_t *)ci)->inferred) != jl_nothing) { codeinst = (jl_code_instance_t*)ci; } @@ -301,9 +313,11 @@ jl_code_instance_t *jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_ jl_mi_cache_insert(mi, codeinst); } } + JL_GC_POP(); return codeinst; } +arraylist_t new_invokes; // takes the running content that has collected in the shadow module and dump it to disk // this builds the object file portion of the sysimage files for fast startup, and can // also be used be extern consumers like GPUCompiler.jl to obtain a module containing @@ -353,8 +367,12 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm params.imaging_mode = imaging; params.debug_level = cgparams->debug_info_level; params.external_linkage = _external_linkage; + arraylist_new(&new_invokes, 0); size_t compile_for[] = { jl_typeinf_world, _world }; - for (int worlds = 0; worlds < 2; worlds++) { + int worlds = 0; + if (jl_options.trim != JL_TRIM_NO) + worlds = 1; + for (; worlds < 2; worlds++) { JL_TIMING(NATIVE_AOT, NATIVE_Codegen); size_t this_world = compile_for[worlds]; if (!this_world) @@ -373,6 +391,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm continue; } mi = (jl_method_instance_t*)item; +compile_mi: src = NULL; // if this method is generally visible to the current compilation world, // and this is either the primary world, or not applicable in the primary world @@ -380,16 +399,47 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm if (jl_atomic_load_relaxed(&mi->def.method->primary_world) <= this_world && this_world <= jl_atomic_load_relaxed(&mi->def.method->deleted_world)) { // find and prepare the source code to compile jl_code_instance_t *codeinst = jl_ci_cache_lookup(*cgparams, mi, this_world); - if (codeinst && !params.compiled_functions.count(codeinst)) { + if (jl_options.trim != JL_TRIM_NO && !codeinst) { + // If we're building a small image, we need to compile everything + // to ensure that we have all the information we need. 
+ jl_safe_printf("Codegen decided not to compile code root"); + jl_(mi); + abort(); + } + if (codeinst && !params.compiled_functions.count(codeinst) && !data->jl_fvar_map.count(codeinst)) { // now add it to our compilation results - JL_GC_PROMISE_ROOTED(codeinst->rettype); - orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(codeinst->def), - params.tsctx, clone.getModuleUnlocked()->getDataLayout(), - Triple(clone.getModuleUnlocked()->getTargetTriple())); - jl_llvm_functions_t decls = jl_emit_codeinst(result_m, codeinst, NULL, params); - if (result_m) - params.compiled_functions[codeinst] = {std::move(result_m), std::move(decls)}; + // Const returns do not do codegen, but juliac inspects codegen results so make a dummy fvar entry to represent it + if (jl_options.trim != JL_TRIM_NO && jl_atomic_load_relaxed(&codeinst->invoke) == jl_fptr_const_return_addr) { + data->jl_fvar_map[codeinst] = std::make_tuple((uint32_t)-3, (uint32_t)-3); + } else { + JL_GC_PROMISE_ROOTED(codeinst->rettype); + orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(codeinst->def), + params.tsctx, clone.getModuleUnlocked()->getDataLayout(), + Triple(clone.getModuleUnlocked()->getTargetTriple())); + jl_llvm_functions_t decls = jl_emit_codeinst(result_m, codeinst, NULL, params); + if (result_m) + params.compiled_functions[codeinst] = {std::move(result_m), std::move(decls)}; + else if (jl_options.trim != JL_TRIM_NO) { + // if we're building a small image, we need to compile everything + // to ensure that we have all the information we need. + jl_safe_printf("codegen failed to compile code root"); + jl_(mi); + abort(); + } + } } + } else if (this_world != jl_typeinf_world) { + /* + jl_safe_printf("Codegen could not find requested codeinstance to be compiled\n"); + jl_(mi); + abort(); + */ + } + // TODO: is goto the best way to do this? 
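+            // `new_invokes` accumulates call targets discovered while emitting code for this
+            // root; pop and compile them one at a time by looping back to `compile_mi` above.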
+ jl_compile_workqueue(params, policy); + mi = (jl_method_instance_t*)arraylist_pop(&new_invokes); + if (mi != NULL) { + goto compile_mi; } } @@ -397,6 +447,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm jl_compile_workqueue(params, policy); } JL_GC_POP(); + arraylist_free(&new_invokes); // process the globals array, before jl_merge_module destroys them SmallVector gvars(params.global_targets.size()); diff --git a/src/cgutils.cpp b/src/cgutils.cpp index 7f96bb1047abc..4547e693755cd 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -2336,6 +2336,12 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx, ret = emit_invoke(ctx, *modifyop, argv, 3, (jl_value_t*)jl_any_type); } else { + if (trim_may_error(ctx.params->trim)) { + // if we know the return type, we can assume the result is of that type + errs() << "ERROR: Dynamic call to setfield/modifyfield\n"; + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } Value *callval = emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, 3, julia_call); ret = mark_julia_type(ctx, callval, true, jl_any_type); } @@ -4077,6 +4083,12 @@ static jl_cgval_t union_store(jl_codectx_t &ctx, rhs = emit_invoke(ctx, *modifyop, argv, 3, (jl_value_t*)jl_any_type); } else { + if (trim_may_error(ctx.params->trim)) { + // if we know the return type, we can assume the result is of that type + errs() << "ERROR: Dynamic call to setfield/modifyfield\n"; + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } Value *callval = emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, 3, julia_call); rhs = mark_julia_type(ctx, callval, true, jl_any_type); } diff --git a/src/codegen-stubs.c b/src/codegen-stubs.c index 41812d903816c..7ddb68fd6b036 100644 --- a/src/codegen-stubs.c +++ b/src/codegen-stubs.c @@ -15,6 +15,7 @@ JL_DLLEXPORT void jl_dump_native_fallback(void *native_code, ios_t *z, ios_t *s) UNAVAILABLE JL_DLLEXPORT void jl_get_llvm_gvs_fallback(void *native_code, arraylist_t *gvs) UNAVAILABLE JL_DLLEXPORT void jl_get_llvm_external_fns_fallback(void *native_code, arraylist_t *gvs) UNAVAILABLE +JL_DLLEXPORT void jl_get_llvm_mis_fallback(void *native_code, arraylist_t* MIs) UNAVAILABLE JL_DLLEXPORT void jl_extern_c_fallback(jl_function_t *f, jl_value_t *rt, jl_value_t *argt, char *name) UNAVAILABLE JL_DLLEXPORT jl_value_t *jl_dump_method_asm_fallback(jl_method_instance_t *linfo, size_t world, diff --git a/src/codegen.cpp b/src/codegen.cpp index a452e0fccd0c5..a7a985284c87b 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include // target machine computation @@ -1651,31 +1652,23 @@ static const auto &builtin_func_map() { return builtins; } +static const auto &may_dispatch_builtins() { + static std::unordered_set builtins( + {jl_f__apply_iterate_addr, + jl_f__apply_pure_addr, + jl_f__call_in_world_addr, + jl_f__call_in_world_total_addr, + jl_f__call_latest_addr, + }); + return builtins; +} + static const auto jl_new_opaque_closure_jlcall_func = new JuliaFunction<>{XSTR(jl_new_opaque_closure_jlcall), get_func_sig, get_func_attrs}; static _Atomic(uint64_t) globalUniqueGeneratedNames{1}; // --- code generation --- -extern "C" { - jl_cgparams_t jl_default_cgparams = { - /* track_allocations */ 1, - /* code_coverage */ 1, 
- /* prefer_specsig */ 0, -#ifdef _OS_WINDOWS_ - /* gnu_pubnames */ 0, -#else - /* gnu_pubnames */ 1, -#endif - /* debug_info_kind */ (int) DICompileUnit::DebugEmissionKind::FullDebug, - /* debug_line_info */ 1, - /* safepoint_on_entry */ 1, - /* gcstack_arg */ 1, - /* use_jlplt*/ 1, - /* lookup */ jl_rettype_inferred_addr }; -} - - static MDNode *best_tbaa(jl_tbaacache_t &tbaa_cache, jl_value_t *jt) { jt = jl_unwrap_unionall(jt); if (jt == (jl_value_t*)jl_datatype_type || @@ -1987,7 +1980,7 @@ class jl_codectx_t { size_t max_world = -1; const char *name = NULL; StringRef file{}; - ssize_t *line = NULL; + int32_t line = -1; Value *spvals_ptr = NULL; Value *argArray = NULL; Value *argCount = NULL; @@ -2146,6 +2139,179 @@ static Value *literal_pointer_val(jl_codectx_t &ctx, jl_value_t *p); static unsigned julia_alignment(jl_value_t *jt); static void recombine_value(jl_codectx_t &ctx, const jl_cgval_t &x, Value *dst, jl_aliasinfo_t const &dst_ai, Align alignment, bool isVolatile); +static void print_stack_crumbs(jl_codectx_t &ctx) +{ + errs() << "\n"; + errs() << "Stacktrace:\n"; + jl_method_instance_t *caller = ctx.linfo; + jl_((jl_value_t*)caller); + errs() << "In " << ctx.file << ":" << ctx.line << "\n"; + while (true) { + auto it = ctx.emission_context.enqueuers.find(caller); + if (it != ctx.emission_context.enqueuers.end()) { + caller = std::get(it->second); + } else { + break; + } + if (caller) { + if (jl_is_method_instance(caller)) { + for (auto it2 = std::get(it->second).begin(); it2 != (std::prev(std::get(it->second).end())); ++it2) { + auto frame = *it2; + errs() << std::get<0>(frame) << " \n"; + errs() << "In " << std::get<1>(frame) << ":" << std::get(frame) << "\n"; + } + auto &frame = std::get(it->second).front(); + jl_((jl_value_t*)caller); + errs() << "In " << std::get<1>(frame) << ":" << std::get(frame) << "\n"; + } + } + else + break; + } + abort(); +} + +static jl_value_t *StackFrame( + jl_value_t *linfo, + std::string fn_name, + std::string filepath, + int32_t lineno, + jl_value_t *inlined) +{ + jl_value_t *StackFrame = jl_get_global(jl_base_module, jl_symbol("StackFrame")); + assert(StackFrame != nullptr); + + jl_value_t *args[7] = { + /* func */ (jl_value_t *)jl_symbol(fn_name.c_str()), + /* line */ (jl_value_t *)jl_symbol(filepath.c_str()), + /* line */ jl_box_int32(lineno), + /* linfo */ (jl_value_t *)linfo, + /* from_c */ jl_false, + /* inlined */ inlined, + /* pointer */ jl_box_uint64(0) + }; + + jl_value_t *frame = nullptr; + JL_TRY { + frame = jl_apply_generic(StackFrame, args, 7); + } JL_CATCH { + jl_safe_printf("Error creating stack frame\n"); + } + return frame; +} + +static void push_frames(jl_codectx_t &ctx, jl_method_instance_t *caller, jl_method_instance_t *callee, int no_debug=false) +{ + CallFrames frames; + auto it = ctx.emission_context.enqueuers.find(callee); + if (it != ctx.emission_context.enqueuers.end()) + return; + if (no_debug) { // Used in tojlinvoke + frames.push_back({ctx.funcName, "", 0}); + ctx.emission_context.enqueuers.insert({callee, {caller, std::move(frames)}}); + return; + } + auto DL = ctx.builder.getCurrentDebugLocation(); + auto filename = std::string(DL->getFilename()); + auto line = DL->getLine(); + auto fname = std::string(DL->getScope()->getSubprogram()->getName()); + frames.push_back({fname, filename, line}); + auto DI = DL.getInlinedAt(); + while (DI) { + auto filename = std::string(DI->getFilename()); + auto line = DI->getLine(); + auto fname = std::string(DI->getScope()->getSubprogram()->getName()); + frames.push_back({fname, 
filename, line}); + DI = DI->getInlinedAt(); + } + ctx.emission_context.enqueuers.insert({callee, {caller, std::move(frames)}}); +} + +static jl_array_t* build_stack_crumbs(jl_codectx_t &ctx) JL_NOTSAFEPOINT +{ + static intptr_t counter = 5; + jl_method_instance_t *caller = (jl_method_instance_t*)counter; //nothing serves as a sentinel for the bottom for the stack + push_frames(ctx, ctx.linfo, (jl_method_instance_t*)caller); + counter++; + jl_array_t *out = jl_alloc_array_1d(jl_array_any_type, 0); + JL_GC_PUSH1(&out); + while (true) { + auto it = ctx.emission_context.enqueuers.find(caller); + if (it != ctx.emission_context.enqueuers.end()) { + caller = std::get(it->second); + } else { + break; + } + if (caller) { + assert(ctx.emission_context.enqueuers.count(caller) == 1); + if (jl_is_method_instance(caller)) { + //TODO: Use a subrange when C++20 is a thing + for (auto it2 = std::get(it->second).begin(); it2 != (std::prev(std::get(it->second).end())); ++it2) { + auto frame = *it2; + jl_value_t *stackframe = StackFrame(jl_nothing, std::get<0>(frame), std::get<1>(frame), std::get(frame), jl_true); + if (stackframe == nullptr) + print_stack_crumbs(ctx); + jl_array_ptr_1d_push(out, stackframe); + } + auto &frame = std::get(it->second).back(); + jl_value_t *stackframe = StackFrame((jl_value_t *)caller, std::get<0>(frame), std::get<1>(frame), std::get(frame), jl_false); + if (stackframe == nullptr) + print_stack_crumbs(ctx); + jl_array_ptr_1d_push(out, stackframe); + } + } + else + break; + } + JL_GC_POP(); + return out; +} + +static void print_stacktrace(jl_codectx_t &ctx, int trim) +{ + jl_task_t *ct = jl_get_current_task(); + assert(ct); + + // Temporarily operate in the current age + size_t last_age = ct->world_age; + ct->world_age = jl_get_world_counter(); + jl_array_t* bt = build_stack_crumbs(ctx); + JL_GC_PUSH1(&bt); + + // Call `reinit_stdio` to get TTY IO objects (w/ color) + jl_value_t *reinit_stdio = jl_get_global(jl_base_module, jl_symbol("_reinit_stdio")); + assert(reinit_stdio); + jl_apply_generic(reinit_stdio, nullptr, 0); + + // Show the backtrace + jl_value_t *show_backtrace = jl_get_global(jl_base_module, jl_symbol("show_backtrace")); + jl_value_t *base_stderr = jl_get_global(jl_base_module, jl_symbol("stderr")); + assert(show_backtrace && base_stderr); + + JL_TRY { + jl_value_t *args[2] = { base_stderr, (jl_value_t *)bt }; + jl_apply_generic(show_backtrace, args, 2); + } JL_CATCH { + jl_printf(JL_STDERR,"Error showing backtrace\n"); + print_stack_crumbs(ctx); + } + + jl_printf(JL_STDERR, "\n\n"); + JL_GC_POP(); + ct->world_age = last_age; + + if (trim == JL_TRIM_SAFE) { + jl_printf(JL_STDERR,"Aborting compilation due to finding a dynamic dispatch"); + exit(1); + } + return; +} + +static int trim_may_error(int trim) +{ + return (trim == JL_TRIM_SAFE) || (trim == JL_TRIM_UNSAFE_WARN); +} + static GlobalVariable *prepare_global_in(Module *M, JuliaVariable *G) { return G->realize(M); @@ -4281,6 +4447,12 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, Value *theArgs = emit_ptrgep(ctx, ctx.argArray, ctx.nReqArgs * sizeof(jl_value_t*)); Value *r = ctx.builder.CreateCall(prepare_call(jlapplygeneric_func), { theF, theArgs, nva }); *ret = mark_julia_type(ctx, r, true, jl_any_type); + if (trim_may_error(ctx.params->trim)) { + // if we know the return type, we can assume the result is of that type + errs() << "ERROR: Dynamic call to Core._apply_iterate detected\n"; + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << 
ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } return true; } } @@ -5388,12 +5560,25 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR if (need_to_emit) { Function *trampoline_decl = cast(jl_Module->getNamedValue(protoname)); ctx.call_targets[codeinst] = {cc, return_roots, trampoline_decl, specsig}; + if (trim_may_error(ctx.params->trim)) + push_frames(ctx, ctx.linfo, mi); } } } } } if (!handled) { + if (trim_may_error(ctx.params->trim)) { + if (lival.constant) { + arraylist_push(&new_invokes, lival.constant); + push_frames(ctx, ctx.linfo, (jl_method_instance_t*)lival.constant); + } else { + errs() << "Dynamic call to unknown function"; + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + + print_stacktrace(ctx, ctx.params->trim); + } + } Value *r = emit_jlcall(ctx, jlinvoke_func, boxed(ctx, lival), argv, nargs, julia_call2); result = mark_julia_type(ctx, r, true, rt); } @@ -5453,7 +5638,12 @@ static jl_cgval_t emit_invoke_modify(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_ return mark_julia_type(ctx, oldnew, true, rt); } } - + if (trim_may_error(ctx.params->trim)) { + errs() << "ERROR: dynamic invoke modify call to"; + jl_(args[0]); + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } // emit function and arguments Value *callval = emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, nargs, julia_call); return mark_julia_type(ctx, callval, true, rt); @@ -5523,10 +5713,15 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo bool handled = emit_builtin_call(ctx, &result, f.constant, argv, nargs - 1, rt, ex, is_promotable); if (handled) return result; - + jl_fptr_args_t builtin_fptr = jl_get_builtin_fptr((jl_datatype_t*)jl_typeof(f.constant)); // special case for some known builtin not handled by emit_builtin_call - auto it = builtin_func_map().find(jl_get_builtin_fptr((jl_datatype_t*)jl_typeof(f.constant))); + auto it = builtin_func_map().find(builtin_fptr); if (it != builtin_func_map().end()) { + if (trim_may_error(ctx.params->trim) && may_dispatch_builtins().count(builtin_fptr)) { + errs() << "ERROR: Dynamic call to builtin" << jl_symbol_name(((jl_datatype_t*)jl_typeof(f.constant))->name->name); + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } Value *ret = emit_jlcall(ctx, it->second, Constant::getNullValue(ctx.types().T_prjlvalue), ArrayRef(argv).drop_front(), nargs - 1, julia_call); setName(ctx.emission_context, ret, it->second->name + "_ret"); return mark_julia_type(ctx, ret, true, rt); @@ -5565,7 +5760,79 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo } } } + int failed_dispatch = !argv[0].constant; + if (ctx.params->trim != JL_TRIM_NO) { + size_t min_valid = 1; + size_t max_valid = ~(size_t)0; + size_t latest_world = jl_get_world_counter(); // TODO: marshal the world age of the compilation here. 
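+        // Trim-mode devirtualization of this call site: collect the matching methods for the
+        // inferred argument types and push each one's specialization onto `new_invokes` so it
+        // gets compiled; if the matches cannot be narrowed down, mark the dispatch as failed
+        // (which, depending on the trim mode, prints a diagnostic with a stack trace below).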
+ + // Find all methods matching the call signature + jl_array_t *matches = NULL; + jl_value_t *tup = NULL; + JL_GC_PUSH2(&tup, &matches); + if (!failed_dispatch) { + SmallVector argtypes; + for (auto& arg: argv) + argtypes.push_back(arg.typ); + tup = jl_apply_tuple_type_v(argtypes.data(), argtypes.size()); + matches = (jl_array_t*)jl_matching_methods((jl_tupletype_t*)tup, jl_nothing, 10 /*TODO: make global*/, 1, + latest_world, &min_valid, &max_valid, NULL); + if ((jl_value_t*)matches == jl_nothing) + failed_dispatch = 1; + } + + // Expand each matching method to its unique specialization, if it has exactly one + if (!failed_dispatch) { + size_t k; + size_t len = new_invokes.len; + for (k = 0; k < jl_array_nrows(matches); k++) { + jl_method_match_t *match = (jl_method_match_t *)jl_array_ptr_ref(matches, k); + jl_method_instance_t *mi = jl_method_match_to_mi(match, latest_world, min_valid, max_valid, 0); + if (!mi) { + if (jl_array_nrows(matches) == 1) { + // if the method match is not compileable, but there is only one, fall back to + // unspecialized implementation + mi = jl_get_unspecialized(match->method); + } + else { + new_invokes.len = len; + failed_dispatch = 1; + break; + } + } + arraylist_push(&new_invokes, mi); + } + } + JL_GC_POP(); + } + if (failed_dispatch && trim_may_error(ctx.params->trim)) { + errs() << "Dynamic call to "; + jl_jmp_buf *old_buf = jl_get_safe_restore(); + jl_jmp_buf buf; + jl_set_safe_restore(&buf); + if (!jl_setjmp(buf, 0)) { + jl_static_show((JL_STREAM*)STDERR_FILENO, (jl_value_t*)args[0]); + jl_printf((JL_STREAM*)STDERR_FILENO,"("); + for (size_t i = 1; i < nargs; ++i) { + jl_value_t *typ = argv[i].typ; + if (!jl_is_concrete_type(typ)) // Print type in red + jl_printf((JL_STREAM*)STDERR_FILENO, "\x1b[31m"); + jl_static_show((JL_STREAM*)STDERR_FILENO, (jl_value_t*)argv[i].typ); + if (!jl_is_concrete_type(typ)) + jl_printf((JL_STREAM*)STDERR_FILENO, "\x1b[0m"); + if (i != nargs-1) + jl_printf((JL_STREAM*)STDERR_FILENO,", "); + } + jl_printf((JL_STREAM*)STDERR_FILENO,")\n"); + } + else { + jl_printf((JL_STREAM*)STDERR_FILENO, "\n!!! 
ERROR while printing error -- ABORTING !!!\n"); + } + jl_set_safe_restore(old_buf); + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } // emit function and arguments Value *callval = emit_jlcall(ctx, jlapplygeneric_func, nullptr, argv, n_generic_args, julia_call); return mark_julia_type(ctx, callval, true, rt); @@ -6710,6 +6977,13 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_ ((jl_method_t*)source.constant)->nargs > 0 && jl_is_valid_oc_argtype((jl_tupletype_t*)argt.constant, (jl_method_t*)source.constant); + if (!can_optimize && trim_may_error(ctx.params->trim)) { + // if we know the return type, we can assume the result is of that type + errs() << "ERROR: Dynamic call to OpaqueClosure method\n"; + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } + if (can_optimize) { jl_value_t *closure_t = NULL; jl_value_t *env_t = NULL; @@ -6909,6 +7183,11 @@ static Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptr GlobalVariable::InternalLinkage, name, M); jl_init_function(f, params.TargetTriple); + if (trim_may_error(params.params->trim)) { + arraylist_push(&new_invokes, codeinst->def); // Try t compile this invoke + // TODO: Debuginfo! + push_frames(ctx, ctx.linfo, codeinst->def, 1); + } jl_name_jlfunc_args(params, f); //f->setAlwaysInline(); ctx.f = f; // for jl_Module @@ -8126,6 +8405,7 @@ static jl_llvm_functions_t if (lam && jl_is_method(lam->def.method)) { toplineno = lam->def.method->line; ctx.file = jl_symbol_name(lam->def.method->file); + ctx.line = lam->def.method->line; } else if ((jl_value_t*)src->debuginfo != jl_nothing) { // look for the file and line info of the original start of this block, as reported by lowering @@ -8134,6 +8414,7 @@ static jl_llvm_functions_t debuginfo = debuginfo->linetable; ctx.file = jl_debuginfo_file(debuginfo); struct jl_codeloc_t lineidx = jl_uncompress1_codeloc(debuginfo->codelocs, 0); + ctx.line = lineidx.line; toplineno = std::max((int32_t)0, lineidx.line); } if (ctx.file.empty()) @@ -9904,7 +10185,7 @@ void jl_compile_workqueue( if (it == params.compiled_functions.end()) { // Reinfer the function. The JIT came along and removed the inferred // method body. 
See #34993 - if (policy != CompilationPolicy::Default && + if ((policy != CompilationPolicy::Default || params.params->trim) && jl_atomic_load_relaxed(&codeinst->inferred) == jl_nothing) { // XXX: SOURCE_MODE_FORCE_SOURCE is wrong here (neither sufficient nor necessary) codeinst = jl_type_infer(codeinst->def, jl_atomic_load_relaxed(&codeinst->max_world), SOURCE_MODE_FORCE_SOURCE); @@ -9935,6 +10216,16 @@ void jl_compile_workqueue( if (proto.specsig) { // expected specsig if (!preal_specsig) { + if (params.params->trim) { + auto it = params.compiled_functions.find(codeinst); //TODO: What to do about this + errs() << "Bailed out to invoke when compiling:"; + jl_(codeinst->def); + if (it != params.compiled_functions.end()) { + errs() << it->second.second.functionObject << "\n"; + errs() << it->second.second.specFunctionObject << "\n"; + } else + errs() << "codeinst not in compile_functions\n"; + } // emit specsig-to-(jl)invoke conversion StringRef invokeName; if (invoke != NULL) @@ -10124,6 +10415,22 @@ int jl_opaque_ptrs_set = 0; extern "C" void jl_init_llvm(void) { + jl_default_cgparams = { + /* track_allocations */ 1, + /* code_coverage */ 1, + /* prefer_specsig */ 0, +#ifdef _OS_WINDOWS_ + /* gnu_pubnames */ 0, +#else + /* gnu_pubnames */ 1, +#endif + /* debug_info_kind */ (int) DICompileUnit::DebugEmissionKind::FullDebug, + /* debug_info_level */ (int) jl_options.debug_level, + /* safepoint_on_entry */ 1, + /* gcstack_arg */ 1, + /* use_jlplt*/ 1, + /* trim */ 0, + /* lookup */ jl_rettype_inferred_addr }; jl_page_size = jl_getpagesize(); jl_default_debug_info_kind = (int) DICompileUnit::DebugEmissionKind::FullDebug; jl_default_cgparams.debug_info_level = (int) jl_options.debug_level; diff --git a/src/gf.c b/src/gf.c index e6f5b4ee007f7..321711c839aa8 100644 --- a/src/gf.c +++ b/src/gf.c @@ -1360,8 +1360,7 @@ static inline jl_typemap_entry_t *lookup_leafcache(jl_genericmemory_t *leafcache } return NULL; } - -static jl_method_instance_t *cache_method( +jl_method_instance_t *cache_method( jl_methtable_t *mt, _Atomic(jl_typemap_t*) *cache, jl_value_t *parent JL_PROPAGATES_ROOT, jl_tupletype_t *tt, // the original tupletype of the signature jl_method_t *definition, @@ -1707,7 +1706,7 @@ static void method_overwrite(jl_typemap_entry_t *newentry, jl_method_t *oldvalue jl_printf(s, ".\n"); jl_uv_flush(s); } - if (jl_generating_output()) { + if (jl_generating_output() && jl_options.incremental) { jl_printf(JL_STDERR, "ERROR: Method overwriting is not permitted during Module precompilation. Use `__precompile__(false)` to opt-out of precompilation.\n"); jl_throw(jl_precompilable_error); } @@ -2411,7 +2410,7 @@ JL_DLLEXPORT jl_method_instance_t *jl_method_lookup(jl_value_t **args, size_t na // spvals is any matched static parameter values, m is the Method, // full is a boolean indicating if that method fully covers the input // -// lim is the max # of methods to return. if there are more, returns jl_false. +// lim is the max # of methods to return. if there are more, returns jl_nothing. // Negative values stand for no limit. 
// Unless lim == -1, remove matches that are unambiguously covered by earlier ones JL_DLLEXPORT jl_value_t *jl_matching_methods(jl_tupletype_t *types, jl_value_t *mt, int lim, int include_ambiguous, @@ -2431,7 +2430,7 @@ JL_DLLEXPORT jl_value_t *jl_matching_methods(jl_tupletype_t *types, jl_value_t * return ml_matches((jl_methtable_t*)mt, types, lim, include_ambiguous, 1, world, 1, min_valid, max_valid, ambig); } -jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT) +JL_DLLEXPORT jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT) { // one unspecialized version of a function can be shared among all cached specializations if (!jl_is_method(def) || def->source == NULL) { @@ -2910,7 +2909,7 @@ jl_method_instance_t *jl_normalize_to_compilable_mi(jl_method_instance_t *mi JL_ } // return a MethodInstance for a compileable method_match -jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *match, size_t world, size_t min_valid, size_t max_valid, int mt_cache) +JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *match, size_t world, size_t min_valid, size_t max_valid, int mt_cache) { jl_method_t *m = match->method; jl_svec_t *env = match->sparams; @@ -3112,6 +3111,21 @@ JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types) return 1; } +JL_DLLEXPORT int jl_add_entrypoint(jl_tupletype_t *types) +{ + size_t world = jl_atomic_load_acquire(&jl_world_counter); + size_t min_valid = 0; + size_t max_valid = ~(size_t)0; + jl_method_instance_t *mi = jl_get_compile_hint_specialization(types, world, &min_valid, &max_valid, 1); + if (mi == NULL) + return 0; + JL_GC_PROMISE_ROOTED(mi); + if (jl_generating_output() && jl_options.trim) { + arraylist_push(jl_entrypoint_mis, mi); + } + return 1; +} + // add type of `f` to front of argument tuple type jl_value_t *jl_argtype_with_function(jl_value_t *f, jl_value_t *types0) { diff --git a/src/init.c b/src/init.c index 86c0877b14289..413d4e8055e54 100644 --- a/src/init.c +++ b/src/init.c @@ -44,6 +44,7 @@ extern BOOL (WINAPI *hSymRefreshModuleList)(HANDLE); // list of modules being deserialized with __init__ methods jl_array_t *jl_module_init_order; +arraylist_t *jl_entrypoint_mis; JL_DLLEXPORT size_t jl_page_size; @@ -721,6 +722,7 @@ static void restore_fp_env(void) static NOINLINE void _finish_julia_init(JL_IMAGE_SEARCH rel, jl_ptls_t ptls, jl_task_t *ct); JL_DLLEXPORT int jl_default_debug_info_kind; +JL_DLLEXPORT jl_cgparams_t jl_default_cgparams; static void init_global_mutexes(void) { JL_MUTEX_INIT(&jl_modules_mutex, "jl_modules_mutex"); @@ -841,8 +843,10 @@ static NOINLINE void _finish_julia_init(JL_IMAGE_SEARCH rel, jl_ptls_t ptls, jl_ JL_TIMING(JULIA_INIT, JULIA_INIT); jl_resolve_sysimg_location(rel); // loads sysimg if available, and conditionally sets jl_options.cpu_target - if (rel == JL_IMAGE_IN_MEMORY) + if (rel == JL_IMAGE_IN_MEMORY) { jl_set_sysimg_so(jl_exe_handle); + jl_options.image_file = jl_options.julia_bin; + } else if (jl_options.image_file) jl_preload_sysimg_so(jl_options.image_file); if (jl_options.cpu_target == NULL) @@ -899,6 +903,11 @@ static NOINLINE void _finish_julia_init(JL_IMAGE_SEARCH rel, jl_ptls_t ptls, jl_ JL_GC_POP(); } + if (jl_options.trim) { + jl_entrypoint_mis = (arraylist_t *)malloc_s(sizeof(arraylist_t)); + arraylist_new(jl_entrypoint_mis, 0); + } + if (jl_options.handle_signals == JL_OPTIONS_HANDLE_SIGNALS_ON) jl_install_sigint_handler(); } diff --git a/src/jitlayers.h b/src/jitlayers.h index 107782e354d4a..93669c2351d88 100644 --- 
a/src/jitlayers.h +++ b/src/jitlayers.h @@ -29,7 +29,7 @@ #include "llvm-version.h" #include #include - +#include // As of LLVM 13, there are two runtime JIT linker implementations, the older // RuntimeDyld (used via orc::RTDyldObjectLinkingLayer) and the newer JITLink @@ -65,6 +65,7 @@ using namespace llvm; extern "C" jl_cgparams_t jl_default_cgparams; +extern arraylist_t new_invokes; DEFINE_SIMPLE_CONVERSION_FUNCTIONS(orc::ThreadSafeContext, LLVMOrcThreadSafeContextRef) DEFINE_SIMPLE_CONVERSION_FUNCTIONS(orc::ThreadSafeModule, LLVMOrcThreadSafeModuleRef) @@ -211,7 +212,7 @@ struct jl_codegen_call_target_t { typedef SmallVector, 0> jl_workqueue_t; // TODO DenseMap? typedef std::map> jl_compiled_functions_t; - +typedef std::list> CallFrames; struct jl_codegen_params_t { orc::ThreadSafeContext tsctx; orc::ThreadSafeContext::Lock tsctx_lock; @@ -230,6 +231,7 @@ struct jl_codegen_params_t { std::map ditypes; std::map llvmtypes; DenseMap mergedConstants; + llvm::MapVector> enqueuers; // Map from symbol name (in a certain library) to its GV in sysimg and the // DL handle address in the current session. StringMap> libMapGV; diff --git a/src/jl_exported_funcs.inc b/src/jl_exported_funcs.inc index 7abf2b055bb8c..0c712ef37cb5b 100644 --- a/src/jl_exported_funcs.inc +++ b/src/jl_exported_funcs.inc @@ -344,6 +344,8 @@ XX(jl_new_typevar) \ XX(jl_next_from_addrinfo) \ XX(jl_normalize_to_compilable_sig) \ + XX(jl_method_match_to_mi) \ + XX(jl_get_unspecialized) \ XX(jl_no_exc_handler) \ XX(jl_object_id) \ XX(jl_object_id_) \ @@ -522,6 +524,7 @@ YY(jl_dump_native) \ YY(jl_get_llvm_gvs) \ YY(jl_get_llvm_external_fns) \ + YY(jl_get_llvm_mis) \ YY(jl_dump_function_asm) \ YY(jl_LLVMCreateDisasm) \ YY(jl_LLVMDisasmInstruction) \ diff --git a/src/jloptions.c b/src/jloptions.c index f63f4de020e26..530d5e2577a9a 100644 --- a/src/jloptions.c +++ b/src/jloptions.c @@ -101,6 +101,7 @@ JL_DLLEXPORT void jl_init_options(void) 0, // permalloc_pkgimg 0, // heap-size-hint 0, // trace_compile_timing + 0, // trim }; jl_options_initialized = 1; } @@ -251,7 +252,7 @@ static const char opts_hidden[] = " --strip-ir Remove IR (intermediate representation) of compiled\n" " functions\n\n" - // compiler debugging (see the devdocs for tips on using these options) + // compiler debugging and experimental (see the devdocs for tips on using these options) " --output-unopt-bc Generate unoptimized LLVM bitcode (.bc)\n" " --output-bc Generate LLVM bitcode (.bc)\n" " --output-asm Generate an assembly file (.s)\n" @@ -265,6 +266,13 @@ static const char opts_hidden[] = " compile in ms\n" " --image-codegen Force generate code in imaging mode\n" " --permalloc-pkgimg={yes|no*} Copy the data section of package images into memory\n" + " --trim={no*|safe|unsafe|unsafe-warn}\n" + " Build a sysimage including only code provably reachable\n" + " from methods marked by calling `entrypoint`. In unsafe\n" + " mode, the resulting binary might be missing needed code\n" + " and can throw errors. With unsafe-warn warnings will be\n" + " printed for dynamic call sites that might lead to such\n" + " errors. 
In safe mode compile-time errors are given instead.\n" ; JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) @@ -311,7 +319,8 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) opt_strip_ir, opt_heap_size_hint, opt_gc_threads, - opt_permalloc_pkgimg + opt_permalloc_pkgimg, + opt_trim, }; static const char* const shortopts = "+vhqH:e:E:L:J:C:it:p:O:g:m:"; static const struct option longopts[] = { @@ -375,6 +384,7 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) { "strip-ir", no_argument, 0, opt_strip_ir }, { "permalloc-pkgimg",required_argument, 0, opt_permalloc_pkgimg }, { "heap-size-hint", required_argument, 0, opt_heap_size_hint }, + { "trim", optional_argument, 0, opt_trim }, { 0, 0, 0, 0 } }; @@ -934,6 +944,18 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) else jl_errorf("julia: invalid argument to --permalloc-pkgimg={yes|no} (%s)", optarg); break; + case opt_trim: + if (optarg == NULL || !strcmp(optarg,"safe")) + jl_options.trim = JL_TRIM_SAFE; + else if (!strcmp(optarg,"no")) + jl_options.trim = JL_TRIM_NO; + else if (!strcmp(optarg,"unsafe")) + jl_options.trim = JL_TRIM_UNSAFE; + else if (!strcmp(optarg,"unsafe-warn")) + jl_options.trim = JL_TRIM_UNSAFE_WARN; + else + jl_errorf("julia: invalid argument to --trim={safe|no|unsafe|unsafe-warn} (%s)", optarg); + break; default: jl_errorf("julia: unhandled option -- %c\n" "This is a bug, please report it.", c); diff --git a/src/jloptions.h b/src/jloptions.h index aac2a64a373a8..3d7deedb59e15 100644 --- a/src/jloptions.h +++ b/src/jloptions.h @@ -62,6 +62,7 @@ typedef struct { int8_t permalloc_pkgimg; uint64_t heap_size_hint; int8_t trace_compile_timing; + int8_t trim; } jl_options_t; #endif diff --git a/src/julia.expmap.in b/src/julia.expmap.in index e5f9ee890205f..29366f6296a85 100644 --- a/src/julia.expmap.in +++ b/src/julia.expmap.in @@ -5,8 +5,8 @@ asprintf; bitvector_*; ios_*; - arraylist_grow; - small_arraylist_grow; + arraylist_*; + small_arraylist_*; jl_*; ijl_*; _jl_mutex_*; diff --git a/src/julia.h b/src/julia.h index abb8a57ff13b0..73b96cf0183d1 100644 --- a/src/julia.h +++ b/src/julia.h @@ -2579,6 +2579,11 @@ JL_DLLEXPORT int jl_generating_output(void) JL_NOTSAFEPOINT; #define JL_OPTIONS_USE_PKGIMAGES_YES 1 #define JL_OPTIONS_USE_PKGIMAGES_NO 0 +#define JL_TRIM_NO 0 +#define JL_TRIM_SAFE 1 +#define JL_TRIM_UNSAFE 2 +#define JL_TRIM_UNSAFE_WARN 3 + // Version information #include // Generated file @@ -2626,10 +2631,12 @@ typedef struct { int gcstack_arg; // Pass the ptls value as an argument with swiftself int use_jlplt; // Whether to use the Julia PLT mechanism or emit symbols directly + int trim; // can we emit dynamic dispatches? // Cache access. Default: jl_rettype_inferred_native. 
jl_codeinstance_lookup_t lookup; } jl_cgparams_t; extern JL_DLLEXPORT int jl_default_debug_info_kind; +extern JL_DLLEXPORT jl_cgparams_t jl_default_cgparams; typedef struct { int emit_metadata; diff --git a/src/julia_internal.h b/src/julia_internal.h index f00667d016796..9a61c3d18356f 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -664,8 +664,9 @@ JL_DLLEXPORT jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_value_t *rettype, size_t min_world, size_t max_world, jl_debuginfo_t *edges); -jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT); +JL_DLLEXPORT jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT); JL_DLLEXPORT void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_callptr_t *invoke, void **specptr, int waitcompile) JL_NOTSAFEPOINT; +JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *match, size_t world, size_t min_valid, size_t max_valid, int mt_cache); JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_uninit(jl_method_instance_t *mi, jl_value_t *owner); JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( @@ -683,6 +684,7 @@ JL_DLLEXPORT const char *jl_debuginfo_name(jl_value_t *func) JL_NOTSAFEPOINT; JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tupletype_t *types, size_t world); JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types); +JL_DLLEXPORT int jl_add_entrypoint(jl_tupletype_t *types); jl_code_info_t *jl_code_for_interpreter(jl_method_instance_t *lam JL_PROPAGATES_ROOT, size_t world); jl_value_t *jl_code_or_ci_for_interpreter(jl_method_instance_t *lam JL_PROPAGATES_ROOT, size_t world); int jl_code_requires_compiler(jl_code_info_t *src, int include_force_compile); @@ -853,6 +855,7 @@ extern htable_t jl_current_modules JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT jl_module_t *jl_precompile_toplevel_module JL_GLOBALLY_ROOTED; extern jl_genericmemory_t *jl_global_roots_list JL_GLOBALLY_ROOTED; extern jl_genericmemory_t *jl_global_roots_keyset JL_GLOBALLY_ROOTED; +extern arraylist_t *jl_entrypoint_mis; JL_DLLEXPORT int jl_is_globally_rooted(jl_value_t *val JL_MAYBE_UNROOTED) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_value_t *jl_as_global_root(jl_value_t *val, int insert) JL_GLOBALLY_ROOTED; @@ -1902,7 +1905,7 @@ JL_DLLIMPORT void jl_get_function_id(void *native_code, jl_code_instance_t *ncod int32_t *func_idx, int32_t *specfunc_idx); JL_DLLIMPORT void jl_register_fptrs(uint64_t image_base, const struct _jl_image_fptrs_t *fptrs, jl_method_instance_t **linfos, size_t n); - +JL_DLLIMPORT void jl_get_llvm_mis(void *native_code, arraylist_t* MIs); JL_DLLIMPORT void jl_init_codegen(void); JL_DLLIMPORT void jl_teardown_codegen(void) JL_NOTSAFEPOINT; JL_DLLIMPORT int jl_getFunctionInfo(jl_frame_t **frames, uintptr_t pointer, int skipC, int noInline) JL_NOTSAFEPOINT; diff --git a/src/module.c b/src/module.c index 96d94049cff13..a6c05d279f5b0 100644 --- a/src/module.c +++ b/src/module.c @@ -856,7 +856,7 @@ JL_DLLEXPORT int jl_binding_resolved_p(jl_module_t *m, jl_sym_t *var) return kind == BINDING_KIND_DECLARED || !jl_bkind_is_some_guard(kind); } -static uint_t bindingkey_hash(size_t idx, jl_value_t *data) +uint_t bindingkey_hash(size_t idx, jl_value_t *data) { jl_binding_t *b = (jl_binding_t*)jl_svecref(data, idx); // This must always happen inside the lock jl_sym_t *var = b->globalref->name; diff --git a/src/precompile.c 
b/src/precompile.c index c40e867ea699e..5088d45a5ad74 100644 --- a/src/precompile.c +++ b/src/precompile.c @@ -116,14 +116,16 @@ JL_DLLEXPORT void jl_write_compiler_output(void) if (f) { jl_array_ptr_1d_push(jl_module_init_order, m); int setting = jl_get_module_compile((jl_module_t*)m); - if (setting != JL_OPTIONS_COMPILE_OFF && - setting != JL_OPTIONS_COMPILE_MIN) { + if ((setting != JL_OPTIONS_COMPILE_OFF && (jl_options.trim || + (setting != JL_OPTIONS_COMPILE_MIN)))) { // TODO: this would be better handled if moved entirely to jl_precompile // since it's a slightly duplication of effort jl_value_t *tt = jl_is_type(f) ? (jl_value_t*)jl_wrap_Type(f) : jl_typeof(f); JL_GC_PUSH1(&tt); tt = jl_apply_tuple_type_v(&tt, 1); jl_compile_hint((jl_tupletype_t*)tt); + if (jl_options.trim) + jl_add_entrypoint((jl_tupletype_t*)tt); JL_GC_POP(); } } @@ -188,6 +190,10 @@ JL_DLLEXPORT void jl_write_compiler_output(void) jl_printf(JL_STDERR, "\n ** incremental compilation may be broken for this module **\n\n"); } } + if (jl_options.trim) { + exit(0); // Some finalizers need to run and we've blown up the bindings table + // TODO: Is this still needed + } JL_GC_POP(); jl_gc_enable_finalizers(ct, 1); } diff --git a/src/precompile_utils.c b/src/precompile_utils.c index 5a4f599d1f0eb..a78d1e66dbb51 100644 --- a/src/precompile_utils.c +++ b/src/precompile_utils.c @@ -321,3 +321,83 @@ static void *jl_precompile_worklist(jl_array_t *worklist, jl_array_t *extext_met JL_GC_POP(); return native_code; } + +static int enq_ccallable_entrypoints_(jl_typemap_entry_t *def, void *closure) +{ + jl_method_t *m = def->func.method; + if (m->external_mt) + return 1; + if (m->ccallable) + jl_add_entrypoint((jl_tupletype_t*)jl_svecref(m->ccallable, 1)); + return 1; +} + +static int enq_ccallable_entrypoints(jl_methtable_t *mt, void *env) +{ + return jl_typemap_visitor(jl_atomic_load_relaxed(&mt->defs), enq_ccallable_entrypoints_, env); +} + +JL_DLLEXPORT void jl_add_ccallable_entrypoints(void) +{ + jl_foreach_reachable_mtable(enq_ccallable_entrypoints, NULL); +} + +static void *jl_precompile_trimmed(size_t world) +{ + // array of MethodInstances and ccallable aliases to include in the output + jl_array_t *m = jl_alloc_vec_any(0); + jl_value_t *ccallable = NULL; + JL_GC_PUSH2(&m, &ccallable); + jl_method_instance_t *mi; + while (1) + { + mi = (jl_method_instance_t*)arraylist_pop(jl_entrypoint_mis); + if (mi == NULL) + break; + assert(jl_is_method_instance(mi)); + + jl_array_ptr_1d_push(m, (jl_value_t*)mi); + ccallable = (jl_value_t *)mi->def.method->ccallable; + if (ccallable) + jl_array_ptr_1d_push(m, ccallable); + } + + jl_cgparams_t params = jl_default_cgparams; + params.trim = jl_options.trim; + void *native_code = jl_create_native(m, NULL, ¶ms, 0, /* imaging */ 1, 0, + world); + JL_GC_POP(); + return native_code; +} + +static void jl_rebuild_methtables(arraylist_t* MIs, htable_t* mtables) +{ + size_t i; + for (i = 0; i < MIs->len; i++) { + jl_method_instance_t *mi = (jl_method_instance_t*)MIs->items[i]; + jl_method_t *m = mi->def.method; + jl_methtable_t *old_mt = jl_method_get_table(m); + if ((jl_value_t *)old_mt == jl_nothing) + continue; + jl_sym_t *name = old_mt->name; + if (!ptrhash_has(mtables, old_mt)) + ptrhash_put(mtables, old_mt, jl_new_method_table(name, m->module)); + jl_methtable_t *mt = (jl_methtable_t*)ptrhash_get(mtables, old_mt); + size_t world = jl_atomic_load_acquire(&jl_world_counter); + jl_value_t * lookup = jl_methtable_lookup(mt, m->sig, world); + // Check if the method is already in the new table, if 
not then insert it there + if (lookup == jl_nothing || (jl_method_t*)lookup != m) { + //TODO: should this be a function like unsafe_insert_method? + size_t min_world = jl_atomic_load_relaxed(&m->primary_world); + size_t max_world = jl_atomic_load_relaxed(&m->deleted_world); + jl_atomic_store_relaxed(&m->primary_world, ~(size_t)0); + jl_atomic_store_relaxed(&m->deleted_world, 1); + jl_typemap_entry_t *newentry = jl_method_table_add(mt, m, NULL); + jl_atomic_store_relaxed(&m->primary_world, min_world); + jl_atomic_store_relaxed(&m->deleted_world, max_world); + jl_atomic_store_relaxed(&newentry->min_world, min_world); + jl_atomic_store_relaxed(&newentry->max_world, max_world); + } + } + +} diff --git a/src/staticdata.c b/src/staticdata.c index 363aa46b62221..f54cc9692eaea 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -363,6 +363,9 @@ static void *to_seroder_entry(size_t idx) return (void*)((char*)HT_NOTFOUND + 1 + idx); } +static htable_t new_methtables; +static size_t precompilation_world; + static int ptr_cmp(const void *l, const void *r) { uintptr_t left = *(const uintptr_t*)l; @@ -770,22 +773,41 @@ static uintptr_t jl_fptr_id(void *fptr) #define jl_queue_for_serialization(s, v) jl_queue_for_serialization_((s), (jl_value_t*)(v), 1, 0) static void jl_queue_for_serialization_(jl_serializer_state *s, jl_value_t *v, int recursive, int immediate) JL_GC_DISABLED; - static void jl_queue_module_for_serialization(jl_serializer_state *s, jl_module_t *m) JL_GC_DISABLED { jl_queue_for_serialization(s, m->name); jl_queue_for_serialization(s, m->parent); - jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindings)); + if (jl_options.trim) { + jl_queue_for_serialization_(s, (jl_value_t*)jl_atomic_load_relaxed(&m->bindings), 0, 1); + } else { + jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindings)); + } jl_queue_for_serialization(s, jl_atomic_load_relaxed(&m->bindingkeyset)); - if (jl_options.strip_metadata) { + if (jl_options.strip_metadata || jl_options.trim) { jl_svec_t *table = jl_atomic_load_relaxed(&m->bindings); for (size_t i = 0; i < jl_svec_len(table); i++) { jl_binding_t *b = (jl_binding_t*)jl_svecref(table, i); if ((void*)b == jl_nothing) break; - jl_sym_t *name = b->globalref->name; - if (name == jl_docmeta_sym && jl_get_binding_value(b)) - record_field_change((jl_value_t**)&b->value, jl_nothing); + if (jl_options.strip_metadata) { + jl_sym_t *name = b->globalref->name; + if (name == jl_docmeta_sym && jl_get_binding_value(b)) + record_field_change((jl_value_t**)&b->value, jl_nothing); + } + if (jl_options.trim) { + jl_value_t *val = jl_get_binding_value(b); + // keep binding objects that are defined and ... + if (val && + // ... point to modules ... + (jl_is_module(val) || + // ... or point to __init__ methods ... + !strcmp(jl_symbol_name(b->globalref->name), "__init__") || + // ... 
or point to Base functions accessed by the runtime + (m == jl_base_module && (!strcmp(jl_symbol_name(b->globalref->name), "wait") || + !strcmp(jl_symbol_name(b->globalref->name), "task_done_hook"))))) { + jl_queue_for_serialization(s, b); + } + } } } @@ -944,6 +966,23 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_ jl_queue_for_serialization_(s, get_replaceable_field((jl_value_t**)&bpart->next, 0), 1, immediate); } else if (layout->nfields > 0) { + if (jl_options.trim) { + if (jl_is_method(v)) { + jl_method_t *m = (jl_method_t *)v; + if (jl_is_svec(jl_atomic_load_relaxed(&m->specializations))) + jl_queue_for_serialization_(s, (jl_value_t*)jl_atomic_load_relaxed(&m->specializations), 0, 1); + } + else if (jl_typetagis(v, jl_typename_type)) { + jl_typename_t *tn = (jl_typename_t*)v; + if (tn->mt != NULL && !tn->mt->frozen) { + jl_methtable_t * new_methtable = (jl_methtable_t *)ptrhash_get(&new_methtables, tn->mt); + if (new_methtable != HT_NOTFOUND) + record_field_change((jl_value_t **)&tn->mt, (jl_value_t*)new_methtable); + else + record_field_change((jl_value_t **)&tn->mt, NULL); + } + } + } char *data = (char*)jl_data_ptr(v); size_t i, np = layout->npointers; for (i = 0; i < np; i++) { @@ -989,6 +1028,7 @@ done_fields: ; } } + static void jl_queue_for_serialization_(jl_serializer_state *s, jl_value_t *v, int recursive, int immediate) JL_GC_DISABLED { if (!jl_needs_serialization(s, v)) @@ -2407,6 +2447,53 @@ static void jl_prune_type_cache_linear(jl_svec_t *cache) jl_svecset(cache, ins++, jl_nothing); } +uint_t bindingkey_hash(size_t idx, jl_value_t *data); + +static void jl_prune_module_bindings(jl_module_t * m) JL_GC_DISABLED +{ + jl_svec_t * bindings = jl_atomic_load_relaxed(&m->bindings); + size_t l = jl_svec_len(bindings), i; + arraylist_t bindings_list; + arraylist_new(&bindings_list, 0); + if (l == 0) + return; + for (i = 0; i < l; i++) { + jl_value_t *ti = jl_svecref(bindings, i); + if (ti == jl_nothing) + continue; + jl_binding_t *ref = ((jl_binding_t*)ti); + if (!((ptrhash_get(&serialization_order, ref) == HT_NOTFOUND) && + (ptrhash_get(&serialization_order, ref->globalref) == HT_NOTFOUND))) { + jl_svecset(bindings, i, jl_nothing); + arraylist_push(&bindings_list, ref); + } + } + jl_genericmemory_t* bindingkeyset = jl_atomic_load_relaxed(&m->bindingkeyset); + _Atomic(jl_genericmemory_t*)bindingkeyset2; + jl_atomic_store_relaxed(&bindingkeyset2,(jl_genericmemory_t*)jl_an_empty_memory_any); + jl_svec_t *bindings2 = jl_alloc_svec_uninit(bindings_list.len); + for (i = 0; i < bindings_list.len; i++) { + jl_binding_t *ref = (jl_binding_t*)bindings_list.items[i]; + jl_svecset(bindings2, i, ref); + jl_smallintset_insert(&bindingkeyset2, (jl_value_t*)m, bindingkey_hash, i, (jl_value_t*)bindings2); + } + void *idx = ptrhash_get(&serialization_order, bindings); + assert(idx != HT_NOTFOUND && idx != (void*)(uintptr_t)-1); + assert(serialization_queue.items[(char*)idx - 1 - (char*)HT_NOTFOUND] == bindings); + ptrhash_put(&serialization_order, bindings2, idx); + serialization_queue.items[(char*)idx - 1 - (char*)HT_NOTFOUND] = bindings2; + + idx = ptrhash_get(&serialization_order, bindingkeyset); + assert(idx != HT_NOTFOUND && idx != (void*)(uintptr_t)-1); + assert(serialization_queue.items[(char*)idx - 1 - (char*)HT_NOTFOUND] == bindingkeyset); + ptrhash_put(&serialization_order, jl_atomic_load_relaxed(&bindingkeyset2), idx); + serialization_queue.items[(char*)idx - 1 - (char*)HT_NOTFOUND] = jl_atomic_load_relaxed(&bindingkeyset2); + 
jl_atomic_store_relaxed(&m->bindings, bindings2); + jl_atomic_store_relaxed(&m->bindingkeyset, jl_atomic_load_relaxed(&bindingkeyset2)); + jl_gc_wb(m, bindings2); + jl_gc_wb(m, jl_atomic_load_relaxed(&bindingkeyset2)); +} + static void strip_slotnames(jl_array_t *slotnames) { // replace slot names with `?`, except unused_sym since the compiler looks at it @@ -2473,7 +2560,7 @@ static int strip_all_codeinfos__(jl_typemap_entry_t *def, void *_env) if (m->source) { int stripped_ir = 0; if (jl_options.strip_ir) { - int should_strip_ir = 0; + int should_strip_ir = jl_options.trim; if (!should_strip_ir) { if (jl_atomic_load_relaxed(&m->unspecialized)) { jl_code_instance_t *unspec = jl_atomic_load_relaxed(&jl_atomic_load_relaxed(&m->unspecialized)->cache); @@ -2675,8 +2762,46 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, // strip metadata and IR when requested if (jl_options.strip_metadata || jl_options.strip_ir) jl_strip_all_codeinfos(); + // collect needed methods and replace method tables that are in the tags array + htable_new(&new_methtables, 0); + arraylist_t MIs; + arraylist_new(&MIs, 0); + arraylist_t gvars; + arraylist_new(&gvars, 0); + arraylist_t external_fns; + arraylist_new(&external_fns, 0); int en = jl_gc_enable(0); + if (native_functions) { + jl_get_llvm_gvs(native_functions, &gvars); + jl_get_llvm_external_fns(native_functions, &external_fns); + if (jl_options.trim) + jl_get_llvm_mis(native_functions, &MIs); + } + if (jl_options.trim) { + jl_rebuild_methtables(&MIs, &new_methtables); + jl_methtable_t *mt = (jl_methtable_t *)ptrhash_get(&new_methtables, jl_type_type_mt); + JL_GC_PROMISE_ROOTED(mt); + if (mt != HT_NOTFOUND) + jl_type_type_mt = mt; + else + jl_type_type_mt = jl_new_method_table(jl_type_type_mt->name, jl_type_type_mt->module); + + mt = (jl_methtable_t *)ptrhash_get(&new_methtables, jl_kwcall_mt); + JL_GC_PROMISE_ROOTED(mt); + if (mt != HT_NOTFOUND) + jl_kwcall_mt = mt; + else + jl_kwcall_mt = jl_new_method_table(jl_kwcall_mt->name, jl_kwcall_mt->module); + + mt = (jl_methtable_t *)ptrhash_get(&new_methtables, jl_nonfunction_mt); + JL_GC_PROMISE_ROOTED(mt); + if (mt != HT_NOTFOUND) + jl_nonfunction_mt = mt; + else + jl_nonfunction_mt = jl_new_method_table(jl_nonfunction_mt->name, jl_nonfunction_mt->module); + } + nsym_tag = 0; htable_new(&symbol_table, 0); htable_new(&fptr_to_id, sizeof(id_to_fptrs) / sizeof(*id_to_fptrs)); @@ -2722,14 +2847,6 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, htable_new(&s.callers_with_edges, 0); jl_value_t **const*const tags = get_tags(); // worklist == NULL ? 
get_tags() : NULL; - arraylist_t gvars; - arraylist_t external_fns; - arraylist_new(&gvars, 0); - arraylist_new(&external_fns, 0); - if (native_functions) { - jl_get_llvm_gvs(native_functions, &gvars); - jl_get_llvm_external_fns(native_functions, &external_fns); - } if (worklist == NULL) { // empty!(Core.ARGS) @@ -2788,6 +2905,8 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, // step 1.2: ensure all gvars are part of the sysimage too record_gvars(&s, &gvars); record_external_fns(&s, &external_fns); + if (jl_options.trim) + record_gvars(&s, &MIs); jl_serialize_reachable(&s); // step 1.3: prune (garbage collect) special weak references from the jl_global_roots_list if (worklist == NULL) { @@ -2808,8 +2927,30 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, // step 1.4: prune (garbage collect) some special weak references from // built-in type caches too for (i = 0; i < serialization_queue.len; i++) { - jl_typename_t *tn = (jl_typename_t*)serialization_queue.items[i]; - if (jl_is_typename(tn)) { + jl_value_t *v = (jl_value_t*)serialization_queue.items[i]; + if (jl_options.trim) { + if (jl_is_method(v)){ + jl_method_t *m = (jl_method_t*)v; + jl_value_t *specializations_ = jl_atomic_load_relaxed(&m->specializations); + if (!jl_is_svec(specializations_)) + continue; + + jl_svec_t *specializations = (jl_svec_t *)specializations_; + size_t l = jl_svec_len(specializations), i; + for (i = 0; i < l; i++) { + jl_value_t *mi = jl_svecref(specializations, i); + if (mi == jl_nothing) + continue; + if (ptrhash_get(&serialization_order, mi) == HT_NOTFOUND) + jl_svecset(specializations, i, jl_nothing); + } + } else if (jl_is_module(v)) { + jl_prune_module_bindings((jl_module_t*)v); + } + } + // Not else + if (jl_is_typename(v)) { + jl_typename_t *tn = (jl_typename_t*)v; jl_atomic_store_relaxed(&tn->cache, jl_prune_type_cache_hash(jl_atomic_load_relaxed(&tn->cache))); jl_gc_wb(tn, jl_atomic_load_relaxed(&tn->cache)); @@ -2918,7 +3059,9 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, jl_write_value(&s, global_roots_keyset); jl_write_value(&s, s.ptls->root_task->tls); write_uint32(f, jl_get_gs_ctr()); - write_uint(f, jl_atomic_load_acquire(&jl_world_counter)); + size_t world = jl_atomic_load_acquire(&jl_world_counter); + // assert(world == precompilation_world); // This triggers on a normal build of julia + write_uint(f, world); write_uint(f, jl_typeinf_world); } else { @@ -2971,6 +3114,7 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, htable_free(&nullptrs); htable_free(&symbol_table); htable_free(&fptr_to_id); + htable_free(&new_methtables); nsym_tag = 0; jl_gc_enable(en); @@ -3000,6 +3144,10 @@ static void jl_write_header_for_incremental(ios_t *f, jl_array_t *worklist, jl_a JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *worklist, bool_t emit_split, ios_t **s, ios_t **z, jl_array_t **udeps, int64_t *srctextpos) { + if (jl_options.strip_ir || jl_options.trim) { + // make sure this is precompiled for jl_foreach_reachable_mtable + jl_get_loaded_modules(); + } jl_gc_collect(JL_GC_FULL); jl_gc_collect(JL_GC_INCREMENTAL); // sweep finalizers JL_TIMING(SYSIMG_DUMP, SYSIMG_DUMP); @@ -3049,7 +3197,11 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli } } else if (_native_data != NULL) { - *_native_data = jl_precompile(jl_options.compile_enabled == JL_OPTIONS_COMPILE_ALL); + precompilation_world = 
jl_atomic_load_acquire(&jl_world_counter); + if (jl_options.trim) + *_native_data = jl_precompile_trimmed(precompilation_world); + else + *_native_data = jl_precompile(jl_options.compile_enabled == JL_OPTIONS_COMPILE_ALL); } // Make sure we don't run any Julia code concurrently after this point diff --git a/src/support/arraylist.h b/src/support/arraylist.h index 6ad2f0e2f28c9..a83bd2808756c 100644 --- a/src/support/arraylist.h +++ b/src/support/arraylist.h @@ -20,11 +20,11 @@ typedef struct { void *_space[AL_N_INLINE]; } arraylist_t; -arraylist_t *arraylist_new(arraylist_t *a, size_t size) JL_NOTSAFEPOINT; -void arraylist_free(arraylist_t *a) JL_NOTSAFEPOINT; +JL_DLLEXPORT arraylist_t *arraylist_new(arraylist_t *a, size_t size) JL_NOTSAFEPOINT; +JL_DLLEXPORT void arraylist_free(arraylist_t *a) JL_NOTSAFEPOINT; -void arraylist_push(arraylist_t *a, void *elt) JL_NOTSAFEPOINT; -void *arraylist_pop(arraylist_t *a) JL_NOTSAFEPOINT; +JL_DLLEXPORT void arraylist_push(arraylist_t *a, void *elt) JL_NOTSAFEPOINT; +JL_DLLEXPORT void *arraylist_pop(arraylist_t *a) JL_NOTSAFEPOINT; JL_DLLEXPORT void arraylist_grow(arraylist_t *a, size_t n) JL_NOTSAFEPOINT; typedef struct { @@ -34,11 +34,12 @@ typedef struct { void *_space[SMALL_AL_N_INLINE]; } small_arraylist_t; -small_arraylist_t *small_arraylist_new(small_arraylist_t *a, uint32_t size) JL_NOTSAFEPOINT; -void small_arraylist_free(small_arraylist_t *a) JL_NOTSAFEPOINT; -void small_arraylist_push(small_arraylist_t *a, void *elt) JL_NOTSAFEPOINT; -void *small_arraylist_pop(small_arraylist_t *a) JL_NOTSAFEPOINT; +JL_DLLEXPORT small_arraylist_t *small_arraylist_new(small_arraylist_t *a, uint32_t size) JL_NOTSAFEPOINT; +JL_DLLEXPORT void small_arraylist_free(small_arraylist_t *a) JL_NOTSAFEPOINT; + +JL_DLLEXPORT void small_arraylist_push(small_arraylist_t *a, void *elt) JL_NOTSAFEPOINT; +JL_DLLEXPORT void *small_arraylist_pop(small_arraylist_t *a) JL_NOTSAFEPOINT; JL_DLLEXPORT void small_arraylist_grow(small_arraylist_t *a, uint32_t n) JL_NOTSAFEPOINT; #ifdef __cplusplus diff --git a/stdlib/LinearAlgebra/src/blas.jl b/stdlib/LinearAlgebra/src/blas.jl index 413b7866c5444..3c15630091162 100644 --- a/stdlib/LinearAlgebra/src/blas.jl +++ b/stdlib/LinearAlgebra/src/blas.jl @@ -159,7 +159,7 @@ function check() interface = USE_BLAS64 ? 
:ilp64 : :lp64 if !any(lib.interface == interface for lib in config.loaded_libs) interfacestr = uppercase(string(interface)) - @error("No loaded BLAS libraries were built with $interfacestr support.") + println(Core.stderr, "No loaded BLAS libraries were built with $interfacestr support.") exit(1) end end diff --git a/stdlib/LinearAlgebra/src/lbt.jl b/stdlib/LinearAlgebra/src/lbt.jl index 606ddedbe1343..81d10f930c8c5 100644 --- a/stdlib/LinearAlgebra/src/lbt.jl +++ b/stdlib/LinearAlgebra/src/lbt.jl @@ -17,7 +17,7 @@ end macro get_warn(map, key) return quote if !haskey($(esc(map)), $(esc(key))) - @warn(string("[LBT] Unknown key into ", $(string(map)), ": ", $(esc(key)), ", defaulting to :unknown")) + println(Core.stderr, string("Warning: [LBT] Unknown key into ", $(string(map)), ": ", $(esc(key)), ", defaulting to :unknown")) # All the unknown values share a common value: `-1` $(esc(map))[$(esc(LBT_INTERFACE_UNKNOWN))] else @@ -132,7 +132,7 @@ struct LBTConfig if str_ptr != C_NULL push!(exported_symbols, unsafe_string(str_ptr)) else - @error("NULL string in lbt_config.exported_symbols[$(sym_idx)]") + println(Core.stderr, "Error: NULL string in lbt_config.exported_symbols[$(sym_idx)]") end end diff --git a/test/Makefile b/test/Makefile index 1b9cb377c943d..6ebdd3c764fd5 100644 --- a/test/Makefile +++ b/test/Makefile @@ -24,6 +24,8 @@ EMBEDDING_ARGS := "JULIA=$(JULIA_EXECUTABLE)" "BIN=$(SRCDIR)/embedding" "CC=$(CC GCEXT_ARGS := "JULIA=$(JULIA_EXECUTABLE)" "BIN=$(SRCDIR)/gcext" "CC=$(CC)" +TRIMMING_ARGS := "JULIA=$(JULIA_EXECUTABLE)" "BIN=$(JULIAHOME)/usr/bin" "CC=$(CC)" + default: $(TESTS): @@ -66,6 +68,9 @@ embedding: gcext: @$(MAKE) -C $(SRCDIR)/$@ check $(GCEXT_ARGS) +trimming: + @$(MAKE) -C $(SRCDIR)/$@ check $(TRIMMING_ARGS) + clangsa: @$(MAKE) -C $(SRCDIR)/$@ @@ -73,5 +78,6 @@ clean: @$(MAKE) -C embedding $@ $(EMBEDDING_ARGS) @$(MAKE) -C gcext $@ $(GCEXT_ARGS) @$(MAKE) -C llvmpasses $@ + @$(MAKE) -C trimming $@ $(TRIMMING_ARGS) -.PHONY: $(TESTS) $(addprefix revise-, $(TESTS)) relocatedepot revise-relocatedepot embedding gcext clangsa clean +.PHONY: $(TESTS) $(addprefix revise-, $(TESTS)) relocatedepot revise-relocatedepot embedding gcext trimming clangsa clean diff --git a/test/trimming/Makefile b/test/trimming/Makefile new file mode 100644 index 0000000000000..c6e105d637013 --- /dev/null +++ b/test/trimming/Makefile @@ -0,0 +1,55 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +# This Makefile template requires the following variables to be set +# in the environment or on the command-line: +# JULIA: path to julia[.exe] executable +# BIN: binary build directory + +ifndef JULIA + $(error "Please pass JULIA=[path of target julia binary], or set as environment variable!") +endif +ifndef BIN + $(error "Please pass BIN=[path of build directory], or set as environment variable!") +endif + +#============================================================================= +# location of test source +SRCDIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) +JULIAHOME := $(abspath $(SRCDIR)/../..) +BUILDSCRIPT := $(BIN)/../share/julia/juliac-buildscript.jl +include $(JULIAHOME)/Make.inc + +# get the executable suffix, if any +EXE := $(suffix $(abspath $(JULIA))) + +# get compiler and linker flags. 
(see: `contrib/julia-config.jl`) +JULIA_CONFIG := $(JULIA) -e 'include(joinpath(Sys.BINDIR, Base.DATAROOTDIR, "julia", "julia-config.jl"))' -- +CPPFLAGS_ADD := +CFLAGS_ADD = $(shell $(JULIA_CONFIG) --cflags) +LDFLAGS_ADD = -lm $(shell $(JULIA_CONFIG) --ldflags --ldlibs) -ljulia-internal + +#============================================================================= + +release: hello$(EXE) + +hello.o: $(SRCDIR)/hello.jl $(BUILDSCRIPT) + $(JULIA) -t 1 -J $(BIN)/../lib/julia/sys.so --startup-file=no --history-file=no --output-o $@ --output-incremental=no --strip-ir --strip-metadata --trim $(BUILDSCRIPT) $(SRCDIR)/hello.jl --output-exe true + +init.o: $(SRCDIR)/init.c + $(CC) -c -o $@ $< $(CPPFLAGS_ADD) $(CPPFLAGS) $(CFLAGS_ADD) $(CFLAGS) + +hello$(EXE): hello.o init.o + $(CC) -o $@ $(WHOLE_ARCHIVE) hello.o $(NO_WHOLE_ARCHIVE) init.o $(CPPFLAGS_ADD) $(CPPFLAGS) $(CFLAGS_ADD) $(CFLAGS) $(LDFLAGS_ADD) $(LDFLAGS) + +check: hello$(EXE) + $(JULIA) --depwarn=error $(SRCDIR)/../runtests.jl $(SRCDIR)/trimming + +clean: + -rm -f hello$(EXE) init.o hello.o + +.PHONY: release clean check + +# Makefile debugging trick: +# call print-VARIABLE to see the runtime value of any variable +print-%: + @echo '$*=$($*)' diff --git a/test/trimming/hello.jl b/test/trimming/hello.jl new file mode 100644 index 0000000000000..307bf820f325b --- /dev/null +++ b/test/trimming/hello.jl @@ -0,0 +1,6 @@ +module MyApp +Base.@ccallable function main()::Cint + println(Core.stdout, "Hello, world!") + return 0 +end +end diff --git a/test/trimming/init.c b/test/trimming/init.c new file mode 100644 index 0000000000000..ea1b02f8e5c8f --- /dev/null +++ b/test/trimming/init.c @@ -0,0 +1,9 @@ +#include + +__attribute__((constructor)) void static_init(void) +{ + if (jl_is_initialized()) + return; + julia_init(JL_IMAGE_IN_MEMORY); + jl_exception_clear(); +} diff --git a/test/trimming/trimming.jl b/test/trimming/trimming.jl new file mode 100644 index 0000000000000..dfacae7f8e531 --- /dev/null +++ b/test/trimming/trimming.jl @@ -0,0 +1,7 @@ +using Test + +exe_path = joinpath(@__DIR__, "hello"*splitext(Base.julia_exename())[2]) + +@test readchomp(`$exe_path`) == "Hello, world!" + +@test filesize(exe_path) < filesize(unsafe_string(Base.JLOptions().image_file))/10 From 17445fe752b7b99633ca306af0981baca9f66bda Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Sun, 29 Sep 2024 11:41:59 +0200 Subject: [PATCH 084/537] fix rawbigints OOB issues (#55917) Fixes issues introduced in #50691 and found in #55906: * use `@inbounds` and `@boundscheck` macros in rawbigints, for catching OOB with `--check-bounds=yes` * fix OOB in `truncate` --- base/rawbigints.jl | 31 ++++++++++++++++++++++--------- test/mpfr.jl | 9 +++++++++ 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/base/rawbigints.jl b/base/rawbigints.jl index 6508bea05be0f..a9bb18e163e2d 100644 --- a/base/rawbigints.jl +++ b/base/rawbigints.jl @@ -21,14 +21,21 @@ reversed_index(n::Int, i::Int) = n - i - 1 reversed_index(x, i::Int, v::Val) = reversed_index(elem_count(x, v), i)::Int split_bit_index(x::RawBigInt, i::Int) = divrem(i, word_length(x), RoundToZero) +function get_elem_words_raw(x::RawBigInt{T}, i::Int) where {T} + @boundscheck if (i < 0) || (elem_count(x, Val(:words)) ≤ i) + throw(BoundsError(x, i)) + end + d = x.d + j = i + 1 + (GC.@preserve d unsafe_load(Ptr{T}(pointer(d)), j))::T +end + """ `i` is the zero-based index of the wanted word in `x`, starting from the less significant words. 
""" -function get_elem(x::RawBigInt{T}, i::Int, ::Val{:words}, ::Val{:ascending}) where {T} - # `i` must be non-negative and less than `x.word_count` - d = x.d - (GC.@preserve d unsafe_load(Ptr{T}(pointer(d)), i + 1))::T +function get_elem(x::RawBigInt, i::Int, ::Val{:words}, ::Val{:ascending}) + @inbounds @inline get_elem_words_raw(x, i) end function get_elem(x, i::Int, v::Val, ::Val{:descending}) @@ -96,7 +103,8 @@ end """ Returns an integer of type `R`, consisting of the `len` most -significant bits of `x`. +significant bits of `x`. If there are less than `len` bits in `x`, +the least significant bits are zeroed. """ function truncated(::Type{R}, x::RawBigInt, len::Int) where {R<:Integer} ret = zero(R) @@ -104,17 +112,22 @@ function truncated(::Type{R}, x::RawBigInt, len::Int) where {R<:Integer} word_count, bit_count_in_word = split_bit_index(x, len) k = word_length(x) vals = (Val(:words), Val(:descending)) + lenx = elem_count(x, first(vals)) for w ∈ 0:(word_count - 1) ret <<= k - word = get_elem(x, w, vals...) - ret |= R(word) + if w < lenx + word = get_elem(x, w, vals...) + ret |= R(word) + end end if !iszero(bit_count_in_word) ret <<= bit_count_in_word - wrd = get_elem(x, word_count, vals...) - ret |= R(wrd >>> (k - bit_count_in_word)) + if word_count < lenx + wrd = get_elem(x, word_count, vals...) + ret |= R(wrd >>> (k - bit_count_in_word)) + end end end ret::R diff --git a/test/mpfr.jl b/test/mpfr.jl index 9a9698ba72c2c..63da732df1c09 100644 --- a/test/mpfr.jl +++ b/test/mpfr.jl @@ -1088,3 +1088,12 @@ end clear_flags() end end + +@testset "RawBigInt truncation OOB read" begin + @testset "T: $T" for T ∈ (UInt8, UInt16, UInt32, UInt64, UInt128) + v = Base.RawBigInt{T}("a"^sizeof(T), 1) + @testset "bit_count: $bit_count" for bit_count ∈ (0:10:80) + @test Base.truncated(UInt128, v, bit_count) isa Any + end + end +end From 4da067167fc414ea4329be3b4fdc516914e102cd Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Mon, 30 Sep 2024 12:52:59 +0200 Subject: [PATCH 085/537] prevent loading other extensions when precompiling an extension (#55589) The current way of loading extensions when precompiling an extension very easily leads to cycles. For example, if you have more than one extension and you happen to transitively depend on the triggers of one of your extensions you will immediately hit a cycle where the extensions will try to load each other indefinitely. This is an issue because you cannot directly influence your transitive dependency graph so from this p.o.v the current system of loading extension is "unsound". The test added here checks this scenario and we can now precompile and load it without any warnings or issues. Would have made https://github.com/JuliaLang/julia/issues/55517 a non issue. 
Fixes https://github.com/JuliaLang/julia/issues/55557 --------- Co-authored-by: KristofferC --- base/loading.jl | 16 ++++--- base/precompilation.jl | 47 +------------------ test/loading.jl | 13 +++++ .../Extensions/CyclicExtensions/Manifest.toml | 21 +++++++++ .../Extensions/CyclicExtensions/Project.toml | 13 +++++ .../Extensions/CyclicExtensions/ext/ExtA.jl | 6 +++ .../Extensions/CyclicExtensions/ext/ExtB.jl | 6 +++ .../CyclicExtensions/src/CyclicExtensions.jl | 7 +++ 8 files changed, 76 insertions(+), 53 deletions(-) create mode 100644 test/project/Extensions/CyclicExtensions/Manifest.toml create mode 100644 test/project/Extensions/CyclicExtensions/Project.toml create mode 100644 test/project/Extensions/CyclicExtensions/ext/ExtA.jl create mode 100644 test/project/Extensions/CyclicExtensions/ext/ExtB.jl create mode 100644 test/project/Extensions/CyclicExtensions/src/CyclicExtensions.jl diff --git a/base/loading.jl b/base/loading.jl index cf7e41a0b5b2b..fbf6bb4af50aa 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1387,7 +1387,9 @@ function run_module_init(mod::Module, i::Int=1) end function run_package_callbacks(modkey::PkgId) - run_extension_callbacks(modkey) + if !precompiling_extension + run_extension_callbacks(modkey) + end assert_havelock(require_lock) unlock(require_lock) try @@ -2843,7 +2845,7 @@ end const PRECOMPILE_TRACE_COMPILE = Ref{String}() function create_expr_cache(pkg::PkgId, input::String, output::String, output_o::Union{Nothing, String}, - concrete_deps::typeof(_concrete_dependencies), flags::Cmd=``, internal_stderr::IO = stderr, internal_stdout::IO = stdout) + concrete_deps::typeof(_concrete_dependencies), flags::Cmd=``, internal_stderr::IO = stderr, internal_stdout::IO = stdout, isext::Bool=false) @nospecialize internal_stderr internal_stdout rm(output, force=true) # Remove file if it exists output_o === nothing || rm(output_o, force=true) @@ -2912,7 +2914,7 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o:: write(io.in, """ empty!(Base.EXT_DORMITORY) # If we have a custom sysimage with `EXT_DORMITORY` prepopulated Base.track_nested_precomp($precomp_stack) - Base.precompiling_extension = $(loading_extension) + Base.precompiling_extension = $(loading_extension | isext) Base.precompiling_package = true Base.include_package_for_output($(pkg_str(pkg)), $(repr(abspath(input))), $(repr(depot_path)), $(repr(dl_load_path)), $(repr(load_path)), $deps, $(repr(source_path(nothing)))) @@ -2970,18 +2972,18 @@ This can be used to reduce package load times. Cache files are stored in `DEPOT_PATH[1]/compiled`. See [Module initialization and precompilation](@ref) for important notes. 
""" -function compilecache(pkg::PkgId, internal_stderr::IO = stderr, internal_stdout::IO = stdout; flags::Cmd=``, reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}()) +function compilecache(pkg::PkgId, internal_stderr::IO = stderr, internal_stdout::IO = stdout; flags::Cmd=``, reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}(), isext::Bool=false) @nospecialize internal_stderr internal_stdout path = locate_package(pkg) path === nothing && throw(ArgumentError("$(repr("text/plain", pkg)) not found during precompilation")) - return compilecache(pkg, path, internal_stderr, internal_stdout; flags, reasons) + return compilecache(pkg, path, internal_stderr, internal_stdout; flags, reasons, isext) end const MAX_NUM_PRECOMPILE_FILES = Ref(10) function compilecache(pkg::PkgId, path::String, internal_stderr::IO = stderr, internal_stdout::IO = stdout, keep_loaded_modules::Bool = true; flags::Cmd=``, cacheflags::CacheFlags=CacheFlags(), - reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}()) + reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}(), isext::Bool=false) @nospecialize internal_stderr internal_stdout # decide where to put the resulting cache file @@ -3021,7 +3023,7 @@ function compilecache(pkg::PkgId, path::String, internal_stderr::IO = stderr, in close(tmpio_o) close(tmpio_so) end - p = create_expr_cache(pkg, path, tmppath, tmppath_o, concrete_deps, flags, internal_stderr, internal_stdout) + p = create_expr_cache(pkg, path, tmppath, tmppath_o, concrete_deps, flags, internal_stderr, internal_stdout, isext) if success(p) if cache_objects diff --git a/base/precompilation.jl b/base/precompilation.jl index d3f076633f386..b351ce67cfbad 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -435,51 +435,6 @@ function precompilepkgs(pkgs::Vector{String}=String[]; # consider exts of direct deps to be direct deps so that errors are reported append!(direct_deps, keys(filter(d->last(d) in keys(env.project_deps), exts))) - # An extension effectively depends on another extension if it has all the the - # dependencies of that other extension - function expand_dependencies(depsmap) - function visit!(visited, node, all_deps) - if node in visited - return - end - push!(visited, node) - for dep in get(Set{Base.PkgId}, depsmap, node) - if !(dep in all_deps) - push!(all_deps, dep) - visit!(visited, dep, all_deps) - end - end - end - - depsmap_transitive = Dict{Base.PkgId, Set{Base.PkgId}}() - for package in keys(depsmap) - # Initialize a set to keep track of all dependencies for 'package' - all_deps = Set{Base.PkgId}() - visited = Set{Base.PkgId}() - visit!(visited, package, all_deps) - # Update depsmap with the complete set of dependencies for 'package' - depsmap_transitive[package] = all_deps - end - return depsmap_transitive - end - - depsmap_transitive = expand_dependencies(depsmap) - - for (_, extensions_1) in pkg_exts_map - for extension_1 in extensions_1 - deps_ext_1 = depsmap_transitive[extension_1] - for (_, extensions_2) in pkg_exts_map - for extension_2 in extensions_2 - extension_1 == extension_2 && continue - deps_ext_2 = depsmap_transitive[extension_2] - if issubset(deps_ext_2, deps_ext_1) - push!(depsmap[extension_1], extension_2) - end - end - end - end - end - @debug "precompile: deps collected" # this loop must be run after the full depsmap has been populated for (pkg, pkg_exts) in pkg_exts_map @@ -852,7 +807,7 @@ function precompilepkgs(pkgs::Vector{String}=String[]; t = @elapsed ret = precompile_pkgs_maybe_cachefile_lock(io, print_lock, fancyprint, 
pkg_config, pkgspidlocked, hascolor) do Base.with_logger(Base.NullLogger()) do # The false here means we ignore loaded modules, so precompile for a fresh session - Base.compilecache(pkg, sourcepath, std_pipe, std_pipe, false; flags, cacheflags) + Base.compilecache(pkg, sourcepath, std_pipe, std_pipe, false; flags, cacheflags, isext = haskey(exts, pkg)) end end if ret isa Base.PrecompilableError diff --git a/test/loading.jl b/test/loading.jl index bdaca7f9dc69e..b66fd632f23fa 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -1155,6 +1155,19 @@ end finally copy!(LOAD_PATH, old_load_path) end + + # Extension with cycles in dependencies + code = """ + using CyclicExtensions + Base.get_extension(CyclicExtensions, :ExtA) isa Module || error("expected extension to load") + Base.get_extension(CyclicExtensions, :ExtB) isa Module || error("expected extension to load") + CyclicExtensions.greet() + """ + proj = joinpath(@__DIR__, "project", "Extensions", "CyclicExtensions") + cmd = `$(Base.julia_cmd()) --startup-file=no -e $code` + cmd = addenv(cmd, "JULIA_LOAD_PATH" => proj) + @test occursin("Hello Cycles!", String(read(cmd))) + finally try rm(depot_path, force=true, recursive=true) diff --git a/test/project/Extensions/CyclicExtensions/Manifest.toml b/test/project/Extensions/CyclicExtensions/Manifest.toml new file mode 100644 index 0000000000000..a506825cf7995 --- /dev/null +++ b/test/project/Extensions/CyclicExtensions/Manifest.toml @@ -0,0 +1,21 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.10.4" +manifest_format = "2.0" +project_hash = "ec25ff8df3a5e2212a173c3de2c7d716cc47cd36" + +[[deps.ExtDep]] +deps = ["SomePackage"] +path = "../ExtDep.jl" +uuid = "fa069be4-f60b-4d4c-8b95-f8008775090c" +version = "0.1.0" + +[[deps.ExtDep2]] +path = "../ExtDep2" +uuid = "55982ee5-2ad5-4c40-8cfe-5e9e1b01500d" +version = "0.1.0" + +[[deps.SomePackage]] +path = "../SomePackage" +uuid = "678608ae-7bb3-42c7-98b1-82102067a3d8" +version = "0.1.0" diff --git a/test/project/Extensions/CyclicExtensions/Project.toml b/test/project/Extensions/CyclicExtensions/Project.toml new file mode 100644 index 0000000000000..08d539dcc40ae --- /dev/null +++ b/test/project/Extensions/CyclicExtensions/Project.toml @@ -0,0 +1,13 @@ +name = "CyclicExtensions" +uuid = "17d4f0df-b55c-4714-ac4b-55fa23f7355c" +version = "0.1.0" + +[deps] +ExtDep = "fa069be4-f60b-4d4c-8b95-f8008775090c" + +[weakdeps] +SomePackage = "678608ae-7bb3-42c7-98b1-82102067a3d8" + +[extensions] +ExtA = ["SomePackage"] +ExtB = ["SomePackage"] diff --git a/test/project/Extensions/CyclicExtensions/ext/ExtA.jl b/test/project/Extensions/CyclicExtensions/ext/ExtA.jl new file mode 100644 index 0000000000000..fa0c0961633cb --- /dev/null +++ b/test/project/Extensions/CyclicExtensions/ext/ExtA.jl @@ -0,0 +1,6 @@ +module ExtA + +using CyclicExtensions +using SomePackage + +end diff --git a/test/project/Extensions/CyclicExtensions/ext/ExtB.jl b/test/project/Extensions/CyclicExtensions/ext/ExtB.jl new file mode 100644 index 0000000000000..8f6da556d39b8 --- /dev/null +++ b/test/project/Extensions/CyclicExtensions/ext/ExtB.jl @@ -0,0 +1,6 @@ +module ExtB + +using CyclicExtensions +using SomePackage + +end diff --git a/test/project/Extensions/CyclicExtensions/src/CyclicExtensions.jl b/test/project/Extensions/CyclicExtensions/src/CyclicExtensions.jl new file mode 100644 index 0000000000000..f1c2ec2077562 --- /dev/null +++ b/test/project/Extensions/CyclicExtensions/src/CyclicExtensions.jl @@ -0,0 +1,7 @@ +module CyclicExtensions + 
+using ExtDep + +greet() = print("Hello Cycles!") + +end # module CyclicExtensions From 2a2878c143b87e5184565c895d090aab6e9017e9 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Mon, 30 Sep 2024 14:02:59 -0400 Subject: [PATCH 086/537] TOML: Avoid type-pirating `Base.TOML.Parser` (#55892) Since stdlibs can be duplicated but Base never is, `Base.require_stdlib` makes type piracy even more complicated than it normally would be. To adapt, this changes `TOML.Parser` to be a type defined by the TOML stdlib, so that we can define methods on it without committing type-piracy and avoid problems like Pkg.jl#4017 Resolves https://github.com/JuliaLang/Pkg.jl/issues/4017#issuecomment-2377589989 --- stdlib/TOML/src/TOML.jl | 43 ++++++++++++++++++++++++++++------------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/stdlib/TOML/src/TOML.jl b/stdlib/TOML/src/TOML.jl index 94d2808c0bc24..b37a5ca83c251 100644 --- a/stdlib/TOML/src/TOML.jl +++ b/stdlib/TOML/src/TOML.jl @@ -25,7 +25,7 @@ module Internals end # https://github.com/JuliaLang/julia/issues/36605 -readstring(f::AbstractString) = isfile(f) ? read(f, String) : error(repr(f), ": No such file") +_readstring(f::AbstractString) = isfile(f) ? read(f, String) : error(repr(f), ": No such file") """ Parser() @@ -36,12 +36,14 @@ explicitly create a `Parser` but instead one directly use use will however reuse some internal data structures which can be beneficial for performance if a larger number of small files are parsed. """ -const Parser = Internals.Parser +struct Parser + _p::Internals.Parser{Dates} +end # Dates-enabled constructors -Parser() = Parser{Dates}() -Parser(io::IO) = Parser{Dates}(io) -Parser(str::String; filepath=nothing) = Parser{Dates}(str; filepath) +Parser() = Parser(Internals.Parser{Dates}()) +Parser(io::IO) = Parser(Internals.Parser{Dates}(io)) +Parser(str::String; filepath=nothing) = Parser(Internals.Parser{Dates}(str; filepath)) """ parsefile(f::AbstractString) @@ -53,9 +55,9 @@ Parse file `f` and return the resulting table (dictionary). Throw a See also [`TOML.tryparsefile`](@ref). """ parsefile(f::AbstractString) = - Internals.parse(Parser(readstring(f); filepath=abspath(f))) + Internals.parse(Internals.Parser{Dates}(_readstring(f); filepath=abspath(f))) parsefile(p::Parser, f::AbstractString) = - Internals.parse(Internals.reinit!(p, readstring(f); filepath=abspath(f))) + Internals.parse(Internals.reinit!(p._p, _readstring(f); filepath=abspath(f))) """ tryparsefile(f::AbstractString) @@ -67,9 +69,9 @@ Parse file `f` and return the resulting table (dictionary). Return a See also [`TOML.parsefile`](@ref). """ tryparsefile(f::AbstractString) = - Internals.tryparse(Parser(readstring(f); filepath=abspath(f))) + Internals.tryparse(Internals.Parser{Dates}(_readstring(f); filepath=abspath(f))) tryparsefile(p::Parser, f::AbstractString) = - Internals.tryparse(Internals.reinit!(p, readstring(f); filepath=abspath(f))) + Internals.tryparse(Internals.reinit!(p._p, _readstring(f); filepath=abspath(f))) """ parse(x::Union{AbstractString, IO}) @@ -80,10 +82,11 @@ Throw a [`ParserError`](@ref) upon failure. See also [`TOML.tryparse`](@ref). 
""" +parse(p::Parser) = Internals.parse(p._p) parse(str::AbstractString) = - Internals.parse(Parser(String(str))) + Internals.parse(Internals.Parser{Dates}(String(str))) parse(p::Parser, str::AbstractString) = - Internals.parse(Internals.reinit!(p, String(str))) + Internals.parse(Internals.reinit!(p._p, String(str))) parse(io::IO) = parse(read(io, String)) parse(p::Parser, io::IO) = parse(p, read(io, String)) @@ -96,10 +99,11 @@ Return a [`ParserError`](@ref) upon failure. See also [`TOML.parse`](@ref). """ +tryparse(p::Parser) = Internals.tryparse(p._p) tryparse(str::AbstractString) = - Internals.tryparse(Parser(String(str))) + Internals.tryparse(Internals.Parser{Dates}(String(str))) tryparse(p::Parser, str::AbstractString) = - Internals.tryparse(Internals.reinit!(p, String(str))) + Internals.tryparse(Internals.reinit!(p._p, String(str))) tryparse(io::IO) = tryparse(read(io, String)) tryparse(p::Parser, io::IO) = tryparse(p, read(io, String)) @@ -131,4 +135,17 @@ supported type. """ const print = Internals.Printer.print +public Parser, parsefile, tryparsefile, parse, tryparse, ParserError, print + +# These methods are private Base interfaces, but we do our best to support them over +# the TOML stdlib types anyway to minimize downstream breakage. +Base.TOMLCache(p::Parser) = Base.TOMLCache(p._p, Dict{String, Base.CachedTOMLDict}()) +Base.TOMLCache(p::Parser, d::Base.CachedTOMLDict) = Base.TOMLCache(p._p, d) +Base.TOMLCache(p::Parser, d::Dict{String, Dict{String, Any}}) = Base.TOMLCache(p._p, d) + +Internals.reinit!(p::Parser, str::String; filepath::Union{Nothing, String}=nothing) = + Internals.reinit!(p._p, str; filepath) +Internals.parse(p::Parser) = Internals.parse(p._p) +Internals.tryparse(p::Parser) = Internals.tryparse(p._p) + end From e500754118c64ecc16836f426c251582fddbffb5 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Wed, 25 Sep 2024 11:22:23 -0400 Subject: [PATCH 087/537] [FileWatching] fix PollingFileWatcher design and add workaround for a stat bug What started as an innocent fix for a stat bug on Apple (#48667) turned into a full blown investigation into the design problems with the libuv backend for PollingFileWatcher, and writing my own implementation of it instead which could avoid those singled-threaded concurrency bugs. 
--- base/libuv.jl | 8 +- base/reflection.jl | 3 +- base/stat.jl | 111 +++++------ src/sys.c | 1 - stdlib/FileWatching/src/FileWatching.jl | 240 ++++++++++++++---------- stdlib/FileWatching/test/runtests.jl | 9 +- test/file.jl | 10 + 7 files changed, 215 insertions(+), 167 deletions(-) diff --git a/base/libuv.jl b/base/libuv.jl index 3c9f79dfa7b2c..306854e9f4436 100644 --- a/base/libuv.jl +++ b/base/libuv.jl @@ -26,10 +26,10 @@ for r in uv_req_types @eval const $(Symbol("_sizeof_", lowercase(string(r)))) = uv_sizeof_req($r) end -uv_handle_data(handle) = ccall(:jl_uv_handle_data, Ptr{Cvoid}, (Ptr{Cvoid},), handle) -uv_req_data(handle) = ccall(:jl_uv_req_data, Ptr{Cvoid}, (Ptr{Cvoid},), handle) -uv_req_set_data(req, data) = ccall(:jl_uv_req_set_data, Cvoid, (Ptr{Cvoid}, Any), req, data) -uv_req_set_data(req, data::Ptr{Cvoid}) = ccall(:jl_uv_req_set_data, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}), req, data) +uv_handle_data(handle) = ccall(:uv_handle_get_data, Ptr{Cvoid}, (Ptr{Cvoid},), handle) +uv_req_data(handle) = ccall(:uv_req_get_data, Ptr{Cvoid}, (Ptr{Cvoid},), handle) +uv_req_set_data(req, data) = ccall(:uv_req_set_data, Cvoid, (Ptr{Cvoid}, Any), req, data) +uv_req_set_data(req, data::Ptr{Cvoid}) = ccall(:uv_handle_set_data, Cvoid, (Ptr{Cvoid}, Ptr{Cvoid}), req, data) macro handle_as(hand, typ) return quote diff --git a/base/reflection.jl b/base/reflection.jl index fe48b6f9aa6b9..be0209872db34 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -964,7 +964,7 @@ use it in the following manner to summarize information about a struct: julia> structinfo(T) = [(fieldoffset(T,i), fieldname(T,i), fieldtype(T,i)) for i = 1:fieldcount(T)]; julia> structinfo(Base.Filesystem.StatStruct) -13-element Vector{Tuple{UInt64, Symbol, Type}}: +14-element Vector{Tuple{UInt64, Symbol, Type}}: (0x0000000000000000, :desc, Union{RawFD, String}) (0x0000000000000008, :device, UInt64) (0x0000000000000010, :inode, UInt64) @@ -978,6 +978,7 @@ julia> structinfo(Base.Filesystem.StatStruct) (0x0000000000000050, :blocks, Int64) (0x0000000000000058, :mtime, Float64) (0x0000000000000060, :ctime, Float64) + (0x0000000000000068, :ioerrno, Int32) ``` """ fieldoffset(x::DataType, idx::Integer) = (@_foldable_meta; ccall(:jl_get_field_offset, Csize_t, (Any, Cint), x, idx)) diff --git a/base/stat.jl b/base/stat.jl index 506b5644dccbc..c6fb239a96404 100644 --- a/base/stat.jl +++ b/base/stat.jl @@ -63,6 +63,7 @@ struct StatStruct blocks :: Int64 mtime :: Float64 ctime :: Float64 + ioerrno :: Int32 end @eval function Base.:(==)(x::StatStruct, y::StatStruct) # do not include `desc` in equality or hash @@ -80,22 +81,23 @@ end end) end -StatStruct() = StatStruct("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) -StatStruct(buf::Union{Vector{UInt8},Ptr{UInt8}}) = StatStruct("", buf) -StatStruct(desc::Union{AbstractString, OS_HANDLE}, buf::Union{Vector{UInt8},Ptr{UInt8}}) = StatStruct( +StatStruct() = StatStruct("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, Base.UV_ENOENT) +StatStruct(buf::Union{Memory{UInt8},Vector{UInt8},Ptr{UInt8}}, ioerrno::Int32) = StatStruct("", buf, ioerrno) +StatStruct(desc::Union{AbstractString, OS_HANDLE}, buf::Union{Memory{UInt8},Vector{UInt8},Ptr{UInt8}}, ioerrno::Int32) = StatStruct( desc isa OS_HANDLE ? 
desc : String(desc), - ccall(:jl_stat_dev, UInt32, (Ptr{UInt8},), buf), - ccall(:jl_stat_ino, UInt32, (Ptr{UInt8},), buf), - ccall(:jl_stat_mode, UInt32, (Ptr{UInt8},), buf), - ccall(:jl_stat_nlink, UInt32, (Ptr{UInt8},), buf), - ccall(:jl_stat_uid, UInt32, (Ptr{UInt8},), buf), - ccall(:jl_stat_gid, UInt32, (Ptr{UInt8},), buf), - ccall(:jl_stat_rdev, UInt32, (Ptr{UInt8},), buf), - ccall(:jl_stat_size, UInt64, (Ptr{UInt8},), buf), - ccall(:jl_stat_blksize, UInt64, (Ptr{UInt8},), buf), - ccall(:jl_stat_blocks, UInt64, (Ptr{UInt8},), buf), - ccall(:jl_stat_mtime, Float64, (Ptr{UInt8},), buf), - ccall(:jl_stat_ctime, Float64, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt32) : ccall(:jl_stat_dev, UInt32, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt32) : ccall(:jl_stat_ino, UInt32, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt32) : ccall(:jl_stat_mode, UInt32, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt32) : ccall(:jl_stat_nlink, UInt32, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt32) : ccall(:jl_stat_uid, UInt32, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt32) : ccall(:jl_stat_gid, UInt32, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt32) : ccall(:jl_stat_rdev, UInt32, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt64) : ccall(:jl_stat_size, UInt64, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt64) : ccall(:jl_stat_blksize, UInt64, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(UInt64) : ccall(:jl_stat_blocks, UInt64, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(Float64) : ccall(:jl_stat_mtime, Float64, (Ptr{UInt8},), buf), + ioerrno != 0 ? zero(Float64) : ccall(:jl_stat_ctime, Float64, (Ptr{UInt8},), buf), + ioerrno ) function iso_datetime_with_relative(t, tnow) @@ -130,35 +132,41 @@ end function show_statstruct(io::IO, st::StatStruct, oneline::Bool) print(io, oneline ? "StatStruct(" : "StatStruct for ") show(io, st.desc) - oneline || print(io, "\n ") - print(io, " size: ", st.size, " bytes") - oneline || print(io, "\n") - print(io, " device: ", st.device) - oneline || print(io, "\n ") - print(io, " inode: ", st.inode) - oneline || print(io, "\n ") - print(io, " mode: 0o", string(filemode(st), base = 8, pad = 6), " (", filemode_string(st), ")") - oneline || print(io, "\n ") - print(io, " nlink: ", st.nlink) - oneline || print(io, "\n ") - print(io, " uid: $(st.uid)") - username = getusername(st.uid) - username === nothing || print(io, " (", username, ")") - oneline || print(io, "\n ") - print(io, " gid: ", st.gid) - groupname = getgroupname(st.gid) - groupname === nothing || print(io, " (", groupname, ")") - oneline || print(io, "\n ") - print(io, " rdev: ", st.rdev) - oneline || print(io, "\n ") - print(io, " blksz: ", st.blksize) - oneline || print(io, "\n") - print(io, " blocks: ", st.blocks) - tnow = round(UInt, time()) - oneline || print(io, "\n ") - print(io, " mtime: ", iso_datetime_with_relative(st.mtime, tnow)) - oneline || print(io, "\n ") - print(io, " ctime: ", iso_datetime_with_relative(st.ctime, tnow)) + code = st.ioerrno + if code != 0 + print(io, oneline ? 
" " : "\n ") + print(io, Base.uverrorname(code), ": ", Base.struverror(code)) + else + oneline || print(io, "\n ") + print(io, " size: ", st.size, " bytes") + oneline || print(io, "\n") + print(io, " device: ", st.device) + oneline || print(io, "\n ") + print(io, " inode: ", st.inode) + oneline || print(io, "\n ") + print(io, " mode: 0o", string(filemode(st), base = 8, pad = 6), " (", filemode_string(st), ")") + oneline || print(io, "\n ") + print(io, " nlink: ", st.nlink) + oneline || print(io, "\n ") + print(io, " uid: $(st.uid)") + username = getusername(st.uid) + username === nothing || print(io, " (", username, ")") + oneline || print(io, "\n ") + print(io, " gid: ", st.gid) + groupname = getgroupname(st.gid) + groupname === nothing || print(io, " (", groupname, ")") + oneline || print(io, "\n ") + print(io, " rdev: ", st.rdev) + oneline || print(io, "\n ") + print(io, " blksz: ", st.blksize) + oneline || print(io, "\n") + print(io, " blocks: ", st.blocks) + tnow = round(UInt, time()) + oneline || print(io, "\n ") + print(io, " mtime: ", iso_datetime_with_relative(st.mtime, tnow)) + oneline || print(io, "\n ") + print(io, " ctime: ", iso_datetime_with_relative(st.ctime, tnow)) + end oneline && print(io, ")") return nothing end @@ -168,18 +176,13 @@ show(io::IO, ::MIME"text/plain", st::StatStruct) = show_statstruct(io, st, false # stat & lstat functions +checkstat(s::StatStruct) = Int(s.ioerrno) in (0, Base.UV_ENOENT, Base.UV_ENOTDIR, Base.UV_EINVAL) ? s : uv_error(string("stat(", repr(s.desc), ")"), s.ioerrno) + macro stat_call(sym, arg1type, arg) return quote - stat_buf = zeros(UInt8, Int(ccall(:jl_sizeof_stat, Int32, ()))) + stat_buf = fill!(Memory{UInt8}(undef, Int(ccall(:jl_sizeof_stat, Int32, ()))), 0x00) r = ccall($(Expr(:quote, sym)), Int32, ($(esc(arg1type)), Ptr{UInt8}), $(esc(arg)), stat_buf) - if !(r in (0, Base.UV_ENOENT, Base.UV_ENOTDIR, Base.UV_EINVAL)) - uv_error(string("stat(", repr($(esc(arg))), ")"), r) - end - st = StatStruct($(esc(arg)), stat_buf) - if ispath(st) != (r == 0) - error("stat returned zero type for a valid path") - end - return st + return checkstat(StatStruct($(esc(arg)), stat_buf, r)) end end @@ -334,7 +337,7 @@ Return `true` if a valid filesystem entity exists at `path`, otherwise returns `false`. This is the generalization of [`isfile`](@ref), [`isdir`](@ref) etc. """ -ispath(st::StatStruct) = filemode(st) & 0xf000 != 0x0000 +ispath(st::StatStruct) = st.ioerrno == 0 function ispath(path::String) # We use `access()` and `F_OK` to determine if a given path exists. `F_OK` comes from `unistd.h`. 
F_OK = 0x00 diff --git a/src/sys.c b/src/sys.c index b54edc32b32b6..fa9054bb93e9a 100644 --- a/src/sys.c +++ b/src/sys.c @@ -102,7 +102,6 @@ JL_DLLEXPORT int32_t jl_nb_available(ios_t *s) // --- dir/file stuff --- -JL_DLLEXPORT int jl_sizeof_uv_fs_t(void) { return sizeof(uv_fs_t); } JL_DLLEXPORT char *jl_uv_fs_t_ptr(uv_fs_t *req) { return (char*)req->ptr; } JL_DLLEXPORT char *jl_uv_fs_t_path(uv_fs_t *req) { return (char*)req->path; } diff --git a/stdlib/FileWatching/src/FileWatching.jl b/stdlib/FileWatching/src/FileWatching.jl index 0c987ad01c828..4ea6fcedd59bb 100644 --- a/stdlib/FileWatching/src/FileWatching.jl +++ b/stdlib/FileWatching/src/FileWatching.jl @@ -22,11 +22,11 @@ export trymkpidlock import Base: @handle_as, wait, close, eventloop, notify_error, IOError, - _sizeof_uv_poll, _sizeof_uv_fs_poll, _sizeof_uv_fs_event, _uv_hook_close, uv_error, _UVError, - iolock_begin, iolock_end, associate_julia_struct, disassociate_julia_struct, - preserve_handle, unpreserve_handle, isreadable, iswritable, isopen, - |, getproperty, propertynames -import Base.Filesystem.StatStruct + uv_req_data, uv_req_set_data, associate_julia_struct, disassociate_julia_struct, + _sizeof_uv_poll, _sizeof_uv_fs, _sizeof_uv_fs_event, _uv_hook_close, uv_error, _UVError, + iolock_begin, iolock_end, preserve_handle, unpreserve_handle, + isreadable, iswritable, isopen, |, getproperty, propertynames +import Base.Filesystem: StatStruct, uv_fs_req_cleanup if Sys.iswindows() import Base.WindowsRawSocket end @@ -126,31 +126,30 @@ mutable struct FolderMonitor end end +# this is similar to uv_fs_poll, but strives to avoid the design mistakes that make it unsuitable for any usable purpose +# https://github.com/libuv/libuv/issues/4543 mutable struct PollingFileWatcher - @atomic handle::Ptr{Cvoid} file::String - interval::UInt32 - notify::Base.ThreadSynchronizer - active::Bool - curr_error::Int32 - curr_stat::StatStruct + interval::Float64 + const notify::Base.ThreadSynchronizer # lock protects all fields which can be changed (including interval and file, if you really must) + timer::Union{Nothing,Timer} + const stat_req::Memory{UInt8} + active::Bool # whether there is already an uv_fspollcb in-flight, so to speak + closed::Bool # whether the user has explicitly destroyed this + ioerrno::Int32 # the stat errno as of the last result + prev_stat::StatStruct # the stat as of the last successful result PollingFileWatcher(file::AbstractString, interval::Float64=5.007) = PollingFileWatcher(String(file), interval) function PollingFileWatcher(file::String, interval::Float64=5.007) # same default as nodejs - handle = Libc.malloc(_sizeof_uv_fs_poll) - this = new(handle, file, round(UInt32, interval * 1000), Base.ThreadSynchronizer(), false, 0, StatStruct()) - associate_julia_struct(handle, this) - iolock_begin() - err = ccall(:uv_fs_poll_init, Int32, (Ptr{Cvoid}, Ptr{Cvoid}), eventloop(), handle) - if err != 0 - Libc.free(handle) - throw(_UVError("PollingFileWatcher", err)) - end - finalizer(uvfinalize, this) - iolock_end() + stat_req = Memory{UInt8}(undef, Int(_sizeof_uv_fs)) + this = new(file, interval, Base.ThreadSynchronizer(), nothing, stat_req, false, false, 0, StatStruct()) + uv_req_set_data(stat_req, this) + wait(this) # initialize with the current stat before return return this end end +Base.stat(pfw::PollingFileWatcher) = Base.checkstat(@lock pfw.notify pfw.prev_stat) + mutable struct _FDWatcher @atomic handle::Ptr{Cvoid} fdnum::Int # this is NOT the file descriptor @@ -327,7 +326,7 @@ function close(t::FDWatcher) 
close(t.watcher, mask) end -function uvfinalize(uv::Union{FileMonitor, FolderMonitor, PollingFileWatcher}) +function uvfinalize(uv::Union{FileMonitor, FolderMonitor}) iolock_begin() if uv.handle != C_NULL disassociate_julia_struct(uv) # close (and free) without notify @@ -336,7 +335,7 @@ function uvfinalize(uv::Union{FileMonitor, FolderMonitor, PollingFileWatcher}) iolock_end() end -function close(t::Union{FileMonitor, FolderMonitor, PollingFileWatcher}) +function close(t::Union{FileMonitor, FolderMonitor}) iolock_begin() if t.handle != C_NULL ccall(:jl_close_uv, Cvoid, (Ptr{Cvoid},), t.handle) @@ -344,6 +343,21 @@ function close(t::Union{FileMonitor, FolderMonitor, PollingFileWatcher}) iolock_end() end +function close(pfw::PollingFileWatcher) + timer = nothing + lock(pfw.notify) + try + pfw.closed = true + notify(pfw.notify, false) + timer = pfw.timer + pfw.timer = nothing + finally + unlock(pfw.notify) + end + timer === nothing || close(timer) + nothing +end + function _uv_hook_close(uv::_FDWatcher) # fyi: jl_atexit_hook can cause this to get called too Libc.free(@atomicswap :monotonic uv.handle = C_NULL) @@ -351,18 +365,6 @@ function _uv_hook_close(uv::_FDWatcher) nothing end -function _uv_hook_close(uv::PollingFileWatcher) - lock(uv.notify) - try - uv.active = false - Libc.free(@atomicswap :monotonic uv.handle = C_NULL) - notify(uv.notify, StatStruct()) - finally - unlock(uv.notify) - end - nothing -end - function _uv_hook_close(uv::FileMonitor) lock(uv.notify) try @@ -388,7 +390,7 @@ end isopen(fm::FileMonitor) = fm.handle != C_NULL isopen(fm::FolderMonitor) = fm.handle != C_NULL -isopen(pfw::PollingFileWatcher) = pfw.handle != C_NULL +isopen(pfw::PollingFileWatcher) = !pfw.closed isopen(pfw::_FDWatcher) = pfw.refcount != (0, 0) isopen(pfw::FDWatcher) = !pfw.mask.timedout @@ -449,21 +451,50 @@ function uv_pollcb(handle::Ptr{Cvoid}, status::Int32, events::Int32) nothing end -function uv_fspollcb(handle::Ptr{Cvoid}, status::Int32, prev::Ptr, curr::Ptr) - t = @handle_as handle PollingFileWatcher - old_status = t.curr_error - t.curr_error = status - if status == 0 - t.curr_stat = StatStruct(convert(Ptr{UInt8}, curr)) - end - if status == 0 || status != old_status - prev_stat = StatStruct(convert(Ptr{UInt8}, prev)) - lock(t.notify) - try - notify(t.notify, prev_stat) - finally - unlock(t.notify) +function uv_fspollcb(req::Ptr{Cvoid}) + pfw = unsafe_pointer_to_objref(uv_req_data(req))::PollingFileWatcher + pfw.active = false + unpreserve_handle(pfw) + @assert pointer(pfw.stat_req) == req + r = Int32(ccall(:uv_fs_get_result, Cssize_t, (Ptr{Cvoid},), req)) + statbuf = ccall(:uv_fs_get_statbuf, Ptr{UInt8}, (Ptr{Cvoid},), req) + curr_stat = StatStruct(pfw.file, statbuf, r) + uv_fs_req_cleanup(req) + lock(pfw.notify) + try + if !isempty(pfw.notify) # discard the update if nobody watching + if pfw.ioerrno != r || (r == 0 && pfw.prev_stat != curr_stat) + if r == 0 + pfw.prev_stat = curr_stat + end + pfw.ioerrno = r + notify(pfw.notify, true) + end + pfw.timer = Timer(pfw.interval) do t + # async task + iolock_begin() + lock(pfw.notify) + try + if pfw.timer === t # use identity check to test if this callback is stale by the time we got the lock + pfw.timer = nothing + @assert !pfw.active + if isopen(pfw) && !isempty(pfw.notify) + preserve_handle(pfw) + err = ccall(:uv_fs_stat, Cint, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Ptr{Cvoid}), + eventloop(), pfw.stat_req, pfw.file, uv_jl_fspollcb) + err == 0 || notify(pfw.notify, _UVError("PollingFileWatcher (start)", err), error=true) # likely just ENOMEM + 
pfw.active = true + end + end + finally + unlock(pfw.notify) + end + iolock_end() + nothing + end end + finally + unlock(pfw.notify) end nothing end @@ -475,7 +506,7 @@ global uv_jl_fseventscb_folder::Ptr{Cvoid} function __init__() global uv_jl_pollcb = @cfunction(uv_pollcb, Cvoid, (Ptr{Cvoid}, Cint, Cint)) - global uv_jl_fspollcb = @cfunction(uv_fspollcb, Cvoid, (Ptr{Cvoid}, Cint, Ptr{Cvoid}, Ptr{Cvoid})) + global uv_jl_fspollcb = @cfunction(uv_fspollcb, Cvoid, (Ptr{Cvoid},)) global uv_jl_fseventscb_file = @cfunction(uv_fseventscb_file, Cvoid, (Ptr{Cvoid}, Ptr{Int8}, Int32, Int32)) global uv_jl_fseventscb_folder = @cfunction(uv_fseventscb_folder, Cvoid, (Ptr{Cvoid}, Ptr{Int8}, Int32, Int32)) @@ -504,35 +535,6 @@ function start_watching(t::_FDWatcher) nothing end -function start_watching(t::PollingFileWatcher) - iolock_begin() - t.handle == C_NULL && throw(ArgumentError("PollingFileWatcher is closed")) - if !t.active - uv_error("PollingFileWatcher (start)", - ccall(:uv_fs_poll_start, Int32, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, UInt32), - t.handle, uv_jl_fspollcb::Ptr{Cvoid}, t.file, t.interval)) - t.active = true - end - iolock_end() - nothing -end - -function stop_watching(t::PollingFileWatcher) - iolock_begin() - lock(t.notify) - try - if t.active && isempty(t.notify) - t.active = false - uv_error("PollingFileWatcher (stop)", - ccall(:uv_fs_poll_stop, Int32, (Ptr{Cvoid},), t.handle)) - end - finally - unlock(t.notify) - end - iolock_end() - nothing -end - function start_watching(t::FileMonitor) iolock_begin() t.handle == C_NULL && throw(ArgumentError("FileMonitor is closed")) @@ -640,28 +642,65 @@ end function wait(pfw::PollingFileWatcher) iolock_begin() - preserve_handle(pfw) lock(pfw.notify) - local prevstat + prevstat = pfw.prev_stat + havechange = false + timer = nothing try - start_watching(pfw) + # we aren't too strict about the first interval after `wait`, but rather always + # check right away to see if it had immediately changed again, and then repeatedly + # after interval again until success + pfw.closed && throw(ArgumentError("PollingFileWatcher is closed")) + timer = pfw.timer + pfw.timer = nothing # disable Timer callback + # start_watching + if !pfw.active + preserve_handle(pfw) + err = ccall(:uv_fs_stat, Cint, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Ptr{Cvoid}), + eventloop(), pfw.stat_req, pfw.file, uv_jl_fspollcb) + err == 0 || uv_error("PollingFileWatcher (start)", err) # likely just ENOMEM + pfw.active = true + end iolock_end() - prevstat = wait(pfw.notify)::StatStruct + havechange = wait(pfw.notify)::Bool unlock(pfw.notify) iolock_begin() - lock(pfw.notify) - finally - unlock(pfw.notify) - unpreserve_handle(pfw) + catch + # stop_watching: cleanup any timers from before or after starting this wait before it failed, if there are no other watchers + latetimer = nothing + try + if isempty(pfw.notify) + latetimer = pfw.timer + pfw.timer = nothing + end + finally + unlock(pfw.notify) + end + if timer !== nothing || latetimer !== nothing + iolock_end() + timer === nothing || close(timer) + latetimer === nothing || close(latetimer) + iolock_begin() + end + rethrow() end - stop_watching(pfw) iolock_end() - if pfw.handle == C_NULL + timer === nothing || close(timer) # cleanup resources so we don't hang on exit + if !havechange # user canceled by calling close return prevstat, EOFError() - elseif pfw.curr_error != 0 - return prevstat, _UVError("PollingFileWatcher", pfw.curr_error) + end + # grab the most up-to-date stat result as of this time, even if it was a bit newer than the notify 
call + lock(pfw.notify) + currstat = pfw.prev_stat + ioerrno = pfw.ioerrno + unlock(pfw.notify) + if ioerrno == 0 + @assert currstat.ioerrno == 0 + return prevstat, currstat + elseif ioerrno in (Base.UV_ENOENT, Base.UV_ENOTDIR, Base.UV_EINVAL) + return prevstat, StatStruct(pfw.file, Ptr{UInt8}(0), ioerrno) else - return prevstat, pfw.curr_stat + return prevstat, _UVError("PollingFileWatcher", ioerrno) end end @@ -880,9 +919,9 @@ The `previous` status is always a `StatStruct`, but it may have all of the field The `current` status object may be a `StatStruct`, an `EOFError` (indicating the timeout elapsed), or some other `Exception` subtype (if the `stat` operation failed - for example, if the path does not exist). -To determine when a file was modified, compare `current isa StatStruct && mtime(prev) != mtime(current)` to detect -notification of changes. However, using [`watch_file`](@ref) for this operation is preferred, since -it is more reliable and efficient, although in some situations it may not be available. +To determine when a file was modified, compare `!(current isa StatStruct && prev == current)` to detect +notification of changes to the mtime or inode. However, using [`watch_file`](@ref) for this operation +is preferred, since it is more reliable and efficient, although in some situations it may not be available. """ function poll_file(s::AbstractString, interval_seconds::Real=5.007, timeout_s::Real=-1) pfw = PollingFileWatcher(s, Float64(interval_seconds)) @@ -893,12 +932,7 @@ function poll_file(s::AbstractString, interval_seconds::Real=5.007, timeout_s::R close(pfw) end end - statdiff = wait(pfw) - if isa(statdiff[2], IOError) - # file didn't initially exist, continue watching for it to be created (or the error to change) - statdiff = wait(pfw) - end - return statdiff + return wait(pfw) finally close(pfw) @isdefined(timer) && close(timer) diff --git a/stdlib/FileWatching/test/runtests.jl b/stdlib/FileWatching/test/runtests.jl index 2592aea024386..c9d7a4317fd08 100644 --- a/stdlib/FileWatching/test/runtests.jl +++ b/stdlib/FileWatching/test/runtests.jl @@ -2,6 +2,7 @@ using Test, FileWatching using Base: uv_error, Experimental +using Base.Filesystem: StatStruct @testset "FileWatching" begin @@ -218,7 +219,7 @@ function test_timeout(tval) @async test_file_poll(channel, 10, tval) tr = take!(channel) end - @test tr[1] === Base.Filesystem.StatStruct() && tr[2] === EOFError() + @test ispath(tr[1]::StatStruct) && tr[2] === EOFError() @test tval <= t_elapsed end @@ -231,7 +232,7 @@ function test_touch(slval) write(f, "Hello World\n") close(f) tr = take!(channel) - @test ispath(tr[1]) && ispath(tr[2]) + @test ispath(tr[1]::StatStruct) && ispath(tr[2]::StatStruct) fetch(t) end @@ -435,8 +436,8 @@ end @test_throws(Base._UVError("FolderMonitor (start)", Base.UV_ENOENT), watch_folder("____nonexistent_file", 10)) @test(@elapsed( - @test(poll_file("____nonexistent_file", 1, 3.1) === - (Base.Filesystem.StatStruct(), EOFError()))) > 3) + @test(poll_file("____nonexistent_file", 1, 3.1) == + (StatStruct(), EOFError()))) > 3) unwatch_folder(dir) @test isempty(FileWatching.watched_folders) diff --git a/test/file.jl b/test/file.jl index de258c92e02bc..a4262c4eaaa21 100644 --- a/test/file.jl +++ b/test/file.jl @@ -2128,6 +2128,16 @@ Base.joinpath(x::URI50890) = URI50890(x.f) @test !isnothing(Base.Filesystem.getusername(s.uid)) @test !isnothing(Base.Filesystem.getgroupname(s.gid)) end + s = Base.Filesystem.StatStruct() + stat_show_str = sprint(show, s) + stat_show_str_multi = sprint(show, 
MIME("text/plain"), s) + @test startswith(stat_show_str, "StatStruct(\"\" ENOENT: ") && endswith(stat_show_str, ")") + @test startswith(stat_show_str_multi, "StatStruct for \"\"\n ENOENT: ") && !endswith(stat_show_str_multi, r"\s") + s = Base.Filesystem.StatStruct("my/test", Ptr{UInt8}(0), Int32(Base.UV_ENOTDIR)) + stat_show_str = sprint(show, s) + stat_show_str_multi = sprint(show, MIME("text/plain"), s) + @test startswith(stat_show_str, "StatStruct(\"my/test\" ENOTDIR: ") && endswith(stat_show_str, ")") + @test startswith(stat_show_str_multi, "StatStruct for \"my/test\"\n ENOTDIR: ") && !endswith(stat_show_str_multi, r"\s") end @testset "diskstat() works" begin From b6e0136466396bc781406c0ab2f036f64cc818d7 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 26 Sep 2024 13:57:52 -0400 Subject: [PATCH 088/537] [FileWatching] fix FileMonitor similarly and improve pidfile reliability Previously pidfile used the same poll_interval as sleep to detect if this code made any concurrency mistakes, but we do not really need to do that once FileMonitor is fixed to be reliable in the presence of parallel concurrency (instead of using watch_file). --- stdlib/FileWatching/src/FileWatching.jl | 108 ++++++++++-------------- stdlib/FileWatching/src/pidfile.jl | 46 +++++++--- stdlib/FileWatching/test/runtests.jl | 11 +-- 3 files changed, 84 insertions(+), 81 deletions(-) diff --git a/stdlib/FileWatching/src/FileWatching.jl b/stdlib/FileWatching/src/FileWatching.jl index 4ea6fcedd59bb..b24f352943ec5 100644 --- a/stdlib/FileWatching/src/FileWatching.jl +++ b/stdlib/FileWatching/src/FileWatching.jl @@ -38,13 +38,13 @@ const UV_CHANGE = Int32(2) struct FileEvent renamed::Bool changed::Bool - timedout::Bool + timedout::Bool # aka canceled FileEvent(r::Bool, c::Bool, t::Bool) = new(r, c, t) end FileEvent() = FileEvent(false, false, true) FileEvent(flags::Integer) = FileEvent((flags & UV_RENAME) != 0, (flags & UV_CHANGE) != 0, - false) + iszero(flags)) |(a::FileEvent, b::FileEvent) = FileEvent(a.renamed | b.renamed, a.changed | b.changed, @@ -80,23 +80,26 @@ iswritable(f::FDEvent) = f.writable mutable struct FileMonitor @atomic handle::Ptr{Cvoid} - file::String - notify::Base.ThreadSynchronizer - events::Int32 - active::Bool + const file::String + const notify::Base.ThreadSynchronizer + events::Int32 # accumulator for events that occurred since the last wait call, similar to Event with autoreset + ioerrno::Int32 # record the error, if any occurs (unlikely) FileMonitor(file::AbstractString) = FileMonitor(String(file)) function FileMonitor(file::String) handle = Libc.malloc(_sizeof_uv_fs_event) - this = new(handle, file, Base.ThreadSynchronizer(), 0, false) + this = new(handle, file, Base.ThreadSynchronizer(), 0, 0) associate_julia_struct(handle, this) iolock_begin() err = ccall(:uv_fs_event_init, Cint, (Ptr{Cvoid}, Ptr{Cvoid}), eventloop(), handle) if err != 0 Libc.free(handle) - throw(_UVError("FileMonitor", err)) + uv_error("FileMonitor", err) end - iolock_end() finalizer(uvfinalize, this) + uv_error("FileMonitor (start)", + ccall(:uv_fs_event_start, Int32, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Int32), + this.handle, uv_jl_fseventscb_file::Ptr{Cvoid}, file, 0)) + iolock_end() return this end end @@ -104,8 +107,8 @@ end mutable struct FolderMonitor @atomic handle::Ptr{Cvoid} # notify::Channel{Any} # eltype = Union{Pair{String, FileEvent}, IOError} - notify::Base.ThreadSynchronizer - channel::Vector{Any} # eltype = Pair{String, FileEvent} + const notify::Base.ThreadSynchronizer + const channel::Vector{Any} # eltype 
= Pair{String, FileEvent} FolderMonitor(folder::AbstractString) = FolderMonitor(String(folder)) function FolderMonitor(folder::String) handle = Libc.malloc(_sizeof_uv_fs_event) @@ -152,9 +155,9 @@ Base.stat(pfw::PollingFileWatcher) = Base.checkstat(@lock pfw.notify pfw.prev_st mutable struct _FDWatcher @atomic handle::Ptr{Cvoid} - fdnum::Int # this is NOT the file descriptor + const fdnum::Int # this is NOT the file descriptor refcount::Tuple{Int, Int} - notify::Base.ThreadSynchronizer + const notify::Base.ThreadSynchronizer events::Int32 active::Tuple{Bool, Bool} @@ -275,7 +278,7 @@ end mutable struct FDWatcher # WARNING: make sure `close` has been manually called on this watcher before closing / destroying `fd` - watcher::_FDWatcher + const watcher::_FDWatcher mask::FDEvent function FDWatcher(fd::RawFD, readable::Bool, writable::Bool) return FDWatcher(fd, FDEvent(readable, writable, false, false)) @@ -368,9 +371,8 @@ end function _uv_hook_close(uv::FileMonitor) lock(uv.notify) try - uv.active = false Libc.free(@atomicswap :monotonic uv.handle = C_NULL) - notify(uv.notify, FileEvent()) + notify(uv.notify) finally unlock(uv.notify) end @@ -399,10 +401,12 @@ function uv_fseventscb_file(handle::Ptr{Cvoid}, filename::Ptr, events::Int32, st lock(t.notify) try if status != 0 + t.ioerrno = status notify_error(t.notify, _UVError("FileMonitor", status)) - else - t.events |= events - notify(t.notify, FileEvent(events)) + uvfinalize(t) + elseif events != t.events + events = t.events |= events + notify(t.notify, all=false) end finally unlock(t.notify) @@ -535,35 +539,6 @@ function start_watching(t::_FDWatcher) nothing end -function start_watching(t::FileMonitor) - iolock_begin() - t.handle == C_NULL && throw(ArgumentError("FileMonitor is closed")) - if !t.active - uv_error("FileMonitor (start)", - ccall(:uv_fs_event_start, Int32, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Int32), - t.handle, uv_jl_fseventscb_file::Ptr{Cvoid}, t.file, 0)) - t.active = true - end - iolock_end() - nothing -end - -function stop_watching(t::FileMonitor) - iolock_begin() - lock(t.notify) - try - if t.active && isempty(t.notify) - t.active = false - uv_error("FileMonitor (stop)", - ccall(:uv_fs_event_stop, Int32, (Ptr{Cvoid},), t.handle)) - end - finally - unlock(t.notify) - end - iolock_end() - nothing -end - # n.b. 
this _wait may return spuriously early with a timedout event function _wait(fdw::_FDWatcher, mask::FDEvent) iolock_begin() @@ -705,26 +680,23 @@ function wait(pfw::PollingFileWatcher) end function wait(m::FileMonitor) - iolock_begin() + m.handle == C_NULL && throw(EOFError()) preserve_handle(m) lock(m.notify) - local events try - start_watching(m) - iolock_end() - events = wait(m.notify)::FileEvent - events |= FileEvent(m.events) - m.events = 0 - unlock(m.notify) - iolock_begin() - lock(m.notify) + while true + m.handle == C_NULL && throw(EOFError()) + events = @atomicswap :not_atomic m.events = 0 + events == 0 || return FileEvent(events) + if m.ioerrno != 0 + uv_error("FileMonitor", m.ioerrno) + end + wait(m.notify) + end finally unlock(m.notify) unpreserve_handle(m) end - stop_watching(m) - iolock_end() - return events end function wait(m::FolderMonitor) @@ -743,6 +715,7 @@ function wait(m::FolderMonitor) end return evt::Pair{String, FileEvent} end +Base.take!(m::FolderMonitor) = wait(m) # Channel-like API """ @@ -823,7 +796,12 @@ function watch_file(s::String, timeout_s::Float64=-1.0) close(fm) end end - return wait(fm) + try + return wait(fm) + catch ex + ex isa EOFError && return FileEvent() + rethrow() + end finally close(fm) @isdefined(timer) && close(timer) @@ -851,7 +829,7 @@ This behavior of this function varies slightly across platforms. See """ watch_folder(s::AbstractString, timeout_s::Real=-1) = watch_folder(String(s), timeout_s) function watch_folder(s::String, timeout_s::Real=-1) - fm = get!(watched_folders, s) do + fm = @lock watched_folders get!(watched_folders[], s) do return FolderMonitor(s) end local timer @@ -898,12 +876,12 @@ It is not recommended to do this while another task is waiting for """ unwatch_folder(s::AbstractString) = unwatch_folder(String(s)) function unwatch_folder(s::String) - fm = pop!(watched_folders, s, nothing) + fm = @lock watched_folders pop!(watched_folders[], s, nothing) fm === nothing || close(fm) nothing end -const watched_folders = Dict{String, FolderMonitor}() +const watched_folders = Lockable(Dict{String, FolderMonitor}()) """ poll_file(path::AbstractString, interval_s::Real=5.007, timeout_s::Real=-1) -> (previous::StatStruct, current) diff --git a/stdlib/FileWatching/src/pidfile.jl b/stdlib/FileWatching/src/pidfile.jl index 4c821a3d897e4..95b8f20face29 100644 --- a/stdlib/FileWatching/src/pidfile.jl +++ b/stdlib/FileWatching/src/pidfile.jl @@ -4,14 +4,14 @@ module Pidfile export mkpidlock, trymkpidlock using Base: - IOError, UV_EEXIST, UV_ESRCH, + IOError, UV_EEXIST, UV_ESRCH, UV_ENOENT, Process using Base.Filesystem: File, open, JL_O_CREAT, JL_O_RDWR, JL_O_RDONLY, JL_O_EXCL, rename, samefile, path_separator -using ..FileWatching: watch_file +using ..FileWatching: FileMonitor using Base.Sys: iswindows """ @@ -256,19 +256,43 @@ function open_exclusive(path::String; end end # fall-back: wait for the lock - + watch = Lockable(Core.Box(nothing)) while true - # start the file-watcher prior to checking for the pidfile existence - t = @async try - watch_file(path, poll_interval) + # now try again to create it + # try to start the file-watcher prior to checking for the pidfile existence + watch = try + FileMonitor(path) catch ex isa(ex, IOError) || rethrow(ex) - sleep(poll_interval) # if the watch failed, convert to just doing a sleep + ex.code != UV_ENOENT # if the file was deleted in the meantime, don't sleep at all, even if the lock fails + end + timeout = nothing + if watch isa FileMonitor && stale_age > 0 + let watch = watch + timeout = 
Timer(stale_age) do t + close(watch) + end + end + end + try + file = tryopen_exclusive(path, mode) + file === nothing || return file + if watch isa FileMonitor + try + Base.wait(watch) # will time-out after stale_age passes + catch ex + isa(ex, EOFError) || isa(ex, IOError) || rethrow(ex) + end + end + if watch === true # if the watch failed, convert to just doing a sleep + sleep(poll_interval) + end + finally + # something changed about the path, so watch is now possibly monitoring the wrong file handle + # it will need to be recreated just before the next tryopen_exclusive attempt + timeout isa Timer && close(timeout) + watch isa FileMonitor && close(watch) end - # now try again to create it - file = tryopen_exclusive(path, mode) - file === nothing || return file - Base.wait(t) # sleep for a bit before trying again if stale_age > 0 && stale_pidfile(path, stale_age, refresh) # if the file seems stale, try to remove it before attempting again # set stale_age to zero so we won't attempt again, even if the attempt fails diff --git a/stdlib/FileWatching/test/runtests.jl b/stdlib/FileWatching/test/runtests.jl index c9d7a4317fd08..11df8849048f8 100644 --- a/stdlib/FileWatching/test/runtests.jl +++ b/stdlib/FileWatching/test/runtests.jl @@ -169,12 +169,13 @@ file = joinpath(dir, "afile.txt") # initialize a watch_folder instance and create afile.txt function test_init_afile() - @test isempty(FileWatching.watched_folders) + watched_folders = FileWatching.watched_folders + @test @lock watched_folders isempty(watched_folders[]) @test(watch_folder(dir, 0) == ("" => FileWatching.FileEvent())) @test @elapsed(@test(watch_folder(dir, 0) == ("" => FileWatching.FileEvent()))) <= 0.5 - @test length(FileWatching.watched_folders) == 1 + @test @lock(watched_folders, length(FileWatching.watched_folders[])) == 1 @test unwatch_folder(dir) === nothing - @test isempty(FileWatching.watched_folders) + @test @lock watched_folders isempty(watched_folders[]) @test 0.002 <= @elapsed(@test(watch_folder(dir, 0.004) == ("" => FileWatching.FileEvent()))) @test 0.002 <= @elapsed(@test(watch_folder(dir, 0.004) == ("" => FileWatching.FileEvent()))) <= 0.5 @test unwatch_folder(dir) === nothing @@ -204,7 +205,7 @@ function test_init_afile() @test unwatch_folder(dir) === nothing @test(watch_folder(dir, 0) == ("" => FileWatching.FileEvent())) @test 0.9 <= @elapsed(@test(watch_folder(dir, 1) == ("" => FileWatching.FileEvent()))) - @test length(FileWatching.watched_folders) == 1 + @test @lock(watched_folders, length(FileWatching.watched_folders[])) == 1 nothing end @@ -440,7 +441,7 @@ end (StatStruct(), EOFError()))) > 3) unwatch_folder(dir) -@test isempty(FileWatching.watched_folders) +@test @lock FileWatching.watched_folders isempty(FileWatching.watched_folders[]) rm(file) rm(dir) From f8d17e7ad4857ba3164ca1c4df8d118dbf42b429 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 26 Sep 2024 15:04:26 -0400 Subject: [PATCH 089/537] [FileWatching] reorganize file and add docs --- stdlib/FileWatching/docs/src/index.md | 16 +- stdlib/FileWatching/src/FileWatching.jl | 386 +++++++++++++++--------- stdlib/FileWatching/test/runtests.jl | 6 +- 3 files changed, 248 insertions(+), 160 deletions(-) diff --git a/stdlib/FileWatching/docs/src/index.md b/stdlib/FileWatching/docs/src/index.md index 1b2212fcc5a28..15d4e39a45117 100644 --- a/stdlib/FileWatching/docs/src/index.md +++ b/stdlib/FileWatching/docs/src/index.md @@ -5,11 +5,17 @@ EditURL = "https://github.com/JuliaLang/julia/blob/master/stdlib/FileWatching/do # [File Events](@id 
lib-filewatching) ```@docs -FileWatching.poll_fd -FileWatching.poll_file -FileWatching.watch_file -FileWatching.watch_folder -FileWatching.unwatch_folder +poll_fd +poll_file +watch_file +watch_folder +unwatch_folder +``` +```@docs +FileMonitor +FolderMonitor +PollingFileWatcher +FDWatcher ``` # Pidfile diff --git a/stdlib/FileWatching/src/FileWatching.jl b/stdlib/FileWatching/src/FileWatching.jl index b24f352943ec5..7c743ce634193 100644 --- a/stdlib/FileWatching/src/FileWatching.jl +++ b/stdlib/FileWatching/src/FileWatching.jl @@ -6,7 +6,7 @@ Utilities for monitoring files and file descriptors for events. module FileWatching export - # one-shot API (returns results): + # one-shot API (returns results, race-y): watch_file, # efficient for small numbers of files watch_folder, # efficient for large numbers of files unwatch_folder, @@ -78,6 +78,134 @@ isreadable(f::FDEvent) = f.readable iswritable(f::FDEvent) = f.writable |(a::FDEvent, b::FDEvent) = FDEvent(getfield(a, :events) | getfield(b, :events)) +# Callback functions + +function uv_fseventscb_file(handle::Ptr{Cvoid}, filename::Ptr, events::Int32, status::Int32) + t = @handle_as handle FileMonitor + lock(t.notify) + try + if status != 0 + t.ioerrno = status + notify_error(t.notify, _UVError("FileMonitor", status)) + uvfinalize(t) + elseif events != t.events + events = t.events |= events + notify(t.notify, all=false) + end + finally + unlock(t.notify) + end + nothing +end + +function uv_fseventscb_folder(handle::Ptr{Cvoid}, filename::Ptr, events::Int32, status::Int32) + t = @handle_as handle FolderMonitor + lock(t.notify) + try + if status != 0 + notify_error(t.notify, _UVError("FolderMonitor", status)) + else + fname = (filename == C_NULL) ? "" : unsafe_string(convert(Cstring, filename)) + push!(t.channel, fname => FileEvent(events)) + notify(t.notify) + end + finally + unlock(t.notify) + end + nothing +end + +function uv_pollcb(handle::Ptr{Cvoid}, status::Int32, events::Int32) + t = @handle_as handle _FDWatcher + lock(t.notify) + try + if status != 0 + notify_error(t.notify, _UVError("FDWatcher", status)) + else + t.events |= events + if t.active[1] || t.active[2] + if isempty(t.notify) + # if we keep hearing about events when nobody appears to be listening, + # stop the poll to save cycles + t.active = (false, false) + ccall(:uv_poll_stop, Int32, (Ptr{Cvoid},), t.handle) + end + end + notify(t.notify, events) + end + finally + unlock(t.notify) + end + nothing +end + +function uv_fspollcb(req::Ptr{Cvoid}) + pfw = unsafe_pointer_to_objref(uv_req_data(req))::PollingFileWatcher + pfw.active = false + unpreserve_handle(pfw) + @assert pointer(pfw.stat_req) == req + r = Int32(ccall(:uv_fs_get_result, Cssize_t, (Ptr{Cvoid},), req)) + statbuf = ccall(:uv_fs_get_statbuf, Ptr{UInt8}, (Ptr{Cvoid},), req) + curr_stat = StatStruct(pfw.file, statbuf, r) + uv_fs_req_cleanup(req) + lock(pfw.notify) + try + if !isempty(pfw.notify) # must discard the update if nobody watching + if pfw.ioerrno != r || (r == 0 && pfw.prev_stat != curr_stat) + if r == 0 + pfw.prev_stat = curr_stat + end + pfw.ioerrno = r + notify(pfw.notify, true) + end + pfw.timer = Timer(pfw.interval) do t + # async task + iolock_begin() + lock(pfw.notify) + try + if pfw.timer === t # use identity check to test if this callback is stale by the time we got the lock + pfw.timer = nothing + @assert !pfw.active + if isopen(pfw) && !isempty(pfw.notify) + preserve_handle(pfw) + uv_jl_fspollcb = @cfunction(uv_fspollcb, Cvoid, (Ptr{Cvoid},)) + err = ccall(:uv_fs_stat, Cint, (Ptr{Cvoid}, 
Ptr{Cvoid}, Cstring, Ptr{Cvoid}), + eventloop(), pfw.stat_req, pfw.file, uv_jl_fspollcb::Ptr{Cvoid}) + err == 0 || notify(pfw.notify, _UVError("PollingFileWatcher (start)", err), error=true) # likely just ENOMEM + pfw.active = true + end + end + finally + unlock(pfw.notify) + end + iolock_end() + nothing + end + end + finally + unlock(pfw.notify) + end + nothing +end + +# Types + +""" + FileMonitor(path::AbstractString) + +Watch file or directory `path` (which must exist) for changes until a change occurs. This +function does not poll the file system and instead uses platform-specific functionality to +receive notifications from the operating system (e.g. via inotify on Linux). See the NodeJS +documentation linked below for details. + +`fm = FileMonitor(path)` acts like an auto-reset Event, so `wait(fm)` blocks until there has +been at least one event in the file originally at the given path and then returns an object +with boolean fields `renamed`, `changed`, `timedout` summarizing all changes that have +occurred since the last call to `wait` returned. + +This behavior of this function varies slightly across platforms. See + for more detailed information. +""" mutable struct FileMonitor @atomic handle::Ptr{Cvoid} const file::String @@ -96,6 +224,7 @@ mutable struct FileMonitor uv_error("FileMonitor", err) end finalizer(uvfinalize, this) + uv_jl_fseventscb_file = @cfunction(uv_fseventscb_file, Cvoid, (Ptr{Cvoid}, Ptr{Int8}, Int32, Int32)) uv_error("FileMonitor (start)", ccall(:uv_fs_event_start, Int32, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Int32), this.handle, uv_jl_fseventscb_file::Ptr{Cvoid}, file, 0)) @@ -104,6 +233,23 @@ mutable struct FileMonitor end end + +""" + FolderMonitor(folder::AbstractString) + +Watch a file or directory `path` for changes until a change has occurred. This function does +not poll the file system and instead uses platform-specific functionality to receive +notifications from the operating system (e.g. via inotify on Linux). See the NodeJS +documentation linked below for details. + +This acts similar to a Channel, so calling `take!` (or `wait`) blocks until some change has +occurred. The `wait` function will return a pair where the first field is the name of the +changed file (if available) and the second field is an object with boolean fields `renamed` +and `changed`, giving the event that occurred on it. + +This behavior of this function varies slightly across platforms. See + for more detailed information. +""" mutable struct FolderMonitor @atomic handle::Ptr{Cvoid} # notify::Channel{Any} # eltype = Union{Pair{String, FileEvent}, IOError} @@ -121,6 +267,7 @@ mutable struct FolderMonitor throw(_UVError("FolderMonitor", err)) end finalizer(uvfinalize, this) + uv_jl_fseventscb_folder = @cfunction(uv_fseventscb_folder, Cvoid, (Ptr{Cvoid}, Ptr{Int8}, Int32, Int32)) uv_error("FolderMonitor (start)", ccall(:uv_fs_event_start, Int32, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Int32), handle, uv_jl_fseventscb_folder::Ptr{Cvoid}, folder, 0)) @@ -131,6 +278,28 @@ end # this is similar to uv_fs_poll, but strives to avoid the design mistakes that make it unsuitable for any usable purpose # https://github.com/libuv/libuv/issues/4543 +""" + PollingFileWatcher(path::AbstractString, interval_s::Real=5.007) + +Monitor a file for changes by polling `stat` every `interval_s` seconds until a change +occurs or `timeout_s` seconds have elapsed. The `interval_s` should be a long period; the +default is 5.007 seconds. Call `stat` on it to get the most recent, but old, result. 
+ +This acts like an auto-reset Event, so calling `wait` blocks until the `stat` result has +changed since the previous value captured upon entry to the `wait` call. The `wait` function +will return a pair of status objects `(previous, current)` once any `stat` change is +detected since the previous time that `wait` was called. The `previous` status is always a +`StatStruct`, but it may have all of the fields zeroed (indicating the file didn't +previously exist, or wasn't previously accessible). + +The `current` status object may be a `StatStruct`, an `EOFError` (if the wait is canceled by +closing this object), or some other `Exception` subtype (if the `stat` operation failed: for +example, if the path is removed). Note that `stat` value may be outdated if the file has +changed again multiple times. + +Using [`FileMonitor`](@ref) for this operation is preferred, since it is more reliable and +efficient, although in some situations it may not be available. +""" mutable struct PollingFileWatcher file::String interval::Float64 @@ -151,8 +320,6 @@ mutable struct PollingFileWatcher end end -Base.stat(pfw::PollingFileWatcher) = Base.checkstat(@lock pfw.notify pfw.prev_stat) - mutable struct _FDWatcher @atomic handle::Ptr{Cvoid} const fdnum::Int # this is NOT the file descriptor @@ -276,6 +443,25 @@ mutable struct _FDWatcher end end +""" + FDWatcher(fd::Union{RawFD,WindowsRawSocket}, readable::Bool, writable::Bool) + +Monitor a file descriptor `fd` for changes in the read or write availability. + +The keyword arguments determine which of read and/or write status should be monitored; at +least one of them must be set to `true`. + +The returned value is an object with boolean fields `readable`, `writable`, and `timedout`, +giving the result of the polling. + +This acts like a level-set event, so calling `wait` blocks until one of those conditions is +met, but then continues to return without blocking until the condition is cleared (either +there is no more to read, or no more space in the write buffer, or both). + +!!! warning + You must call `close` manually, when finished with this object, before the fd + argument is closed. Failure to do so risks serious crashes. +""" mutable struct FDWatcher # WARNING: make sure `close` has been manually called on this watcher before closing / destroying `fd` const watcher::_FDWatcher @@ -396,148 +582,7 @@ isopen(pfw::PollingFileWatcher) = !pfw.closed isopen(pfw::_FDWatcher) = pfw.refcount != (0, 0) isopen(pfw::FDWatcher) = !pfw.mask.timedout -function uv_fseventscb_file(handle::Ptr{Cvoid}, filename::Ptr, events::Int32, status::Int32) - t = @handle_as handle FileMonitor - lock(t.notify) - try - if status != 0 - t.ioerrno = status - notify_error(t.notify, _UVError("FileMonitor", status)) - uvfinalize(t) - elseif events != t.events - events = t.events |= events - notify(t.notify, all=false) - end - finally - unlock(t.notify) - end - nothing -end - -function uv_fseventscb_folder(handle::Ptr{Cvoid}, filename::Ptr, events::Int32, status::Int32) - t = @handle_as handle FolderMonitor - lock(t.notify) - try - if status != 0 - notify_error(t.notify, _UVError("FolderMonitor", status)) - else - fname = (filename == C_NULL) ? 
"" : unsafe_string(convert(Cstring, filename)) - push!(t.channel, fname => FileEvent(events)) - notify(t.notify) - end - finally - unlock(t.notify) - end - nothing -end - -function uv_pollcb(handle::Ptr{Cvoid}, status::Int32, events::Int32) - t = @handle_as handle _FDWatcher - lock(t.notify) - try - if status != 0 - notify_error(t.notify, _UVError("FDWatcher", status)) - else - t.events |= events - if t.active[1] || t.active[2] - if isempty(t.notify) - # if we keep hearing about events when nobody appears to be listening, - # stop the poll to save cycles - t.active = (false, false) - ccall(:uv_poll_stop, Int32, (Ptr{Cvoid},), t.handle) - end - end - notify(t.notify, events) - end - finally - unlock(t.notify) - end - nothing -end - -function uv_fspollcb(req::Ptr{Cvoid}) - pfw = unsafe_pointer_to_objref(uv_req_data(req))::PollingFileWatcher - pfw.active = false - unpreserve_handle(pfw) - @assert pointer(pfw.stat_req) == req - r = Int32(ccall(:uv_fs_get_result, Cssize_t, (Ptr{Cvoid},), req)) - statbuf = ccall(:uv_fs_get_statbuf, Ptr{UInt8}, (Ptr{Cvoid},), req) - curr_stat = StatStruct(pfw.file, statbuf, r) - uv_fs_req_cleanup(req) - lock(pfw.notify) - try - if !isempty(pfw.notify) # discard the update if nobody watching - if pfw.ioerrno != r || (r == 0 && pfw.prev_stat != curr_stat) - if r == 0 - pfw.prev_stat = curr_stat - end - pfw.ioerrno = r - notify(pfw.notify, true) - end - pfw.timer = Timer(pfw.interval) do t - # async task - iolock_begin() - lock(pfw.notify) - try - if pfw.timer === t # use identity check to test if this callback is stale by the time we got the lock - pfw.timer = nothing - @assert !pfw.active - if isopen(pfw) && !isempty(pfw.notify) - preserve_handle(pfw) - err = ccall(:uv_fs_stat, Cint, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Ptr{Cvoid}), - eventloop(), pfw.stat_req, pfw.file, uv_jl_fspollcb) - err == 0 || notify(pfw.notify, _UVError("PollingFileWatcher (start)", err), error=true) # likely just ENOMEM - pfw.active = true - end - end - finally - unlock(pfw.notify) - end - iolock_end() - nothing - end - end - finally - unlock(pfw.notify) - end - nothing -end - -global uv_jl_pollcb::Ptr{Cvoid} -global uv_jl_fspollcb::Ptr{Cvoid} -global uv_jl_fseventscb_file::Ptr{Cvoid} -global uv_jl_fseventscb_folder::Ptr{Cvoid} - -function __init__() - global uv_jl_pollcb = @cfunction(uv_pollcb, Cvoid, (Ptr{Cvoid}, Cint, Cint)) - global uv_jl_fspollcb = @cfunction(uv_fspollcb, Cvoid, (Ptr{Cvoid},)) - global uv_jl_fseventscb_file = @cfunction(uv_fseventscb_file, Cvoid, (Ptr{Cvoid}, Ptr{Int8}, Int32, Int32)) - global uv_jl_fseventscb_folder = @cfunction(uv_fseventscb_folder, Cvoid, (Ptr{Cvoid}, Ptr{Int8}, Int32, Int32)) - - Base.mkpidlock_hook = mkpidlock - Base.trymkpidlock_hook = trymkpidlock - Base.parse_pidfile_hook = Pidfile.parse_pidfile - - nothing -end - -function start_watching(t::_FDWatcher) - iolock_begin() - t.handle == C_NULL && throw(ArgumentError("FDWatcher is closed")) - readable = t.refcount[1] > 0 - writable = t.refcount[2] > 0 - if t.active[1] != readable || t.active[2] != writable - # make sure the READABLE / WRITEABLE state is updated - uv_error("FDWatcher (start)", - ccall(:uv_poll_start, Int32, (Ptr{Cvoid}, Int32, Ptr{Cvoid}), - t.handle, - (readable ? UV_READABLE : 0) | (writable ? UV_WRITABLE : 0), - uv_jl_pollcb::Ptr{Cvoid})) - t.active = (readable, writable) - end - iolock_end() - nothing -end +Base.stat(pfw::PollingFileWatcher) = Base.checkstat(@lock pfw.notify pfw.prev_stat) # n.b. 
this _wait may return spuriously early with a timedout event function _wait(fdw::_FDWatcher, mask::FDEvent) @@ -549,7 +594,20 @@ function _wait(fdw::_FDWatcher, mask::FDEvent) if !isopen(fdw) # !open throw(EOFError()) elseif events.timedout - start_watching(fdw) # make sure the poll is active + fdw.handle == C_NULL && throw(ArgumentError("FDWatcher is closed")) + # start_watching to make sure the poll is active + readable = fdw.refcount[1] > 0 + writable = fdw.refcount[2] > 0 + if fdw.active[1] != readable || fdw.active[2] != writable + # make sure the READABLE / WRITEABLE state is updated + uv_jl_pollcb = @cfunction(uv_pollcb, Cvoid, (Ptr{Cvoid}, Cint, Cint)) + uv_error("FDWatcher (start)", + ccall(:uv_poll_start, Int32, (Ptr{Cvoid}, Int32, Ptr{Cvoid}), + fdw.handle, + (readable ? UV_READABLE : 0) | (writable ? UV_WRITABLE : 0), + uv_jl_pollcb::Ptr{Cvoid})) + fdw.active = (readable, writable) + end iolock_end() return FDEvent(wait(fdw.notify)::Int32) else @@ -631,8 +689,9 @@ function wait(pfw::PollingFileWatcher) # start_watching if !pfw.active preserve_handle(pfw) + uv_jl_fspollcb = @cfunction(uv_fspollcb, Cvoid, (Ptr{Cvoid},)) err = ccall(:uv_fs_stat, Cint, (Ptr{Cvoid}, Ptr{Cvoid}, Cstring, Ptr{Cvoid}), - eventloop(), pfw.stat_req, pfw.file, uv_jl_fspollcb) + eventloop(), pfw.stat_req, pfw.file, uv_jl_fspollcb::Ptr{Cvoid}) err == 0 || uv_error("PollingFileWatcher (start)", err) # likely just ENOMEM pfw.active = true end @@ -664,7 +723,8 @@ function wait(pfw::PollingFileWatcher) if !havechange # user canceled by calling close return prevstat, EOFError() end - # grab the most up-to-date stat result as of this time, even if it was a bit newer than the notify call + # grab the most up-to-date stat result as of this time, even if it was a bit newer than + # the notify call (unlikely, as there would need to be a concurrent call to wait) lock(pfw.notify) currstat = pfw.prev_stat ioerrno = pfw.ioerrno @@ -729,6 +789,10 @@ least one of them must be set to `true`. The returned value is an object with boolean fields `readable`, `writable`, and `timedout`, giving the result of the polling. + +This is a thin wrapper over calling `wait` on a [`FDWatcher`](@ref), which implements the +functionality but requires the user to call `close` manually when finished with it, or risk +serious crashes. """ function poll_fd(s::Union{RawFD, Sys.iswindows() ? WindowsRawSocket : Union{}}, timeout_s::Real=-1; readable=false, writable=false) mask = FDEvent(readable, writable, false, false) @@ -786,6 +850,15 @@ giving the result of watching the file. This behavior of this function varies slightly across platforms. See for more detailed information. + +This is a thin wrapper over calling `wait` on a [`FileMonitor`](@ref). This function has a +small race window between consecutive calls to `watch_file` where the file might change +without being detected. To avoid this race, use + + fm = FileMonitor(path) + wait(fm) + +directly, re-using the same `fm` each time you `wait`. """ function watch_file(s::String, timeout_s::Float64=-1.0) fm = FileMonitor(s) @@ -812,7 +885,7 @@ watch_file(s::AbstractString, timeout_s::Real=-1) = watch_file(String(s), Float6 """ watch_folder(path::AbstractString, timeout_s::Real=-1) -Watches a file or directory `path` for changes until a change has occurred or `timeout_s` +Watch a file or directory `path` for changes until a change has occurred or `timeout_s` seconds have elapsed. 
This function does not poll the file system and instead uses platform-specific functionality to receive notifications from the operating system (e.g. via inotify on Linux). See the NodeJS documentation linked below for details. @@ -826,6 +899,8 @@ giving the event. This behavior of this function varies slightly across platforms. See for more detailed information. + +This function is a thin wrapper over calling `wait` on a [`FolderMonitor`](@ref), with added timeout support. """ watch_folder(s::AbstractString, timeout_s::Real=-1) = watch_folder(String(s), timeout_s) function watch_folder(s::String, timeout_s::Real=-1) @@ -895,11 +970,15 @@ The `previous` status is always a `StatStruct`, but it may have all of the field (indicating the file didn't previously exist, or wasn't previously accessible). The `current` status object may be a `StatStruct`, an `EOFError` (indicating the timeout elapsed), -or some other `Exception` subtype (if the `stat` operation failed - for example, if the path does not exist). +or some other `Exception` subtype (if the `stat` operation failed: for example, if the path does not exist). To determine when a file was modified, compare `!(current isa StatStruct && prev == current)` to detect notification of changes to the mtime or inode. However, using [`watch_file`](@ref) for this operation is preferred, since it is more reliable and efficient, although in some situations it may not be available. + +This is a thin wrapper over calling `wait` on a [`PollingFileWatcher`](@ref), which implements +the functionality, but this function has a small race window between consecutive calls to +`poll_file` where the file might change without being detected. """ function poll_file(s::AbstractString, interval_seconds::Real=5.007, timeout_s::Real=-1) pfw = PollingFileWatcher(s, Float64(interval_seconds)) @@ -920,4 +999,11 @@ end include("pidfile.jl") import .Pidfile: mkpidlock, trymkpidlock +function __init__() + Base.mkpidlock_hook = mkpidlock + Base.trymkpidlock_hook = trymkpidlock + Base.parse_pidfile_hook = Pidfile.parse_pidfile + nothing +end + end diff --git a/stdlib/FileWatching/test/runtests.jl b/stdlib/FileWatching/test/runtests.jl index 11df8849048f8..def555154264d 100644 --- a/stdlib/FileWatching/test/runtests.jl +++ b/stdlib/FileWatching/test/runtests.jl @@ -452,10 +452,6 @@ rm(dir) include("pidfile.jl") end -@testset "Docstrings" begin - undoc = Docs.undocumented_names(FileWatching) - @test_broken isempty(undoc) - @test undoc == [:FDWatcher, :FileMonitor, :FolderMonitor, :PollingFileWatcher] -end +@test isempty(Docs.undocumented_names(FileWatching)) end # testset From bb25910328570835f6a2fdbb3b8ca93b14a65858 Mon Sep 17 00:00:00 2001 From: Kiran Pamnany Date: Mon, 30 Sep 2024 15:41:20 -0400 Subject: [PATCH 090/537] Add `--trace-dispatch` (#55848) --- NEWS.md | 1 + base/options.jl | 1 + doc/man/julia.1 | 4 ++ doc/src/manual/command-line-interface.md | 1 + src/gf.c | 57 +++++++++++++++++++++++- src/jloptions.c | 8 ++++ src/jloptions.h | 1 + src/jltypes.c | 2 +- src/julia.h | 8 +++- src/method.c | 2 +- src/staticdata.c | 2 +- src/staticdata_utils.c | 3 +- test/cmdlineargs.jl | 22 +++++++++ test/core.jl | 2 +- 14 files changed, 106 insertions(+), 8 deletions(-) diff --git a/NEWS.md b/NEWS.md index ca2bf1f615012..cc1bbc7449e5d 100644 --- a/NEWS.md +++ b/NEWS.md @@ -59,6 +59,7 @@ variables. ([#53742]). * New `--trace-compile-timing` option to report how long each method reported by `--trace-compile` took to compile, in ms. 
([#54662]) * `--trace-compile` now prints recompiled methods in yellow or with a trailing comment if color is not supported ([#55763]) +* New `--trace-dispatch` option to report methods that are dynamically dispatched ([#55848]). Multi-threading changes ----------------------- diff --git a/base/options.jl b/base/options.jl index 1de7a2acb1e06..f535c27d99122 100644 --- a/base/options.jl +++ b/base/options.jl @@ -34,6 +34,7 @@ struct JLOptions can_inline::Int8 polly::Int8 trace_compile::Ptr{UInt8} + trace_dispatch::Ptr{UInt8} fast_math::Int8 worker::Int8 cookie::Ptr{UInt8} diff --git a/doc/man/julia.1 b/doc/man/julia.1 index 536a23bd37894..56cb690d66eeb 100644 --- a/doc/man/julia.1 +++ b/doc/man/julia.1 @@ -290,6 +290,10 @@ Methods that were recompiled are printed in yellow or with a trailing comment if --trace-compile-timing= If --trace-compile is enabled show how long each took to compile in ms +.TP +--trace-dispatch={stderr|name} +Print precompile statements for methods dispatched during execution or save to stderr or a path. + .TP -image-codegen Force generate code in imaging mode diff --git a/doc/src/manual/command-line-interface.md b/doc/src/manual/command-line-interface.md index ef20e51ea6e4e..5255720e55cd7 100644 --- a/doc/src/manual/command-line-interface.md +++ b/doc/src/manual/command-line-interface.md @@ -216,6 +216,7 @@ The following is a complete list of command-line switches available when launchi |`--output-incremental={yes\|no*}` |Generate an incremental output file (rather than complete)| |`--trace-compile={stderr\|name}` |Print precompile statements for methods compiled during execution or save to stderr or a path. Methods that were recompiled are printed in yellow or with a trailing comment if color is not supported| |`--trace-compile-timing` |If --trace-compile is enabled show how long each took to compile in ms| +|`--trace-dispatch={stderr\|name}` |Print precompile statements for methods dispatched during execution or save to stderr or a path.| |`--image-codegen` |Force generate code in imaging mode| |`--permalloc-pkgimg={yes\|no*}` |Copy the data section of package images into memory| |`--trim={no*|safe|unsafe|unsafe-warn}` |Build a sysimage including only code provably reachable from methods marked by calling `entrypoint`. The three non-default options differ in how they handle dynamic call sites. In safe mode, such sites result in compile-time errors. In unsafe mode, such sites are allowed but the resulting binary might be missing needed code and can throw runtime errors. 
With unsafe-warn, such sites will trigger warnings at compile-time and might error at runtime.| diff --git a/src/gf.c b/src/gf.c index 321711c839aa8..56ebe6fe2fa84 100644 --- a/src/gf.c +++ b/src/gf.c @@ -2560,6 +2560,38 @@ static void record_precompile_statement(jl_method_instance_t *mi, double compila JL_UNLOCK(&precomp_statement_out_lock); } +jl_mutex_t dispatch_statement_out_lock; + +static void record_dispatch_statement(jl_method_instance_t *mi) +{ + static ios_t f_dispatch; + static JL_STREAM* s_dispatch = NULL; + jl_method_t *def = mi->def.method; + if (!jl_is_method(def)) + return; + + JL_LOCK(&dispatch_statement_out_lock); + if (s_dispatch == NULL) { + const char *t = jl_options.trace_dispatch; + if (!strncmp(t, "stderr", 6)) { + s_dispatch = JL_STDERR; + } + else { + if (ios_file(&f_dispatch, t, 1, 1, 1, 1) == NULL) + jl_errorf("cannot open dispatch statement file \"%s\" for writing", t); + s_dispatch = (JL_STREAM*) &f_dispatch; + } + } + if (!jl_has_free_typevars(mi->specTypes)) { + jl_printf(s_dispatch, "precompile("); + jl_static_show(s_dispatch, mi->specTypes); + jl_printf(s_dispatch, ")\n"); + if (s_dispatch != JL_STDERR) + ios_flush(&f_dispatch); + } + JL_UNLOCK(&dispatch_statement_out_lock); +} + // If waitcompile is 0, this will return NULL if compiling is on-going in the JIT. This is // useful for the JIT itself, since it just doesn't cause redundant work or missed updates, // but merely causes it to look into the current JIT worklist. @@ -3067,7 +3099,8 @@ static void jl_compile_now(jl_method_instance_t *mi) JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tupletype_t *types, size_t world) { size_t tworld = jl_typeinf_world; - jl_atomic_store_relaxed(&mi->precompiled, 1); + uint8_t miflags = jl_atomic_load_relaxed(&mi->flags) | JL_MI_FLAGS_MASK_PRECOMPILED; + jl_atomic_store_relaxed(&mi->flags, miflags); if (jl_generating_output()) { jl_compile_now(mi); // In addition to full compilation of the compilation-signature, if `types` is more specific (e.g. 
due to nospecialize), @@ -3082,7 +3115,8 @@ JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tuplet types2 = jl_type_intersection_env((jl_value_t*)types, (jl_value_t*)mi->def.method->sig, &tpenv2); jl_method_instance_t *mi2 = jl_specializations_get_linfo(mi->def.method, (jl_value_t*)types2, tpenv2); JL_GC_POP(); - jl_atomic_store_relaxed(&mi2->precompiled, 1); + miflags = jl_atomic_load_relaxed(&mi2->flags) | JL_MI_FLAGS_MASK_PRECOMPILED; + jl_atomic_store_relaxed(&mi2->flags, miflags); if (jl_rettype_inferred_native(mi2, world, world) == jl_nothing) (void)jl_type_infer(mi2, world, SOURCE_MODE_NOT_REQUIRED); if (jl_typeinf_func && jl_atomic_load_relaxed(&mi->def.method->primary_world) <= tworld) { @@ -3358,6 +3392,16 @@ STATIC_INLINE jl_method_instance_t *jl_lookup_generic_(jl_value_t *F, jl_value_t jl_method_error(F, args, nargs, world); // unreachable } + // mfunc is about to be dispatched + if (jl_options.trace_dispatch != NULL) { + uint8_t miflags = jl_atomic_load_relaxed(&mfunc->flags); + uint8_t was_dispatched = miflags & JL_MI_FLAGS_MASK_DISPATCHED; + if (!was_dispatched) { + miflags |= JL_MI_FLAGS_MASK_DISPATCHED; + jl_atomic_store_relaxed(&mfunc->flags, miflags); + record_dispatch_statement(mfunc); + } + } } #ifdef JL_TRACE @@ -3480,6 +3524,15 @@ jl_value_t *jl_gf_invoke_by_method(jl_method_t *method, jl_value_t *gf, jl_value jl_gc_sync_total_bytes(last_alloc); // discard allocation count from compilation } JL_GC_PROMISE_ROOTED(mfunc); + if (jl_options.trace_dispatch != NULL) { + uint8_t miflags = jl_atomic_load_relaxed(&mfunc->flags); + uint8_t was_dispatched = miflags & JL_MI_FLAGS_MASK_DISPATCHED; + if (!was_dispatched) { + miflags |= JL_MI_FLAGS_MASK_DISPATCHED; + jl_atomic_store_relaxed(&mfunc->flags, miflags); + record_dispatch_statement(mfunc); + } + } size_t world = jl_current_task->world_age; return _jl_invoke(gf, args, nargs - 1, mfunc, world); } diff --git a/src/jloptions.c b/src/jloptions.c index 530d5e2577a9a..35f0a76e3f6e7 100644 --- a/src/jloptions.c +++ b/src/jloptions.c @@ -77,6 +77,7 @@ JL_DLLEXPORT void jl_init_options(void) 1, // can_inline JL_OPTIONS_POLLY_ON, // polly NULL, // trace_compile + NULL, // trace_dispatch JL_OPTIONS_FAST_MATH_DEFAULT, 0, // worker NULL, // cookie @@ -294,6 +295,7 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) opt_polly, opt_trace_compile, opt_trace_compile_timing, + opt_trace_dispatch, opt_math_mode, opt_worker, opt_bind_to, @@ -372,6 +374,7 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) { "polly", required_argument, 0, opt_polly }, { "trace-compile", required_argument, 0, opt_trace_compile }, { "trace-compile-timing", no_argument, 0, opt_trace_compile_timing }, + { "trace-dispatch", required_argument, 0, opt_trace_dispatch }, { "math-mode", required_argument, 0, opt_math_mode }, { "handle-signals", required_argument, 0, opt_handle_signals }, // hidden command line options @@ -828,6 +831,11 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) case opt_trace_compile_timing: jl_options.trace_compile_timing = 1; break; + case opt_trace_dispatch: + jl_options.trace_dispatch = strdup(optarg); + if (!jl_options.trace_dispatch) + jl_errorf("fatal error: failed to allocate memory: %s", strerror(errno)); + break; case opt_math_mode: if (!strcmp(optarg,"ieee")) jl_options.fast_math = JL_OPTIONS_FAST_MATH_OFF; diff --git a/src/jloptions.h b/src/jloptions.h index 3d7deedb59e15..e58797caace3c 100644 --- a/src/jloptions.h +++ b/src/jloptions.h @@ -38,6 +38,7 @@ typedef struct { 
int8_t can_inline; int8_t polly; const char *trace_compile; + const char *trace_dispatch; int8_t fast_math; int8_t worker; const char *cookie; diff --git a/src/jltypes.c b/src/jltypes.c index fbc8e9f7f7f16..11f1d11a14edc 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3617,7 +3617,7 @@ void jl_init_types(void) JL_GC_DISABLED "backedges", "cache", "cache_with_orig", - "precompiled"), + "flags"), jl_svec(7, jl_new_struct(jl_uniontype_type, jl_method_type, jl_module_type), jl_any_type, diff --git a/src/julia.h b/src/julia.h index 73b96cf0183d1..c6ff729a308eb 100644 --- a/src/julia.h +++ b/src/julia.h @@ -410,8 +410,14 @@ struct _jl_method_instance_t { jl_array_t *backedges; // list of method-instances which call this method-instance; `invoke` records (invokesig, caller) pairs _Atomic(struct _jl_code_instance_t*) cache; uint8_t cache_with_orig; // !cache_with_specTypes - _Atomic(uint8_t) precompiled; // true if this instance was generated by an explicit `precompile(...)` call + + // flags for this method instance + // bit 0: generated by an explicit `precompile(...)` + // bit 1: dispatched + _Atomic(uint8_t) flags; }; +#define JL_MI_FLAGS_MASK_PRECOMPILED 0x01 +#define JL_MI_FLAGS_MASK_DISPATCHED 0x02 // OpaqueClosure typedef struct _jl_opaque_closure_t { diff --git a/src/method.c b/src/method.c index d4457b1549353..6aba60e7fe12c 100644 --- a/src/method.c +++ b/src/method.c @@ -629,7 +629,7 @@ JL_DLLEXPORT jl_method_instance_t *jl_new_method_instance_uninit(void) mi->backedges = NULL; jl_atomic_store_relaxed(&mi->cache, NULL); mi->cache_with_orig = 0; - jl_atomic_store_relaxed(&mi->precompiled, 0); + jl_atomic_store_relaxed(&mi->flags, 0); return mi; } diff --git a/src/staticdata.c b/src/staticdata.c index f54cc9692eaea..aa9a16daab7a5 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -1718,7 +1718,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED else if (jl_is_method_instance(v)) { assert(f == s->s); jl_method_instance_t *newmi = (jl_method_instance_t*)&f->buf[reloc_offset]; - jl_atomic_store_relaxed(&newmi->precompiled, 0); + jl_atomic_store_relaxed(&newmi->flags, 0); } else if (jl_is_code_instance(v)) { assert(f == s->s); diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index f39e5357c6782..81aed233af5c0 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -159,7 +159,8 @@ static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited, if (jl_is_method(mod)) mod = ((jl_method_t*)mod)->module; assert(jl_is_module(mod)); - if (jl_atomic_load_relaxed(&mi->precompiled) || !jl_object_in_image((jl_value_t*)mod) || type_in_worklist(mi->specTypes)) { + uint8_t is_precompiled = jl_atomic_load_relaxed(&mi->flags) & JL_MI_FLAGS_MASK_PRECOMPILED; + if (is_precompiled || !jl_object_in_image((jl_value_t*)mod) || type_in_worklist(mi->specTypes)) { return 1; } if (!mi->backedges) { diff --git a/test/cmdlineargs.jl b/test/cmdlineargs.jl index c6720e23739d8..cc3f8950f0dc0 100644 --- a/test/cmdlineargs.jl +++ b/test/cmdlineargs.jl @@ -787,6 +787,17 @@ let exename = `$(Base.julia_cmd()) --startup-file=no --color=no` # tested in test/parallel.jl) @test errors_not_signals(`$exename --worker=true`) + # --trace-compile + let + io = IOBuffer() + v = writereadpipeline( + "foo(x) = begin Base.Experimental.@force_compile; x; end; foo(1)", + `$exename --trace-compile=stderr -i`, + stderr=io) + _stderr = String(take!(io)) + @test occursin("precompile(Tuple{typeof(Main.foo), Int", _stderr) + end + # --trace-compile-timing let io = IOBuffer() @@ 
-798,6 +809,17 @@ let exename = `$(Base.julia_cmd()) --startup-file=no --color=no` @test occursin(" ms =# precompile(Tuple{typeof(Main.foo), Int", _stderr) end + # --trace-dispatch + let + io = IOBuffer() + v = writereadpipeline( + "foo(x) = begin Base.Experimental.@force_compile; x; end; foo(1)", + `$exename --trace-dispatch=stderr -i`, + stderr=io) + _stderr = String(take!(io)) + @test occursin("precompile(Tuple{typeof(Main.foo), Int", _stderr) + end + # test passing arguments mktempdir() do dir testfile, io = mktemp(dir) diff --git a/test/core.jl b/test/core.jl index d41a58a7ccb2e..1395817d8615e 100644 --- a/test/core.jl +++ b/test/core.jl @@ -34,7 +34,7 @@ for (T, c) in ( (Core.CodeInfo, []), (Core.CodeInstance, [:next, :min_world, :max_world, :inferred, :debuginfo, :ipo_purity_bits, :invoke, :specptr, :specsigflags, :precompile]), (Core.Method, [:primary_world, :deleted_world]), - (Core.MethodInstance, [:cache, :precompiled]), + (Core.MethodInstance, [:cache, :flags]), (Core.MethodTable, [:defs, :leafcache, :cache, :max_args]), (Core.TypeMapEntry, [:next, :min_world, :max_world]), (Core.TypeMapLevel, [:arg1, :targ, :name1, :tname, :list, :any]), From a7c5056b722182adfd183fdc7bdfdef39cd8e28e Mon Sep 17 00:00:00 2001 From: Florian Date: Tue, 1 Oct 2024 01:41:23 +0200 Subject: [PATCH 091/537] relocation: account for trailing path separator in depot paths (#55355) Fixes #55340 --- base/loading.jl | 26 ++++++++++++++++---------- src/precompile.c | 17 +++++++++++++---- src/staticdata_utils.c | 20 ++++++++++++++------ test/relocatedepot.jl | 34 +++++++++++++++++++++++++++++----- 4 files changed, 72 insertions(+), 25 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index fbf6bb4af50aa..9080a2271fb27 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -3165,16 +3165,9 @@ mutable struct CacheHeaderIncludes const modpath::Vector{String} # seemingly not needed in Base, but used by Revise end -function replace_depot_path(path::AbstractString) - for depot in DEPOT_PATH - !isdir(depot) && continue - - # Strip extraneous pathseps through normalization. 
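To make the fix concrete before reading the new code: after this patch a depot prefix is only replaced when it is followed by a path separator or matches the path exactly, which is what resolves #55340. The sketch below is illustrative only; it exercises the internal helpers `Base.replace_depot_path` and `Base.normalize_depots_for_relocation` introduced just after this point, mirroring the updated `test/relocatedepot.jl` tests, and the commented results are indicative.

```julia
# Sketch of the intended post-patch behavior of depot-path replacement.
mktempdir() do dir
    pushfirst!(DEPOT_PATH, dir)
    try
        path = joinpath(dir, "packages", "Example", "src", "Example.jl")
        @show Base.replace_depot_path(path)          # "@depot/packages/..." (platform separators)
        @show Base.replace_depot_path(dir)           # "@depot" -- exact match, no trailing separator
        @show Base.replace_depot_path(dir * "-rc2")  # unchanged: no path-separator boundary after the depot
    finally
        popfirst!(DEPOT_PATH)
    end
end
```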
- if isdirpath(depot) - depot = dirname(depot) - end - - if startswith(path, depot) +function replace_depot_path(path::AbstractString, depots::Vector{String}=normalize_depots_for_relocation()) + for depot in depots + if startswith(path, string(depot, Filesystem.pathsep())) || path == depot path = replace(path, depot => "@depot"; count=1) break end @@ -3182,6 +3175,19 @@ function replace_depot_path(path::AbstractString) return path end +function normalize_depots_for_relocation() + depots = String[] + sizehint!(depots, length(DEPOT_PATH)) + for d in DEPOT_PATH + isdir(d) || continue + if isdirpath(d) + d = dirname(d) + end + push!(depots, abspath(d)) + end + return depots +end + function restore_depot_path(path::AbstractString, depot::AbstractString) replace(path, r"^@depot" => depot; count=1) end diff --git a/src/precompile.c b/src/precompile.c index 5088d45a5ad74..c21cf5367fba6 100644 --- a/src/precompile.c +++ b/src/precompile.c @@ -39,9 +39,17 @@ void write_srctext(ios_t *f, jl_array_t *udeps, int64_t srctextpos) { static jl_value_t *replace_depot_func = NULL; if (!replace_depot_func) replace_depot_func = jl_get_global(jl_base_module, jl_symbol("replace_depot_path")); + static jl_value_t *normalize_depots_func = NULL; + if (!normalize_depots_func) + normalize_depots_func = jl_get_global(jl_base_module, jl_symbol("normalize_depots_for_relocation")); ios_t srctext; - jl_value_t *deptuple = NULL; - JL_GC_PUSH2(&deptuple, &udeps); + jl_value_t *deptuple = NULL, *depots = NULL; + JL_GC_PUSH3(&deptuple, &udeps, &depots); + jl_task_t *ct = jl_current_task; + size_t last_age = ct->world_age; + ct->world_age = jl_atomic_load_acquire(&jl_world_counter); + depots = jl_apply(&normalize_depots_func, 1); + ct->world_age = last_age; for (size_t i = 0; i < len; i++) { deptuple = jl_array_ptr_ref(udeps, i); jl_value_t *depmod = jl_fieldref(deptuple, 0); // module @@ -60,13 +68,14 @@ void write_srctext(ios_t *f, jl_array_t *udeps, int64_t srctextpos) { } jl_value_t **replace_depot_args; - JL_GC_PUSHARGS(replace_depot_args, 2); + JL_GC_PUSHARGS(replace_depot_args, 3); replace_depot_args[0] = replace_depot_func; replace_depot_args[1] = abspath; + replace_depot_args[2] = depots; jl_task_t *ct = jl_current_task; size_t last_age = ct->world_age; ct->world_age = jl_atomic_load_acquire(&jl_world_counter); - jl_value_t *depalias = (jl_value_t*)jl_apply(replace_depot_args, 2); + jl_value_t *depalias = (jl_value_t*)jl_apply(replace_depot_args, 3); ct->world_age = last_age; JL_GC_POP(); diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index 81aed233af5c0..8eb223d3cfbde 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -753,6 +753,16 @@ static int64_t write_dependency_list(ios_t *s, jl_array_t* worklist, jl_array_t static jl_value_t *replace_depot_func = NULL; if (!replace_depot_func) replace_depot_func = jl_get_global(jl_base_module, jl_symbol("replace_depot_path")); + static jl_value_t *normalize_depots_func = NULL; + if (!normalize_depots_func) + normalize_depots_func = jl_get_global(jl_base_module, jl_symbol("normalize_depots_for_relocation")); + + jl_value_t *depots = NULL, *prefs_hash = NULL, *prefs_list = NULL; + JL_GC_PUSH2(&depots, &prefs_list); + last_age = ct->world_age; + ct->world_age = jl_atomic_load_acquire(&jl_world_counter); + depots = jl_apply(&normalize_depots_func, 1); + ct->world_age = last_age; // write a placeholder for total size so that we can quickly seek past all of the // dependencies if we don't need them @@ -765,13 +775,14 @@ static int64_t 
write_dependency_list(ios_t *s, jl_array_t* worklist, jl_array_t if (replace_depot_func) { jl_value_t **replace_depot_args; - JL_GC_PUSHARGS(replace_depot_args, 2); + JL_GC_PUSHARGS(replace_depot_args, 3); replace_depot_args[0] = replace_depot_func; replace_depot_args[1] = deppath; + replace_depot_args[2] = depots; ct = jl_current_task; size_t last_age = ct->world_age; ct->world_age = jl_atomic_load_acquire(&jl_world_counter); - deppath = (jl_value_t*)jl_apply(replace_depot_args, 2); + deppath = (jl_value_t*)jl_apply(replace_depot_args, 3); ct->world_age = last_age; JL_GC_POP(); } @@ -804,9 +815,6 @@ static int64_t write_dependency_list(ios_t *s, jl_array_t* worklist, jl_array_t write_int32(s, 0); // terminator, for ease of reading // Calculate Preferences hash for current package. - jl_value_t *prefs_hash = NULL; - jl_value_t *prefs_list = NULL; - JL_GC_PUSH1(&prefs_list); if (jl_base_module) { // Toplevel module is the module we're currently compiling, use it to get our preferences hash jl_value_t * toplevel = (jl_value_t*)jl_get_global(jl_base_module, jl_symbol("__toplevel__")); @@ -853,7 +861,7 @@ static int64_t write_dependency_list(ios_t *s, jl_array_t* worklist, jl_array_t write_int32(s, 0); write_uint64(s, 0); } - JL_GC_POP(); // for prefs_list + JL_GC_POP(); // for depots, prefs_list // write a dummy file position to indicate the beginning of the source-text pos = ios_pos(s); diff --git a/test/relocatedepot.jl b/test/relocatedepot.jl index 039d422c35e25..2ef6dec90dbc1 100644 --- a/test/relocatedepot.jl +++ b/test/relocatedepot.jl @@ -1,3 +1,5 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + using Test @@ -26,16 +28,38 @@ end if !test_relocated_depot - @testset "insert @depot tag in path" begin + @testset "edge cases when inserting @depot tag in path" begin + # insert @depot only once for first match test_harness() do mktempdir() do dir pushfirst!(DEPOT_PATH, dir) - path = dir*dir - @test Base.replace_depot_path(path) == "@depot"*dir + if Sys.iswindows() + # dirs start with a drive letter instead of a path separator + path = dir*Base.Filesystem.pathsep()*dir + @test Base.replace_depot_path(path) == "@depot"*Base.Filesystem.pathsep()*dir + else + path = dir*dir + @test Base.replace_depot_path(path) == "@depot"*dir + end + end + + # 55340 + empty!(DEPOT_PATH) + mktempdir() do dir + jlrc = joinpath(dir, "julia-rc2") + jl = joinpath(dir, "julia") + mkdir(jl) + push!(DEPOT_PATH, jl) + @test Base.replace_depot_path(jl) == "@depot" + @test Base.replace_depot_path(string(jl,Base.Filesystem.pathsep())) == + string("@depot",Base.Filesystem.pathsep()) + @test Base.replace_depot_path(jlrc) != "@depot-rc2" + @test Base.replace_depot_path(jlrc) == jlrc end end + # deal with and without trailing path separators test_harness() do mktempdir() do dir pushfirst!(DEPOT_PATH, dir) @@ -43,9 +67,9 @@ if !test_relocated_depot if isdirpath(DEPOT_PATH[1]) DEPOT_PATH[1] = dirname(DEPOT_PATH[1]) # strip trailing pathsep end - tag = joinpath("@depot", "") # append a pathsep + tag = string("@depot", Base.Filesystem.pathsep()) @test startswith(Base.replace_depot_path(path), tag) - DEPOT_PATH[1] = joinpath(DEPOT_PATH[1], "") # append a pathsep + DEPOT_PATH[1] = string(DEPOT_PATH[1], Base.Filesystem.pathsep()) @test startswith(Base.replace_depot_path(path), tag) popfirst!(DEPOT_PATH) @test !startswith(Base.replace_depot_path(path), tag) From 32ad9e60347ed83efe3778fd6f7a2702aadb3cfe Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 30 Sep 2024 22:32:58 -0400 Subject: [PATCH 
092/537] change compiler to be stackless (#55575) This change ensures the compiler uses very little stack, making it compatible with running on any arbitrary system stack size and depths much more reliably. It also could be further modified now to easily add various forms of pause-able/resumable inference, since there is no implicit state on the stack--everything is local and explicit now. Whereas before, less than 900 frames would crash in less than a second: ``` $ time ./julia -e 'f(::Val{N}) where {N} = N <= 0 ? 0 : f(Val(N - 1)); f(Val(1000))' Warning: detected a stack overflow; program state may be corrupted, so further execution might be unreliable. Internal error: during type inference of f(Base.Val{1000}) Encountered stack overflow. This might be caused by recursion over very long tuples or argument lists. [23763] signal 6: Abort trap: 6 in expression starting at none:1 __pthread_kill at /usr/lib/system/libsystem_kernel.dylib (unknown line) Allocations: 1 (Pool: 1; Big: 0); GC: 0 Abort trap: 6 real 0m0.233s user 0m0.165s sys 0m0.049s ```` Now: it is effectively unlimited, as long as you are willing to wait for it: ``` $ time ./julia -e 'f(::Val{N}) where {N} = N <= 0 ? 0 : f(Val(N - 1)); f(Val(50000))' info: inference of f(Base.Val{50000}) from f(Base.Val{N}) where {N} exceeding 2500 frames (may be slow). info: inference of f(Base.Val{50000}) from f(Base.Val{N}) where {N} exceeding 5000 frames (may be slow). info: inference of f(Base.Val{50000}) from f(Base.Val{N}) where {N} exceeding 10000 frames (may be slow). info: inference of f(Base.Val{50000}) from f(Base.Val{N}) where {N} exceeding 20000 frames (may be slow). info: inference of f(Base.Val{50000}) from f(Base.Val{N}) where {N} exceeding 40000 frames (may be slow). real 7m4.988s $ time ./julia -e 'f(::Val{N}) where {N} = N <= 0 ? 0 : f(Val(N - 1)); f(Val(1000))' real 0m0.214s user 0m0.164s sys 0m0.044s $ time ./julia -e '@noinline f(::Val{N}) where {N} = N <= 0 ? GC.safepoint() : f(Val(N - 1)); f(Val(5000))' info: inference of f(Base.Val{5000}) from f(Base.Val{N}) where {N} exceeding 2500 frames (may be slow). info: inference of f(Base.Val{5000}) from f(Base.Val{N}) where {N} exceeding 5000 frames (may be slow). 
real 0m8.609s user 0m8.358s sys 0m0.240s ``` --- base/compiler/abstractinterpretation.jl | 1402 +++++++++++++---------- base/compiler/inferencestate.jl | 102 +- base/compiler/ssair/ir.jl | 1 + base/compiler/ssair/irinterp.jl | 38 +- base/compiler/ssair/verify.jl | 5 +- base/compiler/tfuncs.jl | 102 +- base/compiler/typeinfer.jl | 237 ++-- base/compiler/types.jl | 8 + base/reflection.jl | 2 +- test/compiler/AbstractInterpreter.jl | 9 +- test/compiler/inference.jl | 107 -- 11 files changed, 1048 insertions(+), 965 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 68b8394b72c3d..96355f2a6b5dd 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -47,223 +47,210 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), matches = find_method_matches(interp, argtypes, atype; max_methods) if isa(matches, FailedMethodMatch) add_remark!(interp, sv, matches.reason) - return CallMeta(Any, Any, Effects(), NoCallInfo()) + return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) end (; valid_worlds, applicable, info) = matches update_valid_age!(sv, valid_worlds) - napplicable = length(applicable) + + # final result + gfresult = Future{CallMeta}() + # intermediate work for computing gfresult rettype = exctype = Bottom edges = MethodInstance[] conditionals = nothing # keeps refinement information of call argument types when the return type is boolean - seen = 0 # number of signatures actually inferred + seenall = true const_results = nothing # or const_results::Vector{Union{Nothing,ConstResult}} if any const results are available - multiple_matches = napplicable > 1 fargs = arginfo.fargs all_effects = EFFECTS_TOTAL slotrefinements = nothing # keeps refinement information on slot types obtained from call signature - for i in 1:napplicable - match = applicable[i]::MethodMatch - method = match.method - sig = match.spec_types - if bail_out_toplevel_call(interp, InferenceLoopState(sig, rettype, all_effects), sv) - # only infer concrete call sites in top-level expressions - add_remark!(interp, sv, "Refusing to infer non-concrete call site in top-level expression") - break - end - this_rt = Bottom - this_exct = Bottom - splitunions = false - # TODO: this used to trigger a bug in inference recursion detection, and is unmaintained now - # sigtuple = unwrap_unionall(sig)::DataType - # splitunions = 1 < unionsplitcost(sigtuple.parameters) * napplicable <= InferenceParams(interp).max_union_splitting - if splitunions - splitsigs = switchtupleunion(sig) - for sig_n in splitsigs - result = abstract_call_method(interp, method, sig_n, svec(), multiple_matches, si, sv) - (; rt, exct, edge, effects, volatile_inf_result) = result + # split the for loop off into a function, so that we can pause and restart it at will + i::Int = 1 + f = Core.Box(f) + atype = Core.Box(atype) + function infercalls(interp, sv) + napplicable = length(applicable) + multiple_matches = napplicable > 1 + while i <= napplicable + match = applicable[i]::MethodMatch + method = match.method + sig = match.spec_types + if bail_out_toplevel_call(interp, InferenceLoopState(sig, rettype, all_effects), sv) + # only infer concrete call sites in top-level expressions + add_remark!(interp, sv, "Refusing to infer non-concrete call site in top-level expression") + seenall = false + break + end + # TODO: this is unmaintained now as it didn't seem to improve things, though it does avoid hard-coding the union split at the higher 
level, + # it also can hurt infer-ability of some constrained parameter types (e.g. quacks like a duck) + # sigtuple = unwrap_unionall(sig)::DataType + # splitunions = 1 < unionsplitcost(sigtuple.parameters) * napplicable <= InferenceParams(interp).max_union_splitting + #if splitunions + # splitsigs = switchtupleunion(sig) + # for sig_n in splitsigs + # result = abstract_call_method(interp, method, sig_n, svec(), multiple_matches, si, sv)::Future + # handle1(...) + # end + #end + mresult = abstract_call_method(interp, method, sig, match.sparams, multiple_matches, si, sv)::Future + function handle1(interp, sv) + local (; rt, exct, edge, effects, volatile_inf_result) = mresult[] + this_conditional = ignorelimited(rt) + this_rt = widenwrappedconditional(rt) + this_exct = exct + # try constant propagation with argtypes for this match + # this is in preparation for inlining, or improving the return result this_argtypes = isa(matches, MethodMatches) ? argtypes : matches.applicable_argtypes[i] this_arginfo = ArgInfo(fargs, this_argtypes) const_call_result = abstract_call_method_with_const_args(interp, - result, f, this_arginfo, si, match, sv) + mresult[], f.contents, this_arginfo, si, match, sv) const_result = volatile_inf_result if const_call_result !== nothing - if const_call_result.rt ⊑ₚ rt - rt = const_call_result.rt + this_const_conditional = ignorelimited(const_call_result.rt) + this_const_rt = widenwrappedconditional(const_call_result.rt) + if this_const_rt ⊑ₚ this_rt + # As long as the const-prop result we have is not *worse* than + # what we found out on types, we'd like to use it. Even if the + # end result is exactly equivalent, it is likely that the IR + # we produced while constproping is better than that with + # generic types. + # Return type of const-prop' inference can be wider than that of non const-prop' inference + # e.g. in cases when there are cycles but cached result is still accurate + this_conditional = this_const_conditional + this_rt = this_const_rt (; effects, const_result, edge) = const_call_result elseif is_better_effects(const_call_result.effects, effects) (; effects, const_result, edge) = const_call_result else add_remark!(interp, sv, "[constprop] Discarded because the result was wider than inference") end - if const_call_result.exct ⋤ exct - (; exct, const_result, edge) = const_call_result + # Treat the exception type separately. Currently, constprop often cannot determine the exception type + # because consistent-cy does not apply to exceptions. 
+ if const_call_result.exct ⋤ this_exct + this_exct = const_call_result.exct + (; const_result, edge) = const_call_result else add_remark!(interp, sv, "[constprop] Discarded exception type because result was wider than inference") end end + all_effects = merge_effects(all_effects, effects) if const_result !== nothing if const_results === nothing - const_results = fill!(Vector{Union{Nothing,ConstResult}}(undef, #=TODO=#napplicable), nothing) + const_results = fill!(Vector{Union{Nothing,ConstResult}}(undef, napplicable), nothing) end const_results[i] = const_result end edge === nothing || push!(edges, edge) - this_rt = this_rt ⊔ₚ rt - this_exct = this_exct ⊔ₚ exct - if bail_out_call(interp, this_rt, sv) - break + @assert !(this_conditional isa Conditional || this_rt isa MustAlias) "invalid lattice element returned from inter-procedural context" + if can_propagate_conditional(this_conditional, argtypes) + # The only case where we need to keep this in rt is where + # we can directly propagate the conditional to a slot argument + # that is not one of our arguments, otherwise we keep all the + # relevant information in `conditionals` below. + this_rt = this_conditional end - end - this_conditional = ignorelimited(this_rt) - this_rt = widenwrappedconditional(this_rt) - else - result = abstract_call_method(interp, method, sig, match.sparams, multiple_matches, si, sv) - (; rt, exct, edge, effects, volatile_inf_result) = result - this_conditional = ignorelimited(rt) - this_rt = widenwrappedconditional(rt) - this_exct = exct - # try constant propagation with argtypes for this match - # this is in preparation for inlining, or improving the return result - this_argtypes = isa(matches, MethodMatches) ? argtypes : matches.applicable_argtypes[i] - this_arginfo = ArgInfo(fargs, this_argtypes) - const_call_result = abstract_call_method_with_const_args(interp, - result, f, this_arginfo, si, match, sv) - const_result = volatile_inf_result - if const_call_result !== nothing - this_const_conditional = ignorelimited(const_call_result.rt) - this_const_rt = widenwrappedconditional(const_call_result.rt) - if this_const_rt ⊑ₚ this_rt - # As long as the const-prop result we have is not *worse* than - # what we found out on types, we'd like to use it. Even if the - # end result is exactly equivalent, it is likely that the IR - # we produced while constproping is better than that with - # generic types. - # Return type of const-prop' inference can be wider than that of non const-prop' inference - # e.g. in cases when there are cycles but cached result is still accurate - this_conditional = this_const_conditional - this_rt = this_const_rt - (; effects, const_result, edge) = const_call_result - elseif is_better_effects(const_call_result.effects, effects) - (; effects, const_result, edge) = const_call_result - else - add_remark!(interp, sv, "[constprop] Discarded because the result was wider than inference") + + rettype = rettype ⊔ₚ this_rt + exctype = exctype ⊔ₚ this_exct + if has_conditional(𝕃ₚ, sv) && this_conditional !== Bottom && is_lattice_bool(𝕃ₚ, rettype) && fargs !== nothing + if conditionals === nothing + conditionals = Any[Bottom for _ in 1:length(argtypes)], + Any[Bottom for _ in 1:length(argtypes)] + end + for i = 1:length(argtypes) + cnd = conditional_argtype(𝕃ᵢ, this_conditional, sig, argtypes, i) + conditionals[1][i] = conditionals[1][i] ⊔ᵢ cnd.thentype + conditionals[2][i] = conditionals[2][i] ⊔ᵢ cnd.elsetype + end end - # Treat the exception type separately. 
Currently, constprop often cannot determine the exception type - # because consistent-cy does not apply to exceptions. - if const_call_result.exct ⋤ this_exct - this_exct = const_call_result.exct - (; const_result, edge) = const_call_result - else - add_remark!(interp, sv, "[constprop] Discarded exception type because result was wider than inference") + if i < napplicable && bail_out_call(interp, InferenceLoopState(sig, rettype, all_effects), sv) + add_remark!(interp, sv, "Call inference reached maximally imprecise information. Bailing on.") + seenall = false + i = napplicable # break in outer function end + i += 1 + return true end - all_effects = merge_effects(all_effects, effects) - if const_result !== nothing - if const_results === nothing - const_results = fill!(Vector{Union{Nothing,ConstResult}}(undef, napplicable), nothing) - end - const_results[i] = const_result + if isready(mresult) && handle1(interp, sv) + continue + else + push!(sv.tasks, handle1) + return false end - edge === nothing || push!(edges, edge) - end - @assert !(this_conditional isa Conditional || this_rt isa MustAlias) "invalid lattice element returned from inter-procedural context" - seen += 1 + end # while - if can_propagate_conditional(this_conditional, argtypes) - # The only case where we need to keep this in rt is where - # we can directly propagate the conditional to a slot argument - # that is not one of our arguments, otherwise we keep all the - # relevant information in `conditionals` below. - this_rt = this_conditional + if const_results !== nothing + @assert napplicable == nmatches(info) == length(const_results) + info = ConstCallInfo(info, const_results) end - rettype = rettype ⊔ₚ this_rt - exctype = exctype ⊔ₚ this_exct - if has_conditional(𝕃ₚ, sv) && this_conditional !== Bottom && is_lattice_bool(𝕃ₚ, rettype) && fargs !== nothing - if conditionals === nothing - conditionals = Any[Bottom for _ in 1:length(argtypes)], - Any[Bottom for _ in 1:length(argtypes)] + if seenall + if !fully_covering(matches) || any_ambig(matches) + # Account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. + all_effects = Effects(all_effects; nothrow=false) + exctype = exctype ⊔ₚ MethodError end - for i = 1:length(argtypes) - cnd = conditional_argtype(𝕃ᵢ, this_conditional, sig, argtypes, i) - conditionals[1][i] = conditionals[1][i] ⊔ᵢ cnd.thentype - conditionals[2][i] = conditionals[2][i] ⊔ᵢ cnd.elsetype + if sv isa InferenceState && fargs !== nothing + slotrefinements = collect_slot_refinements(𝕃ᵢ, applicable, argtypes, fargs, sv) end - end - if bail_out_call(interp, InferenceLoopState(sig, rettype, all_effects), sv) - add_remark!(interp, sv, "Call inference reached maximally imprecise information. Bailing on.") - break - end - end - - if const_results !== nothing - @assert napplicable == nmatches(info) == length(const_results) - info = ConstCallInfo(info, const_results) - end - - if seen ≠ napplicable - # there is unanalyzed candidate, widen type and effects to the top - rettype = exctype = Any - all_effects = Effects() - else - if !fully_covering(matches) || any_ambig(matches) - # Account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. 
- all_effects = Effects(all_effects; nothrow=false) - exctype = exctype ⊔ₚ MethodError - end - if sv isa InferenceState && fargs !== nothing - slotrefinements = collect_slot_refinements(𝕃ᵢ, applicable, argtypes, fargs, sv) - end - end - - rettype = from_interprocedural!(interp, rettype, sv, arginfo, conditionals) - - # Also considering inferring the compilation signature for this method, so - # it is available to the compiler in case it ends up needing it. - if (isa(sv, InferenceState) && infer_compilation_signature(interp) && - (1 == seen == napplicable) && rettype !== Any && rettype !== Bottom && - !is_removable_if_unused(all_effects)) - match = applicable[1]::MethodMatch - method = match.method - sig = match.spec_types - mi = specialize_method(match; preexisting=true) - if mi !== nothing && !const_prop_methodinstance_heuristic(interp, mi, arginfo, sv) - csig = get_compileable_sig(method, sig, match.sparams) - if csig !== nothing && csig !== sig - abstract_call_method(interp, method, csig, match.sparams, multiple_matches, StmtInfo(false), sv) + else + # there is unanalyzed candidate, widen type and effects to the top + rettype = exctype = Any + all_effects = Effects() + end + + rettype = from_interprocedural!(interp, rettype, sv, arginfo, conditionals) + + # Also considering inferring the compilation signature for this method, so + # it is available to the compiler in case it ends up needing it. + if (isa(sv, InferenceState) && infer_compilation_signature(interp) && + (seenall && 1 == napplicable) && rettype !== Any && rettype !== Bottom && + !is_removable_if_unused(all_effects)) + match = applicable[1]::MethodMatch + method = match.method + sig = match.spec_types + mi = specialize_method(match; preexisting=true) + if mi !== nothing && !const_prop_methodinstance_heuristic(interp, mi, arginfo, sv) + csig = get_compileable_sig(method, sig, match.sparams) + if csig !== nothing && csig !== sig + abstract_call_method(interp, method, csig, match.sparams, multiple_matches, StmtInfo(false), sv)::Future + end end end - end - if call_result_unused(si) && !(rettype === Bottom) - add_remark!(interp, sv, "Call result type was widened because the return value is unused") - # We're mainly only here because the optimizer might want this code, - # but we ourselves locally don't typically care about it locally - # (beyond checking if it always throws). - # So avoid adding an edge, since we don't want to bother attempting - # to improve our result even if it does change (to always throw), - # and avoid keeping track of a more complex result type. - rettype = Any - end - any_slot_refined = slotrefinements !== nothing - add_call_backedges!(interp, rettype, all_effects, any_slot_refined, edges, matches, atype, sv) - if isa(sv, InferenceState) - # TODO (#48913) implement a proper recursion handling for irinterp: - # This works just because currently the `:terminate` condition guarantees that - # irinterp doesn't fail into unresolved cycles, but it's not a good solution. - # We should revisit this once we have a better story for handling cycles in irinterp. 
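For orientation while reading this hunk: the rewritten code returns `Future`s and parks any work that cannot finish immediately on `sv.tasks` as a closure that a driver re-runs later. Below is a self-contained toy of that control flow; `ToyFuture`, `ToyState`, and the other names are hypothetical stand-ins, not the compiler's actual `Future` or interpreter state.

```julia
# Toy model of the stackless pattern: futures hold eventual results, and any
# work that cannot finish yet returns `false` after queueing itself on `tasks`.
mutable struct ToyFuture{T}
    value::Union{Nothing,T}
end
ToyFuture{T}() where {T} = ToyFuture{T}(nothing)
toy_isready(f::ToyFuture) = f.value !== nothing

mutable struct ToyState
    tasks::Vector{Function}   # each task: sv -> Bool, true once it has finished
end

function toy_drain!(sv::ToyState)
    while !isempty(sv.tasks)
        t = popfirst!(sv.tasks)
        t(sv) || push!(sv.tasks, t)   # re-queue work that is still waiting
    end
end

# A "callee" whose answer only becomes available on a later turn of the driver.
function slow_answer(sv::ToyState)
    result = ToyFuture{Int}()
    push!(sv.tasks, _ -> (result.value = 42; true))
    return result
end

# A "caller" in the resumable style: instead of blocking on the call stack it
# registers a continuation that waits for its sub-result.
function caller(sv::ToyState)
    sub = slow_answer(sv)
    out = ToyFuture{Int}()
    function continuation(sv)
        toy_isready(sub) || return false
        out.value = sub.value + 1
        return true
    end
    continuation(sv) || push!(sv.tasks, continuation)
    return out
end

sv = ToyState(Function[])
res = caller(sv)
toy_drain!(sv)
@assert res.value == 43   # no deep recursion was needed to produce this
```

The diff above follows the same shape, with `Future{CallMeta}` results and `push!(sv.tasks, handle1)`-style continuations.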
- if !isempty(sv.pclimitations) # remove self, if present - delete!(sv.pclimitations, sv) - for caller in callers_in_cycle(sv) - delete!(sv.pclimitations, caller) + if call_result_unused(si) && !(rettype === Bottom) + add_remark!(interp, sv, "Call result type was widened because the return value is unused") + # We're mainly only here because the optimizer might want this code, + # but we ourselves locally don't typically care about it locally + # (beyond checking if it always throws). + # So avoid adding an edge, since we don't want to bother attempting + # to improve our result even if it does change (to always throw), + # and avoid keeping track of a more complex result type. + rettype = Any + end + any_slot_refined = slotrefinements !== nothing + add_call_backedges!(interp, rettype, all_effects, any_slot_refined, edges, matches, atype.contents, sv) + if isa(sv, InferenceState) + # TODO (#48913) implement a proper recursion handling for irinterp: + # This works just because currently the `:terminate` condition guarantees that + # irinterp doesn't fail into unresolved cycles, but it's not a good solution. + # We should revisit this once we have a better story for handling cycles in irinterp. + if !isempty(sv.pclimitations) # remove self, if present + delete!(sv.pclimitations, sv) + for caller in callers_in_cycle(sv) + delete!(sv.pclimitations, caller) + end end end - end - return CallMeta(rettype, exctype, all_effects, info, slotrefinements) + gfresult[] = CallMeta(rettype, exctype, all_effects, info, slotrefinements) + return true + end # infercalls + # start making progress on the first call + infercalls(interp, sv) || push!(sv.tasks, infercalls) + return gfresult end struct FailedMethodMatch @@ -607,9 +594,9 @@ function abstract_call_method(interp::AbstractInterpreter, hardlimit::Bool, si::StmtInfo, sv::AbsIntState) sigtuple = unwrap_unionall(sig) sigtuple isa DataType || - return MethodCallResult(Any, Any, false, false, nothing, Effects()) + return Future(MethodCallResult(Any, Any, false, false, nothing, Effects())) all(@nospecialize(x) -> valid_as_lattice(unwrapva(x), true), sigtuple.parameters) || - return MethodCallResult(Union{}, Any, false, false, nothing, EFFECTS_THROWS) # catch bad type intersections early + return Future(MethodCallResult(Union{}, Any, false, false, nothing, EFFECTS_THROWS)) # catch bad type intersections early if is_nospecializeinfer(method) sig = get_nospecializeinfer_sig(method, sig, sparams) @@ -634,7 +621,7 @@ function abstract_call_method(interp::AbstractInterpreter, # we have a self-cycle in the call-graph, but not in the inference graph (typically): # break this edge now (before we record it) by returning early # (non-typically, this means that we lose the ability to detect a guaranteed StackOverflow in some cases) - return MethodCallResult(Any, Any, true, true, nothing, Effects()) + return Future(MethodCallResult(Any, Any, true, true, nothing, Effects())) end topmost = nothing edgecycle = true @@ -689,7 +676,7 @@ function abstract_call_method(interp::AbstractInterpreter, # since it's very unlikely that we'll try to inline this, # or want make an invoke edge to its calling convention return type. # (non-typically, this means that we lose the ability to detect a guaranteed StackOverflow in some cases) - return MethodCallResult(Any, Any, true, true, nothing, Effects()) + return Future(MethodCallResult(Any, Any, true, true, nothing, Effects())) end add_remark!(interp, sv, washardlimit ? 
RECURSION_MSG_HARDLIMIT : RECURSION_MSG) # TODO (#48913) implement a proper recursion handling for irinterp: @@ -745,31 +732,7 @@ function abstract_call_method(interp::AbstractInterpreter, sparams = recomputed[2]::SimpleVector end - (; rt, exct, edge, effects, volatile_inf_result) = typeinf_edge(interp, method, sig, sparams, sv) - - if edge === nothing - edgecycle = edgelimited = true - end - - # we look for the termination effect override here as well, since the :terminates effect - # may have been tainted due to recursion at this point even if it's overridden - if is_effect_overridden(sv, :terminates_globally) - # this frame is known to terminate - effects = Effects(effects, terminates=true) - elseif is_effect_overridden(method, :terminates_globally) - # this edge is known to terminate - effects = Effects(effects; terminates=true) - elseif edgecycle - # Some sort of recursion was detected. - if edge !== nothing && !edgelimited && !is_edge_recursed(edge, sv) - # no `MethodInstance` cycles -- don't taint :terminate - else - # we cannot guarantee that the call will terminate - effects = Effects(effects; terminates=false) - end - end - - return MethodCallResult(rt, exct, edgecycle, edgelimited, edge, effects, volatile_inf_result) + return typeinf_edge(interp, method, sig, sparams, sv, edgecycle, edgelimited) end function edge_matches_sv(interp::AbstractInterpreter, frame::AbsIntState, @@ -1331,7 +1294,7 @@ const_prop_result(inf_result::InferenceResult) = inf_result.ipo_effects, inf_result.linfo) # return cached result of constant analysis -return_cached_result(::AbstractInterpreter, inf_result::InferenceResult, ::AbsIntState) = +return_localcache_result(::AbstractInterpreter, inf_result::InferenceResult, ::AbsIntState) = const_prop_result(inf_result) function compute_forwarded_argtypes(interp::AbstractInterpreter, arginfo::ArgInfo, sv::AbsIntState) @@ -1361,7 +1324,7 @@ function const_prop_call(interp::AbstractInterpreter, return nothing end @assert inf_result.linfo === mi "MethodInstance for cached inference result does not match" - return return_cached_result(interp, inf_result, sv) + return return_localcache_result(interp, inf_result, sv) end overridden_by_const = falses(length(argtypes)) for i = 1:length(argtypes) @@ -1375,7 +1338,7 @@ function const_prop_call(interp::AbstractInterpreter, end # perform fresh constant prop' inf_result = InferenceResult(mi, argtypes, overridden_by_const) - frame = InferenceState(inf_result, #=cache_mode=#:local, interp) + frame = InferenceState(inf_result, #=cache_mode=#:local, interp) # TODO: this should also be converted to a stackless Future if frame === nothing add_remark!(interp, sv, "[constprop] Could not retrieve the source") return nothing # this is probably a bad generated function (unsound), but just ignore it @@ -1517,9 +1480,9 @@ function precise_container_type(interp::AbstractInterpreter, @nospecialize(itft) widet = typ.typ if isa(widet, DataType) if widet.name === Tuple.name - return AbstractIterationResult(typ.fields, nothing) + return Future(AbstractIterationResult(typ.fields, nothing)) elseif widet.name === _NAMEDTUPLE_NAME - return AbstractIterationResult(typ.fields, nothing) + return Future(AbstractIterationResult(typ.fields, nothing)) end end end @@ -1527,7 +1490,7 @@ function precise_container_type(interp::AbstractInterpreter, @nospecialize(itft) if isa(typ, Const) val = typ.val if isa(val, SimpleVector) || isa(val, Tuple) || isa(val, NamedTuple) - return AbstractIterationResult(Any[ Const(val[i]) for i in 1:length(val) ], nothing) # 
avoid making a tuple Generator here! + return Future(AbstractIterationResult(Any[ Const(val[i]) for i in 1:length(val) ], nothing)) # avoid making a tuple Generator here! end end @@ -1544,18 +1507,18 @@ function precise_container_type(interp::AbstractInterpreter, @nospecialize(itft) # refine the Union to remove elements that are not valid tags for objects filter!(@nospecialize(x) -> valid_as_lattice(x, true), utis) if length(utis) == 0 - return AbstractIterationResult(Any[], nothing) # oops, this statement was actually unreachable + return Future(AbstractIterationResult(Any[], nothing)) # oops, this statement was actually unreachable elseif length(utis) == 1 tti = utis[1] tti0 = rewrap_unionall(tti, tti0) else if any(@nospecialize(t) -> !isa(t, DataType) || !(t <: Tuple) || !isknownlength(t), utis) - return AbstractIterationResult(Any[Vararg{Any}], nothing, Effects()) + return Future(AbstractIterationResult(Any[Vararg{Any}], nothing, Effects())) end ltp = length((utis[1]::DataType).parameters) for t in utis if length((t::DataType).parameters) != ltp - return AbstractIterationResult(Any[Vararg{Any}], nothing) + return Future(AbstractIterationResult(Any[Vararg{Any}], nothing)) end end result = Any[ Union{} for _ in 1:ltp ] @@ -1566,14 +1529,14 @@ function precise_container_type(interp::AbstractInterpreter, @nospecialize(itft) result[j] = tmerge(result[j], rewrap_unionall(tps[j], tti0)) end end - return AbstractIterationResult(result, nothing) + return Future(AbstractIterationResult(result, nothing)) end end if tti0 <: Tuple if isa(tti0, DataType) - return AbstractIterationResult(Any[ p for p in tti0.parameters ], nothing) + return Future(AbstractIterationResult(Any[ p for p in tti0.parameters ], nothing)) elseif !isa(tti, DataType) - return AbstractIterationResult(Any[Vararg{Any}], nothing) + return Future(AbstractIterationResult(Any[Vararg{Any}], nothing)) else len = length(tti.parameters) last = tti.parameters[len] @@ -1586,17 +1549,17 @@ function precise_container_type(interp::AbstractInterpreter, @nospecialize(itft) elts[len] = Vararg{elts[len]} end end - return AbstractIterationResult(elts, nothing) + return Future(AbstractIterationResult(elts, nothing)) end elseif tti0 === SimpleVector - return AbstractIterationResult(Any[Vararg{Any}], nothing) + return Future(AbstractIterationResult(Any[Vararg{Any}], nothing)) elseif tti0 === Any - return AbstractIterationResult(Any[Vararg{Any}], nothing, Effects()) + return Future(AbstractIterationResult(Any[Vararg{Any}], nothing, Effects())) elseif tti0 <: Array || tti0 <: GenericMemory if eltype(tti0) === Union{} - return AbstractIterationResult(Any[], nothing) + return Future(AbstractIterationResult(Any[], nothing)) end - return AbstractIterationResult(Any[Vararg{eltype(tti0)}], nothing) + return Future(AbstractIterationResult(Any[Vararg{eltype(tti0)}], nothing)) else return abstract_iteration(interp, itft, typ, sv) end @@ -1607,95 +1570,144 @@ function abstract_iteration(interp::AbstractInterpreter, @nospecialize(itft), @n if isa(itft, Const) iteratef = itft.val else - return AbstractIterationResult(Any[Vararg{Any}], nothing, Effects()) + return Future(AbstractIterationResult(Any[Vararg{Any}], nothing, Effects())) end @assert !isvarargtype(itertype) - call = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[itft, itertype]), StmtInfo(true), sv) - stateordonet = call.rt - info = call.info - # Return Bottom if this is not an iterator. 
- # WARNING: Changes to the iteration protocol must be reflected here, - # this is not just an optimization. - # TODO: this doesn't realize that Array, GenericMemory, SimpleVector, Tuple, and NamedTuple do not use the iterate protocol - stateordonet === Bottom && return AbstractIterationResult(Any[Bottom], AbstractIterationInfo(CallMeta[CallMeta(Bottom, Any, call.effects, info)], true)) - valtype = statetype = Bottom - ret = Any[] - calls = CallMeta[call] - stateordonet_widened = widenconst(stateordonet) - 𝕃ᵢ = typeinf_lattice(interp) - # Try to unroll the iteration up to max_tuple_splat, which covers any finite - # length iterators, or interesting prefix - while true - if stateordonet_widened === Nothing - return AbstractIterationResult(ret, AbstractIterationInfo(calls, true)) - end - if Nothing <: stateordonet_widened || length(ret) >= InferenceParams(interp).max_tuple_splat - break - end - if !isa(stateordonet_widened, DataType) || !(stateordonet_widened <: Tuple) || isvatuple(stateordonet_widened) || length(stateordonet_widened.parameters) != 2 - break - end - nstatetype = getfield_tfunc(𝕃ᵢ, stateordonet, Const(2)) - # If there's no new information in this statetype, don't bother continuing, - # the iterator won't be finite. - if ⊑(𝕃ᵢ, nstatetype, statetype) - return AbstractIterationResult(Any[Bottom], AbstractIterationInfo(calls, false), EFFECTS_THROWS) - end - valtype = getfield_tfunc(𝕃ᵢ, stateordonet, Const(1)) - push!(ret, valtype) - statetype = nstatetype - call = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[Const(iteratef), itertype, statetype]), StmtInfo(true), sv) - stateordonet = call.rt + iterateresult = Future{AbstractIterationResult}() + call1future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[itft, itertype]), StmtInfo(true), sv)::Future + function inferiterate(interp, sv) + call1 = call1future[] + stateordonet = call1.rt + # Return Bottom if this is not an iterator. + # WARNING: Changes to the iteration protocol must be reflected here, + # this is not just an optimization. + # TODO: this doesn't realize that Array, GenericMemory, SimpleVector, Tuple, and NamedTuple do not use the iterate protocol + if stateordonet === Bottom + iterateresult[] = AbstractIterationResult(Any[Bottom], AbstractIterationInfo(CallMeta[CallMeta(Bottom, Any, call1.effects, call1.info)], true)) + return true + end stateordonet_widened = widenconst(stateordonet) - push!(calls, call) - end - # From here on, we start asking for results on the widened types, rather than - # the precise (potentially const) state type - # statetype and valtype are reinitialized in the first iteration below from the - # (widened) stateordonet, which has not yet been fully analyzed in the loop above - valtype = statetype = Bottom - may_have_terminated = Nothing <: stateordonet_widened - while valtype !== Any - nounion = typeintersect(stateordonet_widened, Tuple{Any,Any}) - if nounion !== Union{} && !isa(nounion, DataType) - # nounion is of a type we cannot handle - valtype = Any - break - end - if nounion === Union{} || (nounion.parameters[1] <: valtype && nounion.parameters[2] <: statetype) - # reached a fixpoint or iterator failed/gave invalid answer - if !hasintersect(stateordonet_widened, Nothing) - # ... but cannot terminate - if !may_have_terminated - # ... 
and cannot have terminated prior to this loop - return AbstractIterationResult(Any[Bottom], AbstractIterationInfo(calls, false), Effects()) - else - # iterator may have terminated prior to this loop, but not during it - valtype = Bottom + calls = CallMeta[call1] + valtype = statetype = Bottom + ret = Any[] + 𝕃ᵢ = typeinf_lattice(interp) + may_have_terminated = false + local call2future::Future{CallMeta} + + nextstate::UInt8 = 0x0 + function inferiterate_2arg(interp, sv) + if nextstate === 0x1 + nextstate = 0xff + @goto state1 + elseif nextstate === 0x2 + nextstate = 0xff + @goto state2 + else + @assert nextstate === 0x0 + nextstate = 0xff + end + + # Try to unroll the iteration up to max_tuple_splat, which covers any finite + # length iterators, or interesting prefix + while true + if stateordonet_widened === Nothing + iterateresult[] = AbstractIterationResult(ret, AbstractIterationInfo(calls, true)) + return true + end + if Nothing <: stateordonet_widened || length(ret) >= InferenceParams(interp).max_tuple_splat + break + end + if !isa(stateordonet_widened, DataType) || !(stateordonet_widened <: Tuple) || isvatuple(stateordonet_widened) || length(stateordonet_widened.parameters) != 2 + break + end + nstatetype = getfield_tfunc(𝕃ᵢ, stateordonet, Const(2)) + # If there's no new information in this statetype, don't bother continuing, + # the iterator won't be finite. + if ⊑(𝕃ᵢ, nstatetype, statetype) + iterateresult[] = AbstractIterationResult(Any[Bottom], AbstractIterationInfo(calls, false), EFFECTS_THROWS) + return true + end + valtype = getfield_tfunc(𝕃ᵢ, stateordonet, Const(1)) + push!(ret, valtype) + statetype = nstatetype + call2future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[Const(iteratef), itertype, statetype]), StmtInfo(true), sv)::Future + if !isready(call2future) + nextstate = 0x1 + return false + @label state1 + end + let call = call2future[] + push!(calls, call) + stateordonet = call.rt + stateordonet_widened = widenconst(stateordonet) end end - break - end - valtype = tmerge(valtype, nounion.parameters[1]) - statetype = tmerge(statetype, nounion.parameters[2]) - call = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[Const(iteratef), itertype, statetype]), StmtInfo(true), sv) - push!(calls, call) - stateordonet = call.rt - stateordonet_widened = widenconst(stateordonet) - end - if valtype !== Union{} - push!(ret, Vararg{valtype}) + # From here on, we start asking for results on the widened types, rather than + # the precise (potentially const) state type + # statetype and valtype are reinitialized in the first iteration below from the + # (widened) stateordonet, which has not yet been fully analyzed in the loop above + valtype = statetype = Bottom + may_have_terminated = Nothing <: stateordonet_widened + while valtype !== Any + nounion = typeintersect(stateordonet_widened, Tuple{Any,Any}) + if nounion !== Union{} && !isa(nounion, DataType) + # nounion is of a type we cannot handle + valtype = Any + break + end + if nounion === Union{} || (nounion.parameters[1] <: valtype && nounion.parameters[2] <: statetype) + # reached a fixpoint or iterator failed/gave invalid answer + if !hasintersect(stateordonet_widened, Nothing) + # ... but cannot terminate + if may_have_terminated + # ... and iterator may have terminated prior to this loop, but not during it + valtype = Bottom + else + # ... 
or cannot have terminated prior to this loop + iterateresult[] = AbstractIterationResult(Any[Bottom], AbstractIterationInfo(calls, false), Effects()) + return true + end + end + break + end + valtype = tmerge(valtype, nounion.parameters[1]) + statetype = tmerge(statetype, nounion.parameters[2]) + call2future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[Const(iteratef), itertype, statetype]), StmtInfo(true), sv)::Future + if !isready(call2future) + nextstate = 0x2 + return false + @label state2 + end + let call = call2future[] + push!(calls, call) + stateordonet = call.rt + stateordonet_widened = widenconst(stateordonet) + end + end + if valtype !== Union{} + push!(ret, Vararg{valtype}) + end + iterateresult[] = AbstractIterationResult(ret, AbstractIterationInfo(calls, false)) + return true + end # inferiterate_2arg + # continue making progress as much as possible, on iterate(arg, state) + inferiterate_2arg(interp, sv) || push!(sv.tasks, inferiterate_2arg) + return true + end # inferiterate + # continue making progress as soon as possible, on iterate(arg) + if !(isready(call1future) && inferiterate(interp, sv)) + push!(sv.tasks, inferiterate) end - return AbstractIterationResult(ret, AbstractIterationInfo(calls, false)) + return iterateresult end # do apply(af, fargs...), where af is a function value function abstract_apply(interp::AbstractInterpreter, argtypes::Vector{Any}, si::StmtInfo, sv::AbsIntState, max_methods::Int=get_max_methods(interp, sv)) - itft = argtype_by_index(argtypes, 2) + itft = Core.Box(argtype_by_index(argtypes, 2)) aft = argtype_by_index(argtypes, 3) - (itft === Bottom || aft === Bottom) && return CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo()) + (itft.contents === Bottom || aft === Bottom) && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) aargtypes = argtype_tail(argtypes, 4) aftw = widenconst(aft) if !isa(aft, Const) && !isa(aft, PartialOpaque) && (!isType(aftw) || has_free_typevars(aftw)) @@ -1703,100 +1715,155 @@ function abstract_apply(interp::AbstractInterpreter, argtypes::Vector{Any}, si:: add_remark!(interp, sv, "Core._apply_iterate called on a function of a non-concrete type") # bail now, since it seems unlikely that abstract_call will be able to do any better after splitting # this also ensures we don't call abstract_call_gf_by_type below on an IntrinsicFunction or Builtin - return CallMeta(Any, Any, Effects(), NoCallInfo()) + return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) end end res = Union{} - nargs = length(aargtypes) splitunions = 1 < unionsplitcost(typeinf_lattice(interp), aargtypes) <= InferenceParams(interp).max_apply_union_enum - ctypes = [Any[aft]] - infos = Vector{MaybeAbstractIterationInfo}[MaybeAbstractIterationInfo[]] - effects = EFFECTS_TOTAL - for i = 1:nargs - ctypes´ = Vector{Any}[] - infos′ = Vector{MaybeAbstractIterationInfo}[] - for ti in (splitunions ? uniontypes(aargtypes[i]) : Any[aargtypes[i]]) - if !isvarargtype(ti) - (;cti, info, ai_effects) = precise_container_type(interp, itft, ti, sv) - else - (;cti, info, ai_effects) = precise_container_type(interp, itft, unwrapva(ti), sv) - # We can't represent a repeating sequence of the same types, - # so tmerge everything together to get one type that represents - # everything. 
- argt = cti[end] - if isvarargtype(argt) - argt = unwrapva(argt) + ctypes::Vector{Vector{Any}} = [Any[aft]] + infos::Vector{Vector{MaybeAbstractIterationInfo}} = Vector{MaybeAbstractIterationInfo}[MaybeAbstractIterationInfo[]] + all_effects::Effects = EFFECTS_TOTAL + retinfos = ApplyCallInfo[] + retinfo = UnionSplitApplyCallInfo(retinfos) + exctype = Union{} + ctypes´ = Vector{Any}[] + infos´ = Vector{MaybeAbstractIterationInfo}[] + local ti, argtypesi + local ctfuture::Future{AbstractIterationResult} + local callfuture::Future{CallMeta} + + applyresult = Future{CallMeta}() + # split the rest into a resumable state machine + i::Int = 1 + j::Int = 1 + nextstate::UInt8 = 0x0 + function infercalls(interp, sv) + # n.b. Remember that variables will lose their values across restarts, + # so be sure to manually hoist any values that must be preserved and do + # not rely on program order. + # This is a little more complex than the closure continuations often used elsewhere, but avoids needing to manage all of that indentation + if nextstate === 0x1 + nextstate = 0xff + @goto state1 + elseif nextstate === 0x2 + nextstate = 0xff + @goto state2 + elseif nextstate === 0x3 + nextstate = 0xff + @goto state3 + else + @assert nextstate === 0x0 + nextstate = 0xff + end + while i <= length(aargtypes) + argtypesi = (splitunions ? uniontypes(aargtypes[i]) : Any[aargtypes[i]]) + i += 1 + j = 1 + while j <= length(argtypesi) + ti = argtypesi[j] + j += 1 + if !isvarargtype(ti) + ctfuture = precise_container_type(interp, itft.contents, ti, sv)::Future + if !isready(ctfuture) + nextstate = 0x1 + return false + @label state1 + end + (;cti, info, ai_effects) = ctfuture[] + else + ctfuture = precise_container_type(interp, itft.contents, unwrapva(ti), sv)::Future + if !isready(ctfuture) + nextstate = 0x2 + return false + @label state2 + end + (;cti, info, ai_effects) = ctfuture[] + # We can't represent a repeating sequence of the same types, + # so tmerge everything together to get one type that represents + # everything. 
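The `nextstate::UInt8` byte and the `@goto`/`@label` pairs used in `inferiterate_2arg` above and in `infercalls` here are a hand-rolled way of making one of these closures resumable: every `return false` records where execution should pick up on the next call. Below is a stripped-down illustration of just that idiom; `make_counter` and everything in it is hypothetical toy code, not inference logic.

```julia
# Minimal resumable closure using the same nextstate/@goto bookkeeping:
# every call either makes one step of progress (returning false) or finishes.
function make_counter(limit::Int)
    i = 0
    nextstate::UInt8 = 0x0
    function resume()
        if nextstate === 0x1
            nextstate = 0xff
            @goto state1          # jump back to where we previously yielded
        else
            @assert nextstate === 0x0
            nextstate = 0xff
        end
        while i < limit
            i += 1
            println("produced ", i)
            nextstate = 0x1       # remember the resume point ...
            return false          # ... and yield control to the driver
            @label state1
        end
        return true               # all work done
    end
    return resume
end

advance = make_counter(3)
while !advance()
end
```

Repeated calls produce 1, 2, 3 and then report completion, without keeping any of that state on the call stack between calls, which is the property the new inference code relies on when a parked task is re-entered from `sv.tasks`.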
+ argt = cti[end] + if isvarargtype(argt) + argt = unwrapva(argt) + end + for k in 1:(length(cti)-1) + argt = tmerge(argt, cti[k]) + end + cti = Any[Vararg{argt}] end - for i in 1:(length(cti)-1) - argt = tmerge(argt, cti[i]) + all_effects = merge_effects(all_effects, ai_effects) + if info !== nothing + for call in info.each + all_effects = merge_effects(all_effects, call.effects) + end + end + if any(@nospecialize(t) -> t === Bottom, cti) + continue + end + for k = 1:length(ctypes) + ct = ctypes[k] + if isvarargtype(ct[end]) + # This is vararg, we're not gonna be able to do any inlining, + # drop the info + info = nothing + tail = tuple_tail_elem(typeinf_lattice(interp), unwrapva(ct[end]), cti) + push!(ctypes´, push!(ct[1:(end - 1)], tail)) + else + push!(ctypes´, append!(ct[:], cti)) + end + push!(infos´, push!(copy(infos[k]), info)) end - cti = Any[Vararg{argt}] end - effects = merge_effects(effects, ai_effects) - if info !== nothing - for call in info.each - effects = merge_effects(effects, call.effects) + # swap for the new array and empty the temporary one + ctypes´, ctypes = ctypes, ctypes´ + infos´, infos = infos, infos´ + empty!(ctypes´) + empty!(infos´) + end + all_effects.nothrow || (exctype = Any) + + i = 1 + while i <= length(ctypes) + ct = ctypes[i] + lct = length(ct) + # truncate argument list at the first Vararg + for k = 1:lct-1 + cti = ct[k] + if isvarargtype(cti) + ct[k] = tuple_tail_elem(typeinf_lattice(interp), unwrapva(cti), ct[(k+1):lct]) + resize!(ct, k) + break end end - if any(@nospecialize(t) -> t === Bottom, cti) - continue + callfuture = abstract_call(interp, ArgInfo(nothing, ct), si, sv, max_methods)::Future + if !isready(callfuture) + nextstate = 0x3 + return false + @label state3 end - for j = 1:length(ctypes) - ct = ctypes[j]::Vector{Any} - if isvarargtype(ct[end]) - # This is vararg, we're not gonna be able to do any inlining, - # drop the info - info = nothing - tail = tuple_tail_elem(typeinf_lattice(interp), unwrapva(ct[end]), cti) - push!(ctypes´, push!(ct[1:(end - 1)], tail)) - else - push!(ctypes´, append!(ct[:], cti)) + let (; info, rt, exct, effects) = callfuture[] + push!(retinfos, ApplyCallInfo(info, infos[i])) + res = tmerge(typeinf_lattice(interp), res, rt) + exctype = tmerge(typeinf_lattice(interp), exctype, exct) + all_effects = merge_effects(all_effects, effects) + if i < length(ctypes) && bail_out_apply(interp, InferenceLoopState(ctypes[i], res, all_effects), sv) + add_remark!(interp, sv, "_apply_iterate inference reached maximally imprecise information. Bailing on.") + # there is unanalyzed candidate, widen type and effects to the top + let retinfo = NoCallInfo() # NOTE this is necessary to prevent the inlining processing + applyresult[] = CallMeta(Any, Any, Effects(), retinfo) + return true + end end - push!(infos′, push!(copy(infos[j]), info)) end + i += 1 end - ctypes = ctypes´ - infos = infos′ - end - retinfos = ApplyCallInfo[] - retinfo = UnionSplitApplyCallInfo(retinfos) - napplicable = length(ctypes) - seen = 0 - exct = effects.nothrow ? 
Union{} : Any - for i = 1:napplicable - ct = ctypes[i] - arginfo = infos[i] - lct = length(ct) - # truncate argument list at the first Vararg - for i = 1:lct-1 - cti = ct[i] - if isvarargtype(cti) - ct[i] = tuple_tail_elem(typeinf_lattice(interp), unwrapva(cti), ct[(i+1):lct]) - resize!(ct, i) - break - end - end - call = abstract_call(interp, ArgInfo(nothing, ct), si, sv, max_methods) - seen += 1 - push!(retinfos, ApplyCallInfo(call.info, arginfo)) - res = tmerge(typeinf_lattice(interp), res, call.rt) - exct = tmerge(typeinf_lattice(interp), exct, call.exct) - effects = merge_effects(effects, call.effects) - if bail_out_apply(interp, InferenceLoopState(ct, res, effects), sv) - add_remark!(interp, sv, "_apply_iterate inference reached maximally imprecise information. Bailing on.") - break - end - end - if seen ≠ napplicable - # there is unanalyzed candidate, widen type and effects to the top - res = Any - exct = Any - effects = Effects() - retinfo = NoCallInfo() # NOTE this is necessary to prevent the inlining processing + # TODO: Add a special info type to capture all the iteration info. + # For now, only propagate info if we don't also union-split the iteration + applyresult[] = CallMeta(res, exctype, all_effects, retinfo) + return true end - # TODO: Add a special info type to capture all the iteration info. - # For now, only propagate info if we don't also union-split the iteration - return CallMeta(res, exct, effects, retinfo) + # start making progress on the first call + infercalls(interp, sv) || push!(sv.tasks, infercalls) + return applyresult end function argtype_by_index(argtypes::Vector{Any}, i::Int) @@ -2135,66 +2202,69 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt argtypes = arginfo.argtypes ft′ = argtype_by_index(argtypes, 2) ft = widenconst(ft′) - ft === Bottom && return CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo()) + ft === Bottom && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) (types, isexact, isconcrete, istype) = instanceof_tfunc(argtype_by_index(argtypes, 3), false) - isexact || return CallMeta(Any, Any, Effects(), NoCallInfo()) + isexact || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) unwrapped = unwrap_unionall(types) - types === Bottom && return CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo()) + types === Bottom && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) if !(unwrapped isa DataType && unwrapped.name === Tuple.name) - return CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo()) + return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) end argtype = argtypes_to_type(argtype_tail(argtypes, 4)) nargtype = typeintersect(types, argtype) - nargtype === Bottom && return CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo()) - nargtype isa DataType || return CallMeta(Any, Any, Effects(), NoCallInfo()) # other cases are not implemented below - isdispatchelem(ft) || return CallMeta(Any, Any, Effects(), NoCallInfo()) # check that we might not have a subtype of `ft` at runtime, before doing supertype lookup below + nargtype === Bottom && return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) + nargtype isa DataType || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # other cases are not implemented below + isdispatchelem(ft) || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # check that we might not have a subtype of `ft` at runtime, before doing supertype lookup below ft = ft::DataType lookupsig = 
rewrap_unionall(Tuple{ft, unwrapped.parameters...}, types)::Type nargtype = Tuple{ft, nargtype.parameters...} argtype = Tuple{ft, argtype.parameters...} match, valid_worlds = findsup(lookupsig, method_table(interp)) - match === nothing && return CallMeta(Any, Any, Effects(), NoCallInfo()) + match === nothing && return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) update_valid_age!(sv, valid_worlds) method = match.method tienv = ccall(:jl_type_intersection_with_env, Any, (Any, Any), nargtype, method.sig)::SimpleVector - ti = tienv[1]; env = tienv[2]::SimpleVector - result = abstract_call_method(interp, method, ti, env, false, si, sv) - (; rt, exct, edge, effects, volatile_inf_result) = result + ti = tienv[1] + env = tienv[2]::SimpleVector + mresult = abstract_call_method(interp, method, ti, env, false, si, sv)::Future match = MethodMatch(ti, env, method, argtype <: method.sig) - res = nothing - sig = match.spec_types - argtypes′ = invoke_rewrite(argtypes) - fargs = arginfo.fargs - fargs′ = fargs === nothing ? nothing : invoke_rewrite(fargs) - arginfo = ArgInfo(fargs′, argtypes′) - # # typeintersect might have narrowed signature, but the accuracy gain doesn't seem worth the cost involved with the lattice comparisons - # for i in 1:length(argtypes′) - # t, a = ti.parameters[i], argtypes′[i] - # argtypes′[i] = t ⊑ a ? t : a - # end - 𝕃ₚ = ipo_lattice(interp) - ⊑, ⋤, ⊔ = partialorder(𝕃ₚ), strictneqpartialorder(𝕃ₚ), join(𝕃ₚ) - f = singleton_type(ft′) - invokecall = InvokeCall(types, lookupsig) - const_call_result = abstract_call_method_with_const_args(interp, - result, f, arginfo, si, match, sv, invokecall) - const_result = volatile_inf_result - if const_call_result !== nothing - if const_call_result.rt ⊑ rt - (; rt, effects, const_result, edge) = const_call_result + return Future{CallMeta}(mresult, interp, sv) do result, interp, sv + (; rt, exct, edge, effects, volatile_inf_result) = result + res = nothing + sig = match.spec_types + argtypes′ = invoke_rewrite(argtypes) + fargs = arginfo.fargs + fargs′ = fargs === nothing ? nothing : invoke_rewrite(fargs) + arginfo = ArgInfo(fargs′, argtypes′) + # # typeintersect might have narrowed signature, but the accuracy gain doesn't seem worth the cost involved with the lattice comparisons + # for i in 1:length(argtypes′) + # t, a = ti.parameters[i], argtypes′[i] + # argtypes′[i] = t ⊑ a ? 
t : a + # end + 𝕃ₚ = ipo_lattice(interp) + ⊑, ⋤, ⊔ = partialorder(𝕃ₚ), strictneqpartialorder(𝕃ₚ), join(𝕃ₚ) + f = singleton_type(ft′) + invokecall = InvokeCall(types, lookupsig) + const_call_result = abstract_call_method_with_const_args(interp, + result, f, arginfo, si, match, sv, invokecall) + const_result = volatile_inf_result + if const_call_result !== nothing + if const_call_result.rt ⊑ rt + (; rt, effects, const_result, edge) = const_call_result + end + if const_call_result.exct ⋤ exct + (; exct, const_result, edge) = const_call_result + end end - if const_call_result.exct ⋤ exct - (; exct, const_result, edge) = const_call_result + rt = from_interprocedural!(interp, rt, sv, arginfo, sig) + info = InvokeCallInfo(match, const_result) + edge !== nothing && add_invoke_backedge!(sv, lookupsig, edge) + if !match.fully_covers + effects = Effects(effects; nothrow=false) + exct = exct ⊔ TypeError end + return CallMeta(rt, exct, effects, info) end - rt = from_interprocedural!(interp, rt, sv, arginfo, sig) - info = InvokeCallInfo(match, const_result) - edge !== nothing && add_invoke_backedge!(sv, lookupsig, edge) - if !match.fully_covers - effects = Effects(effects; nothrow=false) - exct = exct ⊔ TypeError - end - return CallMeta(rt, exct, effects, info) end function invoke_rewrite(xs::Vector{Any}) @@ -2207,10 +2277,12 @@ end function abstract_finalizer(interp::AbstractInterpreter, argtypes::Vector{Any}, sv::AbsIntState) if length(argtypes) == 3 finalizer_argvec = Any[argtypes[2], argtypes[3]] - call = abstract_call(interp, ArgInfo(nothing, finalizer_argvec), StmtInfo(false), sv, #=max_methods=#1) - return CallMeta(Nothing, Any, Effects(), FinalizerInfo(call.info, call.effects)) + call = abstract_call(interp, ArgInfo(nothing, finalizer_argvec), StmtInfo(false), sv, #=max_methods=#1)::Future + return Future{CallMeta}(call, interp, sv) do call, interp, sv + return CallMeta(Nothing, Any, Effects(), FinalizerInfo(call.info, call.effects)) + end end - return CallMeta(Nothing, Any, Effects(), NoCallInfo()) + return Future(CallMeta(Nothing, Any, Effects(), NoCallInfo())) end function abstract_throw(interp::AbstractInterpreter, argtypes::Vector{Any}, ::AbsIntState) @@ -2228,7 +2300,7 @@ function abstract_throw(interp::AbstractInterpreter, argtypes::Vector{Any}, ::Ab else exct = ArgumentError end - return CallMeta(Union{}, exct, EFFECTS_THROWS, NoCallInfo()) + return Future(CallMeta(Union{}, exct, EFFECTS_THROWS, NoCallInfo())) end function abstract_throw_methoderror(interp::AbstractInterpreter, argtypes::Vector{Any}, ::AbsIntState) @@ -2240,7 +2312,7 @@ function abstract_throw_methoderror(interp::AbstractInterpreter, argtypes::Vecto ⊔ = join(typeinf_lattice(interp)) MethodError ⊔ ArgumentError end - return CallMeta(Union{}, exct, EFFECTS_THROWS, NoCallInfo()) + return Future(CallMeta(Union{}, exct, EFFECTS_THROWS, NoCallInfo())) end # call where the function is known exactly @@ -2285,60 +2357,70 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), end end end - return CallMeta(rt, exct, effects, NoCallInfo(), refinements) + return Future(CallMeta(rt, exct, effects, NoCallInfo(), refinements)) elseif isa(f, Core.OpaqueClosure) # calling an OpaqueClosure about which we have no information returns no information - return CallMeta(typeof(f).parameters[2], Any, Effects(), NoCallInfo()) + return Future(CallMeta(typeof(f).parameters[2], Any, Effects(), NoCallInfo())) elseif f === TypeVar && !isvarargtype(argtypes[end]) # Manually look through the definition of TypeVar to # make sure to 
be able to get `PartialTypeVar`s out. - 2 ≤ la ≤ 4 || return CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo()) - n = argtypes[2] - ub_var = Const(Any) - lb_var = Const(Union{}) - if la == 4 - ub_var = argtypes[4] - lb_var = argtypes[3] - elseif la == 3 - ub_var = argtypes[3] - end + 2 ≤ la ≤ 4 || return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) # make sure generic code is prepared for inlining if needed later - call = let T = Any[Type{TypeVar}, Any, Any, Any] + let T = Any[Type{TypeVar}, Any, Any, Any] resize!(T, la) atype = Tuple{T...} T[1] = Const(TypeVar) - abstract_call_gf_by_type(interp, f, ArgInfo(nothing, T), si, atype, sv, max_methods) - end - pT = typevar_tfunc(𝕃ᵢ, n, lb_var, ub_var) - typevar_argtypes = Any[n, lb_var, ub_var] - effects = builtin_effects(𝕃ᵢ, Core._typevar, typevar_argtypes, pT) - if effects.nothrow - exct = Union{} - else - exct = builtin_exct(𝕃ᵢ, Core._typevar, typevar_argtypes, pT) + let call = abstract_call_gf_by_type(interp, f, ArgInfo(nothing, T), si, atype, sv, max_methods)::Future + return Future{CallMeta}(call, interp, sv) do call, interp, sv + n = argtypes[2] + ub_var = Const(Any) + lb_var = Const(Union{}) + if la == 4 + ub_var = argtypes[4] + lb_var = argtypes[3] + elseif la == 3 + ub_var = argtypes[3] + end + pT = typevar_tfunc(𝕃ᵢ, n, lb_var, ub_var) + typevar_argtypes = Any[n, lb_var, ub_var] + effects = builtin_effects(𝕃ᵢ, Core._typevar, typevar_argtypes, pT) + if effects.nothrow + exct = Union{} + else + exct = builtin_exct(𝕃ᵢ, Core._typevar, typevar_argtypes, pT) + end + return CallMeta(pT, exct, effects, call.info) + end + end end - return CallMeta(pT, exct, effects, call.info) elseif f === UnionAll - call = abstract_call_gf_by_type(interp, f, ArgInfo(nothing, Any[Const(UnionAll), Any, Any]), si, Tuple{Type{UnionAll}, Any, Any}, sv, max_methods) - return abstract_call_unionall(interp, argtypes, call) + let call = abstract_call_gf_by_type(interp, f, ArgInfo(nothing, Any[Const(UnionAll), Any, Any]), si, Tuple{Type{UnionAll}, Any, Any}, sv, max_methods)::Future + return Future{CallMeta}(call, interp, sv) do call, interp, sv + return abstract_call_unionall(interp, argtypes, call) + end + end elseif f === Tuple && la == 2 aty = argtypes[2] ty = isvarargtype(aty) ? 
unwrapva(aty) : widenconst(aty) if !isconcretetype(ty) - return CallMeta(Tuple, Any, EFFECTS_UNKNOWN, NoCallInfo()) + return Future(CallMeta(Tuple, Any, EFFECTS_UNKNOWN, NoCallInfo())) end elseif is_return_type(f) return return_type_tfunc(interp, argtypes, si, sv) elseif la == 3 && f === Core.:(!==) # mark !== as exactly a negated call to === - call = abstract_call_gf_by_type(interp, f, ArgInfo(fargs, Any[Const(f), Any, Any]), si, Tuple{typeof(f), Any, Any}, sv, max_methods) - rty = abstract_call_known(interp, (===), arginfo, si, sv, max_methods).rt - if isa(rty, Conditional) - return CallMeta(Conditional(rty.slot, rty.elsetype, rty.thentype), Bottom, EFFECTS_TOTAL, NoCallInfo()) # swap if-else - elseif isa(rty, Const) - return CallMeta(Const(rty.val === false), Bottom, EFFECTS_TOTAL, MethodResultPure()) - end - return call + let callfuture = abstract_call_gf_by_type(interp, f, ArgInfo(fargs, Any[Const(f), Any, Any]), si, Tuple{typeof(f), Any, Any}, sv, max_methods)::Future, + rtfuture = abstract_call_known(interp, (===), arginfo, si, sv, max_methods)::Future + return Future{CallMeta}(isready(callfuture) && isready(rtfuture), interp, sv) do interp, sv + local rty = rtfuture[].rt + if isa(rty, Conditional) + return CallMeta(Conditional(rty.slot, rty.elsetype, rty.thentype), Bottom, EFFECTS_TOTAL, NoCallInfo()) # swap if-else + elseif isa(rty, Const) + return CallMeta(Const(rty.val === false), Bottom, EFFECTS_TOTAL, MethodResultPure()) + end + return callfuture[] + end + end elseif la == 3 && f === Core.:(>:) # mark issupertype as a exact alias for issubtype # swap T1 and T2 arguments and call <: @@ -2350,12 +2432,12 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), argtypes = Any[typeof(<:), argtypes[3], argtypes[2]] return abstract_call_known(interp, <:, ArgInfo(fargs, argtypes), si, sv, max_methods) elseif la == 2 && f === Core.typename - return CallMeta(typename_static(argtypes[2]), Bottom, EFFECTS_TOTAL, MethodResultPure()) + return Future(CallMeta(typename_static(argtypes[2]), Bottom, EFFECTS_TOTAL, MethodResultPure())) elseif f === Core._hasmethod - return _hasmethod_tfunc(interp, argtypes, sv) + return Future(_hasmethod_tfunc(interp, argtypes, sv)) end atype = argtypes_to_type(argtypes) - return abstract_call_gf_by_type(interp, f, arginfo, si, atype, sv, max_methods) + return abstract_call_gf_by_type(interp, f, arginfo, si, atype, sv, max_methods)::Future end function abstract_call_opaque_closure(interp::AbstractInterpreter, @@ -2364,40 +2446,44 @@ function abstract_call_opaque_closure(interp::AbstractInterpreter, tt = closure.typ ocargsig = rewrap_unionall((unwrap_unionall(tt)::DataType).parameters[1], tt) ocargsig′ = unwrap_unionall(ocargsig) - ocargsig′ isa DataType || return CallMeta(Any, Any, Effects(), NoCallInfo()) + ocargsig′ isa DataType || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) ocsig = rewrap_unionall(Tuple{Tuple, ocargsig′.parameters...}, ocargsig) - hasintersect(sig, ocsig) || return CallMeta(Union{}, Union{MethodError,TypeError}, EFFECTS_THROWS, NoCallInfo()) + hasintersect(sig, ocsig) || return Future(CallMeta(Union{}, Union{MethodError,TypeError}, EFFECTS_THROWS, NoCallInfo())) ocmethod = closure.source::Method - result = abstract_call_method(interp, ocmethod, sig, Core.svec(), false, si, sv) - (; rt, exct, edge, effects, volatile_inf_result) = result match = MethodMatch(sig, Core.svec(), ocmethod, sig <: ocsig) - 𝕃ₚ = ipo_lattice(interp) - ⊑, ⋤, ⊔ = partialorder(𝕃ₚ), strictneqpartialorder(𝕃ₚ), join(𝕃ₚ) - const_result = 
volatile_inf_result - if !result.edgecycle - const_call_result = abstract_call_method_with_const_args(interp, result, - nothing, arginfo, si, match, sv) - if const_call_result !== nothing - if const_call_result.rt ⊑ rt - (; rt, effects, const_result, edge) = const_call_result - end - if const_call_result.exct ⋤ exct - (; exct, const_result, edge) = const_call_result + mresult = abstract_call_method(interp, ocmethod, sig, Core.svec(), false, si, sv) + ocsig_box = Core.Box(ocsig) + return Future{CallMeta}(mresult, interp, sv) do result, interp, sv + (; rt, exct, edge, effects, volatile_inf_result, edgecycle) = result + 𝕃ₚ = ipo_lattice(interp) + ⊑, ⋤, ⊔ = partialorder(𝕃ₚ), strictneqpartialorder(𝕃ₚ), join(𝕃ₚ) + const_result = volatile_inf_result + if !edgecycle + const_call_result = abstract_call_method_with_const_args(interp, result, + nothing, arginfo, si, match, sv) + if const_call_result !== nothing + if const_call_result.rt ⊑ rt + (; rt, effects, const_result, edge) = const_call_result + end + if const_call_result.exct ⋤ exct + (; exct, const_result, edge) = const_call_result + end end end - end - if check # analyze implicit type asserts on argument and return type - rty = (unwrap_unionall(tt)::DataType).parameters[2] - rty = rewrap_unionall(rty isa TypeVar ? rty.ub : rty, tt) - if !(rt ⊑ rty && sig ⊑ ocsig) - effects = Effects(effects; nothrow=false) - exct = exct ⊔ TypeError + if check # analyze implicit type asserts on argument and return type + ftt = closure.typ + rty = (unwrap_unionall(ftt)::DataType).parameters[2] + rty = rewrap_unionall(rty isa TypeVar ? rty.ub : rty, ftt) + if !(rt ⊑ rty && sig ⊑ ocsig_box.contents) + effects = Effects(effects; nothrow=false) + exct = exct ⊔ TypeError + end end + rt = from_interprocedural!(interp, rt, sv, arginfo, match.spec_types) + info = OpaqueClosureCallInfo(match, const_result) + edge !== nothing && add_backedge!(sv, edge) + return CallMeta(rt, exct, effects, info) end - rt = from_interprocedural!(interp, rt, sv, arginfo, match.spec_types) - info = OpaqueClosureCallInfo(match, const_result) - edge !== nothing && add_backedge!(sv, edge) - return CallMeta(rt, exct, effects, info) end function most_general_argtypes(closure::PartialOpaque) @@ -2422,17 +2508,17 @@ function abstract_call_unknown(interp::AbstractInterpreter, @nospecialize(ft), wft = widenconst(ft) if hasintersect(wft, Builtin) add_remark!(interp, sv, "Could not identify method table for call") - return CallMeta(Any, Any, Effects(), NoCallInfo()) + return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) elseif hasintersect(wft, Core.OpaqueClosure) uft = unwrap_unionall(wft) if isa(uft, DataType) - return CallMeta(rewrap_unionall(uft.parameters[2], wft), Any, Effects(), NoCallInfo()) + return Future(CallMeta(rewrap_unionall(uft.parameters[2], wft), Any, Effects(), NoCallInfo())) end - return CallMeta(Any, Any, Effects(), NoCallInfo()) + return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) end # non-constant function, but the number of arguments is known and the `f` is not a builtin or intrinsic atype = argtypes_to_type(arginfo.argtypes) - return abstract_call_gf_by_type(interp, nothing, arginfo, si, atype, sv, max_methods) + return abstract_call_gf_by_type(interp, nothing, arginfo, si, atype, sv, max_methods)::Future end # call where the function is any lattice element @@ -2503,7 +2589,7 @@ function abstract_eval_cfunction(interp::AbstractInterpreter, e::Expr, vtypes::U # this may be the wrong world for the call, # but some of the result is likely to be valid anyways # and 
that may help generate better codegen - abstract_call(interp, ArgInfo(nothing, at), StmtInfo(false), sv) + abstract_call(interp, ArgInfo(nothing, at), StmtInfo(false), sv)::Future rt = e.args[1] isa(rt, Type) || (rt = Any) return RTEffects(rt, Any, EFFECTS_UNKNOWN) @@ -2544,6 +2630,7 @@ function abstract_eval_value_expr(interp::AbstractInterpreter, e::Expr, sv::AbsI # TODO: We still have non-linearized cglobal @assert e.args[1] === Core.tuple || e.args[1] === GlobalRef(Core, :tuple) else + @assert e.head !== :(=) # Some of our tests expect us to handle invalid IR here and error later # - permit that for now. # @assert false "Unexpected EXPR head in value position" @@ -2592,8 +2679,13 @@ function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, sv::Infere add_curr_ssaflag!(sv, IR_FLAG_UNUSED) end si = StmtInfo(!unused) - call = abstract_call(interp, arginfo, si, sv) - sv.stmt_info[sv.currpc] = call.info + call = abstract_call(interp, arginfo, si, sv)::Future + Future{Nothing}(call, interp, sv) do call, interp, sv + # this only is needed for the side-effect, sequenced before any task tries to consume the return value, + # which this will do even without returning this Future + sv.stmt_info[sv.currpc] = call.info + nothing + end return call end @@ -2602,11 +2694,14 @@ function abstract_eval_call(interp::AbstractInterpreter, e::Expr, vtypes::Union{ ea = e.args argtypes = collect_argtypes(interp, ea, vtypes, sv) if argtypes === nothing - return RTEffects(Bottom, Any, Effects()) + return Future(RTEffects(Bottom, Any, Effects())) end arginfo = ArgInfo(ea, argtypes) - (; rt, exct, effects, refinements) = abstract_call(interp, arginfo, sv) - return RTEffects(rt, exct, effects, refinements) + call = abstract_call(interp, arginfo, sv)::Future + return Future{RTEffects}(call, interp, sv) do call, interp, sv + (; rt, exct, effects, refinements) = call + return RTEffects(rt, exct, effects, refinements) + end end function abstract_eval_new(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, @@ -2736,12 +2831,15 @@ function abstract_eval_new_opaque_closure(interp::AbstractInterpreter, e::Expr, argtypes = most_general_argtypes(rt) pushfirst!(argtypes, rt.env) callinfo = abstract_call_opaque_closure(interp, rt, - ArgInfo(nothing, argtypes), StmtInfo(true), sv, #=check=#false) - sv.stmt_info[sv.currpc] = OpaqueClosureCreateInfo(callinfo) + ArgInfo(nothing, argtypes), StmtInfo(true), sv, #=check=#false)::Future + Future{Nothing}(callinfo, interp, sv) do callinfo, interp, sv + sv.stmt_info[sv.currpc] = OpaqueClosureCreateInfo(callinfo) + nothing + end end end end - return RTEffects(rt, Any, effects) + return Future(RTEffects(rt, Any, effects)) end function abstract_eval_copyast(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, @@ -2837,7 +2935,7 @@ function abstract_eval_static_parameter(::AbstractInterpreter, e::Expr, sv::AbsI end function abstract_eval_statement_expr(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, - sv::AbsIntState) + sv::AbsIntState)::Future{RTEffects} ehead = e.head if ehead === :call return abstract_eval_call(interp, e, vtypes, sv) @@ -2935,43 +3033,7 @@ function stmt_taints_inbounds_consistency(sv::AbsIntState) return has_curr_ssaflag(sv, IR_FLAG_INBOUNDS) end -function abstract_eval_statement(interp::AbstractInterpreter, @nospecialize(e), vtypes::VarTable, sv::InferenceState) - if !isa(e, Expr) - if isa(e, PhiNode) - add_curr_ssaflag!(sv, IR_FLAGS_REMOVABLE) - # Implement convergence for PhiNodes. 
In particular, PhiNodes need to tmerge over - # the incoming values from all iterations, but `abstract_eval_phi` will only tmerge - # over the first and last iterations. By tmerging in the current old_rt, we ensure that - # we will not lose an intermediate value. - rt = abstract_eval_phi(interp, e, vtypes, sv) - old_rt = sv.ssavaluetypes[sv.currpc] - rt = old_rt === NOT_FOUND ? rt : tmerge(typeinf_lattice(interp), old_rt, rt) - return RTEffects(rt, Union{}, EFFECTS_TOTAL) - end - (; rt, exct, effects, refinements) = abstract_eval_special_value(interp, e, vtypes, sv) - else - (; rt, exct, effects, refinements) = abstract_eval_statement_expr(interp, e, vtypes, sv) - if effects.noub === NOUB_IF_NOINBOUNDS - if has_curr_ssaflag(sv, IR_FLAG_INBOUNDS) - effects = Effects(effects; noub=ALWAYS_FALSE) - elseif !propagate_inbounds(sv) - # The callee read our inbounds flag, but unless we propagate inbounds, - # we ourselves don't read our parent's inbounds. - effects = Effects(effects; noub=ALWAYS_TRUE) - end - end - e = e::Expr - @assert !isa(rt, TypeVar) "unhandled TypeVar" - rt = maybe_singleton_const(rt) - if !isempty(sv.pclimitations) - if rt isa Const || rt === Union{} - empty!(sv.pclimitations) - else - rt = LimitedAccuracy(rt, sv.pclimitations) - sv.pclimitations = IdSet{InferenceState}() - end - end - end +function merge_override_effects!(interp::AbstractInterpreter, effects::Effects, sv::InferenceState) # N.B.: This only applies to the effects of the statement itself. # It is possible for arguments (GlobalRef/:static_parameter) to throw, # but these will be recomputed during SSA construction later. @@ -2979,8 +3041,11 @@ function abstract_eval_statement(interp::AbstractInterpreter, @nospecialize(e), effects = override_effects(effects, override) set_curr_ssaflag!(sv, flags_for_effects(effects), IR_FLAGS_EFFECTS) merge_effects!(interp, sv, effects) + return effects +end - return RTEffects(rt, exct, effects, refinements) +function abstract_eval_statement(interp::AbstractInterpreter, @nospecialize(e), vtypes::VarTable, sv::InferenceState) + @assert !isa(e, Union{Expr, PhiNode, NewvarNode}) end function override_effects(effects::Effects, override::EffectsOverride) @@ -3226,60 +3291,6 @@ function handle_control_backedge!(interp::AbstractInterpreter, frame::InferenceS return nothing end -struct BasicStmtChange - changes::Union{Nothing,StateUpdate} - rt::Any # extended lattice element or `nothing` - `nothing` if this statement may not be used as an SSA Value - exct::Any - # TODO effects::Effects - refinements # ::Union{Nothing,SlotRefinement,Vector{Any}} - function BasicStmtChange(changes::Union{Nothing,StateUpdate}, rt::Any, exct::Any, - refinements=nothing) - @nospecialize rt exct refinements - return new(changes, rt, exct, refinements) - end -end - -@inline function abstract_eval_basic_statement(interp::AbstractInterpreter, - @nospecialize(stmt), pc_vartable::VarTable, frame::InferenceState) - if isa(stmt, NewvarNode) - changes = StateUpdate(stmt.slot, VarState(Bottom, true)) - return BasicStmtChange(changes, nothing, Union{}) - elseif !isa(stmt, Expr) - (; rt, exct) = abstract_eval_statement(interp, stmt, pc_vartable, frame) - return BasicStmtChange(nothing, rt, exct) - end - changes = nothing - hd = stmt.head - if hd === :(=) - (; rt, exct, refinements) = abstract_eval_statement(interp, stmt.args[2], pc_vartable, frame) - if rt === Bottom - return BasicStmtChange(nothing, Bottom, exct, refinements) - end - lhs = stmt.args[1] - if isa(lhs, SlotNumber) - changes = StateUpdate(lhs, VarState(rt, 
false)) - elseif isa(lhs, GlobalRef) - handle_global_assignment!(interp, frame, lhs, rt) - elseif !isa(lhs, SSAValue) - merge_effects!(interp, frame, EFFECTS_UNKNOWN) - end - return BasicStmtChange(changes, rt, exct, refinements) - elseif hd === :method - fname = stmt.args[1] - if isa(fname, SlotNumber) - changes = StateUpdate(fname, VarState(Any, false)) - end - return BasicStmtChange(changes, nothing, Union{}) - elseif (hd === :code_coverage_effect || ( - hd !== :boundscheck && # :boundscheck can be narrowed to Bool - is_meta_expr(stmt))) - return BasicStmtChange(nothing, Nothing, Bottom) - else - (; rt, exct, refinements) = abstract_eval_statement(interp, stmt, pc_vartable, frame) - return BasicStmtChange(nothing, rt, exct, refinements) - end -end - function update_bbstate!(𝕃ᵢ::AbstractLattice, frame::InferenceState, bb::Int, vartable::VarTable) bbtable = frame.bb_vartables[bb] if bbtable === nothing @@ -3379,27 +3390,45 @@ function update_cycle_worklists!(callback, frame::InferenceState) end # make as much progress on `frame` as possible (without handling cycles) -function typeinf_local(interp::AbstractInterpreter, frame::InferenceState) +struct CurrentState + result::Future + currstate::VarTable + bbstart::Int + bbend::Int + CurrentState(result::Future, currstate::VarTable, bbstart::Int, bbend::Int) = new(result, currstate, bbstart, bbend) + CurrentState() = new() +end +function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextresult::CurrentState) @assert !is_inferred(frame) W = frame.ip ssavaluetypes = frame.ssavaluetypes bbs = frame.cfg.blocks nbbs = length(bbs) 𝕃ᵢ = typeinf_lattice(interp) - + states = frame.bb_vartables currbb = frame.currbb + currpc = frame.currpc + + if isdefined(nextresult, :result) + # for reasons that are fairly unclear, some state is arbitrarily on the stack instead in the InferenceState as normal + bbstart = nextresult.bbstart + bbend = nextresult.bbend + currstate = nextresult.currstate + @goto injectresult + end + if currbb != 1 currbb = frame.currbb = _bits_findnext(W.bits, 1)::Int # next basic block end - - states = frame.bb_vartables currstate = copy(states[currbb]::VarTable) while currbb <= nbbs delete!(W, currbb) bbstart = first(bbs[currbb].stmts) bbend = last(bbs[currbb].stmts) - for currpc in bbstart:bbend + currpc = bbstart - 1 + while currpc < bbend + currpc += 1 frame.currpc = currpc empty_backedges!(frame, currpc) stmt = frame.src.code[currpc] @@ -3511,14 +3540,14 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState) return caller.ssavaluetypes[caller_pc] !== Any end end - ssavaluetypes[frame.currpc] = Any + ssavaluetypes[currpc] = Any @goto find_next_bb elseif isa(stmt, EnterNode) ssavaluetypes[currpc] = Any add_curr_ssaflag!(frame, IR_FLAG_NOTHROW) if isdefined(stmt, :scope) scopet = abstract_eval_value(interp, stmt.scope, currstate, frame) - handler = gethandler(frame, frame.currpc+1)::TryCatchFrame + handler = gethandler(frame, currpc + 1)::TryCatchFrame @assert handler.scopet !== nothing if !⊑(𝕃ᵢ, scopet, handler.scopet) handler.scopet = tmerge(𝕃ᵢ, scopet, handler.scopet) @@ -3537,8 +3566,91 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState) # Fall through terminator - treat as regular stmt end # Process non control-flow statements - (; changes, rt, exct, refinements) = abstract_eval_basic_statement(interp, - stmt, currstate, frame) + @assert isempty(frame.tasks) + rt = nothing + exct = Bottom + changes = nothing + refinements = nothing + effects = nothing + if 
isa(stmt, NewvarNode) + changes = StateUpdate(stmt.slot, VarState(Bottom, true)) + elseif isa(stmt, PhiNode) + add_curr_ssaflag!(frame, IR_FLAGS_REMOVABLE) + # Implement convergence for PhiNodes. In particular, PhiNodes need to tmerge over + # the incoming values from all iterations, but `abstract_eval_phi` will only tmerge + # over the first and last iterations. By tmerging in the current old_rt, we ensure that + # we will not lose an intermediate value. + rt = abstract_eval_phi(interp, stmt, currstate, frame) + old_rt = frame.ssavaluetypes[currpc] + rt = old_rt === NOT_FOUND ? rt : tmerge(typeinf_lattice(interp), old_rt, rt) + else + lhs = nothing + if isexpr(stmt, :(=)) + lhs = stmt.args[1] + stmt = stmt.args[2] + end + if !isa(stmt, Expr) + (; rt, exct, effects, refinements) = abstract_eval_special_value(interp, stmt, currstate, frame) + else + hd = stmt.head + if hd === :method + fname = stmt.args[1] + if isa(fname, SlotNumber) + changes = StateUpdate(fname, VarState(Any, false)) + end + elseif (hd === :code_coverage_effect || ( + hd !== :boundscheck && # :boundscheck can be narrowed to Bool + is_meta_expr(stmt))) + rt = Nothing + else + result = abstract_eval_statement_expr(interp, stmt, currstate, frame)::Future + if !isready(result) || !isempty(frame.tasks) + return CurrentState(result, currstate, bbstart, bbend) + @label injectresult + # reload local variables + stmt = frame.src.code[currpc] + changes = nothing + lhs = nothing + if isexpr(stmt, :(=)) + lhs = stmt.args[1] + stmt = stmt.args[2] + end + result = nextresult.result::Future{RTEffects} + end + result = result[] + (; rt, exct, effects, refinements) = result + if effects.noub === NOUB_IF_NOINBOUNDS + if has_curr_ssaflag(frame, IR_FLAG_INBOUNDS) + effects = Effects(effects; noub=ALWAYS_FALSE) + elseif !propagate_inbounds(frame) + # The callee read our inbounds flag, but unless we propagate inbounds, + # we ourselves don't read our parent's inbounds. 
+ effects = Effects(effects; noub=ALWAYS_TRUE) + end + end + @assert !isa(rt, TypeVar) "unhandled TypeVar" + rt = maybe_singleton_const(rt) + if !isempty(frame.pclimitations) + if rt isa Const || rt === Union{} + empty!(frame.pclimitations) + else + rt = LimitedAccuracy(rt, frame.pclimitations) + frame.pclimitations = IdSet{InferenceState}() + end + end + end + end + effects === nothing || merge_override_effects!(interp, effects, frame) + if lhs !== nothing && rt !== Bottom + if isa(lhs, SlotNumber) + changes = StateUpdate(lhs, VarState(rt, false)) + elseif isa(lhs, GlobalRef) + handle_global_assignment!(interp, frame, lhs, rt) + elseif !isa(lhs, SSAValue) + merge_effects!(interp, frame, EFFECTS_UNKNOWN) + end + end + end if !has_curr_ssaflag(frame, IR_FLAG_NOTHROW) if exct !== Union{} update_exc_bestguess!(interp, exct, frame) @@ -3601,7 +3713,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState) end end # while currbb <= nbbs - nothing + return CurrentState() end function apply_refinement!(𝕃ᵢ::AbstractLattice, slot::SlotNumber, @nospecialize(newtyp), @@ -3652,31 +3764,81 @@ function condition_object_change(currstate::VarTable, condt::Conditional, end # make as much progress on `frame` as possible (by handling cycles) -function typeinf_nocycle(interp::AbstractInterpreter, frame::InferenceState) - typeinf_local(interp, frame) - @assert isempty(frame.ip) +warnlength::Int = 2500 +function typeinf(interp::AbstractInterpreter, frame::InferenceState) callstack = frame.callstack::Vector{AbsIntState} - frame.cycleid == length(callstack) && return true - - no_active_ips_in_callers = false - while true - # If the current frame is not the top part of a cycle, continue to the top of the cycle before resuming work - frame.cycleid == frame.frameid || return false - # If done, return and finalize this cycle - no_active_ips_in_callers && return true - # Otherwise, do at least one iteration over the entire current cycle - no_active_ips_in_callers = true - for i = reverse(frame.cycleid:length(callstack)) - caller = callstack[i]::InferenceState - if !isempty(caller.ip) - # Note that `typeinf_local(interp, caller)` can potentially modify the other frames - # `frame.cycleid`, which is why making incremental progress requires the - # outer while loop. - typeinf_local(interp, caller) - no_active_ips_in_callers = false - end - update_valid_age!(caller, frame.valid_worlds) + nextstates = CurrentState[] + takenext = frame.frameid + minwarn = warnlength + takeprev = 0 + while takenext >= frame.frameid + callee = takenext == 0 ? frame : callstack[takenext]::InferenceState + if !isempty(callstack) + if length(callstack) - frame.frameid >= minwarn + topmethod = callstack[1].linfo + topmethod.def isa Method || (topmethod = callstack[2].linfo) + print(Core.stderr, "info: inference of ", topmethod, " exceeding ", length(callstack), " frames (may be slow).\n") + minwarn *= 2 + end + topcallee = (callstack[end]::InferenceState) + if topcallee.cycleid != callee.cycleid + callee = topcallee + takenext = length(callstack) + end + end + nextstateid = takenext + 1 - frame.frameid + while length(nextstates) < nextstateid + push!(nextstates, CurrentState()) + end + if doworkloop(interp, callee) + # First drain the workloop. Note that since some scheduled work doesn't + # affect the result (e.g. 
cfunction or abstract_call_method on + # get_compileable_sig), but still must be finished up since it may see and + # change the local variables of the InferenceState at currpc, we do this + # even if the nextresult status is already completed. + continue + elseif isdefined(nextstates[nextstateid], :result) || !isempty(callee.ip) + # Next make progress on this frame + prev = length(callee.tasks) + 1 + nextstates[nextstateid] = typeinf_local(interp, callee, nextstates[nextstateid]) + reverse!(callee.tasks, prev) + elseif callee.cycleid == length(callstack) + # With no active ip's and no cycles, frame is done + finish_nocycle(interp, callee) + callee.frameid == 0 && break + takenext = length(callstack) + nextstateid = takenext + 1 - frame.frameid + #@assert length(nextstates) == nextstateid + 1 + #@assert all(i -> !isdefined(nextstates[i], :result), nextstateid+1:length(nextstates)) + resize!(nextstates, nextstateid) + elseif callee.cycleid == callee.frameid + # If the current frame is the top part of a cycle, check if the whole cycle + # is done, and if not, pick the next item to work on. + no_active_ips_in_cycle = true + for i = callee.cycleid:length(callstack) + caller = callstack[i]::InferenceState + @assert caller.cycleid == callee.cycleid + if !isempty(caller.tasks) || isdefined(nextstates[i+1-frame.frameid], :result) || !isempty(caller.ip) + no_active_ips_in_cycle = false + break + end + end + if no_active_ips_in_cycle + finish_cycle(interp, callstack, callee.cycleid) + end + takenext = length(callstack) + nextstateid = takenext + 1 - frame.frameid + if no_active_ips_in_cycle + #@assert all(i -> !isdefined(nextstates[i], :result), nextstateid+1:length(nextstates)) + resize!(nextstates, nextstateid) + else + #@assert length(nextstates) == nextstateid + end + else + # Continue to the next frame in this cycle + takenext = takenext - 1 end end - return true + #@assert all(nextresult -> !isdefined(nextresult, :result), nextstates) + return is_inferred(frame) end diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl index 6953dea5b9bd7..05d95d1d5bdc7 100644 --- a/base/compiler/inferencestate.jl +++ b/base/compiler/inferencestate.jl @@ -251,6 +251,7 @@ mutable struct InferenceState stmt_info::Vector{CallInfo} #= intermediate states for interprocedural abstract interpretation =# + tasks::Vector{WorkThunk} pclimitations::IdSet{InferenceState} # causes of precision restrictions (LimitedAccuracy) on currpc ssavalue limitations::IdSet{InferenceState} # causes of precision restrictions (LimitedAccuracy) on return cycle_backedges::Vector{Tuple{InferenceState, Int}} # call-graph backedges connecting from callee to caller @@ -328,6 +329,7 @@ mutable struct InferenceState limitations = IdSet{InferenceState}() cycle_backedges = Vector{Tuple{InferenceState,Int}}() callstack = AbsIntState[] + tasks = WorkThunk[] valid_worlds = WorldRange(1, get_world_counter()) bestguess = Bottom @@ -351,7 +353,7 @@ mutable struct InferenceState this = new( mi, world, mod, sptypes, slottypes, src, cfg, method_info, currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, ssavaluetypes, stmt_edges, stmt_info, - pclimitations, limitations, cycle_backedges, callstack, 0, 0, 0, + tasks, pclimitations, limitations, cycle_backedges, callstack, 0, 0, 0, result, unreachable, valid_worlds, bestguess, exc_bestguess, ipo_effects, restrict_abstract_call_sites, cache_mode, insert_coverage, interp) @@ -800,6 +802,7 @@ mutable struct IRInterpretationState const ssa_refined::BitSet const 
lazyreachability::LazyCFGReachability valid_worlds::WorldRange + const tasks::Vector{WorkThunk} const edges::Vector{Any} callstack #::Vector{AbsIntState} frameid::Int @@ -825,10 +828,11 @@ mutable struct IRInterpretationState ssa_refined = BitSet() lazyreachability = LazyCFGReachability(ir) valid_worlds = WorldRange(min_world, max_world == typemax(UInt) ? get_world_counter() : max_world) + tasks = WorkThunk[] edges = Any[] callstack = AbsIntState[] return new(method_info, ir, mi, world, curridx, argtypes_refined, ir.sptypes, tpdum, - ssa_refined, lazyreachability, valid_worlds, edges, callstack, 0, 0) + ssa_refined, lazyreachability, valid_worlds, tasks, edges, callstack, 0, 0) end end @@ -870,6 +874,7 @@ function print_callstack(frame::AbsIntState) print(frame_instance(sv)) is_cached(sv) || print(" [uncached]") sv.parentid == idx - 1 || print(" [parent=", sv.parentid, "]") + isempty(callers_in_cycle(sv)) || print(" [cycle=", sv.cycleid, "]") println() @assert sv.frameid == idx end @@ -994,7 +999,10 @@ of the same cycle, only if it is part of a cycle with multiple frames. function callers_in_cycle(sv::InferenceState) callstack = sv.callstack::Vector{AbsIntState} cycletop = cycleid = sv.cycleid - while cycletop < length(callstack) && (callstack[cycletop + 1]::InferenceState).cycleid == cycleid + while cycletop < length(callstack) + frame = callstack[cycletop + 1] + frame isa InferenceState || break + frame.cycleid == cycleid || break cycletop += 1 end return AbsIntCycle(callstack, cycletop == cycleid ? 0 : cycleid, cycletop) @@ -1054,6 +1062,7 @@ function merge_effects!(::AbstractInterpreter, caller::InferenceState, effects:: effects = Effects(effects; effect_free=ALWAYS_TRUE) end caller.ipo_effects = merge_effects(caller.ipo_effects, effects) + nothing end merge_effects!(::AbstractInterpreter, ::IRInterpretationState, ::Effects) = return @@ -1116,3 +1125,90 @@ function get_max_methods_for_module(mod::Module) max_methods < 0 && return nothing return max_methods end + +""" + Future{T} + +Delayed return value for a value of type `T`, similar to RefValue{T}, but +explicitly represents completed as a `Bool` rather than as `isdefined`. +Set once with `f[] = v` and accessed with `f[]` afterwards. + +Can also be constructed with the `completed` flag value and a closure to +produce `x`, as well as the additional arguments to avoid always capturing the +same couple of values. +""" +struct Future{T} + later::Union{Nothing,RefValue{T}} + now::Union{Nothing,T} + Future{T}() where {T} = new{T}(RefValue{T}(), nothing) + Future{T}(x) where {T} = new{T}(nothing, x) + Future(x::T) where {T} = new{T}(nothing, x) +end +isready(f::Future) = f.later === nothing +getindex(f::Future{T}) where {T} = (later = f.later; later === nothing ? 
f.now::T : later[]) +setindex!(f::Future, v) = something(f.later)[] = v +convert(::Type{Future{T}}, x) where {T} = Future{T}(x) # support return type conversion +convert(::Type{Future{T}}, x::Future) where {T} = x::Future{T} +function Future{T}(f, immediate::Bool, interp::AbstractInterpreter, sv::AbsIntState) where {T} + if immediate + return Future{T}(f(interp, sv)) + else + @assert applicable(f, interp, sv) + result = Future{T}() + push!(sv.tasks, function (interp, sv) + result[] = f(interp, sv) + return true + end) + return result + end +end +function Future{T}(f, prev::Future{S}, interp::AbstractInterpreter, sv::AbsIntState) where {T, S} + later = prev.later + if later === nothing + return Future{T}(f(prev[], interp, sv)) + else + @assert Core._hasmethod(Tuple{Core.Typeof(f), S, typeof(interp), typeof(sv)}) + result = Future{T}() + push!(sv.tasks, function (interp, sv) + result[] = f(later[], interp, sv) # capture just later, instead of all of prev + return true + end) + return result + end +end + + +""" + doworkloop(args...) + +Run a tasks inside the abstract interpreter, returning false if there are none. +Tasks will be run in DFS post-order tree order, such that all child tasks will +be run in the order scheduled, prior to running any subsequent tasks. This +allows tasks to generate more child tasks, which will be run before anything else. +Each task will be run repeatedly when returning `false`, until it returns `true`. +""" +function doworkloop(interp::AbstractInterpreter, sv::AbsIntState) + tasks = sv.tasks + prev = length(tasks) + prev == 0 && return false + task = pop!(tasks) + completed = task(interp, sv) + tasks = sv.tasks # allow dropping gc root over the previous call + completed isa Bool || throw(TypeError(:return, "", Bool, task)) # print the task on failure as part of the error message, instead of just "@ workloop:line" + completed || push!(tasks, task) + # efficient post-order visitor: items pushed are executed in reverse post order such + # that later items are executed before earlier ones, but are fully executed + # (including any dependencies scheduled by them) before going on to the next item + reverse!(tasks, #=start=#prev) + return true +end + + +#macro workthunk(name::Symbol, body) +# name = esc(name) +# body = esc(body) +# return replace_linenums!( +# :(function $name($(esc(interp)), $(esc(sv))) +# $body +# end), __source__) +#end diff --git a/base/compiler/ssair/ir.jl b/base/compiler/ssair/ir.jl index 960da88ddffc8..fdcb4621c5c0f 100644 --- a/base/compiler/ssair/ir.jl +++ b/base/compiler/ssair/ir.jl @@ -1432,6 +1432,7 @@ function process_node!(compact::IncrementalCompact, result_idx::Int, inst::Instr elseif isa(stmt, OldSSAValue) ssa_rename[idx] = ssa_rename[stmt.id] elseif isa(stmt, GotoNode) && cfg_transforms_enabled + stmt.label < 0 && (println(stmt); println(compact)) label = bb_rename_succ[stmt.label] @assert label > 0 ssa_rename[idx] = SSAValue(result_idx) diff --git a/base/compiler/ssair/irinterp.jl b/base/compiler/ssair/irinterp.jl index 1aeb87accbcd7..ca8ca770df413 100644 --- a/base/compiler/ssair/irinterp.jl +++ b/base/compiler/ssair/irinterp.jl @@ -51,8 +51,11 @@ end function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, irsv::IRInterpretationState) si = StmtInfo(true) # TODO better job here? 
- call = abstract_call(interp, arginfo, si, irsv) - irsv.ir.stmts[irsv.curridx][:info] = call.info + call = abstract_call(interp, arginfo, si, irsv)::Future + Future{Nothing}(call, interp, irsv) do call, interp, irsv + irsv.ir.stmts[irsv.curridx][:info] = call.info + nothing + end return call end @@ -143,7 +146,19 @@ function reprocess_instruction!(interp::AbstractInterpreter, inst::Instruction, head = stmt.head if (head === :call || head === :foreigncall || head === :new || head === :splatnew || head === :static_parameter || head === :isdefined || head === :boundscheck) - (; rt, effects) = abstract_eval_statement_expr(interp, stmt, nothing, irsv) + @assert isempty(irsv.tasks) # TODO: this whole function needs to be converted to a stackless design to be a valid AbsIntState, but this should work here for now + result = abstract_eval_statement_expr(interp, stmt, nothing, irsv) + reverse!(irsv.tasks) + while true + if length(irsv.callstack) > irsv.frameid + typeinf(interp, irsv.callstack[irsv.frameid + 1]) + elseif !doworkloop(interp, irsv) + break + end + end + @assert length(irsv.callstack) == irsv.frameid && isempty(irsv.tasks) + result isa Future && (result = result[]) + (; rt, effects) = result add_flag!(inst, flags_for_effects(effects)) elseif head === :invoke rt, (nothrow, noub) = abstract_eval_invoke_inst(interp, inst, irsv) @@ -293,7 +308,7 @@ function is_all_const_call(@nospecialize(stmt), interp::AbstractInterpreter, irs return true end -function _ir_abstract_constant_propagation(interp::AbstractInterpreter, irsv::IRInterpretationState; +function ir_abstract_constant_propagation(interp::AbstractInterpreter, irsv::IRInterpretationState; externally_refined::Union{Nothing,BitSet} = nothing) (; ir, tpdum, ssa_refined) = irsv @@ -449,18 +464,3 @@ function _ir_abstract_constant_propagation(interp::AbstractInterpreter, irsv::IR return Pair{Any,Tuple{Bool,Bool}}(maybe_singleton_const(ultimate_rt), (nothrow, noub)) end - -function ir_abstract_constant_propagation(interp::NativeInterpreter, irsv::IRInterpretationState) - if __measure_typeinf__[] - inf_frame = Timings.InferenceFrameInfo(irsv.mi, irsv.world, VarState[], Any[], length(irsv.ir.argtypes)) - Timings.enter_new_timer(inf_frame) - ret = _ir_abstract_constant_propagation(interp, irsv) - append!(inf_frame.slottypes, irsv.ir.argtypes) - Timings.exit_current_timer(inf_frame) - return ret - else - return _ir_abstract_constant_propagation(interp, irsv) - end -end -ir_abstract_constant_propagation(interp::AbstractInterpreter, irsv::IRInterpretationState) = - _ir_abstract_constant_propagation(interp, irsv) diff --git a/base/compiler/ssair/verify.jl b/base/compiler/ssair/verify.jl index a4286177e93a4..268991282c483 100644 --- a/base/compiler/ssair/verify.jl +++ b/base/compiler/ssair/verify.jl @@ -1,9 +1,11 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license function maybe_show_ir(ir::IRCode) - if isdefined(Core, :Main) + if isdefined(Core, :Main) && isdefined(Core.Main, :Base) # ensure we use I/O that does not yield, as this gets called during compilation invokelatest(Core.Main.Base.show, Core.stdout, "text/plain", ir) + else + Core.show(ir) end end @@ -25,6 +27,7 @@ is_toplevel_expr_head(head::Symbol) = head === :global || head === :method || he is_value_pos_expr_head(head::Symbol) = head === :static_parameter function check_op(ir::IRCode, domtree::DomTree, @nospecialize(op), use_bb::Int, use_idx::Int, printed_use_idx::Int, print::Bool, isforeigncall::Bool, arg_idx::Int, allow_frontend_forms::Bool) if isa(op, SSAValue) + op.id > 0 || @verify_error "Def ($(op.id)) is invalid in final IR" if op.id > length(ir.stmts) def_bb = block_for_inst(ir.cfg, ir.new_nodes.info[op.id - length(ir.stmts)].pos) else diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index cc8ba227bd088..a6b7e53c6f320 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -1383,10 +1383,10 @@ end nargs = length(argtypes) if !isempty(argtypes) && isvarargtype(argtypes[nargs]) - nargs - 1 <= maxargs || return CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo()) - nargs + 1 >= op_argi || return CallMeta(Any, Any, Effects(), NoCallInfo()) + nargs - 1 <= maxargs || return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) + nargs + 1 >= op_argi || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) else - minargs <= nargs <= maxargs || return CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo()) + minargs <= nargs <= maxargs || return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) end 𝕃ᵢ = typeinf_lattice(interp) if ff === modifyfield! @@ -1417,15 +1417,22 @@ end op = unwrapva(argtypes[op_argi]) v = unwrapva(argtypes[v_argi]) callinfo = abstract_call(interp, ArgInfo(nothing, Any[op, TF, v]), StmtInfo(true), sv, #=max_methods=#1) - TF2 = tmeet(callinfo.rt, widenconst(TF)) - if TF2 === Bottom - RT = Bottom - elseif isconcretetype(RT) && has_nontrivial_extended_info(𝕃ᵢ, TF2) # isconcrete condition required to form a PartialStruct - RT = PartialStruct(RT, Any[TF, TF2]) + TF = Core.Box(TF) + RT = Core.Box(RT) + return Future{CallMeta}(callinfo, interp, sv) do callinfo, interp, sv + TF = TF.contents + RT = RT.contents + TF2 = tmeet(callinfo.rt, widenconst(TF)) + if TF2 === Bottom + RT = Bottom + elseif isconcretetype(RT) && has_nontrivial_extended_info(𝕃ᵢ, TF2) # isconcrete condition required to form a PartialStruct + RT = PartialStruct(RT, Any[TF, TF2]) + end + info = ModifyOpInfo(callinfo.info) + return CallMeta(RT, Any, Effects(), info) end - info = ModifyOpInfo(callinfo.info) end - return CallMeta(RT, Any, Effects(), info) + return Future(CallMeta(RT, Any, Effects(), info)) end # we could use tuple_tfunc instead of widenconst, but `o` is mutable, so that is unlikely to be beneficial @@ -2895,17 +2902,17 @@ end function return_type_tfunc(interp::AbstractInterpreter, argtypes::Vector{Any}, si::StmtInfo, sv::AbsIntState) UNKNOWN = CallMeta(Type, Any, Effects(EFFECTS_THROWS; nortcall=false), NoCallInfo()) if !(2 <= length(argtypes) <= 3) - return UNKNOWN + return Future(UNKNOWN) end tt = widenslotwrapper(argtypes[end]) if !isa(tt, Const) && !(isType(tt) && !has_free_typevars(tt)) - return UNKNOWN + return Future(UNKNOWN) end af_argtype = isa(tt, Const) ? 
tt.val : (tt::DataType).parameters[1] if !isa(af_argtype, DataType) || !(af_argtype <: Tuple) - return UNKNOWN + return Future(UNKNOWN) end if length(argtypes) == 3 @@ -2918,7 +2925,7 @@ function return_type_tfunc(interp::AbstractInterpreter, argtypes::Vector{Any}, s end if !(isa(aft, Const) || (isType(aft) && !has_free_typevars(aft)) || (isconcretetype(aft) && !(aft <: Builtin) && !iskindtype(aft))) - return UNKNOWN + return Future(UNKNOWN) end # effects are not an issue if we know this statement will get removed, but if it does not get removed, @@ -2926,7 +2933,7 @@ function return_type_tfunc(interp::AbstractInterpreter, argtypes::Vector{Any}, s RT_CALL_EFFECTS = Effects(EFFECTS_TOTAL; nortcall=false) if contains_is(argtypes_vec, Union{}) - return CallMeta(Const(Union{}), Union{}, RT_CALL_EFFECTS, NoCallInfo()) + return Future(CallMeta(Const(Union{}), Union{}, RT_CALL_EFFECTS, NoCallInfo())) end # Run the abstract_call without restricting abstract call @@ -2935,42 +2942,45 @@ function return_type_tfunc(interp::AbstractInterpreter, argtypes::Vector{Any}, s if isa(sv, InferenceState) old_restrict = sv.restrict_abstract_call_sites sv.restrict_abstract_call_sites = false - call = abstract_call(interp, ArgInfo(nothing, argtypes_vec), si, sv, #=max_methods=#-1) - sv.restrict_abstract_call_sites = old_restrict - else - call = abstract_call(interp, ArgInfo(nothing, argtypes_vec), si, sv, #=max_methods=#-1) - end - info = verbose_stmt_info(interp) ? MethodResultPure(ReturnTypeCallInfo(call.info)) : MethodResultPure() - rt = widenslotwrapper(call.rt) - if isa(rt, Const) - # output was computed to be constant - return CallMeta(Const(typeof(rt.val)), Union{}, RT_CALL_EFFECTS, info) - end - rt = widenconst(rt) - if rt === Bottom || (isconcretetype(rt) && !iskindtype(rt)) - # output cannot be improved so it is known for certain - return CallMeta(Const(rt), Union{}, RT_CALL_EFFECTS, info) - elseif isa(sv, InferenceState) && !isempty(sv.pclimitations) - # conservatively express uncertainty of this result - # in two ways: both as being a subtype of this, and - # because of LimitedAccuracy causes - return CallMeta(Type{<:rt}, Union{}, RT_CALL_EFFECTS, info) - elseif isa(tt, Const) || isconstType(tt) - # input arguments were known for certain - # XXX: this doesn't imply we know anything about rt - return CallMeta(Const(rt), Union{}, RT_CALL_EFFECTS, info) - elseif isType(rt) - return CallMeta(Type{rt}, Union{}, RT_CALL_EFFECTS, info) - else - return CallMeta(Type{<:rt}, Union{}, RT_CALL_EFFECTS, info) + end + call = abstract_call(interp, ArgInfo(nothing, argtypes_vec), si, sv, #=max_methods=#-1) + tt = Core.Box(tt) + return Future{CallMeta}(call, interp, sv) do call, interp, sv + if isa(sv, InferenceState) + sv.restrict_abstract_call_sites = old_restrict + end + info = verbose_stmt_info(interp) ? 
MethodResultPure(ReturnTypeCallInfo(call.info)) : MethodResultPure() + rt = widenslotwrapper(call.rt) + if isa(rt, Const) + # output was computed to be constant + return CallMeta(Const(typeof(rt.val)), Union{}, RT_CALL_EFFECTS, info) + end + rt = widenconst(rt) + if rt === Bottom || (isconcretetype(rt) && !iskindtype(rt)) + # output cannot be improved so it is known for certain + return CallMeta(Const(rt), Union{}, RT_CALL_EFFECTS, info) + elseif isa(sv, InferenceState) && !isempty(sv.pclimitations) + # conservatively express uncertainty of this result + # in two ways: both as being a subtype of this, and + # because of LimitedAccuracy causes + return CallMeta(Type{<:rt}, Union{}, RT_CALL_EFFECTS, info) + elseif isa(tt.contents, Const) || isconstType(tt.contents) + # input arguments were known for certain + # XXX: this doesn't imply we know anything about rt + return CallMeta(Const(rt), Union{}, RT_CALL_EFFECTS, info) + elseif isType(rt) + return CallMeta(Type{rt}, Union{}, RT_CALL_EFFECTS, info) + else + return CallMeta(Type{<:rt}, Union{}, RT_CALL_EFFECTS, info) + end end end # a simplified model of abstract_call_gf_by_type for applicable function abstract_applicable(interp::AbstractInterpreter, argtypes::Vector{Any}, sv::AbsIntState, max_methods::Int) - length(argtypes) < 2 && return CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo()) - isvarargtype(argtypes[2]) && return CallMeta(Bool, Any, EFFECTS_THROWS, NoCallInfo()) + length(argtypes) < 2 && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) + isvarargtype(argtypes[2]) && return Future(CallMeta(Bool, Any, EFFECTS_THROWS, NoCallInfo())) argtypes = argtypes[2:end] atype = argtypes_to_type(argtypes) matches = find_method_matches(interp, argtypes, atype; max_methods) @@ -2997,7 +3007,7 @@ function abstract_applicable(interp::AbstractInterpreter, argtypes::Vector{Any}, # added that did not intersect with any existing method add_uncovered_edges!(sv, matches, atype) end - return CallMeta(rt, Union{}, EFFECTS_TOTAL, NoCallInfo()) + return Future(CallMeta(rt, Union{}, EFFECTS_TOTAL, NoCallInfo())) end add_tfunc(applicable, 1, INT_INF, @nospecs((𝕃::AbstractLattice, f, args...)->Bool), 40) diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index 315a068e611fe..77a2e02129ce4 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -56,7 +56,7 @@ end Timing(mi_info, start_time, cur_start_time, time, children) = Timing(mi_info, start_time, cur_start_time, time, children, nothing) Timing(mi_info, start_time) = Timing(mi_info, start_time, start_time, UInt64(0), Timing[]) -_time_ns() = ccall(:jl_hrtime, UInt64, ()) # Re-implemented here because Base not yet available. +_time_ns() = ccall(:jl_hrtime, UInt64, ()) # We keep a stack of the Timings for each of the MethodInstances currently being timed. # Since type inference currently operates via a depth-first search (during abstract @@ -77,114 +77,14 @@ const ROOTmi = Core.Compiler.specialize_method( Empty out the previously recorded type inference timings (`Core.Compiler._timings`), and start the ROOT() timer again. `ROOT()` measures all time spent _outside_ inference. """ -function reset_timings() - empty!(_timings) - push!(_timings, Timing( - # The MethodInstance for ROOT(), and default empty values for other fields. 
- InferenceFrameInfo(ROOTmi, 0x0, Core.Compiler.VarState[], Any[Core.Const(ROOT)], 1), - _time_ns())) - return nothing -end -reset_timings() - -# (This is split into a function so that it can be called both in this module, at the top -# of `enter_new_timer()`, and once at the Very End of the operation, by whoever started -# the operation and called `reset_timings()`.) -# NOTE: the @inline annotations here are not to make it faster, but to reduce the gap between -# timer manipulations and the tasks we're timing. -@inline function close_current_timer() - stop_time = _time_ns() - parent_timer = _timings[end] - accum_time = stop_time - parent_timer.cur_start_time - - # Add in accum_time ("modify" the immutable struct) - @inbounds begin - _timings[end] = Timing( - parent_timer.mi_info, - parent_timer.start_time, - parent_timer.cur_start_time, - parent_timer.time + accum_time, - parent_timer.children, - parent_timer.bt, - ) - end - return nothing -end - -@inline function enter_new_timer(frame) - # Very first thing, stop the active timer: get the current time and add in the - # time since it was last started to its aggregate exclusive time. - close_current_timer() - - mi_info = _typeinf_identifier(frame) - - # Start the new timer right before returning - push!(_timings, Timing(mi_info, UInt64(0))) - len = length(_timings) - new_timer = @inbounds _timings[len] - # Set the current time _after_ appending the node, to try to exclude the - # overhead from measurement. - start = _time_ns() - - @inbounds begin - _timings[len] = Timing( - new_timer.mi_info, - start, - start, - new_timer.time, - new_timer.children, - ) - end - - return nothing -end - -# _expected_frame_ is not needed within this function; it is used in the `@assert`, to -# assert that indeed we are always returning to a parent after finishing all of its -# children (that is, asserting that inference proceeds via depth-first-search). -@inline function exit_current_timer(_expected_frame_) - # Finish the new timer - stop_time = _time_ns() - - expected_mi_info = _typeinf_identifier(_expected_frame_) - - # Grab the new timer again because it might have been modified in _timings - # (since it's an immutable struct) - # And remove it from the current timings stack - new_timer = pop!(_timings) - Core.Compiler.@assert new_timer.mi_info.mi === expected_mi_info.mi - - # Prepare to unwind one level of the stack and record in the parent - parent_timer = _timings[end] - - accum_time = stop_time - new_timer.cur_start_time - # Add in accum_time ("modify" the immutable struct) - new_timer = Timing( - new_timer.mi_info, - new_timer.start_time, - new_timer.cur_start_time, - new_timer.time + accum_time, - new_timer.children, - parent_timer.mi_info.mi === ROOTmi ? backtrace() : nothing, - ) - # Record the final timing with the original parent timer - push!(parent_timer.children, new_timer) - - # And finally restart the parent timer: - len = length(_timings) - @inbounds begin - _timings[len] = Timing( - parent_timer.mi_info, - parent_timer.start_time, - _time_ns(), - parent_timer.time, - parent_timer.children, - parent_timer.bt, - ) - end - - return nothing -end +function reset_timings() end +push!(_timings, Timing( + # The MethodInstance for ROOT(), and default empty values for other fields. 
+ InferenceFrameInfo(ROOTmi, 0x0, Core.Compiler.VarState[], Any[Core.Const(ROOT)], 1), + _time_ns())) +function close_current_timer() end +function enter_new_timer(frame) end +function exit_current_timer(_expected_frame_) end end # module Timings @@ -194,19 +94,7 @@ end # module Timings If set to `true`, record per-method-instance timings within type inference in the Compiler. """ __set_measure_typeinf(onoff::Bool) = __measure_typeinf__[] = onoff -const __measure_typeinf__ = fill(false) - -# Wrapper around `_typeinf` that optionally records the exclusive time for each invocation. -function typeinf(interp::AbstractInterpreter, frame::InferenceState) - if __measure_typeinf__[] - Timings.enter_new_timer(frame) - v = _typeinf(interp, frame) - Timings.exit_current_timer(frame) - return v - else - return _typeinf(interp, frame) - end -end +const __measure_typeinf__ = RefValue{Bool}(false) function finish!(interp::AbstractInterpreter, caller::InferenceState; can_discard_trees::Bool=may_discard_trees(interp)) @@ -258,19 +146,6 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState; return nothing end -function _typeinf(interp::AbstractInterpreter, frame::InferenceState) - typeinf_nocycle(interp, frame) || return false # frame is now part of a higher cycle - # with no active ip's, frame is done - frames = frame.callstack::Vector{AbsIntState} - if length(frames) == frame.cycleid - finish_nocycle(interp, frame) - else - @assert frame.cycleid != 0 - finish_cycle(interp, frames, frame.cycleid) - end - return true -end - function finish_nocycle(::AbstractInterpreter, frame::InferenceState) finishinfer!(frame, frame.interp) opt = frame.result.src @@ -762,16 +637,11 @@ function merge_call_chain!(interp::AbstractInterpreter, parent::InferenceState, add_cycle_backedge!(parent, child) parent.cycleid === ancestorid && break child = parent - parent = frame_parent(child) - while !isa(parent, InferenceState) - # XXX we may miss some edges here? - parent = frame_parent(parent::IRInterpretationState) - end + parent = frame_parent(child)::InferenceState end # ensure that walking the callstack has the same cycleid (DAG) for frame = reverse(ancestorid:length(frames)) - frame = frames[frame] - frame isa InferenceState || continue + frame = frames[frame]::InferenceState frame.cycleid == ancestorid && break @assert frame.cycleid > ancestorid frame.cycleid = ancestorid @@ -796,9 +666,9 @@ end # returned instead. function resolve_call_cycle!(interp::AbstractInterpreter, mi::MethodInstance, parent::AbsIntState) # TODO (#48913) implement a proper recursion handling for irinterp: - # This works just because currently the `:terminate` condition guarantees that - # irinterp doesn't fail into unresolved cycles, but it's not a good solution. - # We should revisit this once we have a better story for handling cycles in irinterp. + # This works currently just because the irinterp code doesn't get used much with + # `@assume_effects`, so it never sees a cycle normally, but that may not be a sustainable solution. 
+ parent isa InferenceState || return false frames = parent.callstack::Vector{AbsIntState} uncached = false for frame = reverse(1:length(frames)) @@ -837,15 +707,43 @@ struct EdgeCallResult end # return cached result of regular inference -function return_cached_result(::AbstractInterpreter, codeinst::CodeInstance, caller::AbsIntState) +function return_cached_result(interp::AbstractInterpreter, method::Method, codeinst::CodeInstance, caller::AbsIntState, edgecycle::Bool, edgelimited::Bool) rt = cached_return_type(codeinst) effects = ipo_effects(codeinst) update_valid_age!(caller, WorldRange(min_world(codeinst), max_world(codeinst))) - return EdgeCallResult(rt, codeinst.exctype, codeinst.def, effects) + return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, EdgeCallResult(rt, codeinst.exctype, codeinst.def, effects), edgecycle, edgelimited)) +end + +function EdgeCall_to_MethodCall_Result(interp::AbstractInterpreter, sv::AbsIntState, method::Method, result::EdgeCallResult, edgecycle::Bool, edgelimited::Bool) + (; rt, exct, edge, effects, volatile_inf_result) = result + + if edge === nothing + edgecycle = edgelimited = true + end + + # we look for the termination effect override here as well, since the :terminates effect + # may have been tainted due to recursion at this point even if it's overridden + if is_effect_overridden(sv, :terminates_globally) + # this frame is known to terminate + effects = Effects(effects, terminates=true) + elseif is_effect_overridden(method, :terminates_globally) + # this edge is known to terminate + effects = Effects(effects; terminates=true) + elseif edgecycle + # Some sort of recursion was detected. + if edge !== nothing && !edgelimited && !is_edge_recursed(edge, sv) + # no `MethodInstance` cycles -- don't taint :terminate + else + # we cannot guarantee that the call will terminate + effects = Effects(effects; terminates=false) + end + end + + return MethodCallResult(rt, exct, edgecycle, edgelimited, edge, effects, volatile_inf_result) end # compute (and cache) an inferred AST and return the current best estimate of the result type -function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize(atype), sparams::SimpleVector, caller::AbsIntState) +function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize(atype), sparams::SimpleVector, caller::AbsIntState, edgecycle::Bool, edgelimited::Bool) mi = specialize_method(method, atype, sparams)::MethodInstance cache_mode = CACHE_MODE_GLOBAL # cache edge targets globally by default force_inline = is_stmt_inline(get_curr_ssaflag(caller)) @@ -859,13 +757,13 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize cache_mode = CACHE_MODE_VOLATILE else @assert codeinst.def === mi "MethodInstance for cached edge does not match" - return return_cached_result(interp, codeinst, caller) + return return_cached_result(interp, method, codeinst, caller, edgecycle, edgelimited) end end end if ccall(:jl_get_module_infer, Cint, (Any,), method.module) == 0 && !generating_output(#=incremental=#false) add_remark!(interp, caller, "[typeinf_edge] Inference is disabled for the target module") - return EdgeCallResult(Any, Any, nothing, Effects()) + return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, EdgeCallResult(Any, Any, nothing, Effects()), edgecycle, edgelimited)) end if !is_cached(caller) && frame_parent(caller) === nothing # this caller exists to return to the user @@ -886,7 +784,7 @@ function typeinf_edge(interp::AbstractInterpreter, 
method::Method, @nospecialize cache_mode = CACHE_MODE_VOLATILE else @assert codeinst.def === mi "MethodInstance for cached edge does not match" - return return_cached_result(interp, codeinst, caller) + return return_cached_result(interp, method, codeinst, caller, edgecycle, edgelimited) end end end @@ -902,31 +800,40 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize if cache_mode == CACHE_MODE_GLOBAL engine_reject(interp, ci) end - return EdgeCallResult(Any, Any, nothing, Effects()) + return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, EdgeCallResult(Any, Any, nothing, Effects()), edgecycle, edgelimited)) end assign_parentchild!(frame, caller) - typeinf(interp, frame) - update_valid_age!(caller, frame.valid_worlds) - isinferred = is_inferred(frame) - edge = isinferred ? mi : nothing - effects = isinferred ? frame.result.ipo_effects : # effects are adjusted already within `finish` for ipo_effects - adjust_effects(effects_for_cycle(frame.ipo_effects), method) - exc_bestguess = refine_exception_type(frame.exc_bestguess, effects) - # propagate newly inferred source to the inliner, allowing efficient inlining w/o deserialization: - # note that this result is cached globally exclusively, so we can use this local result destructively - volatile_inf_result = isinferred ? VolatileInferenceResult(result) : nothing - return EdgeCallResult(frame.bestguess, exc_bestguess, edge, effects, volatile_inf_result) + # the actual inference task for this edge is going to be scheduled within `typeinf_local` via the callstack queue + # while splitting off the rest of the work for this caller into a separate workq thunk + let mresult = Future{MethodCallResult}() + push!(caller.tasks, function get_infer_result(interp, caller) + update_valid_age!(caller, frame.valid_worlds) + local isinferred = is_inferred(frame) + local edge = isinferred ? mi : nothing + local effects = isinferred ? frame.result.ipo_effects : # effects are adjusted already within `finish` for ipo_effects + adjust_effects(effects_for_cycle(frame.ipo_effects), method) + local exc_bestguess = refine_exception_type(frame.exc_bestguess, effects) + # propagate newly inferred source to the inliner, allowing efficient inlining w/o deserialization: + # note that this result is cached globally exclusively, so we can use this local result destructively + local volatile_inf_result = isinferred ? 
VolatileInferenceResult(result) : nothing + local edgeresult = EdgeCallResult(frame.bestguess, exc_bestguess, edge, effects, volatile_inf_result) + mresult[] = EdgeCall_to_MethodCall_Result(interp, caller, method, edgeresult, edgecycle, edgelimited) + return true + end) + return mresult + end elseif frame === true # unresolvable cycle add_remark!(interp, caller, "[typeinf_edge] Unresolvable cycle") - return EdgeCallResult(Any, Any, nothing, Effects()) + return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, EdgeCallResult(Any, Any, nothing, Effects()), edgecycle, edgelimited)) end # return the current knowledge about this cycle frame = frame::InferenceState update_valid_age!(caller, frame.valid_worlds) effects = adjust_effects(effects_for_cycle(frame.ipo_effects), method) exc_bestguess = refine_exception_type(frame.exc_bestguess, effects) - return EdgeCallResult(frame.bestguess, exc_bestguess, nothing, effects) + edgeresult = EdgeCallResult(frame.bestguess, exc_bestguess, nothing, effects) + return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, edgeresult, edgecycle, edgelimited)) end # The `:terminates` effect bit must be conservatively tainted unless recursion cycle has diff --git a/base/compiler/types.jl b/base/compiler/types.jl index b475e360dac02..c51785f23ea29 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -1,4 +1,12 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +# + +const WorkThunk = Any +# #@eval struct WorkThunk +# thunk::Core.OpaqueClosure{Tuple{Vector{Tasks}}, Bool} +# WorkThunk(work) = new($(Expr(:opaque_closure, :(Tuple{Vector{Tasks}}), :Bool, :Bool, :((tasks) -> work(tasks))))) # @opaque Vector{Tasks}->Bool (tasks)->work(tasks) +# end +# (p::WorkThunk)() = p.thunk() """ AbstractInterpreter diff --git a/base/reflection.jl b/base/reflection.jl index fe48b6f9aa6b9..df29b9a5b1a4e 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -2447,7 +2447,7 @@ true ``` """ function hasmethod(@nospecialize(f), @nospecialize(t)) - return Core._hasmethod(f, t isa Type ? t : to_tuple_type(t)) + return Core._hasmethod(signature_type(f, t)) end function Core.kwcall(kwargs::NamedTuple, ::typeof(hasmethod), @nospecialize(f), @nospecialize(t)) diff --git a/test/compiler/AbstractInterpreter.jl b/test/compiler/AbstractInterpreter.jl index bab4fe02a5168..009128b289ade 100644 --- a/test/compiler/AbstractInterpreter.jl +++ b/test/compiler/AbstractInterpreter.jl @@ -415,10 +415,13 @@ function CC.abstract_call(interp::NoinlineInterpreter, arginfo::CC.ArgInfo, si::CC.StmtInfo, sv::CC.InferenceState, max_methods::Int) ret = @invoke CC.abstract_call(interp::CC.AbstractInterpreter, arginfo::CC.ArgInfo, si::CC.StmtInfo, sv::CC.InferenceState, max_methods::Int) - if sv.mod in noinline_modules(interp) - return CC.CallMeta(ret.rt, ret.exct, ret.effects, NoinlineCallInfo(ret.info)) + return CC.Future{CC.CallMeta}(ret, interp, sv) do ret, interp, sv + if sv.mod in noinline_modules(interp) + (;rt, exct, effects, info) = ret + return CC.CallMeta(rt, exct, effects, NoinlineCallInfo(info)) + end + return ret end - return ret end function CC.src_inlining_policy(interp::NoinlineInterpreter, @nospecialize(src), @nospecialize(info::CallInfo), stmt_flag::UInt32) diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index 46009e0790942..7c7726413004a 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -3887,113 +3887,6 @@ f_apply_cglobal(args...) = cglobal(args...) 
f37532(T, x) = (Core.bitcast(Ptr{T}, x); x) @test Base.return_types(f37532, Tuple{Any, Int}) == Any[Int] -# PR #37749 -# Helper functions for Core.Compiler.Timings. These are normally accessed via a package - -# usually (SnoopCompileCore). -function time_inference(f) - Core.Compiler.Timings.reset_timings() - Core.Compiler.__set_measure_typeinf(true) - f() - Core.Compiler.__set_measure_typeinf(false) - Core.Compiler.Timings.close_current_timer() - return Core.Compiler.Timings._timings[1] -end -function depth(t::Core.Compiler.Timings.Timing) - maximum(depth.(t.children), init=0) + 1 -end -function flatten_times(t::Core.Compiler.Timings.Timing) - collect(Iterators.flatten([(t.time => t.mi_info,), flatten_times.(t.children)...])) -end -# Some very limited testing of timing the type inference (#37749). -@testset "Core.Compiler.Timings" begin - # Functions that call each other - @eval module M1 - i(x) = x+5 - i2(x) = x+2 - h(a::Array) = i2(a[1]::Integer) + i(a[1]::Integer) + 2 - g(y::Integer, x) = h(Any[y]) + Int(x) - end - timing1 = time_inference() do - @eval M1.g(2, 3.0) - end - @test occursin(r"Core.Compiler.Timings.Timing\(InferenceFrameInfo for Core.Compiler.Timings.ROOT\(\)\) with \d+ children", sprint(show, timing1)) - # The last two functions to be inferred should be `i` and `i2`, inferred at runtime with - # their concrete types. - @test sort([mi_info.mi.def.name for (time,mi_info) in flatten_times(timing1)[end-1:end]]) == [:i, :i2] - @test all(child->isa(child.bt, Vector), timing1.children) - @test all(child->child.bt===nothing, timing1.children[1].children) - # Test the stacktrace - @test isa(stacktrace(timing1.children[1].bt), Vector{Base.StackTraces.StackFrame}) - # Test that inference has cached some of the Method Instances - timing2 = time_inference() do - @eval M1.g(2, 3.0) - end - @test length(flatten_times(timing2)) < length(flatten_times(timing1)) - # Printing of InferenceFrameInfo for mi.def isa Module - @eval module M2 - i(x) = x+5 - i2(x) = x+2 - h(a::Array) = i2(a[1]::Integer) + i(a[1]::Integer) + 2 - g(y::Integer, x) = h(Any[y]) + Int(x) - end - # BEGIN LINE NUMBER SENSITIVITY (adjust the line offset below as needed) - timingmod = time_inference() do - @eval @testset "Outer" begin - @testset "Inner" begin - for i = 1:2 M2.g(2, 3.0) end - end - end - end - @test occursin("thunk from $(@__MODULE__) starting at $(@__FILE__):$((@__LINE__) - 6)", string(timingmod.children)) - # END LINE NUMBER SENSITIVITY - - # Recursive function - @eval module _Recursive f(n::Integer) = n == 0 ? 0 : f(n-1) + 1 end - timing = time_inference() do - @eval _Recursive.f(Base.inferencebarrier(5)) - end - @test 2 <= depth(timing) <= 3 # root -> f (-> +) - @test 2 <= length(flatten_times(timing)) <= 3 # root, f, + - - # Functions inferred with multiple constants - @eval module C - i(x) = x === 0 ? 
0 : 1 / x - a(x) = i(0) * i(x) - b() = i(0) * i(1) * i(0) - function loopc(n) - s = 0 - for i = 1:n - s += i - end - return s - end - call_loopc() = loopc(5) - myfloor(::Type{T}, x) where T = floor(T, x) - d(x) = myfloor(Int16, x) - end - timing = time_inference() do - @eval C.a(2) - @eval C.b() - @eval C.call_loopc() - @eval C.d(3.2) - end - ft = flatten_times(timing) - @test !isempty(ft) - str = sprint(show, ft) - @test occursin("InferenceFrameInfo for /(1::$Int, ::$Int)", str) # inference constants - @test occursin("InferenceFrameInfo for Core.Compiler.Timings.ROOT()", str) # qualified - # loopc has internal slots, check constant printing in this case - sel = filter(ti -> ti.second.mi.def.name === :loopc, ft) - ifi = sel[end].second - @test length(ifi.slottypes) > ifi.nargs - str = sprint(show, sel) - @test occursin("InferenceFrameInfo for $(@__MODULE__).C.loopc(5::$Int)", str) - # check that types aren't double-printed as `T::Type{T}` - sel = filter(ti -> ti.second.mi.def.name === :myfloor, ft) - str = sprint(show, sel) - @test occursin("InferenceFrameInfo for $(@__MODULE__).C.myfloor(::Type{Int16}, ::Float64)", str) -end - # issue #37638 @test only(Base.return_types(() -> (nothing, Any[]...)[2])) isa Type From 1bd610f9ab9dd6e2145d1731c0fb8f7e84208876 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Tue, 1 Oct 2024 13:26:56 +0900 Subject: [PATCH 093/537] optimizer: simplify the finalizer inlining pass a bit (#55934) Minor adjustments have been made to the algorithm of the finalizer inlining pass. Previously, it required that the finalizer registration dominate all uses, but this is not always necessary as far as the finalizer inlining point dominates all the uses. So the check has been relaxed. Other minor fixes have been made as well, but their importance is low. --- base/compiler/optimize.jl | 2 +- base/compiler/ssair/inlining.jl | 1 - base/compiler/ssair/passes.jl | 103 +++++++++++++------------------- test/compiler/inline.jl | 2 - 4 files changed, 42 insertions(+), 66 deletions(-) diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index 6b0cf981930ad..1971b47323f5d 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -647,7 +647,7 @@ end function refine_effects!(interp::AbstractInterpreter, sv::PostOptAnalysisState) if !is_effect_free(sv.result.ipo_effects) && sv.all_effect_free && !isempty(sv.ea_analysis_pending) ir = sv.ir - nargs = length(ir.argtypes) + nargs = let def = sv.result.linfo.def; isa(def, Method) ? 
Int(def.nargs) : 0; end estate = EscapeAnalysis.analyze_escapes(ir, nargs, optimizer_lattice(interp), GetNativeEscapeCache(interp)) argescapes = EscapeAnalysis.ArgEscapeCache(estate) stack_analysis_result!(sv.result, argescapes) diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index 727e015b67062..9f250b156cd2f 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -1597,7 +1597,6 @@ function handle_finalizer_call!(ir::IRCode, idx::Int, stmt::Expr, info::Finalize push!(stmt.args, item1.invoke) elseif isa(item1, ConstantCase) push!(stmt.args, nothing) - push!(stmt.args, item1.val) end end return nothing diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl index 37d79e2bd7b0c..3981f7382d707 100644 --- a/base/compiler/ssair/passes.jl +++ b/base/compiler/ssair/passes.jl @@ -1564,10 +1564,12 @@ end is_nothrow(ir::IRCode, ssa::SSAValue) = has_flag(ir[ssa], IR_FLAG_NOTHROW) -function reachable_blocks(cfg::CFG, from_bb::Int, to_bb::Union{Nothing,Int} = nothing) +function reachable_blocks(cfg::CFG, from_bb::Int, to_bb::Int) worklist = Int[from_bb] visited = BitSet(from_bb) - if to_bb !== nothing + if to_bb == from_bb + return visited + else push!(visited, to_bb) end function visit!(bb::Int) @@ -1582,100 +1584,78 @@ function reachable_blocks(cfg::CFG, from_bb::Int, to_bb::Union{Nothing,Int} = no return visited end -function try_resolve_finalizer!(ir::IRCode, idx::Int, finalizer_idx::Int, defuse::SSADefUse, +function try_resolve_finalizer!(ir::IRCode, alloc_idx::Int, finalizer_idx::Int, defuse::SSADefUse, inlining::InliningState, lazydomtree::LazyDomtree, lazypostdomtree::LazyPostDomtree, @nospecialize(info::CallInfo)) # For now, require that: # 1. The allocation dominates the finalizer registration - # 2. The finalizer registration dominates all uses reachable from the - # finalizer registration. - # 3. The insertion block for the finalizer is the post-dominator of all - # uses and the finalizer registration block. The insertion block must - # be dominated by the finalizer registration block. - # 4. The path from the finalizer registration to the finalizer inlining + # 2. The insertion block for the finalizer is the post-dominator of all + # uses (including the finalizer registration). + # 3. The path from the finalizer registration to the finalizer inlining # location is nothrow # - # TODO: We could relax item 3, by inlining the finalizer multiple times. + # TODO: We could relax the check 2, by inlining the finalizer multiple times. # Check #1: The allocation dominates the finalizer registration domtree = get!(lazydomtree) finalizer_bb = block_for_inst(ir, finalizer_idx) - alloc_bb = block_for_inst(ir, idx) + alloc_bb = block_for_inst(ir, alloc_idx) dominates(domtree, alloc_bb, finalizer_bb) || return nothing - bb_insert_block::Int = finalizer_bb - bb_insert_idx::Union{Int,Nothing} = finalizer_idx - function note_block_use!(usebb::Int, useidx::Int) - new_bb_insert_block = nearest_common_dominator(get!(lazypostdomtree), - bb_insert_block, usebb) - if new_bb_insert_block == bb_insert_block && bb_insert_idx !== nothing - bb_insert_idx = max(bb_insert_idx::Int, useidx) - elseif new_bb_insert_block == usebb - bb_insert_idx = useidx + # Check #2: The insertion block for the finalizer is the post-dominator of all uses + insert_bb::Int = finalizer_bb + insert_idx::Union{Int,Nothing} = finalizer_idx + function note_defuse!(x::Union{Int,SSAUse}) + defuse_idx = x isa SSAUse ? 
x.idx : x + defuse_idx == finalizer_idx && return nothing + defuse_bb = block_for_inst(ir, defuse_idx) + new_insert_bb = nearest_common_dominator(get!(lazypostdomtree), + insert_bb, defuse_bb) + if new_insert_bb == insert_bb && insert_idx !== nothing + insert_idx = max(insert_idx::Int, defuse_idx) + elseif new_insert_bb == defuse_bb + insert_idx = defuse_idx else - bb_insert_idx = nothing + insert_idx = nothing end - bb_insert_block = new_bb_insert_block + insert_bb = new_insert_bb nothing end - - # Collect all reachable blocks between the finalizer registration and the - # insertion point - blocks = reachable_blocks(ir.cfg, finalizer_bb, alloc_bb) - - # Check #2 - function check_defuse(x::Union{Int,SSAUse}) - duidx = x isa SSAUse ? x.idx : x - duidx == finalizer_idx && return true - bb = block_for_inst(ir, duidx) - # Not reachable from finalizer registration - we're ok - bb ∉ blocks && return true - note_block_use!(bb, duidx) - if dominates(domtree, finalizer_bb, bb) - return true - else - return false - end - end - all(check_defuse, defuse.uses) || return nothing - all(check_defuse, defuse.defs) || return nothing - bb_insert_block != 0 || return nothing # verify post-dominator of all uses exists - - # Check #3 - dominates(domtree, finalizer_bb, bb_insert_block) || return nothing + foreach(note_defuse!, defuse.uses) + foreach(note_defuse!, defuse.defs) + insert_bb != 0 || return nothing # verify post-dominator of all uses exists if !OptimizationParams(inlining.interp).assume_fatal_throw # Collect all reachable blocks between the finalizer registration and the # insertion point - blocks = finalizer_bb == bb_insert_block ? Int[finalizer_bb] : - reachable_blocks(ir.cfg, finalizer_bb, bb_insert_block) + blocks = reachable_blocks(ir.cfg, finalizer_bb, insert_bb) - # Check #4 - function check_range_nothrow(ir::IRCode, s::Int, e::Int) + # Check #3 + function check_range_nothrow(s::Int, e::Int) return all(s:e) do sidx::Int sidx == finalizer_idx && return true - sidx == idx && return true + sidx == alloc_idx && return true return is_nothrow(ir, SSAValue(sidx)) end end for bb in blocks range = ir.cfg.blocks[bb].stmts s, e = first(range), last(range) - if bb == bb_insert_block - bb_insert_idx === nothing && continue - e = bb_insert_idx + if bb == insert_bb + insert_idx === nothing && continue + e = insert_idx end if bb == finalizer_bb s = finalizer_idx end - check_range_nothrow(ir, s, e) || return nothing + check_range_nothrow(s, e) || return nothing end end # Ok, legality check complete. Figure out the exact statement where we're # going to inline the finalizer. - loc = bb_insert_idx === nothing ? first(ir.cfg.blocks[bb_insert_block].stmts) : bb_insert_idx::Int - attach_after = bb_insert_idx !== nothing + loc = insert_idx === nothing ? first(ir.cfg.blocks[insert_bb].stmts) : insert_idx::Int + attach_after = insert_idx !== nothing finalizer_stmt = ir[SSAValue(finalizer_idx)][:stmt] argexprs = Any[finalizer_stmt.args[2], finalizer_stmt.args[3]] @@ -1702,11 +1682,10 @@ function try_resolve_finalizer!(ir::IRCode, idx::Int, finalizer_idx::Int, defuse return nothing end -function sroa_mutables!(ir::IRCode, defuses::IdDict{Int, Tuple{SPCSet, SSADefUse}}, used_ssas::Vector{Int}, lazydomtree::LazyDomtree, inlining::Union{Nothing, InliningState}) +function sroa_mutables!(ir::IRCode, defuses::IdDict{Int,Tuple{SPCSet,SSADefUse}}, used_ssas::Vector{Int}, lazydomtree::LazyDomtree, inlining::Union{Nothing,InliningState}) 𝕃ₒ = inlining === nothing ? 
SimpleInferenceLattice.instance : optimizer_lattice(inlining.interp) lazypostdomtree = LazyPostDomtree(ir) for (defidx, (intermediaries, defuse)) in defuses - intermediaries = collect(intermediaries) # Check if there are any uses we did not account for. If so, the variable # escapes and we cannot eliminate the allocation. This works, because we're guaranteed # not to include any intermediaries that have dead uses. As a result, missing uses will only ever @@ -1906,7 +1885,7 @@ function sroa_mutables!(ir::IRCode, defuses::IdDict{Int, Tuple{SPCSet, SSADefUse end end -function form_new_preserves(origex::Expr, intermediates::Vector{Int}, new_preserves::Vector{Any}) +function form_new_preserves(origex::Expr, intermediaries::Union{Vector{Int},SPCSet}, new_preserves::Vector{Any}) newex = Expr(:foreigncall) nccallargs = length(origex.args[3]::SimpleVector) for i in 1:(6+nccallargs-1) @@ -1915,7 +1894,7 @@ function form_new_preserves(origex::Expr, intermediates::Vector{Int}, new_preser for i in (6+nccallargs):length(origex.args) x = origex.args[i] # don't need to preserve intermediaries - if isa(x, SSAValue) && x.id in intermediates + if isa(x, SSAValue) && x.id in intermediaries continue end push!(newex.args, x) diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl index 80c8ddbb08c69..fceb920352482 100644 --- a/test/compiler/inline.jl +++ b/test/compiler/inline.jl @@ -1570,7 +1570,6 @@ let @test get_finalization_count() == 1000 end - function cfg_finalization7(io) for i = -999:1000 o = DoAllocWithField(0) @@ -1597,7 +1596,6 @@ let @test get_finalization_count() == 1000 end - # optimize `[push!|pushfirst!](::Vector{Any}, x...)` @testset "optimize `$f(::Vector{Any}, x...)`" for f = Any[push!, pushfirst!] @eval begin From 06e7b9d292ed4ced5b523fe94daef30332eabbd3 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 1 Oct 2024 10:13:29 +0530 Subject: [PATCH 094/537] Limit `@inbounds` to indexing in the dual-iterator branch in `copyto_unaliased!` (#55919) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This simplifies the `copyto_unalised!` implementation where the source and destination have different `IndexStyle`s, and limits the `@inbounds` to only the indexing operation. In particular, the iteration over `eachindex(dest)` is not marked as `@inbounds` anymore. This seems to help with performance when the destination uses Cartesian indexing. 
Reduced implementation of the branch: ```julia function copyto_proposed!(dest, src) axes(dest) == axes(src) || throw(ArgumentError("incompatible sizes")) iterdest, itersrc = eachindex(dest), eachindex(src) for (destind, srcind) in zip(iterdest, itersrc) @inbounds dest[destind] = src[srcind] end dest end function copyto_current!(dest, src) axes(dest) == axes(src) || throw(ArgumentError("incompatible sizes")) iterdest, itersrc = eachindex(dest), eachindex(src) ret = iterate(iterdest) @inbounds for a in src idx, state = ret::NTuple{2,Any} dest[idx] = a ret = iterate(iterdest, state) end dest end function copyto_current_limitinbounds!(dest, src) axes(dest) == axes(src) || throw(ArgumentError("incompatible sizes")) iterdest, itersrc = eachindex(dest), eachindex(src) ret = iterate(iterdest) for isrc in itersrc idx, state = ret::NTuple{2,Any} @inbounds dest[idx] = src[isrc] ret = iterate(iterdest, state) end dest end ``` ```julia julia> a = zeros(40000,4000); b = rand(size(a)...); julia> av = view(a, UnitRange.(axes(a))...); julia> @btime copyto_current!($av, $b); 617.704 ms (0 allocations: 0 bytes) julia> @btime copyto_current_limitinbounds!($av, $b); 304.146 ms (0 allocations: 0 bytes) julia> @btime copyto_proposed!($av, $b); 240.217 ms (0 allocations: 0 bytes) julia> versioninfo() Julia Version 1.12.0-DEV.1260 Commit 4a4ca9c8152 (2024-09-28 01:49 UTC) Build Info: Official https://julialang.org release Platform Info: OS: Linux (x86_64-linux-gnu) CPU: 8 × Intel(R) Core(TM) i5-10310U CPU @ 1.70GHz WORD_SIZE: 64 LLVM: libLLVM-18.1.7 (ORCJIT, skylake) Threads: 1 default, 0 interactive, 1 GC (on 8 virtual cores) Environment: JULIA_EDITOR = subl ``` I'm not quite certain why the proposed implementation here (`copyto_proposed!`) is even faster than `copyto_current_limitinbounds!`. In any case, `copyto_proposed!` is easier to read, so I'm not complaining. This fixes https://github.com/JuliaLang/julia/issues/53158 --- base/abstractarray.jl | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/base/abstractarray.jl b/base/abstractarray.jl index 754ab20660ab8..e877a87c2cdd1 100644 --- a/base/abstractarray.jl +++ b/base/abstractarray.jl @@ -1101,11 +1101,8 @@ function copyto_unaliased!(deststyle::IndexStyle, dest::AbstractArray, srcstyle: end else # Dual-iterator implementation - ret = iterate(iterdest) - @inbounds for a in src - idx, state = ret::NTuple{2,Any} - dest[idx] = a - ret = iterate(iterdest, state) + for (Idest, Isrc) in zip(iterdest, itersrc) + @inbounds dest[Idest] = src[Isrc] end end end From 1cfda3f9b1a88c8f6069b2cec03fbc957f3ccd3f Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 1 Oct 2024 18:10:42 +0530 Subject: [PATCH 095/537] Strong zero in Diagonal triple multiplication (#55927) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, triple multiplication with a `LinearAlgebra.BandedMatrix` sandwiched between two `Diagonal`s isn't associative, as this is implemented using broadcasting, which doesn't assume a strong zero, whereas the two-term matrix multiplication does. 
```julia julia> D = Diagonal(StepRangeLen(NaN, 0, 3)); julia> B = Bidiagonal(1:3, 1:2, :U); julia> D * B * D 3×3 Matrix{Float64}: NaN NaN NaN NaN NaN NaN NaN NaN NaN julia> (D * B) * D 3×3 Bidiagonal{Float64, Vector{Float64}}: NaN NaN ⋅ ⋅ NaN NaN ⋅ ⋅ NaN julia> D * (B * D) 3×3 Bidiagonal{Float64, Vector{Float64}}: NaN NaN ⋅ ⋅ NaN NaN ⋅ ⋅ NaN ``` This PR ensures that the 3-term multiplication is evaluated as a sequence of two-term multiplications, which fixes this issue. This also improves performance, as only the bands need to be evaluated now. ```julia julia> D = Diagonal(1:1000); B = Bidiagonal(1:1000, 1:999, :U); julia> @btime $D * $B * $D; 656.364 μs (11 allocations: 7.63 MiB) # v"1.12.0-DEV.1262" 2.483 μs (12 allocations: 31.50 KiB) # This PR ``` --- stdlib/LinearAlgebra/src/special.jl | 2 ++ stdlib/LinearAlgebra/test/diagonal.jl | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/stdlib/LinearAlgebra/src/special.jl b/stdlib/LinearAlgebra/src/special.jl index 5a7c98cfdf32c..32a5476842933 100644 --- a/stdlib/LinearAlgebra/src/special.jl +++ b/stdlib/LinearAlgebra/src/special.jl @@ -112,6 +112,8 @@ for op in (:+, :-) end end +(*)(Da::Diagonal, A::BandedMatrix, Db::Diagonal) = _tri_matmul(Da, A, Db) + # disambiguation between triangular and banded matrices, banded ones "dominate" _mul!(C::AbstractMatrix, A::AbstractTriangular, B::BandedMatrix, alpha::Number, beta::Number) = @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index dfb901908ba69..98f5498c71033 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -1265,6 +1265,17 @@ end @test *(Diagonal(ones(n)), Diagonal(1:n), Diagonal(ones(n)), Diagonal(1:n)) isa Diagonal end +@testset "triple multiplication with a sandwiched BandedMatrix" begin + D = Diagonal(StepRangeLen(NaN, 0, 4)); + B = Bidiagonal(1:4, 1:3, :U) + C = D * B * D + @test iszero(diag(C, 2)) + # test associativity + C1 = (D * B) * D + C2 = D * (B * D) + @test diag(C,2) == diag(C1,2) == diag(C2,2) +end + @testset "diagind" begin D = Diagonal(1:4) M = Matrix(D) From c3b7573c756ee1e6752f34fa8f1dce77bff4d6b7 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 1 Oct 2024 18:38:34 +0530 Subject: [PATCH 096/537] Fix dispatch on `alg` in Float16 Hermitian eigen (#55928) Currently, ```julia julia> using LinearAlgebra julia> A = Hermitian(reshape(Float16[1:16;], 4, 4)); julia> eigen(A).values |> typeof Vector{Float16} (alias for Array{Float16, 1}) julia> eigen(A, LinearAlgebra.QRIteration()).values |> typeof Vector{Float32} (alias for Array{Float32, 1}) ``` This PR moves the specialization on the `eltype` to an internal method, so that firstly all `alg`s dispatch to that method, and secondly, there are no ambiguities introduce by specializing the top-level `eigen`. The latter currently causes test failures in `StaticArrays` (https://github.com/JuliaArrays/StaticArrays.jl/actions/runs/11092206012/job/30816955210?pr=1279), and should be fixed by this PR. 
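With the eltype specialization moved to the internal `_eigen`, the element type is preserved for every `alg`. A REPL sketch mirroring the test added below, reusing the matrix from the example above:

```julia
julia> using LinearAlgebra

julia> A = Hermitian(reshape(Float16[1:16;], 4, 4));

julia> eigen(A, LinearAlgebra.QRIteration()).values |> typeof
Vector{Float16} (alias for Array{Float16, 1})
```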
--- stdlib/LinearAlgebra/src/symmetriceigen.jl | 19 ++++++++++++------- stdlib/LinearAlgebra/test/symmetriceigen.jl | 5 +++++ 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/stdlib/LinearAlgebra/src/symmetriceigen.jl b/stdlib/LinearAlgebra/src/symmetriceigen.jl index fee524a702187..68a1b29f5dbc7 100644 --- a/stdlib/LinearAlgebra/src/symmetriceigen.jl +++ b/stdlib/LinearAlgebra/src/symmetriceigen.jl @@ -20,13 +20,6 @@ function eigen!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, alg::Algo throw(ArgumentError("Unsupported value for `alg` keyword.")) end end -function eigen(A::RealHermSymComplexHerm{Float16}; sortby::Union{Function,Nothing}=nothing) - S = eigtype(eltype(A)) - E = eigen!(eigencopy_oftype(A, S), sortby=sortby) - values = convert(AbstractVector{Float16}, E.values) - vectors = convert(AbstractMatrix{isreal(E.vectors) ? Float16 : Complex{Float16}}, E.vectors) - return Eigen(values, vectors) -end """ eigen(A::Union{Hermitian, Symmetric}, alg::Algorithm = default_eigen_alg(A)) -> Eigen @@ -53,10 +46,22 @@ The default `alg` used may change in the future. The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). """ function eigen(A::RealHermSymComplexHerm, alg::Algorithm = default_eigen_alg(A); sortby::Union{Function,Nothing}=nothing) + _eigen(A, alg; sortby) +end + +# we dispatch on the eltype in an internal method to avoid ambiguities +function _eigen(A::RealHermSymComplexHerm, alg::Algorithm; sortby) S = eigtype(eltype(A)) eigen!(eigencopy_oftype(A, S), alg; sortby) end +function _eigen(A::RealHermSymComplexHerm{Float16}, alg::Algorithm; sortby::Union{Function,Nothing}=nothing) + S = eigtype(eltype(A)) + E = eigen!(eigencopy_oftype(A, S), alg, sortby=sortby) + values = convert(AbstractVector{Float16}, E.values) + vectors = convert(AbstractMatrix{isreal(E.vectors) ? Float16 : Complex{Float16}}, E.vectors) + return Eigen(values, vectors) +end eigen!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = Eigen(LAPACK.syevr!('V', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)...) diff --git a/stdlib/LinearAlgebra/test/symmetriceigen.jl b/stdlib/LinearAlgebra/test/symmetriceigen.jl index d55d1deb6bf33..71087ae4d8d24 100644 --- a/stdlib/LinearAlgebra/test/symmetriceigen.jl +++ b/stdlib/LinearAlgebra/test/symmetriceigen.jl @@ -171,6 +171,11 @@ end @test D isa Eigen{ComplexF16, Float16, Matrix{ComplexF16}, Vector{Float16}} @test D.values ≈ D32.values @test D.vectors ≈ D32.vectors + + # ensure that different algorithms dispatch correctly + λ, V = eigen(C, LinearAlgebra.QRIteration()) + @test λ isa Vector{Float16} + @test C * V ≈ V * Diagonal(λ) end @testset "complex Symmetric" begin From 4eb2e4787f67437d18738cff491f5aa4de6a6c03 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 1 Oct 2024 19:11:28 +0530 Subject: [PATCH 097/537] Remove specialized `ishermitian` method for `Diagonal{<:Real}` (#55948) The fallback method for `Diagonal{<:Number}` handles this already by checking that the `diag` is real, so we don't need this additional specialization. 
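For a real-valued diagonal the `Diagonal{<:Number}` fallback gives the same answer through its `isreal(D.diag)` check, so callers see no behavioural change. A small illustrative check:

```julia
julia> using LinearAlgebra

julia> ishermitian(Diagonal([1.0, 2.0, 3.0]))  # handled by the Diagonal{<:Number} fallback
true
```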
--- stdlib/LinearAlgebra/src/diagonal.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index d762549a2b228..0a95bac5ffb93 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -227,7 +227,6 @@ Base._reverse(A::Diagonal, dims) = reverse!(Matrix(A); dims) Base._reverse(A::Diagonal, ::Colon) = Diagonal(reverse(A.diag)) Base._reverse!(A::Diagonal, ::Colon) = (reverse!(A.diag); A) -ishermitian(D::Diagonal{<:Real}) = true ishermitian(D::Diagonal{<:Number}) = isreal(D.diag) ishermitian(D::Diagonal) = all(ishermitian, D.diag) issymmetric(D::Diagonal{<:Number}) = true From 81ce6a41d737f15d8bbc2788190dcb5565e20b8b Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 1 Oct 2024 10:32:57 -0400 Subject: [PATCH 098/537] Fix logic in `?` docstring example (#55945) --- base/docs/basedocs.jl | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index e28b3a21659a8..a142ecffdb732 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -937,11 +937,14 @@ expression, rather than the side effects that evaluating `b` or `c` may have. See the manual section on [control flow](@ref man-conditional-evaluation) for more details. # Examples -``` +```jldoctest julia> x = 1; y = 2; -julia> x > y ? println("x is larger") : println("y is larger") -y is larger +julia> x > y ? println("x is larger") : println("x is not larger") +x is not larger + +julia> x > y ? "x is larger" : x == y ? "x and y are equal" : "y is larger" +"y is larger" ``` """ kw"?", kw"?:" From cf8df9a7a056d02d1953f1bb8d07946cc1ec6876 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Tue, 1 Oct 2024 23:41:09 +0900 Subject: [PATCH 099/537] fix `unwrap_macrocalls` (#55950) The implementation of `unwrap_macrocalls` has assumed that what `:macrocall` wraps is always an `Expr` object, but that is not necessarily correct: ```julia julia> Base.@assume_effects :nothrow @show 42 ERROR: LoadError: TypeError: in typeassert, expected Expr, got a value of type Int64 Stacktrace: [1] unwrap_macrocalls(ex::Expr) @ Base ./expr.jl:906 [2] var"@assume_effects"(__source__::LineNumberNode, __module__::Module, args::Vararg{Any}) @ Base ./expr.jl:756 in expression starting at REPL[1]:1 ``` This commit addresses this issue. --- base/expr.jl | 4 ++-- test/core.jl | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/base/expr.jl b/base/expr.jl index c4f64b89de8b6..478ccd7d7cc20 100644 --- a/base/expr.jl +++ b/base/expr.jl @@ -902,8 +902,8 @@ end unwrap_macrocalls(@nospecialize(x)) = x function unwrap_macrocalls(ex::Expr) inner = ex - while inner.head === :macrocall - inner = inner.args[end]::Expr + while isexpr(inner, :macrocall) + inner = inner.args[end] end return inner end diff --git a/test/core.jl b/test/core.jl index 1395817d8615e..62fde5261bfd3 100644 --- a/test/core.jl +++ b/test/core.jl @@ -8293,3 +8293,14 @@ end # to properly give error messages for basic kwargs... 
Core.eval(Core.Compiler, quote issue50174(;a=1) = a end) @test_throws MethodError Core.Compiler.issue50174(;b=2) + +let s = mktemp() do path, io + xxx = 42 + redirect_stdout(io) do + Base.@assume_effects :nothrow @show xxx + end + flush(io) + read(path, String) + end + @test strip(s) == "xxx = 42" +end From 75393f6618782c87d4b321bb587b375c0d52326a Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 1 Oct 2024 10:42:30 -0400 Subject: [PATCH 100/537] make faster BigFloats (#55906) We can coalesce the two required allocations for the MFPR BigFloat API design into one allocation, hopefully giving a easy performance boost. It would have been slightly easier and more efficient if MPFR BigFloat was already a VLA instead of containing a pointer here, but that does not prevent the optimization. --- base/Base.jl | 1 - base/mpfr.jl | 161 ++++++++++++++++-------- base/{rawbigints.jl => rawbigfloats.jl} | 68 ++++------ stdlib/Random/src/generation.jl | 2 +- test/dict.jl | 2 +- test/mpfr.jl | 6 +- 6 files changed, 138 insertions(+), 102 deletions(-) rename base/{rawbigints.jl => rawbigfloats.jl} (58%) diff --git a/base/Base.jl b/base/Base.jl index 10a8dd1532f92..23633f0b5138b 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -306,7 +306,6 @@ end include("hashing.jl") include("rounding.jl") include("div.jl") -include("rawbigints.jl") include("float.jl") include("twiceprecision.jl") include("complex.jl") diff --git a/base/mpfr.jl b/base/mpfr.jl index d393469aa26a1..9d1a0843ebe06 100644 --- a/base/mpfr.jl +++ b/base/mpfr.jl @@ -18,12 +18,10 @@ import setrounding, maxintfloat, widen, significand, frexp, tryparse, iszero, isone, big, _string_n, decompose, minmax, _precision_with_base_2, sinpi, cospi, sincospi, tanpi, sind, cosd, tand, asind, acosd, atand, - uinttype, exponent_max, exponent_min, ieee754_representation, significand_mask, - RawBigIntRoundingIncrementHelper, truncated, RawBigInt - + uinttype, exponent_max, exponent_min, ieee754_representation, significand_mask using .Base.Libc -import ..Rounding: +import ..Rounding: Rounding, rounding_raw, setrounding_raw, rounds_to_nearest, rounds_away_from_zero, tie_breaker_is_to_even, correct_rounding_requires_increment @@ -39,7 +37,6 @@ else const libmpfr = "libmpfr.so.6" end - version() = VersionNumber(unsafe_string(ccall((:mpfr_get_version,libmpfr), Ptr{Cchar}, ()))) patches() = split(unsafe_string(ccall((:mpfr_get_patches,libmpfr), Ptr{Cchar}, ())),' ') @@ -120,44 +117,116 @@ const mpfr_special_exponent_zero = typemin(Clong) + true const mpfr_special_exponent_nan = mpfr_special_exponent_zero + true const mpfr_special_exponent_inf = mpfr_special_exponent_nan + true +struct BigFloatLayout + prec::Clong + sign::Cint + exp::Clong + d::Ptr{Limb} + # possible padding + p::Limb # Tuple{Vararg{Limb}} +end +const offset_prec = fieldoffset(BigFloatLayout, 1) % Int +const offset_sign = fieldoffset(BigFloatLayout, 2) % Int +const offset_exp = fieldoffset(BigFloatLayout, 3) % Int +const offset_d = fieldoffset(BigFloatLayout, 4) % Int +const offset_p_limbs = ((fieldoffset(BigFloatLayout, 5) % Int + sizeof(Limb) - 1) ÷ sizeof(Limb)) +const offset_p = offset_p_limbs * sizeof(Limb) + """ BigFloat <: AbstractFloat Arbitrary precision floating point number type. 
""" -mutable struct BigFloat <: AbstractFloat - prec::Clong - sign::Cint - exp::Clong - d::Ptr{Limb} - # _d::Buffer{Limb} # Julia gc handle for memory @ d - _d::String # Julia gc handle for memory @ d (optimized) +struct BigFloat <: AbstractFloat + d::Memory{Limb} # Not recommended for general use: # used internally by, e.g. deepcopy - global function _BigFloat(prec::Clong, sign::Cint, exp::Clong, d::String) - # ccall-based version, inlined below - #z = new(zero(Clong), zero(Cint), zero(Clong), C_NULL, d) - #ccall((:mpfr_custom_init,libmpfr), Cvoid, (Ptr{Limb}, Clong), d, prec) # currently seems to be a no-op in mpfr - #NAN_KIND = Cint(0) - #ccall((:mpfr_custom_init_set,libmpfr), Cvoid, (Ref{BigFloat}, Cint, Clong, Ptr{Limb}), z, NAN_KIND, prec, d) - #return z - return new(prec, sign, exp, pointer(d), d) - end + global _BigFloat(d::Memory{Limb}) = new(d) function BigFloat(; precision::Integer=_precision_with_base_2(BigFloat)) precision < 1 && throw(DomainError(precision, "`precision` cannot be less than 1.")) nb = ccall((:mpfr_custom_get_size,libmpfr), Csize_t, (Clong,), precision) - nb = (nb + Core.sizeof(Limb) - 1) ÷ Core.sizeof(Limb) # align to number of Limb allocations required for this - #d = Vector{Limb}(undef, nb) - d = _string_n(nb * Core.sizeof(Limb)) - EXP_NAN = mpfr_special_exponent_nan - return _BigFloat(Clong(precision), one(Cint), EXP_NAN, d) # +NAN + nl = (nb + offset_p + sizeof(Limb) - 1) ÷ Core.sizeof(Limb) # align to number of Limb allocations required for this + d = Memory{Limb}(undef, nl % Int) + # ccall-based version, inlined below + z = _BigFloat(d) # initialize to +NAN + #ccall((:mpfr_custom_init,libmpfr), Cvoid, (Ptr{Limb}, Clong), BigFloatData(d), prec) # currently seems to be a no-op in mpfr + #NAN_KIND = Cint(0) + #ccall((:mpfr_custom_init_set,libmpfr), Cvoid, (Ref{BigFloat}, Cint, Clong, Ptr{Limb}), z, NAN_KIND, prec, BigFloatData(d)) + z.prec = Clong(precision) + z.sign = one(Cint) + z.exp = mpfr_special_exponent_nan + return z end end -# The rounding mode here shouldn't matter. -significand_limb_count(x::BigFloat) = div(sizeof(x._d), sizeof(Limb), RoundToZero) +""" +Segment of raw words of bits interpreted as a big integer. Less +significant words come first. Each word is in machine-native bit-order. 
+""" +struct BigFloatData{Limb} + d::Memory{Limb} +end + +# BigFloat interface +@inline function Base.getproperty(x::BigFloat, s::Symbol) + d = getfield(x, :d) + p = Base.unsafe_convert(Ptr{Limb}, d) + if s === :prec + return GC.@preserve d unsafe_load(Ptr{Clong}(p) + offset_prec) + elseif s === :sign + return GC.@preserve d unsafe_load(Ptr{Cint}(p) + offset_sign) + elseif s === :exp + return GC.@preserve d unsafe_load(Ptr{Clong}(p) + offset_exp) + elseif s === :d + return BigFloatData(d) + else + return throw(FieldError(typeof(x), s)) + end +end + +@inline function Base.setproperty!(x::BigFloat, s::Symbol, v) + d = getfield(x, :d) + p = Base.unsafe_convert(Ptr{Limb}, d) + if s === :prec + return GC.@preserve d unsafe_store!(Ptr{Clong}(p) + offset_prec, v) + elseif s === :sign + return GC.@preserve d unsafe_store!(Ptr{Cint}(p) + offset_sign, v) + elseif s === :exp + return GC.@preserve d unsafe_store!(Ptr{Clong}(p) + offset_exp, v) + #elseif s === :d # not mutable + else + return throw(FieldError(x, s)) + end +end + +# Ref interface: make sure the conversion to C is done properly +Base.unsafe_convert(::Type{Ref{BigFloat}}, x::Ptr{BigFloat}) = error("not compatible with mpfr") +Base.unsafe_convert(::Type{Ref{BigFloat}}, x::Ref{BigFloat}) = error("not compatible with mpfr") +Base.cconvert(::Type{Ref{BigFloat}}, x::BigFloat) = x.d # BigFloatData is the Ref type for BigFloat +function Base.unsafe_convert(::Type{Ref{BigFloat}}, x::BigFloatData) + d = getfield(x, :d) + p = Base.unsafe_convert(Ptr{Limb}, d) + GC.@preserve d unsafe_store!(Ptr{Ptr{Limb}}(p) + offset_d, p + offset_p, :monotonic) # :monotonic ensure that TSAN knows that this isn't a data race + return Ptr{BigFloat}(p) +end +Base.unsafe_convert(::Type{Ptr{Limb}}, fd::BigFloatData) = Base.unsafe_convert(Ptr{Limb}, getfield(fd, :d)) + offset_p +function Base.setindex!(fd::BigFloatData, v, i) + d = getfield(fd, :d) + @boundscheck 1 <= i <= length(d) - offset_p_limbs || throw(BoundsError(fd, i)) + @inbounds d[i + offset_p_limbs] = v + return fd +end +function Base.getindex(fd::BigFloatData, i) + d = getfield(fd, :d) + @boundscheck 1 <= i <= length(d) - offset_p_limbs || throw(BoundsError(fd, i)) + @inbounds d[i + offset_p_limbs] +end +Base.length(fd::BigFloatData) = length(getfield(fd, :d)) - offset_p_limbs +Base.copyto!(fd::BigFloatData, limbs) = copyto!(getfield(fd, :d), offset_p_limbs + 1, limbs) # for Random + +include("rawbigfloats.jl") rounding_raw(::Type{BigFloat}) = something(Base.ScopedValues.get(CURRENT_ROUNDING_MODE), ROUNDING_MODE[]) setrounding_raw(::Type{BigFloat}, r::MPFRRoundingMode) = ROUNDING_MODE[]=r @@ -165,24 +234,12 @@ function setrounding_raw(f::Function, ::Type{BigFloat}, r::MPFRRoundingMode) Base.ScopedValues.@with(CURRENT_ROUNDING_MODE => r, f()) end - rounding(::Type{BigFloat}) = convert(RoundingMode, rounding_raw(BigFloat)) setrounding(::Type{BigFloat}, r::RoundingMode) = setrounding_raw(BigFloat, convert(MPFRRoundingMode, r)) setrounding(f::Function, ::Type{BigFloat}, r::RoundingMode) = setrounding_raw(f, BigFloat, convert(MPFRRoundingMode, r)) -# overload the definition of unsafe_convert to ensure that `x.d` is assigned -# it may have been dropped in the event that the BigFloat was serialized -Base.unsafe_convert(::Type{Ref{BigFloat}}, x::Ptr{BigFloat}) = x -@inline function Base.unsafe_convert(::Type{Ref{BigFloat}}, x::Ref{BigFloat}) - x = x[] - if x.d == C_NULL - x.d = pointer(x._d) - end - return convert(Ptr{BigFloat}, Base.pointer_from_objref(x)) -end - """ BigFloat(x::Union{Real, AbstractString} [, 
rounding::RoundingMode=rounding(BigFloat)]; [precision::Integer=precision(BigFloat)]) @@ -283,17 +340,18 @@ function BigFloat(x::Float64, r::MPFRRoundingMode=rounding_raw(BigFloat); precis nlimbs = (precision + 8*Core.sizeof(Limb) - 1) ÷ (8*Core.sizeof(Limb)) # Limb is a CLong which is a UInt32 on windows (thank M$) which makes this more complicated and slower. + zd = z.d if Limb === UInt64 for i in 1:nlimbs-1 - unsafe_store!(z.d, 0x0, i) + @inbounds setindex!(zd, 0x0, i) end - unsafe_store!(z.d, val, nlimbs) + @inbounds setindex!(zd, val, nlimbs) else for i in 1:nlimbs-2 - unsafe_store!(z.d, 0x0, i) + @inbounds setindex!(zd, 0x0, i) end - unsafe_store!(z.d, val % UInt32, nlimbs-1) - unsafe_store!(z.d, (val >> 32) % UInt32, nlimbs) + @inbounds setindex!(zd, val % UInt32, nlimbs-1) + @inbounds setindex!(zd, (val >> 32) % UInt32, nlimbs) end z end @@ -440,12 +498,12 @@ function to_ieee754(::Type{T}, x::BigFloat, rm) where {T<:AbstractFloat} ret_u = if is_regular & !rounds_to_inf & !rounds_to_zero if !exp_is_huge_p # significand - v = RawBigInt{Limb}(x._d, significand_limb_count(x)) + v = x.d::BigFloatData len = max(ieee_precision + min(exp_diff, 0), 0)::Int signif = truncated(U, v, len) & significand_mask(T) # round up if necessary - rh = RawBigIntRoundingIncrementHelper(v, len) + rh = BigFloatDataRoundingIncrementHelper(v, len) incr = correct_rounding_requires_increment(rh, rm, sb) # exponent @@ -1193,10 +1251,8 @@ set_emin!(x) = check_exponent_err(ccall((:mpfr_set_emin, libmpfr), Cint, (Clong, function Base.deepcopy_internal(x::BigFloat, stackdict::IdDict) get!(stackdict, x) do - # d = copy(x._d) - d = x._d - d′ = GC.@preserve d unsafe_string(pointer(d), sizeof(d)) # creates a definitely-new String - y = _BigFloat(x.prec, x.sign, x.exp, d′) + d′ = copy(getfield(x, :d)) + y = _BigFloat(d′) #ccall((:mpfr_custom_move,libmpfr), Cvoid, (Ref{BigFloat}, Ptr{Limb}), y, d) # unnecessary return y end::BigFloat @@ -1210,7 +1266,8 @@ function decompose(x::BigFloat)::Tuple{BigInt, Int, Int} s.size = cld(x.prec, 8*sizeof(Limb)) # limbs b = s.size * sizeof(Limb) # bytes ccall((:__gmpz_realloc2, libgmp), Cvoid, (Ref{BigInt}, Culong), s, 8b) # bits - memcpy(s.d, x.d, b) + xd = x.d + GC.@preserve xd memcpy(s.d, Base.unsafe_convert(Ptr{Limb}, xd), b) s, x.exp - 8b, x.sign end diff --git a/base/rawbigints.jl b/base/rawbigfloats.jl similarity index 58% rename from base/rawbigints.jl rename to base/rawbigfloats.jl index a9bb18e163e2d..4377edfc463d8 100644 --- a/base/rawbigints.jl +++ b/base/rawbigfloats.jl @@ -1,41 +1,21 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -""" -Segment of raw words of bits interpreted as a big integer. Less -significant words come first. Each word is in machine-native bit-order. 
-""" -struct RawBigInt{T<:Unsigned} - d::String - word_count::Int - - function RawBigInt{T}(d::String, word_count::Int) where {T<:Unsigned} - new{T}(d, word_count) - end -end +# Some operations on BigFloat can be done more directly by treating the data portion ("BigFloatData") as a BigInt -elem_count(x::RawBigInt, ::Val{:words}) = x.word_count +elem_count(x::BigFloatData, ::Val{:words}) = length(x) elem_count(x::Unsigned, ::Val{:bits}) = sizeof(x) * 8 -word_length(::RawBigInt{T}) where {T} = elem_count(zero(T), Val(:bits)) -elem_count(x::RawBigInt{T}, ::Val{:bits}) where {T} = word_length(x) * elem_count(x, Val(:words)) +word_length(::BigFloatData{T}) where {T} = elem_count(zero(T), Val(:bits)) +elem_count(x::BigFloatData{T}, ::Val{:bits}) where {T} = word_length(x) * elem_count(x, Val(:words)) reversed_index(n::Int, i::Int) = n - i - 1 reversed_index(x, i::Int, v::Val) = reversed_index(elem_count(x, v), i)::Int -split_bit_index(x::RawBigInt, i::Int) = divrem(i, word_length(x), RoundToZero) - -function get_elem_words_raw(x::RawBigInt{T}, i::Int) where {T} - @boundscheck if (i < 0) || (elem_count(x, Val(:words)) ≤ i) - throw(BoundsError(x, i)) - end - d = x.d - j = i + 1 - (GC.@preserve d unsafe_load(Ptr{T}(pointer(d)), j))::T -end +split_bit_index(x::BigFloatData, i::Int) = divrem(i, word_length(x), RoundToZero) """ `i` is the zero-based index of the wanted word in `x`, starting from the less significant words. """ -function get_elem(x::RawBigInt, i::Int, ::Val{:words}, ::Val{:ascending}) - @inbounds @inline get_elem_words_raw(x, i) +function get_elem(x::BigFloatData{T}, i::Int, ::Val{:words}, ::Val{:ascending}) where {T} + @inbounds return x[i + 1]::T end function get_elem(x, i::Int, v::Val, ::Val{:descending}) @@ -43,9 +23,9 @@ function get_elem(x, i::Int, v::Val, ::Val{:descending}) get_elem(x, j, v, Val(:ascending)) end -word_is_nonzero(x::RawBigInt, i::Int, v::Val) = !iszero(get_elem(x, i, Val(:words), v)) +word_is_nonzero(x::BigFloatData, i::Int, v::Val) = !iszero(get_elem(x, i, Val(:words), v)) -word_is_nonzero(x::RawBigInt, v::Val) = let x = x +word_is_nonzero(x::BigFloatData, v::Val) = let x = x i -> word_is_nonzero(x, i, v) end @@ -53,7 +33,7 @@ end Returns a `Bool` indicating whether the `len` least significant words of `x` are nonzero. """ -function tail_is_nonzero(x::RawBigInt, len::Int, ::Val{:words}) +function tail_is_nonzero(x::BigFloatData, len::Int, ::Val{:words}) any(word_is_nonzero(x, Val(:ascending)), 0:(len - 1)) end @@ -61,7 +41,7 @@ end Returns a `Bool` indicating whether the `len` least significant bits of the `i`-th (zero-based index) word of `x` are nonzero. """ -function tail_is_nonzero(x::RawBigInt, len::Int, i::Int, ::Val{:word}) +function tail_is_nonzero(x::BigFloatData, len::Int, i::Int, ::Val{:word}) !iszero(len) && !iszero(get_elem(x, i, Val(:words), Val(:ascending)) << (word_length(x) - len)) end @@ -70,7 +50,7 @@ end Returns a `Bool` indicating whether the `len` least significant bits of `x` are nonzero. """ -function tail_is_nonzero(x::RawBigInt, len::Int, ::Val{:bits}) +function tail_is_nonzero(x::BigFloatData, len::Int, ::Val{:bits}) if 0 < len word_count, bit_count_in_word = split_bit_index(x, len) tail_is_nonzero(x, bit_count_in_word, word_count, Val(:word)) || @@ -90,7 +70,7 @@ end """ Returns a `Bool` that is the `i`-th (zero-based index) bit of `x`. 
""" -function get_elem(x::RawBigInt, i::Int, ::Val{:bits}, v::Val{:ascending}) +function get_elem(x::BigFloatData, i::Int, ::Val{:bits}, v::Val{:ascending}) vb = Val(:bits) if 0 ≤ i < elem_count(x, vb) word_index, bit_index_in_word = split_bit_index(x, i) @@ -106,7 +86,7 @@ Returns an integer of type `R`, consisting of the `len` most significant bits of `x`. If there are less than `len` bits in `x`, the least significant bits are zeroed. """ -function truncated(::Type{R}, x::RawBigInt, len::Int) where {R<:Integer} +function truncated(::Type{R}, x::BigFloatData, len::Int) where {R<:Integer} ret = zero(R) if 0 < len word_count, bit_count_in_word = split_bit_index(x, len) @@ -116,7 +96,7 @@ function truncated(::Type{R}, x::RawBigInt, len::Int) where {R<:Integer} for w ∈ 0:(word_count - 1) ret <<= k - if w < lenx + if w < lenx # if the output type is larger, truncate turns into zero-extend word = get_elem(x, w, vals...) ret |= R(word) end @@ -124,7 +104,7 @@ function truncated(::Type{R}, x::RawBigInt, len::Int) where {R<:Integer} if !iszero(bit_count_in_word) ret <<= bit_count_in_word - if word_count < lenx + if word_count < lenx # if the output type is larger, truncate turns into zero-extend wrd = get_elem(x, word_count, vals...) ret |= R(wrd >>> (k - bit_count_in_word)) end @@ -133,14 +113,14 @@ function truncated(::Type{R}, x::RawBigInt, len::Int) where {R<:Integer} ret::R end -struct RawBigIntRoundingIncrementHelper{T<:Unsigned} - n::RawBigInt{T} +struct BigFloatDataRoundingIncrementHelper{T<:Unsigned} + n::BigFloatData{T} trunc_len::Int final_bit::Bool round_bit::Bool - function RawBigIntRoundingIncrementHelper{T}(n::RawBigInt{T}, len::Int) where {T<:Unsigned} + function BigFloatDataRoundingIncrementHelper{T}(n::BigFloatData{T}, len::Int) where {T<:Unsigned} vals = (Val(:bits), Val(:descending)) f = get_elem(n, len - 1, vals...) r = get_elem(n, len , vals...) 
@@ -148,15 +128,15 @@ struct RawBigIntRoundingIncrementHelper{T<:Unsigned} end end -function RawBigIntRoundingIncrementHelper(n::RawBigInt{T}, len::Int) where {T<:Unsigned} - RawBigIntRoundingIncrementHelper{T}(n, len) +function BigFloatDataRoundingIncrementHelper(n::BigFloatData{T}, len::Int) where {T<:Unsigned} + BigFloatDataRoundingIncrementHelper{T}(n, len) end -(h::RawBigIntRoundingIncrementHelper)(::Rounding.FinalBit) = h.final_bit +(h::BigFloatDataRoundingIncrementHelper)(::Rounding.FinalBit) = h.final_bit -(h::RawBigIntRoundingIncrementHelper)(::Rounding.RoundBit) = h.round_bit +(h::BigFloatDataRoundingIncrementHelper)(::Rounding.RoundBit) = h.round_bit -function (h::RawBigIntRoundingIncrementHelper)(::Rounding.StickyBit) +function (h::BigFloatDataRoundingIncrementHelper)(::Rounding.StickyBit) v = Val(:bits) n = h.n tail_is_nonzero(n, elem_count(n, v) - h.trunc_len - 1, v) diff --git a/stdlib/Random/src/generation.jl b/stdlib/Random/src/generation.jl index d8bb48d2764d2..b605dff9e5d80 100644 --- a/stdlib/Random/src/generation.jl +++ b/stdlib/Random/src/generation.jl @@ -66,7 +66,7 @@ function _rand!(rng::AbstractRNG, z::BigFloat, sp::SamplerBigFloat) limbs[end] |= Limb_high_bit end z.sign = 1 - GC.@preserve limbs unsafe_copyto!(z.d, pointer(limbs), sp.nlimbs) + copyto!(z.d, limbs) randbool end diff --git a/test/dict.jl b/test/dict.jl index 13c60d5a6a053..909afb3607907 100644 --- a/test/dict.jl +++ b/test/dict.jl @@ -1049,7 +1049,7 @@ Dict(1 => rand(2,3), 'c' => "asdf") # just make sure this does not trigger a dep # issue #26939 d26939 = WeakKeyDict() - (@noinline d -> d[big"1.0" + 1.1] = 1)(d26939) + (@noinline d -> d[big"1" + 1] = 1)(d26939) GC.gc() # primarily to make sure this doesn't segfault @test count(d26939) == 0 @test length(d26939.ht) == 1 diff --git a/test/mpfr.jl b/test/mpfr.jl index 63da732df1c09..c212bdfc92821 100644 --- a/test/mpfr.jl +++ b/test/mpfr.jl @@ -1089,11 +1089,11 @@ end end end -@testset "RawBigInt truncation OOB read" begin +@testset "BigFloatData truncation OOB read" begin @testset "T: $T" for T ∈ (UInt8, UInt16, UInt32, UInt64, UInt128) - v = Base.RawBigInt{T}("a"^sizeof(T), 1) + v = Base.MPFR.BigFloatData{T}(fill(typemax(T), 1 + Base.MPFR.offset_p_limbs)) @testset "bit_count: $bit_count" for bit_count ∈ (0:10:80) - @test Base.truncated(UInt128, v, bit_count) isa Any + @test Base.MPFR.truncated(UInt128, v, bit_count) isa Any end end end From 03f8523f27b55f75e16ff1ef592c2bbb1eafd46c Mon Sep 17 00:00:00 2001 From: Valentin Churavy Date: Tue, 1 Oct 2024 16:53:59 +0200 Subject: [PATCH 101/537] Add propagate_inbounds_meta to atomic genericmemory ops (#55902) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `memoryref(mem, i)` will otherwise emit a boundscheck. 
``` ; │ @ /home/vchuravy/WorkstealingQueues/src/CLL.jl:53 within `setindex_atomic!` @ genericmemory.jl:329 ; │┌ @ boot.jl:545 within `memoryref` %ptls_field = getelementptr inbounds i8, ptr %tls_pgcstack, i64 16 %ptls_load = load ptr, ptr %ptls_field, align 8 %"box::GenericMemoryRef" = call noalias nonnull align 8 dereferenceable(32) ptr @ijl_gc_small_alloc(ptr %ptls_load, i32 552, i32 32, i64 23456076646928) #9 %"box::GenericMemoryRef.tag_addr" = getelementptr inbounds i64, ptr %"box::GenericMemoryRef", i64 -1 store atomic i64 23456076646928, ptr %"box::GenericMemoryRef.tag_addr" unordered, align 8 store ptr %memoryref_data, ptr %"box::GenericMemoryRef", align 8 %.repack8 = getelementptr inbounds { ptr, ptr }, ptr %"box::GenericMemoryRef", i64 0, i32 1 store ptr %memoryref_mem, ptr %.repack8, align 8 call void @ijl_bounds_error_int(ptr nonnull %"box::GenericMemoryRef", i64 %7) unreachable ``` For the Julia code: ```julia function Base.setindex_atomic!(buf::WSBuffer{T}, order::Symbol, val::T, idx::Int64) where T @inbounds Base.setindex_atomic!(buf.buffer, order, val,((idx - 1) & buf.mask) + 1) end ``` from https://github.com/gbaraldi/WorkstealingQueues.jl/blob/0ebc57237cf0c90feedf99e4338577d04b67805b/src/CLL.jl#L41 --- base/genericmemory.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/base/genericmemory.jl b/base/genericmemory.jl index 6537839320206..c4ebbc6ca14e1 100644 --- a/base/genericmemory.jl +++ b/base/genericmemory.jl @@ -320,11 +320,13 @@ end # get, set(once), modify, swap and replace at index, atomically function getindex_atomic(mem::GenericMemory, order::Symbol, i::Int) + @_propagate_inbounds_meta memref = memoryref(mem, i) return memoryrefget(memref, order, @_boundscheck) end function setindex_atomic!(mem::GenericMemory, order::Symbol, val, i::Int) + @_propagate_inbounds_meta T = eltype(mem) memref = memoryref(mem, i) return memoryrefset!( @@ -342,6 +344,7 @@ function setindexonce_atomic!( val, i::Int, ) + @_propagate_inbounds_meta T = eltype(mem) memref = memoryref(mem, i) return Core.memoryrefsetonce!( @@ -354,11 +357,13 @@ function setindexonce_atomic!( end function modifyindex_atomic!(mem::GenericMemory, order::Symbol, op, val, i::Int) + @_propagate_inbounds_meta memref = memoryref(mem, i) return Core.memoryrefmodify!(memref, op, val, order, @_boundscheck) end function swapindex_atomic!(mem::GenericMemory, order::Symbol, val, i::Int) + @_propagate_inbounds_meta T = eltype(mem) memref = memoryref(mem, i) return Core.memoryrefswap!( @@ -377,6 +382,7 @@ function replaceindex_atomic!( desired, i::Int, ) + @_propagate_inbounds_meta T = eltype(mem) memref = memoryref(mem, i) return Core.memoryrefreplace!( From dd310849adbf9f089d7e21c142b513deb8ff7b01 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Tue, 1 Oct 2024 20:56:25 +0200 Subject: [PATCH 102/537] fix rounding mode in construction of `BigFloat` from pi (#55911) The default argument of the method was outdated, reading the global default rounding directly, bypassing the `ScopedValue` stuff. --- base/irrationals.jl | 2 +- test/rounding.jl | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/base/irrationals.jl b/base/irrationals.jl index eafe388162353..b3073c503238a 100644 --- a/base/irrationals.jl +++ b/base/irrationals.jl @@ -216,7 +216,7 @@ function irrational(sym, val, def) esym = esc(sym) qsym = esc(Expr(:quote, sym)) bigconvert = isa(def,Symbol) ? 
quote - function Base.BigFloat(::Irrational{$qsym}, r::MPFR.MPFRRoundingMode=MPFR.ROUNDING_MODE[]; precision=precision(BigFloat)) + function Base.BigFloat(::Irrational{$qsym}, r::MPFR.MPFRRoundingMode=Rounding.rounding_raw(BigFloat); precision=precision(BigFloat)) c = BigFloat(;precision=precision) ccall(($(string("mpfr_const_", def)), :libmpfr), Cint, (Ref{BigFloat}, MPFR.MPFRRoundingMode), c, r) diff --git a/test/rounding.jl b/test/rounding.jl index 76b15ec1d9118..6fad6f62e8dfe 100644 --- a/test/rounding.jl +++ b/test/rounding.jl @@ -470,3 +470,28 @@ end @test prevfloat(f) < i end end + +@testset "π to `BigFloat` with `setrounding`" begin + function irrational_to_big_float(c::AbstractIrrational) + BigFloat(c) + end + + function irrational_to_big_float_with_rounding_mode(c::AbstractIrrational, rm::RoundingMode) + f = () -> irrational_to_big_float(c) + setrounding(f, BigFloat, rm) + end + + function irrational_to_big_float_with_rounding_mode_and_precision(c::AbstractIrrational, rm::RoundingMode, prec::Int) + f = () -> irrational_to_big_float_with_rounding_mode(c, rm) + setprecision(f, BigFloat, prec) + end + + for c ∈ (π, MathConstants.γ, MathConstants.catalan) + for p ∈ 1:40 + @test ( + irrational_to_big_float_with_rounding_mode_and_precision(c, RoundDown, p) < c < + irrational_to_big_float_with_rounding_mode_and_precision(c, RoundUp, p) + ) + end + end +end From a45d701e216139a9ef6d5e1f674e943d18677c8d Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Wed, 2 Oct 2024 17:59:29 +0900 Subject: [PATCH 103/537] fix `nonsetable_type_hint_handler` (#55962) The current implementation is wrong, causing it to display inappropriate hints like the following: ```julia julia> s = Some("foo"); julia> s[] = "bar" ERROR: MethodError: no method matching setindex!(::Some{String}, ::String) The function `setindex!` exists, but no method is defined for this combination of argument types. You attempted to index the type String, rather than an instance of the type. Make sure you create the type using its constructor: d = String([...]) rather than d = String Stacktrace: [1] top-level scope @ REPL[2]:1 ``` --- base/errorshow.jl | 2 +- test/errorshow.jl | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/base/errorshow.jl b/base/errorshow.jl index 9c8aad8b6ee2c..20bdee1de6ec0 100644 --- a/base/errorshow.jl +++ b/base/errorshow.jl @@ -1052,7 +1052,7 @@ function nonsetable_type_hint_handler(io, ex, arg_types, kwargs) print(io, "\nAre you trying to index into an array? For multi-dimensional arrays, separate the indices with commas: ") printstyled(io, "a[1, 2]", color=:cyan) print(io, " rather than a[1][2]") - else isType(T) + elseif isType(T) Tx = T.parameters[1] print(io, "\nYou attempted to index the type $Tx, rather than an instance of the type. Make sure you create the type using its constructor: ") printstyled(io, "d = $Tx([...])", color=:cyan) diff --git a/test/errorshow.jl b/test/errorshow.jl index 3ede370553212..db22fea1131d1 100644 --- a/test/errorshow.jl +++ b/test/errorshow.jl @@ -739,8 +739,7 @@ end pop!(Base.Experimental._hint_handlers[DomainError]) # order is undefined, don't copy this struct ANumber <: Number end -let err_str - err_str = @except_str ANumber()(3 + 4) MethodError +let err_str = @except_str ANumber()(3 + 4) MethodError @test occursin("objects of type $(curmod_prefix)ANumber are not callable", err_str) @test count(==("Maybe you forgot to use an operator such as *, ^, %, / etc. 
?"), split(err_str, '\n')) == 1 # issue 40478 @@ -748,22 +747,25 @@ let err_str @test count(==("Maybe you forgot to use an operator such as *, ^, %, / etc. ?"), split(err_str, '\n')) == 1 end -let err_str - a = [1 2; 3 4]; +let a = [1 2; 3 4]; err_str = @except_str (a[1][2] = 5) MethodError @test occursin("\nAre you trying to index into an array? For multi-dimensional arrays, separate the indices with commas: ", err_str) @test occursin("a[1, 2]", err_str) @test occursin("rather than a[1][2]", err_str) end -let err_str - d = Dict +let d = Dict err_str = @except_str (d[1] = 5) MethodError @test occursin("\nYou attempted to index the type Dict, rather than an instance of the type. Make sure you create the type using its constructor: ", err_str) @test occursin("d = Dict([...])", err_str) @test occursin(" rather than d = Dict", err_str) end +let s = Some("foo") + err_str = @except_str (s[] = "bar") MethodError + @test !occursin("You attempted to index the type String", err_str) +end + # Execute backtrace once before checking formatting, see #38858 backtrace() From fbb3e1175d52abec0ff4ca83d8c9e126d9f8a06b Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Wed, 2 Oct 2024 07:27:35 -0400 Subject: [PATCH 104/537] REPL: make UndefVarError aware of imported modules (#55932) --- base/experimental.jl | 4 ++-- stdlib/REPL/src/REPL.jl | 12 +++++++++++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/base/experimental.jl b/base/experimental.jl index 6e757e9fa0e5f..648b5da0ed9a1 100644 --- a/base/experimental.jl +++ b/base/experimental.jl @@ -319,9 +319,9 @@ function show_error_hints(io, ex, args...) for handler in hinters try @invokelatest handler(io, ex, args...) - catch err + catch tn = typeof(handler).name - @error "Hint-handler $handler for $(typeof(ex)) in $(tn.module) caused an error" + @error "Hint-handler $handler for $(typeof(ex)) in $(tn.module) caused an error" exception=current_exceptions() end end end diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl index 44fe0446240c6..272b907165341 100644 --- a/stdlib/REPL/src/REPL.jl +++ b/stdlib/REPL/src/REPL.jl @@ -74,7 +74,17 @@ end function _UndefVarError_warnfor(io::IO, m::Module, var::Symbol) Base.isbindingresolved(m, var) || return false (Base.isexported(m, var) || Base.ispublic(m, var)) || return false - print(io, "\nHint: a global variable of this name also exists in $m.") + active_mod = Base.active_module() + print(io, "\nHint: ") + if isdefined(active_mod, Symbol(m)) + print(io, "a global variable of this name also exists in $m.") + else + if Symbol(m) == var + print(io, "$m is loaded but not imported in the active module $active_mod.") + else + print(io, "a global variable of this name may be made accessible by importing $m in the current active module $active_mod") + end + end return true end From 5fc582b3fcc8adbd5e4b9a8df790a63fcb7f7a9c Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Thu, 3 Oct 2024 07:17:00 +0900 Subject: [PATCH 105/537] fix test/staged.jl (#55967) In particular, the implementation of `overdub_generator54341` was dangerous. This fixes it up. 
--- test/staged.jl | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/test/staged.jl b/test/staged.jl index aec4a3bf135d3..0112dd73b45f7 100644 --- a/test/staged.jl +++ b/test/staged.jl @@ -381,11 +381,18 @@ let @test length(ir.cfg.blocks) == 1 end +function generate_lambda_ex(world::UInt, source::LineNumberNode, + argnames::Core.SimpleVector, spnames::Core.SimpleVector, + body::Expr) + stub = Core.GeneratedFunctionStub(identity, argnames, spnames) + return stub(world, source, body) +end + # Test that `Core.CachedGenerator` works as expected struct Generator54916 <: Core.CachedGenerator end function (::Generator54916)(world::UInt, source::LineNumberNode, args...) - stub = Core.GeneratedFunctionStub(identity, Core.svec(:doit54916, :func, :arg), Core.svec()) - return stub(world, source, :(func(arg))) + return generate_lambda_ex(world, source, + Core.svec(:doit54916, :func, :arg), Core.svec(), :(func(arg))) end @eval function doit54916(func, arg) $(Expr(:meta, :generated, Generator54916())) @@ -412,8 +419,8 @@ function generator49715(world, source, self, f, tt) sig = Tuple{f, tt.parameters...} mi = Base._which(sig; world) error("oh no") - stub = Core.GeneratedFunctionStub(identity, Core.svec(:methodinstance, :ctx, :x, :f), Core.svec()) - stub(world, source, :(nothing)) + return generate_lambda_ex(world, source, + Core.svec(:doit49715, :f, :tt), Core.svec(), :(nothing)) end @eval function doit49715(f, tt) $(Expr(:meta, :generated, generator49715)) @@ -426,9 +433,10 @@ function overdubbee54341(a, b) a + b end const overdubee_codeinfo54341 = code_lowered(overdubbee54341, Tuple{Any, Any})[1] -function overdub_generator54341(world::UInt, source::LineNumberNode, args...) - if length(args) != 2 - :(error("Wrong number of arguments")) +function overdub_generator54341(world::UInt, source::LineNumberNode, selftype, fargtypes) + if length(fargtypes) != 2 + return generate_lambda_ex(world, source, + Core.svec(:overdub54341, :args), Core.svec(), :(error("Wrong number of arguments"))) else return copy(overdubee_codeinfo54341) end @@ -438,3 +446,4 @@ end $(Expr(:meta, :generated_only)) end @test overdub54341(1, 2) == 3 +@test_throws "Wrong number of arguments" overdub54341(1, 2, 3) From d19bb472fbc92c1d93645426025f77e449cee763 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Wed, 2 Oct 2024 19:46:24 -0400 Subject: [PATCH 106/537] Explicitly store a module's location (#55963) Revise wants to know what file a module's `module` definition is in. Currently it does this by looking at the source location for the implicitly generated `eval` method. This is terrible for two reasons: 1. The method may not exist if the module is a baremodule (which is not particularly common, which is probably why we haven't seen it). 2. The fact that the implicitly generated `eval` method has this location information is an implementation detail that I'd like to get rid of (#55949). This PR adds explicit file/line info to `Module`, so that Revise doesn't have to use the hack anymore. 
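For example (a rough usage sketch; the reported file and line are simply whatever the `module` expression was evaluated from):

```julia
module Foo end  # suppose this definition lives somewhere in a source file

loc = Base.moduleloc(Foo)  # a LineNumberNode pointing at the `module Foo` definition
loc.file  # Symbol naming that file (the module name is used as a placeholder if unknown)
loc.line  # line number of the definition (0 if unknown)
```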
--- base/reflection.jl | 11 +++++++++++ src/jl_exported_funcs.inc | 1 + src/julia.h | 2 ++ src/module.c | 10 ++++++++++ src/staticdata.c | 3 +++ src/toplevel.c | 4 ++++ 6 files changed, 31 insertions(+) diff --git a/base/reflection.jl b/base/reflection.jl index f738ca1a618ae..80eeb4c4efb12 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -76,6 +76,17 @@ function fullname(m::Module) return (fullname(mp)..., mn) end +""" + moduleloc(m::Module) -> LineNumberNode + +Get the location of the `module` definition. +""" +function moduleloc(m::Module) + line = Ref{Int32}(0) + file = ccall(:jl_module_getloc, Ref{Symbol}, (Any, Ref{Int32}), m, line) + return LineNumberNode(Int(line[]), file) +end + """ names(x::Module; all::Bool=false, imported::Bool=false, usings::Bool=false) -> Vector{Symbol} diff --git a/src/jl_exported_funcs.inc b/src/jl_exported_funcs.inc index 0c712ef37cb5b..a00a0171d23b7 100644 --- a/src/jl_exported_funcs.inc +++ b/src/jl_exported_funcs.inc @@ -316,6 +316,7 @@ XX(jl_module_name) \ XX(jl_module_names) \ XX(jl_module_parent) \ + XX(jl_module_getloc) \ XX(jl_module_public) \ XX(jl_module_public_p) \ XX(jl_module_use) \ diff --git a/src/julia.h b/src/julia.h index c6ff729a308eb..ed3d9bf825658 100644 --- a/src/julia.h +++ b/src/julia.h @@ -710,6 +710,8 @@ typedef struct _jl_module_t { struct _jl_module_t *parent; _Atomic(jl_svec_t*) bindings; _Atomic(jl_genericmemory_t*) bindingkeyset; // index lookup by name into bindings + jl_sym_t *file; + int32_t line; // hidden fields: arraylist_t usings; // modules with all bindings potentially imported jl_uuid_t build_id; diff --git a/src/module.c b/src/module.c index a6c05d279f5b0..f4da7e1e994de 100644 --- a/src/module.c +++ b/src/module.c @@ -52,6 +52,8 @@ JL_DLLEXPORT jl_module_t *jl_new_module_(jl_sym_t *name, jl_module_t *parent, ui m->compile = -1; m->infer = -1; m->max_methods = -1; + m->file = name; // Using the name as a placeholder is better than nothing + m->line = 0; m->hash = parent == NULL ? 
bitmix(name->hash, jl_module_type->hash) : bitmix(name->hash, parent->hash); JL_MUTEX_INIT(&m->lock, "module->lock"); @@ -1179,6 +1181,14 @@ jl_module_t *jl_module_root(jl_module_t *m) } } +JL_DLLEXPORT jl_sym_t *jl_module_getloc(jl_module_t *m, int32_t *line) +{ + if (line) { + *line = m->line; + } + return m->file; +} + JL_DLLEXPORT jl_uuid_t jl_module_build_id(jl_module_t *m) { return m->build_id; } JL_DLLEXPORT jl_uuid_t jl_module_uuid(jl_module_t* m) { return m->uuid; } diff --git a/src/staticdata.c b/src/staticdata.c index aa9a16daab7a5..0a8cbe6db7c67 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -1259,6 +1259,9 @@ static void jl_write_module(jl_serializer_state *s, uintptr_t item, jl_module_t jl_atomic_store_relaxed(&newm->bindingkeyset, NULL); arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, bindingkeyset))); arraylist_push(&s->relocs_list, (void*)backref_id(s, jl_atomic_load_relaxed(&m->bindingkeyset), s->link_ids_relocs)); + newm->file = NULL; + arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, file))); + arraylist_push(&s->relocs_list, (void*)backref_id(s, m->file, s->link_ids_relocs)); // write out the usings list memset(&newm->usings._space, 0, sizeof(newm->usings._space)); diff --git a/src/toplevel.c b/src/toplevel.c index 5d17a3fcf89a7..8caa8b086ec00 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -213,6 +213,10 @@ static jl_value_t *jl_eval_module_expr(jl_module_t *parent_module, jl_expr_t *ex form = NULL; } + newm->file = jl_symbol(filename); + jl_gc_wb_knownold(newm, newm->file); + newm->line = lineno; + for (int i = 0; i < jl_array_nrows(exprs); i++) { // process toplevel form ct->world_age = jl_atomic_load_acquire(&jl_world_counter); From 3034fc5e3f76d46e57409b4b098577dfa60cc3fa Mon Sep 17 00:00:00 2001 From: Dennis Hoelgaard Bal <61620837+KronosTheLate@users.noreply.github.com> Date: Thu, 3 Oct 2024 01:56:06 +0200 Subject: [PATCH 107/537] mergewith: add single argument example to docstring (#55964) I ran into this edge case. I though it should be documented. --------- Co-authored-by: Lilith Orion Hafner --- base/abstractdict.jl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/base/abstractdict.jl b/base/abstractdict.jl index 62a5b3ee9e1b0..85a726b4cdbf4 100644 --- a/base/abstractdict.jl +++ b/base/abstractdict.jl @@ -392,6 +392,10 @@ Dict{String, Float64} with 3 entries: julia> ans == mergewith(+)(a, b) true + +julia> mergewith(-, Dict(), Dict(:a=>1)) # Combining function only used if key is present in both +Dict{Any, Any} with 1 entry: + :a => 1 ``` """ mergewith(combine, d::AbstractDict, others::AbstractDict...) = From 77c5875b3cbe85e7fb0bb5a7e796809c901ede95 Mon Sep 17 00:00:00 2001 From: Michael Cho Date: Wed, 2 Oct 2024 20:46:36 -0400 Subject: [PATCH 108/537] [build] avoid libedit linkage and align libccalllazy* SONAMEs (#55968) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While building the 1.11.0-rc4 in Homebrew[^1] in preparation for 1.11.0 release (and to confirm Sequoia successfully builds) I noticed some odd linkage for our Linux builds, which included of: 1. LLVM libraries were linking to `libedit.so`, e.g. ``` Dynamic Section: NEEDED libedit.so.0 NEEDED libz.so.1 NEEDED libzstd.so.1 NEEDED libstdc++.so.6 NEEDED libm.so.6 NEEDED libgcc_s.so.1 NEEDED libc.so.6 NEEDED ld-linux-x86-64.so.2 SONAME libLLVM-16jl.so ``` CMakeCache.txt showed ``` //Use libedit if available. 
LLVM_ENABLE_LIBEDIT:BOOL=ON ``` Which might be overriding `HAVE_LIBEDIT` at https://github.com/JuliaLang/llvm-project/blob/julia-release/16.x/llvm/cmake/config-ix.cmake#L222-L225. So just added `LLVM_ENABLE_LIBEDIT` 2. Wasn't sure if there was a reason for this but `libccalllazy*` had mismatched SONAME: ```console ❯ objdump -p lib/julia/libccalllazy* | rg '\.so' lib/julia/libccalllazybar.so: file format elf64-x86-64 NEEDED ccalllazyfoo.so SONAME ccalllazybar.so lib/julia/libccalllazyfoo.so: file format elf64-x86-64 SONAME ccalllazyfoo.so ``` Modifying this, but can drop if intentional. --- [^1]: https://github.com/Homebrew/homebrew-core/pull/192116 --- deps/llvm.mk | 2 +- src/Makefile | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/deps/llvm.mk b/deps/llvm.mk index 73697069a4fac..3f4bc3e6746f0 100644 --- a/deps/llvm.mk +++ b/deps/llvm.mk @@ -102,7 +102,7 @@ endif LLVM_CMAKE += -DLLVM_TOOLS_INSTALL_DIR=$(call rel_path,$(build_prefix),$(build_depsbindir)) LLVM_CMAKE += -DLLVM_UTILS_INSTALL_DIR=$(call rel_path,$(build_prefix),$(build_depsbindir)) LLVM_CMAKE += -DLLVM_INCLUDE_UTILS=ON -DLLVM_INSTALL_UTILS=ON -LLVM_CMAKE += -DLLVM_BINDINGS_LIST="" -DLLVM_ENABLE_BINDINGS=OFF -DLLVM_INCLUDE_DOCS=Off -DLLVM_ENABLE_TERMINFO=Off -DHAVE_LIBEDIT=Off +LLVM_CMAKE += -DLLVM_BINDINGS_LIST="" -DLLVM_ENABLE_BINDINGS=OFF -DLLVM_INCLUDE_DOCS=Off -DLLVM_ENABLE_TERMINFO=Off -DHAVE_LIBEDIT=Off -DLLVM_ENABLE_LIBEDIT=OFF ifeq ($(LLVM_ASSERTIONS), 1) LLVM_CMAKE += -DLLVM_ENABLE_ASSERTIONS:BOOL=ON endif # LLVM_ASSERTIONS diff --git a/src/Makefile b/src/Makefile index 52e673aa6cc1a..a6b1f433b73ce 100644 --- a/src/Makefile +++ b/src/Makefile @@ -287,10 +287,10 @@ endif $(INSTALL_NAME_CMD)libccalltest.$(SHLIB_EXT) $@ $(build_shlibdir)/libccalllazyfoo.$(SHLIB_EXT): $(SRCDIR)/ccalllazyfoo.c - @$(call PRINT_CC, $(CC) $(JCFLAGS) $(JL_CFLAGS) $(JCPPFLAGS) $(FLAGS) -O3 $< $(fPIC) -shared -o $@ $(LDFLAGS) $(COMMON_LIBPATHS) $(call SONAME_FLAGS,ccalllazyfoo.$(SHLIB_EXT))) + @$(call PRINT_CC, $(CC) $(JCFLAGS) $(JL_CFLAGS) $(JCPPFLAGS) $(FLAGS) -O3 $< $(fPIC) -shared -o $@ $(LDFLAGS) $(COMMON_LIBPATHS) $(call SONAME_FLAGS,libccalllazyfoo.$(SHLIB_EXT))) $(build_shlibdir)/libccalllazybar.$(SHLIB_EXT): $(SRCDIR)/ccalllazybar.c $(build_shlibdir)/libccalllazyfoo.$(SHLIB_EXT) - @$(call PRINT_CC, $(CC) $(JCFLAGS) $(JL_CFLAGS) $(JCPPFLAGS) $(FLAGS) -O3 $< $(fPIC) -shared -o $@ $(LDFLAGS) $(COMMON_LIBPATHS) $(call SONAME_FLAGS,ccalllazybar.$(SHLIB_EXT)) -lccalllazyfoo) + @$(call PRINT_CC, $(CC) $(JCFLAGS) $(JL_CFLAGS) $(JCPPFLAGS) $(FLAGS) -O3 $< $(fPIC) -shared -o $@ $(LDFLAGS) $(COMMON_LIBPATHS) $(call SONAME_FLAGS,libccalllazybar.$(SHLIB_EXT)) -lccalllazyfoo) $(build_shlibdir)/libllvmcalltest.$(SHLIB_EXT): $(SRCDIR)/llvmcalltest.cpp $(LLVM_CONFIG_ABSOLUTE) @$(call PRINT_CC, $(CXX) $(LLVM_CXXFLAGS) $(FLAGS) $(CPPFLAGS) $(CXXFLAGS) -O3 $< $(fPIC) -shared -o $@ $(LDFLAGS) $(COMMON_LIBPATHS) $(NO_WHOLE_ARCHIVE) $(CG_LLVMLINK)) -lpthread From 234baad6c4406819af9778c1d4f753cd15f149a3 Mon Sep 17 00:00:00 2001 From: "David K. Zhang" Date: Thu, 3 Oct 2024 13:26:45 +0000 Subject: [PATCH 109/537] Add missing `copy!(::AbstractMatrix, ::UniformScaling)` method (#55970) Hi everyone! First PR to Julia here. It was noticed in a Slack thread yesterday that `copy!(A, I)` doesn't work, but `copyto!(A, I)` does. This PR adds the missing method for `copy!(::AbstractMatrix, ::UniformScaling)`, which simply defers to `copyto!`, and corresponding tests. I added a `compat` notice for Julia 1.12. 
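In other words, the following now works (a small usage sketch mirroring the added tests):

```julia
using LinearAlgebra

A = Matrix{Float64}(undef, 3, 3)
copy!(A, I)   # previously a MethodError; now equivalent to copyto!(A, I)
A == one(A)   # true: A has been filled with the identity
```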
--------- Co-authored-by: Lilith Orion Hafner --- stdlib/LinearAlgebra/src/uniformscaling.jl | 10 ++++++++++ stdlib/LinearAlgebra/test/uniformscaling.jl | 7 +++++++ 2 files changed, 17 insertions(+) diff --git a/stdlib/LinearAlgebra/src/uniformscaling.jl b/stdlib/LinearAlgebra/src/uniformscaling.jl index b75886b8d99fb..472ea53078f87 100644 --- a/stdlib/LinearAlgebra/src/uniformscaling.jl +++ b/stdlib/LinearAlgebra/src/uniformscaling.jl @@ -403,6 +403,16 @@ function copyto!(A::Tridiagonal, J::UniformScaling) return A end +""" + copy!(dest::AbstractMatrix, src::UniformScaling) + +Copies a [`UniformScaling`](@ref) onto a matrix. + +!!! compat "Julia 1.12" + This method is available as of Julia 1.12. +""" +Base.copy!(A::AbstractMatrix, J::UniformScaling) = copyto!(A, J) + function cond(J::UniformScaling{T}) where T onereal = inv(one(real(J.λ))) return J.λ ≠ zero(T) ? onereal : oftype(onereal, Inf) diff --git a/stdlib/LinearAlgebra/test/uniformscaling.jl b/stdlib/LinearAlgebra/test/uniformscaling.jl index 92547e8648d8a..d335cd6f63521 100644 --- a/stdlib/LinearAlgebra/test/uniformscaling.jl +++ b/stdlib/LinearAlgebra/test/uniformscaling.jl @@ -226,6 +226,13 @@ let @test copyto!(B, J) == [λ zero(λ)] end + @testset "copy!" begin + A = Matrix{Int}(undef, (3,3)) + @test copy!(A, I) == one(A) + B = Matrix{ComplexF64}(undef, (1,2)) + @test copy!(B, J) == [λ zero(λ)] + end + @testset "binary ops with vectors" begin v = complex.(randn(3), randn(3)) # As shown in #20423@GitHub, vector acts like x1 matrix when participating in linear algebra From be401635fe02b28ce994e2e3cae0733d101f8927 Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Thu, 3 Oct 2024 08:28:32 -0500 Subject: [PATCH 110/537] Add forward progress update to NEWS.md (#54089) Closes #40009 which was left open because of the needs news tag. --------- Co-authored-by: Ian Butterworth --- NEWS.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/NEWS.md b/NEWS.md index cc1bbc7449e5d..fb1fcf381cc7f 100644 --- a/NEWS.md +++ b/NEWS.md @@ -37,6 +37,10 @@ Language changes expression within a given `:toplevel` expression to make use of macros defined earlier in the same `:toplevel` expression. ([#53515]) + - Trivial infinite loops (like `while true; end`) are no longer undefined + behavior. Infinite loops that actually do things (e.g. have side effects + or sleep) were never and are still not undefined behavior. ([#52999]) + Compiler/Runtime improvements ----------------------------- From 6b9719f767d98fa7d6e0d86adf0e204ed226f90e Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 3 Oct 2024 11:22:29 -0400 Subject: [PATCH 111/537] Fix an intermittent test failure in `core` test (#55973) The test wants to assert that `Module` is not resolved in `Main`, but other tests do resolve this identifier, so the test can fail depending on test order (and I've been seeing such failures on CI recently). Fix that by running the test in a fresh subprocess. --- test/core.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/core.jl b/test/core.jl index 62fde5261bfd3..b27832209a835 100644 --- a/test/core.jl +++ b/test/core.jl @@ -1183,7 +1183,7 @@ end # Make sure that `Module` is not resolved to `Core.Module` during sysimg generation # so that users can define their own binding named `Module` in Main. 
-@test !Base.isbindingresolved(Main, :Module) +@test success(`$(Base.julia_cmd()) -e '@assert !Base.isbindingresolved(Main, :Module)'`) # Module() constructor @test names(Module(:anonymous), all = true, imported = true) == [:anonymous] From 42737f79e02bbaf444a9d93e6668b3c55cdb8a6e Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Thu, 3 Oct 2024 16:23:21 -0400 Subject: [PATCH 112/537] fix comma logic in time_print (#55977) Minor formatting fix --- base/timing.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/timing.jl b/base/timing.jl index 6d97d70d2f04c..9686c5b33bccd 100644 --- a/base/timing.jl +++ b/base/timing.jl @@ -206,7 +206,7 @@ function time_print(io::IO, elapsedtime, bytes=0, gctime=0, allocs=0, lock_confl print(io, length(timestr) < 10 ? (" "^(10 - length(timestr))) : "") end print(io, timestr, " seconds") - parens = bytes != 0 || allocs != 0 || gctime > 0 || compile_time > 0 + parens = bytes != 0 || allocs != 0 || gctime > 0 || compile_time > 0 || lock_conflicts > 0 parens && print(io, " (") if bytes != 0 || allocs != 0 allocs, ma = prettyprint_getunits(allocs, length(_cnt_units), Int64(1000)) @@ -228,7 +228,7 @@ function time_print(io::IO, elapsedtime, bytes=0, gctime=0, allocs=0, lock_confl print(io, ", ", lock_conflicts, " lock conflict$plural") end if compile_time > 0 - if bytes != 0 || allocs != 0 || gctime > 0 + if bytes != 0 || allocs != 0 || gctime > 0 || lock_conflicts > 0 print(io, ", ") end print(io, Ryu.writefixed(Float64(100*compile_time/elapsedtime), 2), "% compilation time") From b6b5528da1ea8f322b80247ee4c6c3e65b5a236e Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 4 Oct 2024 06:01:42 +0900 Subject: [PATCH 113/537] optimizer: fix up the inlining algorithm to use correct `nargs`/`isva` (#55976) It appears that inlining.jl was not updated in JuliaLang/julia#54341. Specifically, using `nargs`/`isva` from `mi.def::Method` in `ir_prepare_inlining!` causes the following error to occur: ```julia function generate_lambda_ex(world::UInt, source::LineNumberNode, argnames, spnames, @nospecialize body) stub = Core.GeneratedFunctionStub(identity, Core.svec(argnames...), Core.svec(spnames...)) return stub(world, source, body) end function overdubbee54341(a, b) return a + b end const overdubee_codeinfo54341 = code_lowered(overdubbee54341, Tuple{Any, Any})[1] function overdub_generator54341(world::UInt, source::LineNumberNode, selftype, fargtypes) if length(fargtypes) != 2 return generate_lambda_ex(world, source, (:overdub54341, :args), (), :(error("Wrong number of arguments"))) else return copy(overdubee_codeinfo54341) end end @eval function overdub54341(args...) $(Expr(:meta, :generated, overdub_generator54341)) $(Expr(:meta, :generated_only)) end topfunc(x) = overdub54341(x, 2) ``` ```julia julia> topfunc(1) Internal error: during type inference of topfunc(Int64) Encountered unexpected error in runtime: BoundsError(a=Array{Any, 1}(dims=(2,), mem=Memory{Any}(8, 0x10632e780)[SSAValue(2), SSAValue(3), #, #, #, #, #, #]), i=(3,)) throw_boundserror at ./essentials.jl:14 getindex at ./essentials.jl:909 [inlined] ssa_substitute_op! at ./compiler/ssair/inlining.jl:1798 ssa_substitute_op! at ./compiler/ssair/inlining.jl:1852 ir_inline_item! at ./compiler/ssair/inlining.jl:386 ... ``` This commit updates the abstract interpretation and inlining algorithm to use the `nargs`/`isva` values held by `CodeInfo`. Similar modifications have also been made to EscapeAnalysis.jl. 
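With the change in place, inlining over the generated function from the reproducer above should yield typed code instead of erroring during inference. A quick check along these lines (a sketch; it assumes `overdub54341` from the snippet above has been defined):

```julia
ci, rt = only(code_typed((Int, Int)) do x, y
    @inline overdub54341(x, y)
end)
ci isa Core.CodeInfo  # true once the inliner reads nargs/isva from the CodeInfo
```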
@nanosoldier `runbenchmarks("inference", vs=":master")` --- base/compiler/abstractinterpretation.jl | 2 +- base/compiler/inferencestate.jl | 25 +++++---- base/compiler/optimize.jl | 14 ++--- base/compiler/ssair/inlining.jl | 68 ++++++++++++++----------- base/compiler/ssair/passes.jl | 4 +- base/compiler/stmtinfo.jl | 1 + base/compiler/typeinfer.jl | 2 +- base/compiler/types.jl | 7 ++- test/compiler/EscapeAnalysis/EAUtils.jl | 11 ++-- test/staged.jl | 14 ++--- 10 files changed, 81 insertions(+), 67 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 96355f2a6b5dd..c8a25be422637 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -1282,7 +1282,7 @@ function semi_concrete_eval_call(interp::AbstractInterpreter, effects = Effects(effects; noub=ALWAYS_TRUE) end exct = refine_exception_type(result.exct, effects) - return ConstCallResults(rt, exct, SemiConcreteResult(mi, ir, effects), effects, mi) + return ConstCallResults(rt, exct, SemiConcreteResult(mi, ir, effects, spec_info(irsv)), effects, mi) end end end diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl index 05d95d1d5bdc7..5f8fb82caaa34 100644 --- a/base/compiler/inferencestate.jl +++ b/base/compiler/inferencestate.jl @@ -236,7 +236,7 @@ mutable struct InferenceState slottypes::Vector{Any} src::CodeInfo cfg::CFG - method_info::MethodInfo + spec_info::SpecInfo #= intermediate states for local abstract interpretation =# currbb::Int @@ -294,7 +294,7 @@ mutable struct InferenceState sptypes = sptypes_from_meth_instance(mi) code = src.code::Vector{Any} cfg = compute_basic_blocks(code) - method_info = MethodInfo(src) + spec_info = SpecInfo(src) currbb = currpc = 1 ip = BitSet(1) # TODO BitSetBoundedMinPrioritySet(1) @@ -351,7 +351,7 @@ mutable struct InferenceState restrict_abstract_call_sites = isa(def, Module) this = new( - mi, world, mod, sptypes, slottypes, src, cfg, method_info, + mi, world, mod, sptypes, slottypes, src, cfg, spec_info, currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, ssavaluetypes, stmt_edges, stmt_info, tasks, pclimitations, limitations, cycle_backedges, callstack, 0, 0, 0, result, unreachable, valid_worlds, bestguess, exc_bestguess, ipo_effects, @@ -791,7 +791,7 @@ end # TODO add `result::InferenceResult` and put the irinterp result into the inference cache? 
mutable struct IRInterpretationState - const method_info::MethodInfo + const spec_info::SpecInfo const ir::IRCode const mi::MethodInstance const world::UInt @@ -809,7 +809,7 @@ mutable struct IRInterpretationState parentid::Int function IRInterpretationState(interp::AbstractInterpreter, - method_info::MethodInfo, ir::IRCode, mi::MethodInstance, argtypes::Vector{Any}, + spec_info::SpecInfo, ir::IRCode, mi::MethodInstance, argtypes::Vector{Any}, world::UInt, min_world::UInt, max_world::UInt) curridx = 1 given_argtypes = Vector{Any}(undef, length(argtypes)) @@ -831,7 +831,7 @@ mutable struct IRInterpretationState tasks = WorkThunk[] edges = Any[] callstack = AbsIntState[] - return new(method_info, ir, mi, world, curridx, argtypes_refined, ir.sptypes, tpdum, + return new(spec_info, ir, mi, world, curridx, argtypes_refined, ir.sptypes, tpdum, ssa_refined, lazyreachability, valid_worlds, tasks, edges, callstack, 0, 0) end end @@ -845,14 +845,13 @@ function IRInterpretationState(interp::AbstractInterpreter, else isa(src, CodeInfo) || return nothing end - method_info = MethodInfo(src) + spec_info = SpecInfo(src) ir = inflate_ir(src, mi) argtypes = va_process_argtypes(optimizer_lattice(interp), argtypes, src.nargs, src.isva) - return IRInterpretationState(interp, method_info, ir, mi, argtypes, world, + return IRInterpretationState(interp, spec_info, ir, mi, argtypes, world, codeinst.min_world, codeinst.max_world) end - # AbsIntState # =========== @@ -927,11 +926,11 @@ is_constproped(::IRInterpretationState) = true is_cached(sv::InferenceState) = !iszero(sv.cache_mode & CACHE_MODE_GLOBAL) is_cached(::IRInterpretationState) = false -method_info(sv::InferenceState) = sv.method_info -method_info(sv::IRInterpretationState) = sv.method_info +spec_info(sv::InferenceState) = sv.spec_info +spec_info(sv::IRInterpretationState) = sv.spec_info -propagate_inbounds(sv::AbsIntState) = method_info(sv).propagate_inbounds -method_for_inference_limit_heuristics(sv::AbsIntState) = method_info(sv).method_for_inference_limit_heuristics +propagate_inbounds(sv::AbsIntState) = spec_info(sv).propagate_inbounds +method_for_inference_limit_heuristics(sv::AbsIntState) = spec_info(sv).method_for_inference_limit_heuristics frame_world(sv::InferenceState) = sv.world frame_world(sv::IRInterpretationState) = sv.world diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index 1971b47323f5d..02f6b46e2e73f 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -644,10 +644,10 @@ function ((; code_cache)::GetNativeEscapeCache)(mi::MethodInstance) return false end -function refine_effects!(interp::AbstractInterpreter, sv::PostOptAnalysisState) +function refine_effects!(interp::AbstractInterpreter, opt::OptimizationState, sv::PostOptAnalysisState) if !is_effect_free(sv.result.ipo_effects) && sv.all_effect_free && !isempty(sv.ea_analysis_pending) ir = sv.ir - nargs = let def = sv.result.linfo.def; isa(def, Method) ? 
Int(def.nargs) : 0; end + nargs = Int(opt.src.nargs) estate = EscapeAnalysis.analyze_escapes(ir, nargs, optimizer_lattice(interp), GetNativeEscapeCache(interp)) argescapes = EscapeAnalysis.ArgEscapeCache(estate) stack_analysis_result!(sv.result, argescapes) @@ -939,7 +939,8 @@ function check_inconsistentcy!(sv::PostOptAnalysisState, scanner::BBScanner) end end -function ipo_dataflow_analysis!(interp::AbstractInterpreter, ir::IRCode, result::InferenceResult) +function ipo_dataflow_analysis!(interp::AbstractInterpreter, opt::OptimizationState, + ir::IRCode, result::InferenceResult) if !is_ipo_dataflow_analysis_profitable(result.ipo_effects) return false end @@ -967,13 +968,13 @@ function ipo_dataflow_analysis!(interp::AbstractInterpreter, ir::IRCode, result: end end - return refine_effects!(interp, sv) + return refine_effects!(interp, opt, sv) end # run the optimization work function optimize(interp::AbstractInterpreter, opt::OptimizationState, caller::InferenceResult) - @timeit "optimizer" ir = run_passes_ipo_safe(opt.src, opt, caller) - ipo_dataflow_analysis!(interp, ir, caller) + @timeit "optimizer" ir = run_passes_ipo_safe(opt.src, opt) + ipo_dataflow_analysis!(interp, opt, ir, caller) return finish(interp, opt, ir, caller) end @@ -995,7 +996,6 @@ matchpass(::Nothing, _, _) = false function run_passes_ipo_safe( ci::CodeInfo, sv::OptimizationState, - caller::InferenceResult, optimize_until = nothing, # run all passes by default ) __stage__ = 0 # used by @pass diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index 9f250b156cd2f..5017b619469ff 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -12,6 +12,8 @@ struct InliningTodo mi::MethodInstance # The IR of the inlinee ir::IRCode + # The SpecInfo for the inlinee + spec_info::SpecInfo # The DebugInfo table for the inlinee di::DebugInfo # If the function being inlined is a single basic block we can use a @@ -20,8 +22,8 @@ struct InliningTodo # Effects of the call statement effects::Effects end -function InliningTodo(mi::MethodInstance, (ir, di)::Tuple{IRCode, DebugInfo}, effects::Effects) - return InliningTodo(mi, ir, di, linear_inline_eligible(ir), effects) +function InliningTodo(mi::MethodInstance, ir::IRCode, spec_info::SpecInfo, di::DebugInfo, effects::Effects) + return InliningTodo(mi, ir, spec_info, di, linear_inline_eligible(ir), effects) end struct ConstantCase @@ -321,7 +323,8 @@ function ir_inline_linetable!(debuginfo::DebugInfoStream, inlinee_debuginfo::Deb end function ir_prepare_inlining!(insert_node!::Inserter, inline_target::Union{IRCode, IncrementalCompact}, - ir::IRCode, di::DebugInfo, mi::MethodInstance, inlined_at::NTuple{3,Int32}, argexprs::Vector{Any}) + ir::IRCode, spec_info::SpecInfo, di::DebugInfo, mi::MethodInstance, + inlined_at::NTuple{3,Int32}, argexprs::Vector{Any}) def = mi.def::Method debuginfo = inline_target isa IRCode ? 
inline_target.debuginfo : inline_target.ir.debuginfo topline = new_inlined_at = ir_inline_linetable!(debuginfo, di, inlined_at) @@ -334,8 +337,8 @@ function ir_prepare_inlining!(insert_node!::Inserter, inline_target::Union{IRCod spvals_ssa = insert_node!( removable_if_unused(NewInstruction(Expr(:call, Core._compute_sparams, def, argexprs...), SimpleVector, topline))) end - if def.isva - nargs_def = Int(def.nargs::Int32) + if spec_info.isva + nargs_def = spec_info.nargs if nargs_def > 0 argexprs = fix_va_argexprs!(insert_node!, inline_target, argexprs, nargs_def, topline) end @@ -362,7 +365,7 @@ function ir_inline_item!(compact::IncrementalCompact, idx::Int, argexprs::Vector item::InliningTodo, boundscheck::Symbol, todo_bbs::Vector{Tuple{Int, Int}}) # Ok, do the inlining here inlined_at = compact.result[idx][:line] - ssa_substitute = ir_prepare_inlining!(InsertHere(compact), compact, item.ir, item.di, item.mi, inlined_at, argexprs) + ssa_substitute = ir_prepare_inlining!(InsertHere(compact), compact, item.ir, item.spec_info, item.di, item.mi, inlined_at, argexprs) boundscheck = has_flag(compact.result[idx], IR_FLAG_INBOUNDS) ? :off : boundscheck # If the iterator already moved on to the next basic block, @@ -860,15 +863,14 @@ function resolve_todo(mi::MethodInstance, result::Union{Nothing,InferenceResult, if inferred_result isa ConstantCase add_inlining_backedge!(et, mi) return inferred_result - end - if inferred_result isa InferredResult + elseif inferred_result isa InferredResult (; src, effects) = inferred_result elseif inferred_result isa CodeInstance src = @atomic :monotonic inferred_result.inferred effects = decode_effects(inferred_result.ipo_purity_bits) - else - src = nothing - effects = Effects() + else # there is no cached source available, bail out + return compileable_specialization(mi, Effects(), et, info; + compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) end # the duplicated check might have been done already within `analyze_method!`, but still @@ -883,9 +885,12 @@ function resolve_todo(mi::MethodInstance, result::Union{Nothing,InferenceResult, compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) add_inlining_backedge!(et, mi) - ir = inferred_result isa CodeInstance ? retrieve_ir_for_inlining(inferred_result, src) : - retrieve_ir_for_inlining(mi, src, preserve_local_sources) - return InliningTodo(mi, ir, effects) + if inferred_result isa CodeInstance + ir, spec_info, debuginfo = retrieve_ir_for_inlining(inferred_result, src) + else + ir, spec_info, debuginfo = retrieve_ir_for_inlining(mi, src, preserve_local_sources) + end + return InliningTodo(mi, ir, spec_info, debuginfo, effects) end # the special resolver for :invoke-d call @@ -901,23 +906,17 @@ function resolve_todo(mi::MethodInstance, @nospecialize(info::CallInfo), flag::U if cached_result isa ConstantCase add_inlining_backedge!(et, mi) return cached_result - end - if cached_result isa InferredResult - (; src, effects) = cached_result elseif cached_result isa CodeInstance src = @atomic :monotonic cached_result.inferred effects = decode_effects(cached_result.ipo_purity_bits) - else - src = nothing - effects = Effects() + else # there is no cached source available, bail out + return nothing end - preserve_local_sources = true src_inlining_policy(state.interp, src, info, flag) || return nothing - ir = cached_result isa CodeInstance ? 
retrieve_ir_for_inlining(cached_result, src) : - retrieve_ir_for_inlining(mi, src, preserve_local_sources) + ir, spec_info, debuginfo = retrieve_ir_for_inlining(cached_result, src) add_inlining_backedge!(et, mi) - return InliningTodo(mi, ir, effects) + return InliningTodo(mi, ir, spec_info, debuginfo, effects) end function validate_sparams(sparams::SimpleVector) @@ -971,22 +970,29 @@ function analyze_method!(match::MethodMatch, argtypes::Vector{Any}, return resolve_todo(mi, volatile_inf_result, info, flag, state; invokesig) end -function retrieve_ir_for_inlining(cached_result::CodeInstance, src::MaybeCompressed) - src = _uncompressed_ir(cached_result, src)::CodeInfo - return inflate_ir!(src, cached_result.def), src.debuginfo +function retrieve_ir_for_inlining(cached_result::CodeInstance, src::String) + src = _uncompressed_ir(cached_result, src) + return inflate_ir!(src, cached_result.def), SpecInfo(src), src.debuginfo +end +function retrieve_ir_for_inlining(cached_result::CodeInstance, src::CodeInfo) + return inflate_ir!(copy(src), cached_result.def), SpecInfo(src), src.debuginfo end function retrieve_ir_for_inlining(mi::MethodInstance, src::CodeInfo, preserve_local_sources::Bool) if preserve_local_sources src = copy(src) end - return inflate_ir!(src, mi), src.debuginfo + return inflate_ir!(src, mi), SpecInfo(src), src.debuginfo end function retrieve_ir_for_inlining(mi::MethodInstance, ir::IRCode, preserve_local_sources::Bool) if preserve_local_sources ir = copy(ir) end + # COMBAK this is not correct, we should make `InferenceResult` propagate `SpecInfo` + spec_info = let m = mi.def::Method + SpecInfo(Int(m.nargs), m.isva, false, nothing) + end ir.debuginfo.def = mi - return ir, DebugInfo(ir.debuginfo, length(ir.stmts)) + return ir, spec_info, DebugInfo(ir.debuginfo, length(ir.stmts)) end function handle_single_case!(todo::Vector{Pair{Int,Any}}, @@ -1466,8 +1472,8 @@ function semiconcrete_result_item(result::SemiConcreteResult, add_inlining_backedge!(et, mi) preserve_local_sources = OptimizationParams(state.interp).preserve_local_sources - ir = retrieve_ir_for_inlining(mi, result.ir, preserve_local_sources) - return InliningTodo(mi, ir, result.effects) + ir, _, debuginfo = retrieve_ir_for_inlining(mi, result.ir, preserve_local_sources) + return InliningTodo(mi, ir, result.spec_info, debuginfo, result.effects) end function handle_semi_concrete_result!(cases::Vector{InliningCase}, result::SemiConcreteResult, diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl index 3981f7382d707..e227249b48598 100644 --- a/base/compiler/ssair/passes.jl +++ b/base/compiler/ssair/passes.jl @@ -1532,7 +1532,7 @@ function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, end src_inlining_policy(inlining.interp, src, info, IR_FLAG_NULL) || return false - src, di = retrieve_ir_for_inlining(code, src) + src, spec_info, di = retrieve_ir_for_inlining(code, src) # For now: Require finalizer to only have one basic block length(src.cfg.blocks) == 1 || return false @@ -1542,7 +1542,7 @@ function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, # TODO: Should there be a special line number node for inlined finalizers? 
inline_at = ir[SSAValue(idx)][:line] - ssa_substitute = ir_prepare_inlining!(InsertBefore(ir, SSAValue(idx)), ir, src, di, mi, inline_at, argexprs) + ssa_substitute = ir_prepare_inlining!(InsertBefore(ir, SSAValue(idx)), ir, src, spec_info, di, mi, inline_at, argexprs) # TODO: Use the actual inliner here rather than open coding this special purpose inliner. ssa_rename = Vector{Any}(undef, length(src.stmts)) diff --git a/base/compiler/stmtinfo.jl b/base/compiler/stmtinfo.jl index ac5ffbdd5d76d..9dba7a4459f9e 100644 --- a/base/compiler/stmtinfo.jl +++ b/base/compiler/stmtinfo.jl @@ -94,6 +94,7 @@ struct SemiConcreteResult <: ConstResult mi::MethodInstance ir::IRCode effects::Effects + spec_info::SpecInfo end # XXX Technically this does not represent a result of constant inference, but rather that of diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index 77a2e02129ce4..8b85f7c6f35f1 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -941,7 +941,7 @@ function typeinf_ircode(interp::AbstractInterpreter, mi::MethodInstance, end (; result) = frame opt = OptimizationState(frame, interp) - ir = run_passes_ipo_safe(opt.src, opt, result, optimize_until) + ir = run_passes_ipo_safe(opt.src, opt, optimize_until) rt = widenconst(ignorelimited(result.result)) return ir, rt end diff --git a/base/compiler/types.jl b/base/compiler/types.jl index c51785f23ea29..ecf2417fd6199 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -41,11 +41,14 @@ struct StmtInfo used::Bool end -struct MethodInfo +struct SpecInfo + nargs::Int + isva::Bool propagate_inbounds::Bool method_for_inference_limit_heuristics::Union{Nothing,Method} end -MethodInfo(src::CodeInfo) = MethodInfo( +SpecInfo(src::CodeInfo) = SpecInfo( + Int(src.nargs), src.isva, src.propagate_inbounds, src.method_for_inference_limit_heuristics::Union{Nothing,Method}) diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/test/compiler/EscapeAnalysis/EAUtils.jl index 188ec93ebc5be..b8ad4589db626 100644 --- a/test/compiler/EscapeAnalysis/EAUtils.jl +++ b/test/compiler/EscapeAnalysis/EAUtils.jl @@ -116,12 +116,14 @@ CC.get_inference_world(interp::EscapeAnalyzer) = interp.world CC.get_inference_cache(interp::EscapeAnalyzer) = interp.inf_cache CC.cache_owner(::EscapeAnalyzer) = EAToken() -function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, ir::IRCode, caller::InferenceResult) +function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, opt::OptimizationState, + ir::IRCode, caller::InferenceResult) # run EA on all frames that have been optimized - nargs = let def = caller.linfo.def; isa(def, Method) ? 
Int(def.nargs) : 0; end + nargs = Int(opt.src.nargs) + 𝕃ₒ = CC.optimizer_lattice(interp) get_escape_cache = GetEscapeCache(interp) estate = try - analyze_escapes(ir, nargs, CC.optimizer_lattice(interp), get_escape_cache) + analyze_escapes(ir, nargs, 𝕃ₒ, get_escape_cache) catch err @error "error happened within EA, inspect `Main.failed_escapeanalysis`" Main.failed_escapeanalysis = FailedAnalysis(ir, nargs, get_escape_cache) @@ -133,7 +135,8 @@ function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, ir::IRCode, caller::I end record_escapes!(interp, caller, estate, ir) - @invoke CC.ipo_dataflow_analysis!(interp::AbstractInterpreter, ir::IRCode, caller::InferenceResult) + @invoke CC.ipo_dataflow_analysis!(interp::AbstractInterpreter, opt::OptimizationState, + ir::IRCode, caller::InferenceResult) end function record_escapes!(interp::EscapeAnalyzer, diff --git a/test/staged.jl b/test/staged.jl index 0112dd73b45f7..1b28144639f97 100644 --- a/test/staged.jl +++ b/test/staged.jl @@ -382,9 +382,8 @@ let end function generate_lambda_ex(world::UInt, source::LineNumberNode, - argnames::Core.SimpleVector, spnames::Core.SimpleVector, - body::Expr) - stub = Core.GeneratedFunctionStub(identity, argnames, spnames) + argnames, spnames, @nospecialize body) + stub = Core.GeneratedFunctionStub(identity, Core.svec(argnames...), Core.svec(spnames...)) return stub(world, source, body) end @@ -392,7 +391,7 @@ end struct Generator54916 <: Core.CachedGenerator end function (::Generator54916)(world::UInt, source::LineNumberNode, args...) return generate_lambda_ex(world, source, - Core.svec(:doit54916, :func, :arg), Core.svec(), :(func(arg))) + (:doit54916, :func, :arg), (), :(func(arg))) end @eval function doit54916(func, arg) $(Expr(:meta, :generated, Generator54916())) @@ -420,7 +419,7 @@ function generator49715(world, source, self, f, tt) mi = Base._which(sig; world) error("oh no") return generate_lambda_ex(world, source, - Core.svec(:doit49715, :f, :tt), Core.svec(), :(nothing)) + (:doit49715, :f, :tt), (), nothing) end @eval function doit49715(f, tt) $(Expr(:meta, :generated, generator49715)) @@ -436,7 +435,7 @@ const overdubee_codeinfo54341 = code_lowered(overdubbee54341, Tuple{Any, Any})[1 function overdub_generator54341(world::UInt, source::LineNumberNode, selftype, fargtypes) if length(fargtypes) != 2 return generate_lambda_ex(world, source, - Core.svec(:overdub54341, :args), Core.svec(), :(error("Wrong number of arguments"))) + (:overdub54341, :args), (), :(error("Wrong number of arguments"))) else return copy(overdubee_codeinfo54341) end @@ -446,4 +445,7 @@ end $(Expr(:meta, :generated_only)) end @test overdub54341(1, 2) == 3 +# check if the inlining pass handles `nargs`/`isva` correctly +@test first(only(code_typed((Int,Int)) do x, y; @inline overdub54341(x, y); end)) isa Core.CodeInfo +@test first(only(code_typed((Int,)) do x; @inline overdub54341(x, 1); end)) isa Core.CodeInfo @test_throws "Wrong number of arguments" overdub54341(1, 2, 3) From b9d9b69165493f6fc03870d975be05c67f14a30b Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 4 Oct 2024 09:02:29 +0530 Subject: [PATCH 114/537] Add `.zed` directory to `.gitignore` (#55974) Similar to the `vscode` config directory, we may ignore the `zed` directory as well. 
--- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 524a12d066c4d..80bdd67619454 100644 --- a/.gitignore +++ b/.gitignore @@ -34,6 +34,7 @@ .DS_Store .idea/* .vscode/* +.zed/* *.heapsnapshot .cache # Buildkite: Ignore the entire .buildkite directory From b2cb6a3fb1071f2bd056cc78e048e58e9186e9b4 Mon Sep 17 00:00:00 2001 From: N5N3 <2642243996@qq.com> Date: Sun, 9 Jun 2024 23:36:26 +0800 Subject: [PATCH 115/537] typeintersect: reduce unneeded allocations from `merge_env` `merge_env` and `final_merge_env` could be skipped for emptiness test or if we know there's only 1 valid Union state. --- src/subtype.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/src/subtype.c b/src/subtype.c index 2011ca8b1c705..acfce3649236f 100644 --- a/src/subtype.c +++ b/src/subtype.c @@ -4138,9 +4138,13 @@ static jl_value_t *intersect_all(jl_value_t *x, jl_value_t *y, jl_stenv_t *e) save_env(e, &se, 1); int niter = 0, total_iter = 0; is[0] = intersect(x, y, e, 0); // root - if (is[0] != jl_bottom_type) + if (is[0] == jl_bottom_type) { + restore_env(e, &se, 1); + } + else if (!e->emptiness_only && has_next_union_state(e, 1)) { niter = merge_env(e, &me, &se, niter); - restore_env(e, &se, 1); + restore_env(e, &se, 1); + } while (next_union_state(e, 1)) { if (e->emptiness_only && is[0] != jl_bottom_type) break; @@ -4148,9 +4152,16 @@ static jl_value_t *intersect_all(jl_value_t *x, jl_value_t *y, jl_stenv_t *e) e->Runions.more = 0; is[1] = intersect(x, y, e, 0); - if (is[1] != jl_bottom_type) + if (is[1] == jl_bottom_type) { + restore_env(e, &se, 1); + } + else if (niter > 0 || (!e->emptiness_only && has_next_union_state(e, 1))) { niter = merge_env(e, &me, &se, niter); - restore_env(e, &se, 1); + restore_env(e, &se, 1); + } + else { + assert(is[0] == jl_bottom_type); + } if (is[0] == jl_bottom_type) is[0] = is[1]; else if (is[1] != jl_bottom_type) { From 5d69bbbb1ab54b44bf970b4ca0f2156f7f14332c Mon Sep 17 00:00:00 2001 From: N5N3 <2642243996@qq.com> Date: Sun, 9 Jun 2024 23:49:14 +0800 Subject: [PATCH 116/537] typeintersect: trunc env before nested `intersect_all` if valid. This only covers the simplest cases. We might want a full dependence analysis and keep env length minimum in the future. --- src/subtype.c | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/subtype.c b/src/subtype.c index acfce3649236f..65ee4d5916bce 100644 --- a/src/subtype.c +++ b/src/subtype.c @@ -2423,24 +2423,47 @@ static jl_value_t *intersect_aside(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, if (obviously_egal(x, y)) return x; + jl_varbinding_t *vars = NULL; + jl_varbinding_t *bbprev = NULL; + jl_varbinding_t *xb = jl_is_typevar(x) ? lookup(e, (jl_tvar_t *)x) : NULL; + jl_varbinding_t *yb = jl_is_typevar(y) ? lookup(e, (jl_tvar_t *)y) : NULL; + int simple_x = !jl_has_free_typevars(!jl_is_typevar(x) ? x : xb ? xb->ub : ((jl_tvar_t *)x)->ub); + int simple_y = !jl_has_free_typevars(!jl_is_typevar(y) ? y : yb ? yb->ub : ((jl_tvar_t *)y)->ub); + if (simple_x && simple_y && !(xb && yb)) { + vars = e->vars; + e->vars = xb ? 
xb : yb; + if (e->vars != NULL) { + bbprev = e->vars->prev; + e->vars->prev = NULL; + } + } jl_saved_unionstate_t oldRunions; push_unionstate(&oldRunions, &e->Runions); int savedepth = e->invdepth; e->invdepth = depth; jl_value_t *res = intersect_all(x, y, e); e->invdepth = savedepth; pop_unionstate(&e->Runions, &oldRunions); + if (bbprev) e->vars->prev = bbprev; + if (vars) e->vars = vars; return res; } static jl_value_t *intersect_union(jl_value_t *x, jl_uniontype_t *u, jl_stenv_t *e, int8_t R, int param) { - if (param == 2 || (!jl_has_free_typevars(x) && !jl_has_free_typevars((jl_value_t*)u))) { + int no_free = !jl_has_free_typevars(x) && !jl_has_free_typevars((jl_value_t*)u); + if (param == 2 || no_free) { jl_value_t *a=NULL, *b=NULL; JL_GC_PUSH2(&a, &b); + jl_varbinding_t *vars = NULL; + if (no_free) { + vars = e->vars; + e->vars = NULL; + } jl_saved_unionstate_t oldRunions; push_unionstate(&oldRunions, &e->Runions); a = R ? intersect_all(x, u->a, e) : intersect_all(u->a, x, e); b = R ? intersect_all(x, u->b, e) : intersect_all(u->b, x, e); pop_unionstate(&e->Runions, &oldRunions); + if (vars) e->vars = vars; jl_value_t *i = simple_join(a,b); JL_GC_POP(); return i; From 636a35d83ca16d2077fc507701f41d50f409c7a5 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Fri, 4 Oct 2024 09:57:53 -0400 Subject: [PATCH 117/537] `@time` actually fix time report commas & add tests (#55982) https://github.com/JuliaLang/julia/pull/55977 looked simple but wasn't quite right because of a bad pattern in the lock conflicts report section. So fix and add tests. --- base/timing.jl | 7 +++++-- test/misc.jl | 9 +++++++++ 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/base/timing.jl b/base/timing.jl index 9686c5b33bccd..b094aa230e1c2 100644 --- a/base/timing.jl +++ b/base/timing.jl @@ -206,7 +206,7 @@ function time_print(io::IO, elapsedtime, bytes=0, gctime=0, allocs=0, lock_confl print(io, length(timestr) < 10 ? (" "^(10 - length(timestr))) : "") end print(io, timestr, " seconds") - parens = bytes != 0 || allocs != 0 || gctime > 0 || compile_time > 0 || lock_conflicts > 0 + parens = bytes != 0 || allocs != 0 || gctime > 0 || lock_conflicts > 0 || compile_time > 0 parens && print(io, " (") if bytes != 0 || allocs != 0 allocs, ma = prettyprint_getunits(allocs, length(_cnt_units), Int64(1000)) @@ -224,8 +224,11 @@ function time_print(io::IO, elapsedtime, bytes=0, gctime=0, allocs=0, lock_confl print(io, Ryu.writefixed(Float64(100*gctime/elapsedtime), 2), "% gc time") end if lock_conflicts > 0 + if bytes != 0 || allocs != 0 || gctime > 0 + print(io, ", ") + end plural = lock_conflicts == 1 ? 
"" : "s" - print(io, ", ", lock_conflicts, " lock conflict$plural") + print(io, lock_conflicts, " lock conflict$plural") end if compile_time > 0 if bytes != 0 || allocs != 0 || gctime > 0 || lock_conflicts > 0 diff --git a/test/misc.jl b/test/misc.jl index 66b70956935cd..e089395ce4557 100644 --- a/test/misc.jl +++ b/test/misc.jl @@ -360,6 +360,15 @@ let foo() = 1 @test @timev foo() true end +# this is internal, but used for easy testing +@test sprint(Base.time_print, 1e9) == " 1.000000 seconds" +@test sprint(Base.time_print, 1e9, 111, 0, 222) == " 1.000000 seconds (222 allocations: 111 bytes)" +@test sprint(Base.time_print, 1e9, 111, 0.5e9, 222) == " 1.000000 seconds (222 allocations: 111 bytes, 50.00% gc time)" +@test sprint(Base.time_print, 1e9, 111, 0, 222, 333) == " 1.000000 seconds (222 allocations: 111 bytes, 333 lock conflicts)" +@test sprint(Base.time_print, 1e9, 0, 0, 0, 333) == " 1.000000 seconds (333 lock conflicts)" +@test sprint(Base.time_print, 1e9, 111, 0, 222, 333, 0.25e9) == " 1.000000 seconds (222 allocations: 111 bytes, 333 lock conflicts, 25.00% compilation time)" +@test sprint(Base.time_print, 1e9, 111, 0.5e9, 222, 333, 0.25e9, 0.175e9) == " 1.000000 seconds (222 allocations: 111 bytes, 50.00% gc time, 333 lock conflicts, 25.00% compilation time: 70% of which was recompilation)" + # @showtime @test @showtime true let foo() = true From 80d67d579f89d264939220d09377f55fc7bfbcb2 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Sat, 5 Oct 2024 00:59:16 +0900 Subject: [PATCH 118/537] adjust EA to JuliaLang/julia#52527 (#55986) `EnterNode.catch_dest` can now be `0` after the `try`/`catch` elision feature implemented in JuliaLang/julia#52527, and we actually need to adjust `EscapeAnalysis.compute_frameinfo` too. 
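In short, `compute_frameinfo` should only record a try region when the `EnterNode` still carries a real catch destination. A condensed sketch of that guard (names are taken from the diff below; the full change also keeps the existing assertion and the lazy initialization of `tryregions`):

```julia
# After the try/catch elision of JuliaLang/julia#52527, an elided catch edge
# is encoded as `catch_dest == 0`, so skip the try-region bookkeeping for it.
if isa(stmt, EnterNode)
    leave_block = stmt.catch_dest
    if leave_block ≠ 0
        leave_pc = first(ir.cfg.blocks[leave_block].stmts)
        push!(tryregions, idx:leave_pc)
    end
end
```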
--- .../ssair/EscapeAnalysis/EscapeAnalysis.jl | 10 ++++++---- test/compiler/EscapeAnalysis/EscapeAnalysis.jl | 17 +++++++++++++++++ test/compiler/irpasses.jl | 7 ++----- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl index 6967efe495be1..0ad55d6fbcd9e 100644 --- a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl +++ b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl @@ -732,11 +732,13 @@ function compute_frameinfo(ir::IRCode) inst = ir[SSAValue(idx)] stmt = inst[:stmt] if isa(stmt, EnterNode) - @assert idx ≤ nstmts "try/catch inside new_nodes unsupported" - tryregions === nothing && (tryregions = UnitRange{Int}[]) leave_block = stmt.catch_dest - leave_pc = first(ir.cfg.blocks[leave_block].stmts) - push!(tryregions, idx:leave_pc) + if leave_block ≠ 0 + @assert idx ≤ nstmts "try/catch inside new_nodes unsupported" + tryregions === nothing && (tryregions = UnitRange{Int}[]) + leave_pc = first(ir.cfg.blocks[leave_block].stmts) + push!(tryregions, idx:leave_pc) + end elseif arrayinfo !== nothing # TODO this super limited alias analysis is able to handle only very simple cases # this should be replaced with a proper forward dimension analysis diff --git a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl b/test/compiler/EscapeAnalysis/EscapeAnalysis.jl index 8c3e065818208..99bd86228f50a 100644 --- a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl +++ b/test/compiler/EscapeAnalysis/EscapeAnalysis.jl @@ -2299,4 +2299,21 @@ let result = code_escapes((SafeRef{String},Any)) do x, y @test has_all_escape(result.state[Argument(3)]) # y end +@eval function scope_folding() + $(Expr(:tryfinally, + Expr(:block, + Expr(:tryfinally, :(), :(), 2), + :(return Core.current_scope())), + :(), 1)) +end +@eval function scope_folding_opt() + $(Expr(:tryfinally, + Expr(:block, + Expr(:tryfinally, :(), :(), :(Base.inferencebarrier(2))), + :(return Core.current_scope())), + :(), :(Base.inferencebarrier(1)))) +end +@test (@code_escapes scope_folding()) isa EAUtils.EscapeResult +@test (@code_escapes scope_folding_opt()) isa EAUtils.EscapeResult + end # module test_EA diff --git a/test/compiler/irpasses.jl b/test/compiler/irpasses.jl index 281317ac25bf8..740ac5f4958e4 100644 --- a/test/compiler/irpasses.jl +++ b/test/compiler/irpasses.jl @@ -576,7 +576,6 @@ let # lifting `isa` through Core.ifelse @test count(iscall((src, isa)), src.code) == 0 end - let # lifting `isdefined` through PhiNode src = code_typed1((Bool,Some{Int},)) do c, x y = c ? 
x : nothing @@ -1035,8 +1034,7 @@ exc39508 = ErrorException("expected") end @test test39508() === exc39508 -let - # `typeassert` elimination after SROA +let # `typeassert` elimination after SROA # NOTE we can remove this optimization once inference is able to reason about memory-effects src = @eval Module() begin mutable struct Foo; x; end @@ -1051,8 +1049,7 @@ let @test count(iscall((src, typeassert)), src.code) == 0 end -let - # Test for https://github.com/JuliaLang/julia/issues/43402 +let # Test for https://github.com/JuliaLang/julia/issues/43402 # Ensure that structs required not used outside of the ccall, # still get listed in the ccall_preserves From a8d7b68072d15d0ec1a19e27a8064253bea912c5 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 19 Sep 2024 12:45:52 -0400 Subject: [PATCH 119/537] Improvements to JITLink Seeing what this will look like, since it has a number of features (delayed compilation, concurrent compilation) that are starting to become important, so it would be nice to switch to only supporting one common implementation of memory management. Refs #50248 I am expecting https://github.com/llvm/llvm-project/issues/63236 may cause some problems, since we reconfigured some CI machines to minimize that issue, but it is still likely relevant. --- src/codegen.cpp | 14 +-- src/jitlayers.cpp | 289 +++++++++++++++++++++++++--------------------- src/jitlayers.h | 26 ++--- 3 files changed, 174 insertions(+), 155 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index a7a985284c87b..cee88a3d20f3d 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -10396,7 +10396,7 @@ static void init_jit_functions(void) } #ifdef JL_USE_INTEL_JITEVENTS -char jl_using_intel_jitevents; // Non-zero if running under Intel VTune Amplifier +char jl_using_intel_jitevents = 0; // Non-zero if running under Intel VTune Amplifier #endif #ifdef JL_USE_OPROFILE_JITEVENTS @@ -10510,9 +10510,6 @@ extern "C" void jl_init_llvm(void) #if defined(JL_USE_INTEL_JITEVENTS) || \ defined(JL_USE_OPROFILE_JITEVENTS) || \ defined(JL_USE_PERF_JITEVENTS) -#ifdef JL_USE_JITLINK -#pragma message("JIT profiling support (JL_USE_*_JITEVENTS) not yet available on platforms that use JITLink") -#else const char *jit_profiling = getenv("ENABLE_JITPROFILING"); #if defined(JL_USE_INTEL_JITEVENTS) @@ -10529,24 +10526,23 @@ extern "C" void jl_init_llvm(void) #if defined(JL_USE_PERF_JITEVENTS) if (jit_profiling && atoi(jit_profiling)) { - jl_using_perf_jitevents= 1; + jl_using_perf_jitevents = 1; } #endif #ifdef JL_USE_INTEL_JITEVENTS if (jl_using_intel_jitevents) - jl_ExecutionEngine->RegisterJITEventListener(JITEventListener::createIntelJITEventListener()); + jl_ExecutionEngine->enableIntelJITEventListener(); #endif #ifdef JL_USE_OPROFILE_JITEVENTS if (jl_using_oprofile_jitevents) - jl_ExecutionEngine->RegisterJITEventListener(JITEventListener::createOProfileJITEventListener()); + jl_ExecutionEngine->enableOProfileJITEventListener(); #endif #ifdef JL_USE_PERF_JITEVENTS if (jl_using_perf_jitevents) - jl_ExecutionEngine->RegisterJITEventListener(JITEventListener::createPerfJITEventListener()); -#endif + jl_ExecutionEngine->enablePerfJITEventListener(); #endif #endif diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index 442103c91be0f..7489b086105e6 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -14,6 +14,15 @@ #include #include #include +#if JL_LLVM_VERSION >= 180000 +#include +#include +#include +#endif +#if JL_LLVM_VERSION >= 190000 +#include +#include +#endif #include #include #include @@ -142,13 +151,14 @@ 
void jl_dump_llvm_opt_impl(void *s) **jl_ExecutionEngine->get_dump_llvm_opt_stream() = (ios_t*)s; } +#ifndef JL_USE_JITLINK static int jl_add_to_ee( orc::ThreadSafeModule &M, const StringMap &NewExports, DenseMap &Queued, SmallVectorImpl &Stack) JL_NOTSAFEPOINT; +#endif static void jl_decorate_module(Module &M) JL_NOTSAFEPOINT; -static uint64_t getAddressForFunction(StringRef fname) JL_NOTSAFEPOINT; void jl_link_global(GlobalVariable *GV, void *addr) JL_NOTSAFEPOINT { @@ -177,23 +187,6 @@ void jl_jit_globals(std::map &globals) JL_NOTSAFEPOINT } } -// used for image_codegen, where we keep all the gvs external -// so we can't jit them directly into each module -static orc::ThreadSafeModule jl_get_globals_module(orc::ThreadSafeContext &ctx, const DataLayout &DL, const Triple &T, std::map &globals) JL_NOTSAFEPOINT -{ - auto lock = ctx.getLock(); - auto GTSM = jl_create_ts_module("globals", ctx, DL, T); - auto GM = GTSM.getModuleUnlocked(); - for (auto &global : globals) { - auto GV = global.second; - auto GV2 = new GlobalVariable(*GM, GV->getValueType(), GV->isConstant(), GlobalValue::ExternalLinkage, literal_static_pointer_val(global.first, GV->getValueType()), GV->getName(), nullptr, GV->getThreadLocalMode(), GV->getAddressSpace(), false); - GV2->copyAttributesFrom(GV); - GV2->setDSOLocal(true); - GV2->setAlignment(GV->getAlign()); - } - return GTSM; -} - // this generates llvm code for the lambda info // and adds the result to the jitlayers // (and the shadow module), @@ -238,8 +231,21 @@ static jl_callptr_t _jl_compile_codeinst( // to ensure that the globals are defined when they are compiled. if (params.imaging_mode) { // Won't contain any PLT/dlsym calls, so no need to optimize those - jl_ExecutionEngine->addModule(jl_get_globals_module(params.tsctx, params.DL, params.TargetTriple, params.global_targets)); - } else { + if (!params.global_targets.empty()) { + void **globalslots = new void*[params.global_targets.size()]; + void **slot = globalslots; + for (auto &global : params.global_targets) { + auto GV = global.second; + *slot = global.first; + jl_ExecutionEngine->addGlobalMapping(GV->getName(), (uintptr_t)slot); + slot++; + } +#ifdef __clang_analyzer__ + static void **leaker = globalslots; // for the purpose of the analyzer, we need to expressly leak this variable or it thinks we forgot to free it +#endif + } + } + else { StringMap NewGlobals; for (auto &global : params.global_targets) { NewGlobals[global.second->getName()] = global.first; @@ -255,6 +261,7 @@ static jl_callptr_t _jl_compile_codeinst( } } +#ifndef JL_USE_JITLINK // Collect the exported functions from the params.compiled_functions modules, // which form dependencies on which functions need to be // compiled first. Cycles of functions are compiled together. 
@@ -281,18 +288,40 @@ static jl_callptr_t _jl_compile_codeinst( jl_add_to_ee(M, NewExports, Queued, Stack); assert(Queued.empty() && Stack.empty() && !M); } +#else + for (auto &def : params.compiled_functions) { + // Add the results to the execution engine now + orc::ThreadSafeModule &M = std::get<0>(def.second); + if (M) + jl_ExecutionEngine->addModule(std::move(M)); + } +#endif ++CompiledCodeinsts; MaxWorkqueueSize.updateMax(params.compiled_functions.size()); IndirectCodeinsts += params.compiled_functions.size() - 1; } + // batch compile job for all new functions + SmallVector NewDefs; + for (auto &def : params.compiled_functions) { + jl_llvm_functions_t &decls = std::get<1>(def.second); + if (decls.functionObject != "jl_fptr_args" && + decls.functionObject != "jl_fptr_sparam" && + decls.functionObject != "jl_f_opaque_closure_call") + NewDefs.push_back(decls.functionObject); + if (!decls.specFunctionObject.empty()) + NewDefs.push_back(decls.specFunctionObject); + } + auto Addrs = jl_ExecutionEngine->findSymbols(NewDefs); + size_t i = 0; + size_t nextaddr = 0; for (auto &def : params.compiled_functions) { jl_code_instance_t *this_code = def.first; if (i < jl_timing_print_limit) jl_timing_show_func_sig(this_code->def->specTypes, JL_TIMING_DEFAULT_BLOCK); - jl_llvm_functions_t decls = std::get<1>(def.second); + jl_llvm_functions_t &decls = std::get<1>(def.second); jl_callptr_t addr; bool isspecsig = false; if (decls.functionObject == "jl_fptr_args") { @@ -305,12 +334,16 @@ static jl_callptr_t _jl_compile_codeinst( addr = jl_f_opaque_closure_call_addr; } else { - addr = (jl_callptr_t)getAddressForFunction(decls.functionObject); + assert(NewDefs[nextaddr] == decls.functionObject); + addr = (jl_callptr_t)Addrs[nextaddr++]; + assert(addr); isspecsig = true; } if (!decls.specFunctionObject.empty()) { void *prev_specptr = NULL; - auto spec = (void*)getAddressForFunction(decls.specFunctionObject); + assert(NewDefs[nextaddr] == decls.specFunctionObject); + void *spec = (void*)Addrs[nextaddr++]; + assert(spec); if (jl_atomic_cmpswap_acqrel(&this_code->specptr.fptr, &prev_specptr, spec)) { // only set specsig and invoke if we were the first to set specptr jl_atomic_store_relaxed(&this_code->specsigflags, (uint8_t) isspecsig); @@ -601,48 +634,6 @@ static auto countBasicBlocks(const Function &F) JL_NOTSAFEPOINT static constexpr size_t N_optlevels = 4; -static Expected validateExternRelocations(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) JL_NOTSAFEPOINT { -#if !defined(JL_NDEBUG) && !defined(JL_USE_JITLINK) - auto isIntrinsicFunction = [](GlobalObject &GO) JL_NOTSAFEPOINT { - auto F = dyn_cast(&GO); - if (!F) - return false; - return F->isIntrinsic() || F->getName().starts_with("julia."); - }; - // validate the relocations for M (only for RuntimeDyld, JITLink performs its own symbol validation) - auto Err = TSM.withModuleDo([isIntrinsicFunction](Module &M) JL_NOTSAFEPOINT { - Error Err = Error::success(); - for (auto &GO : make_early_inc_range(M.global_objects())) { - if (!GO.isDeclarationForLinker()) - continue; - if (GO.use_empty()) { - GO.eraseFromParent(); - continue; - } - if (isIntrinsicFunction(GO)) - continue; - auto sym = jl_ExecutionEngine->findUnmangledSymbol(GO.getName()); - if (sym) - continue; - // TODO have we ever run into this check? 
It's been guaranteed to not - // fire in an assert build, since previously LLVM would abort due to - // not handling the error if we didn't find the unmangled symbol - if (SectionMemoryManager::getSymbolAddressInProcess( - jl_ExecutionEngine->getMangledName(GO.getName()))) { - consumeError(sym.takeError()); - continue; - } - Err = joinErrors(std::move(Err), sym.takeError()); - } - return Err; - }); - if (Err) { - return std::move(Err); - } -#endif - return std::move(TSM); -} - static Expected selectOptLevel(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) { TSM.withModuleDo([](Module &M) { size_t opt_level = std::max(static_cast(jl_options.opt_level), 0); @@ -673,18 +664,6 @@ static Expected selectOptLevel(orc::ThreadSafeModule TSM, return std::move(TSM); } -static void recordDebugTSM(orc::MaterializationResponsibility &, orc::ThreadSafeModule TSM) JL_NOTSAFEPOINT { - auto ptr = TSM.withModuleDo([](Module &M) JL_NOTSAFEPOINT { - auto md = M.getModuleFlag("julia.__jit_debug_tsm_addr"); - if (!md) - return static_cast(nullptr); - return reinterpret_cast(cast(cast(md)->getValue())->getZExtValue()); - }); - if (ptr) { - *ptr = std::move(TSM); - } -} - void jl_register_jit_object(const object::ObjectFile &debugObj, std::function getLoadAddress, std::function lookupWriteAddress); @@ -1584,6 +1563,7 @@ JuliaOJIT::JuliaOJIT() #ifdef JL_USE_JITLINK MemMgr(createJITLinkMemoryManager()), ObjectLayer(ES, *MemMgr), + CompileLayer(ES, ObjectLayer, std::make_unique>(orc::irManglingOptionsFromTargetOptions(TM->Options), *TM)), #else MemMgr(createRTDyldMemoryManager()), ObjectLayer( @@ -1593,15 +1573,12 @@ JuliaOJIT::JuliaOJIT() return result; } ), -#endif LockLayer(ObjectLayer), CompileLayer(ES, LockLayer, std::make_unique>(orc::irManglingOptionsFromTargetOptions(TM->Options), *TM)), +#endif JITPointersLayer(ES, CompileLayer, orc::IRTransformLayer::TransformFunction(JITPointersT(SharedBytes, RLST_mutex))), OptimizeLayer(ES, JITPointersLayer, orc::IRTransformLayer::TransformFunction(OptimizerT(*TM, PrintLLVMTimers, llvm_printing_mutex))), - OptSelLayer(ES, OptimizeLayer, orc::IRTransformLayer::TransformFunction(selectOptLevel)), - DepsVerifyLayer(ES, OptSelLayer, orc::IRTransformLayer::TransformFunction(validateExternRelocations)), - ExternalCompileLayer(ES, LockLayer, - std::make_unique>(orc::irManglingOptionsFromTargetOptions(TM->Options), *TM)) + OptSelLayer(ES, OptimizeLayer, orc::IRTransformLayer::TransformFunction(selectOptLevel)) { JL_MUTEX_INIT(&this->jitlock, "JuliaOJIT"); #ifdef JL_USE_JITLINK @@ -1625,7 +1602,6 @@ JuliaOJIT::JuliaOJIT() registerRTDyldJITObject(Object, LO, MemMgr); }); #endif - CompileLayer.setNotifyCompiled(recordDebugTSM); std::string ErrorStr; @@ -1786,8 +1762,8 @@ void JuliaOJIT::addModule(orc::ThreadSafeModule TSM) { JL_TIMING(LLVM_JIT, JIT_Total); ++ModulesAdded; +#ifndef JL_USE_JITLINK orc::SymbolLookupSet NewExports; - orc::ThreadSafeModule CurrentlyCompiling; TSM.withModuleDo([&](Module &M) JL_NOTSAFEPOINT { for (auto &F : M.global_values()) { if (!F.isDeclaration() && F.getLinkage() == GlobalValue::ExternalLinkage) { @@ -1796,42 +1772,24 @@ void JuliaOJIT::addModule(orc::ThreadSafeModule TSM) } } assert(!verifyLLVMIR(M)); - auto jit_debug_tsm_addr = ConstantInt::get(Type::getIntNTy(M.getContext(), sizeof(void*) * CHAR_BIT), (uintptr_t) &CurrentlyCompiling); - M.addModuleFlag(Module::Error, "julia.__jit_debug_tsm_addr", jit_debug_tsm_addr); }); +#endif - // TODO: what is the performance characteristics of this? 
- auto Err = DepsVerifyLayer.add(JD, std::move(TSM)); + auto Err = OptSelLayer.add(JD, std::move(TSM)); if (Err) { ES.reportError(std::move(Err)); errs() << "Failed to add module to JIT!\n"; - if (CurrentlyCompiling) { - CurrentlyCompiling.withModuleDo([](Module &M) JL_NOTSAFEPOINT { errs() << "Dumping failing module\n" << M << "\n"; }); - } else { - errs() << "Module unavailable to be printed\n"; - } abort(); } +#ifndef JL_USE_JITLINK // force eager compilation (for now), due to memory management specifics // (can't handle compilation recursion) auto Lookups = ES.lookup({{&JD, orc::JITDylibLookupFlags::MatchExportedSymbolsOnly}}, NewExports); if (!Lookups) { ES.reportError(Lookups.takeError()); errs() << "Failed to lookup symbols in module!\n"; - if (CurrentlyCompiling) { - CurrentlyCompiling.withModuleDo([](Module &M) JL_NOTSAFEPOINT { errs() << "Dumping failing module\n" << M << "\n"; }); - } else { - errs() << "Module unavailable to be printed\n"; - } - } - for (auto &Sym : *Lookups) { - #if JL_LLVM_VERSION >= 170000 - assert(Sym.second.getAddress()); - #else - assert(Sym.second); - #endif - (void) Sym; } +#endif } Error JuliaOJIT::addExternalModule(orc::JITDylib &JD, orc::ThreadSafeModule TSM, bool ShouldOptimize) @@ -1850,12 +1808,33 @@ Error JuliaOJIT::addExternalModule(orc::JITDylib &JD, orc::ThreadSafeModule TSM, return Error::success(); })) return Err; - return ExternalCompileLayer.add(JD.getDefaultResourceTracker(), std::move(TSM)); + return CompileLayer.add(JD.getDefaultResourceTracker(), std::move(TSM)); } Error JuliaOJIT::addObjectFile(orc::JITDylib &JD, std::unique_ptr Obj) { assert(Obj && "Can not add null object"); +#ifdef JL_USE_JITLINK + return ObjectLayer.add(JD.getDefaultResourceTracker(), std::move(Obj)); +#else return LockLayer.add(JD.getDefaultResourceTracker(), std::move(Obj)); +#endif +} + +SmallVector JuliaOJIT::findSymbols(ArrayRef Names) +{ + DenseMap Unmangled; + orc::SymbolLookupSet Exports; + for (StringRef Name : Names) { + auto Mangled = ES.intern(getMangledName(Name)); + Unmangled[NonOwningSymbolStringPtr(Mangled)] = Unmangled.size(); + Exports.add(std::move(Mangled)); + } + SymbolMap Syms = cantFail(ES.lookup(orc::makeJITDylibSearchOrder(ArrayRef(&JD)), std::move(Exports))); + SmallVector Addrs(Names.size()); + for (auto it : Syms) { + Addrs[Unmangled.at(orc::NonOwningSymbolStringPtr(it.first))] = it.second.getAddress().getValue(); + } + return Addrs; } #if JL_LLVM_VERSION >= 170000 @@ -1887,7 +1866,7 @@ uint64_t JuliaOJIT::getGlobalValueAddress(StringRef Name) consumeError(addr.takeError()); return 0; } - return cantFail(std::move(addr)).getAddress().getValue(); + return addr->getAddress().getValue(); } uint64_t JuliaOJIT::getFunctionAddress(StringRef Name) @@ -1897,7 +1876,7 @@ uint64_t JuliaOJIT::getFunctionAddress(StringRef Name) consumeError(addr.takeError()); return 0; } - return cantFail(std::move(addr)).getAddress().getValue(); + return addr->getAddress().getValue(); } #else JL_JITSymbol JuliaOJIT::findSymbol(StringRef Name, bool ExportedSymbolsOnly) @@ -1973,41 +1952,92 @@ StringRef JuliaOJIT::getFunctionAtAddress(uint64_t Addr, jl_callptr_t invoke, jl return *fname; } - #ifdef JL_USE_JITLINK -extern "C" orc::shared::CWrapperFunctionResult -llvm_orc_registerJITLoaderGDBAllocAction(const char *Data, size_t Size); +#if JL_LLVM_VERSION >= 170000 +#define addAbsoluteToMap(map,name) \ + (map[mangle(#name)] = {ExecutorAddr::fromPtr(&name), JITSymbolFlags::Exported | JITSymbolFlags::Callable}, orc::ExecutorAddr::fromPtr(&name)) +#else +#define 
addAbsoluteToMap(map,name) \ + (map[mangle(#name)] = JITEvaluatedSymbol::fromPointer(&name, JITSymbolFlags::Exported | JITSymbolFlags::Callable), orc::ExecutorAddr::fromPtr(&name)) +#endif void JuliaOJIT::enableJITDebuggingSupport() { orc::SymbolMap GDBFunctions; - #if JL_LLVM_VERSION >= 170000 - GDBFunctions[mangle("llvm_orc_registerJITLoaderGDBAllocAction")] = {ExecutorAddr::fromPtr(&llvm_orc_registerJITLoaderGDBAllocAction), JITSymbolFlags::Exported | JITSymbolFlags::Callable}; - GDBFunctions[mangle("llvm_orc_registerJITLoaderGDBWrapper")] = {ExecutorAddr::fromPtr(&llvm_orc_registerJITLoaderGDBWrapper), JITSymbolFlags::Exported | JITSymbolFlags::Callable}; - #else - GDBFunctions[mangle("llvm_orc_registerJITLoaderGDBAllocAction")] = JITEvaluatedSymbol::fromPointer(&llvm_orc_registerJITLoaderGDBAllocAction, JITSymbolFlags::Exported | JITSymbolFlags::Callable); - GDBFunctions[mangle("llvm_orc_registerJITLoaderGDBWrapper")] = JITEvaluatedSymbol::fromPointer(&llvm_orc_registerJITLoaderGDBWrapper, JITSymbolFlags::Exported | JITSymbolFlags::Callable); - #endif + addAbsoluteToMap(GDBFunctions,llvm_orc_registerJITLoaderGDBAllocAction); + auto registerJITLoaderGDBWrapper = addAbsoluteToMap(GDBFunctions,llvm_orc_registerJITLoaderGDBWrapper); cantFail(JD.define(orc::absoluteSymbols(GDBFunctions))); if (TM->getTargetTriple().isOSBinFormatMachO()) ObjectLayer.addPlugin(cantFail(orc::GDBJITDebugInfoRegistrationPlugin::Create(ES, JD, TM->getTargetTriple()))); #ifndef _COMPILER_ASAN_ENABLED_ // TODO: Fix duplicated sections spam #51794 else if (TM->getTargetTriple().isOSBinFormatELF()) //EPCDebugObjectRegistrar doesn't take a JITDylib, so we have to directly provide the call address - ObjectLayer.addPlugin(std::make_unique(ES, std::make_unique(ES, orc::ExecutorAddr::fromPtr(&llvm_orc_registerJITLoaderGDBWrapper)))); + ObjectLayer.addPlugin(std::make_unique(ES, std::make_unique(ES, registerJITLoaderGDBWrapper))); +#endif +} + +void JuliaOJIT::enableIntelJITEventListener() +{ +#if JL_LLVM_VERSION >= 190000 + if (TT.isOSBinFormatELF()) { + orc::SymbolMap VTuneFunctions; + auto RegisterImplAddr = addAbsoluteToMap(VTuneFunctions,llvm_orc_registerVTuneImpl); + auto UnregisterImplAddr = addAbsoluteToMap(VTuneFunctions,llvm_orc_unregisterVTuneImpl); + ObjectLayer.addPlugin(cantFail(DebugInfoPreservationPlugin::Create())); + //ObjectLayer.addPlugin(cantFail(VTuneSupportPlugin::Create(ES.getExecutorProcessControl(), + // JD, /*EmitDebugInfo=*/true, + // /*TestMode=*/false))); + bool EmitDebugInfo = true; + ObjectLayer.addPlugin(std::make_unique( + ES.getExecutorProcessControl(), RegisterImplAddr, UnregisterImplAddr, EmitDebugInfo)); + } +#endif +} + +void JuliaOJIT::enableOProfileJITEventListener() +{ + // implement when available in LLVM +} + +void JuliaOJIT::enablePerfJITEventListener() +{ +#if JL_LLVM_VERSION >= 180000 + orc::SymbolMap PerfFunctions; + auto StartAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfStart); + auto EndAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfEnd); + auto ImplAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfImpl); + cantFail(JD.define(orc::absoluteSymbols(PerfFunctions))); + if (TM->getTargetTriple().isOSBinFormatELF()) { + ObjectLayer.addPlugin(cantFail(DebugInfoPreservationPlugin::Create())); + //ObjectLayer.addPlugin(cantFail(PerfSupportPlugin::Create( + // ES.getExecutorProcessControl(), *JD, true, true))); + bool EmitDebugInfo = true, EmitUnwindInfo = true; + ObjectLayer.addPlugin(std::make_unique( + 
ES.getExecutorProcessControl(), StartAddr, EndAddr, ImplAddr, EmitDebugInfo, EmitUnwindInfo)); + } #endif } #else +void JuliaOJIT::RegisterJITEventListener(JITEventListener *L) +{ + if (L) + ObjectLayer.registerJITEventListener(*L); +} void JuliaOJIT::enableJITDebuggingSupport() { RegisterJITEventListener(JITEventListener::createGDBRegistrationListener()); } - -void JuliaOJIT::RegisterJITEventListener(JITEventListener *L) +void JuliaOJIT::enableIntelJITEventListener() { - if (!L) - return; - this->ObjectLayer.registerJITEventListener(*L); + RegisterJITEventListener(JITEventListener::createIntelJITEventListener()); +} +void JuliaOJIT::enableOProfileJITEventListener() +{ + RegisterJITEventListener(JITEventListener::createOProfileJITEventListener()); +} +void JuliaOJIT::enablePerfJITEventListener() +{ + RegisterJITEventListener(JITEventListener::createPerfJITEventListener()); } #endif @@ -2251,6 +2281,7 @@ static void jl_decorate_module(Module &M) { } } +#ifndef JL_USE_JITLINK // Implements Tarjan's SCC (strongly connected components) algorithm, simplified to remove the count variable static int jl_add_to_ee( orc::ThreadSafeModule &M, @@ -2316,13 +2347,7 @@ static int jl_add_to_ee( jl_ExecutionEngine->addModule(std::move(M)); return 0; } - -static uint64_t getAddressForFunction(StringRef fname) -{ - auto addr = jl_ExecutionEngine->getFunctionAddress(fname); - assert(addr); - return addr; -} +#endif // helper function for adding a DLLImport (dlsym) address to the execution engine void add_named_global(StringRef name, void *addr) diff --git a/src/jitlayers.h b/src/jitlayers.h index 93669c2351d88..3353a4093bd27 100644 --- a/src/jitlayers.h +++ b/src/jitlayers.h @@ -47,7 +47,7 @@ // and feature support (e.g. Windows, JITEventListeners for various profilers, // etc.). Thus, we currently only use JITLink where absolutely required, that is, // for Mac/aarch64 and Linux/aarch64. -// #define JL_FORCE_JITLINK +//#define JL_FORCE_JITLINK #if defined(_COMPILER_ASAN_ENABLED_) || defined(_COMPILER_MSAN_ENABLED_) || defined(_COMPILER_TSAN_ENABLED_) # define HAS_SANITIZER @@ -363,7 +363,6 @@ class JuliaOJIT { typedef orc::ObjectLinkingLayer ObjLayerT; #else typedef orc::RTDyldObjectLinkingLayer ObjLayerT; -#endif struct LockLayerT : public orc::ObjectLayer { LockLayerT(orc::ObjectLayer &BaseLayer) JL_NOTSAFEPOINT : orc::ObjectLayer(BaseLayer.getExecutionSession()), BaseLayer(BaseLayer) {} @@ -381,11 +380,11 @@ class JuliaOJIT { orc::ObjectLayer &BaseLayer; std::mutex EmissionMutex; }; +#endif typedef orc::IRCompileLayer CompileLayerT; typedef orc::IRTransformLayer JITPointersLayerT; typedef orc::IRTransformLayer OptimizeLayerT; typedef orc::IRTransformLayer OptSelLayerT; - typedef orc::IRTransformLayer DepsVerifyLayerT; typedef object::OwningBinary OwningObj; template - void registerObject(const ObjT &Obj, const LoadResult &LO); +#ifndef JL_USE_JITLINK + void RegisterJITEventListener(JITEventListener *L) JL_NOTSAFEPOINT; +#endif public: @@ -511,10 +509,9 @@ class JuliaOJIT { ~JuliaOJIT() JL_NOTSAFEPOINT; void enableJITDebuggingSupport() JL_NOTSAFEPOINT; -#ifndef JL_USE_JITLINK - // JITLink doesn't support old JITEventListeners (yet). 
- void RegisterJITEventListener(JITEventListener *L) JL_NOTSAFEPOINT; -#endif + void enableIntelJITEventListener() JL_NOTSAFEPOINT; + void enableOProfileJITEventListener() JL_NOTSAFEPOINT; + void enablePerfJITEventListener() JL_NOTSAFEPOINT; orc::SymbolStringPtr mangle(StringRef Name) JL_NOTSAFEPOINT; void addGlobalMapping(StringRef Name, uint64_t Addr) JL_NOTSAFEPOINT; @@ -525,7 +522,7 @@ class JuliaOJIT { bool ShouldOptimize = false) JL_NOTSAFEPOINT; Error addObjectFile(orc::JITDylib &JD, std::unique_ptr Obj) JL_NOTSAFEPOINT; - orc::IRCompileLayer &getIRCompileLayer() JL_NOTSAFEPOINT { return ExternalCompileLayer; }; + orc::IRCompileLayer &getIRCompileLayer() JL_NOTSAFEPOINT { return CompileLayer; }; orc::ExecutionSession &getExecutionSession() JL_NOTSAFEPOINT { return ES; } orc::JITDylib &getExternalJITDylib() JL_NOTSAFEPOINT { return ExternalJD; } @@ -533,6 +530,7 @@ class JuliaOJIT { Expected findSymbol(StringRef Name, bool ExportedSymbolsOnly) JL_NOTSAFEPOINT; Expected findUnmangledSymbol(StringRef Name) JL_NOTSAFEPOINT; Expected findExternalJDSymbol(StringRef Name, bool ExternalJDOnly) JL_NOTSAFEPOINT; + SmallVector findSymbols(ArrayRef Names) JL_NOTSAFEPOINT; #else JITEvaluatedSymbol findSymbol(StringRef Name, bool ExportedSymbolsOnly) JL_NOTSAFEPOINT; JITEvaluatedSymbol findUnmangledSymbol(StringRef Name) JL_NOTSAFEPOINT; @@ -616,13 +614,13 @@ class JuliaOJIT { const std::unique_ptr MemMgr; #endif ObjLayerT ObjectLayer; +#ifndef JL_USE_JITLINK LockLayerT LockLayer; +#endif CompileLayerT CompileLayer; JITPointersLayerT JITPointersLayer; OptimizeLayerT OptimizeLayer; OptSelLayerT OptSelLayer; - DepsVerifyLayerT DepsVerifyLayer; - CompileLayerT ExternalCompileLayer; }; extern JuliaOJIT *jl_ExecutionEngine; std::unique_ptr jl_create_llvm_module(StringRef name, LLVMContext &ctx, const DataLayout &DL = jl_ExecutionEngine->getDataLayout(), const Triple &triple = jl_ExecutionEngine->getTargetTriple()) JL_NOTSAFEPOINT; From 218edeaa80c7d70609e2029f81287137d7b201dd Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 4 Oct 2024 14:38:47 +0000 Subject: [PATCH 120/537] rewrite catchjmp asm to use normal relocations instead of manual editing --- src/cgmemmgr.cpp | 29 -------------- src/codegen.cpp | 1 + src/debug-registry.h | 3 +- src/debuginfo.cpp | 47 ++++++++-------------- src/jitlayers.cpp | 95 +++++++++++++++++++++++++++----------------- 5 files changed, 78 insertions(+), 97 deletions(-) diff --git a/src/cgmemmgr.cpp b/src/cgmemmgr.cpp index c78e6092ca5db..8557698a4e513 100644 --- a/src/cgmemmgr.cpp +++ b/src/cgmemmgr.cpp @@ -833,28 +833,6 @@ class RTDyldMemoryManagerJL : public SectionMemoryManager { mapAddresses(Dyld, ro_alloc); mapAddresses(Dyld, exe_alloc); } -#ifdef _OS_WINDOWS_ - template - void *lookupWriteAddressFor(void *rt_addr, Alloc &&allocator) - { - for (auto &alloc: allocator->allocations) { - if (alloc.rt_addr == rt_addr) { - return alloc.wr_addr; - } - } - return nullptr; - } - void *lookupWriteAddressFor(void *rt_addr) - { - if (!ro_alloc) - return rt_addr; - if (void *ptr = lookupWriteAddressFor(rt_addr, ro_alloc)) - return ptr; - if (void *ptr = lookupWriteAddressFor(rt_addr, exe_alloc)) - return ptr; - return rt_addr; - } -#endif // _OS_WINDOWS_ }; uint8_t *RTDyldMemoryManagerJL::allocateCodeSection(uintptr_t Size, @@ -947,13 +925,6 @@ void RTDyldMemoryManagerJL::deregisterEHFrames(uint8_t *Addr, } -#ifdef _OS_WINDOWS_ -void *lookupWriteAddressFor(RTDyldMemoryManager *memmgr, void *rt_addr) -{ - return 
((RTDyldMemoryManagerJL*)memmgr)->lookupWriteAddressFor(rt_addr); -} -#endif - RTDyldMemoryManager* createRTDyldMemoryManager() { return new RTDyldMemoryManagerJL(); diff --git a/src/codegen.cpp b/src/codegen.cpp index cee88a3d20f3d..bcda527416676 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -10371,6 +10371,7 @@ static void init_jit_functions(void) #ifdef _OS_WINDOWS_ #if defined(_CPU_X86_64_) + add_named_global("__julia_personality", &__julia_personality); #if defined(_COMPILER_GCC_) add_named_global("___chkstk_ms", &___chkstk_ms); #else diff --git a/src/debug-registry.h b/src/debug-registry.h index 85a94245ce6aa..4c9e13d8cd72d 100644 --- a/src/debug-registry.h +++ b/src/debug-registry.h @@ -145,8 +145,7 @@ class JITDebugInfoRegistry void add_code_in_flight(llvm::StringRef name, jl_code_instance_t *codeinst, const llvm::DataLayout &DL) JL_NOTSAFEPOINT; jl_method_instance_t *lookupLinfo(size_t pointer) JL_NOTSAFEPOINT; void registerJITObject(const llvm::object::ObjectFile &Object, - std::function getLoadAddress, - std::function lookupWriteAddress); + std::function getLoadAddress); objectmap_t& getObjectMap() JL_NOTSAFEPOINT; void add_image_info(image_info_t info) JL_NOTSAFEPOINT; bool get_image_info(uint64_t base, image_info_t *info) const JL_NOTSAFEPOINT; diff --git a/src/debuginfo.cpp b/src/debuginfo.cpp index 84550811072fe..cfaf8d4c70ee9 100644 --- a/src/debuginfo.cpp +++ b/src/debuginfo.cpp @@ -223,11 +223,21 @@ static void create_PRUNTIME_FUNCTION(uint8_t *Code, size_t Size, StringRef fnnam #endif void JITDebugInfoRegistry::registerJITObject(const object::ObjectFile &Object, - std::function getLoadAddress, - std::function lookupWriteAddress) + std::function getLoadAddress) { object::section_iterator EndSection = Object.section_end(); + bool anyfunctions = false; + for (const object::SymbolRef &sym_iter : Object.symbols()) { + object::SymbolRef::Type SymbolType = cantFail(sym_iter.getType()); + if (SymbolType != object::SymbolRef::ST_Function) + continue; + anyfunctions = true; + break; + } + if (!anyfunctions) + return; + #ifdef _CPU_ARM_ // ARM does not have/use .eh_frame uint64_t arm_exidx_addr = 0; @@ -281,14 +291,13 @@ void JITDebugInfoRegistry::registerJITObject(const object::ObjectFile &Object, #if defined(_OS_WINDOWS_) uint64_t SectionAddrCheck = 0; uint64_t SectionLoadCheck = 0; (void)SectionLoadCheck; - uint64_t SectionWriteCheck = 0; (void)SectionWriteCheck; uint8_t *UnwindData = NULL; #if defined(_CPU_X86_64_) uint8_t *catchjmp = NULL; for (const object::SymbolRef &sym_iter : Object.symbols()) { StringRef sName = cantFail(sym_iter.getName()); if (sName.equals("__UnwindData") || sName.equals("__catchjmp")) { - uint64_t Addr = cantFail(sym_iter.getAddress()); + uint64_t Addr = cantFail(sym_iter.getAddress()); // offset into object (including section offset) auto Section = cantFail(sym_iter.getSection()); assert(Section != EndSection && Section->isText()); uint64_t SectionAddr = Section->getAddress(); @@ -300,10 +309,7 @@ void JITDebugInfoRegistry::registerJITObject(const object::ObjectFile &Object, SectionLoadCheck == SectionLoadAddr); SectionAddrCheck = SectionAddr; SectionLoadCheck = SectionLoadAddr; - SectionWriteCheck = SectionLoadAddr; - if (lookupWriteAddress) - SectionWriteCheck = (uintptr_t)lookupWriteAddress((void*)SectionLoadAddr); - Addr += SectionWriteCheck - SectionLoadCheck; + Addr += SectionLoadAddr - SectionAddr; if (sName.equals("__UnwindData")) { UnwindData = (uint8_t*)Addr; } @@ -314,25 +320,7 @@ void JITDebugInfoRegistry::registerJITObject(const 
object::ObjectFile &Object, } assert(catchjmp); assert(UnwindData); - assert(SectionAddrCheck); assert(SectionLoadCheck); - assert(!memcmp(catchjmp, "\0\0\0\0\0\0\0\0\0\0\0\0", 12) && - !memcmp(UnwindData, "\0\0\0\0\0\0\0\0\0\0\0\0", 12)); - catchjmp[0] = 0x48; - catchjmp[1] = 0xb8; // mov RAX, QWORD PTR [&__julia_personality] - *(uint64_t*)(&catchjmp[2]) = (uint64_t)&__julia_personality; - catchjmp[10] = 0xff; - catchjmp[11] = 0xe0; // jmp RAX - UnwindData[0] = 0x09; // version info, UNW_FLAG_EHANDLER - UnwindData[1] = 4; // size of prolog (bytes) - UnwindData[2] = 2; // count of unwind codes (slots) - UnwindData[3] = 0x05; // frame register (rbp) = rsp - UnwindData[4] = 4; // second instruction - UnwindData[5] = 0x03; // mov RBP, RSP - UnwindData[6] = 1; // first instruction - UnwindData[7] = 0x50; // push RBP - *(DWORD*)&UnwindData[8] = (DWORD)(catchjmp - (uint8_t*)SectionWriteCheck); // relative location of catchjmp - UnwindData -= SectionWriteCheck - SectionLoadCheck; #endif // defined(_OS_X86_64_) #endif // defined(_OS_WINDOWS_) @@ -353,7 +341,7 @@ void JITDebugInfoRegistry::registerJITObject(const object::ObjectFile &Object, uint64_t SectionAddr = Section->getAddress(); StringRef secName = cantFail(Section->getName()); uint64_t SectionLoadAddr = getLoadAddress(secName); - Addr -= SectionAddr - SectionLoadAddr; + Addr += SectionLoadAddr - SectionAddr; StringRef sName = cantFail(sym_iter.getName()); uint64_t SectionSize = Section->getSize(); size_t Size = sym_size.second; @@ -404,10 +392,9 @@ void JITDebugInfoRegistry::registerJITObject(const object::ObjectFile &Object, } void jl_register_jit_object(const object::ObjectFile &Object, - std::function getLoadAddress, - std::function lookupWriteAddress) + std::function getLoadAddress) { - getJITDebugRegistry().registerJITObject(Object, getLoadAddress, lookupWriteAddress); + getJITDebugRegistry().registerJITObject(Object, getLoadAddress); } // TODO: convert the safe names from aotcomile.cpp:makeSafeName back into symbols diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index 7489b086105e6..d1757cadee05c 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -665,8 +665,7 @@ static Expected selectOptLevel(orc::ThreadSafeModule TSM, } void jl_register_jit_object(const object::ObjectFile &debugObj, - std::function getLoadAddress, - std::function lookupWriteAddress); + std::function getLoadAddress); namespace { @@ -726,7 +725,7 @@ class JLDebuginfoPlugin : public ObjectLinkingLayer::Plugin { return result->second; }; - jl_register_jit_object(*NewInfo->Object, getLoadAddress, nullptr); + jl_register_jit_object(*NewInfo->Object, getLoadAddress); PendingObjs.erase(&MR); } @@ -957,10 +956,6 @@ class ForwardingMemoryManager : public RuntimeDyld::MemoryManager { }; -#if defined(_OS_WINDOWS_) && defined(_CPU_X86_64_) -void *lookupWriteAddressFor(RTDyldMemoryManager *MemMgr, void *rt_addr); -#endif - void registerRTDyldJITObject(const object::ObjectFile &Object, const RuntimeDyld::LoadedObjectInfo &L, const std::shared_ptr &MemMgr) @@ -983,14 +978,7 @@ void registerRTDyldJITObject(const object::ObjectFile &Object, }; auto DebugObject = L.getObjectForDebug(Object); // ELF requires us to make a copy to mutate the header with the section load addresses. On other platforms this is a no-op. - jl_register_jit_object(DebugObject.getBinary() ? 
*DebugObject.getBinary() : Object, - getLoadAddress, -#if defined(_OS_WINDOWS_) && defined(_CPU_X86_64_) - [MemMgr](void *p) { return lookupWriteAddressFor(MemMgr.get(), p); } -#else - nullptr -#endif - ); + jl_register_jit_object(DebugObject.getBinary() ? *DebugObject.getBinary() : Object, getLoadAddress); } namespace { static std::unique_ptr createTargetMachine() JL_NOTSAFEPOINT { @@ -1281,8 +1269,9 @@ namespace { } // Windows needs some inline asm to help - // build unwind tables - jl_decorate_module(M); + // build unwind tables, if they have any functions to decorate + if (!M.functions().empty()) + jl_decorate_module(M); }); return std::move(TSM); } @@ -2255,30 +2244,64 @@ static void jl_decorate_module(Module &M) { if (TT.isOSWindows() && TT.getArch() == Triple::x86_64) { // Add special values used by debuginfo to build the UnwindData table registration for Win64 // This used to be GV, but with https://reviews.llvm.org/D100944 we no longer can emit GV into `.text` - // TODO: The data is set in debuginfo.cpp but it should be okay to actually emit it here. - std::string inline_asm = "\ - .section "; - inline_asm += + // and with JITLink it became difficult to change the content afterwards, but we + // would prefer that this simple content wasn't recompiled in every single module, + // so we emit the necessary PLT trampoline as inline assembly. + // This is somewhat duplicated with the .pdata section, but we haven't been able to + // use that yet due to relocation issues. +#define ASM_USES_ELF // use ELF or COFF syntax based on FORCE_ELF + StringRef inline_asm( + ".section" #if JL_LLVM_VERSION >= 180000 - ".ltext,\"ax\",@progbits"; + " .ltext,\"ax\",@progbits\n" #else - ".text"; + " .text\n" #endif - inline_asm += "\n\ - .type __UnwindData,@object \n\ - .p2align 2, 0x90 \n\ - __UnwindData: \n\ - .zero 12 \n\ - .size __UnwindData, 12 \n\ - \n\ - .type __catchjmp,@object \n\ - .p2align 2, 0x90 \n\ - __catchjmp: \n\ - .zero 12 \n\ - .size __catchjmp, 12"; - + ".globl __julia_personality\n" + "\n" +#ifdef ASM_USES_ELF + ".type __UnwindData,@object\n" +#else + ".def __UnwindData\n" + ".scl 2\n" + ".type 0\n" + ".endef\n" +#endif + ".p2align 2, 0x90\n" + "__UnwindData:\n" + " .byte 0x09;\n" // version info, UNW_FLAG_EHANDLER + " .byte 4;\n" // size of prolog (bytes) + " .byte 2;\n" // count of unwind codes (slots) + " .byte 0x05;\n" // frame register (rbp) = rsp + " .byte 4;\n" // second instruction + " .byte 0x03;\n" // mov RBP, RSP + " .byte 1;\n" // first instruction + " .byte 0x50;\n" // push RBP + " .int __catchjmp - " +#if JL_LLVM_VERSION >= 180000 + ".ltext;\n" // Section-relative offset (if using COFF and JITLink, this can be relative to __ImageBase instead, though then we could possibly use pdata/xdata directly then) +#else + ".text;\n" +#endif + ".size __UnwindData, 12\n" + "\n" +#ifdef ASM_USES_ELF + ".type __catchjmp,@function\n" +#else + ".def __catchjmp\n" + ".scl 2\n" + ".type 32\n" + ".endef\n" +#endif + ".p2align 2, 0x90\n" + "__catchjmp:\n" + " movabsq $__julia_personality, %rax\n" + " jmpq *%rax\n" + ".size __catchjmp, . 
- __catchjmp\n" + "\n"); M.appendModuleInlineAsm(inline_asm); } +#undef ASM_USES_ELF } #ifndef JL_USE_JITLINK From 7e2d8035990e67810ccbbc9fb5da810b1b83b774 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 4 Oct 2024 12:03:53 -0400 Subject: [PATCH 121/537] add logic to prefer loading modules that are already loaded (#55908) Iterate over the list of existing loaded modules for PkgId whenever loading a new module for PkgId, so that we will use that existing build_id content if it otherwise passes the other stale_checks. --- base/Base.jl | 2 +- base/loading.jl | 184 +++++++++++++++++++++++++++--------------------- test/loading.jl | 36 +++++++++- 3 files changed, 138 insertions(+), 84 deletions(-) diff --git a/base/Base.jl b/base/Base.jl index 23633f0b5138b..84e10ca788ba2 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -648,7 +648,7 @@ function __init__() empty!(explicit_loaded_modules) empty!(loaded_precompiles) # If we load a packageimage when building the image this might not be empty for (mod, key) in module_keys - loaded_precompiles[key => module_build_id(mod)] = mod + push!(get!(Vector{Module}, loaded_precompiles, key), mod) end if haskey(ENV, "JULIA_MAX_NUM_PRECOMPILE_FILES") MAX_NUM_PRECOMPILE_FILES[] = parse(Int, ENV["JULIA_MAX_NUM_PRECOMPILE_FILES"]) diff --git a/base/loading.jl b/base/loading.jl index 9080a2271fb27..475ce7f50eae7 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1231,7 +1231,7 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No dep = depmods[i] dep isa Module && continue _, depkey, depbuild_id = dep::Tuple{String, PkgId, UInt128} - dep = loaded_precompiles[depkey => depbuild_id] + dep = something(maybe_loaded_precompile(depkey, depbuild_id)) @assert PkgId(dep) == depkey && module_build_id(dep) === depbuild_id depmods[i] = dep end @@ -1337,6 +1337,7 @@ end function register_restored_modules(sv::SimpleVector, pkg::PkgId, path::String) # This function is also used by PkgCacheInspector.jl + assert_havelock(require_lock) restored = sv[1]::Vector{Any} for M in restored M = M::Module @@ -1345,7 +1346,7 @@ function register_restored_modules(sv::SimpleVector, pkg::PkgId, path::String) end if parentmodule(M) === M push!(loaded_modules_order, M) - loaded_precompiles[pkg => module_build_id(M)] = M + push!(get!(Vector{Module}, loaded_precompiles, pkg), M) end end @@ -1945,90 +1946,102 @@ end assert_havelock(require_lock) paths = find_all_in_cache_path(pkg, DEPOT_PATH) newdeps = PkgId[] - for path_to_try in paths::Vector{String} - staledeps = stale_cachefile(pkg, build_id, sourcepath, path_to_try; reasons, stalecheck) - if staledeps === true - continue - end - try - staledeps, ocachefile, newbuild_id = staledeps::Tuple{Vector{Any}, Union{Nothing, String}, UInt128} - # finish checking staledeps module graph - for i in eachindex(staledeps) - dep = staledeps[i] - dep isa Module && continue - modpath, modkey, modbuild_id = dep::Tuple{String, PkgId, UInt128} - modpaths = find_all_in_cache_path(modkey, DEPOT_PATH) - for modpath_to_try in modpaths - modstaledeps = stale_cachefile(modkey, modbuild_id, modpath, modpath_to_try; stalecheck) - if modstaledeps === true - continue - end - modstaledeps, modocachepath, _ = modstaledeps::Tuple{Vector{Any}, Union{Nothing, String}, UInt128} - staledeps[i] = (modpath, modkey, modbuild_id, modpath_to_try, modstaledeps, modocachepath) - @goto check_next_dep + try_build_ids = UInt128[build_id] + if build_id == UInt128(0) + let loaded = get(loaded_precompiles, pkg, nothing) + if loaded !== nothing + for mod in 
loaded # try these in reverse original load order to see if one is already valid + pushfirst!(try_build_ids, module_build_id(mod)) end - @debug "Rejecting cache file $path_to_try because required dependency $modkey with build ID $(UUID(modbuild_id)) is missing from the cache." - @goto check_next_path - @label check_next_dep - end - M = get(loaded_precompiles, pkg => newbuild_id, nothing) - if isa(M, Module) - stalecheck && register_root_module(M) - return M end - if stalecheck - try - touch(path_to_try) # update timestamp of precompilation file - catch ex # file might be read-only and then we fail to update timestamp, which is fine - ex isa IOError || rethrow() - end + end + end + for build_id in try_build_ids + for path_to_try in paths::Vector{String} + staledeps = stale_cachefile(pkg, build_id, sourcepath, path_to_try; reasons, stalecheck) + if staledeps === true + continue end - # finish loading module graph into staledeps - # TODO: call all start_loading calls (in reverse order) before calling any _include_from_serialized, since start_loading will drop the loading lock - for i in eachindex(staledeps) - dep = staledeps[i] - dep isa Module && continue - modpath, modkey, modbuild_id, modcachepath, modstaledeps, modocachepath = dep::Tuple{String, PkgId, UInt128, String, Vector{Any}, Union{Nothing, String}} - dep = start_loading(modkey, modbuild_id, stalecheck) - while true - if dep isa Module - if PkgId(dep) == modkey && module_build_id(dep) === modbuild_id - break - else - @debug "Rejecting cache file $path_to_try because module $modkey got loaded at a different version than expected." - @goto check_next_path + try + staledeps, ocachefile, newbuild_id = staledeps::Tuple{Vector{Any}, Union{Nothing, String}, UInt128} + # finish checking staledeps module graph + for i in eachindex(staledeps) + dep = staledeps[i] + dep isa Module && continue + modpath, modkey, modbuild_id = dep::Tuple{String, PkgId, UInt128} + modpaths = find_all_in_cache_path(modkey, DEPOT_PATH) + for modpath_to_try in modpaths + modstaledeps = stale_cachefile(modkey, modbuild_id, modpath, modpath_to_try; stalecheck) + if modstaledeps === true + continue end + modstaledeps, modocachepath, _ = modstaledeps::Tuple{Vector{Any}, Union{Nothing, String}, UInt128} + staledeps[i] = (modpath, modkey, modbuild_id, modpath_to_try, modstaledeps, modocachepath) + @goto check_next_dep + end + @debug "Rejecting cache file $path_to_try because required dependency $modkey with build ID $(UUID(modbuild_id)) is missing from the cache." 
+ @goto check_next_path + @label check_next_dep + end + M = maybe_loaded_precompile(pkg, newbuild_id) + if isa(M, Module) + stalecheck && register_root_module(M) + return M + end + if stalecheck + try + touch(path_to_try) # update timestamp of precompilation file + catch ex # file might be read-only and then we fail to update timestamp, which is fine + ex isa IOError || rethrow() end - if dep === nothing - try - set_pkgorigin_version_path(modkey, modpath) - dep = _include_from_serialized(modkey, modcachepath, modocachepath, modstaledeps; register = stalecheck) - finally - end_loading(modkey, dep) + end + # finish loading module graph into staledeps + # TODO: call all start_loading calls (in reverse order) before calling any _include_from_serialized, since start_loading will drop the loading lock + for i in eachindex(staledeps) + dep = staledeps[i] + dep isa Module && continue + modpath, modkey, modbuild_id, modcachepath, modstaledeps, modocachepath = dep::Tuple{String, PkgId, UInt128, String, Vector{Any}, Union{Nothing, String}} + dep = start_loading(modkey, modbuild_id, stalecheck) + while true + if dep isa Module + if PkgId(dep) == modkey && module_build_id(dep) === modbuild_id + break + else + @debug "Rejecting cache file $path_to_try because module $modkey got loaded at a different version than expected." + @goto check_next_path + end end - if !isa(dep, Module) - @debug "Rejecting cache file $path_to_try because required dependency $modkey failed to load from cache file for $modcachepath." exception=dep - @goto check_next_path - else - push!(newdeps, modkey) + if dep === nothing + try + set_pkgorigin_version_path(modkey, modpath) + dep = _include_from_serialized(modkey, modcachepath, modocachepath, modstaledeps; register = stalecheck) + finally + end_loading(modkey, dep) + end + if !isa(dep, Module) + @debug "Rejecting cache file $path_to_try because required dependency $modkey failed to load from cache file for $modcachepath." exception=dep + @goto check_next_path + else + push!(newdeps, modkey) + end end end + staledeps[i] = dep end - staledeps[i] = dep - end - restored = get(loaded_precompiles, pkg => newbuild_id, nothing) - if !isa(restored, Module) - restored = _include_from_serialized(pkg, path_to_try, ocachefile, staledeps; register = stalecheck) - end - isa(restored, Module) && return restored - @debug "Deserialization checks failed while attempting to load cache from $path_to_try" exception=restored - @label check_next_path - finally - for modkey in newdeps - insert_extension_triggers(modkey) - stalecheck && run_package_callbacks(modkey) + restored = maybe_loaded_precompile(pkg, newbuild_id) + if !isa(restored, Module) + restored = _include_from_serialized(pkg, path_to_try, ocachefile, staledeps; register = stalecheck) + end + isa(restored, Module) && return restored + @debug "Deserialization checks failed while attempting to load cache from $path_to_try" exception=restored + @label check_next_path + finally + for modkey in newdeps + insert_extension_triggers(modkey) + stalecheck && run_package_callbacks(modkey) + end + empty!(newdeps) end - empty!(newdeps) end end return nothing @@ -2047,7 +2060,7 @@ function start_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool) loaded = stalecheck ? 
maybe_root_module(modkey) : nothing loaded isa Module && return loaded if build_id != UInt128(0) - loaded = get(loaded_precompiles, modkey => build_id, nothing) + loaded = maybe_loaded_precompile(modkey, build_id) loaded isa Module && return loaded end loading = get(package_locks, modkey, nothing) @@ -2377,12 +2390,21 @@ const pkgorigins = Dict{PkgId,PkgOrigin}() const explicit_loaded_modules = Dict{PkgId,Module}() # Emptied on Julia start const loaded_modules = Dict{PkgId,Module}() # available to be explicitly loaded -const loaded_precompiles = Dict{Pair{PkgId,UInt128},Module}() # extended (complete) list of modules, available to be loaded +const loaded_precompiles = Dict{PkgId,Vector{Module}}() # extended (complete) list of modules, available to be loaded const loaded_modules_order = Vector{Module}() const module_keys = IdDict{Module,PkgId}() # the reverse of loaded_modules root_module_key(m::Module) = @lock require_lock module_keys[m] +function maybe_loaded_precompile(key::PkgId, buildid::UInt128) + assert_havelock(require_lock) + mods = get(loaded_precompiles, key, nothing) + mods === nothing && return + for mod in mods + module_build_id(mod) == buildid && return mod + end +end + function module_build_id(m::Module) hi, lo = ccall(:jl_module_build_id, NTuple{2,UInt64}, (Any,), m) return (UInt128(hi) << 64) | lo @@ -2403,7 +2425,7 @@ end end end end - haskey(loaded_precompiles, key => module_build_id(m)) || push!(loaded_modules_order, m) + maybe_loaded_precompile(key, module_build_id(m)) === nothing && push!(loaded_modules_order, m) loaded_modules[key] = m explicit_loaded_modules[key] = m module_keys[m] = key @@ -3789,8 +3811,8 @@ end for i in 1:ndeps req_key, req_build_id = required_modules[i] # Check if module is already loaded - if !stalecheck && haskey(loaded_precompiles, req_key => req_build_id) - M = loaded_precompiles[req_key => req_build_id] + M = stalecheck ? nothing : maybe_loaded_precompile(req_key, req_build_id) + if M !== nothing @assert PkgId(M) == req_key && module_build_id(M) === req_build_id depmods[i] = M elseif root_module_exists(req_key) diff --git a/test/loading.jl b/test/loading.jl index b66fd632f23fa..1674a9f59a0c3 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -1,10 +1,10 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -original_depot_path = copy(Base.DEPOT_PATH) - using Test # Tests for @__LINE__ inside and outside of macros +# NOTE: the __LINE__ numbers for these first couple tests are significant, so +# adding any lines here will make those tests fail @test (@__LINE__) == 8 macro macro_caller_lineno() @@ -33,6 +33,9 @@ end @test @nested_LINE_expansion() == ((@__LINE__() - 4, @__LINE__() - 12), @__LINE__()) @test @nested_LINE_expansion2() == ((@__LINE__() - 5, @__LINE__() - 9), @__LINE__()) +original_depot_path = copy(Base.DEPOT_PATH) +include("precompile_utils.jl") + loaded_files = String[] push!(Base.include_callbacks, (mod::Module, fn::String) -> push!(loaded_files, fn)) include("test_sourcepath.jl") @@ -1603,3 +1606,32 @@ end copy!(LOAD_PATH, old_load_path) end end + +@testset "require_stdlib loading duplication" begin + depot_path = mktempdir() + oldBase64 = nothing + try + push!(empty!(DEPOT_PATH), depot_path) + Base64_key = Base.PkgId(Base.UUID("2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"), "Base64") + oldBase64 = Base.unreference_module(Base64_key) + cc = Base.compilecache(Base64_key) + @test Base.isprecompiled(Base64_key, cachepaths=String[cc[1]]) + empty!(DEPOT_PATH) + Base.require_stdlib(Base64_key) + push!(DEPOT_PATH, depot_path) + append!(DEPOT_PATH, original_depot_path) + oldloaded = @lock(Base.require_lock, length(get(Base.loaded_precompiles, Base64_key, Module[]))) + Base.require(Base64_key) + @test @lock(Base.require_lock, length(get(Base.loaded_precompiles, Base64_key, Module[]))) == oldloaded + Base.unreference_module(Base64_key) + empty!(DEPOT_PATH) + push!(DEPOT_PATH, depot_path) + Base.require(Base64_key) + @test @lock(Base.require_lock, length(get(Base.loaded_precompiles, Base64_key, Module[]))) == oldloaded + 1 + Base.unreference_module(Base64_key) + finally + oldBase64 === nothing || Base.register_root_module(oldBase64) + copy!(DEPOT_PATH, original_depot_path) + rm(depot_path, force=true, recursive=true) + end +end From 7986e17dd215367bfcdc5700eacf2a2d89f61d50 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 4 Oct 2024 12:05:15 -0400 Subject: [PATCH 122/537] Apple: fix bus error on smaller readonly file in unix (#55859) Enables the fix for #28245 in #44354 for Apple now that the Julia bugs are fixed by #55641 and #55877. 
Closes #28245 --- stdlib/Mmap/src/Mmap.jl | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/stdlib/Mmap/src/Mmap.jl b/stdlib/Mmap/src/Mmap.jl index df2f4f1a19991..7d57bf053940d 100644 --- a/stdlib/Mmap/src/Mmap.jl +++ b/stdlib/Mmap/src/Mmap.jl @@ -213,9 +213,7 @@ function mmap(io::IO, szfile = convert(Csize_t, len + offset) requestedSizeLarger = false if !(io isa Mmap.Anonymous) - @static if !Sys.isapple() - requestedSizeLarger = szfile > filesize(io) - end + requestedSizeLarger = szfile > filesize(io) end # platform-specific mmapping @static if Sys.isunix() @@ -231,9 +229,6 @@ function mmap(io::IO, throw(ArgumentError("unable to increase file size to $szfile due to read-only permissions")) end end - @static if Sys.isapple() - iswrite && grow && grow!(io, offset, len) - end # mmap the file ptr = ccall(:jl_mmap, Ptr{Cvoid}, (Ptr{Cvoid}, Csize_t, Cint, Cint, RawFD, Int64), C_NULL, mmaplen, prot, flags, file_desc, offset_page) From 1c5cd96f57bddeeccc1e99d12f6efae26ed6e333 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Fri, 4 Oct 2024 21:02:29 +0100 Subject: [PATCH 123/537] Add `Float16` to `Base.HWReal` (#55929) --- base/intfuncs.jl | 2 +- test/intfuncs.jl | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/base/intfuncs.jl b/base/intfuncs.jl index 8d46fcffa3ad5..06a0213e7141c 100644 --- a/base/intfuncs.jl +++ b/base/intfuncs.jl @@ -362,7 +362,7 @@ end # Restrict inlining to hardware-supported arithmetic types, which # are fast enough to benefit from inlining. -const HWReal = Union{Int8,Int16,Int32,Int64,UInt8,UInt16,UInt32,UInt64,Float32,Float64} +const HWReal = Union{Int8,Int16,Int32,Int64,UInt8,UInt16,UInt32,UInt64,Float16,Float32,Float64} const HWNumber = Union{HWReal, Complex{<:HWReal}, Rational{<:HWReal}} # Inline x^2 and x^3 for Val diff --git a/test/intfuncs.jl b/test/intfuncs.jl index deb1dd10681e8..6f1bde69dddfe 100644 --- a/test/intfuncs.jl +++ b/test/intfuncs.jl @@ -616,3 +616,20 @@ end @test Base.infer_effects(gcdx, (Int,Int)) |> Core.Compiler.is_foldable @test Base.infer_effects(invmod, (Int,Int)) |> Core.Compiler.is_foldable @test Base.infer_effects(binomial, (Int,Int)) |> Core.Compiler.is_foldable + +@testset "literal power" begin + @testset for T in Base.uniontypes(Base.HWReal) + ns = (T(0), T(1), T(5)) + if T <: AbstractFloat + ns = (ns..., T(3.14), T(-2.71)) + end + for n in ns + @test n ^ 0 === T(1) + @test n ^ 1 === n + @test n ^ 2 === n * n + @test n ^ 3 === n * n * n + @test n ^ -1 ≈ inv(n) + @test n ^ -2 ≈ inv(n) * inv(n) + end + end +end From 675da9df8710444a868d10b9206a683daff3fde3 Mon Sep 17 00:00:00 2001 From: Mo-Gul Date: Fri, 4 Oct 2024 23:29:41 +0200 Subject: [PATCH 124/537] docs: make mod an operator (#55988) --- base/range.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/range.jl b/base/range.jl index 8b30222382c9a..ce2d2e2821c24 100644 --- a/base/range.jl +++ b/base/range.jl @@ -1485,7 +1485,7 @@ end """ mod(x::Integer, r::AbstractUnitRange) -Find `y` in the range `r` such that ``x ≡ y (mod n)``, where `n = length(r)`, +Find `y` in the range `r` such that ``x ≡ y (\\mod n)``, where `n = length(r)`, i.e. `y = mod(x - first(r), n) + first(r)`. See also [`mod1`](@ref). 
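(Editorial aside, not part of the patch above: the `mod(x, r::AbstractUnitRange)` formula in that docstring is easy to misread, so here are two concrete evaluations, shown purely for illustration.)

```julia
# Illustrative only: mod(x, r) returns the y in r with y ≡ x (mod length(r)),
# i.e. y = mod(x - first(r), length(r)) + first(r).
mod(10, 1:5)  # == 5, since 10 ≡ 5 (mod 5) and 5 ∈ 1:5
mod(7, 3:5)   # == 4, i.e. mod(7 - 3, 3) + 3 == 4
```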
From fe864374d50b121aeb3ce5473c206338b21df5e5 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Fri, 4 Oct 2024 20:28:31 -0400 Subject: [PATCH 125/537] InteractiveUtils: add `@trace_compile` and `@trace_dispatch` (#55915) --- NEWS.md | 4 ++ doc/src/manual/performance-tips.md | 6 +- src/gf.c | 54 ++++++++++++++++-- src/julia_internal.h | 6 ++ stdlib/InteractiveUtils/docs/src/index.md | 2 + .../InteractiveUtils/src/InteractiveUtils.jl | 2 +- stdlib/InteractiveUtils/src/macros.jl | 55 +++++++++++++++++++ stdlib/InteractiveUtils/test/runtests.jl | 34 +++++++++++- 8 files changed, 151 insertions(+), 12 deletions(-) diff --git a/NEWS.md b/NEWS.md index fb1fcf381cc7f..bb22c9f940a78 100644 --- a/NEWS.md +++ b/NEWS.md @@ -182,6 +182,10 @@ Standard library changes #### InteractiveUtils +* New macros `@trace_compile` and `@trace_dispatch` for running an expression with + `--trace-compile=stderr --trace-compile-timing` and `--trace-dispatch=stderr` respectively enabled. + ([#55915]) + Deprecated or removed --------------------- diff --git a/doc/src/manual/performance-tips.md b/doc/src/manual/performance-tips.md index 417d5ac7a4ca1..3033720b5df8c 100644 --- a/doc/src/manual/performance-tips.md +++ b/doc/src/manual/performance-tips.md @@ -1486,11 +1486,11 @@ from the manifest, then revert the change with `pkg> undo`. If loading time is dominated by slow `__init__()` methods having compilation, one verbose way to identify what is being compiled is to use the julia args `--trace-compile=stderr --trace-compile-timing` which will report a [`precompile`](@ref) -statement each time a method is compiled, along with how long compilation took. For instance, the full setup would be: +statement each time a method is compiled, along with how long compilation took. The InteractiveUtils macro +[`@trace_compile`](@ref) provides a way to enable those args for a specific call. So a call for a complete report would look like: ``` -$ julia --startup-file=no --trace-compile=stderr --trace-compile-timing -julia> @time @time_imports using CustomPackage +julia> @time @time_imports @trace_compile using CustomPackage ... ``` diff --git a/src/gf.c b/src/gf.c index 56ebe6fe2fa84..fc2e62ebff96b 100644 --- a/src/gf.c +++ b/src/gf.c @@ -2513,12 +2513,32 @@ jl_code_instance_t *jl_method_inferred_with_abi(jl_method_instance_t *mi JL_PROP jl_mutex_t precomp_statement_out_lock; +_Atomic(uint8_t) jl_force_trace_compile_timing_enabled = 0; + +/** + * @brief Enable force trace compile to stderr with timing. + */ +JL_DLLEXPORT void jl_force_trace_compile_timing_enable(void) +{ + // Increment the flag to allow reentrant callers to `@trace_compile`. + jl_atomic_fetch_add(&jl_force_trace_compile_timing_enabled, 1); +} +/** + * @brief Disable force trace compile to stderr with timing. + */ +JL_DLLEXPORT void jl_force_trace_compile_timing_disable(void) +{ + // Increment the flag to allow reentrant callers to `@trace_compile`.
+ jl_atomic_fetch_add(&jl_force_trace_compile_timing_enabled, -1); +} + static void record_precompile_statement(jl_method_instance_t *mi, double compilation_time, int is_recompile) { static ios_t f_precompile; static JL_STREAM* s_precompile = NULL; jl_method_t *def = mi->def.method; - if (jl_options.trace_compile == NULL) + uint8_t force_trace_compile = jl_atomic_load_relaxed(&jl_force_trace_compile_timing_enabled); + if (force_trace_compile == 0 && jl_options.trace_compile == NULL) return; if (!jl_is_method(def)) return; @@ -2528,7 +2548,7 @@ static void record_precompile_statement(jl_method_instance_t *mi, double compila JL_LOCK(&precomp_statement_out_lock); if (s_precompile == NULL) { const char *t = jl_options.trace_compile; - if (!strncmp(t, "stderr", 6)) { + if (force_trace_compile || !strncmp(t, "stderr", 6)) { s_precompile = JL_STDERR; } else { @@ -2540,7 +2560,7 @@ static void record_precompile_statement(jl_method_instance_t *mi, double compila if (!jl_has_free_typevars(mi->specTypes)) { if (is_recompile && s_precompile == JL_STDERR && jl_options.color != JL_OPTIONS_COLOR_OFF) jl_printf(s_precompile, "\e[33m"); - if (jl_options.trace_compile_timing) + if (force_trace_compile || jl_options.trace_compile_timing) jl_printf(s_precompile, "#= %6.1f ms =# ", compilation_time / 1e6); jl_printf(s_precompile, "precompile("); jl_static_show(s_precompile, mi->specTypes); @@ -2562,6 +2582,25 @@ static void record_precompile_statement(jl_method_instance_t *mi, double compila jl_mutex_t dispatch_statement_out_lock; +_Atomic(uint8_t) jl_force_trace_dispatch_enabled = 0; + +/** + * @brief Enable force trace dispatch to stderr. + */ +JL_DLLEXPORT void jl_force_trace_dispatch_enable(void) +{ + // Increment the flag to allow reentrant callers to `@trace_dispatch`. + jl_atomic_fetch_add(&jl_force_trace_dispatch_enabled, 1); +} +/** + * @brief Disable force trace dispatch to stderr. + */ +JL_DLLEXPORT void jl_force_trace_dispatch_disable(void) +{ + // Increment the flag to allow reentrant callers to `@trace_dispatch`. 
+ jl_atomic_fetch_add(&jl_force_trace_dispatch_enabled, -1); +} + static void record_dispatch_statement(jl_method_instance_t *mi) { static ios_t f_dispatch; @@ -2570,10 +2609,11 @@ static void record_dispatch_statement(jl_method_instance_t *mi) if (!jl_is_method(def)) return; + uint8_t force_trace_dispatch = jl_atomic_load_relaxed(&jl_force_trace_dispatch_enabled); JL_LOCK(&dispatch_statement_out_lock); if (s_dispatch == NULL) { const char *t = jl_options.trace_dispatch; - if (!strncmp(t, "stderr", 6)) { + if (force_trace_dispatch || !strncmp(t, "stderr", 6)) { s_dispatch = JL_STDERR; } else { @@ -3393,7 +3433,8 @@ STATIC_INLINE jl_method_instance_t *jl_lookup_generic_(jl_value_t *F, jl_value_t // unreachable } // mfunc is about to be dispatched - if (jl_options.trace_dispatch != NULL) { + uint8_t force_trace_dispatch = jl_atomic_load_relaxed(&jl_force_trace_dispatch_enabled); + if (force_trace_dispatch || jl_options.trace_dispatch != NULL) { uint8_t miflags = jl_atomic_load_relaxed(&mfunc->flags); uint8_t was_dispatched = miflags & JL_MI_FLAGS_MASK_DISPATCHED; if (!was_dispatched) { @@ -3524,7 +3565,8 @@ jl_value_t *jl_gf_invoke_by_method(jl_method_t *method, jl_value_t *gf, jl_value jl_gc_sync_total_bytes(last_alloc); // discard allocation count from compilation } JL_GC_PROMISE_ROOTED(mfunc); - if (jl_options.trace_dispatch != NULL) { + uint8_t force_trace_dispatch = jl_atomic_load_relaxed(&jl_force_trace_dispatch_enabled); + if (force_trace_dispatch || jl_options.trace_dispatch != NULL) { uint8_t miflags = jl_atomic_load_relaxed(&mfunc->flags); uint8_t was_dispatched = miflags & JL_MI_FLAGS_MASK_DISPATCHED; if (!was_dispatched) { diff --git a/src/julia_internal.h b/src/julia_internal.h index 9a61c3d18356f..20d90fede3d5e 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -1171,6 +1171,12 @@ JL_DLLEXPORT jl_code_instance_t *jl_cache_uninferred(jl_method_instance_t *mi, j JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_for_uninferred(jl_method_instance_t *mi, jl_code_info_t *src); JL_DLLEXPORT extern jl_value_t *(*const jl_rettype_inferred_addr)(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t min_world, size_t max_world) JL_NOTSAFEPOINT; +JL_DLLEXPORT void jl_force_trace_compile_timing_enable(void); +JL_DLLEXPORT void jl_force_trace_compile_timing_disable(void); + +JL_DLLEXPORT void jl_force_trace_dispatch_enable(void); +JL_DLLEXPORT void jl_force_trace_dispatch_disable(void); + uint32_t jl_module_next_counter(jl_module_t *m) JL_NOTSAFEPOINT; jl_tupletype_t *arg_type_tuple(jl_value_t *arg1, jl_value_t **args, size_t nargs); diff --git a/stdlib/InteractiveUtils/docs/src/index.md b/stdlib/InteractiveUtils/docs/src/index.md index dbfb42b9a931d..69b68a27e4e81 100644 --- a/stdlib/InteractiveUtils/docs/src/index.md +++ b/stdlib/InteractiveUtils/docs/src/index.md @@ -33,5 +33,7 @@ InteractiveUtils.@code_llvm InteractiveUtils.code_native InteractiveUtils.@code_native InteractiveUtils.@time_imports +InteractiveUtils.@trace_compile +InteractiveUtils.@trace_dispatch InteractiveUtils.clipboard ``` diff --git a/stdlib/InteractiveUtils/src/InteractiveUtils.jl b/stdlib/InteractiveUtils/src/InteractiveUtils.jl index 835988ddf149f..f3c1ff7fba59f 100644 --- a/stdlib/InteractiveUtils/src/InteractiveUtils.jl +++ b/stdlib/InteractiveUtils/src/InteractiveUtils.jl @@ -11,7 +11,7 @@ Base.Experimental.@optlevel 1 export apropos, edit, less, code_warntype, code_llvm, code_native, methodswith, varinfo, versioninfo, subtypes, supertypes, @which, @edit, @less, @functionloc, @code_warntype, - 
@code_typed, @code_lowered, @code_llvm, @code_native, @time_imports, clipboard + @code_typed, @code_lowered, @code_llvm, @code_native, @time_imports, clipboard, @trace_compile, @trace_dispatch import Base.Docs.apropos diff --git a/stdlib/InteractiveUtils/src/macros.jl b/stdlib/InteractiveUtils/src/macros.jl index bb56c47b4f9ca..211687df47954 100644 --- a/stdlib/InteractiveUtils/src/macros.jl +++ b/stdlib/InteractiveUtils/src/macros.jl @@ -256,6 +256,28 @@ macro time_imports(ex) end end +macro trace_compile(ex) + quote + try + ccall(:jl_force_trace_compile_timing_enable, Cvoid, ()) + $(esc(ex)) + finally + ccall(:jl_force_trace_compile_timing_disable, Cvoid, ()) + end + end +end + +macro trace_dispatch(ex) + quote + try + ccall(:jl_force_trace_dispatch_enable, Cvoid, ()) + $(esc(ex)) + finally + ccall(:jl_force_trace_dispatch_disable, Cvoid, ()) + end + end +end + """ @functionloc @@ -409,3 +431,36 @@ julia> @time_imports using CSV """ :@time_imports + +""" + @trace_compile + +A macro to execute an expression and show any methods that were compiled (or recompiled in yellow), +like the julia args `--trace-compile=stderr --trace-compile-timing` but specifically for a call. + +```julia-repl +julia> @trace_compile rand(2,2) * rand(2,2) +#= 39.1 ms =# precompile(Tuple{typeof(Base.rand), Int64, Int64}) +#= 102.0 ms =# precompile(Tuple{typeof(Base.:(*)), Array{Float64, 2}, Array{Float64, 2}}) +2×2 Matrix{Float64}: + 0.421704 0.864841 + 0.211262 0.444366 +``` + +!!! compat "Julia 1.12" + This macro requires at least Julia 1.12 + +""" +:@trace_compile + +""" + @trace_dispatch + +A macro to execute an expression and report methods that were compiled via dynamic dispatch, +like the julia arg `--trace-dispatch=stderr` but specifically for a call. + +!!! compat "Julia 1.12" + This macro requires at least Julia 1.12 + +""" +:@trace_dispatch diff --git a/stdlib/InteractiveUtils/test/runtests.jl b/stdlib/InteractiveUtils/test/runtests.jl index 424564b70384c..8e7090cb53020 100644 --- a/stdlib/InteractiveUtils/test/runtests.jl +++ b/stdlib/InteractiveUtils/test/runtests.jl @@ -708,7 +708,7 @@ let length((@code_lowered sum(1:10)).code) end -@testset "@time_imports" begin +@testset "@time_imports, @trace_compile, @trace_dispatch" begin mktempdir() do dir cd(dir) do try @@ -717,7 +717,16 @@ end write(foo_file, """ module Foo3242 - foo() = 1 + function foo() + Base.Experimental.@force_compile + foo(1) + end + foo(x) = x + function bar() + Base.Experimental.@force_compile + bar(1) + end + bar(x) = x end """) @@ -734,6 +743,27 @@ end @test occursin("ms Foo3242", String(buf)) + fname = tempname() + f = open(fname, "w") + redirect_stderr(f) do + @trace_compile @eval Foo3242.foo() + end + close(f) + buf = read(fname) + rm(fname) + + @test occursin("ms =# precompile(", String(buf)) + + fname = tempname() + f = open(fname, "w") + redirect_stderr(f) do + @trace_dispatch @eval Foo3242.bar() + end + close(f) + buf = read(fname) + rm(fname) + + @test occursin("precompile(", String(buf)) finally filter!((≠)(dir), LOAD_PATH) end From 3a132cfc7661f850340d0360df6d3279d1aa7e16 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Sat, 5 Oct 2024 05:29:19 +0200 Subject: [PATCH 126/537] Profile: document heap snapshot viewing tools (#55743) --- stdlib/Profile/docs/src/index.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/stdlib/Profile/docs/src/index.md b/stdlib/Profile/docs/src/index.md index 5b4db77b9cb16..0b358e5decfa9 100644 --- a/stdlib/Profile/docs/src/index.md +++ b/stdlib/Profile/docs/src/index.md @@ -155,3 +155,8 @@ 
julia> Profile.HeapSnapshot.assemble_snapshot("snapshot", "snapshot.heapsnapshot The resulting heap snapshot file can be uploaded to chrome devtools to be viewed. For more information, see the [chrome devtools docs](https://developer.chrome.com/docs/devtools/memory-problems/heap-snapshots/#view_snapshots). +An alternative for analyzing Chromium heap snapshots is with the VS Code extension +`ms-vscode.vscode-js-profile-flame`. + +The Firefox heap snapshots are of a different format, and Firefox currently may +*not* be used for viewing the heap snapshots generated by Julia. From fb77d60fb7088a3b8ccac73ea3476b9a8c88d455 Mon Sep 17 00:00:00 2001 From: Christian Guinard <28689358+christiangnrd@users.noreply.github.com> Date: Sat, 5 Oct 2024 01:24:42 -0300 Subject: [PATCH 127/537] [REPL] Fix #55850 by using `safe_realpath` instead of `abspath` in `projname` (#55851) --- stdlib/REPL/src/Pkg_beforeload.jl | 2 +- stdlib/REPL/test/repl.jl | 11 ++++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/stdlib/REPL/src/Pkg_beforeload.jl b/stdlib/REPL/src/Pkg_beforeload.jl index ebd0cd255ce19..472fbc924668d 100644 --- a/stdlib/REPL/src/Pkg_beforeload.jl +++ b/stdlib/REPL/src/Pkg_beforeload.jl @@ -88,7 +88,7 @@ function projname(project_file::String) end for depot in Base.DEPOT_PATH envdir = joinpath(depot, "environments") - if startswith(abspath(project_file), abspath(envdir)) + if startswith(safe_realpath(project_file), safe_realpath(envdir)) return "@" * name end end diff --git a/stdlib/REPL/test/repl.jl b/stdlib/REPL/test/repl.jl index f4d594b2a02e1..85a8137fa003e 100644 --- a/stdlib/REPL/test/repl.jl +++ b/stdlib/REPL/test/repl.jl @@ -1966,11 +1966,20 @@ end @testset "Dummy Pkg prompt" begin # do this in an empty depot to test default for new users - withenv("JULIA_DEPOT_PATH" => mktempdir(), "JULIA_LOAD_PATH" => nothing) do + withenv("JULIA_DEPOT_PATH" => mktempdir() * (Sys.iswindows() ? ";" : ":"), "JULIA_LOAD_PATH" => nothing) do prompt = readchomp(`$(Base.julia_cmd()[1]) --startup-file=no -e "using REPL; print(REPL.Pkg_promptf())"`) @test prompt == "(@v$(VERSION.major).$(VERSION.minor)) pkg> " end + # Issue 55850 + tmp_55850 = mktempdir() + tmp_sym_link = joinpath(tmp_55850, "sym") + symlink(tmp_55850, tmp_sym_link; dir_target=true) + withenv("JULIA_DEPOT_PATH" => tmp_sym_link * (Sys.iswindows() ? ";" : ":"), "JULIA_LOAD_PATH" => nothing) do + prompt = readchomp(`$(Base.julia_cmd()[1]) --startup-file=no -e "using REPL; print(REPL.projname(REPL.find_project_file()))"`) + @test prompt == "@v$(VERSION.major).$(VERSION.minor)" + end + get_prompt(proj::String) = readchomp(`$(Base.julia_cmd()[1]) --startup-file=no $(proj) -e "using REPL; print(REPL.Pkg_promptf())"`) @test get_prompt("--project=$(pkgdir(REPL))") == "(REPL) pkg> " From 096c1d2bcbae1b7388ee4285b89da50dbb9f7094 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Sat, 5 Oct 2024 14:38:53 +0900 Subject: [PATCH 128/537] optimizer: enable load forwarding with the `finalizer` elision (#55991) When the finalizer elision pass is used, load forwarding is not performed currently, regardless of whether the pass succeeds or not. But this is not necessary, and by keeping the `setfield!` call, we can safely forward `getfield` even if finalizer elision is tried. 
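As a concrete illustration of the commit message above (a sketch adapted from the tests added in the diff below, not new functionality): because the `setfield!` call is now kept when finalizer elision is attempted, the final `getfield` load can be forwarded.

```julia
# Sketch adapted from the new tests below; illustrative only.
function forwarded(x::Int)
    xs = finalizer(Ref(x)) do obj
        @noinline
        Base.@assume_effects :nothrow :notaskstate
        Core.println("finalizing: ", obj[])
    end
    xs[] += 1     # the setfield! call is kept by the pass
    return xs[]   # ...so this getfield can now be load-forwarded
end
```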
--- base/compiler/ssair/passes.jl | 16 +++++++++------- test/compiler/inline.jl | 26 ++++++++++++++++++++++++++ 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl index e227249b48598..0e2272524a0ed 100644 --- a/base/compiler/ssair/passes.jl +++ b/base/compiler/ssair/passes.jl @@ -1707,22 +1707,24 @@ function sroa_mutables!(ir::IRCode, defuses::IdDict{Int,Tuple{SPCSet,SSADefUse}} ismutabletype(typ) || continue typ = typ::DataType # First check for any finalizer calls - finalizer_idx = nothing - for use in defuse.uses + finalizer_useidx = nothing + for (useidx, use) in enumerate(defuse.uses) if use.kind === :finalizer # For now: Only allow one finalizer per allocation - finalizer_idx !== nothing && @goto skip - finalizer_idx = use.idx + finalizer_useidx !== nothing && @goto skip + finalizer_useidx = useidx end end - if finalizer_idx !== nothing && inlining !== nothing + all_eliminated = all_forwarded = true + if finalizer_useidx !== nothing && inlining !== nothing + finalizer_idx = defuse.uses[finalizer_useidx].idx try_resolve_finalizer!(ir, defidx, finalizer_idx, defuse, inlining, lazydomtree, lazypostdomtree, ir[SSAValue(finalizer_idx)][:info]) - continue + deleteat!(defuse.uses, finalizer_useidx) + all_eliminated = all_forwarded = false # can't eliminate `setfield!` calls safely end # Partition defuses by field fielddefuse = SSADefUse[SSADefUse() for _ = 1:fieldcount(typ)] - all_eliminated = all_forwarded = true for use in defuse.uses if use.kind === :preserve for du in fielddefuse diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl index fceb920352482..2de6d9950d4e4 100644 --- a/test/compiler/inline.jl +++ b/test/compiler/inline.jl @@ -1596,6 +1596,32 @@ let @test get_finalization_count() == 1000 end +# Load forwarding with `finalizer` elision +let src = code_typed1((Int,)) do x + xs = finalizer(Ref(x)) do obj + @noinline + Base.@assume_effects :nothrow :notaskstate + Core.println("finalizing: ", obj[]) + end + Base.@assume_effects :nothrow @noinline println("xs[] = ", @inline xs[]) + return xs[] + end + @test count(iscall((src, getfield)), src.code) == 0 +end +let src = code_typed1((Int,)) do x + xs = finalizer(Ref(x)) do obj + @noinline + Base.@assume_effects :nothrow :notaskstate + Core.println("finalizing: ", obj[]) + end + Base.@assume_effects :nothrow @noinline println("xs[] = ", @inline xs[]) + xs[] += 1 + return xs[] + end + @test count(iscall((src, getfield)), src.code) == 0 + @test count(iscall((src, setfield!)), src.code) == 1 +end + # optimize `[push!|pushfirst!](::Vector{Any}, x...)` @testset "optimize `$f(::Vector{Any}, x...)`" for f = Any[push!, pushfirst!] 
@eval begin From 5d12c6de4399e62e5e863a9b25b676ce4e26ce97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Sat, 5 Oct 2024 08:34:48 +0100 Subject: [PATCH 129/537] Avoid `stat`-ing stdlib path if it's unreadable (#55992) --- base/loading.jl | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 475ce7f50eae7..c69e37e4d56ea 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -3864,10 +3864,17 @@ end # now check if this file's content hash has changed relative to its source files if stalecheck - if !samefile(includes[1].filename, modpath) && !samefile(fixup_stdlib_path(includes[1].filename), modpath) - @debug "Rejecting cache file $cachefile because it is for file $(includes[1].filename) not file $modpath" - record_reason(reasons, "wrong source") - return true # cache file was compiled from a different path + if !samefile(includes[1].filename, modpath) + # In certain cases the path rewritten by `fixup_stdlib_path` may + # point to an unreadable directory, make sure we can `stat` the + # file before comparing it with `modpath`. + stdlib_path = fixup_stdlib_path(includes[1].filename) + if !(isreadable(stdlib_path) && samefile(stdlib_path, modpath)) + !samefile(fixup_stdlib_path(includes[1].filename), modpath) + @debug "Rejecting cache file $cachefile because it is for file $(includes[1].filename) not file $modpath" + record_reason(reasons, "wrong source") + return true # cache file was compiled from a different path + end end for (modkey, req_modkey) in requires # verify that `require(modkey, name(req_modkey))` ==> `req_modkey` From ae9472993ba299a7596b2025adb4d9abf7e02e9b Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Sat, 5 Oct 2024 11:46:02 +0200 Subject: [PATCH 130/537] doc: manual: cmd: fix Markdown in table entry for `--trim` (#55979) --- doc/src/manual/command-line-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/manual/command-line-interface.md b/doc/src/manual/command-line-interface.md index 5255720e55cd7..734d7031db5e8 100644 --- a/doc/src/manual/command-line-interface.md +++ b/doc/src/manual/command-line-interface.md @@ -219,7 +219,7 @@ The following is a complete list of command-line switches available when launchi |`--trace-dispatch={stderr\|name}` |Print precompile statements for methods dispatched during execution or save to stderr or a path.| |`--image-codegen` |Force generate code in imaging mode| |`--permalloc-pkgimg={yes\|no*}` |Copy the data section of package images into memory| -|`--trim={no*|safe|unsafe|unsafe-warn}` |Build a sysimage including only code provably reachable from methods marked by calling `entrypoint`. The three non-default options differ in how they handle dynamic call sites. In safe mode, such sites result in compile-time errors. In unsafe mode, such sites are allowed but the resulting binary might be missing needed code and can throw runtime errors. With unsafe-warn, such sites will trigger warnings at compile-time and might error at runtime.| +|`--trim={no*\|safe\|unsafe\|unsafe-warn}` |Build a sysimage including only code provably reachable from methods marked by calling `entrypoint`. The three non-default options differ in how they handle dynamic call sites. In safe mode, such sites result in compile-time errors. In unsafe mode, such sites are allowed but the resulting binary might be missing needed code and can throw runtime errors. 
With unsafe-warn, such sites will trigger warnings at compile-time and might error at runtime.| !!! compat "Julia 1.1" In Julia 1.0, the default `--project=@.` option did not search up from the root From 03c0b899f0457cf818268093b72af1b1b5d9868c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Sat, 5 Oct 2024 19:32:40 +0100 Subject: [PATCH 131/537] Avoid conversions to `Float64` in non-literal powers of `Float16` (#55994) Co-authored-by: Alex Arslan --- base/math.jl | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/base/math.jl b/base/math.jl index da51ab3a17bd0..16a8a547e8de1 100644 --- a/base/math.jl +++ b/base/math.jl @@ -1276,14 +1276,12 @@ end return ifelse(isfinite(x) & isfinite(err), muladd(x, y, err), x*y) end -function ^(x::Float32, n::Integer) +function ^(x::Union{Float16,Float32}, n::Integer) n == -2 && return (i=inv(x); i*i) n == 3 && return x*x*x #keep compatibility with literal_pow - n < 0 && return Float32(Base.power_by_squaring(inv(Float64(x)),-n)) - Float32(Base.power_by_squaring(Float64(x),n)) + n < 0 && return oftype(x, Base.power_by_squaring(inv(widen(x)),-n)) + oftype(x, Base.power_by_squaring(widen(x),n)) end -@inline ^(x::Float16, y::Integer) = Float16(Float32(x) ^ y) -@inline literal_pow(::typeof(^), x::Float16, ::Val{p}) where {p} = Float16(literal_pow(^,Float32(x),Val(p))) ## rem2pi-related calculations ## From ab6df86f77b526f93d7c1a315924cf42dc8e1feb Mon Sep 17 00:00:00 2001 From: Jakob Nybo Nissen Date: Sun, 6 Oct 2024 02:13:40 +0200 Subject: [PATCH 132/537] Remove unreachable error branch in memset calls (and in repeat) (#55985) Some places use the pattern memset(A, v, length(A)), which requires a conversion UInt(length(A)). This is technically fallible, but can't actually fail when A is a Memory or Array. Remove the dead error branch by casting to UInt instead. Similarly, in repeat(x, r), r is first checked to be nonnegative, then converted to UInt, then used in multiple calls where it is converted to UInt each time. Here, only do it once. --- base/array.jl | 2 +- base/genericmemory.jl | 2 +- base/iddict.jl | 2 +- base/strings/string.jl | 3 ++- base/strings/substring.jl | 1 + 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/base/array.jl b/base/array.jl index 648fedd5036e1..5b3e6cc398479 100644 --- a/base/array.jl +++ b/base/array.jl @@ -415,7 +415,7 @@ function fill!(a::Union{Array{UInt8}, Array{Int8}}, x::Integer) ref = a.ref t = @_gc_preserve_begin ref p = unsafe_convert(Ptr{Cvoid}, ref) - memset(p, x isa eltype(a) ? x : convert(eltype(a), x), length(a)) + memset(p, x isa eltype(a) ? x : convert(eltype(a), x), length(a) % UInt) @_gc_preserve_end t return a end diff --git a/base/genericmemory.jl b/base/genericmemory.jl index c4ebbc6ca14e1..91b87ab14c6b1 100644 --- a/base/genericmemory.jl +++ b/base/genericmemory.jl @@ -190,7 +190,7 @@ function fill!(a::Union{Memory{UInt8}, Memory{Int8}}, x::Integer) t = @_gc_preserve_begin a p = unsafe_convert(Ptr{Cvoid}, a) T = eltype(a) - memset(p, x isa T ? x : convert(T, x), length(a)) + memset(p, x isa T ? 
x : convert(T, x), length(a) % UInt) @_gc_preserve_end t return a end diff --git a/base/iddict.jl b/base/iddict.jl index 9c133d5ba23c6..f1632e93427a8 100644 --- a/base/iddict.jl +++ b/base/iddict.jl @@ -126,7 +126,7 @@ function empty!(d::IdDict) d.ht = Memory{Any}(undef, 32) ht = d.ht t = @_gc_preserve_begin ht - memset(unsafe_convert(Ptr{Cvoid}, ht), 0, sizeof(ht)) + memset(unsafe_convert(Ptr{Cvoid}, ht), 0, sizeof(ht) % UInt) @_gc_preserve_end t d.ndel = 0 d.count = 0 diff --git a/base/strings/string.jl b/base/strings/string.jl index 90d6e5b26ccd3..a46ee60e4f023 100644 --- a/base/strings/string.jl +++ b/base/strings/string.jl @@ -570,9 +570,10 @@ julia> repeat('A', 3) ``` """ function repeat(c::AbstractChar, r::Integer) + r < 0 && throw(ArgumentError("can't repeat a character $r times")) + r = UInt(r)::UInt c = Char(c)::Char r == 0 && return "" - r < 0 && throw(ArgumentError("can't repeat a character $r times")) u = bswap(reinterpret(UInt32, c)) n = 4 - (leading_zeros(u | 0xff) >> 3) s = _string_n(n*r) diff --git a/base/strings/substring.jl b/base/strings/substring.jl index 2a6b4ae7b9a22..50717d3c27e23 100644 --- a/base/strings/substring.jl +++ b/base/strings/substring.jl @@ -272,6 +272,7 @@ end function repeat(s::Union{String, SubString{String}}, r::Integer) r < 0 && throw(ArgumentError("can't repeat a string $r times")) + r = UInt(r)::UInt r == 0 && return "" r == 1 && return String(s) n = sizeof(s) From 9bceed846e9c50ad64046b8d2ed033681f250993 Mon Sep 17 00:00:00 2001 From: Mo-Gul Date: Sun, 6 Oct 2024 04:32:13 +0200 Subject: [PATCH 133/537] fix up docstring of `mod` (#56000) --- base/range.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/range.jl b/base/range.jl index ce2d2e2821c24..4b5d076dcf436 100644 --- a/base/range.jl +++ b/base/range.jl @@ -1485,7 +1485,7 @@ end """ mod(x::Integer, r::AbstractUnitRange) -Find `y` in the range `r` such that ``x ≡ y (\\mod n)``, where `n = length(r)`, +Find `y` in the range `r` such that `x` ≡ `y` (mod `n`), where `n = length(r)`, i.e. `y = mod(x - first(r), n) + first(r)`. See also [`mod1`](@ref). From 8248bf420d951c159374b8e77b705e52f14baaaa Mon Sep 17 00:00:00 2001 From: spaette <111918424+spaette@users.noreply.github.com> Date: Sun, 6 Oct 2024 07:42:27 +0200 Subject: [PATCH 134/537] fix typos (#56008) these are all in markdown files Co-authored-by: spaette --- doc/src/devdocs/build/distributing.md | 2 +- doc/src/devdocs/gc.md | 2 +- doc/src/devdocs/llvm.md | 2 +- doc/src/manual/environment-variables.md | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/src/devdocs/build/distributing.md b/doc/src/devdocs/build/distributing.md index 99c08923b415b..ed06c20fa0df3 100644 --- a/doc/src/devdocs/build/distributing.md +++ b/doc/src/devdocs/build/distributing.md @@ -108,7 +108,7 @@ Alternatively, Julia may be built as a framework by invoking `make` with the Windows ------- -Instructions for reating a Julia distribution on Windows are described in the +Instructions for creating a Julia distribution on Windows are described in the [build devdocs for Windows](https://github.com/JuliaLang/julia/blob/master/doc/src/devdocs/build/windows.md). Notes on BLAS and LAPACK diff --git a/doc/src/devdocs/gc.md b/doc/src/devdocs/gc.md index 9b9038c9445f3..a45e8afb271ce 100644 --- a/doc/src/devdocs/gc.md +++ b/doc/src/devdocs/gc.md @@ -21,7 +21,7 @@ lists. Metadata for free pages, however, may be stored into three separate lock- Julia's pool allocator follows a "tiered" allocation discipline. 
When requesting a memory page for the pool allocator, Julia will: -- Try to claim a page from `page_pool_lazily_freed`, which contains pages which were empty on the last stop-the-world phase, but not yet madivsed by a concurrent sweeper GC thread. +- Try to claim a page from `page_pool_lazily_freed`, which contains pages which were empty on the last stop-the-world phase, but not yet madvised by a concurrent sweeper GC thread. - If it failed claiming a page from `page_pool_lazily_freed`, it will try to claim a page from `the page_pool_clean`, which contains pages which were mmaped on a previous page allocation request but never accessed. diff --git a/doc/src/devdocs/llvm.md b/doc/src/devdocs/llvm.md index ab8f7dde50022..c4b80f632cd4e 100644 --- a/doc/src/devdocs/llvm.md +++ b/doc/src/devdocs/llvm.md @@ -17,7 +17,7 @@ The code for lowering Julia AST to LLVM IR or interpreting it directly is in dir | `cgutils.cpp` | Lowering utilities, notably for array and tuple accesses | | `codegen.cpp` | Top-level of code generation, pass list, lowering builtins | | `debuginfo.cpp` | Tracks debug information for JIT code | -| `disasm.cpp` | Handles native object file and JIT code diassembly | +| `disasm.cpp` | Handles native object file and JIT code disassembly | | `gf.c` | Generic functions | | `intrinsics.cpp` | Lowering intrinsics | | `jitlayers.cpp` | JIT-specific code, ORC compilation layers/utilities | diff --git a/doc/src/manual/environment-variables.md b/doc/src/manual/environment-variables.md index 1fb11018a22e7..b86822e0be4b7 100644 --- a/doc/src/manual/environment-variables.md +++ b/doc/src/manual/environment-variables.md @@ -144,7 +144,7 @@ files, artifacts, etc. For example, to switch the user depot to `/foo/bar` just ```sh export JULIA_DEPOT_PATH="/foo/bar:" ``` -All package operations, like cloning registrise or installing packages, will now write to +All package operations, like cloning registries or installing packages, will now write to `/foo/bar`, but since the empty entry is expanded to the default system depot, any bundled resources will still be available. If you really only want to use the depot at `/foo/bar`, and not load any bundled resources, simply set the environment variable to `/foo/bar` From 2ae0b7e071b4c128b99485e04a33aaad2c4cd645 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Sun, 6 Oct 2024 09:34:42 +0100 Subject: [PATCH 135/537] Vectorise random vectors of `Float16` (#55997) --- stdlib/Random/src/XoshiroSimd.jl | 34 ++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) diff --git a/stdlib/Random/src/XoshiroSimd.jl b/stdlib/Random/src/XoshiroSimd.jl index 6d4886f31d22b..1c5f8306cc302 100644 --- a/stdlib/Random/src/XoshiroSimd.jl +++ b/stdlib/Random/src/XoshiroSimd.jl @@ -44,6 +44,17 @@ simdThreshold(::Type{Bool}) = 640 l = Float32(li >>> 8) * Float32(0x1.0p-24) (UInt64(reinterpret(UInt32, u)) << 32) | UInt64(reinterpret(UInt32, l)) end +@inline function _bits2float(x::UInt64, ::Type{Float16}) + i1 = (x>>>48) % UInt16 + i2 = (x>>>32) % UInt16 + i3 = (x>>>16) % UInt16 + i4 = x % UInt16 + f1 = Float16(i1 >>> 5) * Float16(0x1.0p-11) + f2 = Float16(i2 >>> 5) * Float16(0x1.0p-11) + f3 = Float16(i3 >>> 5) * Float16(0x1.0p-11) + f4 = Float16(i4 >>> 5) * Float16(0x1.0p-11) + return (UInt64(reinterpret(UInt16, f1)) << 48) | (UInt64(reinterpret(UInt16, f2)) << 32) | (UInt64(reinterpret(UInt16, f3)) << 16) | UInt64(reinterpret(UInt16, f4)) +end # required operations. 
These could be written more concisely with `ntuple`, but the compiler # sometimes refuses to properly vectorize. @@ -118,6 +129,18 @@ for N in [4,8,16] ret <$N x i64> %i """ @eval @inline _bits2float(x::$VT, ::Type{Float32}) = llvmcall($code, $VT, Tuple{$VT}, x) + + code = """ + %as16 = bitcast <$N x i64> %0 to <$(4N) x i16> + %shiftamt = shufflevector <1 x i16> , <1 x i16> undef, <$(4N) x i32> zeroinitializer + %sh = lshr <$(4N) x i16> %as16, %shiftamt + %f = uitofp <$(4N) x i16> %sh to <$(4N) x half> + %scale = shufflevector <1 x half> , <1 x half> undef, <$(4N) x i32> zeroinitializer + %m = fmul <$(4N) x half> %f, %scale + %i = bitcast <$(4N) x half> %m to <$N x i64> + ret <$N x i64> %i + """ + @eval @inline _bits2float(x::$VT, ::Type{Float16}) = llvmcall($code, $VT, Tuple{$VT}, x) end end @@ -137,7 +160,7 @@ end _id(x, T) = x -@inline function xoshiro_bulk(rng::Union{TaskLocalRNG, Xoshiro}, dst::Ptr{UInt8}, len::Int, T::Union{Type{UInt8}, Type{Bool}, Type{Float32}, Type{Float64}}, ::Val{N}, f::F = _id) where {N, F} +@inline function xoshiro_bulk(rng::Union{TaskLocalRNG, Xoshiro}, dst::Ptr{UInt8}, len::Int, T::Union{Type{UInt8}, Type{Bool}, Type{Float16}, Type{Float32}, Type{Float64}}, ::Val{N}, f::F = _id) where {N, F} if len >= simdThreshold(T) written = xoshiro_bulk_simd(rng, dst, len, T, Val(N), f) len -= written @@ -265,13 +288,8 @@ end end -function rand!(rng::Union{TaskLocalRNG, Xoshiro}, dst::Array{Float32}, ::SamplerTrivial{CloseOpen01{Float32}}) - GC.@preserve dst xoshiro_bulk(rng, convert(Ptr{UInt8}, pointer(dst)), length(dst)*4, Float32, xoshiroWidth(), _bits2float) - dst -end - -function rand!(rng::Union{TaskLocalRNG, Xoshiro}, dst::Array{Float64}, ::SamplerTrivial{CloseOpen01{Float64}}) - GC.@preserve dst xoshiro_bulk(rng, convert(Ptr{UInt8}, pointer(dst)), length(dst)*8, Float64, xoshiroWidth(), _bits2float) +function rand!(rng::Union{TaskLocalRNG, Xoshiro}, dst::Array{T}, ::SamplerTrivial{CloseOpen01{T}}) where {T<:Union{Float16,Float32,Float64}} + GC.@preserve dst xoshiro_bulk(rng, convert(Ptr{UInt8}, pointer(dst)), length(dst)*sizeof(T), T, xoshiroWidth(), _bits2float) dst end From 7cc195cf19a317e633a9ea575c842396a2fa70a8 Mon Sep 17 00:00:00 2001 From: Sagnac <83491030+Sagnac@users.noreply.github.com> Date: Sun, 6 Oct 2024 13:58:30 +0000 Subject: [PATCH 136/537] Clarify `div` docstring for floating-point input (#55918) Closes #55837 This is a variant of the warning found in the `fld` docstring clarifying floating-point behaviour. --- base/div.jl | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/base/div.jl b/base/div.jl index 8988f2b70f27b..3fec8d2f5cdf3 100644 --- a/base/div.jl +++ b/base/div.jl @@ -43,6 +43,21 @@ julia> div(4, 3, RoundFromZero) julia> div(-4, 3, RoundFromZero) -2 ``` +Because `div(x, y)` implements strictly correct truncated rounding based on the true +value of floating-point numbers, unintuitive situations can arise. For example: +```jldoctest +julia> div(6.0, 0.1) +59.0 +julia> 6.0 / 0.1 +60.0 +julia> 6.0 / big(0.1) +59.99999999999999666933092612453056361837965690217069245739573412231113406246995 +``` +What is happening here is that the true value of the floating-point number written +as `0.1` is slightly larger than the numerical value 1/10 while `6.0` represents +the number 6 precisely. Therefore the true value of `6.0 / 0.1` is slightly less +than 60. When doing division, this is rounded to precisely `60.0`, but +`div(6.0, 0.1, RoundToZero)` always truncates the true value, so the result is `59.0`. 
""" div(x, y, r::RoundingMode) From 43f4afec44eb8119121e2eaee68dac12885397ef Mon Sep 17 00:00:00 2001 From: Alexander Plavin Date: Sun, 6 Oct 2024 14:44:10 -0400 Subject: [PATCH 137/537] improve getproperty(Pairs) warnings (#55989) - Only call `depwarn` if the field is `itr` or `data`; otherwise let the field error happen as normal - Give a more specific deprecation warning. --- base/deprecated.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/base/deprecated.jl b/base/deprecated.jl index f88a53526aa37..b43a4227d42c4 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -432,7 +432,8 @@ const All16{T,N} = Tuple{T,T,T,T,T,T,T,T, # the plan is to eventually overload getproperty to access entries of the dict @noinline function getproperty(x::Pairs, s::Symbol) - depwarn("use values(kwargs) and keys(kwargs) instead of kwargs.data and kwargs.itr", :getproperty, force=true) + s == :data && depwarn("use values(kwargs) instead of kwargs.data", :getproperty, force=true) + s == :itr && depwarn("use keys(kwargs) instead of kwargs.itr", :getproperty, force=true) return getfield(x, s) end From 2ca88addb25035d523f06ec14e6fafd6f5047d38 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Sun, 6 Oct 2024 15:16:59 -0400 Subject: [PATCH 138/537] Document type-piracy / type-leakage restrictions for `require_stdlib` (#56005) I was a recent offender in https://github.com/JuliaLang/Pkg.jl/issues/4017#issuecomment-2377589989 This PR tries to lay down some guidelines for the behavior that stdlibs and the callers of `require_stdlib` must adhere to to avoid "duplicate stdlib" bugs These bugs are particularly nasty because they are experienced semi-rarely and under pretty specific circumstances (they only occur when `require_stdlib` loads another copy of a stdlib, often in a particular order and/or with a particular state of your pre-compile / loading cache) so they may make it a long way through a pre-release cycle without an actionable bug report. --- base/loading.jl | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/base/loading.jl b/base/loading.jl index c69e37e4d56ea..4eac272848baf 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -2610,6 +2610,46 @@ function _require_from_serialized(uuidkey::PkgId, path::String, ocachepath::Unio end # load a serialized file directly from append_bundled_depot_path for uuidkey without stalechecks +""" + require_stdlib(package_uuidkey::PkgId, ext::Union{Nothing, String}=nothing) + +!!! warning "May load duplicate copies of stdlib packages." + + This requires that all stdlib packages loaded are compatible with having concurrent + copies of themselves loaded into memory. It also places additional restrictions on + the kinds of type-piracy that are allowed in stdlibs, since type-piracy can cause the + dispatch table to become visibly "torn" across multiple different packages. + + The specific requirements are: + + The import side (caller of `require_stdlib`) must not leak any stdlib types, esp. + to any context that may have a conflicting copy of the stdlib(s) (or vice-versa). + - e.g., if an output is forwarded to user code, it must contain only Base types. + - e.g., if an output contains types from the stdlib, it must be consumed "internally" + before reaching user code. + + The imported code (loaded stdlibs) must be very careful about type piracy: + - It must not access any global state that may differ between stdlib copies in + type-pirated methods. 
+ - It must not return any stdlib types from any type-pirated public methods (since + a loaded duplicate would overwrite the Base method again, returning different + types that don't correspond to the user-accessible copy of the stdlib). + - It must not pass / discriminate stdlib types in type-pirated methods, except + indirectly via methods defined in Base and implemented (w/o type-piracy) in + all copies of the stdlib over their respective types. + + The idea behind the above restrictions is that any type-pirated methods in the stdlib + must return a result that is simultaneously correct for all of the stdlib's loaded + copies, including accounting for global state differences and split type identities. + + Furthermore, any imported code must not leak any stdlib types to globals and containers + (e.g. Vectors and mutable structs) in upstream Modules, since this will also lead to + type-confusion when the type is later pulled out in user / stdlib code. + + For examples of issues like the above, see: + [1] https://github.com/JuliaLang/Pkg.jl/issues/4017#issuecomment-2377589989 + [2] https://github.com/JuliaLang/StyledStrings.jl/issues/91#issuecomment-2379602914 +""" function require_stdlib(package_uuidkey::PkgId, ext::Union{Nothing, String}=nothing) @lock require_lock begin # the PkgId of the ext, or package if not an ext From c2a2e38079c1b540e93e8524cf53da4153358481 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Sun, 6 Oct 2024 23:10:51 +0100 Subject: [PATCH 139/537] [LinearAlgebra] Remove unreliable doctests (#56011) The exact textual representation of the output of these doctests depend on the specific kernel used by the BLAS backend, and can vary between versions of OpenBLAS (as it did in #41973), or between different CPUs, which makes these doctests unreliable. Fix #55998. --- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 26 ++++------------------- stdlib/LinearAlgebra/src/hessenberg.jl | 2 +- 2 files changed, 5 insertions(+), 23 deletions(-) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 17216845b350c..3ecb714a6cfe1 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -394,17 +394,8 @@ julia> Y = zero(X); julia> ldiv!(Y, qr(A), X); -julia> Y -3-element Vector{Float64}: - 0.7128099173553719 - -0.051652892561983674 - 0.10020661157024757 - -julia> A\\X -3-element Vector{Float64}: - 0.7128099173553719 - -0.05165289256198333 - 0.10020661157024785 +julia> Y ≈ A\\X +true ``` """ ldiv!(Y, A, B) @@ -435,17 +426,8 @@ julia> Y = copy(X); julia> ldiv!(qr(A), X); -julia> X -3-element Vector{Float64}: - 0.7128099173553719 - -0.051652892561983674 - 0.10020661157024757 - -julia> A\\Y -3-element Vector{Float64}: - 0.7128099173553719 - -0.05165289256198333 - 0.10020661157024785 +julia> X ≈ A\\Y +true ``` """ ldiv!(A, B) diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl index bbaca3c878293..524e57711ce3a 100644 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ b/stdlib/LinearAlgebra/src/hessenberg.jl @@ -446,7 +446,7 @@ This is useful because multiple shifted solves `(F + μ*I) \\ b` Iterating the decomposition produces the factors `F.Q, F.H, F.μ`. # Examples -```jldoctest +```julia-repl julia> A = [4. 9. 7.; 4. 4. 1.; 4. 3. 2.] 
3×3 Matrix{Float64}: 4.0 9.0 7.0 From 4d2184159b15137b587b2f765c508621dce3a4d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mateus=20Ara=C3=BAjo?= Date: Mon, 7 Oct 2024 04:31:49 +0200 Subject: [PATCH 140/537] cleanup functions of Hermitian matrices (#55951) The functions of Hermitian matrices are a bit of a mess. For example, if we have a Hermitian matrix `a` with negative eigenvalues, `a^0.5` doesn't produce the `Symmetric` wrapper, but `sqrt(a)` does. On the other hand, if we have a positive definite `b`, `b^0.5` will be `Hermitian`, but `sqrt(b)` will be `Symmetric`: ```julia using LinearAlgebra a = Hermitian([1.0 2.0;2.0 1.0]) a^0.5 sqrt(a) b = Hermitian([2.0 1.0; 1.0 2.0]) b^0.5 sqrt(b) ``` This sort of arbitrary assignment of wrappers happens with pretty much all functions defined there. There's also some oddities, such as `cis` being the only function defined for `SymTridiagonal`, even though all `eigen`-based functions work, and `cbrt` being the only function not defined for complex Hermitian matrices. I did a cleanup: I defined all functions for `SymTridiagonal` and `Hermitian{<:Complex}`, and always assigned the appropriate wrapper, preserving the input one when possible. There's an inconsistency remaining that I didn't fix, that only `sqrt` and `log` accept a tolerance argument, as changing that is probably breaking. There were also hardly any tests that I could find (only `exp`, `log`, `cis`, and `sqrt`). I'm happy to add them if it's desired. --- stdlib/LinearAlgebra/src/symmetric.jl | 158 +++++++++++++++----------- 1 file changed, 93 insertions(+), 65 deletions(-) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index a7739596a73bb..e17eb80d25453 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -810,26 +810,32 @@ end # Matrix functions ^(A::Symmetric{<:Real}, p::Integer) = sympow(A, p) ^(A::Symmetric{<:Complex}, p::Integer) = sympow(A, p) -function sympow(A::Symmetric, p::Integer) - if p < 0 - return Symmetric(Base.power_by_squaring(inv(A), -p)) - else - return Symmetric(Base.power_by_squaring(A, p)) - end -end -function ^(A::Symmetric{<:Real}, p::Real) - isinteger(p) && return integerpow(A, p) - F = eigen(A) - if all(λ -> λ ≥ 0, F.values) - return Symmetric((F.vectors * Diagonal((F.values).^p)) * F.vectors') - else - return Symmetric((F.vectors * Diagonal(complex.(F.values).^p)) * F.vectors') +^(A::SymTridiagonal{<:Real}, p::Integer) = sympow(A, p) +^(A::SymTridiagonal{<:Complex}, p::Integer) = sympow(A, p) +for hermtype in (:Symmetric, :SymTridiagonal) + @eval begin + function sympow(A::$hermtype, p::Integer) + if p < 0 + return Symmetric(Base.power_by_squaring(inv(A), -p)) + else + return Symmetric(Base.power_by_squaring(A, p)) + end + end + function ^(A::$hermtype{<:Real}, p::Real) + isinteger(p) && return integerpow(A, p) + F = eigen(A) + if all(λ -> λ ≥ 0, F.values) + return Symmetric((F.vectors * Diagonal((F.values).^p)) * F.vectors') + else + return Symmetric((F.vectors * Diagonal(complex.(F.values).^p)) * F.vectors') + end + end + function ^(A::$hermtype{<:Complex}, p::Real) + isinteger(p) && return integerpow(A, p) + return Symmetric(schurpow(A, p)) + end end end -function ^(A::Symmetric{<:Complex}, p::Real) - isinteger(p) && return integerpow(A, p) - return Symmetric(schurpow(A, p)) -end function ^(A::Hermitian, p::Integer) if p < 0 retmat = Base.power_by_squaring(inv(A), -p) @@ -855,16 +861,25 @@ function ^(A::Hermitian{T}, p::Real) where T return Hermitian(retmat) end else - 
return (F.vectors * Diagonal((complex.(F.values).^p))) * F.vectors' + retmat = (F.vectors * Diagonal((complex.(F.values).^p))) * F.vectors' + if T <: Real + return Symmetric(retmat) + else + return retmat + end end end -for func in (:exp, :cos, :sin, :tan, :cosh, :sinh, :tanh, :atan, :asinh, :atanh) - @eval begin - function ($func)(A::HermOrSym{<:Real}) - F = eigen(A) - return Symmetric((F.vectors * Diagonal(($func).(F.values))) * F.vectors') +for func in (:exp, :cos, :sin, :tan, :cosh, :sinh, :tanh, :atan, :asinh, :atanh, :cbrt) + for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] + @eval begin + function ($func)(A::$hermtype{<:Real}) + F = eigen(A) + return $wrapper((F.vectors * Diagonal(($func).(F.values))) * F.vectors') + end end + end + @eval begin function ($func)(A::Hermitian{<:Complex}) n = checksquare(A) F = eigen(A) @@ -877,23 +892,34 @@ for func in (:exp, :cos, :sin, :tan, :cosh, :sinh, :tanh, :atan, :asinh, :atanh) end end -function cis(A::Union{RealHermSymComplexHerm,SymTridiagonal{<:Real}}) +for wrapper in (:Symmetric, :Hermitian, :SymTridiagonal) + @eval begin + function cis(A::$wrapper{<:Real}) + F = eigen(A) + return Symmetric(F.vectors .* cis.(F.values') * F.vectors') + end + end +end +function cis(A::Hermitian{<:Complex}) F = eigen(A) - # The returned matrix is unitary, and is complex-symmetric for real A return F.vectors .* cis.(F.values') * F.vectors' end + for func in (:acos, :asin) - @eval begin - function ($func)(A::HermOrSym{<:Real}) - F = eigen(A) - if all(λ -> -1 ≤ λ ≤ 1, F.values) - retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' - else - retmat = (F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors' + for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] + @eval begin + function ($func)(A::$hermtype{<:Real}) + F = eigen(A) + if all(λ -> -1 ≤ λ ≤ 1, F.values) + return $wrapper((F.vectors * Diagonal(($func).(F.values))) * F.vectors') + else + return Symmetric((F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors') + end end - return Symmetric(retmat) end + end + @eval begin function ($func)(A::Hermitian{<:Complex}) n = checksquare(A) F = eigen(A) @@ -910,14 +936,17 @@ for func in (:acos, :asin) end end -function acosh(A::HermOrSym{<:Real}) - F = eigen(A) - if all(λ -> λ ≥ 1, F.values) - retmat = (F.vectors * Diagonal(acosh.(F.values))) * F.vectors' - else - retmat = (F.vectors * Diagonal(acosh.(complex.(F.values)))) * F.vectors' +for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] + @eval begin + function acosh(A::$hermtype{<:Real}) + F = eigen(A) + if all(λ -> λ ≥ 1, F.values) + return $wrapper((F.vectors * Diagonal(acosh.(F.values))) * F.vectors') + else + return Symmetric((F.vectors * Diagonal(acosh.(complex.(F.values)))) * F.vectors') + end + end end - return Symmetric(retmat) end function acosh(A::Hermitian{<:Complex}) n = checksquare(A) @@ -933,14 +962,18 @@ function acosh(A::Hermitian{<:Complex}) end end -function sincos(A::HermOrSym{<:Real}) - n = checksquare(A) - F = eigen(A) - S, C = Diagonal(similar(A, (n,))), Diagonal(similar(A, (n,))) - for i in 1:n - S.diag[i], C.diag[i] = sincos(F.values[i]) +for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] + @eval begin + function sincos(A::$hermtype{<:Real}) + n = checksquare(A) + F = eigen(A) + S, C = Diagonal(similar(A, (n,))), 
Diagonal(similar(A, (n,))) + for i in 1:n + S.diag[i], C.diag[i] = sincos(F.values[i]) + end + return $wrapper((F.vectors * S) * F.vectors'), $wrapper((F.vectors * C) * F.vectors') + end end - return Symmetric((F.vectors * S) * F.vectors'), Symmetric((F.vectors * C) * F.vectors') end function sincos(A::Hermitian{<:Complex}) n = checksquare(A) @@ -962,18 +995,20 @@ for func in (:log, :sqrt) # sqrt has rtol arg to handle matrices that are semidefinite up to roundoff errors rtolarg = func === :sqrt ? Any[Expr(:kw, :(rtol::Real), :(eps(real(float(one(T))))*size(A,1)))] : Any[] rtolval = func === :sqrt ? :(-maximum(abs, F.values) * rtol) : 0 - @eval begin - function ($func)(A::HermOrSym{T}; $(rtolarg...)) where {T<:Real} - F = eigen(A) - λ₀ = $rtolval # treat λ ≥ λ₀ as "zero" eigenvalues up to roundoff - if all(λ -> λ ≥ λ₀, F.values) - retmat = (F.vectors * Diagonal(($func).(max.(0, F.values)))) * F.vectors' - else - retmat = (F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors' + for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] + @eval begin + function ($func)(A::$hermtype{T}; $(rtolarg...)) where {T<:Real} + F = eigen(A) + λ₀ = $rtolval # treat λ ≥ λ₀ as "zero" eigenvalues up to roundoff + if all(λ -> λ ≥ λ₀, F.values) + return $wrapper((F.vectors * Diagonal(($func).(max.(0, F.values)))) * F.vectors') + else + return Symmetric((F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors') + end end - return Symmetric(retmat) end - + end + @eval begin function ($func)(A::Hermitian{T}; $(rtolarg...)) where {T<:Complex} n = checksquare(A) F = eigen(A) @@ -992,13 +1027,6 @@ for func in (:log, :sqrt) end end -# Cube root of a real-valued symmetric matrix -function cbrt(A::HermOrSym{<:Real}) - F = eigen(A) - A = F.vectors * Diagonal(cbrt.(F.values)) * F.vectors' - return A -end - """ hermitianpart(A::AbstractMatrix, uplo::Symbol=:U) -> Hermitian From 57e3c9e4bfd2bc3d54a4923066ed2c3f087b1311 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Mon, 7 Oct 2024 02:31:59 -0400 Subject: [PATCH 141/537] Fix no-arg `ScopedValues.@with` within a scope (#56019) Fixes https://github.com/JuliaLang/julia/issues/56017 --- base/scopedvalues.jl | 2 ++ test/scopedvalues.jl | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/base/scopedvalues.jl b/base/scopedvalues.jl index 6ccd4687c5c65..39e3c2c076718 100644 --- a/base/scopedvalues.jl +++ b/base/scopedvalues.jl @@ -85,6 +85,8 @@ struct Scope values::ScopeStorage end +Scope(scope::Scope) = scope + function Scope(parent::Union{Nothing, Scope}, key::ScopedValue{T}, value) where T val = convert(T, value) if parent === nothing diff --git a/test/scopedvalues.jl b/test/scopedvalues.jl index 61b10c557c455..2c2f4a510c1c9 100644 --- a/test/scopedvalues.jl +++ b/test/scopedvalues.jl @@ -138,6 +138,12 @@ end @test sval[] == 1 @test sval_float[] == 1.0 end + @with sval=>2 sval_float=>2.0 begin + @with begin + @test sval[] == 2 + @test sval_float[] == 2.0 + end + end end @testset "isassigned" begin From 87acb9e029c8d0f1a83223737b7539e1db4e4b33 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 7 Oct 2024 17:27:12 +0530 Subject: [PATCH 142/537] LinearAlgebra: make matprod_dest public (#55537) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, in a matrix multiplication `A * B`, we use `B` to construct the destination. However, this may not produce the optimal destination type, and is essentially single-dispatch. 
Letting packages specialize `matprod_dest` would help us obtain the optimal type by dispatching on both the arguments. This may significantly improve performance in the matrix multiplication. As an example: ```julia julia> using LinearAlgebra, FillArrays, SparseArrays julia> F = Fill(3, 10, 10); julia> s = sprand(10, 10, 0.1); julia> @btime $F * $s; 15.225 μs (10 allocations: 4.14 KiB) julia> typeof(F * s) SparseMatrixCSC{Float64, Int64} julia> nnz(F * s) 80 julia> VERSION v"1.12.0-DEV.1074" ``` In this case, the destination is a sparse matrix with 80% of its elements filled and being set one-by-one, which is terrible for performance. Instead, if we specialize `matprod_dest` to return a dense destination, we may obtain ```julia julia> LinearAlgebra.matprod_dest(F::FillArrays.AbstractFill, S::SparseMatrixCSC, ::Type{T}) where {T} = Matrix{T}(undef, size(F,1), size(S,2)) julia> @btime $F * $s; 754.632 ns (2 allocations: 944 bytes) julia> typeof(F * s) Matrix{Float64} ``` Potentially, this may be improved further by specializing `mul!`, but this is a 20x improvement just by choosing the right destination. Since this is being made public, we may want to bikeshed on an appropriate name for the function. --- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 3 ++- stdlib/LinearAlgebra/src/matmul.jl | 10 +++++++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 3ecb714a6cfe1..49d73127ba7ba 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -174,7 +174,8 @@ public AbstractTriangular, isbanded, peakflops, symmetric, - symmetric_type + symmetric_type, + matprod_dest const BlasFloat = Union{Float64,Float32,ComplexF64,ComplexF32} const BlasReal = Union{Float64,Float32} diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl index 9a232d3ad1e51..b70f7d47b28dd 100644 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -123,7 +123,15 @@ function (*)(A::AbstractMatrix, B::AbstractMatrix) mul!(matprod_dest(A, B, TS), A, B) end -matprod_dest(A, B, TS) = similar(B, TS, (size(A, 1), size(B, 2))) +""" + matprod_dest(A, B, T) + +Return an appropriate `AbstractArray` with element type `T` that may be used to store the result of `A * B`. + +!!! compat + This function requires at least Julia 1.11 +""" +matprod_dest(A, B, T) = similar(B, T, (size(A, 1), size(B, 2))) # optimization for dispatching to BLAS, e.g. *(::Matrix{Float32}, ::Matrix{Float64}) # but avoiding the case *(::Matrix{<:BlasComplex}, ::Matrix{<:BlasReal}) From c7071e1eb2369211cf02a1e7dbae365f5fba3fc9 Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Mon, 7 Oct 2024 16:01:23 +0200 Subject: [PATCH 143/537] Sockets: Warn when local network access not granted. 
(#56023) Works around https://github.com/JuliaLang/julia/issues/56022 --- stdlib/Sockets/test/runtests.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stdlib/Sockets/test/runtests.jl b/stdlib/Sockets/test/runtests.jl index 2c50b4a0f8b4a..02a290ddbaa63 100644 --- a/stdlib/Sockets/test/runtests.jl +++ b/stdlib/Sockets/test/runtests.jl @@ -453,6 +453,8 @@ end catch e if isa(e, Base.IOError) && Base.uverrorname(e.code) == "EPERM" @warn "UDP IPv4 broadcast test skipped (permission denied upon send, restrictive firewall?)" + elseif Sys.isapple() && isa(e, Base.IOError) && Base.uverrorname(e.code) == "EHOSTUNREACH" + @warn "UDP IPv4 broadcast test skipped (local network access not granded?)" else rethrow() end From d4987a368cee336d9d8f3b7baeb54a89c5024e0a Mon Sep 17 00:00:00 2001 From: Zentrik Date: Mon, 7 Oct 2024 19:37:43 +0100 Subject: [PATCH 144/537] Update test due to switch to intel syntax by default in #48103 (#55993) --- stdlib/InteractiveUtils/test/runtests.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stdlib/InteractiveUtils/test/runtests.jl b/stdlib/InteractiveUtils/test/runtests.jl index 8e7090cb53020..851391ec6c249 100644 --- a/stdlib/InteractiveUtils/test/runtests.jl +++ b/stdlib/InteractiveUtils/test/runtests.jl @@ -547,9 +547,9 @@ if Sys.ARCH === :x86_64 || occursin(ix86, string(Sys.ARCH)) output = replace(String(take!(buf)), r"#[^\r\n]+" => "") @test !occursin(rgx, output) - code_native(buf, linear_foo, ()) - output = String(take!(buf)) - @test occursin(rgx, output) + code_native(buf, linear_foo, (), debuginfo = :none) + output = replace(String(take!(buf)), r"#[^\r\n]+" => "") + @test !occursin(rgx, output) @testset "binary" begin # check the RET instruction (opcode: C3) From 4cdd864e535b16e928c2eff43e6f1583bd77209d Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 7 Oct 2024 19:54:51 -0400 Subject: [PATCH 145/537] add require_lock call to maybe_loaded_precompile (#56027) If we expect this to be a public API (https://github.com/timholy/Revise.jl for some reason is trying to access this state), we should lock around it for consistency with the other similar functions. 
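As a concrete illustration of the idiom (a hypothetical helper, not part of this diff; `Base.require_lock` and `Base.loaded_modules` are the existing loading-state globals it refers to):

```julia
# Hypothetical read-only accessor: a reader of loading state takes the same
# lock that the loading code itself holds while mutating that state.
snapshot_loaded_modules() = Base.@lock Base.require_lock copy(Base.loaded_modules)
```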
Needed for https://github.com/timholy/Revise.jl/pull/856 --- base/loading.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/base/loading.jl b/base/loading.jl index 4eac272848baf..fe4a4770628da 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -2397,12 +2397,13 @@ const module_keys = IdDict{Module,PkgId}() # the reverse of loaded_modules root_module_key(m::Module) = @lock require_lock module_keys[m] function maybe_loaded_precompile(key::PkgId, buildid::UInt128) - assert_havelock(require_lock) + @lock require_lock begin mods = get(loaded_precompiles, key, nothing) mods === nothing && return for mod in mods module_build_id(mod) == buildid && return mod end + end end function module_build_id(m::Module) From 24555b81c81e12d19aa59a07b9eb634393f32fcb Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Tue, 8 Oct 2024 15:25:00 +0200 Subject: [PATCH 146/537] fix `power_by_squaring`: use `promote` instead of type inference (#55634) Fixes #53504 Fixes #55633 --- base/intfuncs.jl | 6 +++++- test/math.jl | 22 ++++++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/base/intfuncs.jl b/base/intfuncs.jl index 06a0213e7141c..0421877a16623 100644 --- a/base/intfuncs.jl +++ b/base/intfuncs.jl @@ -298,7 +298,11 @@ function invmod(n::T) where {T<:BitInteger} end # ^ for any x supporting * -to_power_type(x) = convert(Base._return_type(*, Tuple{typeof(x), typeof(x)}), x) +function to_power_type(x::Number) + T = promote_type(typeof(x), typeof(one(x)), typeof(x*x)) + convert(T, x) +end +to_power_type(x) = oftype(x*x, x) @noinline throw_domerr_powbysq(::Any, p) = throw(DomainError(p, LazyString( "Cannot raise an integer x to a negative power ", p, ".", "\nConvert input to float."))) diff --git a/test/math.jl b/test/math.jl index c0a2d8bf8c9f8..5a9f3248e59f4 100644 --- a/test/math.jl +++ b/test/math.jl @@ -1498,6 +1498,28 @@ end n = Int64(1024 / log2(E)) @test E^n == Inf @test E^float(n) == Inf + + # #55633 + struct Issue55633_1 <: Number end + struct Issue55633_3 <: Number end + struct Issue55633_9 <: Number end + Base.one(::Issue55633_3) = Issue55633_1() + Base.:(*)(::Issue55633_3, ::Issue55633_3) = Issue55633_9() + Base.promote_rule(::Type{Issue55633_1}, ::Type{Issue55633_3}) = Int + Base.promote_rule(::Type{Issue55633_3}, ::Type{Issue55633_9}) = Int + Base.promote_rule(::Type{Issue55633_1}, ::Type{Issue55633_9}) = Int + Base.promote_rule(::Type{Issue55633_1}, ::Type{Int}) = Int + Base.promote_rule(::Type{Issue55633_3}, ::Type{Int}) = Int + Base.promote_rule(::Type{Issue55633_9}, ::Type{Int}) = Int + Base.convert(::Type{Int}, ::Issue55633_1) = 1 + Base.convert(::Type{Int}, ::Issue55633_3) = 3 + Base.convert(::Type{Int}, ::Issue55633_9) = 9 + for x ∈ (im, pi, Issue55633_3()) + p = promote(one(x), x, x*x) + for y ∈ 0:2 + @test all((t -> ===(t...)), zip(x^y, p[y + 1])) + end + end end # Test that sqrt behaves correctly and doesn't exhibit fp80 double rounding. From 8d515ed8ad443d4bb8c530468d5b7ae0f3b50a0b Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 8 Oct 2024 13:03:32 -0400 Subject: [PATCH 147/537] Don't show keymap `@error` for hints (#56041) It's too disruptive to show errors for hints. The error will still be shown if tab is pressed. 
Helps issues like https://github.com/JuliaLang/julia/issues/56037
---
 stdlib/REPL/src/LineEdit.jl | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/stdlib/REPL/src/LineEdit.jl b/stdlib/REPL/src/LineEdit.jl
index 5af03e0df9b6d..c92dca8c8e015 100644
--- a/stdlib/REPL/src/LineEdit.jl
+++ b/stdlib/REPL/src/LineEdit.jl
@@ -382,7 +382,13 @@ function check_for_hint(s::MIState)
         # Requires making space for them earlier in refresh_multi_line
         return clear_hint(st)
     end
-    completions, partial, should_complete = complete_line(st.p.complete, st, s.active_module; hint = true)::Tuple{Vector{String},String,Bool}
+
+    completions, partial, should_complete = try
+        complete_line(st.p.complete, st, s.active_module; hint = true)::Tuple{Vector{String},String,Bool}
+    catch
+        @debug "error completing line for hint" exception=current_exceptions()
+        return clear_hint(st)
+    end
     isempty(completions) && return clear_hint(st)
     # Don't complete for single chars, given e.g. `x` completes to `xor`
     if length(partial) > 1 && should_complete

From e516e4c63d383efb24613d27e6fab0ffffed5a88 Mon Sep 17 00:00:00 2001
From: Tim Besard
Date: Wed, 9 Oct 2024 07:35:48 +0200
Subject: [PATCH 148/537] Fix typo in sockets tests. (#56038)

---
 stdlib/Sockets/test/runtests.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stdlib/Sockets/test/runtests.jl b/stdlib/Sockets/test/runtests.jl
index 02a290ddbaa63..778d9f7415bcc 100644
--- a/stdlib/Sockets/test/runtests.jl
+++ b/stdlib/Sockets/test/runtests.jl
@@ -454,7 +454,7 @@ end
             if isa(e, Base.IOError) && Base.uverrorname(e.code) == "EPERM"
                 @warn "UDP IPv4 broadcast test skipped (permission denied upon send, restrictive firewall?)"
             elseif Sys.isapple() && isa(e, Base.IOError) && Base.uverrorname(e.code) == "EHOSTUNREACH"
-                @warn "UDP IPv4 broadcast test skipped (local network access not granded?)"
+                @warn "UDP IPv4 broadcast test skipped (local network access not granted?)"
             else
                 rethrow()
             end

From 5117d042b3488eebc929fdfceba98562e18c5ac0 Mon Sep 17 00:00:00 2001
From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com>
Date: Wed, 9 Oct 2024 18:38:32 +0900
Subject: [PATCH 149/537] EA: use `is_mutation_free_argtype` for the escapability check (#56028)

EA has been using `isbitstype` for type-level escapability checks, but a
better criterion (`is_mutation_free`) is available these days, so we would
like to use that instead.
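As a quick REPL illustration of why `isbitstype` is too coarse for this purpose (not part of the diff): `String` is not a bits type, yet it is an immutable type whose values can never be mutated, so escape analysis gains nothing from tracking them.

```julia
julia> isbitstype(Int), isbitstype(String)
(true, false)

julia> ismutabletype(Int), ismutabletype(String)
(false, false)
```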
--- .../ssair/EscapeAnalysis/EscapeAnalysis.jl | 10 +- .../compiler/EscapeAnalysis/EscapeAnalysis.jl | 116 +++++++++--------- 2 files changed, 63 insertions(+), 63 deletions(-) diff --git a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl index 0ad55d6fbcd9e..a0abacb617085 100644 --- a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl +++ b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl @@ -24,10 +24,10 @@ using ._TOP_MOD: # Base definitions isempty, ismutabletype, keys, last, length, max, min, missing, pop!, push!, pushfirst!, unwrap_unionall, !, !=, !==, &, *, +, -, :, <, <<, =>, >, |, ∈, ∉, ∩, ∪, ≠, ≤, ≥, ⊆ using Core.Compiler: # Core.Compiler specific definitions - Bottom, IRCode, IR_FLAG_NOTHROW, InferenceResult, SimpleInferenceLattice, + AbstractLattice, Bottom, IRCode, IR_FLAG_NOTHROW, InferenceResult, SimpleInferenceLattice, argextype, fieldcount_noerror, hasintersect, has_flag, intrinsic_nothrow, - is_meta_expr_head, isbitstype, isexpr, println, setfield!_nothrow, singleton_type, - try_compute_field, try_compute_fieldidx, widenconst, ⊑, AbstractLattice + is_meta_expr_head, is_mutation_free_argtype, isexpr, println, setfield!_nothrow, + singleton_type, try_compute_field, try_compute_fieldidx, widenconst, ⊑ include(x) = _TOP_MOD.include(@__MODULE__, x) if _TOP_MOD === Core.Compiler @@ -859,7 +859,7 @@ function add_escape_change!(astate::AnalysisState, @nospecialize(x), xinfo::Esca xinfo === ⊥ && return nothing # performance optimization xidx = iridx(x, astate.estate) if xidx !== nothing - if force || !isbitstype(widenconst(argextype(x, astate.ir))) + if force || !is_mutation_free_argtype(argextype(x, astate.ir)) push!(astate.changes, EscapeChange(xidx, xinfo)) end end @@ -869,7 +869,7 @@ end function add_liveness_change!(astate::AnalysisState, @nospecialize(x), livepc::Int) xidx = iridx(x, astate.estate) if xidx !== nothing - if !isbitstype(widenconst(argextype(x, astate.ir))) + if !is_mutation_free_argtype(argextype(x, astate.ir)) push!(astate.changes, LivenessChange(xidx, livepc)) end end diff --git a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl b/test/compiler/EscapeAnalysis/EscapeAnalysis.jl index 99bd86228f50a..9afe49c01562d 100644 --- a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl +++ b/test/compiler/EscapeAnalysis/EscapeAnalysis.jl @@ -290,7 +290,7 @@ end let # typeassert result = code_escapes((Any,)) do x - y = x::String + y = x::Base.RefValue{Any} return y end r = only(findall(isreturn, result.ir.stmts.stmt)) @@ -305,11 +305,6 @@ end r = only(findall(isreturn, result.ir.stmts.stmt)) @test has_return_escape(result.state[Argument(2)], r) @test !has_all_escape(result.state[Argument(2)]) - - result = code_escapes((Module,)) do m - isdefined(m, 10) # throws - end - @test has_thrown_escape(result.state[Argument(2)]) end end @@ -685,8 +680,8 @@ end @test has_all_escape(result.state[Argument(2)]) end let result = @eval EATModule() begin - const Rx = SafeRef{String}("Rx") - $code_escapes((String,)) do s + const Rx = SafeRef{Any}(nothing) + $code_escapes((Base.RefValue{String},)) do s setfield!(Rx, :x, s) Core.sizeof(Rx[]) end @@ -712,7 +707,7 @@ end # ------------ # field escape should propagate to :new arguments - let result = code_escapes((String,)) do a + let result = code_escapes((Base.RefValue{String},)) do a o = SafeRef(a) Core.donotdelete(o) return o[] @@ -722,7 +717,7 @@ end @test has_return_escape(result.state[Argument(2)], r) @test is_load_forwardable(result.state[SSAValue(i)]) end - let result = 
code_escapes((String,)) do a + let result = code_escapes((Base.RefValue{String},)) do a t = SafeRef((a,)) f = t[][1] return f @@ -731,9 +726,8 @@ end r = only(findall(isreturn, result.ir.stmts.stmt)) @test has_return_escape(result.state[Argument(2)], r) @test is_load_forwardable(result.state[SSAValue(i)]) - result.state[SSAValue(i)].AliasInfo end - let result = code_escapes((String, String)) do a, b + let result = code_escapes((Base.RefValue{String}, Base.RefValue{String})) do a, b obj = SafeRefs(a, b) Core.donotdelete(obj) fld1 = obj[1] @@ -748,31 +742,31 @@ end end # field escape should propagate to `setfield!` argument - let result = code_escapes((String,)) do a - o = SafeRef("foo") + let result = code_escapes((Base.RefValue{String},)) do a + o = SafeRef(Ref("foo")) Core.donotdelete(o) o[] = a return o[] end - i = only(findall(isnew, result.ir.stmts.stmt)) + i = last(findall(isnew, result.ir.stmts.stmt)) r = only(findall(isreturn, result.ir.stmts.stmt)) @test has_return_escape(result.state[Argument(2)], r) @test is_load_forwardable(result.state[SSAValue(i)]) end # propagate escape information imposed on return value of `setfield!` call - let result = code_escapes((String,)) do a - obj = SafeRef("foo") + let result = code_escapes((Base.RefValue{String},)) do a + obj = SafeRef(Ref("foo")) Core.donotdelete(obj) return (obj[] = a) end - i = only(findall(isnew, result.ir.stmts.stmt)) + i = last(findall(isnew, result.ir.stmts.stmt)) r = only(findall(isreturn, result.ir.stmts.stmt)) @test has_return_escape(result.state[Argument(2)], r) @test is_load_forwardable(result.state[SSAValue(i)]) end # nested allocations - let result = code_escapes((String,)) do a + let result = code_escapes((Base.RefValue{String},)) do a o1 = SafeRef(a) o2 = SafeRef(o1) return o2[] @@ -787,7 +781,7 @@ end end end end - let result = code_escapes((String,)) do a + let result = code_escapes((Base.RefValue{String},)) do a o1 = (a,) o2 = (o1,) return o2[1] @@ -802,7 +796,7 @@ end end end end - let result = code_escapes((String,)) do a + let result = code_escapes((Base.RefValue{String},)) do a o1 = SafeRef(a) o2 = SafeRef(o1) o1′ = o2[] @@ -844,7 +838,7 @@ end @test has_return_escape(result.state[SSAValue(i)], r) end end - let result = code_escapes((String,)) do x + let result = code_escapes((Base.RefValue{String},)) do x o = Ref(x) Core.donotdelete(o) broadcast(identity, o) @@ -892,7 +886,7 @@ end end end # when ϕ-node merges values with different types - let result = code_escapes((Bool,String,String,String)) do cond, x, y, z + let result = code_escapes((Bool,Base.RefValue{String},Base.RefValue{String},Base.RefValue{String})) do cond, x, y, z local out if cond ϕ = SafeRef(x) @@ -904,7 +898,7 @@ end end r = only(findall(isreturn, result.ir.stmts.stmt)) t = only(findall(iscall((result.ir, throw)), result.ir.stmts.stmt)) - ϕ = only(findall(==(Union{SafeRef{String},SafeRefs{String,String}}), result.ir.stmts.type)) + ϕ = only(findall(==(Union{SafeRef{Base.RefValue{String}},SafeRefs{Base.RefValue{String},Base.RefValue{String}}}), result.ir.stmts.type)) @test has_return_escape(result.state[Argument(3)], r) # x @test !has_return_escape(result.state[Argument(4)], r) # y @test has_return_escape(result.state[Argument(5)], r) # z @@ -1038,7 +1032,7 @@ end end # alias via typeassert let result = code_escapes((Any,)) do a - r = a::String + r = a::Base.RefValue{String} return r end r = only(findall(isreturn, result.ir.stmts.stmt)) @@ -1077,11 +1071,11 @@ end @test has_all_escape(result.state[Argument(3)]) # a end # alias via ϕ-node - let 
result = code_escapes((Bool,String)) do cond, x + let result = code_escapes((Bool,Base.RefValue{String})) do cond, x if cond - ϕ2 = ϕ1 = SafeRef("foo") + ϕ2 = ϕ1 = SafeRef(Ref("foo")) else - ϕ2 = ϕ1 = SafeRef("bar") + ϕ2 = ϕ1 = SafeRef(Ref("bar")) end ϕ2[] = x return ϕ1[] @@ -1094,14 +1088,16 @@ end @test is_load_forwardable(result.state[SSAValue(i)]) end for i in findall(isnew, result.ir.stmts.stmt) - @test is_load_forwardable(result.state[SSAValue(i)]) + if result.ir[SSAValue(i)][:type] <: SafeRef + @test is_load_forwardable(result.state[SSAValue(i)]) + end end end - let result = code_escapes((Bool,Bool,String)) do cond1, cond2, x + let result = code_escapes((Bool,Bool,Base.RefValue{String})) do cond1, cond2, x if cond1 - ϕ2 = ϕ1 = SafeRef("foo") + ϕ2 = ϕ1 = SafeRef(Ref("foo")) else - ϕ2 = ϕ1 = SafeRef("bar") + ϕ2 = ϕ1 = SafeRef(Ref("bar")) end cond2 && (ϕ2[] = x) return ϕ1[] @@ -1114,12 +1110,14 @@ end @test is_load_forwardable(result.state[SSAValue(i)]) end for i in findall(isnew, result.ir.stmts.stmt) - @test is_load_forwardable(result.state[SSAValue(i)]) + if result.ir[SSAValue(i)][:type] <: SafeRef + @test is_load_forwardable(result.state[SSAValue(i)]) + end end end # alias via π-node let result = code_escapes((Any,)) do x - if isa(x, String) + if isa(x, Base.RefValue{String}) return x end throw("error!") @@ -1213,7 +1211,7 @@ end # conservatively handle unknown field: # all fields should be escaped, but the allocation itself doesn't need to be escaped - let result = code_escapes((String, Symbol)) do a, fld + let result = code_escapes((Base.RefValue{String}, Symbol)) do a, fld obj = SafeRef(a) return getfield(obj, fld) end @@ -1222,7 +1220,7 @@ end @test has_return_escape(result.state[Argument(2)], r) # a @test !is_load_forwardable(result.state[SSAValue(i)]) # obj end - let result = code_escapes((String, String, Symbol)) do a, b, fld + let result = code_escapes((Base.RefValue{String}, Base.RefValue{String}, Symbol)) do a, b, fld obj = SafeRefs(a, b) return getfield(obj, fld) # should escape both `a` and `b` end @@ -1232,7 +1230,7 @@ end @test has_return_escape(result.state[Argument(3)], r) # b @test !is_load_forwardable(result.state[SSAValue(i)]) # obj end - let result = code_escapes((String, String, Int)) do a, b, idx + let result = code_escapes((Base.RefValue{String}, Base.RefValue{String}, Int)) do a, b, idx obj = SafeRefs(a, b) return obj[idx] # should escape both `a` and `b` end @@ -1242,33 +1240,33 @@ end @test has_return_escape(result.state[Argument(3)], r) # b @test !is_load_forwardable(result.state[SSAValue(i)]) # obj end - let result = code_escapes((String, String, Symbol)) do a, b, fld - obj = SafeRefs("a", "b") + let result = code_escapes((Base.RefValue{String}, Base.RefValue{String}, Symbol)) do a, b, fld + obj = SafeRefs(Ref("a"), Ref("b")) setfield!(obj, fld, a) return obj[2] # should escape `a` end - i = only(findall(isnew, result.ir.stmts.stmt)) + i = last(findall(isnew, result.ir.stmts.stmt)) r = only(findall(isreturn, result.ir.stmts.stmt)) @test has_return_escape(result.state[Argument(2)], r) # a @test !has_return_escape(result.state[Argument(3)], r) # b @test !is_load_forwardable(result.state[SSAValue(i)]) # obj end - let result = code_escapes((String, Symbol)) do a, fld - obj = SafeRefs("a", "b") + let result = code_escapes((Base.RefValue{String}, Symbol)) do a, fld + obj = SafeRefs(Ref("a"), Ref("b")) setfield!(obj, fld, a) return obj[1] # this should escape `a` end - i = only(findall(isnew, result.ir.stmts.stmt)) + i = last(findall(isnew, 
result.ir.stmts.stmt)) r = only(findall(isreturn, result.ir.stmts.stmt)) @test has_return_escape(result.state[Argument(2)], r) # a @test !is_load_forwardable(result.state[SSAValue(i)]) # obj end - let result = code_escapes((String, String, Int)) do a, b, idx - obj = SafeRefs("a", "b") + let result = code_escapes((Base.RefValue{String}, Base.RefValue{String}, Int)) do a, b, idx + obj = SafeRefs(Ref("a"), Ref("b")) obj[idx] = a return obj[2] # should escape `a` end - i = only(findall(isnew, result.ir.stmts.stmt)) + i = last(findall(isnew, result.ir.stmts.stmt)) r = only(findall(isreturn, result.ir.stmts.stmt)) @test has_return_escape(result.state[Argument(2)], r) # a @test !has_return_escape(result.state[Argument(3)], r) # b @@ -1280,7 +1278,7 @@ end let result = @eval EATModule() begin @noinline getx(obj) = obj[] - $code_escapes((String,)) do a + $code_escapes((Base.RefValue{String},)) do a obj = SafeRef(a) fld = getx(obj) return fld @@ -1294,8 +1292,8 @@ end end # TODO interprocedural alias analysis - let result = code_escapes((SafeRef{String},)) do s - s[] = "bar" + let result = code_escapes((SafeRef{Base.RefValue{String}},)) do s + s[] = Ref("bar") global GV = s[] nothing end @@ -1335,7 +1333,7 @@ end let result = @eval EATModule() begin @noinline mysetindex!(x, a) = x[1] = a const Ax = Vector{Any}(undef, 1) - $code_escapes((String,)) do s + $code_escapes((Base.RefValue{String},)) do s mysetindex!(Ax, s) end end @@ -1391,11 +1389,11 @@ end end # handle conflicting field information correctly - let result = code_escapes((Bool,String,String,)) do cnd, baz, qux + let result = code_escapes((Bool,Base.RefValue{String},Base.RefValue{String},)) do cnd, baz, qux if cnd - o = SafeRef("foo") + o = SafeRef(Ref("foo")) else - o = SafeRefs("bar", baz) + o = SafeRefs(Ref("bar"), baz) r = getfield(o, 2) end if cnd @@ -1409,12 +1407,14 @@ end @test has_return_escape(result.state[Argument(3)], r) # baz @test has_return_escape(result.state[Argument(4)], r) # qux for new in findall(isnew, result.ir.stmts.stmt) - @test is_load_forwardable(result.state[SSAValue(new)]) + if !(result.ir[SSAValue(new)][:type] <: Base.RefValue) + @test is_load_forwardable(result.state[SSAValue(new)]) + end end end - let result = code_escapes((Bool,String,String,)) do cnd, baz, qux + let result = code_escapes((Bool,Base.RefValue{String},Base.RefValue{String},)) do cnd, baz, qux if cnd - o = SafeRefs("foo", "bar") + o = SafeRefs(Ref("foo"), Ref("bar")) r = setfield!(o, 2, baz) else o = SafeRef(qux) @@ -2141,9 +2141,9 @@ end # propagate escapes imposed on call arguments @noinline broadcast_noescape2(b) = broadcast(identity, b) let result = code_escapes() do - broadcast_noescape2(Ref("Hi")) + broadcast_noescape2(Ref(Ref("Hi"))) end - i = only(findall(isnew, result.ir.stmts.stmt)) + i = last(findall(isnew, result.ir.stmts.stmt)) @test_broken !has_return_escape(result.state[SSAValue(i)]) # TODO interprocedural alias analysis @test !has_thrown_escape(result.state[SSAValue(i)]) end From ecf41b18cce0e41455a8144f220e9e6171987194 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Wed, 9 Oct 2024 06:03:41 -0400 Subject: [PATCH 150/537] effects: fix `Base.@_noub_meta` (#56061) This had the incorrect number of arguments to `Expr(:purity, ...)` causing it to be silently ignored. 
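The new tests in the diff below assert exactly this; shown here as a REPL illustration (output as asserted by the added tests, assuming this fix is applied):

```julia
julia> Core.Compiler.is_noub(Base.infer_effects(Base._growend!, (Vector{Int}, Int)))
true
```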
--- base/essentials.jl | 3 ++- src/method.c | 2 ++ test/compiler/effects.jl | 7 ++++++- 3 files changed, 10 insertions(+), 2 deletions(-) diff --git a/base/essentials.jl b/base/essentials.jl index 32c44a9571f23..0e7be924c908c 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -337,7 +337,8 @@ macro _noub_meta() #=:inaccessiblememonly=#false, #=:noub=#true, #=:noub_if_noinbounds=#false, - #=:consistent_overlay=#false)) + #=:consistent_overlay=#false, + #=:nortcall=#false)) end # can be used in place of `@assume_effects :notaskstate` (supposed to be used for bootstrapping) macro _notaskstate_meta() diff --git a/src/method.c b/src/method.c index 6aba60e7fe12c..629816319b334 100644 --- a/src/method.c +++ b/src/method.c @@ -491,6 +491,8 @@ jl_code_info_t *jl_new_code_info_from_ir(jl_expr_t *ir) if (consistent_overlay) li->purity.overrides.ipo_consistent_overlay = consistent_overlay; int8_t nortcall = jl_unbox_bool(jl_exprarg(ma, 10)); if (nortcall) li->purity.overrides.ipo_nortcall = nortcall; + } else { + assert(jl_expr_nargs(ma) == 0); } } else diff --git a/test/compiler/effects.jl b/test/compiler/effects.jl index 8bc5f27e31766..c8a699b294d37 100644 --- a/test/compiler/effects.jl +++ b/test/compiler/effects.jl @@ -810,7 +810,12 @@ end # @test !Core.Compiler.is_nothrow(effects) # end #end -# + +@test Core.Compiler.is_noub(Base.infer_effects(Base._growbeg!, (Vector{Int}, Int))) +@test Core.Compiler.is_noub(Base.infer_effects(Base._growbeg!, (Vector{Any}, Int))) +@test Core.Compiler.is_noub(Base.infer_effects(Base._growend!, (Vector{Int}, Int))) +@test Core.Compiler.is_noub(Base.infer_effects(Base._growend!, (Vector{Any}, Int))) + # tuple indexing # -------------- From 18046c2c41d4a04a9a9a59d26e230989260f0502 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Wed, 9 Oct 2024 08:10:34 -0400 Subject: [PATCH 151/537] effects: improve `:noub_if_noinbounds` documentation (#56060) Just a small touch-up --- base/compiler/effects.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/base/compiler/effects.jl b/base/compiler/effects.jl index 7778c96e019e5..b22b9396408e3 100644 --- a/base/compiler/effects.jl +++ b/base/compiler/effects.jl @@ -47,7 +47,8 @@ following meanings: * `ALWAYS_TRUE`: this method is guaranteed to not execute any undefined behavior (for any input). * `ALWAYS_FALSE`: this method may execute undefined behavior. * `NOUB_IF_NOINBOUNDS`: this method is guaranteed to not execute any undefined behavior - if the caller does not set nor propagate the `@inbounds` context. + under the assumption that its `@checkbounds` code is not elided (which happens when the + caller does not set nor propagate the `@inbounds` context) Note that undefined behavior may technically cause the method to violate any other effect assertions (such as `:consistent` or `:effect_free`) as well, but we do not model this, and they assume the absence of undefined behavior. From 38dbd1149be625ab478229f4532d7378e474981b Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Wed, 9 Oct 2024 18:09:04 +0530 Subject: [PATCH 152/537] Disallow assigning asymmetric values to SymTridiagonal (#56068) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, we can assign an asymmetric value to a `SymTridiagonal`, which goes against what `setindex!` is expected to do. 
This is because `SymTridiagonal` symmetrizes the values along the diagonal, so setting a diagonal entry to an asymmetric value would lead to a subsequent `getindex` producing a different result. ```julia julia> s = SMatrix{2,2}(1:4); julia> S = SymTridiagonal(fill(s,4), fill(s,3)) 4×4 SymTridiagonal{SMatrix{2, 2, Int64, 4}, Vector{SMatrix{2, 2, Int64, 4}}}: [1 3; 3 4] [1 3; 2 4] ⋅ ⋅ [1 2; 3 4] [1 3; 3 4] [1 3; 2 4] ⋅ ⋅ [1 2; 3 4] [1 3; 3 4] [1 3; 2 4] ⋅ ⋅ [1 2; 3 4] [1 3; 3 4] julia> S[1,1] = s 2×2 SMatrix{2, 2, Int64, 4} with indices SOneTo(2)×SOneTo(2): 1 3 2 4 julia> S[1,1] == s false julia> S[1,1] 2×2 Symmetric{Int64, SMatrix{2, 2, Int64, 4}} with indices SOneTo(2)×SOneTo(2): 1 3 3 4 ``` After this PR, ```julia julia> S[1,1] = s ERROR: ArgumentError: cannot set a diagonal entry of a SymTridiagonal to an asymmetric value ``` --- stdlib/LinearAlgebra/src/tridiag.jl | 1 + stdlib/LinearAlgebra/test/tridiag.jl | 3 +++ 2 files changed, 4 insertions(+) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index ca61eb8519d42..f5df1a4e6a895 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -497,6 +497,7 @@ Base._reverse!(A::SymTridiagonal, dims::Colon) = (reverse!(A.dv); reverse!(A.ev) @inline function setindex!(A::SymTridiagonal, x, i::Integer, j::Integer) @boundscheck checkbounds(A, i, j) if i == j + issymmetric(x) || throw(ArgumentError("cannot set a diagonal entry of a SymTridiagonal to an asymmetric value")) @inbounds A.dv[i] = x else throw(ArgumentError(lazy"cannot set off-diagonal entry ($i, $j)")) diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index 826a6e62355d0..b6e93341b1946 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -505,6 +505,9 @@ end @test_throws ArgumentError diag(A, 2) @test_throws ArgumentError diag(A, n+1) @test_throws ArgumentError diag(A, -n-1) + A[1,1] = Symmetric(2M) + @test A[1,1] == Symmetric(2M) + @test_throws ArgumentError A[1,1] = M @test tr(A) == sum(diag(A)) @test issymmetric(tr(A)) From 9c5578315643b356825327613a27f6faaf14c494 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Wed, 9 Oct 2024 18:19:12 +0530 Subject: [PATCH 153/537] Remove unused matrix type params in diag methods (#56048) These parameters are not used in the method, and are unnecessary for dispatch. 
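A minimal sketch of the point (hypothetical functions, not from the diff): an unused `where` parameter does not change which argument types a method accepts.

```julia
f(v::Vector{T}) where {T} = length(v)  # `T` never appears in the body
g(v::Vector) = length(v)               # accepts exactly the same calls
f([1, 2, 3]) == g([1, 2, 3])           # both return 3
```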
---
 stdlib/LinearAlgebra/src/bidiag.jl   | 2 +-
 stdlib/LinearAlgebra/src/diagonal.jl | 2 +-
 stdlib/LinearAlgebra/src/tridiag.jl  | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl
index e5482cbba5595..c600de147aa39 100644
--- a/stdlib/LinearAlgebra/src/bidiag.jl
+++ b/stdlib/LinearAlgebra/src/bidiag.jl
@@ -402,7 +402,7 @@ function triu!(M::Bidiagonal{T}, k::Integer=0) where T
     return M
 end

-function diag(M::Bidiagonal{T}, n::Integer=0) where T
+function diag(M::Bidiagonal, n::Integer=0)
     # every branch call similar(..., ::Int) to make sure the
     # same vector type is returned independent of n
     if n == 0
diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl
index 0a95bac5ffb93..dba4d9da79708 100644
--- a/stdlib/LinearAlgebra/src/diagonal.jl
+++ b/stdlib/LinearAlgebra/src/diagonal.jl
@@ -751,7 +751,7 @@ adjoint(D::Diagonal) = Diagonal(adjoint.(D.diag))
 permutedims(D::Diagonal) = D
 permutedims(D::Diagonal, perm) = (Base.checkdims_perm(axes(D), axes(D), perm); D)

-function diag(D::Diagonal{T}, k::Integer=0) where T
+function diag(D::Diagonal, k::Integer=0)
     # every branch call similar(..., ::Int) to make sure the
     # same vector type is returned independent of k
     if k == 0
diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl
index f5df1a4e6a895..e75e1e5eefb3d 100644
--- a/stdlib/LinearAlgebra/src/tridiag.jl
+++ b/stdlib/LinearAlgebra/src/tridiag.jl
@@ -676,7 +676,7 @@ issymmetric(S::Tridiagonal) = all(issymmetric, S.d) && all(Iterators.map((x, y)

 \(A::Adjoint{<:Any,<:Tridiagonal}, B::Adjoint{<:Any,<:AbstractVecOrMat}) = copy(A) \ B

-function diag(M::Tridiagonal{T}, n::Integer=0) where T
+function diag(M::Tridiagonal, n::Integer=0)
     # every branch call similar(..., ::Int) to make sure the
     # same vector type is returned independent of n
     if n == 0

From 91da4bf9323b79e00e446d6471d3b43d4c8ee4c4 Mon Sep 17 00:00:00 2001
From: Jishnu Bhattacharya
Date: Wed, 9 Oct 2024 18:21:40 +0530
Subject: [PATCH 154/537] LinearAlgebra: diagzero for non-OneTo axes (#55252)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Currently, the off-diagonal zeros for a block-`Diagonal` matrix are computed
using `diagzero`, which calls `zeros` for the sizes of the elements. This
returns an `Array`, unless one specializes `diagzero` for the custom
`Diagonal` matrix type. This PR defines a `zeroslike` function that
dispatches on the axes of the elements, which lets packages specialize on
the axes to return custom `AbstractArray`s. Choosing to specialize on the
`eltype` avoids the need to specialize on the container, and allows packages
to return appropriate types for custom axis types.
With this, ```julia julia> LinearAlgebra.zeroslike(::Type{S}, ax::Tuple{SOneTo, Vararg{SOneTo}}) where {S<:SMatrix} = SMatrix{map(length, ax)...}(ntuple(_->zero(eltype(S)), prod(length, ax))) julia> D = Diagonal(fill(SMatrix{2,3}(1:6), 2)) 2×2 Diagonal{SMatrix{2, 3, Int64, 6}, Vector{SMatrix{2, 3, Int64, 6}}}: [1 3 5; 2 4 6] ⋅ ⋅ [1 3 5; 2 4 6] julia> D[1,2] # now an SMatrix 2×3 SMatrix{2, 3, Int64, 6} with indices SOneTo(2)×SOneTo(3): 0 0 0 0 0 0 julia> LinearAlgebra.zeroslike(::Type{S}, ax::Tuple{SOneTo, Vararg{SOneTo}}) where {S<:MMatrix} = MMatrix{map(length, ax)...}(ntuple(_->zero(eltype(S)), prod(length, ax))) julia> D = Diagonal(fill(MMatrix{2,3}(1:6), 2)) 2×2 Diagonal{MMatrix{2, 3, Int64, 6}, Vector{MMatrix{2, 3, Int64, 6}}}: [1 3 5; 2 4 6] ⋅ ⋅ [1 3 5; 2 4 6] julia> D[1,2] # now an MMatrix 2×3 MMatrix{2, 3, Int64, 6} with indices SOneTo(2)×SOneTo(3): 0 0 0 0 0 0 ``` The reason this can't be the default behavior is that we are not guaranteed that there exists a `similar` method that accepts the combination of axes. This is why we have to fall back to using the sizes, unless a specialized method is provided by a package. One positive outcome of this is that indexing into such a block-diagonal matrix will now usually be type-stable, which mitigates https://github.com/JuliaLang/julia/issues/45535 to some extent (although it doesn't resolve the issue). I've also updated the `getindex` for `Bidiagonal` to use `diagzero`, instead of the similarly defined `bidiagzero` function that it was using. Structured block matrices may now use `diagzero` uniformly to generate the zero elements. --- NEWS.md | 2 ++ stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 + stdlib/LinearAlgebra/src/bidiag.jl | 15 +++++++-------- stdlib/LinearAlgebra/src/diagonal.jl | 23 +++++++++++++++++++++-- stdlib/LinearAlgebra/test/bidiag.jl | 10 ++++++++++ stdlib/LinearAlgebra/test/diagonal.jl | 7 +++++++ test/testhelpers/SizedArrays.jl | 3 +++ 7 files changed, 51 insertions(+), 10 deletions(-) diff --git a/NEWS.md b/NEWS.md index bb22c9f940a78..9aebf5d42d954 100644 --- a/NEWS.md +++ b/NEWS.md @@ -138,6 +138,8 @@ Standard library changes (callable via `cholesky[!](A, RowMaximum())`) ([#54619]). * The number of default BLAS threads now respects process affinity, instead of using total number of logical threads available on the system ([#55574]). +* A new function `zeroslike` is added that is used to generate the zero elements for matrix-valued banded matrices. + Custom array types may specialize this function to return an appropriate result. 
([#55252]) #### Logging diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 49d73127ba7ba..15354603943c2 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -175,6 +175,7 @@ public AbstractTriangular, peakflops, symmetric, symmetric_type, + zeroslike, matprod_dest const BlasFloat = Union{Float64,Float32,ComplexF64,ComplexF32} diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index c600de147aa39..381afd2f09a61 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -118,15 +118,14 @@ Bidiagonal(A::Bidiagonal) = A Bidiagonal{T}(A::Bidiagonal{T}) where {T} = A Bidiagonal{T}(A::Bidiagonal) where {T} = Bidiagonal{T}(A.dv, A.ev, A.uplo) -bidiagzero(::Bidiagonal{T}, i, j) where {T} = zero(T) -function bidiagzero(A::Bidiagonal{<:AbstractMatrix}, i, j) - Tel = eltype(eltype(A.dv)) +function diagzero(A::Bidiagonal{<:AbstractMatrix}, i, j) + Tel = eltype(A) if i < j && A.uplo == 'U' #= top right zeros =# - return zeros(Tel, size(A.ev[i], 1), size(A.ev[j-1], 2)) + return zeroslike(Tel, axes(A.ev[i], 1), axes(A.ev[j-1], 2)) elseif j < i && A.uplo == 'L' #= bottom left zeros =# - return zeros(Tel, size(A.ev[i-1], 1), size(A.ev[j], 2)) + return zeroslike(Tel, axes(A.ev[i-1], 1), axes(A.ev[j], 2)) else - return zeros(Tel, size(A.dv[i], 1), size(A.dv[j], 2)) + return zeroslike(Tel, axes(A.dv[i], 1), axes(A.dv[j], 2)) end end @@ -161,7 +160,7 @@ end elseif i == j - _offdiagind(A.uplo) return @inbounds A.ev[A.uplo == 'U' ? i : j] else - return bidiagzero(A, i, j) + return diagzero(A, i, j) end end @@ -173,7 +172,7 @@ end # we explicitly compare the possible bands as b.band may be constant-propagated return @inbounds A.ev[b.index] else - return bidiagzero(A, Tuple(_cartinds(b))...) + return diagzero(A, Tuple(_cartinds(b))...) end end diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index dba4d9da79708..0c93024f33a9a 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -185,8 +185,27 @@ end end r end -diagzero(::Diagonal{T}, i, j) where {T} = zero(T) -diagzero(D::Diagonal{<:AbstractMatrix{T}}, i, j) where {T} = zeros(T, size(D.diag[i], 1), size(D.diag[j], 2)) +""" + diagzero(A::AbstractMatrix, i, j) + +Return the appropriate zero element `A[i, j]` corresponding to a banded matrix `A`. +""" +diagzero(A::AbstractMatrix, i, j) = zero(eltype(A)) +diagzero(D::Diagonal{M}, i, j) where {M<:AbstractMatrix} = + zeroslike(M, axes(D.diag[i], 1), axes(D.diag[j], 2)) +# dispatching on the axes permits specializing on the axis types to return something other than an Array +zeroslike(M::Type, ax::Vararg{Union{AbstractUnitRange, Integer}}) = zeroslike(M, ax) +""" + zeroslike(::Type{M}, ax::Tuple{AbstractUnitRange, Vararg{AbstractUnitRange}}) where {M<:AbstractMatrix} + zeroslike(::Type{M}, sz::Tuple{Integer, Vararg{Integer}}) where {M<:AbstractMatrix} + +Return an appropriate zero-ed array similar to `M`, with either the axes `ax` or the size `sz`. +This will be used as a structural zero element of a matrix-valued banded matrix. +By default, `zeroslike` falls back to using the size along each axis to construct the array. 
+""" +zeroslike(M::Type, ax::Tuple{AbstractUnitRange, Vararg{AbstractUnitRange}}) = zeroslike(M, map(length, ax)) +zeroslike(M::Type, sz::Tuple{Integer, Vararg{Integer}}) = zeros(M, sz) +zeroslike(::Type{M}, sz::Tuple{Integer, Vararg{Integer}}) where {M<:AbstractMatrix} = zeros(eltype(M), sz) @inline function getindex(D::Diagonal, b::BandIndex) @boundscheck checkbounds(D, b) diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index d633a99a2390e..628e59debe8b7 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -839,6 +839,16 @@ end B = Bidiagonal(dv, ev, :U) @test B == Matrix{eltype(B)}(B) end + + @testset "non-standard axes" begin + LinearAlgebra.diagzero(T::Type, ax::Tuple{SizedArrays.SOneTo, Vararg{SizedArrays.SOneTo}}) = + zeros(T, ax) + + s = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) + B = Bidiagonal(fill(s,4), fill(s,3), :U) + @test @inferred(B[2,1]) isa typeof(s) + @test all(iszero, B[2,1]) + end end @testset "copyto!" begin diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 98f5498c71033..85fe963e3592b 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -815,6 +815,13 @@ end D = Diagonal(fill(S,3)) @test D * fill(S,2,3)' == fill(S * S', 3, 2) @test fill(S,3,2)' * D == fill(S' * S, 2, 3) + + @testset "indexing with non-standard-axes" begin + s = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) + D = Diagonal(fill(s,3)) + @test @inferred(D[1,2]) isa typeof(s) + @test all(iszero, D[1,2]) + end end @testset "Eigensystem for block diagonal (issue #30681)" begin diff --git a/test/testhelpers/SizedArrays.jl b/test/testhelpers/SizedArrays.jl index 495fe03347ee7..e52e965a64859 100644 --- a/test/testhelpers/SizedArrays.jl +++ b/test/testhelpers/SizedArrays.jl @@ -99,4 +99,7 @@ mul!(dest::AbstractMatrix, S1::SizedMatrix, S2::SizedMatrix, α::Number, β::Num mul!(dest::AbstractVector, M::AbstractMatrix, v::SizedVector, α::Number, β::Number) = mul!(dest, M, _data(v), α, β) +LinearAlgebra.zeroslike(::Type{S}, ax::Tuple{SizedArrays.SOneTo, Vararg{SizedArrays.SOneTo}}) where {S<:SizedArray} = + zeros(eltype(S), ax) + end From 44620b62614c653a59ac932fc66782d3fefb09b7 Mon Sep 17 00:00:00 2001 From: Thomas Christensen Date: Wed, 9 Oct 2024 16:28:56 +0200 Subject: [PATCH 155/537] Multi-argument `gcdx(a, b, c...)` (#55935) Previously, `gcdx` only worked for two arguments - but the underlying idea extends to any (nonzero) number of arguments. Similarly, `gcd` already works for 1, 2, 3+ arguments. This PR implements the 1 and 3+ argument versions of `gcdx`, following the [wiki page](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm#The_case_of_more_than_two_numbers) for the Extended Euclidean algorithm. --- base/intfuncs.jl | 26 ++++++++++++++++++++++++-- test/rational.jl | 16 ++++++++++++++++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/base/intfuncs.jl b/base/intfuncs.jl index 0421877a16623..ec450aff2dff2 100644 --- a/base/intfuncs.jl +++ b/base/intfuncs.jl @@ -165,17 +165,24 @@ end # return (gcd(a, b), x, y) such that ax+by == gcd(a, b) """ - gcdx(a, b) + gcdx(a, b...) Computes the greatest common (positive) divisor of `a` and `b` and their Bézout coefficients, i.e. the integer coefficients `u` and `v` that satisfy -``ua+vb = d = gcd(a, b)``. ``gcdx(a, b)`` returns ``(d, u, v)``. +``u*a + v*b = d = gcd(a, b)``. ``gcdx(a, b)`` returns ``(d, u, v)``. 
+
+For more arguments than two, i.e., `gcdx(a, b, c, ...)` the Bézout coefficients are computed
+recursively, returning a solution `(d, u, v, w, ...)` to
+``u*a + v*b + w*c + ... = d = gcd(a, b, c, ...)``.

 The arguments may be integer and rational numbers.

 !!! compat "Julia 1.4"
     Rational arguments require Julia 1.4 or later.

+!!! compat "Julia 1.12"
+    More or fewer arguments than two require Julia 1.12 or later.
+
 # Examples
 ```jldoctest
 julia> gcdx(12, 42)
@@ -183,6 +190,9 @@ julia> gcdx(12, 42)
 julia> gcdx(240, 46)
 (2, -9, 47)
+
+julia> gcdx(15, 12, 20)
+(1, 7, -7, -1)
 ```

 !!! note
@@ -215,6 +225,18 @@ Base.@assume_effects :terminates_locally function gcdx(a::Integer, b::Integer)
 end
 gcdx(a::Real, b::Real) = gcdx(promote(a,b)...)
 gcdx(a::T, b::T) where T<:Real = throw(MethodError(gcdx, (a,b)))
+gcdx(a::Real) = (gcd(a), signbit(a) ? -one(a) : one(a))
+function gcdx(a::Real, b::Real, cs::Real...)
+    # a solution to the 3-arg `gcdx(a,b,c)` problem, `u*a + v*b + w*c = gcd(a,b,c)`, can be
+    # obtained from the 2-arg problem in three steps:
+    # 1. `gcdx(a,b)`: solve `i*a + j*b = d′ = gcd(a,b)` for `(i,j)`
+    # 2. `gcdx(d′,c)`: solve `x*gcd(a,b) + yc = gcd(gcd(a,b),c) = gcd(a,b,c)` for `(x,y)`
+    # 3. return `d = gcd(a,b,c)`, `u = i*x`, `v = j*x`, and `w = y`
+    # the N-arg solution proceeds similarly by recursion
+    d, i, j = gcdx(a, b)
+    d′, x, ys... = gcdx(d, cs...)
+    return d′, i*x, j*x, ys...
+end

 # multiplicative inverse of n mod m, error if none

diff --git a/test/rational.jl b/test/rational.jl
index 20a0971068876..90b5414a6fe89 100644
--- a/test/rational.jl
+++ b/test/rational.jl
@@ -702,6 +702,22 @@ end
     end
 end

+@testset "gcdx for 1 and 3+ arguments" begin
+    # one-argument
+    @test gcdx(7) == (7, 1)
+    @test gcdx(-7) == (7, -1)
+    @test gcdx(1//4) == (1//4, 1)
+
+    # 3+ arguments
+    @test gcdx(2//3) == gcdx(2//3) == (2//3, 1)
+    @test gcdx(15, 12, 20) == (1, 7, -7, -1)
+    @test gcdx(60//4, 60//5, 60//3) == (1//1, 7, -7, -1)
+    abcd = (105, 1638, 2145, 3185)
+    d, uvwp... = gcdx(abcd...)
+    @test d == sum(abcd .* uvwp) # u*a + v*b + w*c + p*d == gcd(a, b, c, d)
+    @test (@inferred gcdx(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) isa NTuple{11, Int}
+end
+
 @testset "Binary operations with Integer" begin
     @test 1//2 - 1 == -1//2
     @test -1//2 + 1 == 1//2

From d4ca92c2718c952d1bc2807f1096da3b02a4852a Mon Sep 17 00:00:00 2001
From: Oscar Smith
Date: Wed, 9 Oct 2024 22:31:24 -0400
Subject: [PATCH 156/537] fix `_growbeg!` unnecessary resizing (#56029)

This was very explicitly designed such that if there was a bunch of extra
space at the end of the array, we would copy rather than allocating, but by
making `newmemlen` be at least `overallocation(memlen)` rather than
`overallocation(len)`, this branch was never hit.

Found by https://github.com/JuliaLang/julia/issues/56026
---
 base/array.jl    | 9 +++++++--
 test/arrayops.jl | 5 +++++
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/base/array.jl b/base/array.jl
index 5b3e6cc398479..d8a9c725c8102 100644
--- a/base/array.jl
+++ b/base/array.jl
@@ -1071,13 +1071,14 @@ function _growbeg!(a::Vector, delta::Integer)
         setfield!(a, :ref, @inbounds memoryref(ref, 1 - delta))
     else
         @noinline (function()
+            @_terminates_locally_meta
             memlen = length(mem)
             if offset + len - 1 > memlen || offset < 1
                throw(ConcurrencyViolationError("Vector has invalid state.
Don't modify internal fields incorrectly, or resize without correct locks")) end # since we will allocate the array in the middle of the memory we need at least 2*delta extra space # the +1 is because I didn't want to have an off by 1 error. - newmemlen = max(overallocation(memlen), len + 2 * delta + 1) + newmemlen = max(overallocation(len), len + 2 * delta + 1) newoffset = div(newmemlen - newlen, 2) + 1 # If there is extra data after the end of the array we can use that space so long as there is enough # space at the end that there won't be quadratic behavior with a mix of growth from both ends. @@ -1086,10 +1087,14 @@ function _growbeg!(a::Vector, delta::Integer) if newoffset + newlen < memlen newoffset = div(memlen - newlen, 2) + 1 newmem = mem + unsafe_copyto!(newmem, newoffset + delta, mem, offset, len) + for j in offset:newoffset+delta-1 + @inbounds _unsetindex!(mem, j) + end else newmem = array_new_memory(mem, newmemlen) + unsafe_copyto!(newmem, newoffset + delta, mem, offset, len) end - unsafe_copyto!(newmem, newoffset + delta, mem, offset, len) if ref !== a.ref @noinline throw(ConcurrencyViolationError("Vector can not be resized concurrently")) end diff --git a/test/arrayops.jl b/test/arrayops.jl index 333b68e287c4c..ec8f54828b965 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -496,6 +496,11 @@ end @test vc == [v[1:(i-1)]; 5; v[i:end]] end @test_throws BoundsError insert!(v, 5, 5) + + # test that data is copied when there is plenty of room to do so + v = empty!(collect(1:100)) + pushfirst!(v, 1) + @test length(v.ref.mem) == 100 end @testset "popat!(::Vector, i, [default])" begin From 84a2458e0504d92b3db32bb367e449377c802593 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Thu, 10 Oct 2024 03:09:41 -0400 Subject: [PATCH 157/537] REPL: hide any prints to stdio during `complete_line` (#55959) --- stdlib/REPL/src/LineEdit.jl | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/stdlib/REPL/src/LineEdit.jl b/stdlib/REPL/src/LineEdit.jl index c92dca8c8e015..3ac403df54007 100644 --- a/stdlib/REPL/src/LineEdit.jl +++ b/stdlib/REPL/src/LineEdit.jl @@ -366,7 +366,14 @@ end # Prompt Completions & Hints function complete_line(s::MIState) set_action!(s, :complete_line) - if complete_line(state(s), s.key_repeats, s.active_module) + # suppress stderr/stdout prints during completion computation + # i.e. ambiguous qualification warnings that are printed to stderr + # TODO: remove this suppression once such warnings are better handled + # TODO: but before that change Pipe to devnull once devnull redirects work for JL_STDERR etc. + completions_exist = redirect_stdio(;stderr=Pipe(), stdout=Pipe()) do + complete_line(state(s), s.key_repeats, s.active_module) + end + if completions_exist return refresh_line(s) else beep(s) @@ -384,7 +391,13 @@ function check_for_hint(s::MIState) end completions, partial, should_complete = try - complete_line(st.p.complete, st, s.active_module; hint = true)::Tuple{Vector{String},String,Bool} + # suppress stderr/stdout prints during completion computation + # i.e. ambiguous qualification warnings that are printed to stderr + # TODO: remove this suppression once such warnings are better handled + # TODO: but before that change Pipe to devnull once devnull redirects work for JL_STDERR etc. 
+ completions, partial, should_complete = redirect_stdio(;stderr=Pipe(), stdout=Pipe()) do + complete_line(st.p.complete, st, s.active_module; hint = true)::Tuple{Vector{String},String,Bool} + end catch @debug "error completing line for hint" exception=current_exceptions() return clear_hint(st) From ce83853a86ecbbc84078ea707a0395c34bdf3a13 Mon Sep 17 00:00:00 2001 From: Oscar Smith Date: Thu, 10 Oct 2024 09:36:39 -0400 Subject: [PATCH 158/537] teach llvm-alloc-helpers about `gc_loaded` (#56030) combined with https://github.com/JuliaLang/julia/pull/55913, the compiler is smart enough to fully remove ``` function f() m = Memory{Int}(undef, 3) @inbounds m[1] = 2 @inbounds m[2] = 2 @inbounds m[3] = 4 @inbounds return m[1] + m[2] + m[3] end ``` --- src/llvm-alloc-helpers.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/llvm-alloc-helpers.cpp b/src/llvm-alloc-helpers.cpp index 9d2fba832839c..75030f8565221 100644 --- a/src/llvm-alloc-helpers.cpp +++ b/src/llvm-alloc-helpers.cpp @@ -245,6 +245,11 @@ void jl_alloc::runEscapeAnalysis(llvm::CallInst *I, EscapeAnalysisRequiredArgs r required.use_info.addrescaped = true; return true; } + if (required.pass.gc_loaded_func == callee) { + required.use_info.haspreserve = true; + required.use_info.hasload = true; + return true; + } if (required.pass.typeof_func == callee) { required.use_info.hastypeof = true; assert(use->get() == I); From 6fa4af56fa888b2474618eade27aeab47ddd097f Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 10 Oct 2024 09:49:12 -0400 Subject: [PATCH 159/537] mpfr: prevent changing precision (#56049) Changing precision requires reallocating the data field, which is better done by making a new BigFloat (since they are conceptually immutable anyways). Also do a bit a cleanup while here. Closes #56044 --- base/mpfr.jl | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) diff --git a/base/mpfr.jl b/base/mpfr.jl index 9d1a0843ebe06..1e39f52b9d1a3 100644 --- a/base/mpfr.jl +++ b/base/mpfr.jl @@ -142,7 +142,10 @@ struct BigFloat <: AbstractFloat # Not recommended for general use: # used internally by, e.g. 
deepcopy - global _BigFloat(d::Memory{Limb}) = new(d) + global function _BigFloat(d::Memory{Limb}) + Base.unsafe_convert(Ref{BigFloat}, BigFloatData(d)) # force early initialization of pointer field of z.d + return new(d) + end function BigFloat(; precision::Integer=_precision_with_base_2(BigFloat)) precision < 1 && throw(DomainError(precision, "`precision` cannot be less than 1.")) @@ -150,14 +153,17 @@ struct BigFloat <: AbstractFloat nl = (nb + offset_p + sizeof(Limb) - 1) ÷ Core.sizeof(Limb) # align to number of Limb allocations required for this d = Memory{Limb}(undef, nl % Int) # ccall-based version, inlined below - z = _BigFloat(d) # initialize to +NAN #ccall((:mpfr_custom_init,libmpfr), Cvoid, (Ptr{Limb}, Clong), BigFloatData(d), prec) # currently seems to be a no-op in mpfr #NAN_KIND = Cint(0) #ccall((:mpfr_custom_init_set,libmpfr), Cvoid, (Ref{BigFloat}, Cint, Clong, Ptr{Limb}), z, NAN_KIND, prec, BigFloatData(d)) - z.prec = Clong(precision) - z.sign = one(Cint) - z.exp = mpfr_special_exponent_nan - return z + p = Base.unsafe_convert(Ptr{Limb}, d) + GC.@preserve d begin # initialize to +NAN + unsafe_store!(Ptr{Clong}(p) + offset_prec, Clong(precision)) + unsafe_store!(Ptr{Cint}(p) + offset_sign, one(Cint)) + unsafe_store!(Ptr{Clong}(p) + offset_exp, mpfr_special_exponent_nan) + unsafe_store!(Ptr{Ptr{Limb}}(p) + offset_d, p + offset_p) + end + return new(d) end end @@ -186,16 +192,16 @@ end end end +# While BigFloat (like all Numbers) is considered immutable, for practical reasons +# of writing the algorithms on it we allow mutating sign, exp, and the contents of d @inline function Base.setproperty!(x::BigFloat, s::Symbol, v) d = getfield(x, :d) p = Base.unsafe_convert(Ptr{Limb}, d) - if s === :prec - return GC.@preserve d unsafe_store!(Ptr{Clong}(p) + offset_prec, v) - elseif s === :sign + if s === :sign return GC.@preserve d unsafe_store!(Ptr{Cint}(p) + offset_sign, v) elseif s === :exp return GC.@preserve d unsafe_store!(Ptr{Clong}(p) + offset_exp, v) - #elseif s === :d # not mutable + #elseif s === :d || s === :prec # not mutable else return throw(FieldError(x, s)) end @@ -208,7 +214,11 @@ Base.cconvert(::Type{Ref{BigFloat}}, x::BigFloat) = x.d # BigFloatData is the Re function Base.unsafe_convert(::Type{Ref{BigFloat}}, x::BigFloatData) d = getfield(x, :d) p = Base.unsafe_convert(Ptr{Limb}, d) - GC.@preserve d unsafe_store!(Ptr{Ptr{Limb}}(p) + offset_d, p + offset_p, :monotonic) # :monotonic ensure that TSAN knows that this isn't a data race + dptrptr = Ptr{Ptr{Limb}}(p) + offset_d + dptr = p + offset_p + GC.@preserve d if unsafe_load(dptrptr, :monotonic) != dptr # make sure this pointer value was recomputed after any deserialization or copying + unsafe_store!(dptrptr, dptr, :monotonic) # :monotonic ensure that TSAN knows that this isn't a data race + end return Ptr{BigFloat}(p) end Base.unsafe_convert(::Type{Ptr{Limb}}, fd::BigFloatData) = Base.unsafe_convert(Ptr{Limb}, getfield(fd, :d)) + offset_p From 224ff57eafd0f34b00648597bfa5a28455a50b23 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 10 Oct 2024 09:57:47 -0400 Subject: [PATCH 160/537] stackwalk: fix jl_thread_suspend_and_get_state race (#56047) There was a missing re-assignment of old = -1; at the end of that loop which means in the ABA case, we accidentally actually acquire the lock on the thread despite not actually having stopped the thread; or in the counter-case, we try to run through this logic with old==-1 on the next iteration, and that isn't valid either (jl_thread_suspend_and_get_state should return 
failure and the loop will abort too early). Fix #56046 --- src/stackwalk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/stackwalk.c b/src/stackwalk.c index 6aa36fa8b499c..7fb4de0372738 100644 --- a/src/stackwalk.c +++ b/src/stackwalk.c @@ -1196,8 +1196,8 @@ JL_DLLEXPORT size_t jl_record_backtrace(jl_task_t *t, jl_bt_element_t *bt_data, } bt_context_t *context = NULL; bt_context_t c; - int16_t old = -1; - while (!jl_atomic_cmpswap(&t->tid, &old, ptls->tid) && old != ptls->tid) { + int16_t old; + for (old = -1; !jl_atomic_cmpswap(&t->tid, &old, ptls->tid) && old != ptls->tid; old = -1) { int lockret = jl_lock_stackwalk(); // if this task is already running somewhere, we need to stop the thread it is running on and query its state if (!jl_thread_suspend_and_get_state(old, 1, &c)) { From d60837f29e861fad4afe6c29dba788e44b627bf4 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Thu, 10 Oct 2024 16:10:33 +0200 Subject: [PATCH 161/537] irrationals: restrict assume effects annotations to known types (#55886) Other changes: * replace `:total` with the less powerful `:foldable` * add an `<:Integer` dispatch constraint on the `rationalize` method, closes #55872 * replace `Rational{<:Integer}` with just `Rational`, they're equal Other issues, related to `BigFloat` precision, are still present in irrationals.jl, to be fixed by followup PRs, including #55853. Fixes #55874 --- base/irrationals.jl | 22 ++++++++++++++-------- base/mathconstants.jl | 20 ++++++++++++++++++++ 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/base/irrationals.jl b/base/irrationals.jl index b3073c503238a..c51b66045723f 100644 --- a/base/irrationals.jl +++ b/base/irrationals.jl @@ -51,8 +51,7 @@ AbstractFloat(x::AbstractIrrational) = Float64(x)::Float64 Float16(x::AbstractIrrational) = Float16(Float32(x)::Float32) Complex{T}(x::AbstractIrrational) where {T<:Real} = Complex{T}(T(x)) -# XXX this may change `DEFAULT_PRECISION`, thus not effect free -@assume_effects :total function Rational{T}(x::AbstractIrrational) where T<:Integer +function _irrational_to_rational(::Type{T}, x::AbstractIrrational) where T<:Integer o = precision(BigFloat) p = 256 while true @@ -66,13 +65,16 @@ Complex{T}(x::AbstractIrrational) where {T<:Real} = Complex{T}(T(x)) p += 32 end end -Rational{BigInt}(x::AbstractIrrational) = throw(ArgumentError("Cannot convert an AbstractIrrational to a Rational{BigInt}: use rationalize(BigInt, x) instead")) +Rational{T}(x::AbstractIrrational) where {T<:Integer} = _irrational_to_rational(T, x) +_throw_argument_error_irrational_to_rational_bigint() = throw(ArgumentError("Cannot convert an AbstractIrrational to a Rational{BigInt}: use rationalize(BigInt, x) instead")) +Rational{BigInt}(::AbstractIrrational) = _throw_argument_error_irrational_to_rational_bigint() -@assume_effects :total function (t::Type{T})(x::AbstractIrrational, r::RoundingMode) where T<:Union{Float32,Float64} +function _irrational_to_float(::Type{T}, x::AbstractIrrational, r::RoundingMode) where T<:Union{Float32,Float64} setprecision(BigFloat, 256) do T(BigFloat(x)::BigFloat, r) end end +(::Type{T})(x::AbstractIrrational, r::RoundingMode) where {T<:Union{Float32,Float64}} = _irrational_to_float(T, x, r) float(::Type{<:AbstractIrrational}) = Float64 @@ -110,14 +112,18 @@ end <=(x::AbstractFloat, y::AbstractIrrational) = x < y # Irrational vs Rational -@assume_effects :total function rationalize(::Type{T}, x::AbstractIrrational; tol::Real=0) where T +function _rationalize_irrational(::Type{T}, x::AbstractIrrational, 
tol::Real) where {T<:Integer} return rationalize(T, big(x), tol=tol) end -@assume_effects :total function lessrational(rx::Rational{<:Integer}, x::AbstractIrrational) - # an @assume_effects :total version of `<` for determining if the rationalization of - # an irrational number required rounding up or down +function rationalize(::Type{T}, x::AbstractIrrational; tol::Real=0) where {T<:Integer} + return _rationalize_irrational(T, x, tol) +end +function _lessrational(rx::Rational, x::AbstractIrrational) return rx < big(x) end +function lessrational(rx::Rational, x::AbstractIrrational) + return _lessrational(rx, x) +end function <(x::AbstractIrrational, y::Rational{T}) where T T <: Unsigned && x < 0.0 && return true rx = rationalize(T, x) diff --git a/base/mathconstants.jl b/base/mathconstants.jl index 4bb8c409acf00..de6b98cea634d 100644 --- a/base/mathconstants.jl +++ b/base/mathconstants.jl @@ -16,6 +16,26 @@ Base.@irrational γ euler Base.@irrational φ (1+sqrt(big(5)))/2 Base.@irrational catalan catalan +const _KnownIrrational = Union{ + typeof(π), typeof(ℯ), typeof(γ), typeof(φ), typeof(catalan) +} + +function Rational{BigInt}(::_KnownIrrational) + Base._throw_argument_error_irrational_to_rational_bigint() +end +Base.@assume_effects :foldable function Rational{T}(x::_KnownIrrational) where {T<:Integer} + Base._irrational_to_rational(T, x) +end +Base.@assume_effects :foldable function (::Type{T})(x::_KnownIrrational, r::RoundingMode) where {T<:Union{Float32,Float64}} + Base._irrational_to_float(T, x, r) +end +Base.@assume_effects :foldable function rationalize(::Type{T}, x::_KnownIrrational; tol::Real=0) where {T<:Integer} + Base._rationalize_irrational(T, x, tol) +end +Base.@assume_effects :foldable function Base.lessrational(rx::Rational, x::_KnownIrrational) + Base._lessrational(rx, x) +end + # aliases """ π From e95860c8595934af535398d160a2b461eeccffc5 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Thu, 10 Oct 2024 16:56:22 +0200 Subject: [PATCH 162/537] update `hash` doc string: `widen` not required any more (#55867) Implementing `widen` isn't a requirement any more, since #26022. --- base/hashing.jl | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/base/hashing.jl b/base/hashing.jl index 7de9f47de3182..d4a6217de6edb 100644 --- a/base/hashing.jl +++ b/base/hashing.jl @@ -11,9 +11,7 @@ optional second argument `h` is another hash code to be mixed with the result. New types should implement the 2-argument form, typically by calling the 2-argument `hash` method recursively in order to mix hashes of the contents with each other (and with `h`). Typically, any type that implements `hash` should also implement its own [`==`](@ref) (hence -[`isequal`](@ref)) to guarantee the property mentioned above. Types supporting subtraction -(operator `-`) should also implement [`widen`](@ref), which is required to hash -values inside heterogeneous arrays. +[`isequal`](@ref)) to guarantee the property mentioned above. The hash value may change when a new Julia process is started. 
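For context on the guidance that remains in this docstring, the recommended 2-argument pattern looks like the following (illustrative sketch, not part of the diff):

```julia
struct Point
    x::Int
    y::Int
end

# Define `==` and a matching 2-argument `hash` that mixes the fields with `h`.
Base.:(==)(a::Point, b::Point) = a.x == b.x && a.y == b.y
Base.hash(p::Point, h::UInt) = hash(p.y, hash(p.x, hash(:Point, h)))
```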
From a233425f7afd4529b429a1b9e7a8ebb2e402680e Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 10 Oct 2024 21:31:35 +0530 Subject: [PATCH 163/537] Merge `diag` methods for triangular matrices (#56086) --- stdlib/LinearAlgebra/src/triangular.jl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index e1d61e4035966..3f06bd2efe29a 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -494,10 +494,8 @@ adjoint!(A::UnitLowerTriangular) = UnitUpperTriangular(copytri!(A.data, 'L' , tr adjoint!(A::UpperTriangular) = LowerTriangular(copytri!(A.data, 'U' , true, true)) adjoint!(A::UnitUpperTriangular) = UnitLowerTriangular(copytri!(A.data, 'U' , true, false)) -diag(A::LowerTriangular) = diag(A.data) -diag(A::UnitLowerTriangular) = fill(oneunit(eltype(A)), size(A,1)) -diag(A::UpperTriangular) = diag(A.data) -diag(A::UnitUpperTriangular) = fill(oneunit(eltype(A)), size(A,1)) +diag(A::UpperOrLowerTriangular) = diag(A.data) +diag(A::Union{UnitLowerTriangular, UnitUpperTriangular}) = fill(oneunit(eltype(A)), size(A,1)) # Unary operations -(A::LowerTriangular) = LowerTriangular(-A.data) From dc609a7328ffe42552dea8b79456d29557c08ac3 Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Thu, 10 Oct 2024 18:36:31 +0200 Subject: [PATCH 164/537] slightly improve inference in precompilation code (#56084) Avoids the ``` 11: signature Tuple{typeof(convert), Type{String}, Any} triggered MethodInstance for Base.Precompilation.ExplicitEnv(::String) (84 children) ``` shown in https://github.com/JuliaLang/julia/issues/56080#issuecomment-2404765120 Co-authored-by: KristofferC --- base/precompilation.jl | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/base/precompilation.jl b/base/precompilation.jl index b351ce67cfbad..ea98b0c415ab4 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -43,7 +43,7 @@ function ExplicitEnv(envpath::String=Base.active_project()) # Collect all direct dependencies of the project for key in ["deps", "weakdeps", "extras"] - for (name, _uuid) in get(Dict{String, Any}, project_d, key)::Dict{String, Any} + for (name, _uuid::String) in get(Dict{String, Any}, project_d, key)::Dict{String, Any} v = key == "deps" ? project_deps : key == "weakdeps" ? project_weakdeps : key == "extras" ? 
project_extras : @@ -107,9 +107,8 @@ function ExplicitEnv(envpath::String=Base.active_project()) sizehint!(name_to_uuid, length(manifest_d)) sizehint!(lookup_strategy, length(manifest_d)) - for (name, pkg_infos) in get_deps(manifest_d) - pkg_infos = pkg_infos::Vector{Any} - for pkg_info in pkg_infos + for (name, pkg_infos::Vector{Any}) in get_deps(manifest_d) + for pkg_info::Dict{String, Any} in pkg_infos m_uuid = UUID(pkg_info["uuid"]::String) # If we have multiple packages with the same name we will overwrite things here @@ -141,8 +140,7 @@ function ExplicitEnv(envpath::String=Base.active_project()) # Extensions deps_pkg = get(Dict{String, Any}, pkg_info, "extensions")::Dict{String, Any} - for (ext, triggers) in deps_pkg - triggers = triggers::Union{String, Vector{String}} + for (ext, triggers::Union{String, Vector{String}}) in deps_pkg if triggers isa String triggers = [triggers] end @@ -176,7 +174,7 @@ function ExplicitEnv(envpath::String=Base.active_project()) if proj_name !== nothing && proj_uuid !== nothing deps_expanded[proj_uuid] = filter!(!=(proj_uuid), collect(values(project_deps))) extensions_expanded[proj_uuid] = project_extensions - path = get(project_d, "path", nothing) + path = get(project_d, "path", nothing)::Union{String, Nothing} entry_point = path !== nothing ? path : dirname(envpath) lookup_strategy[proj_uuid] = entry_point end From a007e807623f0bbb820315b8ce3340bd3d41262b Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Thu, 10 Oct 2024 18:37:05 +0200 Subject: [PATCH 165/537] avoid defining `convert(Vector{String}, ...)` in LibGit2 (#56082) This is a weird conversion function to define. Seems cleaner to use the iteration interface for this. Also avoids some invalidations (https://github.com/JuliaLang/julia/issues/56080#issuecomment-2404765120) Co-authored-by: KristofferC --- stdlib/LibGit2/src/reference.jl | 2 +- stdlib/LibGit2/src/remote.jl | 4 ++-- stdlib/LibGit2/src/repository.jl | 2 +- stdlib/LibGit2/src/strarray.jl | 7 ++++--- stdlib/LibGit2/src/tag.jl | 2 +- stdlib/LibGit2/src/types.jl | 2 +- stdlib/LibGit2/test/libgit2-tests.jl | 2 +- 7 files changed, 11 insertions(+), 10 deletions(-) diff --git a/stdlib/LibGit2/src/reference.jl b/stdlib/LibGit2/src/reference.jl index 8a9bc5cf1a6de..de6be0dbe9543 100644 --- a/stdlib/LibGit2/src/reference.jl +++ b/stdlib/LibGit2/src/reference.jl @@ -215,7 +215,7 @@ function ref_list(repo::GitRepo) sa_ref = Ref(StrArrayStruct()) @check ccall((:git_reference_list, libgit2), Cint, (Ptr{StrArrayStruct}, Ptr{Cvoid}), sa_ref, repo) - res = convert(Vector{String}, sa_ref[]) + res = collect(sa_ref[]) free(sa_ref) res end diff --git a/stdlib/LibGit2/src/remote.jl b/stdlib/LibGit2/src/remote.jl index 5081eff56dd46..5b815f946fb17 100644 --- a/stdlib/LibGit2/src/remote.jl +++ b/stdlib/LibGit2/src/remote.jl @@ -215,7 +215,7 @@ function fetch_refspecs(rmt::GitRemote) sa_ref = Ref(StrArrayStruct()) @check ccall((:git_remote_get_fetch_refspecs, libgit2), Cint, (Ptr{StrArrayStruct}, Ptr{Cvoid}), sa_ref, rmt) - res = convert(Vector{String}, sa_ref[]) + res = collect(sa_ref[]) free(sa_ref) res end @@ -245,7 +245,7 @@ function push_refspecs(rmt::GitRemote) sa_ref = Ref(StrArrayStruct()) @check ccall((:git_remote_get_push_refspecs, libgit2), Cint, (Ptr{StrArrayStruct}, Ptr{Cvoid}), sa_ref, rmt) - res = convert(Vector{String}, sa_ref[]) + res = collect(sa_ref[]) free(sa_ref) res end diff --git a/stdlib/LibGit2/src/repository.jl b/stdlib/LibGit2/src/repository.jl index 192a6870f639b..9c8d379578b96 100644 --- a/stdlib/LibGit2/src/repository.jl 
+++ b/stdlib/LibGit2/src/repository.jl @@ -518,7 +518,7 @@ function remotes(repo::GitRepo) @assert repo.ptr != C_NULL @check ccall((:git_remote_list, libgit2), Cint, (Ptr{StrArrayStruct}, Ptr{Cvoid}), sa_ref, repo) - res = convert(Vector{String}, sa_ref[]) + res = collect(sa_ref[]) free(sa_ref) return res end diff --git a/stdlib/LibGit2/src/strarray.jl b/stdlib/LibGit2/src/strarray.jl index db0803680f72b..78e38a9502128 100644 --- a/stdlib/LibGit2/src/strarray.jl +++ b/stdlib/LibGit2/src/strarray.jl @@ -1,6 +1,5 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license - function Base.cconvert(::Type{Ptr{StrArrayStruct}}, x::Vector) str_ref = Base.cconvert(Ref{Cstring}, x) sa_ref = Ref(StrArrayStruct(Base.unsafe_convert(Ref{Cstring}, str_ref), length(x))) @@ -10,6 +9,8 @@ function Base.unsafe_convert(::Type{Ptr{StrArrayStruct}}, rr::Tuple{Ref{StrArray Base.unsafe_convert(Ptr{StrArrayStruct}, first(rr)) end -function Base.convert(::Type{Vector{String}}, sa::StrArrayStruct) - [unsafe_string(unsafe_load(sa.strings, i)) for i = 1:sa.count] +Base.length(sa::StrArrayStruct) = sa.count +function Base.iterate(sa::StrArrayStruct, state=1) + state > sa.count && return nothing + (unsafe_string(unsafe_load(sa.strings, state)), state+1) end diff --git a/stdlib/LibGit2/src/tag.jl b/stdlib/LibGit2/src/tag.jl index bbb0c97a484ec..73f010590e9c1 100644 --- a/stdlib/LibGit2/src/tag.jl +++ b/stdlib/LibGit2/src/tag.jl @@ -10,7 +10,7 @@ function tag_list(repo::GitRepo) sa_ref = Ref(StrArrayStruct()) @check ccall((:git_tag_list, libgit2), Cint, (Ptr{StrArrayStruct}, Ptr{Cvoid}), sa_ref, repo) - res = convert(Vector{String}, sa_ref[]) + res = collect(sa_ref[]) free(sa_ref) res end diff --git a/stdlib/LibGit2/src/types.jl b/stdlib/LibGit2/src/types.jl index 7a4ad37a68ca5..9228bec175737 100644 --- a/stdlib/LibGit2/src/types.jl +++ b/stdlib/LibGit2/src/types.jl @@ -78,7 +78,7 @@ When fetching data from LibGit2, a typical usage would look like: ```julia sa_ref = Ref(StrArrayStruct()) @check ccall(..., (Ptr{StrArrayStruct},), sa_ref) -res = convert(Vector{String}, sa_ref[]) +res = collect(sa_ref[]) free(sa_ref) ``` In particular, note that `LibGit2.free` should be called afterward on the `Ref` object. diff --git a/stdlib/LibGit2/test/libgit2-tests.jl b/stdlib/LibGit2/test/libgit2-tests.jl index 72ca1019ff9e0..9ab75ed1dc39b 100644 --- a/stdlib/LibGit2/test/libgit2-tests.jl +++ b/stdlib/LibGit2/test/libgit2-tests.jl @@ -95,7 +95,7 @@ end p = ["XXX","YYY"] a = Base.cconvert(Ptr{LibGit2.StrArrayStruct}, p) b = Base.unsafe_convert(Ptr{LibGit2.StrArrayStruct}, a) - @test p == convert(Vector{String}, unsafe_load(b)) + @test p == collect(unsafe_load(b)) @noinline gcuse(a) = a gcuse(a) end From 746655287b04b4092e1bf43d82013ffd310ed35c Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Thu, 10 Oct 2024 20:23:14 -0400 Subject: [PATCH 166/537] array: inline `convert` where possible (#56034) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This improves a common scenario, where someone wants to `push!` a poorly-typed object onto a well-typed Vector. For example: ```julia const NT = @NamedTuple{x::Int,y::Any} foo(v::Vector{NT}, x::Int, @nospecialize(y)) = push!(v, (; x, y)) ``` The `(; x, y)` is slightly poorly-typed here. 
It could have any type for its `.y` field before it is converted inside the `push!` to a NamedTuple with `y::Any` Without this PR, the dispatch for this `push!` cannot be inferred: ```julia julia> code_typed(foo, (Vector{NT}, Int, Any))[1] CodeInfo( 1 ─ ... │ %4 = %new(%3, x, y)::NamedTuple{(:x, :y), <:Tuple{Int64, Any}} │ %5 = Main.push!(v, %4)::Vector{@NamedTuple{x::Int64, y}} └── return %5 ) => Vector{@NamedTuple{x::Int64, y}} ``` With this PR, the above dynamic call is fully statically resolved and inlined (and therefore `--trim` compatible) --- base/array.jl | 42 +++++++++++++++++++++++++++++++++------- base/namedtuple.jl | 5 +++-- test/abstractarray.jl | 9 +++++++++ test/compiler/effects.jl | 2 +- 4 files changed, 48 insertions(+), 10 deletions(-) diff --git a/base/array.jl b/base/array.jl index d8a9c725c8102..9bd632f794aa5 100644 --- a/base/array.jl +++ b/base/array.jl @@ -324,9 +324,13 @@ copyto!(dest::Array{T}, src::Array{T}) where {T} = copyto!(dest, 1, src, 1, leng # N.B: The generic definition in multidimensional.jl covers, this, this is just here # for bootstrapping purposes. function fill!(dest::Array{T}, x) where T - xT = x isa T ? x : convert(T, x)::T + @inline + x = x isa T ? x : convert(T, x)::T + return _fill!(dest, x) +end +function _fill!(dest::Array{T}, x::T) where T for i in eachindex(dest) - @inbounds dest[i] = xT + @inbounds dest[i] = x end return dest end @@ -980,12 +984,22 @@ Dict{String, Int64} with 2 entries: function setindex! end function setindex!(A::Array{T}, x, i::Int) where {T} + @_propagate_inbounds_meta + x = x isa T ? x : convert(T, x)::T + return _setindex!(A, x, i) +end +function _setindex!(A::Array{T}, x::T, i::Int) where {T} @_noub_if_noinbounds_meta @boundscheck (i - 1)%UInt < length(A)%UInt || throw_boundserror(A, (i,)) - memoryrefset!(memoryrefnew(A.ref, i, false), x isa T ? x : convert(T,x)::T, :not_atomic, false) + memoryrefset!(memoryrefnew(A.ref, i, false), x, :not_atomic, false) return A end function setindex!(A::Array{T}, x, i1::Int, i2::Int, I::Int...) where {T} + @_propagate_inbounds_meta + x = x isa T ? x : convert(T, x)::T + return _setindex!(A, x, i1, i2, I...) +end +function _setindex!(A::Array{T}, x, i1::Int, i2::Int, I::Int...) where {T} @inline @_noub_if_noinbounds_meta @boundscheck checkbounds(A, i1, i2, I...) # generally _to_linear_index requires bounds checking @@ -1267,10 +1281,16 @@ See also [`pushfirst!`](@ref). function push! end function push!(a::Vector{T}, item) where T + @inline # convert first so we don't grow the array if the assignment won't work - itemT = item isa T ? item : convert(T, item)::T + # and also to avoid a dynamic dynamic dispatch in the common case that + # `item` is poorly-typed and `a` is well-typed + item = item isa T ? item : convert(T, item)::T + return _push!(a, item) +end +function _push!(a::Vector{T}, item::T) where T _growend!(a, 1) - @_safeindex a[length(a)] = itemT + @_safeindex a[length(a)] = item return a end @@ -1664,7 +1684,11 @@ julia> pushfirst!([1, 2, 3, 4], 5, 6) ``` """ function pushfirst!(a::Vector{T}, item) where T + @inline item = item isa T ? item : convert(T, item)::T + return _pushfirst!(a, item) +end +function _pushfirst!(a::Vector{T}, item::T) where T _growbeg!(a, 1) @_safeindex a[1] = item return a @@ -1750,12 +1774,16 @@ julia> insert!(Any[1:6;], 3, "here") ``` """ function insert!(a::Array{T,1}, i::Integer, item) where T + @_propagate_inbounds_meta + item = item isa T ? 
item : convert(T, item)::T + return _insert!(a, i, item) +end +function _insert!(a::Array{T,1}, i::Integer, item::T) where T @_noub_meta # Throw convert error before changing the shape of the array - _item = item isa T ? item : convert(T, item)::T _growat!(a, i, 1) # :noub, because _growat! already did bound check - @inbounds a[i] = _item + @inbounds a[i] = item return a end diff --git a/base/namedtuple.jl b/base/namedtuple.jl index e316dbd37ccf5..a7379121b2ce2 100644 --- a/base/namedtuple.jl +++ b/base/namedtuple.jl @@ -179,10 +179,11 @@ nextind(@nospecialize(t::NamedTuple), i::Integer) = Int(i)+1 convert(::Type{NT}, nt::NT) where {names, NT<:NamedTuple{names}} = nt convert(::Type{NT}, nt::NT) where {names, T<:Tuple, NT<:NamedTuple{names,T}} = nt -convert(::Type{NT}, t::Tuple) where {NT<:NamedTuple} = NT(t) +convert(::Type{NT}, t::Tuple) where {NT<:NamedTuple} = (@inline NT(t))::NT function convert(::Type{NamedTuple{names,T}}, nt::NamedTuple{names}) where {names,T<:Tuple} - NamedTuple{names,T}(T(nt))::NamedTuple{names,T} + NT = NamedTuple{names,T} + (@inline NT(nt))::NT end function convert(::Type{NT}, nt::NamedTuple{names}) where {names, NT<:NamedTuple{names}} diff --git a/test/abstractarray.jl b/test/abstractarray.jl index f655d9abe423f..b40956b433630 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -2,6 +2,8 @@ using Random, LinearAlgebra +include("compiler/irutils.jl") + isdefined(Main, :InfiniteArrays) || @eval Main include("testhelpers/InfiniteArrays.jl") using .Main.InfiniteArrays @@ -1403,6 +1405,8 @@ end Base.push!(tpa::TestPushArray{T}, a::T) where T = push!(tpa.data, a) Base.pushfirst!(tpa::TestPushArray{T}, a::T) where T = pushfirst!(tpa.data, a) +push_slightly_abstract_namedtuple(v::Vector{@NamedTuple{x::Int,y::Any}}, x::Int, @nospecialize(y)) = push!(v, (; x, y)) + @testset "push! and pushfirst!" 
begin a_orig = [1] tpa = TestPushArray{Int, 2}(a_orig) @@ -1412,6 +1416,11 @@ Base.pushfirst!(tpa::TestPushArray{T}, a::T) where T = pushfirst!(tpa.data, a) tpa = TestPushArray{Int, 2}(a_orig) pushfirst!(tpa, 6, 5, 4, 3, 2) @test tpa.data == reverse(collect(1:6)) + + let src = code_typed1(push_slightly_abstract_namedtuple, (Vector{@NamedTuple{x::Int,y::Any}},Int,Any)) + # After optimization, all `push!` and `convert` calls should have been inlined + @test all((x)->!iscall((src, push!))(x) && !iscall((src, convert))(x), src.code) + end end mutable struct SimpleArray{T} <: AbstractVector{T} diff --git a/test/compiler/effects.jl b/test/compiler/effects.jl index c8a699b294d37..cdc26cddc440d 100644 --- a/test/compiler/effects.jl +++ b/test/compiler/effects.jl @@ -1225,7 +1225,7 @@ end @test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(getindex, (Vector{Int},Int))) @test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(getindex, (Vector{Any},Int))) @test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(setindex!, (Vector{Int},Int,Int))) -@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(setindex!, (Vector{Any},Any,Int))) +@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(Base._setindex!, (Vector{Any},Any,Int))) @test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(isassigned, (Vector{Int},Int))) @test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(isassigned, (Vector{Any},Int))) @test Base.infer_effects((Vector{Int},Int)) do xs, i From 055e37ead67b04266229bee765ade9e361b8f791 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 11 Oct 2024 07:50:40 +0530 Subject: [PATCH 167/537] Remove some unnecessary `real` specializations for structured matrices (#56083) The `real(::AbstractArray{<:Real})` fallback method should handle these cases correctly.
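[Editor's note, not part of the patch: a small illustrative check of the claim above, assuming LinearAlgebra is loaded. The generic `real(::AbstractArray{<:Real})` fallback returns a real-eltype wrapper unchanged, so the deleted specializations were redundant.]

```julia
using LinearAlgebra
U = UpperTriangular([1.0 2.0; 0.0 3.0])
# With the specialized method removed, this dispatches to the AbstractArray
# fallback, which returns the wrapper itself.
real(U) === U
```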
--- stdlib/LinearAlgebra/src/hessenberg.jl | 1 - stdlib/LinearAlgebra/src/triangular.jl | 1 - 2 files changed, 2 deletions(-) diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl index 524e57711ce3a..bfe2fdd41aace 100644 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ b/stdlib/LinearAlgebra/src/hessenberg.jl @@ -70,7 +70,6 @@ Base.dataids(A::UpperHessenberg) = Base.dataids(parent(A)) Base.unaliascopy(A::UpperHessenberg) = UpperHessenberg(Base.unaliascopy(parent(A))) copy(H::UpperHessenberg) = UpperHessenberg(copy(H.data)) -real(H::UpperHessenberg{<:Real}) = H real(H::UpperHessenberg{<:Complex}) = UpperHessenberg(triu!(real(H.data),-1)) imag(H::UpperHessenberg) = UpperHessenberg(triu!(imag(H.data),-1)) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 3f06bd2efe29a..ee63865b65d6e 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -45,7 +45,6 @@ for t in (:LowerTriangular, :UnitLowerTriangular, :UpperTriangular, :UnitUpperTr copy(A::$t) = $t(copy(A.data)) Base.unaliascopy(A::$t) = $t(Base.unaliascopy(A.data)) - real(A::$t{<:Real}) = A real(A::$t{<:Complex}) = (B = real(A.data); $t(B)) real(A::$t{<:Complex, <:StridedMaybeAdjOrTransMat}) = $t(real.(A)) end From 41b1778117b26251c11de1ad2f562cc7d00e0d43 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 11 Oct 2024 07:51:33 +0530 Subject: [PATCH 168/537] Combine `diag` methods for `SymTridiagonal` (#56014) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, there are two branches, one for an `eltype` that is a `Number`, and the other that deals with generic `eltype`s. They do similar things, so we may combine these, and use branches wherever necessary to retain the performance. We also may replace explicit materialized arrays by generators in `copyto!`. Overall, this improves performance in `diag` for matrices of matrices, whereas the performance in the common case of matrices of numbers remains unchanged. ```julia julia> using StaticArrays, LinearAlgebra julia> s = SMatrix{2,2}(1:4); julia> S = SymTridiagonal(fill(s,100), fill(s,99)); julia> @btime diag($S); 1.292 μs (5 allocations: 7.16 KiB) # nightly, v"1.12.0-DEV.1317" 685.012 ns (3 allocations: 3.19 KiB) # This PR ``` This PR also allows computing the `diag` for more values of the band index `n`: ```julia julia> diag(S,99) 1-element Vector{SMatrix{2, 2, Int64, 4}}: [0 0; 0 0] ``` This would work as long as `getindex` works for the `SymTridiagonal` for that band, and the zero element may be converted to the `eltype`. 
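[Editor's note, not part of the patch: for reference, the merged method's behavior with a plain numeric eltype; the values are illustrative.]

```julia
using LinearAlgebra
S = SymTridiagonal([1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0])
diag(S, 2)   # [0.0, 0.0]; bands beyond the first are filled via getindex
diag(S, -3)  # [0.0]
```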
--- stdlib/LinearAlgebra/src/tridiag.jl | 41 ++++++++-------------------- stdlib/LinearAlgebra/test/tridiag.jl | 29 ++++++++++++++------ 2 files changed, 33 insertions(+), 37 deletions(-) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index e75e1e5eefb3d..c1af12514e020 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -183,44 +183,27 @@ issymmetric(S::SymTridiagonal) = true tr(S::SymTridiagonal) = sum(symmetric, S.dv) -@noinline function throw_diag_outofboundserror(n, sz) - sz1, sz2 = sz - throw(ArgumentError(LazyString(lazy"requested diagonal, $n, must be at least $(-sz1) ", - lazy"and at most $sz2 for an $(sz1)-by-$(sz2) matrix"))) -end +_diagiter(M::SymTridiagonal{<:Number}) = M.dv +_diagiter(M::SymTridiagonal) = (symmetric(x, :U) for x in M.dv) +_eviter_transposed(M::SymTridiagonal{<:Number}) = _evview(M) +_eviter_transposed(M::SymTridiagonal) = (transpose(x) for x in _evview(M)) -function diag(M::SymTridiagonal{T}, n::Integer=0) where T<:Number - # every branch call similar(..., ::Int) to make sure the - # same vector type is returned independent of n - absn = abs(n) - if absn == 0 - return copyto!(similar(M.dv, length(M.dv)), M.dv) - elseif absn == 1 - return copyto!(similar(M.ev, length(M.dv)-1), _evview(M)) - elseif absn <= size(M,1) - v = similar(M.dv, size(M,1)-absn) - for i in eachindex(v) - v[i] = M[BandIndex(n,i)] - end - return v - else - throw_diag_outofboundserror(n, size(M)) - end -end function diag(M::SymTridiagonal, n::Integer=0) # every branch call similar(..., ::Int) to make sure the # same vector type is returned independent of n + v = similar(M.dv, max(0, length(M.dv)-abs(n))) if n == 0 - return copyto!(similar(M.dv, length(M.dv)), symmetric.(M.dv, :U)) + return copyto!(v, _diagiter(M)) elseif n == 1 - return copyto!(similar(M.ev, length(M.dv)-1), _evview(M)) + return copyto!(v, _evview(M)) elseif n == -1 - return copyto!(similar(M.ev, length(M.dv)-1), transpose.(_evview(M))) - elseif n <= size(M,1) - throw(ArgumentError("requested diagonal contains undefined zeros of an array type")) + return copyto!(v, _eviter_transposed(M)) else - throw_diag_outofboundserror(n, size(M)) + for i in eachindex(v) + v[i] = M[BandIndex(n,i)] + end end + return v end +(A::SymTridiagonal, B::SymTridiagonal) = SymTridiagonal(A.dv+B.dv, _evview(A)+_evview(B)) diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index b6e93341b1946..b1d52ab8c5679 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -287,8 +287,13 @@ end @test (@inferred diag(A, 1))::typeof(d) == (mat_type == Tridiagonal ? du : dl) @test (@inferred diag(A, -1))::typeof(d) == dl @test (@inferred diag(A, n-1))::typeof(d) == zeros(elty, 1) - @test_throws ArgumentError diag(A, -n - 1) - @test_throws ArgumentError diag(A, n + 1) + if A isa SymTridiagonal + @test isempty(@inferred diag(A, -n - 1)) + @test isempty(@inferred diag(A, n + 1)) + else + @test_throws ArgumentError diag(A, -n - 1) + @test_throws ArgumentError diag(A, n + 1) + end GA = mat_type == Tridiagonal ? mat_type(GenericArray.((dl, d, du))...) : mat_type(GenericArray.((d, dl))...) 
@test (@inferred diag(GA))::typeof(GenericArray(d)) == GenericArray(d) @test (@inferred diag(GA, -1))::typeof(GenericArray(d)) == GenericArray(dl) @@ -501,10 +506,11 @@ end @test @inferred diag(A, 1) == fill(M, n-1) @test @inferred diag(A, 0) == fill(Symmetric(M), n) @test @inferred diag(A, -1) == fill(transpose(M), n-1) - @test_throws ArgumentError diag(A, -2) - @test_throws ArgumentError diag(A, 2) - @test_throws ArgumentError diag(A, n+1) - @test_throws ArgumentError diag(A, -n-1) + @test_broken diag(A, -2) == fill(M, n-2) + @test_broken diag(A, 2) == fill(M, n-2) + @test isempty(@inferred diag(A, n+1)) + @test isempty(@inferred diag(A, -n-1)) + A[1,1] = Symmetric(2M) @test A[1,1] == Symmetric(2M) @test_throws ArgumentError A[1,1] = M @@ -519,8 +525,8 @@ end @test @inferred diag(A, 1) == fill(M, n-1) @test @inferred diag(A, 0) == fill(M, n) @test @inferred diag(A, -1) == fill(M, n-1) - @test_throws MethodError diag(A, -2) - @test_throws MethodError diag(A, 2) + @test_broken diag(A, -2) == fill(M, n-2) + @test_broken diag(A, 2) == fill(M, n-2) @test_throws ArgumentError diag(A, n+1) @test_throws ArgumentError diag(A, -n-1) @@ -532,6 +538,13 @@ end A = Tridiagonal(ev, dv, ev) @test A == Matrix{eltype(A)}(A) end + + M = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) + S = SymTridiagonal(fill(M,4), fill(M,3)) + @test diag(S,2) == fill(zero(M), 2) + @test diag(S,-2) == fill(zero(M), 2) + @test isempty(diag(S,4)) + @test isempty(diag(S,-4)) end @testset "Issue 12068" begin From d55f38a8d853e6ea8aa07a48c713cb026bd688e2 Mon Sep 17 00:00:00 2001 From: Simeon David Schaub Date: Fri, 11 Oct 2024 07:51:36 +0200 Subject: [PATCH 169/537] fix `Vararg{T,T} where T` crashing `code_typed` (#56081) Not sure this is the right place to fix this error, perhaps `match.spec_types` should always be a tuple of valid types? fixes #55916 --------- Co-authored-by: Jameson Nash --- base/compiler/abstractinterpretation.jl | 1 + test/compiler/inference.jl | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index c8a25be422637..70623453e1666 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -547,6 +547,7 @@ function collect_slot_refinements(𝕃ᵢ::AbstractLattice, applicable::Vector{A sigt = Bottom for j = 1:length(applicable) match = applicable[j]::MethodMatch + valid_as_lattice(match.spec_types, true) || continue sigt = sigt ⊔ fieldtype(match.spec_types, i) end if sigt ⊏ argt # i.e. signature type is strictly more specific than the type of the argument slot diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index 7c7726413004a..e3b1ac499e986 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -6048,3 +6048,10 @@ t255751 = Array{Float32, 3} issue55882_nfields(x::Union{T,Nothing}) where T<:Number = nfields(x) @test Base.infer_return_type(issue55882_nfields) <: Int + +# issue #55916 +f55916(x) = 1 +f55916(::Vararg{T,T}) where {T} = "2" +g55916(x) = f55916(x) +# this shouldn't error +@test only(code_typed(g55916, (Any,); optimize=false))[2] == Int From 8169e012d4cbec569cf85f5c92e0343e335b569f Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Fri, 11 Oct 2024 07:54:16 +0200 Subject: [PATCH 170/537] [libblastrampoline_jll] Upgrade to v5.11.1 (#56094) v5.11.1 is a patch release with a couple of RISC-V fixes. 
--- deps/blastrampoline.version | 6 +- deps/checksums/blastrampoline | 68 +++++++++++------------ stdlib/libblastrampoline_jll/Project.toml | 2 +- 3 files changed, 38 insertions(+), 38 deletions(-) diff --git a/deps/blastrampoline.version b/deps/blastrampoline.version index fd055e1ae8120..95771acad9ffa 100644 --- a/deps/blastrampoline.version +++ b/deps/blastrampoline.version @@ -2,6 +2,6 @@ BLASTRAMPOLINE_JLL_NAME := libblastrampoline ## source build -BLASTRAMPOLINE_VER := 5.11.0 -BLASTRAMPOLINE_BRANCH=v5.11.0 -BLASTRAMPOLINE_SHA1=05083d50611b5538df69706f0a952d8e642b0b4b +BLASTRAMPOLINE_VER := 5.11.1 +BLASTRAMPOLINE_BRANCH=v5.11.1 +BLASTRAMPOLINE_SHA1=b09277feafd342520b8476ce443d35327b5e55b4 diff --git a/deps/checksums/blastrampoline b/deps/checksums/blastrampoline index edb8cadc74846..ac028ceb6e124 100644 --- a/deps/checksums/blastrampoline +++ b/deps/checksums/blastrampoline @@ -1,34 +1,34 @@ -blastrampoline-05083d50611b5538df69706f0a952d8e642b0b4b.tar.gz/md5/700b22cb26291736bd1263cd2a7f2d75 -blastrampoline-05083d50611b5538df69706f0a952d8e642b0b4b.tar.gz/sha512/967c16d28834df112916c0904dd4c7231a1c5e4edf279adb26411faa17da28eee4680ce2347b3941520dccbc768944277a8f724b21976960d00f840349b90e36 -libblastrampoline.v5.11.0+0.aarch64-apple-darwin.tar.gz/md5/769458d40e004d6126cae6b34351068f -libblastrampoline.v5.11.0+0.aarch64-apple-darwin.tar.gz/sha512/75a726b9a4f41b70344ceb9e1f1a7ad370bfa84ce44c70b8a965061d777871e3bf2237ae055da7e6202ddef78932ba8baf2a01a675b1b0cec5338ef16ea2081b -libblastrampoline.v5.11.0+0.aarch64-linux-gnu.tar.gz/md5/d92cf3f3fa1e977ea3a1a74acc8442d1 -libblastrampoline.v5.11.0+0.aarch64-linux-gnu.tar.gz/sha512/3354f4eec2a410f81cc0546a04ce98ddd416d441c1701a59ec5bebea99af8823b5af10a85cb4e3377548422c6d6a0a870f2e7a05ad0cda52c6143361d59ba4fb -libblastrampoline.v5.11.0+0.aarch64-linux-musl.tar.gz/md5/41d060c03202b662e47bda5fbf7b1e84 -libblastrampoline.v5.11.0+0.aarch64-linux-musl.tar.gz/sha512/54a05516e12350441c33341fde53bc912aa52dc4b746089c2d21cb75f24f0fb140849a520327db6f52895743eab090b59fa974a2a426a49f8b4e38693340a306 -libblastrampoline.v5.11.0+0.armv6l-linux-gnueabihf.tar.gz/md5/4930dceefac63e7aa5a93e1ba0e00e59 -libblastrampoline.v5.11.0+0.armv6l-linux-gnueabihf.tar.gz/sha512/dafce083c2b409ead61fdbdf4f46b7c93cab00c82a74a181d381c4a93f1e7af035cd6caf407b0199c1f8c2f2f68f93d67938ef092fa4a8d1133f0ea73fb51a9c -libblastrampoline.v5.11.0+0.armv6l-linux-musleabihf.tar.gz/md5/82346cc4ddeaa29ea7a081edfdfcb08b -libblastrampoline.v5.11.0+0.armv6l-linux-musleabihf.tar.gz/sha512/72e387bd661096a46077e8c15e12f8a6f18fd6aaf30af0678d00eca0d83af10758874643f5716539dd38269e831e4649d45db739aeb60996bf1b96277cea1d17 -libblastrampoline.v5.11.0+0.armv7l-linux-gnueabihf.tar.gz/md5/7e8f115268e8c62acaa2a53ecd32e2fe -libblastrampoline.v5.11.0+0.armv7l-linux-gnueabihf.tar.gz/sha512/4210c306ff7ccb53aa6c9f45e134c63b238c563ed753f7536dfc21f6962dfea35d9de62e429e2685b70d0db780ac766b72fd5e76e2d62df74000e3e5d553c30f -libblastrampoline.v5.11.0+0.armv7l-linux-musleabihf.tar.gz/md5/7f388611c477b528a091f697b0d334d9 -libblastrampoline.v5.11.0+0.armv7l-linux-musleabihf.tar.gz/sha512/e9b017dfa8c19cb940395b253f3b28511a6619469fabff7ab1671ed0936e9e0681d1385c3d1f5d6417ccb65ffbdcf53a0c8519d4ef8e89f9500a05ca00296144 -libblastrampoline.v5.11.0+0.i686-linux-gnu.tar.gz/md5/254948ea87a435251b1e064a77b3d635 -libblastrampoline.v5.11.0+0.i686-linux-gnu.tar.gz/sha512/5a51d3c20c49c497a8f0c2d2e7b38b49ec5e367c7013a7f0efa4fc099639da20ef9c0bfdbdfbdc40b27ce61f189b18f5cf617d7a0ed4bc5300da692f7d6b77a4 
-libblastrampoline.v5.11.0+0.i686-linux-musl.tar.gz/md5/a9504870af8db1e247be02c5e188f7a5 -libblastrampoline.v5.11.0+0.i686-linux-musl.tar.gz/sha512/5f0109168a16edb8ca66fcf10c2c10b57fe9c3061c0b08dac4dea936538fa5854aa1b66079f127b5d9902288b61772054013256aa307b682de38e350b1bbb367 -libblastrampoline.v5.11.0+0.i686-w64-mingw32.tar.gz/md5/815822f6bacb42c35b80bc77458c5c49 -libblastrampoline.v5.11.0+0.i686-w64-mingw32.tar.gz/sha512/c82f8c6fe0b7917860e5601c79e35d56297c53b6f7f992841d4f048e7981533e459f9db0805a16d82a9e03d452489760def0d9c57181dcfa5dc363102180eecd -libblastrampoline.v5.11.0+0.powerpc64le-linux-gnu.tar.gz/md5/ee30c9cb4c51df03026f9e471040e9cc -libblastrampoline.v5.11.0+0.powerpc64le-linux-gnu.tar.gz/sha512/5055d83a1b0625364ddd97652a4c6fa39c795078123cad33a085283889274f66c9dc053be0591c14be262dc7eef666726afa922c66ae8d05c2791c3d6bd7009e -libblastrampoline.v5.11.0+0.x86_64-apple-darwin.tar.gz/md5/210cd354c9b4a8aa2a2b55723597e58b -libblastrampoline.v5.11.0+0.x86_64-apple-darwin.tar.gz/sha512/1ee65d598f9f8a2cf7137135c8c2c431520b1cde319fc33dddfbdae9fe01d986e979a97c24cf85c090cc40064cfe47c376dfeb088ff417d17868c4df84fb2fd4 -libblastrampoline.v5.11.0+0.x86_64-linux-gnu.tar.gz/md5/e2213c42eebee6e45079ef6831077b3f -libblastrampoline.v5.11.0+0.x86_64-linux-gnu.tar.gz/sha512/ab2c3026d34962a2ca5116d71a4e8eaaca5182d53f21edd3e4be81ce26e74e427c88797308af7fbbf1b9ee615e0383acf0dae1d0eb207ebc64dddaf927f00b48 -libblastrampoline.v5.11.0+0.x86_64-linux-musl.tar.gz/md5/8cde3c618e882ea2b7c8a017a69175c7 -libblastrampoline.v5.11.0+0.x86_64-linux-musl.tar.gz/sha512/8a3aca5691c3936d114c804471b2429b9ae81308f020247765614d2f792f93a012263ce4baa31cf42f4dacc23a7161a4c7f9debfba8d9028879f1ed3fc4e2433 -libblastrampoline.v5.11.0+0.x86_64-unknown-freebsd.tar.gz/md5/b02eb694e1486ef8ffe9534ac2bd5ec6 -libblastrampoline.v5.11.0+0.x86_64-unknown-freebsd.tar.gz/sha512/989273809ae567d7e7193529740423ac1870eae3a0effeecc67f84da914d81649786f393e101f013b7232ef5fe35066d89b3cb776ad0e87394799491ef28a467 -libblastrampoline.v5.11.0+0.x86_64-w64-mingw32.tar.gz/md5/6e7f602ab0bf5a5c28bf4e959a1bbf77 -libblastrampoline.v5.11.0+0.x86_64-w64-mingw32.tar.gz/sha512/556e7ca1a2576c1d7825ac1bc2449ffe2cd40391cf316b10f60681a5c736939c97eb5221c2837640928b5544f89f44cb14ca44ccf54092376390ea1a6012c9e5 +blastrampoline-b09277feafd342520b8476ce443d35327b5e55b4.tar.gz/md5/7516eaaa5777a93cf387da1bf4b14c8a +blastrampoline-b09277feafd342520b8476ce443d35327b5e55b4.tar.gz/sha512/00fea70f713be77be10bb014e7dad957616ea59d882e2bfa75d7b8b7237dd59d735cfb944b9cac3fa34fbe7b0a78c89c25b605bdea33e2c17278f29874e20363 +libblastrampoline.v5.11.1+0.aarch64-apple-darwin.tar.gz/md5/93ee5c360913b8ed7c558a2edeb7014b +libblastrampoline.v5.11.1+0.aarch64-apple-darwin.tar.gz/sha512/3f6e78d8c966fce6eecf82931186907cc10b95ceb71d5cfc3ee958b20a11d0e24d1a399fb7fba4cf7180fa61f3d0965db6e6ca9d99dd8c4ab56d36713fd9a327 +libblastrampoline.v5.11.1+0.aarch64-linux-gnu.tar.gz/md5/aad5e3585f585d54d9ebcf822bbe32cb +libblastrampoline.v5.11.1+0.aarch64-linux-gnu.tar.gz/sha512/11ff9227e16898895ad6cbd36853093941b243a49962785a5ab8b7dc2426831a2750ab5882ee814e3a662e8b9f8aecb273d750b88a4ea5a213e20c93cb121ce1 +libblastrampoline.v5.11.1+0.aarch64-linux-musl.tar.gz/md5/462639b4b21f5b7626febfdd1ae1f824 +libblastrampoline.v5.11.1+0.aarch64-linux-musl.tar.gz/sha512/866004e3fcdb5ab7418c8a2cae8f820c5739a511b9d0b32d0013ef72ff99f87396f5912d8fbd6bf4d01d7432715c6971ad1a5419c34fa7b048d0fbbe0f8520d2 +libblastrampoline.v5.11.1+0.armv6l-linux-gnueabihf.tar.gz/md5/8a48cc8243257362dbc920dcadc42a22 
+libblastrampoline.v5.11.1+0.armv6l-linux-gnueabihf.tar.gz/sha512/bb4048c0e1ebbb89fc82b7cdabb0a4d9263b5344390c934b66c3a227631661ae956287870e4b156935f0a3c322049ceed3138fc033c92561fccf3675317af5b8 +libblastrampoline.v5.11.1+0.armv6l-linux-musleabihf.tar.gz/md5/53c12d04337b63d18f4a5469a36132b6 +libblastrampoline.v5.11.1+0.armv6l-linux-musleabihf.tar.gz/sha512/fbb9e1cd3c80cf6eada43c7b3d3e6990a2b54c3f7de492ba5407d64841e705a68a5c7aa8bf4873f3204a7f8a9631a0135e2e08b57d4291b32d0f928e887c1e14 +libblastrampoline.v5.11.1+0.armv7l-linux-gnueabihf.tar.gz/md5/08963ae41481cbd4d7d9c9790b8e161e +libblastrampoline.v5.11.1+0.armv7l-linux-gnueabihf.tar.gz/sha512/428e952b3ec6904c9aa233fab1a860a30b043aa8e7508978406a0aafffee03b4e73b51dcd1eaa8550032edf51bd84e1c8356cdbd180d48791c5c0486c3a925a1 +libblastrampoline.v5.11.1+0.armv7l-linux-musleabihf.tar.gz/md5/fae4f9b44ddca8f74f8999fe3a9f0a91 +libblastrampoline.v5.11.1+0.armv7l-linux-musleabihf.tar.gz/sha512/afd37260ee0ecc0a1fe34f0e78cb1fd563e8d0cad025bc8ad733186a56c1c1faa4ffb4de593aead0b21513c9108847e08734ec14443ab8c0c36468f990bdf38e +libblastrampoline.v5.11.1+0.i686-linux-gnu.tar.gz/md5/3d664f435a559022a8309f271a8376e5 +libblastrampoline.v5.11.1+0.i686-linux-gnu.tar.gz/sha512/60a2863237f0b668237c6b68c0671ecf17d62272b047f2ad5e6b466aeb7e0e92fa1207e9c107de7c96a2b8974925f2af69324104c22fa1c51a9cc207b84e2d22 +libblastrampoline.v5.11.1+0.i686-linux-musl.tar.gz/md5/3d63e967ae8301329e9a79a0882c14f6 +libblastrampoline.v5.11.1+0.i686-linux-musl.tar.gz/sha512/9c3950bccf578b3b3b609398ab7a05c13cb86ded686c585f916c521adb533589166530c825af8095bb6d88b9ae0d14dae992a53b578af502f19811be1aecc185 +libblastrampoline.v5.11.1+0.i686-w64-mingw32.tar.gz/md5/99890890c7e600d0817775026baca09b +libblastrampoline.v5.11.1+0.i686-w64-mingw32.tar.gz/sha512/87904de1637967e1ba6a17b788c7ae3d049934553d14302c715db829f1a2aaa55c35f3c04d3ef0fce7a589e66d41fba939906a5dd5b19daf3ede343d298bc018 +libblastrampoline.v5.11.1+0.powerpc64le-linux-gnu.tar.gz/md5/bda2bbfb9af8eb655fead11a6ce142cb +libblastrampoline.v5.11.1+0.powerpc64le-linux-gnu.tar.gz/sha512/ca318ff7b362ee5f15654c669f4acf45d4530499daa2b8e64da179c2b0ba2bddb0d0b30dc08b3427a55dd2f0ee239b7c00fb93bd27572d14a863677bf22a0173 +libblastrampoline.v5.11.1+0.x86_64-apple-darwin.tar.gz/md5/dec773fbfbf218b35e942325cf9305dc +libblastrampoline.v5.11.1+0.x86_64-apple-darwin.tar.gz/sha512/c7d4828689361c9a8708b7cf1b0b1fa4f237e2a50b45f71457782b84fcc88c757e00bc91f19e9c7bc94d1c69420ec2c4ebe39c62f9fd140e72ff8a408879474c +libblastrampoline.v5.11.1+0.x86_64-linux-gnu.tar.gz/md5/88545391ae715b0f83b786f6eb7a6ee5 +libblastrampoline.v5.11.1+0.x86_64-linux-gnu.tar.gz/sha512/f041dac97783108b6b4e90a74315c3c4074c82ab926b1d3c1b90dac03dd1b7ea60dbb96b0c36b34b9e386732c8f546c7c54ea8111c650d0454cfb6015535ddf2 +libblastrampoline.v5.11.1+0.x86_64-linux-musl.tar.gz/md5/7c8353b779cfae36984a0a806f985a7b +libblastrampoline.v5.11.1+0.x86_64-linux-musl.tar.gz/sha512/5288123a4cb81befac2b2504c503303e0cf7d6eee3e9ba3195378900b0204745ed0e818f31a1d344bd552ff06a9904075b1fb742eea5f1f5de907c0df141b8ca +libblastrampoline.v5.11.1+0.x86_64-unknown-freebsd.tar.gz/md5/7bc51751c09a1772d2f8638e5d3e4655 +libblastrampoline.v5.11.1+0.x86_64-unknown-freebsd.tar.gz/sha512/5fde7423915964e4491f9fc46da9fb046fc85a434408dd4cb61521efe70d090e7b5dd2a995345318b287f03c9f21c15de2f627244332038b5dc99e28c88a29b3 +libblastrampoline.v5.11.1+0.x86_64-w64-mingw32.tar.gz/md5/6e7f602ab0bf5a5c28bf4e959a1bbf77 
+libblastrampoline.v5.11.1+0.x86_64-w64-mingw32.tar.gz/sha512/556e7ca1a2576c1d7825ac1bc2449ffe2cd40391cf316b10f60681a5c736939c97eb5221c2837640928b5544f89f44cb14ca44ccf54092376390ea1a6012c9e5 diff --git a/stdlib/libblastrampoline_jll/Project.toml b/stdlib/libblastrampoline_jll/Project.toml index 1dd22b7fb8d40..eb71a4a9d532c 100644 --- a/stdlib/libblastrampoline_jll/Project.toml +++ b/stdlib/libblastrampoline_jll/Project.toml @@ -1,6 +1,6 @@ name = "libblastrampoline_jll" uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.11.0+0" +version = "5.11.1+0" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" From 9c619c311b6b1d1b5151e2d0047a853d5b017380 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Fri, 11 Oct 2024 01:56:25 -0400 Subject: [PATCH 171/537] Revert "REPL: hide any prints to stdio during `complete_line`" (#56102) --- stdlib/REPL/src/LineEdit.jl | 17 ++--------------- 1 file changed, 2 insertions(+), 15 deletions(-) diff --git a/stdlib/REPL/src/LineEdit.jl b/stdlib/REPL/src/LineEdit.jl index 3ac403df54007..c92dca8c8e015 100644 --- a/stdlib/REPL/src/LineEdit.jl +++ b/stdlib/REPL/src/LineEdit.jl @@ -366,14 +366,7 @@ end # Prompt Completions & Hints function complete_line(s::MIState) set_action!(s, :complete_line) - # suppress stderr/stdout prints during completion computation - # i.e. ambiguous qualification warnings that are printed to stderr - # TODO: remove this suppression once such warnings are better handled - # TODO: but before that change Pipe to devnull once devnull redirects work for JL_STDERR etc. - completions_exist = redirect_stdio(;stderr=Pipe(), stdout=Pipe()) do - complete_line(state(s), s.key_repeats, s.active_module) - end - if completions_exist + if complete_line(state(s), s.key_repeats, s.active_module) return refresh_line(s) else beep(s) @@ -391,13 +384,7 @@ function check_for_hint(s::MIState) end completions, partial, should_complete = try - # suppress stderr/stdout prints during completion computation - # i.e. ambiguous qualification warnings that are printed to stderr - # TODO: remove this suppression once such warnings are better handled - # TODO: but before that change Pipe to devnull once devnull redirects work for JL_STDERR etc. 
- completions, partial, should_complete = redirect_stdio(;stderr=Pipe(), stdout=Pipe()) do - complete_line(st.p.complete, st, s.active_module; hint = true)::Tuple{Vector{String},String,Bool} - end + complete_line(st.p.complete, st, s.active_module; hint = true)::Tuple{Vector{String},String,Bool} catch @debug "error completing line for hint" exception=current_exceptions() return clear_hint(st) From 1438b1578941d0f1cc8f8c958cf3bd2927fd482c Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Fri, 11 Oct 2024 01:57:36 -0400 Subject: [PATCH 172/537] Remove warning from c when binding is ambiguous (#56103) --- doc/src/manual/modules.md | 4 ++-- src/module.c | 5 ----- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/doc/src/manual/modules.md b/doc/src/manual/modules.md index 6b335305ac569..b4f0fd78c816a 100644 --- a/doc/src/manual/modules.md +++ b/doc/src/manual/modules.md @@ -283,14 +283,14 @@ julia> module B B ``` -The statement `using .A, .B` works, but when you try to call `f`, you get a warning +The statement `using .A, .B` works, but when you try to call `f`, you get an error with a hint ```jldoctest module_manual julia> using .A, .B julia> f -WARNING: both B and A export "f"; uses of it in module Main must be qualified ERROR: UndefVarError: `f` not defined in `Main` +Hint: It looks like two or more modules export different bindings with this name, resulting in ambiguity. Try explicitly importing it from a particular module, or qualifying the name with the module it should come from. ``` Here, Julia cannot decide which `f` you are referring to, so you have to make a choice. The following solutions are commonly used: diff --git a/src/module.c b/src/module.c index f4da7e1e994de..36c35f50b44af 100644 --- a/src/module.c +++ b/src/module.c @@ -430,11 +430,6 @@ static jl_binding_t *using_resolve_binding(jl_module_t *m JL_PROPAGATES_ROOT, jl tempb = jl_get_module_binding(m, var, 1); tempbpart = jl_get_binding_partition(tempb, jl_current_task->world_age); jl_atomic_store_release(&tempbpart->restriction, encode_restriction(NULL, BINDING_KIND_FAILED)); - jl_printf(JL_STDERR, - "WARNING: both %s and %s export \"%s\"; uses of it in module %s must be qualified\n", - jl_symbol_name(owner->name), - jl_symbol_name(imp->name), jl_symbol_name(var), - jl_symbol_name(m->name)); } return NULL; } From 9844d854408d2312d70003fdde247c7195bffa4c Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Fri, 11 Oct 2024 13:51:51 +0200 Subject: [PATCH 173/537] make `Base.ANSIIterator` have a concrete field (#56088) Avoids the invalidation ``` backedges: 1: superseding sizeof(s::AbstractString) @ Base strings/basic.jl:177 with MethodInstance for sizeof(::AbstractString) (75 children) ``` shown in https://github.com/JuliaLang/julia/issues/56080#issuecomment-2404765120. 
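[Editor's note, not part of the patch: the underlying pattern, sketched with hypothetical names. A field annotated with a non-concrete type (here the bare `RegexMatchIterator` UnionAll) makes accesses type-unstable and widens what invalidations can reach; adding a type parameter keeps the field concrete for each instance.]

```julia
# Illustrative only, not Base code.
struct LooseIter
    captures::AbstractVector       # abstract field type, accesses are not inferable
end
struct TightIter{T<:AbstractVector}
    captures::T                    # concrete for each constructed instance
end
```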
Co-authored-by: KristofferC --- base/show.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/base/show.jl b/base/show.jl index ec6776d81f2d5..66560265e3b42 100644 --- a/base/show.jl +++ b/base/show.jl @@ -72,13 +72,13 @@ ncodeunits(c::ANSIDelimiter) = ncodeunits(c.del) textwidth(::ANSIDelimiter) = 0 # An iterator similar to `pairs(::String)` but whose values are Char or ANSIDelimiter -struct ANSIIterator - captures::RegexMatchIterator +struct ANSIIterator{S} + captures::RegexMatchIterator{S} end ANSIIterator(s::AbstractString) = ANSIIterator(eachmatch(ansi_regex, s)) -IteratorSize(::Type{ANSIIterator}) = SizeUnknown() -eltype(::Type{ANSIIterator}) = Pair{Int, Union{Char,ANSIDelimiter}} +IteratorSize(::Type{<:ANSIIterator}) = SizeUnknown() +eltype(::Type{<:ANSIIterator}) = Pair{Int, Union{Char,ANSIDelimiter}} function iterate(I::ANSIIterator, (i, m_st)=(1, iterate(I.captures))) m_st === nothing && return nothing m, (j, new_m_st) = m_st From f3a36d74eeb1f8c6439affcc33e2a304550dc217 Mon Sep 17 00:00:00 2001 From: N5N3 <2642243996@qq.com> Date: Fri, 11 Oct 2024 23:16:26 +0800 Subject: [PATCH 174/537] Subtype: some performance tuning. (#56007) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The main motivation of this PR is to fix #55807. dc689fe8700f70f4a4e2dbaaf270f26b87e79e04 tries to remove the slow `may_contain_union_decision` check by re-organizing the code path. Now the fast path has been removed and most of its optimization has been integrated into the preserved slow path. Since the slow path stores all inner ∃ decisions on the outer most R stack, there might be overflow risk. aee69a41441b4306ba3ee5e845bc96cb45d9b327 should fix that concern. The reported MWE now becomes ```julia 0.000002 seconds 0.000040 seconds (105 allocations: 4.828 KiB, 52.00% compilation time) 0.000023 seconds (105 allocations: 4.828 KiB, 49.36% compilation time) 0.000026 seconds (105 allocations: 4.828 KiB, 50.38% compilation time) 0.000027 seconds (105 allocations: 4.828 KiB, 54.95% compilation time) 0.000019 seconds (106 allocations: 4.922 KiB, 49.73% compilation time) 0.000024 seconds (105 allocations: 4.828 KiB, 52.24% compilation time) ``` Local bench also shows that 72855cd slightly accelerates `OmniPackage.jl`'s loading ```julia julia> @time using OmniPackage # v1.11rc4 20.525278 seconds (25.36 M allocations: 1.606 GiB, 8.48% gc time, 12.89% compilation time: 77% of which was recompilation) # v1.11rc4+aee69a4+72855cd 19.527871 seconds (24.92 M allocations: 1.593 GiB, 8.88% gc time, 15.13% compilation time: 82% of which was recompilation) ``` --- src/subtype.c | 298 +++++++++++++++++++++++++++++--------------------- 1 file changed, 173 insertions(+), 125 deletions(-) diff --git a/src/subtype.c b/src/subtype.c index 65ee4d5916bce..5edcd100ee8e0 100644 --- a/src/subtype.c +++ b/src/subtype.c @@ -39,20 +39,24 @@ extern "C" { // Union type decision points are discovered while the algorithm works. // If a new Union decision is encountered, the `more` flag is set to tell // the forall/exists loop to grow the stack. 
-// TODO: the stack probably needs to be artificially large because of some -// deeper problem (see #21191) and could be shrunk once that is fixed + +typedef struct jl_bits_stack_t { + uint32_t data[16]; + struct jl_bits_stack_t *next; +} jl_bits_stack_t; + typedef struct { int16_t depth; int16_t more; int16_t used; - uint32_t stack[100]; // stack of bits represented as a bit vector + jl_bits_stack_t stack; } jl_unionstate_t; typedef struct { int16_t depth; int16_t more; int16_t used; - void *stack; + uint8_t *stack; } jl_saved_unionstate_t; // Linked list storing the type variable environment. A new jl_varbinding_t @@ -131,37 +135,111 @@ static jl_varbinding_t *lookup(jl_stenv_t *e, jl_tvar_t *v) JL_GLOBALLY_ROOTED J } #endif +// union-stack tools + static int statestack_get(jl_unionstate_t *st, int i) JL_NOTSAFEPOINT { - assert(i >= 0 && i < sizeof(st->stack) * 8); + assert(i >= 0 && i <= 32767); // limited by the depth bit. // get the `i`th bit in an array of 32-bit words - return (st->stack[i>>5] & (1u<<(i&31))) != 0; + jl_bits_stack_t *stack = &st->stack; + while (i >= sizeof(stack->data) * 8) { + // We should have set this bit. + assert(stack->next); + stack = stack->next; + i -= sizeof(stack->data) * 8; + } + return (stack->data[i>>5] & (1u<<(i&31))) != 0; } static void statestack_set(jl_unionstate_t *st, int i, int val) JL_NOTSAFEPOINT { - assert(i >= 0 && i < sizeof(st->stack) * 8); + assert(i >= 0 && i <= 32767); // limited by the depth bit. + jl_bits_stack_t *stack = &st->stack; + while (i >= sizeof(stack->data) * 8) { + if (__unlikely(stack->next == NULL)) { + stack->next = (jl_bits_stack_t *)malloc(sizeof(jl_bits_stack_t)); + stack->next->next = NULL; + } + stack = stack->next; + i -= sizeof(stack->data) * 8; + } if (val) - st->stack[i>>5] |= (1u<<(i&31)); + stack->data[i>>5] |= (1u<<(i&31)); else - st->stack[i>>5] &= ~(1u<<(i&31)); + stack->data[i>>5] &= ~(1u<<(i&31)); +} + +#define has_next_union_state(e, R) ((((R) ? &(e)->Runions : &(e)->Lunions)->more) != 0) + +static int next_union_state(jl_stenv_t *e, int8_t R) JL_NOTSAFEPOINT +{ + jl_unionstate_t *state = R ? &e->Runions : &e->Lunions; + if (state->more == 0) + return 0; + // reset `used` and let `pick_union_decision` clean the stack. + state->used = state->more; + statestack_set(state, state->used - 1, 1); + return 1; } -#define push_unionstate(saved, src) \ - do { \ - (saved)->depth = (src)->depth; \ - (saved)->more = (src)->more; \ - (saved)->used = (src)->used; \ - (saved)->stack = alloca(((src)->used+7)/8); \ - memcpy((saved)->stack, &(src)->stack, ((src)->used+7)/8); \ +static int pick_union_decision(jl_stenv_t *e, int8_t R) JL_NOTSAFEPOINT +{ + jl_unionstate_t *state = R ? 
&e->Runions : &e->Lunions; + if (state->depth >= state->used) { + statestack_set(state, state->used, 0); + state->used++; + } + int ui = statestack_get(state, state->depth); + state->depth++; + if (ui == 0) + state->more = state->depth; // memorize that this was the deepest available choice + return ui; +} + +static jl_value_t *pick_union_element(jl_value_t *u JL_PROPAGATES_ROOT, jl_stenv_t *e, int8_t R) JL_NOTSAFEPOINT +{ + do { + if (pick_union_decision(e, R)) + u = ((jl_uniontype_t*)u)->b; + else + u = ((jl_uniontype_t*)u)->a; + } while (jl_is_uniontype(u)); + return u; +} + +#define push_unionstate(saved, src) \ + do { \ + (saved)->depth = (src)->depth; \ + (saved)->more = (src)->more; \ + (saved)->used = (src)->used; \ + jl_bits_stack_t *srcstack = &(src)->stack; \ + int pushbits = ((saved)->used+7)/8; \ + (saved)->stack = (uint8_t *)alloca(pushbits); \ + for (int n = 0; n < pushbits; n += sizeof(srcstack->data)) { \ + assert(srcstack != NULL); \ + int rest = pushbits - n; \ + if (rest > sizeof(srcstack->data)) \ + rest = sizeof(srcstack->data); \ + memcpy(&(saved)->stack[n], &srcstack->data, rest); \ + srcstack = srcstack->next; \ + } \ } while (0); -#define pop_unionstate(dst, saved) \ - do { \ - (dst)->depth = (saved)->depth; \ - (dst)->more = (saved)->more; \ - (dst)->used = (saved)->used; \ - memcpy(&(dst)->stack, (saved)->stack, ((saved)->used+7)/8); \ +#define pop_unionstate(dst, saved) \ + do { \ + (dst)->depth = (saved)->depth; \ + (dst)->more = (saved)->more; \ + (dst)->used = (saved)->used; \ + jl_bits_stack_t *dststack = &(dst)->stack; \ + int popbits = ((saved)->used+7)/8; \ + for (int n = 0; n < popbits; n += sizeof(dststack->data)) { \ + assert(dststack != NULL); \ + int rest = popbits - n; \ + if (rest > sizeof(dststack->data)) \ + rest = sizeof(dststack->data); \ + memcpy(&dststack->data, &(saved)->stack[n], rest); \ + dststack = dststack->next; \ + } \ } while (0); static int current_env_length(jl_stenv_t *e) @@ -264,6 +342,18 @@ static void free_env(jl_savedenv_t *se) JL_NOTSAFEPOINT se->buf = NULL; } +static void free_stenv(jl_stenv_t *e) JL_NOTSAFEPOINT +{ + for (int R = 0; R < 2; R++) { + jl_bits_stack_t *temp = R ? e->Runions.stack.next : e->Lunions.stack.next; + while (temp != NULL) { + jl_bits_stack_t *next = temp->next; + free(temp); + temp = next; + } + } +} + static void restore_env(jl_stenv_t *e, jl_savedenv_t *se, int root) JL_NOTSAFEPOINT { jl_value_t **roots = NULL; @@ -587,44 +677,6 @@ static jl_value_t *simple_meet(jl_value_t *a, jl_value_t *b, int overesi) static int subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param); -#define has_next_union_state(e, R) ((((R) ? &(e)->Runions : &(e)->Lunions)->more) != 0) - -static int next_union_state(jl_stenv_t *e, int8_t R) JL_NOTSAFEPOINT -{ - jl_unionstate_t *state = R ? &e->Runions : &e->Lunions; - if (state->more == 0) - return 0; - // reset `used` and let `pick_union_decision` clean the stack. - state->used = state->more; - statestack_set(state, state->used - 1, 1); - return 1; -} - -static int pick_union_decision(jl_stenv_t *e, int8_t R) JL_NOTSAFEPOINT -{ - jl_unionstate_t *state = R ? 
&e->Runions : &e->Lunions; - if (state->depth >= state->used) { - statestack_set(state, state->used, 0); - state->used++; - } - int ui = statestack_get(state, state->depth); - state->depth++; - if (ui == 0) - state->more = state->depth; // memorize that this was the deepest available choice - return ui; -} - -static jl_value_t *pick_union_element(jl_value_t *u JL_PROPAGATES_ROOT, jl_stenv_t *e, int8_t R) JL_NOTSAFEPOINT -{ - do { - if (pick_union_decision(e, R)) - u = ((jl_uniontype_t*)u)->b; - else - u = ((jl_uniontype_t*)u)->a; - } while (jl_is_uniontype(u)); - return u; -} - static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param, int limit_slow); // subtype for variable bounds consistency check. needs its own forall/exists environment. @@ -1513,37 +1565,12 @@ static int is_definite_length_tuple_type(jl_value_t *x) return k == JL_VARARG_NONE || k == JL_VARARG_INT; } -static int _forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param, int *count, int *noRmore); - -static int may_contain_union_decision(jl_value_t *x, jl_stenv_t *e, jl_typeenv_t *log) JL_NOTSAFEPOINT +static int is_exists_typevar(jl_value_t *x, jl_stenv_t *e) { - if (x == NULL || x == (jl_value_t*)jl_any_type || x == jl_bottom_type) - return 0; - if (jl_is_unionall(x)) - return may_contain_union_decision(((jl_unionall_t *)x)->body, e, log); - if (jl_is_datatype(x)) { - jl_datatype_t *xd = (jl_datatype_t *)x; - for (int i = 0; i < jl_nparams(xd); i++) { - jl_value_t *param = jl_tparam(xd, i); - if (jl_is_vararg(param)) - param = jl_unwrap_vararg(param); - if (may_contain_union_decision(param, e, log)) - return 1; - } - return 0; - } if (!jl_is_typevar(x)) - return jl_is_type(x); - jl_typeenv_t *t = log; - while (t != NULL) { - if (x == (jl_value_t *)t->var) - return 1; - t = t->prev; - } - jl_typeenv_t newlog = { (jl_tvar_t*)x, NULL, log }; - jl_varbinding_t *xb = lookup(e, (jl_tvar_t *)x); - return may_contain_union_decision(xb ? xb->lb : ((jl_tvar_t *)x)->lb, e, &newlog) || - may_contain_union_decision(xb ? xb->ub : ((jl_tvar_t *)x)->ub, e, &newlog); + return 0; + jl_varbinding_t *vb = lookup(e, (jl_tvar_t *)x); + return vb && vb->right; } static int has_exists_typevar(jl_value_t *x, jl_stenv_t *e) JL_NOTSAFEPOINT @@ -1574,31 +1601,9 @@ static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t int kindy = !jl_has_free_typevars(y); if (kindx && kindy) return jl_subtype(x, y); - if (may_contain_union_decision(y, e, NULL) && pick_union_decision(e, 1) == 0) { - jl_saved_unionstate_t oldRunions; push_unionstate(&oldRunions, &e->Runions); - e->Lunions.used = e->Runions.used = 0; - e->Lunions.depth = e->Runions.depth = 0; - e->Lunions.more = e->Runions.more = 0; - int count = 0, noRmore = 0; - sub = _forall_exists_subtype(x, y, e, param, &count, &noRmore); - pop_unionstate(&e->Runions, &oldRunions); - // We could skip the slow path safely if - // 1) `_∀_∃_subtype` has tested all cases - // 2) `_∀_∃_subtype` returns 1 && `x` and `y` contain no ∃ typevar - // Once `limit_slow == 1`, also skip it if - // 1) `_∀_∃_subtype` returns 0 - // 2) the left `Union` looks big - // TODO: `limit_slow` ignores complexity from inner `local_∀_exists_subtype`. 
- if (limit_slow == -1) - limit_slow = kindx || kindy; - int skip = noRmore || (limit_slow && (count > 3 || !sub)) || - (sub && (kindx || !has_exists_typevar(x, e)) && - (kindy || !has_exists_typevar(y, e))); - if (skip) - e->Runions.more = oldRmore; - } - else { - // slow path + int has_exists = (!kindx && has_exists_typevar(x, e)) || + (!kindy && has_exists_typevar(y, e)); + if (has_exists && (is_exists_typevar(x, e) != is_exists_typevar(y, e))) { e->Lunions.used = 0; while (1) { e->Lunions.more = 0; @@ -1607,7 +1612,51 @@ static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t if (!sub || !next_union_state(e, 0)) break; } + return sub; } + if (limit_slow == -1) + limit_slow = kindx || kindy; + jl_savedenv_t se; + save_env(e, &se, has_exists); + int count, limited = 0, ini_count = 0; + jl_saved_unionstate_t latestLunions = {0, 0, 0, NULL}; + while (1) { + count = ini_count; + if (ini_count == 0) + e->Lunions.used = 0; + else + pop_unionstate(&e->Lunions, &latestLunions); + while (1) { + e->Lunions.more = 0; + e->Lunions.depth = 0; + if (count < 4) count++; + sub = subtype(x, y, e, param); + if (limit_slow && count == 4) + limited = 1; + if (!sub || !next_union_state(e, 0)) + break; + if (limited || !has_exists || e->Runions.more == oldRmore) { + // re-save env and freeze the ∃decision for previous ∀Union + // Note: We could ignore the rest `∃Union` decisions if `x` and `y` + // contain no ∃ typevar, as they have no effect on env. + ini_count = count; + push_unionstate(&latestLunions, &e->Lunions); + re_save_env(e, &se, has_exists); + e->Runions.more = oldRmore; + } + } + if (sub || e->Runions.more == oldRmore) + break; + assert(e->Runions.more > oldRmore); + next_union_state(e, 1); + restore_env(e, &se, has_exists); // also restore Rdepth here + e->Runions.more = oldRmore; + } + if (!sub) + assert(e->Runions.more == oldRmore); + else if (limited || !has_exists) + e->Runions.more = oldRmore; + free_env(&se); return sub; } @@ -1677,7 +1726,7 @@ static int exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, jl_savede } } -static int _forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param, int *count, int *noRmore) +static int forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param) { // The depth recursion has the following shape, after simplification: // ∀₁ @@ -1689,12 +1738,8 @@ static int _forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, i e->Lunions.used = 0; int sub; - if (count) *count = 0; - if (noRmore) *noRmore = 1; while (1) { sub = exists_subtype(x, y, e, &se, param); - if (count) *count = (*count < 4) ? 
*count + 1 : 4; - if (noRmore) *noRmore = *noRmore && e->Runions.more == 0; if (!sub || !next_union_state(e, 0)) break; re_save_env(e, &se, 1); @@ -1704,11 +1749,6 @@ static int _forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, i return sub; } -static int forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, int param) -{ - return _forall_exists_subtype(x, y, e, param, NULL, NULL); -} - static void init_stenv(jl_stenv_t *e, jl_value_t **env, int envsz) { e->vars = NULL; @@ -1728,6 +1768,8 @@ static void init_stenv(jl_stenv_t *e, jl_value_t **env, int envsz) e->Lunions.depth = 0; e->Runions.depth = 0; e->Lunions.more = 0; e->Runions.more = 0; e->Lunions.used = 0; e->Runions.used = 0; + e->Lunions.stack.next = NULL; + e->Runions.stack.next = NULL; } // subtyping entry points @@ -2157,6 +2199,7 @@ JL_DLLEXPORT int jl_subtype_env(jl_value_t *x, jl_value_t *y, jl_value_t **env, } init_stenv(&e, env, envsz); int subtype = forall_exists_subtype(x, y, &e, 0); + free_stenv(&e); assert(obvious_subtype == 3 || obvious_subtype == subtype || jl_has_free_typevars(x) || jl_has_free_typevars(y)); #ifndef NDEBUG if (obvious_subtype == 0 || (obvious_subtype == 1 && envsz == 0)) @@ -2249,6 +2292,7 @@ JL_DLLEXPORT int jl_types_equal(jl_value_t *a, jl_value_t *b) { init_stenv(&e, NULL, 0); int subtype = forall_exists_subtype(a, b, &e, 0); + free_stenv(&e); assert(subtype_ab == 3 || subtype_ab == subtype || jl_has_free_typevars(a) || jl_has_free_typevars(b)); #ifndef NDEBUG if (subtype_ab != 0 && subtype_ab != 1) // ensures that running in a debugger doesn't change the result @@ -2265,6 +2309,7 @@ JL_DLLEXPORT int jl_types_equal(jl_value_t *a, jl_value_t *b) { init_stenv(&e, NULL, 0); int subtype = forall_exists_subtype(b, a, &e, 0); + free_stenv(&e); assert(subtype_ba == 3 || subtype_ba == subtype || jl_has_free_typevars(a) || jl_has_free_typevars(b)); #ifndef NDEBUG if (subtype_ba != 0 && subtype_ba != 1) // ensures that running in a debugger doesn't change the result @@ -4230,7 +4275,9 @@ static jl_value_t *intersect_types(jl_value_t *x, jl_value_t *y, int emptiness_o init_stenv(&e, NULL, 0); e.intersection = e.ignore_free = 1; e.emptiness_only = emptiness_only; - return intersect_all(x, y, &e); + jl_value_t *ans = intersect_all(x, y, &e); + free_stenv(&e); + return ans; } JL_DLLEXPORT jl_value_t *jl_intersect_types(jl_value_t *x, jl_value_t *y) @@ -4407,6 +4454,7 @@ jl_value_t *jl_type_intersection_env_s(jl_value_t *a, jl_value_t *b, jl_svec_t * memset(env, 0, szb*sizeof(void*)); e.envsz = szb; *ans = intersect_all(a, b, &e); + free_stenv(&e); if (*ans == jl_bottom_type) goto bot; // TODO: code dealing with method signatures is not able to handle unions, so if // `a` and `b` are both tuples, we need to be careful and may not return a union, From 0d09f3d0baa8843bd41fed013dad3fd9d69ae28d Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 11 Oct 2024 13:23:52 -0400 Subject: [PATCH 175/537] rearrange jl_delete_thread to be thread-safe (#56097) Prior to this, especially on macOS, the gc-safepoint here would cause the process to segfault as we had already freed the current_task state. Rearrange this code so that the GC interactions (except for the atomic store to current_task) are all handled before entering GC safe, and then signaling the thread is deleted (via setting current_task = NULL, published by jl_unlock_profile_wr to other threads) is last. ``` ERROR: Exception handler triggered on unmanaged thread. 
Process 53827 stopped * thread #5, stop reason = EXC_BAD_ACCESS (code=2, address=0x100018008) frame #0: 0x0000000100b74344 libjulia-internal.1.12.0.dylib`jl_delete_thread [inlined] jl_gc_state_set(ptls=0x000000011f8b3200, state='\x02', old_state=) at julia_threads.h:272:9 [opt] 269 assert(old_state != JL_GC_CONCURRENT_COLLECTOR_THREAD); 270 jl_atomic_store_release(&ptls->gc_state, state); 271 if (state == JL_GC_STATE_UNSAFE || old_state == JL_GC_STATE_UNSAFE) -> 272 jl_gc_safepoint_(ptls); 273 return old_state; 274 } 275 STATIC_INLINE int8_t jl_gc_state_save_and_set(jl_ptls_t ptls, Target 0: (julia) stopped. (lldb) up frame #1: 0x0000000100b74320 libjulia-internal.1.12.0.dylib`jl_delete_thread [inlined] jl_gc_state_save_and_set(ptls=0x000000011f8b3200, state='\x02') at julia_threads.h:278:12 [opt] 275 STATIC_INLINE int8_t jl_gc_state_save_and_set(jl_ptls_t ptls, 276 int8_t state) 277 { -> 278 return jl_gc_state_set(ptls, state, jl_atomic_load_relaxed(&ptls->gc_state)); 279 } 280 #ifdef __clang_gcanalyzer__ 281 // these might not be a safepoint (if they are no-op safe=>safe transitions), but we have to assume it could be (statically) (lldb) frame #2: 0x0000000100b7431c libjulia-internal.1.12.0.dylib`jl_delete_thread(value=0x000000011f8b3200) at threading.c:537:11 [opt] 534 ptls->root_task = NULL; 535 jl_free_thread_gc_state(ptls); 536 // then park in safe-region -> 537 (void)jl_gc_safe_enter(ptls); 538 } ``` (test incorporated into https://github.com/JuliaLang/julia/pull/55793) --- src/threading.c | 46 +++++++++++++++++++++++++--------------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/src/threading.c b/src/threading.c index 44b1192528531..c26028d2f3da2 100644 --- a/src/threading.c +++ b/src/threading.c @@ -464,6 +464,30 @@ static void jl_delete_thread(void *value) JL_NOTSAFEPOINT_ENTER // prior unsafe-region (before we let it release the stack memory) (void)jl_gc_unsafe_enter(ptls); scheduler_delete_thread(ptls); + // need to clear pgcstack and eh, but we can clear everything now too + jl_task_t *ct = jl_atomic_load_relaxed(&ptls->current_task); + jl_task_frame_noreturn(ct); + if (jl_set_task_tid(ptls->root_task, ptls->tid)) { + // the system will probably free this stack memory soon + // so prevent any other thread from accessing it later + if (ct != ptls->root_task) + jl_task_frame_noreturn(ptls->root_task); + } + else { + // Uh oh. The user cleared the sticky bit so it started running + // elsewhere, then called pthread_exit on this thread from another + // Task, which will free the stack memory of that root task soon. This + // is not recoverable. Though we could just hang here, a fatal message + // is likely better. 
+ jl_safe_printf("fatal: thread exited from wrong Task.\n"); + abort(); + } + ptls->previous_exception = NULL; + // allow the page root_task is on to be freed + ptls->root_task = NULL; + jl_free_thread_gc_state(ptls); + // park in safe-region from here on (this may run GC again) + (void)jl_gc_safe_enter(ptls); // try to free some state we do not need anymore #ifndef _OS_WINDOWS_ void *signal_stack = ptls->signal_stack; @@ -502,21 +526,7 @@ static void jl_delete_thread(void *value) JL_NOTSAFEPOINT_ENTER #else pthread_mutex_lock(&in_signal_lock); #endif - // need to clear pgcstack and eh, but we can clear everything now too - jl_task_frame_noreturn(jl_atomic_load_relaxed(&ptls->current_task)); - if (jl_set_task_tid(ptls->root_task, ptls->tid)) { - // the system will probably free this stack memory soon - // so prevent any other thread from accessing it later - jl_task_frame_noreturn(ptls->root_task); - } - else { - // Uh oh. The user cleared the sticky bit so it started running - // elsewhere, then called pthread_exit on this thread. This is not - // recoverable. Though we could just hang here, a fatal message is better. - jl_safe_printf("fatal: thread exited from wrong Task.\n"); - abort(); - } - jl_atomic_store_relaxed(&ptls->current_task, NULL); // dead + jl_atomic_store_relaxed(&ptls->current_task, NULL); // indicate dead // finally, release all of the locks we had grabbed #ifdef _OS_WINDOWS_ jl_unlock_profile_wr(); @@ -529,12 +539,6 @@ static void jl_delete_thread(void *value) JL_NOTSAFEPOINT_ENTER #endif free(ptls->bt_data); small_arraylist_free(&ptls->locks); - ptls->previous_exception = NULL; - // allow the page root_task is on to be freed - ptls->root_task = NULL; - jl_free_thread_gc_state(ptls); - // then park in safe-region - (void)jl_gc_safe_enter(ptls); } //// debugging hack: if we are exiting too fast for error message printing on threads, From 22cde34ae3cee38c2829245e0937e8be173df6cb Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Fri, 11 Oct 2024 21:05:00 +0200 Subject: [PATCH 176/537] OpenBLAS: Use dynamic architecture support on AArch64. (#56107) We already do so on Yggdrasil, so this just makes both source and binary builds behave similarly. Closes https://github.com/JuliaLang/julia/issues/56075 --- Make.inc | 4 +--- deps/openblas.mk | 1 + 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Make.inc b/Make.inc index f078a0c84f806..53aee8a269732 100644 --- a/Make.inc +++ b/Make.inc @@ -1002,16 +1002,14 @@ endif # If we are running on ARM, set certain options automatically ifneq (,$(findstring arm,$(ARCH))) JCFLAGS += -fsigned-char -USE_BLAS64:=0 OPENBLAS_DYNAMIC_ARCH:=0 OPENBLAS_TARGET_ARCH:=ARMV7 +BINARY:=32 endif # If we are running on aarch64 (e.g. 
ARMv8 or ARM64), set certain options automatically ifneq (,$(findstring aarch64,$(ARCH))) -OPENBLAS_DYNAMIC_ARCH:=0 OPENBLAS_TARGET_ARCH:=ARMV8 -USE_BLAS64:=1 BINARY:=64 endif diff --git a/deps/openblas.mk b/deps/openblas.mk index affd1c7a7aa55..fbaa2e7a0fb92 100644 --- a/deps/openblas.mk +++ b/deps/openblas.mk @@ -31,6 +31,7 @@ endif endif # 64-bit BLAS interface +$(error USE_BLAS64: $(USE_BLAS64)) ifeq ($(USE_BLAS64), 1) OPENBLAS_BUILD_OPTS += INTERFACE64=1 SYMBOLSUFFIX="$(OPENBLAS_SYMBOLSUFFIX)" LIBPREFIX="libopenblas$(OPENBLAS_LIBNAMESUFFIX)" ifeq ($(OS), Darwin) From ad85277ee468389cc5b09a1d1eb357f1b558b412 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Fri, 11 Oct 2024 16:30:21 -0400 Subject: [PATCH 177/537] IRShow: label builtin / intrinsic / dynamic calls in `code_typed` (#56036) This makes it much easier to spot dynamic dispatches --- base/compiler/abstractinterpretation.jl | 1 + base/compiler/optimize.jl | 25 +++- base/compiler/ssair/ir.jl | 36 ++++-- base/compiler/ssair/show.jl | 126 +++++++++++++++---- base/compiler/types.jl | 2 + base/deprecated.jl | 1 + doc/src/base/reflection.md | 8 +- doc/src/devdocs/inference.md | 12 +- stdlib/InteractiveUtils/src/codeview.jl | 10 +- stdlib/InteractiveUtils/test/highlighting.jl | 5 +- test/compiler/EscapeAnalysis/EAUtils.jl | 2 +- 11 files changed, 179 insertions(+), 49 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 70623453e1666..04a62700e9de7 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -3114,6 +3114,7 @@ end abstract_eval_ssavalue(s::SSAValue, sv::InferenceState) = abstract_eval_ssavalue(s, sv.ssavaluetypes) function abstract_eval_ssavalue(s::SSAValue, ssavaluetypes::Vector{Any}) + (1 ≤ s.id ≤ length(ssavaluetypes)) || throw(InvalidIRError()) typ = ssavaluetypes[s.id] if typ === NOT_FOUND return Bottom diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index 02f6b46e2e73f..5f0c5077688f8 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -411,26 +411,35 @@ function argextype(@nospecialize(x), compact::IncrementalCompact, sptypes::Vecto isa(x, AnySSAValue) && return types(compact)[x] return argextype(x, compact, sptypes, compact.ir.argtypes) end -argextype(@nospecialize(x), src::CodeInfo, sptypes::Vector{VarState}) = argextype(x, src, sptypes, src.slottypes::Vector{Any}) +function argextype(@nospecialize(x), src::CodeInfo, sptypes::Vector{VarState}) + return argextype(x, src, sptypes, src.slottypes::Union{Vector{Any},Nothing}) +end function argextype( @nospecialize(x), src::Union{IRCode,IncrementalCompact,CodeInfo}, - sptypes::Vector{VarState}, slottypes::Vector{Any}) + sptypes::Vector{VarState}, slottypes::Union{Vector{Any},Nothing}) if isa(x, Expr) if x.head === :static_parameter - return sptypes[x.args[1]::Int].typ + idx = x.args[1]::Int + (1 ≤ idx ≤ length(sptypes)) || throw(InvalidIRError()) + return sptypes[idx].typ elseif x.head === :boundscheck return Bool elseif x.head === :copyast + length(x.args) == 0 && throw(InvalidIRError()) return argextype(x.args[1], src, sptypes, slottypes) end Core.println("argextype called on Expr with head ", x.head, " which is not valid for IR in argument-position.") @assert false elseif isa(x, SlotNumber) + slottypes === nothing && return Any + (1 ≤ x.id ≤ length(slottypes)) || throw(InvalidIRError()) return slottypes[x.id] elseif isa(x, SSAValue) return abstract_eval_ssavalue(x, src) 
elseif isa(x, Argument) + slottypes === nothing && return Any + (1 ≤ x.n ≤ length(slottypes)) || throw(InvalidIRError()) return slottypes[x.n] elseif isa(x, QuoteNode) return Const(x.value) @@ -444,7 +453,15 @@ function argextype( return Const(x) end end -abstract_eval_ssavalue(s::SSAValue, src::CodeInfo) = abstract_eval_ssavalue(s, src.ssavaluetypes::Vector{Any}) +function abstract_eval_ssavalue(s::SSAValue, src::CodeInfo) + ssavaluetypes = src.ssavaluetypes + if ssavaluetypes isa Int + (1 ≤ s.id ≤ ssavaluetypes) || throw(InvalidIRError()) + return Any + else + return abstract_eval_ssavalue(s, ssavaluetypes::Vector{Any}) + end +end abstract_eval_ssavalue(s::SSAValue, src::Union{IRCode,IncrementalCompact}) = types(src)[s] """ diff --git a/base/compiler/ssair/ir.jl b/base/compiler/ssair/ir.jl index fdcb4621c5c0f..90eab43a3f25b 100644 --- a/base/compiler/ssair/ir.jl +++ b/base/compiler/ssair/ir.jl @@ -313,6 +313,7 @@ Instruction(is::InstructionStream) = Instruction(is, add_new_idx!(is)) fldarray = getfield(getfield(node, :data), fld) fldidx = getfield(node, :idx) (fld === :line) && return (fldarray[3fldidx-2], fldarray[3fldidx-1], fldarray[3fldidx-0]) + (1 ≤ fldidx ≤ length(fldarray)) || throw(InvalidIRError()) return fldarray[fldidx] end @inline function setindex!(node::Instruction, @nospecialize(val), fld::Symbol) @@ -481,11 +482,16 @@ function block_for_inst(ir::IRCode, inst::Int) end function getindex(ir::IRCode, s::SSAValue) + id = s.id + (id ≥ 1) || throw(InvalidIRError()) nstmts = length(ir.stmts) - if s.id <= nstmts - return ir.stmts[s.id] + if id <= nstmts + return ir.stmts[id] else - return ir.new_nodes.stmts[s.id - nstmts] + id -= nstmts + stmts = ir.new_nodes.stmts + (id ≤ length(stmts)) || throw(InvalidIRError()) + return stmts[id] end end @@ -801,12 +807,13 @@ end types(ir::Union{IRCode, IncrementalCompact}) = TypesView(ir) function getindex(compact::IncrementalCompact, ssa::SSAValue) - @assert ssa.id < compact.result_idx + (1 ≤ ssa.id ≤ compact.result_idx) || throw(InvalidIRError()) return compact.result[ssa.id] end function getindex(compact::IncrementalCompact, ssa::OldSSAValue) id = ssa.id + (id ≥ 1) || throw(InvalidIRError()) if id < compact.idx new_idx = compact.ssa_rename[id]::Int return compact.result[new_idx] @@ -818,12 +825,15 @@ function getindex(compact::IncrementalCompact, ssa::OldSSAValue) return compact.ir.new_nodes.stmts[id] end id -= length(compact.ir.new_nodes) + (id ≤ length(compact.pending_nodes.stmts)) || throw(InvalidIRError()) return compact.pending_nodes.stmts[id] end function getindex(compact::IncrementalCompact, ssa::NewSSAValue) if ssa.id < 0 - return compact.new_new_nodes.stmts[-ssa.id] + stmts = compact.new_new_nodes.stmts + (-ssa.id ≤ length(stmts)) || throw(InvalidIRError()) + return stmts[-ssa.id] else return compact[SSAValue(ssa.id)] end @@ -1069,6 +1079,7 @@ function getindex(view::TypesView, v::OldSSAValue) id = v.id ir = view.ir.ir stmts = ir.stmts + (id ≥ 1) || throw(InvalidIRError()) if id <= length(stmts) return stmts[id][:type] end @@ -1077,7 +1088,9 @@ function getindex(view::TypesView, v::OldSSAValue) return ir.new_nodes.stmts[id][:type] end id -= length(ir.new_nodes) - return view.ir.pending_nodes.stmts[id][:type] + stmts = view.ir.pending_nodes.stmts + (id ≤ length(stmts)) || throw(InvalidIRError()) + return stmts[id][:type] end function kill_current_use!(compact::IncrementalCompact, @nospecialize(val)) @@ -1204,20 +1217,27 @@ end getindex(view::TypesView, idx::SSAValue) = getindex(view, idx.id) function getindex(view::TypesView, 
idx::Int) + (idx ≥ 1) || throw(InvalidIRError()) if isa(view.ir, IncrementalCompact) && idx < view.ir.result_idx return view.ir.result[idx][:type] elseif isa(view.ir, IncrementalCompact) && view.ir.renamed_new_nodes if idx <= length(view.ir.result) return view.ir.result[idx][:type] else - return view.ir.new_new_nodes.stmts[idx - length(view.ir.result)][:type] + idx -= length(view.ir.result) + stmts = view.ir.new_new_nodes.stmts + (idx ≤ length(stmts)) || throw(InvalidIRError()) + return stmts[idx][:type] end else ir = isa(view.ir, IncrementalCompact) ? view.ir.ir : view.ir if idx <= length(ir.stmts) return ir.stmts[idx][:type] else - return ir.new_nodes.stmts[idx - length(ir.stmts)][:type] + idx -= length(ir.stmts) + stmts = ir.new_nodes.stmts + (idx ≤ length(stmts)) || throw(InvalidIRError()) + return stmts[idx][:type] end end end diff --git a/base/compiler/ssair/show.jl b/base/compiler/ssair/show.jl index 7d936a1688aba..f3e11445d6c6c 100644 --- a/base/compiler/ssair/show.jl +++ b/base/compiler/ssair/show.jl @@ -14,6 +14,8 @@ end import Base: show_unquoted using Base: printstyled, with_output_color, prec_decl, @invoke +using Core.Compiler: VarState, InvalidIRError, argextype, widenconst, singleton_type, + sptypes_from_meth_instance, EMPTY_SPTYPES function Base.show(io::IO, cfg::CFG) print(io, "CFG with $(length(cfg.blocks)) blocks:") @@ -31,7 +33,50 @@ function Base.show(io::IO, cfg::CFG) end end -function print_stmt(io::IO, idx::Int, @nospecialize(stmt), used::BitSet, maxlength_idx::Int, color::Bool, show_type::Bool) +function maybe_argextype( + @nospecialize(x), + src::Union{IRCode,IncrementalCompact,CodeInfo}, + sptypes::Vector{VarState}, +) + return try + argextype(x, src, sptypes) + catch err + !(err isa InvalidIRError) && rethrow() + nothing + end +end + +const inlined_apply_iterate_types = Union{Array,Memory,Tuple,NamedTuple,Core.SimpleVector} + +function builtin_call_has_dispatch( + @nospecialize(f), + args::Vector{Any}, + src::Union{IRCode,IncrementalCompact,CodeInfo}, + sptypes::Vector{VarState}, +) + if f === Core._apply_iterate && length(args) >= 3 + # The implementation of _apply_iterate has hand-inlined implementations + # for (v::Union{Tuple,NamedTuple,Memory,Array,SimpleVector}...) 
+ # which perform no dynamic dispatch + constructort = maybe_argextype(args[3], src, sptypes) + if constructort === nothing || !(widenconst(constructort) <: Core.Builtin) + return true + end + for arg in args[4:end] + argt = maybe_argextype(arg, src, sptypes) + if argt === nothing || !(widenconst(argt) <: inlined_apply_iterate_types) + return true + end + end + elseif (f === Core._apply_pure || f === Core._call_in_world || f === Core._call_in_world_total || f === Core._call_latest) + # These apply-like builtins are effectively dynamic calls + return true + end + return false +end + +function print_stmt(io::IO, idx::Int, @nospecialize(stmt), code::Union{IRCode,CodeInfo,IncrementalCompact}, + sptypes::Vector{VarState}, used::BitSet, maxlength_idx::Int, color::Bool, show_type::Bool, label_dynamic_calls::Bool) if idx in used idx_s = string(idx) pad = " "^(maxlength_idx - length(idx_s) + 1) @@ -51,7 +96,7 @@ function print_stmt(io::IO, idx::Int, @nospecialize(stmt), used::BitSet, maxleng elseif isexpr(stmt, :invoke) && length(stmt.args) >= 2 && isa(stmt.args[1], MethodInstance) stmt = stmt::Expr # TODO: why is this here, and not in Base.show_unquoted - print(io, "invoke ") + printstyled(io, " invoke "; color = :light_black) mi = stmt.args[1]::Core.MethodInstance show_unquoted(io, stmt.args[2], indent) print(io, "(") @@ -66,6 +111,28 @@ function print_stmt(io::IO, idx::Int, @nospecialize(stmt), used::BitSet, maxleng end join(io, (print_arg(i) for i = 3:length(stmt.args)), ", ") print(io, ")") + elseif isexpr(stmt, :call) && length(stmt.args) >= 1 && label_dynamic_calls + ft = maybe_argextype(stmt.args[1], code, sptypes) + f = singleton_type(ft) + if isa(f, Core.IntrinsicFunction) + printstyled(io, "intrinsic "; color = :light_black) + elseif isa(f, Core.Builtin) + if builtin_call_has_dispatch(f, stmt.args, code, sptypes) + printstyled(io, "dynamic builtin "; color = :yellow) + else + printstyled(io, " builtin "; color = :light_black) + end + elseif ft === nothing + # This should only happen when, e.g., printing a call that targets + # an out-of-bounds SSAValue or similar + # (i.e. under normal circumstances, dead code) + printstyled(io, " unknown "; color = :light_black) + elseif widenconst(ft) <: Core.Builtin + printstyled(io, "dynamic builtin "; color = :yellow) + else + printstyled(io, " dynamic "; color = :yellow) + end + show_unquoted(io, stmt, indent, show_type ? 
prec_decl : 0) # given control flow information, we prefer to print these with the basic block #, instead of the ssa % elseif isa(stmt, EnterNode) print(io, "enter #", stmt.catch_dest, "") @@ -563,16 +630,28 @@ end - `should_print_stmt(idx::Int) -> Bool`: whether the statement at index `idx` should be printed as part of the IR or not - `bb_color`: color used for printing the basic block brackets on the left +- `label_dynamic_calls`: whether to label calls as dynamic / builtin / intrinsic """ struct IRShowConfig line_info_preprinter line_info_postprinter should_print_stmt bb_color::Symbol - function IRShowConfig(line_info_preprinter, line_info_postprinter=default_expr_type_printer; - should_print_stmt=Returns(true), bb_color::Symbol=:light_black) - return new(line_info_preprinter, line_info_postprinter, should_print_stmt, bb_color) - end + label_dynamic_calls::Bool + + IRShowConfig( + line_info_preprinter, + line_info_postprinter=default_expr_type_printer; + should_print_stmt=Returns(true), + bb_color::Symbol=:light_black, + label_dynamic_calls=true + ) = new( + line_info_preprinter, + line_info_postprinter, + should_print_stmt, + bb_color, + label_dynamic_calls + ) end struct _UNDEF @@ -628,13 +707,14 @@ end # at index `idx`. This function is repeatedly called until it returns `nothing`. # to iterate nodes that are to be inserted after the statement, set `attach_after=true`. function show_ir_stmt(io::IO, code::Union{IRCode, CodeInfo, IncrementalCompact}, idx::Int, config::IRShowConfig, - used::BitSet, cfg::CFG, bb_idx::Int; pop_new_node! = Returns(nothing), only_after::Bool=false) + sptypes::Vector{VarState}, used::BitSet, cfg::CFG, bb_idx::Int; pop_new_node! = Returns(nothing), only_after::Bool=false) return show_ir_stmt(io, code, idx, config.line_info_preprinter, config.line_info_postprinter, - used, cfg, bb_idx; pop_new_node!, only_after, config.bb_color) + sptypes, used, cfg, bb_idx; pop_new_node!, only_after, config.bb_color, config.label_dynamic_calls) end function show_ir_stmt(io::IO, code::Union{IRCode, CodeInfo, IncrementalCompact}, idx::Int, line_info_preprinter, line_info_postprinter, - used::BitSet, cfg::CFG, bb_idx::Int; pop_new_node! = Returns(nothing), only_after::Bool=false, bb_color=:light_black) + sptypes::Vector{VarState}, used::BitSet, cfg::CFG, bb_idx::Int; pop_new_node! 
= Returns(nothing), only_after::Bool=false, + bb_color=:light_black, label_dynamic_calls::Bool=true) stmt = _stmt(code, idx) type = _type(code, idx) max_bb_idx_size = length(string(length(cfg.blocks))) @@ -693,7 +773,7 @@ function show_ir_stmt(io::IO, code::Union{IRCode, CodeInfo, IncrementalCompact}, show_type = should_print_ssa_type(new_node_inst) let maxlength_idx=maxlength_idx, show_type=show_type with_output_color(:green, io) do io′ - print_stmt(io′, node_idx, new_node_inst, used, maxlength_idx, false, show_type) + print_stmt(io′, node_idx, new_node_inst, code, sptypes, used, maxlength_idx, false, show_type, label_dynamic_calls) end end @@ -722,7 +802,7 @@ function show_ir_stmt(io::IO, code::Union{IRCode, CodeInfo, IncrementalCompact}, stmt = statement_indices_to_labels(stmt, cfg) end show_type = type !== nothing && should_print_ssa_type(stmt) - print_stmt(io, idx, stmt, used, maxlength_idx, true, show_type) + print_stmt(io, idx, stmt, code, sptypes, used, maxlength_idx, true, show_type, label_dynamic_calls) if type !== nothing # ignore types for pre-inference code if type === UNDEF # This is an error, but can happen if passes don't update their type information @@ -881,10 +961,10 @@ end default_config(code::CodeInfo) = IRShowConfig(statementidx_lineinfo_printer(code)) function show_ir_stmts(io::IO, ir::Union{IRCode, CodeInfo, IncrementalCompact}, inds, config::IRShowConfig, - used::BitSet, cfg::CFG, bb_idx::Int; pop_new_node! = Returns(nothing)) + sptypes::Vector{VarState}, used::BitSet, cfg::CFG, bb_idx::Int; pop_new_node! = Returns(nothing)) for idx in inds if config.should_print_stmt(ir, idx, used) - bb_idx = show_ir_stmt(io, ir, idx, config, used, cfg, bb_idx; pop_new_node!) + bb_idx = show_ir_stmt(io, ir, idx, config, sptypes, used, cfg, bb_idx; pop_new_node!) elseif bb_idx <= length(cfg.blocks) && idx == cfg.blocks[bb_idx].stmts.stop bb_idx += 1 end @@ -904,7 +984,7 @@ function show_ir(io::IO, ir::IRCode, config::IRShowConfig=default_config(ir); cfg = ir.cfg maxssaid = length(ir.stmts) + Core.Compiler.length(ir.new_nodes) let io = IOContext(io, :maxssaid=>maxssaid) - show_ir_stmts(io, ir, 1:length(ir.stmts), config, used, cfg, 1; pop_new_node!) + show_ir_stmts(io, ir, 1:length(ir.stmts), config, ir.sptypes, used, cfg, 1; pop_new_node!) end finish_show_ir(io, cfg, config) end @@ -913,8 +993,12 @@ function show_ir(io::IO, ci::CodeInfo, config::IRShowConfig=default_config(ci); pop_new_node! = Returns(nothing)) used = stmts_used(io, ci) cfg = compute_basic_blocks(ci.code) + parent = ci.parent + sptypes = if parent isa MethodInstance + sptypes_from_meth_instance(parent) + else EMPTY_SPTYPES end let io = IOContext(io, :maxssaid=>length(ci.code)) - show_ir_stmts(io, ci, 1:length(ci.code), config, used, cfg, 1; pop_new_node!) + show_ir_stmts(io, ci, 1:length(ci.code), config, sptypes, used, cfg, 1; pop_new_node!) end finish_show_ir(io, cfg, config) end @@ -963,8 +1047,8 @@ function show_ir(io::IO, compact::IncrementalCompact, config::IRShowConfig=defau pop_new_node! = new_nodes_iter(compact) maxssaid = length(compact.result) + Core.Compiler.length(compact.new_new_nodes) bb_idx = let io = IOContext(io, :maxssaid=>maxssaid) - show_ir_stmts(io, compact, 1:compact.result_idx-1, config, used_compacted, - compact_cfg, 1; pop_new_node!) + show_ir_stmts(io, compact, 1:compact.result_idx-1, config, compact.ir.sptypes, + used_compacted, compact_cfg, 1; pop_new_node!) 
end @@ -995,13 +1079,13 @@ function show_ir(io::IO, compact::IncrementalCompact, config::IRShowConfig=defau let io = IOContext(io, :maxssaid=>maxssaid) # first show any new nodes to be attached after the last compacted statement if compact.idx > 1 - show_ir_stmt(io, compact.ir, compact.idx-1, config, used_uncompacted, - uncompacted_cfg, bb_idx; pop_new_node!, only_after=true) + show_ir_stmt(io, compact.ir, compact.idx-1, config, compact.ir.sptypes, + used_uncompacted, uncompacted_cfg, bb_idx; pop_new_node!, only_after=true) end # then show the actual uncompacted IR - show_ir_stmts(io, compact.ir, compact.idx:length(stmts), config, used_uncompacted, - uncompacted_cfg, bb_idx; pop_new_node!) + show_ir_stmts(io, compact.ir, compact.idx:length(stmts), config, compact.ir.sptypes, + used_uncompacted, uncompacted_cfg, bb_idx; pop_new_node!) end finish_show_ir(io, uncompacted_cfg, config) diff --git a/base/compiler/types.jl b/base/compiler/types.jl index ecf2417fd6199..210adf7be96b2 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -28,6 +28,8 @@ the following methods to satisfy the `AbstractInterpreter` API requirement: abstract type AbstractLattice end +struct InvalidIRError <: Exception end + struct ArgInfo fargs::Union{Nothing,Vector{Any}} argtypes::Vector{Any} diff --git a/base/deprecated.jl b/base/deprecated.jl index b43a4227d42c4..953de358a68ee 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -26,6 +26,7 @@ const __internal_changes_list = ( :miuninferredrm, :codeinfonargs, # #54341 :ocnopartial, + :printcodeinfocalls, # Add new change names above this line ) diff --git a/doc/src/base/reflection.md b/doc/src/base/reflection.md index 0f0af140b605f..9228fb38322df 100644 --- a/doc/src/base/reflection.md +++ b/doc/src/base/reflection.md @@ -100,9 +100,9 @@ as assignments, branches, and calls: ```jldoctest; setup = (using Base: +, sin) julia> Meta.lower(@__MODULE__, :( [1+2, sin(0.5)] )) :($(Expr(:thunk, CodeInfo( -1 ─ %1 = 1 + 2 -│ %2 = sin(0.5) -│ %3 = Base.vect(%1, %2) +1 ─ %1 = dynamic 1 + 2 +│ %2 = dynamic sin(0.5) +│ %3 = dynamic Base.vect(%1, %2) └── return %3 )))) ``` @@ -146,7 +146,7 @@ debug information printed. 
julia> InteractiveUtils.@code_typed debuginfo=:source +(1,1) CodeInfo( @ int.jl:87 within `+` -1 ─ %1 = Base.add_int(x, y)::Int64 +1 ─ %1 = intrinsic Base.add_int(x, y)::Int64 └── return %1 ) => Int64 ``` diff --git a/doc/src/devdocs/inference.md b/doc/src/devdocs/inference.md index c885441e4dd84..98f020dda1d8e 100644 --- a/doc/src/devdocs/inference.md +++ b/doc/src/devdocs/inference.md @@ -98,16 +98,16 @@ as follows: julia> Base.print_statement_costs(stdout, map, (typeof(sqrt), Tuple{Int},)) # map(sqrt, (2,)) map(f, t::Tuple{Any}) @ Base tuple.jl:281 0 1 ─ %1 = $(Expr(:boundscheck, true))::Bool - 0 │ %2 = Base.getfield(_3, 1, %1)::Int64 - 1 │ %3 = Base.sitofp(Float64, %2)::Float64 - 0 │ %4 = Base.lt_float(%3, 0.0)::Bool + 0 │ %2 = builtin Base.getfield(_3, 1, %1)::Int64 + 1 │ %3 = intrinsic Base.sitofp(Float64, %2)::Float64 + 0 │ %4 = intrinsic Base.lt_float(%3, 0.0)::Bool 0 └── goto #3 if not %4 - 0 2 ─ invoke Base.Math.throw_complex_domainerror(:sqrt::Symbol, %3::Float64)::Union{} + 0 2 ─ invoke Base.Math.throw_complex_domainerror(:sqrt::Symbol, %3::Float64)::Union{} 0 └── unreachable - 20 3 ─ %8 = Base.Math.sqrt_llvm(%3)::Float64 + 20 3 ─ %8 = intrinsic Base.Math.sqrt_llvm(%3)::Float64 0 └── goto #4 0 4 ─ goto #5 - 0 5 ─ %11 = Core.tuple(%8)::Tuple{Float64} + 0 5 ─ %11 = builtin Core.tuple(%8)::Tuple{Float64} 0 └── return %11 ``` diff --git a/stdlib/InteractiveUtils/src/codeview.jl b/stdlib/InteractiveUtils/src/codeview.jl index 9f1538cd4a7fe..e3ef0a14a6608 100644 --- a/stdlib/InteractiveUtils/src/codeview.jl +++ b/stdlib/InteractiveUtils/src/codeview.jl @@ -54,7 +54,7 @@ function is_expected_union(u::Union) return true end -function print_warntype_codeinfo(io::IO, src::Core.CodeInfo, @nospecialize(rettype), nargs::Int; lineprinter) +function print_warntype_codeinfo(io::IO, src::Core.CodeInfo, @nospecialize(rettype), nargs::Int; lineprinter, label_dynamic_calls) if src.slotnames !== nothing slotnames = Base.sourceinfo_slotnames(src) io = IOContext(io, :SOURCE_SLOTNAMES => slotnames) @@ -74,7 +74,7 @@ function print_warntype_codeinfo(io::IO, src::Core.CodeInfo, @nospecialize(retty print(io, "Body") warntype_type_printer(io; type=rettype, used=true) println(io) - irshow_config = Base.IRShow.IRShowConfig(lineprinter(src), warntype_type_printer) + irshow_config = Base.IRShow.IRShowConfig(lineprinter(src), warntype_type_printer; label_dynamic_calls) Base.IRShow.show_ir(io, src, irshow_config) println(io) end @@ -154,7 +154,8 @@ function code_warntype(io::IO, @nospecialize(f), @nospecialize(tt=Base.default_t nargs::Int = 0 if isa(f, Core.OpaqueClosure) isa(f.source, Method) && (nargs = f.source.nargs) - print_warntype_codeinfo(io, Base.code_typed_opaque_closure(f, tt)[1]..., nargs; lineprinter) + print_warntype_codeinfo(io, Base.code_typed_opaque_closure(f, tt)[1]..., nargs; + lineprinter, label_dynamic_calls = optimize) return nothing end tt = Base.signature_type(f, tt) @@ -167,7 +168,8 @@ function code_warntype(io::IO, @nospecialize(f), @nospecialize(tt=Base.default_t mi.def isa Method && (nargs = (mi.def::Method).nargs) print_warntype_mi(io, mi) if src isa Core.CodeInfo - print_warntype_codeinfo(io, src, src.rettype, nargs; lineprinter) + print_warntype_codeinfo(io, src, src.rettype, nargs; + lineprinter, label_dynamic_calls = optimize) else println(io, " inference not successful") end diff --git a/stdlib/InteractiveUtils/test/highlighting.jl b/stdlib/InteractiveUtils/test/highlighting.jl index 3531618e10dfc..f49464557f926 100644 --- a/stdlib/InteractiveUtils/test/highlighting.jl +++ 
b/stdlib/InteractiveUtils/test/highlighting.jl @@ -34,7 +34,10 @@ end c = Base.text_colors[Base.warn_color()] InteractiveUtils.highlighting[:warntype] = false code_warntype(IOContext(io, :color => true), f, Tuple{Int64}) - @test !occursin(c, String(take!(io))) + @test !any([ + occursin("Body", line) && occursin(c, line) + for line in split(String(take!(io)), "\n") + ]) InteractiveUtils.highlighting[:warntype] = true code_warntype(IOContext(io, :color => true), f, Tuple{Int64}) @test occursin(c, String(take!(io))) diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/test/compiler/EscapeAnalysis/EAUtils.jl index b8ad4589db626..c41e61e231892 100644 --- a/test/compiler/EscapeAnalysis/EAUtils.jl +++ b/test/compiler/EscapeAnalysis/EAUtils.jl @@ -291,7 +291,7 @@ function print_with_info(preprint, postprint, io::IO, ir::IRCode, source::Bool) bb_idx_prev = bb_idx = 1 for idx = 1:length(ir.stmts) preprint(io, idx) - bb_idx = Base.IRShow.show_ir_stmt(io, ir, idx, line_info_preprinter, line_info_postprinter, used, ir.cfg, bb_idx) + bb_idx = Base.IRShow.show_ir_stmt(io, ir, idx, line_info_preprinter, line_info_postprinter, ir.sptypes, used, ir.cfg, bb_idx) postprint(io, idx, bb_idx != bb_idx_prev) bb_idx_prev = bb_idx end From cff9cca8cabefc5289acab9161b523171441abac Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Fri, 11 Oct 2024 22:07:23 -0400 Subject: [PATCH 178/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=2051d4910c1=20to=20fbaa2e337=20(#56124)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 | 1 - .../Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 | 1 - .../Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 | 1 + .../Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 | 1 + stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 create mode 100644 deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 create mode 100644 deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 diff --git a/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 b/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 deleted file mode 100644 index b5b82565470c0..0000000000000 --- a/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -88b8a25a8d465ac8cc94d13bc5f51707 diff --git a/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 b/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 deleted file mode 100644 index a746b269d91f0..0000000000000 --- a/deps/checksums/Pkg-51d4910c114a863d888659cb8962c1e161b2a421.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -22262687f3bf75292ab0170e19a9c4a494022a653b2811443b8c52bc099dee0fddd09f6632ae42b3193adf3b0693ddcb6679b5d91e50a500f65261df5b7ced7d diff --git a/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 b/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 new file mode 100644 index 0000000000000..762a180d93031 --- /dev/null +++ b/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 @@ -0,0 +1 @@ +4ea351427d5b43617abae557670c3313 diff --git 
a/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 b/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 new file mode 100644 index 0000000000000..eef70ab9b62d5 --- /dev/null +++ b/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 @@ -0,0 +1 @@ +9e91076974ab1dcb1c85e2c8acaf3404f4e82dcd2118d215d4a8413a1e00462ca47891bdae983441a8621015c082421de1f2e26b9b2ee18c1e3c13d58bd1d261 diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index 34233c58702b4..fc67189981d59 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = 51d4910c114a863d888659cb8962c1e161b2a421 +PKG_SHA1 = fbaa2e3370b4ab922919892640e5d1b0bcb14037 PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From dc344285d5be2bfdf4ead01effa95643b7babc8b Mon Sep 17 00:00:00 2001 From: Simeon David Schaub Date: Sat, 12 Oct 2024 07:07:20 +0200 Subject: [PATCH 179/537] Fix type instability of closures capturing types (2) (#40985) Instead of closures lowering to `typeof` for the types of captured fields, this introduces a new function `_typeof_captured_variable` that returns `Type{T}` if `T` is a type (w/o free typevars). - replaces/closes #35970 - fixes #23618 --------- Co-authored-by: Takafumi Arakaki Co-authored-by: Shuhei Kadowaki --- base/boot.jl | 20 ++++++++++++++++++++ base/reflection.jl | 3 ++- src/julia-syntax.scm | 2 +- test/compiler/inline.jl | 12 ++++++------ test/core.jl | 28 ++++++++++++++++++++++++++++ 5 files changed, 57 insertions(+), 8 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index 608e273d4b514..861c83a2edac5 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -272,6 +272,21 @@ end Expr(@nospecialize args...) = _expr(args...) _is_internal(__module__) = __module__ === Core +# can be used in place of `@assume_effects :total` (supposed to be used for bootstrapping) +macro _total_meta() + return _is_internal(__module__) && Expr(:meta, Expr(:purity, + #=:consistent=#true, + #=:effect_free=#true, + #=:nothrow=#true, + #=:terminates_globally=#true, + #=:terminates_locally=#false, + #=:notaskstate=#true, + #=:inaccessiblememonly=#true, + #=:noub=#true, + #=:noub_if_noinbounds=#false, + #=:consistent_overlay=#false, + #=:nortcall=#true)) +end # can be used in place of `@assume_effects :foldable` (supposed to be used for bootstrapping) macro _foldable_meta() return _is_internal(__module__) && Expr(:meta, Expr(:purity, @@ -310,6 +325,11 @@ convert(::Type{T}, x::T) where {T} = x cconvert(::Type{T}, x) where {T} = convert(T, x) unsafe_convert(::Type{T}, x::T) where {T} = x +# will be inserted by the frontend for closures +_typeof_captured_variable(@nospecialize t) = (@_total_meta; t isa Type && has_free_typevars(t) ? 
typeof(t) : Typeof(t)) + +has_free_typevars(@nospecialize t) = (@_total_meta; ccall(:jl_has_free_typevars, Int32, (Any,), t) === Int32(1)) + # dispatch token indicating a kwarg (keyword sorter) call function kwcall end # deprecated internal functions: diff --git a/base/reflection.jl b/base/reflection.jl index 80eeb4c4efb12..49d640ea40bab 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -862,7 +862,8 @@ end iskindtype(@nospecialize t) = (t === DataType || t === UnionAll || t === Union || t === typeof(Bottom)) isconcretedispatch(@nospecialize t) = isconcretetype(t) && !iskindtype(t) -has_free_typevars(@nospecialize(t)) = (@_total_meta; ccall(:jl_has_free_typevars, Cint, (Any,), t) != 0) + +using Core: has_free_typevars # equivalent to isa(v, Type) && isdispatchtuple(Tuple{v}) || v === Union{} # and is thus perhaps most similar to the old (pre-1.0) `isleaftype` query diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index f1acb9c3250e1..4b3e6ae96898b 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -4246,7 +4246,7 @@ f(x) = yt(x) (filter identity (map (lambda (v ve) (if (is-var-boxed? v lam) #f - `(call (core typeof) ,ve))) + `(call (core _typeof_captured_variable) ,ve))) capt-vars var-exprs))))) `(new ,(if (null? P) type-name diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl index 2de6d9950d4e4..99fed00a7269d 100644 --- a/test/compiler/inline.jl +++ b/test/compiler/inline.jl @@ -951,34 +951,34 @@ end end # issue 43104 - +_has_free_typevars(t) = ccall(:jl_has_free_typevars, Cint, (Any,), t) != 0 @inline isGoodType(@nospecialize x::Type) = - x !== Any && !(@noinline Base.has_free_typevars(x)) + x !== Any && !(@noinline _has_free_typevars(x)) let # aggressive inlining of single, abstract method match src = code_typed((Type, Any,)) do x, y isGoodType(x), isGoodType(y) end |> only |> first # both callsites should be inlined - @test count(isinvoke(:has_free_typevars), src.code) == 2 + @test count(isinvoke(:_has_free_typevars), src.code) == 2 # `isGoodType(y::Any)` isn't fully covered, so the fallback is a method error @test count(iscall((src, Core.throw_methoderror)), src.code) == 1 # fallback method error end @inline isGoodType2(cnd, @nospecialize x::Type) = - x !== Any && !(@noinline (cnd ? Core.Compiler.isType : Base.has_free_typevars)(x)) + x !== Any && !(@noinline (cnd ? 
Core.Compiler.isType : _has_free_typevars)(x)) let # aggressive inlining of single, abstract method match (with constant-prop'ed) src = code_typed((Type, Any,)) do x, y isGoodType2(true, x), isGoodType2(true, y) end |> only |> first # both callsite should be inlined with constant-prop'ed result @test count(isinvoke(:isType), src.code) == 2 - @test count(isinvoke(:has_free_typevars), src.code) == 0 + @test count(isinvoke(:_has_free_typevars), src.code) == 0 # `isGoodType(y::Any)` isn't fully covered, thus a MethodError gets inserted @test count(iscall((src, Core.throw_methoderror)), src.code) == 1 # fallback method error end @noinline function checkBadType!(@nospecialize x::Type) - if x === Any || Base.has_free_typevars(x) + if x === Any || _has_free_typevars(x) println(x) end return nothing diff --git a/test/core.jl b/test/core.jl index b27832209a835..5ba0e99e730d4 100644 --- a/test/core.jl +++ b/test/core.jl @@ -796,6 +796,34 @@ end @test foo21900 == 10 @test bar21900 == 11 +let f = g -> x -> g(x) + @test f(Int)(1.0) === 1 + @test @inferred(f(Int)) isa Function + @test fieldtype(typeof(f(Int)), 1) === Type{Int} + @test @inferred(f(Rational{Int})) isa Function + @test fieldtype(typeof(f(Rational{Int})), 1) === Type{Rational{Int}} + @test_broken @inferred(f(Rational)) isa Function + @test fieldtype(typeof(f(Rational)), 1) === Type{Rational} + @test_broken @inferred(f(Rational{Core.TypeVar(:T)})) isa Function + @test fieldtype(typeof(f(Rational{Core.TypeVar(:T)})), 1) === DataType +end +let f() = (T = Rational{Core.TypeVar(:T)}; () -> T) + @test f() isa Function + @test Base.infer_return_type(f()) == DataType + @test fieldtype(typeof(f()), 1) === DataType + t = f()() + @test t isa DataType + @test t.name.wrapper == Rational + @test length(t.parameters) == 1 + @test t.parameters[1] isa Core.TypeVar +end +function issue23618(a::AbstractVector) + T = eltype(a) + b = Vector{T}() + return [Set{T}() for x in a] +end +@test Base.infer_return_type(issue23618, (Vector{Int},)) == Vector{Set{Int}} + # ? syntax @test (true ? 1 : false ? 2 : 3) == 1 From cb1b83de308a835d089c8231a4d697ab9c761588 Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Sat, 12 Oct 2024 14:16:39 +0200 Subject: [PATCH 180/537] Remove debug error statement from Makefile. 
(#56127) --- deps/openblas.mk | 1 - 1 file changed, 1 deletion(-) diff --git a/deps/openblas.mk b/deps/openblas.mk index fbaa2e7a0fb92..affd1c7a7aa55 100644 --- a/deps/openblas.mk +++ b/deps/openblas.mk @@ -31,7 +31,6 @@ endif endif # 64-bit BLAS interface -$(error USE_BLAS64: $(USE_BLAS64)) ifeq ($(USE_BLAS64), 1) OPENBLAS_BUILD_OPTS += INTERFACE64=1 SYMBOLSUFFIX="$(OPENBLAS_SYMBOLSUFFIX)" LIBPREFIX="libopenblas$(OPENBLAS_LIBNAMESUFFIX)" ifeq ($(OS), Darwin) From b3d587d513cd454122e25c1c6a85620b7ca1af5c Mon Sep 17 00:00:00 2001 From: spaette <111918424+spaette@users.noreply.github.com> Date: Sat, 12 Oct 2024 16:47:46 +0200 Subject: [PATCH 181/537] align markdown table (#56122) @gbaraldi `#51197` @spaette `#56008` fix innocuous malalignment of table after those pulls were merged --- doc/src/devdocs/llvm.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/devdocs/llvm.md b/doc/src/devdocs/llvm.md index c4b80f632cd4e..fdecb0472388a 100644 --- a/doc/src/devdocs/llvm.md +++ b/doc/src/devdocs/llvm.md @@ -11,13 +11,13 @@ The code for lowering Julia AST to LLVM IR or interpreting it directly is in dir | File | Description | |:-------------------------------- |:------------------------------------------------------------------ | -| `aotcompile.cpp` | Compiler C-interface entry and object file emission | +| `aotcompile.cpp` | Compiler C-interface entry and object file emission | | `builtins.c` | Builtin functions | | `ccall.cpp` | Lowering [`ccall`](@ref) | | `cgutils.cpp` | Lowering utilities, notably for array and tuple accesses | | `codegen.cpp` | Top-level of code generation, pass list, lowering builtins | | `debuginfo.cpp` | Tracks debug information for JIT code | -| `disasm.cpp` | Handles native object file and JIT code disassembly | +| `disasm.cpp` | Handles native object file and JIT code disassembly | | `gf.c` | Generic functions | | `intrinsics.cpp` | Lowering intrinsics | | `jitlayers.cpp` | JIT-specific code, ORC compilation layers/utilities | From 1713f797d2d04009b5d678c8d1208a0ef9fd1059 Mon Sep 17 00:00:00 2001 From: David Little Date: Sat, 12 Oct 2024 14:44:51 -0400 Subject: [PATCH 182/537] Improve IOBuffer docs (#56024) Based on the discussion in #55978, I have tried to clarify the documentation of `IOBuffer`. --- base/iobuffer.jl | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/base/iobuffer.jl b/base/iobuffer.jl index c0c2731eec08b..bd924fd040496 100644 --- a/base/iobuffer.jl +++ b/base/iobuffer.jl @@ -59,6 +59,15 @@ It may take optional keyword arguments: When `data` is not given, the buffer will be both readable and writable by default. +!!! warning "Passing `data` as scratch space to `IOBuffer` with `write=true` may give unexpected behavior" + Once `write` is called on an `IOBuffer`, it is best to consider any + previous references to `data` invalidated; in effect `IOBuffer` "owns" + this data until a call to `take!`. Any indirect mutations to `data` + could lead to undefined behavior by breaking the abstractions expected + by `IOBuffer`. If `write=true` the IOBuffer may store data at any + offset leaving behind arbitrary values at other offsets. If `maxsize > length(data)`, + the IOBuffer might re-allocate the data entirely, which + may or may not be visible in any outstanding bindings to `array`. 
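A minimal sketch of the pitfall the new warning describes (names are illustrative and the residual contents of the wrapped vector are unspecified; this is not part of the patch itself):

```julia
buf = zeros(UInt8, 8)            # intended as reusable scratch space
io  = IOBuffer(buf; write=true)  # from here on, `io` effectively owns `buf`
write(io, "hi")
# Reading `buf` directly is now unreliable: the written bytes may sit at an
# arbitrary offset, and `io` may even have re-allocated its storage entirely.
contents = take!(io)             # retrieve the contents through the IOBuffer instead
```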
# Examples ```jldoctest julia> io = IOBuffer(); From 80e60c89bb2b4fb193000c78e7f056c303dc6859 Mon Sep 17 00:00:00 2001 From: Zentrik Date: Sun, 13 Oct 2024 07:25:29 +0100 Subject: [PATCH 183/537] Comment out url and fix typo in stackwalk.c (#56131) Introduced in #55623 --- src/stackwalk.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/stackwalk.c b/src/stackwalk.c index 7fb4de0372738..5377d091cb780 100644 --- a/src/stackwalk.c +++ b/src/stackwalk.c @@ -933,7 +933,7 @@ extern bt_context_t *jl_to_bt_context(void *sigctx) JL_NOTSAFEPOINT; int jl_simulate_longjmp(jl_jmp_buf mctx, bt_context_t *c) JL_NOTSAFEPOINT { #if (defined(_COMPILER_ASAN_ENABLED_) || defined(_COMPILER_TSAN_ENABLED_)) - https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/hwasan/hwasan_interceptors.cpp + // https://github.com/llvm/llvm-project/blob/main/compiler-rt/lib/hwasan/hwasan_interceptors.cpp return 0; #elif defined(_OS_WINDOWS_) _JUMP_BUFFER* _ctx = (_JUMP_BUFFER*)mctx; @@ -1049,7 +1049,7 @@ int jl_simulate_longjmp(jl_jmp_buf mctx, bt_context_t *c) JL_NOTSAFEPOINT mc->regs[28] = (*_ctx)[9]; mc->regs[29] = (*_ctx)[10]; // aka fp mc->regs[30] = (*_ctx)[11]; // aka lr - // Yes, they did skip 12 why writing the code originally; and, no, I do not know why. + // Yes, they did skip 12 when writing the code originally; and, no, I do not know why. mc->sp = (*_ctx)[13]; mcfp->vregs[7] = (*_ctx)[14]; // aka d8 mcfp->vregs[8] = (*_ctx)[15]; // aka d9 From 3db1c62d62148e56997da50863b0fd6c0eab35c8 Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Sun, 13 Oct 2024 12:56:26 +0200 Subject: [PATCH 184/537] libgit2: Always use the bundled PCRE library. (#56129) This is how Yggdrasil builds the library. --- deps/libgit2.mk | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/deps/libgit2.mk b/deps/libgit2.mk index b65ac022885a3..022582d48c78e 100644 --- a/deps/libgit2.mk +++ b/deps/libgit2.mk @@ -42,6 +42,10 @@ ifneq (,$(findstring $(OS),Linux FreeBSD OpenBSD)) LIBGIT2_OPTS += -DUSE_HTTPS="mbedTLS" -DUSE_SHA1="CollisionDetection" -DCMAKE_INSTALL_RPATH="\$$ORIGIN" endif +# use the bundled distribution of libpcre. we should consider linking against the +# pcre2 library we're building anyway, but this is currently how Yggdrasil does it. +LIBGIT2_OPTS += -DREGEX_BACKEND="builtin" + LIBGIT2_SRC_PATH := $(SRCCACHE)/$(LIBGIT2_SRC_DIR) $(BUILDDIR)/$(LIBGIT2_SRC_DIR)/build-configured: $(LIBGIT2_SRC_PATH)/source-extracted From 4609aad97ea5b4ffdc721f9217647481e7c740d3 Mon Sep 17 00:00:00 2001 From: Alex Arslan Date: Sun, 13 Oct 2024 08:33:45 -0700 Subject: [PATCH 185/537] Update JLL build versions (#56133) This commit encompasses the following changes: - Updating the JLL build version for Clang, dSFMT, GMP, LibUV, LibUnwind, LLD, LLVM, libLLVM, MbedTLS, MPFR, OpenBLAS, OpenLibm, p7zip, PCRE2, SuiteSparse, and Zlib. - Updating CompilerSupportLibraries to v1.2.0. The library versions contained in this release of CSL don't differ from v1.1.1, the only difference is that v1.2.0 includes FreeBSD AArch64. - Updating nghttp2 from 1.60.0 to 1.63.0. See [here](https://github.com/nghttp2/nghttp2/releases) for changes between these versions. - Adding `aarch64-unknown-freebsd` to the list of triplets to check when refreshing checksums. Note that dependencies that link to MbedTLS (Curl, LibSSH2, LibGit2) are excluded here. They'll be updated once a resolution is reached for the OpenSSL switching saga. Once that happens, FreeBSD AArch64 should be able to be built without any dependency source builds. 
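As a side note on the `aarch64-unknown-freebsd` entry added to `TRIPLETS` below: the string follows the platform-triplet naming that Base itself produces, so the artifact file names in the checksum lists can be cross-checked against it. A rough, illustrative sketch:

```julia
using Base.BinaryPlatforms

triplet(Platform("aarch64", "freebsd"))  # expected: "aarch64-unknown-freebsd"
```

The `libblastrampoline` checksum list below, for example, gains entries with exactly this prefix.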
--- contrib/refresh_checksums.mk | 2 +- deps/checksums/blastrampoline | 2 + deps/checksums/clang | 220 ++++---- deps/checksums/compilersupportlibraries | 188 +++---- deps/checksums/dsfmt | 66 +-- deps/checksums/gmp | 118 ++-- deps/checksums/libuv | 66 +-- deps/checksums/lld | 220 ++++---- deps/checksums/llvm | 508 +++++++++--------- deps/checksums/llvmunwind | 32 -- deps/checksums/mbedtls | 66 +-- deps/checksums/mpfr | 66 +-- deps/checksums/nghttp2 | 70 +-- deps/checksums/openblas | 188 +++---- deps/checksums/openlibm | 66 +-- deps/checksums/p7zip | 66 +-- deps/checksums/pcre | 66 +-- deps/checksums/suitesparse | 66 +-- deps/checksums/unwind | 50 +- deps/checksums/zlib | 66 +-- deps/clang.version | 2 +- deps/lld.version | 2 +- deps/llvm-tools.version | 4 +- deps/llvm.version | 2 +- deps/nghttp2.version | 2 +- .../CompilerSupportLibraries_jll/Project.toml | 2 +- stdlib/GMP_jll/Project.toml | 2 +- stdlib/LLD_jll/Project.toml | 2 +- stdlib/LibUV_jll/Project.toml | 2 +- stdlib/LibUnwind_jll/Project.toml | 2 +- stdlib/MPFR_jll/Project.toml | 2 +- stdlib/Manifest.toml | 32 +- stdlib/MbedTLS_jll/Project.toml | 2 +- stdlib/OpenBLAS_jll/Project.toml | 2 +- stdlib/OpenLibm_jll/Project.toml | 2 +- stdlib/PCRE2_jll/Project.toml | 2 +- stdlib/SuiteSparse_jll/Project.toml | 2 +- stdlib/Zlib_jll/Project.toml | 2 +- stdlib/dSFMT_jll/Project.toml | 2 +- stdlib/libLLVM_jll/Project.toml | 2 +- stdlib/nghttp2_jll/Project.toml | 2 +- stdlib/nghttp2_jll/test/runtests.jl | 2 +- stdlib/p7zip_jll/Project.toml | 2 +- 43 files changed, 1144 insertions(+), 1126 deletions(-) diff --git a/contrib/refresh_checksums.mk b/contrib/refresh_checksums.mk index f67088141ccd4..e7bf2fd7c2efc 100644 --- a/contrib/refresh_checksums.mk +++ b/contrib/refresh_checksums.mk @@ -19,7 +19,7 @@ all: checksum pack-checksum # Get this list via: # using BinaryBuilder # print("TRIPLETS=\"$(join(sort(triplet.(BinaryBuilder.supported_platforms(;experimental=true))), " "))\"") -TRIPLETS=aarch64-apple-darwin aarch64-linux-gnu aarch64-linux-musl armv6l-linux-gnueabihf armv6l-linux-musleabihf armv7l-linux-gnueabihf armv7l-linux-musleabihf i686-linux-gnu i686-linux-musl i686-w64-mingw32 powerpc64le-linux-gnu x86_64-apple-darwin x86_64-linux-gnu x86_64-linux-musl x86_64-unknown-freebsd x86_64-w64-mingw32 +TRIPLETS=aarch64-apple-darwin aarch64-linux-gnu aarch64-linux-musl aarch64-unknown-freebsd armv6l-linux-gnueabihf armv6l-linux-musleabihf armv7l-linux-gnueabihf armv7l-linux-musleabihf i686-linux-gnu i686-linux-musl i686-w64-mingw32 powerpc64le-linux-gnu x86_64-apple-darwin x86_64-linux-gnu x86_64-linux-musl x86_64-unknown-freebsd x86_64-w64-mingw32 CLANG_TRIPLETS=$(filter %-darwin %-freebsd,$(TRIPLETS)) NON_CLANG_TRIPLETS=$(filter-out %-darwin %-freebsd,$(TRIPLETS)) diff --git a/deps/checksums/blastrampoline b/deps/checksums/blastrampoline index ac028ceb6e124..cbde7fa45b1e2 100644 --- a/deps/checksums/blastrampoline +++ b/deps/checksums/blastrampoline @@ -6,6 +6,8 @@ libblastrampoline.v5.11.1+0.aarch64-linux-gnu.tar.gz/md5/aad5e3585f585d54d9ebcf8 libblastrampoline.v5.11.1+0.aarch64-linux-gnu.tar.gz/sha512/11ff9227e16898895ad6cbd36853093941b243a49962785a5ab8b7dc2426831a2750ab5882ee814e3a662e8b9f8aecb273d750b88a4ea5a213e20c93cb121ce1 libblastrampoline.v5.11.1+0.aarch64-linux-musl.tar.gz/md5/462639b4b21f5b7626febfdd1ae1f824 libblastrampoline.v5.11.1+0.aarch64-linux-musl.tar.gz/sha512/866004e3fcdb5ab7418c8a2cae8f820c5739a511b9d0b32d0013ef72ff99f87396f5912d8fbd6bf4d01d7432715c6971ad1a5419c34fa7b048d0fbbe0f8520d2 
+libblastrampoline.v5.11.1+0.aarch64-unknown-freebsd.tar.gz/md5/b6ce7d6d46d2ae772d4c3f629e754486 +libblastrampoline.v5.11.1+0.aarch64-unknown-freebsd.tar.gz/sha512/b2e7990cd0f7bb1bc376118955e397599c44aa3d09b0e87524ed8fed4bbb1d6a2b9c1bc02806bbeb86812ab0083c8016fe3c38894e0eb339025cf30f0cd64ffc libblastrampoline.v5.11.1+0.armv6l-linux-gnueabihf.tar.gz/md5/8a48cc8243257362dbc920dcadc42a22 libblastrampoline.v5.11.1+0.armv6l-linux-gnueabihf.tar.gz/sha512/bb4048c0e1ebbb89fc82b7cdabb0a4d9263b5344390c934b66c3a227631661ae956287870e4b156935f0a3c322049ceed3138fc033c92561fccf3675317af5b8 libblastrampoline.v5.11.1+0.armv6l-linux-musleabihf.tar.gz/md5/53c12d04337b63d18f4a5469a36132b6 diff --git a/deps/checksums/clang b/deps/checksums/clang index 7dc297db9c05b..2158589b5cef5 100644 --- a/deps/checksums/clang +++ b/deps/checksums/clang @@ -1,108 +1,112 @@ -Clang.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/ce3e582bcf2f92fdaf778339e8c51910 -Clang.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/4f977e8f0912f52b9b4054089a53a05f60bf7ae352c39b2541e68fecf3c21969d6d1b85e40d71d61040b65f7c60a2c33c8d259734bc1d2ddf77392fc425025cb -Clang.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/1eda08774c2f9975de32bdce4ffc72bd -Clang.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.tar.gz/sha512/c76ec1de9a25f4f8bd309336830cc07e1113b941ced12cb46976b24aebd4ab3d261c943dbc9cdfb34a01f27073af6f598dded31a4e03c62f229cd2e7d5982af6 -Clang.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/2817b0eeb83eff4e1f580729e02564ab -Clang.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/88242559299836c7a7b7d3a216353fc6880a587a839793ed71d6d053318d6e2071ff218587a082f2b5dd9fb2b0952b4c60e62030d707435607303708bb1e6d81 -Clang.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/d3f92998b7cc35a507cb1071baae8b02 -Clang.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/be22296623f604927e2e815a1cc149addda6d567270a50b2cdf77fe5b09f74313210a1ca7b1b3194592da23490ba1ccfdab9f520ce7219989e646f12208e418a -Clang.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/716300acfdee4415f1afa3b5571b102b -Clang.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/b97efb3c461ea7d2736a3a8bb6b6b5c99f02df9a095f11291319c629d44f1fb934b124d38af6be3e5cc7103c6f85793d7f185c607383461de5d0c846560a1d1b -Clang.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/034f44b2fc61791234d9580402002fb2 -Clang.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/0b4ff55afcec0b1e8fbd09fab57de8b44d5ded360d3b53132c7a7df8d3a3b83a495bf6e0c706784e678c6de46be3a72e8bfe562c7f8dfad90b82880849625e35 -Clang.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/54211070d63a2afac6350d06442cb145 -Clang.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/a58f8afe9a20f202cf3956f758dc13a10be240d78877a02cd006d7e972751ed65623eef7e92a7256d9ed9157d6e277302f93b58f583d86d386ed4945f3c7d875 -Clang.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/7084567b3637fe64088fdce357a255de -Clang.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/77ae83e159a814a7117cc859a0b2aa7a5d41f983d45b7eb1ce2fd2e93f8733ee067ac8c9fad9d5af90f852b8802043ef39c29b44430b2594892e57b61ccb680b -Clang.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/9e294d16a6e1c2c76c03f32cbbbfbe23 
-Clang.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/b8f83542b51f5cf953f6baed185550394744a8466307ee08525bf18a651fcecd7daafb98e75a0866b0e9a95a524e8940be7ae1878ba80d856182dcb7f7d2254e -Clang.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/70a41c2ffd55d2d87a7b8728287eb9fd -Clang.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/44bb3dea7227ee991b2666c43a88613d5b5d382eb560b5ad1f1184d38680c85a2ef961bac6ad71c2b920702c1ec6e09296198e7ff5e2929f4ba7839e55896e3f -Clang.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/95ee1406f8575898eb52e2c86ae18992 -Clang.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/4da66e4d397491836b3e539258844346fe50bff41e6c0628cbb5c0eac76147bd91d1720cec1523452efdb063adf6ef8792dc278244e1f8e194ef60a180442c56 -Clang.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/6c4e4e892b54ce81d73a8598728083e3 -Clang.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/53d08fd8b6782867cfa6ce001b14a2fde38bc9ffc85c7e148aebf59dd9c1c535b54eaea816c39fcff42abc456c1047ed13d688917302bcc5a281abe368bd29bb -Clang.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/5acc5853111bcd529eeb06ea31b329e5 -Clang.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/b1794f7cdfba838a7e43de8f66700ae44fd16d8f06300e8ab955044ae9bc96110c5ea72691841cd3787cdc93dfb91c6b257702c20390689a8d1b45a994db2fd8 -Clang.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/c4de50252e557fb126360001ddae6a97 -Clang.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/9343a7272c76d5341bb49273ff8d43bed09ad99b2879ec51cfb8946174181b286af82d85e2d3a13a375c7e7859e51e4a4f06031a6a3fe7e540700cfc6a795741 -Clang.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/af301478b20e56cb7fa1160cda2573a2 -Clang.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/8822c58df101c239221fead6fb523e677da04a065b42849a2e6ffff03dfd81e07f162a9bbdd29490ad9c0e0a33d362eec46608b9e6e42dfb4889da1c22191c91 -Clang.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/901d2808599d5ac5ac7b5ca4bc39833d -Clang.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/820756cad00b1fe927801a253bd3077709c2b067ae79f9e1812f3cc9e85a0b7ac2ce1534031b7c6f7bda3364b7173c1c508e7c7d316920fb9bb901c16c1b18c7 -Clang.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/d1f368604084e907c382aaf00efe452c -Clang.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/523b25f6b79e222eb65b5f4cd8f23b0d2c8b25b29af0df88efe45546ea57c7dabd88baef454fa0b76342d8d364739107271f25d3504380fdec5c9d225fcc2521 -Clang.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/e57c116b2ad1cf32307eb4e600ac80be -Clang.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/63366b983c7aac9fe1246b25432b2200c8316f569f6930eb12de3c867f448ffccb8756d418f92eae7751d4c9ce6c42cee38237e429b81530819684fd5150c93a -Clang.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/645929ce42276db10ab79184a60cd6e3 -Clang.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/65555ed26d9bd670b8363e5dad949822c2bf0e141a5418e1dc30c3f8a4733dd050620e40be2e7552c2551ecb30d4ef3e8f74cb240f1d441a9720a25f5a3bcaa7 -Clang.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/8424c6c6318dfa7bebeac33917b29453 
-Clang.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/6cf90c253f6b22358c2389a2347af2febd010117b22de0cc91ad713b8c8224627398004567c96b673650212eb5bd40bb97b9a637d46ddfeb3c72388d83445017 -Clang.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/ea8151dc1dc32befe579c7f9d7f13898 -Clang.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/ed518423e9ec35afd7983471cf9ff1e971b840f637f34e0f62a1f6c7379ea59d4dafbeb9a311d39761733ecc98c0318ce3d8883298f8998e9c741441c7c9616b -Clang.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/70ed39b13bcb0435fee63bc30ae25a39 -Clang.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/b2afa383346875514c62129c2991b3604c4fd3d507ecf4fc4244dec81d08b30218f5aa03dc4977185c2c9fb2d08848ddd373e448883ab472e5221ae5bf285c99 -Clang.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/e6798835128f663f0c837aed4463e34b -Clang.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/c99856e16bd42ff967479e2c89690ea41268f1d1f868e2628482eafdfa53a0d69ed7c21ecc68ff0859eef07d9fe02f4844fad5f13df26cee6cea3a4254446096 -Clang.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/92c1bd54b0474244e35c51952966a55b -Clang.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/2d7c3b60ba8b11cf903bc5ea720193852027cbe61ea0c8d6fac70be8f97691da3d36663aac6e61b68185dd83b42d09ad61dea973d9390271210d690295e4902c -Clang.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/c495d594f8ce1f701d1bab54d0b60521 -Clang.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/0261bf45403daccf236723383341dc791e9cb3b291bde97812378d85aed785f083d5deea3bf806480a04ef1b972b00dccfd0537e43532a066c64733b817c3d77 -Clang.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/41541de24d625271bdd5fad867b8eb0c -Clang.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/595226ad7ef75ab8ae03adb456b4ee9e884e9554c720b6c4ecbc38c75d446ddba7898be94630673074f09f40c6dc3e18fea9cee5a91b8b0e4727d20a180f670c -Clang.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/8bd8ca0436611e78882939067f6277f7 -Clang.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/27c7b06e93fb0fb516b1b240e0df6c95e8bad6aea04d637ba065c6fafd087bfa94d9136afd39273c8d82d9c467395dcbd7b16f6a4b829acb0c0d4a5677676a5b -Clang.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/424bfbd7b69ddf7b1199afaacde3e028 -Clang.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/9c48d949309aef6ee39371ff39a4f12c31bf3f25ddd288b317b2a17a803db73850cba2886598a1d10c4c154d511a4b79958d1acc012e92491a63f3925c522873 -Clang.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/6b0b3e045ad64ecdc9848898f30d5f34 -Clang.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/6c0f4bdabbbc94fc9e1fedc138b0bce99d383e380ae7222fb70f5935f17701d549f6486956c8a21731061e4bf60bbc52794f6ce6858b4d2adb89bf80f88795c0 -Clang.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/3b7a461ebf957756aeb2a2455b0a298c -Clang.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/74641a3636dd58c69415b19f0cb1de444215e22cfa9f0268fd549b5c53b206811d8beecdeb9692285613468d9a0569e836d225fb8361218438346914f6282839 -Clang.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/5e7b9ad5fc3af3bfdf262687cd248dfa 
-Clang.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/c54835fdf8e3e442b7c774d445c2f13c5dd8b3224f4ae165e72cc893ee5453d0112a9ca6d543b17f2c02a89471e2cff7cf022dc4c8188a5df25d101dd0f954b9 -Clang.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/3204bd8074d42920a6707cc8624c0dfe -Clang.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/74b26c4556ca18645cc15647d8abdbd46fb94c75169934af885e5773a880c066b2ff221402fdb4a53417b2c97ce589783f7fae6a8d56ee89cc1f70577b02b2a1 -Clang.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/7922c04964e0c1a5b44e95480290930d -Clang.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/4f0d675c1b85dc3e5007a62a7cfea412ca432d1276a259db3ed5a1bf0f33d6c555f16010de717a62e0e065e7c1dbaa66c281815eb9629d2b6c720b152820e582 -Clang.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/e023eba0ea0a327f53013d5e4d50d0cb -Clang.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/9fbdebce9c7375a20d1cd10e39a0c26b131af686cb5771034a6afc6cab08855e0cada2add616c01394424383333950d0dde9c55a9477fa139cf0ca3fc438b229 -Clang.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/a6c7d64ede931fb19e066a1c191e2f6d -Clang.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/1a085a4ea1efb910f2b529f3c0e51be4a5e31debbefd00ceefeddc352b36bea6d0de5a06ea7d509098d16416b536ffed3da8485feefad7a2f11b1bc148a0c8c2 -Clang.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/692af94ca3e5c3d229cbb459e266aadf -Clang.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/b27f05cfb0ada89cefc5a6f6527583b6b43d03525954d5b1ad1c807712efdb8750ea558a230b587a0c0d9e77c54d9f8978cc2f3884653808c7409eab1b32a055 -Clang.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/3b59b6aa4b18b5dbbc632811f2ffa270 -Clang.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/f8c4b593f969c723ff1931c4875ed52497d83d74b94121890e10c9fcca5f6bddc5067555dee9949e61e426586ae3e568375fc44f318a07b70571ee34fdf7032c -Clang.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/bc4be32ad57b13c3dabc80684a176ba7 -Clang.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/19a8346547b6c6adc2a9156e4b913b20137593752efa3648ad532b08de67cf015bba1eb023204755f48904c3381a3665c6c54fc8233c50e887a22ceebc652303 -Clang.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/05f37d069c7d59ec245d961d0928cb37 -Clang.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/3b0956fe770fd9230319bfcaefab4922f9aee3df3e8516edf81cb7d322132ee9ab899af4464c75b1042aa99e3bcb07ede6de5646bba2a57995fc2eb32d4d0861 -Clang.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.tar.gz/md5/0304434211ff4101a148fcc0c96455d4 -Clang.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.tar.gz/sha512/a033dc589fc95e63547b7ca82964116bec33ad6e78ac131934d4bb16988756d36c24d74761ca93b0e47dada1f3d2a63071cb3721ddb9af457cbeb164fe5f0f54 -Clang.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/4e5d1064d90f24d57d63f08b61baaab5 -Clang.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/cbfbe8b6f2be80e59b69d25d6af901ccb4807b12180208b69afa7223dd7d5249255265bc319c9402a1b0d1f0995940e3e72d7ecf1009f60d83021f8d35626a46 -Clang.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/22fead15b4c45398ca869821d04ce015 
-Clang.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/2ee7a7d3f293f7b63c89bbe3b541722c502a840883804ffe272848f4ac99b7a8ed350ebe92ec434dfdf03d1f4a5531c1367859f4a4603c98325abe5a0ad71177 -Clang.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/46dd01b10377cc3d45c6a42cac0a07e5 -Clang.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/957677ce4251938d0c5e066448762b38a21bcce5ed424072ccd58085167d61b7e45a88fe32375f6bbd43dfb579b65a9afc09a886a650fc634a8fb9c81f27c9e3 -Clang.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/bd9a61ea186a39162201341f0739fe84 -Clang.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/7a06d2a9ef20e88daa00d627d482ebbb6bf7223219d8b2a24aa60ac9eda24649d206b093d5bdb88b65c1e2b0d1ba0ad7dd927697e2bbac65bc9b42f9d14ad0d9 -Clang.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/60c98c6cc7d4446fb52b7585bc8709f3 -Clang.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/4d55464b4499a45f774e1000a8b015326d114103a3d348fb263367e5506ca6659444ea6ee2767712903757e83939cd446aff6fe2351438b644f0057053422b58 -Clang.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/90a512d1881c4af1f1abfd5e90e37356 -Clang.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/62d6d855aebd49f132d6470c7b0d5a0b965c6489b025046c1ea73fc53336030d6c5b4c867523a9206821f7fcf62fdb37ef0b7ff4b5eb04d07f40b65edd2c8e0f -Clang.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/c9eb9acb605d774db9636b82bf2e5f41 -Clang.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/96e1440b3b0378edf8907d4cf779b1c53d63f6d00fa798efe1b6aaa289135aba8fd00a8d6f55d9678136e9e07d0c189293aec64f46e66788b938e1f8e1fc2199 -Clang.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/5837070450c81d44395468d8e3671dc7 -Clang.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/0e8b674c0360f9586f03c7f5d0ffd5bc73dcde1e88eddf7d6360c1461adb8efffb104d8f454116a6a6cdc909973d0876745590b21009a9de56e12ce6e1c2e8fc -Clang.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/5c198d35df5cf6435f4f5ac91a78be01 -Clang.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/9ba0a532f499933320145834aec2b57a70410bf67af649ed675f00aebfd59de7c80e6f5d19e7ad57029a573090e63c5eba4b42b498a374810b48c8668b50dcaa -Clang.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.tar.gz/md5/8ac88c856d946e29d1121426de44e6bc -Clang.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.tar.gz/sha512/94af63ad3fb17d9c07f5256e2d474effc0e3d5ef66f4a9f3ffeb9bdd8f1577c35e4d0aceb8b4746ab857d8f164141790ed494b7f687e644e040d2f3820f9e1fe -Clang.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/b4be546ff44019cf46d3250dd9a4321f -Clang.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/7ce5e4d68e18021392355359f59931219eeec3be4edd01f7a18b7bee499b589414bcea73820ee38dbc3b5ab12d912a93374b4a616b10ba491f5d41b6b33f3d9e -Clang.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/4616c348320d8704215d58c7268de6d7 -Clang.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/b4c21147ed21d41321e04b092d47f99338c6ac7d50b8328ceb8ae26d6382955cbcd655dddd39f0de3d3c36a5fda7084a33272aad9f6cd9585c87fee68be73a68 -Clang.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/bf9cf2efb938b68ac7e1560c464f9051 
-Clang.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/ca29438393d393912571a96ce59bdaadcacbb329342c42a0de0e8d8ab52f69d4e6966822c0743d99b1a277c8715c1f72ddd490b781b45bd691df2c137ed42a1d -Clang.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/94138893eaaa99f37354317bc13cf7e0 -Clang.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/1b03d8d4e407372875667f25f74abdaac9be0b81c6229dc1c4c1714589efde6b1f8c76302a2545b103ee4f9812fa78f9e06e5d5bb5bc3903ce579328899faa2f +Clang.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/1dfebd0db436a282c2ccb01375e48419 +Clang.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/d5a8fc8be8bdcfb98c3f868c1a08cb18bffaca0c9fc6efbb11beaadf40ed5ca7e2a70c3be783a7cc93b23f39e06167784f63e91abe726240ad62d11210337794 +Clang.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/f82250af13bd879486677cbf1ae0b7dd +Clang.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/sha512/c4f67a59e30ea7bfb9ac83f07b1e07c856113dbc674d3a7d01cc7bbc326a1529f97d0e1a08a3aa60e110f901dba6d4888bae7060e24065444baaf633482108d7 +Clang.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/2817b0eeb83eff4e1f580729e02564ab +Clang.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/88242559299836c7a7b7d3a216353fc6880a587a839793ed71d6d053318d6e2071ff218587a082f2b5dd9fb2b0952b4c60e62030d707435607303708bb1e6d81 +Clang.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/d3f92998b7cc35a507cb1071baae8b02 +Clang.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/be22296623f604927e2e815a1cc149addda6d567270a50b2cdf77fe5b09f74313210a1ca7b1b3194592da23490ba1ccfdab9f520ce7219989e646f12208e418a +Clang.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/716300acfdee4415f1afa3b5571b102b +Clang.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/b97efb3c461ea7d2736a3a8bb6b6b5c99f02df9a095f11291319c629d44f1fb934b124d38af6be3e5cc7103c6f85793d7f185c607383461de5d0c846560a1d1b +Clang.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/034f44b2fc61791234d9580402002fb2 +Clang.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/0b4ff55afcec0b1e8fbd09fab57de8b44d5ded360d3b53132c7a7df8d3a3b83a495bf6e0c706784e678c6de46be3a72e8bfe562c7f8dfad90b82880849625e35 +Clang.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/54211070d63a2afac6350d06442cb145 +Clang.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/a58f8afe9a20f202cf3956f758dc13a10be240d78877a02cd006d7e972751ed65623eef7e92a7256d9ed9157d6e277302f93b58f583d86d386ed4945f3c7d875 +Clang.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/7084567b3637fe64088fdce357a255de +Clang.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/77ae83e159a814a7117cc859a0b2aa7a5d41f983d45b7eb1ce2fd2e93f8733ee067ac8c9fad9d5af90f852b8802043ef39c29b44430b2594892e57b61ccb680b +Clang.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/9e294d16a6e1c2c76c03f32cbbbfbe23 +Clang.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/b8f83542b51f5cf953f6baed185550394744a8466307ee08525bf18a651fcecd7daafb98e75a0866b0e9a95a524e8940be7ae1878ba80d856182dcb7f7d2254e +Clang.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/70a41c2ffd55d2d87a7b8728287eb9fd 
+Clang.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/44bb3dea7227ee991b2666c43a88613d5b5d382eb560b5ad1f1184d38680c85a2ef961bac6ad71c2b920702c1ec6e09296198e7ff5e2929f4ba7839e55896e3f +Clang.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/1f673de0cc2ec59cc62dee6040b2d6b7 +Clang.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/9b2e64cd2cd510677375f3d07d434f46066adb7464751dfeaebb057129f6b092d8425b0728f60dd9a2ec4cb29625ffc5cda57acf1d5465d5f82765369954c58a +Clang.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.tar.gz/md5/0d91f5a19060c6a1b1dadb3befa0fe6a +Clang.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.tar.gz/sha512/9f9aaa36e1dab2d98a17602ed0b27163729928bfe4ac0f7b565cff1e0a653855b0f3e404830cb77ff35d93c0d5c42ed11d2506aecb5ec8d3752fbdfeb0ff5b4c +Clang.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/95ee1406f8575898eb52e2c86ae18992 +Clang.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/4da66e4d397491836b3e539258844346fe50bff41e6c0628cbb5c0eac76147bd91d1720cec1523452efdb063adf6ef8792dc278244e1f8e194ef60a180442c56 +Clang.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/6c4e4e892b54ce81d73a8598728083e3 +Clang.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/53d08fd8b6782867cfa6ce001b14a2fde38bc9ffc85c7e148aebf59dd9c1c535b54eaea816c39fcff42abc456c1047ed13d688917302bcc5a281abe368bd29bb +Clang.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/5acc5853111bcd529eeb06ea31b329e5 +Clang.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/b1794f7cdfba838a7e43de8f66700ae44fd16d8f06300e8ab955044ae9bc96110c5ea72691841cd3787cdc93dfb91c6b257702c20390689a8d1b45a994db2fd8 +Clang.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/c4de50252e557fb126360001ddae6a97 +Clang.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/9343a7272c76d5341bb49273ff8d43bed09ad99b2879ec51cfb8946174181b286af82d85e2d3a13a375c7e7859e51e4a4f06031a6a3fe7e540700cfc6a795741 +Clang.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/af301478b20e56cb7fa1160cda2573a2 +Clang.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/8822c58df101c239221fead6fb523e677da04a065b42849a2e6ffff03dfd81e07f162a9bbdd29490ad9c0e0a33d362eec46608b9e6e42dfb4889da1c22191c91 +Clang.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/901d2808599d5ac5ac7b5ca4bc39833d +Clang.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/820756cad00b1fe927801a253bd3077709c2b067ae79f9e1812f3cc9e85a0b7ac2ce1534031b7c6f7bda3364b7173c1c508e7c7d316920fb9bb901c16c1b18c7 +Clang.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/d1f368604084e907c382aaf00efe452c +Clang.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/523b25f6b79e222eb65b5f4cd8f23b0d2c8b25b29af0df88efe45546ea57c7dabd88baef454fa0b76342d8d364739107271f25d3504380fdec5c9d225fcc2521 +Clang.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/e57c116b2ad1cf32307eb4e600ac80be +Clang.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/63366b983c7aac9fe1246b25432b2200c8316f569f6930eb12de3c867f448ffccb8756d418f92eae7751d4c9ce6c42cee38237e429b81530819684fd5150c93a +Clang.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/645929ce42276db10ab79184a60cd6e3 
+Clang.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/65555ed26d9bd670b8363e5dad949822c2bf0e141a5418e1dc30c3f8a4733dd050620e40be2e7552c2551ecb30d4ef3e8f74cb240f1d441a9720a25f5a3bcaa7 +Clang.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/8424c6c6318dfa7bebeac33917b29453 +Clang.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/6cf90c253f6b22358c2389a2347af2febd010117b22de0cc91ad713b8c8224627398004567c96b673650212eb5bd40bb97b9a637d46ddfeb3c72388d83445017 +Clang.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/ea8151dc1dc32befe579c7f9d7f13898 +Clang.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/ed518423e9ec35afd7983471cf9ff1e971b840f637f34e0f62a1f6c7379ea59d4dafbeb9a311d39761733ecc98c0318ce3d8883298f8998e9c741441c7c9616b +Clang.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/70ed39b13bcb0435fee63bc30ae25a39 +Clang.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/b2afa383346875514c62129c2991b3604c4fd3d507ecf4fc4244dec81d08b30218f5aa03dc4977185c2c9fb2d08848ddd373e448883ab472e5221ae5bf285c99 +Clang.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/e6798835128f663f0c837aed4463e34b +Clang.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/c99856e16bd42ff967479e2c89690ea41268f1d1f868e2628482eafdfa53a0d69ed7c21ecc68ff0859eef07d9fe02f4844fad5f13df26cee6cea3a4254446096 +Clang.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/92c1bd54b0474244e35c51952966a55b +Clang.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/2d7c3b60ba8b11cf903bc5ea720193852027cbe61ea0c8d6fac70be8f97691da3d36663aac6e61b68185dd83b42d09ad61dea973d9390271210d690295e4902c +Clang.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/c495d594f8ce1f701d1bab54d0b60521 +Clang.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/0261bf45403daccf236723383341dc791e9cb3b291bde97812378d85aed785f083d5deea3bf806480a04ef1b972b00dccfd0537e43532a066c64733b817c3d77 +Clang.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/41541de24d625271bdd5fad867b8eb0c +Clang.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/595226ad7ef75ab8ae03adb456b4ee9e884e9554c720b6c4ecbc38c75d446ddba7898be94630673074f09f40c6dc3e18fea9cee5a91b8b0e4727d20a180f670c +Clang.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/8bd8ca0436611e78882939067f6277f7 +Clang.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/27c7b06e93fb0fb516b1b240e0df6c95e8bad6aea04d637ba065c6fafd087bfa94d9136afd39273c8d82d9c467395dcbd7b16f6a4b829acb0c0d4a5677676a5b +Clang.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/424bfbd7b69ddf7b1199afaacde3e028 +Clang.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/9c48d949309aef6ee39371ff39a4f12c31bf3f25ddd288b317b2a17a803db73850cba2886598a1d10c4c154d511a4b79958d1acc012e92491a63f3925c522873 +Clang.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/6b0b3e045ad64ecdc9848898f30d5f34 +Clang.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/6c0f4bdabbbc94fc9e1fedc138b0bce99d383e380ae7222fb70f5935f17701d549f6486956c8a21731061e4bf60bbc52794f6ce6858b4d2adb89bf80f88795c0 +Clang.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/3b7a461ebf957756aeb2a2455b0a298c 
+Clang.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/74641a3636dd58c69415b19f0cb1de444215e22cfa9f0268fd549b5c53b206811d8beecdeb9692285613468d9a0569e836d225fb8361218438346914f6282839 +Clang.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/7533ca14f2932c35881ec05a5fb1e550 +Clang.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/a1e55212b92c6b6dffc7e7b316c98e421e8384f65d4339455694c53643a3509b817d2ecb4e8dcd5f147dcf1be3920bcf82c1cb1732b23657bc7e36abb800d21e +Clang.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/5525f1e02315a128195cacb7f6cf7d44 +Clang.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/9ee9fe4b1f52dc6533f177256e60b0579943e8bb5ba34118e5a02d25b6a4419133f3f819aae1e02d916cc17edd09330facdc6625d66564ad3cbd97ebfc439e32 +Clang.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/9f442a545e9c3fbb0898b7a233e5079f +Clang.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/99cf06a5cda26001ed8d8bb4915a6a5993d4c9c5a7a038ccff99a3fa752f207b02095bdf1689f5cb9a2584a7e3ef26436b840896fe9a5b9b626980ebc7d85751 +Clang.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/9910ade7fdfc95ac2db3113fbfde42e0 +Clang.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/6267f1b3dbbf7900bd72cd5700756e1e2c783157b87b1829af552f7dac36f749d9c7d2662235892105c959e1425914e944fbdd2f9521d2da7de321efe6c793a1 +Clang.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/a6c7d64ede931fb19e066a1c191e2f6d +Clang.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/1a085a4ea1efb910f2b529f3c0e51be4a5e31debbefd00ceefeddc352b36bea6d0de5a06ea7d509098d16416b536ffed3da8485feefad7a2f11b1bc148a0c8c2 +Clang.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/692af94ca3e5c3d229cbb459e266aadf +Clang.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/b27f05cfb0ada89cefc5a6f6527583b6b43d03525954d5b1ad1c807712efdb8750ea558a230b587a0c0d9e77c54d9f8978cc2f3884653808c7409eab1b32a055 +Clang.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/3b59b6aa4b18b5dbbc632811f2ffa270 +Clang.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/f8c4b593f969c723ff1931c4875ed52497d83d74b94121890e10c9fcca5f6bddc5067555dee9949e61e426586ae3e568375fc44f318a07b70571ee34fdf7032c +Clang.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/bc4be32ad57b13c3dabc80684a176ba7 +Clang.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/19a8346547b6c6adc2a9156e4b913b20137593752efa3648ad532b08de67cf015bba1eb023204755f48904c3381a3665c6c54fc8233c50e887a22ceebc652303 +Clang.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/13436ae410728f67c914fa7aed304736 +Clang.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/3f83f1659580f4c5085b2da1c1a90581dcb3c45f5da1cf4d1801e230bb56fdb78a98cfe41b755949b34316ae08c55f5b2d558bb4026503ef2afa895b59dc861c +Clang.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.tar.gz/md5/fa79485d88d173e15fb99b2f7fd793bc +Clang.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.tar.gz/sha512/4886be75294979cdb55030747c664bd4cc2a2fa1489790d744e918a39fddcc5c214d4f39755d58206fd1bfd077774302b2be506ee80e4d0a2e2e2de642dbf124 +Clang.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/4e5d1064d90f24d57d63f08b61baaab5 
+Clang.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/cbfbe8b6f2be80e59b69d25d6af901ccb4807b12180208b69afa7223dd7d5249255265bc319c9402a1b0d1f0995940e3e72d7ecf1009f60d83021f8d35626a46 +Clang.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/22fead15b4c45398ca869821d04ce015 +Clang.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/2ee7a7d3f293f7b63c89bbe3b541722c502a840883804ffe272848f4ac99b7a8ed350ebe92ec434dfdf03d1f4a5531c1367859f4a4603c98325abe5a0ad71177 +Clang.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/46dd01b10377cc3d45c6a42cac0a07e5 +Clang.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/957677ce4251938d0c5e066448762b38a21bcce5ed424072ccd58085167d61b7e45a88fe32375f6bbd43dfb579b65a9afc09a886a650fc634a8fb9c81f27c9e3 +Clang.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/bd9a61ea186a39162201341f0739fe84 +Clang.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/7a06d2a9ef20e88daa00d627d482ebbb6bf7223219d8b2a24aa60ac9eda24649d206b093d5bdb88b65c1e2b0d1ba0ad7dd927697e2bbac65bc9b42f9d14ad0d9 +Clang.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/60c98c6cc7d4446fb52b7585bc8709f3 +Clang.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/4d55464b4499a45f774e1000a8b015326d114103a3d348fb263367e5506ca6659444ea6ee2767712903757e83939cd446aff6fe2351438b644f0057053422b58 +Clang.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/90a512d1881c4af1f1abfd5e90e37356 +Clang.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/62d6d855aebd49f132d6470c7b0d5a0b965c6489b025046c1ea73fc53336030d6c5b4c867523a9206821f7fcf62fdb37ef0b7ff4b5eb04d07f40b65edd2c8e0f +Clang.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/c9eb9acb605d774db9636b82bf2e5f41 +Clang.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/96e1440b3b0378edf8907d4cf779b1c53d63f6d00fa798efe1b6aaa289135aba8fd00a8d6f55d9678136e9e07d0c189293aec64f46e66788b938e1f8e1fc2199 +Clang.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/5837070450c81d44395468d8e3671dc7 +Clang.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/0e8b674c0360f9586f03c7f5d0ffd5bc73dcde1e88eddf7d6360c1461adb8efffb104d8f454116a6a6cdc909973d0876745590b21009a9de56e12ce6e1c2e8fc +Clang.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/f94431ce7b8a12774925348a076e39e9 +Clang.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/cdbcf5bd32a9fa4d5204e77f12d60b1fde540fc93243236f26896106d21f3b2106b0c3fcd93b1a7bbd6a9c4688200837f309b216ec9f334f8c8f28144b36d4ca +Clang.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.tar.gz/md5/4ca4824a441d51cd4d1fe3516d7841fb +Clang.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.tar.gz/sha512/ac0a046ede4b3c9bc75bbf7d1189e4679df6c35ca50e97fd6dadf437aba00816f66038db5dfddcfe2c49140c8416c79cfa4b67db371b4185ee897e0585b96301 +Clang.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/844031bd67137863f8e7dcd65aa6e45b +Clang.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/56efe56f02f0d13e03ba029cc2ccf2aaf2d50479d8153b7922392ff90327e3cded2c1e7fc8cd799737cd988e64bb9c74f2c0ea6156a04fc08f22a4dbe6156cba +Clang.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/cc2705c3a856574835383aac7185ab32 
+Clang.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/eb037e996168d6d8987ff50c45e879f5e9779b044075f91cd8bbfe096260cd155b36f80bad840e88e1ab7970517e692875d5e84adc447153f167dfed886e0442 +Clang.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/2103b507b6aec55f8cb58a0c86aa461c +Clang.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/d9a4d6eeec2aac1bc41a0be40526842e782d0796a306d3c1b5e53f7f146628ed974c8a4c4dce8baff5734d973966b4f3e1310be40b90ced9981ace4c4369a257 +Clang.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/daf3d83095fbad33bbb120314d6b53f7 +Clang.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/e68a71d0d89d16e0c5c9182b8a3336c67179f37e247c8eef3f21e362a3258ff4815f258d2430ca3883a52a95bc26c8e2c42e3dd081f4998ed309813f3d0a4aa6 diff --git a/deps/checksums/compilersupportlibraries b/deps/checksums/compilersupportlibraries index 48843f21c0feb..a03ae8ee83f9a 100644 --- a/deps/checksums/compilersupportlibraries +++ b/deps/checksums/compilersupportlibraries @@ -1,92 +1,96 @@ -CompilerSupportLibraries.v1.1.1+0.aarch64-apple-darwin-libgfortran5.tar.gz/md5/20ebaad57850393b6ac9fa924e511fe4 -CompilerSupportLibraries.v1.1.1+0.aarch64-apple-darwin-libgfortran5.tar.gz/sha512/020de4d8b0ff6bedbadaa305ff8445e6849f12053762ea4aa68412d1ec763dbd86f479587a2fbb862487f1feb04d976c38099ddf3887817a3d32b3f029cf85b1 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-gnu-libgfortran3.tar.gz/md5/d641904255ee412c45b089d92c53262b -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-gnu-libgfortran3.tar.gz/sha512/ace0383fe9bd64faeed1fb05a11bbec932bd56b8460d06d2b7c3e1b5f4f6e9a9b3345937088684e5cd1ca9a85ef1a5ff56a97a1f60449cd6e35247de1e123d81 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-gnu-libgfortran4.tar.gz/md5/2a71f320d8b9242ad26aabed74cbf404 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-gnu-libgfortran4.tar.gz/sha512/03e2a4482baaca2d6ce5cc207224d03bd7851486ebe8072c7317f5fcdd641395d945552d9462ab44a9f2e4b0ffaa3874a76f314d67bc0f75393a1151ab518611 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-gnu-libgfortran5.tar.gz/md5/1beec15ad689a5f572040ca2a7b6a880 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-gnu-libgfortran5.tar.gz/sha512/27bbe212a8d43e841cf8f3e9964b72bc220fea03cf5e65721b02d2f3aa5193acdce41e512578ed6be935b413cd0d2224a6bcd2e9624931f39092ba3cfc5cbcc0 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-musl-libgfortran3.tar.gz/md5/9e949c2efe48a7b2a62bff7e1ffdede0 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-musl-libgfortran3.tar.gz/sha512/2947acb250f8ff4936da5ed02ddbfa492fc38bc87baa588a36bb892ba68b6636a912cda976f8fff00cc7a710c3bfb185826b4cd4a726750ef5f161d5f1aa21a2 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-musl-libgfortran4.tar.gz/md5/7202764b1a89a748b07460d9c40a9279 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-musl-libgfortran4.tar.gz/sha512/63236225a9becdd166c4395ea5081c64f57bc51af89c2edb5abeb419d6eb8224a380a633afd861bb84a12435fd19c8554cbe5ffadf8324ff2c7f17021ed53e69 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-musl-libgfortran5.tar.gz/md5/f66c30d3cec8057ae47f05df022ead51 -CompilerSupportLibraries.v1.1.1+0.aarch64-linux-musl-libgfortran5.tar.gz/sha512/5329d9469bb0f47560e52b15eb21ab70e0e2da0275bdb2f8e6ed4feb132bc9989a6b44984329455104546c95d05a05f8fb4f1cf232856219ba005100f4b16dc3 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-gnueabihf-libgfortran3.tar.gz/md5/05ff63780f5b7c8c6c590c3626f32ac0 
-CompilerSupportLibraries.v1.1.1+0.armv6l-linux-gnueabihf-libgfortran3.tar.gz/sha512/8d3c4149531f3782f5efbb6a6fbbb7080ba005298ba962b5bc5f66250ea9fde91b34836ed909c16f306d21d2e358f985360962e9362a8e807ccd4254da3bb19b -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-gnueabihf-libgfortran4.tar.gz/md5/3ca2b6e8101d831e546c1b6ed2ca9a42 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-gnueabihf-libgfortran4.tar.gz/sha512/21a0b9c5acde96c0a91303f4f395e55f272d5585ad18f0365105188d129a3ca94ad66d4dd99b471abdf41a7a7262a3b258fd04b887110ad15255b284cd1612b0 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-gnueabihf-libgfortran5.tar.gz/md5/d4d560b8ecce0ff2cb4dbc88cb25942a -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-gnueabihf-libgfortran5.tar.gz/sha512/d405f61525af1b2fe85107a70ed67b8a1eb767923487fa71539e0f49d6e70358c8a24f4ef1c224256cf677af99b54a2f8243f1e207350fcb14d426a7a6bb3915 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-musleabihf-libgfortran3.tar.gz/md5/8c6eddaa156fd0afee28ac5a154bc3f7 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-musleabihf-libgfortran3.tar.gz/sha512/b9fc86bb706ad98d61b63eb4cc8bfce6b2c67b58ba2cebecea7574f44790cce044bb1b4db1d20050b59538fa43b51cb352d752c77333a0f0621fde47c63a3596 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-musleabihf-libgfortran4.tar.gz/md5/0a54c16fea86c6dadb39eff65c465528 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-musleabihf-libgfortran4.tar.gz/sha512/c635c636384d3af5b4b078be7398fbc665a185eae69dd223279affb4836fb5c575d6ab296ae940ccbe73777bdb5e355f4f28a2fa27606ac143ff424641c60c65 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-musleabihf-libgfortran5.tar.gz/md5/892dfd91703f0f77d170a5371a1c25d4 -CompilerSupportLibraries.v1.1.1+0.armv6l-linux-musleabihf-libgfortran5.tar.gz/sha512/8ac59d00192c0e847168e61b3e93957f3909aab59ba8d05e47686a9f8b7226496f89b932151c42198ec966ccd47721cdf547a247ea4e5c61b22bfccce2ec591c -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-gnueabihf-libgfortran3.tar.gz/md5/05ff63780f5b7c8c6c590c3626f32ac0 -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-gnueabihf-libgfortran3.tar.gz/sha512/8d3c4149531f3782f5efbb6a6fbbb7080ba005298ba962b5bc5f66250ea9fde91b34836ed909c16f306d21d2e358f985360962e9362a8e807ccd4254da3bb19b -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-gnueabihf-libgfortran4.tar.gz/md5/3ca2b6e8101d831e546c1b6ed2ca9a42 -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-gnueabihf-libgfortran4.tar.gz/sha512/21a0b9c5acde96c0a91303f4f395e55f272d5585ad18f0365105188d129a3ca94ad66d4dd99b471abdf41a7a7262a3b258fd04b887110ad15255b284cd1612b0 -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-gnueabihf-libgfortran5.tar.gz/md5/d4d560b8ecce0ff2cb4dbc88cb25942a -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-gnueabihf-libgfortran5.tar.gz/sha512/d405f61525af1b2fe85107a70ed67b8a1eb767923487fa71539e0f49d6e70358c8a24f4ef1c224256cf677af99b54a2f8243f1e207350fcb14d426a7a6bb3915 -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-musleabihf-libgfortran3.tar.gz/md5/8c6eddaa156fd0afee28ac5a154bc3f7 -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-musleabihf-libgfortran3.tar.gz/sha512/b9fc86bb706ad98d61b63eb4cc8bfce6b2c67b58ba2cebecea7574f44790cce044bb1b4db1d20050b59538fa43b51cb352d752c77333a0f0621fde47c63a3596 -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-musleabihf-libgfortran4.tar.gz/md5/0a54c16fea86c6dadb39eff65c465528 -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-musleabihf-libgfortran4.tar.gz/sha512/c635c636384d3af5b4b078be7398fbc665a185eae69dd223279affb4836fb5c575d6ab296ae940ccbe73777bdb5e355f4f28a2fa27606ac143ff424641c60c65 
-CompilerSupportLibraries.v1.1.1+0.armv7l-linux-musleabihf-libgfortran5.tar.gz/md5/892dfd91703f0f77d170a5371a1c25d4 -CompilerSupportLibraries.v1.1.1+0.armv7l-linux-musleabihf-libgfortran5.tar.gz/sha512/8ac59d00192c0e847168e61b3e93957f3909aab59ba8d05e47686a9f8b7226496f89b932151c42198ec966ccd47721cdf547a247ea4e5c61b22bfccce2ec591c -CompilerSupportLibraries.v1.1.1+0.i686-linux-gnu-libgfortran3.tar.gz/md5/3094705222b6b61fd6a10422a73e1149 -CompilerSupportLibraries.v1.1.1+0.i686-linux-gnu-libgfortran3.tar.gz/sha512/27f874cde357ffa45aaa10f2e620ec0f8ab4e5a8bf4607fc023a2ec42040bcc9a724f959237c340d67451f8621402fa05133c1420086b87135f40326c30b97af -CompilerSupportLibraries.v1.1.1+0.i686-linux-gnu-libgfortran4.tar.gz/md5/ba0acaff60648efa3915348a8a353df8 -CompilerSupportLibraries.v1.1.1+0.i686-linux-gnu-libgfortran4.tar.gz/sha512/0b6aaf75363cbe6133ca3aed351ab58ef1e441f61375f5baf702d8043813c459d48e8af17630f1a07dc22772ec9b02076af33726ed94e6314ae37d5a139d6dcc -CompilerSupportLibraries.v1.1.1+0.i686-linux-gnu-libgfortran5.tar.gz/md5/95f1d57cfc43677e40bfc121bce79274 -CompilerSupportLibraries.v1.1.1+0.i686-linux-gnu-libgfortran5.tar.gz/sha512/edacd9960e9de1236c91752e103cddfc018d697e87fabb3cceadf36153b4e97842ef284bd1532290a5620007234882b4c4cd4f36525b61763d97b2f608358262 -CompilerSupportLibraries.v1.1.1+0.i686-linux-musl-libgfortran3.tar.gz/md5/f37fe1818e1634476c44afae478611c8 -CompilerSupportLibraries.v1.1.1+0.i686-linux-musl-libgfortran3.tar.gz/sha512/6e4e3eb5ac9570bfdf5280f59167eb6c4a74f3aa152afb4c5d180b9a6cdbdca557e7dd13f0b5b76943b45a65e848fe77c5b3bbc6ddb0fd846d03fbc9fbedf7ce -CompilerSupportLibraries.v1.1.1+0.i686-linux-musl-libgfortran4.tar.gz/md5/b4ffd52179aa0006c56f279b87cb7556 -CompilerSupportLibraries.v1.1.1+0.i686-linux-musl-libgfortran4.tar.gz/sha512/a047ac7db204c31802f646351af51c55fe06498e851b2df58d7f93f75d9c0067f8736f247f108991ec01ac7f86f3026ecf58b5f2f3a76d7eab00130754e7f704 -CompilerSupportLibraries.v1.1.1+0.i686-linux-musl-libgfortran5.tar.gz/md5/2d38fc835f236f89f457fdf859ccb903 -CompilerSupportLibraries.v1.1.1+0.i686-linux-musl-libgfortran5.tar.gz/sha512/51fbe41efbce33b1cf3728df6fa59fd0e85a13308b3e868fe9f70f4d67857615f83542ba69be824a73e89959503dd7a11335d1c495704bd7d6cad6656d0c5d57 -CompilerSupportLibraries.v1.1.1+0.i686-w64-mingw32-libgfortran3.tar.gz/md5/9650002f6729c0964d33afcab334d77d -CompilerSupportLibraries.v1.1.1+0.i686-w64-mingw32-libgfortran3.tar.gz/sha512/0b7907811a13d09b7b33203c7e46888308c7d6fcf5d69790babafc39f640541551f784264247f159a552f15df1ddd061c421a93b983d838d3bd7f85ba6427f70 -CompilerSupportLibraries.v1.1.1+0.i686-w64-mingw32-libgfortran4.tar.gz/md5/47e9fb99906b9647e26e4126a913074e -CompilerSupportLibraries.v1.1.1+0.i686-w64-mingw32-libgfortran4.tar.gz/sha512/d7285691fbe1318e48e061d678e54890762cc16996652a34b190924cc1462d24ab0b08729945eb25f4bef60e60d50f3e78db57d4cda0302b8ba579db8a1311e1 -CompilerSupportLibraries.v1.1.1+0.i686-w64-mingw32-libgfortran5.tar.gz/md5/b588b2710f2b83d2c70c6104e585a3bd -CompilerSupportLibraries.v1.1.1+0.i686-w64-mingw32-libgfortran5.tar.gz/sha512/b62a63b0c8750f85fc265db88456307b794e912352a68997c7cce06444391307c03edbe5b901833f53c5bd55f5a1e61a586538b08487cc139a2d71fccdce1d31 -CompilerSupportLibraries.v1.1.1+0.powerpc64le-linux-gnu-libgfortran3.tar.gz/md5/7cce4f3dc057ebebaa677bf6f0d51e9e -CompilerSupportLibraries.v1.1.1+0.powerpc64le-linux-gnu-libgfortran3.tar.gz/sha512/a0dd93905f0ede4da5e2fbacf2579154db8ac8e9963c77fb62284489686f2aa372925b3341742d86430a839267421af55f6e1e413473d17f13a1a199e6a904a0 
-CompilerSupportLibraries.v1.1.1+0.powerpc64le-linux-gnu-libgfortran4.tar.gz/md5/06ee6aaeca78b3e9005f53f1fa32731f -CompilerSupportLibraries.v1.1.1+0.powerpc64le-linux-gnu-libgfortran4.tar.gz/sha512/ff0e33ce9f93b3a867cf409b95e763efbc8f4dde65ed19107eb14d29460d084f253e03ebd6375f1da996182b3d96e1fda4abff06507258da9a89ece36663db84 -CompilerSupportLibraries.v1.1.1+0.powerpc64le-linux-gnu-libgfortran5.tar.gz/md5/483251d28076ee959dff131d13d7e53b -CompilerSupportLibraries.v1.1.1+0.powerpc64le-linux-gnu-libgfortran5.tar.gz/sha512/a7c9053a8c1b784cb6459762f26e0c2106a9758cbe2aefe8975a14aaaf61b8a08e51c465e733e44d01537beb59d467c57e536ebd8b27b7b68f46945174c469c7 -CompilerSupportLibraries.v1.1.1+0.x86_64-apple-darwin-libgfortran3.tar.gz/md5/a147bf3a6d6550c177b8a784b9b02e21 -CompilerSupportLibraries.v1.1.1+0.x86_64-apple-darwin-libgfortran3.tar.gz/sha512/c6f7a13f0195eae8f7ad980a4b24de9b155be69c4437522723411f9866a4aee3c5b350ee2f0c95f41f19aba43acaca78309881157e8498df0664c902d0c05a5d -CompilerSupportLibraries.v1.1.1+0.x86_64-apple-darwin-libgfortran4.tar.gz/md5/3f19c9d0e723a8d5591357ac3a9452a0 -CompilerSupportLibraries.v1.1.1+0.x86_64-apple-darwin-libgfortran4.tar.gz/sha512/5752bac310d80ed2dc1fc3d6580300d185787b9b933e31c8e0f572099abd0727d9483da8f9af858f706e96a183d2b10702c44381a080438cbb17d6459321ccfb -CompilerSupportLibraries.v1.1.1+0.x86_64-apple-darwin-libgfortran5.tar.gz/md5/ad0f0e2fe3e7d147a0a27271a2aba0fc -CompilerSupportLibraries.v1.1.1+0.x86_64-apple-darwin-libgfortran5.tar.gz/sha512/f42231adea3d0b6133c3b5bc5fbf765bc6a7ba8ef0f407fa1b8def36dd8a71d20ef39fb6e57b43208489c2795a96562cdbf15f3d20b3f3a09edb29b99d19a33a -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-gnu-libgfortran3.tar.gz/md5/4c78d56dbbbff682c0a78d11fb9d1e70 -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-gnu-libgfortran3.tar.gz/sha512/0e9d6dcc4b8fddaaa94a26a46e915d33fb474f8a8ee14edd4d1c7e774846c44c5c5d852649a4f70409c99ac0e1d458077b7f0eb7dc0b0326ee8b625644d7074d -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-gnu-libgfortran4.tar.gz/md5/039d37f813b183c75feebadd21011eb6 -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-gnu-libgfortran4.tar.gz/sha512/05e7291de1fd2520247402f0db9d348fdd7a02d8dd9133ac65701f88d237110a3cc6c6e2c5717364ab786b6e6063038ec10c9605e77bc4dbe1064a0e77617f5d -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-gnu-libgfortran5.tar.gz/md5/a985f13a85eb14d1b6339ba4983dc372 -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-gnu-libgfortran5.tar.gz/sha512/27468ccd5642e6e11bd5972684518a0fb883bf4835ac18f5279c3fce97b1779131c7d9e39d8de26a15c293c832946334e964919f51d7679cd0569ce82b938579 -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-musl-libgfortran3.tar.gz/md5/9d86ce2fe481ea97a1fd098bd47d524c -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-musl-libgfortran3.tar.gz/sha512/a865a4127bacaedd81b6c81279f6a44bc3497ab29a0401f66da1abfc0738ea459be9f158d06969c161a65925739665084bec5f8650a8cd1e8f0d08f1f44d729f -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-musl-libgfortran4.tar.gz/md5/86d9db869a7af6c96dea39f5d9d90505 -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-musl-libgfortran4.tar.gz/sha512/01e0c69b04138989200ded92eddae6ff1873d3a440d17273d08bee40d53b2929e35bfd14be051074fe78671cac34ac2dd7360c1571790ee52f94a5921de42a65 -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-musl-libgfortran5.tar.gz/md5/e72d28df4bcb60ab2f3389046e7c83a8 -CompilerSupportLibraries.v1.1.1+0.x86_64-linux-musl-libgfortran5.tar.gz/sha512/cac193a26328ddeff5f7bcc3d7207101c574f9bdb1bff5c2b925315c5c2404a2fdb6591d1968f30931373fbfcae9bda784c72e65580ad3acc398448cd193f65d 
-CompilerSupportLibraries.v1.1.1+0.x86_64-unknown-freebsd-libgfortran3.tar.gz/md5/35642304a9a2f435cf5214b2715198fe -CompilerSupportLibraries.v1.1.1+0.x86_64-unknown-freebsd-libgfortran3.tar.gz/sha512/a67f41ba31c99a064f504f508711537f9e90089ca5352bfc2698c3fcd3e499ca716f07ffeac4fb1b88c2c934f7f380f262af8c863d3b16ac7e805d5c805ab358 -CompilerSupportLibraries.v1.1.1+0.x86_64-unknown-freebsd-libgfortran4.tar.gz/md5/01df0fbb265e5ff1a480a7a5e23b0835 -CompilerSupportLibraries.v1.1.1+0.x86_64-unknown-freebsd-libgfortran4.tar.gz/sha512/57a79f2b8e846c1514dcb18420f26ae2889962040f410b746836cab4395749155fa9cd9d00d4c25954c0ffa72f9f3823b1b50688a20ddf675301f64e0d4b5c7e -CompilerSupportLibraries.v1.1.1+0.x86_64-unknown-freebsd-libgfortran5.tar.gz/md5/1f1f6380ce8815cc9cedcea0b40860e7 -CompilerSupportLibraries.v1.1.1+0.x86_64-unknown-freebsd-libgfortran5.tar.gz/sha512/a88ea8af8c8df792861812bfdf7f1bcaae31582ab78ce78b47a0dc6fd57b93441c0471f529ce23877131ac9701c6eed72ce89241746e18271f3686fbd718138c -CompilerSupportLibraries.v1.1.1+0.x86_64-w64-mingw32-libgfortran3.tar.gz/md5/38fc8c445a1a610db40a7609155e22d6 -CompilerSupportLibraries.v1.1.1+0.x86_64-w64-mingw32-libgfortran3.tar.gz/sha512/085652c7ca583c3623611ca9262b70765c9936c9feb5f9034b2c6b6d6677a7a1d7d201b83d82d0d268f3190bd1a62eab0124e8fae3625407dee7f1df89d4106c -CompilerSupportLibraries.v1.1.1+0.x86_64-w64-mingw32-libgfortran4.tar.gz/md5/f3f89eb3c2e441fde6e6b9c1c1a61183 -CompilerSupportLibraries.v1.1.1+0.x86_64-w64-mingw32-libgfortran4.tar.gz/sha512/c53f79e20ad043ab099873f38ece98c6bed22950610ba88b9c178a4bd943039cc426473828d509deb8c65c93309da1de87bdf36fb3954b8f8047277c418fe2e0 -CompilerSupportLibraries.v1.1.1+0.x86_64-w64-mingw32-libgfortran5.tar.gz/md5/024f7133425db23e215dc55589bb9171 -CompilerSupportLibraries.v1.1.1+0.x86_64-w64-mingw32-libgfortran5.tar.gz/sha512/819945496ea48dd44d8c0f12a11a358b7d1ebf198d60fbad576d74ddee68cdea98070cdd11ca96567d0c772ec007c03cbc83ff5c7d2ad737cbd486fe0c9afcd5 +CompilerSupportLibraries.v1.2.0+0.aarch64-apple-darwin-libgfortran5.tar.gz/md5/20ebaad57850393b6ac9fa924e511fe4 +CompilerSupportLibraries.v1.2.0+0.aarch64-apple-darwin-libgfortran5.tar.gz/sha512/020de4d8b0ff6bedbadaa305ff8445e6849f12053762ea4aa68412d1ec763dbd86f479587a2fbb862487f1feb04d976c38099ddf3887817a3d32b3f029cf85b1 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-gnu-libgfortran3.tar.gz/md5/d641904255ee412c45b089d92c53262b +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-gnu-libgfortran3.tar.gz/sha512/ace0383fe9bd64faeed1fb05a11bbec932bd56b8460d06d2b7c3e1b5f4f6e9a9b3345937088684e5cd1ca9a85ef1a5ff56a97a1f60449cd6e35247de1e123d81 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-gnu-libgfortran4.tar.gz/md5/2a71f320d8b9242ad26aabed74cbf404 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-gnu-libgfortran4.tar.gz/sha512/03e2a4482baaca2d6ce5cc207224d03bd7851486ebe8072c7317f5fcdd641395d945552d9462ab44a9f2e4b0ffaa3874a76f314d67bc0f75393a1151ab518611 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-gnu-libgfortran5.tar.gz/md5/1beec15ad689a5f572040ca2a7b6a880 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-gnu-libgfortran5.tar.gz/sha512/27bbe212a8d43e841cf8f3e9964b72bc220fea03cf5e65721b02d2f3aa5193acdce41e512578ed6be935b413cd0d2224a6bcd2e9624931f39092ba3cfc5cbcc0 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-musl-libgfortran3.tar.gz/md5/9e949c2efe48a7b2a62bff7e1ffdede0 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-musl-libgfortran3.tar.gz/sha512/2947acb250f8ff4936da5ed02ddbfa492fc38bc87baa588a36bb892ba68b6636a912cda976f8fff00cc7a710c3bfb185826b4cd4a726750ef5f161d5f1aa21a2 
+CompilerSupportLibraries.v1.2.0+0.aarch64-linux-musl-libgfortran4.tar.gz/md5/7202764b1a89a748b07460d9c40a9279 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-musl-libgfortran4.tar.gz/sha512/63236225a9becdd166c4395ea5081c64f57bc51af89c2edb5abeb419d6eb8224a380a633afd861bb84a12435fd19c8554cbe5ffadf8324ff2c7f17021ed53e69 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-musl-libgfortran5.tar.gz/md5/f66c30d3cec8057ae47f05df022ead51 +CompilerSupportLibraries.v1.2.0+0.aarch64-linux-musl-libgfortran5.tar.gz/sha512/5329d9469bb0f47560e52b15eb21ab70e0e2da0275bdb2f8e6ed4feb132bc9989a6b44984329455104546c95d05a05f8fb4f1cf232856219ba005100f4b16dc3 +CompilerSupportLibraries.v1.2.0+0.aarch64-unknown-freebsd-libgfortran4.tar.gz/md5/1d8ae93fe000440d00c404ba5044f169 +CompilerSupportLibraries.v1.2.0+0.aarch64-unknown-freebsd-libgfortran4.tar.gz/sha512/6733bd456c389c7c2cd83c5e44aa575552aa7ab5549a5b3efefbc745a6129aa76d78bacb1441208fc77c58b36f1b0775aa3a44bb97e6769ff730744ecf5e8abc +CompilerSupportLibraries.v1.2.0+0.aarch64-unknown-freebsd-libgfortran5.tar.gz/md5/bf1a5a3320a0a38133f04861afab33b8 +CompilerSupportLibraries.v1.2.0+0.aarch64-unknown-freebsd-libgfortran5.tar.gz/sha512/221502795c075f64196dae687a35b83aa83a9a1ecf1ec3e9f51613bd7431c526015e412132a081e00ca13a5730d733330df79baad6fccc8758c17db9877e59dd +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-gnueabihf-libgfortran3.tar.gz/md5/05ff63780f5b7c8c6c590c3626f32ac0 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-gnueabihf-libgfortran3.tar.gz/sha512/8d3c4149531f3782f5efbb6a6fbbb7080ba005298ba962b5bc5f66250ea9fde91b34836ed909c16f306d21d2e358f985360962e9362a8e807ccd4254da3bb19b +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-gnueabihf-libgfortran4.tar.gz/md5/3ca2b6e8101d831e546c1b6ed2ca9a42 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-gnueabihf-libgfortran4.tar.gz/sha512/21a0b9c5acde96c0a91303f4f395e55f272d5585ad18f0365105188d129a3ca94ad66d4dd99b471abdf41a7a7262a3b258fd04b887110ad15255b284cd1612b0 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-gnueabihf-libgfortran5.tar.gz/md5/d4d560b8ecce0ff2cb4dbc88cb25942a +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-gnueabihf-libgfortran5.tar.gz/sha512/d405f61525af1b2fe85107a70ed67b8a1eb767923487fa71539e0f49d6e70358c8a24f4ef1c224256cf677af99b54a2f8243f1e207350fcb14d426a7a6bb3915 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-musleabihf-libgfortran3.tar.gz/md5/8c6eddaa156fd0afee28ac5a154bc3f7 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-musleabihf-libgfortran3.tar.gz/sha512/b9fc86bb706ad98d61b63eb4cc8bfce6b2c67b58ba2cebecea7574f44790cce044bb1b4db1d20050b59538fa43b51cb352d752c77333a0f0621fde47c63a3596 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-musleabihf-libgfortran4.tar.gz/md5/0a54c16fea86c6dadb39eff65c465528 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-musleabihf-libgfortran4.tar.gz/sha512/c635c636384d3af5b4b078be7398fbc665a185eae69dd223279affb4836fb5c575d6ab296ae940ccbe73777bdb5e355f4f28a2fa27606ac143ff424641c60c65 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-musleabihf-libgfortran5.tar.gz/md5/892dfd91703f0f77d170a5371a1c25d4 +CompilerSupportLibraries.v1.2.0+0.armv6l-linux-musleabihf-libgfortran5.tar.gz/sha512/8ac59d00192c0e847168e61b3e93957f3909aab59ba8d05e47686a9f8b7226496f89b932151c42198ec966ccd47721cdf547a247ea4e5c61b22bfccce2ec591c +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-gnueabihf-libgfortran3.tar.gz/md5/05ff63780f5b7c8c6c590c3626f32ac0 
+CompilerSupportLibraries.v1.2.0+0.armv7l-linux-gnueabihf-libgfortran3.tar.gz/sha512/8d3c4149531f3782f5efbb6a6fbbb7080ba005298ba962b5bc5f66250ea9fde91b34836ed909c16f306d21d2e358f985360962e9362a8e807ccd4254da3bb19b +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-gnueabihf-libgfortran4.tar.gz/md5/3ca2b6e8101d831e546c1b6ed2ca9a42 +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-gnueabihf-libgfortran4.tar.gz/sha512/21a0b9c5acde96c0a91303f4f395e55f272d5585ad18f0365105188d129a3ca94ad66d4dd99b471abdf41a7a7262a3b258fd04b887110ad15255b284cd1612b0 +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-gnueabihf-libgfortran5.tar.gz/md5/d4d560b8ecce0ff2cb4dbc88cb25942a +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-gnueabihf-libgfortran5.tar.gz/sha512/d405f61525af1b2fe85107a70ed67b8a1eb767923487fa71539e0f49d6e70358c8a24f4ef1c224256cf677af99b54a2f8243f1e207350fcb14d426a7a6bb3915 +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-musleabihf-libgfortran3.tar.gz/md5/8c6eddaa156fd0afee28ac5a154bc3f7 +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-musleabihf-libgfortran3.tar.gz/sha512/b9fc86bb706ad98d61b63eb4cc8bfce6b2c67b58ba2cebecea7574f44790cce044bb1b4db1d20050b59538fa43b51cb352d752c77333a0f0621fde47c63a3596 +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-musleabihf-libgfortran4.tar.gz/md5/0a54c16fea86c6dadb39eff65c465528 +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-musleabihf-libgfortran4.tar.gz/sha512/c635c636384d3af5b4b078be7398fbc665a185eae69dd223279affb4836fb5c575d6ab296ae940ccbe73777bdb5e355f4f28a2fa27606ac143ff424641c60c65 +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-musleabihf-libgfortran5.tar.gz/md5/892dfd91703f0f77d170a5371a1c25d4 +CompilerSupportLibraries.v1.2.0+0.armv7l-linux-musleabihf-libgfortran5.tar.gz/sha512/8ac59d00192c0e847168e61b3e93957f3909aab59ba8d05e47686a9f8b7226496f89b932151c42198ec966ccd47721cdf547a247ea4e5c61b22bfccce2ec591c +CompilerSupportLibraries.v1.2.0+0.i686-linux-gnu-libgfortran3.tar.gz/md5/3094705222b6b61fd6a10422a73e1149 +CompilerSupportLibraries.v1.2.0+0.i686-linux-gnu-libgfortran3.tar.gz/sha512/27f874cde357ffa45aaa10f2e620ec0f8ab4e5a8bf4607fc023a2ec42040bcc9a724f959237c340d67451f8621402fa05133c1420086b87135f40326c30b97af +CompilerSupportLibraries.v1.2.0+0.i686-linux-gnu-libgfortran4.tar.gz/md5/ba0acaff60648efa3915348a8a353df8 +CompilerSupportLibraries.v1.2.0+0.i686-linux-gnu-libgfortran4.tar.gz/sha512/0b6aaf75363cbe6133ca3aed351ab58ef1e441f61375f5baf702d8043813c459d48e8af17630f1a07dc22772ec9b02076af33726ed94e6314ae37d5a139d6dcc +CompilerSupportLibraries.v1.2.0+0.i686-linux-gnu-libgfortran5.tar.gz/md5/95f1d57cfc43677e40bfc121bce79274 +CompilerSupportLibraries.v1.2.0+0.i686-linux-gnu-libgfortran5.tar.gz/sha512/edacd9960e9de1236c91752e103cddfc018d697e87fabb3cceadf36153b4e97842ef284bd1532290a5620007234882b4c4cd4f36525b61763d97b2f608358262 +CompilerSupportLibraries.v1.2.0+0.i686-linux-musl-libgfortran3.tar.gz/md5/f37fe1818e1634476c44afae478611c8 +CompilerSupportLibraries.v1.2.0+0.i686-linux-musl-libgfortran3.tar.gz/sha512/6e4e3eb5ac9570bfdf5280f59167eb6c4a74f3aa152afb4c5d180b9a6cdbdca557e7dd13f0b5b76943b45a65e848fe77c5b3bbc6ddb0fd846d03fbc9fbedf7ce +CompilerSupportLibraries.v1.2.0+0.i686-linux-musl-libgfortran4.tar.gz/md5/b4ffd52179aa0006c56f279b87cb7556 +CompilerSupportLibraries.v1.2.0+0.i686-linux-musl-libgfortran4.tar.gz/sha512/a047ac7db204c31802f646351af51c55fe06498e851b2df58d7f93f75d9c0067f8736f247f108991ec01ac7f86f3026ecf58b5f2f3a76d7eab00130754e7f704 +CompilerSupportLibraries.v1.2.0+0.i686-linux-musl-libgfortran5.tar.gz/md5/2d38fc835f236f89f457fdf859ccb903 
+CompilerSupportLibraries.v1.2.0+0.i686-linux-musl-libgfortran5.tar.gz/sha512/51fbe41efbce33b1cf3728df6fa59fd0e85a13308b3e868fe9f70f4d67857615f83542ba69be824a73e89959503dd7a11335d1c495704bd7d6cad6656d0c5d57 +CompilerSupportLibraries.v1.2.0+0.i686-w64-mingw32-libgfortran3.tar.gz/md5/9650002f6729c0964d33afcab334d77d +CompilerSupportLibraries.v1.2.0+0.i686-w64-mingw32-libgfortran3.tar.gz/sha512/0b7907811a13d09b7b33203c7e46888308c7d6fcf5d69790babafc39f640541551f784264247f159a552f15df1ddd061c421a93b983d838d3bd7f85ba6427f70 +CompilerSupportLibraries.v1.2.0+0.i686-w64-mingw32-libgfortran4.tar.gz/md5/47e9fb99906b9647e26e4126a913074e +CompilerSupportLibraries.v1.2.0+0.i686-w64-mingw32-libgfortran4.tar.gz/sha512/d7285691fbe1318e48e061d678e54890762cc16996652a34b190924cc1462d24ab0b08729945eb25f4bef60e60d50f3e78db57d4cda0302b8ba579db8a1311e1 +CompilerSupportLibraries.v1.2.0+0.i686-w64-mingw32-libgfortran5.tar.gz/md5/b588b2710f2b83d2c70c6104e585a3bd +CompilerSupportLibraries.v1.2.0+0.i686-w64-mingw32-libgfortran5.tar.gz/sha512/b62a63b0c8750f85fc265db88456307b794e912352a68997c7cce06444391307c03edbe5b901833f53c5bd55f5a1e61a586538b08487cc139a2d71fccdce1d31 +CompilerSupportLibraries.v1.2.0+0.powerpc64le-linux-gnu-libgfortran3.tar.gz/md5/7cce4f3dc057ebebaa677bf6f0d51e9e +CompilerSupportLibraries.v1.2.0+0.powerpc64le-linux-gnu-libgfortran3.tar.gz/sha512/a0dd93905f0ede4da5e2fbacf2579154db8ac8e9963c77fb62284489686f2aa372925b3341742d86430a839267421af55f6e1e413473d17f13a1a199e6a904a0 +CompilerSupportLibraries.v1.2.0+0.powerpc64le-linux-gnu-libgfortran4.tar.gz/md5/06ee6aaeca78b3e9005f53f1fa32731f +CompilerSupportLibraries.v1.2.0+0.powerpc64le-linux-gnu-libgfortran4.tar.gz/sha512/ff0e33ce9f93b3a867cf409b95e763efbc8f4dde65ed19107eb14d29460d084f253e03ebd6375f1da996182b3d96e1fda4abff06507258da9a89ece36663db84 +CompilerSupportLibraries.v1.2.0+0.powerpc64le-linux-gnu-libgfortran5.tar.gz/md5/483251d28076ee959dff131d13d7e53b +CompilerSupportLibraries.v1.2.0+0.powerpc64le-linux-gnu-libgfortran5.tar.gz/sha512/a7c9053a8c1b784cb6459762f26e0c2106a9758cbe2aefe8975a14aaaf61b8a08e51c465e733e44d01537beb59d467c57e536ebd8b27b7b68f46945174c469c7 +CompilerSupportLibraries.v1.2.0+0.x86_64-apple-darwin-libgfortran3.tar.gz/md5/a147bf3a6d6550c177b8a784b9b02e21 +CompilerSupportLibraries.v1.2.0+0.x86_64-apple-darwin-libgfortran3.tar.gz/sha512/c6f7a13f0195eae8f7ad980a4b24de9b155be69c4437522723411f9866a4aee3c5b350ee2f0c95f41f19aba43acaca78309881157e8498df0664c902d0c05a5d +CompilerSupportLibraries.v1.2.0+0.x86_64-apple-darwin-libgfortran4.tar.gz/md5/3f19c9d0e723a8d5591357ac3a9452a0 +CompilerSupportLibraries.v1.2.0+0.x86_64-apple-darwin-libgfortran4.tar.gz/sha512/5752bac310d80ed2dc1fc3d6580300d185787b9b933e31c8e0f572099abd0727d9483da8f9af858f706e96a183d2b10702c44381a080438cbb17d6459321ccfb +CompilerSupportLibraries.v1.2.0+0.x86_64-apple-darwin-libgfortran5.tar.gz/md5/ad0f0e2fe3e7d147a0a27271a2aba0fc +CompilerSupportLibraries.v1.2.0+0.x86_64-apple-darwin-libgfortran5.tar.gz/sha512/f42231adea3d0b6133c3b5bc5fbf765bc6a7ba8ef0f407fa1b8def36dd8a71d20ef39fb6e57b43208489c2795a96562cdbf15f3d20b3f3a09edb29b99d19a33a +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-gnu-libgfortran3.tar.gz/md5/4c78d56dbbbff682c0a78d11fb9d1e70 +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-gnu-libgfortran3.tar.gz/sha512/0e9d6dcc4b8fddaaa94a26a46e915d33fb474f8a8ee14edd4d1c7e774846c44c5c5d852649a4f70409c99ac0e1d458077b7f0eb7dc0b0326ee8b625644d7074d +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-gnu-libgfortran4.tar.gz/md5/039d37f813b183c75feebadd21011eb6 
+CompilerSupportLibraries.v1.2.0+0.x86_64-linux-gnu-libgfortran4.tar.gz/sha512/05e7291de1fd2520247402f0db9d348fdd7a02d8dd9133ac65701f88d237110a3cc6c6e2c5717364ab786b6e6063038ec10c9605e77bc4dbe1064a0e77617f5d +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-gnu-libgfortran5.tar.gz/md5/a985f13a85eb14d1b6339ba4983dc372 +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-gnu-libgfortran5.tar.gz/sha512/27468ccd5642e6e11bd5972684518a0fb883bf4835ac18f5279c3fce97b1779131c7d9e39d8de26a15c293c832946334e964919f51d7679cd0569ce82b938579 +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-musl-libgfortran3.tar.gz/md5/9d86ce2fe481ea97a1fd098bd47d524c +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-musl-libgfortran3.tar.gz/sha512/a865a4127bacaedd81b6c81279f6a44bc3497ab29a0401f66da1abfc0738ea459be9f158d06969c161a65925739665084bec5f8650a8cd1e8f0d08f1f44d729f +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-musl-libgfortran4.tar.gz/md5/86d9db869a7af6c96dea39f5d9d90505 +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-musl-libgfortran4.tar.gz/sha512/01e0c69b04138989200ded92eddae6ff1873d3a440d17273d08bee40d53b2929e35bfd14be051074fe78671cac34ac2dd7360c1571790ee52f94a5921de42a65 +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-musl-libgfortran5.tar.gz/md5/e72d28df4bcb60ab2f3389046e7c83a8 +CompilerSupportLibraries.v1.2.0+0.x86_64-linux-musl-libgfortran5.tar.gz/sha512/cac193a26328ddeff5f7bcc3d7207101c574f9bdb1bff5c2b925315c5c2404a2fdb6591d1968f30931373fbfcae9bda784c72e65580ad3acc398448cd193f65d +CompilerSupportLibraries.v1.2.0+0.x86_64-unknown-freebsd-libgfortran3.tar.gz/md5/35642304a9a2f435cf5214b2715198fe +CompilerSupportLibraries.v1.2.0+0.x86_64-unknown-freebsd-libgfortran3.tar.gz/sha512/a67f41ba31c99a064f504f508711537f9e90089ca5352bfc2698c3fcd3e499ca716f07ffeac4fb1b88c2c934f7f380f262af8c863d3b16ac7e805d5c805ab358 +CompilerSupportLibraries.v1.2.0+0.x86_64-unknown-freebsd-libgfortran4.tar.gz/md5/01df0fbb265e5ff1a480a7a5e23b0835 +CompilerSupportLibraries.v1.2.0+0.x86_64-unknown-freebsd-libgfortran4.tar.gz/sha512/57a79f2b8e846c1514dcb18420f26ae2889962040f410b746836cab4395749155fa9cd9d00d4c25954c0ffa72f9f3823b1b50688a20ddf675301f64e0d4b5c7e +CompilerSupportLibraries.v1.2.0+0.x86_64-unknown-freebsd-libgfortran5.tar.gz/md5/1f1f6380ce8815cc9cedcea0b40860e7 +CompilerSupportLibraries.v1.2.0+0.x86_64-unknown-freebsd-libgfortran5.tar.gz/sha512/a88ea8af8c8df792861812bfdf7f1bcaae31582ab78ce78b47a0dc6fd57b93441c0471f529ce23877131ac9701c6eed72ce89241746e18271f3686fbd718138c +CompilerSupportLibraries.v1.2.0+0.x86_64-w64-mingw32-libgfortran3.tar.gz/md5/38fc8c445a1a610db40a7609155e22d6 +CompilerSupportLibraries.v1.2.0+0.x86_64-w64-mingw32-libgfortran3.tar.gz/sha512/085652c7ca583c3623611ca9262b70765c9936c9feb5f9034b2c6b6d6677a7a1d7d201b83d82d0d268f3190bd1a62eab0124e8fae3625407dee7f1df89d4106c +CompilerSupportLibraries.v1.2.0+0.x86_64-w64-mingw32-libgfortran4.tar.gz/md5/f3f89eb3c2e441fde6e6b9c1c1a61183 +CompilerSupportLibraries.v1.2.0+0.x86_64-w64-mingw32-libgfortran4.tar.gz/sha512/c53f79e20ad043ab099873f38ece98c6bed22950610ba88b9c178a4bd943039cc426473828d509deb8c65c93309da1de87bdf36fb3954b8f8047277c418fe2e0 +CompilerSupportLibraries.v1.2.0+0.x86_64-w64-mingw32-libgfortran5.tar.gz/md5/024f7133425db23e215dc55589bb9171 +CompilerSupportLibraries.v1.2.0+0.x86_64-w64-mingw32-libgfortran5.tar.gz/sha512/819945496ea48dd44d8c0f12a11a358b7d1ebf198d60fbad576d74ddee68cdea98070cdd11ca96567d0c772ec007c03cbc83ff5c7d2ad737cbd486fe0c9afcd5 diff --git a/deps/checksums/dsfmt b/deps/checksums/dsfmt index 0666e51efa994..99ba378adcd4c 100644 --- 
a/deps/checksums/dsfmt +++ b/deps/checksums/dsfmt @@ -1,34 +1,36 @@ -dSFMT.v2.2.5+0.aarch64-apple-darwin.tar.gz/md5/36284767f523bb297633d7da17a7db5a -dSFMT.v2.2.5+0.aarch64-apple-darwin.tar.gz/sha512/e6434c154db4c7187f227a550b159a8db8cfffc514323ca31112744a80a007ba5c95f2274cee30c0aa8caf1b20fb643cb814651a622b8e4bb2e5652878e504d2 -dSFMT.v2.2.5+0.aarch64-linux-gnu.tar.gz/md5/260e14855dbc7773a2ca906d58cc57f2 -dSFMT.v2.2.5+0.aarch64-linux-gnu.tar.gz/sha512/820ca4c6afde931e855b74015150f4ffbb513276c3fa7dbcc1ec8d34c02d4989fb7424a6e4f81f93d054811b5f54f8633d955b05acdb088387ee90f1c3b00915 -dSFMT.v2.2.5+0.aarch64-linux-musl.tar.gz/md5/7ddccbad6b5c9de4be187fe76637a0d8 -dSFMT.v2.2.5+0.aarch64-linux-musl.tar.gz/sha512/e3c225da00927096e3a6cd4abc681fba8f469cb74828e7054d4f5684d71dcb8e75c9a81f14fa10bfbb78f62f9567a31a92edcca8d797e5810a2a44a3fc17bc84 -dSFMT.v2.2.5+0.armv6l-linux-gnueabihf.tar.gz/md5/a70329e0a6c57009c6b6950fd34089f6 -dSFMT.v2.2.5+0.armv6l-linux-gnueabihf.tar.gz/sha512/4418c42165660adc050e872ef834f920c89ed6a0d2b816821672b1e862e947aad7efd023289da9bf05bb2eb9ec4b9d2561c403e2d5384d5314a4ba016b1f9cfc -dSFMT.v2.2.5+0.armv6l-linux-musleabihf.tar.gz/md5/6ffc798b8a0c847fa5cb93640bd66ab3 -dSFMT.v2.2.5+0.armv6l-linux-musleabihf.tar.gz/sha512/94e5ae07d0b1420abd7290519bce6f77deae634bbb4df31e3f02416bf509e555a9b1c9d19dd77ca76a308c2b86d5c9d4718b9ef83c13167b88a8181d8ca7e73a -dSFMT.v2.2.5+0.armv7l-linux-gnueabihf.tar.gz/md5/660d95aa08580ca1716a89c4d8b1eb24 -dSFMT.v2.2.5+0.armv7l-linux-gnueabihf.tar.gz/sha512/bc757a9f805047be5375f92c10a3f3eab69345a4ec5cc997f763e66be36144a74d414ff926df8e17b9d5a2394189269c3188c55e0b7c75a72495394d65510cef -dSFMT.v2.2.5+0.armv7l-linux-musleabihf.tar.gz/md5/78c487049092fe61949d506637c713bb -dSFMT.v2.2.5+0.armv7l-linux-musleabihf.tar.gz/sha512/03ddada4478f05eab7d2971b2deaf2cba91f084d7ce66fc8219bcb3cf5c308ea13959fed95568ca80f4ce11794e197092984919265716de8f2558e2cb30d94ce -dSFMT.v2.2.5+0.i686-linux-gnu.tar.gz/md5/11463fd3981a8c143d7aed691d18d4e0 -dSFMT.v2.2.5+0.i686-linux-gnu.tar.gz/sha512/db946a4fbd8a3163b8b1c25e02bfc4a841da7d2532892a99037bd48ac98e1840691e8cc0127d9457a82667a0131e4826cb4e9d0a13f127afc62da4eb68af5a3e -dSFMT.v2.2.5+0.i686-linux-musl.tar.gz/md5/a61405f72c9a3bba5718f078c68e61a5 -dSFMT.v2.2.5+0.i686-linux-musl.tar.gz/sha512/726f130bbbfd0dece4185b89a25a73f3b5b950ebfb7f86aea6e9cbcf9ae932e591d20b854de0b4985103dbf8b4b7cb3560661c5070af971cd2c1f3ec3e1ea7d2 -dSFMT.v2.2.5+0.i686-w64-mingw32.tar.gz/md5/3bc27ef8f26c7a26f096cf1d558d408d -dSFMT.v2.2.5+0.i686-w64-mingw32.tar.gz/sha512/ea3608d3ae3874ea57a1a08f69abe2a1638bc340db71c6fe3c4fd5637d8c54943bf16b099a46817387c1ed4cb5f3cd1c0ff19ae8a4ed85dd555555821af06374 -dSFMT.v2.2.5+0.powerpc64le-linux-gnu.tar.gz/md5/fd8c73961ef7c82201e6d86e8bf4324c -dSFMT.v2.2.5+0.powerpc64le-linux-gnu.tar.gz/sha512/1bd0ebd019cfc6f25f7ba007547c5ee297854655b93c55e90d8ead420875de5a087e38956693d5e901ff2abf667c72aa66fb34f587b82adf4b91b3d5d666b5c7 -dSFMT.v2.2.5+0.x86_64-apple-darwin.tar.gz/md5/6be9f2d3cd8d45a3fc1c3feeebcbbf00 -dSFMT.v2.2.5+0.x86_64-apple-darwin.tar.gz/sha512/5d17c2c0eedad6739b41b8a613e9e452df484136ecd11aed1f6b9f426ae1deaef9faf721810080ebc1701a88a3e7ae91b1992893598c33b342c3f876661f2f8e -dSFMT.v2.2.5+0.x86_64-linux-gnu.tar.gz/md5/fa671f4ca14b171d53c8866d03f9162a -dSFMT.v2.2.5+0.x86_64-linux-gnu.tar.gz/sha512/2e242a1448da0508ea88cc1a106f1e74f8d7e7562cd82b80d86abf9a8b454653ad7612e25c30ce00c23757e8a5b7b5736253b00a52f9473af6c5d4df768138f2 -dSFMT.v2.2.5+0.x86_64-linux-musl.tar.gz/md5/c648294163882ec539ab646542c74880 
-dSFMT.v2.2.5+0.x86_64-linux-musl.tar.gz/sha512/9e96a47d660854b6517364f0db40a2f4e0e3b814499a0349f7cf550b1c8d04589fca5eb4a75bf34f36d1b5d1b2277b3e9a961c887092abedd08f438e025329e7 -dSFMT.v2.2.5+0.x86_64-unknown-freebsd.tar.gz/md5/5b53e6c5b78102f563742b4b3d888ec6 -dSFMT.v2.2.5+0.x86_64-unknown-freebsd.tar.gz/sha512/5db5902c7ec2624add768b9e2866f9aac224a31bcb4114d450c45717e2b244521b7c511c059527d557a71639cff98e190a38cd3e28db5be0b1faf0a1762cb1a5 -dSFMT.v2.2.5+0.x86_64-w64-mingw32.tar.gz/md5/386adb3b7593c222dc7a1060a1356b21 -dSFMT.v2.2.5+0.x86_64-w64-mingw32.tar.gz/sha512/fe2ab5021126807b37042e89a22ef9a869c6a0a028680df445773b2affd11c2b02148be07d53504ea3842bb38bb62fe039529688266c1cba3545a892bd4dc185 +dSFMT.v2.2.5+1.aarch64-apple-darwin.tar.gz/md5/1ac287cb891e0bb758e5ae1195e661b7 +dSFMT.v2.2.5+1.aarch64-apple-darwin.tar.gz/sha512/c604d55fb955e9d707e26b654670f07f18ddd0dc93c1a2b678b9cea9b84a24e21c88eb49d39e3e74c930cdffa35e45f5a63e96ecb0a098e8ea538438dc7281bd +dSFMT.v2.2.5+1.aarch64-linux-gnu.tar.gz/md5/260e14855dbc7773a2ca906d58cc57f2 +dSFMT.v2.2.5+1.aarch64-linux-gnu.tar.gz/sha512/820ca4c6afde931e855b74015150f4ffbb513276c3fa7dbcc1ec8d34c02d4989fb7424a6e4f81f93d054811b5f54f8633d955b05acdb088387ee90f1c3b00915 +dSFMT.v2.2.5+1.aarch64-linux-musl.tar.gz/md5/7ddccbad6b5c9de4be187fe76637a0d8 +dSFMT.v2.2.5+1.aarch64-linux-musl.tar.gz/sha512/e3c225da00927096e3a6cd4abc681fba8f469cb74828e7054d4f5684d71dcb8e75c9a81f14fa10bfbb78f62f9567a31a92edcca8d797e5810a2a44a3fc17bc84 +dSFMT.v2.2.5+1.aarch64-unknown-freebsd.tar.gz/md5/84f560104ab5eac8f214559645235350 +dSFMT.v2.2.5+1.aarch64-unknown-freebsd.tar.gz/sha512/3668a37d2516c304b296e2dd7b93a45decb37774088b03438b6d7dec71766d98b2ca1d61c1b317f86ca118d078f53817b6bc86f0ed487185e18b5cc786060592 +dSFMT.v2.2.5+1.armv6l-linux-gnueabihf.tar.gz/md5/a70329e0a6c57009c6b6950fd34089f6 +dSFMT.v2.2.5+1.armv6l-linux-gnueabihf.tar.gz/sha512/4418c42165660adc050e872ef834f920c89ed6a0d2b816821672b1e862e947aad7efd023289da9bf05bb2eb9ec4b9d2561c403e2d5384d5314a4ba016b1f9cfc +dSFMT.v2.2.5+1.armv6l-linux-musleabihf.tar.gz/md5/6ffc798b8a0c847fa5cb93640bd66ab3 +dSFMT.v2.2.5+1.armv6l-linux-musleabihf.tar.gz/sha512/94e5ae07d0b1420abd7290519bce6f77deae634bbb4df31e3f02416bf509e555a9b1c9d19dd77ca76a308c2b86d5c9d4718b9ef83c13167b88a8181d8ca7e73a +dSFMT.v2.2.5+1.armv7l-linux-gnueabihf.tar.gz/md5/660d95aa08580ca1716a89c4d8b1eb24 +dSFMT.v2.2.5+1.armv7l-linux-gnueabihf.tar.gz/sha512/bc757a9f805047be5375f92c10a3f3eab69345a4ec5cc997f763e66be36144a74d414ff926df8e17b9d5a2394189269c3188c55e0b7c75a72495394d65510cef +dSFMT.v2.2.5+1.armv7l-linux-musleabihf.tar.gz/md5/78c487049092fe61949d506637c713bb +dSFMT.v2.2.5+1.armv7l-linux-musleabihf.tar.gz/sha512/03ddada4478f05eab7d2971b2deaf2cba91f084d7ce66fc8219bcb3cf5c308ea13959fed95568ca80f4ce11794e197092984919265716de8f2558e2cb30d94ce +dSFMT.v2.2.5+1.i686-linux-gnu.tar.gz/md5/11463fd3981a8c143d7aed691d18d4e0 +dSFMT.v2.2.5+1.i686-linux-gnu.tar.gz/sha512/db946a4fbd8a3163b8b1c25e02bfc4a841da7d2532892a99037bd48ac98e1840691e8cc0127d9457a82667a0131e4826cb4e9d0a13f127afc62da4eb68af5a3e +dSFMT.v2.2.5+1.i686-linux-musl.tar.gz/md5/a61405f72c9a3bba5718f078c68e61a5 +dSFMT.v2.2.5+1.i686-linux-musl.tar.gz/sha512/726f130bbbfd0dece4185b89a25a73f3b5b950ebfb7f86aea6e9cbcf9ae932e591d20b854de0b4985103dbf8b4b7cb3560661c5070af971cd2c1f3ec3e1ea7d2 +dSFMT.v2.2.5+1.i686-w64-mingw32.tar.gz/md5/3bc27ef8f26c7a26f096cf1d558d408d 
+dSFMT.v2.2.5+1.i686-w64-mingw32.tar.gz/sha512/ea3608d3ae3874ea57a1a08f69abe2a1638bc340db71c6fe3c4fd5637d8c54943bf16b099a46817387c1ed4cb5f3cd1c0ff19ae8a4ed85dd555555821af06374 +dSFMT.v2.2.5+1.powerpc64le-linux-gnu.tar.gz/md5/fd8c73961ef7c82201e6d86e8bf4324c +dSFMT.v2.2.5+1.powerpc64le-linux-gnu.tar.gz/sha512/1bd0ebd019cfc6f25f7ba007547c5ee297854655b93c55e90d8ead420875de5a087e38956693d5e901ff2abf667c72aa66fb34f587b82adf4b91b3d5d666b5c7 +dSFMT.v2.2.5+1.x86_64-apple-darwin.tar.gz/md5/c8c0cd02cb1aa5e363b0c28a3fc4cf65 +dSFMT.v2.2.5+1.x86_64-apple-darwin.tar.gz/sha512/ac29d4b8aae51349474c9191822f92f69105e19521afe2bd9fc6b16385256610ae31e34cd70d894ed03299f1fd155f0a1db79969d1ed35eea44d11521e2030ab +dSFMT.v2.2.5+1.x86_64-linux-gnu.tar.gz/md5/fa671f4ca14b171d53c8866d03f9162a +dSFMT.v2.2.5+1.x86_64-linux-gnu.tar.gz/sha512/2e242a1448da0508ea88cc1a106f1e74f8d7e7562cd82b80d86abf9a8b454653ad7612e25c30ce00c23757e8a5b7b5736253b00a52f9473af6c5d4df768138f2 +dSFMT.v2.2.5+1.x86_64-linux-musl.tar.gz/md5/c648294163882ec539ab646542c74880 +dSFMT.v2.2.5+1.x86_64-linux-musl.tar.gz/sha512/9e96a47d660854b6517364f0db40a2f4e0e3b814499a0349f7cf550b1c8d04589fca5eb4a75bf34f36d1b5d1b2277b3e9a961c887092abedd08f438e025329e7 +dSFMT.v2.2.5+1.x86_64-unknown-freebsd.tar.gz/md5/4960e4ab2ecb6ae1025f9e7bf4c9a7b8 +dSFMT.v2.2.5+1.x86_64-unknown-freebsd.tar.gz/sha512/a2e8bbe382a0ebdd7b69fafdc901f33767f53b9f8b37a89104f2ef897bb5ec27bc8d3bc21f5cff52ca4f29b3a6a10535f7e5f16ef917a9323858c75f1569ea60 +dSFMT.v2.2.5+1.x86_64-w64-mingw32.tar.gz/md5/386adb3b7593c222dc7a1060a1356b21 +dSFMT.v2.2.5+1.x86_64-w64-mingw32.tar.gz/sha512/fe2ab5021126807b37042e89a22ef9a869c6a0a028680df445773b2affd11c2b02148be07d53504ea3842bb38bb62fe039529688266c1cba3545a892bd4dc185 dsfmt-2.2.5.tar.gz/md5/d22e476b52cdee7d5b90d2f289570073 dsfmt-2.2.5.tar.gz/sha512/951e8669350f750b8915a819e704eae0a9b9c9518b3e3b9a1905f9ca0d25cc4c2486cb479e258a4a114e9c26ceb73a6c4e9f1cc02ed19173aeb8f20189754f6b diff --git a/deps/checksums/gmp b/deps/checksums/gmp index 0c7dd415e6f16..c786fddafef5e 100644 --- a/deps/checksums/gmp +++ b/deps/checksums/gmp @@ -1,60 +1,62 @@ -GMP.v6.3.0+0.aarch64-apple-darwin.tar.gz/md5/70a730ecf64eefb5a13f4524e29a6388 -GMP.v6.3.0+0.aarch64-apple-darwin.tar.gz/sha512/51791b4ae0ede1db4c6e7759072d125ca56f6a3a3e43fd5970981a3b2d651f28fe0abefce4b3ad0589d3a46c143054d20fee801bbd423bd2a4c12ba97314c39c -GMP.v6.3.0+0.aarch64-linux-gnu-cxx03.tar.gz/md5/e2b0bf1317259972cdc4f0e6fc3c2bc8 -GMP.v6.3.0+0.aarch64-linux-gnu-cxx03.tar.gz/sha512/8de1dd5d6971c76693c67222725c9eb0a1d276a55a28cd49d94115123100bfe45144652421d4cde468dce67a5630736f4174c9491c8a6e2543aadcb44f1f2d12 -GMP.v6.3.0+0.aarch64-linux-gnu-cxx11.tar.gz/md5/2017b6215ed99c3aed8b04abe75cb3e9 -GMP.v6.3.0+0.aarch64-linux-gnu-cxx11.tar.gz/sha512/78b22106f96348f0d9222279fdf8d1e3f5bd400f771fb0c54dd4045985ee05b896e3097f788739eefab9a9ab09a885aad65c4adb31ae5ba59b7ab22ca10bb574 -GMP.v6.3.0+0.aarch64-linux-musl-cxx03.tar.gz/md5/6477f35f92203db871f56f047b99a1fe -GMP.v6.3.0+0.aarch64-linux-musl-cxx03.tar.gz/sha512/66a6d18979c1ee9a5d06323a717d0a5dd73efc196087349408e739d7aa0444e8ee1af4bd634f85dfd4cfa4c97c24dda4ba472b490f50409581aff967c81b0750 -GMP.v6.3.0+0.aarch64-linux-musl-cxx11.tar.gz/md5/4648558f1e42b8e679f5be494a910402 -GMP.v6.3.0+0.aarch64-linux-musl-cxx11.tar.gz/sha512/9b7ff68a412bccd423b3cffefbc6350db6db8f3f7657713767187c2c2ea3b09d835e1c80d34ab4407f79fccbec82594e024787def27b9ad2ee7ea01ef1607b53 -GMP.v6.3.0+0.armv6l-linux-gnueabihf-cxx03.tar.gz/md5/6cabb238d148b3e2e76e8527e65893cd 
-GMP.v6.3.0+0.armv6l-linux-gnueabihf-cxx03.tar.gz/sha512/07b5673b4680781b7d42399213ecd491ede8883bbf1825689ad6678986a76581f6c4e53f17353f63bec8db8df5ed3fbddc228694eecc54ae7fc949f106bb8f14 -GMP.v6.3.0+0.armv6l-linux-gnueabihf-cxx11.tar.gz/md5/0257216ad4e96b404d456f07fcc30b09 -GMP.v6.3.0+0.armv6l-linux-gnueabihf-cxx11.tar.gz/sha512/ae8bbbbe3992f78186fe7535e450330e94e6630540eefbdfb51bb5014afd90feac0b1583e3fd2bbf226e61523647b3ec6324188bd6267c353a2a98594566c02b -GMP.v6.3.0+0.armv6l-linux-musleabihf-cxx03.tar.gz/md5/48b949c062ea27dc0dbcc07ea5387821 -GMP.v6.3.0+0.armv6l-linux-musleabihf-cxx03.tar.gz/sha512/03699c20b5c50dbd44f45a0f5f115c6b10b4e8de68d747bceba605c3090469c819b82ad7e57fe7702c1700c25aae6ab9394a22ded319bc58c80e9d20692b610e -GMP.v6.3.0+0.armv6l-linux-musleabihf-cxx11.tar.gz/md5/847ba3116072a523e1ff4ce83e5a18a8 -GMP.v6.3.0+0.armv6l-linux-musleabihf-cxx11.tar.gz/sha512/402548acd57f4112bf2435803f35ea93fd8d07f3df0e2f053b0bec6b08aa3dff4052990a724e2547ce35a29ee376b17d34b7e7e2ab45ecb4981ffc99c56f1a9f -GMP.v6.3.0+0.armv7l-linux-gnueabihf-cxx03.tar.gz/md5/5cc75b66059c3b8b5fbf9b8fcb781b10 -GMP.v6.3.0+0.armv7l-linux-gnueabihf-cxx03.tar.gz/sha512/1ef583d014c825e1d4e6d5f7e2d84c3ba183ba9490410f5d424760e275b7032e98f8377d87ed349d4969c6ef8f9b961a1e8df6f40efb406d41983446a9510303 -GMP.v6.3.0+0.armv7l-linux-gnueabihf-cxx11.tar.gz/md5/c0295c143bcb6b53d6184e2852ce35c5 -GMP.v6.3.0+0.armv7l-linux-gnueabihf-cxx11.tar.gz/sha512/3c74edb123a6f4147b416e5f7f25903bc859ac5f58f141bd463d3dff8cc2928fedf176f20869a1018a2731c1d7170444b3b3405c8f89c3fc22dc2edf9c036c24 -GMP.v6.3.0+0.armv7l-linux-musleabihf-cxx03.tar.gz/md5/a67696b02a7f67405dd84252c908e071 -GMP.v6.3.0+0.armv7l-linux-musleabihf-cxx03.tar.gz/sha512/73ba1809cfc68199401974f73e7a37b1fe00d4c0cf3e58ed85d161a8fbac4390aeb28591c3108fc503ef8fb5b131d027cb76dcf5d7731698997c2f377d929dce -GMP.v6.3.0+0.armv7l-linux-musleabihf-cxx11.tar.gz/md5/484f00cd5b0beec20f63cd6734d02611 -GMP.v6.3.0+0.armv7l-linux-musleabihf-cxx11.tar.gz/sha512/46fc56f945647f5c8577ad45f540a034f747604e5a89230d9d419b10d5f0571c7580e18e1138ea920efc08b25798c0c7110e15359de17dce3b6db7f07b8ceb3a -GMP.v6.3.0+0.i686-linux-gnu-cxx03.tar.gz/md5/d36d84638e2e5f927d15f07c55919f5f -GMP.v6.3.0+0.i686-linux-gnu-cxx03.tar.gz/sha512/61c62084ab90d25f7168281c7fb672f5bcafdf909afbf66847cfaa1077dd5474b2c27464eb76cac45f5e319aca0c4f7367fc238b83d2dde46ba90a7c1f396dfb -GMP.v6.3.0+0.i686-linux-gnu-cxx11.tar.gz/md5/d87627470bdcac981f7b004c27ac9a89 -GMP.v6.3.0+0.i686-linux-gnu-cxx11.tar.gz/sha512/2a34028687f75422b43f5365b0a8c9530b29473d41bfec4fb9822f074f813b8c6c1fc9efbfbb17a7e4d3d66f2549b5589b3fdbd08711a365330deb72be4958d0 -GMP.v6.3.0+0.i686-linux-musl-cxx03.tar.gz/md5/a2f2fc663bcacfc3e7d6aff29a52de23 -GMP.v6.3.0+0.i686-linux-musl-cxx03.tar.gz/sha512/a30a5d0ee78e747f074b3a5f0a26b9ba99b7553b3c83411a3cb9298814e605509194e9f0d8934caaa1cb7b78eef521805bbc86a297aebd06473ba80a20ffc443 -GMP.v6.3.0+0.i686-linux-musl-cxx11.tar.gz/md5/246b24935442815ff75a13b3dcf24756 -GMP.v6.3.0+0.i686-linux-musl-cxx11.tar.gz/sha512/ca351c4b93adf3f3e40f93c7b0cd61b33ec10049d39e8d33975f46d509efcded67600e6b19d8018a29ee893027d7a28edef0b19c1d70451d072a7a0989e9317d -GMP.v6.3.0+0.i686-w64-mingw32-cxx03.tar.gz/md5/c3b321ae48db0cb8dac4e09e2722e56c -GMP.v6.3.0+0.i686-w64-mingw32-cxx03.tar.gz/sha512/6a6feeb8baf6d499409a9010295b474a8c6de461fa0e34562d53e58190b66c50e278fae7560495cd85ea6f5b41f9e8c6e950ff4f451d26d0757e1d1696e8bca5 -GMP.v6.3.0+0.i686-w64-mingw32-cxx11.tar.gz/md5/3f633b0ff74c2a44350855fc6ce310b8 
-GMP.v6.3.0+0.i686-w64-mingw32-cxx11.tar.gz/sha512/eecb17dec70fe84d90f47e1958672d273c865da9607ba3056c9c923a6ff9a3cab5b30414389d8f0c7f5ae5d87c05999964ed0900c80ae5afb525eaec00f401e2 -GMP.v6.3.0+0.powerpc64le-linux-gnu-cxx03.tar.gz/md5/8b5f113ad7fd4a312229cfe8c2d1abca -GMP.v6.3.0+0.powerpc64le-linux-gnu-cxx03.tar.gz/sha512/36525ffc0ac5c363810c47945c34c81daabf88cf1f9c60d236447249d06332d3f5a130b431ab2d1c0148eb5413a4fa66bdd50671f2e7fcb77858d9fcdf83a94c -GMP.v6.3.0+0.powerpc64le-linux-gnu-cxx11.tar.gz/md5/7f1237e9668136b00dd719a5cad3b6aa -GMP.v6.3.0+0.powerpc64le-linux-gnu-cxx11.tar.gz/sha512/46a6efe23173a12299da371121847d16d7950ffe5c87d1221b54c5e95dafbf723c4a327b1c2e832d4742a91254aa40fd5d8152d6d0801769b2efd4f83a042afd -GMP.v6.3.0+0.x86_64-apple-darwin.tar.gz/md5/cd2d1b309aea2c781a9c28470fd2f0eb -GMP.v6.3.0+0.x86_64-apple-darwin.tar.gz/sha512/d7f94d80f1ba170c9553601d1af323bef7bbb98575b80b58b3d7b37d69d81cdee0e132fb4fa20393a0e8719984c785d0c7e5c8ae2c29c62ffbd82b00375993d4 -GMP.v6.3.0+0.x86_64-linux-gnu-cxx03.tar.gz/md5/5be8efef65dafe52e5726ef24238ae36 -GMP.v6.3.0+0.x86_64-linux-gnu-cxx03.tar.gz/sha512/f4c303fe915c89fecdb5a333a30412e0cfb04e07b4f1bc2f726179243dbc61d60ae5b0773a6bd5da8a10cb8764e448bc88035a639ea88d2e06f04e55074d8551 -GMP.v6.3.0+0.x86_64-linux-gnu-cxx11.tar.gz/md5/66f9a3858d07591227f2bc057c3c988b -GMP.v6.3.0+0.x86_64-linux-gnu-cxx11.tar.gz/sha512/5611b9bfd24efac0a189bbd85533e1cd2bee7f833f5ae0a06343f2c1d92925e0d0f0758b99c43520293348ad61f98e1b470829514c35d208697988d8b469fc41 -GMP.v6.3.0+0.x86_64-linux-musl-cxx03.tar.gz/md5/edaa83f6432ff7e75e106d8bfd03d509 -GMP.v6.3.0+0.x86_64-linux-musl-cxx03.tar.gz/sha512/1587e7b91e387da9c23559826c161fa4d447250bd7b6565f0b9fedc36e7502dc2b59caa8157abcb7e7862d24d696470289bd650511b07e8711ecf5a462330b6d -GMP.v6.3.0+0.x86_64-linux-musl-cxx11.tar.gz/md5/e668c4f0c1246aa1510c36f246b1b483 -GMP.v6.3.0+0.x86_64-linux-musl-cxx11.tar.gz/sha512/cf4bd47a5ddb067a57e852855fbd637a93f3652c3327af256f74e9e265c9e0de7c5be78b3e7bcbf08a03916876ecdc05cc294149e2c3d472a30fedc2e6dded47 -GMP.v6.3.0+0.x86_64-unknown-freebsd.tar.gz/md5/4cbf56d2884aa357291321b182d07cb8 -GMP.v6.3.0+0.x86_64-unknown-freebsd.tar.gz/sha512/0c723b8e0f5fabf9e43945d3fb355c3d7b036662a8d6542629aaff27164f12d13b2a19f5c4964f165466705b231884b7f7193d7a01a0e9d3644da1d79af79631 -GMP.v6.3.0+0.x86_64-w64-mingw32-cxx03.tar.gz/md5/02e8f5d66c15731117cf805e0a4c4976 -GMP.v6.3.0+0.x86_64-w64-mingw32-cxx03.tar.gz/sha512/1f94805fe9f34f4e77c54e92625615d91ade617468483409037d0693c3bf106187916d9d21e92681673faae158b376133c0ede643f31bfc9f73ac29c9fd13bcc -GMP.v6.3.0+0.x86_64-w64-mingw32-cxx11.tar.gz/md5/10752137fccc73175872db07749d6f49 -GMP.v6.3.0+0.x86_64-w64-mingw32-cxx11.tar.gz/sha512/3a5d7e8125f3b538a2e59e9c6919db36c974575e6b1950451cb60307da68dc092c4ce21b8f49c40871aadf3bd07681b43eea9c7bf37ba383da9a0e80c30b176e +GMP.v6.3.0+1.aarch64-apple-darwin.tar.gz/md5/70a730ecf64eefb5a13f4524e29a6388 +GMP.v6.3.0+1.aarch64-apple-darwin.tar.gz/sha512/51791b4ae0ede1db4c6e7759072d125ca56f6a3a3e43fd5970981a3b2d651f28fe0abefce4b3ad0589d3a46c143054d20fee801bbd423bd2a4c12ba97314c39c +GMP.v6.3.0+1.aarch64-linux-gnu-cxx03.tar.gz/md5/e2b0bf1317259972cdc4f0e6fc3c2bc8 +GMP.v6.3.0+1.aarch64-linux-gnu-cxx03.tar.gz/sha512/8de1dd5d6971c76693c67222725c9eb0a1d276a55a28cd49d94115123100bfe45144652421d4cde468dce67a5630736f4174c9491c8a6e2543aadcb44f1f2d12 +GMP.v6.3.0+1.aarch64-linux-gnu-cxx11.tar.gz/md5/2017b6215ed99c3aed8b04abe75cb3e9 
+GMP.v6.3.0+1.aarch64-linux-gnu-cxx11.tar.gz/sha512/78b22106f96348f0d9222279fdf8d1e3f5bd400f771fb0c54dd4045985ee05b896e3097f788739eefab9a9ab09a885aad65c4adb31ae5ba59b7ab22ca10bb574 +GMP.v6.3.0+1.aarch64-linux-musl-cxx03.tar.gz/md5/6477f35f92203db871f56f047b99a1fe +GMP.v6.3.0+1.aarch64-linux-musl-cxx03.tar.gz/sha512/66a6d18979c1ee9a5d06323a717d0a5dd73efc196087349408e739d7aa0444e8ee1af4bd634f85dfd4cfa4c97c24dda4ba472b490f50409581aff967c81b0750 +GMP.v6.3.0+1.aarch64-linux-musl-cxx11.tar.gz/md5/4648558f1e42b8e679f5be494a910402 +GMP.v6.3.0+1.aarch64-linux-musl-cxx11.tar.gz/sha512/9b7ff68a412bccd423b3cffefbc6350db6db8f3f7657713767187c2c2ea3b09d835e1c80d34ab4407f79fccbec82594e024787def27b9ad2ee7ea01ef1607b53 +GMP.v6.3.0+1.aarch64-unknown-freebsd.tar.gz/md5/362bc3fdbcd6d74b9fddb8a4d640d99a +GMP.v6.3.0+1.aarch64-unknown-freebsd.tar.gz/sha512/8e560b4d1014382d784ccf7c9dc6365526566301ec6a28d115170c0be92b8e6033b6c08f922104e405cf978204579754f0740aae97d0a334e47ed6f684aa4af4 +GMP.v6.3.0+1.armv6l-linux-gnueabihf-cxx03.tar.gz/md5/6cabb238d148b3e2e76e8527e65893cd +GMP.v6.3.0+1.armv6l-linux-gnueabihf-cxx03.tar.gz/sha512/07b5673b4680781b7d42399213ecd491ede8883bbf1825689ad6678986a76581f6c4e53f17353f63bec8db8df5ed3fbddc228694eecc54ae7fc949f106bb8f14 +GMP.v6.3.0+1.armv6l-linux-gnueabihf-cxx11.tar.gz/md5/0257216ad4e96b404d456f07fcc30b09 +GMP.v6.3.0+1.armv6l-linux-gnueabihf-cxx11.tar.gz/sha512/ae8bbbbe3992f78186fe7535e450330e94e6630540eefbdfb51bb5014afd90feac0b1583e3fd2bbf226e61523647b3ec6324188bd6267c353a2a98594566c02b +GMP.v6.3.0+1.armv6l-linux-musleabihf-cxx03.tar.gz/md5/48b949c062ea27dc0dbcc07ea5387821 +GMP.v6.3.0+1.armv6l-linux-musleabihf-cxx03.tar.gz/sha512/03699c20b5c50dbd44f45a0f5f115c6b10b4e8de68d747bceba605c3090469c819b82ad7e57fe7702c1700c25aae6ab9394a22ded319bc58c80e9d20692b610e +GMP.v6.3.0+1.armv6l-linux-musleabihf-cxx11.tar.gz/md5/847ba3116072a523e1ff4ce83e5a18a8 +GMP.v6.3.0+1.armv6l-linux-musleabihf-cxx11.tar.gz/sha512/402548acd57f4112bf2435803f35ea93fd8d07f3df0e2f053b0bec6b08aa3dff4052990a724e2547ce35a29ee376b17d34b7e7e2ab45ecb4981ffc99c56f1a9f +GMP.v6.3.0+1.armv7l-linux-gnueabihf-cxx03.tar.gz/md5/5cc75b66059c3b8b5fbf9b8fcb781b10 +GMP.v6.3.0+1.armv7l-linux-gnueabihf-cxx03.tar.gz/sha512/1ef583d014c825e1d4e6d5f7e2d84c3ba183ba9490410f5d424760e275b7032e98f8377d87ed349d4969c6ef8f9b961a1e8df6f40efb406d41983446a9510303 +GMP.v6.3.0+1.armv7l-linux-gnueabihf-cxx11.tar.gz/md5/c0295c143bcb6b53d6184e2852ce35c5 +GMP.v6.3.0+1.armv7l-linux-gnueabihf-cxx11.tar.gz/sha512/3c74edb123a6f4147b416e5f7f25903bc859ac5f58f141bd463d3dff8cc2928fedf176f20869a1018a2731c1d7170444b3b3405c8f89c3fc22dc2edf9c036c24 +GMP.v6.3.0+1.armv7l-linux-musleabihf-cxx03.tar.gz/md5/a67696b02a7f67405dd84252c908e071 +GMP.v6.3.0+1.armv7l-linux-musleabihf-cxx03.tar.gz/sha512/73ba1809cfc68199401974f73e7a37b1fe00d4c0cf3e58ed85d161a8fbac4390aeb28591c3108fc503ef8fb5b131d027cb76dcf5d7731698997c2f377d929dce +GMP.v6.3.0+1.armv7l-linux-musleabihf-cxx11.tar.gz/md5/484f00cd5b0beec20f63cd6734d02611 +GMP.v6.3.0+1.armv7l-linux-musleabihf-cxx11.tar.gz/sha512/46fc56f945647f5c8577ad45f540a034f747604e5a89230d9d419b10d5f0571c7580e18e1138ea920efc08b25798c0c7110e15359de17dce3b6db7f07b8ceb3a +GMP.v6.3.0+1.i686-linux-gnu-cxx03.tar.gz/md5/d36d84638e2e5f927d15f07c55919f5f +GMP.v6.3.0+1.i686-linux-gnu-cxx03.tar.gz/sha512/61c62084ab90d25f7168281c7fb672f5bcafdf909afbf66847cfaa1077dd5474b2c27464eb76cac45f5e319aca0c4f7367fc238b83d2dde46ba90a7c1f396dfb +GMP.v6.3.0+1.i686-linux-gnu-cxx11.tar.gz/md5/d87627470bdcac981f7b004c27ac9a89 
+GMP.v6.3.0+1.i686-linux-gnu-cxx11.tar.gz/sha512/2a34028687f75422b43f5365b0a8c9530b29473d41bfec4fb9822f074f813b8c6c1fc9efbfbb17a7e4d3d66f2549b5589b3fdbd08711a365330deb72be4958d0 +GMP.v6.3.0+1.i686-linux-musl-cxx03.tar.gz/md5/a2f2fc663bcacfc3e7d6aff29a52de23 +GMP.v6.3.0+1.i686-linux-musl-cxx03.tar.gz/sha512/a30a5d0ee78e747f074b3a5f0a26b9ba99b7553b3c83411a3cb9298814e605509194e9f0d8934caaa1cb7b78eef521805bbc86a297aebd06473ba80a20ffc443 +GMP.v6.3.0+1.i686-linux-musl-cxx11.tar.gz/md5/246b24935442815ff75a13b3dcf24756 +GMP.v6.3.0+1.i686-linux-musl-cxx11.tar.gz/sha512/ca351c4b93adf3f3e40f93c7b0cd61b33ec10049d39e8d33975f46d509efcded67600e6b19d8018a29ee893027d7a28edef0b19c1d70451d072a7a0989e9317d +GMP.v6.3.0+1.i686-w64-mingw32-cxx03.tar.gz/md5/c3b321ae48db0cb8dac4e09e2722e56c +GMP.v6.3.0+1.i686-w64-mingw32-cxx03.tar.gz/sha512/6a6feeb8baf6d499409a9010295b474a8c6de461fa0e34562d53e58190b66c50e278fae7560495cd85ea6f5b41f9e8c6e950ff4f451d26d0757e1d1696e8bca5 +GMP.v6.3.0+1.i686-w64-mingw32-cxx11.tar.gz/md5/3f633b0ff74c2a44350855fc6ce310b8 +GMP.v6.3.0+1.i686-w64-mingw32-cxx11.tar.gz/sha512/eecb17dec70fe84d90f47e1958672d273c865da9607ba3056c9c923a6ff9a3cab5b30414389d8f0c7f5ae5d87c05999964ed0900c80ae5afb525eaec00f401e2 +GMP.v6.3.0+1.powerpc64le-linux-gnu-cxx03.tar.gz/md5/8b5f113ad7fd4a312229cfe8c2d1abca +GMP.v6.3.0+1.powerpc64le-linux-gnu-cxx03.tar.gz/sha512/36525ffc0ac5c363810c47945c34c81daabf88cf1f9c60d236447249d06332d3f5a130b431ab2d1c0148eb5413a4fa66bdd50671f2e7fcb77858d9fcdf83a94c +GMP.v6.3.0+1.powerpc64le-linux-gnu-cxx11.tar.gz/md5/7f1237e9668136b00dd719a5cad3b6aa +GMP.v6.3.0+1.powerpc64le-linux-gnu-cxx11.tar.gz/sha512/46a6efe23173a12299da371121847d16d7950ffe5c87d1221b54c5e95dafbf723c4a327b1c2e832d4742a91254aa40fd5d8152d6d0801769b2efd4f83a042afd +GMP.v6.3.0+1.x86_64-apple-darwin.tar.gz/md5/cd2d1b309aea2c781a9c28470fd2f0eb +GMP.v6.3.0+1.x86_64-apple-darwin.tar.gz/sha512/d7f94d80f1ba170c9553601d1af323bef7bbb98575b80b58b3d7b37d69d81cdee0e132fb4fa20393a0e8719984c785d0c7e5c8ae2c29c62ffbd82b00375993d4 +GMP.v6.3.0+1.x86_64-linux-gnu-cxx03.tar.gz/md5/5be8efef65dafe52e5726ef24238ae36 +GMP.v6.3.0+1.x86_64-linux-gnu-cxx03.tar.gz/sha512/f4c303fe915c89fecdb5a333a30412e0cfb04e07b4f1bc2f726179243dbc61d60ae5b0773a6bd5da8a10cb8764e448bc88035a639ea88d2e06f04e55074d8551 +GMP.v6.3.0+1.x86_64-linux-gnu-cxx11.tar.gz/md5/66f9a3858d07591227f2bc057c3c988b +GMP.v6.3.0+1.x86_64-linux-gnu-cxx11.tar.gz/sha512/5611b9bfd24efac0a189bbd85533e1cd2bee7f833f5ae0a06343f2c1d92925e0d0f0758b99c43520293348ad61f98e1b470829514c35d208697988d8b469fc41 +GMP.v6.3.0+1.x86_64-linux-musl-cxx03.tar.gz/md5/edaa83f6432ff7e75e106d8bfd03d509 +GMP.v6.3.0+1.x86_64-linux-musl-cxx03.tar.gz/sha512/1587e7b91e387da9c23559826c161fa4d447250bd7b6565f0b9fedc36e7502dc2b59caa8157abcb7e7862d24d696470289bd650511b07e8711ecf5a462330b6d +GMP.v6.3.0+1.x86_64-linux-musl-cxx11.tar.gz/md5/e668c4f0c1246aa1510c36f246b1b483 +GMP.v6.3.0+1.x86_64-linux-musl-cxx11.tar.gz/sha512/cf4bd47a5ddb067a57e852855fbd637a93f3652c3327af256f74e9e265c9e0de7c5be78b3e7bcbf08a03916876ecdc05cc294149e2c3d472a30fedc2e6dded47 +GMP.v6.3.0+1.x86_64-unknown-freebsd.tar.gz/md5/4cbf56d2884aa357291321b182d07cb8 +GMP.v6.3.0+1.x86_64-unknown-freebsd.tar.gz/sha512/0c723b8e0f5fabf9e43945d3fb355c3d7b036662a8d6542629aaff27164f12d13b2a19f5c4964f165466705b231884b7f7193d7a01a0e9d3644da1d79af79631 +GMP.v6.3.0+1.x86_64-w64-mingw32-cxx03.tar.gz/md5/02e8f5d66c15731117cf805e0a4c4976 
+GMP.v6.3.0+1.x86_64-w64-mingw32-cxx03.tar.gz/sha512/1f94805fe9f34f4e77c54e92625615d91ade617468483409037d0693c3bf106187916d9d21e92681673faae158b376133c0ede643f31bfc9f73ac29c9fd13bcc +GMP.v6.3.0+1.x86_64-w64-mingw32-cxx11.tar.gz/md5/10752137fccc73175872db07749d6f49 +GMP.v6.3.0+1.x86_64-w64-mingw32-cxx11.tar.gz/sha512/3a5d7e8125f3b538a2e59e9c6919db36c974575e6b1950451cb60307da68dc092c4ce21b8f49c40871aadf3bd07681b43eea9c7bf37ba383da9a0e80c30b176e gmp-6.3.0.tar.bz2/md5/c1cd6ef33085e9cb818b9b08371f9000 gmp-6.3.0.tar.bz2/sha512/3b684c9bcb9ede2b7e54d0ba4c9764bfa17c20d4f3000017c553b6f1e135b536949580ff37341680c25dc236cfe0ba1db8cfdfe619ce013656189ef0871b89f8 diff --git a/deps/checksums/libuv b/deps/checksums/libuv index 6887c3fe62f41..49869af795d45 100644 --- a/deps/checksums/libuv +++ b/deps/checksums/libuv @@ -1,34 +1,36 @@ -LibUV.v2.0.1+18.aarch64-apple-darwin.tar.gz/md5/f176c76e5e2096dea8443302cf9550b8 -LibUV.v2.0.1+18.aarch64-apple-darwin.tar.gz/sha512/4301b13953a08a758b86e30af3261fd9a291ce3829b4d98e71e2a2c040e322e284c5a6eb4bc7189cc352f4b1cf7041e2cfd3380d511d88c151df3101ad74594e -LibUV.v2.0.1+18.aarch64-linux-gnu.tar.gz/md5/c81515783363702a1bd4b65fd6d7f36b -LibUV.v2.0.1+18.aarch64-linux-gnu.tar.gz/sha512/011429365337f5a45e56ca7a42709866bb994c747a1170d870f5f3ddfff2d36138866ee9278ac01172bc71bde8dee404bcb9cae9c7b44222bf1cc883659df269 -LibUV.v2.0.1+18.aarch64-linux-musl.tar.gz/md5/e74d5ea4912dd326b2705638faa7b805 -LibUV.v2.0.1+18.aarch64-linux-musl.tar.gz/sha512/a26a9f2c9051816230324071c502321f7af3885d581a400615858a93a4cd457226048d15b0e7f6a73d12659763c705b5ab519e9f5b35c6d886b9fd5babbfe352 -LibUV.v2.0.1+18.armv6l-linux-gnueabihf.tar.gz/md5/6df38bcf5d0a61dee63d16b73d0c9a24 -LibUV.v2.0.1+18.armv6l-linux-gnueabihf.tar.gz/sha512/d5354a6532061de0a58965ce0e427bde52f9ae0ee39a98e1a33de4c414fddcba9ba139ddf91be7321a4ccc97bbf7a8a8357ff10cf60f83c0a6bff7d839d6d7a8 -LibUV.v2.0.1+18.armv6l-linux-musleabihf.tar.gz/md5/6f02a24cfbfae3032fadceaea1faed39 -LibUV.v2.0.1+18.armv6l-linux-musleabihf.tar.gz/sha512/7fd107eb9a5ea84b488ea02e4fbedc9fe13bb11be859986a47af38f40ad775dd9f738c790878a3503437bcac1eb26ad9fe26f4aa0d3cb45c980b4c5abc9aec99 -LibUV.v2.0.1+18.armv7l-linux-gnueabihf.tar.gz/md5/96b09dec72f7e9b7409fa2920e67c866 -LibUV.v2.0.1+18.armv7l-linux-gnueabihf.tar.gz/sha512/6a0f79fc15c944fabba5c65180b665bc9769c6ff25863e330049f48b3a4394b448492f5a9a76bb7f8dbd3ce44dfc6f9ccdc2c71c42e1c749e88070fe99b1db69 -LibUV.v2.0.1+18.armv7l-linux-musleabihf.tar.gz/md5/f44e4b2521a813181f943895bdb0dd3c -LibUV.v2.0.1+18.armv7l-linux-musleabihf.tar.gz/sha512/cda1413dca817f772e8b343db0c6de0ef6b8f269e9a6a2ef3403c2582aeab554f46281bbb1eb4659c259198ef47fe26aab648a281e66f80aaf2f2cda0a23ac05 -LibUV.v2.0.1+18.i686-linux-gnu.tar.gz/md5/1f231d89cf9c04515d2d107a5d786cc8 -LibUV.v2.0.1+18.i686-linux-gnu.tar.gz/sha512/089cb8a372cdee0cbc0e78fc201611bb9bafd99af9a78e09d6097a6b70e7c4aa001ebd86f944b0a885c072093c529bf86bcaa32bde4fc1934407a858c1a5a764 -LibUV.v2.0.1+18.i686-linux-musl.tar.gz/md5/01cfc2a9e2536dbd330267917abb19ce -LibUV.v2.0.1+18.i686-linux-musl.tar.gz/sha512/72f3588cb464a60e61f8998242aaa2abdf93df920a2feba5e1d66ef0f2498488df0ec415cbb499d7f75c47bdfc7e3a2fdda6a94383492e0ad13e464eb1314ff8 -LibUV.v2.0.1+18.i686-w64-mingw32.tar.gz/md5/8c6599aab9ed4c46e52f03683aac664e -LibUV.v2.0.1+18.i686-w64-mingw32.tar.gz/sha512/13f0565f7244a8bcf1ab43fac91a856dc86d214877033a3cefee8c2179c1a275dfd7dda32e9017763acac2ba42ab6799934a58f5feaa38fb6cf2253dd713f57a -LibUV.v2.0.1+18.powerpc64le-linux-gnu.tar.gz/md5/af0e43d9d0aa91dd82b63220d96991ef 
-LibUV.v2.0.1+18.powerpc64le-linux-gnu.tar.gz/sha512/9fabe3089e4fc60e910770c32d36300ce8ef36c28e8cc9c72fbecba6eb80285ee8174e84e4452fb4ce90ee7c7f94e99b03fce47d8c579bd614bfffd553f93666 -LibUV.v2.0.1+18.x86_64-apple-darwin.tar.gz/md5/871040e874eedae54553d8f1c91b9133 -LibUV.v2.0.1+18.x86_64-apple-darwin.tar.gz/sha512/d5eee08b65e4bb8b444c61ac277bec9ef944b9279dd7f0732b3cd91d47788c77938e5db71e019e01bbe7785a75df95faf14368764f700c6b7a6b9e4d96d6b4c2 -LibUV.v2.0.1+18.x86_64-linux-gnu.tar.gz/md5/d2d186952c6d017fe33f6a6bea63a3ea -LibUV.v2.0.1+18.x86_64-linux-gnu.tar.gz/sha512/15501534bf5721e6bb668aabe6dc6375349f7a284e28df0609d00982e7e456908bd6868722391afa7f44a5c82faedc8cf544f69a0e4fb9fb0d529b3ae3d44d78 -LibUV.v2.0.1+18.x86_64-linux-musl.tar.gz/md5/271d4d40a1ae53ed5b2376e5936cfcf9 -LibUV.v2.0.1+18.x86_64-linux-musl.tar.gz/sha512/1956f059ed01f66b72349d6561b04e6a89b7257c0f838d7fbdd2cee79bd126bb46b93bf944a042b5a6a235762a7a0cdd117207342dd55a0c58653a70b4a38d48 -LibUV.v2.0.1+18.x86_64-unknown-freebsd.tar.gz/md5/62fe8523948914fbe7e28bf0b8d73594 -LibUV.v2.0.1+18.x86_64-unknown-freebsd.tar.gz/sha512/e6486888028c96975f74bc9313cba9706f6bf2be085aa776c44cbb2886753b2eee62469a0be92eb0542df1d0f51db3b34c7ba5e46842e16c6ff1d20e11b75322 -LibUV.v2.0.1+18.x86_64-w64-mingw32.tar.gz/md5/ae103f24b6e1830cdbe02143826fe551 -LibUV.v2.0.1+18.x86_64-w64-mingw32.tar.gz/sha512/f814085c135815947f342ff24fa0e1015e283ccece84a5b8dd5ccec0f5928a129e5fd79100a33b131376ad696f70b5acadcc5a02a7e6544635ecf7e18003ba1c +LibUV.v2.0.1+19.aarch64-apple-darwin.tar.gz/md5/f176c76e5e2096dea8443302cf9550b8 +LibUV.v2.0.1+19.aarch64-apple-darwin.tar.gz/sha512/4301b13953a08a758b86e30af3261fd9a291ce3829b4d98e71e2a2c040e322e284c5a6eb4bc7189cc352f4b1cf7041e2cfd3380d511d88c151df3101ad74594e +LibUV.v2.0.1+19.aarch64-linux-gnu.tar.gz/md5/c81515783363702a1bd4b65fd6d7f36b +LibUV.v2.0.1+19.aarch64-linux-gnu.tar.gz/sha512/011429365337f5a45e56ca7a42709866bb994c747a1170d870f5f3ddfff2d36138866ee9278ac01172bc71bde8dee404bcb9cae9c7b44222bf1cc883659df269 +LibUV.v2.0.1+19.aarch64-linux-musl.tar.gz/md5/e74d5ea4912dd326b2705638faa7b805 +LibUV.v2.0.1+19.aarch64-linux-musl.tar.gz/sha512/a26a9f2c9051816230324071c502321f7af3885d581a400615858a93a4cd457226048d15b0e7f6a73d12659763c705b5ab519e9f5b35c6d886b9fd5babbfe352 +LibUV.v2.0.1+19.aarch64-unknown-freebsd.tar.gz/md5/f2fe50ada3b6935af4f6b28fbc3940b2 +LibUV.v2.0.1+19.aarch64-unknown-freebsd.tar.gz/sha512/c4ba0190d21c6edb561062b2615792e9b4c2474dfc200d9dba12a3add44e1fbc0b74989748d85576f0a6e42d8e0bc02f6cb13b5963f3a56b00edffe6348a9f26 +LibUV.v2.0.1+19.armv6l-linux-gnueabihf.tar.gz/md5/6df38bcf5d0a61dee63d16b73d0c9a24 +LibUV.v2.0.1+19.armv6l-linux-gnueabihf.tar.gz/sha512/d5354a6532061de0a58965ce0e427bde52f9ae0ee39a98e1a33de4c414fddcba9ba139ddf91be7321a4ccc97bbf7a8a8357ff10cf60f83c0a6bff7d839d6d7a8 +LibUV.v2.0.1+19.armv6l-linux-musleabihf.tar.gz/md5/6f02a24cfbfae3032fadceaea1faed39 +LibUV.v2.0.1+19.armv6l-linux-musleabihf.tar.gz/sha512/7fd107eb9a5ea84b488ea02e4fbedc9fe13bb11be859986a47af38f40ad775dd9f738c790878a3503437bcac1eb26ad9fe26f4aa0d3cb45c980b4c5abc9aec99 +LibUV.v2.0.1+19.armv7l-linux-gnueabihf.tar.gz/md5/96b09dec72f7e9b7409fa2920e67c866 +LibUV.v2.0.1+19.armv7l-linux-gnueabihf.tar.gz/sha512/6a0f79fc15c944fabba5c65180b665bc9769c6ff25863e330049f48b3a4394b448492f5a9a76bb7f8dbd3ce44dfc6f9ccdc2c71c42e1c749e88070fe99b1db69 +LibUV.v2.0.1+19.armv7l-linux-musleabihf.tar.gz/md5/f44e4b2521a813181f943895bdb0dd3c 
+LibUV.v2.0.1+19.armv7l-linux-musleabihf.tar.gz/sha512/cda1413dca817f772e8b343db0c6de0ef6b8f269e9a6a2ef3403c2582aeab554f46281bbb1eb4659c259198ef47fe26aab648a281e66f80aaf2f2cda0a23ac05 +LibUV.v2.0.1+19.i686-linux-gnu.tar.gz/md5/1f231d89cf9c04515d2d107a5d786cc8 +LibUV.v2.0.1+19.i686-linux-gnu.tar.gz/sha512/089cb8a372cdee0cbc0e78fc201611bb9bafd99af9a78e09d6097a6b70e7c4aa001ebd86f944b0a885c072093c529bf86bcaa32bde4fc1934407a858c1a5a764 +LibUV.v2.0.1+19.i686-linux-musl.tar.gz/md5/01cfc2a9e2536dbd330267917abb19ce +LibUV.v2.0.1+19.i686-linux-musl.tar.gz/sha512/72f3588cb464a60e61f8998242aaa2abdf93df920a2feba5e1d66ef0f2498488df0ec415cbb499d7f75c47bdfc7e3a2fdda6a94383492e0ad13e464eb1314ff8 +LibUV.v2.0.1+19.i686-w64-mingw32.tar.gz/md5/8c6599aab9ed4c46e52f03683aac664e +LibUV.v2.0.1+19.i686-w64-mingw32.tar.gz/sha512/13f0565f7244a8bcf1ab43fac91a856dc86d214877033a3cefee8c2179c1a275dfd7dda32e9017763acac2ba42ab6799934a58f5feaa38fb6cf2253dd713f57a +LibUV.v2.0.1+19.powerpc64le-linux-gnu.tar.gz/md5/af0e43d9d0aa91dd82b63220d96991ef +LibUV.v2.0.1+19.powerpc64le-linux-gnu.tar.gz/sha512/9fabe3089e4fc60e910770c32d36300ce8ef36c28e8cc9c72fbecba6eb80285ee8174e84e4452fb4ce90ee7c7f94e99b03fce47d8c579bd614bfffd553f93666 +LibUV.v2.0.1+19.x86_64-apple-darwin.tar.gz/md5/871040e874eedae54553d8f1c91b9133 +LibUV.v2.0.1+19.x86_64-apple-darwin.tar.gz/sha512/d5eee08b65e4bb8b444c61ac277bec9ef944b9279dd7f0732b3cd91d47788c77938e5db71e019e01bbe7785a75df95faf14368764f700c6b7a6b9e4d96d6b4c2 +LibUV.v2.0.1+19.x86_64-linux-gnu.tar.gz/md5/d2d186952c6d017fe33f6a6bea63a3ea +LibUV.v2.0.1+19.x86_64-linux-gnu.tar.gz/sha512/15501534bf5721e6bb668aabe6dc6375349f7a284e28df0609d00982e7e456908bd6868722391afa7f44a5c82faedc8cf544f69a0e4fb9fb0d529b3ae3d44d78 +LibUV.v2.0.1+19.x86_64-linux-musl.tar.gz/md5/271d4d40a1ae53ed5b2376e5936cfcf9 +LibUV.v2.0.1+19.x86_64-linux-musl.tar.gz/sha512/1956f059ed01f66b72349d6561b04e6a89b7257c0f838d7fbdd2cee79bd126bb46b93bf944a042b5a6a235762a7a0cdd117207342dd55a0c58653a70b4a38d48 +LibUV.v2.0.1+19.x86_64-unknown-freebsd.tar.gz/md5/62fe8523948914fbe7e28bf0b8d73594 +LibUV.v2.0.1+19.x86_64-unknown-freebsd.tar.gz/sha512/e6486888028c96975f74bc9313cba9706f6bf2be085aa776c44cbb2886753b2eee62469a0be92eb0542df1d0f51db3b34c7ba5e46842e16c6ff1d20e11b75322 +LibUV.v2.0.1+19.x86_64-w64-mingw32.tar.gz/md5/ae103f24b6e1830cdbe02143826fe551 +LibUV.v2.0.1+19.x86_64-w64-mingw32.tar.gz/sha512/f814085c135815947f342ff24fa0e1015e283ccece84a5b8dd5ccec0f5928a129e5fd79100a33b131376ad696f70b5acadcc5a02a7e6544635ecf7e18003ba1c libuv-af4172ec713ee986ba1a989b9e33993a07c60c9e.tar.gz/md5/c1a7d3c74ef3999052f3bfe426264353 libuv-af4172ec713ee986ba1a989b9e33993a07c60c9e.tar.gz/sha512/a3f16863b711ddeeb5ab8d135d7df7a4be19cc2b9821fc78c8cd3ba421231d39b7d8bd9965321455094fda01584842a58f60612d93082b4fe32210b8aa44d999 diff --git a/deps/checksums/lld b/deps/checksums/lld index cdcae063f68ff..fff3140025e8d 100644 --- a/deps/checksums/lld +++ b/deps/checksums/lld @@ -1,108 +1,112 @@ -LLD.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/64c9a9f1758b9b292e0a3ef37f16ea41 -LLD.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/cc740aaeb6ed29c56b2881e1488606338e4bd0e049ca4a5b8312b1d9129b778224570336698347e4562d632db9049e0e91ecce34ef68acb23a8bbf62455a81cc -LLD.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/1a8e11dba5cb574cde42de2b9703ff79 -LLD.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.tar.gz/sha512/290300229576bb9155fe6bd24c0ee21beb41d0f2a46b208ab5a657b0199a7376c1f4cb07204c8ee1e6d202efe30ca040a6fff63c69b174120de3eb9866e344f4 
-LLD.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/cea134f347bae257cf5f55b6388cef81 -LLD.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/16b59143e929791b0c3e56cfb4970d8b3c87adf6e847fa9e2aac17c4ff2aa311ba2c7511c1b0ae2f39d9aa92f87ad4d99c042fe35bec391ac865fedb72bd3b1e -LLD.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/5f903bab0e38fa608e8965acce6f020e -LLD.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/01e5f6a32958e04174c545f57c6c3b1bc88ccfd5ab18dcb9d67b92b55ebc7655a03bf963c4eaf7e5c3792d4691427a89db372e7534c6c8f965f8a715a32d9284 -LLD.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/241a55374fd067f3736a2bb929e47015 -LLD.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/f1fedea4e6b5f6f3bbf4d705034d6c51b06f011c2ecec1ae49c5b7bd123891eee8b991462d60be7fffd58f7c773afe910a06ec0b55b37eed9b4d09b9fdbd5068 -LLD.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/ff018c7448a7589935333e46739ee2c4 -LLD.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/b646c6a945b8f42b396164a8e87fc2e54b1ad05681f438dfba83fdd3712a60167aaffcb0300bc42d904eb4bd34c002a71642b59540ca01e64d6fecc6daaacdd8 -LLD.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/e6ee9423a82322b9233cafb1c92eed2d -LLD.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/c915582a9ce2dfa8721741fb1ed19b719ba40f0092c2d29ebd68829ee558cef0b044a5e40985cff88e89129cbeed052d85fa5c6b6d87f9b3a68a6e89079ab4f3 -LLD.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/cc55112e2db358cf26d7bae3211d8e4f -LLD.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/0ecb43045419020eea911f1767dae23a6b1e81bb155ec493e911a9412e45f7ec71461aea2e6fe346e641747139cae43d9435ccecaa7fd6a234e4d69bb06606ed -LLD.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/498b2909f80b20588135466d5211bc80 -LLD.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/120fff24e85cf970670b20b5f4509475a3ae0d7621f8f67d018f3a7625548d736a3abc89f015966b1329c6b0a08a1db832e035ee3bae384e2c5864b73a856600 -LLD.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/1bcd298d5292f8e51f19b97fa4b27ab0 -LLD.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/695c42557f9ee53b2e10bbf74653fbad4d02124b962a1f50cf719d2821607dfbb9c1bf638dbbc9e0e544f3020a9ef4a82decd13f886cc41ddf47c07a5e40eaa1 -LLD.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/2323ff933feaf3754b442bee48a63607 -LLD.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/47b8e490b89e04fb8886dae438e3ddcd53c4e98045de2b0def3988671827528c8e9ae296411464c0f17cc64bd3956644673f47a3817237f27e1c3ed16ac8ef01 -LLD.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/37cf8528666064a434296f2e0039e9c6 -LLD.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/ea1504a859509f8a16030db7a65f42f0e78d67adf5946497f2178bf25456c0f2583af72c636881a4bdd1210dc0d377bdf300ef55aef5db8c56995424a1886059 -LLD.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/1c341f2b161e2320d3d1a74685887f54 -LLD.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/4f6fc099293deb1a2cf729ea7edd6e17fea0dc8b9fae9acfe34e00b1f5c798933df9538c805c8d28c6279eb38f9ebae2a1aeb1a2f23087352c6eeb3b27b63ddc -LLD.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/e306d59c71b0958c77108e650fac2612 
-LLD.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/79fd7cec0e169a9555ec9b0acc3248991e2e37a1d5bb422808ffcfd4f47e79321560b7985c82dfe070fb0b5ded5c160d83e358399c6e7608eeb62cd4a1406f88 -LLD.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/c1d080f1aebb58778d730578fb113290 -LLD.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/1f420da1897bd0a61413321aaaf032e8ed38d59e6dfe3313ca3a6ee6582ae6c566e3761ca8fcd1f5a964337ba8a9b3e73dc55ad68aca139beeb45e43d51e862b -LLD.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/6f4e0c7d2fe9ac254650dcd2842dafa8 -LLD.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/bbc71b334250e5e6429766d88595adbb671a206630987ec2a27e05711ff0f844487dffc1c136052ec11394e9d5c51c70d1b75d5348f97d3bf7fab463291e9dc8 -LLD.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/76925b9a7bc249b2227390c479c54f8d -LLD.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/20643ecb79732e3ae9666116dbd0763c18b808afa78e6a14998aadc7265cccd6efd28670592db61d3d27b8d3023be4c5f3df41fff9e1b38d61abf76829090b4f -LLD.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/399b9aac140d9050088fdb187ed4645f -LLD.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/8bab65965670fe392e78d0b9dc78c92cdcf202898f6d5a3174eb89ca5cb95b995675c8a7d81bbc4e95e490ad1a43d9d29d7907b7006789c0143a1d8f24cccaeb -LLD.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/026a4f5ae9eb3ac05e5e8fa894d77a5b -LLD.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/4bca8bd558619260cddf4e2f4593cbb2a0691b5ccc6d1dea6dfcc5a2b5f51d7d1a76c35e481244e211e2eacf32bd628df5ad0e6c75e5185bb1d9b569f6acbfd3 -LLD.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/f898ceabcba052b7e6713a2b2c208a92 -LLD.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/92be1910f795390be5f15ba5b2c220a3209a5f7ac04fca3f5229486628bcf5d2f20cf6e4dda8b41d6beaaff42a68a9ddb95fdacc6eae33b9183b581e9a194895 -LLD.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/e366058cf69a4367945bdba9523f2a0b -LLD.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/45a786e8d0162bd5bd01c029691d2928d3744ef4a7a1efc2e39755dee2f9a9ae23ee725f0454ca601cb9c082a342209e9129df851314b5757c74767b13508fc4 -LLD.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/665a8502170729c86ea95a7ea2fcce0f -LLD.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/c1a2a85c9ce14af8c91bc9a599393c52c0b8a585057366b1ceeed34c5db44641ecd0c9b377bee80cb4951fc7102fbb4f21fd050126bfa5bb4e582ffefee17035 -LLD.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/b90b2130262f63f5189cc8e4a65e4433 -LLD.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/c1cbfd38c82d676c3fdbec486691334cf7bf4115d9ef2665012b71725c28545a49f34edf5689ea0352822c811c24c89cc152d1fccd1586b17ae8e6b2503641df -LLD.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/2d5360da4b2c9ffcea5d0a646a7c114b -LLD.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/73323e0937fe4423883480294c8df44744acde4f47380e35535cbe69c855c0e35e86a1eced3085ae0285f284f47af5ef237f4650bf2b6a8b9d5308efce88fa02 -LLD.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/a9b9a65938a7701aaac6fa706481c867 
-LLD.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/fe8243aa131ad8be54f0fca5754c2e68ec39049004ec8feed499731c5228a7a46e303ba866b9f9a55e5318c73d8a46d964673e111f6c60e5ae1628c568d7d894 -LLD.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/0d9592a287c9231ae2db65000be2cea2 -LLD.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/4ee192dd33f518d2735a829ac8f822b5672b39e8c2254987aea6e5f2f0056213bd85d84c4050d52ba9ac8c35762521c324fe2d6e18db0396e7142af9cb61a561 -LLD.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/d487598dec9969485dcf785fc0968bd4 -LLD.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/8d3117739919696b9b0c9ae398f1b1e9db8bd3e2e27839f62b3551c22ae2517f8abb69e57e23d125002bb466889b7352e69c1e9dfd9abf1c5433f274e928b341 -LLD.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/943434b08dffb54e8cf04ae7bee34923 -LLD.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/77b7bbc5d988cf36ecd10609e091cf24dea134cd32c7ee96dec7bfe1a4942553b6205653edc16c8454261f621966daeb267f42562172bab4cec9693ad733d867 -LLD.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/cb9e371947ad415de048636ed78ca48f -LLD.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/c00b696fa146e8c29b37f15f78ab3325db9b3f5b3514e615f145b4eb7c9c8788662cfb6255b7dead596cad8c576b378f7459c2c85d462b597ba5e21adbac0536 -LLD.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/485f061ee8425f042e4dd3042388bf8a -LLD.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/845a47a36c61b305bb70b1249f6fb7c4e8f740acff90d3e850ab2e887f7d959ae263431a02305bf7587e4194463f9932769d500a19709bc479eb6e6168325421 -LLD.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/f234526410e779188f3d22da438a4926 -LLD.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/12e2c9fc5385ff142bf82956268230fb01a6f1a1fdb3a6c1e13afd341dd2eea970b707168d5f45960dc9ebbf6d6598af0ceba371172f624ae823ea1eef4e9031 -LLD.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/e68cab4aec1abcfce12a13e3d1f67dac -LLD.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/67755b34ebe04f4d28be3be2a37df46b5e900f38dc4908875f66671fbb740cf033f2fd9af5116635f55025f330f7b1a478cd4900db9d00e4699b591a16269100 -LLD.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/4a71aef80b75b2ea1a5b7f8521afcf5f -LLD.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/9deb3e9615ae15dba8c744b22416243304d30f100c8d17538fcedbc18787147505f74ecc2f933fc54101527847503142cfe84a46a95ca3c57987996e3b8583f1 -LLD.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/9b28ee75d05cbaabff57fd45cc0d1cf7 -LLD.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/bfd3d6cfd4a5a2fbfe940f64d47a86a598360e90619f8175a2d1306f0894610f13fc44ba099ad59d2989beabf491df08a5611bcef3fd61b6391ea0230b11a432 -LLD.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/7962fc6f08531f0dcfa44bd667f31582 -LLD.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/2c936064685f12ed6764c187192023118e97dcbff6ca1656f0304a40772b4ecf55ee0296b3c2a00760f5bb437162e2b737635fdd59b889d35756d697fc7e6b72 -LLD.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/3eb4d78af670d446f696449a5e71e3ba -LLD.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/315dc76799f3e443fdb5ebbecf96a08070f8251930a26995de892b8e67bd35bbb365f2cc5fd93bc7cbcbc9edd08280ee8d2a36b28a704866dd3fdddae4969455 
-LLD.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/e73cadd0354897bd5bb611cc1c027858 -LLD.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/6f444a4ea22e7108ab75686ce9cd78c0db0a677e39e8434896fb1ec90b9dc013abf7de1024d329a9726dabf229a8a68c27a11f211092e676715d282efb7b8504 -LLD.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/aeb310f106f31126dbe53459e36d33bd -LLD.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/cd18c115415dd92bc7fbb5c29cacc5848b1f3851c3a526ff9c0813ad46824df0a4f13a66b1e6641ed11b44b5b937390619f01666fe6d5a047f1772f0ad03c941 -LLD.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/300dc28f7af6aaa69cec9a214a57fdb8 -LLD.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/dcb40c5934118c204968cb963a3fae91179eb1e31f5397975ca98c8a7aaecaf2a7f81847bc426fd306bb76970794502ed4f94d8f461b96ea90362130f44520e7 -LLD.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.tar.gz/md5/e1f23fef82fbfcbc28899677f12658b3 -LLD.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.tar.gz/sha512/b6b585060832d53827376ac6c00cc8bd5dfbf091c38c87020f6de42adc86dbe4fc33ec2c17f4433176c79a509681d694ed1502b179efcee2c4dd4c10a26e87a2 -LLD.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/5dc96eef71dc28611bc998ef966371c6 -LLD.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/781993c75bb07db96d02b5a7e779116864730a9bb941b64420a435956a7ecd501b5b2673f1854c09ece5f0c73559d5723c271d6352be57ddae6801a695629362 -LLD.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/8a1fe0ccf7699ab7a7a514b620112a70 -LLD.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/d002083045d3eb7c749f2e97527c1228cd317a8138ff254228e43594a6cabee47fa363785466ebc2874cc438457640ff08a836eec7334afac451506ea7bbed03 -LLD.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/331be92bd3d76bb8e86991b7832ad41a -LLD.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/7b1c6df53311a17a92a41cb67ec476f947949c4ca5d15a643badaf9f01e76a186abbb6e156f95ad1605d83250df4e081164986a6b7fcb3238076b0ec5a3bb565 -LLD.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/97c7f5267ad6927f699a25ce44f55a70 -LLD.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/7b847c6026fd7daeb17a4459b852562ce6664b2f406664be672bcc384dd5a79b9505561fc61dd8fb78a903a2ed4978f322cccad19f5a3966bac856e877c11ef7 -LLD.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/c86da6a396fcdddbd26cfd71c0f70458 -LLD.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/8d5b75b43167080b8ea456e516c9ace02ee6066ce715a56f0b42cb8045b965b1cf8d4ebc0786c23be4544693ff858816a6257b0638ec11e077df32ead62f7efb -LLD.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/d72e175272ed893688d18e868120c575 -LLD.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/9a46eeca8c7a8be65ed487a74227534e08a257e404814c44730f12a5bebc8cd160998cfd5ed30189aa606ddbe602e1b1788e465e4a210103c6726a7fd230abc3 -LLD.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/0206fdaa9582ae3bddaed1b6fd7a8cb5 -LLD.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/584a67f603f656ca5d27aa0ef2e425ad385612aff06cdc1d534b5944939a09246c93954fc153b8a89acff721e657a8903af9a640abc252d4e452f348781bca98 -LLD.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/0dd14af342467eac2e13cad4acbc881f 
-LLD.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/918f2c66898f828414009fa6ee273da5bd654e4b787ebb4d703f0be27e388b46870d68bd58c4f45638d276c61c1bfe2f3c67fbf34dfb5578158d072f82d927de -LLD.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/b373e1bf2a24f34496754438e563a5e9 -LLD.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/a739f29e332be74cbcc544903d08bbcc12c3e9f5c3d02d130ef1c69426ead2c74b14f98ac79e88ba29fb2e2dc3b28b7d81c9d42f2e27e0ce9442f6a0e81bb8f0 -LLD.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.tar.gz/md5/1fdbf6aa0751788611054f7e98024104 -LLD.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.tar.gz/sha512/2015b8e84642b2434d1089908354b47b080d5683f1c7eb2c09de09abb3674e7119ce4ecfa858bf8129fd9e9075bb45f2e53a997421f2457aa9b5c4a9d7edfec8 -LLD.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/85bd5a9e312e83a09fa5b7fd6abfba76 -LLD.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/0a5cba5c65abc72361a780f64e64c463797aefe52994699d8d785437b773530e49a5fc2746e36bc5a31aabf4eb4343870aa448f8fa2b119ede3e1c4ea228cc9d -LLD.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/ab07ed76a796d86cb6ac2ae4fc563eab -LLD.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/485117c7e1daca401c1cfe77324e8f5961f6f33ed2a3c907f4c4a2bf9c45c14d929959cf8e4d9cca9ad112a3ce6a851e336cd793bd5ee284c87b9fe487700ecb -LLD.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/852449a26af61d8554fb1b4c22c4384a -LLD.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/a080d2da5ff4b832822e099f150f0c15b985d54678a9508711f7f435d6ceec68eba12b5f8c25db0b4841dc5c5edb003b74b4fef391292b9407d7bda73d35c4f5 -LLD.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/eb999bcb67f789b6443dbfe907bc61e4 -LLD.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/811f72ce250184ccdfa30aa992884f1bdd0a791fa125f089037bf1af45b844d76807c5662a904ec9312b79efc565fd0957f195a70a39248eed99ff53f3284cba +LLD.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/229323a0b31c29b4221d79ace1a76820 +LLD.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/c00fb8bf309f0cc6c8cb4465cc0062a8b1a848d9460c53241be654d88c598847b4590b4afa4b71c4859cfc67490942eddd79ae9ac4d75a9b0e392fbf67389a92 +LLD.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/ce7804a6a846d0d951aae34607c43bdc +LLD.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/sha512/164adec7649a36b2967872884866de1c57f6f54e1c24f955593f9f6a10cd89c69493a64a37bf9f001ce3576baed867423d138dfb1df0139b4c1312e81001b167 +LLD.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/cea134f347bae257cf5f55b6388cef81 +LLD.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/16b59143e929791b0c3e56cfb4970d8b3c87adf6e847fa9e2aac17c4ff2aa311ba2c7511c1b0ae2f39d9aa92f87ad4d99c042fe35bec391ac865fedb72bd3b1e +LLD.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/5f903bab0e38fa608e8965acce6f020e +LLD.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/01e5f6a32958e04174c545f57c6c3b1bc88ccfd5ab18dcb9d67b92b55ebc7655a03bf963c4eaf7e5c3792d4691427a89db372e7534c6c8f965f8a715a32d9284 +LLD.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/241a55374fd067f3736a2bb929e47015 +LLD.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/f1fedea4e6b5f6f3bbf4d705034d6c51b06f011c2ecec1ae49c5b7bd123891eee8b991462d60be7fffd58f7c773afe910a06ec0b55b37eed9b4d09b9fdbd5068 
+LLD.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/ff018c7448a7589935333e46739ee2c4 +LLD.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/b646c6a945b8f42b396164a8e87fc2e54b1ad05681f438dfba83fdd3712a60167aaffcb0300bc42d904eb4bd34c002a71642b59540ca01e64d6fecc6daaacdd8 +LLD.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/e6ee9423a82322b9233cafb1c92eed2d +LLD.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/c915582a9ce2dfa8721741fb1ed19b719ba40f0092c2d29ebd68829ee558cef0b044a5e40985cff88e89129cbeed052d85fa5c6b6d87f9b3a68a6e89079ab4f3 +LLD.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/cc55112e2db358cf26d7bae3211d8e4f +LLD.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/0ecb43045419020eea911f1767dae23a6b1e81bb155ec493e911a9412e45f7ec71461aea2e6fe346e641747139cae43d9435ccecaa7fd6a234e4d69bb06606ed +LLD.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/498b2909f80b20588135466d5211bc80 +LLD.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/120fff24e85cf970670b20b5f4509475a3ae0d7621f8f67d018f3a7625548d736a3abc89f015966b1329c6b0a08a1db832e035ee3bae384e2c5864b73a856600 +LLD.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/1bcd298d5292f8e51f19b97fa4b27ab0 +LLD.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/695c42557f9ee53b2e10bbf74653fbad4d02124b962a1f50cf719d2821607dfbb9c1bf638dbbc9e0e544f3020a9ef4a82decd13f886cc41ddf47c07a5e40eaa1 +LLD.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/f0e0668d29253cd834418c88ad63df31 +LLD.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/f910fd8ca972b1cbe0704d4d73273e2d6911d31ae5fe842250802cd33453e4fa2ed03ae4b4df43ea4df13711cf2409c16b1c44832b44cb05f7681488c4402681 +LLD.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.tar.gz/md5/84f79f1ce1fcd57ec4bd499a684da120 +LLD.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.tar.gz/sha512/d0e4a7ecff0e3f499dc22a9409ab8bff9099d4fdf191916426be917695c7fd55043b41cb0fa21541c3d6a6c202736b5c7b8fce53244e3ac713560a47a0ed6588 +LLD.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/2323ff933feaf3754b442bee48a63607 +LLD.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/47b8e490b89e04fb8886dae438e3ddcd53c4e98045de2b0def3988671827528c8e9ae296411464c0f17cc64bd3956644673f47a3817237f27e1c3ed16ac8ef01 +LLD.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/37cf8528666064a434296f2e0039e9c6 +LLD.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/ea1504a859509f8a16030db7a65f42f0e78d67adf5946497f2178bf25456c0f2583af72c636881a4bdd1210dc0d377bdf300ef55aef5db8c56995424a1886059 +LLD.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/1c341f2b161e2320d3d1a74685887f54 +LLD.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/4f6fc099293deb1a2cf729ea7edd6e17fea0dc8b9fae9acfe34e00b1f5c798933df9538c805c8d28c6279eb38f9ebae2a1aeb1a2f23087352c6eeb3b27b63ddc +LLD.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/e306d59c71b0958c77108e650fac2612 +LLD.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/79fd7cec0e169a9555ec9b0acc3248991e2e37a1d5bb422808ffcfd4f47e79321560b7985c82dfe070fb0b5ded5c160d83e358399c6e7608eeb62cd4a1406f88 +LLD.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/c1d080f1aebb58778d730578fb113290 
+LLD.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/1f420da1897bd0a61413321aaaf032e8ed38d59e6dfe3313ca3a6ee6582ae6c566e3761ca8fcd1f5a964337ba8a9b3e73dc55ad68aca139beeb45e43d51e862b +LLD.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/6f4e0c7d2fe9ac254650dcd2842dafa8 +LLD.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/bbc71b334250e5e6429766d88595adbb671a206630987ec2a27e05711ff0f844487dffc1c136052ec11394e9d5c51c70d1b75d5348f97d3bf7fab463291e9dc8 +LLD.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/76925b9a7bc249b2227390c479c54f8d +LLD.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/20643ecb79732e3ae9666116dbd0763c18b808afa78e6a14998aadc7265cccd6efd28670592db61d3d27b8d3023be4c5f3df41fff9e1b38d61abf76829090b4f +LLD.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/399b9aac140d9050088fdb187ed4645f +LLD.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/8bab65965670fe392e78d0b9dc78c92cdcf202898f6d5a3174eb89ca5cb95b995675c8a7d81bbc4e95e490ad1a43d9d29d7907b7006789c0143a1d8f24cccaeb +LLD.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/026a4f5ae9eb3ac05e5e8fa894d77a5b +LLD.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/4bca8bd558619260cddf4e2f4593cbb2a0691b5ccc6d1dea6dfcc5a2b5f51d7d1a76c35e481244e211e2eacf32bd628df5ad0e6c75e5185bb1d9b569f6acbfd3 +LLD.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/f898ceabcba052b7e6713a2b2c208a92 +LLD.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/92be1910f795390be5f15ba5b2c220a3209a5f7ac04fca3f5229486628bcf5d2f20cf6e4dda8b41d6beaaff42a68a9ddb95fdacc6eae33b9183b581e9a194895 +LLD.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/e366058cf69a4367945bdba9523f2a0b +LLD.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/45a786e8d0162bd5bd01c029691d2928d3744ef4a7a1efc2e39755dee2f9a9ae23ee725f0454ca601cb9c082a342209e9129df851314b5757c74767b13508fc4 +LLD.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/665a8502170729c86ea95a7ea2fcce0f +LLD.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/c1a2a85c9ce14af8c91bc9a599393c52c0b8a585057366b1ceeed34c5db44641ecd0c9b377bee80cb4951fc7102fbb4f21fd050126bfa5bb4e582ffefee17035 +LLD.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/b90b2130262f63f5189cc8e4a65e4433 +LLD.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/c1cbfd38c82d676c3fdbec486691334cf7bf4115d9ef2665012b71725c28545a49f34edf5689ea0352822c811c24c89cc152d1fccd1586b17ae8e6b2503641df +LLD.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/2d5360da4b2c9ffcea5d0a646a7c114b +LLD.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/73323e0937fe4423883480294c8df44744acde4f47380e35535cbe69c855c0e35e86a1eced3085ae0285f284f47af5ef237f4650bf2b6a8b9d5308efce88fa02 +LLD.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/a9b9a65938a7701aaac6fa706481c867 +LLD.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/fe8243aa131ad8be54f0fca5754c2e68ec39049004ec8feed499731c5228a7a46e303ba866b9f9a55e5318c73d8a46d964673e111f6c60e5ae1628c568d7d894 +LLD.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/0d9592a287c9231ae2db65000be2cea2 
+LLD.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/4ee192dd33f518d2735a829ac8f822b5672b39e8c2254987aea6e5f2f0056213bd85d84c4050d52ba9ac8c35762521c324fe2d6e18db0396e7142af9cb61a561 +LLD.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/d487598dec9969485dcf785fc0968bd4 +LLD.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/8d3117739919696b9b0c9ae398f1b1e9db8bd3e2e27839f62b3551c22ae2517f8abb69e57e23d125002bb466889b7352e69c1e9dfd9abf1c5433f274e928b341 +LLD.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/943434b08dffb54e8cf04ae7bee34923 +LLD.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/77b7bbc5d988cf36ecd10609e091cf24dea134cd32c7ee96dec7bfe1a4942553b6205653edc16c8454261f621966daeb267f42562172bab4cec9693ad733d867 +LLD.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/cb9e371947ad415de048636ed78ca48f +LLD.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/c00b696fa146e8c29b37f15f78ab3325db9b3f5b3514e615f145b4eb7c9c8788662cfb6255b7dead596cad8c576b378f7459c2c85d462b597ba5e21adbac0536 +LLD.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/485f061ee8425f042e4dd3042388bf8a +LLD.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/845a47a36c61b305bb70b1249f6fb7c4e8f740acff90d3e850ab2e887f7d959ae263431a02305bf7587e4194463f9932769d500a19709bc479eb6e6168325421 +LLD.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/e4f97e8334e1f29ad9083d051a50eab9 +LLD.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/13ff037881da8a2333129bb702f515a0eb1afb3e4f27298c035c133ce5c512fa643b2a90df38d6f61b1dd5e86e32998b9061241358b61be794caba2b989efb70 +LLD.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/570f50ef6523cb8133b160af8fa2057e +LLD.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/69ec402469b2b2c85aabca1c8b36edd0c53b7e678e4c56fd96062b62a57b7ff1008f328d71e6aee36d4270a41a7bf84f62f934007398618b5426202d9614305d +LLD.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/0503dc3e4e69ca6fd7e2a5dac9c4ef3a +LLD.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/9b6c851341c2642d5ed9169326b4de9eda50ea06b1270a721d2e85bce8ffe4c595cd491e0a218c3a418aed526f881737fbb44cb417cd5ba7db972bcbaa6ad0d1 +LLD.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/08b22e98c514d48ddb1039b44f64f480 +LLD.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/5e5b7c66d5fec3ff1a9cb7989d62887699cc3e70ab36a94e6f157cb0b9adbe8d63f5f1a74cfb6765cf46851087019b12ccf09ea848ed6456d17cdc796a5bf2e8 +LLD.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/7962fc6f08531f0dcfa44bd667f31582 +LLD.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/2c936064685f12ed6764c187192023118e97dcbff6ca1656f0304a40772b4ecf55ee0296b3c2a00760f5bb437162e2b737635fdd59b889d35756d697fc7e6b72 +LLD.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/3eb4d78af670d446f696449a5e71e3ba +LLD.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/315dc76799f3e443fdb5ebbecf96a08070f8251930a26995de892b8e67bd35bbb365f2cc5fd93bc7cbcbc9edd08280ee8d2a36b28a704866dd3fdddae4969455 +LLD.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/e73cadd0354897bd5bb611cc1c027858 
+LLD.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/6f444a4ea22e7108ab75686ce9cd78c0db0a677e39e8434896fb1ec90b9dc013abf7de1024d329a9726dabf229a8a68c27a11f211092e676715d282efb7b8504 +LLD.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/aeb310f106f31126dbe53459e36d33bd +LLD.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/cd18c115415dd92bc7fbb5c29cacc5848b1f3851c3a526ff9c0813ad46824df0a4f13a66b1e6641ed11b44b5b937390619f01666fe6d5a047f1772f0ad03c941 +LLD.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/9493a58ed62367b45a055c8880de0924 +LLD.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/5a448c87ad627235d7d2c8f8f8866af0f6872c3f7775123edb09b23b772f165fa020fe0c592ad100f8c777213fe1346b642a556df66ed003771eb0e76345215a +LLD.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.tar.gz/md5/d397b37abf0026ca69fa6657dd791e27 +LLD.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.tar.gz/sha512/9e9fc915389bfa09cbe8b977f22a3466ccda052f415b3b5fdfc97a15e089d4f887fba97d6bfe6e17104f09bebe48c859bad25e9f2cabc179000247292eafca1b +LLD.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/5dc96eef71dc28611bc998ef966371c6 +LLD.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/781993c75bb07db96d02b5a7e779116864730a9bb941b64420a435956a7ecd501b5b2673f1854c09ece5f0c73559d5723c271d6352be57ddae6801a695629362 +LLD.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/8a1fe0ccf7699ab7a7a514b620112a70 +LLD.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/d002083045d3eb7c749f2e97527c1228cd317a8138ff254228e43594a6cabee47fa363785466ebc2874cc438457640ff08a836eec7334afac451506ea7bbed03 +LLD.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/331be92bd3d76bb8e86991b7832ad41a +LLD.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/7b1c6df53311a17a92a41cb67ec476f947949c4ca5d15a643badaf9f01e76a186abbb6e156f95ad1605d83250df4e081164986a6b7fcb3238076b0ec5a3bb565 +LLD.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/97c7f5267ad6927f699a25ce44f55a70 +LLD.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/7b847c6026fd7daeb17a4459b852562ce6664b2f406664be672bcc384dd5a79b9505561fc61dd8fb78a903a2ed4978f322cccad19f5a3966bac856e877c11ef7 +LLD.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/c86da6a396fcdddbd26cfd71c0f70458 +LLD.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/8d5b75b43167080b8ea456e516c9ace02ee6066ce715a56f0b42cb8045b965b1cf8d4ebc0786c23be4544693ff858816a6257b0638ec11e077df32ead62f7efb +LLD.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/d72e175272ed893688d18e868120c575 +LLD.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/9a46eeca8c7a8be65ed487a74227534e08a257e404814c44730f12a5bebc8cd160998cfd5ed30189aa606ddbe602e1b1788e465e4a210103c6726a7fd230abc3 +LLD.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/0206fdaa9582ae3bddaed1b6fd7a8cb5 +LLD.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/584a67f603f656ca5d27aa0ef2e425ad385612aff06cdc1d534b5944939a09246c93954fc153b8a89acff721e657a8903af9a640abc252d4e452f348781bca98 +LLD.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/0dd14af342467eac2e13cad4acbc881f +LLD.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/918f2c66898f828414009fa6ee273da5bd654e4b787ebb4d703f0be27e388b46870d68bd58c4f45638d276c61c1bfe2f3c67fbf34dfb5578158d072f82d927de 
+LLD.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/d1862068a670d4c04887513b914e11a8 +LLD.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/c5a91657667394e468e71d9c07df0c71918d63d094d2598875f75cf3830d8502e70f59fba59b07a2d1e0551f58d0487521c856e68e4122fd6a6f7ebd1c7c0f58 +LLD.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.tar.gz/md5/8dc0ec01029765dbfdd28d63bea8cfca +LLD.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.tar.gz/sha512/234e9db1177003a074c6ca7236c589424b4617d1a359f5f9e2ba6095a7f317d62ac731319b4b4513c523e80c15b82c99ff0fc9df5f76fad452955492e9935b1d +LLD.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/7beb510d766ac1e16017aa6924e88659 +LLD.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/bd18b733a6b2fbbeef7f8af2f13dade0330a525c83b4faed5a5d2507007be2f2f7be70f99d05524fa94ae1dca524be64adbb9dc87485477f62109f44cbae95fe +LLD.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/9ecca76cea81cd1d0fd3470778145371 +LLD.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/d1548402dfcb4aa0cf3c9e445a9810e5d8bc2411de9943b57e892ec82af29e214f6d93c58af9cd0de9b4fa5a0438e4c1fe0b9591a9582143d470e7a42e685f4a +LLD.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/b1de7acc21fe51c1486854cd46b71bae +LLD.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/9f8457c12801584340b3fbf846920299756359016d151018562f8c14e0a03f657fdb6eb1d7418fdfbf586c59e670d866384e822de9bde15b2dbd031ce5e6af8d +LLD.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/373a7007eb8b526811604fb0161f73af +LLD.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/b586815621f698c7d6ff995c93e11ea1ec55e7e7c0e34ad874f64b942ecd73685cce150d51804bdd371ec42671e7814e364944276ec91282b9b8b8226a6d5786 diff --git a/deps/checksums/llvm b/deps/checksums/llvm index 122aeb9a53337..1b375e6e72c5d 100644 --- a/deps/checksums/llvm +++ b/deps/checksums/llvm @@ -1,252 +1,260 @@ -LLVM.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/341e3f0b5c160100f5e12783b8f779c0 -LLVM.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/65b2c2091db1364adba4fe1e4ec6b9d6582432a0a0751cd0a3aa1c69798633b3aa5ff09d3de4e47d552d55d5ba81bc86662f1784cff2ed58e800452488cf9d50 -LLVM.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/249910dce0a9ee089711b71626972b26 -LLVM.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.tar.gz/sha512/1eba4ecfefb56a00390e5c528c467f921d64e9ca40f5fdb4d7fe0d7ee995f03d253887f7fe40ee285f03b12fa7a194543d18cade6af8a24bf47e56b06a78d901 -LLVM.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/7bc3125dd810bcc44ea2d454b6caa683 -LLVM.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/86742a4476481b14145855ead8a5acc6397782f6d3445f900ac2de0570f1fcf53563cf5e1f3cb59886282083ce63756604f1ca2434e9e427cdc1bd1f68373581 -LLVM.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/4eae06d9e6272aef23afc191501810fd -LLVM.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/fb75927982b1428b05b765bd5ac017b2c15d89990b7e6cb582b9e1a3ec04d09801d25d5cc6c037a12c205edb7c0f7a2d33832a2d1de7920711e9720dc3ca3655 -LLVM.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/cd86e18a63cd6e84a1493acf0df4e267 -LLVM.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/1dfefc4600368467ab90ccb527a9fdb012b9b7f485d932a0db8c4b1b81985fad931b74494b76ef2162e46280447d39a055b5681b33a17c564c50094de29aeb13 
-LLVM.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/c7cf7daa7c11827ae4f9fb2e16f3cce3 -LLVM.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/dabe2940606a671a8e3b4f28bb9e813d000650203c382372142457020f2ccd498534903aa99320afb7ff960a62d752ee6cb724e74745bc1bad1051e12cf78ab4 -LLVM.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/62e575b89fd92d9206abebc19b084abf -LLVM.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/7ac029567fd68fee82b7096e2fe278ee5cd2935494433b1faace036469c54bc471d614d0bb339750429dd88f3e723165d2dacaa627f73c3647c6f3b51a4a3034 -LLVM.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/5d39ef811bc78204ebfc7e98111469cf -LLVM.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/10fc9a64d63351e168bc79fa63bcaa6fd49c8483e5ecc40a66216192588367e9b47ec3ea2c047e88f39ea8f1caf8052726f4bc8858223f7744606156b4133970 -LLVM.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/f072fe487e5d1b717aec49a6244adf05 -LLVM.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/42b03a2562728ac86e751abab2e8233d583baf006e69b107d002a9258844ad53f62e6332eab3790364940d478c7ebab6d3e0e2194220e8436f40e6b75063d1a2 -LLVM.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/eabf0239298f13ff4893011e75828bdf -LLVM.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/34724d9c9a550c85d406021d7265e1848b002b8f212427eebff6e8f03ec6acc336efb0c2cd9d9e1c76329e7c84a84a9d852b8de5897550d957e0e9385129033d -LLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/1910b5daa31db6542f0c762901ab7d43 -LLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/c43e8091e9946ba1d8849734a25b258df95b4759a79676565b624930d4a19805a78b66b1d193e528f95174d909d7895d4a4e49fe8ca298a24dc40d25c95900b1 -LLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/a5198b13dc75ad3454e05aa6cdaca48f -LLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/9ec8078a1a7246f1545fe074783d6b88ce9b50f62b0438ff5637f6dedf5bcac427cc252c350354b7063f79f4e31a19f699c168c15bc6547a207da497026c2827 -LLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/f569654ecdd8ec2a50986ccac8388c69 -LLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/9b50e3be1577a753f0ce42704846bd126229d8dd9f28bfcbda58c4f18e4b9ca4ec6bb9b57de61b3b9af8157a2983aeffb8af782a073e5e19a8ccc261cbea9601 -LLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/496de8c9e2361f44ac6933480620d07f -LLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/02a8ecfb6e81e0fe07fb0d616a84a590e23e944588c18348c32265bf6bf19196beec189a0bc40514e379e97a9c8bef83557260839800fabe9f8e39e96689713d -LLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/05bc7406fd0a703edbc912bb3230eb37 -LLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/898dd4c19dd0f22dcd1bd44264daa8dc64340c890c3368fac7451da1ac872a687d55b5eb50ae4e156c2dc4ece226ec05775daebafe9d8b53eb83b72d2986ff92 -LLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/d6ca30fc3a2796ebda2451f80846883d -LLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/d7dc96e1bbca38272b1ca78b3ff995fc30434937a58815c63d0a9b4a017964cfb269a1f3203ad8374870257152229941d420f098644375b5f4d1b88fe39e0dff -LLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/6eb1a197150ad6c165b82c5e0e0db102 
-LLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/a159598c2bf351ea79d01e8a454a82bbd9823c080399520af3182e57259957ad07834b03c336e6225857da365e8ec1aa9f65b0ddd0821883ae817cb81f8e6dab -LLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/116d849cb2fb4b1c8c517397b2b04192 -LLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/7b2596c76d2814fc30992ba78e5c8f93519442fa76004187de9830732b80bfc6c77f5d7aca042c20d8f868cd682bb6f71e3fa32940bc8c7b401753dc4ac2f331 -LLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/27837dc854a173bd37a20f92383f6913 -LLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/1719205cba6de969e8724a99444bf958d5a7943ae90ee2dd11193f56ddfd4f0edf6d9af6da2e67787a64b91d994fee76bd8ffde36486c5229a980c2c4ef07e29 -LLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/f0016c21c045e205131ea22dc711acaf -LLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/6d192b7e21c7ee3327d288b890f4c5dd03e5f53dcba6905a34cab96b7ad0ab6364f5271af88d95e60aab8f569a8840d17e16f27f6fcdafcaf537d5d4a651dca7 -LLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/9a2bad4518966db29e37e7c88388e779 -LLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/b9a10af9dcbacf1f129d4e9b4cf562a6a4687252cc8a0fcd78f52d75c0c20be0ff32e67413a7902a628b04e7fac1091d35b64b145e33814899796009b6ed2853 -LLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/77c4e24c1e44ce14bc6476954f294a15 -LLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/d9d90a4ac788dbbc1b532623a380d4cca8813ecdf8b7b4a8cfff769499e50a1433bac618234bd0765d8a4f50aafb3fa724d16ac71baf75ae5a2b4396fa2bd017 -LLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/b29e36dcf5a0aa05734f1d6a0afd6944 -LLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/ab46a835f9843c5b3427101bcd0c5d2b8acf79693aa9b8d4282d499f25df4ca248a81fc94ddd96c75d69d3c6b3814b225eed81bec32fbe9199bffdd605f7fec8 -LLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/a411269f925cc968a0438562262e6d97 -LLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/04f275603134b0ea0f23da377e4983765885f2b1954d5c617134af9f103470a5e50dfda18bcddb836852db2382f1c134db40df00b36c8bd00e7a9e6ff1a9e684 -LLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/841921e33407e15eeeaa76354aa2b737 -LLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/e1fb8b75e141cc90916c5c81c31ee91336911983c525f38eab86682ba69679dfbe1f10c9b673323632fc75f38cacc2af47a3d5d5d1031ec9a2a60cebd68d501b -LLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/7342a1d7b1d2c0fed7f5edf1c331ffa8 -LLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/dae8ca11fa8d34f99ee19a95bcd108a65b9e6a6ddf2e5a9b126f2ba1b1cdff6b7ec21e9590d70b3785593435bb71e47703d9765811db814a90aa8a47940421ff -LLVM.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/10aac489dfa10a77427a82958f525da2 -LLVM.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/a87f721df4fc5f6e929a54d8e41e55fb366a051a610836923213bfa42a7f1593de880391131619653cc3571bb76a4c82e011852ee5a6005523957c9f0937e6ba -LLVM.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/7f231fd359f9297261c22f95d8f738c8 
-LLVM.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/fdd6441011609ef341108ff2d108c6f320d415b621a69922aeacc555c3d1ae6090a0f600f24e229a609b88ba9c1868900791a6590033b7dad333ad11f8a6365b -LLVM.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/c4523a485082044553e1a89049dc4734 -LLVM.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/db365e63bbb5189f7f348e2fd51e627ddfebf838ca9dfc6c0f8a7bbf6b8a2a03d78ea3ccdf08b0c2674f4cf5a0979506efa643554091ba751f16051bdf42ca9f -LLVM.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/bcd10e4f3e5a4b00d52441e0094de1c9 -LLVM.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/b17fae89a3dfaa9428cf48c9c0866477cc75edda6aa3800702227cc9e3d6ebaacbd60cccc96acb4ccde56a2de531dea5a436bac8e6c450a4674daae23b878037 -LLVM.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/2be8cf274b7667adf8d967a27abdede0 -LLVM.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/15f58c9a00aca5bf828708089912f128adfa3b719cc2fa8b9b4cd7ff7722d02375bc9a961b02d5c6a6c9ab637b626d78876741bd824353aab944e1c3b6719837 -LLVM.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/0dce4be3e8cead78cd3d12ca0796d560 -LLVM.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/cd60b39f2ccfca8ae0a497292819e9cc1893f6c3b2162fa9bb3136187351cfb1d6e4855141f1e9252bdee7e97ad61c0560566c2e9f73fe77a26b7f4ffadfdcdd -LLVM.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/f2548c8f4bf1edb488642245221829b2 -LLVM.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/1604986526156a40ea82f50ddd0465d06df9faf306835f1dbbdac7da7f97c60fe684cd6c64acd8833a9f8b1d16f80c123ceef94fc16f255f815b93f1d41251e4 -LLVM.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/1c268e3e93ab3a34b3c05322c2fb0dc9 -LLVM.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/f111ca82e196ea9507bb089b9d10990de1acb1a94778c40012ba6bfc16cf362369fb1f9dcc869ce14545439df21f432589ec004816a1ba0323c5edecc2b84211 -LLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/b39ce0b0f143c3bef4dade99251003bc -LLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/62148e1e0a31d6b28effda0a5016d9335005b27ffdc5be1d184efcbb13f13e29eca52eca19cc6800d1d0421c0e67a36027e05d5fdc967dae686b5bfd112fb2b6 -LLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/9475748210eb5b1947fe3aa6673b6c29 -LLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/54320295e59e5903db558b6be0220442dbaf7ea78e1612d54a35cbe014541b354ea708679da00851b962140b6da77301e27b656fd478666d3f0f710382c13a85 -LLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/6a533054ccfc3d1b0920eabcfb45ee03 -LLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/3871620aeea2ccaf6e4b17a675c5504624fc6d8ed57bf4e5b66e0372b7124e4f3d1e0f10baa1018d5a1ac5bc4bf0e9d2143e84827712fda1f512fed24829f1b9 -LLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/3fc6d1b7d59b98823d6016f97835b7c5 -LLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/745942235e40f2ab71a5eaef2768842823620d4a4dc7454a7512fb2bd95bc8a74323eec6a4b33edf1ef935151c18a20172f60fcca2fca1ff3a37b1e019ea4640 -LLVM.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/f069af39cbbb650e293093b5989324a8 
-LLVM.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/34685eccd8c1cf7b72a52bf353de16bd0cac13959584217ce5d0995b52f506909955a7051ff7b29ab9d9c3f603af8f7db936f11e4bde83f5acf16415de62880b -LLVM.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.tar.gz/md5/819a9695c365b9365b6cdba7cf9288b2 -LLVM.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.tar.gz/sha512/4280768862b19918e11b6a7ed09f150270e71cf4560b18b224b3591c460c9375777e73e41eda375271d719f23b211daf3ed51b3c87bf4ee4429344d14f1ed7a5 -LLVM.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/28ae362155ce224cef605cee53e36d0b -LLVM.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/d90f25e57f92a9da68245ceb15316e3868bf657d7e744f37cce5ccb4945777ec82fc5d470ba4fc104fe7aaabfff7b0dc260838a45331e4360b0fd14c59a55666 -LLVM.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/d10ec63510dc1a043ee0a4e37b49eacd -LLVM.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/54c393208d1f51661e631cba62a21c0685fb58827067d5ea7c42fb3d6dd8c8db99d8ee1b3c304abc25510bcb0265d86ca03e1ce19be4faa252d97cfc8a1b52cb -LLVM.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/2c1e000206c9e7c6c8e7515eb8115e3e -LLVM.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/12c0ead798e43448a30699b5386b3d88aac49aaef9bae283ea6d089a1c66df7293f4f220a2b5c3d96e73e556e37e745f38d81f5c68e09a86a2b19a6695eff460 -LLVM.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/21d6c5d5e422412b88ffce50862efb29 -LLVM.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/5e8e17ba79134e9752c7fbd28b62e4616574a5e1dfcb0980160a3aad28a2f6cec4e48ed1acf73ca1f94d74397f7ee3eba53cb1280699e40c451295590ede3fe3 -LLVM.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/293fdc43431493f915a3e0a5b3c6d587 -LLVM.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/27e13a4334a3bfb3c91fd06abcc4eca7a347f4bffcbce40834302d153ef29756295121b42ac433c266668af1428ffa08ed12ce75f21fef44cd7ac1d8bdfd155a -LLVM.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/2825dac8280d0563b7f521a9eb8c0563 -LLVM.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/7f4549ac7b63e58d8c149f6b22bd997545713477a1df3b32adf640f3951580df1645f08756d9ba80c479160cf5759e3f9372396655a35cdca14f4be4afc4ae22 -LLVM.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/0c0da0eccec4a092fc0e9a915716ed6f -LLVM.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/e538e29c4d52d9aaf151670619702541fed8231ae4c7fb9431a425d10eea95433087034a37da8fe468bd27a1c882f6f8eb9549ef71964124db10e99f4b402ba5 -LLVM.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/6b4fd19277c978306441da3b58ab86a1 -LLVM.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/6216b3e1dc6aea979d8b5abc4cc0faf510e4e64441b1d18b4b36c45d65e874e9046e14eea67efb88f3219449ef048d34fcb751b15c59f8a299aa822b426d50ae -LLVM.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/b7956d25e0e5ced19df637b4fadaa532 -LLVM.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/ad632493095a8fc3638ff48514c9902215378532c1455cb19d70da9f2ae46fdd91ad4a8b5a3151bedd38dda9f07c21f9a25d8e095ded7ba843f9bbeb005e1bd4 -LLVM.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.tar.gz/md5/392f0f0f61fb672002c7473c64a63ccc -LLVM.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.tar.gz/sha512/d620dcee0b20e3aa4b2fcb7ae835933b33b5e4c4b5d9102b885c70b1dcec535239eb5a3d6b56b51f7b049943a2c79950bcd4a4425610f7a1531f6c452eac03bb 
-LLVM.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/0b41650067323bbe0c5edd5c060b517d -LLVM.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/111a21a5b491a77c69ee724b37d15b0c7baea387bb6a36695a1c2dd5f6e2eedb0ed211513145d8a6ce4dd6329b2de67e9bfce1b03fbf911b906a33a39e573f9a -LLVM.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/a9079da821bee8e4b5aebf47a46cd9f8 -LLVM.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/7088945264d1ccead492e81636086390fad91b0e071e9f3a54ef903b619ac2a7bd38fa5e0e04ea1e299f3985e04838cd5b7a2dffd666b8e7dbbf3b419f74df88 -LLVM.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/4ccb3d0eabf8253cbdc1192b04c78d4f -LLVM.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/9d817068dcc2b60c77fa639aa7632cbf071746e7dba62fe524c095f86e88b9323c3ab82ed5af0dc8b1af9c3e6f0da18be53d92e7c05e2d056c84e5a4e974b6d8 -LLVM.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/a88f7a9f42d2cb5567c84d7fa2a2732d -LLVM.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/9b16cbf75e9971dd4950cd79aef85396a7d8522a572f1c8017af82725cb335674741af680e1dd10c731987a321d3afd5e3e85718d3c3fdd1c9de4803e72a66ac -LLVMLibUnwind.v12.0.1+0.aarch64-apple-darwin.tar.gz/md5/b95ad4844e649bf46db43683b55b9f4f -LLVMLibUnwind.v12.0.1+0.aarch64-apple-darwin.tar.gz/sha512/15e0996aebe6db91fe58121001aa7ea4b23685ead3c26b5d89afae34b535e34b4e801a971f4854d8e1a1fbc805cece06272470622eef863e225358113a127913 -LLVMLibUnwind.v12.0.1+0.aarch64-linux-gnu.tar.gz/md5/6d8783dc9b86c9884e0877f0d8ac4167 -LLVMLibUnwind.v12.0.1+0.aarch64-linux-gnu.tar.gz/sha512/d3b0c81498220d77e4f3cc684fb2cc0653792c381207390e695ac30bc74249f96a333a406b2cebdaca14e0b0a27b188cba6209bb5c1cbbb5c184d5626dbdc7a0 -LLVMLibUnwind.v12.0.1+0.aarch64-linux-musl.tar.gz/md5/052a35e879d52244e4b0804be875a38f -LLVMLibUnwind.v12.0.1+0.aarch64-linux-musl.tar.gz/sha512/d1b34fb97f9928e046d3131a050454710a93d38e60287b7e3c92f179f436586d3230cf90b0ca0eb8a3f9ef89fef7b1ffd7d52871645dfa233a8b07ca87ea2ee4 -LLVMLibUnwind.v12.0.1+0.armv6l-linux-gnueabihf.tar.gz/md5/1ad96a03a5dde506b5c05773b1849ec4 -LLVMLibUnwind.v12.0.1+0.armv6l-linux-gnueabihf.tar.gz/sha512/82306fb7b920fa7c71bd53b23d6915e7f256e8da9679cc926a53bb0d879f1f4469f43efe556ca32c9ef59e27b435572c7b39859090652635db4eeefdec0d1685 -LLVMLibUnwind.v12.0.1+0.armv6l-linux-musleabihf.tar.gz/md5/6a24fcd3a4dc3b1a98bb7963b1bb4930 -LLVMLibUnwind.v12.0.1+0.armv6l-linux-musleabihf.tar.gz/sha512/9ba6b83ccec061a1e5260c807dc8afd6e18799431b25a7e65b97662cc4db02509d02ea07fe12025d80914cec7383624b1c8fc9add46511c668e184ede263ac52 -LLVMLibUnwind.v12.0.1+0.armv7l-linux-gnueabihf.tar.gz/md5/09f1bfcf58a4124561553ab5005f9538 -LLVMLibUnwind.v12.0.1+0.armv7l-linux-gnueabihf.tar.gz/sha512/b0907cb857131183ffc338780c6c6dd1d48bf0ba61c3da1b8f20cf9a943373173b621cf9b2e8f1fbc657059a896b84aa025e6d4f0f1d1e8b623fac3e96541765 -LLVMLibUnwind.v12.0.1+0.armv7l-linux-musleabihf.tar.gz/md5/19158bcfae716b26f924d67c4e719342 -LLVMLibUnwind.v12.0.1+0.armv7l-linux-musleabihf.tar.gz/sha512/a90be57990b6699cb737ba96904e94e1f082601ca9d01e670f025b5500f526980741921c9cf672accab78cb5327714ab6ecdbb875174088f0773ebb627a98819 -LLVMLibUnwind.v12.0.1+0.i686-linux-gnu.tar.gz/md5/ba75556eb96b2bcdaf73ff68386d3bc3 -LLVMLibUnwind.v12.0.1+0.i686-linux-gnu.tar.gz/sha512/612fb765695b7aae11ef29608eedf8b959f60c021287a67b03a2a0f57a5814001ffa9b261c9d60d5f3d0582c06c2b41f75fd3afb66a045a248bd43d29e304c97 -LLVMLibUnwind.v12.0.1+0.i686-linux-musl.tar.gz/md5/2fcbceeb1bfde29be0cbca8bb6718bfe 
-LLVMLibUnwind.v12.0.1+0.i686-linux-musl.tar.gz/sha512/58f281cfc70b3f8a59cf4faa7732824637c811ddc5ea6a058f294f4c3ed4fa6c8ddab5c007567b439f2854635cf4fd146284059bfbc73e7006000ced9383f705 -LLVMLibUnwind.v12.0.1+0.i686-w64-mingw32.tar.gz/md5/153c028d97dceb6924414a7a9a137e1e -LLVMLibUnwind.v12.0.1+0.i686-w64-mingw32.tar.gz/sha512/7ae1f197600eabde9036ae58623de34a6d25636d7861777e324eb97902f65e26c6f3775e757178f8914b0cb6c2e925413f5ffc6abc9b6138470dc9e67a17f212 -LLVMLibUnwind.v12.0.1+0.powerpc64le-linux-gnu.tar.gz/md5/c08a6cf3e1baf156eb05003ed4e9ebe9 -LLVMLibUnwind.v12.0.1+0.powerpc64le-linux-gnu.tar.gz/sha512/f74e44986622329990842cb3ff549ff9254c81863d8bee468b0e58b7621067e7e7f7f18e4cbeafad6a05e0c107323de6828a78dc7afbcd7cd1892383ff417968 -LLVMLibUnwind.v12.0.1+0.x86_64-apple-darwin.tar.gz/md5/caf151150e56827be09acca6964d2b18 -LLVMLibUnwind.v12.0.1+0.x86_64-apple-darwin.tar.gz/sha512/cb3e7aa71367ec4a115bccc2e8ac6bd5d9f22b3935b3889eee1fbf7303c5f553d7d3108977bc1f6c9b6917a6ed9e10bff211fd56b8169233ceae287b112894c2 -LLVMLibUnwind.v12.0.1+0.x86_64-linux-gnu.tar.gz/md5/d95874cbf6f8b55bc314c3968a6a4563 -LLVMLibUnwind.v12.0.1+0.x86_64-linux-gnu.tar.gz/sha512/4986a8d9cc9d8761a99a4f02d017b424484233d4cbe2d4f49ccd371591384b1b8d1c4d31cb908505b86b00f2b164568e57751dd949d91af203ee4a582971798a -LLVMLibUnwind.v12.0.1+0.x86_64-linux-musl.tar.gz/md5/89077d871e15425b1f4c2451fb19a1b2 -LLVMLibUnwind.v12.0.1+0.x86_64-linux-musl.tar.gz/sha512/b65a218b05ade2e2d1582188897b036a4596d09cf65558f178c49c1a1a62b7d992b1d99fbe86a027dc83b614f178e6061f3dfb695b18a8e2b6bf76779b741d96 -LLVMLibUnwind.v12.0.1+0.x86_64-unknown-freebsd.tar.gz/md5/54ac594b4c8e7f261034a8829dad5e34 -LLVMLibUnwind.v12.0.1+0.x86_64-unknown-freebsd.tar.gz/sha512/a43756afd92081e6dd7244d162862fc318b41ca110a5e8be6e4ee2d8fdfd8fb0f79961ae55e48913e055779791bd1c0ecd34fd59281fb66b3c4f24a1f44128f0 -LLVMLibUnwind.v12.0.1+0.x86_64-w64-mingw32.tar.gz/md5/83cf8fc2a085a73b8af4245a82b7d32f -LLVMLibUnwind.v12.0.1+0.x86_64-w64-mingw32.tar.gz/sha512/297a5c7b33bd3f57878871eccb3b9879ea5549639523a1b9db356b710cafb232906a74d668315340d60ba0c5087d3400f14ab92c3704e32e062e6b546abf7df6 -libLLVM.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/2ea6046caf5a3d519ab1c3309a2eea31 -libLLVM.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/079720b30c61ded8499eefdb314477d58bd121e9f326d98696ee39b2ed91f806d5f67e68b6fbef8613a992175fe34694e5efe83e87ef3bfbed67d6b7fc41ebf9 -libLLVM.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/62c49bc7767d1ff114dc6b6a996449ae -libLLVM.v18.1.7+2.aarch64-apple-darwin-llvm_version+18.tar.gz/sha512/c708472b325cd73b94e10003bf3267b0ecbf3627072302fb22e78336974f2c7855c8597420efc954bca30aee17cec55277aa0c95a01cfff38d5d77df50c807f7 -libLLVM.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/766a2de98d275877bb676ff1f23e972f -libLLVM.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/3b353ea038fafefc13ccb4a81c7242d569c206362605be374fb312cb495f385796d052c3a7e08c7fe6ecaa3018e2a7e3dfa43d71a8c3a94987f7dc7aa378fd22 -libLLVM.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/0684a6b210b799a8a0f45a286f3dfcc5 -libLLVM.v18.1.7+2.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/4221e2d74117bd7e89aba2945030c1507e51999b236814fd23036565364c328392e87032daf1b9fe274ed89fcf9a6dcd203f0f1c8602c2a08d3fcfa189a5fefe -libLLVM.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/6b460256e923637e5107d67859eb60ba 
-libLLVM.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/7d3f2736afe4022842529b1355cf9914b7a1c7b1e261f814a4523ad30a0cf0189056d5117a06720bbb7a844a435bb632ddbda2daadbf7e01c0120452cd13e6a3 -libLLVM.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/c2b13a6a296adbb4be91dd3bb5be0877 -libLLVM.v18.1.7+2.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/9086937e718125afd535b0066ee08a3523161a94fa7ef3c9a3e86bfe760f251b6ea7b035888e61a0e7f192ed25c9bd0f4dc153df86e08569e7067a7a30ba48c5 -libLLVM.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/758d33fe0b2b3d0371708614365450e8 -libLLVM.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/79a662f72ba1b89b373d1d143ee880a12cb128211e79182e7befe8b3e50298b594de2ce489ca8bcdeadb17fceee811622f8bfcbc3e232cefdaf9927177469eec -libLLVM.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/2dcbb811be8985bfed3c8b37733c0d40 -libLLVM.v18.1.7+2.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/17f6fbd96ed5029f360c101cedad127881e14b42498d66f717448d99ca1909057ae79169d934e08157edcc7467db4b3941bdda26a2e9f42645963eec51f27e29 -libLLVM.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/bd3b904b5f9464aaaf87c41b899c8ca5 -libLLVM.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/fa99e8025419a18f548f658ea589771c2803480c3cb3a25cfb75e26ed0993b7b37bba204d7cba1475319a71159813b2b58a3b3327ba24d264cf80ef24263628d -libLLVM.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/b4f9038d5c3c13207111ee1a9a918cba -libLLVM.v18.1.7+2.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/e8b97bee30f597cc06d31175e12f0c2035aef0054e8abdb431f31b1e9d440d561bd9bc6637a403441aa7f3e1d2a46c600734e17e3b7ed0ae899c92df91758780 -libLLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/06d8e634b4a6914efc18b7962df52021 -libLLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/cf6aeed1eaf652e5830e34dd2ba88abc33668953281146106bbfdbc92f5f225645f00ff5b4a0eb902baf904362ab4eb32192fa50ee5b2672e8b031fe2550f9a8 -libLLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/53e83804b63e6ae4d0f1c97abcbbd1c8 -libLLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/45b3ee9b105ef2ef106fa8ac7b8e902cd1d6bf3c9bfb57edeca9e14f1654714d23fb086b369a9fd3cbb828c04fee4cfe80d2b2a2bfaa852d3ac65c0d213d8c62 -libLLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/91b6cf00564053d385e30b34e5b8778e -libLLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/9111f3f02b49bf78340c9b0c5c1325a1ca09b62c83aefece1121573dcc21dce095060351f18997971e5cfbaab346cb12c75cdc0fbe8fa92aca2e8a68b5f5f577 -libLLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/f6c91b71dfd73c7301a4e3de48e072de -libLLVM.v18.1.7+2.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/581d7e1e4d85aeaf082fa31555074471705e391de0771bf66665807afb5192c79c481ca30e73a25f4e2d48d4d325f0198e39bcbfaed2c9bc7477ee917667f5ce -libLLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/ce41ee46959e5e3a17b6c99293afedb7 -libLLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/73d8c5af750ea9deef822aec58d8697243ca154bc4435ac0b0ab8c90fc97750e91fa55f8de7b8283eb1ab19951cda3e3c4c60834bcf13730163e593126a8eb57 -libLLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/67ed5b654852dad400aef17fb542703f 
-libLLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/07f70c57e27eea37f520f6f0a954b54d2506530d5eb5a74e5a8526ba8ef55a948073c49037544b602d03d0aa482704292eac943f0a83421386ccbfbf22ee8510 -libLLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/5b8bd88d49ce21e5b63af6f77782eed4 -libLLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/cef1c561ae388b2baa08e39dc195989cb795d8a2747f5f11e0dc9d9e107b9e99dbba465335376beff2e1b326512f6afc962775e0b246f3edcfadf509235cabd8 -libLLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/5fbf26d20b2ce3f61edc9a9ca2eb5284 -libLLVM.v18.1.7+2.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/2c564c95d648458b9a0f0c963246cf5564c625107682f680390b6db5fde0e2b15a964fd3fd23734b5b2bb135db1fc698812d61b3f275710593f4defaee4a9c23 -libLLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/c81bc29a75acf4f806f3eb13bf890604 -libLLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/c8c922a0a4fefd549f1c2ba396a3cab9cf7738aa82e7ccf7ca29c090260e2d73ec45d6f2b07173d584f6074b10fa04052114deef6ecb6f53ea87f1924074137a -libLLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/1fcb40ba1a427105b4e7d13a6c11dc78 -libLLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/392c9ee85ba7ab6697bb8979c7f443d1d25f7ac9178e96a886401cfc68d75a43ce98bf3038a7ba70a9a990f65e604d38e043472cec3badb25fbd1b38cfbb7162 -libLLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/427a19eaf69725d11bb33f48de9cb205 -libLLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/542e209b10c13d8dca867247a7414f84adb832f40051fcbdf0dcb09bc9664a77248e1b0ea1687805847dd9f5a05b86475dd76aba427c9a1bc83f8502444c60bd -libLLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/ab34bfa2950014936edd13a7b5db8170 -libLLVM.v18.1.7+2.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/6376b25d0278e5c97581480fb4d54371b09a08be88f4cc39d2c7b3875f1189cef60c1be6bea5e12b0cf306cef8b394bc7d00f8b0fd95d749bd1b4eb318af7e15 -libLLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/cb6300fe87fd7cb9840f3bc44af26878 -libLLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/a7984cd90fef55559142fc05d91b0da1f37f77f25214e93ff7641b7c3958f08dc7c082611915dbfda4bbbaa392656ac8604d4f75369777dacfb78baee2f99b16 -libLLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/b8a4e8ef43340e9cbdf5e4479c6a5a56 -libLLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/fc249f2b666c8a8129e05ea08c773cbeb7af6d37791f271461eedd99adcfc5082e8609ed096d8a46edd1e73505352712a41e0ddc247a371f78227aab01fbe0f3 -libLLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/5864689df3298be4b1b4df1ae0412d3a -libLLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/8f32f73e366c3a6993fa8d6b8cd1a9391611b0644cd4a77a4f7a235c037fdb75308d99b5a23ada6e4a73ed5fbd8f929a981d6bf317d79d52396220c221619303 -libLLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/6bf798476c4e94716cc47a95580104ad -libLLVM.v18.1.7+2.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/9dbd27a000dd3c3dda9047d366a667c4b179cc61582525adb0f8227e8055413ce46efcbc1530305400239656e2f1016fb8833fb7f4734714078e035d388f3531 
-libLLVM.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/66e2889f86ae6bc1977419e6d9be729e -libLLVM.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/d0cac798c4979b4d818d36596b173e523cba3f41ff7ab1e2111f6a75c3e819e563e207a547328f005c5a93c7f8f88c17bf43c1139b5c2690df4f1d719f82920a -libLLVM.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/0534b72d6d33c8573f79dce8a2a5a6e6 -libLLVM.v18.1.7+2.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/6beaf1b45eec8b46fbf92f692f53e6df40bf48e50589aeb5ef99240a5a3ec9089ffb350dda6df24530937d613bf6d2cc4da76e92921ea00def9d2d38ac5bbeba -libLLVM.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/2cf9a1ca20472179ce4a9eb3a949457b -libLLVM.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/cebae06ccee12a14d20d3056ce0519b1e774e3c9d9200a783262fcc40aee6d7aabfb08714bf53b88e03d8b09a96d3cda248a70c16188f8c707b291642998262a -libLLVM.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/4712f6a46e0ff407ece958a7701511b9 -libLLVM.v18.1.7+2.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/9a0a2dfa2076b93027f766277a6890cf94d67c131697f74945e92cf13ae64e84c09d3dd744498986fb22ad5e5465300aa9c8ae6632fcf919a0932515edfcc1e6 -libLLVM.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/b944ae477232ef10d213b4c7743280fb -libLLVM.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/25ff757620baaf6fbacb375b103dc0dd9af6a23c3d3bca567c182a6357a367ca125d7b6c66927d7db23816865b6ec783157352fba08532336de467be80efcb9c -libLLVM.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/52345a44b3ac74b3cdf93852bbc63710 -libLLVM.v18.1.7+2.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/3e5b449b0f1bab302c45f9ee9f04d2cfbb01ce24e86096aa610fdf360ad65828f1b73734beb28b3d3c249ba8ef657d2663c5492940504f47c973038733b15248 -libLLVM.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/36e058b96771b4cf77e29b800227fa03 -libLLVM.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/98873cb2963c4469b0f69ad1d9d9e27056aabfb46a2642dfa3507b7fe2f0b0fc41c3991a2543125291783699e39fcbcac0bd6e92fa8f0df97609a85c340fd25b -libLLVM.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/3b3823fbafabea289a769958f633dcdb -libLLVM.v18.1.7+2.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/91a9c1ad6f37cb1186ba3392935fb55d49e0f8d6afc768cf881886f9b1d8b0a2b0ecf0c81a8e32e36d32cac04c065ac852bdb95ba5ff6780c00a763583a02973 -libLLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/bbf060d61b294b86f7e3dde381b00b8a -libLLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/632372d41f6e400a10fae27c6cd06a5a344cfb5902cad7928cb4133f14f36f0a3373e69e73ce9baf52f518340593c3a5a16173ef59a1878e6300e9975aeaa157 -libLLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/3d730b713e01cdb5a7a5a46028afd41b -libLLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/052ab4fa7ac3b2c430601753ab078cdc9fd6db7f65ee0b76bb05473f4c5b99ec8919ad9d347425f1928cf619548e992c86ba97f9994218f50bca617e43d2f0d9 -libLLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/bf9dcb92ba8c031ae62ed4434fd5447f -libLLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/e53be14dd02a2cef8eccafb9301d29c51d652c635703529c1444947002993f6639083eb8bef13af21c9796717ce4b3129dcdcbe2751a1173d39e321db8f6e3c7 -libLLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/b5cab0fc7c6643c6dd161f1e553ef1a0 
-libLLVM.v18.1.7+2.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/4032634449e2669479761c4323096b152f8df4948e3a97eea10f0b400fbf2a00d1edda59b74a714b62c4e204b113d8ecda78d828c3344ebe8bd750d14b3c4c7d -libLLVM.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/054e06d882173ede2886c510e8519c80 -libLLVM.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/eb97ec25354badcac1b8a4a09fd9e04cfbb7d35493c54cff82af9ffa4c2dc5070c9232a86e900d6eb9acb03f1c572fcde8d2a865477bf6c9fbfc139763a9dd1c -libLLVM.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.tar.gz/md5/f1c23200365b659f0dc07cc6d0a32c60 -libLLVM.v18.1.7+2.x86_64-apple-darwin-llvm_version+18.tar.gz/sha512/fad13fef7e7584b3f756fce9125950e788e79608cf5d0c023cb8f8a4e79001afefa8060f7866875e4861a268b3020e50305e66bf472360c1d92fce12d7a81ba9 -libLLVM.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/69564913bae176a167d24d3291ef7af7 -libLLVM.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/b8eeb86b66d767218e59671bdd597623238eea72319913c2ac5e116faec3f4c13739a24f3b95338ed857ec29e714dc0308e4ddbfe359332b3c27ad5235052342 -libLLVM.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/bc9d5637fe30f21d2231a98371e798e4 -libLLVM.v18.1.7+2.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/4efbc2823322abe80d0134d35926767bd9cab717cde9308726a6a8891e5a707476138888c695ed399e3dddb57baf17abbc43a0a338cea2e5c0f472ab427c12e3 -libLLVM.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/8492ff91e6dbd1a66edd8aaf0390a582 -libLLVM.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/6443bd2fa9c5beecc2b002c26595f2cf3a8e2ea5eb49aa4c00f7252a6623fe0f8c01824941ebe5475460641285c4e56a5203056c1b93a78250b7e48fb5ac9e00 -libLLVM.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/6918c9978fd8b5887c66eee76950478d -libLLVM.v18.1.7+2.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/d455a4f433bf3ea1b5100b9d45199bc785e4b6fbc7659bf06cbde6ada471134e7d4243d3a3a1f71d579126ef8371d70e59f174e124b3ff8d4842e9ee83e2dea4 -libLLVM.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/075f87d106dd95c8e9c6e7e157b5e9db -libLLVM.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/8132379d8f44a21082c7a90f58a7dffb0c6ee725efd58a959d4023787411b080d72913bb1e89a35072f97aaf1ca512ab1d027b37eaed819e3c053d7a0cf64269 -libLLVM.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/4cfc2838a77f05883f82e50b3723dcfe -libLLVM.v18.1.7+2.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/20079c81cd6a4020b087485be1ab4928b3bd3e1a53728cc98137a35b969484278093bc75a9e51ddfd8331556577c5fb3109d74dc2eccffa93b5390e0fabff2b1 -libLLVM.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/5b8cbf00631bd4540b7335a86302a1fe -libLLVM.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/51ba9a4b74b740905cee4baf7f4e5f3620ed81e0746f49cd352d874ebedab95277c5031123f880c9239b7dbf505b10f6531f79c8a6b0482a652b8324f4137cf5 -libLLVM.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/11010cc2d58b1a8c6a6e7bc24df0c0db -libLLVM.v18.1.7+2.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/a6bdd9a2a2fa9a572e74ced69c3ce9d1b84cde18155ec9bc7dfbaba411ee6c43d229e6fb333eff66fb63b632b485b46b7cb1657c0c49d9d9bb849fa13f0bbc7b -libLLVM.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/8afe26d16d9fdb0fe6c0248c51b4f053 
-libLLVM.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/32a92685f417c1887aef3cd8a9cadccc4de3e560ba8fc42e8db721f273a3451927b24dc4a2c2e83446e32a84d47f714fc3c22ce71989f2e97c5ca23a1783b8d6 -libLLVM.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.tar.gz/md5/59d8d911907127ff56f5eafcd8663300 -libLLVM.v18.1.7+2.x86_64-unknown-freebsd-llvm_version+18.tar.gz/sha512/9b0bf6f9d8d32ccbec349c249b79fd0fa3b4949c04b69c9d408f19dfa3b4f00e5cfa51b798234721f72f2793161d6af6491856e10e6a507976b0da6ed7a8065b -libLLVM.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/b0d9a7eca92d40ecbfa47461d52659e2 -libLLVM.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/dc4a91e164d88ff51b4a642b556d5767156f28d1efafa533f5d7c619e05535e2000afb2ea47469a90f5a19f970e8f0522f35d59ec250e2f9b42ce22fadb9ffd3 -libLLVM.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/92a60309ad33391415c6703edbbd5423 -libLLVM.v18.1.7+2.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/2fe90ac804d94bcf0d4058a8b8f0c274e405ffee7df0175f5e7ccd5014b29a813af48152870e1af0a79df8d3eec3118c233bc4f5b3f8439fd9792931140ee944 -libLLVM.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/0964df17cb98d2d869a33468477f9901 -libLLVM.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/2c062acd62175d32dda773e9116608ced814a64ab06ea73f89958437178e2603b268638e88162fb81c22e5947cf4cc925b1af10c6f9320be22c92b279b278992 -libLLVM.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/7dfb8e61e972c66f1d754cb979bc0309 -libLLVM.v18.1.7+2.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/d462b6fe7aea75f6fee6c5c2f24576569b5deac8027fb88240e16c55a54d68b7dcb06b3ec4ab514616fb88549fc2f10fb1d587a641d6f29fa66273904bb9cfd8 +LLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/f8c2d285a6db7c3b89d295b32b78f07b +LLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/99d923fff09b70093962cb32d2a12a2d2355824c1c3404900d593cfd0e95a4b52744e7d3fcd22407651916adc2e1534637437630843762c3f2c0c650881aa0e6 +LLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/2ad6bf2ab91cb75bc3bb627b1859997b +LLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/sha512/bd06a3adcae64700f4051a18705e7937539b3cdfa61dda38260398a8896401a267b718594631d71afc68a3b273b0d05f6018927c3a08c070bd6c45d53b19c78a +LLVM.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/7bc3125dd810bcc44ea2d454b6caa683 +LLVM.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/86742a4476481b14145855ead8a5acc6397782f6d3445f900ac2de0570f1fcf53563cf5e1f3cb59886282083ce63756604f1ca2434e9e427cdc1bd1f68373581 +LLVM.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/4eae06d9e6272aef23afc191501810fd +LLVM.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/fb75927982b1428b05b765bd5ac017b2c15d89990b7e6cb582b9e1a3ec04d09801d25d5cc6c037a12c205edb7c0f7a2d33832a2d1de7920711e9720dc3ca3655 +LLVM.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/cd86e18a63cd6e84a1493acf0df4e267 +LLVM.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/1dfefc4600368467ab90ccb527a9fdb012b9b7f485d932a0db8c4b1b81985fad931b74494b76ef2162e46280447d39a055b5681b33a17c564c50094de29aeb13 +LLVM.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/c7cf7daa7c11827ae4f9fb2e16f3cce3 
+LLVM.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/dabe2940606a671a8e3b4f28bb9e813d000650203c382372142457020f2ccd498534903aa99320afb7ff960a62d752ee6cb724e74745bc1bad1051e12cf78ab4 +LLVM.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/62e575b89fd92d9206abebc19b084abf +LLVM.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/7ac029567fd68fee82b7096e2fe278ee5cd2935494433b1faace036469c54bc471d614d0bb339750429dd88f3e723165d2dacaa627f73c3647c6f3b51a4a3034 +LLVM.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/5d39ef811bc78204ebfc7e98111469cf +LLVM.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/10fc9a64d63351e168bc79fa63bcaa6fd49c8483e5ecc40a66216192588367e9b47ec3ea2c047e88f39ea8f1caf8052726f4bc8858223f7744606156b4133970 +LLVM.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/f072fe487e5d1b717aec49a6244adf05 +LLVM.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/42b03a2562728ac86e751abab2e8233d583baf006e69b107d002a9258844ad53f62e6332eab3790364940d478c7ebab6d3e0e2194220e8436f40e6b75063d1a2 +LLVM.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/eabf0239298f13ff4893011e75828bdf +LLVM.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/34724d9c9a550c85d406021d7265e1848b002b8f212427eebff6e8f03ec6acc336efb0c2cd9d9e1c76329e7c84a84a9d852b8de5897550d957e0e9385129033d +LLVM.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/8b736710b2c749fccf0a782f3b887ec2 +LLVM.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/d7458ead5a604781a117e54a03dc6f3fc47e932298c68af425a6725ef4767bb512c910316818081d5e27d9d08b4ce1792d684c0014271fd492eedaf47acc5eb3 +LLVM.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.tar.gz/md5/ed0487ad3494352ffebfac51ef947168 +LLVM.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.tar.gz/sha512/e13082056be94335b1f4253afe3c4a25555b6bd10c5d68052f01117415dab344a3f883a9f25ff4ac630262756dd15825e74395650d80181c85c0663d7028a9f5 +LLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/1910b5daa31db6542f0c762901ab7d43 +LLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/c43e8091e9946ba1d8849734a25b258df95b4759a79676565b624930d4a19805a78b66b1d193e528f95174d909d7895d4a4e49fe8ca298a24dc40d25c95900b1 +LLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/a5198b13dc75ad3454e05aa6cdaca48f +LLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/9ec8078a1a7246f1545fe074783d6b88ce9b50f62b0438ff5637f6dedf5bcac427cc252c350354b7063f79f4e31a19f699c168c15bc6547a207da497026c2827 +LLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/f569654ecdd8ec2a50986ccac8388c69 +LLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/9b50e3be1577a753f0ce42704846bd126229d8dd9f28bfcbda58c4f18e4b9ca4ec6bb9b57de61b3b9af8157a2983aeffb8af782a073e5e19a8ccc261cbea9601 +LLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/496de8c9e2361f44ac6933480620d07f +LLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/02a8ecfb6e81e0fe07fb0d616a84a590e23e944588c18348c32265bf6bf19196beec189a0bc40514e379e97a9c8bef83557260839800fabe9f8e39e96689713d +LLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/05bc7406fd0a703edbc912bb3230eb37 
+LLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/898dd4c19dd0f22dcd1bd44264daa8dc64340c890c3368fac7451da1ac872a687d55b5eb50ae4e156c2dc4ece226ec05775daebafe9d8b53eb83b72d2986ff92 +LLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/d6ca30fc3a2796ebda2451f80846883d +LLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/d7dc96e1bbca38272b1ca78b3ff995fc30434937a58815c63d0a9b4a017964cfb269a1f3203ad8374870257152229941d420f098644375b5f4d1b88fe39e0dff +LLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/6eb1a197150ad6c165b82c5e0e0db102 +LLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/a159598c2bf351ea79d01e8a454a82bbd9823c080399520af3182e57259957ad07834b03c336e6225857da365e8ec1aa9f65b0ddd0821883ae817cb81f8e6dab +LLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/116d849cb2fb4b1c8c517397b2b04192 +LLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/7b2596c76d2814fc30992ba78e5c8f93519442fa76004187de9830732b80bfc6c77f5d7aca042c20d8f868cd682bb6f71e3fa32940bc8c7b401753dc4ac2f331 +LLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/27837dc854a173bd37a20f92383f6913 +LLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/1719205cba6de969e8724a99444bf958d5a7943ae90ee2dd11193f56ddfd4f0edf6d9af6da2e67787a64b91d994fee76bd8ffde36486c5229a980c2c4ef07e29 +LLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/f0016c21c045e205131ea22dc711acaf +LLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/6d192b7e21c7ee3327d288b890f4c5dd03e5f53dcba6905a34cab96b7ad0ab6364f5271af88d95e60aab8f569a8840d17e16f27f6fcdafcaf537d5d4a651dca7 +LLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/9a2bad4518966db29e37e7c88388e779 +LLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/b9a10af9dcbacf1f129d4e9b4cf562a6a4687252cc8a0fcd78f52d75c0c20be0ff32e67413a7902a628b04e7fac1091d35b64b145e33814899796009b6ed2853 +LLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/77c4e24c1e44ce14bc6476954f294a15 +LLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/d9d90a4ac788dbbc1b532623a380d4cca8813ecdf8b7b4a8cfff769499e50a1433bac618234bd0765d8a4f50aafb3fa724d16ac71baf75ae5a2b4396fa2bd017 +LLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/b29e36dcf5a0aa05734f1d6a0afd6944 +LLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/ab46a835f9843c5b3427101bcd0c5d2b8acf79693aa9b8d4282d499f25df4ca248a81fc94ddd96c75d69d3c6b3814b225eed81bec32fbe9199bffdd605f7fec8 +LLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/a411269f925cc968a0438562262e6d97 +LLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/04f275603134b0ea0f23da377e4983765885f2b1954d5c617134af9f103470a5e50dfda18bcddb836852db2382f1c134db40df00b36c8bd00e7a9e6ff1a9e684 +LLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/841921e33407e15eeeaa76354aa2b737 +LLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/e1fb8b75e141cc90916c5c81c31ee91336911983c525f38eab86682ba69679dfbe1f10c9b673323632fc75f38cacc2af47a3d5d5d1031ec9a2a60cebd68d501b +LLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/7342a1d7b1d2c0fed7f5edf1c331ffa8 
+LLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/dae8ca11fa8d34f99ee19a95bcd108a65b9e6a6ddf2e5a9b126f2ba1b1cdff6b7ec21e9590d70b3785593435bb71e47703d9765811db814a90aa8a47940421ff +LLVM.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/10aac489dfa10a77427a82958f525da2 +LLVM.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/a87f721df4fc5f6e929a54d8e41e55fb366a051a610836923213bfa42a7f1593de880391131619653cc3571bb76a4c82e011852ee5a6005523957c9f0937e6ba +LLVM.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/7f231fd359f9297261c22f95d8f738c8 +LLVM.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/fdd6441011609ef341108ff2d108c6f320d415b621a69922aeacc555c3d1ae6090a0f600f24e229a609b88ba9c1868900791a6590033b7dad333ad11f8a6365b +LLVM.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/c4523a485082044553e1a89049dc4734 +LLVM.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/db365e63bbb5189f7f348e2fd51e627ddfebf838ca9dfc6c0f8a7bbf6b8a2a03d78ea3ccdf08b0c2674f4cf5a0979506efa643554091ba751f16051bdf42ca9f +LLVM.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/bcd10e4f3e5a4b00d52441e0094de1c9 +LLVM.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/b17fae89a3dfaa9428cf48c9c0866477cc75edda6aa3800702227cc9e3d6ebaacbd60cccc96acb4ccde56a2de531dea5a436bac8e6c450a4674daae23b878037 +LLVM.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/6bb986b1c9b66ca24c976e6534726b00 +LLVM.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/1fd7cf1c80594561a8b83cf993192299e8a96046bd1e2f6eb330898c5e2dd0fc7c6ee0e3115d4e4049b83c71e724fab19a5d468e72fd141d8a2c4c02d831b71a +LLVM.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/c44aad21aef3b92fa0b1543ab9e4b93a +LLVM.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/1aed6fb716a576b132d13397c927b36f00d78a42e5273168f1eacd208e366c55328286c56bae0abaf2c7ee424e7f19f4e096cd53f7d7caf863a0d58de1a2386e +LLVM.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/c3494f146906e178c5e5e32c10f6fec6 +LLVM.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/a0fe26f88492ce8416257e76a5938a65b4911822c9c3e3bd0e3455adae1beaa952a769d616e8f8525c3bac64a6e3cd7f1dfd68800b5e7db94ad63320a2716e2b +LLVM.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/4644616c2e8937169500c200fb56322a +LLVM.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/1250c5c9541782dabb5f0063bb2a18ee15a5dcd0e8b675e78474fa7dce2d51dd97e1bc4eee0a526a73f7812c57e41faa85e021fea4de74d33c62ae67ca555d73 +LLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/b39ce0b0f143c3bef4dade99251003bc +LLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/62148e1e0a31d6b28effda0a5016d9335005b27ffdc5be1d184efcbb13f13e29eca52eca19cc6800d1d0421c0e67a36027e05d5fdc967dae686b5bfd112fb2b6 +LLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/9475748210eb5b1947fe3aa6673b6c29 +LLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/54320295e59e5903db558b6be0220442dbaf7ea78e1612d54a35cbe014541b354ea708679da00851b962140b6da77301e27b656fd478666d3f0f710382c13a85 +LLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/6a533054ccfc3d1b0920eabcfb45ee03 
+LLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/3871620aeea2ccaf6e4b17a675c5504624fc6d8ed57bf4e5b66e0372b7124e4f3d1e0f10baa1018d5a1ac5bc4bf0e9d2143e84827712fda1f512fed24829f1b9 +LLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/3fc6d1b7d59b98823d6016f97835b7c5 +LLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/745942235e40f2ab71a5eaef2768842823620d4a4dc7454a7512fb2bd95bc8a74323eec6a4b33edf1ef935151c18a20172f60fcca2fca1ff3a37b1e019ea4640 +LLVM.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/4bf72195bb2b3fafd98bd3f1966dfd0a +LLVM.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/6554fd0374875428d0479e192ac3c70823a1143ac9acf0fafb3332f6c03e7fc8d14513512152bc995c186024bc36de77c5e7895ac1382f962b22b1089c3cf176 +LLVM.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.tar.gz/md5/5631a8736cab900c3fcfeb559abc54a2 +LLVM.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.tar.gz/sha512/55d93ffcc0125720f7db379396c5a79e98408225aebebc72fdd05b38605e73481eef46c219f59088b3bdea6257a7a7e369e6e0110019164374ac35bb49897738 +LLVM.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/28ae362155ce224cef605cee53e36d0b +LLVM.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/d90f25e57f92a9da68245ceb15316e3868bf657d7e744f37cce5ccb4945777ec82fc5d470ba4fc104fe7aaabfff7b0dc260838a45331e4360b0fd14c59a55666 +LLVM.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/d10ec63510dc1a043ee0a4e37b49eacd +LLVM.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/54c393208d1f51661e631cba62a21c0685fb58827067d5ea7c42fb3d6dd8c8db99d8ee1b3c304abc25510bcb0265d86ca03e1ce19be4faa252d97cfc8a1b52cb +LLVM.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/2c1e000206c9e7c6c8e7515eb8115e3e +LLVM.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/12c0ead798e43448a30699b5386b3d88aac49aaef9bae283ea6d089a1c66df7293f4f220a2b5c3d96e73e556e37e745f38d81f5c68e09a86a2b19a6695eff460 +LLVM.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/21d6c5d5e422412b88ffce50862efb29 +LLVM.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/5e8e17ba79134e9752c7fbd28b62e4616574a5e1dfcb0980160a3aad28a2f6cec4e48ed1acf73ca1f94d74397f7ee3eba53cb1280699e40c451295590ede3fe3 +LLVM.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/293fdc43431493f915a3e0a5b3c6d587 +LLVM.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/27e13a4334a3bfb3c91fd06abcc4eca7a347f4bffcbce40834302d153ef29756295121b42ac433c266668af1428ffa08ed12ce75f21fef44cd7ac1d8bdfd155a +LLVM.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/2825dac8280d0563b7f521a9eb8c0563 +LLVM.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/7f4549ac7b63e58d8c149f6b22bd997545713477a1df3b32adf640f3951580df1645f08756d9ba80c479160cf5759e3f9372396655a35cdca14f4be4afc4ae22 +LLVM.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/0c0da0eccec4a092fc0e9a915716ed6f +LLVM.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/e538e29c4d52d9aaf151670619702541fed8231ae4c7fb9431a425d10eea95433087034a37da8fe468bd27a1c882f6f8eb9549ef71964124db10e99f4b402ba5 +LLVM.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/6b4fd19277c978306441da3b58ab86a1 
+LLVM.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/6216b3e1dc6aea979d8b5abc4cc0faf510e4e64441b1d18b4b36c45d65e874e9046e14eea67efb88f3219449ef048d34fcb751b15c59f8a299aa822b426d50ae +LLVM.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/689ce55ca1eb1be8090a7dad2e5f1a86 +LLVM.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/a2ebd80e71375abafdaa45d4d104c1822d2205bd680b8c8541aa90dbc54d530e348a64a18acfba14cb66c078f0386d54375bf26cddef935a348e874b99609312 +LLVM.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.tar.gz/md5/dbb26e6bd19d71607248446d38ea0a42 +LLVM.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.tar.gz/sha512/eecaafa95e1df14f57f93e44732a23b1fb734af73bb533c8b4662dd0ddcfe696271571b97e2a5346581c000336f9fa0b28bf1c92535490e5174649a7e01b6019 +LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/51981c5aac875046101670896de92c2d +LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/466da0868068d27dfa8284a3431925c9cfed9314f681bbadd0c331ae67a1acb975015a739abfea239e7f93a2fd7d439601f5d8421d7fa4fcceec5730649686a7 +LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/65da06ac7ef16d3e3ea6137cb9a943f4 +LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/6c70bcd54d1cbe502b7d9db50a59a62a8a10e4e90d7d607d61ed7737a70474aba2db5f5151b1dc03f965a84d8770d4be6f248ed1f4bc6c9e63298abecb936f1e +LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/0a4cefbd15c37cb418cfaac56b789146 +LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/7fd5c69bfde6264ae4e548ec9c399dd09b1a5fe4b9cced23d6bc4257f0f67874b838d53ee8d6eef7fc01ee9d086758e06f00bb0a0388b97de2eb85143a47192a +LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/da2430483844823d31bcc5f302252ac2 +LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/19e9168b44d40acdc0d924e16f93c315237207a4441ae78997c511135872e557f654236bc859453069671145e81e961ac93c9dfa601d1b6631b9ccfa09b929b3 +LLVMLibUnwind.v14.0.6+0.aarch64-apple-darwin.tar.gz/md5/d8584e0e3dc26ea7404d3719cea9e233 +LLVMLibUnwind.v14.0.6+0.aarch64-apple-darwin.tar.gz/sha512/7a0396eaace91b9b4d013c209605d468a7ff9b99ede9fdd57602539a6fa6f3ea84a440f32840056a1234df3ef1896739ea0820fee72b4f208096c553fc54adb9 +LLVMLibUnwind.v14.0.6+0.aarch64-linux-gnu.tar.gz/md5/d6edea561b61173d05aa79936e49f6b7 +LLVMLibUnwind.v14.0.6+0.aarch64-linux-gnu.tar.gz/sha512/9fbe29ec6a33c719bc9a4dd19911ceded9622269c042192d339a6cf45aa8209ad64c424167c094ca01293438af5930f091acba0538b3fe640a746297f5cc8cb3 +LLVMLibUnwind.v14.0.6+0.aarch64-linux-musl.tar.gz/md5/3ec68c87e4bddd024ee0ca6adc2b3b96 +LLVMLibUnwind.v14.0.6+0.aarch64-linux-musl.tar.gz/sha512/be3cd9d5510c2693dee1494c36c479d32311ff83f5b2d31c08508a3dd370788961ce46e9025afe148a0febd05942fd294370a357dd717bee353d8a108617f6de +LLVMLibUnwind.v14.0.6+0.armv6l-linux-gnueabihf.tar.gz/md5/8ca5a926d69124225d485d679232a54f +LLVMLibUnwind.v14.0.6+0.armv6l-linux-gnueabihf.tar.gz/sha512/353f540b342bc54877e7a41fe65c9eeac525fd91bf4cddbe1b3ec2ed93c3751beaf8316a4d31530502b067100b160301262e10cbe4407db3abf1ceb5d9a74eb2 +LLVMLibUnwind.v14.0.6+0.armv6l-linux-musleabihf.tar.gz/md5/4e5b576958f2a2e708eb5918ceef0de0 +LLVMLibUnwind.v14.0.6+0.armv6l-linux-musleabihf.tar.gz/sha512/2e98c472d3ee25c2e062efa4eb21ac9cfc49b26ea9d99ad4a8e7660c4c09f121d31193bd161f54ea332ce94785d601897311e9e6668adb1e25e2b666e0d5bb3f +LLVMLibUnwind.v14.0.6+0.armv7l-linux-gnueabihf.tar.gz/md5/1c81a886e799663ce8d04400c5b516a9 
+LLVMLibUnwind.v14.0.6+0.armv7l-linux-gnueabihf.tar.gz/sha512/236b78b9a17eaae74ab07349ac8dde16c3abbd48e0d075abd1c195d60efff48e2fbf799554df114ea3d3dba937e0369430a2788bde2a1201126e026ef6cdac42 +LLVMLibUnwind.v14.0.6+0.armv7l-linux-musleabihf.tar.gz/md5/0371f43ebcb571d0a635739252b88986 +LLVMLibUnwind.v14.0.6+0.armv7l-linux-musleabihf.tar.gz/sha512/605318ae3737e26ff89d6291311a7db3bc3ec7c8d1f2e72ae40fd3d9df0754ee2ebfb77687122605f26d76d62effb85157bc39982814920d5af46c124e71a5ff +LLVMLibUnwind.v14.0.6+0.i686-linux-gnu.tar.gz/md5/cd3f1cdf404b6102754ced4bd3a890f6 +LLVMLibUnwind.v14.0.6+0.i686-linux-gnu.tar.gz/sha512/65fe2c5b1e04da1e1d8111a0b0083fa0fa9447eaea7af7a018c09fe6d5506566c491bbad296a7be8c488ca3495016ae16a6879d69f057f8866d94910147dee03 +LLVMLibUnwind.v14.0.6+0.i686-linux-musl.tar.gz/md5/abac9b416d2ba5abcf5ce849f43ffa96 +LLVMLibUnwind.v14.0.6+0.i686-linux-musl.tar.gz/sha512/fed677ed6f103c56eb9dd4578fa37a56ed2a4bc803aa1997c5af19762a623d2f82db1f72f429448d66fcef3b37af2104e6cb782f023aaabef086a921a862b042 +LLVMLibUnwind.v14.0.6+0.i686-w64-mingw32.tar.gz/md5/4c71ffd7c8cabb1c0ed6290b193883c5 +LLVMLibUnwind.v14.0.6+0.i686-w64-mingw32.tar.gz/sha512/6b1421a3268170467225112167cdb33fec962181993a2dad5594d4ee0623ac88ee0588cdc7d0656dc1cb9129ef96f621a97a224731cd161134d7d63c8fd32c16 +LLVMLibUnwind.v14.0.6+0.powerpc64le-linux-gnu.tar.gz/md5/06faf505f0dc354afcd01113cfc57af2 +LLVMLibUnwind.v14.0.6+0.powerpc64le-linux-gnu.tar.gz/sha512/1f9dfbd403e2ce121e126c217baede178cb1323012bb5e3cd1f778ff51e4216aed9dd69036e2baffbd60a6f5ae438ddaba6c13809459e94bb00be3f7bfc8c30e +LLVMLibUnwind.v14.0.6+0.x86_64-apple-darwin.tar.gz/md5/516a11d99306e3f214968a7951b07a06 +LLVMLibUnwind.v14.0.6+0.x86_64-apple-darwin.tar.gz/sha512/885738599bbd96f20083f9b9368ce3f243bd5868d3ac9a45189de6cb40b6664a6dcdaece159989e504670231db8c2addfa8d544003eb0cdabba960e4ab6a4470 +LLVMLibUnwind.v14.0.6+0.x86_64-linux-gnu.tar.gz/md5/d851b90ea3f9664774316169fc494e21 +LLVMLibUnwind.v14.0.6+0.x86_64-linux-gnu.tar.gz/sha512/a1f529454f0881baaa508481ba97ecffb040fa92141b4cbc72278adcf8b84f0766fa918aea7fb99ce690c4fd80c36fec365987625db42f4e7bb36ad24ce177d0 +LLVMLibUnwind.v14.0.6+0.x86_64-linux-musl.tar.gz/md5/dc4e86eb2effe1f6cb0d0ceda635f226 +LLVMLibUnwind.v14.0.6+0.x86_64-linux-musl.tar.gz/sha512/c52de384853890f9df81aa9e422c1ba3fde12b2ae9c7b60b9ecdc6d0c88eab495dd336af2b6cd2c31d6eddcd0a213954eadbc7884bc39ce2039cec672eac32fe +LLVMLibUnwind.v14.0.6+0.x86_64-unknown-freebsd.tar.gz/md5/8477e3624c73a820d8ab82a53e1e10fa +LLVMLibUnwind.v14.0.6+0.x86_64-unknown-freebsd.tar.gz/sha512/32ce031245a5b59a779cd77fa3c9bf05ee59e48c913b75d4964bea49f37da232c59a42ad993f7b5edc88322148c1d7394984349682bfce3b69d33a51756ac8e3 +LLVMLibUnwind.v14.0.6+0.x86_64-w64-mingw32.tar.gz/md5/7be93eccbdb0aff427c43af651073d66 +LLVMLibUnwind.v14.0.6+0.x86_64-w64-mingw32.tar.gz/sha512/89a61a81ec664c72107ac09e717200b00434350bf77064267180bc0c101a59e0ee8c8af4dd6fe75eacdeb14e82743c138b2fc558ca08550d8796b8db93f89da4 +libLLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/f7ce9539d0802dd4b5e5e673d36d1a99 +libLLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/7a54be16ccc327731c802380d29f2c9ee5e635cd6af0b7eb6b69e9d3b0b4fecb74147359af182def3b016ec4445891bdb91eb0d541b783e451e8263968c25161 +libLLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/cd946ab46745ce71ad7438cf0f30cfd0 
+libLLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/sha512/15f8bcdf6f66e654d5d6e950392ced62586e2bf7c2b0845db78282669c5440c2140432950c7726fcc8910c7113685cc29ac880de565f85b77536d63dbab0a8b5 +libLLVM.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/766a2de98d275877bb676ff1f23e972f +libLLVM.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/3b353ea038fafefc13ccb4a81c7242d569c206362605be374fb312cb495f385796d052c3a7e08c7fe6ecaa3018e2a7e3dfa43d71a8c3a94987f7dc7aa378fd22 +libLLVM.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/0684a6b210b799a8a0f45a286f3dfcc5 +libLLVM.v18.1.7+3.aarch64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/4221e2d74117bd7e89aba2945030c1507e51999b236814fd23036565364c328392e87032daf1b9fe274ed89fcf9a6dcd203f0f1c8602c2a08d3fcfa189a5fefe +libLLVM.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/6b460256e923637e5107d67859eb60ba +libLLVM.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/7d3f2736afe4022842529b1355cf9914b7a1c7b1e261f814a4523ad30a0cf0189056d5117a06720bbb7a844a435bb632ddbda2daadbf7e01c0120452cd13e6a3 +libLLVM.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/c2b13a6a296adbb4be91dd3bb5be0877 +libLLVM.v18.1.7+3.aarch64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/9086937e718125afd535b0066ee08a3523161a94fa7ef3c9a3e86bfe760f251b6ea7b035888e61a0e7f192ed25c9bd0f4dc153df86e08569e7067a7a30ba48c5 +libLLVM.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/758d33fe0b2b3d0371708614365450e8 +libLLVM.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/79a662f72ba1b89b373d1d143ee880a12cb128211e79182e7befe8b3e50298b594de2ce489ca8bcdeadb17fceee811622f8bfcbc3e232cefdaf9927177469eec +libLLVM.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/2dcbb811be8985bfed3c8b37733c0d40 +libLLVM.v18.1.7+3.aarch64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/17f6fbd96ed5029f360c101cedad127881e14b42498d66f717448d99ca1909057ae79169d934e08157edcc7467db4b3941bdda26a2e9f42645963eec51f27e29 +libLLVM.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/bd3b904b5f9464aaaf87c41b899c8ca5 +libLLVM.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/fa99e8025419a18f548f658ea589771c2803480c3cb3a25cfb75e26ed0993b7b37bba204d7cba1475319a71159813b2b58a3b3327ba24d264cf80ef24263628d +libLLVM.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/b4f9038d5c3c13207111ee1a9a918cba +libLLVM.v18.1.7+3.aarch64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/e8b97bee30f597cc06d31175e12f0c2035aef0054e8abdb431f31b1e9d440d561bd9bc6637a403441aa7f3e1d2a46c600734e17e3b7ed0ae899c92df91758780 +libLLVM.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/1f59987d027a3bc930fca6bef917f739 +libLLVM.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/7bd0532e11abf1c4979e59d513257d53ea940f15c08d2fa30dc16e59e11d1899dcd2abe4a35dd3c7719aa49aacfa1b0e49049df3548336e5ec64355319129b30 +libLLVM.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.tar.gz/md5/e4ff6f08094846700acc4e55d5b79e93 +libLLVM.v18.1.7+3.aarch64-unknown-freebsd-llvm_version+18.tar.gz/sha512/8a575e9640e5ff9b75ef4e970f203139e51afbcbf1b82c774fbe4a0176c22c51029533c188fb89068c1714eb3c8b1b232804f276a68c0c40aa0a6611ae72d1ce +libLLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/06d8e634b4a6914efc18b7962df52021 
+libLLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/cf6aeed1eaf652e5830e34dd2ba88abc33668953281146106bbfdbc92f5f225645f00ff5b4a0eb902baf904362ab4eb32192fa50ee5b2672e8b031fe2550f9a8 +libLLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/53e83804b63e6ae4d0f1c97abcbbd1c8 +libLLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/45b3ee9b105ef2ef106fa8ac7b8e902cd1d6bf3c9bfb57edeca9e14f1654714d23fb086b369a9fd3cbb828c04fee4cfe80d2b2a2bfaa852d3ac65c0d213d8c62 +libLLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/91b6cf00564053d385e30b34e5b8778e +libLLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/9111f3f02b49bf78340c9b0c5c1325a1ca09b62c83aefece1121573dcc21dce095060351f18997971e5cfbaab346cb12c75cdc0fbe8fa92aca2e8a68b5f5f577 +libLLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/f6c91b71dfd73c7301a4e3de48e072de +libLLVM.v18.1.7+3.armv6l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/581d7e1e4d85aeaf082fa31555074471705e391de0771bf66665807afb5192c79c481ca30e73a25f4e2d48d4d325f0198e39bcbfaed2c9bc7477ee917667f5ce +libLLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/ce41ee46959e5e3a17b6c99293afedb7 +libLLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/73d8c5af750ea9deef822aec58d8697243ca154bc4435ac0b0ab8c90fc97750e91fa55f8de7b8283eb1ab19951cda3e3c4c60834bcf13730163e593126a8eb57 +libLLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/67ed5b654852dad400aef17fb542703f +libLLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/07f70c57e27eea37f520f6f0a954b54d2506530d5eb5a74e5a8526ba8ef55a948073c49037544b602d03d0aa482704292eac943f0a83421386ccbfbf22ee8510 +libLLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/5b8bd88d49ce21e5b63af6f77782eed4 +libLLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/cef1c561ae388b2baa08e39dc195989cb795d8a2747f5f11e0dc9d9e107b9e99dbba465335376beff2e1b326512f6afc962775e0b246f3edcfadf509235cabd8 +libLLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/5fbf26d20b2ce3f61edc9a9ca2eb5284 +libLLVM.v18.1.7+3.armv6l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/2c564c95d648458b9a0f0c963246cf5564c625107682f680390b6db5fde0e2b15a964fd3fd23734b5b2bb135db1fc698812d61b3f275710593f4defaee4a9c23 +libLLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/c81bc29a75acf4f806f3eb13bf890604 +libLLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/c8c922a0a4fefd549f1c2ba396a3cab9cf7738aa82e7ccf7ca29c090260e2d73ec45d6f2b07173d584f6074b10fa04052114deef6ecb6f53ea87f1924074137a +libLLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/md5/1fcb40ba1a427105b4e7d13a6c11dc78 +libLLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx03-llvm_version+18.tar.gz/sha512/392c9ee85ba7ab6697bb8979c7f443d1d25f7ac9178e96a886401cfc68d75a43ce98bf3038a7ba70a9a990f65e604d38e043472cec3badb25fbd1b38cfbb7162 +libLLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/427a19eaf69725d11bb33f48de9cb205 +libLLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/542e209b10c13d8dca867247a7414f84adb832f40051fcbdf0dcb09bc9664a77248e1b0ea1687805847dd9f5a05b86475dd76aba427c9a1bc83f8502444c60bd 
+libLLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/md5/ab34bfa2950014936edd13a7b5db8170 +libLLVM.v18.1.7+3.armv7l-linux-gnueabihf-cxx11-llvm_version+18.tar.gz/sha512/6376b25d0278e5c97581480fb4d54371b09a08be88f4cc39d2c7b3875f1189cef60c1be6bea5e12b0cf306cef8b394bc7d00f8b0fd95d749bd1b4eb318af7e15 +libLLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/md5/cb6300fe87fd7cb9840f3bc44af26878 +libLLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.asserts.tar.gz/sha512/a7984cd90fef55559142fc05d91b0da1f37f77f25214e93ff7641b7c3958f08dc7c082611915dbfda4bbbaa392656ac8604d4f75369777dacfb78baee2f99b16 +libLLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/md5/b8a4e8ef43340e9cbdf5e4479c6a5a56 +libLLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx03-llvm_version+18.tar.gz/sha512/fc249f2b666c8a8129e05ea08c773cbeb7af6d37791f271461eedd99adcfc5082e8609ed096d8a46edd1e73505352712a41e0ddc247a371f78227aab01fbe0f3 +libLLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/md5/5864689df3298be4b1b4df1ae0412d3a +libLLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.asserts.tar.gz/sha512/8f32f73e366c3a6993fa8d6b8cd1a9391611b0644cd4a77a4f7a235c037fdb75308d99b5a23ada6e4a73ed5fbd8f929a981d6bf317d79d52396220c221619303 +libLLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/md5/6bf798476c4e94716cc47a95580104ad +libLLVM.v18.1.7+3.armv7l-linux-musleabihf-cxx11-llvm_version+18.tar.gz/sha512/9dbd27a000dd3c3dda9047d366a667c4b179cc61582525adb0f8227e8055413ce46efcbc1530305400239656e2f1016fb8833fb7f4734714078e035d388f3531 +libLLVM.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/66e2889f86ae6bc1977419e6d9be729e +libLLVM.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/d0cac798c4979b4d818d36596b173e523cba3f41ff7ab1e2111f6a75c3e819e563e207a547328f005c5a93c7f8f88c17bf43c1139b5c2690df4f1d719f82920a +libLLVM.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/0534b72d6d33c8573f79dce8a2a5a6e6 +libLLVM.v18.1.7+3.i686-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/6beaf1b45eec8b46fbf92f692f53e6df40bf48e50589aeb5ef99240a5a3ec9089ffb350dda6df24530937d613bf6d2cc4da76e92921ea00def9d2d38ac5bbeba +libLLVM.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/2cf9a1ca20472179ce4a9eb3a949457b +libLLVM.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/cebae06ccee12a14d20d3056ce0519b1e774e3c9d9200a783262fcc40aee6d7aabfb08714bf53b88e03d8b09a96d3cda248a70c16188f8c707b291642998262a +libLLVM.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/4712f6a46e0ff407ece958a7701511b9 +libLLVM.v18.1.7+3.i686-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/9a0a2dfa2076b93027f766277a6890cf94d67c131697f74945e92cf13ae64e84c09d3dd744498986fb22ad5e5465300aa9c8ae6632fcf919a0932515edfcc1e6 +libLLVM.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/274c51cc4dc133d7470ef82987b78df6 +libLLVM.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/24944b1fec24bd21f2f773480c7783975b2cce5ef9909f285c959d954669b98ae18a174126440c03de28d1fa9b055f4bd092104dcb29d8c0c07400dd8e4cb493 +libLLVM.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/8b36d976399e4b603a1c4f8bce1510fc +libLLVM.v18.1.7+3.i686-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/4f5a1169cd566898357c98f86786bf86f6f1d9282327f8026c7d04359fa7148f4026ef2de765debfb45d4013368cbf420e78802289ceea253a9ed2f58e89db8a 
+libLLVM.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/121a0c243591d8295fd3063821569e01 +libLLVM.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/e55fbf36802e7d8547e1aa0f60c650b29cc3dbeaff67e6b6a095e0647d6a8c6f55bc7cf72daaeb6f3d2e87e831b3cb275d8c3b4beea2413de8a1cfbac4771ec0 +libLLVM.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/7af4fdf2475dcf896750e046edc9fd2c +libLLVM.v18.1.7+3.i686-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/e8294e855565109e70d0596402dd8b7886174034242cbc6deb55f481a306c85ed9840732b3cb346c2ed5ce10a3d42647f2d1a97d2e998805089533880a326197 +libLLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/bbf060d61b294b86f7e3dde381b00b8a +libLLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/632372d41f6e400a10fae27c6cd06a5a344cfb5902cad7928cb4133f14f36f0a3373e69e73ce9baf52f518340593c3a5a16173ef59a1878e6300e9975aeaa157 +libLLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/3d730b713e01cdb5a7a5a46028afd41b +libLLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/052ab4fa7ac3b2c430601753ab078cdc9fd6db7f65ee0b76bb05473f4c5b99ec8919ad9d347425f1928cf619548e992c86ba97f9994218f50bca617e43d2f0d9 +libLLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/bf9dcb92ba8c031ae62ed4434fd5447f +libLLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/e53be14dd02a2cef8eccafb9301d29c51d652c635703529c1444947002993f6639083eb8bef13af21c9796717ce4b3129dcdcbe2751a1173d39e321db8f6e3c7 +libLLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/b5cab0fc7c6643c6dd161f1e553ef1a0 +libLLVM.v18.1.7+3.powerpc64le-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/4032634449e2669479761c4323096b152f8df4948e3a97eea10f0b400fbf2a00d1edda59b74a714b62c4e204b113d8ecda78d828c3344ebe8bd750d14b3c4c7d +libLLVM.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/9f31ae627df95fb4818d8bb96e17c941 +libLLVM.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/da67146a80ba3615e5e46455144c5f4a25919e391aadd3d63c9c645b639d68f8883a61e947b767f4583f666e653721c53d5d4098c8af2abd81691f941fdde686 +libLLVM.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.tar.gz/md5/55fc5ae75087cb1ff1f08a1ef65f8b94 +libLLVM.v18.1.7+3.x86_64-apple-darwin-llvm_version+18.tar.gz/sha512/a000c0e349722f6b0196cc9a10aff8040dbe6a679bd79787c96c1de76968df636ab79dc24a31e4da960502858514fd74c3586c37411381d7ca68c5474576f7e0 +libLLVM.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/md5/69564913bae176a167d24d3291ef7af7 +libLLVM.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.asserts.tar.gz/sha512/b8eeb86b66d767218e59671bdd597623238eea72319913c2ac5e116faec3f4c13739a24f3b95338ed857ec29e714dc0308e4ddbfe359332b3c27ad5235052342 +libLLVM.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/md5/bc9d5637fe30f21d2231a98371e798e4 +libLLVM.v18.1.7+3.x86_64-linux-gnu-cxx03-llvm_version+18.tar.gz/sha512/4efbc2823322abe80d0134d35926767bd9cab717cde9308726a6a8891e5a707476138888c695ed399e3dddb57baf17abbc43a0a338cea2e5c0f472ab427c12e3 +libLLVM.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/md5/8492ff91e6dbd1a66edd8aaf0390a582 +libLLVM.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.asserts.tar.gz/sha512/6443bd2fa9c5beecc2b002c26595f2cf3a8e2ea5eb49aa4c00f7252a6623fe0f8c01824941ebe5475460641285c4e56a5203056c1b93a78250b7e48fb5ac9e00 
+libLLVM.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/md5/6918c9978fd8b5887c66eee76950478d +libLLVM.v18.1.7+3.x86_64-linux-gnu-cxx11-llvm_version+18.tar.gz/sha512/d455a4f433bf3ea1b5100b9d45199bc785e4b6fbc7659bf06cbde6ada471134e7d4243d3a3a1f71d579126ef8371d70e59f174e124b3ff8d4842e9ee83e2dea4 +libLLVM.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/md5/075f87d106dd95c8e9c6e7e157b5e9db +libLLVM.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.asserts.tar.gz/sha512/8132379d8f44a21082c7a90f58a7dffb0c6ee725efd58a959d4023787411b080d72913bb1e89a35072f97aaf1ca512ab1d027b37eaed819e3c053d7a0cf64269 +libLLVM.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/md5/4cfc2838a77f05883f82e50b3723dcfe +libLLVM.v18.1.7+3.x86_64-linux-musl-cxx03-llvm_version+18.tar.gz/sha512/20079c81cd6a4020b087485be1ab4928b3bd3e1a53728cc98137a35b969484278093bc75a9e51ddfd8331556577c5fb3109d74dc2eccffa93b5390e0fabff2b1 +libLLVM.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/md5/5b8cbf00631bd4540b7335a86302a1fe +libLLVM.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.asserts.tar.gz/sha512/51ba9a4b74b740905cee4baf7f4e5f3620ed81e0746f49cd352d874ebedab95277c5031123f880c9239b7dbf505b10f6531f79c8a6b0482a652b8324f4137cf5 +libLLVM.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/md5/11010cc2d58b1a8c6a6e7bc24df0c0db +libLLVM.v18.1.7+3.x86_64-linux-musl-cxx11-llvm_version+18.tar.gz/sha512/a6bdd9a2a2fa9a572e74ced69c3ce9d1b84cde18155ec9bc7dfbaba411ee6c43d229e6fb333eff66fb63b632b485b46b7cb1657c0c49d9d9bb849fa13f0bbc7b +libLLVM.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/md5/566390f0f0fa92c4a9a400e25e7086d0 +libLLVM.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.asserts.tar.gz/sha512/31981cc3be65117d8dfcb0254dcdecd79b0f141a61864db4e50b81fbe7a1db431b71f9ef43bbeb320e4ae33bb00f2db42d83f849ce6ca5044445cd5de9572566 +libLLVM.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.tar.gz/md5/b753aba58a0704da416bb06cd97acdd7 +libLLVM.v18.1.7+3.x86_64-unknown-freebsd-llvm_version+18.tar.gz/sha512/99358ace0ef20138284c3f8b28b46dd431b460d1c92034fc918233a266c9be398eba63d1758a388fb39935123c65f72969e01231e54b27cff771cdabef9171c2 +libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/md5/52cee10b0dd37d9a4487d3762e1902c3 +libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.asserts.tar.gz/sha512/c44d305ffcb2939779a071a5a78ca9469654e36c5e4cf3e0e78603c85ec30eae3c8ab2594df19812d51dba7cea565c16a70f514faf30bc43b8f37592f57aa059 +libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/md5/eef5f1bc5a0026bf96f33e2254b93711 +libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx03-llvm_version+18.tar.gz/sha512/df39558259dd59f7b602581e7afdf67e77c854c1192b53b24a5c2d133a4a74b3f44e74682f9f02745ef97a969de92566a7633c46816a031b14cb04006af845de +libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/bbe95b31b958f187d49692d4856d84af +libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/3035b3b8b1cd1349c893aa47f066a1b8b7610f69ff0c4f2f3325a377818fd8bb12ad5485730be354bc2a9982db405b5954dbda39bc7cff38dc22966a6d86c5d5 +libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/0e21a6d22dd45d125d0e98fe8f72e8c7 +libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/efbbad538c6f8b773d7ef1019a9b754e1ce7da59ea5f00f452fa7f7cc93c40f248762eb7f708e3d2fa7f9bdbc0b680d6e6502a07bbca0d4e701b51b0565d625e llvm-julia-18.1.7-2.tar.gz/md5/5c0ae4abc4ce31a86d5d6d4ecabc2683 
llvm-julia-18.1.7-2.tar.gz/sha512/b4d1dde929a8670eec1a9b25abe23fbc926a922e61b60ed99b52b440cd07cb026e7f746878292db4cd0cb422d9b87ecc4ee4b2b141f8e9411855d18da51facb9 -llvmunwind-12.0.1.tar.xz/md5/4ec327cee517fdb1f6a20e83748e2c7b -llvmunwind-12.0.1.tar.xz/sha512/847b6ba03010a43f4fdbfdc49bf16d18fd18474d01584712e651b11191814bf7c1cf53475021d9ee447ed78413202b4ed97973d7bdd851d3e49f8d06f55a7af4 +llvm-project-14.0.6.tar.xz/md5/0b3373eded268dc27e2e874872fed4eb +llvm-project-14.0.6.tar.xz/sha512/6fc6eeb60fac698702d1aac495fc0161eb7216a1f8db2020af8fccec5837831f7cc20dc2a169bf4f0b5f520748280b4a86621f3697d622aa58faaa45dbfaad13 diff --git a/deps/checksums/llvmunwind b/deps/checksums/llvmunwind index a90d28717dd85..e69de29bb2d1d 100644 --- a/deps/checksums/llvmunwind +++ b/deps/checksums/llvmunwind @@ -1,32 +0,0 @@ -LLVMLibUnwind.v14.0.6+0.aarch64-apple-darwin.tar.gz/md5/d8584e0e3dc26ea7404d3719cea9e233 -LLVMLibUnwind.v14.0.6+0.aarch64-apple-darwin.tar.gz/sha512/7a0396eaace91b9b4d013c209605d468a7ff9b99ede9fdd57602539a6fa6f3ea84a440f32840056a1234df3ef1896739ea0820fee72b4f208096c553fc54adb9 -LLVMLibUnwind.v14.0.6+0.aarch64-linux-gnu.tar.gz/md5/d6edea561b61173d05aa79936e49f6b7 -LLVMLibUnwind.v14.0.6+0.aarch64-linux-gnu.tar.gz/sha512/9fbe29ec6a33c719bc9a4dd19911ceded9622269c042192d339a6cf45aa8209ad64c424167c094ca01293438af5930f091acba0538b3fe640a746297f5cc8cb3 -LLVMLibUnwind.v14.0.6+0.aarch64-linux-musl.tar.gz/md5/3ec68c87e4bddd024ee0ca6adc2b3b96 -LLVMLibUnwind.v14.0.6+0.aarch64-linux-musl.tar.gz/sha512/be3cd9d5510c2693dee1494c36c479d32311ff83f5b2d31c08508a3dd370788961ce46e9025afe148a0febd05942fd294370a357dd717bee353d8a108617f6de -LLVMLibUnwind.v14.0.6+0.armv6l-linux-gnueabihf.tar.gz/md5/8ca5a926d69124225d485d679232a54f -LLVMLibUnwind.v14.0.6+0.armv6l-linux-gnueabihf.tar.gz/sha512/353f540b342bc54877e7a41fe65c9eeac525fd91bf4cddbe1b3ec2ed93c3751beaf8316a4d31530502b067100b160301262e10cbe4407db3abf1ceb5d9a74eb2 -LLVMLibUnwind.v14.0.6+0.armv6l-linux-musleabihf.tar.gz/md5/4e5b576958f2a2e708eb5918ceef0de0 -LLVMLibUnwind.v14.0.6+0.armv6l-linux-musleabihf.tar.gz/sha512/2e98c472d3ee25c2e062efa4eb21ac9cfc49b26ea9d99ad4a8e7660c4c09f121d31193bd161f54ea332ce94785d601897311e9e6668adb1e25e2b666e0d5bb3f -LLVMLibUnwind.v14.0.6+0.armv7l-linux-gnueabihf.tar.gz/md5/1c81a886e799663ce8d04400c5b516a9 -LLVMLibUnwind.v14.0.6+0.armv7l-linux-gnueabihf.tar.gz/sha512/236b78b9a17eaae74ab07349ac8dde16c3abbd48e0d075abd1c195d60efff48e2fbf799554df114ea3d3dba937e0369430a2788bde2a1201126e026ef6cdac42 -LLVMLibUnwind.v14.0.6+0.armv7l-linux-musleabihf.tar.gz/md5/0371f43ebcb571d0a635739252b88986 -LLVMLibUnwind.v14.0.6+0.armv7l-linux-musleabihf.tar.gz/sha512/605318ae3737e26ff89d6291311a7db3bc3ec7c8d1f2e72ae40fd3d9df0754ee2ebfb77687122605f26d76d62effb85157bc39982814920d5af46c124e71a5ff -LLVMLibUnwind.v14.0.6+0.i686-linux-gnu.tar.gz/md5/cd3f1cdf404b6102754ced4bd3a890f6 -LLVMLibUnwind.v14.0.6+0.i686-linux-gnu.tar.gz/sha512/65fe2c5b1e04da1e1d8111a0b0083fa0fa9447eaea7af7a018c09fe6d5506566c491bbad296a7be8c488ca3495016ae16a6879d69f057f8866d94910147dee03 -LLVMLibUnwind.v14.0.6+0.i686-linux-musl.tar.gz/md5/abac9b416d2ba5abcf5ce849f43ffa96 -LLVMLibUnwind.v14.0.6+0.i686-linux-musl.tar.gz/sha512/fed677ed6f103c56eb9dd4578fa37a56ed2a4bc803aa1997c5af19762a623d2f82db1f72f429448d66fcef3b37af2104e6cb782f023aaabef086a921a862b042 -LLVMLibUnwind.v14.0.6+0.i686-w64-mingw32.tar.gz/md5/4c71ffd7c8cabb1c0ed6290b193883c5 
-LLVMLibUnwind.v14.0.6+0.i686-w64-mingw32.tar.gz/sha512/6b1421a3268170467225112167cdb33fec962181993a2dad5594d4ee0623ac88ee0588cdc7d0656dc1cb9129ef96f621a97a224731cd161134d7d63c8fd32c16 -LLVMLibUnwind.v14.0.6+0.powerpc64le-linux-gnu.tar.gz/md5/06faf505f0dc354afcd01113cfc57af2 -LLVMLibUnwind.v14.0.6+0.powerpc64le-linux-gnu.tar.gz/sha512/1f9dfbd403e2ce121e126c217baede178cb1323012bb5e3cd1f778ff51e4216aed9dd69036e2baffbd60a6f5ae438ddaba6c13809459e94bb00be3f7bfc8c30e -LLVMLibUnwind.v14.0.6+0.x86_64-apple-darwin.tar.gz/md5/516a11d99306e3f214968a7951b07a06 -LLVMLibUnwind.v14.0.6+0.x86_64-apple-darwin.tar.gz/sha512/885738599bbd96f20083f9b9368ce3f243bd5868d3ac9a45189de6cb40b6664a6dcdaece159989e504670231db8c2addfa8d544003eb0cdabba960e4ab6a4470 -LLVMLibUnwind.v14.0.6+0.x86_64-linux-gnu.tar.gz/md5/d851b90ea3f9664774316169fc494e21 -LLVMLibUnwind.v14.0.6+0.x86_64-linux-gnu.tar.gz/sha512/a1f529454f0881baaa508481ba97ecffb040fa92141b4cbc72278adcf8b84f0766fa918aea7fb99ce690c4fd80c36fec365987625db42f4e7bb36ad24ce177d0 -LLVMLibUnwind.v14.0.6+0.x86_64-linux-musl.tar.gz/md5/dc4e86eb2effe1f6cb0d0ceda635f226 -LLVMLibUnwind.v14.0.6+0.x86_64-linux-musl.tar.gz/sha512/c52de384853890f9df81aa9e422c1ba3fde12b2ae9c7b60b9ecdc6d0c88eab495dd336af2b6cd2c31d6eddcd0a213954eadbc7884bc39ce2039cec672eac32fe -LLVMLibUnwind.v14.0.6+0.x86_64-unknown-freebsd.tar.gz/md5/8477e3624c73a820d8ab82a53e1e10fa -LLVMLibUnwind.v14.0.6+0.x86_64-unknown-freebsd.tar.gz/sha512/32ce031245a5b59a779cd77fa3c9bf05ee59e48c913b75d4964bea49f37da232c59a42ad993f7b5edc88322148c1d7394984349682bfce3b69d33a51756ac8e3 -LLVMLibUnwind.v14.0.6+0.x86_64-w64-mingw32.tar.gz/md5/7be93eccbdb0aff427c43af651073d66 -LLVMLibUnwind.v14.0.6+0.x86_64-w64-mingw32.tar.gz/sha512/89a61a81ec664c72107ac09e717200b00434350bf77064267180bc0c101a59e0ee8c8af4dd6fe75eacdeb14e82743c138b2fc558ca08550d8796b8db93f89da4 diff --git a/deps/checksums/mbedtls b/deps/checksums/mbedtls index 2db4d7fed384f..e52066b6f4bac 100644 --- a/deps/checksums/mbedtls +++ b/deps/checksums/mbedtls @@ -1,34 +1,36 @@ -MbedTLS.v2.28.6+0.aarch64-apple-darwin.tar.gz/md5/c97705b08c6bf695fa7a11a42167df94 -MbedTLS.v2.28.6+0.aarch64-apple-darwin.tar.gz/sha512/91825c3a495045ca74ceb5a23e3d7e9387701e401911b147d905a49892b1a5a9f22662a4f16a7f4468c5a807f2980b66e3409ea1ff7e04c6fdac0b105472e200 -MbedTLS.v2.28.6+0.aarch64-linux-gnu.tar.gz/md5/8ebaaeefd75c805227229086c262d0e7 -MbedTLS.v2.28.6+0.aarch64-linux-gnu.tar.gz/sha512/89983c1f9f9d7b901619522afcd12c6bc1996757edeb9f3012954992f82f3b36ae50f49dcf7731623fca197946e4281eecffdc29a5819f04e7f6203afd4eb93a -MbedTLS.v2.28.6+0.aarch64-linux-musl.tar.gz/md5/b40b2ba247f4ff755e15daad13c5a255 -MbedTLS.v2.28.6+0.aarch64-linux-musl.tar.gz/sha512/4cb4f2213b631dda0caa8baafa8effc9c8592c72a6a5b826fce060cd81f8f77c188c9ddc76595b47078db3c35b3043d9bf0cb891d822a940df87982de56dec44 -MbedTLS.v2.28.6+0.armv6l-linux-gnueabihf.tar.gz/md5/c6dd1cb1aba1075c73c41719a03c5ab5 -MbedTLS.v2.28.6+0.armv6l-linux-gnueabihf.tar.gz/sha512/981a8925dd90418150625e9467cc791e4a9d5223e7df6ead113ec41a279a5dd7e8ebcecb5b87611ef451fc6483fd6eb5bf984cf528037ad742e68b4be94e5c07 -MbedTLS.v2.28.6+0.armv6l-linux-musleabihf.tar.gz/md5/c30ed777bd74d269656f7e9bc8163765 -MbedTLS.v2.28.6+0.armv6l-linux-musleabihf.tar.gz/sha512/f04014181082561195caa4d3b178480bb5cce7f459d76aca8cdaa2f615d105b24871656ce4cbf8d9ec33f0424de35a16f12d4964a1f0fab9a416e5d18a468c94 -MbedTLS.v2.28.6+0.armv7l-linux-gnueabihf.tar.gz/md5/256f8327773ea2d0d6b4649541c34e84 
-MbedTLS.v2.28.6+0.armv7l-linux-gnueabihf.tar.gz/sha512/ab4c9e82752386a0fd642a709bc90b712d6aaff78309968f1fdbf1121a790a9c0227ddd8e79373359cea9c75b21e762f600abea42036609571ba999531b50852 -MbedTLS.v2.28.6+0.armv7l-linux-musleabihf.tar.gz/md5/249ada3e9a7ad4eba08270e575ae68ec -MbedTLS.v2.28.6+0.armv7l-linux-musleabihf.tar.gz/sha512/0682e65f4257c3d237ba8cfc643be4430341888ec4cd17c2dc3018350aa7ff176e834a69ebc9d240b383a7aed439b34e45c237310ad66043956700b782323793 -MbedTLS.v2.28.6+0.i686-linux-gnu.tar.gz/md5/d0a176d2843ac780884395c90971bf68 -MbedTLS.v2.28.6+0.i686-linux-gnu.tar.gz/sha512/c2f96f314d0e5d9bffe46dc7d0adceb038db81e8c9d9a3c0fb0a237849d0d568d249e2df6c275d27a74a9122d0a53b38e5d8521807669a9c82bd67befbea169c -MbedTLS.v2.28.6+0.i686-linux-musl.tar.gz/md5/9c7501c6b04df53f8d56cd59dd42ae4c -MbedTLS.v2.28.6+0.i686-linux-musl.tar.gz/sha512/6fd35f9c2e1c5822920bc1d9315dc68b10694ee5507cc512868615c3d35dc389fa67038b9ab79fa86ea7ff6bf5f6f1eed053fafcc519080559057dcaff813ec5 -MbedTLS.v2.28.6+0.i686-w64-mingw32.tar.gz/md5/1eef46b3c89a81973778817a8856673c -MbedTLS.v2.28.6+0.i686-w64-mingw32.tar.gz/sha512/f202595cf971825601d5e12263eef0dd101e9be971d15592a12187f1d170fafaab358f02db89458f495ddc8922f66fbd662123b0d6df527fffa514e9f410784a -MbedTLS.v2.28.6+0.powerpc64le-linux-gnu.tar.gz/md5/fec1779ff02d71d5e94b3f1455453fc0 -MbedTLS.v2.28.6+0.powerpc64le-linux-gnu.tar.gz/sha512/e97ae38c555f6b45e33c023c7e07c982d36501f6c2dc36121bb73f2fb08db3fa3ab7f4ab0d9ecb622d25bfe1816eab3a6190d2034a05a66b7425c36a637623e0 -MbedTLS.v2.28.6+0.x86_64-apple-darwin.tar.gz/md5/6d44a0c126affaedad544460da9415ab -MbedTLS.v2.28.6+0.x86_64-apple-darwin.tar.gz/sha512/bf074429f32f51d954bc0c242fb4455ec6ead0e8337a3e5ab9e5b0df47d8a195947a488169f743db63d70b245be80084cd0d78f2211b6cd4b9524010b2c893cc -MbedTLS.v2.28.6+0.x86_64-linux-gnu.tar.gz/md5/95641af7a92c8c83d82264dd2275692c -MbedTLS.v2.28.6+0.x86_64-linux-gnu.tar.gz/sha512/3606ecd5a566e643cc03959a3eac9a45cb4c644006ee5820b852dfc22d40b85d75f5c018c46776954d92001986ecb49238058ca3d99340f9a689875b690aa6e7 -MbedTLS.v2.28.6+0.x86_64-linux-musl.tar.gz/md5/aee58ac107ca0d9e1eb5d7de8146ec8d -MbedTLS.v2.28.6+0.x86_64-linux-musl.tar.gz/sha512/86219aa5ba3280da39e91beded7455160c1ebc274c3158b9f0703a2c034756a9a9e51e5354d22ce983fcd026157d81f471446e6ee2743cae2663384e3e796176 -MbedTLS.v2.28.6+0.x86_64-unknown-freebsd.tar.gz/md5/67857ac031b10fb6a0620b453477653b -MbedTLS.v2.28.6+0.x86_64-unknown-freebsd.tar.gz/sha512/118f3c662580c88d092610be08b60236939c7fd7feab4cd524c7c1e2e2e1b557bddbd603902b697142695889ea6c0a8087982020cd5e7267c9c7c82b49622460 -MbedTLS.v2.28.6+0.x86_64-w64-mingw32.tar.gz/md5/15ebd14ae435b64b2a0006ee7bc21bd4 -MbedTLS.v2.28.6+0.x86_64-w64-mingw32.tar.gz/sha512/7b327ecd405417a3be6ad4ba746656c9b25b70f09985e3e53b07416ab6f271f630eee638c98be938d5cb827c92b5bf656c02865685306389efba2275a1b2113f +MbedTLS.v2.28.6+1.aarch64-apple-darwin.tar.gz/md5/c97705b08c6bf695fa7a11a42167df94 +MbedTLS.v2.28.6+1.aarch64-apple-darwin.tar.gz/sha512/91825c3a495045ca74ceb5a23e3d7e9387701e401911b147d905a49892b1a5a9f22662a4f16a7f4468c5a807f2980b66e3409ea1ff7e04c6fdac0b105472e200 +MbedTLS.v2.28.6+1.aarch64-linux-gnu.tar.gz/md5/8ebaaeefd75c805227229086c262d0e7 +MbedTLS.v2.28.6+1.aarch64-linux-gnu.tar.gz/sha512/89983c1f9f9d7b901619522afcd12c6bc1996757edeb9f3012954992f82f3b36ae50f49dcf7731623fca197946e4281eecffdc29a5819f04e7f6203afd4eb93a +MbedTLS.v2.28.6+1.aarch64-linux-musl.tar.gz/md5/b40b2ba247f4ff755e15daad13c5a255 
+MbedTLS.v2.28.6+1.aarch64-linux-musl.tar.gz/sha512/4cb4f2213b631dda0caa8baafa8effc9c8592c72a6a5b826fce060cd81f8f77c188c9ddc76595b47078db3c35b3043d9bf0cb891d822a940df87982de56dec44 +MbedTLS.v2.28.6+1.aarch64-unknown-freebsd.tar.gz/md5/51774d7907dc1a72d7c6e1b6cff02347 +MbedTLS.v2.28.6+1.aarch64-unknown-freebsd.tar.gz/sha512/b85292a75d4ba6fc3996ed497f0951f0dc0a3846e1df83f36b7d3ed3fc30687efdc1742848f6fb5a06e204fa9eb66837c8fbef16e6329f50763086bafef14fb7 +MbedTLS.v2.28.6+1.armv6l-linux-gnueabihf.tar.gz/md5/c6dd1cb1aba1075c73c41719a03c5ab5 +MbedTLS.v2.28.6+1.armv6l-linux-gnueabihf.tar.gz/sha512/981a8925dd90418150625e9467cc791e4a9d5223e7df6ead113ec41a279a5dd7e8ebcecb5b87611ef451fc6483fd6eb5bf984cf528037ad742e68b4be94e5c07 +MbedTLS.v2.28.6+1.armv6l-linux-musleabihf.tar.gz/md5/c30ed777bd74d269656f7e9bc8163765 +MbedTLS.v2.28.6+1.armv6l-linux-musleabihf.tar.gz/sha512/f04014181082561195caa4d3b178480bb5cce7f459d76aca8cdaa2f615d105b24871656ce4cbf8d9ec33f0424de35a16f12d4964a1f0fab9a416e5d18a468c94 +MbedTLS.v2.28.6+1.armv7l-linux-gnueabihf.tar.gz/md5/256f8327773ea2d0d6b4649541c34e84 +MbedTLS.v2.28.6+1.armv7l-linux-gnueabihf.tar.gz/sha512/ab4c9e82752386a0fd642a709bc90b712d6aaff78309968f1fdbf1121a790a9c0227ddd8e79373359cea9c75b21e762f600abea42036609571ba999531b50852 +MbedTLS.v2.28.6+1.armv7l-linux-musleabihf.tar.gz/md5/249ada3e9a7ad4eba08270e575ae68ec +MbedTLS.v2.28.6+1.armv7l-linux-musleabihf.tar.gz/sha512/0682e65f4257c3d237ba8cfc643be4430341888ec4cd17c2dc3018350aa7ff176e834a69ebc9d240b383a7aed439b34e45c237310ad66043956700b782323793 +MbedTLS.v2.28.6+1.i686-linux-gnu.tar.gz/md5/d0a176d2843ac780884395c90971bf68 +MbedTLS.v2.28.6+1.i686-linux-gnu.tar.gz/sha512/c2f96f314d0e5d9bffe46dc7d0adceb038db81e8c9d9a3c0fb0a237849d0d568d249e2df6c275d27a74a9122d0a53b38e5d8521807669a9c82bd67befbea169c +MbedTLS.v2.28.6+1.i686-linux-musl.tar.gz/md5/9c7501c6b04df53f8d56cd59dd42ae4c +MbedTLS.v2.28.6+1.i686-linux-musl.tar.gz/sha512/6fd35f9c2e1c5822920bc1d9315dc68b10694ee5507cc512868615c3d35dc389fa67038b9ab79fa86ea7ff6bf5f6f1eed053fafcc519080559057dcaff813ec5 +MbedTLS.v2.28.6+1.i686-w64-mingw32.tar.gz/md5/1eef46b3c89a81973778817a8856673c +MbedTLS.v2.28.6+1.i686-w64-mingw32.tar.gz/sha512/f202595cf971825601d5e12263eef0dd101e9be971d15592a12187f1d170fafaab358f02db89458f495ddc8922f66fbd662123b0d6df527fffa514e9f410784a +MbedTLS.v2.28.6+1.powerpc64le-linux-gnu.tar.gz/md5/fec1779ff02d71d5e94b3f1455453fc0 +MbedTLS.v2.28.6+1.powerpc64le-linux-gnu.tar.gz/sha512/e97ae38c555f6b45e33c023c7e07c982d36501f6c2dc36121bb73f2fb08db3fa3ab7f4ab0d9ecb622d25bfe1816eab3a6190d2034a05a66b7425c36a637623e0 +MbedTLS.v2.28.6+1.x86_64-apple-darwin.tar.gz/md5/6d44a0c126affaedad544460da9415ab +MbedTLS.v2.28.6+1.x86_64-apple-darwin.tar.gz/sha512/bf074429f32f51d954bc0c242fb4455ec6ead0e8337a3e5ab9e5b0df47d8a195947a488169f743db63d70b245be80084cd0d78f2211b6cd4b9524010b2c893cc +MbedTLS.v2.28.6+1.x86_64-linux-gnu.tar.gz/md5/95641af7a92c8c83d82264dd2275692c +MbedTLS.v2.28.6+1.x86_64-linux-gnu.tar.gz/sha512/3606ecd5a566e643cc03959a3eac9a45cb4c644006ee5820b852dfc22d40b85d75f5c018c46776954d92001986ecb49238058ca3d99340f9a689875b690aa6e7 +MbedTLS.v2.28.6+1.x86_64-linux-musl.tar.gz/md5/aee58ac107ca0d9e1eb5d7de8146ec8d +MbedTLS.v2.28.6+1.x86_64-linux-musl.tar.gz/sha512/86219aa5ba3280da39e91beded7455160c1ebc274c3158b9f0703a2c034756a9a9e51e5354d22ce983fcd026157d81f471446e6ee2743cae2663384e3e796176 +MbedTLS.v2.28.6+1.x86_64-unknown-freebsd.tar.gz/md5/67857ac031b10fb6a0620b453477653b 
+MbedTLS.v2.28.6+1.x86_64-unknown-freebsd.tar.gz/sha512/118f3c662580c88d092610be08b60236939c7fd7feab4cd524c7c1e2e2e1b557bddbd603902b697142695889ea6c0a8087982020cd5e7267c9c7c82b49622460 +MbedTLS.v2.28.6+1.x86_64-w64-mingw32.tar.gz/md5/1ca2c982712620941c4b0d731251dfff +MbedTLS.v2.28.6+1.x86_64-w64-mingw32.tar.gz/sha512/cef70c00c79e421ce92424bbfda259b4e233d7be3489db1b8cbac7e926d9429be6c88fb806664db60210427748810ea08117066480e8e17c60cb61485b639669 mbedtls-2.28.6.tar.gz/md5/768932cee6c42f7f4751362091ac56d4 mbedtls-2.28.6.tar.gz/sha512/a5c876489bf89908f34626c879f68e8f962d84b50756df17b6b75dfb93e08fe163ed3f32bf70e89bce9080d15257a4cbd2679b743bf8f2e2d7a04606c5811c05 diff --git a/deps/checksums/mpfr b/deps/checksums/mpfr index 050e9cbd8d5a8..4d029986663c9 100644 --- a/deps/checksums/mpfr +++ b/deps/checksums/mpfr @@ -1,34 +1,36 @@ -MPFR.v4.2.1+0.aarch64-apple-darwin.tar.gz/md5/816f9ff59070f21f1df2f310e2606c06 -MPFR.v4.2.1+0.aarch64-apple-darwin.tar.gz/sha512/dad9adba7a8867d1ce26d77efb5c33b602b920a2cdbec84ea58a054cfab3ab7df54d2bda101de72b71604e7844993f1e216b002ba092e69277d0764040216c81 -MPFR.v4.2.1+0.aarch64-linux-gnu.tar.gz/md5/c1e3c9619af6454d8adae9bcbd911dba -MPFR.v4.2.1+0.aarch64-linux-gnu.tar.gz/sha512/5d916492aa73d11e022a7ca3f31940ceb8f8667bdf878ba29d6256736a380a2f6a11ac90cd8de3f1d3454a79165db240a1b971b9794fd21692ed64502ec34b9a -MPFR.v4.2.1+0.aarch64-linux-musl.tar.gz/md5/8ada267e2d23eb0c65ab2d2df02362d5 -MPFR.v4.2.1+0.aarch64-linux-musl.tar.gz/sha512/0c7f18e6d0f3e2052541e3279dfa9a74eb34067ac4fea0b17ab805cd73010cc83f8d7cb4eda8f4a904da398268d1c0d638c35521a9f339f8c7c3b5f159f27277 -MPFR.v4.2.1+0.armv6l-linux-gnueabihf.tar.gz/md5/42bdb78eee83f496d7da699ad9603913 -MPFR.v4.2.1+0.armv6l-linux-gnueabihf.tar.gz/sha512/edaa9ece1404a606d6b635406ad5e721c8d094ffa1c73ce19222afc2b4ea7b3b9e23e7c5589ae10fd9f4c4aefa265773bcfce6c510efbca57782115d43daeb13 -MPFR.v4.2.1+0.armv6l-linux-musleabihf.tar.gz/md5/2213207772b8a50de4768816fdc20e2f -MPFR.v4.2.1+0.armv6l-linux-musleabihf.tar.gz/sha512/d24debc38b8135ac5c10c4ea19de0c69126b6881940b4e182118e12cc2c7cf0aca2db065620f0cca636742da32eddec5bda3b4f449a035274f05120c977ed449 -MPFR.v4.2.1+0.armv7l-linux-gnueabihf.tar.gz/md5/a0d9fe20c9ff0027b6816ee0102b1f9a -MPFR.v4.2.1+0.armv7l-linux-gnueabihf.tar.gz/sha512/97ce02898dc0d29a616048fd7ecee3100a710f7a30a21f2276c01675749034a5241be88bd46dff3dbf9ea0adca98a4357bd16e43fa9520e7a02477494c2d072e -MPFR.v4.2.1+0.armv7l-linux-musleabihf.tar.gz/md5/7898b9047c914b290b5928af5df63030 -MPFR.v4.2.1+0.armv7l-linux-musleabihf.tar.gz/sha512/cbefa9588752c65751630832417c1c42e4819d49ff9a505f61c2567ef4271097e585542fa898efd61409a43e439d827bb79f693a0937d0a3a427b39535979588 -MPFR.v4.2.1+0.i686-linux-gnu.tar.gz/md5/15fa598e5c1c723ff6cd2ad3ea51e437 -MPFR.v4.2.1+0.i686-linux-gnu.tar.gz/sha512/2ec4cf0c88363bc9fb39522bbcd6a9c2311c38efb166f604aab118fed39712beea68367ff5c4cabb2b7b3f5a53469414b8552fd22a70a637cbbfc936f0c4851b -MPFR.v4.2.1+0.i686-linux-musl.tar.gz/md5/6dc6a00d3ea22e2c60374d49926598d6 -MPFR.v4.2.1+0.i686-linux-musl.tar.gz/sha512/4a90356091b53d7238dda59f6e9c5c420614f16460dc67310e581611ad46a2dd3324d6164cfecf1bcd660b8f2e473f0afe137aac954c608b11be3acbda648e14 -MPFR.v4.2.1+0.i686-w64-mingw32.tar.gz/md5/bda99a916573607716c61473153a1927 -MPFR.v4.2.1+0.i686-w64-mingw32.tar.gz/sha512/ed3f45ff5ac8f4588584dd80036d9f3623651c87276a9b624955c831009dc33f8804c2845bd187ba750515725c29d65ac5d70c71db1b953c618cd771d2b066d0 -MPFR.v4.2.1+0.powerpc64le-linux-gnu.tar.gz/md5/ac70f716bddd5323b4add663b473b52d 
-MPFR.v4.2.1+0.powerpc64le-linux-gnu.tar.gz/sha512/ebb0f5ea76c892b7a4e4636706e71f476aaea58bb88e1734a7966c44495fda8c81318e0e8629e208185f0fc8d0c73b6f3463034cd831dfb5fbbd493a0689bc06 -MPFR.v4.2.1+0.x86_64-apple-darwin.tar.gz/md5/ff13e865e3be717b0fffc16296cb2f56 -MPFR.v4.2.1+0.x86_64-apple-darwin.tar.gz/sha512/98479210910945714da0285a40803674242581894a731ba4709c70dc1341849e736a88aa4914df0ff536c15f8848c417e712ff6abeb25047d300f8b215fd131f -MPFR.v4.2.1+0.x86_64-linux-gnu.tar.gz/md5/48194b9f92ad01b168e8b9612f4c9559 -MPFR.v4.2.1+0.x86_64-linux-gnu.tar.gz/sha512/638eb40d23fd492972809cdc3326ad4c2c99d3eae1ca5f7c0da6e0e335bb596de2899da5b3e65153225654b2cd9a805298e7241a21395e07d0b333eb1f101b5d -MPFR.v4.2.1+0.x86_64-linux-musl.tar.gz/md5/0babbb823964ccebf63b42fd07f08936 -MPFR.v4.2.1+0.x86_64-linux-musl.tar.gz/sha512/880b685d9b456fa2bf78e707273783423f9ff00791b529eba00c5e1b94ff96f4ba01e680152a4d6b45b695e3c1169d07f793db42c5a4120861813d5458dfc828 -MPFR.v4.2.1+0.x86_64-unknown-freebsd.tar.gz/md5/f11d634e5a19177fe36b2b2f6f5727ca -MPFR.v4.2.1+0.x86_64-unknown-freebsd.tar.gz/sha512/291245c06edf31b2e39b6774359ebd4f95b924f19d2a7e8581822a5bf908426d00f0452c061a027da0d7d4bb2fa1bb7ef8ab6d8e49bc848d6d7450a8d5c8a9c4 -MPFR.v4.2.1+0.x86_64-w64-mingw32.tar.gz/md5/e6d1347d5da312f7301d578ce9d7c4d9 -MPFR.v4.2.1+0.x86_64-w64-mingw32.tar.gz/sha512/3ea4b944172be250677ef271f1e10c2b95861755f203795a50b8d0f76f72498897059271e44e038625c3b73cccbd0165685d60afa994180d42e912bffbe86729 +MPFR.v4.2.1+1.aarch64-apple-darwin.tar.gz/md5/816f9ff59070f21f1df2f310e2606c06 +MPFR.v4.2.1+1.aarch64-apple-darwin.tar.gz/sha512/dad9adba7a8867d1ce26d77efb5c33b602b920a2cdbec84ea58a054cfab3ab7df54d2bda101de72b71604e7844993f1e216b002ba092e69277d0764040216c81 +MPFR.v4.2.1+1.aarch64-linux-gnu.tar.gz/md5/c1e3c9619af6454d8adae9bcbd911dba +MPFR.v4.2.1+1.aarch64-linux-gnu.tar.gz/sha512/5d916492aa73d11e022a7ca3f31940ceb8f8667bdf878ba29d6256736a380a2f6a11ac90cd8de3f1d3454a79165db240a1b971b9794fd21692ed64502ec34b9a +MPFR.v4.2.1+1.aarch64-linux-musl.tar.gz/md5/8ada267e2d23eb0c65ab2d2df02362d5 +MPFR.v4.2.1+1.aarch64-linux-musl.tar.gz/sha512/0c7f18e6d0f3e2052541e3279dfa9a74eb34067ac4fea0b17ab805cd73010cc83f8d7cb4eda8f4a904da398268d1c0d638c35521a9f339f8c7c3b5f159f27277 +MPFR.v4.2.1+1.aarch64-unknown-freebsd.tar.gz/md5/8aa99bf9c6157b8bb2833d8987ce0806 +MPFR.v4.2.1+1.aarch64-unknown-freebsd.tar.gz/sha512/6e4f547596eb8dd8ee2e1d3aefd7c73eed744add401c1f93d9951a9187c96fa9fc39be14683723dcb43cdf6891ea0021dc3416e43a0e2ec2038b0d1cd7c8434e +MPFR.v4.2.1+1.armv6l-linux-gnueabihf.tar.gz/md5/42bdb78eee83f496d7da699ad9603913 +MPFR.v4.2.1+1.armv6l-linux-gnueabihf.tar.gz/sha512/edaa9ece1404a606d6b635406ad5e721c8d094ffa1c73ce19222afc2b4ea7b3b9e23e7c5589ae10fd9f4c4aefa265773bcfce6c510efbca57782115d43daeb13 +MPFR.v4.2.1+1.armv6l-linux-musleabihf.tar.gz/md5/2213207772b8a50de4768816fdc20e2f +MPFR.v4.2.1+1.armv6l-linux-musleabihf.tar.gz/sha512/d24debc38b8135ac5c10c4ea19de0c69126b6881940b4e182118e12cc2c7cf0aca2db065620f0cca636742da32eddec5bda3b4f449a035274f05120c977ed449 +MPFR.v4.2.1+1.armv7l-linux-gnueabihf.tar.gz/md5/a0d9fe20c9ff0027b6816ee0102b1f9a +MPFR.v4.2.1+1.armv7l-linux-gnueabihf.tar.gz/sha512/97ce02898dc0d29a616048fd7ecee3100a710f7a30a21f2276c01675749034a5241be88bd46dff3dbf9ea0adca98a4357bd16e43fa9520e7a02477494c2d072e +MPFR.v4.2.1+1.armv7l-linux-musleabihf.tar.gz/md5/7898b9047c914b290b5928af5df63030 +MPFR.v4.2.1+1.armv7l-linux-musleabihf.tar.gz/sha512/cbefa9588752c65751630832417c1c42e4819d49ff9a505f61c2567ef4271097e585542fa898efd61409a43e439d827bb79f693a0937d0a3a427b39535979588 
+MPFR.v4.2.1+1.i686-linux-gnu.tar.gz/md5/ac5a9db4bef94e7062dac463b5f87346 +MPFR.v4.2.1+1.i686-linux-gnu.tar.gz/sha512/2b5f3656e25065bfd83c81ee75999e6162c6e5436fcb0e3e3a767e2d941a556b4ebd3bebab78c63e8165105f81576959d8ad6e6d9cef1052751e39849e85df73 +MPFR.v4.2.1+1.i686-linux-musl.tar.gz/md5/6dc6a00d3ea22e2c60374d49926598d6 +MPFR.v4.2.1+1.i686-linux-musl.tar.gz/sha512/4a90356091b53d7238dda59f6e9c5c420614f16460dc67310e581611ad46a2dd3324d6164cfecf1bcd660b8f2e473f0afe137aac954c608b11be3acbda648e14 +MPFR.v4.2.1+1.i686-w64-mingw32.tar.gz/md5/7f7158a28ce8f262b897b38218f57958 +MPFR.v4.2.1+1.i686-w64-mingw32.tar.gz/sha512/8fbae0f1dd36534d4b9c63192c6e5cb1e531732d8eb1ab36783a6c71182f24ef80245b31a03460fd2f412fd0acaf1c4b9c8b574725271391217a3977b9ae4c79 +MPFR.v4.2.1+1.powerpc64le-linux-gnu.tar.gz/md5/ac70f716bddd5323b4add663b473b52d +MPFR.v4.2.1+1.powerpc64le-linux-gnu.tar.gz/sha512/ebb0f5ea76c892b7a4e4636706e71f476aaea58bb88e1734a7966c44495fda8c81318e0e8629e208185f0fc8d0c73b6f3463034cd831dfb5fbbd493a0689bc06 +MPFR.v4.2.1+1.x86_64-apple-darwin.tar.gz/md5/ff13e865e3be717b0fffc16296cb2f56 +MPFR.v4.2.1+1.x86_64-apple-darwin.tar.gz/sha512/98479210910945714da0285a40803674242581894a731ba4709c70dc1341849e736a88aa4914df0ff536c15f8848c417e712ff6abeb25047d300f8b215fd131f +MPFR.v4.2.1+1.x86_64-linux-gnu.tar.gz/md5/ca582be47601b8e6edb9d39f2881f44a +MPFR.v4.2.1+1.x86_64-linux-gnu.tar.gz/sha512/44a2e6158fde9fa8eaa6fac513dd5a8cae25a4b8879e5bb752a3f6af53d750c3a8e79be669ad87925b10c559cf9518fae431a607a342c48c00a390555e7e7b1f +MPFR.v4.2.1+1.x86_64-linux-musl.tar.gz/md5/0babbb823964ccebf63b42fd07f08936 +MPFR.v4.2.1+1.x86_64-linux-musl.tar.gz/sha512/880b685d9b456fa2bf78e707273783423f9ff00791b529eba00c5e1b94ff96f4ba01e680152a4d6b45b695e3c1169d07f793db42c5a4120861813d5458dfc828 +MPFR.v4.2.1+1.x86_64-unknown-freebsd.tar.gz/md5/f11d634e5a19177fe36b2b2f6f5727ca +MPFR.v4.2.1+1.x86_64-unknown-freebsd.tar.gz/sha512/291245c06edf31b2e39b6774359ebd4f95b924f19d2a7e8581822a5bf908426d00f0452c061a027da0d7d4bb2fa1bb7ef8ab6d8e49bc848d6d7450a8d5c8a9c4 +MPFR.v4.2.1+1.x86_64-w64-mingw32.tar.gz/md5/dcfad84470f15484443734feccbf8bf6 +MPFR.v4.2.1+1.x86_64-w64-mingw32.tar.gz/sha512/ceba1814fa671c2ba3e1ffeb6c736776981052e14111112fe963b5c11fd070136f8f022c5c21895f1f4f5084a5612fa673dddbb6b9622d7cade9b62eefcc8a14 mpfr-4.2.1.tar.bz2/md5/7765afa036e4ce7fb0e02bce0fef894b mpfr-4.2.1.tar.bz2/sha512/c81842532ecc663348deb7400d911ad71933d3b525a2f9e5adcd04265c9c0fdd1f22eca229f482703ac7f222ef209fc9e339dd1fa47d72ae57f7f70b2336a76f diff --git a/deps/checksums/nghttp2 b/deps/checksums/nghttp2 index f8226d4f68b3d..e552dfe9329b0 100644 --- a/deps/checksums/nghttp2 +++ b/deps/checksums/nghttp2 @@ -1,34 +1,36 @@ -nghttp2-1.60.0.tar.bz2/md5/ec20d9a6df7cc006894f72f81f9f2b42 -nghttp2-1.60.0.tar.bz2/sha512/95b76dd492dad490640469c4b806dd1a446f11143bc990220ff106fe4bfb76cdc4dfb112e0297c543b2d828f2870aa09ba820d88e3e9dedb29c8f3d3c9741af8 -nghttp2.v1.60.0+0.aarch64-apple-darwin.tar.gz/md5/dbf9f8161a124dc88ba44b54094b96e4 -nghttp2.v1.60.0+0.aarch64-apple-darwin.tar.gz/sha512/1997f473ea802afb09d7e13feb4eec9c11ad1d161cf83659ef6059a7c81639e00f8a3461c3538c81ea025e359b0927c3a362cef4a57e6544ad27588683142203 -nghttp2.v1.60.0+0.aarch64-linux-gnu.tar.gz/md5/d2e821a693d7d0720f0158b3e19ef7fa -nghttp2.v1.60.0+0.aarch64-linux-gnu.tar.gz/sha512/4165a1282d125b461d670d7d953c8a06b6508d1b97383a4126bc2fa9641454a9e0be749dbbaf772f2c2e6ea8cc3e64eb980cb0e09ac3d2fe5533eb3e6f7fa9e8 -nghttp2.v1.60.0+0.aarch64-linux-musl.tar.gz/md5/61ecc91336fcddb0f58af6af167e9a81 
-nghttp2.v1.60.0+0.aarch64-linux-musl.tar.gz/sha512/802c7455e8f1ddfea74d3de3ceb937d1d10312f51594257cd406aedd67c181ada6ee5115bca00f8ee340a1471e2903bbe0159a0c08b80c556188647345e2c85b -nghttp2.v1.60.0+0.armv6l-linux-gnueabihf.tar.gz/md5/2998ae8d24d1bd540a29e0c6054bfcc8 -nghttp2.v1.60.0+0.armv6l-linux-gnueabihf.tar.gz/sha512/5b2235a0c8bded57adcbab11dbe97b85a7d6d8a083c155bd74b0ac5546aa861730e88b615f1cbfa1071fcc2eb252aae8508e926ad3d5a1ddf0374536c260217e -nghttp2.v1.60.0+0.armv6l-linux-musleabihf.tar.gz/md5/7ebec92e3b340e25b952ccc4e714aa2e -nghttp2.v1.60.0+0.armv6l-linux-musleabihf.tar.gz/sha512/eb0e5c584527182816203ce9bfc35688a969803104ffd17dd4ac3720c27a4fcde3b3b471bf66fda8ac83ec8a56aa82d6d40f492ce06cbf6af39fafc60f35574d -nghttp2.v1.60.0+0.armv7l-linux-gnueabihf.tar.gz/md5/8c124c0daf59c622aedc7b9f1423d522 -nghttp2.v1.60.0+0.armv7l-linux-gnueabihf.tar.gz/sha512/6e03246d1bfef7f184da68ac0eacc975dcb99172f2f352ce4ea5f5ae77536812163874d7ffc4fcb2df65dc51079880fdb83afc8988b73edb241cb641dc72f1fb -nghttp2.v1.60.0+0.armv7l-linux-musleabihf.tar.gz/md5/79968e1cb68c3b0518db528933251b0e -nghttp2.v1.60.0+0.armv7l-linux-musleabihf.tar.gz/sha512/f40790aa9a86fa2f44072c36a33416a7a8b4778881233989f8ed64ccb84f59ccdf3632b7a9d48d3e434e26cbd48c020e5d208da8fcb96e4e4ad41757e050213a -nghttp2.v1.60.0+0.i686-linux-gnu.tar.gz/md5/1580bf21084fa62ec26020f5c89430a1 -nghttp2.v1.60.0+0.i686-linux-gnu.tar.gz/sha512/cf83afe1bb796c57e220c0ba32a6990830df50cd91f82c781f2722d7b0ca5e5fbd8b708a0204be65bb8838c85b548f867c97e85941d124b81c67e01738f1db1a -nghttp2.v1.60.0+0.i686-linux-musl.tar.gz/md5/605eb6cd67b6fe3a1ba2d95413635831 -nghttp2.v1.60.0+0.i686-linux-musl.tar.gz/sha512/2c626b76898b4d782038661601fe34580c3cd560a519a46c4f6bc62d94ab987c7f2984350fc65933c00850cd2fe0b942fc64fcb23d2fb7db29bfed5866291b1a -nghttp2.v1.60.0+0.i686-w64-mingw32.tar.gz/md5/5b5694f36addbc503bc1e78e57159e5a -nghttp2.v1.60.0+0.i686-w64-mingw32.tar.gz/sha512/e70069b1dde2cf4dd041c4cc1c1ff40f67c20a8954b88d997fd7bf03d925b08148bc55293380dffce8c3b550a0e5768c94066e1a3b881ce4109ee94076c9a8b8 -nghttp2.v1.60.0+0.powerpc64le-linux-gnu.tar.gz/md5/1d073bba8e90c970bf1325a3d150d129 -nghttp2.v1.60.0+0.powerpc64le-linux-gnu.tar.gz/sha512/7e6f3895316d47a701944e8ee192b56f66aa05bf212c41164d25a0507f0e54c4c58c856e1c464fe3ec3eae78e0fe09ba8cf8b595c246faa3300a797750677180 -nghttp2.v1.60.0+0.x86_64-apple-darwin.tar.gz/md5/27d405bf53d4d438f74f91176d638741 -nghttp2.v1.60.0+0.x86_64-apple-darwin.tar.gz/sha512/59c4b4cca09e9a99e2e9ccc765068870824b907577c385313568ea29cd395caa3352bda230238888412b625e4a428a24c9ae0e59d122730cbd8025c6edbf0196 -nghttp2.v1.60.0+0.x86_64-linux-gnu.tar.gz/md5/ed1fe996e4c3e51d9ea8f724883dd3bc -nghttp2.v1.60.0+0.x86_64-linux-gnu.tar.gz/sha512/0b20db04ef7b2cc470b9abaab05d0e1e7ea3d674c1ed47c63e1cda00b98a6f10ce19ceb77ebd5ece28f6e4a2cf46227f5858f767ff0f04feed867c57941793ee -nghttp2.v1.60.0+0.x86_64-linux-musl.tar.gz/md5/cf3fcdb5720633700e4f9a9d8cd0cfc0 -nghttp2.v1.60.0+0.x86_64-linux-musl.tar.gz/sha512/d8f87b354de0f47be21b8e3aab6c2b05ee2af377e4bcc7df692fc4dd361ee5b731a190a0d9b4d4fdedf9c3a6a8a300f43338b38ac096da39ec13d4b79b544144 -nghttp2.v1.60.0+0.x86_64-unknown-freebsd.tar.gz/md5/b83aba7b3bd97ed7de770a597d6ec374 -nghttp2.v1.60.0+0.x86_64-unknown-freebsd.tar.gz/sha512/e40e47835bb0d5d548fbcfb28a64124323422bcdab411bcee7d4288cea765c6c82d7f4586980ee28b6ff310c6c7313aa4185ede192cd94839fbe708ab1ed14a7 -nghttp2.v1.60.0+0.x86_64-w64-mingw32.tar.gz/md5/37ba862c196b4d7c063cddc87722f7ff 
-nghttp2.v1.60.0+0.x86_64-w64-mingw32.tar.gz/sha512/ce0b70b4ad5cb30b83e672c3875fac7bcc8fc039506f05fef552a9d9cb53f053187dd02da4550dd7e5ef9aaaf8d587cee331eace0335f663d4190bacbc4ff9a2 +nghttp2-1.63.0.tar.bz2/md5/c29228929c3c323ecd0eae172f1eb9d5 +nghttp2-1.63.0.tar.bz2/sha512/a328b4642c6ca4395adfcaaf4e6eb6dbd39fa7bd86f872a76260af59a5a830e0ff5ad015865d6bc00e0baa8e4d0d9a67b4b97e9d78e5e05d1c53522364e5e235 +nghttp2.v1.63.0+1.aarch64-apple-darwin.tar.gz/md5/d11717d2fa4d04a374c55b86c790366a +nghttp2.v1.63.0+1.aarch64-apple-darwin.tar.gz/sha512/10dd24435818b66c92399941d3ea3280451f039e59112a866f973cdfe5602136e366f2bdb4638a1264f48fde737be4e50a49e44934971b4c69284377f4d9cf53 +nghttp2.v1.63.0+1.aarch64-linux-gnu.tar.gz/md5/5ebe2395d0d31b6481a4218468ec82a4 +nghttp2.v1.63.0+1.aarch64-linux-gnu.tar.gz/sha512/e7aff0eaa99be3b5e8539f160e1e5cf53bf1efa6b5f07d625796353a36ef12473946562731cedd570205229d263f81fd692b222a01297b09c89ce0e49edeff7a +nghttp2.v1.63.0+1.aarch64-linux-musl.tar.gz/md5/95c492eeca62b92baf8f8fa11a1da41f +nghttp2.v1.63.0+1.aarch64-linux-musl.tar.gz/sha512/10c0b3f560b1ff7ca9fe5cbc3340ec7789ecdeddf5d857d5c1245d1519074bd105f68c29714f36c8c9b688c5bf42133b30cbabed450410b79bb4f1f1d1474ef6 +nghttp2.v1.63.0+1.aarch64-unknown-freebsd.tar.gz/md5/c6fe5cea3d1386532f75f512e3641a7c +nghttp2.v1.63.0+1.aarch64-unknown-freebsd.tar.gz/sha512/20437e066e13b770f014e30b57830237bf38c3ecc56e716208c5103a7c242fec6cedcf78e641004891afa40ce945bfe1319d11aab4810a76ceeb6ff10373984c +nghttp2.v1.63.0+1.armv6l-linux-gnueabihf.tar.gz/md5/995dbd522109b882eaf7bedec84e8372 +nghttp2.v1.63.0+1.armv6l-linux-gnueabihf.tar.gz/sha512/faf7ca13cd1d3050a5c693c61a5e92b560e1c0c2e30833162cc7aeefcd31c018b2015dbdbf543f38ddec2aefe78927af5f30f3938dc6a67b3b84fc399513c8cf +nghttp2.v1.63.0+1.armv6l-linux-musleabihf.tar.gz/md5/7bf7063ee64eb9b41212a6c39111de4f +nghttp2.v1.63.0+1.armv6l-linux-musleabihf.tar.gz/sha512/3091a4373508e1913e69d76688fd31d57d8e01e15c39c8859b025ac19ff23a0d396dc771fb0a2860ee1913866c0b0dbc4432fbcd0283b5cbecfb02235c738762 +nghttp2.v1.63.0+1.armv7l-linux-gnueabihf.tar.gz/md5/645de11170289308ce00ed089853271c +nghttp2.v1.63.0+1.armv7l-linux-gnueabihf.tar.gz/sha512/bd974e629cd2626fc67a20b175e82e3a0dc742ee0c1b82b505395ebb7ce282def7d123b9bd8c4f7e3386db6c2c3d38d94475a00d96efb504a06fc2371db5a9e2 +nghttp2.v1.63.0+1.armv7l-linux-musleabihf.tar.gz/md5/8b5776769ec5577fde617814ccf57a7c +nghttp2.v1.63.0+1.armv7l-linux-musleabihf.tar.gz/sha512/7d463687abdfb600fcc11fd62b4973cdabdcfc076c5ace554af126ba6b0d925b9ff3eb3a9f730af6e4ef39d6ca1930084833a159d205a4230c88bbc82da7a3b6 +nghttp2.v1.63.0+1.i686-linux-gnu.tar.gz/md5/beb06b6d634fce5c15453c216c50d6b7 +nghttp2.v1.63.0+1.i686-linux-gnu.tar.gz/sha512/0616ffb2a98aa4cfdfbcaa408a6b33815a8e52af60417081854afb7b500e433f9fd63dff633df40507829217e96058f8b27184e50f442c175480e975f2387e13 +nghttp2.v1.63.0+1.i686-linux-musl.tar.gz/md5/ab15ac7ffaaeb7e69f7cad24ed3a6c4e +nghttp2.v1.63.0+1.i686-linux-musl.tar.gz/sha512/c9c18a6e255f4e3f81a9ee27b8ac58149507bce3dc9e79acbba8573ed66908c6cf3dcd29d9b989e47788977e6dec474c291f764fc1cfd718f20557001ca363c7 +nghttp2.v1.63.0+1.i686-w64-mingw32.tar.gz/md5/415858a5c10d7f2473174224e44ef3f6 +nghttp2.v1.63.0+1.i686-w64-mingw32.tar.gz/sha512/575c425df04c084980e8b9149579b787bf7ff38eecd958f38164db0bb0b4c331d4f9d6534f2c2cf1a105fc922ecba1d654a6a48a9a390f53bbcb4e8c2edbb0c7 +nghttp2.v1.63.0+1.powerpc64le-linux-gnu.tar.gz/md5/b2b8216a50aa7dd670e14ad47de31c3f 
+nghttp2.v1.63.0+1.powerpc64le-linux-gnu.tar.gz/sha512/c41570c8aa245fc2dce2a824cd73c4132ae2a65f9347d097462624917d637da34652c7f693e595a1f1ff1376171be4e2d48633d00dd6cafb00cd6cc974d9fa9e +nghttp2.v1.63.0+1.x86_64-apple-darwin.tar.gz/md5/878b3344f3656d663e33d7afc2fefb21 +nghttp2.v1.63.0+1.x86_64-apple-darwin.tar.gz/sha512/3f9cd4b20b149f8166f8411dc922c80a483113722f9a54c6274b3d4527dfd54bd5723fbd60535d8e90196f1d4e6e5c415f839b34cc4dc8c98eceee5466666471 +nghttp2.v1.63.0+1.x86_64-linux-gnu.tar.gz/md5/96fbea72cc42fa050fb0c9cb065588c8 +nghttp2.v1.63.0+1.x86_64-linux-gnu.tar.gz/sha512/47dd191ec2eb91b34ac9acd1802515778b2cea930c7c5876db3812566d3959e8a0e47bcee9601866180e40c9fd382d1599ca205a6229aaebdfa47e689f6ebd23 +nghttp2.v1.63.0+1.x86_64-linux-musl.tar.gz/md5/d68d6aac8629d95b2cd1b218152cc529 +nghttp2.v1.63.0+1.x86_64-linux-musl.tar.gz/sha512/fdac9dacd1392322cdc308997d7205b7194f897e21ff15fe4eda7c9c8a3b0b2b777cbc54723d884c341ef46467fc4fa056054ebff0f849090464ced031ff98f7 +nghttp2.v1.63.0+1.x86_64-unknown-freebsd.tar.gz/md5/61ecaf1d84e6b203ca7e601a91dc68e3 +nghttp2.v1.63.0+1.x86_64-unknown-freebsd.tar.gz/sha512/c25a017148ff7f01299a9a7cead87251f1a31fd404f4b1f5413fe9b09823ae471173f8d0828b096bb0ac7411e23d354f2c9e2596668d38d9a509831c6b4f5624 +nghttp2.v1.63.0+1.x86_64-w64-mingw32.tar.gz/md5/60ceb3ff3f5287b2782dfd98db4f1816 +nghttp2.v1.63.0+1.x86_64-w64-mingw32.tar.gz/sha512/971fd9e24c393fc594ae813caa7b14052320d924309bea0bd77c847ce0e86803cb803be051812ea029baf632deff6f9f8200dbc41f9bc143eba098adc294e757 diff --git a/deps/checksums/openblas b/deps/checksums/openblas index 08bd98646c24b..74cdaa26c30b2 100644 --- a/deps/checksums/openblas +++ b/deps/checksums/openblas @@ -1,94 +1,98 @@ -OpenBLAS.v0.3.28+2.aarch64-apple-darwin-libgfortran5.tar.gz/md5/312aa603d089d680205dad7d5da58195 -OpenBLAS.v0.3.28+2.aarch64-apple-darwin-libgfortran5.tar.gz/sha512/ffb0069561f52f8ac2f8affe937a00592e0c5d75c6d64bb0d5c93d1c925c93a46b763638031c88818b9dcef4a7b149ee3f15792a812e87f57a8ad086604164c4 -OpenBLAS.v0.3.28+2.aarch64-linux-gnu-libgfortran3.tar.gz/md5/7c43d9e9ac07820130a3d5faefdef882 -OpenBLAS.v0.3.28+2.aarch64-linux-gnu-libgfortran3.tar.gz/sha512/3ade0f098796148c37b118f9c052bad4e40431b4792f001043f040f8b1e4b7c3bae512f56ea21e6c0111246b2200e7720fe720a56a19dd11d1fba789344f29e3 -OpenBLAS.v0.3.28+2.aarch64-linux-gnu-libgfortran4.tar.gz/md5/cd2fe87dac703c8bfa25406aa732b88a -OpenBLAS.v0.3.28+2.aarch64-linux-gnu-libgfortran4.tar.gz/sha512/2aea68bd8f1db2ac920951c8d9a47ce8c071f3736ee8aad8d185a09be25234a0ffd11b9f9640015b82770ba3b3fad9aa511cc43501c1bb5a3a44f1fb7ccd5692 -OpenBLAS.v0.3.28+2.aarch64-linux-gnu-libgfortran5.tar.gz/md5/e3db2bf2f1f38aeee8530c78f3ec049a -OpenBLAS.v0.3.28+2.aarch64-linux-gnu-libgfortran5.tar.gz/sha512/a0ccb92e818650ac3cbc292d5af1a000ee9b123953cc3eb16e2479e926af3f2be0ed9858e3c0c1075b1b9dd70ec1e51b9dce2c9d45b999d296aa050d257a3cb1 -OpenBLAS.v0.3.28+2.aarch64-linux-musl-libgfortran3.tar.gz/md5/5bb605738930037259e773ebdb4a7041 -OpenBLAS.v0.3.28+2.aarch64-linux-musl-libgfortran3.tar.gz/sha512/967e0f33be7b743d9617627a947a802286962a46c7c3b2418aaa1504cffc5f311b01e1702b35ded18ae3686b1914c6085213b03fa8a51e0a7ca16dc4cfee8504 -OpenBLAS.v0.3.28+2.aarch64-linux-musl-libgfortran4.tar.gz/md5/ce175e82b9c6597c546552e79a43f934 -OpenBLAS.v0.3.28+2.aarch64-linux-musl-libgfortran4.tar.gz/sha512/8ff5dff293d9786fc4f541b209b35afcbe325c13ddd0f9c8f9bfca8ba5c318c7890152260a5441b9e9088751ce03b1ff8f0f5d6fd4f142fae34bdb7390d1952c -OpenBLAS.v0.3.28+2.aarch64-linux-musl-libgfortran5.tar.gz/md5/cae6aabbdccf31fb78b234785b52d48a 
-OpenBLAS.v0.3.28+2.aarch64-linux-musl-libgfortran5.tar.gz/sha512/ac842023e5db243fcfada22adca051bd2ffa04fca496454539931eede159e5d0490d444c338684c2d178c3367b23b8f3d76c544e30f1897bbed181f56237619f -OpenBLAS.v0.3.28+2.armv6l-linux-gnueabihf-libgfortran3.tar.gz/md5/5d1f45f53dd1730051095fb8e027b14f -OpenBLAS.v0.3.28+2.armv6l-linux-gnueabihf-libgfortran3.tar.gz/sha512/0b1f91e86b5078b7cd6b64bc429a0e63bb5adf28df1baa336e67819fbd2c09f59b643c39e580f63e3bbccdc631c5d5e14c7d8afa6af94250453ce5286958f90f -OpenBLAS.v0.3.28+2.armv6l-linux-gnueabihf-libgfortran4.tar.gz/md5/8b3e3ea928975c575798d47466aafb82 -OpenBLAS.v0.3.28+2.armv6l-linux-gnueabihf-libgfortran4.tar.gz/sha512/ebac0f7047dd8b97d85e4251953a23824701af02754afd6808f13eb276326b30eb292c85fa717fbd2f21b929e6a9816a012b8ea378a0fa27e671f81435f5d3b9 -OpenBLAS.v0.3.28+2.armv6l-linux-gnueabihf-libgfortran5.tar.gz/md5/5aacfce96d5673b4d8341cb097d22c4a -OpenBLAS.v0.3.28+2.armv6l-linux-gnueabihf-libgfortran5.tar.gz/sha512/b84dc2b8cbe5453555182c3fcd8624d7a2b28fe3826d54fde3b77ad2c33e60309317d150f07554dd85e168b0ac1f91537a5c2c17fff9c02dd9216f01161e4965 -OpenBLAS.v0.3.28+2.armv6l-linux-musleabihf-libgfortran3.tar.gz/md5/dfeac22ee204868cf254dab5ae79382b -OpenBLAS.v0.3.28+2.armv6l-linux-musleabihf-libgfortran3.tar.gz/sha512/710117eb7400a0aacf69d6053730eb3b3ff4767f8d38defb2aaad94aebf1646a794489e78a8f46b469901159cdca73dd2b9460fff11e95daa4a2642cab721a25 -OpenBLAS.v0.3.28+2.armv6l-linux-musleabihf-libgfortran4.tar.gz/md5/13ff2a40bc55839bdef76b796db1eb76 -OpenBLAS.v0.3.28+2.armv6l-linux-musleabihf-libgfortran4.tar.gz/sha512/eb61fe6c0221e8f9d7a626b8d088ae1497155341dafb69835e7d53af76689ae212e1e4621e0729df5d896888c0b2d7354a24f7b57fe1d68f0b35c26bcf096699 -OpenBLAS.v0.3.28+2.armv6l-linux-musleabihf-libgfortran5.tar.gz/md5/aa7349724ba1d47256705777e755289a -OpenBLAS.v0.3.28+2.armv6l-linux-musleabihf-libgfortran5.tar.gz/sha512/25ab56c44b7d0d5de17344f39071e6894e878e89b5e35412a3c9fe345abd2eef76d7816cabb6407c7c521c3bf67a4741b37ad7e580962ead9275273e431f1fb3 -OpenBLAS.v0.3.28+2.armv7l-linux-gnueabihf-libgfortran3.tar.gz/md5/5d1f45f53dd1730051095fb8e027b14f -OpenBLAS.v0.3.28+2.armv7l-linux-gnueabihf-libgfortran3.tar.gz/sha512/0b1f91e86b5078b7cd6b64bc429a0e63bb5adf28df1baa336e67819fbd2c09f59b643c39e580f63e3bbccdc631c5d5e14c7d8afa6af94250453ce5286958f90f -OpenBLAS.v0.3.28+2.armv7l-linux-gnueabihf-libgfortran4.tar.gz/md5/8b3e3ea928975c575798d47466aafb82 -OpenBLAS.v0.3.28+2.armv7l-linux-gnueabihf-libgfortran4.tar.gz/sha512/ebac0f7047dd8b97d85e4251953a23824701af02754afd6808f13eb276326b30eb292c85fa717fbd2f21b929e6a9816a012b8ea378a0fa27e671f81435f5d3b9 -OpenBLAS.v0.3.28+2.armv7l-linux-gnueabihf-libgfortran5.tar.gz/md5/5aacfce96d5673b4d8341cb097d22c4a -OpenBLAS.v0.3.28+2.armv7l-linux-gnueabihf-libgfortran5.tar.gz/sha512/b84dc2b8cbe5453555182c3fcd8624d7a2b28fe3826d54fde3b77ad2c33e60309317d150f07554dd85e168b0ac1f91537a5c2c17fff9c02dd9216f01161e4965 -OpenBLAS.v0.3.28+2.armv7l-linux-musleabihf-libgfortran3.tar.gz/md5/dfeac22ee204868cf254dab5ae79382b -OpenBLAS.v0.3.28+2.armv7l-linux-musleabihf-libgfortran3.tar.gz/sha512/710117eb7400a0aacf69d6053730eb3b3ff4767f8d38defb2aaad94aebf1646a794489e78a8f46b469901159cdca73dd2b9460fff11e95daa4a2642cab721a25 -OpenBLAS.v0.3.28+2.armv7l-linux-musleabihf-libgfortran4.tar.gz/md5/13ff2a40bc55839bdef76b796db1eb76 -OpenBLAS.v0.3.28+2.armv7l-linux-musleabihf-libgfortran4.tar.gz/sha512/eb61fe6c0221e8f9d7a626b8d088ae1497155341dafb69835e7d53af76689ae212e1e4621e0729df5d896888c0b2d7354a24f7b57fe1d68f0b35c26bcf096699 
-OpenBLAS.v0.3.28+2.armv7l-linux-musleabihf-libgfortran5.tar.gz/md5/aa7349724ba1d47256705777e755289a -OpenBLAS.v0.3.28+2.armv7l-linux-musleabihf-libgfortran5.tar.gz/sha512/25ab56c44b7d0d5de17344f39071e6894e878e89b5e35412a3c9fe345abd2eef76d7816cabb6407c7c521c3bf67a4741b37ad7e580962ead9275273e431f1fb3 -OpenBLAS.v0.3.28+2.i686-linux-gnu-libgfortran3.tar.gz/md5/53087cc770708c57d2654fd0095b64df -OpenBLAS.v0.3.28+2.i686-linux-gnu-libgfortran3.tar.gz/sha512/90961448ae40b0445bf881d0815aec54d2096ad235dc8e3db8d698a72961ef9a97e7fcd08f79c83cd1f7c5a341464f52a90351d927d5f1c3e9c8ee32b17970db -OpenBLAS.v0.3.28+2.i686-linux-gnu-libgfortran4.tar.gz/md5/ee910e19faa961bde11fdf90c211df9d -OpenBLAS.v0.3.28+2.i686-linux-gnu-libgfortran4.tar.gz/sha512/f5cfecfe965991cfd7843eff71efa71d6842058565bb63657e909b2942e58a8c7506aa66335308961e59f392da16e1177d79542ae509795566a14122f67a1782 -OpenBLAS.v0.3.28+2.i686-linux-gnu-libgfortran5.tar.gz/md5/fe52ba7ca8e16f37aa04b79248e0471d -OpenBLAS.v0.3.28+2.i686-linux-gnu-libgfortran5.tar.gz/sha512/79b5108886d60f12424709a841e359dc1cf23cef21bb0ee6d1a48043ac48a35dac1637e43c8ebf3f2e10dd34721993a7a12c5776f2975dd5bd7b6e29e1a9adc3 -OpenBLAS.v0.3.28+2.i686-linux-musl-libgfortran3.tar.gz/md5/88d8ff421d29456f1d7670ceaf8867ca -OpenBLAS.v0.3.28+2.i686-linux-musl-libgfortran3.tar.gz/sha512/91c1bd8142845d11fecba87a719315a14218e3863955ddd2ed82cecd4a2c177a48c660b6aac374ee9a11008245c0ced1bae70eaf5a1a6e3114db02e09a96396f -OpenBLAS.v0.3.28+2.i686-linux-musl-libgfortran4.tar.gz/md5/3035066a53032b551e49f56b323e941d -OpenBLAS.v0.3.28+2.i686-linux-musl-libgfortran4.tar.gz/sha512/f218e152a1c92bd374599814612add8010aedc78113cbe06465e8a1ee7f66892bb654cad687aa55555e74f3a65d74608692d41c9f0ce6c0bc63475ef62ab55b7 -OpenBLAS.v0.3.28+2.i686-linux-musl-libgfortran5.tar.gz/md5/f7cf36ac9a0cbb535952ec73f2e6c9ea -OpenBLAS.v0.3.28+2.i686-linux-musl-libgfortran5.tar.gz/sha512/00ab052d9fa4a72a640545782019f24ed6017b36aa89c5e659ce73b1e821817f560c09f71b26c027c0a05bd13567c71a6d7f5995d1c39ab233bec56cd3a7fd9e -OpenBLAS.v0.3.28+2.i686-w64-mingw32-libgfortran3.tar.gz/md5/b65414bb15539e5aa2f5f1c7984edb94 -OpenBLAS.v0.3.28+2.i686-w64-mingw32-libgfortran3.tar.gz/sha512/847ada020bb92fe6ea81dfffaf855707a529c9c0f7e246e802b9521e5c7d4aa36104d04279c09a905a797184cdf05a6fabf84711b7661ecb14e9ac2fba251f61 -OpenBLAS.v0.3.28+2.i686-w64-mingw32-libgfortran4.tar.gz/md5/0b626ebb8b3fc49b946723a9a2a21a91 -OpenBLAS.v0.3.28+2.i686-w64-mingw32-libgfortran4.tar.gz/sha512/b5bba23878399fc1ff20abc2e2eb4acb9691ce982f290e33384732452774a0b447bd0fb01ee696d10ad8b03d99eec905662af92bd3b499d9fe6db419e05d2573 -OpenBLAS.v0.3.28+2.i686-w64-mingw32-libgfortran5.tar.gz/md5/cb99d7d4944c5283a1a0142683e1d377 -OpenBLAS.v0.3.28+2.i686-w64-mingw32-libgfortran5.tar.gz/sha512/b77d3225e60f49506917bfff78c187df7157dbc834eccda2fa03d03eef8214b225682888a411a8b6e4b29a8d7e2b0ca625ea8c56b84ecc39e1f4f1012523c096 -OpenBLAS.v0.3.28+2.powerpc64le-linux-gnu-libgfortran3.tar.gz/md5/c6e5d4867a068e08b3f56f474e498b81 -OpenBLAS.v0.3.28+2.powerpc64le-linux-gnu-libgfortran3.tar.gz/sha512/de6249439758a501bfd27d3ef04ec04cc06edf64de73f0709a6a40a2eaf40bd3d5d77dfd54b7b19e2f6bf6c104b4416d3e225faa0cff4cb631785c08d90b8614 -OpenBLAS.v0.3.28+2.powerpc64le-linux-gnu-libgfortran4.tar.gz/md5/32e70466cfa3cfec65ab4cad3abc5f03 -OpenBLAS.v0.3.28+2.powerpc64le-linux-gnu-libgfortran4.tar.gz/sha512/2642385a5e9fc8e9c3839a5a44f9753b21b5078725f7d0c3e1ebe96b76129a3b8e2627d92629dee4f6fd7e8e51e86a7fbedc80cbe4d1a6812cea363559950da0 
-OpenBLAS.v0.3.28+2.powerpc64le-linux-gnu-libgfortran5.tar.gz/md5/e2332831bd88d57132241697952819e7 -OpenBLAS.v0.3.28+2.powerpc64le-linux-gnu-libgfortran5.tar.gz/sha512/ad03edf9ac56bf6311f0ca70a1bc359242accfe82cba9e42f39f6cb1c3006226179ff9be8218847889cae10fac13bc33f60837e1e3249e309172da7fbc25400f -OpenBLAS.v0.3.28+2.x86_64-apple-darwin-libgfortran3.tar.gz/md5/27c24775af446a44a72a28ffd197696d -OpenBLAS.v0.3.28+2.x86_64-apple-darwin-libgfortran3.tar.gz/sha512/2af8caa33bee88efff84653f3932b04e8fd4aabb1bf16d49fa73657b0ec13c9457fde7ab3f953fc9b01da5c2841c3c9b588e3b0f559b89df0e6268468d1f7cc8 -OpenBLAS.v0.3.28+2.x86_64-apple-darwin-libgfortran4.tar.gz/md5/414e701d918d5fba08a12de6979db4b5 -OpenBLAS.v0.3.28+2.x86_64-apple-darwin-libgfortran4.tar.gz/sha512/949886d388b80e19b944d102852f2bb58ffa03c42e624986dd9dc076797c996634d4a8fc0f04544451d6848c2079969816979e1f68a999b2747e9dd5472be7a6 -OpenBLAS.v0.3.28+2.x86_64-apple-darwin-libgfortran5.tar.gz/md5/29fcf62c0280cc10f91d22189a2e8de8 -OpenBLAS.v0.3.28+2.x86_64-apple-darwin-libgfortran5.tar.gz/sha512/02e75d4ecf9cd922157a72c0ca2e713cf336b125df3982cd5f7cc4f2a04367ad4c2b1190ca2a0a9df8b639c7ebcfc9783066e99dd0b13acde7b02038391e8567 -OpenBLAS.v0.3.28+2.x86_64-linux-gnu-libgfortran3.tar.gz/md5/147d5e8eb2ec78fc8a31bdb091fab001 -OpenBLAS.v0.3.28+2.x86_64-linux-gnu-libgfortran3.tar.gz/sha512/2319eda568800c0b1f2d96a8a36c59b1bbd792c06de1d740aea3f1e49798242426ea8d10c100c42c3c281702e2b4f5b673b6ab5252b276d48542e875bcaa3094 -OpenBLAS.v0.3.28+2.x86_64-linux-gnu-libgfortran4.tar.gz/md5/448857d9c4b2e95afc12a14c75b24055 -OpenBLAS.v0.3.28+2.x86_64-linux-gnu-libgfortran4.tar.gz/sha512/3e7c8cd55e0b15a30992b1e0b48a6e2ae36fd9babf689fa5595c7de94aec401de1d7821d45a22bf14cd5c45c708bc8fa3511d34d732dadd4daaca3f49e200bdb -OpenBLAS.v0.3.28+2.x86_64-linux-gnu-libgfortran5.tar.gz/md5/3aaf417685b44e0e505208f7b31b981a -OpenBLAS.v0.3.28+2.x86_64-linux-gnu-libgfortran5.tar.gz/sha512/f7b1d123e48ede93fe624a79d9535a8915bfa3441d7a6f9c6643467027414c9f2538e299858ea98bbb49d4e6d385a6a491063cb1878ac3b0b3d6a8f7ff0a48df -OpenBLAS.v0.3.28+2.x86_64-linux-musl-libgfortran3.tar.gz/md5/5723136deaaf4b2e5960fb0774943288 -OpenBLAS.v0.3.28+2.x86_64-linux-musl-libgfortran3.tar.gz/sha512/127ea8b2b0d8d4586a23a2b8ecbf148d512efe68626e89b0688c3c9e29ed9420b45ae86755c1467313c565f9f3835762051d7086a815b813dbe6e9eb05fb4be1 -OpenBLAS.v0.3.28+2.x86_64-linux-musl-libgfortran4.tar.gz/md5/80b1b9cf5346916edda653174a987aa2 -OpenBLAS.v0.3.28+2.x86_64-linux-musl-libgfortran4.tar.gz/sha512/77e1387ec969bbed4945d2a598a1cd04d258265c4b2d5c43af92118eb32e0c69e40619a20ea1835f277febcfea068b241343d44932afef832bdcfd2e9f618f0a -OpenBLAS.v0.3.28+2.x86_64-linux-musl-libgfortran5.tar.gz/md5/44dcedf01c938d1a1c67dd3bc90ab61d -OpenBLAS.v0.3.28+2.x86_64-linux-musl-libgfortran5.tar.gz/sha512/e490d49b8d41d73ab3e71aca8c691ca58704f0fc6930cbfcc203f97b8db8d83144bad597a2c53ff0c0c4f7c40316d975a1b589a3603873d508f6beeb75970c5b -OpenBLAS.v0.3.28+2.x86_64-unknown-freebsd-libgfortran3.tar.gz/md5/0e8a7e88b54cb836292c289d1c456fa9 -OpenBLAS.v0.3.28+2.x86_64-unknown-freebsd-libgfortran3.tar.gz/sha512/0e9b3af6839b9c41c950bb4d8b739f0243a890af7092ef9f3a00e4931f2acc3820afb78e40c7bfef716dcd3230c1d0acc7b0b37f30eb47441b476bd7540745e6 -OpenBLAS.v0.3.28+2.x86_64-unknown-freebsd-libgfortran4.tar.gz/md5/5fc47ad55780c99ef9cab7ef1b26d9c0 -OpenBLAS.v0.3.28+2.x86_64-unknown-freebsd-libgfortran4.tar.gz/sha512/c531201e4abddd652efeb5801658f5c1e4891578f181e99d6e41fc0d3bc6347b82e5e928ff8a717ee1e75bb0a6a765260bf7c99fce44aa24c21f1c5a5e3c1e3b 
-OpenBLAS.v0.3.28+2.x86_64-unknown-freebsd-libgfortran5.tar.gz/md5/dc127f3ab984b5d47b325d5701ab73cd -OpenBLAS.v0.3.28+2.x86_64-unknown-freebsd-libgfortran5.tar.gz/sha512/50850911703320894a2e1e996c5de4613b5f9e3012f5cbf591f3677799599c45d9cc4c42cf310bdc6ba91ef550e52f6424bbbabdf47f96748d4669d94e6b46a4 -OpenBLAS.v0.3.28+2.x86_64-w64-mingw32-libgfortran3.tar.gz/md5/937847e2ad00539f3422d1ecb9d26d55 -OpenBLAS.v0.3.28+2.x86_64-w64-mingw32-libgfortran3.tar.gz/sha512/751d889661ddd46cd5718b49e34f826a4fb34b1b992251a5a975bc0af15b74a75d8a56f403e8fae570223477b2b8927d9cb36764e4b9e466045d5f317b8e7196 -OpenBLAS.v0.3.28+2.x86_64-w64-mingw32-libgfortran4.tar.gz/md5/180c54c50362d05696589b270693ee8f -OpenBLAS.v0.3.28+2.x86_64-w64-mingw32-libgfortran4.tar.gz/sha512/2e3b76be5b7c4a7dc45f07e17493abd7ef9185e92429d8fa4d38766e0da96dd0777b619a9e420d2e1142bdab2ae1f755f9bc9ad97ee9a7927741778f89b9135f -OpenBLAS.v0.3.28+2.x86_64-w64-mingw32-libgfortran5.tar.gz/md5/2f0fac7c96af66ea63fce26e409f4db6 -OpenBLAS.v0.3.28+2.x86_64-w64-mingw32-libgfortran5.tar.gz/sha512/141522971447c38b4908342f3ad09ffb18142d2e79b44f66fd80047b44c09216c9b94c39f776e3093f9ceb6bc4d6270cbbfb4209b2fc0debfe93e7145cb4dbff +OpenBLAS.v0.3.28+3.aarch64-apple-darwin-libgfortran5.tar.gz/md5/312aa603d089d680205dad7d5da58195 +OpenBLAS.v0.3.28+3.aarch64-apple-darwin-libgfortran5.tar.gz/sha512/ffb0069561f52f8ac2f8affe937a00592e0c5d75c6d64bb0d5c93d1c925c93a46b763638031c88818b9dcef4a7b149ee3f15792a812e87f57a8ad086604164c4 +OpenBLAS.v0.3.28+3.aarch64-linux-gnu-libgfortran3.tar.gz/md5/7c43d9e9ac07820130a3d5faefdef882 +OpenBLAS.v0.3.28+3.aarch64-linux-gnu-libgfortran3.tar.gz/sha512/3ade0f098796148c37b118f9c052bad4e40431b4792f001043f040f8b1e4b7c3bae512f56ea21e6c0111246b2200e7720fe720a56a19dd11d1fba789344f29e3 +OpenBLAS.v0.3.28+3.aarch64-linux-gnu-libgfortran4.tar.gz/md5/cd2fe87dac703c8bfa25406aa732b88a +OpenBLAS.v0.3.28+3.aarch64-linux-gnu-libgfortran4.tar.gz/sha512/2aea68bd8f1db2ac920951c8d9a47ce8c071f3736ee8aad8d185a09be25234a0ffd11b9f9640015b82770ba3b3fad9aa511cc43501c1bb5a3a44f1fb7ccd5692 +OpenBLAS.v0.3.28+3.aarch64-linux-gnu-libgfortran5.tar.gz/md5/e3db2bf2f1f38aeee8530c78f3ec049a +OpenBLAS.v0.3.28+3.aarch64-linux-gnu-libgfortran5.tar.gz/sha512/a0ccb92e818650ac3cbc292d5af1a000ee9b123953cc3eb16e2479e926af3f2be0ed9858e3c0c1075b1b9dd70ec1e51b9dce2c9d45b999d296aa050d257a3cb1 +OpenBLAS.v0.3.28+3.aarch64-linux-musl-libgfortran3.tar.gz/md5/5bb605738930037259e773ebdb4a7041 +OpenBLAS.v0.3.28+3.aarch64-linux-musl-libgfortran3.tar.gz/sha512/967e0f33be7b743d9617627a947a802286962a46c7c3b2418aaa1504cffc5f311b01e1702b35ded18ae3686b1914c6085213b03fa8a51e0a7ca16dc4cfee8504 +OpenBLAS.v0.3.28+3.aarch64-linux-musl-libgfortran4.tar.gz/md5/ce175e82b9c6597c546552e79a43f934 +OpenBLAS.v0.3.28+3.aarch64-linux-musl-libgfortran4.tar.gz/sha512/8ff5dff293d9786fc4f541b209b35afcbe325c13ddd0f9c8f9bfca8ba5c318c7890152260a5441b9e9088751ce03b1ff8f0f5d6fd4f142fae34bdb7390d1952c +OpenBLAS.v0.3.28+3.aarch64-linux-musl-libgfortran5.tar.gz/md5/cae6aabbdccf31fb78b234785b52d48a +OpenBLAS.v0.3.28+3.aarch64-linux-musl-libgfortran5.tar.gz/sha512/ac842023e5db243fcfada22adca051bd2ffa04fca496454539931eede159e5d0490d444c338684c2d178c3367b23b8f3d76c544e30f1897bbed181f56237619f +OpenBLAS.v0.3.28+3.aarch64-unknown-freebsd-libgfortran4.tar.gz/md5/875223f1a3867d1d77ca911da1f12e7d +OpenBLAS.v0.3.28+3.aarch64-unknown-freebsd-libgfortran4.tar.gz/sha512/a53eced30cd5d85bf13f17959f0d43127a1d967dfa3fc18fd931b8a0670d8f4fa7fa4e5360937ec301a195e8c4757d2454c8d1d189e6429b97fe3b322559c970 
+OpenBLAS.v0.3.28+3.aarch64-unknown-freebsd-libgfortran5.tar.gz/md5/efc5b9b88bbb515b88b4cd84d280d6f2 +OpenBLAS.v0.3.28+3.aarch64-unknown-freebsd-libgfortran5.tar.gz/sha512/16581e2b61500c939f3be0d1e1aab3c103c2cdf56b9e5880368ff87bd2ecec89e6ee6ed00f2db90208ca26132c0b92f318084b0b2644ed93e72ca3c9706f951c +OpenBLAS.v0.3.28+3.armv6l-linux-gnueabihf-libgfortran3.tar.gz/md5/5d1f45f53dd1730051095fb8e027b14f +OpenBLAS.v0.3.28+3.armv6l-linux-gnueabihf-libgfortran3.tar.gz/sha512/0b1f91e86b5078b7cd6b64bc429a0e63bb5adf28df1baa336e67819fbd2c09f59b643c39e580f63e3bbccdc631c5d5e14c7d8afa6af94250453ce5286958f90f +OpenBLAS.v0.3.28+3.armv6l-linux-gnueabihf-libgfortran4.tar.gz/md5/8b3e3ea928975c575798d47466aafb82 +OpenBLAS.v0.3.28+3.armv6l-linux-gnueabihf-libgfortran4.tar.gz/sha512/ebac0f7047dd8b97d85e4251953a23824701af02754afd6808f13eb276326b30eb292c85fa717fbd2f21b929e6a9816a012b8ea378a0fa27e671f81435f5d3b9 +OpenBLAS.v0.3.28+3.armv6l-linux-gnueabihf-libgfortran5.tar.gz/md5/5aacfce96d5673b4d8341cb097d22c4a +OpenBLAS.v0.3.28+3.armv6l-linux-gnueabihf-libgfortran5.tar.gz/sha512/b84dc2b8cbe5453555182c3fcd8624d7a2b28fe3826d54fde3b77ad2c33e60309317d150f07554dd85e168b0ac1f91537a5c2c17fff9c02dd9216f01161e4965 +OpenBLAS.v0.3.28+3.armv6l-linux-musleabihf-libgfortran3.tar.gz/md5/dfeac22ee204868cf254dab5ae79382b +OpenBLAS.v0.3.28+3.armv6l-linux-musleabihf-libgfortran3.tar.gz/sha512/710117eb7400a0aacf69d6053730eb3b3ff4767f8d38defb2aaad94aebf1646a794489e78a8f46b469901159cdca73dd2b9460fff11e95daa4a2642cab721a25 +OpenBLAS.v0.3.28+3.armv6l-linux-musleabihf-libgfortran4.tar.gz/md5/13ff2a40bc55839bdef76b796db1eb76 +OpenBLAS.v0.3.28+3.armv6l-linux-musleabihf-libgfortran4.tar.gz/sha512/eb61fe6c0221e8f9d7a626b8d088ae1497155341dafb69835e7d53af76689ae212e1e4621e0729df5d896888c0b2d7354a24f7b57fe1d68f0b35c26bcf096699 +OpenBLAS.v0.3.28+3.armv6l-linux-musleabihf-libgfortran5.tar.gz/md5/aa7349724ba1d47256705777e755289a +OpenBLAS.v0.3.28+3.armv6l-linux-musleabihf-libgfortran5.tar.gz/sha512/25ab56c44b7d0d5de17344f39071e6894e878e89b5e35412a3c9fe345abd2eef76d7816cabb6407c7c521c3bf67a4741b37ad7e580962ead9275273e431f1fb3 +OpenBLAS.v0.3.28+3.armv7l-linux-gnueabihf-libgfortran3.tar.gz/md5/5d1f45f53dd1730051095fb8e027b14f +OpenBLAS.v0.3.28+3.armv7l-linux-gnueabihf-libgfortran3.tar.gz/sha512/0b1f91e86b5078b7cd6b64bc429a0e63bb5adf28df1baa336e67819fbd2c09f59b643c39e580f63e3bbccdc631c5d5e14c7d8afa6af94250453ce5286958f90f +OpenBLAS.v0.3.28+3.armv7l-linux-gnueabihf-libgfortran4.tar.gz/md5/8b3e3ea928975c575798d47466aafb82 +OpenBLAS.v0.3.28+3.armv7l-linux-gnueabihf-libgfortran4.tar.gz/sha512/ebac0f7047dd8b97d85e4251953a23824701af02754afd6808f13eb276326b30eb292c85fa717fbd2f21b929e6a9816a012b8ea378a0fa27e671f81435f5d3b9 +OpenBLAS.v0.3.28+3.armv7l-linux-gnueabihf-libgfortran5.tar.gz/md5/5aacfce96d5673b4d8341cb097d22c4a +OpenBLAS.v0.3.28+3.armv7l-linux-gnueabihf-libgfortran5.tar.gz/sha512/b84dc2b8cbe5453555182c3fcd8624d7a2b28fe3826d54fde3b77ad2c33e60309317d150f07554dd85e168b0ac1f91537a5c2c17fff9c02dd9216f01161e4965 +OpenBLAS.v0.3.28+3.armv7l-linux-musleabihf-libgfortran3.tar.gz/md5/dfeac22ee204868cf254dab5ae79382b +OpenBLAS.v0.3.28+3.armv7l-linux-musleabihf-libgfortran3.tar.gz/sha512/710117eb7400a0aacf69d6053730eb3b3ff4767f8d38defb2aaad94aebf1646a794489e78a8f46b469901159cdca73dd2b9460fff11e95daa4a2642cab721a25 +OpenBLAS.v0.3.28+3.armv7l-linux-musleabihf-libgfortran4.tar.gz/md5/13ff2a40bc55839bdef76b796db1eb76 
+OpenBLAS.v0.3.28+3.armv7l-linux-musleabihf-libgfortran4.tar.gz/sha512/eb61fe6c0221e8f9d7a626b8d088ae1497155341dafb69835e7d53af76689ae212e1e4621e0729df5d896888c0b2d7354a24f7b57fe1d68f0b35c26bcf096699 +OpenBLAS.v0.3.28+3.armv7l-linux-musleabihf-libgfortran5.tar.gz/md5/aa7349724ba1d47256705777e755289a +OpenBLAS.v0.3.28+3.armv7l-linux-musleabihf-libgfortran5.tar.gz/sha512/25ab56c44b7d0d5de17344f39071e6894e878e89b5e35412a3c9fe345abd2eef76d7816cabb6407c7c521c3bf67a4741b37ad7e580962ead9275273e431f1fb3 +OpenBLAS.v0.3.28+3.i686-linux-gnu-libgfortran3.tar.gz/md5/53087cc770708c57d2654fd0095b64df +OpenBLAS.v0.3.28+3.i686-linux-gnu-libgfortran3.tar.gz/sha512/90961448ae40b0445bf881d0815aec54d2096ad235dc8e3db8d698a72961ef9a97e7fcd08f79c83cd1f7c5a341464f52a90351d927d5f1c3e9c8ee32b17970db +OpenBLAS.v0.3.28+3.i686-linux-gnu-libgfortran4.tar.gz/md5/ee910e19faa961bde11fdf90c211df9d +OpenBLAS.v0.3.28+3.i686-linux-gnu-libgfortran4.tar.gz/sha512/f5cfecfe965991cfd7843eff71efa71d6842058565bb63657e909b2942e58a8c7506aa66335308961e59f392da16e1177d79542ae509795566a14122f67a1782 +OpenBLAS.v0.3.28+3.i686-linux-gnu-libgfortran5.tar.gz/md5/fe52ba7ca8e16f37aa04b79248e0471d +OpenBLAS.v0.3.28+3.i686-linux-gnu-libgfortran5.tar.gz/sha512/79b5108886d60f12424709a841e359dc1cf23cef21bb0ee6d1a48043ac48a35dac1637e43c8ebf3f2e10dd34721993a7a12c5776f2975dd5bd7b6e29e1a9adc3 +OpenBLAS.v0.3.28+3.i686-linux-musl-libgfortran3.tar.gz/md5/88d8ff421d29456f1d7670ceaf8867ca +OpenBLAS.v0.3.28+3.i686-linux-musl-libgfortran3.tar.gz/sha512/91c1bd8142845d11fecba87a719315a14218e3863955ddd2ed82cecd4a2c177a48c660b6aac374ee9a11008245c0ced1bae70eaf5a1a6e3114db02e09a96396f +OpenBLAS.v0.3.28+3.i686-linux-musl-libgfortran4.tar.gz/md5/3035066a53032b551e49f56b323e941d +OpenBLAS.v0.3.28+3.i686-linux-musl-libgfortran4.tar.gz/sha512/f218e152a1c92bd374599814612add8010aedc78113cbe06465e8a1ee7f66892bb654cad687aa55555e74f3a65d74608692d41c9f0ce6c0bc63475ef62ab55b7 +OpenBLAS.v0.3.28+3.i686-linux-musl-libgfortran5.tar.gz/md5/f7cf36ac9a0cbb535952ec73f2e6c9ea +OpenBLAS.v0.3.28+3.i686-linux-musl-libgfortran5.tar.gz/sha512/00ab052d9fa4a72a640545782019f24ed6017b36aa89c5e659ce73b1e821817f560c09f71b26c027c0a05bd13567c71a6d7f5995d1c39ab233bec56cd3a7fd9e +OpenBLAS.v0.3.28+3.i686-w64-mingw32-libgfortran3.tar.gz/md5/b19d09297372e071805ba033afb55def +OpenBLAS.v0.3.28+3.i686-w64-mingw32-libgfortran3.tar.gz/sha512/eb1138578033167ececfe428db17fe28fad70631da3c25532edb4204fe733821156d6c619b6541fd47d53d335d6ab11b3d1ac1effb56031a2f782a5e8d863a89 +OpenBLAS.v0.3.28+3.i686-w64-mingw32-libgfortran4.tar.gz/md5/98ed2a8f2d3249438b913d5f35715a53 +OpenBLAS.v0.3.28+3.i686-w64-mingw32-libgfortran4.tar.gz/sha512/fbc32d81a4189ac170b18a029419bc98bb0655ee4d485f4bd165a394d223b80ba77f848d94a9ad96d926291de3db4a7602abd81c44fec55e4591dfe0aa91e29e +OpenBLAS.v0.3.28+3.i686-w64-mingw32-libgfortran5.tar.gz/md5/cb99d7d4944c5283a1a0142683e1d377 +OpenBLAS.v0.3.28+3.i686-w64-mingw32-libgfortran5.tar.gz/sha512/b77d3225e60f49506917bfff78c187df7157dbc834eccda2fa03d03eef8214b225682888a411a8b6e4b29a8d7e2b0ca625ea8c56b84ecc39e1f4f1012523c096 +OpenBLAS.v0.3.28+3.powerpc64le-linux-gnu-libgfortran3.tar.gz/md5/c6e5d4867a068e08b3f56f474e498b81 +OpenBLAS.v0.3.28+3.powerpc64le-linux-gnu-libgfortran3.tar.gz/sha512/de6249439758a501bfd27d3ef04ec04cc06edf64de73f0709a6a40a2eaf40bd3d5d77dfd54b7b19e2f6bf6c104b4416d3e225faa0cff4cb631785c08d90b8614 +OpenBLAS.v0.3.28+3.powerpc64le-linux-gnu-libgfortran4.tar.gz/md5/32e70466cfa3cfec65ab4cad3abc5f03 
+OpenBLAS.v0.3.28+3.powerpc64le-linux-gnu-libgfortran4.tar.gz/sha512/2642385a5e9fc8e9c3839a5a44f9753b21b5078725f7d0c3e1ebe96b76129a3b8e2627d92629dee4f6fd7e8e51e86a7fbedc80cbe4d1a6812cea363559950da0 +OpenBLAS.v0.3.28+3.powerpc64le-linux-gnu-libgfortran5.tar.gz/md5/e2332831bd88d57132241697952819e7 +OpenBLAS.v0.3.28+3.powerpc64le-linux-gnu-libgfortran5.tar.gz/sha512/ad03edf9ac56bf6311f0ca70a1bc359242accfe82cba9e42f39f6cb1c3006226179ff9be8218847889cae10fac13bc33f60837e1e3249e309172da7fbc25400f +OpenBLAS.v0.3.28+3.x86_64-apple-darwin-libgfortran3.tar.gz/md5/27c24775af446a44a72a28ffd197696d +OpenBLAS.v0.3.28+3.x86_64-apple-darwin-libgfortran3.tar.gz/sha512/2af8caa33bee88efff84653f3932b04e8fd4aabb1bf16d49fa73657b0ec13c9457fde7ab3f953fc9b01da5c2841c3c9b588e3b0f559b89df0e6268468d1f7cc8 +OpenBLAS.v0.3.28+3.x86_64-apple-darwin-libgfortran4.tar.gz/md5/414e701d918d5fba08a12de6979db4b5 +OpenBLAS.v0.3.28+3.x86_64-apple-darwin-libgfortran4.tar.gz/sha512/949886d388b80e19b944d102852f2bb58ffa03c42e624986dd9dc076797c996634d4a8fc0f04544451d6848c2079969816979e1f68a999b2747e9dd5472be7a6 +OpenBLAS.v0.3.28+3.x86_64-apple-darwin-libgfortran5.tar.gz/md5/29fcf62c0280cc10f91d22189a2e8de8 +OpenBLAS.v0.3.28+3.x86_64-apple-darwin-libgfortran5.tar.gz/sha512/02e75d4ecf9cd922157a72c0ca2e713cf336b125df3982cd5f7cc4f2a04367ad4c2b1190ca2a0a9df8b639c7ebcfc9783066e99dd0b13acde7b02038391e8567 +OpenBLAS.v0.3.28+3.x86_64-linux-gnu-libgfortran3.tar.gz/md5/147d5e8eb2ec78fc8a31bdb091fab001 +OpenBLAS.v0.3.28+3.x86_64-linux-gnu-libgfortran3.tar.gz/sha512/2319eda568800c0b1f2d96a8a36c59b1bbd792c06de1d740aea3f1e49798242426ea8d10c100c42c3c281702e2b4f5b673b6ab5252b276d48542e875bcaa3094 +OpenBLAS.v0.3.28+3.x86_64-linux-gnu-libgfortran4.tar.gz/md5/448857d9c4b2e95afc12a14c75b24055 +OpenBLAS.v0.3.28+3.x86_64-linux-gnu-libgfortran4.tar.gz/sha512/3e7c8cd55e0b15a30992b1e0b48a6e2ae36fd9babf689fa5595c7de94aec401de1d7821d45a22bf14cd5c45c708bc8fa3511d34d732dadd4daaca3f49e200bdb +OpenBLAS.v0.3.28+3.x86_64-linux-gnu-libgfortran5.tar.gz/md5/3aaf417685b44e0e505208f7b31b981a +OpenBLAS.v0.3.28+3.x86_64-linux-gnu-libgfortran5.tar.gz/sha512/f7b1d123e48ede93fe624a79d9535a8915bfa3441d7a6f9c6643467027414c9f2538e299858ea98bbb49d4e6d385a6a491063cb1878ac3b0b3d6a8f7ff0a48df +OpenBLAS.v0.3.28+3.x86_64-linux-musl-libgfortran3.tar.gz/md5/5723136deaaf4b2e5960fb0774943288 +OpenBLAS.v0.3.28+3.x86_64-linux-musl-libgfortran3.tar.gz/sha512/127ea8b2b0d8d4586a23a2b8ecbf148d512efe68626e89b0688c3c9e29ed9420b45ae86755c1467313c565f9f3835762051d7086a815b813dbe6e9eb05fb4be1 +OpenBLAS.v0.3.28+3.x86_64-linux-musl-libgfortran4.tar.gz/md5/80b1b9cf5346916edda653174a987aa2 +OpenBLAS.v0.3.28+3.x86_64-linux-musl-libgfortran4.tar.gz/sha512/77e1387ec969bbed4945d2a598a1cd04d258265c4b2d5c43af92118eb32e0c69e40619a20ea1835f277febcfea068b241343d44932afef832bdcfd2e9f618f0a +OpenBLAS.v0.3.28+3.x86_64-linux-musl-libgfortran5.tar.gz/md5/44dcedf01c938d1a1c67dd3bc90ab61d +OpenBLAS.v0.3.28+3.x86_64-linux-musl-libgfortran5.tar.gz/sha512/e490d49b8d41d73ab3e71aca8c691ca58704f0fc6930cbfcc203f97b8db8d83144bad597a2c53ff0c0c4f7c40316d975a1b589a3603873d508f6beeb75970c5b +OpenBLAS.v0.3.28+3.x86_64-unknown-freebsd-libgfortran3.tar.gz/md5/0e8a7e88b54cb836292c289d1c456fa9 +OpenBLAS.v0.3.28+3.x86_64-unknown-freebsd-libgfortran3.tar.gz/sha512/0e9b3af6839b9c41c950bb4d8b739f0243a890af7092ef9f3a00e4931f2acc3820afb78e40c7bfef716dcd3230c1d0acc7b0b37f30eb47441b476bd7540745e6 +OpenBLAS.v0.3.28+3.x86_64-unknown-freebsd-libgfortran4.tar.gz/md5/5fc47ad55780c99ef9cab7ef1b26d9c0 
+OpenBLAS.v0.3.28+3.x86_64-unknown-freebsd-libgfortran4.tar.gz/sha512/c531201e4abddd652efeb5801658f5c1e4891578f181e99d6e41fc0d3bc6347b82e5e928ff8a717ee1e75bb0a6a765260bf7c99fce44aa24c21f1c5a5e3c1e3b +OpenBLAS.v0.3.28+3.x86_64-unknown-freebsd-libgfortran5.tar.gz/md5/dc127f3ab984b5d47b325d5701ab73cd +OpenBLAS.v0.3.28+3.x86_64-unknown-freebsd-libgfortran5.tar.gz/sha512/50850911703320894a2e1e996c5de4613b5f9e3012f5cbf591f3677799599c45d9cc4c42cf310bdc6ba91ef550e52f6424bbbabdf47f96748d4669d94e6b46a4 +OpenBLAS.v0.3.28+3.x86_64-w64-mingw32-libgfortran3.tar.gz/md5/937847e2ad00539f3422d1ecb9d26d55 +OpenBLAS.v0.3.28+3.x86_64-w64-mingw32-libgfortran3.tar.gz/sha512/751d889661ddd46cd5718b49e34f826a4fb34b1b992251a5a975bc0af15b74a75d8a56f403e8fae570223477b2b8927d9cb36764e4b9e466045d5f317b8e7196 +OpenBLAS.v0.3.28+3.x86_64-w64-mingw32-libgfortran4.tar.gz/md5/180c54c50362d05696589b270693ee8f +OpenBLAS.v0.3.28+3.x86_64-w64-mingw32-libgfortran4.tar.gz/sha512/2e3b76be5b7c4a7dc45f07e17493abd7ef9185e92429d8fa4d38766e0da96dd0777b619a9e420d2e1142bdab2ae1f755f9bc9ad97ee9a7927741778f89b9135f +OpenBLAS.v0.3.28+3.x86_64-w64-mingw32-libgfortran5.tar.gz/md5/2f0fac7c96af66ea63fce26e409f4db6 +OpenBLAS.v0.3.28+3.x86_64-w64-mingw32-libgfortran5.tar.gz/sha512/141522971447c38b4908342f3ad09ffb18142d2e79b44f66fd80047b44c09216c9b94c39f776e3093f9ceb6bc4d6270cbbfb4209b2fc0debfe93e7145cb4dbff openblas-5ef8b1964658f9cb6a6324a06f6a1a022609b0c5.tar.gz/md5/f7a1fe86cefbf7d4f2608843c7833ca7 openblas-5ef8b1964658f9cb6a6324a06f6a1a022609b0c5.tar.gz/sha512/5f6020e958967a12a3c5b18bde13331f9c0602bd073563f35cd7cec848c92b45f30ca362819b12cd16989c0e4641ee3e63db8322d1092f61b31ba2e4068dd7a7 diff --git a/deps/checksums/openlibm b/deps/checksums/openlibm index 452abb133c671..e8c17e1efd26e 100644 --- a/deps/checksums/openlibm +++ b/deps/checksums/openlibm @@ -1,34 +1,36 @@ -OpenLibm.v0.8.1+2.aarch64-apple-darwin.tar.gz/md5/9ce53048e8944f6edff44f75b731229c -OpenLibm.v0.8.1+2.aarch64-apple-darwin.tar.gz/sha512/3a14e28db0656b47a473e19ca0afae1f8b72dd01e108d6b6cb52dc24fc03e4a43db867616b375369e82177bb274fbcfeb8f24b488ee68871e8da8463e9090adf -OpenLibm.v0.8.1+2.aarch64-linux-gnu.tar.gz/md5/8b284fe2905c3e5315291f5e5f27ca8b -OpenLibm.v0.8.1+2.aarch64-linux-gnu.tar.gz/sha512/d326181349ee7f74b73611cd71f933e93c38c11d6db9a1cd4fee49d1ac06c7f244f4cfc6ab373dd52909064117405b3d4fa39e5c626464c066ab53f1cd26dc4a -OpenLibm.v0.8.1+2.aarch64-linux-musl.tar.gz/md5/dc40ad1f2e53a3b914dcca364b6ead77 -OpenLibm.v0.8.1+2.aarch64-linux-musl.tar.gz/sha512/3779d8cd23c5987a15666e2160e40f5a6fc5e7d350c9e3c86d8af8c99515a8cb1f3b5e8438dae0f3cf0b5e1cb2c0cb74c5dd5a06c65e0c2a2382d86dacfaf9fb -OpenLibm.v0.8.1+2.armv6l-linux-gnueabihf.tar.gz/md5/7c9e56f6124b85e7dee74601f8c16abd -OpenLibm.v0.8.1+2.armv6l-linux-gnueabihf.tar.gz/sha512/a78e15177992025462d334a9d5b10b9c7f6710d77ac36056fe7a1cc3bc3fada87f16696366578cfa5f325d5f746639c41c5d80b4885814014d29556d63bd4c7c -OpenLibm.v0.8.1+2.armv6l-linux-musleabihf.tar.gz/md5/78d9e3178fdf93a35f7d2b0b00753dc6 -OpenLibm.v0.8.1+2.armv6l-linux-musleabihf.tar.gz/sha512/ff7b78786f7035eaa08770ddf7d4eb2984595a318c3ac4dfbe4091ca398e00638df2e77bc2ab5fd159defd0927d4fe46b7e824cf055fbae4860bfa12347e8c5b -OpenLibm.v0.8.1+2.armv7l-linux-gnueabihf.tar.gz/md5/7c9e56f6124b85e7dee74601f8c16abd -OpenLibm.v0.8.1+2.armv7l-linux-gnueabihf.tar.gz/sha512/a78e15177992025462d334a9d5b10b9c7f6710d77ac36056fe7a1cc3bc3fada87f16696366578cfa5f325d5f746639c41c5d80b4885814014d29556d63bd4c7c -OpenLibm.v0.8.1+2.armv7l-linux-musleabihf.tar.gz/md5/78d9e3178fdf93a35f7d2b0b00753dc6 
-OpenLibm.v0.8.1+2.armv7l-linux-musleabihf.tar.gz/sha512/ff7b78786f7035eaa08770ddf7d4eb2984595a318c3ac4dfbe4091ca398e00638df2e77bc2ab5fd159defd0927d4fe46b7e824cf055fbae4860bfa12347e8c5b -OpenLibm.v0.8.1+2.i686-linux-gnu.tar.gz/md5/e9942dca99f024ae27876ea5ab1592a9 -OpenLibm.v0.8.1+2.i686-linux-gnu.tar.gz/sha512/406e39894a643bf99c493585fa631800bbbcd6c36aaa9e677de772f7ceaed93b462fdf797235174e22baf2f34c26527f400e282061954b34f05b389acaba1e29 -OpenLibm.v0.8.1+2.i686-linux-musl.tar.gz/md5/0037f2e2113282d49967eba72f215c4b -OpenLibm.v0.8.1+2.i686-linux-musl.tar.gz/sha512/96666332a814232084340791384505acf964064dba4f7b62db51a7ae4416237decb40318dc07b9a041547fd4ff77f204f42bc5c7f029e590af1ee1dd6196d843 -OpenLibm.v0.8.1+2.i686-w64-mingw32.tar.gz/md5/73193f2e5149d07008902adfbf1b74b2 -OpenLibm.v0.8.1+2.i686-w64-mingw32.tar.gz/sha512/e8202b59b8f922bcc908b8b8e6687a674faa701689f5c6175d83fea0bcc5d73f74bed37660e60406f37873dab1d8489e0fd1506294791adfa61a069555eababf -OpenLibm.v0.8.1+2.powerpc64le-linux-gnu.tar.gz/md5/01997fb48464f94f59f4708bd26eabc3 -OpenLibm.v0.8.1+2.powerpc64le-linux-gnu.tar.gz/sha512/1e1d8901fd3aab0948be5c387b8d5bd0db12766fe00bf800ee3100aa0d5973c7aa03ef9c9b4e34942e5e2b46b64035d7f8d7b070113db031d4611f2a7dd02ca3 -OpenLibm.v0.8.1+2.x86_64-apple-darwin.tar.gz/md5/6cb5a472d6c1446acfca11bb8f7283d6 -OpenLibm.v0.8.1+2.x86_64-apple-darwin.tar.gz/sha512/e52f399002544d94536c3bda742d3cc5b0995929d656eeb0e808954fb800fd8e5cfc0ab57279fbccab44fc33a1207ab345d78e685d519ff7f02cca8f554b9c06 -OpenLibm.v0.8.1+2.x86_64-linux-gnu.tar.gz/md5/e1c7dc61e98d5b8aa68de3462a2620a4 -OpenLibm.v0.8.1+2.x86_64-linux-gnu.tar.gz/sha512/fe6d74a2522d75374b87ac9746d444d75a768e069f24f3fbfc6a140aa9d073fa54e8899861f839e647b9261e660c5f2b5555f52fab39ef84a74685b632e89df9 -OpenLibm.v0.8.1+2.x86_64-linux-musl.tar.gz/md5/5fe8eb59d21732a80f432720419324b3 -OpenLibm.v0.8.1+2.x86_64-linux-musl.tar.gz/sha512/0d1b22ca01eda89caa1832b63b1d7ddafe0fedf5906680e817100e2176cbbae95f576409706a9ea1834bc692b72009f4fd244586df30228d18e626bf25fc040a -OpenLibm.v0.8.1+2.x86_64-unknown-freebsd.tar.gz/md5/2bcdf32fdef91433763e32be029814d9 -OpenLibm.v0.8.1+2.x86_64-unknown-freebsd.tar.gz/sha512/97854736fc8c797abd5a5c331e5795dfa9124ac108a76fc2bcac518f5750a08884717d611bb98222b13387bcd27e1c3f4ec841547859e87fafbbe8c7dcd7381a -OpenLibm.v0.8.1+2.x86_64-w64-mingw32.tar.gz/md5/e22079c6e610c9543cca0fb88495d989 -OpenLibm.v0.8.1+2.x86_64-w64-mingw32.tar.gz/sha512/67081bcf360a62eee3928bd1b9d5302ed29b4a176245721723692d5ef938a828379617847308f26a2c7bc0cb2d0dce129d4b8c65c0446c611126894c0aaa5ea8 +OpenLibm.v0.8.1+3.aarch64-apple-darwin.tar.gz/md5/9ce53048e8944f6edff44f75b731229c +OpenLibm.v0.8.1+3.aarch64-apple-darwin.tar.gz/sha512/3a14e28db0656b47a473e19ca0afae1f8b72dd01e108d6b6cb52dc24fc03e4a43db867616b375369e82177bb274fbcfeb8f24b488ee68871e8da8463e9090adf +OpenLibm.v0.8.1+3.aarch64-linux-gnu.tar.gz/md5/8b284fe2905c3e5315291f5e5f27ca8b +OpenLibm.v0.8.1+3.aarch64-linux-gnu.tar.gz/sha512/d326181349ee7f74b73611cd71f933e93c38c11d6db9a1cd4fee49d1ac06c7f244f4cfc6ab373dd52909064117405b3d4fa39e5c626464c066ab53f1cd26dc4a +OpenLibm.v0.8.1+3.aarch64-linux-musl.tar.gz/md5/dc40ad1f2e53a3b914dcca364b6ead77 +OpenLibm.v0.8.1+3.aarch64-linux-musl.tar.gz/sha512/3779d8cd23c5987a15666e2160e40f5a6fc5e7d350c9e3c86d8af8c99515a8cb1f3b5e8438dae0f3cf0b5e1cb2c0cb74c5dd5a06c65e0c2a2382d86dacfaf9fb +OpenLibm.v0.8.1+3.aarch64-unknown-freebsd.tar.gz/md5/f5e9441d81626b958396e585083e0bdb 
+OpenLibm.v0.8.1+3.aarch64-unknown-freebsd.tar.gz/sha512/1078823b0f5f48cd9f6dc753213b6b3f8112476c9df70192b042fd9bbb597fff34da009f376b6e67034681fcb07810a1a22b0dc83112fbbbaa60dac189164a41 +OpenLibm.v0.8.1+3.armv6l-linux-gnueabihf.tar.gz/md5/7c9e56f6124b85e7dee74601f8c16abd +OpenLibm.v0.8.1+3.armv6l-linux-gnueabihf.tar.gz/sha512/a78e15177992025462d334a9d5b10b9c7f6710d77ac36056fe7a1cc3bc3fada87f16696366578cfa5f325d5f746639c41c5d80b4885814014d29556d63bd4c7c +OpenLibm.v0.8.1+3.armv6l-linux-musleabihf.tar.gz/md5/78d9e3178fdf93a35f7d2b0b00753dc6 +OpenLibm.v0.8.1+3.armv6l-linux-musleabihf.tar.gz/sha512/ff7b78786f7035eaa08770ddf7d4eb2984595a318c3ac4dfbe4091ca398e00638df2e77bc2ab5fd159defd0927d4fe46b7e824cf055fbae4860bfa12347e8c5b +OpenLibm.v0.8.1+3.armv7l-linux-gnueabihf.tar.gz/md5/7c9e56f6124b85e7dee74601f8c16abd +OpenLibm.v0.8.1+3.armv7l-linux-gnueabihf.tar.gz/sha512/a78e15177992025462d334a9d5b10b9c7f6710d77ac36056fe7a1cc3bc3fada87f16696366578cfa5f325d5f746639c41c5d80b4885814014d29556d63bd4c7c +OpenLibm.v0.8.1+3.armv7l-linux-musleabihf.tar.gz/md5/78d9e3178fdf93a35f7d2b0b00753dc6 +OpenLibm.v0.8.1+3.armv7l-linux-musleabihf.tar.gz/sha512/ff7b78786f7035eaa08770ddf7d4eb2984595a318c3ac4dfbe4091ca398e00638df2e77bc2ab5fd159defd0927d4fe46b7e824cf055fbae4860bfa12347e8c5b +OpenLibm.v0.8.1+3.i686-linux-gnu.tar.gz/md5/69b0c561e8f70e12f78a47bbcc28d43f +OpenLibm.v0.8.1+3.i686-linux-gnu.tar.gz/sha512/916bedde7b75aaa10a7517aa6a24da924e896aa46159447722010aa60a8c0974da8c2aa847d0a5853d391e7f3b792371304aa18a6d72d998f38f2a00b7179c30 +OpenLibm.v0.8.1+3.i686-linux-musl.tar.gz/md5/0037f2e2113282d49967eba72f215c4b +OpenLibm.v0.8.1+3.i686-linux-musl.tar.gz/sha512/96666332a814232084340791384505acf964064dba4f7b62db51a7ae4416237decb40318dc07b9a041547fd4ff77f204f42bc5c7f029e590af1ee1dd6196d843 +OpenLibm.v0.8.1+3.i686-w64-mingw32.tar.gz/md5/a2a5ba90531660f1e758df91bb11c2f9 +OpenLibm.v0.8.1+3.i686-w64-mingw32.tar.gz/sha512/b177c124dbe2dd491b49bf01b58b639629e2039c60dbd8ef1acf42985a7bd5ac1c5950a803b19e3ed5436ebd0a83f1e7af505d5f90b270467600ecab3e8a5cda +OpenLibm.v0.8.1+3.powerpc64le-linux-gnu.tar.gz/md5/01997fb48464f94f59f4708bd26eabc3 +OpenLibm.v0.8.1+3.powerpc64le-linux-gnu.tar.gz/sha512/1e1d8901fd3aab0948be5c387b8d5bd0db12766fe00bf800ee3100aa0d5973c7aa03ef9c9b4e34942e5e2b46b64035d7f8d7b070113db031d4611f2a7dd02ca3 +OpenLibm.v0.8.1+3.x86_64-apple-darwin.tar.gz/md5/6cb5a472d6c1446acfca11bb8f7283d6 +OpenLibm.v0.8.1+3.x86_64-apple-darwin.tar.gz/sha512/e52f399002544d94536c3bda742d3cc5b0995929d656eeb0e808954fb800fd8e5cfc0ab57279fbccab44fc33a1207ab345d78e685d519ff7f02cca8f554b9c06 +OpenLibm.v0.8.1+3.x86_64-linux-gnu.tar.gz/md5/e1c7dc61e98d5b8aa68de3462a2620a4 +OpenLibm.v0.8.1+3.x86_64-linux-gnu.tar.gz/sha512/fe6d74a2522d75374b87ac9746d444d75a768e069f24f3fbfc6a140aa9d073fa54e8899861f839e647b9261e660c5f2b5555f52fab39ef84a74685b632e89df9 +OpenLibm.v0.8.1+3.x86_64-linux-musl.tar.gz/md5/5fe8eb59d21732a80f432720419324b3 +OpenLibm.v0.8.1+3.x86_64-linux-musl.tar.gz/sha512/0d1b22ca01eda89caa1832b63b1d7ddafe0fedf5906680e817100e2176cbbae95f576409706a9ea1834bc692b72009f4fd244586df30228d18e626bf25fc040a +OpenLibm.v0.8.1+3.x86_64-unknown-freebsd.tar.gz/md5/2bcdf32fdef91433763e32be029814d9 +OpenLibm.v0.8.1+3.x86_64-unknown-freebsd.tar.gz/sha512/97854736fc8c797abd5a5c331e5795dfa9124ac108a76fc2bcac518f5750a08884717d611bb98222b13387bcd27e1c3f4ec841547859e87fafbbe8c7dcd7381a +OpenLibm.v0.8.1+3.x86_64-w64-mingw32.tar.gz/md5/31a75f828f782130bf6a463521a11f04 
+OpenLibm.v0.8.1+3.x86_64-w64-mingw32.tar.gz/sha512/d54f688940229a5fc3db958460556d362c81e2e0a7bac010537123e5ff102b17d84123ee2e164151d51fb8ee7524e0888531e14d2c5ebfb3d6847b03af0086ad openlibm-ae2d91698508701c83cab83714d42a1146dccf85.tar.gz/md5/19408d70bf042a109e1c267a53740089 openlibm-ae2d91698508701c83cab83714d42a1146dccf85.tar.gz/sha512/9597fdcbc4af8369e6eecc3f8e86f251661cc64d236578f3ee8a6b39e77a47951446e1a0fe1151513da153e7ed17bf39aa5a36c32153d0d0400232bed2839e22 diff --git a/deps/checksums/p7zip b/deps/checksums/p7zip index 272f1d768161f..2fe4fb874bec4 100644 --- a/deps/checksums/p7zip +++ b/deps/checksums/p7zip @@ -1,34 +1,36 @@ p7zip-17.05.tar.gz/md5/de921a08f37242a8eed8e4a758fbcb58 p7zip-17.05.tar.gz/sha512/97a7cfd15287998eb049c320548477be496c4ddf6b45c833c42adca4ab88719b07a442ae2e71cf2dc3b30a0777a3acab0a1a30f01fd85bacffa3fa9bd22c3f7d -p7zip.v17.5.0+0.aarch64-apple-darwin.tar.gz/md5/2a254e251901b3d1ddfd7aff23a6e5eb -p7zip.v17.5.0+0.aarch64-apple-darwin.tar.gz/sha512/8efb9a2c9bcab388e523adba3dc0b876e8ae34e2440c3eee01fd780eb87c8619c7a7bbdc46d703ccefff6aa6ad64c4e4b45b723136ab1f6fd6de4f52e75ebbbf -p7zip.v17.5.0+0.aarch64-linux-gnu.tar.gz/md5/bb1f3773fd409dbb91a10f7d9d2e99b5 -p7zip.v17.5.0+0.aarch64-linux-gnu.tar.gz/sha512/e95ccc342be644570d218d25403b91a7db9ee983fbf8cce3deff453355d68d426f9301eaac865a98691025b596b8cd77ebebf6184c0eaf8b2f294bc6763b9a4b -p7zip.v17.5.0+0.aarch64-linux-musl.tar.gz/md5/3fac518a6a70412294d71ca510958cf2 -p7zip.v17.5.0+0.aarch64-linux-musl.tar.gz/sha512/fc127790739bf8a8b918b2e83753d86f5e79ee8706bde4cc79d74d9f7d846aae99a109da4b2b3cc92ccedc1eef4d52a555a65a95f588e173e0fecc11f2ca21e6 -p7zip.v17.5.0+0.armv6l-linux-gnueabihf.tar.gz/md5/355410848192de3b02d12fd663867f4b -p7zip.v17.5.0+0.armv6l-linux-gnueabihf.tar.gz/sha512/8f103b41e755d157d70dacca89a0ef4610bea109686b4005e8edd5f79ed2e6419c00c2625d0ab90e6e33fa389e670490d8de263c0bdae952cc34cbbf440e275f -p7zip.v17.5.0+0.armv6l-linux-musleabihf.tar.gz/md5/34363b227306fce34a728af54b71064f -p7zip.v17.5.0+0.armv6l-linux-musleabihf.tar.gz/sha512/8dd7b37ce6223c9fedcaa999eb806eb6dec8c4a3133d3c07e2456cb8543b8e4f5b881c1bff2d2e25f19b1312b18673e9013aeff87d6a274eec6c451b1ba0d6b9 -p7zip.v17.5.0+0.armv7l-linux-gnueabihf.tar.gz/md5/dbb1fc0cf3bea674442ff8cc932a94cd -p7zip.v17.5.0+0.armv7l-linux-gnueabihf.tar.gz/sha512/c4d71d905fa420391417786ed206a0c334475dd0df8baa1fc3f6560ce548db11805003d0d0b35bb622fe818c761f2b0abe0796d1cbfce2a922da69e697f056a2 -p7zip.v17.5.0+0.armv7l-linux-musleabihf.tar.gz/md5/d188b5dd453faedb616ba9c48fdeab6b -p7zip.v17.5.0+0.armv7l-linux-musleabihf.tar.gz/sha512/ea30a775370502ca9e271b87cbda528d0c51d63ce0df41883d4dbc1527a32f251d797f3692fcf9b883b5fbaaad80515b971a8f8fe09ba102978b19a0ecb58528 -p7zip.v17.5.0+0.i686-linux-gnu.tar.gz/md5/dc02bdde045a0b6b22cf14d6960e63ed -p7zip.v17.5.0+0.i686-linux-gnu.tar.gz/sha512/d2d0dd14a5fc1163fea2276e0925bfa8d075d5dba1d8018e4e3160977d3b09642b2e521d8e57d049abaf0e2ea391a846f0b0136b3c59e8b476c8c52ac5210447 -p7zip.v17.5.0+0.i686-linux-musl.tar.gz/md5/0b8658147938a8ec109ee2b3b0a0665f -p7zip.v17.5.0+0.i686-linux-musl.tar.gz/sha512/411b2950f5928c537b87ba0651c09c08e57afed765db9fee89eda8b12939ef0da94c8ba38c0a24ba46b4513a0e4cca798eb09f2b20a011099ed3cf14455dd19e -p7zip.v17.5.0+0.i686-w64-mingw32.tar.gz/md5/98bdd8767c77a35f71303ff490a3d363 -p7zip.v17.5.0+0.i686-w64-mingw32.tar.gz/sha512/14f08071af74297df8bfe1d9f7efa3c0212e62ace573848f17b729e4c36dc3861110f3c5cc9315364c318e5b040736443a24492e86d76161993653a309996eb3 -p7zip.v17.5.0+0.powerpc64le-linux-gnu.tar.gz/md5/b18c917b9852898a9b9d6d24bcc6863e 
-p7zip.v17.5.0+0.powerpc64le-linux-gnu.tar.gz/sha512/0148dc8a0bc9c95212d7f8e2f92ee24e968eb7290fe72c7ae02e286bf5c05dd6b1f10b32350a7ff37777ed5a8cc21f3303f464620f3394c7a4690ae98bf77299 -p7zip.v17.5.0+0.x86_64-apple-darwin.tar.gz/md5/da31752a2556644d39e48649bb0111de -p7zip.v17.5.0+0.x86_64-apple-darwin.tar.gz/sha512/0695ad111263d2fadfdf9a46ce7ee80def0bf60db7d1c2585ed2af6fc945fb169311a9f1ffc6f95fb43b0b03694d2d1be9136d3d78ba2ef2b19228987883a385 -p7zip.v17.5.0+0.x86_64-linux-gnu.tar.gz/md5/2fb55d86e4eaccb0488bd637d088b996 -p7zip.v17.5.0+0.x86_64-linux-gnu.tar.gz/sha512/38ac355157d59c09f308fc29964d0e9c1466c9633efd8d3c6ff3c738abce2af45ebc6b92a29f56d5e7baa4871f9f39b14ecfcbedd4e2f4ca7c0fe6627c6b13e7 -p7zip.v17.5.0+0.x86_64-linux-musl.tar.gz/md5/f0bd567a851d2dd9d306552ffafbca3a -p7zip.v17.5.0+0.x86_64-linux-musl.tar.gz/sha512/e60047a6e7e3496cb6658f87c8c88676f399cd9f3d0d7daa880b6be09cd5525f7f22776896f1375722b47555514ff8c018f02ce800ec3fd0ed922e16e8a6d657 -p7zip.v17.5.0+0.x86_64-unknown-freebsd.tar.gz/md5/d37bd26e39a3ec84f262636f70624341 -p7zip.v17.5.0+0.x86_64-unknown-freebsd.tar.gz/sha512/0604a880c19f9d72d5828edd75be641625c29f230b3a5e7d70ec3812c014c96b76ee7b45e0e80f49be63f109a48700e75d1e5be01b5ae7b46d42dafda9885e8c -p7zip.v17.5.0+0.x86_64-w64-mingw32.tar.gz/md5/f02c7b2481dee880b096340a8735350f -p7zip.v17.5.0+0.x86_64-w64-mingw32.tar.gz/sha512/08b717c1b072d1309f6af8973eb09b1a482abb7ae7d01fba79873d4310a7c11292e2e8779029f99cc60627ed0d064224bc87782e587c520f970b840b7b838052 +p7zip.v17.5.0+1.aarch64-apple-darwin.tar.gz/md5/2a254e251901b3d1ddfd7aff23a6e5eb +p7zip.v17.5.0+1.aarch64-apple-darwin.tar.gz/sha512/8efb9a2c9bcab388e523adba3dc0b876e8ae34e2440c3eee01fd780eb87c8619c7a7bbdc46d703ccefff6aa6ad64c4e4b45b723136ab1f6fd6de4f52e75ebbbf +p7zip.v17.5.0+1.aarch64-linux-gnu.tar.gz/md5/bb1f3773fd409dbb91a10f7d9d2e99b5 +p7zip.v17.5.0+1.aarch64-linux-gnu.tar.gz/sha512/e95ccc342be644570d218d25403b91a7db9ee983fbf8cce3deff453355d68d426f9301eaac865a98691025b596b8cd77ebebf6184c0eaf8b2f294bc6763b9a4b +p7zip.v17.5.0+1.aarch64-linux-musl.tar.gz/md5/3fac518a6a70412294d71ca510958cf2 +p7zip.v17.5.0+1.aarch64-linux-musl.tar.gz/sha512/fc127790739bf8a8b918b2e83753d86f5e79ee8706bde4cc79d74d9f7d846aae99a109da4b2b3cc92ccedc1eef4d52a555a65a95f588e173e0fecc11f2ca21e6 +p7zip.v17.5.0+1.aarch64-unknown-freebsd.tar.gz/md5/4190f8d7d42572b3fdab0fa382417d43 +p7zip.v17.5.0+1.aarch64-unknown-freebsd.tar.gz/sha512/5b0cb08374b8561873f76cb2b8bcbb8de1ff4c91bde23222cc1b650c6ea2fff265e48b6190551ed136324a47d25e1d357a754295b674e74b4628b20223ad067d +p7zip.v17.5.0+1.armv6l-linux-gnueabihf.tar.gz/md5/355410848192de3b02d12fd663867f4b +p7zip.v17.5.0+1.armv6l-linux-gnueabihf.tar.gz/sha512/8f103b41e755d157d70dacca89a0ef4610bea109686b4005e8edd5f79ed2e6419c00c2625d0ab90e6e33fa389e670490d8de263c0bdae952cc34cbbf440e275f +p7zip.v17.5.0+1.armv6l-linux-musleabihf.tar.gz/md5/34363b227306fce34a728af54b71064f +p7zip.v17.5.0+1.armv6l-linux-musleabihf.tar.gz/sha512/8dd7b37ce6223c9fedcaa999eb806eb6dec8c4a3133d3c07e2456cb8543b8e4f5b881c1bff2d2e25f19b1312b18673e9013aeff87d6a274eec6c451b1ba0d6b9 +p7zip.v17.5.0+1.armv7l-linux-gnueabihf.tar.gz/md5/dbb1fc0cf3bea674442ff8cc932a94cd +p7zip.v17.5.0+1.armv7l-linux-gnueabihf.tar.gz/sha512/c4d71d905fa420391417786ed206a0c334475dd0df8baa1fc3f6560ce548db11805003d0d0b35bb622fe818c761f2b0abe0796d1cbfce2a922da69e697f056a2 +p7zip.v17.5.0+1.armv7l-linux-musleabihf.tar.gz/md5/d188b5dd453faedb616ba9c48fdeab6b 
+p7zip.v17.5.0+1.armv7l-linux-musleabihf.tar.gz/sha512/ea30a775370502ca9e271b87cbda528d0c51d63ce0df41883d4dbc1527a32f251d797f3692fcf9b883b5fbaaad80515b971a8f8fe09ba102978b19a0ecb58528 +p7zip.v17.5.0+1.i686-linux-gnu.tar.gz/md5/dc02bdde045a0b6b22cf14d6960e63ed +p7zip.v17.5.0+1.i686-linux-gnu.tar.gz/sha512/d2d0dd14a5fc1163fea2276e0925bfa8d075d5dba1d8018e4e3160977d3b09642b2e521d8e57d049abaf0e2ea391a846f0b0136b3c59e8b476c8c52ac5210447 +p7zip.v17.5.0+1.i686-linux-musl.tar.gz/md5/0b8658147938a8ec109ee2b3b0a0665f +p7zip.v17.5.0+1.i686-linux-musl.tar.gz/sha512/411b2950f5928c537b87ba0651c09c08e57afed765db9fee89eda8b12939ef0da94c8ba38c0a24ba46b4513a0e4cca798eb09f2b20a011099ed3cf14455dd19e +p7zip.v17.5.0+1.i686-w64-mingw32.tar.gz/md5/98bdd8767c77a35f71303ff490a3d363 +p7zip.v17.5.0+1.i686-w64-mingw32.tar.gz/sha512/14f08071af74297df8bfe1d9f7efa3c0212e62ace573848f17b729e4c36dc3861110f3c5cc9315364c318e5b040736443a24492e86d76161993653a309996eb3 +p7zip.v17.5.0+1.powerpc64le-linux-gnu.tar.gz/md5/b18c917b9852898a9b9d6d24bcc6863e +p7zip.v17.5.0+1.powerpc64le-linux-gnu.tar.gz/sha512/0148dc8a0bc9c95212d7f8e2f92ee24e968eb7290fe72c7ae02e286bf5c05dd6b1f10b32350a7ff37777ed5a8cc21f3303f464620f3394c7a4690ae98bf77299 +p7zip.v17.5.0+1.x86_64-apple-darwin.tar.gz/md5/da31752a2556644d39e48649bb0111de +p7zip.v17.5.0+1.x86_64-apple-darwin.tar.gz/sha512/0695ad111263d2fadfdf9a46ce7ee80def0bf60db7d1c2585ed2af6fc945fb169311a9f1ffc6f95fb43b0b03694d2d1be9136d3d78ba2ef2b19228987883a385 +p7zip.v17.5.0+1.x86_64-linux-gnu.tar.gz/md5/2fb55d86e4eaccb0488bd637d088b996 +p7zip.v17.5.0+1.x86_64-linux-gnu.tar.gz/sha512/38ac355157d59c09f308fc29964d0e9c1466c9633efd8d3c6ff3c738abce2af45ebc6b92a29f56d5e7baa4871f9f39b14ecfcbedd4e2f4ca7c0fe6627c6b13e7 +p7zip.v17.5.0+1.x86_64-linux-musl.tar.gz/md5/f0bd567a851d2dd9d306552ffafbca3a +p7zip.v17.5.0+1.x86_64-linux-musl.tar.gz/sha512/e60047a6e7e3496cb6658f87c8c88676f399cd9f3d0d7daa880b6be09cd5525f7f22776896f1375722b47555514ff8c018f02ce800ec3fd0ed922e16e8a6d657 +p7zip.v17.5.0+1.x86_64-unknown-freebsd.tar.gz/md5/d37bd26e39a3ec84f262636f70624341 +p7zip.v17.5.0+1.x86_64-unknown-freebsd.tar.gz/sha512/0604a880c19f9d72d5828edd75be641625c29f230b3a5e7d70ec3812c014c96b76ee7b45e0e80f49be63f109a48700e75d1e5be01b5ae7b46d42dafda9885e8c +p7zip.v17.5.0+1.x86_64-w64-mingw32.tar.gz/md5/f02c7b2481dee880b096340a8735350f +p7zip.v17.5.0+1.x86_64-w64-mingw32.tar.gz/sha512/08b717c1b072d1309f6af8973eb09b1a482abb7ae7d01fba79873d4310a7c11292e2e8779029f99cc60627ed0d064224bc87782e587c520f970b840b7b838052 diff --git a/deps/checksums/pcre b/deps/checksums/pcre index 744d16540d6c8..018ffd5201653 100644 --- a/deps/checksums/pcre +++ b/deps/checksums/pcre @@ -1,34 +1,36 @@ -PCRE2.v10.43.0+0.aarch64-apple-darwin.tar.gz/md5/f1bee27b8d9465c14eaf9362701fb795 -PCRE2.v10.43.0+0.aarch64-apple-darwin.tar.gz/sha512/33b8f6e3703f0a52cd2d57897c28e35fb3c63af459296a2fef4e414dc99239617833b2ab176068d6aab690122a34a9ab9b6042dfff54b5a30ad60429a809818d -PCRE2.v10.43.0+0.aarch64-linux-gnu.tar.gz/md5/c55a569260e302f315f4a1bd185346ab -PCRE2.v10.43.0+0.aarch64-linux-gnu.tar.gz/sha512/be4d2883e69d562898a157424b2baa146fe79545a8c10935cf25b54e498ca2c14fae026fa0d958d175895fe2cb695d0f96ef7f09fecbf54e1cee4a55b81a382b -PCRE2.v10.43.0+0.aarch64-linux-musl.tar.gz/md5/fb041ccace415ccc26263968c6435a47 -PCRE2.v10.43.0+0.aarch64-linux-musl.tar.gz/sha512/06672ebe18e0f6bfa1dd2d5c02e10d9fd67236a73fd38ee2e8f4496d98f297f7866760f0be3b9cebeca348a5d748a3719e416b84cec96a90c71eac55afbbd905 -PCRE2.v10.43.0+0.armv6l-linux-gnueabihf.tar.gz/md5/4f303a4cbf26abb7bf4ffb8bfe3d636d 
-PCRE2.v10.43.0+0.armv6l-linux-gnueabihf.tar.gz/sha512/dddb3b227ee48d8329f6c65c5d0fce9f460eccaec98594a05bf28d1d9af01397cf7ef86c96e88b0e96030a7f6d8406461f78dd5fa558db8fc8f7bfb3b522ed54 -PCRE2.v10.43.0+0.armv6l-linux-musleabihf.tar.gz/md5/eade1fff90404bf3584fd15b62be0cfa -PCRE2.v10.43.0+0.armv6l-linux-musleabihf.tar.gz/sha512/351f6fa11c39b90fcc4086bd00b1b1126ed92272595f0b745757ca4e7e360c84d244446a871029245c3bcf838b23f42d908f858e44fae7deb9002a36cb76753c -PCRE2.v10.43.0+0.armv7l-linux-gnueabihf.tar.gz/md5/daa0a34b2cf0b71a6f8e1f9456cd4b06 -PCRE2.v10.43.0+0.armv7l-linux-gnueabihf.tar.gz/sha512/ae72956ae7a9a5f315bfc816fdbb500937a170dfea306a28289ec9eac57d883cf2fa5a467ce9406eea80546b632a272c63bbb48b89ebe6d9f69d30366fd84180 -PCRE2.v10.43.0+0.armv7l-linux-musleabihf.tar.gz/md5/90bfb9e4efd7c92a2bb6a1a48fd88ecb -PCRE2.v10.43.0+0.armv7l-linux-musleabihf.tar.gz/sha512/147ac98d82fec4695de0c43c87d3d9242b9c024bc6df7ad7504d17ef6a12a029ed703c4deade0e2b24faf5283d66309f880d62f8c4834f27b2cc8889587d7abe -PCRE2.v10.43.0+0.i686-linux-gnu.tar.gz/md5/6fde649bf449c4122438fff32c0706ab -PCRE2.v10.43.0+0.i686-linux-gnu.tar.gz/sha512/edfaa15490497723c095eaa5df26194637b0606e9dce7b89b400024ef8ac42e21f010bb31c2cee5c735ce82fc8de0c42bf2b35b095a1e70a9a111d3bfba6da64 -PCRE2.v10.43.0+0.i686-linux-musl.tar.gz/md5/73aa8d13cc48338a5071e30b3a899109 -PCRE2.v10.43.0+0.i686-linux-musl.tar.gz/sha512/200e2d3ffd68f49b76c70a5be80cb0ae9703049214674485a2ab24abaaea7aefd6dec2042a14bd48cc52b04379f57322ec1e1788dc8c00896e1074921725d9cc -PCRE2.v10.43.0+0.i686-w64-mingw32.tar.gz/md5/4ddf0f31c97463e5216ed71afc4fb014 -PCRE2.v10.43.0+0.i686-w64-mingw32.tar.gz/sha512/75903d81668a66a5c4d830e31657391d507883943d86245998f224655406dcc6a95ba4f5fad20dcf608a98d6ccf49abe50107993448669b03c42a878d8466611 -PCRE2.v10.43.0+0.powerpc64le-linux-gnu.tar.gz/md5/64cb71080da1c97eba3a440ff53d298c -PCRE2.v10.43.0+0.powerpc64le-linux-gnu.tar.gz/sha512/16348b96a45c7a7d86775cb1d082b4d1c060e5a8acfb37554885d8da0db87430d8a40f834f008a90f4a7b1c07b8329df96836ba0430ecec506a143b7347bb101 -PCRE2.v10.43.0+0.x86_64-apple-darwin.tar.gz/md5/31bbb2485f5e06c3616fb061ffb2f022 -PCRE2.v10.43.0+0.x86_64-apple-darwin.tar.gz/sha512/3284ee63ed1e5631267efacb354a1d90bd1b7db0bc81d7233c9580eee4a9af06093c1c4f240786c34299df89a36a17ed92598fc302074f5a200c56cc96081bf1 -PCRE2.v10.43.0+0.x86_64-linux-gnu.tar.gz/md5/2fb7e0e9bbc32dddf543f4d395b50d3f -PCRE2.v10.43.0+0.x86_64-linux-gnu.tar.gz/sha512/5a533a3a01f817689077377835dc88edf914459ed0df7323f8f4dba602a47fd6af700075feb1f448221366b1cf7e2d717c615a5c506eb4ca2db9c600fd290fb0 -PCRE2.v10.43.0+0.x86_64-linux-musl.tar.gz/md5/b432063c93aa477dd0883428191041f8 -PCRE2.v10.43.0+0.x86_64-linux-musl.tar.gz/sha512/36475e90e29d7324046fe1da669fb37f667245a680df23f3978394964e14eb9bda3fd56703ad62cd56e27a5af77d8b6b9612516457ae803cef0627bd919e4628 -PCRE2.v10.43.0+0.x86_64-unknown-freebsd.tar.gz/md5/6124870a991e70c2ed8a64d8f3258760 -PCRE2.v10.43.0+0.x86_64-unknown-freebsd.tar.gz/sha512/4645a2d05af149467f2e4ce5e48853b57c585d6a5950c70726d04bc71a5d82f50809af141ad98e99671e764ac74965651ecad1c49a849caa8fd077c7f4911c7c -PCRE2.v10.43.0+0.x86_64-w64-mingw32.tar.gz/md5/cc4e9f45471f538c1fefa657ab99b878 -PCRE2.v10.43.0+0.x86_64-w64-mingw32.tar.gz/sha512/eed45e621263cb307b6e8ab42e2c12cf9e1d61ad523760fd721a85765c359b74d580752ca7c3d222e0cba26a74e872a6d43dbf2dbf08e4733a3e709417e48651 +PCRE2.v10.43.0+1.aarch64-apple-darwin.tar.gz/md5/f1bee27b8d9465c14eaf9362701fb795 
+PCRE2.v10.43.0+1.aarch64-apple-darwin.tar.gz/sha512/33b8f6e3703f0a52cd2d57897c28e35fb3c63af459296a2fef4e414dc99239617833b2ab176068d6aab690122a34a9ab9b6042dfff54b5a30ad60429a809818d +PCRE2.v10.43.0+1.aarch64-linux-gnu.tar.gz/md5/c55a569260e302f315f4a1bd185346ab +PCRE2.v10.43.0+1.aarch64-linux-gnu.tar.gz/sha512/be4d2883e69d562898a157424b2baa146fe79545a8c10935cf25b54e498ca2c14fae026fa0d958d175895fe2cb695d0f96ef7f09fecbf54e1cee4a55b81a382b +PCRE2.v10.43.0+1.aarch64-linux-musl.tar.gz/md5/fb041ccace415ccc26263968c6435a47 +PCRE2.v10.43.0+1.aarch64-linux-musl.tar.gz/sha512/06672ebe18e0f6bfa1dd2d5c02e10d9fd67236a73fd38ee2e8f4496d98f297f7866760f0be3b9cebeca348a5d748a3719e416b84cec96a90c71eac55afbbd905 +PCRE2.v10.43.0+1.aarch64-unknown-freebsd.tar.gz/md5/8c73fe6faa94102616cfafcc6cc1bf9d +PCRE2.v10.43.0+1.aarch64-unknown-freebsd.tar.gz/sha512/464a892e646fb5aa028d2e96e6f8beaa0c15f0ef56a6ba3388cba4ce85151448b0dfd51357a3e8dea4505957394ffbab14ceb29b9fc73a67e2b2f54dd28a7aed +PCRE2.v10.43.0+1.armv6l-linux-gnueabihf.tar.gz/md5/4f303a4cbf26abb7bf4ffb8bfe3d636d +PCRE2.v10.43.0+1.armv6l-linux-gnueabihf.tar.gz/sha512/dddb3b227ee48d8329f6c65c5d0fce9f460eccaec98594a05bf28d1d9af01397cf7ef86c96e88b0e96030a7f6d8406461f78dd5fa558db8fc8f7bfb3b522ed54 +PCRE2.v10.43.0+1.armv6l-linux-musleabihf.tar.gz/md5/eade1fff90404bf3584fd15b62be0cfa +PCRE2.v10.43.0+1.armv6l-linux-musleabihf.tar.gz/sha512/351f6fa11c39b90fcc4086bd00b1b1126ed92272595f0b745757ca4e7e360c84d244446a871029245c3bcf838b23f42d908f858e44fae7deb9002a36cb76753c +PCRE2.v10.43.0+1.armv7l-linux-gnueabihf.tar.gz/md5/daa0a34b2cf0b71a6f8e1f9456cd4b06 +PCRE2.v10.43.0+1.armv7l-linux-gnueabihf.tar.gz/sha512/ae72956ae7a9a5f315bfc816fdbb500937a170dfea306a28289ec9eac57d883cf2fa5a467ce9406eea80546b632a272c63bbb48b89ebe6d9f69d30366fd84180 +PCRE2.v10.43.0+1.armv7l-linux-musleabihf.tar.gz/md5/90bfb9e4efd7c92a2bb6a1a48fd88ecb +PCRE2.v10.43.0+1.armv7l-linux-musleabihf.tar.gz/sha512/147ac98d82fec4695de0c43c87d3d9242b9c024bc6df7ad7504d17ef6a12a029ed703c4deade0e2b24faf5283d66309f880d62f8c4834f27b2cc8889587d7abe +PCRE2.v10.43.0+1.i686-linux-gnu.tar.gz/md5/6fde649bf449c4122438fff32c0706ab +PCRE2.v10.43.0+1.i686-linux-gnu.tar.gz/sha512/edfaa15490497723c095eaa5df26194637b0606e9dce7b89b400024ef8ac42e21f010bb31c2cee5c735ce82fc8de0c42bf2b35b095a1e70a9a111d3bfba6da64 +PCRE2.v10.43.0+1.i686-linux-musl.tar.gz/md5/73aa8d13cc48338a5071e30b3a899109 +PCRE2.v10.43.0+1.i686-linux-musl.tar.gz/sha512/200e2d3ffd68f49b76c70a5be80cb0ae9703049214674485a2ab24abaaea7aefd6dec2042a14bd48cc52b04379f57322ec1e1788dc8c00896e1074921725d9cc +PCRE2.v10.43.0+1.i686-w64-mingw32.tar.gz/md5/4ddf0f31c97463e5216ed71afc4fb014 +PCRE2.v10.43.0+1.i686-w64-mingw32.tar.gz/sha512/75903d81668a66a5c4d830e31657391d507883943d86245998f224655406dcc6a95ba4f5fad20dcf608a98d6ccf49abe50107993448669b03c42a878d8466611 +PCRE2.v10.43.0+1.powerpc64le-linux-gnu.tar.gz/md5/64cb71080da1c97eba3a440ff53d298c +PCRE2.v10.43.0+1.powerpc64le-linux-gnu.tar.gz/sha512/16348b96a45c7a7d86775cb1d082b4d1c060e5a8acfb37554885d8da0db87430d8a40f834f008a90f4a7b1c07b8329df96836ba0430ecec506a143b7347bb101 +PCRE2.v10.43.0+1.x86_64-apple-darwin.tar.gz/md5/31bbb2485f5e06c3616fb061ffb2f022 +PCRE2.v10.43.0+1.x86_64-apple-darwin.tar.gz/sha512/3284ee63ed1e5631267efacb354a1d90bd1b7db0bc81d7233c9580eee4a9af06093c1c4f240786c34299df89a36a17ed92598fc302074f5a200c56cc96081bf1 +PCRE2.v10.43.0+1.x86_64-linux-gnu.tar.gz/md5/2fb7e0e9bbc32dddf543f4d395b50d3f 
+PCRE2.v10.43.0+1.x86_64-linux-gnu.tar.gz/sha512/5a533a3a01f817689077377835dc88edf914459ed0df7323f8f4dba602a47fd6af700075feb1f448221366b1cf7e2d717c615a5c506eb4ca2db9c600fd290fb0 +PCRE2.v10.43.0+1.x86_64-linux-musl.tar.gz/md5/b432063c93aa477dd0883428191041f8 +PCRE2.v10.43.0+1.x86_64-linux-musl.tar.gz/sha512/36475e90e29d7324046fe1da669fb37f667245a680df23f3978394964e14eb9bda3fd56703ad62cd56e27a5af77d8b6b9612516457ae803cef0627bd919e4628 +PCRE2.v10.43.0+1.x86_64-unknown-freebsd.tar.gz/md5/6124870a991e70c2ed8a64d8f3258760 +PCRE2.v10.43.0+1.x86_64-unknown-freebsd.tar.gz/sha512/4645a2d05af149467f2e4ce5e48853b57c585d6a5950c70726d04bc71a5d82f50809af141ad98e99671e764ac74965651ecad1c49a849caa8fd077c7f4911c7c +PCRE2.v10.43.0+1.x86_64-w64-mingw32.tar.gz/md5/cc4e9f45471f538c1fefa657ab99b878 +PCRE2.v10.43.0+1.x86_64-w64-mingw32.tar.gz/sha512/eed45e621263cb307b6e8ab42e2c12cf9e1d61ad523760fd721a85765c359b74d580752ca7c3d222e0cba26a74e872a6d43dbf2dbf08e4733a3e709417e48651 pcre2-10.43.tar.bz2/md5/c8e2043cbc4abb80e76dba323f7c409f pcre2-10.43.tar.bz2/sha512/8ac1520c32e9e5672404aaf6104e23c9ee5c3c28ad28ff101435599d813cbb20e0491a3fd34e012b4411b3e0366a4c6dfa3f02d093acaa6ff0ab25478bb7ade9 diff --git a/deps/checksums/suitesparse b/deps/checksums/suitesparse index acec99b39879c..bac143325196f 100644 --- a/deps/checksums/suitesparse +++ b/deps/checksums/suitesparse @@ -1,34 +1,36 @@ SuiteSparse-7.8.0.tar.gz/md5/ad42a80d28bb56a1fce15f6e7332e04e SuiteSparse-7.8.0.tar.gz/sha512/91aff0aee26e938ba88a8f92db15b0db0ecc6ada3b60153bb299f53a45ccda131db4bc66f890c220034c900180d0bb3a5fb3e2686fec7d6174f5900a3ee64424 -SuiteSparse.v7.8.0+0.aarch64-apple-darwin.tar.gz/md5/38379e14a53663a9c23f32ed56801676 -SuiteSparse.v7.8.0+0.aarch64-apple-darwin.tar.gz/sha512/3f2a7aa7778a22d150bad9ecb8d03edfa75707a07545e65660c8ccc4b0a9fb058ccab29e21e4728741d40d390d28922d521d3841e16258cf8e26acacadfc1fbd -SuiteSparse.v7.8.0+0.aarch64-linux-gnu.tar.gz/md5/bc52c7df0a442c0fb9aafb83d60878f4 -SuiteSparse.v7.8.0+0.aarch64-linux-gnu.tar.gz/sha512/436e79ea0774d6ffb571b513e385ef48d9cc70b72010cffdc23d606ad6c8984c8b49e2422ce8881def0722f3f608e4ecb87e6752dd80cf7988addd330c5ded13 -SuiteSparse.v7.8.0+0.aarch64-linux-musl.tar.gz/md5/87e4c2588efc39723621ac5010ddf2e5 -SuiteSparse.v7.8.0+0.aarch64-linux-musl.tar.gz/sha512/17115826716bb48f16e4593941be275d47012d112e54d8826c75fde119ffc9f66accd02353b309365b59779d7af3ac220f31ab7cf7eea165b209a93ecdc4102f -SuiteSparse.v7.8.0+0.armv6l-linux-gnueabihf.tar.gz/md5/b1490603aa129942d8e4c9581853cd0a -SuiteSparse.v7.8.0+0.armv6l-linux-gnueabihf.tar.gz/sha512/e23c3532784e295ae72b811d285c3729c3f8ac1b5ee1621e831b6b2824a5b357e4bfa49e09174de7763fc3ebcab6b84ef16536bc1cf6f4bc0543b1b229209178 -SuiteSparse.v7.8.0+0.armv6l-linux-musleabihf.tar.gz/md5/f8199358882f76dd30bcce741b837de1 -SuiteSparse.v7.8.0+0.armv6l-linux-musleabihf.tar.gz/sha512/2c8d4ec21bfe253d3d32a5f5f09601b9b2864149f63f53067b157f5f7315fb04236bf5b19a1e5b4569e2c73127dcbb1703d56c7d06fc3ab9ae155902b7a1c2a9 -SuiteSparse.v7.8.0+0.armv7l-linux-gnueabihf.tar.gz/md5/cc3aa1a013cc91e7076dddf20fba9f60 -SuiteSparse.v7.8.0+0.armv7l-linux-gnueabihf.tar.gz/sha512/a6b8cfbc345a089f12e55d8d44061dcce30f94c2d79fc520d6c5dfe433ac2e362d049fac72278cb59d4b3760ca08d5e350b7e2658fa5e8c77ce8608f67c2c4c4 -SuiteSparse.v7.8.0+0.armv7l-linux-musleabihf.tar.gz/md5/0d7797d31c30c53bf219cdc0a48e64dc -SuiteSparse.v7.8.0+0.armv7l-linux-musleabihf.tar.gz/sha512/a7df8938ee6a04f62169bedd29c8408951cf33a43e0f529fb4d1e360bdad6462a50b2af297adb5f51fd726e1ced1fc8fcda7feeeafbeb44000bfe02a8e29c29e 
-SuiteSparse.v7.8.0+0.i686-linux-gnu.tar.gz/md5/e48fa3d2e00f210e964c21e4ff27efae -SuiteSparse.v7.8.0+0.i686-linux-gnu.tar.gz/sha512/3088c2af476285eb8549cf6aa56381156d49513a274348f86fbf01aa9ce0712961471f83fa50b261f3f365a302b88eb20ef0bb35b58c07a2cfb5dc337fdb72c1 -SuiteSparse.v7.8.0+0.i686-linux-musl.tar.gz/md5/e55202dbeca107a0c25a4f09d5d68915 -SuiteSparse.v7.8.0+0.i686-linux-musl.tar.gz/sha512/0f4de2e62016914b4d1bcb9b13bd8cb2bebefc5f0a532e103948b9aae79a20462ac7b74a3e968d4f99076c37dbbafb747699cd151e831ff89d297f78478fb84f -SuiteSparse.v7.8.0+0.i686-w64-mingw32.tar.gz/md5/cb971bc1042196e527f95015c8bc5ef8 -SuiteSparse.v7.8.0+0.i686-w64-mingw32.tar.gz/sha512/d445a7790e3ac5392f75c9f4ec30cd1c812354b04388b4c6c6cea2423d2f0dac7173b17a8a2b7a7f4af10321601f96819a7702f9beac0397d85916d99493bc39 -SuiteSparse.v7.8.0+0.powerpc64le-linux-gnu.tar.gz/md5/12058f122b548a37070770d1847f3ce9 -SuiteSparse.v7.8.0+0.powerpc64le-linux-gnu.tar.gz/sha512/f375feeb8448ea90ce8d9f31c7e1230f6868316f06094ba0155069dded4f8da2e1b54d462ef9cfc77abd76147740d4066236dcf1fcea91f8a7141819962ad0ae -SuiteSparse.v7.8.0+0.x86_64-apple-darwin.tar.gz/md5/1bd473f2a25f1ebcea8acc858e2594b4 -SuiteSparse.v7.8.0+0.x86_64-apple-darwin.tar.gz/sha512/034af137deee5bf0ebf3746745d09ad50ce135cd4768a2049bb9811478ff90e6ed8e2c990e277b4c3b38a3a5e9eaa856938eb86239ca445fa64b6dab6af7e996 -SuiteSparse.v7.8.0+0.x86_64-linux-gnu.tar.gz/md5/c58a86d9f25e6705941105d9e41f084c -SuiteSparse.v7.8.0+0.x86_64-linux-gnu.tar.gz/sha512/56447062802f01815ffb014624423c6fd3ab6e16b642b2fe37972a151b02865965c95ca3d1a455c6d51cd31633aea8a732b235b55d68e6779c17b293c488fa43 -SuiteSparse.v7.8.0+0.x86_64-linux-musl.tar.gz/md5/ba6e10ba61c209df94f18ab51fe2dd90 -SuiteSparse.v7.8.0+0.x86_64-linux-musl.tar.gz/sha512/3b8fc504cfb4a3b628d5b955a482bad08c85e09e529f833855a84b847721247aaa469f96adef6b218a1ba5896cde91664cc819ba33115e3cc309e72140841ca3 -SuiteSparse.v7.8.0+0.x86_64-unknown-freebsd.tar.gz/md5/a50c69142a42c14edac4ce94b86b138a -SuiteSparse.v7.8.0+0.x86_64-unknown-freebsd.tar.gz/sha512/963be0dccd1a594df08fe5135ef4ac13e1d707841c3e97d31ba5477d0d6ec26bad9be1c52d9fd78f199740a53950353adbdd767469f3bf01ea1e3ee843eb6c1a -SuiteSparse.v7.8.0+0.x86_64-w64-mingw32.tar.gz/md5/7ca11ba89bd09183cc5a9320d6e8a4a7 -SuiteSparse.v7.8.0+0.x86_64-w64-mingw32.tar.gz/sha512/e1d5def1103bbf0bb29c08cdd3bf21ba60456353694985c66f8e55a31d54a32c5b891e56e1ffe30f9e1223c49283d267e483e2f1b999f566099c239b3eed1d78 +SuiteSparse.v7.8.0+1.aarch64-apple-darwin.tar.gz/md5/38379e14a53663a9c23f32ed56801676 +SuiteSparse.v7.8.0+1.aarch64-apple-darwin.tar.gz/sha512/3f2a7aa7778a22d150bad9ecb8d03edfa75707a07545e65660c8ccc4b0a9fb058ccab29e21e4728741d40d390d28922d521d3841e16258cf8e26acacadfc1fbd +SuiteSparse.v7.8.0+1.aarch64-linux-gnu.tar.gz/md5/bc52c7df0a442c0fb9aafb83d60878f4 +SuiteSparse.v7.8.0+1.aarch64-linux-gnu.tar.gz/sha512/436e79ea0774d6ffb571b513e385ef48d9cc70b72010cffdc23d606ad6c8984c8b49e2422ce8881def0722f3f608e4ecb87e6752dd80cf7988addd330c5ded13 +SuiteSparse.v7.8.0+1.aarch64-linux-musl.tar.gz/md5/87e4c2588efc39723621ac5010ddf2e5 +SuiteSparse.v7.8.0+1.aarch64-linux-musl.tar.gz/sha512/17115826716bb48f16e4593941be275d47012d112e54d8826c75fde119ffc9f66accd02353b309365b59779d7af3ac220f31ab7cf7eea165b209a93ecdc4102f +SuiteSparse.v7.8.0+1.aarch64-unknown-freebsd.tar.gz/md5/108a78ec5d21c910b1f0d3cd58b2b18e +SuiteSparse.v7.8.0+1.aarch64-unknown-freebsd.tar.gz/sha512/730f93e317305073acda619044296eb1844bc1380719a9c2f2f255bebd7c0c827317ff99ce06a081521f9441c3ca7fbcb2362a310ef3c5d289f485b2628c3d80 
+SuiteSparse.v7.8.0+1.armv6l-linux-gnueabihf.tar.gz/md5/b1490603aa129942d8e4c9581853cd0a +SuiteSparse.v7.8.0+1.armv6l-linux-gnueabihf.tar.gz/sha512/e23c3532784e295ae72b811d285c3729c3f8ac1b5ee1621e831b6b2824a5b357e4bfa49e09174de7763fc3ebcab6b84ef16536bc1cf6f4bc0543b1b229209178 +SuiteSparse.v7.8.0+1.armv6l-linux-musleabihf.tar.gz/md5/f8199358882f76dd30bcce741b837de1 +SuiteSparse.v7.8.0+1.armv6l-linux-musleabihf.tar.gz/sha512/2c8d4ec21bfe253d3d32a5f5f09601b9b2864149f63f53067b157f5f7315fb04236bf5b19a1e5b4569e2c73127dcbb1703d56c7d06fc3ab9ae155902b7a1c2a9 +SuiteSparse.v7.8.0+1.armv7l-linux-gnueabihf.tar.gz/md5/cc3aa1a013cc91e7076dddf20fba9f60 +SuiteSparse.v7.8.0+1.armv7l-linux-gnueabihf.tar.gz/sha512/a6b8cfbc345a089f12e55d8d44061dcce30f94c2d79fc520d6c5dfe433ac2e362d049fac72278cb59d4b3760ca08d5e350b7e2658fa5e8c77ce8608f67c2c4c4 +SuiteSparse.v7.8.0+1.armv7l-linux-musleabihf.tar.gz/md5/0d7797d31c30c53bf219cdc0a48e64dc +SuiteSparse.v7.8.0+1.armv7l-linux-musleabihf.tar.gz/sha512/a7df8938ee6a04f62169bedd29c8408951cf33a43e0f529fb4d1e360bdad6462a50b2af297adb5f51fd726e1ced1fc8fcda7feeeafbeb44000bfe02a8e29c29e +SuiteSparse.v7.8.0+1.i686-linux-gnu.tar.gz/md5/e48fa3d2e00f210e964c21e4ff27efae +SuiteSparse.v7.8.0+1.i686-linux-gnu.tar.gz/sha512/3088c2af476285eb8549cf6aa56381156d49513a274348f86fbf01aa9ce0712961471f83fa50b261f3f365a302b88eb20ef0bb35b58c07a2cfb5dc337fdb72c1 +SuiteSparse.v7.8.0+1.i686-linux-musl.tar.gz/md5/e55202dbeca107a0c25a4f09d5d68915 +SuiteSparse.v7.8.0+1.i686-linux-musl.tar.gz/sha512/0f4de2e62016914b4d1bcb9b13bd8cb2bebefc5f0a532e103948b9aae79a20462ac7b74a3e968d4f99076c37dbbafb747699cd151e831ff89d297f78478fb84f +SuiteSparse.v7.8.0+1.i686-w64-mingw32.tar.gz/md5/e8f4de53ec4ae74554e76bd52702d7a4 +SuiteSparse.v7.8.0+1.i686-w64-mingw32.tar.gz/sha512/f944f14e62408f04a9966cd927cbbbe26b00a4beccc85ab8923dc4028875b0395c6b5e56efba1fd2f29fb954543ca83e800685ffafcdfdd97351a7d4926349a8 +SuiteSparse.v7.8.0+1.powerpc64le-linux-gnu.tar.gz/md5/12058f122b548a37070770d1847f3ce9 +SuiteSparse.v7.8.0+1.powerpc64le-linux-gnu.tar.gz/sha512/f375feeb8448ea90ce8d9f31c7e1230f6868316f06094ba0155069dded4f8da2e1b54d462ef9cfc77abd76147740d4066236dcf1fcea91f8a7141819962ad0ae +SuiteSparse.v7.8.0+1.x86_64-apple-darwin.tar.gz/md5/1bd473f2a25f1ebcea8acc858e2594b4 +SuiteSparse.v7.8.0+1.x86_64-apple-darwin.tar.gz/sha512/034af137deee5bf0ebf3746745d09ad50ce135cd4768a2049bb9811478ff90e6ed8e2c990e277b4c3b38a3a5e9eaa856938eb86239ca445fa64b6dab6af7e996 +SuiteSparse.v7.8.0+1.x86_64-linux-gnu.tar.gz/md5/c58a86d9f25e6705941105d9e41f084c +SuiteSparse.v7.8.0+1.x86_64-linux-gnu.tar.gz/sha512/56447062802f01815ffb014624423c6fd3ab6e16b642b2fe37972a151b02865965c95ca3d1a455c6d51cd31633aea8a732b235b55d68e6779c17b293c488fa43 +SuiteSparse.v7.8.0+1.x86_64-linux-musl.tar.gz/md5/ba6e10ba61c209df94f18ab51fe2dd90 +SuiteSparse.v7.8.0+1.x86_64-linux-musl.tar.gz/sha512/3b8fc504cfb4a3b628d5b955a482bad08c85e09e529f833855a84b847721247aaa469f96adef6b218a1ba5896cde91664cc819ba33115e3cc309e72140841ca3 +SuiteSparse.v7.8.0+1.x86_64-unknown-freebsd.tar.gz/md5/a50c69142a42c14edac4ce94b86b138a +SuiteSparse.v7.8.0+1.x86_64-unknown-freebsd.tar.gz/sha512/963be0dccd1a594df08fe5135ef4ac13e1d707841c3e97d31ba5477d0d6ec26bad9be1c52d9fd78f199740a53950353adbdd767469f3bf01ea1e3ee843eb6c1a +SuiteSparse.v7.8.0+1.x86_64-w64-mingw32.tar.gz/md5/7ca11ba89bd09183cc5a9320d6e8a4a7 +SuiteSparse.v7.8.0+1.x86_64-w64-mingw32.tar.gz/sha512/e1d5def1103bbf0bb29c08cdd3bf21ba60456353694985c66f8e55a31d54a32c5b891e56e1ffe30f9e1223c49283d267e483e2f1b999f566099c239b3eed1d78 diff --git 
a/deps/checksums/unwind b/deps/checksums/unwind index 317809053abeb..5d4967cb0cf22 100644 --- a/deps/checksums/unwind +++ b/deps/checksums/unwind @@ -1,26 +1,28 @@ -LibUnwind.v1.8.1+1.aarch64-linux-gnu.tar.gz/md5/0f789b9e5b2604a39cc363c4c513a808 -LibUnwind.v1.8.1+1.aarch64-linux-gnu.tar.gz/sha512/4c9c8250bfd84a96135a5e9ecdd4500214996c39852609d3a3983c2c5de44a728d9ce6b71bd649c1725e186db077f74df93a99f07452a31d344c17315eedb33d -LibUnwind.v1.8.1+1.aarch64-linux-musl.tar.gz/md5/356deb10e57d4c7e7bf7dbc728d6628d -LibUnwind.v1.8.1+1.aarch64-linux-musl.tar.gz/sha512/a998eebe7a4928bd417620bef0de9728c080f5d9714f15314ac190b333efa1bd7a21207156d56c132515bd3f7154d60204f1fac2dac5468560a7017682527c78 -LibUnwind.v1.8.1+1.armv6l-linux-gnueabihf.tar.gz/md5/b0ff12f5f0c801e5e280a142a1b7a188 -LibUnwind.v1.8.1+1.armv6l-linux-gnueabihf.tar.gz/sha512/68003f39eaf55c8742e821a228889590e8673cbafb74013a5b4f6a0c08ee372cb6b102a574e89ce9f46a38dd3d31ef75de95762f72a31a8ec9d7f495affaeb77 -LibUnwind.v1.8.1+1.armv6l-linux-musleabihf.tar.gz/md5/b04c77d707875989777ecfed66bd2dad -LibUnwind.v1.8.1+1.armv6l-linux-musleabihf.tar.gz/sha512/fb20586a0cbc998a0482d4102d8b8e5b2f802af519e25c440a64f67554468b29c6999a9ec5509ba375714beb93a4b48e8dbf71e6089c25ecd63b11eead844041 -LibUnwind.v1.8.1+1.armv7l-linux-gnueabihf.tar.gz/md5/e948016b4179d34727b456bc768cd8e1 -LibUnwind.v1.8.1+1.armv7l-linux-gnueabihf.tar.gz/sha512/6fc64e8ac7248540b95c321103d234f2c8633087f261e368251fe2cf6ea4e0654325716ac7017ae966edc4ddbb004a0f808d6e25cca766faaf505ca1f8f4aee7 -LibUnwind.v1.8.1+1.armv7l-linux-musleabihf.tar.gz/md5/660cf49c34a2ead1afbdcb44491e174a -LibUnwind.v1.8.1+1.armv7l-linux-musleabihf.tar.gz/sha512/edf337d176440c210f5860e90771758335256fe9d2f179d506656bccf92a9f9aa478d176d4b0db2213945ae847dad5bb88265110c92cfcd538d5740858b6a3f0 -LibUnwind.v1.8.1+1.i686-linux-gnu.tar.gz/md5/7032a70cfecb88cdd49cc3a4879456c6 -LibUnwind.v1.8.1+1.i686-linux-gnu.tar.gz/sha512/e34acc8f270c5156ede3ac3377d0f428c672daed869570734351c6b5a8946d65b5c0c041b713dddefedef81e55c65f5683aed0fec0d366e2d0207d8b902b0e33 -LibUnwind.v1.8.1+1.i686-linux-musl.tar.gz/md5/0541c3419020334173d299cf3482ff85 -LibUnwind.v1.8.1+1.i686-linux-musl.tar.gz/sha512/0b57745d280fb9893772936cd4872b0e04f41d86379e772b889e75baffe9324ef8dd168bb4c9761c1b8372f387ce99721dd6086b1d52b9a91215f40e8113968d -LibUnwind.v1.8.1+1.powerpc64le-linux-gnu.tar.gz/md5/fee37734fe95d1e96ebc77316df64192 -LibUnwind.v1.8.1+1.powerpc64le-linux-gnu.tar.gz/sha512/953ef70fb203db73764eeab0a37521b94e79ce70644ae16fe3157ca8d1011a0319d1928d094a3e2ed1e0489fdc0ca7dda33722095fd3aa40ed1fde150cf44c2a -LibUnwind.v1.8.1+1.x86_64-linux-gnu.tar.gz/md5/bbb201e7455fd13b805b0a96dc16183b -LibUnwind.v1.8.1+1.x86_64-linux-gnu.tar.gz/sha512/b1e21f7d772bd15bada17d287e1876ae586a97c6a8669e714347e7bf8a9b202fe53e8559cf19358f88bc458b2fe15ccbd616b64163cc715ce253f43f5133a8cd -LibUnwind.v1.8.1+1.x86_64-linux-musl.tar.gz/md5/72156f9d6da9a2742d9152822e5525f5 -LibUnwind.v1.8.1+1.x86_64-linux-musl.tar.gz/sha512/53a3f1985c5ae4816693f292604810cbe948e6332aeb227fb900ba3730f4379e863b144ae87af2c0651c2b9633b35c45c7a0a6fa34958dc9f58e0f8baa2ea701 -LibUnwind.v1.8.1+1.x86_64-unknown-freebsd.tar.gz/md5/e4346df03246d847f2867df3ab5ac624 -LibUnwind.v1.8.1+1.x86_64-unknown-freebsd.tar.gz/sha512/ee01bc12726288ae091476c1bed44de224a9ef5355687fd6fd64742da6628450434d7f33d4daf81029263aa6d23549a0aa5c5ae656599c132051255d1d742d5d +LibUnwind.v1.8.1+2.aarch64-linux-gnu.tar.gz/md5/de3690f3a8ecf0aa5d2525813bdab3c8 
+LibUnwind.v1.8.1+2.aarch64-linux-gnu.tar.gz/sha512/366090b4291623603e54d3c73437efcbc3c7f52ce0c64a63e8439eff8a3ddeb4efc1ab6b2513e0a60e2714239bf259cd667159a24207f0c9ce3134530e539155 +LibUnwind.v1.8.1+2.aarch64-linux-musl.tar.gz/md5/e8adf4e842e998b6806653964e721a47 +LibUnwind.v1.8.1+2.aarch64-linux-musl.tar.gz/sha512/77411646767f5f13e2f45d32bfa48d6864b712d46d339e3fd4d62d12f4a26b6ffb8293636209ee5645d8e5552bdf70db5a848736ef0df75db74c8c878553cd40 +LibUnwind.v1.8.1+2.aarch64-unknown-freebsd.tar.gz/md5/ee8fc39c934cf1c640ae4ae41addcc30 +LibUnwind.v1.8.1+2.aarch64-unknown-freebsd.tar.gz/sha512/6245fc3003ef24fce0f84007c0fa1390658e71dc64da6a2f5d296d3928351096ed2c0c83808890413332883abe5fcee7615eb40b2baeddfc56d3484315f3dacf +LibUnwind.v1.8.1+2.armv6l-linux-gnueabihf.tar.gz/md5/4c454e174be7b5f220f4cb8f659722d8 +LibUnwind.v1.8.1+2.armv6l-linux-gnueabihf.tar.gz/sha512/f6e3d83576ae963f400972250c8558b0b15bdd9657aac6eacbd0c3f59af6a3574d0cc475c6e606ad8f2e0b178ba33f297aec0aeac8a5970d93b2c36d9ffae59d +LibUnwind.v1.8.1+2.armv6l-linux-musleabihf.tar.gz/md5/dbec8675d2b73807c9d9e3afc2ce2260 +LibUnwind.v1.8.1+2.armv6l-linux-musleabihf.tar.gz/sha512/45d9ac63282c21bdc6488b65fae8f03bbaa55d18b346ac3fc3d40f38ebd05b2a0db539f23dc6c6f88bbbad8f2ec2cdcf677db1acff83a99d9875bee93555ad1e +LibUnwind.v1.8.1+2.armv7l-linux-gnueabihf.tar.gz/md5/98517b7a4ae874099ef0aafb46e740c9 +LibUnwind.v1.8.1+2.armv7l-linux-gnueabihf.tar.gz/sha512/3a00792415a15fe45c3454f9bf480222862217178a61db0738837537c7e2c50f71b53063facd591680b14e7b3bde218c34cee9b2854ad94897b306388749af1b +LibUnwind.v1.8.1+2.armv7l-linux-musleabihf.tar.gz/md5/f276569278383f7711f40e623670620d +LibUnwind.v1.8.1+2.armv7l-linux-musleabihf.tar.gz/sha512/48160616ac1ed4b3e343556517e3cbb4959e80e9be237fc820e33e06f6668e95d9365dd7c86e68dc898fee1141cd825495bbbc27d685913a2f2808d974b54c19 +LibUnwind.v1.8.1+2.i686-linux-gnu.tar.gz/md5/2cd0203f2b70436ac2323077dad1d5d1 +LibUnwind.v1.8.1+2.i686-linux-gnu.tar.gz/sha512/fa42b3306d9b67011468b2c07bdb6cca6847f0f1632ee4aca3212c5944e991f9a1ae8f881fb4ce86e641e977695942d873a39fc212bdcf6acdf3e12c24b31d8e +LibUnwind.v1.8.1+2.i686-linux-musl.tar.gz/md5/3c456a1b3da2f5d785e02e1b6cb4cd74 +LibUnwind.v1.8.1+2.i686-linux-musl.tar.gz/sha512/fce8368ee670109b681c9d442ad89fee8fdf8eac1e115407784d1e8b82cfb98acd9d2edb4dbea29f8c63c83054da2a4d34149fe231655e2535834a4ef7319666 +LibUnwind.v1.8.1+2.powerpc64le-linux-gnu.tar.gz/md5/73b04ae80ca9fdbe06b3eeaae40d5dc5 +LibUnwind.v1.8.1+2.powerpc64le-linux-gnu.tar.gz/sha512/d4083a696a3492ced38b05fb573d44c4cc2b5332a351b65be2c3992d9e932bb6ea71f48260c643fa54219adb800b5da41160e1d56b0d9145061edf2e5dfc0ef6 +LibUnwind.v1.8.1+2.x86_64-linux-gnu.tar.gz/md5/f9d6132f4166c5ede15b2303280a1066 +LibUnwind.v1.8.1+2.x86_64-linux-gnu.tar.gz/sha512/124159e7d13ce1caee5e2527746ec98b10a776f57e5f9c99053b7ab76e7d9447b998cbc044da7671fd39356445a983f16f2c7bbefc076b29e45d2c2bb4d0364e +LibUnwind.v1.8.1+2.x86_64-linux-musl.tar.gz/md5/665d9215ef915269e009f7dde1f827b3 +LibUnwind.v1.8.1+2.x86_64-linux-musl.tar.gz/sha512/2d8754bbfa7a4b576fb58a2d22b08940bb9f615988bfc388e9ea2cc96e3a573e6c31a4023b2509a3424a0ce3d946584c09ac5d18e4bca6f0f47e52597e193944 +LibUnwind.v1.8.1+2.x86_64-unknown-freebsd.tar.gz/md5/cc8149747db86524da0c9749ed538f3d +LibUnwind.v1.8.1+2.x86_64-unknown-freebsd.tar.gz/sha512/4d416999616fbf08103553aa43603ce62109c21e9a97d6a391fb267c04d382834da380f459c96412773f19d93b8e996ddd405831623ce118d239ad1a0d9025fd libunwind-1.8.1.tar.gz/md5/10c96118ff30b88c9eeb6eac8e75599d 
libunwind-1.8.1.tar.gz/sha512/aba7b578c1b8cbe78f05b64e154f3530525f8a34668b2a9f1ee6acb4b22c857befe34ad4e9e8cca99dbb66689d41bc72060a8f191bd8be232725d342809431b3 diff --git a/deps/checksums/zlib b/deps/checksums/zlib index b6fc106747c67..f5e7353f32e3e 100644 --- a/deps/checksums/zlib +++ b/deps/checksums/zlib @@ -1,34 +1,36 @@ -Zlib.v1.3.1+0.aarch64-apple-darwin.tar.gz/md5/50b48e14f0b3578e3f398d130749a25d -Zlib.v1.3.1+0.aarch64-apple-darwin.tar.gz/sha512/d970e183035b3615b410f7b0da2c7a1d516234744491d65ed1ebc3800b55732f20bf00fcbb0cf91289b8b4660915282873fb23788896713cf8dfae2984a8fd85 -Zlib.v1.3.1+0.aarch64-linux-gnu.tar.gz/md5/ee42c0bae86fc39968c8cd6a77a801bf -Zlib.v1.3.1+0.aarch64-linux-gnu.tar.gz/sha512/5d21cbeab03d44008c6cbad114d45c917ebee2fe98de6b19686f4f6ba1fc67eeedf968b94ed1c2d4efb89e93be9efa342bcc8a57cb8a505085d177abae14bc2d -Zlib.v1.3.1+0.aarch64-linux-musl.tar.gz/md5/9091d1288736b218f7b016791dc1a9c8 -Zlib.v1.3.1+0.aarch64-linux-musl.tar.gz/sha512/b49cbfe734beb2af9ef8e847542d006765345cbb08aee0854779e35e03c98df25c93539b046547c6b66029987c49499ddf6cb207824b1e376900bfceaa79691a -Zlib.v1.3.1+0.armv6l-linux-gnueabihf.tar.gz/md5/b686c85047b7dad2c2f08d1d16e7978a -Zlib.v1.3.1+0.armv6l-linux-gnueabihf.tar.gz/sha512/511fda619519dccedb264988e3b59a0e0fbf8f73d3ae290f238346209ebc0202a22f945257cea19afef64246574285e0322901a46bb48d7b48364c1e2eacd801 -Zlib.v1.3.1+0.armv6l-linux-musleabihf.tar.gz/md5/374be5cb926876f3f0492cfe0e193220 -Zlib.v1.3.1+0.armv6l-linux-musleabihf.tar.gz/sha512/4d3a2cc0c7c48146e63ed098da5a5acad75517197adc965550c123f7f8bcee0811a27be76fa37b6b0515eee4b5ba1c1a85c854e7b23bea36b5e21671805bedce -Zlib.v1.3.1+0.armv7l-linux-gnueabihf.tar.gz/md5/9febbc6a3d492e34c9ed53c95f3b799f -Zlib.v1.3.1+0.armv7l-linux-gnueabihf.tar.gz/sha512/4cee0e2cf572eb91028a09ef356e1aa6360949e046ceec03bd37574295ddcc4a7cefca9276f7565f152697d55b35f62af2ab107cdbf402b42846818629fea9c7 -Zlib.v1.3.1+0.armv7l-linux-musleabihf.tar.gz/md5/5d0d59a6cbbd1e63193ba6f7dbb755f9 -Zlib.v1.3.1+0.armv7l-linux-musleabihf.tar.gz/sha512/ee3f48b354168342ef63509b19a26aca3301fb3e5f4f6898afe2d3b44ee3380515efd6ced5d4e06e69736d851d19352deb9595bad82c051caccaee8c55e629d8 -Zlib.v1.3.1+0.i686-linux-gnu.tar.gz/md5/834350a64b2302a9caf0250a8f6068e5 -Zlib.v1.3.1+0.i686-linux-gnu.tar.gz/sha512/63dc158c4dfc42db97875893fcdd9784d9487af855bd576dbe04d1b967ad64510222df74a4cfb1b7e67386329d2a5686d7931b81720883fc1924f0d706a0a711 -Zlib.v1.3.1+0.i686-linux-musl.tar.gz/md5/e4f96efdeafa3d74c7c348059a8dc46a -Zlib.v1.3.1+0.i686-linux-musl.tar.gz/sha512/b47a571d94887ddcab8d7b50c6dce3afed3f56513a9d1859feaefebfad4a271d428b440df1d19ef3c2ed01ca4c8fd121ffc1572f5e252f27d0930f616cb47f18 -Zlib.v1.3.1+0.i686-w64-mingw32.tar.gz/md5/6bc27bd7dbbe17243dbbfaff225d3b23 -Zlib.v1.3.1+0.i686-w64-mingw32.tar.gz/sha512/5777661682831519875fffbb114c62596bf7bdb62011667c0f3dc5da9910e14de2285200a0a05187769b9c68c99b07024caafc16fef03c76e96e618f77149790 -Zlib.v1.3.1+0.powerpc64le-linux-gnu.tar.gz/md5/27dcad8557994cfd89d6fa7072bb843c -Zlib.v1.3.1+0.powerpc64le-linux-gnu.tar.gz/sha512/3b388dd286b273881d4344cff61c7da316c2bd2bab93072bf47ce4cb1cf9662158351b8febb0d5b1f8dfd9bc73cd32f7cae37fdd19b0ca91531bd3375df104bb -Zlib.v1.3.1+0.x86_64-apple-darwin.tar.gz/md5/9187319377191ae8b34162b375baa5db -Zlib.v1.3.1+0.x86_64-apple-darwin.tar.gz/sha512/895203434f161926978be52a223dd49a99454651a79c1c5e0529fa064f3f7ac2d7a069fed47a577b32523df22afadd6eb97d564dbd59c5d67ed90083add13c00 -Zlib.v1.3.1+0.x86_64-linux-gnu.tar.gz/md5/55d4d982d60cb643aa8688eb031b07ee 
-Zlib.v1.3.1+0.x86_64-linux-gnu.tar.gz/sha512/d8f94d22ffc37df027de23b2408c2000014c8b7b6c8539feca669ac1f2dbbe1679ca534c3be4d32c90fe38bbba27c795689226962fb067346b5ca213e64b9c4b -Zlib.v1.3.1+0.x86_64-linux-musl.tar.gz/md5/95d735bba178da4b8bee23903419919c -Zlib.v1.3.1+0.x86_64-linux-musl.tar.gz/sha512/370370f08133a720e3fbedcc434f102dc95225fda3ec8a399e782851bd4be57fb2b64a3ed62dc0559fb0c58d2e28db9b9e960efafd940982e4cb6652be0e81f1 -Zlib.v1.3.1+0.x86_64-unknown-freebsd.tar.gz/md5/df158f50fdb8ac1179fe6dad3bc62713 -Zlib.v1.3.1+0.x86_64-unknown-freebsd.tar.gz/sha512/f4ba4ccfeaf3fd2e172a2d5b3b1ae083ee9854022e71e062e29423e4179cb1fc49b2b99df49b3f5f231e2a0c5becc59b89644e9dcaf0fda9c97e83af7ea1c25d -Zlib.v1.3.1+0.x86_64-w64-mingw32.tar.gz/md5/9cc735c54ddf5d1ea0db60e05d6631ea -Zlib.v1.3.1+0.x86_64-w64-mingw32.tar.gz/sha512/8a2fd20944866cb7f717517ea0b80a134466e063f85bec87ffba56ca844f983f91060dfdc65f8faee1981d7329348c827b723aaad4fea36041e710b9e35c43de +Zlib.v1.3.1+1.aarch64-apple-darwin.tar.gz/md5/50b48e14f0b3578e3f398d130749a25d +Zlib.v1.3.1+1.aarch64-apple-darwin.tar.gz/sha512/d970e183035b3615b410f7b0da2c7a1d516234744491d65ed1ebc3800b55732f20bf00fcbb0cf91289b8b4660915282873fb23788896713cf8dfae2984a8fd85 +Zlib.v1.3.1+1.aarch64-linux-gnu.tar.gz/md5/ee42c0bae86fc39968c8cd6a77a801bf +Zlib.v1.3.1+1.aarch64-linux-gnu.tar.gz/sha512/5d21cbeab03d44008c6cbad114d45c917ebee2fe98de6b19686f4f6ba1fc67eeedf968b94ed1c2d4efb89e93be9efa342bcc8a57cb8a505085d177abae14bc2d +Zlib.v1.3.1+1.aarch64-linux-musl.tar.gz/md5/9091d1288736b218f7b016791dc1a9c8 +Zlib.v1.3.1+1.aarch64-linux-musl.tar.gz/sha512/b49cbfe734beb2af9ef8e847542d006765345cbb08aee0854779e35e03c98df25c93539b046547c6b66029987c49499ddf6cb207824b1e376900bfceaa79691a +Zlib.v1.3.1+1.aarch64-unknown-freebsd.tar.gz/md5/c73793872e3a2259519276b3ab2899ce +Zlib.v1.3.1+1.aarch64-unknown-freebsd.tar.gz/sha512/ce1e3ed5dfb01653471ace4c0cb2d8b521ccd02bc2a2c537e433a0dc497906ad21008645c645f2e0f2bb1f39c40e9a68d8cca0aeddc74ade0e188dc80748c2e8 +Zlib.v1.3.1+1.armv6l-linux-gnueabihf.tar.gz/md5/b686c85047b7dad2c2f08d1d16e7978a +Zlib.v1.3.1+1.armv6l-linux-gnueabihf.tar.gz/sha512/511fda619519dccedb264988e3b59a0e0fbf8f73d3ae290f238346209ebc0202a22f945257cea19afef64246574285e0322901a46bb48d7b48364c1e2eacd801 +Zlib.v1.3.1+1.armv6l-linux-musleabihf.tar.gz/md5/374be5cb926876f3f0492cfe0e193220 +Zlib.v1.3.1+1.armv6l-linux-musleabihf.tar.gz/sha512/4d3a2cc0c7c48146e63ed098da5a5acad75517197adc965550c123f7f8bcee0811a27be76fa37b6b0515eee4b5ba1c1a85c854e7b23bea36b5e21671805bedce +Zlib.v1.3.1+1.armv7l-linux-gnueabihf.tar.gz/md5/9febbc6a3d492e34c9ed53c95f3b799f +Zlib.v1.3.1+1.armv7l-linux-gnueabihf.tar.gz/sha512/4cee0e2cf572eb91028a09ef356e1aa6360949e046ceec03bd37574295ddcc4a7cefca9276f7565f152697d55b35f62af2ab107cdbf402b42846818629fea9c7 +Zlib.v1.3.1+1.armv7l-linux-musleabihf.tar.gz/md5/5d0d59a6cbbd1e63193ba6f7dbb755f9 +Zlib.v1.3.1+1.armv7l-linux-musleabihf.tar.gz/sha512/ee3f48b354168342ef63509b19a26aca3301fb3e5f4f6898afe2d3b44ee3380515efd6ced5d4e06e69736d851d19352deb9595bad82c051caccaee8c55e629d8 +Zlib.v1.3.1+1.i686-linux-gnu.tar.gz/md5/834350a64b2302a9caf0250a8f6068e5 +Zlib.v1.3.1+1.i686-linux-gnu.tar.gz/sha512/63dc158c4dfc42db97875893fcdd9784d9487af855bd576dbe04d1b967ad64510222df74a4cfb1b7e67386329d2a5686d7931b81720883fc1924f0d706a0a711 +Zlib.v1.3.1+1.i686-linux-musl.tar.gz/md5/e4f96efdeafa3d74c7c348059a8dc46a +Zlib.v1.3.1+1.i686-linux-musl.tar.gz/sha512/b47a571d94887ddcab8d7b50c6dce3afed3f56513a9d1859feaefebfad4a271d428b440df1d19ef3c2ed01ca4c8fd121ffc1572f5e252f27d0930f616cb47f18 
+Zlib.v1.3.1+1.i686-w64-mingw32.tar.gz/md5/aaa1500c06b280d142e2900dbedf2a8f +Zlib.v1.3.1+1.i686-w64-mingw32.tar.gz/sha512/bc6668baf33bc8e130ae6a72f6cd89d9f1ccc95d2f3a3bcef20cde03ed7602de511f7646feed918918a24d8a2221a0be39eb2c0884c1adb6fe0d67b91cceb683 +Zlib.v1.3.1+1.powerpc64le-linux-gnu.tar.gz/md5/27dcad8557994cfd89d6fa7072bb843c +Zlib.v1.3.1+1.powerpc64le-linux-gnu.tar.gz/sha512/3b388dd286b273881d4344cff61c7da316c2bd2bab93072bf47ce4cb1cf9662158351b8febb0d5b1f8dfd9bc73cd32f7cae37fdd19b0ca91531bd3375df104bb +Zlib.v1.3.1+1.x86_64-apple-darwin.tar.gz/md5/9187319377191ae8b34162b375baa5db +Zlib.v1.3.1+1.x86_64-apple-darwin.tar.gz/sha512/895203434f161926978be52a223dd49a99454651a79c1c5e0529fa064f3f7ac2d7a069fed47a577b32523df22afadd6eb97d564dbd59c5d67ed90083add13c00 +Zlib.v1.3.1+1.x86_64-linux-gnu.tar.gz/md5/55d4d982d60cb643aa8688eb031b07ee +Zlib.v1.3.1+1.x86_64-linux-gnu.tar.gz/sha512/d8f94d22ffc37df027de23b2408c2000014c8b7b6c8539feca669ac1f2dbbe1679ca534c3be4d32c90fe38bbba27c795689226962fb067346b5ca213e64b9c4b +Zlib.v1.3.1+1.x86_64-linux-musl.tar.gz/md5/95d735bba178da4b8bee23903419919c +Zlib.v1.3.1+1.x86_64-linux-musl.tar.gz/sha512/370370f08133a720e3fbedcc434f102dc95225fda3ec8a399e782851bd4be57fb2b64a3ed62dc0559fb0c58d2e28db9b9e960efafd940982e4cb6652be0e81f1 +Zlib.v1.3.1+1.x86_64-unknown-freebsd.tar.gz/md5/df158f50fdb8ac1179fe6dad3bc62713 +Zlib.v1.3.1+1.x86_64-unknown-freebsd.tar.gz/sha512/f4ba4ccfeaf3fd2e172a2d5b3b1ae083ee9854022e71e062e29423e4179cb1fc49b2b99df49b3f5f231e2a0c5becc59b89644e9dcaf0fda9c97e83af7ea1c25d +Zlib.v1.3.1+1.x86_64-w64-mingw32.tar.gz/md5/9cc735c54ddf5d1ea0db60e05d6631ea +Zlib.v1.3.1+1.x86_64-w64-mingw32.tar.gz/sha512/8a2fd20944866cb7f717517ea0b80a134466e063f85bec87ffba56ca844f983f91060dfdc65f8faee1981d7329348c827b723aaad4fea36041e710b9e35c43de zlib-51b7f2abdade71cd9bb0e7a373ef2610ec6f9daf.tar.gz/md5/7ce1b2766499af7d948130113b649028 zlib-51b7f2abdade71cd9bb0e7a373ef2610ec6f9daf.tar.gz/sha512/79d032b8c93260ce6b9806f2289cdccce67e9d80865b5bb39ac46dadffc8ee009da51c551eead59c56249c7adfa164c1d5ebcf2b10a8645e0b11b5650176cb24 diff --git a/deps/clang.version b/deps/clang.version index fcd55b72de5ff..0f49ecdd649f0 100644 --- a/deps/clang.version +++ b/deps/clang.version @@ -3,4 +3,4 @@ ## jll artifact # Clang (paired with LLVM, only here as a JLL download) CLANG_JLL_NAME := Clang -CLANG_JLL_VER := 18.1.7+2 +CLANG_JLL_VER := 18.1.7+3 diff --git a/deps/lld.version b/deps/lld.version index 3ca9960164e27..8c7008fc93d7d 100644 --- a/deps/lld.version +++ b/deps/lld.version @@ -2,4 +2,4 @@ ## jll artifact LLD_JLL_NAME := LLD -LLD_JLL_VER := 18.1.7+2 +LLD_JLL_VER := 18.1.7+3 diff --git a/deps/llvm-tools.version b/deps/llvm-tools.version index 1fcc8944dc769..8a1159fd69174 100644 --- a/deps/llvm-tools.version +++ b/deps/llvm-tools.version @@ -3,5 +3,5 @@ ## jll artifact # LLVM_tools (downloads LLVM_jll to get things like `lit` and `opt`) LLVM_TOOLS_JLL_NAME := LLVM -LLVM_TOOLS_JLL_VER := 18.1.7+2 -LLVM_TOOLS_ASSERT_JLL_VER := 18.1.7+2 +LLVM_TOOLS_JLL_VER := 18.1.7+3 +LLVM_TOOLS_ASSERT_JLL_VER := 18.1.7+3 diff --git a/deps/llvm.version b/deps/llvm.version index 8e4180ef5a277..be03d1529ce7c 100644 --- a/deps/llvm.version +++ b/deps/llvm.version @@ -2,7 +2,7 @@ ## jll artifact LLVM_JLL_NAME := libLLVM -LLVM_ASSERT_JLL_VER := 18.1.7+2 +LLVM_ASSERT_JLL_VER := 18.1.7+3 ## source build # Version number of LLVM LLVM_VER := 18.1.7 diff --git a/deps/nghttp2.version b/deps/nghttp2.version index e9587297d0e32..c9a39ea5ae757 100644 --- a/deps/nghttp2.version +++ b/deps/nghttp2.version @@ -3,4 +3,4 @@ 
NGHTTP2_JLL_NAME := nghttp2 ## source build -NGHTTP2_VER := 1.60.0 +NGHTTP2_VER := 1.63.0 diff --git a/stdlib/CompilerSupportLibraries_jll/Project.toml b/stdlib/CompilerSupportLibraries_jll/Project.toml index 5aab865b5f6fc..12806e4bc427a 100644 --- a/stdlib/CompilerSupportLibraries_jll/Project.toml +++ b/stdlib/CompilerSupportLibraries_jll/Project.toml @@ -4,7 +4,7 @@ uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" # NOTE: When updating this, also make sure to update the value # `CSL_NEXT_GLIBCXX_VERSION` in `Make.inc`, to properly disable # automatic usage of BB-built CSLs on extremely up-to-date systems! -version = "1.1.1+0" +version = "1.2.0+0" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/GMP_jll/Project.toml b/stdlib/GMP_jll/Project.toml index c8fcfe4f2b845..3a6fa12c95aef 100644 --- a/stdlib/GMP_jll/Project.toml +++ b/stdlib/GMP_jll/Project.toml @@ -1,6 +1,6 @@ name = "GMP_jll" uuid = "781609d7-10c4-51f6-84f2-b8444358ff6d" -version = "6.3.0+0" +version = "6.3.0+1" [deps] Artifacts = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" diff --git a/stdlib/LLD_jll/Project.toml b/stdlib/LLD_jll/Project.toml index 6a6cc72aa3c62..1aafd275d99b7 100644 --- a/stdlib/LLD_jll/Project.toml +++ b/stdlib/LLD_jll/Project.toml @@ -1,6 +1,6 @@ name = "LLD_jll" uuid = "d55e3150-da41-5e91-b323-ecfd1eec6109" -version = "18.1.7+2" +version = "18.1.7+3" [deps] Zlib_jll = "83775a58-1f1d-513f-b197-d71354ab007a" diff --git a/stdlib/LibUV_jll/Project.toml b/stdlib/LibUV_jll/Project.toml index fb03c6b996048..74aae1c9249df 100644 --- a/stdlib/LibUV_jll/Project.toml +++ b/stdlib/LibUV_jll/Project.toml @@ -1,6 +1,6 @@ name = "LibUV_jll" uuid = "183b4373-6708-53ba-ad28-60e28bb38547" -version = "2.0.1+18" +version = "2.0.1+19" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/LibUnwind_jll/Project.toml b/stdlib/LibUnwind_jll/Project.toml index 03ccfcd1449d8..b43f1c537ce5a 100644 --- a/stdlib/LibUnwind_jll/Project.toml +++ b/stdlib/LibUnwind_jll/Project.toml @@ -1,6 +1,6 @@ name = "LibUnwind_jll" uuid = "745a5e78-f969-53e9-954f-d19f2f74f4e3" -version = "1.8.1+1" +version = "1.8.1+2" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/MPFR_jll/Project.toml b/stdlib/MPFR_jll/Project.toml index eaa8d0988b2ca..a9987ccfa38f6 100644 --- a/stdlib/MPFR_jll/Project.toml +++ b/stdlib/MPFR_jll/Project.toml @@ -1,6 +1,6 @@ name = "MPFR_jll" uuid = "3a97d323-0669-5f0c-9066-3539efd106a3" -version = "4.2.1+0" +version = "4.2.1+1" [deps] GMP_jll = "781609d7-10c4-51f6-84f2-b8444358ff6d" diff --git a/stdlib/Manifest.toml b/stdlib/Manifest.toml index f9fb307190838..8953aa93ce4b2 100644 --- a/stdlib/Manifest.toml +++ b/stdlib/Manifest.toml @@ -23,7 +23,7 @@ version = "1.11.0" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "1.1.1+0" +version = "1.2.0+0" [[deps.Dates]] deps = ["Printf"] @@ -58,7 +58,7 @@ version = "1.11.0" [[deps.GMP_jll]] deps = ["Artifacts", "Libdl"] uuid = "781609d7-10c4-51f6-84f2-b8444358ff6d" -version = "6.3.0+0" +version = "6.3.0+1" [[deps.InteractiveUtils]] deps = ["Markdown"] @@ -73,7 +73,7 @@ version = "1.12.0" [[deps.LLD_jll]] deps = ["Artifacts", "Libdl", "Zlib_jll", "libLLVM_jll"] uuid = "d55e3150-da41-5e91-b323-ecfd1eec6109" -version = "18.1.7+2" +version = "18.1.7+3" [[deps.LLVMLibUnwind_jll]] deps = ["Artifacts", "Libdl"] @@ -113,12 +113,12 @@ version = "1.11.0+1" [[deps.LibUV_jll]] deps = ["Artifacts", "Libdl"] uuid = "183b4373-6708-53ba-ad28-60e28bb38547" -version = 
"2.0.1+17" +version = "2.0.1+19" [[deps.LibUnwind_jll]] deps = ["Artifacts", "Libdl"] uuid = "745a5e78-f969-53e9-954f-d19f2f74f4e3" -version = "1.8.1+1" +version = "1.8.1+2" [[deps.Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" @@ -136,7 +136,7 @@ version = "1.11.0" [[deps.MPFR_jll]] deps = ["Artifacts", "GMP_jll", "Libdl"] uuid = "3a97d323-0669-5f0c-9066-3539efd106a3" -version = "4.2.1+0" +version = "4.2.1+1" [[deps.Markdown]] deps = ["Base64", "JuliaSyntaxHighlighting", "StyledStrings"] @@ -146,7 +146,7 @@ version = "1.11.0" [[deps.MbedTLS_jll]] deps = ["Artifacts", "Libdl"] uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.6+0" +version = "2.28.6+1" [[deps.Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" @@ -163,17 +163,17 @@ version = "1.2.0" [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.28+2" +version = "0.3.28+3" [[deps.OpenLibm_jll]] deps = ["Artifacts", "Libdl"] uuid = "05823500-19ac-5b8b-9628-191a04bc5112" -version = "0.8.1+2" +version = "0.8.1+3" [[deps.PCRE2_jll]] deps = ["Artifacts", "Libdl"] uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" -version = "10.43.0+0" +version = "10.43.0+1" [[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "FileWatching", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "Random", "SHA", "TOML", "Tar", "UUIDs", "p7zip_jll"] @@ -243,7 +243,7 @@ version = "1.11.0" [[deps.SuiteSparse_jll]] deps = ["Artifacts", "Libdl", "libblastrampoline_jll"] uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" -version = "7.8.0+0" +version = "7.8.0+1" [[deps.TOML]] deps = ["Dates"] @@ -272,17 +272,17 @@ version = "1.11.0" [[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.3.1+0" +version = "1.3.1+1" [[deps.dSFMT_jll]] deps = ["Artifacts", "Libdl"] uuid = "05ff407c-b0c1-5878-9df8-858cc2e60c36" -version = "2.2.5+0" +version = "2.2.5+1" [[deps.libLLVM_jll]] deps = ["Artifacts", "Libdl"] uuid = "8f36deef-c2a5-5394-99ed-8e07531fb29a" -version = "18.1.7+2" +version = "18.1.7+3" [[deps.libblastrampoline_jll]] deps = ["Artifacts", "Libdl"] @@ -292,9 +292,9 @@ version = "5.11.0+0" [[deps.nghttp2_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.60.0+0" +version = "1.63.0+1" [[deps.p7zip_jll]] deps = ["Artifacts", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.5.0+0" +version = "17.5.0+1" diff --git a/stdlib/MbedTLS_jll/Project.toml b/stdlib/MbedTLS_jll/Project.toml index 1fe9b5e702c61..61f3ea0d8b4dc 100644 --- a/stdlib/MbedTLS_jll/Project.toml +++ b/stdlib/MbedTLS_jll/Project.toml @@ -1,6 +1,6 @@ name = "MbedTLS_jll" uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.6+0" +version = "2.28.6+1" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/OpenBLAS_jll/Project.toml b/stdlib/OpenBLAS_jll/Project.toml index a9a1a04facff5..01e3af1d9467c 100644 --- a/stdlib/OpenBLAS_jll/Project.toml +++ b/stdlib/OpenBLAS_jll/Project.toml @@ -1,6 +1,6 @@ name = "OpenBLAS_jll" uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.28+2" +version = "0.3.28+3" [deps] # See note in `src/OpenBLAS_jll.jl` about this dependency. 
diff --git a/stdlib/OpenLibm_jll/Project.toml b/stdlib/OpenLibm_jll/Project.toml index f6162f402bfcf..a4c559e1ff4ef 100644 --- a/stdlib/OpenLibm_jll/Project.toml +++ b/stdlib/OpenLibm_jll/Project.toml @@ -1,6 +1,6 @@ name = "OpenLibm_jll" uuid = "05823500-19ac-5b8b-9628-191a04bc5112" -version = "0.8.1+2" +version = "0.8.1+3" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/PCRE2_jll/Project.toml b/stdlib/PCRE2_jll/Project.toml index f9b3affb51b63..ae1fb74922d79 100644 --- a/stdlib/PCRE2_jll/Project.toml +++ b/stdlib/PCRE2_jll/Project.toml @@ -1,6 +1,6 @@ name = "PCRE2_jll" uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" -version = "10.43.0+0" +version = "10.43.0+1" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/SuiteSparse_jll/Project.toml b/stdlib/SuiteSparse_jll/Project.toml index 39b8447138a2d..c91ef6743d653 100644 --- a/stdlib/SuiteSparse_jll/Project.toml +++ b/stdlib/SuiteSparse_jll/Project.toml @@ -1,6 +1,6 @@ name = "SuiteSparse_jll" uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" -version = "7.8.0+0" +version = "7.8.0+1" [deps] libblastrampoline_jll = "8e850b90-86db-534c-a0d3-1478176c7d93" diff --git a/stdlib/Zlib_jll/Project.toml b/stdlib/Zlib_jll/Project.toml index bb5771654430b..dfe9ce845c8e0 100644 --- a/stdlib/Zlib_jll/Project.toml +++ b/stdlib/Zlib_jll/Project.toml @@ -1,6 +1,6 @@ name = "Zlib_jll" uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.3.1+0" +version = "1.3.1+1" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/dSFMT_jll/Project.toml b/stdlib/dSFMT_jll/Project.toml index 0db19e602f67b..ca51184b75264 100644 --- a/stdlib/dSFMT_jll/Project.toml +++ b/stdlib/dSFMT_jll/Project.toml @@ -1,6 +1,6 @@ name = "dSFMT_jll" uuid = "05ff407c-b0c1-5878-9df8-858cc2e60c36" -version = "2.2.5+0" +version = "2.2.5+1" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/libLLVM_jll/Project.toml b/stdlib/libLLVM_jll/Project.toml index a0eac13b3ab23..13669ec173678 100644 --- a/stdlib/libLLVM_jll/Project.toml +++ b/stdlib/libLLVM_jll/Project.toml @@ -1,6 +1,6 @@ name = "libLLVM_jll" uuid = "8f36deef-c2a5-5394-99ed-8e07531fb29a" -version = "18.1.7+2" +version = "18.1.7+3" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/nghttp2_jll/Project.toml b/stdlib/nghttp2_jll/Project.toml index 88e60941f65ee..acc9444ab4a26 100644 --- a/stdlib/nghttp2_jll/Project.toml +++ b/stdlib/nghttp2_jll/Project.toml @@ -1,6 +1,6 @@ name = "nghttp2_jll" uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.60.0+0" +version = "1.63.0+1" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/nghttp2_jll/test/runtests.jl b/stdlib/nghttp2_jll/test/runtests.jl index b6ddefb8222cd..d667ce53e5252 100644 --- a/stdlib/nghttp2_jll/test/runtests.jl +++ b/stdlib/nghttp2_jll/test/runtests.jl @@ -11,5 +11,5 @@ end @testset "nghttp2_jll" begin info = unsafe_load(ccall((:nghttp2_version,libnghttp2), Ptr{nghttp2_info}, (Cint,), 0)) - @test VersionNumber(unsafe_string(info.version_str)) == v"1.60.0" + @test VersionNumber(unsafe_string(info.version_str)) == v"1.63.0" end diff --git a/stdlib/p7zip_jll/Project.toml b/stdlib/p7zip_jll/Project.toml index 6bca9d1d0545b..09a39880af418 100644 --- a/stdlib/p7zip_jll/Project.toml +++ b/stdlib/p7zip_jll/Project.toml @@ -1,6 +1,6 @@ name = "p7zip_jll" uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.5.0+0" +version = "17.5.0+1" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" From 969754d54a65c92a50ef6d8f87355bca91d0615c 
Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Sun, 13 Oct 2024 18:02:43 +0200 Subject: [PATCH 186/537] typo in `Compiler.Effects` doc string: `checkbounds` -> `boundscheck` (#56140) Follows up on #56060 --- base/compiler/effects.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/compiler/effects.jl b/base/compiler/effects.jl index b22b9396408e3..fb35162134ffa 100644 --- a/base/compiler/effects.jl +++ b/base/compiler/effects.jl @@ -47,7 +47,7 @@ following meanings: * `ALWAYS_TRUE`: this method is guaranteed to not execute any undefined behavior (for any input). * `ALWAYS_FALSE`: this method may execute undefined behavior. * `NOUB_IF_NOINBOUNDS`: this method is guaranteed to not execute any undefined behavior - under the assumption that its `@checkbounds` code is not elided (which happens when the + under the assumption that its `@boundscheck` code is not elided (which happens when the caller does not set nor propagate the `@inbounds` context) Note that undefined behavior may technically cause the method to violate any other effect assertions (such as `:consistent` or `:effect_free`) as well, but we do not model this, From 7f97a9d0aa617c3c8bcc9b3cbc7408dbb923f236 Mon Sep 17 00:00:00 2001 From: cyhan Date: Mon, 14 Oct 2024 00:54:57 +0800 Subject: [PATCH 187/537] HISTORY: fix missing links (#56137) --- HISTORY.md | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/HISTORY.md b/HISTORY.md index 7fb01c7e9a0e9..6142747273864 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -221,6 +221,7 @@ Tooling Improvements [#35856]: https://github.com/JuliaLang/julia/issues/35856 [#38064]: https://github.com/JuliaLang/julia/issues/38064 [#43845]: https://github.com/JuliaLang/julia/issues/43845 +[#45641]: https://github.com/JuliaLang/julia/issues/45641 [#46501]: https://github.com/JuliaLang/julia/issues/46501 [#47354]: https://github.com/JuliaLang/julia/issues/47354 [#47679]: https://github.com/JuliaLang/julia/issues/47679 @@ -238,6 +239,7 @@ Tooling Improvements [#50661]: https://github.com/JuliaLang/julia/issues/50661 [#50795]: https://github.com/JuliaLang/julia/issues/50795 [#50797]: https://github.com/JuliaLang/julia/issues/50797 +[#50864]: https://github.com/JuliaLang/julia/issues/50864 [#50958]: https://github.com/JuliaLang/julia/issues/50958 [#51229]: https://github.com/JuliaLang/julia/issues/51229 [#51416]: https://github.com/JuliaLang/julia/issues/51416 @@ -251,12 +253,10 @@ Tooling Improvements [#51799]: https://github.com/JuliaLang/julia/issues/51799 [#51897]: https://github.com/JuliaLang/julia/issues/51897 [#51929]: https://github.com/JuliaLang/julia/issues/51929 -[#52049]: https://github.com/JuliaLang/julia/issues/52049 [#52096]: https://github.com/JuliaLang/julia/issues/52096 [#52123]: https://github.com/JuliaLang/julia/issues/52123 [#52139]: https://github.com/JuliaLang/julia/issues/52139 [#52180]: https://github.com/JuliaLang/julia/issues/52180 -[#52196]: https://github.com/JuliaLang/julia/issues/52196 [#52400]: https://github.com/JuliaLang/julia/issues/52400 [#52413]: https://github.com/JuliaLang/julia/issues/52413 [#52461]: https://github.com/JuliaLang/julia/issues/52461 @@ -270,6 +270,7 @@ Tooling Improvements [#52898]: https://github.com/JuliaLang/julia/issues/52898 [#52957]: https://github.com/JuliaLang/julia/issues/52957 [#53262]: https://github.com/JuliaLang/julia/issues/53262 +[#53352]: https://github.com/JuliaLang/julia/issues/53352 Julia v1.10 Release Notes @@ -416,7 +417,6 @@ Deprecated or removed [#44247]: 
https://github.com/JuliaLang/julia/issues/44247 [#45164]: https://github.com/JuliaLang/julia/issues/45164 [#45396]: https://github.com/JuliaLang/julia/issues/45396 -[#45641]: https://github.com/JuliaLang/julia/issues/45641 [#45962]: https://github.com/JuliaLang/julia/issues/45962 [#46196]: https://github.com/JuliaLang/julia/issues/46196 [#46372]: https://github.com/JuliaLang/julia/issues/46372 @@ -433,6 +433,7 @@ Deprecated or removed [#48899]: https://github.com/JuliaLang/julia/issues/48899 [#48979]: https://github.com/JuliaLang/julia/issues/48979 [#49020]: https://github.com/JuliaLang/julia/issues/49020 +[#49052]: https://github.com/JuliaLang/julia/issues/49052 [#49110]: https://github.com/JuliaLang/julia/issues/49110 [#49266]: https://github.com/JuliaLang/julia/issues/49266 [#49405]: https://github.com/JuliaLang/julia/issues/49405 @@ -656,11 +657,13 @@ Tooling Improvements [#42902]: https://github.com/JuliaLang/julia/issues/42902 [#43270]: https://github.com/JuliaLang/julia/issues/43270 [#43334]: https://github.com/JuliaLang/julia/issues/43334 +[#43536]: https://github.com/JuliaLang/julia/issues/43536 [#44137]: https://github.com/JuliaLang/julia/issues/44137 [#44266]: https://github.com/JuliaLang/julia/issues/44266 [#44358]: https://github.com/JuliaLang/julia/issues/44358 [#44360]: https://github.com/JuliaLang/julia/issues/44360 [#44512]: https://github.com/JuliaLang/julia/issues/44512 +[#44527]: https://github.com/JuliaLang/julia/issues/44527 [#44534]: https://github.com/JuliaLang/julia/issues/44534 [#44571]: https://github.com/JuliaLang/julia/issues/44571 [#44714]: https://github.com/JuliaLang/julia/issues/44714 @@ -690,6 +693,8 @@ Tooling Improvements [#46609]: https://github.com/JuliaLang/julia/issues/46609 [#46862]: https://github.com/JuliaLang/julia/issues/46862 [#46976]: https://github.com/JuliaLang/julia/issues/46976 +[#47117]: https://github.com/JuliaLang/julia/issues/47117 +[#47184]: https://github.com/JuliaLang/julia/issues/47184 [#47367]: https://github.com/JuliaLang/julia/issues/47367 [#47392]: https://github.com/JuliaLang/julia/issues/47392 @@ -984,6 +989,7 @@ Tooling Improvements [#43919]: https://github.com/JuliaLang/julia/issues/43919 [#44080]: https://github.com/JuliaLang/julia/issues/44080 [#44136]: https://github.com/JuliaLang/julia/issues/44136 +[#45064]: https://github.com/JuliaLang/julia/issues/45064 Julia v1.7 Release Notes ======================== @@ -1711,9 +1717,9 @@ Tooling Improvements [#37753]: https://github.com/JuliaLang/julia/issues/37753 [#37829]: https://github.com/JuliaLang/julia/issues/37829 [#37844]: https://github.com/JuliaLang/julia/issues/37844 +[#37928]: https://github.com/JuliaLang/julia/issues/37928 [#37973]: https://github.com/JuliaLang/julia/issues/37973 [#38042]: https://github.com/JuliaLang/julia/issues/38042 -[#38062]: https://github.com/JuliaLang/julia/issues/38062 [#38168]: https://github.com/JuliaLang/julia/issues/38168 [#38449]: https://github.com/JuliaLang/julia/issues/38449 [#38475]: https://github.com/JuliaLang/julia/issues/38475 @@ -1956,6 +1962,7 @@ Tooling Improvements [#25930]: https://github.com/JuliaLang/julia/issues/25930 [#26872]: https://github.com/JuliaLang/julia/issues/26872 [#28789]: https://github.com/JuliaLang/julia/issues/28789 +[#28811]: https://github.com/JuliaLang/julia/issues/28811 [#29240]: https://github.com/JuliaLang/julia/issues/29240 [#29333]: https://github.com/JuliaLang/julia/issues/29333 [#29411]: https://github.com/JuliaLang/julia/issues/29411 @@ -1971,6 +1978,7 @@ Tooling Improvements [#33864]: 
https://github.com/JuliaLang/julia/issues/33864 [#33886]: https://github.com/JuliaLang/julia/issues/33886 [#33937]: https://github.com/JuliaLang/julia/issues/33937 +[#34126]: https://github.com/JuliaLang/julia/issues/34126 [#34149]: https://github.com/JuliaLang/julia/issues/34149 [#34199]: https://github.com/JuliaLang/julia/issues/34199 [#34200]: https://github.com/JuliaLang/julia/issues/34200 @@ -1997,9 +2005,12 @@ Tooling Improvements [#34896]: https://github.com/JuliaLang/julia/issues/34896 [#34953]: https://github.com/JuliaLang/julia/issues/34953 [#35001]: https://github.com/JuliaLang/julia/issues/35001 +[#35057]: https://github.com/JuliaLang/julia/issues/35057 [#35078]: https://github.com/JuliaLang/julia/issues/35078 +[#35085]: https://github.com/JuliaLang/julia/issues/35085 [#35094]: https://github.com/JuliaLang/julia/issues/35094 [#35108]: https://github.com/JuliaLang/julia/issues/35108 +[#35113]: https://github.com/JuliaLang/julia/issues/35113 [#35124]: https://github.com/JuliaLang/julia/issues/35124 [#35132]: https://github.com/JuliaLang/julia/issues/35132 [#35138]: https://github.com/JuliaLang/julia/issues/35138 @@ -2310,6 +2321,7 @@ Tooling Improvements [#32534]: https://github.com/JuliaLang/julia/issues/32534 [#32600]: https://github.com/JuliaLang/julia/issues/32600 [#32628]: https://github.com/JuliaLang/julia/issues/32628 +[#32651]: https://github.com/JuliaLang/julia/issues/32651 [#32653]: https://github.com/JuliaLang/julia/issues/32653 [#32729]: https://github.com/JuliaLang/julia/issues/32729 [#32814]: https://github.com/JuliaLang/julia/issues/32814 @@ -2319,6 +2331,7 @@ Tooling Improvements [#32851]: https://github.com/JuliaLang/julia/issues/32851 [#32872]: https://github.com/JuliaLang/julia/issues/32872 [#32875]: https://github.com/JuliaLang/julia/issues/32875 +[#32918]: https://github.com/JuliaLang/julia/issues/32918 Julia v1.2 Release Notes ======================== @@ -2461,6 +2474,7 @@ External dependencies [#31009]: https://github.com/JuliaLang/julia/issues/31009 [#31125]: https://github.com/JuliaLang/julia/issues/31125 [#31211]: https://github.com/JuliaLang/julia/issues/31211 +[#31223]: https://github.com/JuliaLang/julia/issues/31223 [#31230]: https://github.com/JuliaLang/julia/issues/31230 [#31235]: https://github.com/JuliaLang/julia/issues/31235 [#31310]: https://github.com/JuliaLang/julia/issues/31310 @@ -4428,6 +4442,7 @@ Command-line option changes [#26932]: https://github.com/JuliaLang/julia/issues/26932 [#26935]: https://github.com/JuliaLang/julia/issues/26935 [#26980]: https://github.com/JuliaLang/julia/issues/26980 +[#26991]: https://github.com/JuliaLang/julia/issues/26991 [#26997]: https://github.com/JuliaLang/julia/issues/26997 [#27067]: https://github.com/JuliaLang/julia/issues/27067 [#27071]: https://github.com/JuliaLang/julia/issues/27071 @@ -4462,6 +4477,7 @@ Command-line option changes [#28155]: https://github.com/JuliaLang/julia/issues/28155 [#28266]: https://github.com/JuliaLang/julia/issues/28266 [#28302]: https://github.com/JuliaLang/julia/issues/28302 +[#28310]: https://github.com/JuliaLang/julia/issues/28310 Julia v0.6.0 Release Notes ========================== From 60291736aa81fa5b3a850cd7358fe0ae155b4f9d Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Sun, 13 Oct 2024 19:27:22 +0200 Subject: [PATCH 188/537] OpenBLAS: Fix cross-compilation detection for source build. (#56139) We may be cross-compiling Linux-to-Linux, in which case `BUILD_OS` == `OS`, so look at `XC_HOST` to determine whether we're cross compiling. 
--- deps/openblas.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/openblas.mk b/deps/openblas.mk index affd1c7a7aa55..8cea044ca348f 100644 --- a/deps/openblas.mk +++ b/deps/openblas.mk @@ -43,7 +43,7 @@ OPENBLAS_FFLAGS := $(JFFLAGS) $(USE_BLAS_FFLAGS) OPENBLAS_CFLAGS := -O2 # Decide whether to build for 32-bit or 64-bit arch -ifneq ($(BUILD_OS),$(OS)) +ifneq ($(XC_HOST),) OPENBLAS_BUILD_OPTS += OSNAME=$(OS) CROSS=1 HOSTCC=$(HOSTCC) CROSS_SUFFIX=$(CROSS_COMPILE) endif ifeq ($(OS),WINNT) From 67c93b9a19cfe00211be595b2d5c50cfcfcece09 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sun, 13 Oct 2024 23:24:25 +0530 Subject: [PATCH 189/537] `diag` for `BandedMatrix`es for off-limit bands (#56065) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, one can only obtain the `diag` for a `BandedMatrix` (such as a `Diagonal`) when the band index is bounded by the size of the matrix. This PR relaxes this requirement to match the behavior for arrays, where `diag` returns an empty vector for a large band index instead of throwing an error. ```julia julia> D = Diagonal(ones(4)) 4×4 Diagonal{Float64, Vector{Float64}}: 1.0 ⋅ ⋅ ⋅ ⋅ 1.0 ⋅ ⋅ ⋅ ⋅ 1.0 ⋅ ⋅ ⋅ ⋅ 1.0 julia> diag(D, 10) Float64[] julia> diag(Array(D), 10) Float64[] ``` Something similar for `SymTridiagonal` is being done in https://github.com/JuliaLang/julia/pull/56014 --- stdlib/LinearAlgebra/src/bidiag.jl | 11 ++++------- stdlib/LinearAlgebra/src/diagonal.jl | 11 ++++------- stdlib/LinearAlgebra/src/tridiag.jl | 13 +++++-------- stdlib/LinearAlgebra/test/bidiag.jl | 4 ++-- stdlib/LinearAlgebra/test/diagonal.jl | 4 ++-- stdlib/LinearAlgebra/test/tridiag.jl | 13 ++++--------- 6 files changed, 21 insertions(+), 35 deletions(-) diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index 381afd2f09a61..a34df37153cd2 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -404,20 +404,17 @@ end function diag(M::Bidiagonal, n::Integer=0) # every branch call similar(..., ::Int) to make sure the # same vector type is returned independent of n + v = similar(M.dv, max(0, length(M.dv)-abs(n))) if n == 0 - return copyto!(similar(M.dv, length(M.dv)), M.dv) + copyto!(v, M.dv) elseif (n == 1 && M.uplo == 'U') || (n == -1 && M.uplo == 'L') - return copyto!(similar(M.ev, length(M.ev)), M.ev) + copyto!(v, M.ev) elseif -size(M,1) <= n <= size(M,1) - v = similar(M.dv, size(M,1)-abs(n)) for i in eachindex(v) v[i] = M[BandIndex(n,i)] end - return v - else - throw(ArgumentError(LazyString(lazy"requested diagonal, $n, must be at least $(-size(M, 1)) ", - lazy"and at most $(size(M, 2)) for an $(size(M, 1))-by-$(size(M, 2)) matrix"))) end + return v end function +(A::Bidiagonal, B::Bidiagonal) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 0c93024f33a9a..aabfb3e8ba114 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -773,18 +773,15 @@ permutedims(D::Diagonal, perm) = (Base.checkdims_perm(axes(D), axes(D), perm); D function diag(D::Diagonal, k::Integer=0) # every branch call similar(..., ::Int) to make sure the # same vector type is returned independent of k + v = similar(D.diag, max(0, length(D.diag)-abs(k))) if k == 0 - return copyto!(similar(D.diag, length(D.diag)), D.diag) - elseif -size(D,1) <= k <= size(D,1) - v = similar(D.diag, size(D,1)-abs(k)) + copyto!(v, D.diag) + else for i in eachindex(v) v[i] = D[BandIndex(k, i)] end - 
return v - else - throw(ArgumentError(LazyString(lazy"requested diagonal, $k, must be at least $(-size(D, 1)) ", - lazy"and at most $(size(D, 2)) for an $(size(D, 1))-by-$(size(D, 2)) matrix"))) end + return v end tr(D::Diagonal) = sum(tr, D.diag) det(D::Diagonal) = prod(det, D.diag) diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index c1af12514e020..d6382d2e16a43 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -662,22 +662,19 @@ issymmetric(S::Tridiagonal) = all(issymmetric, S.d) && all(Iterators.map((x, y) function diag(M::Tridiagonal, n::Integer=0) # every branch call similar(..., ::Int) to make sure the # same vector type is returned independent of n + v = similar(M.d, max(0, length(M.d)-abs(n))) if n == 0 - return copyto!(similar(M.d, length(M.d)), M.d) + copyto!(v, M.d) elseif n == -1 - return copyto!(similar(M.dl, length(M.dl)), M.dl) + copyto!(v, M.dl) elseif n == 1 - return copyto!(similar(M.du, length(M.du)), M.du) + copyto!(v, M.du) elseif abs(n) <= size(M,1) - v = similar(M.d, size(M,1)-abs(n)) for i in eachindex(v) v[i] = M[BandIndex(n,i)] end - return v - else - throw(ArgumentError(LazyString(lazy"requested diagonal, $n, must be at least $(-size(M, 1)) ", - lazy"and at most $(size(M, 2)) for an $(size(M, 1))-by-$(size(M, 2)) matrix"))) end + return v end @inline function Base.isassigned(A::Tridiagonal, i::Int, j::Int) diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index 628e59debe8b7..df30748e042b5 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -398,8 +398,8 @@ Random.seed!(1) @test (@inferred diag(T))::typeof(dv) == dv @test (@inferred diag(T, uplo === :U ? 1 : -1))::typeof(dv) == ev @test (@inferred diag(T,2))::typeof(dv) == zeros(elty, n-2) - @test_throws ArgumentError diag(T, -n - 1) - @test_throws ArgumentError diag(T, n + 1) + @test isempty(@inferred diag(T, -n - 1)) + @test isempty(@inferred diag(T, n + 1)) # test diag with another wrapped vector type gdv, gev = GenericArray(dv), GenericArray(ev) G = Bidiagonal(gdv, gev, uplo) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 85fe963e3592b..f7a9ccb705de9 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -109,8 +109,8 @@ Random.seed!(1) end @testset "diag" begin - @test_throws ArgumentError diag(D, n+1) - @test_throws ArgumentError diag(D, -n-1) + @test isempty(@inferred diag(D, n+1)) + @test isempty(@inferred diag(D, -n-1)) @test (@inferred diag(D))::typeof(dd) == dd @test (@inferred diag(D, 0))::typeof(dd) == dd @test (@inferred diag(D, 1))::typeof(dd) == zeros(elty, n-1) diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index b1d52ab8c5679..aa3baec8f6be8 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -287,13 +287,8 @@ end @test (@inferred diag(A, 1))::typeof(d) == (mat_type == Tridiagonal ? du : dl) @test (@inferred diag(A, -1))::typeof(d) == dl @test (@inferred diag(A, n-1))::typeof(d) == zeros(elty, 1) - if A isa SymTridiagonal - @test isempty(@inferred diag(A, -n - 1)) - @test isempty(@inferred diag(A, n + 1)) - else - @test_throws ArgumentError diag(A, -n - 1) - @test_throws ArgumentError diag(A, n + 1) - end + @test isempty(@inferred diag(A, -n - 1)) + @test isempty(@inferred diag(A, n + 1)) GA = mat_type == Tridiagonal ? mat_type(GenericArray.((dl, d, du))...) 
: mat_type(GenericArray.((d, dl))...) @test (@inferred diag(GA))::typeof(GenericArray(d)) == GenericArray(d) @test (@inferred diag(GA, -1))::typeof(GenericArray(d)) == GenericArray(dl) @@ -527,8 +522,8 @@ end @test @inferred diag(A, -1) == fill(M, n-1) @test_broken diag(A, -2) == fill(M, n-2) @test_broken diag(A, 2) == fill(M, n-2) - @test_throws ArgumentError diag(A, n+1) - @test_throws ArgumentError diag(A, -n-1) + @test isempty(@inferred diag(A, n+1)) + @test isempty(@inferred diag(A, -n-1)) for n in 0:2 dv, ev = fill(M, n), fill(M, max(n-1,0)) From 7241673d1858731e2f4a0c62dad6c0f64fded110 Mon Sep 17 00:00:00 2001 From: Christian Guinard <28689358+christiangnrd@users.noreply.github.com> Date: Sun, 13 Oct 2024 16:21:45 -0300 Subject: [PATCH 190/537] Port progress bar improvements from Pkg (#56125) Includes changes from https://github.com/JuliaLang/Pkg.jl/pull/4038 and https://github.com/JuliaLang/Pkg.jl/pull/4044. Co-authored-by: Kristoffer Carlsson --- base/precompilation.jl | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/base/precompilation.jl b/base/precompilation.jl index ea98b0c415ab4..7a821222c52d1 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -297,7 +297,8 @@ function show_progress(io::IO, p::MiniProgressBar; termwidth=nothing, carriagere end termwidth = @something termwidth displaysize(io)[2] max_progress_width = max(0, min(termwidth - textwidth(p.header) - textwidth(progress_text) - 10 , p.width)) - n_filled = ceil(Int, max_progress_width * perc / 100) + n_filled = floor(Int, max_progress_width * perc / 100) + partial_filled = (max_progress_width * perc / 100) - n_filled n_left = max_progress_width - n_filled headers = split(p.header, ' ') to_print = sprint(; context=io) do io @@ -306,8 +307,15 @@ function show_progress(io::IO, p::MiniProgressBar; termwidth=nothing, carriagere printstyled(io, join(headers[2:end], ' ')) print(io, " ") printstyled(io, "━"^n_filled; color=p.color) - printstyled(io, perc >= 95 ? 
"━" : "╸"; color=p.color) - printstyled(io, "━"^n_left, " "; color=:light_black) + if n_left > 0 + if partial_filled > 0.5 + printstyled(io, "╸"; color=p.color) # More filled, use ╸ + else + printstyled(io, "╺"; color=:light_black) # Less filled, use ╺ + end + printstyled(io, "━"^(n_left-1); color=:light_black) + end + printstyled(io, " "; color=:light_black) print(io, progress_text) carriagereturn && print(io, "\r") end From 35bf824e85af9e5f762c3c59d0e5b28aa2c3ab4b Mon Sep 17 00:00:00 2001 From: Zentrik Date: Sun, 13 Oct 2024 21:05:03 +0100 Subject: [PATCH 191/537] Add support for LLVM 19 (#55650) Co-authored-by: Zentrik --- deps/llvm.mk | 2 + src/clangsa/GCChecker.cpp | 6 +-- src/debuginfo.cpp | 6 +-- src/disasm.cpp | 51 +++++++++++++++++++++----- src/features_x86.h | 6 +-- src/jitlayers.cpp | 2 +- src/llvm-multiversioning.cpp | 1 + src/llvm-simdloop.cpp | 4 +- src/llvmcalltest.cpp | 1 + src/processor.cpp | 4 ++ src/processor_x86.cpp | 8 +--- test/llvmpasses/multiversioning-x86.ll | 22 +++++------ 12 files changed, 74 insertions(+), 39 deletions(-) diff --git a/deps/llvm.mk b/deps/llvm.mk index 3f4bc3e6746f0..09dd4f187d611 100644 --- a/deps/llvm.mk +++ b/deps/llvm.mk @@ -234,7 +234,9 @@ $$(LLVM_BUILDDIR_withtype)/build-compiled: $$(SRCCACHE)/$$(LLVM_SRC_DIR)/$1.patc LLVM_PATCH_PREV := $$(SRCCACHE)/$$(LLVM_SRC_DIR)/$1.patch-applied endef +ifeq ($(shell test $(LLVM_VER_MAJ) -lt 19 && echo true),true) $(eval $(call LLVM_PATCH,llvm-ittapi-cmake)) +endif ifeq ($(USE_SYSTEM_ZLIB), 0) $(LLVM_BUILDDIR_withtype)/build-configured: | $(build_prefix)/manifest/zlib diff --git a/src/clangsa/GCChecker.cpp b/src/clangsa/GCChecker.cpp index ecaeb460ebf91..31631eb70a4ad 100644 --- a/src/clangsa/GCChecker.cpp +++ b/src/clangsa/GCChecker.cpp @@ -865,7 +865,7 @@ bool GCChecker::isGCTracked(const Expr *E) { bool GCChecker::isGloballyRootedType(QualType QT) const { return isJuliaType( - [](StringRef Name) { return Name.endswith("jl_sym_t"); }, QT); + [](StringRef Name) { return Name.ends_with("jl_sym_t"); }, QT); } bool GCChecker::isSafepoint(const CallEvent &Call, CheckerContext &C) const { @@ -1166,10 +1166,10 @@ void GCChecker::checkDerivingExpr(const Expr *Result, const Expr *Parent, // TODO: We may want to refine this. This is to track pointers through the // array list in jl_module_t. 
bool ParentIsModule = isJuliaType( - [](StringRef Name) { return Name.endswith("jl_module_t"); }, + [](StringRef Name) { return Name.ends_with("jl_module_t"); }, Parent->getType()); bool ResultIsArrayList = isJuliaType( - [](StringRef Name) { return Name.endswith("arraylist_t"); }, + [](StringRef Name) { return Name.ends_with("arraylist_t"); }, Result->getType()); if (!(ParentIsModule && ResultIsArrayList) && isGCTracked(Parent)) { ResultTracked = false; diff --git a/src/debuginfo.cpp b/src/debuginfo.cpp index cfaf8d4c70ee9..f6fca47e9a889 100644 --- a/src/debuginfo.cpp +++ b/src/debuginfo.cpp @@ -296,7 +296,7 @@ void JITDebugInfoRegistry::registerJITObject(const object::ObjectFile &Object, uint8_t *catchjmp = NULL; for (const object::SymbolRef &sym_iter : Object.symbols()) { StringRef sName = cantFail(sym_iter.getName()); - if (sName.equals("__UnwindData") || sName.equals("__catchjmp")) { + if (sName == "__UnwindData" || sName == "__catchjmp") { uint64_t Addr = cantFail(sym_iter.getAddress()); // offset into object (including section offset) auto Section = cantFail(sym_iter.getSection()); assert(Section != EndSection && Section->isText()); @@ -310,10 +310,10 @@ void JITDebugInfoRegistry::registerJITObject(const object::ObjectFile &Object, SectionAddrCheck = SectionAddr; SectionLoadCheck = SectionLoadAddr; Addr += SectionLoadAddr - SectionAddr; - if (sName.equals("__UnwindData")) { + if (sName == "__UnwindData") { UnwindData = (uint8_t*)Addr; } - else if (sName.equals("__catchjmp")) { + else if (sName == "__catchjmp") { catchjmp = (uint8_t*)Addr; } } diff --git a/src/disasm.cpp b/src/disasm.cpp index b71503c3f7a77..ebe8f2ac397c0 100644 --- a/src/disasm.cpp +++ b/src/disasm.cpp @@ -103,6 +103,7 @@ // for outputting assembly #include #include +#include #include #include #include @@ -920,11 +921,17 @@ static void jl_dump_asm_internal( // LLVM will destroy the formatted stream, and we keep the raw stream. 
std::unique_ptr ustream(new formatted_raw_ostream(rstream)); std::unique_ptr Streamer( - TheTarget->createAsmStreamer(Ctx, std::move(ustream), /*asmverbose*/true, - /*useDwarfDirectory*/ true, - IP.release(), - std::move(CE), std::move(MAB), - /*ShowInst*/ false)); +#if JL_LLVM_VERSION >= 190000 + TheTarget->createAsmStreamer(Ctx, std::move(ustream), + + IP.release(), std::move(CE), std::move(MAB)) +#else + TheTarget->createAsmStreamer(Ctx, std::move(ustream), /*asmverbose*/ true, + /*useDwarfDirectory*/ true, IP.release(), + std::move(CE), std::move(MAB), + /*ShowInst*/ false) +#endif + ); Streamer->initSections(true, *STI); // Make the MemoryObject wrapper @@ -1148,7 +1155,11 @@ addPassesToGenerateCode(LLVMTargetMachine *TM, PassManagerBase &PM) { return &MMIWP->getMMI().getContext(); } +#if JL_LLVM_VERSION >= 190000 +class LineNumberPrinterHandler : public DebugHandlerBase { +#else class LineNumberPrinterHandler : public AsmPrinterHandler { +#endif MCStreamer &S; LineNumberAnnotatedWriter LinePrinter; std::string Buffer; @@ -1157,7 +1168,11 @@ class LineNumberPrinterHandler : public AsmPrinterHandler { public: LineNumberPrinterHandler(AsmPrinter &Printer, const char *debuginfo) - : S(*Printer.OutStreamer), + : +#if JL_LLVM_VERSION >= 190000 + DebugHandlerBase(&Printer), +#endif + S(*Printer.OutStreamer), LinePrinter("; ", true, debuginfo), RawStream(Buffer), Stream(RawStream) {} @@ -1176,12 +1191,20 @@ class LineNumberPrinterHandler : public AsmPrinterHandler { //virtual void beginModule(Module *M) override {} virtual void endModule() override {} /// note that some AsmPrinter implementations may not call beginFunction at all +#if JL_LLVM_VERSION >= 190000 + virtual void beginFunctionImpl(const MachineFunction *MF) override { +#else virtual void beginFunction(const MachineFunction *MF) override { +#endif LinePrinter.emitFunctionAnnot(&MF->getFunction(), Stream); emitAndReset(); } //virtual void markFunctionEnd() override {} +#if JL_LLVM_VERSION >= 190000 + virtual void endFunctionImpl(const MachineFunction *MF) override { +#else virtual void endFunction(const MachineFunction *MF) override { +#endif LinePrinter.emitEnd(Stream); emitAndReset(); } @@ -1257,15 +1280,23 @@ jl_value_t *jl_dump_function_asm_impl(jl_llvmf_dump_t* dump, char emit_mc, const } auto FOut = std::make_unique(asmfile); std::unique_ptr S(TM->getTarget().createAsmStreamer( - *Context, std::move(FOut), true, - true, InstPrinter, - std::move(MCE), std::move(MAB), - false)); +#if JL_LLVM_VERSION >= 190000 + *Context, std::move(FOut), InstPrinter, std::move(MCE), std::move(MAB) +#else + *Context, std::move(FOut), true, true, InstPrinter, std::move(MCE), + std::move(MAB), false +#endif + )); std::unique_ptr Printer( TM->getTarget().createAsmPrinter(*TM, std::move(S))); +#if JL_LLVM_VERSION >= 190000 + Printer->addDebugHandler( + std::make_unique(*Printer, debuginfo)); +#else Printer->addAsmPrinterHandler(AsmPrinter::HandlerInfo( std::unique_ptr(new LineNumberPrinterHandler(*Printer, debuginfo)), "emit", "Debug Info Emission", "Julia", "Julia::LineNumberPrinterHandler Markup")); +#endif if (!Printer) return jl_an_empty_string; PM.add(Printer.release()); diff --git a/src/features_x86.h b/src/features_x86.h index 08f979df546b7..2ecc8fee32a38 100644 --- a/src/features_x86.h +++ b/src/features_x86.h @@ -45,15 +45,15 @@ JL_FEATURE_DEF(avx512ifma, 32 * 2 + 21, 0) // JL_FEATURE_DEF(pcommit, 32 * 2 + 22, 0) // Deprecated JL_FEATURE_DEF(clflushopt, 32 * 2 + 23, 0) JL_FEATURE_DEF(clwb, 32 * 2 + 24, 0) -JL_FEATURE_DEF(avx512pf, 32 * 2 + 
26, 0) -JL_FEATURE_DEF(avx512er, 32 * 2 + 27, 0) +// JL_FEATURE_DEF(avx512pf, 32 * 2 + 26, 0) // Deprecated in LLVM 19 +// JL_FEATURE_DEF(avx512er, 32 * 2 + 27, 0) // Deprecated in LLVM 19 JL_FEATURE_DEF(avx512cd, 32 * 2 + 28, 0) JL_FEATURE_DEF(sha, 32 * 2 + 29, 0) JL_FEATURE_DEF(avx512bw, 32 * 2 + 30, 0) JL_FEATURE_DEF(avx512vl, 32 * 2 + 31, 0) // EAX=7,ECX=0: ECX -JL_FEATURE_DEF(prefetchwt1, 32 * 3 + 0, 0) +// JL_FEATURE_DEF(prefetchwt1, 32 * 3 + 0, 0) // Deprecated in LLVM 19 JL_FEATURE_DEF(avx512vbmi, 32 * 3 + 1, 0) JL_FEATURE_DEF(pku, 32 * 3 + 4, 0) // ospke JL_FEATURE_DEF(waitpkg, 32 * 3 + 5, 0) diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index d1757cadee05c..4ff7400df13dd 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -1968,7 +1968,7 @@ void JuliaOJIT::enableJITDebuggingSupport() void JuliaOJIT::enableIntelJITEventListener() { #if JL_LLVM_VERSION >= 190000 - if (TT.isOSBinFormatELF()) { + if (TM->getTargetTriple().isOSBinFormatELF()) { orc::SymbolMap VTuneFunctions; auto RegisterImplAddr = addAbsoluteToMap(VTuneFunctions,llvm_orc_registerVTuneImpl); auto UnregisterImplAddr = addAbsoluteToMap(VTuneFunctions,llvm_orc_unregisterVTuneImpl); diff --git a/src/llvm-multiversioning.cpp b/src/llvm-multiversioning.cpp index d544f182637b9..22ef973decfe9 100644 --- a/src/llvm-multiversioning.cpp +++ b/src/llvm-multiversioning.cpp @@ -12,6 +12,7 @@ #include #include +#include #include #include #if JL_LLVM_VERSION >= 170000 diff --git a/src/llvm-simdloop.cpp b/src/llvm-simdloop.cpp index 07afa8c930deb..ed2a04e650f2a 100644 --- a/src/llvm-simdloop.cpp +++ b/src/llvm-simdloop.cpp @@ -178,9 +178,9 @@ static bool processLoop(Loop &L, OptimizationRemarkEmitter &ORE, ScalarEvolution if (S) { LLVM_DEBUG(dbgs() << "LSL: found " << S->getString() << "\n"); if (S->getString().starts_with("julia")) { - if (S->getString().equals("julia.simdloop")) + if (S->getString() == "julia.simdloop") simd = true; - if (S->getString().equals("julia.ivdep")) + if (S->getString() == "julia.ivdep") ivdep = true; continue; } diff --git a/src/llvmcalltest.cpp b/src/llvmcalltest.cpp index 93c442445d79a..2ab16f3ac6d67 100644 --- a/src/llvmcalltest.cpp +++ b/src/llvmcalltest.cpp @@ -6,6 +6,7 @@ #include "llvm/Config/llvm-config.h" #include "llvm/IR/IRBuilder.h" +#include "llvm/IR/Module.h" #include #include "julia.h" diff --git a/src/processor.cpp b/src/processor.cpp index 025043ac362d4..bc12f5b54be19 100644 --- a/src/processor.cpp +++ b/src/processor.cpp @@ -978,8 +978,12 @@ static std::string jl_get_cpu_name_llvm(void) static std::string jl_get_cpu_features_llvm(void) { +#if JL_LLVM_VERSION >= 190000 + auto HostFeatures = llvm::sys::getHostCPUFeatures(); +#else llvm::StringMap HostFeatures; llvm::sys::getHostCPUFeatures(HostFeatures); +#endif std::string attr; for (auto &ele: HostFeatures) { if (ele.getValue()) { diff --git a/src/processor_x86.cpp b/src/processor_x86.cpp index db954680289ea..f1dff063de1d9 100644 --- a/src/processor_x86.cpp +++ b/src/processor_x86.cpp @@ -144,8 +144,6 @@ static constexpr FeatureDep deps[] = { {avx512f, avx2}, {avx512dq, avx512f}, {avx512ifma, avx512f}, - {avx512pf, avx512f}, - {avx512er, avx512f}, {avx512cd, avx512f}, {avx512bw, avx512f}, {avx512bf16, avx512bw}, @@ -183,7 +181,7 @@ constexpr auto tremont = goldmont_plus | get_feature_masks(clwb, gfni); constexpr auto knl = get_feature_masks(sse3, ssse3, sse41, sse42, cx16, sahf, popcnt, aes, pclmul, avx, xsave, xsaveopt, rdrnd, f16c, fsgsbase, avx2, bmi, bmi2, fma, lzcnt, movbe, adx, rdseed, prfchw, - avx512f, avx512er, 
avx512cd, avx512pf, prefetchwt1); + avx512f, avx512cd); constexpr auto knm = knl | get_feature_masks(avx512vpopcntdq); constexpr auto yonah = get_feature_masks(sse3); constexpr auto prescott = yonah; @@ -584,7 +582,7 @@ template static inline void features_disable_avx512(T &features) { using namespace Feature; - unset_bits(features, avx512f, avx512dq, avx512ifma, avx512pf, avx512er, avx512cd, + unset_bits(features, avx512f, avx512dq, avx512ifma, avx512cd, avx512bw, avx512vl, avx512vbmi, avx512vpopcntdq, avx512vbmi2, avx512vnni, avx512bitalg, avx512vp2intersect, avx512bf16); } @@ -948,7 +946,6 @@ static void ensure_jit_target(bool imaging) Feature::vaes, Feature::vpclmulqdq, Feature::sse4a, Feature::avx512f, Feature::avx512dq, Feature::avx512ifma, - Feature::avx512pf, Feature::avx512er, Feature::avx512cd, Feature::avx512bw, Feature::avx512vl, Feature::avx512vbmi, Feature::avx512vpopcntdq, Feature::avxvnni, @@ -1142,7 +1139,6 @@ llvm::SmallVector jl_get_llvm_clone_targets(void) Feature::vaes, Feature::vpclmulqdq, Feature::sse4a, Feature::avx512f, Feature::avx512dq, Feature::avx512ifma, - Feature::avx512pf, Feature::avx512er, Feature::avx512cd, Feature::avx512bw, Feature::avx512vl, Feature::avx512vbmi, Feature::avx512vpopcntdq, Feature::avxvnni, diff --git a/test/llvmpasses/multiversioning-x86.ll b/test/llvmpasses/multiversioning-x86.ll index 1fd0ce2d5f40c..ff4a8abba5252 100644 --- a/test/llvmpasses/multiversioning-x86.ll +++ b/test/llvmpasses/multiversioning-x86.ll @@ -101,14 +101,14 @@ define noundef i32 @simd_test_call(<4 x i32> noundef %0) { ; CHECK: @simd_test_call{{.*}}#[[NOT_BORING_CLONE2:[0-9]+]] ; CHECK: %2 = call noundef i32 @simd_test.2(<4 x i32> noundef %0) -; CHECK-DAG: attributes #[[BORING_BASE]] = { "julia.mv.clone"="0" "julia.mv.clones"="2" "julia.mv.fvar" "target-cpu"="x86-64" "target-features"="+cx16,-sse3,-pclmul,-ssse3,-fma,-sse4.1,-sse4.2,-movbe,-popcnt,-aes,-xsave,-avx,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sahf,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } -; CHECK-DAG: attributes #[[NOT_BORING_BASE]] = { "julia.mv.clone"="0" "julia.mv.clones"="6" "julia.mv.fvar" "target-cpu"="x86-64" "target-features"="+cx16,-sse3,-pclmul,-ssse3,-fma,-sse4.1,-sse4.2,-movbe,-popcnt,-aes,-xsave,-avx,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sahf,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } -; CHECK-DAG: attributes #[[SIMD_BASE_RELOC]] = { "julia.mv.clone"="0" "julia.mv.clones"="6" "julia.mv.reloc" "target-cpu"="x86-64" 
"target-features"="+cx16,-sse3,-pclmul,-ssse3,-fma,-sse4.1,-sse4.2,-movbe,-popcnt,-aes,-xsave,-avx,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sahf,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } -; CHECK-DAG: attributes #[[BORING_CLONE]] = { "julia.mv.clone"="1" "julia.mv.clones"="2" "julia.mv.fvar" "target-cpu"="sandybridge" "target-features"="+sahf,+avx,+xsave,+popcnt,+sse4.2,+sse4.1,+cx16,+ssse3,+pclmul,+sse3,-fma,-movbe,-aes,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } -; CHECK-DAG: attributes #[[NOT_BORING_CLONE1]] = { "julia.mv.clone"="1" "julia.mv.clones"="6" "julia.mv.fvar" "target-cpu"="sandybridge" "target-features"="+sahf,+avx,+xsave,+popcnt,+sse4.2,+sse4.1,+cx16,+ssse3,+pclmul,+sse3,-fma,-movbe,-aes,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } -; CHECK-DAG: attributes #[[NOT_BORING_CLONE2]] = { "julia.mv.clone"="2" "julia.mv.clones"="6" "julia.mv.fvar" "target-cpu"="haswell" "target-features"="+lzcnt,+sahf,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-aes,-rdrnd,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } -; CHECK-DAG: attributes #[[SIMD_CLONE1]] = { "julia.mv.clone"="1" "julia.mv.clones"="6" "julia.mv.reloc" "target-cpu"="sandybridge" 
"target-features"="+sahf,+avx,+xsave,+popcnt,+sse4.2,+sse4.1,+cx16,+ssse3,+pclmul,+sse3,-fma,-movbe,-aes,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } -; CHECK-DAG: attributes #[[SIMD_CLONE2]] = { "julia.mv.clone"="2" "julia.mv.clones"="6" "julia.mv.reloc" "target-cpu"="haswell" "target-features"="+lzcnt,+sahf,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-aes,-rdrnd,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } +; CHECK-DAG: attributes #[[BORING_BASE]] = { "julia.mv.clone"="0" "julia.mv.clones"="2" "julia.mv.fvar" "target-cpu"="x86-64" "target-features"="+cx16,-sse3,-pclmul,-ssse3,-fma,-sse4.1,-sse4.2,-movbe,-popcnt,-aes,-xsave,-avx,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sahf,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } +; CHECK-DAG: attributes #[[NOT_BORING_BASE]] = { "julia.mv.clone"="0" "julia.mv.clones"="6" "julia.mv.fvar" "target-cpu"="x86-64" "target-features"="+cx16,-sse3,-pclmul,-ssse3,-fma,-sse4.1,-sse4.2,-movbe,-popcnt,-aes,-xsave,-avx,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sahf,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } +; CHECK-DAG: attributes #[[SIMD_BASE_RELOC]] = { "julia.mv.clone"="0" "julia.mv.clones"="6" "julia.mv.reloc" "target-cpu"="x86-64" 
"target-features"="+cx16,-sse3,-pclmul,-ssse3,-fma,-sse4.1,-sse4.2,-movbe,-popcnt,-aes,-xsave,-avx,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sahf,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } +; CHECK-DAG: attributes #[[BORING_CLONE]] = { "julia.mv.clone"="1" "julia.mv.clones"="2" "julia.mv.fvar" "target-cpu"="sandybridge" "target-features"="+sahf,+avx,+xsave,+popcnt,+sse4.2,+sse4.1,+cx16,+ssse3,+pclmul,+sse3,-fma,-movbe,-aes,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } +; CHECK-DAG: attributes #[[NOT_BORING_CLONE1]] = { "julia.mv.clone"="1" "julia.mv.clones"="6" "julia.mv.fvar" "target-cpu"="sandybridge" "target-features"="+sahf,+avx,+xsave,+popcnt,+sse4.2,+sse4.1,+cx16,+ssse3,+pclmul,+sse3,-fma,-movbe,-aes,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } +; CHECK-DAG: attributes #[[NOT_BORING_CLONE2]] = { "julia.mv.clone"="2" "julia.mv.clones"="6" "julia.mv.fvar" "target-cpu"="haswell" "target-features"="+lzcnt,+sahf,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-aes,-rdrnd,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } +; CHECK-DAG: attributes #[[SIMD_CLONE1]] = { "julia.mv.clone"="1" "julia.mv.clones"="6" "julia.mv.reloc" "target-cpu"="sandybridge" 
"target-features"="+sahf,+avx,+xsave,+popcnt,+sse4.2,+sse4.1,+cx16,+ssse3,+pclmul,+sse3,-fma,-movbe,-aes,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } +; CHECK-DAG: attributes #[[SIMD_CLONE2]] = { "julia.mv.clone"="2" "julia.mv.clones"="6" "julia.mv.reloc" "target-cpu"="haswell" "target-features"="+lzcnt,+sahf,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-aes,-rdrnd,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" } !llvm.module.flags = !{!0, !2} @@ -118,6 +118,6 @@ define noundef i32 @simd_test_call(<4 x i32> noundef %0) { !1 = !{!1} !2 = !{i32 1, !"julia.mv.specs", !3} !3 = !{!4, !5, !6} -!4 = !{!"x86-64", !"+cx16,-sse3,-pclmul,-ssse3,-fma,-sse4.1,-sse4.2,-movbe,-popcnt,-aes,-xsave,-avx,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sahf,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8", i32 0, i32 0} -!5 = !{!"sandybridge", !"+sahf,+avx,+xsave,+popcnt,+sse4.2,+sse4.1,+cx16,+ssse3,+pclmul,+sse3,-fma,-movbe,-aes,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8", i32 0, i32 2} -!6 = !{!"haswell", 
!"+lzcnt,+sahf,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-aes,-rdrnd,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512pf,-avx512er,-avx512cd,-sha,-avx512bw,-avx512vl,-prefetchwt1,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8", i32 1, i32 284} +!4 = !{!"x86-64", !"+cx16,-sse3,-pclmul,-ssse3,-fma,-sse4.1,-sse4.2,-movbe,-popcnt,-aes,-xsave,-avx,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sahf,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8", i32 0, i32 0} +!5 = !{!"sandybridge", !"+sahf,+avx,+xsave,+popcnt,+sse4.2,+sse4.1,+cx16,+ssse3,+pclmul,+sse3,-fma,-movbe,-aes,-f16c,-rdrnd,-fsgsbase,-bmi,-avx2,-bmi2,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-lzcnt,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8", i32 0, i32 2} +!6 = !{!"haswell", !"+lzcnt,+sahf,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-aes,-rdrnd,-rtm,-avx512f,-avx512dq,-rdseed,-adx,-avx512ifma,-clflushopt,-clwb,-avx512cd,-sha,-avx512bw,-avx512vl,-avx512vbmi,-pku,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-uintr,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-avx512fp16,-amx-tile,-amx-int8,-sse4a,-prfchw,-xop,-fma4,-tbm,-mwaitx,-xsaveopt,-xsavec,-xsaves,-clzero,-wbnoinvd,-avxvnni,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8", i32 1, i32 284} From 952e952cd729d780a96a4a259cc8edaf7f060754 Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Sun, 13 Oct 2024 23:27:00 -0400 Subject: [PATCH 192/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=20fbaa2e337=20to=2027c1b1ee5=20(#56146)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 | 1 + .../Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 | 1 + .../Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 | 1 - .../Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 | 1 - stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 
deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 create mode 100644 deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 delete mode 100644 deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 diff --git a/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 b/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 new file mode 100644 index 0000000000000..137460d1a05a1 --- /dev/null +++ b/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 @@ -0,0 +1 @@ +74d656c054c1406a7e88910d673019f7 diff --git a/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 b/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 new file mode 100644 index 0000000000000..0b8463176a867 --- /dev/null +++ b/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 @@ -0,0 +1 @@ +a8e589ce68cc14883a7a21f68862695bfaa9ab38dfa0e704c32aaa801667708af0d851a41199ad09ae81a4c0b928befb680d639c1eca3377ce2db2dcc34b98e5 diff --git a/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 b/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 deleted file mode 100644 index 762a180d93031..0000000000000 --- a/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -4ea351427d5b43617abae557670c3313 diff --git a/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 b/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 deleted file mode 100644 index eef70ab9b62d5..0000000000000 --- a/deps/checksums/Pkg-fbaa2e3370b4ab922919892640e5d1b0bcb14037.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -9e91076974ab1dcb1c85e2c8acaf3404f4e82dcd2118d215d4a8413a1e00462ca47891bdae983441a8621015c082421de1f2e26b9b2ee18c1e3c13d58bd1d261 diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index fc67189981d59..470acefbc6c83 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = fbaa2e3370b4ab922919892640e5d1b0bcb14037 +PKG_SHA1 = 27c1b1ee5cf15571eb5e54707e812d646ac1dde3 PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From 9e92a9d17415fabb765f5dfebb34f35fdccb9122 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Mon, 14 Oct 2024 16:59:12 +0200 Subject: [PATCH 193/537] HISTORY entry for deletion of `length(::Stateful)` (#55861) xref #47790 xref #51747 xref #54953 xref #55858 --- HISTORY.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/HISTORY.md b/HISTORY.md index 6142747273864..aa7f9f0ccdad6 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -119,6 +119,11 @@ New library features Standard library changes ------------------------ +* It's not possible to define `length` for stateful iterators in a generally consistent manner. The + potential for silently incorrect results for `Stateful` iterators is addressed by deleting the + `length(::Stateful)` method. The last type parameter of `Stateful` is gone, too. Issue: ([#47790]), + PR: ([#51747]). + #### StyledStrings * A new standard library for handling styling in a more comprehensive and structured way ([#49586]). 
@@ -225,6 +230,7 @@ Tooling Improvements [#46501]: https://github.com/JuliaLang/julia/issues/46501 [#47354]: https://github.com/JuliaLang/julia/issues/47354 [#47679]: https://github.com/JuliaLang/julia/issues/47679 +[#47790]: https://github.com/JuliaLang/julia/issues/47790 [#48273]: https://github.com/JuliaLang/julia/issues/48273 [#48625]: https://github.com/JuliaLang/julia/issues/48625 [#49546]: https://github.com/JuliaLang/julia/issues/49546 @@ -250,6 +256,7 @@ Tooling Improvements [#51616]: https://github.com/JuliaLang/julia/issues/51616 [#51647]: https://github.com/JuliaLang/julia/issues/51647 [#51704]: https://github.com/JuliaLang/julia/issues/51704 +[#51747]: https://github.com/JuliaLang/julia/issues/51747 [#51799]: https://github.com/JuliaLang/julia/issues/51799 [#51897]: https://github.com/JuliaLang/julia/issues/51897 [#51929]: https://github.com/JuliaLang/julia/issues/51929 From a19569d443f12b3a082258cad419094df2ae1612 Mon Sep 17 00:00:00 2001 From: Denis Barucic Date: Mon, 14 Oct 2024 20:56:32 +0200 Subject: [PATCH 194/537] ntuple: ensure eltype is always `Int` (#55901) Fixes #55790 --- base/ntuple.jl | 6 ++++-- test/tuple.jl | 5 +++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/base/ntuple.jl b/base/ntuple.jl index f81d2686b9764..185c42601280f 100644 --- a/base/ntuple.jl +++ b/base/ntuple.jl @@ -14,7 +14,7 @@ julia> ntuple(i -> 2*i, 4) (2, 4, 6, 8) ``` """ -@inline function ntuple(f::F, n::Integer) where F +@inline function ntuple(f::F, n::Int) where F # marked inline since this benefits from constant propagation of `n` t = n == 0 ? () : n == 1 ? (f(1),) : @@ -30,8 +30,10 @@ julia> ntuple(i -> 2*i, 4) _ntuple(f, n) return t end +ntuple(f::F, n::Integer) where F = ntuple(f, convert(Int, n)::Int) -function _ntuple(f::F, n) where F +# `n` should always be an Int (#55790) +function _ntuple(f::F, n::Int) where F @noinline (n >= 0) || throw(ArgumentError(LazyString("tuple length should be ≥ 0, got ", n))) ([f(i) for i = 1:n]...,) diff --git a/test/tuple.jl b/test/tuple.jl index 355ad965f9584..13af5ac992434 100644 --- a/test/tuple.jl +++ b/test/tuple.jl @@ -534,6 +534,11 @@ end end @test Base.infer_return_type(ntuple, Tuple{typeof(identity), Val}) == Tuple{Vararg{Int}} + + # issue #55790 + for n in 1:32 + @test typeof(ntuple(identity, UInt64(n))) == NTuple{n, Int} + end end struct A_15703{N} From 159adbf6e930ba7ffe58adcc441e7828f4cca429 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Mon, 14 Oct 2024 18:17:43 -0300 Subject: [PATCH 195/537] Improve remarks of the alloc opt pass slightly. (#55995) The Value printer LLVM uses just prints the kind of instruction so it just shows call. 
--------- Co-authored-by: Oscar Smith --- src/llvm-alloc-helpers.cpp | 28 +++++++++++++++++++----- src/llvm-alloc-opt.cpp | 45 ++++++++++++++++++++++++++++++-------- 2 files changed, 58 insertions(+), 15 deletions(-) diff --git a/src/llvm-alloc-helpers.cpp b/src/llvm-alloc-helpers.cpp index 75030f8565221..59fce1235e14e 100644 --- a/src/llvm-alloc-helpers.cpp +++ b/src/llvm-alloc-helpers.cpp @@ -134,12 +134,16 @@ JL_USED_FUNC void AllocUseInfo::dump(llvm::raw_ostream &OS) OS << " zeroed"; OS << '\n'; OS << "Uses: " << uses.size() << '\n'; - for (auto inst: uses) + for (auto inst: uses) { inst->print(OS); + OS << '\n'; + } if (!preserves.empty()) { OS << "Preserves: " << preserves.size() << '\n'; - for (auto inst: preserves) + for (auto inst: preserves) { inst->print(OS); + OS << '\n'; + } } OS << "MemOps: " << memops.size() << '\n'; for (auto &field: memops) { @@ -268,9 +272,12 @@ void jl_alloc::runEscapeAnalysis(llvm::CallInst *I, EscapeAnalysisRequiredArgs r } LLVM_DEBUG(dbgs() << "Unknown call, marking escape\n"); REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + inst->print(rso); return OptimizationRemarkMissed(DEBUG_TYPE, "UnknownCall", inst) - << "Unknown call, marking escape (" << ore::NV("Call", inst) << ")"; + << "Unknown call, marking escape (" << ore::NV("Call", StringRef(str)) << ")"; }); required.use_info.escaped = true; return false; @@ -284,9 +291,12 @@ void jl_alloc::runEscapeAnalysis(llvm::CallInst *I, EscapeAnalysisRequiredArgs r if (use->getOperandNo() != StoreInst::getPointerOperandIndex()) { LLVM_DEBUG(dbgs() << "Object address is stored somewhere, marking escape\n"); REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + inst->print(rso); return OptimizationRemarkMissed(DEBUG_TYPE, "StoreObjAddr", inst) - << "Object address is stored somewhere, marking escape (" << ore::NV("Store", inst) << ")"; + << "Object address is stored somewhere, marking escape (" << ore::NV("Store", StringRef(str)) << ")"; }); required.use_info.escaped = true; return false; @@ -310,9 +320,12 @@ void jl_alloc::runEscapeAnalysis(llvm::CallInst *I, EscapeAnalysisRequiredArgs r if (use->getOperandNo() != isa(inst) ? 
AtomicCmpXchgInst::getPointerOperandIndex() : AtomicRMWInst::getPointerOperandIndex()) { LLVM_DEBUG(dbgs() << "Object address is cmpxchg/rmw-ed somewhere, marking escape\n"); REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + inst->print(rso); return OptimizationRemarkMissed(DEBUG_TYPE, "StoreObjAddr", inst) - << "Object address is cmpxchg/rmw-ed somewhere, marking escape (" << ore::NV("Store", inst) << ")"; + << "Object address is cmpxchg/rmw-ed somewhere, marking escape (" << ore::NV("Store", StringRef(str)) << ")"; }); required.use_info.escaped = true; return false; @@ -363,9 +376,12 @@ void jl_alloc::runEscapeAnalysis(llvm::CallInst *I, EscapeAnalysisRequiredArgs r } LLVM_DEBUG(dbgs() << "Unknown instruction, marking escape\n"); REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + inst->print(rso); return OptimizationRemarkMissed(DEBUG_TYPE, "UnknownInst", inst) - << "Unknown instruction, marking escape (" << ore::NV("Inst", inst) << ")"; + << "Unknown instruction, marking escape (" << ore::NV("Inst", StringRef(str)) << ")"; }); required.use_info.escaped = true; return false; diff --git a/src/llvm-alloc-opt.cpp b/src/llvm-alloc-opt.cpp index 188955fd50972..0ec88c9d56356 100644 --- a/src/llvm-alloc-opt.cpp +++ b/src/llvm-alloc-opt.cpp @@ -224,8 +224,11 @@ void Optimizer::optimizeAll() checkInst(orig); if (use_info.escaped) { REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + orig->print(rso); return OptimizationRemarkMissed(DEBUG_TYPE, "Escaped", orig) - << "GC allocation escaped " << ore::NV("GC Allocation", orig); + << "GC allocation escaped " << ore::NV("GC Allocation", StringRef(str)); }); if (use_info.hastypeof) optimizeTag(orig); @@ -233,8 +236,11 @@ void Optimizer::optimizeAll() } if (use_info.haserror || use_info.returned) { REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + orig->print(rso); return OptimizationRemarkMissed(DEBUG_TYPE, "Escaped", orig) - << "GC allocation has error or was returned " << ore::NV("GC Allocation", orig); + << "GC allocation has error or was returned " << ore::NV("GC Allocation", StringRef(str)); }); if (use_info.hastypeof) optimizeTag(orig); @@ -243,8 +249,11 @@ void Optimizer::optimizeAll() if (!use_info.addrescaped && !use_info.hasload && (!use_info.haspreserve || !use_info.refstore)) { REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + orig->print(rso); return OptimizationRemark(DEBUG_TYPE, "Dead Allocation", orig) - << "GC allocation removed " << ore::NV("GC Allocation", orig); + << "GC allocation removed " << ore::NV("GC Allocation", StringRef(str)); }); // No one took the address, no one reads anything and there's no meaningful // preserve of fields (either no preserve/ccall or no object reference fields) @@ -270,8 +279,11 @@ void Optimizer::optimizeAll() } if (has_refaggr) { REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + orig->print(rso); return OptimizationRemarkMissed(DEBUG_TYPE, "Escaped", orig) - << "GC allocation has unusual object reference, unable to move to stack " << ore::NV("GC Allocation", orig); + << "GC allocation has unusual object reference, unable to move to stack " << ore::NV("GC Allocation", StringRef(str)); }); if (use_info.hastypeof) optimizeTag(orig); @@ -279,8 +291,11 @@ void Optimizer::optimizeAll() } if (!use_info.hasunknownmem && !use_info.addrescaped) { REMARK([&](){ + std::string str; + llvm::raw_string_ostream rso(str); + orig->print(rso); return OptimizationRemark(DEBUG_TYPE, "Stack 
Split Allocation", orig) - << "GC allocation split on stack " << ore::NV("GC Allocation", orig); + << "GC allocation split on stack " << ore::NV("GC Allocation", StringRef(str)); }); // No one actually care about the memory layout of this object, split it. splitOnStack(orig); @@ -292,16 +307,22 @@ void Optimizer::optimizeAll() // This later causes the GC rooting pass, to miss-characterize the float as a pointer to a GC value if (has_unboxed && has_ref) { REMARK([&]() { + std::string str; + llvm::raw_string_ostream rso(str); + orig->print(rso); return OptimizationRemarkMissed(DEBUG_TYPE, "Escaped", orig) - << "GC allocation could not be split since it contains both boxed and unboxed values, unable to move to stack " << ore::NV("GC Allocation", orig); + << "GC allocation could not be split since it contains both boxed and unboxed values, unable to move to stack " << ore::NV("GC Allocation", StringRef(str)); }); if (use_info.hastypeof) optimizeTag(orig); continue; } REMARK([&](){ + std::string str; + llvm::raw_string_ostream rso(str); + orig->print(rso); return OptimizationRemark(DEBUG_TYPE, "Stack Move Allocation", orig) - << "GC allocation moved to stack " << ore::NV("GC Allocation", orig); + << "GC allocation moved to stack " << ore::NV("GC Allocation", StringRef(str)); }); // The object has no fields with mix reference access moveToStack(orig, sz, has_ref, use_info.allockind); @@ -380,7 +401,10 @@ void Optimizer::checkInst(CallInst *I) std::string suse_info; llvm::raw_string_ostream osuse_info(suse_info); use_info.dump(osuse_info); - return OptimizationRemarkAnalysis(DEBUG_TYPE, "EscapeAnalysis", I) << "escape analysis for " << ore::NV("GC Allocation", I) << "\n" << ore::NV("UseInfo", osuse_info.str()); + std::string str; + llvm::raw_string_ostream rso(str); + I->print(rso); + return OptimizationRemarkAnalysis(DEBUG_TYPE, "EscapeAnalysis", I) << "escape analysis for " << ore::NV("GC Allocation", StringRef(str)) << "\n" << ore::NV("UseInfo", osuse_info.str()); }); } @@ -905,8 +929,11 @@ void Optimizer::optimizeTag(CallInst *orig_inst) if (pass.typeof_func == callee) { ++RemovedTypeofs; REMARK([&](){ + std::string str; + llvm::raw_string_ostream rso(str); + orig_inst->print(rso); return OptimizationRemark(DEBUG_TYPE, "typeof", call) - << "removed typeof call for GC allocation " << ore::NV("Alloc", orig_inst); + << "removed typeof call for GC allocation " << ore::NV("Alloc", StringRef(str)); }); call->replaceAllUsesWith(tag); // Push to the removed instructions to trigger `finalize` to From 8c2bcf67e03f13771841605f5289dc56eb46932e Mon Sep 17 00:00:00 2001 From: James Wrigley Date: Tue, 15 Oct 2024 01:43:30 +0200 Subject: [PATCH 196/537] Implement Base.fd() for TCPSocket, UDPSocket, and TCPServer (#53721) This is quite handy if you want to pass off the file descriptor to a C library. I also added a warning to the `fd()` docstring to warn folks about duplicating the file descriptor first. --- base/iostream.jl | 19 ++++++++++++++++--- base/libc.jl | 7 +++++++ base/public.jl | 1 + doc/src/base/libc.md | 1 + stdlib/Sockets/src/Sockets.jl | 6 ++++++ stdlib/Sockets/test/runtests.jl | 25 +++++++++++++++++++++++++ 6 files changed, 56 insertions(+), 3 deletions(-) diff --git a/base/iostream.jl b/base/iostream.jl index 74908344e078e..d91330960d59a 100644 --- a/base/iostream.jl +++ b/base/iostream.jl @@ -47,16 +47,29 @@ macro _lock_ios(s, expr) end """ - fd(stream) -> RawFD + fd(x) -> RawFD -Return the file descriptor backing the stream or file. 
Note that this function only applies -to synchronous `File`'s and `IOStream`'s not to any of the asynchronous streams. +Return the file descriptor backing the stream, file, or socket. `RawFD` objects can be passed directly to other languages via the `ccall` interface. !!! compat "Julia 1.12" Prior to 1.12, this function returned an `Int` instead of a `RawFD`. You may use `RawFD(fd(x))` to produce a `RawFD` in all Julia versions. + +!!! compat "Julia 1.12" + Getting the file descriptor of sockets are supported as of Julia 1.12. + +!!! warning + Duplicate the returned file descriptor with [`Libc.dup()`](@ref) before + passing it to another system that will take ownership of it (e.g. a C + library). Otherwise both the Julia object `x` and the other system may try + to close the file descriptor, which will cause errors. + +!!! warning + The file descriptors for sockets are asynchronous (i.e. `O_NONBLOCK` on + POSIX and `OVERLAPPED` on Windows), they may behave differently than regular + file descriptors. """ fd(s::IOStream) = RawFD(ccall(:jl_ios_fd, Clong, (Ptr{Cvoid},), s.ios)) diff --git a/base/libc.jl b/base/libc.jl index 21f9554f7e6db..7364f6e6677fe 100644 --- a/base/libc.jl +++ b/base/libc.jl @@ -36,6 +36,13 @@ RawFD(fd::Integer) = bitcast(RawFD, Cint(fd)) RawFD(fd::RawFD) = fd Base.cconvert(::Type{Cint}, fd::RawFD) = bitcast(Cint, fd) +""" + dup(src::RawFD[, target::RawFD])::RawFD + +Duplicate the file descriptor `src` so that the duplicate refers to the same OS +resource (e.g. a file or socket). A `target` file descriptor may be optionally +be passed to use for the new duplicate. +""" dup(x::RawFD) = ccall((@static Sys.iswindows() ? :_dup : :dup), RawFD, (RawFD,), x) dup(src::RawFD, target::RawFD) = systemerror("dup", -1 == ccall((@static Sys.iswindows() ? 
:_dup2 : :dup2), Int32, diff --git a/base/public.jl b/base/public.jl index 2e8e777d2f91d..1a23550485d84 100644 --- a/base/public.jl +++ b/base/public.jl @@ -102,6 +102,7 @@ public # functions reseteof, link_pipe!, + dup, # filesystem operations rename, diff --git a/doc/src/base/libc.md b/doc/src/base/libc.md index c0448b04d9db7..b598baaa16bab 100644 --- a/doc/src/base/libc.md +++ b/doc/src/base/libc.md @@ -18,6 +18,7 @@ Base.Libc.strftime Base.Libc.strptime Base.Libc.TmStruct Base.Libc.FILE +Base.Libc.dup Base.Libc.flush_cstdio Base.Libc.systemsleep Base.Libc.mkfifo diff --git a/stdlib/Sockets/src/Sockets.jl b/stdlib/Sockets/src/Sockets.jl index 3c30b214305fb..f9e0f2f88dd78 100644 --- a/stdlib/Sockets/src/Sockets.jl +++ b/stdlib/Sockets/src/Sockets.jl @@ -107,6 +107,8 @@ if OS_HANDLE != RawFD TCPSocket(fd::RawFD) = TCPSocket(Libc._get_osfhandle(fd)) end +Base.fd(sock::TCPSocket) = Base._fd(sock) + mutable struct TCPServer <: LibuvServer handle::Ptr{Cvoid} @@ -139,6 +141,8 @@ function TCPServer(; delay=true) return tcp end +Base.fd(server::TCPServer) = Base._fd(server) + """ accept(server[, client]) @@ -199,6 +203,8 @@ end show(io::IO, stream::UDPSocket) = print(io, typeof(stream), "(", uv_status_string(stream), ")") +Base.fd(sock::UDPSocket) = Base._fd(sock) + function _uv_hook_close(sock::UDPSocket) lock(sock.cond) try diff --git a/stdlib/Sockets/test/runtests.jl b/stdlib/Sockets/test/runtests.jl index 778d9f7415bcc..669237acccb0a 100644 --- a/stdlib/Sockets/test/runtests.jl +++ b/stdlib/Sockets/test/runtests.jl @@ -605,6 +605,31 @@ end end end +@testset "fd() methods" begin + function valid_fd(x) + if Sys.iswindows() + return x isa Base.OS_HANDLE + elseif !Sys.iswindows() + value = Base.cconvert(Cint, x) + + # 2048 is a bit arbitrary, it depends on the process not having too many + # file descriptors open. But select() has a limit of 1024 and people + # don't seem to hit it too often so let's hope twice that is safe. + return value > 0 && value < 2048 + end + end + + sock = TCPSocket(; delay=false) + @test valid_fd(fd(sock)) + + sock = UDPSocket() + bind(sock, Sockets.localhost, 0) + @test valid_fd(fd(sock)) + + server = listen(Sockets.localhost, 0) + @test valid_fd(fd(server)) +end + @testset "TCPServer constructor" begin s = Sockets.TCPServer(; delay=false) if ccall(:jl_has_so_reuseport, Int32, ()) == 1 From b86e647159a4d9f1285e4f8c70a18e1b2bf2aa7d Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Tue, 15 Oct 2024 02:02:11 +0200 Subject: [PATCH 197/537] Fix `JULIA_CPU_TARGET` being propagated to workers precompiling stdlib pkgimages (#54093) Apparently (thanks ChatGPT) each line in a makefile is executed in a separate shell so adding an `export` line on one line does not propagate to the next line. 
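As a minimal illustration of that behavior (not part of this patch — the targets below are made up and recipe lines are tab-indented):

```make
# Illustrative sketch only: hypothetical targets, not in pkgimage.mk.
show-broken:
	export JULIA_CPU_TARGET="x86-64"
	# Next line runs in a *new* shell, so the exported variable above is gone:
	echo "JULIA_CPU_TARGET='$$JULIA_CPU_TARGET'"

show-fixed:
	# Prefixing the assignment on the same line puts it in that command's environment:
	JULIA_CPU_TARGET="x86-64" sh -c 'echo "JULIA_CPU_TARGET=$$JULIA_CPU_TARGET"'
```

Putting the assignment on the same recipe line, as done below for the `spawn` invocation, is what lets the setting reach the precompilation workers.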
--- pkgimage.mk | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pkgimage.mk b/pkgimage.mk index 740b9760cab48..0bc035ee03b08 100644 --- a/pkgimage.mk +++ b/pkgimage.mk @@ -25,8 +25,7 @@ print-depot-path: @$(call PRINT_JULIA, $(call spawn,$(JULIA_EXECUTABLE)) --startup-file=no -e '@show Base.DEPOT_PATH') $(BUILDDIR)/stdlib/%.image: $(JULIAHOME)/stdlib/Project.toml $(JULIAHOME)/stdlib/Manifest.toml $(INDEPENDENT_STDLIBS_SRCS) $(JULIA_DEPOT_PATH)/compiled - export JULIA_CPU_TARGET="$(JULIA_CPU_TARGET)" - @$(call PRINT_JULIA, $(call spawn,$(JULIA_EXECUTABLE)) --startup-file=no -e 'Base.Precompilation.precompilepkgs(;configs=[``=>Base.CacheFlags(), `--check-bounds=yes`=>Base.CacheFlags(;check_bounds=1)])') + @$(call PRINT_JULIA, JULIA_CPU_TARGET="$(JULIA_CPU_TARGET)" $(call spawn,$(JULIA_EXECUTABLE)) --startup-file=no -e 'Base.Precompilation.precompilepkgs(;configs=[``=>Base.CacheFlags(), `--check-bounds=yes`=>Base.CacheFlags(;check_bounds=1)])') touch $@ $(BUILDDIR)/stdlib/release.image: $(build_private_libdir)/sys.$(SHLIB_EXT) From f42066a9720f42fc330373d384fe9bc22aabf107 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 15 Oct 2024 06:27:31 +0530 Subject: [PATCH 198/537] Merge tr methods for triangular matrices (#56154) Since the methods do identical things, we don't need multiple of these. --- stdlib/LinearAlgebra/src/triangular.jl | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index ee63865b65d6e..3b949fa54b287 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -523,10 +523,8 @@ for TM in (:LowerTriangular, :UpperTriangular) @eval -(A::$TM{<:Any, <:StridedMaybeAdjOrTransMat}) = broadcast(-, A) end -tr(A::LowerTriangular) = tr(A.data) -tr(A::UnitLowerTriangular) = size(A, 1) * oneunit(eltype(A)) -tr(A::UpperTriangular) = tr(A.data) -tr(A::UnitUpperTriangular) = size(A, 1) * oneunit(eltype(A)) +tr(A::UpperOrLowerTriangular) = tr(A.data) +tr(A::Union{UnitLowerTriangular, UnitUpperTriangular}) = size(A, 1) * oneunit(eltype(A)) for T in (:UpperOrUnitUpperTriangular, :LowerOrUnitLowerTriangular) @eval @propagate_inbounds function copyto!(dest::$T, U::$T) From b69c8682c8d2deb5716703f925066f86ed7339f2 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 15 Oct 2024 06:28:38 +0530 Subject: [PATCH 199/537] Reduce duplication in triangular indexing methods (#56152) This uses an orthogonal design to reduce code duplication in the indexing methods for triangular matrices. --- stdlib/LinearAlgebra/src/triangular.jl | 60 +++++++++++--------------- 1 file changed, 24 insertions(+), 36 deletions(-) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 3b949fa54b287..71660bc5ca28c 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -208,45 +208,33 @@ function full!(A::UnitUpperTriangular) B end -Base.isassigned(A::UnitLowerTriangular, i::Int, j::Int) = - i > j ? isassigned(A.data, i, j) : true -Base.isassigned(A::LowerTriangular, i::Int, j::Int) = - i >= j ? isassigned(A.data, i, j) : true -Base.isassigned(A::UnitUpperTriangular, i::Int, j::Int) = - i < j ? isassigned(A.data, i, j) : true -Base.isassigned(A::UpperTriangular, i::Int, j::Int) = - i <= j ? isassigned(A.data, i, j) : true - -Base.isstored(A::UnitLowerTriangular, i::Int, j::Int) = - i > j ? 
Base.isstored(A.data, i, j) : false -Base.isstored(A::LowerTriangular, i::Int, j::Int) = - i >= j ? Base.isstored(A.data, i, j) : false -Base.isstored(A::UnitUpperTriangular, i::Int, j::Int) = - i < j ? Base.isstored(A.data, i, j) : false -Base.isstored(A::UpperTriangular, i::Int, j::Int) = - i <= j ? Base.isstored(A.data, i, j) : false - -@propagate_inbounds getindex(A::UnitLowerTriangular{T}, i::Int, j::Int) where {T} = - i > j ? A.data[i,j] : ifelse(i == j, oneunit(T), zero(T)) -@propagate_inbounds getindex(A::LowerTriangular, i::Int, j::Int) = - i >= j ? A.data[i,j] : _zero(A.data,j,i) -@propagate_inbounds getindex(A::UnitUpperTriangular{T}, i::Int, j::Int) where {T} = - i < j ? A.data[i,j] : ifelse(i == j, oneunit(T), zero(T)) -@propagate_inbounds getindex(A::UpperTriangular, i::Int, j::Int) = - i <= j ? A.data[i,j] : _zero(A.data,j,i) +_shouldforwardindex(U::UpperTriangular, row::Integer, col::Integer) = row <= col +_shouldforwardindex(U::LowerTriangular, row::Integer, col::Integer) = row >= col +_shouldforwardindex(U::UnitUpperTriangular, row::Integer, col::Integer) = row < col +_shouldforwardindex(U::UnitLowerTriangular, row::Integer, col::Integer) = row > col + +Base.isassigned(A::UpperOrLowerTriangular, i::Int, j::Int) = + _shouldforwardindex(A, i, j) ? isassigned(A.data, i, j) : true + +Base.isstored(A::UpperOrLowerTriangular, i::Int, j::Int) = + _shouldforwardindex(A, i, j) ? Base.isstored(A.data, i, j) : false + +@propagate_inbounds getindex(A::Union{UnitLowerTriangular{T}, UnitUpperTriangular{T}}, i::Int, j::Int) where {T} = + _shouldforwardindex(A, i, j) ? A.data[i,j] : ifelse(i == j, oneunit(T), zero(T)) +@propagate_inbounds getindex(A::Union{LowerTriangular, UpperTriangular}, i::Int, j::Int) = + _shouldforwardindex(A, i, j) ? A.data[i,j] : _zero(A.data,j,i) + +_shouldforwardindex(U::UpperTriangular, b::BandIndex) = b.band >= 0 +_shouldforwardindex(U::LowerTriangular, b::BandIndex) = b.band <= 0 +_shouldforwardindex(U::UnitUpperTriangular, b::BandIndex) = b.band > 0 +_shouldforwardindex(U::UnitLowerTriangular, b::BandIndex) = b.band < 0 # these specialized getindex methods enable constant-propagation of the band -Base.@constprop :aggressive @propagate_inbounds function getindex(A::UnitLowerTriangular{T}, b::BandIndex) where {T} - b.band < 0 ? A.data[b] : ifelse(b.band == 0, oneunit(T), zero(T)) -end -Base.@constprop :aggressive @propagate_inbounds function getindex(A::LowerTriangular, b::BandIndex) - b.band <= 0 ? A.data[b] : _zero(A.data, b) -end -Base.@constprop :aggressive @propagate_inbounds function getindex(A::UnitUpperTriangular{T}, b::BandIndex) where {T} - b.band > 0 ? A.data[b] : ifelse(b.band == 0, oneunit(T), zero(T)) +Base.@constprop :aggressive @propagate_inbounds function getindex(A::Union{UnitLowerTriangular{T}, UnitUpperTriangular{T}}, b::BandIndex) where {T} + _shouldforwardindex(A, b) ? A.data[b] : ifelse(b.band == 0, oneunit(T), zero(T)) end -Base.@constprop :aggressive @propagate_inbounds function getindex(A::UpperTriangular, b::BandIndex) - b.band >= 0 ? A.data[b] : _zero(A.data, b) +Base.@constprop :aggressive @propagate_inbounds function getindex(A::Union{LowerTriangular, UpperTriangular}, b::BandIndex) + _shouldforwardindex(A, b) ? 
A.data[b] : _zero(A.data, b) end _zero_triangular_half_str(::Type{<:UpperOrUnitUpperTriangular}) = "lower" From 3b3a70fe206783293d87de9320fbf1e369db4c80 Mon Sep 17 00:00:00 2001 From: Oscar Smith Date: Mon, 14 Oct 2024 23:48:24 -0400 Subject: [PATCH 200/537] update LLVM docs (#56162) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit dump with raw=true so you don't get random erorrs, and show how to run single modules. --------- Co-authored-by: Valentin Churavy Co-authored-by: Mosè Giordano <765740+giordano@users.noreply.github.com> Co-authored-by: Jameson Nash --- doc/src/devdocs/llvm.md | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/doc/src/devdocs/llvm.md b/doc/src/devdocs/llvm.md index fdecb0472388a..c05a4f9dc4e7f 100644 --- a/doc/src/devdocs/llvm.md +++ b/doc/src/devdocs/llvm.md @@ -142,9 +142,9 @@ cc -shared -o sys.so sys.o ``` To generate a system image with the new pass manager, one could do: ``` -opt -load-pass-plugin=libjulia-codegen.so --passes='julia' -o opt.bc unopt.bc -llc -o sys.o opt.bc -cc -shared -o sys.so sys.o +./usr/tools/opt -load-pass-plugin=libjulia-codegen.so --passes='julia' -o opt.bc unopt.bc +./usr/tools/llc -o sys.o opt.bc +./usr/tools/cc -shared -o sys.so sys.o ``` This system image can then be loaded by `julia` as usual. @@ -154,11 +154,15 @@ using: fun, T = +, Tuple{Int,Int} # Substitute your function of interest here optimize = false open("plus.ll", "w") do file - println(file, InteractiveUtils._dump_function(fun, T, false, false, false, true, :att, optimize, :default, false)) + code_llvm(file, fun, T; raw=true, dump_module=true, optimize) end ``` These files can be processed the same way as the unoptimized sysimg IR shown -above. +above, or if you want to see the LLVM IR yourself and get extra verification run, you can use +``` +./usr/tools/opt -load-pass-plugin=libjulia-codegen.so --passes='julia' -S -verify-each plus.ll +``` +(note on MacOS this would be `libjulia-codegen.dylib` and on Windows `libjulia-codegen.dll`) ## Running the LLVM test suite From d749f0eb444dfead2016d6ab0fb32e4ae1846792 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 15 Oct 2024 09:24:07 +0530 Subject: [PATCH 201/537] Fix zero elements for block-matrix kron involving Diagonal (#55941) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, it's assumed that the zero element is identical for the matrix, but this is not necessary if the elements are matrices themselves and have different sizes. This PR ensures that `kron` for a `Diagonal` has the correct zero elements. Current: ```julia julia> D = Diagonal(1:2) 2×2 Diagonal{Int64, UnitRange{Int64}}: 1 ⋅ ⋅ 2 julia> B = reshape([ones(2,2), ones(3,2), ones(2,3), ones(3,3)], 2, 2); julia> size.(kron(D, B)) 4×4 Matrix{Tuple{Int64, Int64}}: (2, 2) (2, 3) (2, 2) (2, 2) (3, 2) (3, 3) (2, 2) (2, 2) (2, 2) (2, 2) (2, 2) (2, 3) (2, 2) (2, 2) (3, 2) (3, 3) ``` This PR ```julia julia> size.(kron(D, B)) 4×4 Matrix{Tuple{Int64, Int64}}: (2, 2) (2, 3) (2, 2) (2, 3) (3, 2) (3, 3) (3, 2) (3, 3) (2, 2) (2, 3) (2, 2) (2, 3) (3, 2) (3, 3) (3, 2) (3, 3) ``` Note the differences e.g. in the `CartesianIndex(4,1)`, `CartesianIndex(3,2)` and `CartesianIndex(3,3)` elements. 
--- stdlib/LinearAlgebra/src/diagonal.jl | 70 ++++++++++++++++++++++++--- stdlib/LinearAlgebra/test/diagonal.jl | 10 ++++ 2 files changed, 73 insertions(+), 7 deletions(-) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index aabfb3e8ba114..17ff232f5b262 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -686,16 +686,33 @@ for Tri in (:UpperTriangular, :LowerTriangular) end @inline function kron!(C::AbstractMatrix, A::Diagonal, B::Diagonal) - valA = A.diag; nA = length(valA) - valB = B.diag; nB = length(valB) + valA = A.diag; mA, nA = size(A) + valB = B.diag; mB, nB = size(B) nC = checksquare(C) @boundscheck nC == nA*nB || throw(DimensionMismatch(lazy"expect C to be a $(nA*nB)x$(nA*nB) matrix, got size $(nC)x$(nC)")) - isempty(A) || isempty(B) || fill!(C, zero(A[1,1] * B[1,1])) + zerofilled = false + if !(isempty(A) || isempty(B)) + z = A[1,1] * B[1,1] + if haszero(typeof(z)) + # in this case, the zero is unique + fill!(C, zero(z)) + zerofilled = true + end + end @inbounds for i = 1:nA, j = 1:nB idx = (i-1)*nB+j C[idx, idx] = valA[i] * valB[j] end + if !zerofilled + for j in 1:nA, i in 1:mA + Δrow, Δcol = (i-1)*mB, (j-1)*nB + for k in 1:nB, l in 1:mB + i == j && k == l && continue + C[Δrow + l, Δcol + k] = A[i,j] * B[l,k] + end + end + end return C end @@ -722,7 +739,15 @@ end (mC, nC) = size(C) @boundscheck (mC, nC) == (mA * mB, nA * nB) || throw(DimensionMismatch(lazy"expect C to be a $(mA * mB)x$(nA * nB) matrix, got size $(mC)x$(nC)")) - isempty(A) || isempty(B) || fill!(C, zero(A[1,1] * B[1,1])) + zerofilled = false + if !(isempty(A) || isempty(B)) + z = A[1,1] * B[1,1] + if haszero(typeof(z)) + # in this case, the zero is unique + fill!(C, zero(z)) + zerofilled = true + end + end m = 1 @inbounds for j = 1:nA A_jj = A[j,j] @@ -733,6 +758,18 @@ end end m += (nA - 1) * mB end + if !zerofilled + # populate the zero elements + for i in 1:mA + i == j && continue + A_ij = A[i, j] + Δrow, Δcol = (i-1)*mB, (j-1)*nB + for k in 1:nB, l in 1:nA + B_lk = B[l, k] + C[Δrow + l, Δcol + k] = A_ij * B_lk + end + end + end m += mB end return C @@ -745,17 +782,36 @@ end (mC, nC) = size(C) @boundscheck (mC, nC) == (mA * mB, nA * nB) || throw(DimensionMismatch(lazy"expect C to be a $(mA * mB)x$(nA * nB) matrix, got size $(mC)x$(nC)")) - isempty(A) || isempty(B) || fill!(C, zero(A[1,1] * B[1,1])) + zerofilled = false + if !(isempty(A) || isempty(B)) + z = A[1,1] * B[1,1] + if haszero(typeof(z)) + # in this case, the zero is unique + fill!(C, zero(z)) + zerofilled = true + end + end m = 1 @inbounds for j = 1:nA for l = 1:mB Bll = B[l,l] - for k = 1:mA - C[m] = A[k,j] * Bll + for i = 1:mA + C[m] = A[i,j] * Bll m += nB end m += 1 end + if !zerofilled + for i in 1:mA + A_ij = A[i, j] + Δrow, Δcol = (i-1)*mB, (j-1)*nB + for k in 1:nB, l in 1:mB + l == k && continue + B_lk = B[l, k] + C[Δrow + l, Δcol + k] = A_ij * B_lk + end + end + end m -= nB end return C diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index f7a9ccb705de9..8b56ee15e56e3 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -1391,4 +1391,14 @@ end @test checkbounds(Bool, D, diagind(D, IndexCartesian())) end +@testset "zeros in kron with block matrices" begin + D = Diagonal(1:2) + B = reshape([ones(2,2), ones(3,2), ones(2,3), ones(3,3)], 2, 2) + @test kron(D, B) == kron(Array(D), B) + @test kron(B, D) == kron(B, Array(D)) + D2 = Diagonal([ones(2,2), ones(3,3)]) + @test 
kron(D, D2) == kron(D, Array{eltype(D2)}(D2)) + @test kron(D2, D) == kron(Array{eltype(D2)}(D2), D) +end + end # module TestDiagonal From 0af99e641a4329b57e48a314e2cedb592e02cd3b Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 15 Oct 2024 09:25:56 +0530 Subject: [PATCH 202/537] Call `MulAddMul` instead of multiplication in _generic_matmatmul! (#56089) Fix https://github.com/JuliaLang/julia/issues/56085 by calling a newly created `MulAddMul` object that only wraps the `alpha` (with `beta` set to `false`). This avoids the explicit multiplication if `alpha` is known to be `isone`. --- stdlib/LinearAlgebra/src/matmul.jl | 6 ++++-- stdlib/LinearAlgebra/test/matmul.jl | 18 ++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl index b70f7d47b28dd..02ecd74152531 100644 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -919,7 +919,7 @@ Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::A _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), MulAddMul(α, β)) @noinline function _generic_matmatmul!(C::AbstractVecOrMat{R}, A::AbstractVecOrMat{T}, B::AbstractVecOrMat{S}, - _add::MulAddMul) where {T,S,R} + _add::MulAddMul{ais1}) where {T,S,R,ais1} AxM = axes(A, 1) AxK = axes(A, 2) # we use two `axes` calls in case of `AbstractVector` BxK = axes(B, 1) @@ -935,11 +935,13 @@ Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::A if BxN != CxN throw(DimensionMismatch(lazy"matrix B has axes ($BxK,$BxN), matrix C has axes ($CxM,$CxN)")) end + _rmul_alpha = MulAddMul{ais1,true,typeof(_add.alpha),Bool}(_add.alpha,false) if isbitstype(R) && sizeof(R) ≤ 16 && !(A isa Adjoint || A isa Transpose) _rmul_or_fill!(C, _add.beta) (iszero(_add.alpha) || isempty(A) || isempty(B)) && return C @inbounds for n in BxN, k in BxK - Balpha = B[k,n]*_add.alpha + # Balpha = B[k,n] * alpha, but we skip the multiplication in case isone(alpha) + Balpha = _rmul_alpha(B[k,n]) @simd for m in AxM C[m,n] = muladd(A[m,k], Balpha, C[m,n]) end diff --git a/stdlib/LinearAlgebra/test/matmul.jl b/stdlib/LinearAlgebra/test/matmul.jl index 4c79451ebfc8b..0d1e2776d2bb3 100644 --- a/stdlib/LinearAlgebra/test/matmul.jl +++ b/stdlib/LinearAlgebra/test/matmul.jl @@ -1130,4 +1130,22 @@ end @test a * transpose(B) ≈ A * transpose(B) end +@testset "issue #56085" begin + struct Thing + data::Float64 + end + + Base.zero(::Type{Thing}) = Thing(0.) + Base.zero(::Thing) = Thing(0.) + Base.one(::Type{Thing}) = Thing(1.) + Base.one(::Thing) = Thing(1.) + Base.:+(t::Thing...) = +(getfield.(t, :data)...) + Base.:*(t::Thing...) = *(getfield.(t, :data)...) + + M = Float64[1 2; 3 4] + A = Thing.(M) + + @test A * A ≈ M * M +end + end # module TestMatmul From fe82988327183f90ddccb3f7e270a88c9e9c2021 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Tue, 15 Oct 2024 14:27:10 +0900 Subject: [PATCH 203/537] improve `allunique`'s type stability (#56161) Caught by https://github.com/aviatesk/JET.jl/issues/667. 
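For reference, the assertions below work because `Iterators.peel` returns `nothing` for an empty iterator, so its result is inferred as a small `Union`; on this branch emptiness has already been ruled out, and the assertion lets inference drop the `Nothing` component. Roughly (an illustrative sketch of the pattern, not taken verbatim from the change):
```julia
xs = rand(100)
# Without the assertion, destructuring a `Union{Nothing, Tuple{...}}` result
# propagates the union through the rest of the method; with it, inference
# sees a concrete 2-tuple on this non-empty path.
a1, rest1 = Iterators.peel(xs)::Tuple{Any,Any}
a2, rest  = Iterators.peel(rest1)::Tuple{Any,Any}
```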
--- base/set.jl | 4 ++-- test/sets.jl | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/base/set.jl b/base/set.jl index 2f96cef626b6f..d1f9458039cd4 100644 --- a/base/set.jl +++ b/base/set.jl @@ -549,8 +549,8 @@ function allunique(A::StridedArray) if length(A) < 32 _indexed_allunique(A) elseif OrderStyle(eltype(A)) === Ordered() - a1, rest1 = Iterators.peel(A) - a2, rest = Iterators.peel(rest1) + a1, rest1 = Iterators.peel(A)::Tuple{Any,Any} + a2, rest = Iterators.peel(rest1)::Tuple{Any,Any} if !isequal(a1, a2) compare = isless(a1, a2) ? isless : (a,b) -> isless(b,a) for a in rest diff --git a/test/sets.jl b/test/sets.jl index 4ab360c9fedd4..b78d2f15dd989 100644 --- a/test/sets.jl +++ b/test/sets.jl @@ -644,6 +644,7 @@ end @test !allunique((NaN, NaN)) # Known length 1, need not evaluate: @test allunique(error(x) for x in [1]) + # @test_opt allunique(Int[]) end @testset "allunique(f, xs)" begin From 9223088faefd9680d8217b44d0bf82e478a311c1 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Tue, 15 Oct 2024 03:07:19 -0400 Subject: [PATCH 204/537] Add invalidation barriers for `displaysize` and `implicit_typeinfo` (#56159) These are invalidated by our own stdlibs (Dates and REPL) unfortunately so we need to put this barrier in. This fix is _very_ un-satisfying, because it doesn't do anything to solve this problem for downstream libraries that use e.g. `displaysize`. To fix that, I think we need a way to make sure callers get these invalidation barriers by default... --- base/arrayshow.jl | 16 +++++++++++----- base/logging/ConsoleLogger.jl | 2 +- base/precompilation.jl | 4 ++-- base/show.jl | 4 ++-- base/stream.jl | 7 +++++++ 5 files changed, 23 insertions(+), 10 deletions(-) diff --git a/base/arrayshow.jl b/base/arrayshow.jl index 164a9257d8412..3bc69e563a967 100644 --- a/base/arrayshow.jl +++ b/base/arrayshow.jl @@ -545,6 +545,12 @@ typeinfo_eltype(typeinfo::Type{<:AbstractArray{T}}) where {T} = eltype(typeinfo) typeinfo_eltype(typeinfo::Type{<:AbstractDict{K,V}}) where {K,V} = eltype(typeinfo) typeinfo_eltype(typeinfo::Type{<:AbstractSet{T}}) where {T} = eltype(typeinfo) +# This is a fancy way to make de-specialize a call to `typeinfo_implicit(T)` +# which is unfortunately invalidated by Dates +# (https://github.com/JuliaLang/julia/issues/56080) +# +# This makes the call less efficient, but avoids being invalidated by Dates. +_typeinfo_implicit(@nospecialize(T)) = Base.invoke_in_world(Base.tls_world_age(), typeinfo_implicit, T)::Bool # types that can be parsed back accurately from their un-decorated representations function typeinfo_implicit(@nospecialize(T)) @@ -553,9 +559,9 @@ function typeinfo_implicit(@nospecialize(T)) return true end return isconcretetype(T) && - ((T <: Array && typeinfo_implicit(eltype(T))) || - ((T <: Tuple || T <: NamedTuple || T <: Pair) && all(typeinfo_implicit, fieldtypes(T))) || - (T <: AbstractDict && typeinfo_implicit(keytype(T)) && typeinfo_implicit(valtype(T)))) + ((T <: Array && _typeinfo_implicit(eltype(T))) || + ((T <: Tuple || T <: NamedTuple || T <: Pair) && all(_typeinfo_implicit, fieldtypes(T))) || + (T <: AbstractDict && _typeinfo_implicit(keytype(T)) && _typeinfo_implicit(valtype(T)))) end # X not constrained, can be any iterable (cf. 
show_vector) @@ -573,7 +579,7 @@ function typeinfo_prefix(io::IO, X) if X isa AbstractDict if eltype_X == eltype_ctx sprint(show_type_name, typeof(X).name; context=io), false - elseif !isempty(X) && typeinfo_implicit(keytype(X)) && typeinfo_implicit(valtype(X)) + elseif !isempty(X) && _typeinfo_implicit(keytype(X)) && _typeinfo_implicit(valtype(X)) sprint(show_type_name, typeof(X).name; context=io), true else sprint(print, typeof(X); context=io), false @@ -582,7 +588,7 @@ function typeinfo_prefix(io::IO, X) # Types hard-coded here are those which are created by default for a given syntax if eltype_X == eltype_ctx "", false - elseif !isempty(X) && typeinfo_implicit(eltype_X) + elseif !isempty(X) && _typeinfo_implicit(eltype_X) "", true elseif print_without_params(eltype_X) sprint(show_type_name, unwrap_unionall(eltype_X).name; context=io), false # Print "Array" rather than "Array{T,N}" diff --git a/base/logging/ConsoleLogger.jl b/base/logging/ConsoleLogger.jl index c4596dd86c3f5..818b2272b773c 100644 --- a/base/logging/ConsoleLogger.jl +++ b/base/logging/ConsoleLogger.jl @@ -130,7 +130,7 @@ function handle_message(logger::ConsoleLogger, level::LogLevel, message, _module if !(isopen(stream)::Bool) stream = stderr end - dsize = displaysize(stream)::Tuple{Int,Int} + dsize = Base.displaysize_(stream)::Tuple{Int,Int} nkwargs = length(kwargs)::Int if nkwargs > hasmaxlog valbuf = IOBuffer() diff --git a/base/precompilation.jl b/base/precompilation.jl index 7a821222c52d1..4b7da84a17d55 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -672,7 +672,7 @@ function precompilepkgs(pkgs::Vector{String}=String[]; n_print_rows = 0 while !printloop_should_exit lock(print_lock) do - term_size = Base.displaysize(io)::Tuple{Int,Int} + term_size = Base.displaysize_(io) num_deps_show = max(term_size[1] - 3, 2) # show at least 2 deps pkg_queue_show = if !interrupted_or_done.set && length(pkg_queue) > num_deps_show last(pkg_queue, num_deps_show) @@ -687,7 +687,7 @@ function precompilepkgs(pkgs::Vector{String}=String[]; bar.max = n_total - n_already_precomp # when sizing to the terminal width subtract a little to give some tolerance to resizing the # window between print cycles - termwidth = displaysize(io)[2] - 4 + termwidth = Base.displaysize_(io)[2] - 4 if !final_loop str = sprint(io -> show_progress(io, bar; termwidth, carriagereturn=false); context=io) print(iostr, Base._truncate_at_width_or_chars(true, str, termwidth), "\n") diff --git a/base/show.jl b/base/show.jl index 66560265e3b42..a147c2037d70e 100644 --- a/base/show.jl +++ b/base/show.jl @@ -427,7 +427,7 @@ get(io::IO, key, default) = default keys(io::IOContext) = keys(io.dict) keys(io::IO) = keys(ImmutableDict{Symbol,Any}()) -displaysize(io::IOContext) = haskey(io, :displaysize) ? io[:displaysize]::Tuple{Int,Int} : displaysize(io.io) +displaysize(io::IOContext) = haskey(io, :displaysize) ? 
io[:displaysize]::Tuple{Int,Int} : Base.displaysize_(io.io) show_circular(io::IO, @nospecialize(x)) = false function show_circular(io::IOContext, @nospecialize(x)) @@ -2622,7 +2622,7 @@ end function type_limited_string_from_context(out::IO, str::String) typelimitflag = get(out, :stacktrace_types_limited, nothing) if typelimitflag isa RefValue{Bool} - sz = get(out, :displaysize, displaysize(out))::Tuple{Int, Int} + sz = get(out, :displaysize, Base.displaysize_(out))::Tuple{Int, Int} str_lim = type_depth_limit(str, max(sz[2], 120)) if sizeof(str_lim) < sizeof(str) typelimitflag[] = true diff --git a/base/stream.jl b/base/stream.jl index 93aeead79eb9c..3ca5717be29db 100644 --- a/base/stream.jl +++ b/base/stream.jl @@ -569,6 +569,13 @@ displaysize(io::IO) = displaysize() displaysize() = (parse(Int, get(ENV, "LINES", "24")), parse(Int, get(ENV, "COLUMNS", "80")))::Tuple{Int, Int} +# This is a fancy way to make de-specialize a call to `displaysize(io::IO)` +# which is unfortunately invalidated by REPL +# (https://github.com/JuliaLang/julia/issues/56080) +# +# This makes the call less efficient, but avoids being invalidated by REPL. +displaysize_(io::IO) = Base.invoke_in_world(Base.tls_world_age(), displaysize, io)::Tuple{Int,Int} + function displaysize(io::TTY) check_open(io) From 9f92989274640b14d4d0015107e501e7252344b2 Mon Sep 17 00:00:00 2001 From: abhro <5664668+abhro@users.noreply.github.com> Date: Tue, 15 Oct 2024 03:30:52 -0400 Subject: [PATCH 205/537] Fix markdown list in installation.md (#56165) Documenter.jl requires all trailing list content to follow the same indentation as the header. So, in the current view (https://docs.julialang.org/en/v1/manual/installation/#Command-line-arguments) the list appears broken. --- doc/src/manual/installation.md | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/doc/src/manual/installation.md b/doc/src/manual/installation.md index 07acfd1c62681..f45aba2c37a28 100644 --- a/doc/src/manual/installation.md +++ b/doc/src/manual/installation.md @@ -44,21 +44,21 @@ curl -fsSL https://install.julialang.org | sh -s -- Here `` should be replaced with one or more of the following arguments: - `--yes` (or `-y`): Run the installer in a non-interactive mode. All -configuration values use their default or a value supplied as a command line -argument. + configuration values use their default or a value supplied as a command line + argument. - `--default-channel=`: Configure the default Juliaup channel. For -example `--default-channel lts` would install the `lts` channel and configure it -as the default. + example `--default-channel lts` would install the `lts` channel and configure it + as the default. - `--add-to-path=`: Configure whether Julia should be added to the `PATH` -environment variable. Valid values are `yes` (default) and `no`. + environment variable. Valid values are `yes` (default) and `no`. - `--background-selfupdate=`: Configure an optional CRON job that -auto-updates Juliaup if `` has a value larger than 0. The actual value -controls how often the CRON job will run to check for a new Juliaup version in -seconds. The default value is 0, i.e. no CRON job will be created. + auto-updates Juliaup if `` has a value larger than 0. The actual value + controls how often the CRON job will run to check for a new Juliaup version in + seconds. The default value is 0, i.e. no CRON job will be created. - `--startup-selfupdate=`: Configure how often Julia will check for new -versions of Juliaup when Julia is started. 
The default is every 1440 minutes. + versions of Juliaup when Julia is started. The default is every 1440 minutes. - `-p=` (or `--path`): Configure where the Julia and Juliaup binaries are -installed. The default is `~/.juliaup`. + installed. The default is `~/.juliaup`. ## Alternative installation methods From d09abe55f1c979df347fa682037c4f68f69b48c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Tue, 15 Oct 2024 12:57:39 +0100 Subject: [PATCH 206/537] [Random] Add more comments and a helper function in Xoshiro code (#56144) Follow up to #55994 and #55997. This should basically be a non-functional change and I see no performance difference, but the comments and the definition of a helper function should make the code easier to follow (I initially struggled in #55997) and extend to other types. --- stdlib/Random/src/Xoshiro.jl | 21 +++++++++++++-------- stdlib/Random/src/XoshiroSimd.jl | 21 +++++++++++++-------- 2 files changed, 26 insertions(+), 16 deletions(-) diff --git a/stdlib/Random/src/Xoshiro.jl b/stdlib/Random/src/Xoshiro.jl index 09a3e386e9a2b..94c7e1ab24e1d 100644 --- a/stdlib/Random/src/Xoshiro.jl +++ b/stdlib/Random/src/Xoshiro.jl @@ -296,11 +296,16 @@ rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{UInt52Raw{UInt64}}) = ran rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{UInt52{UInt64}}) = rand(r, UInt64) >>> 12 rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{UInt104{UInt128}}) = rand(r, UInt104Raw()) -rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{CloseOpen01{Float16}}) = - Float16(rand(r, UInt16) >>> 5) * Float16(0x1.0p-11) - -rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{CloseOpen01{Float32}}) = - Float32(rand(r, UInt32) >>> 8) * Float32(0x1.0p-24) - -rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{CloseOpen01_64}) = - Float64(rand(r, UInt64) >>> 11) * 0x1.0p-53 +for FT in (Float16, Float32, Float64) + UT = Base.uinttype(FT) + # Helper function: scale an unsigned integer to a floating point number of the same size + # in the interval [0, 1). This is equivalent to, but more easily extensible than + # Float16(i >>> 5) * Float16(0x1.0p-11) + # Float32(i >>> 8) * Float32(0x1.0p-24) + # Float32(i >>> 11) * Float64(0x1.0p-53) + @eval @inline _uint2float(i::$(UT), ::Type{$(FT)}) = + $(FT)(i >>> $(8 * sizeof(FT) - precision(FT))) * $(FT(2) ^ -precision(FT)) + + @eval rand(r::Union{TaskLocalRNG, Xoshiro}, ::SamplerTrivial{CloseOpen01{$(FT)}}) = + _uint2float(rand(r, $(UT)), $(FT)) +end diff --git a/stdlib/Random/src/XoshiroSimd.jl b/stdlib/Random/src/XoshiroSimd.jl index 1c5f8306cc302..58544714dd9f5 100644 --- a/stdlib/Random/src/XoshiroSimd.jl +++ b/stdlib/Random/src/XoshiroSimd.jl @@ -3,7 +3,7 @@ module XoshiroSimd # Getting the xoroshiro RNG to reliably vectorize is somewhat of a hassle without Simd.jl. import ..Random: rand! -using ..Random: TaskLocalRNG, rand, Xoshiro, CloseOpen01, UnsafeView, SamplerType, SamplerTrivial, getstate, setstate! 
+using ..Random: TaskLocalRNG, rand, Xoshiro, CloseOpen01, UnsafeView, SamplerType, SamplerTrivial, getstate, setstate!, _uint2float using Base: BitInteger_types using Base.Libc: memcpy using Core.Intrinsics: llvmcall @@ -30,7 +30,12 @@ simdThreshold(::Type{Bool}) = 640 Tuple{UInt64, Int64}, x, y) -@inline _bits2float(x::UInt64, ::Type{Float64}) = reinterpret(UInt64, Float64(x >>> 11) * 0x1.0p-53) +# `_bits2float(x::UInt64, T)` takes `x::UInt64` as input, it splits it in `N` parts where +# `N = sizeof(UInt64) / sizeof(T)` (`N = 1` for `Float64`, `N = 2` for `Float32, etc...), it +# truncates each part to the unsigned type of the same size as `T`, scales all of these +# numbers to a value of type `T` in the range [0,1) with `_uint2float`, and then +# recomposes another `UInt64` using all these parts. +@inline _bits2float(x::UInt64, ::Type{Float64}) = reinterpret(UInt64, _uint2float(x, Float64)) @inline function _bits2float(x::UInt64, ::Type{Float32}) #= # this implementation uses more high bits, but is harder to vectorize @@ -40,8 +45,8 @@ simdThreshold(::Type{Bool}) = 640 =# ui = (x>>>32) % UInt32 li = x % UInt32 - u = Float32(ui >>> 8) * Float32(0x1.0p-24) - l = Float32(li >>> 8) * Float32(0x1.0p-24) + u = _uint2float(ui, Float32) + l = _uint2float(ui, Float32) (UInt64(reinterpret(UInt32, u)) << 32) | UInt64(reinterpret(UInt32, l)) end @inline function _bits2float(x::UInt64, ::Type{Float16}) @@ -49,10 +54,10 @@ end i2 = (x>>>32) % UInt16 i3 = (x>>>16) % UInt16 i4 = x % UInt16 - f1 = Float16(i1 >>> 5) * Float16(0x1.0p-11) - f2 = Float16(i2 >>> 5) * Float16(0x1.0p-11) - f3 = Float16(i3 >>> 5) * Float16(0x1.0p-11) - f4 = Float16(i4 >>> 5) * Float16(0x1.0p-11) + f1 = _uint2float(i1, Float16) + f2 = _uint2float(i2, Float16) + f3 = _uint2float(i3, Float16) + f4 = _uint2float(i4, Float16) return (UInt64(reinterpret(UInt16, f1)) << 48) | (UInt64(reinterpret(UInt16, f2)) << 32) | (UInt64(reinterpret(UInt16, f3)) << 16) | UInt64(reinterpret(UInt16, f4)) end From 8a18f27e24b3fe83815f81fdbe97d6e77f1802df Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 26 Aug 2024 12:59:45 -0400 Subject: [PATCH 207/537] add objects to concisely specify initialization PerProcess: once per process PerThread: once per thread id PerTask: once per task object --- NEWS.md | 6 + base/docs/basedocs.jl | 2 + base/exports.jl | 3 + base/lock.jl | 252 +++++++++++++++++++++++++++++++++++++++++- doc/src/base/base.md | 3 + test/precompile.jl | 21 ++++ test/threads.jl | 66 +++++++++++ 7 files changed, 352 insertions(+), 1 deletion(-) diff --git a/NEWS.md b/NEWS.md index 9aebf5d42d954..724d0793e67cd 100644 --- a/NEWS.md +++ b/NEWS.md @@ -68,6 +68,12 @@ variables. ([#53742]). Multi-threading changes ----------------------- +* New types are defined to handle the pattern of code that must run once per process, called + a `PerProcess{T}` type, which allows defining a function that should be run exactly once + the first time it is called, and then always return the same result value of type `T` + every subsequent time afterwards. There are also `PerThread{T}` and `PerTask{T}` types for + similar usage with threads or tasks. ([#TBD]) + Build system changes -------------------- diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index a142ecffdb732..f93d9a5ba0647 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -153,6 +153,8 @@ runtime initialization functions of external C libraries and initializing global that involve pointers returned by external libraries. 
See the [manual section about modules](@ref modules) for more details. +See also: [`PerProcess`](@ref). + # Examples ```julia const foo_data_ptr = Ref{Ptr{Cvoid}}(0) diff --git a/base/exports.jl b/base/exports.jl index daba9a010a9e6..66de141c228b6 100644 --- a/base/exports.jl +++ b/base/exports.jl @@ -70,6 +70,9 @@ export OrdinalRange, Pair, PartialQuickSort, + PerProcess, + PerTask, + PerThread, PermutedDimsArray, QuickSort, Rational, diff --git a/base/lock.jl b/base/lock.jl index b473045e5809d..80ab2b3bb9b42 100644 --- a/base/lock.jl +++ b/base/lock.jl @@ -500,7 +500,7 @@ Create a level-triggered event source. Tasks that call [`wait`](@ref) on an After `notify` is called, the `Event` remains in a signaled state and tasks will no longer block when waiting for it, until `reset` is called. -If `autoreset` is true, at most one task will be released from `wait` for +If `autoreset` is true, at most one task will be released from `wait` for) each call to `notify`. This provides an acquire & release memory ordering on notify/wait. @@ -570,3 +570,253 @@ end import .Base: Event export Event end + + +""" + PerProcess{T} + +Calling a `PerProcess` object returns a value of type `T` by running the +function `initializer` exactly once per process. All concurrent and future +calls in the same process will return exactly the same value. This is useful in +code that will be precompiled, as it allows setting up caches or other state +which won't get serialized. + +## Example + +```jldoctest +julia> const global_state = Base.PerProcess{Vector{UInt32}}() do + println("Making lazy global value...done.") + return [Libc.rand()] + end; + +julia> procstate = global_state(); +Making lazy global value...done. + +julia> procstate === global_state() +true + +julia> procstate === fetch(@async global_state()) +true +``` +""" +mutable struct PerProcess{T, F} + x::Union{Nothing,T} + @atomic state::UInt8 # 0=initial, 1=hasrun, 2=error + @atomic allow_compile_time::Bool + const initializer::F + const lock::ReentrantLock + + PerProcess{T}(initializer::F) where {T, F} = new{T,F}(nothing, 0x00, true, initializer, ReentrantLock()) + PerProcess{T,F}(initializer::F) where {T, F} = new{T,F}(nothing, 0x00, true, initializer, ReentrantLock()) + PerProcess(initializer) = new{Base.promote_op(initializer), typeof(initializer)}(nothing, 0x00, true, initializer, ReentrantLock()) +end +@inline function (once::PerProcess{T})() where T + state = (@atomic :acquire once.state) + if state != 0x01 + (@noinline function init_perprocesss(once, state) + state == 0x02 && error("PerProcess initializer failed previously") + Base.__precompile__(once.allow_compile_time) + lock(once.lock) + try + state = @atomic :monotonic once.state + if state == 0x00 + once.x = once.initializer() + elseif state == 0x02 + error("PerProcess initializer failed previously") + elseif state != 0x01 + error("invalid state for PerProcess") + end + catch + state == 0x02 || @atomic :release once.state = 0x02 + unlock(once.lock) + rethrow() + end + state == 0x01 || @atomic :release once.state = 0x01 + unlock(once.lock) + nothing + end)(once, state) + end + return once.x::T +end + +function copyto_monotonic!(dest::AtomicMemory, src) + i = 1 + for j in eachindex(src) + if isassigned(src, j) + @atomic :monotonic dest[i] = src[j] + end + i += 1 + end + dest +end + +function fill_monotonic!(dest::AtomicMemory, x) + for i = 1:length(dest) + @atomic :monotonic dest[i] = x + end + dest +end + + +# share a lock, since we just need it briefly, so some contention is okay +const 
PerThreadLock = ThreadSynchronizer() +""" + PerThread{T} + +Calling a `PerThread` object returns a value of type `T` by running the function +`initializer` exactly once per thread. All future calls in the same thread, and +concurrent or future calls with the same thread id, will return exactly the +same value. The object can also be indexed by the threadid for any existing +thread, to get (or initialize *on this thread*) the value stored for that +thread. Incorrect usage can lead to data-races or memory corruption so use only +if that behavior is correct within your library's threading-safety design. + +Warning: it is not necessarily true that a Task only runs on one thread, therefore the value +returned here may alias other values or change in the middle of your program. This type may +get deprecated in the future. If initializer yields, the thread running the current task +after the call might not be the same as the one at the start of the call. + +See also: [`PerTask`](@ref). + +## Example + +```jldoctest +julia> const thread_state = Base.PerThread{Vector{UInt32}}() do + println("Making lazy thread value...done.") + return [Libc.rand()] + end; + +julia> threadvec = thread_state(); +Making lazy thread value...done. + +julia> threadvec === fetch(@async thread_state()) +true + +julia> threadvec === thread_state[Threads.threadid()] +true +``` +""" +mutable struct PerThread{T, F} + @atomic xs::AtomicMemory{T} # values + @atomic ss::AtomicMemory{UInt8} # states: 0=initial, 1=hasrun, 2=error, 3==concurrent + const initializer::F + + PerThread{T}(initializer::F) where {T, F} = new{T,F}(AtomicMemory{T}(), AtomicMemory{UInt8}(), initializer) + PerThread{T,F}(initializer::F) where {T, F} = new{T,F}(AtomicMemory{T}(), AtomicMemory{UInt8}(), initializer) + PerThread(initializer) = (T = Base.promote_op(initializer); new{T, typeof(initializer)}(AtomicMemory{T}(), AtomicMemory{UInt8}(), initializer)) +end +@inline function getindex(once::PerThread, tid::Integer) + tid = Int(tid) + ss = @atomic :acquire once.ss + xs = @atomic :monotonic once.xs + # n.b. 
length(xs) >= length(ss) + if tid > length(ss) || (@atomic :acquire ss[tid]) != 0x01 + (@noinline function init_perthread(once, tid) + local xs = @atomic :acquire once.xs + local ss = @atomic :monotonic once.ss + local len = length(ss) + # slow path to allocate it + nt = Threads.maxthreadid() + 0 < tid <= nt || ArgumentError("thread id outside of allocated range") + if tid <= length(ss) && (@atomic :acquire ss[tid]) == 0x02 + error("PerThread initializer failed previously") + end + newxs = xs + newss = ss + if tid > len + # attempt to do all allocations outside of PerThreadLock for better scaling + @assert length(xs) == length(ss) "logical constraint violation" + newxs = typeof(xs)(undef, len + nt) + newss = typeof(ss)(undef, len + nt) + end + # uses state and locks to ensure this runs exactly once per tid argument + lock(PerThreadLock) + try + ss = @atomic :monotonic once.ss + xs = @atomic :monotonic once.xs + if tid > length(ss) + @assert length(ss) >= len && newxs !== xs && newss != ss "logical constraint violation" + fill_monotonic!(newss, 0x00) + xs = copyto_monotonic!(newxs, xs) + ss = copyto_monotonic!(newss, ss) + @atomic :release once.xs = xs + @atomic :release once.ss = ss + end + state = @atomic :monotonic ss[tid] + while state == 0x04 + # lost race, wait for notification this is done running elsewhere + wait(PerThreadLock) # wait for initializer to finish without releasing this thread + ss = @atomic :monotonic once.ss + state = @atomic :monotonic ss[tid] == 0x04 + end + if state == 0x00 + # won the race, drop lock in exchange for state, and run user initializer + @atomic :monotonic ss[tid] = 0x04 + result = try + unlock(PerThreadLock) + once.initializer() + catch + lock(PerThreadLock) + ss = @atomic :monotonic once.ss + @atomic :release ss[tid] = 0x02 + notify(PerThreadLock) + rethrow() + end + # store result and notify waiters + lock(PerThreadLock) + xs = @atomic :monotonic once.xs + @atomic :release xs[tid] = result + ss = @atomic :monotonic once.ss + @atomic :release ss[tid] = 0x01 + notify(PerThreadLock) + elseif state == 0x02 + error("PerThread initializer failed previously") + elseif state != 0x01 + error("invalid state for PerThread") + end + finally + unlock(PerThreadLock) + end + nothing + end)(once, tid) + xs = @atomic :monotonic once.xs + end + return xs[tid] +end +@inline (once::PerThread)() = once[Threads.threadid()] + +""" + PerTask{T} + +Calling a `PerTask` object returns a value of type `T` by running the function `initializer` +exactly once per Task. All future calls in the same Task will return exactly the same value. + +See also: [`task_local_storage`](@ref). + +## Example + +```jldoctest +julia> const task_state = Base.PerTask{Vector{UInt32}}() do + println("Making lazy task value...done.") + return [Libc.rand()] + end; + +julia> taskvec = task_state(); +Making lazy task value...done. + +julia> taskvec === task_state() +true + +julia> taskvec === fetch(@async task_state()) +Making lazy task value...done. 
+false +``` +""" +mutable struct PerTask{T, F} + const initializer::F + + PerTask{T}(initializer::F) where {T, F} = new{T,F}(initializer) + PerTask{T,F}(initializer::F) where {T, F} = new{T,F}(initializer) + PerTask(initializer) = new{Base.promote_op(initializer), typeof(initializer)}(initializer) +end +@inline (once::PerTask)() = get!(once.initializer, task_local_storage(), once) diff --git a/doc/src/base/base.md b/doc/src/base/base.md index b5d50a846ce89..b11e985782709 100644 --- a/doc/src/base/base.md +++ b/doc/src/base/base.md @@ -34,6 +34,9 @@ Main.include Base.include_string Base.include_dependency __init__ +Base.PerProcess +Base.PerTask +Base.PerThread Base.which(::Any, ::Any) Base.methods Base.@show diff --git a/test/precompile.jl b/test/precompile.jl index 7a6e41061f9b1..e44771fb6a86f 100644 --- a/test/precompile.jl +++ b/test/precompile.jl @@ -94,6 +94,17 @@ precompile_test_harness(false) do dir end abstract type AbstractAlgebraMap{A} end struct GAPGroupHomomorphism{A, B} <: AbstractAlgebraMap{GAPGroupHomomorphism{B, A}} end + + global process_state_calls::Int = 0 + const process_state = Base.PerProcess{typeof(getpid())}() do + @assert (global process_state_calls += 1) == 1 + return getpid() + end + const mypid = process_state() + @assert process_state_calls === 1 + process_state_calls = 0 + @assert process_state() === process_state() + @assert process_state_calls === 0 end """) write(Foo2_file, @@ -272,6 +283,9 @@ precompile_test_harness(false) do dir oid_vec_int = objectid(a_vec_int) oid_mat_int = objectid(a_mat_int) + + using $FooBase_module: process_state, mypid as FooBase_pid, process_state_calls + const mypid = process_state() end """) # Issue #52063 @@ -333,6 +347,13 @@ precompile_test_harness(false) do dir @test isready(Foo.ch2) @test take!(Foo.ch2) === 2 @test !isready(Foo.ch2) + + @test Foo.process_state_calls === 0 + @test Foo.process_state() === getpid() + @test Foo.mypid !== getpid() + @test Foo.FooBase_pid !== getpid() + @test Foo.mypid !== Foo.FooBase_pid + @test Foo.process_state_calls === 1 end let diff --git a/test/threads.jl b/test/threads.jl index 6265368c2ac79..f1a8aba418412 100644 --- a/test/threads.jl +++ b/test/threads.jl @@ -374,3 +374,69 @@ end end end end + +let once = PerProcess(() -> return [nothing]) + @test typeof(once) <: PerProcess{Vector{Nothing}} + x = once() + @test x === once() + @atomic once.state = 0xff + @test_throws ErrorException("invalid state for PerProcess") once() + @test_throws ErrorException("PerProcess initializer failed previously") once() + @atomic once.state = 0x01 + @test x === once() +end +let once = PerProcess{Int}(() -> error("expected")) + @test_throws ErrorException("expected") once() + @test_throws ErrorException("PerProcess initializer failed previously") once() +end + +let once = PerThread(() -> return [nothing]) + @test typeof(once) <: PerThread{Vector{Nothing}} + x = once() + @test x === once() === fetch(@async once()) + tids = zeros(UInt, 50) + onces = Vector{Vector{Nothing}}(undef, length(tids)) + for i = 1:length(tids) + function cl() + local y = once() + onces[i] = y + @test x !== y === once() + nothing + end + function threadcallclosure(cl::F) where {F} # create sparam so we can reference the type of cl in the ccall type + threadwork = @cfunction cl -> cl() Cvoid (Ref{F},) # create a cfunction that specializes on cl as an argument and calls it + err = @ccall uv_thread_create(Ref(tids, i)::Ptr{UInt}, threadwork::Ptr{Cvoid}, cl::Ref{F})::Cint # call that on a thread + err == 0 || Base.uv_error("uv_thread_create", 
err) + end + threadcallclosure(cl) + end + @noinline function waitallthreads(tids) + for i = 1:length(tids) + tid = Ref(tids, i) + tidp = Base.unsafe_convert(Ptr{UInt}, tid)::Ptr{UInt} + gc_state = @ccall jl_gc_safe_enter()::Int8 + GC.@preserve tid err = @ccall uv_thread_join(tidp::Ptr{UInt})::Cint + @ccall jl_gc_safe_leave(gc_state::Int8)::Cvoid + err == 0 || Base.uv_error("uv_thread_join", err) + end + end + waitallthreads(tids) + @test length(IdSet{eltype(onces)}(onces)) == length(onces) # make sure every object is unique + +end +let once = PerThread{Int}(() -> error("expected")) + @test_throws ErrorException("expected") once() + @test_throws ErrorException("PerThread initializer failed previously") once() +end + +let once = PerTask(() -> return [nothing]) + @test typeof(once) <: PerTask{Vector{Nothing}} + x = once() + @test x === once() !== fetch(@async once()) + delete!(task_local_storage(), once) + @test x !== once() === once() +end +let once = PerTask{Int}(() -> error("expected")) + @test_throws ErrorException("expected") once() + @test_throws ErrorException("expected") once() +end From a66733f0e9ea547eb43a2c7637afb3429471e10b Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 26 Aug 2024 15:46:12 -0400 Subject: [PATCH 208/537] add precompile support for recording fields to change Somewhat generalizes our support for changing Ptr to C_NULL. Not particularly fast, since it is just using the builtins implementation of setfield, and delaying the actual stores, but it should suffice. --- base/lock.jl | 38 ++++++++++++--- base/task.jl | 7 --- src/builtins.c | 2 +- src/gc-stock.c | 2 + src/julia_internal.h | 2 + src/staticdata.c | 111 ++++++++++++++++++++++++++++++++++++++++++- test/threads.jl | 22 ++++++++- 7 files changed, 167 insertions(+), 17 deletions(-) diff --git a/base/lock.jl b/base/lock.jl index 80ab2b3bb9b42..b53607af05a3a 100644 --- a/base/lock.jl +++ b/base/lock.jl @@ -2,6 +2,13 @@ const ThreadSynchronizer = GenericCondition{Threads.SpinLock} +""" + current_task() + +Get the currently running [`Task`](@ref). 
+""" +current_task() = ccall(:jl_get_current_task, Ref{Task}, ()) + # Advisory reentrant lock """ ReentrantLock() @@ -606,16 +613,23 @@ mutable struct PerProcess{T, F} const initializer::F const lock::ReentrantLock - PerProcess{T}(initializer::F) where {T, F} = new{T,F}(nothing, 0x00, true, initializer, ReentrantLock()) - PerProcess{T,F}(initializer::F) where {T, F} = new{T,F}(nothing, 0x00, true, initializer, ReentrantLock()) - PerProcess(initializer) = new{Base.promote_op(initializer), typeof(initializer)}(nothing, 0x00, true, initializer, ReentrantLock()) + function PerProcess{T,F}(initializer::F) where {T, F} + once = new{T,F}(nothing, 0x00, true, initializer, ReentrantLock()) + ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any), + once, :x, nothing) + ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any), + once, :state, 0x00) + return once + end end +PerProcess{T}(initializer::F) where {T, F} = PerProcess{T, F}(initializer) +PerProcess(initializer) = PerProcess{Base.promote_op(initializer), typeof(initializer)}(initializer) @inline function (once::PerProcess{T})() where T state = (@atomic :acquire once.state) if state != 0x01 (@noinline function init_perprocesss(once, state) state == 0x02 && error("PerProcess initializer failed previously") - Base.__precompile__(once.allow_compile_time) + once.allow_compile_time || __precompile__(false) lock(once.lock) try state = @atomic :monotonic once.state @@ -644,6 +658,8 @@ function copyto_monotonic!(dest::AtomicMemory, src) for j in eachindex(src) if isassigned(src, j) @atomic :monotonic dest[i] = src[j] + #else + # _unsafeindex_atomic!(dest, i, src[j], :monotonic) end i += 1 end @@ -701,10 +717,18 @@ mutable struct PerThread{T, F} @atomic ss::AtomicMemory{UInt8} # states: 0=initial, 1=hasrun, 2=error, 3==concurrent const initializer::F - PerThread{T}(initializer::F) where {T, F} = new{T,F}(AtomicMemory{T}(), AtomicMemory{UInt8}(), initializer) - PerThread{T,F}(initializer::F) where {T, F} = new{T,F}(AtomicMemory{T}(), AtomicMemory{UInt8}(), initializer) - PerThread(initializer) = (T = Base.promote_op(initializer); new{T, typeof(initializer)}(AtomicMemory{T}(), AtomicMemory{UInt8}(), initializer)) + function PerThread{T,F}(initializer::F) where {T, F} + xs, ss = AtomicMemory{T}(), AtomicMemory{UInt8}() + once = new{T,F}(xs, ss, initializer) + ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any), + once, :xs, xs) + ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any), + once, :ss, ss) + return once + end end +PerThread{T}(initializer::F) where {T, F} = PerThread{T,F}(initializer) +PerThread(initializer) = PerThread{Base.promote_op(initializer), typeof(initializer)}(initializer) @inline function getindex(once::PerThread, tid::Integer) tid = Int(tid) ss = @atomic :acquire once.ss diff --git a/base/task.jl b/base/task.jl index 6cb1ff785eeee..f3a134f374421 100644 --- a/base/task.jl +++ b/base/task.jl @@ -143,13 +143,6 @@ macro task(ex) :(Task($thunk)) end -""" - current_task() - -Get the currently running [`Task`](@ref). 
-""" -current_task() = ccall(:jl_get_current_task, Ref{Task}, ()) - # task states const task_state_runnable = UInt8(0) diff --git a/src/builtins.c b/src/builtins.c index 96c4cec0f5087..b129cca0ee71d 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -1008,7 +1008,7 @@ static inline size_t get_checked_fieldindex(const char *name, jl_datatype_t *st, else { jl_value_t *ts[2] = {(jl_value_t*)jl_long_type, (jl_value_t*)jl_symbol_type}; jl_value_t *t = jl_type_union(ts, 2); - jl_type_error("getfield", t, arg); + jl_type_error(name, t, arg); } if (mutabl && jl_field_isconst(st, idx)) { jl_errorf("%s: const field .%s of type %s cannot be changed", name, diff --git a/src/gc-stock.c b/src/gc-stock.c index 6b97881909bbd..37c7b4df48218 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -2741,6 +2741,8 @@ static void gc_mark_roots(jl_gc_markqueue_t *mq) gc_heap_snapshot_record_gc_roots((jl_value_t*)jl_global_roots_list, "global_roots_list"); gc_try_claim_and_push(mq, jl_global_roots_keyset, NULL); gc_heap_snapshot_record_gc_roots((jl_value_t*)jl_global_roots_keyset, "global_roots_keyset"); + gc_try_claim_and_push(mq, precompile_field_replace, NULL); + gc_heap_snapshot_record_gc_roots((jl_value_t*)precompile_field_replace, "precompile_field_replace"); } // find unmarked objects that need to be finalized from the finalizer list "list". diff --git a/src/julia_internal.h b/src/julia_internal.h index 20d90fede3d5e..3c93d9fd0963d 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -858,6 +858,8 @@ extern jl_genericmemory_t *jl_global_roots_keyset JL_GLOBALLY_ROOTED; extern arraylist_t *jl_entrypoint_mis; JL_DLLEXPORT int jl_is_globally_rooted(jl_value_t *val JL_MAYBE_UNROOTED) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_value_t *jl_as_global_root(jl_value_t *val, int insert) JL_GLOBALLY_ROOTED; +extern jl_svec_t *precompile_field_replace JL_GLOBALLY_ROOTED; +JL_DLLEXPORT void jl_set_precompile_field_replace(jl_value_t *val, jl_value_t *field, jl_value_t *newval) JL_GLOBALLY_ROOTED; jl_opaque_closure_t *jl_new_opaque_closure(jl_tupletype_t *argt, jl_value_t *rt_lb, jl_value_t *rt_ub, jl_value_t *source, jl_value_t **env, size_t nenv, int do_compile); diff --git a/src/staticdata.c b/src/staticdata.c index 0a8cbe6db7c67..5188659d8618d 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -495,6 +495,7 @@ void *native_functions; // opaque jl_native_code_desc_t blob used for fetching // table of struct field addresses to rewrite during saving static htable_t field_replace; +static htable_t bits_replace; static htable_t relocatable_ext_cis; // array of definitions for the predefined function pointers @@ -1649,7 +1650,23 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED write_padding(f, offset - tot); tot = offset; size_t fsz = jl_field_size(t, i); - if (t->name->mutabl && jl_is_cpointer_type(jl_field_type_concrete(t, i)) && *(intptr_t*)slot != -1) { + jl_value_t *replace = (jl_value_t*)ptrhash_get(&bits_replace, (void*)slot); + if (replace != HT_NOTFOUND) { + assert(t->name->mutabl && !jl_field_isptr(t, i)); + jl_value_t *rty = jl_typeof(replace); + size_t sz = jl_datatype_size(rty); + ios_write(f, (const char*)replace, sz); + jl_value_t *ft = jl_field_type_concrete(t, i); + int isunion = jl_is_uniontype(ft); + unsigned nth = 0; + if (!jl_find_union_component(ft, rty, &nth)) + assert(0 && "invalid field assignment to isbits union"); + assert(sz <= fsz - isunion); + write_padding(f, fsz - sz - isunion); + if (isunion) + write_uint8(f, nth); + } + else if (t->name->mutabl && 
jl_is_cpointer_type(jl_field_type_concrete(t, i)) && *(intptr_t*)slot != -1) { // reset Ptr fields to C_NULL (but keep MAP_FAILED / INVALID_HANDLE) assert(!jl_field_isptr(t, i)); write_pointer(f); @@ -2643,6 +2660,65 @@ jl_mutex_t global_roots_lock; extern jl_mutex_t world_counter_lock; extern size_t jl_require_world; +jl_mutex_t precompile_field_replace_lock; +jl_svec_t *precompile_field_replace JL_GLOBALLY_ROOTED; + +static inline jl_value_t *get_checked_fieldindex(const char *name, jl_datatype_t *st, jl_value_t *v, jl_value_t *arg, int mutabl) +{ + if (mutabl) { + if (st == jl_module_type) + jl_error("cannot assign variables in other modules"); + if (!st->name->mutabl) + jl_errorf("%s: immutable struct of type %s cannot be changed", name, jl_symbol_name(st->name->name)); + } + size_t idx; + if (jl_is_long(arg)) { + idx = jl_unbox_long(arg) - 1; + if (idx >= jl_datatype_nfields(st)) + jl_bounds_error(v, arg); + } + else if (jl_is_symbol(arg)) { + idx = jl_field_index(st, (jl_sym_t*)arg, 1); + arg = jl_box_long(idx); + } + else { + jl_value_t *ts[2] = {(jl_value_t*)jl_long_type, (jl_value_t*)jl_symbol_type}; + jl_value_t *t = jl_type_union(ts, 2); + jl_type_error(name, t, arg); + } + if (mutabl && jl_field_isconst(st, idx)) { + jl_errorf("%s: const field .%s of type %s cannot be changed", name, + jl_symbol_name((jl_sym_t*)jl_svecref(jl_field_names(st), idx)), jl_symbol_name(st->name->name)); + } + return arg; +} + +JL_DLLEXPORT void jl_set_precompile_field_replace(jl_value_t *val, jl_value_t *field, jl_value_t *newval) +{ + if (!jl_generating_output()) + return; + jl_datatype_t *st = (jl_datatype_t*)jl_typeof(val); + jl_value_t *idx = get_checked_fieldindex("setfield!", st, val, field, 1); + JL_GC_PUSH1(&idx); + size_t idxval = jl_unbox_long(idx); + jl_value_t *ft = jl_field_type_concrete(st, idxval); + if (!jl_isa(newval, ft)) + jl_type_error("setfield!", ft, newval); + JL_LOCK(&precompile_field_replace_lock); + if (precompile_field_replace == NULL) { + precompile_field_replace = jl_alloc_svec(3); + jl_svecset(precompile_field_replace, 0, jl_alloc_vec_any(0)); + jl_svecset(precompile_field_replace, 1, jl_alloc_vec_any(0)); + jl_svecset(precompile_field_replace, 2, jl_alloc_vec_any(0)); + } + jl_array_ptr_1d_push((jl_array_t*)jl_svecref(precompile_field_replace, 0), val); + jl_array_ptr_1d_push((jl_array_t*)jl_svecref(precompile_field_replace, 1), idx); + jl_array_ptr_1d_push((jl_array_t*)jl_svecref(precompile_field_replace, 2), newval); + JL_GC_POP(); + JL_UNLOCK(&precompile_field_replace_lock); +} + + JL_DLLEXPORT int jl_is_globally_rooted(jl_value_t *val JL_MAYBE_UNROOTED) JL_NOTSAFEPOINT { if (jl_is_datatype(val)) { @@ -2762,6 +2838,7 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, jl_array_t *ext_targets, jl_array_t *edges) JL_GC_DISABLED { htable_new(&field_replace, 0); + htable_new(&bits_replace, 0); // strip metadata and IR when requested if (jl_options.strip_metadata || jl_options.strip_ir) jl_strip_all_codeinfos(); @@ -2773,6 +2850,37 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, arraylist_new(&gvars, 0); arraylist_t external_fns; arraylist_new(&external_fns, 0); + // prepare hash table with any fields the user wanted us to rewrite during serialization + if (precompile_field_replace) { + jl_array_t *vals = (jl_array_t*)jl_svecref(precompile_field_replace, 0); + jl_array_t *fields = (jl_array_t*)jl_svecref(precompile_field_replace, 1); + jl_array_t *newvals = (jl_array_t*)jl_svecref(precompile_field_replace, 2); + 
size_t i, l = jl_array_nrows(vals); + assert(jl_array_nrows(fields) == l && jl_array_nrows(newvals) == l); + for (i = 0; i < l; i++) { + jl_value_t *val = jl_array_ptr_ref(vals, i); + size_t field = jl_unbox_long(jl_array_ptr_ref(fields, i)); + jl_value_t *newval = jl_array_ptr_ref(newvals, i); + jl_datatype_t *st = (jl_datatype_t*)jl_typeof(val); + size_t offs = jl_field_offset(st, field); + char *fldaddr = (char*)val + offs; + if (jl_field_isptr(st, field)) { + record_field_change((jl_value_t**)fldaddr, newval); + } + else { + // replace the bits + ptrhash_put(&bits_replace, (void*)fldaddr, newval); + // and any pointers inside + jl_datatype_t *rty = (jl_datatype_t*)jl_typeof(newval); + const jl_datatype_layout_t *layout = rty->layout; + size_t j, np = layout->npointers; + for (j = 0; j < np; j++) { + uint32_t ptr = jl_ptr_offset(rty, j); + record_field_change((jl_value_t**)fldaddr + ptr, *(((jl_value_t**)newval) + ptr)); + } + } + } + } int en = jl_gc_enable(0); if (native_functions) { @@ -3113,6 +3221,7 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, arraylist_free(&gvars); arraylist_free(&external_fns); htable_free(&field_replace); + htable_free(&bits_replace); htable_free(&serialization_order); htable_free(&nullptrs); htable_free(&symbol_table); diff --git a/test/threads.jl b/test/threads.jl index f1a8aba418412..d8e9fd4ce2901 100644 --- a/test/threads.jl +++ b/test/threads.jl @@ -390,10 +390,19 @@ let once = PerProcess{Int}(() -> error("expected")) @test_throws ErrorException("PerProcess initializer failed previously") once() end -let once = PerThread(() -> return [nothing]) +let e = Base.Event(true), + started = Channel{Int16}(Inf), + once = PerThread() do + push!(started, threadid()) + wait(e) + return [nothing] + end @test typeof(once) <: PerThread{Vector{Nothing}} + notify(e) x = once() @test x === once() === fetch(@async once()) + @test take!(started) == threadid() + @test isempty(started) tids = zeros(UInt, 50) onces = Vector{Vector{Nothing}}(undef, length(tids)) for i = 1:length(tids) @@ -420,7 +429,18 @@ let once = PerThread(() -> return [nothing]) err == 0 || Base.uv_error("uv_thread_join", err) end end + # let them finish in 5 batches of 10 + for i = 1:length(tids) ÷ 10 + for i = 1:10 + @test take!(started) != threadid() + end + for i = 1:10 + notify(e) + end + end + @test isempty(started) waitallthreads(tids) + @test isempty(started) @test length(IdSet{eltype(onces)}(onces)) == length(onces) # make sure every object is unique end From dbbd4d96fd560b41db2324ee31fd37eb734fa39e Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 17 Sep 2024 10:03:19 -0400 Subject: [PATCH 209/537] improve OncePer implementation Address reviewer feedback, add more fixes and more tests, rename to add Once prefix. 
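For a side-by-side view of the renamed API (a usage sketch only, with placeholder initializers; the exported names are the ones this patch introduces):
```julia
# Each object runs its initializer at most once per process / thread / task
# and then keeps returning the cached value of the declared type.
const cwd_once  = OncePerProcess{String}(pwd)
const buf_once  = OncePerThread{Vector{UInt8}}(() -> Vector{UInt8}(undef, 1 << 16))
const seed_once = OncePerTask{Vector{UInt32}}(() -> [Libc.rand()])

cwd_once()   # first call runs pwd(); later calls return the same String
buf_once()   # one scratch buffer per thread id
seed_once()  # one value per task
```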
--- NEWS.md | 4 +- base/docs/basedocs.jl | 2 +- base/exports.jl | 6 +- base/lock.jl | 148 ++++++++++++++++++++++-------------------- doc/src/base/base.md | 6 +- test/precompile.jl | 2 +- test/threads.jl | 92 ++++++++++++++++++-------- 7 files changed, 153 insertions(+), 107 deletions(-) diff --git a/NEWS.md b/NEWS.md index 724d0793e67cd..e304c78f8ad66 100644 --- a/NEWS.md +++ b/NEWS.md @@ -69,9 +69,9 @@ Multi-threading changes ----------------------- * New types are defined to handle the pattern of code that must run once per process, called - a `PerProcess{T}` type, which allows defining a function that should be run exactly once + a `OncePerProcess{T}` type, which allows defining a function that should be run exactly once the first time it is called, and then always return the same result value of type `T` - every subsequent time afterwards. There are also `PerThread{T}` and `PerTask{T}` types for + every subsequent time afterwards. There are also `OncePerThread{T}` and `OncePerTask{T}` types for similar usage with threads or tasks. ([#TBD]) Build system changes diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index f93d9a5ba0647..0d5d5ac00e8d0 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -153,7 +153,7 @@ runtime initialization functions of external C libraries and initializing global that involve pointers returned by external libraries. See the [manual section about modules](@ref modules) for more details. -See also: [`PerProcess`](@ref). +See also: [`OncePerProcess`](@ref). # Examples ```julia diff --git a/base/exports.jl b/base/exports.jl index 66de141c228b6..56cd58ce269e7 100644 --- a/base/exports.jl +++ b/base/exports.jl @@ -70,9 +70,9 @@ export OrdinalRange, Pair, PartialQuickSort, - PerProcess, - PerTask, - PerThread, + OncePerProcess, + OncePerTask, + OncePerThread, PermutedDimsArray, QuickSort, Rational, diff --git a/base/lock.jl b/base/lock.jl index b53607af05a3a..a44cd4c0d63cf 100644 --- a/base/lock.jl +++ b/base/lock.jl @@ -507,7 +507,7 @@ Create a level-triggered event source. Tasks that call [`wait`](@ref) on an After `notify` is called, the `Event` remains in a signaled state and tasks will no longer block when waiting for it, until `reset` is called. -If `autoreset` is true, at most one task will be released from `wait` for) +If `autoreset` is true, at most one task will be released from `wait` for each call to `notify`. This provides an acquire & release memory ordering on notify/wait. @@ -578,11 +578,15 @@ end export Event end +const PerStateInitial = 0x00 +const PerStateHasrun = 0x01 +const PerStateErrored = 0x02 +const PerStateConcurrent = 0x03 """ - PerProcess{T} + OncePerProcess{T}(init::Function)() -> T -Calling a `PerProcess` object returns a value of type `T` by running the +Calling a `OncePerProcess` object returns a value of type `T` by running the function `initializer` exactly once per process. All concurrent and future calls in the same process will return exactly the same value. This is useful in code that will be precompiled, as it allows setting up caches or other state @@ -591,13 +595,14 @@ which won't get serialized. ## Example ```jldoctest -julia> const global_state = Base.PerProcess{Vector{UInt32}}() do +julia> const global_state = Base.OncePerProcess{Vector{UInt32}}() do println("Making lazy global value...done.") return [Libc.rand()] end; -julia> procstate = global_state(); +julia> (procstate = global_state()) |> typeof Making lazy global value...done. 
+Vector{UInt32} (alias for Array{UInt32, 1}) julia> procstate === global_state() true @@ -606,51 +611,51 @@ julia> procstate === fetch(@async global_state()) true ``` """ -mutable struct PerProcess{T, F} - x::Union{Nothing,T} +mutable struct OncePerProcess{T, F} + value::Union{Nothing,T} @atomic state::UInt8 # 0=initial, 1=hasrun, 2=error @atomic allow_compile_time::Bool const initializer::F const lock::ReentrantLock - function PerProcess{T,F}(initializer::F) where {T, F} - once = new{T,F}(nothing, 0x00, true, initializer, ReentrantLock()) + function OncePerProcess{T,F}(initializer::F) where {T, F} + once = new{T,F}(nothing, PerStateInitial, true, initializer, ReentrantLock()) ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any), - once, :x, nothing) + once, :value, nothing) ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any), - once, :state, 0x00) + once, :state, PerStateInitial) return once end end -PerProcess{T}(initializer::F) where {T, F} = PerProcess{T, F}(initializer) -PerProcess(initializer) = PerProcess{Base.promote_op(initializer), typeof(initializer)}(initializer) -@inline function (once::PerProcess{T})() where T +OncePerProcess{T}(initializer::F) where {T, F} = OncePerProcess{T, F}(initializer) +OncePerProcess(initializer) = OncePerProcess{Base.promote_op(initializer), typeof(initializer)}(initializer) +@inline function (once::OncePerProcess{T})() where T state = (@atomic :acquire once.state) - if state != 0x01 + if state != PerStateHasrun (@noinline function init_perprocesss(once, state) - state == 0x02 && error("PerProcess initializer failed previously") + state == PerStateErrored && error("OncePerProcess initializer failed previously") once.allow_compile_time || __precompile__(false) lock(once.lock) try state = @atomic :monotonic once.state - if state == 0x00 - once.x = once.initializer() - elseif state == 0x02 - error("PerProcess initializer failed previously") - elseif state != 0x01 - error("invalid state for PerProcess") + if state == PerStateInitial + once.value = once.initializer() + elseif state == PerStateErrored + error("OncePerProcess initializer failed previously") + elseif state != PerStateHasrun + error("invalid state for OncePerProcess") end catch - state == 0x02 || @atomic :release once.state = 0x02 + state == PerStateErrored || @atomic :release once.state = PerStateErrored unlock(once.lock) rethrow() end - state == 0x01 || @atomic :release once.state = 0x01 + state == PerStateHasrun || @atomic :release once.state = PerStateHasrun unlock(once.lock) nothing end)(once, state) end - return once.x::T + return once.value::T end function copyto_monotonic!(dest::AtomicMemory, src) @@ -659,7 +664,7 @@ function copyto_monotonic!(dest::AtomicMemory, src) if isassigned(src, j) @atomic :monotonic dest[i] = src[j] #else - # _unsafeindex_atomic!(dest, i, src[j], :monotonic) + # _unsetindex_atomic!(dest, i, src[j], :monotonic) end i += 1 end @@ -674,12 +679,12 @@ function fill_monotonic!(dest::AtomicMemory, x) end -# share a lock, since we just need it briefly, so some contention is okay +# share a lock/condition, since we just need it briefly, so some contention is okay const PerThreadLock = ThreadSynchronizer() """ - PerThread{T} + OncePerThread{T}(init::Function)() -> T -Calling a `PerThread` object returns a value of type `T` by running the function +Calling a `OncePerThread` object returns a value of type `T` by running the function `initializer` exactly once per thread. 
All future calls in the same thread, and concurrent or future calls with the same thread id, will return exactly the same value. The object can also be indexed by the threadid for any existing @@ -687,23 +692,25 @@ thread, to get (or initialize *on this thread*) the value stored for that thread. Incorrect usage can lead to data-races or memory corruption so use only if that behavior is correct within your library's threading-safety design. -Warning: it is not necessarily true that a Task only runs on one thread, therefore the value -returned here may alias other values or change in the middle of your program. This type may -get deprecated in the future. If initializer yields, the thread running the current task -after the call might not be the same as the one at the start of the call. +!!! warning + It is not necessarily true that a Task only runs on one thread, therefore the value + returned here may alias other values or change in the middle of your program. This function + may get deprecated in the future. If initializer yields, the thread running the current + task after the call might not be the same as the one at the start of the call. -See also: [`PerTask`](@ref). +See also: [`OncePerTask`](@ref). ## Example ```jldoctest -julia> const thread_state = Base.PerThread{Vector{UInt32}}() do +julia> const thread_state = Base.OncePerThread{Vector{UInt32}}() do println("Making lazy thread value...done.") return [Libc.rand()] end; -julia> threadvec = thread_state(); +julia> (threadvec = thread_state()) |> typeof Making lazy thread value...done. +Vector{UInt32} (alias for Array{UInt32, 1}) julia> threadvec === fetch(@async thread_state()) true @@ -712,12 +719,12 @@ julia> threadvec === thread_state[Threads.threadid()] true ``` """ -mutable struct PerThread{T, F} +mutable struct OncePerThread{T, F} @atomic xs::AtomicMemory{T} # values @atomic ss::AtomicMemory{UInt8} # states: 0=initial, 1=hasrun, 2=error, 3==concurrent const initializer::F - function PerThread{T,F}(initializer::F) where {T, F} + function OncePerThread{T,F}(initializer::F) where {T, F} xs, ss = AtomicMemory{T}(), AtomicMemory{UInt8}() once = new{T,F}(xs, ss, initializer) ccall(:jl_set_precompile_field_replace, Cvoid, (Any, Any, Any), @@ -727,29 +734,30 @@ mutable struct PerThread{T, F} return once end end -PerThread{T}(initializer::F) where {T, F} = PerThread{T,F}(initializer) -PerThread(initializer) = PerThread{Base.promote_op(initializer), typeof(initializer)}(initializer) -@inline function getindex(once::PerThread, tid::Integer) +OncePerThread{T}(initializer::F) where {T, F} = OncePerThread{T,F}(initializer) +OncePerThread(initializer) = OncePerThread{Base.promote_op(initializer), typeof(initializer)}(initializer) +@inline (once::OncePerThread)() = once[Threads.threadid()] +@inline function getindex(once::OncePerThread, tid::Integer) tid = Int(tid) ss = @atomic :acquire once.ss xs = @atomic :monotonic once.xs # n.b. 
length(xs) >= length(ss) - if tid > length(ss) || (@atomic :acquire ss[tid]) != 0x01 + if tid <= 0 || tid > length(ss) || (@atomic :acquire ss[tid]) != PerStateHasrun (@noinline function init_perthread(once, tid) - local xs = @atomic :acquire once.xs - local ss = @atomic :monotonic once.ss + local ss = @atomic :acquire once.ss + local xs = @atomic :monotonic once.xs local len = length(ss) # slow path to allocate it nt = Threads.maxthreadid() - 0 < tid <= nt || ArgumentError("thread id outside of allocated range") - if tid <= length(ss) && (@atomic :acquire ss[tid]) == 0x02 - error("PerThread initializer failed previously") + 0 < tid <= nt || throw(ArgumentError("thread id outside of allocated range")) + if tid <= length(ss) && (@atomic :acquire ss[tid]) == PerStateErrored + error("OncePerThread initializer failed previously") end newxs = xs newss = ss if tid > len # attempt to do all allocations outside of PerThreadLock for better scaling - @assert length(xs) == length(ss) "logical constraint violation" + @assert length(xs) >= length(ss) "logical constraint violation" newxs = typeof(xs)(undef, len + nt) newss = typeof(ss)(undef, len + nt) end @@ -759,30 +767,30 @@ PerThread(initializer) = PerThread{Base.promote_op(initializer), typeof(initiali ss = @atomic :monotonic once.ss xs = @atomic :monotonic once.xs if tid > length(ss) - @assert length(ss) >= len && newxs !== xs && newss != ss "logical constraint violation" - fill_monotonic!(newss, 0x00) + @assert len <= length(ss) <= length(newss) "logical constraint violation" + fill_monotonic!(newss, PerStateInitial) xs = copyto_monotonic!(newxs, xs) ss = copyto_monotonic!(newss, ss) @atomic :release once.xs = xs @atomic :release once.ss = ss end state = @atomic :monotonic ss[tid] - while state == 0x04 + while state == PerStateConcurrent # lost race, wait for notification this is done running elsewhere wait(PerThreadLock) # wait for initializer to finish without releasing this thread ss = @atomic :monotonic once.ss - state = @atomic :monotonic ss[tid] == 0x04 + state = @atomic :monotonic ss[tid] end - if state == 0x00 + if state == PerStateInitial # won the race, drop lock in exchange for state, and run user initializer - @atomic :monotonic ss[tid] = 0x04 + @atomic :monotonic ss[tid] = PerStateConcurrent result = try unlock(PerThreadLock) once.initializer() catch lock(PerThreadLock) ss = @atomic :monotonic once.ss - @atomic :release ss[tid] = 0x02 + @atomic :release ss[tid] = PerStateErrored notify(PerThreadLock) rethrow() end @@ -791,12 +799,12 @@ PerThread(initializer) = PerThread{Base.promote_op(initializer), typeof(initiali xs = @atomic :monotonic once.xs @atomic :release xs[tid] = result ss = @atomic :monotonic once.ss - @atomic :release ss[tid] = 0x01 + @atomic :release ss[tid] = PerStateHasrun notify(PerThreadLock) - elseif state == 0x02 - error("PerThread initializer failed previously") - elseif state != 0x01 - error("invalid state for PerThread") + elseif state == PerStateErrored + error("OncePerThread initializer failed previously") + elseif state != PerStateHasrun + error("invalid state for OncePerThread") end finally unlock(PerThreadLock) @@ -807,12 +815,11 @@ PerThread(initializer) = PerThread{Base.promote_op(initializer), typeof(initiali end return xs[tid] end -@inline (once::PerThread)() = once[Threads.threadid()] """ - PerTask{T} + OncePerTask{T}(init::Function)() -> T -Calling a `PerTask` object returns a value of type `T` by running the function `initializer` +Calling a `OncePerTask` object returns a value of type `T` by running 
the function `initializer` exactly once per Task. All future calls in the same Task will return exactly the same value. See also: [`task_local_storage`](@ref). @@ -820,13 +827,14 @@ See also: [`task_local_storage`](@ref). ## Example ```jldoctest -julia> const task_state = Base.PerTask{Vector{UInt32}}() do +julia> const task_state = Base.OncePerTask{Vector{UInt32}}() do println("Making lazy task value...done.") return [Libc.rand()] end; -julia> taskvec = task_state(); +julia> (taskvec = task_state()) |> typeof Making lazy task value...done. +Vector{UInt32} (alias for Array{UInt32, 1}) julia> taskvec === task_state() true @@ -836,11 +844,11 @@ Making lazy task value...done. false ``` """ -mutable struct PerTask{T, F} +mutable struct OncePerTask{T, F} const initializer::F - PerTask{T}(initializer::F) where {T, F} = new{T,F}(initializer) - PerTask{T,F}(initializer::F) where {T, F} = new{T,F}(initializer) - PerTask(initializer) = new{Base.promote_op(initializer), typeof(initializer)}(initializer) + OncePerTask{T}(initializer::F) where {T, F} = new{T,F}(initializer) + OncePerTask{T,F}(initializer::F) where {T, F} = new{T,F}(initializer) + OncePerTask(initializer) = new{Base.promote_op(initializer), typeof(initializer)}(initializer) end -@inline (once::PerTask)() = get!(once.initializer, task_local_storage(), once) +@inline (once::OncePerTask)() = get!(once.initializer, task_local_storage(), once) diff --git a/doc/src/base/base.md b/doc/src/base/base.md index b11e985782709..7181965d9aa81 100644 --- a/doc/src/base/base.md +++ b/doc/src/base/base.md @@ -34,9 +34,9 @@ Main.include Base.include_string Base.include_dependency __init__ -Base.PerProcess -Base.PerTask -Base.PerThread +Base.OncePerProcess +Base.OncePerTask +Base.OncePerThread Base.which(::Any, ::Any) Base.methods Base.@show diff --git a/test/precompile.jl b/test/precompile.jl index e44771fb6a86f..adf10363298ba 100644 --- a/test/precompile.jl +++ b/test/precompile.jl @@ -96,7 +96,7 @@ precompile_test_harness(false) do dir struct GAPGroupHomomorphism{A, B} <: AbstractAlgebraMap{GAPGroupHomomorphism{B, A}} end global process_state_calls::Int = 0 - const process_state = Base.PerProcess{typeof(getpid())}() do + const process_state = Base.OncePerProcess{typeof(getpid())}() do @assert (global process_state_calls += 1) == 1 return getpid() end diff --git a/test/threads.jl b/test/threads.jl index d8e9fd4ce2901..2dbdcf99eae28 100644 --- a/test/threads.jl +++ b/test/threads.jl @@ -375,41 +375,63 @@ end end end -let once = PerProcess(() -> return [nothing]) - @test typeof(once) <: PerProcess{Vector{Nothing}} +let once = OncePerProcess(() -> return [nothing]) + @test typeof(once) <: OncePerProcess{Vector{Nothing}} x = once() @test x === once() @atomic once.state = 0xff - @test_throws ErrorException("invalid state for PerProcess") once() - @test_throws ErrorException("PerProcess initializer failed previously") once() + @test_throws ErrorException("invalid state for OncePerProcess") once() + @test_throws ErrorException("OncePerProcess initializer failed previously") once() @atomic once.state = 0x01 @test x === once() end -let once = PerProcess{Int}(() -> error("expected")) +let once = OncePerProcess{Int}(() -> error("expected")) @test_throws ErrorException("expected") once() - @test_throws ErrorException("PerProcess initializer failed previously") once() + @test_throws ErrorException("OncePerProcess initializer failed previously") once() end let e = Base.Event(true), started = Channel{Int16}(Inf), - once = PerThread() do + finish = 
Channel{Nothing}(Inf), + exiting = Channel{Nothing}(Inf), + starttest2 = Event(), + once = OncePerThread() do push!(started, threadid()) - wait(e) + take!(finish) + return [nothing] + end + alls = OncePerThread() do return [nothing] end - @test typeof(once) <: PerThread{Vector{Nothing}} - notify(e) + @test typeof(once) <: OncePerThread{Vector{Nothing}} + push!(finish, nothing) + @test_throws ArgumentError once[0] x = once() - @test x === once() === fetch(@async once()) + @test_throws ArgumentError once[0] + @test x === once() === fetch(@async once()) === once[threadid()] @test take!(started) == threadid() @test isempty(started) tids = zeros(UInt, 50) + newthreads = zeros(Int16, length(tids)) onces = Vector{Vector{Nothing}}(undef, length(tids)) + allonces = Vector{Vector{Vector{Nothing}}}(undef, length(tids)) for i = 1:length(tids) function cl() - local y = once() - onces[i] = y - @test x !== y === once() + GC.gc(false) # stress test the GC-safepoint mechanics of jl_adopt_thread + try + newthreads[i] = threadid() + local y = once() + onces[i] = y + @test x !== y === once() === once[threadid()] + wait(starttest2) + allonces[i] = Vector{Nothing}[alls[tid] for tid in newthreads] + catch ex + close(started, ErrorException("failed")) + close(finish, ErrorException("failed")) + @lock stderr Base.display_error(current_exceptions()) + end + push!(exiting, nothing) + GC.gc(false) # stress test the GC-safepoint mechanics of jl_delete_thread nothing end function threadcallclosure(cl::F) where {F} # create sparam so we can reference the type of cl in the ccall type @@ -429,34 +451,50 @@ let e = Base.Event(true), err == 0 || Base.uv_error("uv_thread_join", err) end end - # let them finish in 5 batches of 10 - for i = 1:length(tids) ÷ 10 - for i = 1:10 - @test take!(started) != threadid() + try + # let them finish in batches of 10 + for i = 1:length(tids) ÷ 10 + for i = 1:10 + newid = take!(started) + @test newid != threadid() + end + for i = 1:10 + push!(finish, nothing) + end end - for i = 1:10 - notify(e) + @test isempty(started) + # now run the second part of the test where they all try to access the other threads elements + notify(starttest2) + finally + for _ = 1:length(tids) + # run IO loop until all threads are close to exiting + take!(exiting) end + waitallthreads(tids) end @test isempty(started) - waitallthreads(tids) - @test isempty(started) + @test isempty(finish) @test length(IdSet{eltype(onces)}(onces)) == length(onces) # make sure every object is unique + allexpected = Vector{Nothing}[alls[tid] for tid in newthreads] + @test length(IdSet{eltype(allexpected)}(allexpected)) == length(allexpected) # make sure every object is unique + @test all(i -> allonces[i] !== allexpected && all(j -> allonces[i][j] === allexpected[j], eachindex(allexpected)), eachindex(allonces)) # make sure every thread saw the same elements + @test_throws ArgumentError once[Threads.maxthreadid() + 1] + @test_throws ArgumentError once[-1] end -let once = PerThread{Int}(() -> error("expected")) +let once = OncePerThread{Int}(() -> error("expected")) @test_throws ErrorException("expected") once() - @test_throws ErrorException("PerThread initializer failed previously") once() + @test_throws ErrorException("OncePerThread initializer failed previously") once() end -let once = PerTask(() -> return [nothing]) - @test typeof(once) <: PerTask{Vector{Nothing}} +let once = OncePerTask(() -> return [nothing]) + @test typeof(once) <: OncePerTask{Vector{Nothing}} x = once() @test x === once() !== fetch(@async once()) 
delete!(task_local_storage(), once) @test x !== once() === once() end -let once = PerTask{Int}(() -> error("expected")) +let once = OncePerTask{Int}(() -> error("expected")) @test_throws ErrorException("expected") once() @test_throws ErrorException("expected") once() end From 9d56856c74cd83bbf3258d14a096de208d89ee5e Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 15 Oct 2024 17:35:17 +0000 Subject: [PATCH 210/537] fix use-after-free in test (detected in win32 CI) --- test/threads.jl | 75 +++++++++++++++++++++++++++---------------------- 1 file changed, 41 insertions(+), 34 deletions(-) diff --git a/test/threads.jl b/test/threads.jl index 2dbdcf99eae28..d5a801c1a6a1c 100644 --- a/test/threads.jl +++ b/test/threads.jl @@ -415,8 +415,8 @@ let e = Base.Event(true), newthreads = zeros(Int16, length(tids)) onces = Vector{Vector{Nothing}}(undef, length(tids)) allonces = Vector{Vector{Vector{Nothing}}}(undef, length(tids)) - for i = 1:length(tids) - function cl() + # allocate closure memory to last until all threads are started + cls = [function cl() GC.gc(false) # stress test the GC-safepoint mechanics of jl_adopt_thread try newthreads[i] = threadid() @@ -434,43 +434,50 @@ let e = Base.Event(true), GC.gc(false) # stress test the GC-safepoint mechanics of jl_delete_thread nothing end - function threadcallclosure(cl::F) where {F} # create sparam so we can reference the type of cl in the ccall type - threadwork = @cfunction cl -> cl() Cvoid (Ref{F},) # create a cfunction that specializes on cl as an argument and calls it - err = @ccall uv_thread_create(Ref(tids, i)::Ptr{UInt}, threadwork::Ptr{Cvoid}, cl::Ref{F})::Cint # call that on a thread - err == 0 || Base.uv_error("uv_thread_create", err) - end - threadcallclosure(cl) - end - @noinline function waitallthreads(tids) + for i = 1:length(tids)] + GC.@preserve cls begin # this memory must survive until each corresponding thread exits (waitallthreads / uv_thread_join) + Base.preserve_handle(cls) for i = 1:length(tids) - tid = Ref(tids, i) - tidp = Base.unsafe_convert(Ptr{UInt}, tid)::Ptr{UInt} - gc_state = @ccall jl_gc_safe_enter()::Int8 - GC.@preserve tid err = @ccall uv_thread_join(tidp::Ptr{UInt})::Cint - @ccall jl_gc_safe_leave(gc_state::Int8)::Cvoid - err == 0 || Base.uv_error("uv_thread_join", err) - end - end - try - # let them finish in batches of 10 - for i = 1:length(tids) ÷ 10 - for i = 1:10 - newid = take!(started) - @test newid != threadid() + function threadcallclosure(tid::Ref{UInt}, cl::Ref{F}) where {F} # create sparam so we can reference the type of cl in the ccall type + threadwork = @cfunction cl -> cl() Cvoid (Ref{F},) # create a cfunction that specializes on cl as an argument and calls it + err = @ccall uv_thread_create(tid::Ptr{UInt}, threadwork::Ptr{Cvoid}, cl::Ref{F})::Cint # call that on a thread + err == 0 || Base.uv_error("uv_thread_create", err) + nothing end - for i = 1:10 - push!(finish, nothing) + threadcallclosure(Ref(tids, i), Ref(cls, i)) + end + @noinline function waitallthreads(tids, cls) + for i = 1:length(tids) + tid = Ref(tids, i) + tidp = Base.unsafe_convert(Ptr{UInt}, tid)::Ptr{UInt} + gc_state = @ccall jl_gc_safe_enter()::Int8 + GC.@preserve tid err = @ccall uv_thread_join(tidp::Ptr{UInt})::Cint + @ccall jl_gc_safe_leave(gc_state::Int8)::Cvoid + err == 0 || Base.uv_error("uv_thread_join", err) end + Base.unpreserve_handle(cls) end - @test isempty(started) - # now run the second part of the test where they all try to access the other threads elements - notify(starttest2) - finally - for _ = 
1:length(tids) - # run IO loop until all threads are close to exiting - take!(exiting) + try + # let them finish in batches of 10 + for i = 1:length(tids) ÷ 10 + for i = 1:10 + newid = take!(started) + @test newid != threadid() + end + for i = 1:10 + push!(finish, nothing) + end + end + @test isempty(started) + # now run the second part of the test where they all try to access the other threads elements + notify(starttest2) + finally + for _ = 1:length(tids) + # run IO loop until all threads are close to exiting + take!(exiting) + end + waitallthreads(tids, cls) end - waitallthreads(tids) end @test isempty(started) @test isempty(finish) From b02d6715d3be345962a312d6080bed7c732a4300 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 15 Oct 2024 15:39:05 -0400 Subject: [PATCH 211/537] Make loading work when stdlib deps are missing in the manifest (#56148) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes https://github.com/JuliaLang/julia/issues/56109 Simulating a bad manifest by having `LibGit2_jll` missing as a dep of `LibGit2` in my default env, say because the manifest was generated by a different julia version or different master julia commit. ## This PR, it just works ``` julia> using Revise julia> ``` i.e. ``` % JULIA_DEBUG=loading ./julia --startup-file=no julia> using Revise ... ┌ Debug: Stdlib LibGit2 [76f85450-5226-5b5a-8eaa-529ad045b433] is trying to load `LibGit2_jll` │ which is not listed as a dep in the load path manifests, so resorting to search │ in the stdlib Project.tomls for true deps └ @ Base loading.jl:387 ┌ Debug: LibGit2 [76f85450-5226-5b5a-8eaa-529ad045b433] indeed depends on LibGit2_jll in project /Users/ian/Documents/GitHub/julia/usr/share/julia/stdlib/v1.12/LibGit2/Project.toml └ @ Base loading.jl:395 ... julia> ``` ## Master ``` julia> using Revise Info Given Revise was explicitly requested, output will be shown live ERROR: LoadError: ArgumentError: Package LibGit2 does not have LibGit2_jll in its dependencies: - Note that the following manifests in the load path were resolved with a potentially different DEV version of the current version, which may be the cause of the error. Try to re-resolve them in the current version, or consider deleting them if that fails: /Users/ian/.julia/environments/v1.12/Manifest.toml - You may have a partially installed environment. Try `Pkg.instantiate()` to ensure all packages in the environment are installed. - Or, if you have LibGit2 checked out for development and have added LibGit2_jll as a dependency but haven't updated your primary environment's manifest file, try `Pkg.resolve()`. - Otherwise you may need to report an issue with LibGit2 ... ``` --- base/loading.jl | 37 ++++++++++++++ test/loading.jl | 12 +++++ test/project/deps/BadStdlibDeps/Manifest.toml | 51 +++++++++++++++++++ test/project/deps/BadStdlibDeps/Project.toml | 2 + 4 files changed, 102 insertions(+) create mode 100644 test/project/deps/BadStdlibDeps/Manifest.toml create mode 100644 test/project/deps/BadStdlibDeps/Project.toml diff --git a/base/loading.jl b/base/loading.jl index fe4a4770628da..8dff6838c27cc 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -308,6 +308,21 @@ function find_package(arg) # ::Union{Nothing,String} return locate_package(pkg, env) end +# is there a better/faster ground truth? 
+function is_stdlib(pkgid::PkgId) + pkgid.name in readdir(Sys.STDLIB) || return false + stdlib_root = joinpath(Sys.STDLIB, pkgid.name) + project_file = locate_project_file(stdlib_root) + if project_file isa String + d = parsed_toml(project_file) + uuid = get(d, "uuid", nothing) + if uuid !== nothing + return UUID(uuid) == pkgid.uuid + end + end + return false +end + """ Base.identify_package_env(name::String)::Union{Tuple{PkgId, String}, Nothing} Base.identify_package_env(where::Union{Module,PkgId}, name::String)::Union{Tuple{PkgId, Union{String, Nothing}}, Nothing} @@ -336,6 +351,12 @@ function identify_package_env(where::PkgId, name::String) end break # found in implicit environment--return "not found" end + if pkg_env === nothing && is_stdlib(where) + # if not found it could be that manifests are from a different julia version/commit + # where stdlib dependencies have changed, so look up deps based on the stdlib Project.toml + # as a fallback + pkg_env = identify_stdlib_project_dep(where, name) + end end if cache !== nothing cache.identified_where[(where, name)] = pkg_env @@ -362,6 +383,22 @@ function identify_package_env(name::String) return pkg_env end +function identify_stdlib_project_dep(stdlib::PkgId, depname::String) + @debug """ + Stdlib $(repr("text/plain", stdlib)) is trying to load `$depname` + which is not listed as a dep in the load path manifests, so resorting to search + in the stdlib Project.tomls for true deps""" + stdlib_projfile = locate_project_file(joinpath(Sys.STDLIB, stdlib.name)) + stdlib_projfile === nothing && return nothing + found = explicit_project_deps_get(stdlib_projfile, depname) + if found !== nothing + @debug "$(repr("text/plain", stdlib)) indeed depends on $depname in project $stdlib_projfile" + pkgid = PkgId(found, depname) + return pkgid, stdlib_projfile + end + return nothing +end + _nothing_or_first(x) = x === nothing ? nothing : first(x) """ diff --git a/test/loading.jl b/test/loading.jl index 1674a9f59a0c3..4877b256a6ad9 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -1341,6 +1341,18 @@ end end end +@testset "Fallback for stdlib deps if manifest deps aren't found" begin + mktempdir() do depot + # This manifest has a LibGit2 entry that is missing LibGit2_jll, which should be + # handled by falling back to the stdlib Project.toml for dependency truth. 
+ badmanifest_test_dir = joinpath(@__DIR__, "project", "deps", "BadStdlibDeps.jl") + @test success(addenv( + `$(Base.julia_cmd()) --project=$badmanifest_test_dir --startup-file=no -e 'using LibGit2'`, + "JULIA_DEPOT_PATH" => depot * Base.Filesystem.pathsep(), + )) + end +end + @testset "code coverage disabled during precompilation" begin mktempdir() do depot cov_test_dir = joinpath(@__DIR__, "project", "deps", "CovTest.jl") diff --git a/test/project/deps/BadStdlibDeps/Manifest.toml b/test/project/deps/BadStdlibDeps/Manifest.toml new file mode 100644 index 0000000000000..32aaa0b83dc0a --- /dev/null +++ b/test/project/deps/BadStdlibDeps/Manifest.toml @@ -0,0 +1,51 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.12.0-DEV" +manifest_format = "2.0" +project_hash = "dc9d33b0ee13d9466bdb75b8d375808a534a79ec" + +[[deps.Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" +version = "1.11.0" + +# This is intentionally missing LibGit2_jll for testing purposes +[[deps.LibGit2]] +deps = ["NetworkOptions", "Printf", "SHA"] +uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" +version = "1.11.0" + +[[deps.LibGit2_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll"] +uuid = "e37daf67-58a4-590a-8e99-b0245dd2ffc5" +version = "1.8.0+0" + +[[deps.LibSSH2_jll]] +deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" +version = "1.11.0+1" + +[[deps.Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" +version = "1.11.0" + +[[deps.MbedTLS_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" +version = "2.28.6+1" + +[[deps.NetworkOptions]] +uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" +version = "1.2.0" + +[[deps.Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" +version = "1.11.0" + +[[deps.SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" +version = "0.7.0" + +[[deps.Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" +version = "1.11.0" diff --git a/test/project/deps/BadStdlibDeps/Project.toml b/test/project/deps/BadStdlibDeps/Project.toml new file mode 100644 index 0000000000000..223889185ea15 --- /dev/null +++ b/test/project/deps/BadStdlibDeps/Project.toml @@ -0,0 +1,2 @@ +[deps] +LibGit2 = "76f85450-5226-5b5a-8eaa-529ad045b433" From 924dc170ce12ce831bd31656cb08e79fb2920617 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Tue, 15 Oct 2024 17:41:31 -0300 Subject: [PATCH 212/537] Remove llvm-muladd pass and move it's functionality to to llvm-simdloop (#55802) Closes https://github.com/JuliaLang/julia/issues/55785 I'm not sure if we want to backport this like this. Because that removes some functionality (the pass itself). So LLVM.jl and friends might need annoying version code. We can maybe keep the code there and just not run the pass in a backport. 
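For illustration, a minimal editorial sketch (not part of the original PR text): with this change, only a multiply that feeds a reduction chain recognised by `LowerSIMDLoop` — i.e. inside an `@simd` loop — gets marked `contract` so the backend may fuse it into an FMA; an `fmul`/`fadd` pair outside such a loop is no longer touched.

```julia
# Hypothetical example of the affected pattern: the multiply feeding the
# `@simd` reduction below is the kind of `fmul` the pass can now mark
# `contract` for possible FMA fusion by the backend.
function dot_simd(a::AbstractVector{Float64}, b::AbstractVector{Float64})
    s = 0.0
    @simd for i in eachindex(a, b)
        s += a[i] * b[i]
    end
    return s
end
```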
--- doc/src/devdocs/llvm-passes.md | 12 --- doc/src/devdocs/llvm.md | 1 - src/Makefile | 2 +- src/jl_exported_funcs.inc | 1 - src/llvm-julia-passes.inc | 1 - src/llvm-muladd.cpp | 117 ------------------------------ src/llvm-simdloop.cpp | 66 +++++++++++++++++ src/passes.h | 4 - src/pipeline.cpp | 1 - test/llvmpasses/julia-simdloop.ll | 69 +++++++++++++++--- test/llvmpasses/muladd.ll | 62 ---------------- test/llvmpasses/parsing.ll | 2 +- 12 files changed, 125 insertions(+), 213 deletions(-) delete mode 100644 src/llvm-muladd.cpp delete mode 100644 test/llvmpasses/muladd.ll diff --git a/doc/src/devdocs/llvm-passes.md b/doc/src/devdocs/llvm-passes.md index 36383acaef512..736faf54c219b 100644 --- a/doc/src/devdocs/llvm-passes.md +++ b/doc/src/devdocs/llvm-passes.md @@ -114,18 +114,6 @@ This pass is used to verify Julia's invariants about LLVM IR. This includes thin These passes are used to perform transformations on LLVM IR that LLVM will not perform itself, e.g. fast math flag propagation, escape analysis, and optimizations on Julia-specific internal functions. They use knowledge about Julia's semantics to perform these optimizations. -### CombineMulAdd - -* Filename: `llvm-muladd.cpp` -* Class Name: `CombineMulAddPass` -* Opt Name: `function(CombineMulAdd)` - -This pass serves to optimize the particular combination of a regular `fmul` with a fast `fadd` into a contract `fmul` with a fast `fadd`. This is later optimized by the backend to a [fused multiply-add](https://en.wikipedia.org/wiki/Multiply%E2%80%93accumulate_operation#Fused_multiply%E2%80%93add) instruction, which can provide significantly faster operations at the cost of more [unpredictable semantics](https://simonbyrne.github.io/notes/fastmath/). - -!!! note - - This optimization only occurs when the `fmul` has a single use, which is the fast `fadd`. 
- ### AllocOpt * Filename: `llvm-alloc-opt.cpp` diff --git a/doc/src/devdocs/llvm.md b/doc/src/devdocs/llvm.md index c05a4f9dc4e7f..2155e5da6fd7b 100644 --- a/doc/src/devdocs/llvm.md +++ b/doc/src/devdocs/llvm.md @@ -30,7 +30,6 @@ The code for lowering Julia AST to LLVM IR or interpreting it directly is in dir | `llvm-julia-licm.cpp` | Custom LLVM pass to hoist/sink Julia-specific intrinsics | | `llvm-late-gc-lowering.cpp` | Custom LLVM pass to root GC-tracked values | | `llvm-lower-handlers.cpp` | Custom LLVM pass to lower try-catch blocks | -| `llvm-muladd.cpp` | Custom LLVM pass for fast-match FMA | | `llvm-multiversioning.cpp` | Custom LLVM pass to generate sysimg code on multiple architectures | | `llvm-propagate-addrspaces.cpp` | Custom LLVM pass to canonicalize addrspaces | | `llvm-ptls.cpp` | Custom LLVM pass to lower TLS operations | diff --git a/src/Makefile b/src/Makefile index a6b1f433b73ce..4dbb094c65321 100644 --- a/src/Makefile +++ b/src/Makefile @@ -52,7 +52,7 @@ RT_LLVMLINK := CG_LLVMLINK := ifeq ($(JULIACODEGEN),LLVM) -CODEGEN_SRCS := codegen jitlayers aotcompile debuginfo disasm llvm-simdloop llvm-muladd \ +CODEGEN_SRCS := codegen jitlayers aotcompile debuginfo disasm llvm-simdloop \ llvm-final-gc-lowering llvm-pass-helpers llvm-late-gc-lowering llvm-ptls \ llvm-lower-handlers llvm-gc-invariant-verifier llvm-propagate-addrspaces \ llvm-multiversioning llvm-alloc-opt llvm-alloc-helpers cgmemmgr llvm-remove-addrspaces \ diff --git a/src/jl_exported_funcs.inc b/src/jl_exported_funcs.inc index a00a0171d23b7..f712f154ed896 100644 --- a/src/jl_exported_funcs.inc +++ b/src/jl_exported_funcs.inc @@ -554,7 +554,6 @@ YY(LLVMExtraMPMAddRemoveAddrspacesPass) \ YY(LLVMExtraMPMAddLowerPTLSPass) \ YY(LLVMExtraFPMAddDemoteFloat16Pass) \ - YY(LLVMExtraFPMAddCombineMulAddPass) \ YY(LLVMExtraFPMAddLateLowerGCPass) \ YY(LLVMExtraFPMAddAllocOptPass) \ YY(LLVMExtraFPMAddPropagateJuliaAddrspacesPass) \ diff --git a/src/llvm-julia-passes.inc b/src/llvm-julia-passes.inc index bd89c01c6fdfe..c41ecbba87b6a 100644 --- a/src/llvm-julia-passes.inc +++ b/src/llvm-julia-passes.inc @@ -11,7 +11,6 @@ MODULE_PASS("LowerPTLSPass", LowerPTLSPass, LowerPTLSPass()) //Function passes #ifdef FUNCTION_PASS FUNCTION_PASS("DemoteFloat16", DemoteFloat16Pass, DemoteFloat16Pass()) -FUNCTION_PASS("CombineMulAdd", CombineMulAddPass, CombineMulAddPass()) FUNCTION_PASS("LateLowerGCFrame", LateLowerGCPass, LateLowerGCPass()) FUNCTION_PASS("AllocOpt", AllocOptPass, AllocOptPass()) FUNCTION_PASS("PropagateJuliaAddrspaces", PropagateJuliaAddrspacesPass, PropagateJuliaAddrspacesPass()) diff --git a/src/llvm-muladd.cpp b/src/llvm-muladd.cpp deleted file mode 100644 index 12f1c8ad765d9..0000000000000 --- a/src/llvm-muladd.cpp +++ /dev/null @@ -1,117 +0,0 @@ -// This file is a part of Julia. License is MIT: https://julialang.org/license - -#include "llvm-version.h" -#include "passes.h" - -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "julia.h" -#include "julia_assert.h" - -#define DEBUG_TYPE "combine-muladd" -#undef DEBUG - -using namespace llvm; -STATISTIC(TotalContracted, "Total number of multiplies marked for FMA"); - -#ifndef __clang_gcanalyzer__ -#define REMARK(remark) ORE.emit(remark) -#else -#define REMARK(remark) (void) 0; -#endif - -/** - * Combine - * ``` - * %v0 = fmul ... %a, %b - * %v = fadd contract ... %v0, %c - * ``` - * to - * `%v = call contract @llvm.fmuladd.<...>(... %a, ... %b, ... 
%c)` - * when `%v0` has no other use - */ - -// Return true if we changed the mulOp -static bool checkCombine(Value *maybeMul, OptimizationRemarkEmitter &ORE) JL_NOTSAFEPOINT -{ - auto mulOp = dyn_cast(maybeMul); - if (!mulOp || mulOp->getOpcode() != Instruction::FMul) - return false; - if (!mulOp->hasOneUse()) { - LLVM_DEBUG(dbgs() << "mulOp has multiple uses: " << *maybeMul << "\n"); - REMARK([&](){ - return OptimizationRemarkMissed(DEBUG_TYPE, "Multiuse FMul", mulOp) - << "fmul had multiple uses " << ore::NV("fmul", mulOp); - }); - return false; - } - // On 5.0+ we only need to mark the mulOp as contract and the backend will do the work for us. - auto fmf = mulOp->getFastMathFlags(); - if (!fmf.allowContract()) { - LLVM_DEBUG(dbgs() << "Marking mulOp for FMA: " << *maybeMul << "\n"); - REMARK([&](){ - return OptimizationRemark(DEBUG_TYPE, "Marked for FMA", mulOp) - << "marked for fma " << ore::NV("fmul", mulOp); - }); - ++TotalContracted; - fmf.setAllowContract(true); - mulOp->copyFastMathFlags(fmf); - return true; - } - return false; -} - -static bool combineMulAdd(Function &F) JL_NOTSAFEPOINT -{ - OptimizationRemarkEmitter ORE(&F); - bool modified = false; - for (auto &BB: F) { - for (auto it = BB.begin(); it != BB.end();) { - auto &I = *it; - it++; - switch (I.getOpcode()) { - case Instruction::FAdd: { - if (!I.hasAllowContract()) - continue; - modified |= checkCombine(I.getOperand(0), ORE) || checkCombine(I.getOperand(1), ORE); - break; - } - case Instruction::FSub: { - if (!I.hasAllowContract()) - continue; - modified |= checkCombine(I.getOperand(0), ORE) || checkCombine(I.getOperand(1), ORE); - break; - } - default: - break; - } - } - } -#ifdef JL_VERIFY_PASSES - assert(!verifyLLVMIR(F)); -#endif - return modified; -} - -PreservedAnalyses CombineMulAddPass::run(Function &F, FunctionAnalysisManager &AM) JL_NOTSAFEPOINT -{ - if (combineMulAdd(F)) { - return PreservedAnalyses::allInSet(); - } - return PreservedAnalyses::all(); -} diff --git a/src/llvm-simdloop.cpp b/src/llvm-simdloop.cpp index ed2a04e650f2a..e12b30e3db466 100644 --- a/src/llvm-simdloop.cpp +++ b/src/llvm-simdloop.cpp @@ -41,6 +41,7 @@ STATISTIC(ReductionChainLength, "Total sum of instructions folded from reduction STATISTIC(MaxChainLength, "Max length of reduction chain"); STATISTIC(AddChains, "Addition reduction chains"); STATISTIC(MulChains, "Multiply reduction chains"); +STATISTIC(TotalContracted, "Total number of multiplies marked for FMA"); #ifndef __clang_gcanalyzer__ #define REMARK(remark) ORE.emit(remark) @@ -49,6 +50,49 @@ STATISTIC(MulChains, "Multiply reduction chains"); #endif namespace { +/** + * Combine + * ``` + * %v0 = fmul ... %a, %b + * %v = fadd contract ... %v0, %c + * ``` + * to + * %v0 = fmul contract ... %a, %b + * %v = fadd contract ... %v0, %c + * when `%v0` has no other use + */ + +static bool checkCombine(Value *maybeMul, Loop &L, OptimizationRemarkEmitter &ORE) JL_NOTSAFEPOINT +{ + auto mulOp = dyn_cast(maybeMul); + if (!mulOp || mulOp->getOpcode() != Instruction::FMul) + return false; + if (!L.contains(mulOp)) + return false; + if (!mulOp->hasOneUse()) { + LLVM_DEBUG(dbgs() << "mulOp has multiple uses: " << *maybeMul << "\n"); + REMARK([&](){ + return OptimizationRemarkMissed(DEBUG_TYPE, "Multiuse FMul", mulOp) + << "fmul had multiple uses " << ore::NV("fmul", mulOp); + }); + return false; + } + // On 5.0+ we only need to mark the mulOp as contract and the backend will do the work for us. 
+ auto fmf = mulOp->getFastMathFlags(); + if (!fmf.allowContract()) { + LLVM_DEBUG(dbgs() << "Marking mulOp for FMA: " << *maybeMul << "\n"); + REMARK([&](){ + return OptimizationRemark(DEBUG_TYPE, "Marked for FMA", mulOp) + << "marked for fma " << ore::NV("fmul", mulOp); + }); + ++TotalContracted; + fmf.setAllowContract(true); + mulOp->copyFastMathFlags(fmf); + return true; + } + return false; +} + static unsigned getReduceOpcode(Instruction *J, Instruction *operand) JL_NOTSAFEPOINT { switch (J->getOpcode()) { @@ -150,6 +194,28 @@ static void enableUnsafeAlgebraIfReduction(PHINode *Phi, Loop &L, OptimizationRe }); (*K)->setHasAllowReassoc(true); (*K)->setHasAllowContract(true); + switch ((*K)->getOpcode()) { + case Instruction::FAdd: { + if (!(*K)->hasAllowContract()) + continue; + // (*K)->getOperand(0)->print(dbgs()); + // (*K)->getOperand(1)->print(dbgs()); + checkCombine((*K)->getOperand(0), L, ORE); + checkCombine((*K)->getOperand(1), L, ORE); + break; + } + case Instruction::FSub: { + if (!(*K)->hasAllowContract()) + continue; + // (*K)->getOperand(0)->print(dbgs()); + // (*K)->getOperand(1)->print(dbgs()); + checkCombine((*K)->getOperand(0), L, ORE); + checkCombine((*K)->getOperand(1), L, ORE); + break; + } + default: + break; + } if (SE) SE->forgetValue(*K); ++length; diff --git a/src/passes.h b/src/passes.h index 6557a5813063d..4c9cba164d049 100644 --- a/src/passes.h +++ b/src/passes.h @@ -15,10 +15,6 @@ struct DemoteFloat16Pass : PassInfoMixin { static bool isRequired() { return true; } }; -struct CombineMulAddPass : PassInfoMixin { - PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) JL_NOTSAFEPOINT; -}; - struct LateLowerGCPass : PassInfoMixin { PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) JL_NOTSAFEPOINT; static bool isRequired() { return true; } diff --git a/src/pipeline.cpp b/src/pipeline.cpp index 236be179e12c9..f300e4d7757b2 100644 --- a/src/pipeline.cpp +++ b/src/pipeline.cpp @@ -568,7 +568,6 @@ static void buildCleanupPipeline(ModulePassManager &MPM, PassBuilder *PB, Optimi if (options.cleanup) { if (O.getSpeedupLevel() >= 2) { FunctionPassManager FPM; - JULIA_PASS(FPM.addPass(CombineMulAddPass())); FPM.addPass(DivRemPairsPass()); MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM))); } diff --git a/test/llvmpasses/julia-simdloop.ll b/test/llvmpasses/julia-simdloop.ll index a8d5ea3342b20..9a23a2826da70 100644 --- a/test/llvmpasses/julia-simdloop.ll +++ b/test/llvmpasses/julia-simdloop.ll @@ -3,18 +3,18 @@ ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='loop(LowerSIMDLoop)' -S %s | FileCheck %s ; CHECK-LABEL: @simd_test( -define void @simd_test(double *%a, double *%b) { +define void @simd_test(ptr %a, ptr %b) { top: br label %loop loop: %i = phi i64 [0, %top], [%nexti, %loop] - %aptr = getelementptr double, double *%a, i64 %i - %bptr = getelementptr double, double *%b, i64 %i + %aptr = getelementptr double, ptr %a, i64 %i + %bptr = getelementptr double, ptr %b, i64 %i ; CHECK: llvm.mem.parallel_loop_access - %aval = load double, double *%aptr - %bval = load double, double *%aptr + %aval = load double, ptr %aptr + %bval = load double, ptr %aptr %cval = fadd double %aval, %bval - store double %cval, double *%bptr + store double %cval, ptr %bptr %nexti = add i64 %i, 1 %done = icmp sgt i64 %nexti, 500 br i1 %done, label %loopdone, label %loop, !llvm.loop !1 @@ -23,15 +23,15 @@ loopdone: } ; CHECK-LABEL: @simd_test_sub( -define double @simd_test_sub(double *%a) { +define double @simd_test_sub(ptr %a) { top: br label 
%loop loop: %i = phi i64 [0, %top], [%nexti, %loop] %v = phi double [0.000000e+00, %top], [%nextv, %loop] - %aptr = getelementptr double, double *%a, i64 %i + %aptr = getelementptr double, ptr %a, i64 %i ; CHECK: llvm.mem.parallel_loop_access - %aval = load double, double *%aptr + %aval = load double, ptr %aptr %nextv = fsub double %v, %aval ; CHECK: fsub reassoc contract double %v, %aval %nexti = add i64 %i, 1 @@ -42,14 +42,14 @@ loopdone: } ; CHECK-LABEL: @simd_test_sub2( -define double @simd_test_sub2(double *%a) { +define double @simd_test_sub2(ptr %a) { top: br label %loop loop: %i = phi i64 [0, %top], [%nexti, %loop] %v = phi double [0.000000e+00, %top], [%nextv, %loop] - %aptr = getelementptr double, double *%a, i64 %i - %aval = load double, double *%aptr + %aptr = getelementptr double, ptr %a, i64 %i + %aval = load double, ptr %aptr %nextv = fsub double %v, %aval ; CHECK: fsub reassoc contract double %v, %aval %nexti = add i64 %i, 1 @@ -59,6 +59,26 @@ loopdone: ret double %nextv } +; CHECK-LABEL: @simd_test_sub4( +define double @simd_test_sub4(ptr %a) { +top: + br label %loop +loop: + %i = phi i64 [0, %top], [%nexti, %loop] + %v = phi double [0.000000e+00, %top], [%nextv, %loop] + %aptr = getelementptr double, double *%a, i64 %i + %aval = load double, double *%aptr + %nextv2 = fmul double %aval, %aval + ; CHECK: fmul contract double %aval, %aval + %nextv = fsub double %v, %nextv2 +; CHECK: fsub reassoc contract double %v, %nextv2 + %nexti = add i64 %i, 1 + %done = icmp sgt i64 %nexti, 500 + br i1 %done, label %loopdone, label %loop, !llvm.loop !0 +loopdone: + ret double %nextv +} + ; Tests if we correctly pass through other metadata ; CHECK-LABEL: @disabled( define i32 @disabled(i32* noalias nocapture %a, i32* noalias nocapture readonly %b, i32 %N) { @@ -82,6 +102,31 @@ for.end: ; preds = %for.body ret i32 %1 } +; Check that we don't add contract to non loop things +; CHECK-LABEL: @dont_add_no_loop( +define double @dont_add_no_loop(ptr nocapture noundef nonnull readonly align 8 dereferenceable(72) %"a::Tuple", ptr nocapture noundef nonnull readonly align 8 dereferenceable(24) %"b::Tuple") #0 { +top: + %"a::Tuple[9]_ptr" = getelementptr inbounds i8, ptr %"a::Tuple", i64 64 + %"b::Tuple[3]_ptr" = getelementptr inbounds i8, ptr %"b::Tuple", i64 16 + %"a::Tuple[6]_ptr" = getelementptr inbounds i8, ptr %"a::Tuple", i64 40 + %"b::Tuple[2]_ptr" = getelementptr inbounds i8, ptr %"b::Tuple", i64 8 + %"a::Tuple[3]_ptr" = getelementptr inbounds i8, ptr %"a::Tuple", i64 16 + %"a::Tuple[3]_ptr.unbox" = load double, ptr %"a::Tuple[3]_ptr", align 8 + %"b::Tuple.unbox" = load double, ptr %"b::Tuple", align 8 + %0 = fmul double %"a::Tuple[3]_ptr.unbox", %"b::Tuple.unbox" +; CHECK: fmul double % + %"a::Tuple[6]_ptr.unbox" = load double, ptr %"a::Tuple[6]_ptr", align 8 + %"b::Tuple[2]_ptr.unbox" = load double, ptr %"b::Tuple[2]_ptr", align 8 + %1 = fmul contract double %"a::Tuple[6]_ptr.unbox", %"b::Tuple[2]_ptr.unbox" + %2 = fadd contract double %0, %1 + %"a::Tuple[9]_ptr.unbox" = load double, ptr %"a::Tuple[9]_ptr", align 8 + %"b::Tuple[3]_ptr.unbox" = load double, ptr %"b::Tuple[3]_ptr", align 8 + %3 = fmul contract double %"a::Tuple[9]_ptr.unbox", %"b::Tuple[3]_ptr.unbox" + %4 = fadd contract double %2, %3 + ret double %4 +} + + !0 = distinct !{!0, !"julia.simdloop"} !1 = distinct !{!1, !"julia.simdloop", !"julia.ivdep"} !2 = distinct !{!2, !"julia.simdloop", !"julia.ivdep", !3} diff --git a/test/llvmpasses/muladd.ll b/test/llvmpasses/muladd.ll deleted file mode 100644 index 
079582305ee72..0000000000000 --- a/test/llvmpasses/muladd.ll +++ /dev/null @@ -1,62 +0,0 @@ -; This file is a part of Julia. License is MIT: https://julialang.org/license - -; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='CombineMulAdd' -S %s | FileCheck %s - - -; CHECK-LABEL: @fast_muladd1 -define double @fast_muladd1(double %a, double %b, double %c) { -top: -; CHECK: {{contract|fmuladd}} - %v1 = fmul double %a, %b - %v2 = fadd fast double %v1, %c -; CHECK: ret double - ret double %v2 -} - -; CHECK-LABEL: @fast_mulsub1 -define double @fast_mulsub1(double %a, double %b, double %c) { -top: -; CHECK: {{contract|fmuladd}} - %v1 = fmul double %a, %b - %v2 = fsub fast double %v1, %c -; CHECK: ret double - ret double %v2 -} - -; CHECK-LABEL: @fast_mulsub_vec1 -define <2 x double> @fast_mulsub_vec1(<2 x double> %a, <2 x double> %b, <2 x double> %c) { -top: -; CHECK: {{contract|fmuladd}} - %v1 = fmul <2 x double> %a, %b - %v2 = fsub fast <2 x double> %c, %v1 -; CHECK: ret <2 x double> - ret <2 x double> %v2 -} - -; COM: Should not mark fmul as contract when multiple uses of fmul exist -; CHECK-LABEL: @slow_muladd1 -define double @slow_muladd1(double %a, double %b, double %c) { -top: -; CHECK: %v1 = fmul double %a, %b - %v1 = fmul double %a, %b -; CHECK: %v2 = fadd fast double %v1, %c - %v2 = fadd fast double %v1, %c -; CHECK: %v3 = fadd fast double %v1, %b - %v3 = fadd fast double %v1, %b -; CHECK: %v4 = fadd fast double %v3, %v2 - %v4 = fadd fast double %v3, %v2 -; CHECK: ret double %v4 - ret double %v4 -} - -; COM: Should not mark fadd->fadd fast as contract -; CHECK-LABEL: @slow_addadd1 -define double @slow_addadd1(double %a, double %b, double %c) { -top: -; CHECK: %v1 = fadd double %a, %b - %v1 = fadd double %a, %b -; CHECK: %v2 = fadd fast double %v1, %c - %v2 = fadd fast double %v1, %c -; CHECK: ret double %v2 - ret double %v2 -} diff --git a/test/llvmpasses/parsing.ll b/test/llvmpasses/parsing.ll index e0a726176b225..b8aec5ee2fa71 100644 --- a/test/llvmpasses/parsing.ll +++ b/test/llvmpasses/parsing.ll @@ -1,6 +1,6 @@ ; COM: NewPM-only test, tests for ability to parse Julia passes -; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='module(CPUFeatures,RemoveNI,JuliaMultiVersioning,RemoveJuliaAddrspaces,LowerPTLSPass,function(DemoteFloat16,CombineMulAdd,LateLowerGCFrame,FinalLowerGC,AllocOpt,PropagateJuliaAddrspaces,LowerExcHandlers,GCInvariantVerifier,loop(LowerSIMDLoop,JuliaLICM),GCInvariantVerifier,GCInvariantVerifier),LowerPTLSPass,LowerPTLSPass,JuliaMultiVersioning,JuliaMultiVersioning)' -S %s -o /dev/null +; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='module(CPUFeatures,RemoveNI,JuliaMultiVersioning,RemoveJuliaAddrspaces,LowerPTLSPass,function(DemoteFloat16,LateLowerGCFrame,FinalLowerGC,AllocOpt,PropagateJuliaAddrspaces,LowerExcHandlers,GCInvariantVerifier,loop(LowerSIMDLoop,JuliaLICM),GCInvariantVerifier,GCInvariantVerifier),LowerPTLSPass,LowerPTLSPass,JuliaMultiVersioning,JuliaMultiVersioning)' -S %s -o /dev/null ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes="julia" -S %s -o /dev/null ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes="julia" -S %s -o /dev/null ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes="julia" -S %s -o /dev/null From a7521ea86952168c2f342b4c2275340ea7726a94 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Tue, 15 Oct 2024 20:22:33 -0400 Subject: [PATCH 213/537] Fix implicit `convert(String, ...)` in several places 
(#56174) This removes several `convert(String, ...)` from this code, which really shouldn't be something we invalidate on in the first place (see https://github.com/JuliaLang/julia/issues/56173) but this is still an improvement in code quality so let's take it. --- base/precompilation.jl | 21 +++++++++++++-------- base/regex.jl | 2 +- stdlib/REPL/src/LineEdit.jl | 2 +- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/base/precompilation.jl b/base/precompilation.jl index 4b7da84a17d55..a39563178632f 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -43,12 +43,12 @@ function ExplicitEnv(envpath::String=Base.active_project()) # Collect all direct dependencies of the project for key in ["deps", "weakdeps", "extras"] - for (name, _uuid::String) in get(Dict{String, Any}, project_d, key)::Dict{String, Any} + for (name, _uuid) in get(Dict{String, Any}, project_d, key)::Dict{String, Any} v = key == "deps" ? project_deps : key == "weakdeps" ? project_weakdeps : key == "extras" ? project_extras : error() - uuid = UUID(_uuid) + uuid = UUID(_uuid::String) v[name] = uuid names[UUID(uuid)] = name project_uuid_to_name[name] = UUID(uuid) @@ -75,9 +75,11 @@ function ExplicitEnv(envpath::String=Base.active_project()) project_extensions = Dict{String, Vector{UUID}}() # Collect all extensions of the project - for (name, triggers::Union{String, Vector{String}}) in get(Dict{String, Any}, project_d, "extensions")::Dict{String, Any} + for (name, triggers) in get(Dict{String, Any}, project_d, "extensions")::Dict{String, Any} if triggers isa String triggers = [triggers] + else + triggers = triggers::Vector{String} end uuids = UUID[] for trigger in triggers @@ -107,8 +109,9 @@ function ExplicitEnv(envpath::String=Base.active_project()) sizehint!(name_to_uuid, length(manifest_d)) sizehint!(lookup_strategy, length(manifest_d)) - for (name, pkg_infos::Vector{Any}) in get_deps(manifest_d) - for pkg_info::Dict{String, Any} in pkg_infos + for (name, pkg_infos) in get_deps(manifest_d) + for pkg_info in pkg_infos::Vector{Any} + pkg_info = pkg_info::Dict{String, Any} m_uuid = UUID(pkg_info["uuid"]::String) # If we have multiple packages with the same name we will overwrite things here @@ -129,8 +132,8 @@ function ExplicitEnv(envpath::String=Base.active_project()) # Expanded format: else uuids = UUID[] - for (name_dep, _dep_uuid::String) in deps_pkg - dep_uuid = UUID(_dep_uuid) + for (name_dep, _dep_uuid) in deps_pkg + dep_uuid = UUID(_dep_uuid::String) push!(uuids, dep_uuid) names[dep_uuid] = name_dep end @@ -140,9 +143,11 @@ function ExplicitEnv(envpath::String=Base.active_project()) # Extensions deps_pkg = get(Dict{String, Any}, pkg_info, "extensions")::Dict{String, Any} - for (ext, triggers::Union{String, Vector{String}}) in deps_pkg + for (ext, triggers) in deps_pkg if triggers isa String triggers = [triggers] + else + triggers = triggers::Vector{String} end deps_pkg[ext] = triggers end diff --git a/base/regex.jl b/base/regex.jl index 38eb4cc512552..9444c9a9fb63e 100644 --- a/base/regex.jl +++ b/base/regex.jl @@ -28,7 +28,7 @@ mutable struct Regex <: AbstractPattern function Regex(pattern::AbstractString, compile_options::Integer, match_options::Integer) - pattern = String(pattern) + pattern = String(pattern)::String compile_options = UInt32(compile_options) match_options = UInt32(match_options) if (compile_options & ~PCRE.COMPILE_MASK) != 0 diff --git a/stdlib/REPL/src/LineEdit.jl b/stdlib/REPL/src/LineEdit.jl index c92dca8c8e015..e881a65ca6b1c 100644 --- a/stdlib/REPL/src/LineEdit.jl +++ 
b/stdlib/REPL/src/LineEdit.jl @@ -166,7 +166,7 @@ region_active(s::PromptState) = s.region_active region_active(s::ModeState) = :off -input_string(s::PromptState) = String(take!(copy(s.input_buffer))) +input_string(s::PromptState) = String(take!(copy(s.input_buffer)))::String input_string_newlines(s::PromptState) = count(c->(c == '\n'), input_string(s)) function input_string_newlines_aftercursor(s::PromptState) From a9acdae61b1fbcd997d991ca6c157684cbde57a7 Mon Sep 17 00:00:00 2001 From: Timothy Date: Wed, 16 Oct 2024 08:23:21 +0800 Subject: [PATCH 214/537] Change annotations to use a NamedTuple (#55741) Due to popular demand, the type of annotations is to be changed from a `Tuple{UnitRange{Int}, Pair{Symbol, Any}}` to a `NamedTuple{(:region, :label, :value), Tuple{UnitRange{Int}, Symbol, Any}}`. This requires the expected code churn to `strings/annotated.jl`, and some changes to the StyledStrings and JuliaSyntaxHighlighting libraries. Closes #55249 and closes #55245. --- base/strings/annotated.jl | 226 ++++++++++-------- base/strings/io.jl | 4 +- .../md5 | 1 + .../sha512 | 1 + .../md5 | 1 - .../sha512 | 1 - .../md5 | 1 + .../sha512 | 1 + .../md5 | 1 - .../sha512 | 1 - doc/src/manual/strings.md | 4 +- stdlib/JuliaSyntaxHighlighting.version | 2 +- .../src/render/terminal/formatting.jl | 4 +- stdlib/StyledStrings.version | 2 +- test/strings/annotated.jl | 180 +++++++------- 15 files changed, 226 insertions(+), 204 deletions(-) create mode 100644 deps/checksums/JuliaSyntaxHighlighting-19bd57b89c648592155156049addf67e0638eab1.tar.gz/md5 create mode 100644 deps/checksums/JuliaSyntaxHighlighting-19bd57b89c648592155156049addf67e0638eab1.tar.gz/sha512 delete mode 100644 deps/checksums/JuliaSyntaxHighlighting-b89dd99db56700c47434df6106b6c6afd1c9ed01.tar.gz/md5 delete mode 100644 deps/checksums/JuliaSyntaxHighlighting-b89dd99db56700c47434df6106b6c6afd1c9ed01.tar.gz/sha512 create mode 100644 deps/checksums/StyledStrings-056e843b2d428bb9735b03af0cff97e738ac7e14.tar.gz/md5 create mode 100644 deps/checksums/StyledStrings-056e843b2d428bb9735b03af0cff97e738ac7e14.tar.gz/sha512 delete mode 100644 deps/checksums/StyledStrings-f6035eb97b516862b16e36cab2ecc6ea8adc3d7c.tar.gz/md5 delete mode 100644 deps/checksums/StyledStrings-f6035eb97b516862b16e36cab2ecc6ea8adc3d7c.tar.gz/sha512 diff --git a/base/strings/annotated.jl b/base/strings/annotated.jl index 9a0b4b2825436..c5c330fe0dfcd 100644 --- a/base/strings/annotated.jl +++ b/base/strings/annotated.jl @@ -1,5 +1,8 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +const Annotation = NamedTuple{(:label, :value), Tuple{Symbol, Any}} +const RegionAnnotation = NamedTuple{(:region, :label, :value), Tuple{UnitRange{Int}, Symbol, Any}} + """ AnnotatedString{S <: AbstractString} <: AbstractString @@ -20,7 +23,8 @@ annotated with labeled values. The above diagram represents a `AnnotatedString` where three ranges have been annotated (labeled `A`, `B`, and `C`). Each annotation holds a label (`Symbol`) -and a value (`Any`), paired together as a `Pair{Symbol, <:Any}`. +and a value (`Any`). These three pieces of information are held as a +`$RegionAnnotation`. Labels do not need to be unique, the same region can hold multiple annotations with the same label. 
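(Editorial aside, not part of the patch: under the new representation each annotation is a `NamedTuple` with `region`, `label`, and `value` fields, and `annotate!` takes the label and value as separate positional arguments. A hypothetical usage sketch:)

```julia
# Hypothetical usage under the NamedTuple-based API introduced by this patch.
str = Base.AnnotatedString("some string")
Base.annotate!(str, 1:4, :label, 5)    # previously: annotate!(str, 1:4, :label => 5)
Base.annotations(str)                  # => [(region = 1:4, label = :label, value = 5)]
```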
@@ -43,7 +47,7 @@ See also [`AnnotatedChar`](@ref), [`annotatedstring`](@ref), ```julia AnnotatedString(s::S<:AbstractString) -> AnnotatedString{S} -AnnotatedString(s::S<:AbstractString, annotations::Vector{Tuple{UnitRange{Int}, Pair{Symbol, <:Any}}}) +AnnotatedString(s::S<:AbstractString, annotations::Vector{$RegionAnnotation}) ``` A AnnotatedString can also be created with [`annotatedstring`](@ref), which acts much @@ -59,7 +63,7 @@ julia> AnnotatedString("this is an example annotated string", """ struct AnnotatedString{S <: AbstractString} <: AbstractString string::S - annotations::Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}} + annotations::Vector{RegionAnnotation} end """ @@ -68,8 +72,8 @@ end A Char with annotations. More specifically, this is a simple wrapper around any other -[`AbstractChar`](@ref), which holds a list of arbitrary labeled annotations -(`Pair{Symbol, <:Any}`) with the wrapped character. +[`AbstractChar`](@ref), which holds a list of arbitrary labelled annotations +(`$Annotation`) with the wrapped character. See also: [`AnnotatedString`](@ref), [`annotatedstring`](@ref), `annotations`, and `annotate!`. @@ -78,7 +82,7 @@ and `annotate!`. ```julia AnnotatedChar(s::S) -> AnnotatedChar{S} -AnnotatedChar(s::S, annotations::Vector{Pair{Symbol, <:Any}}) +AnnotatedChar(s::S, annotations::Vector{$Annotation}) ``` # Examples @@ -90,41 +94,48 @@ julia> AnnotatedChar('j', :label => 1) """ struct AnnotatedChar{C <: AbstractChar} <: AbstractChar char::C - annotations::Vector{Pair{Symbol, Any}} + annotations::Vector{Annotation} end ## Constructors ## # When called with overly-specialised arguments -AnnotatedString(s::AbstractString, annots::Vector{<:Tuple{UnitRange{Int}, <:Pair{Symbol, <:Any}}}) = - AnnotatedString(s, Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}(annots)) +AnnotatedString(s::AbstractString, annots::Vector) = + AnnotatedString(s, Vector{RegionAnnotation}(annots)) + +AnnotatedString(s::AbstractString, annots) = + AnnotatedString(s, collect(RegionAnnotation, annots)) -AnnotatedChar(c::AbstractChar, annots::Vector{<:Pair{Symbol, <:Any}}) = - AnnotatedChar(c, Vector{Pair{Symbol, Any}}(annots)) +AnnotatedChar(c::AbstractChar, annots::Vector) = + AnnotatedChar(c, Vector{Annotation}(annots)) + +AnnotatedChar(c::AbstractChar, annots) = + AnnotatedChar(c, collect(Annotation, annots)) # Constructors to avoid recursive wrapping -AnnotatedString(s::AnnotatedString, annots::Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}) = +AnnotatedString(s::AnnotatedString, annots::Vector{RegionAnnotation}) = AnnotatedString(s.string, vcat(s.annotations, annots)) -AnnotatedChar(c::AnnotatedChar, annots::Vector{Pair{Symbol, Any}}) = - AnnotatedChar(c.char, vcat(c.annotations, annots)) +AnnotatedChar(c::AnnotatedChar, annots::Vector{Annotation}) = + AnnotatedChar(c.char, vcat(c.annotations, Vector{Annotation}(annots))) -String(s::AnnotatedString{String}) = s.string # To avoid pointless overhead +# To avoid pointless overhead +String(s::AnnotatedString{String}) = s.string ## Conversion/promotion ## convert(::Type{AnnotatedString}, s::AnnotatedString) = s convert(::Type{AnnotatedString{S}}, s::S) where {S <: AbstractString} = - AnnotatedString(s, Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}()) + AnnotatedString(s, Vector{RegionAnnotation}()) convert(::Type{AnnotatedString}, s::S) where {S <: AbstractString} = convert(AnnotatedString{S}, s) AnnotatedString(s::S) where {S <: AbstractString} = convert(AnnotatedString{S}, s) convert(::Type{AnnotatedChar}, c::AnnotatedChar) = c 
convert(::Type{AnnotatedChar{C}}, c::C) where { C <: AbstractChar } = - AnnotatedChar{C}(c, Vector{Pair{Symbol, Any}}()) + AnnotatedChar{C}(c, Vector{Annotation}()) convert(::Type{AnnotatedChar}, c::C) where { C <: AbstractChar } = convert(AnnotatedChar{C}, c) @@ -150,7 +161,7 @@ lastindex(s::AnnotatedString) = lastindex(s.string) function getindex(s::AnnotatedString, i::Integer) @boundscheck checkbounds(s, i) @inbounds if isvalid(s, i) - AnnotatedChar(s.string[i], Pair{Symbol, Any}[last(x) for x in annotations(s, i)]) + AnnotatedChar(s.string[i], Annotation[(; label, value) for (; label, value) in annotations(s, i)]) else string_index_err(s, i) end @@ -164,7 +175,8 @@ function show(io::IO, s::A) where {A <: AnnotatedString} print(io, '(') show(io, s.string) print(io, ", ") - show(IOContext(io, :typeinfo => typeof(annotations(s))), annotations(s)) + tupanns = Vector{Tuple{UnitRange{Int}, Symbol, Any}}(map(values, s.annotations)) + show(IOContext(io, :typeinfo => typeof(tupanns)), tupanns) print(io, ')') end @@ -233,27 +245,27 @@ function annotatedstring(xs...) size = mapreduce(_str_sizehint, +, xs) buf = IOBuffer(sizehint=size) s = IOContext(buf, :color => true) - annotations = Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}() + annotations = Vector{RegionAnnotation}() for x in xs size = filesize(s.io) if x isa AnnotatedString - for (region, annot) in x.annotations - push!(annotations, (size .+ (region), annot)) + for annot in x.annotations + push!(annotations, setindex(annot, annot.region .+ size, :region)) end print(s, x.string) elseif x isa SubString{<:AnnotatedString} - for (region, annot) in x.string.annotations - start, stop = first(region), last(region) + for annot in x.string.annotations + start, stop = first(annot.region), last(annot.region) if start <= x.offset + x.ncodeunits && stop > x.offset rstart = size + max(0, start - x.offset - 1) + 1 rstop = size + min(stop, x.offset + x.ncodeunits) - x.offset - push!(annotations, (rstart:rstop, annot)) + push!(annotations, setindex(annot, rstart:rstop, :region)) end end print(s, SubString(x.string.string, x.offset, x.ncodeunits, Val(:noshift))) elseif x isa AnnotatedChar for annot in x.annotations - push!(annotations, (1+size:1+size, annot)) + push!(annotations, (region=1+size:1+size, annot...)) end print(s, x.char) else @@ -266,7 +278,7 @@ end annotatedstring(s::AnnotatedString) = s annotatedstring(c::AnnotatedChar) = - AnnotatedString(string(c.char), [(1:ncodeunits(c), annot) for annot in c.annotations]) + AnnotatedString(string(c.char), [(region=1:ncodeunits(c), annot...) 
for annot in c.annotations]) AnnotatedString(s::SubString{<:AnnotatedString}) = annotatedstring(s) @@ -274,18 +286,19 @@ function repeat(str::AnnotatedString, r::Integer) r == 0 && return one(AnnotatedString) r == 1 && return str unannot = repeat(str.string, r) - annotations = Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}() + annotations = Vector{RegionAnnotation}() len = ncodeunits(str) fullregion = firstindex(str):lastindex(str) - if allequal(first, str.annotations) && first(first(str.annotations)) == fullregion + if isempty(str.annotations) + elseif allequal(a -> a.region, str.annotations) && first(str.annotations).region == fullregion newfullregion = firstindex(unannot):lastindex(unannot) - for (_, annot) in str.annotations - push!(annotations, (newfullregion, annot)) + for annot in str.annotations + push!(annotations, setindex(annot, newfullregion, :region)) end else for offset in 0:len:(r-1)*len - for (region, annot) in str.annotations - push!(annotations, (region .+ offset, annot)) + for annot in str.annotations + push!(annotations, setindex(annot, annot.region .+ offset, :region)) end end end @@ -298,16 +311,18 @@ repeat(str::SubString{<:AnnotatedString}, r::Integer) = function repeat(c::AnnotatedChar, r::Integer) str = repeat(c.char, r) fullregion = firstindex(str):lastindex(str) - AnnotatedString(str, [(fullregion, annot) for annot in c.annotations]) + AnnotatedString(str, [(region=fullregion, annot...) for annot in c.annotations]) end function reverse(s::AnnotatedString) lastind = lastindex(s) - AnnotatedString(reverse(s.string), - [(UnitRange(1 + lastind - last(region), - 1 + lastind - first(region)), - annot) - for (region, annot) in s.annotations]) + AnnotatedString( + reverse(s.string), + [setindex(annot, + UnitRange(1 + lastind - last(annot.region), + 1 + lastind - first(annot.region)), + :region) + for annot in s.annotations]) end # TODO optimise? @@ -317,18 +332,17 @@ reverse(s::SubString{<:AnnotatedString}) = reverse(AnnotatedString(s)) ## End AbstractString interface ## -function _annotate!(annlist::Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}, range::UnitRange{Int}, @nospecialize(labelval::Pair{Symbol, <:Any})) - label, val = labelval - if val === nothing - deleteat!(annlist, findall(ann -> ann[1] == range && first(ann[2]) === label, annlist)) +function _annotate!(annlist::Vector{RegionAnnotation}, region::UnitRange{Int}, label::Symbol, @nospecialize(value::Any)) + if value === nothing + deleteat!(annlist, findall(ann -> ann.region == region && ann.label === label, annlist)) else - push!(annlist, (range, Pair{Symbol, Any}(label, val))) + push!(annlist, RegionAnnotation((; region, label, value))) end end """ - annotate!(str::AnnotatedString, [range::UnitRange{Int}], label::Symbol => value) - annotate!(str::SubString{AnnotatedString}, [range::UnitRange{Int}], label::Symbol => value) + annotate!(str::AnnotatedString, [range::UnitRange{Int}], label::Symbol, value) + annotate!(str::SubString{AnnotatedString}, [range::UnitRange{Int}], label::Symbol, value) Annotate a `range` of `str` (or the entire string) with a labeled value (`label` => `value`). To remove existing `label` annotations, use a value of `nothing`. @@ -336,30 +350,30 @@ To remove existing `label` annotations, use a value of `nothing`. The order in which annotations are applied to `str` is semantically meaningful, as described in [`AnnotatedString`](@ref). 
""" -annotate!(s::AnnotatedString, range::UnitRange{Int}, @nospecialize(labelval::Pair{Symbol, <:Any})) = - (_annotate!(s.annotations, range, labelval); s) +annotate!(s::AnnotatedString, range::UnitRange{Int}, label::Symbol, @nospecialize(val::Any)) = + (_annotate!(s.annotations, range, label, val); s) -annotate!(ss::AnnotatedString, @nospecialize(labelval::Pair{Symbol, <:Any})) = - annotate!(ss, firstindex(ss):lastindex(ss), labelval) +annotate!(ss::AnnotatedString, label::Symbol, @nospecialize(val::Any)) = + annotate!(ss, firstindex(ss):lastindex(ss), label, val) -annotate!(s::SubString{<:AnnotatedString}, range::UnitRange{Int}, @nospecialize(labelval::Pair{Symbol, <:Any})) = - (annotate!(s.string, s.offset .+ (range), labelval); s) +annotate!(s::SubString{<:AnnotatedString}, range::UnitRange{Int}, label::Symbol, @nospecialize(val::Any)) = + (annotate!(s.string, s.offset .+ (range), label, val); s) -annotate!(s::SubString{<:AnnotatedString}, @nospecialize(labelval::Pair{Symbol, <:Any})) = - (annotate!(s.string, s.offset .+ (1:s.ncodeunits), labelval); s) +annotate!(s::SubString{<:AnnotatedString}, label::Symbol, @nospecialize(val::Any)) = + (annotate!(s.string, s.offset .+ (1:s.ncodeunits), label, val); s) """ - annotate!(char::AnnotatedChar, label::Symbol => value) + annotate!(char::AnnotatedChar, label::Symbol, value::Any) Annotate `char` with the pair `label => value`. """ -annotate!(c::AnnotatedChar, @nospecialize(labelval::Pair{Symbol, <:Any})) = - (push!(c.annotations, labelval); c) +annotate!(c::AnnotatedChar, label::Symbol, @nospecialize(val::Any)) = + (push!(c.annotations, Annotation((; label, val))); c) """ annotations(str::Union{AnnotatedString, SubString{AnnotatedString}}, [position::Union{Integer, UnitRange}]) -> - Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}} + Vector{$RegionAnnotation} Get all annotations that apply to `str`. Should `position` be provided, only annotations that overlap with `position` will be returned. @@ -375,15 +389,16 @@ See also: [`annotate!`](@ref). annotations(s::AnnotatedString) = s.annotations function annotations(s::SubString{<:AnnotatedString}) - map(((region, annot),) -> (first(region)-s.offset:last(region)-s.offset, annot), - annotations(s.string, s.offset+1:s.offset+s.ncodeunits)) + RegionAnnotation[ + setindex(ann, first(ann.region)-s.offset:last(ann.region)-s.offset, :region) + for ann in annotations(s.string, s.offset+1:s.offset+s.ncodeunits)] end function annotations(s::AnnotatedString, pos::UnitRange{<:Integer}) # TODO optimise - Tuple{UnitRange{Int64}, Pair{Symbol, Any}}[ - (max(first(pos), first(region)):min(last(pos), last(region)), annot) - for (region, annot) in s.annotations if !isempty(intersect(pos, region))] + RegionAnnotation[ + setindex(ann, max(first(pos), first(ann.region)):min(last(pos), last(ann.region)), :region) + for ann in s.annotations if !isempty(intersect(pos, ann.region))] end annotations(s::AnnotatedString, pos::Integer) = annotations(s, pos:pos) @@ -395,7 +410,7 @@ annotations(s::SubString{<:AnnotatedString}, pos::UnitRange{<:Integer}) = annotations(s.string, first(pos)+s.offset:last(pos)+s.offset) """ - annotations(chr::AnnotatedChar) -> Vector{Pair{Symbol, Any}} + annotations(chr::AnnotatedChar) -> Vector{$Annotation} Get all annotations of `chr`, in the form of a vector of annotation pairs. """ @@ -420,7 +435,7 @@ string type of `str`). 
""" function annotated_chartransform(f::Function, str::AnnotatedString, state=nothing) outstr = IOBuffer() - annots = Tuple{UnitRange{Int}, Pair{Symbol, Any}}[] + annots = RegionAnnotation[] bytepos = firstindex(str) - 1 offsets = [bytepos => 0] for c in str.string @@ -437,11 +452,10 @@ function annotated_chartransform(f::Function, str::AnnotatedString, state=nothin end end for annot in str.annotations - region, value = annot - start, stop = first(region), last(region) + start, stop = first(annot.region), last(annot.region) start_offset = last(offsets[findlast(<=(start) ∘ first, offsets)::Int]) stop_offset = last(offsets[findlast(<=(stop) ∘ first, offsets)::Int]) - push!(annots, ((start + start_offset):(stop + stop_offset), value)) + push!(annots, setindex(annot, (start + start_offset):(stop + stop_offset), :region)) end AnnotatedString(String(take!(outstr)), annots) end @@ -450,10 +464,10 @@ end struct AnnotatedIOBuffer <: AbstractPipe io::IOBuffer - annotations::Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}} + annotations::Vector{RegionAnnotation} end -AnnotatedIOBuffer(io::IOBuffer) = AnnotatedIOBuffer(io, Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}()) +AnnotatedIOBuffer(io::IOBuffer) = AnnotatedIOBuffer(io, Vector{RegionAnnotation}()) AnnotatedIOBuffer() = AnnotatedIOBuffer(IOBuffer()) function show(io::IO, aio::AnnotatedIOBuffer) @@ -475,8 +489,8 @@ copy(io::AnnotatedIOBuffer) = AnnotatedIOBuffer(copy(io.io), copy(io.annotations annotations(io::AnnotatedIOBuffer) = io.annotations -annotate!(io::AnnotatedIOBuffer, range::UnitRange{Int}, @nospecialize(labelval::Pair{Symbol, <:Any})) = - (_annotate!(io.annotations, range, labelval); io) +annotate!(io::AnnotatedIOBuffer, range::UnitRange{Int}, label::Symbol, @nospecialize(val::Any)) = + (_annotate!(io.annotations, range, label, val); io) function write(io::AnnotatedIOBuffer, astr::Union{AnnotatedString, SubString{<:AnnotatedString}}) astr = AnnotatedString(astr) @@ -487,7 +501,7 @@ function write(io::AnnotatedIOBuffer, astr::Union{AnnotatedString, SubString{<:A end write(io::AnnotatedIOBuffer, c::AnnotatedChar) = - write(io, AnnotatedString(string(c), map(a -> (1:ncodeunits(c), a), annotations(c)))) + write(io, AnnotatedString(string(c), [(region=1:ncodeunits(c), a...) for a in c.annotations])) write(io::AnnotatedIOBuffer, x::AbstractString) = write(io.io, x) write(io::AnnotatedIOBuffer, s::Union{SubString{String}, String}) = write(io.io, s) write(io::AnnotatedIOBuffer, b::UInt8) = write(io.io, b) @@ -498,8 +512,8 @@ function write(dest::AnnotatedIOBuffer, src::AnnotatedIOBuffer) srcpos = position(src) nb = write(dest.io, src.io) isappending || _clear_annotations_in_region!(dest.annotations, destpos:destpos+nb) - srcannots = [(max(1 + srcpos, first(region)):last(region), annot) - for (region, annot) in src.annotations if first(region) >= srcpos] + srcannots = [setindex(annot, max(1 + srcpos, first(annot.region)):last(annot.region), :region) + for annot in src.annotations if first(annot.region) >= srcpos] _insert_annotations!(dest, srcannots, destpos - srcpos) nb end @@ -523,7 +537,7 @@ function write(io::AbstractPipe, c::AnnotatedChar) end """ - _clear_annotations_in_region!(annotations::Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}, span::UnitRange{Int}) + _clear_annotations_in_region!(annotations::Vector{$RegionAnnotation}, span::UnitRange{Int}) Erase the presence of `annotations` within a certain `span`. 
@@ -531,21 +545,27 @@ This operates by removing all elements of `annotations` that are entirely contained in `span`, truncating ranges that partially overlap, and splitting annotations that subsume `span` to just exist either side of `span`. """ -function _clear_annotations_in_region!(annotations::Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}, span::UnitRange{Int}) +function _clear_annotations_in_region!(annotations::Vector{RegionAnnotation}, span::UnitRange{Int}) # Clear out any overlapping pre-existing annotations. - filter!(((region, _),) -> first(region) < first(span) || last(region) > last(span), annotations) - extras = Tuple{Int, Tuple{UnitRange{Int}, Pair{Symbol, Any}}}[] + filter!(ann -> first(ann.region) < first(span) || last(ann.region) > last(span), annotations) + extras = Tuple{Int, RegionAnnotation}[] for i in eachindex(annotations) - region, annot = annotations[i] + annot = annotations[i] + region = annot.region # Test for partial overlap if first(region) <= first(span) <= last(region) || first(region) <= last(span) <= last(region) - annotations[i] = (if first(region) < first(span) - first(region):first(span)-1 - else last(span)+1:last(region) end, annot) + annotations[i] = + setindex(annot, + if first(region) < first(span) + first(region):first(span)-1 + else + last(span)+1:last(region) + end, + :region) # If `span` fits exactly within `region`, then we've only copied over # the beginning overhang, but also need to conserve the end overhang. if first(region) < first(span) && last(span) < last(region) - push!(extras, (i, (last(span)+1:last(region), annot))) + push!(extras, (i, setindex(annot, last(span)+1:last(region), :region))) end end end @@ -557,7 +577,7 @@ function _clear_annotations_in_region!(annotations::Vector{Tuple{UnitRange{Int}, end """ - _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}, offset::Int = position(io)) + _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{$RegionAnnotation}, offset::Int = position(io)) Register new `annotations` in `io`, applying an `offset` to their regions. @@ -573,19 +593,19 @@ This is implemented so that one can say write an `AnnotatedString` to an `AnnotatedIOBuffer` one character at a time without needlessly producing a new annotation for each character. 
""" -function _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{Tuple{UnitRange{Int}, Pair{Symbol, Any}}}, offset::Int = position(io)) +function _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{RegionAnnotation}, offset::Int = position(io)) run = 0 - if !isempty(io.annotations) && last(first(last(io.annotations))) == offset + if !isempty(io.annotations) && last(last(io.annotations).region) == offset for i in reverse(axes(annotations, 1)) annot = annotations[i] - first(first(annot)) == 1 || continue + first(annot.region) == 1 || continue i <= length(io.annotations) || continue - if last(annot) == last(last(io.annotations)) + if annot.label == last(io.annotations).label && annot.value == last(io.annotations).value valid_run = true for runlen in 1:i - new_range, new_annot = annotations[begin+runlen-1] - old_range, old_annot = io.annotations[end-i+runlen] - if last(old_range) != offset || first(new_range) != 1 || old_annot != new_annot + new = annotations[begin+runlen-1] + old = io.annotations[end-i+runlen] + if last(old.region) != offset || first(new.region) != 1 || old.label != new.label || old.value != new.value valid_run = false break end @@ -599,14 +619,14 @@ function _insert_annotations!(io::AnnotatedIOBuffer, annotations::Vector{Tuple{U end for runindex in 0:run-1 old_index = lastindex(io.annotations) - run + 1 + runindex - old_region, annot = io.annotations[old_index] - new_region, _ = annotations[begin+runindex] - io.annotations[old_index] = (first(old_region):last(new_region)+offset, annot) + old = io.annotations[old_index] + new = annotations[begin+runindex] + io.annotations[old_index] = setindex(old, first(old.region):last(new.region)+offset, :region) end for index in run+1:lastindex(annotations) - region, annot = annotations[index] - start, stop = first(region), last(region) - push!(io.annotations, (start+offset:stop+offset, annot)) + annot = annotations[index] + start, stop = first(annot.region), last(annot.region) + push!(io.annotations, setindex(annotations[index], start+offset:stop+offset, :region)) end end @@ -614,8 +634,8 @@ function read(io::AnnotatedIOBuffer, ::Type{AnnotatedString{T}}) where {T <: Abs if (start = position(io)) == 0 AnnotatedString(read(io.io, T), copy(io.annotations)) else - annots = [(UnitRange{Int}(max(1, first(region) - start), last(region)-start), val) - for (region, val) in io.annotations if last(region) > start] + annots = [setindex(annot, UnitRange{Int}(max(1, first(annot.region) - start), last(annot.region)-start), :region) + for annot in io.annotations if last(annot.region) > start] AnnotatedString(read(io.io, T), annots) end end @@ -625,7 +645,7 @@ read(io::AnnotatedIOBuffer, ::Type{AnnotatedString}) = read(io, AnnotatedString{ function read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar{T}}) where {T <: AbstractChar} pos = position(io) char = read(io.io, T) - annots = Pair{Symbol, Any}[annot for (range, annot) in io.annotations if pos+1 in range] + annots = [NamedTuple{(:label, :value)}(annot) for annot in io.annotations if pos+1 in annot.region] AnnotatedChar(char, annots) end read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar{AbstractChar}}) = read(io, AnnotatedChar{Char}) @@ -633,8 +653,8 @@ read(io::AnnotatedIOBuffer, ::Type{AnnotatedChar}) = read(io, AnnotatedChar{Char function truncate(io::AnnotatedIOBuffer, size::Integer) truncate(io.io, size) - filter!(((range, _),) -> first(range) <= size, io.annotations) - map!(((range, val),) -> (first(range):min(size, last(range)), val), + filter!(ann -> first(ann.region) <= 
size, io.annotations) + map!(ann -> setindex(ann, first(ann.region):min(size, last(ann.region)), :region), io.annotations, io.annotations) io end diff --git a/base/strings/io.jl b/base/strings/io.jl index df34712b519d5..82dd128240a92 100644 --- a/base/strings/io.jl +++ b/base/strings/io.jl @@ -816,12 +816,12 @@ function AnnotatedString(chars::AbstractVector{C}) where {C<:AbstractChar} end end end - annots = Tuple{UnitRange{Int}, Pair{Symbol, Any}}[] + annots = RegionAnnotation[] point = 1 for c in chars if c isa AnnotatedChar for annot in c.annotations - push!(annots, (point:point, annot)) + push!(annots, (point:point, annot...)) end end point += ncodeunits(c) diff --git a/deps/checksums/JuliaSyntaxHighlighting-19bd57b89c648592155156049addf67e0638eab1.tar.gz/md5 b/deps/checksums/JuliaSyntaxHighlighting-19bd57b89c648592155156049addf67e0638eab1.tar.gz/md5 new file mode 100644 index 0000000000000..a86f3fe9c5561 --- /dev/null +++ b/deps/checksums/JuliaSyntaxHighlighting-19bd57b89c648592155156049addf67e0638eab1.tar.gz/md5 @@ -0,0 +1 @@ +401bb32ca43a8460d6790ee80e695bb5 diff --git a/deps/checksums/JuliaSyntaxHighlighting-19bd57b89c648592155156049addf67e0638eab1.tar.gz/sha512 b/deps/checksums/JuliaSyntaxHighlighting-19bd57b89c648592155156049addf67e0638eab1.tar.gz/sha512 new file mode 100644 index 0000000000000..6e54aef5fd34f --- /dev/null +++ b/deps/checksums/JuliaSyntaxHighlighting-19bd57b89c648592155156049addf67e0638eab1.tar.gz/sha512 @@ -0,0 +1 @@ +db2c732d3343f5a8770b3516cdd900587d497feab2259a937d354fac436ab3cb099b0401fb4e05817e75744fb9877ab69b1e4879d8a710b33b69c95b7e58d961 diff --git a/deps/checksums/JuliaSyntaxHighlighting-b89dd99db56700c47434df6106b6c6afd1c9ed01.tar.gz/md5 b/deps/checksums/JuliaSyntaxHighlighting-b89dd99db56700c47434df6106b6c6afd1c9ed01.tar.gz/md5 deleted file mode 100644 index cbcb8097d1673..0000000000000 --- a/deps/checksums/JuliaSyntaxHighlighting-b89dd99db56700c47434df6106b6c6afd1c9ed01.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -3dc1387ed88ba3c0df04d05a86d804d0 diff --git a/deps/checksums/JuliaSyntaxHighlighting-b89dd99db56700c47434df6106b6c6afd1c9ed01.tar.gz/sha512 b/deps/checksums/JuliaSyntaxHighlighting-b89dd99db56700c47434df6106b6c6afd1c9ed01.tar.gz/sha512 deleted file mode 100644 index 2e58061d16058..0000000000000 --- a/deps/checksums/JuliaSyntaxHighlighting-b89dd99db56700c47434df6106b6c6afd1c9ed01.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -fe30ed73b257e6928097cb7baca5b82a9a60b2f9b9f219fbcf570c5ed513447f0fda2a48da06b57e381516a69278f7f8519764d00e9e4fb5683a5411e245ef45 diff --git a/deps/checksums/StyledStrings-056e843b2d428bb9735b03af0cff97e738ac7e14.tar.gz/md5 b/deps/checksums/StyledStrings-056e843b2d428bb9735b03af0cff97e738ac7e14.tar.gz/md5 new file mode 100644 index 0000000000000..8d78dd7b0a11b --- /dev/null +++ b/deps/checksums/StyledStrings-056e843b2d428bb9735b03af0cff97e738ac7e14.tar.gz/md5 @@ -0,0 +1 @@ +f053c84279a8920f355f202e605842af diff --git a/deps/checksums/StyledStrings-056e843b2d428bb9735b03af0cff97e738ac7e14.tar.gz/sha512 b/deps/checksums/StyledStrings-056e843b2d428bb9735b03af0cff97e738ac7e14.tar.gz/sha512 new file mode 100644 index 0000000000000..5a8ca888c38f8 --- /dev/null +++ b/deps/checksums/StyledStrings-056e843b2d428bb9735b03af0cff97e738ac7e14.tar.gz/sha512 @@ -0,0 +1 @@ +b6f4c1d6c0dc73a520472746c96adff506e5405154e4b93d419e07b577b01804d2fc87d4a6cac48a136777579bebf8388c2c1e54f849b51e233138d482146b4f diff --git a/deps/checksums/StyledStrings-f6035eb97b516862b16e36cab2ecc6ea8adc3d7c.tar.gz/md5 
b/deps/checksums/StyledStrings-f6035eb97b516862b16e36cab2ecc6ea8adc3d7c.tar.gz/md5 deleted file mode 100644 index 0d39747d275ba..0000000000000 --- a/deps/checksums/StyledStrings-f6035eb97b516862b16e36cab2ecc6ea8adc3d7c.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -bf7c157df6084942b794fbe5b768a643 diff --git a/deps/checksums/StyledStrings-f6035eb97b516862b16e36cab2ecc6ea8adc3d7c.tar.gz/sha512 b/deps/checksums/StyledStrings-f6035eb97b516862b16e36cab2ecc6ea8adc3d7c.tar.gz/sha512 deleted file mode 100644 index d0a8d6cec08cf..0000000000000 --- a/deps/checksums/StyledStrings-f6035eb97b516862b16e36cab2ecc6ea8adc3d7c.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -ba2f6b91494662208842dec580ea9410d8d6ba4e57315c72e872227f5e2f68cc970fcf5dbd9c8a03920f93b6adabdeaab738fff04f9ca7b5da5cd6b89759e7f6 diff --git a/doc/src/manual/strings.md b/doc/src/manual/strings.md index c04e5e6d6760e..57431d07c0aa5 100644 --- a/doc/src/manual/strings.md +++ b/doc/src/manual/strings.md @@ -1230,7 +1230,7 @@ to keep the string annotations. ```jldoctest julia> str = Base.AnnotatedString("hello there", - [(1:5, :word => :greeting), (7:11, :label => 1)]) + [(1:5, :word, :greeting), (7:11, :label, 1)]) "hello there" julia> length(str) @@ -1242,7 +1242,7 @@ julia> lpad(str, 14) julia> typeof(lpad(str, 7)) Base.AnnotatedString{String} -julia> str2 = Base.AnnotatedString(" julia", [(2:6, :face => :magenta)]) +julia> str2 = Base.AnnotatedString(" julia", [(2:6, :face, :magenta)]) " julia" julia> Base.annotatedstring(str, str2) diff --git a/stdlib/JuliaSyntaxHighlighting.version b/stdlib/JuliaSyntaxHighlighting.version index 280db66afe5f9..2a409c721d32b 100644 --- a/stdlib/JuliaSyntaxHighlighting.version +++ b/stdlib/JuliaSyntaxHighlighting.version @@ -1,4 +1,4 @@ JULIASYNTAXHIGHLIGHTING_BRANCH = main -JULIASYNTAXHIGHLIGHTING_SHA1 = b89dd99db56700c47434df6106b6c6afd1c9ed01 +JULIASYNTAXHIGHLIGHTING_SHA1 = 19bd57b89c648592155156049addf67e0638eab1 JULIASYNTAXHIGHLIGHTING_GIT_URL := https://github.com/julialang/JuliaSyntaxHighlighting.jl.git JULIASYNTAXHIGHLIGHTING_TAR_URL = https://api.github.com/repos/julialang/JuliaSyntaxHighlighting.jl/tarball/$1 diff --git a/stdlib/Markdown/src/render/terminal/formatting.jl b/stdlib/Markdown/src/render/terminal/formatting.jl index 3274483801c77..c9dadfb5f3d94 100644 --- a/stdlib/Markdown/src/render/terminal/formatting.jl +++ b/stdlib/Markdown/src/render/terminal/formatting.jl @@ -19,9 +19,9 @@ function with_output_annotations(f::Function, io::AnnotIO, annots::Pair{Symbol, start = position(aio) + 1 f(io) stop = position(aio) - sortedindex = searchsortedlast(aio.annotations, (start:stop,), by=first) + sortedindex = searchsortedlast(aio.annotations, (region=start:stop,), by=a -> a.region) for (i, annot) in enumerate(annots) - insert!(aio.annotations, sortedindex + i, (start:stop, annot)) + insert!(aio.annotations, sortedindex + i, (start:stop, annot...)) end end diff --git a/stdlib/StyledStrings.version b/stdlib/StyledStrings.version index 83fbece4c8bc0..5e58a5456148a 100644 --- a/stdlib/StyledStrings.version +++ b/stdlib/StyledStrings.version @@ -1,4 +1,4 @@ STYLEDSTRINGS_BRANCH = main -STYLEDSTRINGS_SHA1 = f6035eb97b516862b16e36cab2ecc6ea8adc3d7c +STYLEDSTRINGS_SHA1 = 056e843b2d428bb9735b03af0cff97e738ac7e14 STYLEDSTRINGS_GIT_URL := https://github.com/JuliaLang/StyledStrings.jl.git STYLEDSTRINGS_TAR_URL = https://api.github.com/repos/JuliaLang/StyledStrings.jl/tarball/$1 diff --git a/test/strings/annotated.jl b/test/strings/annotated.jl index ee53c3d5846eb..85acab74abf7b 100644 --- a/test/strings/annotated.jl 
+++ b/test/strings/annotated.jl @@ -2,7 +2,7 @@ @testset "AnnotatedString" begin str = Base.AnnotatedString("some string") - @test str == Base.AnnotatedString(str.string, Tuple{UnitRange{Int}, Pair{Symbol, Any}}[]) + @test str == Base.AnnotatedString(str.string, Base.RegionAnnotation[]) @test length(str) == 11 @test ncodeunits(str) == 11 @test codeunits(str) == codeunits("some string") @@ -23,9 +23,9 @@ @test cmp("some stringy thingy", str) == 1 @test str[3:4] == SubString("me") @test SubString("me") == str[3:4] - Base.annotate!(str, 1:4, :thing => 0x01) - Base.annotate!(str, 6:11, :other => 0x02) - Base.annotate!(str, 1:11, :all => 0x03) + Base.annotate!(str, 1:4, :thing, 0x01) + Base.annotate!(str, 6:11, :other, 0x02) + Base.annotate!(str, 1:11, :all, 0x03) # :thing :other # ┌┸─┐ ┌──┸─┐ # "some string" @@ -35,21 +35,21 @@ @test str[3:4] != SubString("me") @test SubString("me") != str[3:4] @test Base.AnnotatedString(str[3:4]) == - Base.AnnotatedString("me", [(1:2, :thing => 0x01), (1:2, :all => 0x03)]) + Base.AnnotatedString("me", [(1:2, :thing, 0x01), (1:2, :all, 0x03)]) @test Base.AnnotatedString(str[3:6]) == - Base.AnnotatedString("me s", [(1:2, :thing => 0x01), (4:4, :other => 0x02), (1:4, :all => 0x03)]) - @test str == Base.AnnotatedString("some string", [(1:4, :thing => 0x01), (6:11, :other => 0x02), (1:11, :all => 0x03)]) + Base.AnnotatedString("me s", [(1:2, :thing, 0x01), (4:4, :other, 0x02), (1:4, :all, 0x03)]) + @test str == Base.AnnotatedString("some string", [(1:4, :thing, 0x01), (6:11, :other, 0x02), (1:11, :all, 0x03)]) @test str != Base.AnnotatedString("some string") - @test str != Base.AnnotatedString("some string", [(1:1, :thing => 0x01), (1:11, :all => 0x03), (6:6, :other => 0x02)]) - @test str != Base.AnnotatedString("some string", [(1:4, :thing => 0x11), (1:11, :all => 0x13), (6:11, :other => 0x12)]) - @test str != Base.AnnotatedString("some thingg", [(1:4, :thing => 0x01), (1:11, :all => 0x03), (6:11, :other => 0x02)]) - @test Base.AnnotatedString([Base.AnnotatedChar('a', [:a => 1]), Base.AnnotatedChar('b', [:b => 2])]) == - Base.AnnotatedString("ab", [(1:1, :a => 1), (2:2, :b => 2)]) + @test str != Base.AnnotatedString("some string", [(1:1, :thing, 0x01), (1:11, :all, 0x03), (6:6, :other, 0x02)]) + @test str != Base.AnnotatedString("some string", [(1:4, :thing, 0x11), (1:11, :all, 0x13), (6:11, :other, 0x12)]) + @test str != Base.AnnotatedString("some thingg", [(1:4, :thing, 0x01), (1:11, :all, 0x03), (6:11, :other, 0x02)]) + @test Base.AnnotatedString([Base.AnnotatedChar('a', [(:a, 1)]), Base.AnnotatedChar('b', [(:b, 2)])]) == + Base.AnnotatedString("ab", [(1:1, :a, 1), (2:2, :b, 2)]) let allstrings = - ['a', Base.AnnotatedChar('a'), Base.AnnotatedChar('a', [:aaa => 0x04]), + ['a', Base.AnnotatedChar('a'), Base.AnnotatedChar('a', [(:aaa, 0x04)]), "a string", Base.AnnotatedString("a string"), - Base.AnnotatedString("a string", [(1:2, :hmm => '%')]), - SubString(Base.AnnotatedString("a string", [(1:2, :hmm => '%')]), 1:1)] + Base.AnnotatedString("a string", [(1:2, :hmm, '%')]), + SubString(Base.AnnotatedString("a string", [(1:2, :hmm, '%')]), 1:1)] for str1 in repeat(allstrings, 2) for str2 in repeat(allstrings, 2) @test String(str1 * str2) == @@ -62,10 +62,10 @@ end end # @test collect(Base.eachstyle(str)) == - # [("some", [:thing => 0x01, :all => 0x03]), - # (" string", [:all => 0x03, :other => 0x02])] + # [("some", [:thing, 0x01, :all, 0x03]), + # (" string", [:all, 0x03, :other, 0x02])] @test chopprefix(sprint(show, str), "Base.") == - 
"AnnotatedString{String}(\"some string\", [(1:4, :thing => 0x01), (6:11, :other => 0x02), (1:11, :all => 0x03)])" + "AnnotatedString{String}(\"some string\", [(1:4, :thing, 0x01), (6:11, :other, 0x02), (1:11, :all, 0x03)])" @test eval(Meta.parse(repr(str))) == str @test sprint(show, MIME("text/plain"), str) == "\"some string\"" end @@ -78,16 +78,16 @@ end @test uppercase(chr) == Base.AnnotatedChar('C') @test titlecase(chr) == Base.AnnotatedChar('C') @test lowercase(Base.AnnotatedChar('C')) == chr - str = Base.AnnotatedString("hmm", [(1:1, :attr => "h0h0"), - (1:2, :attr => "h0m1"), - (2:3, :attr => "m1m2")]) - @test str[1] == Base.AnnotatedChar('h', Pair{Symbol, Any}[:attr => "h0h0"]) - @test str[2] == Base.AnnotatedChar('m', Pair{Symbol, Any}[:attr => "h0m1", :attr => "m1m2"]) - @test str[3] == Base.AnnotatedChar('m', Pair{Symbol, Any}[:attr => "m1m2"]) + str = Base.AnnotatedString("hmm", [(1:1, :attr, "h0h0"), + (1:2, :attr, "h0m1"), + (2:3, :attr, "m1m2")]) + @test str[1] == Base.AnnotatedChar('h', [(:attr, "h0h0")]) + @test str[2] == Base.AnnotatedChar('m', [(:attr, "h0m1"), (:attr, "m1m2")]) + @test str[3] == Base.AnnotatedChar('m', [(:attr, "m1m2")]) end @testset "Styling preservation" begin - str = Base.AnnotatedString("some string", [(1:4, :thing => 0x01), (1:11, :all => 0x03), (6:11, :other => 0x02)]) + str = Base.AnnotatedString("some string", [(1:4, :thing, 0x01), (1:11, :all, 0x03), (6:11, :other, 0x02)]) @test match(r".e", str).match == str[3:4] @test match(r"(.e)", str).captures == [str[3:4]] let m0 = match(r"(.)e", str) @@ -97,43 +97,43 @@ end end end @test lpad(str, 12) == - Base.AnnotatedString(" some string", [(2:5, :thing => 0x01), - (2:12, :all => 0x03), - (7:12, :other => 0x02)]) + Base.AnnotatedString(" some string", [(2:5, :thing, 0x01), + (2:12, :all, 0x03), + (7:12, :other, 0x02)]) @test rpad(str, 12) == - Base.AnnotatedString("some string ", [(1:4, :thing => 0x01), - (1:11, :all => 0x03), - (6:11, :other => 0x02)]) - str1 = Base.AnnotatedString("test", [(1:4, :label => 5)]) - str2 = Base.AnnotatedString("case", [(2:3, :label => "oomph")]) + Base.AnnotatedString("some string ", [(1:4, :thing, 0x01), + (1:11, :all, 0x03), + (6:11, :other, 0x02)]) + str1 = Base.AnnotatedString("test", [(1:4, :label, 5)]) + str2 = Base.AnnotatedString("case", [(2:3, :label, "oomph")]) @test join([str1, str1], ' ') == Base.AnnotatedString("test test", - [(1:4, :label => 5), - (6:9, :label => 5)]) - @test join([str1, str1], Base.AnnotatedString(" ", [(1:1, :label => 2)])) == + [(1:4, :label, 5), + (6:9, :label, 5)]) + @test join([str1, str1], Base.AnnotatedString(" ", [(1:1, :label, 2)])) == Base.AnnotatedString("test test", - [(1:4, :label => 5), - (5:5, :label => 2), - (6:9, :label => 5)]) + [(1:4, :label, 5), + (5:5, :label, 2), + (6:9, :label, 5)]) @test join((String(str1), str1), ' ') == - Base.AnnotatedString("test test", [(6:9, :label => 5)]) - @test repeat(str1, 2) == Base.AnnotatedString("testtest", [(1:8, :label => 5)]) - @test repeat(str2, 2) == Base.AnnotatedString("casecase", [(2:3, :label => "oomph"), - (6:7, :label => "oomph")]) - @test repeat(str1[1], 3) == Base.AnnotatedString("ttt", [(1:3, :label => 5)]) - @test reverse(str1) == Base.AnnotatedString("tset", [(1:4, :label => 5)]) - @test reverse(str2) == Base.AnnotatedString("esac", [(2:3, :label => "oomph")]) + Base.AnnotatedString("test test", [(6:9, :label, 5)]) + @test repeat(str1, 2) == Base.AnnotatedString("testtest", [(1:8, :label, 5)]) + @test repeat(str2, 2) == Base.AnnotatedString("casecase", [(2:3, :label, 
"oomph"), + (6:7, :label, "oomph")]) + @test repeat(str1[1], 3) == Base.AnnotatedString("ttt", [(1:3, :label, 5)]) + @test reverse(str1) == Base.AnnotatedString("tset", [(1:4, :label, 5)]) + @test reverse(str2) == Base.AnnotatedString("esac", [(2:3, :label, "oomph")]) end @testset "Unicode" begin for words in (["ᲃase", "cɦɒnɡeȿ", "can", "CHⱯNGE", "Сodeunıts"], ["Сodeunıts", "ᲃase", "cɦɒnɡeȿ", "can", "CHⱯNGE"]) - ann_words = [Base.AnnotatedString(w, [(1:ncodeunits(w), :i => i)]) + ann_words = [Base.AnnotatedString(w, [(1:ncodeunits(w), :i, i)]) for (i, w) in enumerate(words)] ann_str = join(ann_words, '-') for transform in (lowercase, uppercase, titlecase) t_words = map(transform, words) - ann_t_words = [Base.AnnotatedString(w, [(1:ncodeunits(w), :i => i)]) + ann_t_words = [Base.AnnotatedString(w, [(1:ncodeunits(w), :i, i)]) for (i, w) in enumerate(t_words)] ann_t_str = join(ann_t_words, '-') t_ann_str = transform(ann_str) @@ -142,7 +142,7 @@ end end for transform in (uppercasefirst, lowercasefirst) t_words = vcat(transform(first(words)), words[2:end]) - ann_t_words = [Base.AnnotatedString(w, [(1:ncodeunits(w), :i => i)]) + ann_t_words = [Base.AnnotatedString(w, [(1:ncodeunits(w), :i, i)]) for (i, w) in enumerate(t_words)] ann_t_str = join(ann_t_words, '-') t_ann_str = transform(ann_str) @@ -154,31 +154,32 @@ end @testset "AnnotatedIOBuffer" begin aio = Base.AnnotatedIOBuffer() + vec2ann(v::Vector{<:Tuple}) = collect(Base.RegionAnnotation, v) # Append-only writing - @test write(aio, Base.AnnotatedString("hello", [(1:5, :tag => 1)])) == 5 + @test write(aio, Base.AnnotatedString("hello", [(1:5, :tag, 1)])) == 5 @test write(aio, ' ') == 1 - @test write(aio, Base.AnnotatedString("world", [(1:5, :tag => 2)])) == 5 - @test Base.annotations(aio) == [(1:5, :tag => 1), (7:11, :tag => 2)] + @test write(aio, Base.AnnotatedString("world", [(1:5, :tag, 2)])) == 5 + @test Base.annotations(aio) == vec2ann([(1:5, :tag, 1), (7:11, :tag, 2)]) # Check `annotate!`, including region sorting @test truncate(aio, 0).io.size == 0 @test write(aio, "hello world") == ncodeunits("hello world") - @test Base.annotate!(aio, 1:5, :tag => 1) === aio - @test Base.annotate!(aio, 7:11, :tag => 2) === aio - @test Base.annotations(aio) == [(1:5, :tag => 1), (7:11, :tag => 2)] + @test Base.annotate!(aio, 1:5, :tag, 1) === aio + @test Base.annotate!(aio, 7:11, :tag, 2) === aio + @test Base.annotations(aio) == vec2ann([(1:5, :tag, 1), (7:11, :tag, 2)]) # Reading @test read(seekstart(deepcopy(aio.io)), String) == "hello world" @test read(seekstart(deepcopy(aio)), String) == "hello world" - @test read(seek(aio, 0), Base.AnnotatedString) == Base.AnnotatedString("hello world", [(1:5, :tag => 1), (7:11, :tag => 2)]) - @test read(seek(aio, 1), Base.AnnotatedString) == Base.AnnotatedString("ello world", [(1:4, :tag => 1), (6:10, :tag => 2)]) - @test read(seek(aio, 4), Base.AnnotatedString) == Base.AnnotatedString("o world", [(1:1, :tag => 1), (3:7, :tag => 2)]) - @test read(seek(aio, 5), Base.AnnotatedString) == Base.AnnotatedString(" world", [(2:6, :tag => 2)]) + @test read(seek(aio, 0), Base.AnnotatedString) == Base.AnnotatedString("hello world", [(1:5, :tag, 1), (7:11, :tag, 2)]) + @test read(seek(aio, 1), Base.AnnotatedString) == Base.AnnotatedString("ello world", [(1:4, :tag, 1), (6:10, :tag, 2)]) + @test read(seek(aio, 4), Base.AnnotatedString) == Base.AnnotatedString("o world", [(1:1, :tag, 1), (3:7, :tag, 2)]) + @test read(seek(aio, 5), Base.AnnotatedString) == Base.AnnotatedString(" world", [(2:6, :tag, 2)]) @test 
read(seekend(aio), Base.AnnotatedString) == Base.AnnotatedString("") - @test read(seekstart(truncate(deepcopy(aio), 5)), Base.AnnotatedString) == Base.AnnotatedString("hello", [(1:5, :tag => 1)]) - @test read(seekstart(truncate(deepcopy(aio), 6)), Base.AnnotatedString) == Base.AnnotatedString("hello ", [(1:5, :tag => 1)]) - @test read(seekstart(truncate(deepcopy(aio), 7)), Base.AnnotatedString) == Base.AnnotatedString("hello w", [(1:5, :tag => 1), (7:7, :tag => 2)]) - @test read(seek(aio, 0), Base.AnnotatedChar) == Base.AnnotatedChar('h', [:tag => 1]) - @test read(seek(aio, 5), Base.AnnotatedChar) == Base.AnnotatedChar(' ', Pair{Symbol, Any}[]) - @test read(seek(aio, 6), Base.AnnotatedChar) == Base.AnnotatedChar('w', [:tag => 2]) + @test read(seekstart(truncate(deepcopy(aio), 5)), Base.AnnotatedString) == Base.AnnotatedString("hello", [(1:5, :tag, 1)]) + @test read(seekstart(truncate(deepcopy(aio), 6)), Base.AnnotatedString) == Base.AnnotatedString("hello ", [(1:5, :tag, 1)]) + @test read(seekstart(truncate(deepcopy(aio), 7)), Base.AnnotatedString) == Base.AnnotatedString("hello w", [(1:5, :tag, 1), (7:7, :tag, 2)]) + @test read(seek(aio, 0), Base.AnnotatedChar) == Base.AnnotatedChar('h', [(:tag, 1)]) + @test read(seek(aio, 5), Base.AnnotatedChar) == Base.AnnotatedChar(' ', []) + @test read(seek(aio, 6), Base.AnnotatedChar) == Base.AnnotatedChar('w', [(:tag, 2)]) # Check method compatibility with IOBuffer @test position(aio) == 7 @test seek(aio, 4) === aio @@ -188,19 +189,19 @@ end # Writing into the middle of the buffer @test write(seek(aio, 6), "alice") == 5 # Replace 'world' with 'alice' @test read(seekstart(aio), String) == "hello alice" - @test Base.annotations(aio) == [(1:5, :tag => 1), (7:11, :tag => 2)] # Should be unchanged - @test write(seek(aio, 0), Base.AnnotatedString("hey-o", [(1:5, :hey => 'o')])) == 5 + @test Base.annotations(aio) == vec2ann([(1:5, :tag, 1), (7:11, :tag, 2)]) # Should be unchanged + @test write(seek(aio, 0), Base.AnnotatedString("hey-o", [(1:5, :hey, 'o')])) == 5 @test read(seekstart(aio), String) == "hey-o alice" - @test Base.annotations(aio) == [(7:11, :tag => 2), (1:5, :hey => 'o')] # First annotation should have been entirely replaced - @test write(seek(aio, 7), Base.AnnotatedString("bbi", [(1:3, :hey => 'a')])) == 3 # a[lic => bbi]e ('alice' => 'abbie') + @test Base.annotations(aio) == vec2ann([(7:11, :tag, 2), (1:5, :hey, 'o')]) # First annotation should have been entirely replaced + @test write(seek(aio, 7), Base.AnnotatedString("bbi", [(1:3, :hey, 'a')])) == 3 # a[lic, bbi]e ('alice', 'abbie') @test read(seekstart(aio), String) == "hey-o abbie" - @test Base.annotations(aio) == [(7:7, :tag => 2), (11:11, :tag => 2), (1:5, :hey => 'o'), (8:10, :hey => 'a')] + @test Base.annotations(aio) == vec2ann([(7:7, :tag, 2), (11:11, :tag, 2), (1:5, :hey, 'o'), (8:10, :hey, 'a')]) @test write(seek(aio, 0), Base.AnnotatedString("ab")) == 2 # Check first annotation's region is adjusted correctly @test read(seekstart(aio), String) == "aby-o abbie" - @test Base.annotations(aio) == [(7:7, :tag => 2), (11:11, :tag => 2), (3:5, :hey => 'o'), (8:10, :hey => 'a')] + @test Base.annotations(aio) == vec2ann([(7:7, :tag, 2), (11:11, :tag, 2), (3:5, :hey, 'o'), (8:10, :hey, 'a')]) @test write(seek(aio, 3), Base.AnnotatedString("ss")) == 2 @test read(seekstart(aio), String) == "abyss abbie" - @test Base.annotations(aio) == [(7:7, :tag => 2), (11:11, :tag => 2), (3:3, :hey => 'o'), (8:10, :hey => 'a')] + @test Base.annotations(aio) == vec2ann([(7:7, :tag, 2), (11:11, :tag, 2), 
(3:3, :hey, 'o'), (8:10, :hey, 'a')]) # Writing one buffer to another newaio = Base.AnnotatedIOBuffer() @test write(newaio, seekstart(aio)) == 11 @@ -210,36 +211,37 @@ end @test sort(Base.annotations(newaio)) == sort(Base.annotations(aio)) @test write(newaio, seek(aio, 5)) == 6 @test read(seekstart(newaio), String) == "abyss abbie abbie" - @test sort(Base.annotations(newaio)) == sort(vcat(Base.annotations(aio), [(13:13, :tag => 2), (14:16, :hey => 'a'), (17:17, :tag => 2)])) + @test sort(Base.annotations(newaio)) == + sort(vcat(Base.annotations(aio), vec2ann([(13:13, :tag, 2), (14:16, :hey, 'a'), (17:17, :tag, 2)]))) # The `_insert_annotations!` cautious-merging optimisation aio = Base.AnnotatedIOBuffer() - @test write(aio, Base.AnnotatedChar('a', [:a => 1, :b => 2])) == 1 - @test Base.annotations(aio) == [(1:1, :a => 1), (1:1, :b => 2)] - @test write(aio, Base.AnnotatedChar('b', [:a => 1, :b => 2])) == 1 - @test Base.annotations(aio) == [(1:2, :a => 1), (1:2, :b => 2)] + @test write(aio, Base.AnnotatedChar('a', [(:a, 1), (:b, 2)])) == 1 + @test Base.annotations(aio) == vec2ann([(1:1, :a, 1), (1:1, :b, 2)]) + @test write(aio, Base.AnnotatedChar('b', [(:a, 1), (:b, 2)])) == 1 + @test Base.annotations(aio) == vec2ann([(1:2, :a, 1), (1:2, :b, 2)]) let aio2 = copy(aio) # A different start makes merging too risky to do. - @test write(aio2, Base.AnnotatedChar('c', [:a => 0, :b => 2])) == 1 - @test Base.annotations(aio2) == [(1:2, :a => 1), (1:2, :b => 2), (3:3, :a => 0), (3:3, :b => 2)] + @test write(aio2, Base.AnnotatedChar('c', [(:a, 0), (:b, 2)])) == 1 + @test Base.annotations(aio2) == vec2ann([(1:2, :a, 1), (1:2, :b, 2), (3:3, :a, 0), (3:3, :b, 2)]) end let aio2 = copy(aio) # Merging some run of the most recent annotations is fine though. - @test write(aio2, Base.AnnotatedChar('c', [:b => 2])) == 1 - @test Base.annotations(aio2) == [(1:2, :a => 1), (1:3, :b => 2)] + @test write(aio2, Base.AnnotatedChar('c', [(:b, 2)])) == 1 + @test Base.annotations(aio2) == vec2ann([(1:2, :a, 1), (1:3, :b, 2)]) end let aio2 = copy(aio) # ...and any subsequent annotations after a matching run can just be copied over. 
- @test write(aio2, Base.AnnotatedChar('c', [:b => 2, :c => 3, :d => 4])) == 1 - @test Base.annotations(aio2) == [(1:2, :a => 1), (1:3, :b => 2), (3:3, :c => 3), (3:3, :d => 4)] + @test write(aio2, Base.AnnotatedChar('c', [(:b, 2), (:c, 3), (:d, 4)])) == 1 + @test Base.annotations(aio2) == vec2ann([(1:2, :a, 1), (1:3, :b, 2), (3:3, :c, 3), (3:3, :d, 4)]) end let aio2 = Base.AnnotatedIOBuffer() - @test write(aio2, Base.AnnotatedChar('a', [:b => 1])) == 1 - @test write(aio2, Base.AnnotatedChar('b', [:a => 1, :b => 1])) == 1 + @test write(aio2, Base.AnnotatedChar('a', [(:b, 1)])) == 1 + @test write(aio2, Base.AnnotatedChar('b', [(:a, 1), (:b, 1)])) == 1 @test read(seekstart(aio2), Base.AnnotatedString) == - Base.AnnotatedString("ab", [(1:1, :b => 1), (2:2, :a => 1), (2:2, :b => 1)]) + Base.AnnotatedString("ab", [(1:1, :b, 1), (2:2, :a, 1), (2:2, :b, 1)]) end # Working through an IOContext aio = Base.AnnotatedIOBuffer() wrapio = IOContext(aio) - @test write(wrapio, Base.AnnotatedString("hey", [(1:3, :x => 1)])) == 3 - @test write(wrapio, Base.AnnotatedChar('a', [:y => 2])) == 1 + @test write(wrapio, Base.AnnotatedString("hey", [(1:3, :x, 1)])) == 3 + @test write(wrapio, Base.AnnotatedChar('a', [(:y, 2)])) == 1 @test read(seekstart(aio), Base.AnnotatedString) == - Base.AnnotatedString("heya", [(1:3, :x => 1), (4:4, :y => 2)]) + Base.AnnotatedString("heya", [(1:3, :x, 1), (4:4, :y, 2)]) end From 54299d941587d5f4371ba203efa5589e815bf6ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Wed, 16 Oct 2024 06:32:14 +0100 Subject: [PATCH 215/537] Remove redundant `convert` in `_setindex!` (#56178) Follow up to #56034, ref: https://github.com/JuliaLang/julia/pull/56034#discussion_r1798573573. --------- Co-authored-by: Cody Tapscott <84105208+topolarity@users.noreply.github.com> --- base/array.jl | 4 ++-- base/genericmemory.jl | 11 ++++++++--- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/base/array.jl b/base/array.jl index 9bd632f794aa5..a628c1212659d 100644 --- a/base/array.jl +++ b/base/array.jl @@ -999,11 +999,11 @@ function setindex!(A::Array{T}, x, i1::Int, i2::Int, I::Int...) where {T} x = x isa T ? x : convert(T, x)::T return _setindex!(A, x, i1, i2, I...) end -function _setindex!(A::Array{T}, x, i1::Int, i2::Int, I::Int...) where {T} +function _setindex!(A::Array{T}, x::T, i1::Int, i2::Int, I::Int...) where {T} @inline @_noub_if_noinbounds_meta @boundscheck checkbounds(A, i1, i2, I...) # generally _to_linear_index requires bounds checking - memoryrefset!(memoryrefnew(A.ref, _to_linear_index(A, i1, i2, I...), false), x isa T ? x : convert(T,x)::T, :not_atomic, false) + memoryrefset!(memoryrefnew(A.ref, _to_linear_index(A, i1, i2, I...), false), x, :not_atomic, false) return A end diff --git a/base/genericmemory.jl b/base/genericmemory.jl index 91b87ab14c6b1..5fe070a73628d 100644 --- a/base/genericmemory.jl +++ b/base/genericmemory.jl @@ -235,13 +235,18 @@ getindex(A::Memory, c::Colon) = copy(A) ## Indexing: setindex! ## -function setindex!(A::Memory{T}, x, i1::Int) where {T} - val = x isa T ? x : convert(T,x)::T +function _setindex!(A::Memory{T}, x::T, i1::Int) where {T} ref = memoryrefnew(memoryref(A), i1, @_boundscheck) - memoryrefset!(ref, val, :not_atomic, @_boundscheck) + memoryrefset!(ref, x, :not_atomic, @_boundscheck) return A end +function setindex!(A::Memory{T}, x, i1::Int) where {T} + @_propagate_inbounds_meta + val = x isa T ? 
x : convert(T,x)::T + return _setindex!(A, val, i1) +end + function setindex!(A::Memory{T}, x, i1::Int, i2::Int, I::Int...) where {T} @inline @boundscheck (i2 == 1 && all(==(1), I)) || throw_boundserror(A, (i1, i2, I...)) From 6ee784d919b6c97178d50fa85e0420d144f1adbe Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Wed, 16 Oct 2024 02:33:42 -0300 Subject: [PATCH 216/537] Improve type inference of Artifacts.jl (#56118) This also has some changes that move platform selection to compile time together with https://github.com/JuliaPackaging/JLLWrappers.jl/commit/45cc04963f3c99d4eb902f97528fe16fc37002cc, move the platform selection to compile time. (this helps juliac a ton) --- stdlib/Artifacts/src/Artifacts.jl | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/stdlib/Artifacts/src/Artifacts.jl b/stdlib/Artifacts/src/Artifacts.jl index 9bca72f6c7a14..e21db58b9445e 100644 --- a/stdlib/Artifacts/src/Artifacts.jl +++ b/stdlib/Artifacts/src/Artifacts.jl @@ -443,7 +443,7 @@ function artifact_hash(name::String, artifacts_toml::String; return nothing end - return SHA1(meta["git-tree-sha1"]) + return SHA1(meta["git-tree-sha1"]::String) end function select_downloadable_artifacts(artifact_dict::Dict, artifacts_toml::String; @@ -642,10 +642,9 @@ function artifact_slash_lookup(name::String, artifact_dict::Dict, if meta === nothing error("Cannot locate artifact '$(name)' for $(triplet(platform)) in '$(artifacts_toml)'") end - hash = SHA1(meta["git-tree-sha1"]) + hash = SHA1(meta["git-tree-sha1"]::String) return artifact_name, artifact_path_tail, hash end - """ macro artifact_str(name) @@ -707,17 +706,16 @@ macro artifact_str(name, platform=nothing) # If `name` is a constant, (and we're using the default `Platform`) we can actually load # and parse the `Artifacts.toml` file now, saving the work from runtime. - if isa(name, AbstractString) && platform === nothing - # To support slash-indexing, we need to split the artifact name from the path tail: + if platform === nothing platform = HostPlatform() + end + if isa(name, AbstractString) && isa(platform, AbstractPlatform) + # To support slash-indexing, we need to split the artifact name from the path tail: artifact_name, artifact_path_tail, hash = artifact_slash_lookup(name, artifact_dict, artifacts_toml, platform) return quote Base.invokelatest(_artifact_str, $(__module__), $(artifacts_toml), $(artifact_name), $(artifact_path_tail), $(artifact_dict), $(hash), $(platform), Val($(LazyArtifacts)))::String end else - if platform === nothing - platform = :($(HostPlatform)()) - end return quote local platform = $(esc(platform)) local artifact_name, artifact_path_tail, hash = artifact_slash_lookup($(esc(name)), $(artifact_dict), $(artifacts_toml), platform) From a98f3713cb9aea3225cdb0e3ec0dd719f556a974 Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Wed, 16 Oct 2024 08:06:42 +0200 Subject: [PATCH 217/537] Initial support for RISC-V (#56105) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rebase and extension of @alexfanqi's initial work on porting Julia to RISC-V. Requires LLVM 19. Tested on a VisionFive2, built with: ```make MARCH := rv64gc_zba_zbb MCPU := sifive-u74 USE_BINARYBUILDER:=0 DEPS_GIT = llvm override LLVM_VER=19.1.1 override LLVM_BRANCH=julia-release/19.x override LLVM_SHA1=julia-release/19.x ``` ```julia-repl ❯ ./julia _ _ _ _(_)_ | Documentation: https://docs.julialang.org (_) | (_) (_) | _ _ _| |_ __ _ | Type "?" for help, "]?" for Pkg help. 
| | | | | | |/ _` | | | | |_| | | | (_| | | Version 1.12.0-DEV.1374 (2024-10-14) _/ |\__'_|_|_|\__'_| | riscv/25092a3982* (fork: 1 commits, 0 days) |__/ | julia> versioninfo(; verbose=true) Julia Version 1.12.0-DEV.1374 Commit 25092a3982* (2024-10-14 09:57 UTC) Platform Info: OS: Linux (riscv64-unknown-linux-gnu) uname: Linux 6.11.3-1-riscv64 #1 SMP Debian 6.11.3-1 (2024-10-10) riscv64 unknown CPU: unknown: speed user nice sys idle irq #1 1500 MHz 922 s 0 s 265 s 160953 s 0 s #2 1500 MHz 457 s 0 s 280 s 161521 s 0 s #3 1500 MHz 452 s 0 s 270 s 160911 s 0 s #4 1500 MHz 638 s 15 s 301 s 161340 s 0 s Memory: 7.760246276855469 GB (7474.08203125 MB free) Uptime: 16260.13 sec Load Avg: 0.25 0.23 0.1 WORD_SIZE: 64 LLVM: libLLVM-19.1.1 (ORCJIT, sifive-u74) Threads: 1 default, 0 interactive, 1 GC (on 4 virtual cores) Environment: HOME = /home/tim PATH = /home/tim/.local/bin:/usr/local/bin:/usr/bin:/bin:/usr/games TERM = xterm-256color julia> ccall(:jl_dump_host_cpu, Nothing, ()) CPU: sifive-u74 Features: +zbb,+d,+i,+f,+c,+a,+zba,+m,-zvbc,-zksed,-zvfhmin,-zbkc,-zkne,-zksh,-zfh,-zfhmin,-zknh,-v,-zihintpause,-zicboz,-zbs,-zvknha,-zvksed,-zfa,-ztso,-zbc,-zvknhb,-zihintntl,-zknd,-zvbb,-zbkx,-zkt,-zvkt,-zicond,-zvksh,-zvfh,-zvkg,-zvkb,-zbkb,-zvkned julia> @code_native debuginfo=:none 1+2. .text .attribute 4, 16 .attribute 5, "rv64i2p1_m2p0_a2p1_f2p2_d2p2_c2p0_zicsr2p0_zifencei2p0_zmmul1p0_zba1p0_zbb1p0" .file "+" .globl "julia_+_3003" .p2align 1 .type "julia_+_3003",@function "julia_+_3003": addi sp, sp, -16 sd ra, 8(sp) sd s0, 0(sp) addi s0, sp, 16 fcvt.d.l fa5, a0 ld ra, 8(sp) ld s0, 0(sp) fadd.d fa0, fa5, fa0 addi sp, sp, 16 ret .Lfunc_end0: .size "julia_+_3003", .Lfunc_end0-"julia_+_3003" .type ".L+Core.Float64#3005",@object .section .data.rel.ro,"aw",@progbits .p2align 3, 0x0 ".L+Core.Float64#3005": .quad ".L+Core.Float64#3005.jit" .size ".L+Core.Float64#3005", 8 .set ".L+Core.Float64#3005.jit", 272467692544 .size ".L+Core.Float64#3005.jit", 8 .section ".note.GNU-stack","",@progbits ``` Lots of bugs guaranteed, but with this we at least have a functional build and REPL for further development by whoever is interested. Also requires Linux 6.4+, since the fallback processor detection used here relies on LLVM's `sys::getHostCPUFeatures`, which for RISC-V is implemented using hwprobe introduced in 6.4. We could probably add a fallback that parses `/proc/cpuinfo`, either by building a CPU database much like how we've done for AArch64, or by parsing the actual ISA string contained there. That would probably also be a good place to add support for profiles, which are supposedly the way forward to package RISC-V binaries. That can happen in follow-up PRs though. For now, on older kernels, use the `-C` arg to Julia to specify an ISA. 
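As a rough sketch of what that could look like (hypothetical invocation; the exact target string must match your hardware — the expanded `generic-rv64` form below is the one suggested in the new `doc/src/devdocs/build/riscv.md` added by this patch):

```sh
# Assumed example: an RV64GC machine on a pre-6.4 kernel without hwprobe,
# so we pass an explicit CPU target instead of relying on runtime feature detection.
julia -C "generic-rv64,i,m,a,f,d,zicsr,zifencei,c"
```
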
Co-authored-by: Alex Fan --- Make.inc | 13 +- base/binaryplatforms.jl | 5 +- base/cpuid.jl | 3 + cli/trampolines/trampolines_riscv64.S | 20 ++ contrib/generate_precompile.jl | 9 +- contrib/normalize_triplet.py | 1 + doc/src/devdocs/build/build.md | 1 + doc/src/devdocs/build/riscv.md | 103 +++++++++ src/abi_riscv.cpp | 315 ++++++++++++++++++++++++++ src/aotcompile.cpp | 3 +- src/ccall.cpp | 3 + src/codegen.cpp | 7 +- src/disasm.cpp | 2 + src/jitlayers.cpp | 26 ++- src/jitlayers.h | 17 +- src/julia_internal.h | 9 +- src/julia_threads.h | 2 +- src/llvm-ptls.cpp | 2 + src/llvm-version.h | 4 + src/runtime_intrinsics.c | 4 +- src/signal-handling.c | 19 +- src/signals-unix.c | 8 + src/stackwalk.c | 38 ++++ src/support/platform.h | 3 + src/task.c | 8 + src/threading.c | 4 +- 26 files changed, 609 insertions(+), 20 deletions(-) create mode 100644 cli/trampolines/trampolines_riscv64.S create mode 100644 doc/src/devdocs/build/riscv.md create mode 100644 src/abi_riscv.cpp diff --git a/Make.inc b/Make.inc index 53aee8a269732..cb79e3ca1b5a9 100644 --- a/Make.inc +++ b/Make.inc @@ -938,8 +938,12 @@ endif #If nothing is set default to native unless we are cross-compiling ifeq ($(MARCH)$(MCPU)$(MTUNE)$(JULIA_CPU_TARGET)$(XC_HOST),) -ifeq ($(ARCH),aarch64) #ARM recommends only setting MCPU for AArch64 +ifeq ($(ARCH),aarch64) +# ARM recommends only setting MCPU for AArch64 MCPU=native +else ifneq (,$(findstring riscv64,$(ARCH))) +# RISC-V doesn't have a native option +$(error Building for RISC-V requires a specific MARCH to be set)) else MARCH=native MTUNE=native @@ -995,6 +999,9 @@ endif ifneq (,$(findstring arm,$(ARCH))) DIST_ARCH:=arm endif +ifneq (,$(findstring riscv64,$(ARCH))) +DIST_ARCH:=riscv64 +endif JULIA_BINARYDIST_FILENAME := julia-$(JULIA_COMMIT)-$(DIST_OS)$(DIST_ARCH) endif @@ -1018,8 +1025,12 @@ ifneq ($(MARCH),) CC += -march=$(MARCH) CXX += -march=$(MARCH) FC += -march=$(MARCH) +# On RISC-V, don't forward the MARCH ISA string to JULIA_CPU_TARGET, +# as it's always incompatible with LLVM's CPU target name parser. 
+ifeq (,$(findstring riscv64,$(ARCH))) JULIA_CPU_TARGET ?= $(MARCH) endif +endif # Set MCPU-specific flags ifneq ($(MCPU),) diff --git a/base/binaryplatforms.jl b/base/binaryplatforms.jl index c8a55c99a5724..a372137edeb98 100644 --- a/base/binaryplatforms.jl +++ b/base/binaryplatforms.jl @@ -597,7 +597,7 @@ const arch_mapping = Dict( "armv7l" => "arm(v7l)?", # if we just see `arm-linux-gnueabihf`, we assume it's `armv7l` "armv6l" => "armv6l", "powerpc64le" => "p(ower)?pc64le", - "riscv64" => "riscv64", + "riscv64" => "(rv64|riscv64)", ) # Keep this in sync with `CPUID.ISAs_by_family` # These are the CPUID side of the microarchitectures targeted by GCC flags in BinaryBuilder.jl @@ -631,6 +631,9 @@ const arch_march_isa_mapping = let "a64fx" => get_set("aarch64", "a64fx"), "apple_m1" => get_set("aarch64", "apple_m1"), ], + "riscv64" => [ + "riscv64" => get_set("riscv64", "riscv64") + ], "powerpc64le" => [ "power8" => get_set("powerpc64le", "power8"), ], diff --git a/base/cpuid.jl b/base/cpuid.jl index f653ba27b4bcd..0370bd33b83e5 100644 --- a/base/cpuid.jl +++ b/base/cpuid.jl @@ -61,6 +61,9 @@ const ISAs_by_family = Dict( "a64fx" => ISA(Set((JL_AArch64_v8_2a, JL_AArch64_lse, JL_AArch64_crc, JL_AArch64_rdm, JL_AArch64_sha2, JL_AArch64_ccpp, JL_AArch64_complxnum, JL_AArch64_fullfp16, JL_AArch64_sve))), "apple_m1" => ISA(Set((JL_AArch64_v8_5a, JL_AArch64_lse, JL_AArch64_crc, JL_AArch64_rdm, JL_AArch64_aes, JL_AArch64_sha2, JL_AArch64_sha3, JL_AArch64_ccpp, JL_AArch64_complxnum, JL_AArch64_fp16fml, JL_AArch64_fullfp16, JL_AArch64_dotprod, JL_AArch64_rcpc, JL_AArch64_altnzcv))), ], + "riscv64" => [ + "riscv64" => ISA(Set{UInt32}()), + ], "powerpc64le" => [ # We have no way to test powerpc64le features yet, so we're only going to declare the lowest ISA: "power8" => ISA(Set{UInt32}()), diff --git a/cli/trampolines/trampolines_riscv64.S b/cli/trampolines/trampolines_riscv64.S new file mode 100644 index 0000000000000..26307b7c2bb36 --- /dev/null +++ b/cli/trampolines/trampolines_riscv64.S @@ -0,0 +1,20 @@ +// This file is a part of Julia. License is MIT: https://julialang.org/license + +#include "common.h" +#include "../../src/jl_exported_funcs.inc" + +#define SEP ; + +#define XX(name) \ +.global CNAME(name) SEP \ +.cfi_startproc SEP \ +.p2align 2 SEP \ + CNAME(name)##: SEP \ + auipc t3, %pcrel_hi(CNAMEADDR(name)) SEP \ + ld t3, %pcrel_lo(CNAME(name))(t3) SEP \ + jr t3 SEP \ +.cfi_endproc SEP \ + +JL_RUNTIME_EXPORTED_FUNCS(XX) +JL_CODEGEN_EXPORTED_FUNCS(XX) +#undef XX diff --git a/contrib/generate_precompile.jl b/contrib/generate_precompile.jl index 60f7290c7a0ac..04d13011d6223 100644 --- a/contrib/generate_precompile.jl +++ b/contrib/generate_precompile.jl @@ -202,12 +202,15 @@ if Artifacts !== nothing using Artifacts, Base.BinaryPlatforms, Libdl artifacts_toml = abspath(joinpath(Sys.STDLIB, "Artifacts", "test", "Artifacts.toml")) artifact_hash("HelloWorldC", artifacts_toml) - oldpwd = pwd(); cd(dirname(artifacts_toml)) - macroexpand(Main, :(@artifact_str("HelloWorldC"))) - cd(oldpwd) artifacts = Artifacts.load_artifacts_toml(artifacts_toml) platforms = [Artifacts.unpack_platform(e, "HelloWorldC", artifacts_toml) for e in artifacts["HelloWorldC"]] best_platform = select_platform(Dict(p => triplet(p) for p in platforms)) + if best_platform !== nothing + # @artifact errors for unsupported platforms + oldpwd = pwd(); cd(dirname(artifacts_toml)) + macroexpand(Main, :(@artifact_str("HelloWorldC"))) + cd(oldpwd) + end dlopen("libjulia$(Base.isdebugbuild() ? 
"-debug" : "")", RTLD_LAZY | RTLD_DEEPBIND) """ end diff --git a/contrib/normalize_triplet.py b/contrib/normalize_triplet.py index b1bab29487b8f..833b725480996 100755 --- a/contrib/normalize_triplet.py +++ b/contrib/normalize_triplet.py @@ -14,6 +14,7 @@ 'i686': "i\\d86", 'aarch64': "(arm|aarch)64", 'armv7l': "arm(v7l)?", + 'riscv64': "(rv64|riscv64)", 'powerpc64le': "p(ower)?pc64le", } platform_mapping = { diff --git a/doc/src/devdocs/build/build.md b/doc/src/devdocs/build/build.md index 0ef9ce4e4f071..553f7c2e815cf 100644 --- a/doc/src/devdocs/build/build.md +++ b/doc/src/devdocs/build/build.md @@ -148,6 +148,7 @@ Notes for various operating systems: Notes for various architectures: * [ARM](https://github.com/JuliaLang/julia/blob/master/doc/src/devdocs/build/arm.md) +* [RISC-V](https://github.com/JuliaLang/julia/blob/master/doc/src/devdocs/build/riscv.md) ## Required Build Tools and External Libraries diff --git a/doc/src/devdocs/build/riscv.md b/doc/src/devdocs/build/riscv.md new file mode 100644 index 0000000000000..7c0e7ab29d9f8 --- /dev/null +++ b/doc/src/devdocs/build/riscv.md @@ -0,0 +1,103 @@ +# RISC-V (Linux) + +Julia has experimental support for 64-bit RISC-V (RV64) processors running +Linux. This file provides general guidelines for compilation, in addition to +instructions for specific devices. + +A list of [known issues](https://github.com/JuliaLang/julia/labels/system:riscv) +for RISC-V is available. If you encounter difficulties, please create an issue +including the output from `cat /proc/cpuinfo`. + + +## Compiling Julia + +For now, Julia will need to be compiled entirely from source, i.e., including +all of its dependencies. This can be accomplished with the following +`Make.user`: + +```make +USE_BINARYBUILDER := 0 +``` + +Additionally, it is required to indicate what architecture, and optionally which +CPU to build for. This can be done by setting the `MARCH` and `MCPU` variables +in `Make.user` + +The `MARCH` variable needs to be set to a RISC-V ISA string, which can be found by +looking at the documentation of your device, or by inspecting `/proc/cpuinfo`. Only +use flags that your compiler supports, e.g., run `gcc -march=help` to see a list of +supported flags. A common value is `rv64gc`, which is a good starting point. + +The `MCPU` variable is optional, and can be used to further optimize the +generated code for a specific CPU. If you are unsure, it is recommended to leave +it unset. You can find a list of supported values by running `gcc --target-help`. + +For example, if you are using a StarFive VisionFive2, which contains a JH7110 +processor based on the SiFive U74, you can set these flags as follows: + +```make +MARCH := rv64gc_zba_zbb +MCPU := sifive-u74 +``` + +If you prefer a portable build, you could use: + +```make +MARCH := rv64gc + +# also set JULIA_CPU_TARGET to the expanded form of rv64gc +# (it normally copies the value of MCPU, which we don't set) +JULIA_CPU_TARGET := generic-rv64,i,m,a,f,d,zicsr,zifencei,c +``` + +### Cross-compilation + +A native build on a RISC-V device may take a very long time, so it's also +possible to cross-compile Julia on a faster machine. + +First, get a hold of a RISC-V cross-compilation toolchain that provides +support for C, C++ and Fortran. 
This can be done by checking-out the +[riscv-gnu-toolchain](https://github.com/riscv-collab/riscv-gnu-toolchain) +repository and building it as follows: + +```sh +sudo mkdir /opt/riscv && sudo chown $USER /opt/riscv +./configure --prefix=/opt/riscv --with-languages=c,c++,fortran +make linux -j$(nproc) +``` + +Then, install the QEMU user-mode emulator for RISC-V, along with `binfmt` +support to enable execution of RISC-V binaries on the host machine. The +exact steps depend on your distribution, e.g., on Arch Linux it involves +installing the `qemu-user-static` and `qemu-user-static-binfmt` packages. +Note that to actually execute RISC-V binaries, QEMU will need to be able to +find the RISC-V system root, which can be achieved by setting the +`QEMU_LD_PREFIX` environment variable to the path of the root filesystem. + +Finally, compile Julia with the following `Make.user` variables (in addition to +the ones from the previous section): + +```make +XC_HOST=riscv64-unknown-linux-gnu +OS=Linux +export QEMU_LD_PREFIX=/opt/riscv/sysroot +``` + +Note that you will have to execute `make` with `PATH` set to include the +cross-compilation toolchain, e.g., by running: + +```sh +PATH=/opt/riscv/bin:$PATH make -j$(nproc) +``` + +Because of the RISC-V sysroot we use being very barren, you may need to +add additional libraries that the Julia build system currently expects +to be available system-wide. For example, the build currently relies on +a system-provided `libz`, so you may need to copy this library from the +Julia build into the system root: + +```sh +make -C deps install-zlib +cp -v usr/lib/libz.* /opt/riscv/sysroot/usr/lib +cp -v usr/include/z*.h /opt/riscv/sysroot/usr/include +``` diff --git a/src/abi_riscv.cpp b/src/abi_riscv.cpp new file mode 100644 index 0000000000000..cbd85892801c8 --- /dev/null +++ b/src/abi_riscv.cpp @@ -0,0 +1,315 @@ +// This file is a part of Julia. License is MIT: https://julialang.org/license + +//===----------------------------------------------------------------------===// +// +// The ABI implementation used for RISC-V targets. 
+// +//===----------------------------------------------------------------------===// +// +// The Procedure Call Standard can be found here: +// https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc +// +// This code is based on: +// - The Rust implementation: +// https://github.com/rust-lang/rust/blob/master/compiler/rustc_target/src/abi/call/riscv.rs +// - The LLVM RISC-V backend: +// https://github.com/llvm/llvm-project/blob/78533528cf5ed04ac78722afff7c9f2f91aa8359/llvm/lib/Target/RISCV/RISCVISelLowering.cpp#L10865 +// +//===----------------------------------------------------------------------===// + + +struct ABI_RiscvLayout : AbiLayout { + +static const size_t XLen = 8; +static const size_t FLen = 8; +static const int NumArgGPRs = 8; +static const int NumArgFPRs = 8; + +// available register num is needed to determine if fp pair or int-fp pair in a struct should be unpacked +// WARN: with this, use_sret must only be called once before the next +// needPassByRef call, otherwise avail_gprs is wrong +int avail_gprs, avail_fprs; + +// preferred type is determined in the same time of use_sret & needPassByRef +// cache it here to avoid computing it again in preferred_llvm_type +Type *cached_llvmtype = NULL; + +ABI_RiscvLayout() : avail_gprs(NumArgGPRs), avail_fprs(NumArgFPRs) {} + +enum RegPassKind { UNKNOWN = 0, INTEGER = 1, FLOAT = 2 }; + +struct ElementType { + RegPassKind type; + jl_datatype_t *dt; + ElementType() : type(RegPassKind::UNKNOWN), dt(NULL) {}; +}; + +bool is_floattype(jl_datatype_t *dt) const +{ + return dt == jl_float16_type || dt == jl_float32_type || dt == jl_float64_type; +} + +Type *get_llvm_fptype(jl_datatype_t *dt, LLVMContext &ctx) const +{ + assert(is_floattype(dt)); + switch (jl_datatype_size(dt)) { + case 2: return Type::getHalfTy(ctx); + case 4: return Type::getFloatTy(ctx); + case 8: return Type::getDoubleTy(ctx); + case 16: return Type::getFP128Ty(ctx); + default: assert(0 && "abi_riscv: unsupported floating point type"); return NULL; + } +} + +// for primitive types that can be passed as integer +// includes integer, bittypes, pointer +Type *get_llvm_inttype(jl_datatype_t *dt, LLVMContext &ctx) const +{ + assert(jl_is_primitivetype(dt)); + // XXX: without Zfh, Float16 is passed in integer registers + if (dt == jl_float16_type) + return Type::getInt32Ty(ctx); + assert(!is_floattype(dt)); + if (dt == jl_bool_type) + return getInt8Ty(ctx); + if (dt == jl_int32_type) + return getInt32Ty(ctx); + if (dt == jl_int64_type) + return getInt64Ty(ctx); + int nb = jl_datatype_size(dt); + return Type::getIntNTy(ctx, nb * 8); +} + +bool should_use_fp_conv(jl_datatype_t *dt, ElementType &ele1, ElementType &ele2) const +{ + if (jl_is_primitivetype(dt)) { + size_t dsz = jl_datatype_size(dt); + if (dsz > FLen) { + return false; + } + if (is_floattype(dt)) { + if (ele1.type == RegPassKind::UNKNOWN) { + ele1.type = RegPassKind::FLOAT; + ele1.dt = dt; + } + else if (ele2.type == RegPassKind::UNKNOWN) { + ele2.type = RegPassKind::FLOAT; + ele2.dt = dt; + } + else { + // 3 elements not eligible, must be a pair + return false; + } + } + // integer or pointer type or bitstypes + else { + if (ele1.type == RegPassKind::UNKNOWN) { + ele1.type = RegPassKind::INTEGER; + ele1.dt = dt; + } + else if (ele1.type == RegPassKind::INTEGER) { + // two integers not eligible + return false; + } + // ele1.type == RegPassKind::FLOAT + else { + if (ele2.type == RegPassKind::UNKNOWN) { + ele2.type = RegPassKind::INTEGER; + ele2.dt = dt; + } + else { + // 3 elements not 
eligible, must be a pair + return false; + } + } + } + } + else { // aggregates + while (size_t nfields = jl_datatype_nfields(dt)) { + size_t i; + size_t fieldsz; + for (i = 0; i < nfields; i++) { + if ((fieldsz = jl_field_size(dt, i))) { + break; + } + } + assert(i < nfields); + // If there's only one non zero sized member, try again on this member + if (fieldsz == jl_datatype_size(dt)) { + dt = (jl_datatype_t *)jl_field_type(dt, i); + if (!jl_is_datatype(dt)) // could be inline union #46787 + return false; + continue; + } + for (; i < nfields; i++) { + size_t fieldsz = jl_field_size(dt, i); + if (fieldsz == 0) + continue; + jl_datatype_t *fieldtype = (jl_datatype_t *)jl_field_type(dt, i); + if (!jl_is_datatype(dt)) // could be inline union + return false; + // This needs to be done after the zero size member check + if (ele2.type != RegPassKind::UNKNOWN) { + // we already have a pair and can't accept more elements + return false; + } + if (!should_use_fp_conv(fieldtype, ele1, ele2)) { + return false; + } + } + break; + } + } + // Tuple{Int,} can reach here as well, but doesn't really hurt + return true; +} + +Type *get_llvm_inttype_byxlen(size_t xlen, LLVMContext &ctx) const +{ + if (xlen == 8) { + return getInt64Ty(ctx); + } + else if (xlen == 4) { + return getInt32Ty(ctx); + } + else { + assert(0 && "abi_riscv: unsupported xlen"); + return NULL; + } +} + +Type *classify_arg(jl_datatype_t *ty, int &avail_gprs, int &avail_fprs, bool &onstack, + LLVMContext &ctx) const +{ + onstack = false; + if (ty == jl_nothing_type) { + return NULL; + } + ElementType ele1, ele2; + if (should_use_fp_conv(ty, ele1, ele2)) { + if (ele1.type == RegPassKind::FLOAT) { + if (ele2.type == RegPassKind::FLOAT) { + if (avail_fprs >= 2) { + avail_fprs -= 2; + SmallVector eles; + eles.push_back(get_llvm_fptype(ele1.dt, ctx)); + eles.push_back(get_llvm_fptype(ele2.dt, ctx)); + return StructType::get(ctx, eles); + } + } + else if (ele2.type == RegPassKind::INTEGER) { + if (avail_fprs >= 1 && avail_gprs >= 1) { + avail_fprs -= 1; + avail_gprs -= 1; + SmallVector eles; + eles.push_back(get_llvm_fptype(ele1.dt, ctx)); + eles.push_back(get_llvm_inttype(ele2.dt, ctx)); + return StructType::get(ctx, eles); + } + } + else { + // A struct containing just one floating-point real is passed + // as though it were a standalone floating-point real. + if (avail_fprs >= 1) { + avail_fprs -= 1; + return get_llvm_fptype(ele1.dt, ctx); + } + } + } + else if (ele1.type == RegPassKind::INTEGER) { + if (ele2.type == RegPassKind::FLOAT) { + if (avail_fprs >= 1 && avail_gprs >= 1) { + avail_fprs -= 1; + avail_gprs -= 1; + return StructType::get(get_llvm_inttype(ele1.dt, ctx), + get_llvm_fptype(ele2.dt, ctx)); + } + } + } + } + size_t dsz = jl_datatype_size(ty); + if (dsz > 2 * XLen) { + if (!jl_is_primitivetype(ty)) { + onstack = true; + } + // else let llvm backend handle scalars + if (avail_gprs >= 1) { + avail_gprs -= 1; + } + return NULL; + } + + if (dsz > XLen) { + size_t alignment = jl_datatype_align(ty); + bool align_regs = alignment > XLen; + if (avail_gprs >= 2) { + avail_gprs -= 2; + } + // should we handle variadic as well? + // Variadic arguments with 2×XLEN-bit alignment and size at most 2×XLEN + // bits are passed in an aligned register pair + else { + avail_gprs = 0; + } + + if (!jl_is_primitivetype(ty)) { + // Aggregates or scalars passed on the stack are aligned to the + // greater of the type alignment and XLen bits, but never more than + // the stack alignment. 
+ if (align_regs) { + if (alignment == 16) { + return Type::getInt128Ty(ctx); + } + else { + return Type::getInt64Ty(ctx); + } + } + else { + return ArrayType::get(get_llvm_inttype_byxlen(XLen, ctx), 2); + } + } + // let llvm backend handle scalars + return NULL; + } + + //else dsz <= XLen + if (avail_gprs >= 1) { + avail_gprs -= 1; + } + if (!jl_is_primitivetype(ty)) { + return get_llvm_inttype_byxlen(XLen, ctx); + } + return get_llvm_inttype(ty, ctx); +} + +bool use_sret(jl_datatype_t *ty, LLVMContext &ctx) override +{ + bool onstack = false; + int gprs = 2; + int fprs = FLen ? 2 : 0; + this->cached_llvmtype = classify_arg(ty, gprs, fprs, onstack, ctx); + if (onstack) { + this->avail_gprs -= 1; + return true; + } + else { + return false; + } +} + +bool needPassByRef(jl_datatype_t *ty, AttrBuilder &ab, LLVMContext &ctx, + Type *Ty) override +{ + bool onstack = false; + this->cached_llvmtype = + classify_arg(ty, this->avail_gprs, this->avail_fprs, onstack, ctx); + return onstack; +} + +Type *preferred_llvm_type(jl_datatype_t *ty, bool isret, + LLVMContext &ctx) const override +{ + return this->cached_llvmtype; +} + +}; diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index c2f112f9c9d5c..279686c387e1b 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -1664,7 +1664,8 @@ void jl_dump_native_impl(void *native_code, } CodeModel::Model CMModel = CodeModel::Small; - if (TheTriple.isPPC() || (TheTriple.isX86() && TheTriple.isArch64Bit() && TheTriple.isOSLinux())) { + if (TheTriple.isPPC() || TheTriple.isRISCV() || + (TheTriple.isX86() && TheTriple.isArch64Bit() && TheTriple.isOSLinux())) { // On PPC the small model is limited to 16bit offsets. For very large images the small code model CMModel = CodeModel::Medium; // isn't good enough on x86 so use Medium, it has no cost because only the image goes in .ldata } diff --git a/src/ccall.cpp b/src/ccall.cpp index 2de5be6906e7c..f559ddbe93a43 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -367,6 +367,7 @@ static bool is_native_simd_type(jl_datatype_t *dt) { #include "abi_arm.cpp" #include "abi_aarch64.cpp" +#include "abi_riscv.cpp" #include "abi_ppc64le.cpp" #include "abi_win32.cpp" #include "abi_win64.cpp" @@ -391,6 +392,8 @@ static bool is_native_simd_type(jl_datatype_t *dt) { typedef ABI_ARMLayout DefaultAbiState; #elif defined _CPU_AARCH64_ typedef ABI_AArch64Layout DefaultAbiState; +#elif defined _CPU_RISCV64_ + typedef ABI_RiscvLayout DefaultAbiState; #elif defined _CPU_PPC64_ typedef ABI_PPC64leLayout DefaultAbiState; #else diff --git a/src/codegen.cpp b/src/codegen.cpp index bcda527416676..3f69f4789493a 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5368,7 +5368,7 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos } CallInst *call = ctx.builder.CreateCall(cft, TheCallee, argvals); call->setAttributes(returninfo.attrs); - if (gcstack_arg) + if (gcstack_arg && ctx.emission_context.use_swiftcc) call->setCallingConv(CallingConv::Swift); jl_cgval_t retval; @@ -8186,7 +8186,8 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value if (gcstack_arg){ AttrBuilder param(ctx.builder.getContext()); - param.addAttribute(Attribute::SwiftSelf); + if (ctx.emission_context.use_swiftcc) + param.addAttribute(Attribute::SwiftSelf); param.addAttribute(Attribute::NonNull); attrs.push_back(AttributeSet::get(ctx.builder.getContext(), param)); fsig.push_back(PointerType::get(JuliaType::get_ppjlvalue_ty(ctx.builder.getContext()), 0)); @@ -8278,7 +8279,7 @@ static jl_returninfo_t 
get_specsig_function(jl_codectx_t &ctx, Module *M, Value fval = emit_inttoptr(ctx, fval, ftype->getPointerTo()); } if (auto F = dyn_cast(fval)) { - if (gcstack_arg) + if (gcstack_arg && ctx.emission_context.use_swiftcc) F->setCallingConv(CallingConv::Swift); assert(F->arg_size() >= argnames.size()); for (size_t i = 0; i < argnames.size(); i++) { diff --git a/src/disasm.cpp b/src/disasm.cpp index ebe8f2ac397c0..b944e48430c29 100644 --- a/src/disasm.cpp +++ b/src/disasm.cpp @@ -1058,6 +1058,8 @@ static void jl_dump_asm_internal( if (insSize == 0) // skip illegible bytes #if defined(_CPU_PPC_) || defined(_CPU_PPC64_) || defined(_CPU_ARM_) || defined(_CPU_AARCH64_) insSize = 4; // instructions are always 4 bytes +#elif defined(_CPU_RISCV64_) + insSize = 2; // instructions can be 2 bytes when compressed #else insSize = 1; // attempt to slide 1 byte forward #endif diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index 4ff7400df13dd..313449dda5557 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -998,6 +998,16 @@ namespace { #if defined(MSAN_EMUTLS_WORKAROUND) options.EmulatedTLS = true; options.ExplicitEmulatedTLS = true; +#endif +#if defined(_CPU_RISCV64_) + // we set these manually to avoid LLVM defaulting to soft-float +#if defined(__riscv_float_abi_double) + options.MCOptions.ABIName = "lp64d"; +#elif defined(__riscv_float_abi_single) + options.MCOptions.ABIName = "lp64f"; +#else + options.MCOptions.ABIName = "lp64"; +#endif #endif uint32_t target_flags = 0; auto target = jl_get_llvm_target(imaging_default(), target_flags); @@ -1042,11 +1052,23 @@ namespace { #endif if (TheTriple.isAArch64()) codemodel = CodeModel::Small; + else if (TheTriple.isRISCV()) { + // RISC-V will support large code model in LLVM 21 + // https://github.com/llvm/llvm-project/pull/70308 + codemodel = CodeModel::Medium; + } + // Generate simpler code for JIT + Reloc::Model relocmodel = Reloc::Static; + if (TheTriple.isRISCV()) { + // until large code model is supported, use PIC for RISC-V + // https://github.com/llvm/llvm-project/issues/106203 + relocmodel = Reloc::PIC_; + } auto optlevel = CodeGenOptLevelFor(jl_options.opt_level); auto TM = TheTarget->createTargetMachine( TheTriple.getTriple(), TheCPU, FeaturesStr, options, - Reloc::Static, // Generate simpler code for JIT + relocmodel, codemodel, optlevel, true // JIT @@ -1067,7 +1089,7 @@ namespace { .setCPU(TM.getTargetCPU().str()) .setFeatures(TM.getTargetFeatureString()) .setOptions(TM.Options) - .setRelocationModel(Reloc::Static) + .setRelocationModel(TM.getRelocationModel()) .setCodeModel(TM.getCodeModel()) .setCodeGenOptLevel(CodeGenOptLevelFor(optlevel)); } diff --git a/src/jitlayers.h b/src/jitlayers.h index 3353a4093bd27..f4b9a6ea5395a 100644 --- a/src/jitlayers.h +++ b/src/jitlayers.h @@ -58,6 +58,10 @@ # define JL_USE_JITLINK #endif +#if defined(_CPU_RISCV64_) +# define JL_USE_JITLINK +#endif + # include # include # include @@ -257,9 +261,18 @@ struct jl_codegen_params_t { bool external_linkage = false; bool imaging_mode; int debug_level; + bool use_swiftcc = true; jl_codegen_params_t(orc::ThreadSafeContext ctx, DataLayout DL, Triple triple) - : tsctx(std::move(ctx)), tsctx_lock(tsctx.getLock()), - DL(std::move(DL)), TargetTriple(std::move(triple)), imaging_mode(imaging_default()) {} + : tsctx(std::move(ctx)), + tsctx_lock(tsctx.getLock()), + DL(std::move(DL)), + TargetTriple(std::move(triple)), + imaging_mode(imaging_default()) + { + // LLVM's RISC-V back-end currently does not support the Swift calling convention + if (TargetTriple.isRISCV()) 
+ use_swiftcc = false; + } }; jl_llvm_functions_t jl_emit_code( diff --git a/src/julia_internal.h b/src/julia_internal.h index 20d90fede3d5e..c09bfc5c3eb42 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -105,8 +105,8 @@ JL_DLLIMPORT void __tsan_switch_to_fiber(void *fiber, unsigned flags); #ifndef _OS_WINDOWS_ #if defined(_CPU_ARM_) || defined(_CPU_PPC_) || defined(_CPU_WASM_) #define MAX_ALIGN 8 - #elif defined(_CPU_AARCH64_) || (JL_LLVM_VERSION >= 180000 && (defined(_CPU_X86_64_) || defined(_CPU_X86_))) - // int128 is 16 bytes aligned on aarch64 and on x86 with LLVM >= 18 + #elif defined(_CPU_AARCH64_) || defined(_CPU_RISCV64_) || (JL_LLVM_VERSION >= 180000 && (defined(_CPU_X86_64_) || defined(_CPU_X86_))) + // int128 is 16 bytes aligned on aarch64 and riscv, and on x86 with LLVM >= 18 #define MAX_ALIGN 16 #elif defined(_P64) // Generically we assume MAX_ALIGN is sizeof(void*) @@ -259,6 +259,11 @@ static inline uint64_t cycleclock(void) JL_NOTSAFEPOINT struct timeval tv; gettimeofday(&tv, NULL); return (int64_t)(tv.tv_sec) * 1000000 + tv.tv_usec; +#elif defined(_CPU_RISCV64_) + // taken from https://github.com/google/benchmark/blob/3b3de69400164013199ea448f051d94d7fc7d81f/src/cycleclock.h#L190 + uint64_t ret; + __asm__ volatile("rdcycle %0" : "=r"(ret)); + return ret; #elif defined(_CPU_PPC64_) // This returns a time-base, which is not always precisely a cycle-count. // https://reviews.llvm.org/D78084 diff --git a/src/julia_threads.h b/src/julia_threads.h index b697a0bf030ed..17e8d7d466044 100644 --- a/src/julia_threads.h +++ b/src/julia_threads.h @@ -56,7 +56,7 @@ typedef struct { !defined(JL_HAVE_ASM) && \ !defined(JL_HAVE_UNW_CONTEXT) #if (defined(_CPU_X86_64_) || defined(_CPU_X86_) || defined(_CPU_AARCH64_) || \ - defined(_CPU_ARM_) || defined(_CPU_PPC64_)) + defined(_CPU_ARM_) || defined(_CPU_PPC64_) || defined(_CPU_RISCV64_)) #define JL_HAVE_ASM #endif #if 0 diff --git a/src/llvm-ptls.cpp b/src/llvm-ptls.cpp index 488dd46cade21..614ed15f840e6 100644 --- a/src/llvm-ptls.cpp +++ b/src/llvm-ptls.cpp @@ -117,6 +117,8 @@ Instruction *LowerPTLS::emit_pgcstack_tp(Value *offset, Instruction *insertBefor asm_str = "mrs $0, tpidr_el0"; } else if (TargetTriple.isARM()) { asm_str = "mrc p15, 0, $0, c13, c0, 3"; + } else if (TargetTriple.isRISCV()) { + asm_str = "mv $0, tp"; } else if (TargetTriple.getArch() == Triple::x86_64) { asm_str = "movq %fs:0, $0"; } else if (TargetTriple.getArch() == Triple::x86) { diff --git a/src/llvm-version.h b/src/llvm-version.h index 2a38bb7c488b8..984e918d480cc 100644 --- a/src/llvm-version.h +++ b/src/llvm-version.h @@ -18,6 +18,10 @@ #define JL_LLVM_OPAQUE_POINTERS 1 #endif +#if JL_LLVM_VERSION < 19000 && defined(_CPU_RISCV64_) + #error Only LLVM versions >= 19.0.0 are supported by Julia on RISC-V +#endif + #ifdef __cplusplus #if defined(__GNUC__) && (__GNUC__ >= 9) // Added in GCC 9, this warning is annoying diff --git a/src/runtime_intrinsics.c b/src/runtime_intrinsics.c index db4007d32035e..450096eef5b01 100644 --- a/src/runtime_intrinsics.c +++ b/src/runtime_intrinsics.c @@ -256,7 +256,7 @@ JL_DLLEXPORT float julia_half_to_float(uint16_t param) { #if ((defined(__GNUC__) && __GNUC__ > 11) || \ (defined(__clang__) && __clang_major__ > 14)) && \ !defined(_CPU_PPC64_) && !defined(_CPU_PPC_) && \ - !defined(_OS_WINDOWS_) + !defined(_OS_WINDOWS_) && !defined(_CPU_RISCV64_) #define FLOAT16_TYPE _Float16 #define FLOAT16_TO_UINT16(x) (*(uint16_t*)&(x)) #define FLOAT16_FROM_UINT16(x) (*(_Float16*)&(x)) @@ -355,7 +355,7 @@ float 
julia_bfloat_to_float(uint16_t param) { #if ((defined(__GNUC__) && __GNUC__ > 12) || \ (defined(__clang__) && __clang_major__ > 16)) && \ !defined(_CPU_PPC64_) && !defined(_CPU_PPC_) && \ - !defined(_OS_WINDOWS_) + !defined(_OS_WINDOWS_) && !defined(_CPU_RISCV64_) #define BFLOAT16_TYPE __bf16 #define BFLOAT16_TO_UINT16(x) (*(uint16_t*)&(x)) #define BFLOAT16_FROM_UINT16(x) (*(__bf16*)&(x)) diff --git a/src/signal-handling.c b/src/signal-handling.c index d7f4697a3c4f0..ce7e8ba57af19 100644 --- a/src/signal-handling.c +++ b/src/signal-handling.c @@ -256,7 +256,8 @@ static uintptr_t jl_get_pc_from_ctx(const void *_ctx); void jl_show_sigill(void *_ctx); #if defined(_CPU_X86_64_) || defined(_CPU_X86_) \ || (defined(_OS_LINUX_) && defined(_CPU_AARCH64_)) \ - || (defined(_OS_LINUX_) && defined(_CPU_ARM_)) + || (defined(_OS_LINUX_) && defined(_CPU_ARM_)) \ + || (defined(_OS_LINUX_) && defined(_CPU_RISCV64_)) static size_t jl_safe_read_mem(const volatile char *ptr, char *out, size_t len) { jl_jmp_buf *old_buf = jl_get_safe_restore(); @@ -344,6 +345,8 @@ static uintptr_t jl_get_pc_from_ctx(const void *_ctx) return ((ucontext_t*)_ctx)->uc_mcontext.mc_gpregs.gp_elr; #elif defined(_OS_LINUX_) && defined(_CPU_ARM_) return ((ucontext_t*)_ctx)->uc_mcontext.arm_pc; +#elif defined(_OS_LINUX_) && defined(_CPU_RISCV64_) + return ((ucontext_t*)_ctx)->uc_mcontext.__gregs[REG_PC]; #else // TODO for PPC return 0; @@ -421,6 +424,20 @@ void jl_show_sigill(void *_ctx) jl_safe_printf("Invalid ARM instruction at %p: 0x%08" PRIx32 "\n", (void*)pc, inst); } } +#elif defined(_OS_LINUX_) && defined(_CPU_RISCV64_) + uint32_t inst = 0; + size_t len = jl_safe_read_mem(pc, (char*)&inst, 4); + if (len < 2) + jl_safe_printf("Fault when reading instruction: %d bytes read\n", (int)len); + if (inst == 0x00100073 || // ebreak + inst == 0xc0001073 || // unimp (pseudo-instruction for illegal `csrrw x0, cycle, x0`) + (inst & ((1 << 16) - 1)) == 0x0000) { // c.unimp (compressed form) + // The signal might actually be SIGTRAP instead, doesn't hurt to handle it here though. + jl_safe_printf("Unreachable reached at %p\n", pc); + } + else { + jl_safe_printf("Invalid instruction at %p: 0x%08" PRIx32 "\n", pc, inst); + } #else // TODO for PPC (void)_ctx; diff --git a/src/signals-unix.c b/src/signals-unix.c index f99eca31730b6..caf0e977929c5 100644 --- a/src/signals-unix.c +++ b/src/signals-unix.c @@ -80,6 +80,9 @@ static inline uintptr_t jl_get_rsp_from_ctx(const void *_ctx) #elif defined(_OS_LINUX_) && defined(_CPU_ARM_) const ucontext_t *ctx = (const ucontext_t*)_ctx; return ctx->uc_mcontext.arm_sp; +#elif defined(_OS_LINUX_) && (defined(_CPU_RISCV64_)) + const ucontext_t *ctx = (const ucontext_t*)_ctx; + return ctx->uc_mcontext.__gregs[REG_SP]; #elif defined(_OS_FREEBSD_) && defined(_CPU_X86_64_) const ucontext_t *ctx = (const ucontext_t*)_ctx; return ctx->uc_mcontext.mc_rsp; @@ -175,6 +178,11 @@ JL_NO_ASAN static void jl_call_in_ctx(jl_ptls_t ptls, void (*fptr)(void), int si ctx->uc_mcontext.arm_sp = rsp; ctx->uc_mcontext.arm_lr = 0; // Clear link register ctx->uc_mcontext.arm_pc = target; +#elif defined(_OS_LINUX_) && (defined(_CPU_RISCV64_)) + ucontext_t *ctx = (ucontext_t*)_ctx; + ctx->uc_mcontext.__gregs[REG_SP] = rsp; + ctx->uc_mcontext.__gregs[REG_RA] = 0; // Clear return address address (ra) + ctx->uc_mcontext.__gregs[REG_PC] = (uintptr_t)fptr; #else #pragma message("julia: throw-in-context not supported on this platform") // TODO Add support for PowerPC(64)? 
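Stepping back from the runtime hunks for a moment: the platform-detection changes earlier in this patch (`base/binaryplatforms.jl`, `base/cpuid.jl`, `contrib/normalize_triplet.py`) teach the platform machinery to recognize the new architecture. A quick, illustrative sanity check from a REPL built with this patch is sketched below; it is not part of the patch itself, and the acceptance of the short `rv64` spelling is my reading of the updated `(rv64|riscv64)` regex rather than something the patch tests explicitly.

```julia
using Base.BinaryPlatforms

# The canonical spelling parses to the "riscv64" architecture tag.
p = parse(Platform, "riscv64-linux-gnu")
@assert arch(p) == "riscv64"

# Assuming the updated `arch_mapping` regex "(rv64|riscv64)", the short
# spelling should normalize to the same canonical name.
p2 = parse(Platform, "rv64-linux-gnu")
@assert arch(p2) == "riscv64"
```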
diff --git a/src/stackwalk.c b/src/stackwalk.c index 5377d091cb780..6784e601bcfba 100644 --- a/src/stackwalk.c +++ b/src/stackwalk.c @@ -1066,6 +1066,44 @@ int jl_simulate_longjmp(jl_jmp_buf mctx, bt_context_t *c) JL_NOTSAFEPOINT mc->regs[0] = 1; assert(mc->sp % 16 == 0); return 1; + #elif defined(_CPU_RISCV64_) + // https://github.com/bminor/glibc/blob/master/sysdeps/riscv/bits/setjmp.h + // https://github.com/llvm/llvm-project/blob/7714e0317520207572168388f22012dd9e152e9e/libunwind/src/Registers.hpp -> Registers_riscv + mc->__gregs[1] = (*_ctx)->__pc; // ra + mc->__gregs[8] = (*_ctx)->__regs[0]; // s0 + mc->__gregs[9] = (*_ctx)->__regs[1]; // s1 + mc->__gregs[18] = (*_ctx)->__regs[2]; // s2 + mc->__gregs[19] = (*_ctx)->__regs[3]; // s3 + mc->__gregs[20] = (*_ctx)->__regs[4]; // s4 + mc->__gregs[21] = (*_ctx)->__regs[5]; // s5 + mc->__gregs[22] = (*_ctx)->__regs[6]; // s6 + mc->__gregs[23] = (*_ctx)->__regs[7]; // s7 + mc->__gregs[24] = (*_ctx)->__regs[8]; // s8 + mc->__gregs[25] = (*_ctx)->__regs[9]; // s9 + mc->__gregs[26] = (*_ctx)->__regs[10]; // s10 + mc->__gregs[27] = (*_ctx)->__regs[11]; // s11 + mc->__gregs[2] = (*_ctx)->__sp; // sp + #ifndef __riscv_float_abi_soft + mc->__fpregs.__d.__f[8] = (unsigned long long) (*_ctx)->__fpregs[0]; // fs0 + mc->__fpregs.__d.__f[9] = (unsigned long long) (*_ctx)->__fpregs[1]; // fs1 + mc->__fpregs.__d.__f[18] = (unsigned long long) (*_ctx)->__fpregs[2]; // fs2 + mc->__fpregs.__d.__f[19] = (unsigned long long) (*_ctx)->__fpregs[3]; // fs3 + mc->__fpregs.__d.__f[20] = (unsigned long long) (*_ctx)->__fpregs[4]; // fs4 + mc->__fpregs.__d.__f[21] = (unsigned long long) (*_ctx)->__fpregs[5]; // fs5 + mc->__fpregs.__d.__f[22] = (unsigned long long) (*_ctx)->__fpregs[6]; // fs6 + mc->__fpregs.__d.__f[23] = (unsigned long long) (*_ctx)->__fpregs[7]; // fs7 + mc->__fpregs.__d.__f[24] = (unsigned long long) (*_ctx)->__fpregs[8]; // fs8 + mc->__fpregs.__d.__f[25] = (unsigned long long) (*_ctx)->__fpregs[9]; // fs9 + mc->__fpregs.__d.__f[26] = (unsigned long long) (*_ctx)->__fpregs[10]; // fs10 + mc->__fpregs.__d.__f[27] = (unsigned long long) (*_ctx)->__fpregs[11]; // fs11 + #endif + // ifdef PTR_DEMANGLE ? + mc->__gregs[REG_SP] = ptr_demangle(mc->__gregs[REG_SP]); + mc->__gregs[REG_RA] = ptr_demangle(mc->__gregs[REG_RA]); + mc->__gregs[REG_PC] = mc->__gregs[REG_RA]; + mc->__gregs[REG_A0] = 1; + assert(mc->__gregs[REG_SP] % 16 == 0); + return 1; #else #pragma message("jl_record_backtrace not defined for ASM/SETJMP on unknown linux") (void)mc; diff --git a/src/support/platform.h b/src/support/platform.h index a0dd84c9c20b6..816e2090b5a08 100644 --- a/src/support/platform.h +++ b/src/support/platform.h @@ -27,6 +27,7 @@ * _CPU_X86_64_ * _CPU_AARCH64_ * _CPU_ARM_ + * _CPU_RISCV64_ * _CPU_WASM_ */ @@ -106,6 +107,8 @@ #define _CPU_AARCH64_ #elif defined(__arm__) || defined(_M_ARM) #define _CPU_ARM_ +#elif defined(__riscv) && __riscv_xlen == 64 +#define _CPU_RISCV64_ #elif defined(__PPC64__) #define _CPU_PPC64_ #elif defined(_ARCH_PPC) diff --git a/src/task.c b/src/task.c index f86e0ab3a880d..be2631347e82e 100644 --- a/src/task.c +++ b/src/task.c @@ -1491,6 +1491,14 @@ CFI_NORETURN // because all our addresses are word-aligned. 
" udf #0" // abort : : "r" (stk), "r"(fn) : "memory" ); +#elif defined(_CPU_RISCV64_) + asm volatile( + " mv sp, %0;\n" + " mv ra, zero;\n" // Clear return address register + " mv fp, zero;\n" // Clear frame pointer + " jr %1;\n" // call `fn` with fake stack frame + " ebreak" // abort + : : "r"(stk), "r"(fn) : "memory" ); #elif defined(_CPU_PPC64_) // N.B.: There is two iterations of the PPC64 ABI. // v2 is current and used here. Make sure you have the diff --git a/src/threading.c b/src/threading.c index c26028d2f3da2..50944a24eb29b 100644 --- a/src/threading.c +++ b/src/threading.c @@ -18,7 +18,7 @@ // For variant 1 JL_ELF_TLS_INIT_SIZE is the size of the thread control block (TCB) // For variant 2 JL_ELF_TLS_INIT_SIZE is 0 #if defined(_OS_LINUX_) || defined(_OS_FREEBSD_) -# if defined(_CPU_X86_64_) || defined(_CPU_X86_) +# if defined(_CPU_X86_64_) || defined(_CPU_X86_) || defined(_CPU_RISCV64_) # define JL_ELF_TLS_VARIANT 2 # define JL_ELF_TLS_INIT_SIZE 0 # elif defined(_CPU_AARCH64_) @@ -638,6 +638,8 @@ static void jl_check_tls(void) asm("mrs %0, tpidr_el0" : "=r"(tp)); #elif defined(__ARM_ARCH) && __ARM_ARCH >= 7 asm("mrc p15, 0, %0, c13, c0, 3" : "=r"(tp)); +#elif defined(_CPU_RISCV64_) + asm("mv %0, tp" : "=r"(tp)); #else # error "Cannot emit thread pointer for this architecture." #endif From 8a79822cbab84d8fe05e55cfb515b73c18db754c Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Wed, 16 Oct 2024 19:28:37 +0900 Subject: [PATCH 218/537] minor tweak on sysimg.md (#56183) --- doc/src/devdocs/sysimg.md | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/doc/src/devdocs/sysimg.md b/doc/src/devdocs/sysimg.md index 64c309e1fb02a..2cbba2744d4a1 100644 --- a/doc/src/devdocs/sysimg.md +++ b/doc/src/devdocs/sysimg.md @@ -166,15 +166,17 @@ debug info, respectively, and so will make debugging more difficult. types are not known. All printing should use a specific IO object with a known type. The easiest substitution is to use `print(Core.stdout, x)` instead of `print(x)` or `print(stdout, x)`. -- Use tools like `JET`, `Cthulhu`, and/or `SnoopCompile` to identify failures of type-inference, and - follow our [Performance Tips](@ref) to fix them. +- Use tools like [JET.jl](https://github.com/aviatesk/JET.jl), + [Cthulhu.jl](https://github.com/JuliaDebug/Cthulhu.jl), and/or + [SnoopCompile](https://github.com/timholy/SnoopCompile.jl) + to identify failures of type-inference, and follow our [Performance Tips](@ref) to fix them. ### Compatibility concerns We have identified many small changes to Base that significantly increase the set of programs that can be reliably trimmed. Unfortunately some of those changes would be considered breaking, and so are only applied when trimming is requested (this is done by an external build script, -currently maintained inside the test suite as `test/trimming/buildscript.jl`). +currently maintained inside the test suite as `contrib/juliac-buildscript.jl`). Therefore in many cases trimming will require you to opt in to new variants of Base and some standard libraries. @@ -187,7 +189,7 @@ with trimming as you develop it. Package authors may wish to test that their package is "trimming safe", however this is impossible in general. Trimming is only expected to work given concrete entry points such as `main()` and library entry points meant to be called from outside Julia. 
For generic packages, existing tests -for type stability like `@inferred` and `JET` are about as close as you can get to checking +for type stability like `@inferred` and `JET.@report_call` are about as close as you can get to checking trim compatibility. Trimming also introduces new compatibility issues between minor versions of Julia. At this time, From f5937b432c51f6b8a5b28eaa5c1583f1350e12a3 Mon Sep 17 00:00:00 2001 From: Fredrik Ekre Date: Wed, 16 Oct 2024 13:46:22 +0200 Subject: [PATCH 219/537] Remove zero arg methods of `+` and `*` from linalg tests (#56184) There are tests elsewhere that i) make sure there is no zero-arg methods of these functions and ii) tests that e.g. `+()` throws a `MethodError`. Without this patch there are test errors whenever the same test process runs both of these tests. --- stdlib/LinearAlgebra/test/matmul.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stdlib/LinearAlgebra/test/matmul.jl b/stdlib/LinearAlgebra/test/matmul.jl index 0d1e2776d2bb3..1294e97c2a30c 100644 --- a/stdlib/LinearAlgebra/test/matmul.jl +++ b/stdlib/LinearAlgebra/test/matmul.jl @@ -1139,8 +1139,8 @@ end Base.zero(::Thing) = Thing(0.) Base.one(::Type{Thing}) = Thing(1.) Base.one(::Thing) = Thing(1.) - Base.:+(t::Thing...) = +(getfield.(t, :data)...) - Base.:*(t::Thing...) = *(getfield.(t, :data)...) + Base.:+(t1::Thing, t::Thing...) = +(getfield.((t1, t...), :data)...) + Base.:*(t1::Thing, t::Thing...) = *(getfield.((t1, t...), :data)...) M = Float64[1 2; 3 4] A = Thing.(M) From b19a7c1721f623ae085354889b183622537543b0 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Wed, 16 Oct 2024 21:13:08 +0900 Subject: [PATCH 220/537] optimizer: allow EA-powered `finalizer` inlining (#55954) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit E.g. 
this allows `finalizer` inlining in the following case:
```julia
mutable struct ForeignBuffer{T}
    const ptr::Ptr{T}
end
const foreign_buffer_finalized = Ref(false)
function foreign_alloc(::Type{T}, length) where T
    ptr = Libc.malloc(sizeof(T) * length)
    ptr = Base.unsafe_convert(Ptr{T}, ptr)
    obj = ForeignBuffer{T}(ptr)
    return finalizer(obj) do obj
        Base.@assume_effects :notaskstate :nothrow
        foreign_buffer_finalized[] = true
        Libc.free(obj.ptr)
    end
end
function f_EA_finalizer(N::Int)
    workspace = foreign_alloc(Float64, N)
    GC.@preserve workspace begin
        (;ptr) = workspace
        Base.@assume_effects :nothrow @noinline println(devnull, "ptr = ", ptr)
    end
end
```
```julia
julia> @code_typed f_EA_finalizer(42)
CodeInfo(
1 ── %1 = Base.mul_int(8, N)::Int64
│    %2 = Core.lshr_int(%1, 63)::Int64
│    %3 = Core.trunc_int(Core.UInt8, %2)::UInt8
│    %4 = Core.eq_int(%3, 0x01)::Bool
└─── goto #3 if not %4
2 ── invoke Core.throw_inexacterror(:convert::Symbol, UInt64::Type, %1::Int64)::Union{}
└─── unreachable
3 ── goto #4
4 ── %9 = Core.bitcast(Core.UInt64, %1)::UInt64
└─── goto #5
5 ── goto #6
6 ── goto #7
7 ── goto #8
8 ── %14 = $(Expr(:foreigncall, :(:malloc), Ptr{Nothing}, svec(UInt64), 0, :(:ccall), :(%9), :(%9)))::Ptr{Nothing}
└─── goto #9
9 ── %16 = Base.bitcast(Ptr{Float64}, %14)::Ptr{Float64}
│    %17 = %new(ForeignBuffer{Float64}, %16)::ForeignBuffer{Float64}
└─── goto #10
10 ─ %19 = $(Expr(:gc_preserve_begin, :(%17)))
│    %20 = Base.getfield(%17, :ptr)::Ptr{Float64}
│    invoke Main.println(Main.devnull::Base.DevNull, "ptr = "::String, %20::Ptr{Float64})::Nothing
│    $(Expr(:gc_preserve_end, :(%19)))
│    %23 = Main.foreign_buffer_finalized::Base.RefValue{Bool}
│    Base.setfield!(%23, :x, true)::Bool
│    %25 = Base.getfield(%17, :ptr)::Ptr{Float64}
│    %26 = Base.bitcast(Ptr{Nothing}, %25)::Ptr{Nothing}
│    $(Expr(:foreigncall, :(:free), Nothing, svec(Ptr{Nothing}), 0, :(:ccall), :(%26), :(%25)))::Nothing
└─── return nothing
) => Nothing
```
However, this is still a WIP. Before merging, I want to improve EA's precision a bit and at least fix the test case that is currently marked as `broken`. I also need to check its impact on compiler performance.

Additionally, I believe this feature is not yet practical. In particular, there is still significant room for improvement in the following areas:
- EA's interprocedural capabilities: currently EA is performed ad-hoc for limited frames because of latency reasons, which significantly reduces its precision in the presence of interprocedural calls.
- Relaxing the `:nothrow` check for finalizer inlining: the current algorithm requires `:nothrow`-ness on all paths from the allocation of the mutable struct to its last use, which is not practical for real-world cases. Even when `:nothrow` cannot be guaranteed, auxiliary optimizations such as inserting a `finalize` call after the last use might still be possible (JuliaLang/julia#55990).
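As a programmatic companion to the IR dump above (and roughly what the new test in `test/compiler/inline.jl` below checks), one can count direct `Core.finalizer` calls left in the optimized IR. This is a sketch, not the test suite's helper, and it assumes the `foreign_alloc`/`f_EA_finalizer` definitions from the example above.

```julia
# Count direct calls to Core.finalizer remaining in the optimized, inlined IR.
ci, _ = only(code_typed(f_EA_finalizer, (Int,); optimize=true))
nfinalizer = count(ci.code) do stmt
    Meta.isexpr(stmt, :call) || return false
    f = stmt.args[1]
    return f === GlobalRef(Core, :finalizer) || f === Core.finalizer
end
@assert nfinalizer == 0  # the finalizer body was inlined after the last use
```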
--- base/compiler/optimize.jl | 2 +- .../ssair/EscapeAnalysis/EscapeAnalysis.jl | 59 +++++--- base/compiler/ssair/passes.jl | 86 +++++++---- base/compiler/types.jl | 2 + test/compiler/EscapeAnalysis/EAUtils.jl | 142 +++++++++++------- test/compiler/codegen.jl | 2 +- test/compiler/inline.jl | 33 ++++ 7 files changed, 225 insertions(+), 101 deletions(-) diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index 5f0c5077688f8..c5606f80468c0 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -665,7 +665,7 @@ function refine_effects!(interp::AbstractInterpreter, opt::OptimizationState, sv if !is_effect_free(sv.result.ipo_effects) && sv.all_effect_free && !isempty(sv.ea_analysis_pending) ir = sv.ir nargs = Int(opt.src.nargs) - estate = EscapeAnalysis.analyze_escapes(ir, nargs, optimizer_lattice(interp), GetNativeEscapeCache(interp)) + estate = EscapeAnalysis.analyze_escapes(ir, nargs, optimizer_lattice(interp), get_escape_cache(interp)) argescapes = EscapeAnalysis.ArgEscapeCache(estate) stack_analysis_result!(sv.result, argescapes) validate_mutable_arg_escapes!(estate, sv) diff --git a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl index a0abacb617085..1f98758cd6055 100644 --- a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl +++ b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl @@ -18,7 +18,7 @@ import ._TOP_MOD: ==, getindex, setindex! using Core: MethodMatch, SimpleVector, ifelse, sizeof using Core.IR using ._TOP_MOD: # Base definitions - @__MODULE__, @assert, @eval, @goto, @inbounds, @inline, @label, @noinline, + @__MODULE__, @assert, @eval, @goto, @inbounds, @inline, @label, @noinline, @show, @nospecialize, @specialize, BitSet, Callable, Csize_t, IdDict, IdSet, UnitRange, Vector, copy, delete!, empty!, enumerate, error, first, get, get!, haskey, in, isassigned, isempty, ismutabletype, keys, last, length, max, min, missing, pop!, push!, pushfirst!, @@ -657,11 +657,13 @@ function analyze_escapes(ir::IRCode, nargs::Int, 𝕃ₒ::AbstractLattice, get_e # `escape_exception!` conservatively propagates `AllEscape` anyway, # and so escape information imposed on `:the_exception` isn't computed continue + elseif head === :gc_preserve_begin + # GC preserve is handled by `escape_gc_preserve!` + elseif head === :gc_preserve_end + escape_gc_preserve!(astate, pc, stmt.args) elseif head === :static_parameter || # this exists statically, not interested in its escape - head === :copyast || # XXX can this account for some escapes? - head === :isdefined || # just returns `Bool`, nothing accounts for any escapes - head === :gc_preserve_begin || # `GC.@preserve` expressions themselves won't be used anywhere - head === :gc_preserve_end # `GC.@preserve` expressions themselves won't be used anywhere + head === :copyast || # XXX escape something? 
+ head === :isdefined # just returns `Bool`, nothing accounts for any escapes continue else add_conservative_changes!(astate, pc, stmt.args) @@ -1064,17 +1066,27 @@ end function escape_invoke!(astate::AnalysisState, pc::Int, args::Vector{Any}) mi = first(args)::MethodInstance first_idx, last_idx = 2, length(args) + add_liveness_changes!(astate, pc, args, first_idx, last_idx) # TODO inspect `astate.ir.stmts[pc][:info]` and use const-prop'ed `InferenceResult` if available cache = astate.get_escape_cache(mi) + ret = SSAValue(pc) if cache isa Bool if cache - return nothing # guaranteed to have no escape + # This method call is very simple and has good effects, so there's no need to + # escape its arguments. However, since the arguments might be returned, we need + # to consider the possibility of aliasing between them and the return value. + for argidx = first_idx:last_idx + arg = args[argidx] + if !is_mutation_free_argtype(argextype(arg, astate.ir)) + add_alias_change!(astate, ret, arg) + end + end + return nothing else return add_conservative_changes!(astate, pc, args, 2) end end cache = cache::ArgEscapeCache - ret = SSAValue(pc) retinfo = astate.estate[ret] # escape information imposed on the call statement method = mi.def::Method nargs = Int(method.nargs) @@ -1162,6 +1174,17 @@ function escape_foreigncall!(astate::AnalysisState, pc::Int, args::Vector{Any}) end end +function escape_gc_preserve!(astate::AnalysisState, pc::Int, args::Vector{Any}) + @assert length(args) == 1 "invalid :gc_preserve_end" + val = args[1] + @assert val isa SSAValue "invalid :gc_preserve_end" + beginstmt = astate.ir[val][:stmt] + @assert isexpr(beginstmt, :gc_preserve_begin) "invalid :gc_preserve_end" + beginargs = beginstmt.args + # COMBAK we might need to add liveness for all statements from `:gc_preserve_begin` to `:gc_preserve_end` + add_liveness_changes!(astate, pc, beginargs) +end + normalize(@nospecialize x) = isa(x, QuoteNode) ? x.value : x function escape_call!(astate::AnalysisState, pc::Int, args::Vector{Any}) @@ -1187,20 +1210,12 @@ function escape_call!(astate::AnalysisState, pc::Int, args::Vector{Any}) if result === missing # if this call hasn't been handled by any of pre-defined handlers, escape it conservatively add_conservative_changes!(astate, pc, args) - return elseif result === true add_liveness_changes!(astate, pc, args, 2) - return # ThrownEscape is already checked + elseif is_nothrow(astate.ir, pc) + add_liveness_changes!(astate, pc, args, 2) else - # we escape statements with the `ThrownEscape` property using the effect-freeness - # computed by `stmt_effect_flags` invoked within inlining - # TODO throwness ≠ "effect-free-ness" - if is_nothrow(astate.ir, pc) - add_liveness_changes!(astate, pc, args, 2) - else - add_fallback_changes!(astate, pc, args, 2) - end - return + add_fallback_changes!(astate, pc, args, 2) end end @@ -1528,4 +1543,12 @@ function escape_array_copy!(astate::AnalysisState, pc::Int, args::Vector{Any}) add_liveness_changes!(astate, pc, args, 6) end +function escape_builtin!(::typeof(Core.finalizer), astate::AnalysisState, pc::Int, args::Vector{Any}) + if length(args) ≥ 3 + obj = args[3] + add_liveness_change!(astate, obj, pc) # TODO setup a proper FinalizerEscape? 
+ end + return false +end + end # baremodule EscapeAnalysis diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl index 0e2272524a0ed..e3f294c4e91fe 100644 --- a/base/compiler/ssair/passes.jl +++ b/base/compiler/ssair/passes.jl @@ -1300,7 +1300,13 @@ function sroa_pass!(ir::IRCode, inlining::Union{Nothing,InliningState}=nothing) # Inlining performs legality checks on the finalizer to determine # whether or not we may inline it. If so, it appends extra arguments # at the end of the intrinsic. Detect that here. - length(stmt.args) == 5 || continue + if length(stmt.args) == 4 && stmt.args[4] === nothing + # constant case + elseif length(stmt.args) == 5 && stmt.args[4] isa Bool && stmt.args[5] isa MethodInstance + # inlining case + else + continue + end end is_finalizer = true elseif isexpr(stmt, :foreigncall) @@ -1685,18 +1691,21 @@ end function sroa_mutables!(ir::IRCode, defuses::IdDict{Int,Tuple{SPCSet,SSADefUse}}, used_ssas::Vector{Int}, lazydomtree::LazyDomtree, inlining::Union{Nothing,InliningState}) 𝕃ₒ = inlining === nothing ? SimpleInferenceLattice.instance : optimizer_lattice(inlining.interp) lazypostdomtree = LazyPostDomtree(ir) - for (defidx, (intermediaries, defuse)) in defuses - # Check if there are any uses we did not account for. If so, the variable - # escapes and we cannot eliminate the allocation. This works, because we're guaranteed - # not to include any intermediaries that have dead uses. As a result, missing uses will only ever - # show up in the nuses_total count. - nleaves = length(defuse.uses) + length(defuse.defs) - nuses = 0 - for iidx in intermediaries - nuses += used_ssas[iidx] + function find_finalizer_useidx(defuse::SSADefUse) + finalizer_useidx = nothing + for (useidx, use) in enumerate(defuse.uses) + if use.kind === :finalizer + # For now: Only allow one finalizer per allocation + finalizer_useidx !== nothing && return false + finalizer_useidx = useidx + end end - nuses_total = used_ssas[defidx] + nuses - length(intermediaries) - nleaves == nuses_total || continue + if finalizer_useidx === nothing || inlining === nothing + return true + end + return finalizer_useidx + end + for (defidx, (intermediaries, defuse)) in defuses # Find the type for this allocation defexpr = ir[SSAValue(defidx)][:stmt] isexpr(defexpr, :new) || continue @@ -1706,22 +1715,47 @@ function sroa_mutables!(ir::IRCode, defuses::IdDict{Int,Tuple{SPCSet,SSADefUse}} typ = widenconst(typ) ismutabletype(typ) || continue typ = typ::DataType - # First check for any finalizer calls - finalizer_useidx = nothing - for (useidx, use) in enumerate(defuse.uses) - if use.kind === :finalizer - # For now: Only allow one finalizer per allocation - finalizer_useidx !== nothing && @goto skip - finalizer_useidx = useidx - end + # Check if there are any uses we did not account for. If so, the variable + # escapes and we cannot eliminate the allocation. This works, because we're guaranteed + # not to include any intermediaries that have dead uses. As a result, missing uses will only ever + # show up in the nuses_total count. 
+ nleaves = length(defuse.uses) + length(defuse.defs) + nuses = 0 + for iidx in intermediaries + nuses += used_ssas[iidx] end + nuses_total = used_ssas[defidx] + nuses - length(intermediaries) all_eliminated = all_forwarded = true - if finalizer_useidx !== nothing && inlining !== nothing - finalizer_idx = defuse.uses[finalizer_useidx].idx - try_resolve_finalizer!(ir, defidx, finalizer_idx, defuse, inlining, - lazydomtree, lazypostdomtree, ir[SSAValue(finalizer_idx)][:info]) - deleteat!(defuse.uses, finalizer_useidx) - all_eliminated = all_forwarded = false # can't eliminate `setfield!` calls safely + if nleaves ≠ nuses_total + finalizer_useidx = find_finalizer_useidx(defuse) + if finalizer_useidx isa Int + nargs = length(ir.argtypes) # COMBAK this might need to be `Int(opt.src.nargs)` + estate = EscapeAnalysis.analyze_escapes(ir, nargs, 𝕃ₒ, get_escape_cache(inlining.interp)) + einfo = estate[SSAValue(defidx)] + if EscapeAnalysis.has_no_escape(einfo) + already = BitSet(use.idx for use in defuse.uses) + for idx = einfo.Liveness + if idx ∉ already + push!(defuse.uses, SSAUse(:EALiveness, idx)) + end + end + finalizer_idx = defuse.uses[finalizer_useidx].idx + try_resolve_finalizer!(ir, defidx, finalizer_idx, defuse, inlining::InliningState, + lazydomtree, lazypostdomtree, ir[SSAValue(finalizer_idx)][:info]) + end + end + continue + else + finalizer_useidx = find_finalizer_useidx(defuse) + if finalizer_useidx isa Int + finalizer_idx = defuse.uses[finalizer_useidx].idx + try_resolve_finalizer!(ir, defidx, finalizer_idx, defuse, inlining::InliningState, + lazydomtree, lazypostdomtree, ir[SSAValue(finalizer_idx)][:info]) + deleteat!(defuse.uses, finalizer_useidx) + all_eliminated = all_forwarded = false # can't eliminate `setfield!` calls safely + elseif !finalizer_useidx + continue + end end # Partition defuses by field fielddefuse = SSADefUse[SSADefUse() for _ = 1:fieldcount(typ)] diff --git a/base/compiler/types.jl b/base/compiler/types.jl index 210adf7be96b2..b6c976da48f67 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -457,6 +457,8 @@ typeinf_lattice(::AbstractInterpreter) = InferenceLattice(BaseInferenceLattice.i ipo_lattice(::AbstractInterpreter) = InferenceLattice(IPOResultLattice.instance) optimizer_lattice(::AbstractInterpreter) = SimpleInferenceLattice.instance +get_escape_cache(interp::AbstractInterpreter) = GetNativeEscapeCache(interp) + abstract type CallInfo end @nospecialize diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/test/compiler/EscapeAnalysis/EAUtils.jl index c41e61e231892..1f0a84f1a8365 100644 --- a/test/compiler/EscapeAnalysis/EAUtils.jl +++ b/test/compiler/EscapeAnalysis/EAUtils.jl @@ -6,56 +6,6 @@ const CC = Core.Compiler using ..EscapeAnalysis const EA = EscapeAnalysis -# entries -# ------- - -using Base: IdSet, unwrap_unionall, rewrap_unionall -using InteractiveUtils: gen_call_with_extracted_types_and_kwargs - -""" - @code_escapes [options...] f(args...) - -Evaluates the arguments to the function call, determines its types, and then calls -[`code_escapes`](@ref) on the resulting expression. -As with `@code_typed` and its family, any of `code_escapes` keyword arguments can be given -as the optional arguments like `@code_escapes optimize=false myfunc(myargs...)`. -""" -macro code_escapes(ex0...) 
- return gen_call_with_extracted_types_and_kwargs(__module__, :code_escapes, ex0) -end - -""" - code_escapes(f, argtypes=Tuple{}; [debuginfo::Symbol = :none], [optimize::Bool = true]) -> result::EscapeResult - -Runs the escape analysis on optimized IR of a generic function call with the given type signature. - -# Keyword Arguments - -- `optimize::Bool = true`: - if `true` returns escape information of post-inlining IR (used for local optimization), - otherwise returns escape information of pre-inlining IR (used for interprocedural escape information generation) -- `debuginfo::Symbol = :none`: - controls the amount of code metadata present in the output, possible options are `:none` or `:source`. -""" -function code_escapes(@nospecialize(f), @nospecialize(types=Base.default_tt(f)); - world::UInt = get_world_counter(), - debuginfo::Symbol = :none) - tt = Base.signature_type(f, types) - match = Base._which(tt; world, raise=true) - mi = Core.Compiler.specialize_method(match)::MethodInstance - interp = EscapeAnalyzer(world, mi) - frame = Core.Compiler.typeinf_frame(interp, mi, #=run_optimizer=#true) - isdefined(interp, :result) || error("optimization didn't happen: maybe everything has been constant folded?") - slotnames = let src = frame.src - src isa CodeInfo ? src.slotnames : nothing - end - return EscapeResult(interp.result.ir, interp.result.estate, interp.result.mi, - slotnames, debuginfo === :source, interp) -end - -# in order to run a whole analysis from ground zero (e.g. for benchmarking, etc.) -__clear_cache!() = empty!(GLOBAL_EA_CODE_CACHE) - # AbstractInterpreter # ------------------- @@ -99,10 +49,10 @@ mutable struct EscapeAnalyzer <: AbstractInterpreter const opt_params::OptimizationParams const inf_cache::Vector{InferenceResult} const escape_cache::EscapeCache - const entry_mi::MethodInstance + const entry_mi::Union{Nothing,MethodInstance} result::EscapeResultForEntry - function EscapeAnalyzer(world::UInt, entry_mi::MethodInstance, - escape_cache::EscapeCache=GLOBAL_ESCAPE_CACHE) + function EscapeAnalyzer(world::UInt, escape_cache::EscapeCache; + entry_mi::Union{Nothing,MethodInstance}=nothing) inf_params = InferenceParams() opt_params = OptimizationParams() inf_cache = InferenceResult[] @@ -115,6 +65,7 @@ CC.OptimizationParams(interp::EscapeAnalyzer) = interp.opt_params CC.get_inference_world(interp::EscapeAnalyzer) = interp.world CC.get_inference_cache(interp::EscapeAnalyzer) = interp.inf_cache CC.cache_owner(::EscapeAnalyzer) = EAToken() +CC.get_escape_cache(interp::EscapeAnalyzer) = GetEscapeCache(interp) function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, opt::OptimizationState, ir::IRCode, caller::InferenceResult) @@ -125,8 +76,9 @@ function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, opt::OptimizationStat estate = try analyze_escapes(ir, nargs, 𝕃ₒ, get_escape_cache) catch err - @error "error happened within EA, inspect `Main.failed_escapeanalysis`" - Main.failed_escapeanalysis = FailedAnalysis(ir, nargs, get_escape_cache) + @error "error happened within EA, inspect `Main.failedanalysis`" + failedanalysis = FailedAnalysis(caller, ir, nargs, get_escape_cache) + Core.eval(Main, :(failedanalysis = $failedanalysis)) rethrow(err) end if caller.linfo === interp.entry_mi @@ -156,6 +108,7 @@ function ((; escape_cache)::GetEscapeCache)(mi::MethodInstance) end struct FailedAnalysis + caller::InferenceResult ir::IRCode nargs::Int get_escape_cache::GetEscapeCache @@ -301,4 +254,83 @@ function print_with_info(preprint, postprint, io::IO, ir::IRCode, source::Bool) return 
nothing end +# entries +# ------- + +using InteractiveUtils: gen_call_with_extracted_types_and_kwargs + +""" + @code_escapes [options...] f(args...) + +Evaluates the arguments to the function call, determines its types, and then calls +[`code_escapes`](@ref) on the resulting expression. +As with `@code_typed` and its family, any of `code_escapes` keyword arguments can be given +as the optional arguments like `@code_escapes optimize=false myfunc(myargs...)`. +""" +macro code_escapes(ex0...) + return gen_call_with_extracted_types_and_kwargs(__module__, :code_escapes, ex0) +end + +""" + code_escapes(f, argtypes=Tuple{}; [world::UInt], [debuginfo::Symbol]) -> result::EscapeResult + code_escapes(mi::MethodInstance; [world::UInt], [interp::EscapeAnalyzer], [debuginfo::Symbol]) -> result::EscapeResult + +Runs the escape analysis on optimized IR of a generic function call with the given type signature, +while caching the analysis results. + +# Keyword Arguments + +- `world::UInt = Base.get_world_counter()`: + controls the world age to use when looking up methods, use current world age if not specified. +- `interp::EscapeAnalyzer = EscapeAnalyzer(world)`: + specifies the escape analyzer to use, by default a new analyzer with the global cache is created. +- `debuginfo::Symbol = :none`: + controls the amount of code metadata present in the output, possible options are `:none` or `:source`. +""" +function code_escapes(@nospecialize(f), @nospecialize(types=Base.default_tt(f)); + world::UInt = get_world_counter(), + debuginfo::Symbol = :none) + tt = Base.signature_type(f, types) + match = Base._which(tt; world, raise=true) + mi = Core.Compiler.specialize_method(match) + return code_escapes(mi; world, debuginfo) +end + +function code_escapes(mi::MethodInstance; + world::UInt = get_world_counter(), + interp::EscapeAnalyzer=EscapeAnalyzer(world, GLOBAL_ESCAPE_CACHE; entry_mi=mi), + debuginfo::Symbol = :none) + frame = Core.Compiler.typeinf_frame(interp, mi, #=run_optimizer=#true) + isdefined(interp, :result) || error("optimization didn't happen: maybe everything has been constant folded?") + slotnames = let src = frame.src + src isa CodeInfo ? src.slotnames : nothing + end + return EscapeResult(interp.result.ir, interp.result.estate, interp.result.mi, + slotnames, debuginfo === :source, interp) +end + +""" + code_escapes(ir::IRCode, nargs::Int; [world::UInt], [interp::AbstractInterpreter]) -> result::EscapeResult + +Runs the escape analysis on `ir::IRCode`. +`ir` is supposed to be optimized already, specifically after inlining has been applied. +Note that this version does not cache the analysis results. + +# Keyword Arguments + +- `world::UInt = Base.get_world_counter()`: + controls the world age to use when looking up methods, use current world age if not specified. +- `interp::AbstractInterpreter = EscapeAnalyzer(world, EscapeCache())`: + specifies the abstract interpreter to use, by default a new `EscapeAnalyzer` with an empty cache is created. +""" +function code_escapes(ir::IRCode, nargs::Int; + world::UInt = get_world_counter(), + interp::AbstractInterpreter=EscapeAnalyzer(world, EscapeCache())) + estate = analyze_escapes(ir, nargs, CC.optimizer_lattice(interp), CC.get_escape_cache(interp)) + return EscapeResult(ir, estate) # return back the result +end + +# in order to run a whole analysis from ground zero (e.g. for benchmarking, etc.) 
+__clear_cache!() = empty!(GLOBAL_EA_CODE_CACHE) + end # module EAUtils diff --git a/test/compiler/codegen.jl b/test/compiler/codegen.jl index 26ae965b35319..ae04250964554 100644 --- a/test/compiler/codegen.jl +++ b/test/compiler/codegen.jl @@ -266,7 +266,7 @@ if opt_level > 0 @test occursin("ret $Iptr %\"x::$(Int)\"", load_dummy_ref_ir) end -# Issue 22770 +# Issue JuliaLang/julia#22770 let was_gced = false @noinline make_tuple(x) = tuple(x) @noinline use(x) = ccall(:jl_breakpoint, Cvoid, ()) diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl index 99fed00a7269d..53f7adc2a2a77 100644 --- a/test/compiler/inline.jl +++ b/test/compiler/inline.jl @@ -2249,3 +2249,36 @@ let src = code_typed1(bar_split_error, Tuple{}) @test count(iscall((src, foo_split)), src.code) == 0 @test count(iscall((src, Core.throw_methoderror)), src.code) > 0 end + +# finalizer inlining with EA +mutable struct ForeignBuffer{T} + const ptr::Ptr{T} +end +mutable struct ForeignBufferChecker + @atomic finalized::Bool +end +const foreign_buffer_checker = ForeignBufferChecker(false) +function foreign_alloc(::Type{T}, length) where T + ptr = Libc.malloc(sizeof(T) * length) + ptr = Base.unsafe_convert(Ptr{T}, ptr) + obj = ForeignBuffer{T}(ptr) + return finalizer(obj) do obj + Base.@assume_effects :notaskstate :nothrow + @atomic foreign_buffer_checker.finalized = true + Libc.free(obj.ptr) + end +end +function f_EA_finalizer(N::Int) + workspace = foreign_alloc(Float64, N) + GC.@preserve workspace begin + (;ptr) = workspace + Base.@assume_effects :nothrow @noinline println(devnull, "ptr = ", ptr) + end +end +let src = code_typed1(f_EA_finalizer, (Int,)) + @test count(iscall((src, Core.finalizer)), src.code) == 0 +end +let;Base.Experimental.@force_compile + f_EA_finalizer(42000) + @test foreign_buffer_checker.finalized +end From c07a40f03c67bbd92db65add4964ee7415453ea4 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Wed, 16 Oct 2024 09:31:14 -0400 Subject: [PATCH 221/537] Some small follow-ups to stackless compiler (#55972) 1. Adjust the docstring for `Future`, which had its design changed late in that PR and is now confusing. 2. Add additional assertions validating the API assumptions of the `Future` API. I found it too easy to accidentally misuse this and cause hard-to-debug failures. The biggest change is that `isready` accounts for delayed assignments again, which allows an additional invariant that incomplete tasks must always have other pending tasks, allowing for infinite loop detection in the scheduler. 3. A small fix to use the AbstractInterpreter that created the InferenceState for the callback. We haven't fully defined the semantics of mixed-interpreter inference stacks, but downstream packages were using is and this at least makes it mostly work again. 
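To make the described `Future` contract concrete, here is an illustrative sketch of the two non-delayed construction modes and the assign-once invariant the new assertions enforce. It pokes at `Core.Compiler` internals on a build with this patch: the fully qualified `getindex`/`setindex!` calls stand in for the `f[]` / `f[] = v` syntax used inside the compiler, and the element type is deliberately `Symbol` (not inline-allocated) because the new constructor assertion rejects `Future{T}()` for inline-allocated `T`.

```julia
const CC = Core.Compiler

# 1. Immediate value: ready from the start.
f1 = CC.Future{Int}(42)
@assert CC.isready(f1)
@assert CC.getindex(f1) == 42        # `f1[]` inside Core.Compiler

# 2. Assign-once slot: not ready until its single assignment happens.
f2 = CC.Future{Symbol}()
@assert !CC.isready(f2)
CC.setindex!(f2, :done)              # `f2[] = :done` inside Core.Compiler
@assert CC.isready(f2) && CC.getindex(f2) === :done
# A second CC.setindex!(f2, ...) would now trip the new `@assert !isassigned(later)`.
```

The third mode, `Future{T}(callback, dep, interp, sv)`, needs an inference state to schedule the callback and is not shown here.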
--- base/compiler/abstractinterpretation.jl | 5 ++-- base/compiler/inferencestate.jl | 36 +++++++++++++++++-------- base/compiler/ssair/irinterp.jl | 2 +- 3 files changed, 29 insertions(+), 14 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 04a62700e9de7..dbe79e19bf9b4 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -2681,7 +2681,7 @@ function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, sv::Infere end si = StmtInfo(!unused) call = abstract_call(interp, arginfo, si, sv)::Future - Future{Nothing}(call, interp, sv) do call, interp, sv + Future{Any}(call, interp, sv) do call, interp, sv # this only is needed for the side-effect, sequenced before any task tries to consume the return value, # which this will do even without returning this Future sv.stmt_info[sv.currpc] = call.info @@ -2833,7 +2833,7 @@ function abstract_eval_new_opaque_closure(interp::AbstractInterpreter, e::Expr, pushfirst!(argtypes, rt.env) callinfo = abstract_call_opaque_closure(interp, rt, ArgInfo(nothing, argtypes), StmtInfo(true), sv, #=check=#false)::Future - Future{Nothing}(callinfo, interp, sv) do callinfo, interp, sv + Future{Any}(callinfo, interp, sv) do callinfo, interp, sv sv.stmt_info[sv.currpc] = OpaqueClosureCreateInfo(callinfo) nothing end @@ -3775,6 +3775,7 @@ function typeinf(interp::AbstractInterpreter, frame::InferenceState) takeprev = 0 while takenext >= frame.frameid callee = takenext == 0 ? frame : callstack[takenext]::InferenceState + interp = callee.interp if !isempty(callstack) if length(callstack) - frame.frameid >= minwarn topmethod = callstack[1].linfo diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl index 5f8fb82caaa34..a200d5ced4d93 100644 --- a/base/compiler/inferencestate.jl +++ b/base/compiler/inferencestate.jl @@ -1128,24 +1128,35 @@ end """ Future{T} -Delayed return value for a value of type `T`, similar to RefValue{T}, but -explicitly represents completed as a `Bool` rather than as `isdefined`. -Set once with `f[] = v` and accessed with `f[]` afterwards. +Assign-once delayed return value for a value of type `T`, similar to RefValue{T}. +Can be constructed in one of three ways: -Can also be constructed with the `completed` flag value and a closure to -produce `x`, as well as the additional arguments to avoid always capturing the -same couple of values. +1. With an immediate as `Future{T}(val)` +2. As an assign-once storage location with `Future{T}()`. Assigned (once) using `f[] = val`. +3. As a delayed computation with `Future{T}(callback, dep, interp, sv)` to have + `sv` arrange to call the `callback` with the result of `dep` when it is ready. + +Use `isready` to check if the value is ready, and `getindex` to get the value. """ struct Future{T} later::Union{Nothing,RefValue{T}} now::Union{Nothing,T} - Future{T}() where {T} = new{T}(RefValue{T}(), nothing) + function Future{T}() where {T} + later = RefValue{T}() + @assert !isassigned(later) "Future{T}() is not allowed for inlinealloc T" + new{T}(later, nothing) + end Future{T}(x) where {T} = new{T}(nothing, x) Future(x::T) where {T} = new{T}(nothing, x) end -isready(f::Future) = f.later === nothing +isready(f::Future) = f.later === nothing || isassigned(f.later) getindex(f::Future{T}) where {T} = (later = f.later; later === nothing ? 
f.now::T : later[])
-setindex!(f::Future, v) = something(f.later)[] = v
+function setindex!(f::Future, v)
+    later = something(f.later)
+    @assert !isassigned(later)
+    later[] = v
+    return f
+end
 convert(::Type{Future{T}}, x) where {T} = Future{T}(x) # support return type conversion
 convert(::Type{Future{T}}, x::Future) where {T} = x::Future{T}
 function Future{T}(f, immediate::Bool, interp::AbstractInterpreter, sv::AbsIntState) where {T}
@@ -1176,7 +1187,6 @@ function Future{T}(f, prev::Future{S}, interp::AbstractInterpreter, sv::AbsIntSt
 end
 end
-
 """
 doworkloop(args...)
@@ -1189,12 +1199,16 @@ Each task will be run repeatedly when returning `false`, until it returns `true`
 function doworkloop(interp::AbstractInterpreter, sv::AbsIntState)
 tasks = sv.tasks
 prev = length(tasks)
+ prevcallstack = length(sv.callstack)
 prev == 0 && return false
 task = pop!(tasks)
 completed = task(interp, sv)
 tasks = sv.tasks # allow dropping gc root over the previous call
 completed isa Bool || throw(TypeError(:return, "", Bool, task)) # print the task on failure as part of the error message, instead of just "@ workloop:line"
- completed || push!(tasks, task)
+ if !completed
+     @assert (length(tasks) >= prev || length(sv.callstack) > prevcallstack) "Task did not complete, but also did not create any child tasks"
+     push!(tasks, task)
+ end
 # efficient post-order visitor: items pushed are executed in reverse post order such
 # that later items are executed before earlier ones, but are fully executed
 # (including any dependencies scheduled by them) before going on to the next item
diff --git a/base/compiler/ssair/irinterp.jl b/base/compiler/ssair/irinterp.jl
index ca8ca770df413..f9565f3971733 100644
--- a/base/compiler/ssair/irinterp.jl
+++ b/base/compiler/ssair/irinterp.jl
@@ -52,7 +52,7 @@ end
 function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, irsv::IRInterpretationState)
 si = StmtInfo(true) # TODO better job here?
 call = abstract_call(interp, arginfo, si, irsv)::Future
- Future{Nothing}(call, interp, irsv) do call, interp, irsv
+ Future{Any}(call, interp, irsv) do call, interp, irsv
 irsv.ir.stmts[irsv.curridx][:info] = call.info
 nothing
 end

From b7b79eb8f62a63d1c80fb3fed6cd7b539057ecce Mon Sep 17 00:00:00 2001
From: Keno Fischer
Date: Wed, 16 Oct 2024 14:49:05 -0400
Subject: [PATCH 222/537] Break dependency between loading and Core.Compiler (#56186)

This code was originally added in df81bf9a96c59f257a01307cd0ecf05035d8301f where Core.Compiler would keep an array of all the things it inferred, which could then be provided to the runtime to be included in the package image. In 113efb6e0aa27879cb423ab323c0159911e4c5e7 keeping the array itself became a runtime service for locking considerations. As a result, the role of Core.Compiler here is a bit weird. It has the enable switch and the GC root, but all the actual state is being managed by the runtime.

It would be desirable to remove the Core.Compiler reference, so that loading.jl can function even if `Core.Compiler` does not exist (which is in theory supposed to be possible, even though we currently never run in such a configuration; that said, post trimming one might imagine useful instances of such a setup).

To do this, put the runtime fully in charge of managing this array. Core.Compiler will call the callback unconditionally for all newly inferred CodeInstances, and the runtime can decide whether to save them or not.
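To make the new division of labor concrete, here is a hedged sketch of the handshake after this change (it mirrors the `base/loading.jl` and `base/compiler/cicache.jl` hunks below; an illustration only, not code taken from the patch):

```julia
# Base (loading.jl) now owns the GC root and hands it to the runtime:
const newly_inferred = Core.CodeInstance[]
ccall(:jl_set_newly_inferred, Cvoid, (Any,), newly_inferred)
# ... include the package; every internal cache insert now calls
# jl_push_newly_inferred unconditionally, and the runtime records it
# only while an array is installed ...
ccall(:jl_set_newly_inferred, Cvoid, (Any,), nothing)  # stop recording (the new `nothing` case)
```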
Extracted from #56128 --- base/compiler/cicache.jl | 4 ++++ base/compiler/typeinfer.jl | 10 ---------- base/loading.jl | 13 ++++++++++--- src/staticdata_utils.c | 6 +++++- 4 files changed, 19 insertions(+), 14 deletions(-) diff --git a/base/compiler/cicache.jl b/base/compiler/cicache.jl index a6ed18fe5105f..bf32e8f12f085 100644 --- a/base/compiler/cicache.jl +++ b/base/compiler/cicache.jl @@ -13,6 +13,10 @@ end function setindex!(cache::InternalCodeCache, ci::CodeInstance, mi::MethodInstance) @assert ci.owner === cache.owner + m = mi.def + if isa(m, Method) && m.module != Core + ccall(:jl_push_newly_inferred, Cvoid, (Any,), ci) + end ccall(:jl_mi_cache_insert, Cvoid, (Any, Any), mi, ci) return cache end diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index 8b85f7c6f35f1..2a3bbf3854302 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -1,9 +1,5 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -# Tracking of newly-inferred CodeInstances during precompilation -const track_newly_inferred = RefValue{Bool}(false) -const newly_inferred = CodeInstance[] - """ The module `Core.Compiler.Timings` provides a simple implementation of nested timers that can be used to measure the exclusive time spent inferring each method instance that is @@ -264,12 +260,6 @@ function cache_result!(interp::AbstractInterpreter, result::InferenceResult) if cache_results code_cache(interp)[mi] = result.ci - if track_newly_inferred[] - m = mi.def - if isa(m, Method) && m.module != Core - ccall(:jl_push_newly_inferred, Cvoid, (Any,), result.ci) - end - end end return cache_results end diff --git a/base/loading.jl b/base/loading.jl index 8dff6838c27cc..b396c7897c1fd 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -2900,6 +2900,9 @@ function load_path_setup_code(load_path::Bool=true) return code end +# Const global for GC root +const newly_inferred = CodeInstance[] + # this is called in the external process that generates precompiled package files function include_package_for_output(pkg::PkgId, input::String, depot_path::Vector{String}, dl_load_path::Vector{String}, load_path::Vector{String}, concrete_deps::typeof(_concrete_dependencies), source::Union{Nothing,String}) @@ -2919,8 +2922,7 @@ function include_package_for_output(pkg::PkgId, input::String, depot_path::Vecto task_local_storage()[:SOURCE_PATH] = source end - ccall(:jl_set_newly_inferred, Cvoid, (Any,), Core.Compiler.newly_inferred) - Core.Compiler.track_newly_inferred.x = true + ccall(:jl_set_newly_inferred, Cvoid, (Any,), newly_inferred) try Base.include(Base.__toplevel__, input) catch ex @@ -2928,10 +2930,15 @@ function include_package_for_output(pkg::PkgId, input::String, depot_path::Vecto @debug "Aborting `create_expr_cache'" exception=(ErrorException("Declaration of __precompile__(false) not allowed"), catch_backtrace()) exit(125) # we define status = 125 means PrecompileableError finally - Core.Compiler.track_newly_inferred.x = false + ccall(:jl_set_newly_inferred, Cvoid, (Any,), nothing) end # check that the package defined the expected module so we can give a nice error message if not Base.check_package_module_loaded(pkg) + + # Re-populate the runtime's newly-inferred array, which will be included + # in the output. We removed it above to avoid including any code we may + # have compiled for error handling and validation. 
+ ccall(:jl_set_newly_inferred, Cvoid, (Any,), newly_inferred)
 end
 function check_package_module_loaded(pkg::PkgId)
diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c
index 8eb223d3cfbde..5f1095fec9168 100644
--- a/src/staticdata_utils.c
+++ b/src/staticdata_utils.c
@@ -91,12 +91,16 @@ extern jl_mutex_t world_counter_lock;
 // This gets called as the first step of Base.include_package_for_output
 JL_DLLEXPORT void jl_set_newly_inferred(jl_value_t* _newly_inferred)
 {
-    assert(_newly_inferred == NULL || jl_is_array(_newly_inferred));
+    assert(_newly_inferred == NULL || _newly_inferred == jl_nothing || jl_is_array(_newly_inferred));
+    if (_newly_inferred == jl_nothing)
+        _newly_inferred = NULL;
 newly_inferred = (jl_array_t*) _newly_inferred;
 }
 JL_DLLEXPORT void jl_push_newly_inferred(jl_value_t* ci)
 {
+    if (!newly_inferred)
+        return;
 JL_LOCK(&newly_inferred_mutex);
 size_t end = jl_array_nrows(newly_inferred);
 jl_array_grow_end(newly_inferred, 1);

From df5f4375820dd80ba06e819954c5880b153c4d41 Mon Sep 17 00:00:00 2001
From: Gabriel Baraldi
Date: Wed, 16 Oct 2024 16:52:24 -0300
Subject: [PATCH 223/537] Implement parallel sweeping of stack pools (#55643)

Also use a round robin to return stacks to the OS from only one thread at a time, to avoid contention on munmap syscalls.

Using https://github.com/gbaraldi/cilkbench_julia/blob/main/cilk5julia/nqueens.jl as a benchmark, it's about 12% faster in wall time. This benchmark has other weird behaviours, especially when single threaded: it calls `wait` thousands of times per second, and if single threaded every single call does a `jl_process_events` call, which is a syscall + preemption, so it looks like a hang. With threads the issue isn't there.

The idea behind the round robin is twofold. One, we are just freeing too much, and after talking with vtjnash we may want some less aggressive behaviour; the second is that munmap takes a lock in most OSs, so doing it in parallel has severe negative scaling.
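The scheme itself is small; below is a rough Julia sketch of the two counters the C code introduces (`gc_ptls_sweep_idx` to hand out per-thread stack pools, `gc_stack_free_idx` for the round robin). This is only an illustration of the idea, not the actual GC code, which lives in `src/gc-stacks.c` and `src/gc-stock.c`:

```julia
const N = Threads.nthreads()
const sweep_idx = Threads.Atomic{Int}(N - 1)  # like gc_ptls_sweep_idx: work left to claim
const free_idx  = Ref(0)                      # like gc_stack_free_idx: round-robin winner

function sweep_stacks!()
    while true
        i = Threads.atomic_sub!(sweep_idx, 1)  # claim one thread's pools (returns the old value)
        i < 0 && break
        # every claimed pool sweeps its dead-task stacks here ...
        if i == free_idx[]
            # ... but only the round-robin winner returns pages to the OS,
            # so the munmap lock is hit by at most one thread per collection
        end
    end
end

# advanced once per collection, so each thread's pool takes a turn at freeing
free_idx[] = (free_idx[] + 1) % N
```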
--- base/timing.jl | 2 ++ src/Makefile | 1 + src/gc-interface.h | 2 ++ src/gc-stacks.c | 55 ++++++++++++++++++--------------- src/gc-stock.c | 77 ++++++++++++++++++++++++++++++++++++++++------ src/gc-stock.h | 7 +++-- src/gc-tls.h | 1 + 7 files changed, 109 insertions(+), 36 deletions(-) diff --git a/base/timing.jl b/base/timing.jl index b094aa230e1c2..4880951f0a32d 100644 --- a/base/timing.jl +++ b/base/timing.jl @@ -22,8 +22,10 @@ struct GC_Num total_time_to_safepoint ::Int64 sweep_time ::Int64 mark_time ::Int64 + stack_pool_sweep_time ::Int64 total_sweep_time ::Int64 total_mark_time ::Int64 + total_stack_pool_sweep_time::Int64 last_full_sweep ::Int64 last_incremental_sweep ::Int64 end diff --git a/src/Makefile b/src/Makefile index 4dbb094c65321..75635c2e6c062 100644 --- a/src/Makefile +++ b/src/Makefile @@ -318,6 +318,7 @@ $(BUILDDIR)/debuginfo.o $(BUILDDIR)/debuginfo.dbg.obj: $(addprefix $(SRCDIR)/,de $(BUILDDIR)/disasm.o $(BUILDDIR)/disasm.dbg.obj: $(SRCDIR)/debuginfo.h $(SRCDIR)/processor.h $(BUILDDIR)/gc-debug.o $(BUILDDIR)/gc-debug.dbg.obj: $(SRCDIR)/gc-common.h $(SRCDIR)/gc-stock.h $(BUILDDIR)/gc-pages.o $(BUILDDIR)/gc-pages.dbg.obj: $(SRCDIR)/gc-common.h $(SRCDIR)/gc-stock.h +$(BUILDDIR)/gc-stacks.o $(BUILDDIR)/gc-stacks.dbg.obj: $(SRCDIR)/gc-common.h $(SRCDIR)/gc-stock.h $(BUILDDIR)/gc-stock.o $(BUILDDIR)/gc.dbg.obj: $(SRCDIR)/gc-common.h $(SRCDIR)/gc-stock.h $(SRCDIR)/gc-heap-snapshot.h $(SRCDIR)/gc-alloc-profiler.h $(SRCDIR)/gc-page-profiler.h $(BUILDDIR)/gc-heap-snapshot.o $(BUILDDIR)/gc-heap-snapshot.dbg.obj: $(SRCDIR)/gc-heap-snapshot.h $(BUILDDIR)/gc-alloc-profiler.o $(BUILDDIR)/gc-alloc-profiler.dbg.obj: $(SRCDIR)/gc-alloc-profiler.h diff --git a/src/gc-interface.h b/src/gc-interface.h index e543b4b5879f1..bb2abbe2d36ac 100644 --- a/src/gc-interface.h +++ b/src/gc-interface.h @@ -44,8 +44,10 @@ typedef struct { uint64_t total_time_to_safepoint; uint64_t sweep_time; uint64_t mark_time; + uint64_t stack_pool_sweep_time; uint64_t total_sweep_time; uint64_t total_mark_time; + uint64_t total_stack_pool_sweep_time; uint64_t last_full_sweep; uint64_t last_incremental_sweep; } jl_gc_num_t; diff --git a/src/gc-stacks.c b/src/gc-stacks.c index 783129ea97693..f6e787a4c1d2d 100644 --- a/src/gc-stacks.c +++ b/src/gc-stacks.c @@ -1,6 +1,7 @@ // This file is a part of Julia. 
License is MIT: https://julialang.org/license #include "gc-common.h" +#include "gc-stock.h" #include "threading.h" #ifndef _OS_WINDOWS_ # include @@ -202,7 +203,7 @@ JL_DLLEXPORT void *jl_malloc_stack(size_t *bufsz, jl_task_t *owner) JL_NOTSAFEPO return stk; } -void sweep_stack_pools(void) JL_NOTSAFEPOINT +void sweep_stack_pool_loop(void) JL_NOTSAFEPOINT { // Stack sweeping algorithm: // // deallocate stacks if we have too many sitting around unused @@ -215,33 +216,38 @@ void sweep_stack_pools(void) JL_NOTSAFEPOINT // bufsz = t->bufsz // if (stkbuf) // push(free_stacks[sz], stkbuf) - assert(gc_n_threads); - for (int i = 0; i < gc_n_threads; i++) { + jl_atomic_fetch_add(&gc_n_threads_sweeping_stacks, 1); + while (1) { + int i = jl_atomic_fetch_add_relaxed(&gc_ptls_sweep_idx, -1); + if (i < 0) + break; jl_ptls_t ptls2 = gc_all_tls_states[i]; if (ptls2 == NULL) continue; - + assert(gc_n_threads); // free half of stacks that remain unused since last sweep - for (int p = 0; p < JL_N_STACK_POOLS; p++) { - small_arraylist_t *al = &ptls2->gc_tls.heap.free_stacks[p]; - size_t n_to_free; - if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) { - n_to_free = al->len; // not alive yet or dead, so it does not need these anymore - } - else if (al->len > MIN_STACK_MAPPINGS_PER_POOL) { - n_to_free = al->len / 2; - if (n_to_free > (al->len - MIN_STACK_MAPPINGS_PER_POOL)) - n_to_free = al->len - MIN_STACK_MAPPINGS_PER_POOL; - } - else { - n_to_free = 0; - } - for (int n = 0; n < n_to_free; n++) { - void *stk = small_arraylist_pop(al); - free_stack(stk, pool_sizes[p]); - } - if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) { - small_arraylist_free(al); + if (i == jl_atomic_load_relaxed(&gc_stack_free_idx)) { + for (int p = 0; p < JL_N_STACK_POOLS; p++) { + small_arraylist_t *al = &ptls2->gc_tls.heap.free_stacks[p]; + size_t n_to_free; + if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) { + n_to_free = al->len; // not alive yet or dead, so it does not need these anymore + } + else if (al->len > MIN_STACK_MAPPINGS_PER_POOL) { + n_to_free = al->len / 2; + if (n_to_free > (al->len - MIN_STACK_MAPPINGS_PER_POOL)) + n_to_free = al->len - MIN_STACK_MAPPINGS_PER_POOL; + } + else { + n_to_free = 0; + } + for (int n = 0; n < n_to_free; n++) { + void *stk = small_arraylist_pop(al); + free_stack(stk, pool_sizes[p]); + } + if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) { + small_arraylist_free(al); + } } } if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) { @@ -287,6 +293,7 @@ void sweep_stack_pools(void) JL_NOTSAFEPOINT } live_tasks->len -= ndel; } + jl_atomic_fetch_add(&gc_n_threads_sweeping_stacks, -1); } JL_DLLEXPORT jl_array_t *jl_live_tasks(void) diff --git a/src/gc-stock.c b/src/gc-stock.c index 6b97881909bbd..50a3896d4f9aa 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -24,11 +24,17 @@ int jl_n_sweepthreads; // Number of threads currently running the GC mark-loop _Atomic(int) gc_n_threads_marking; // Number of threads sweeping -_Atomic(int) gc_n_threads_sweeping; +_Atomic(int) gc_n_threads_sweeping_pools; +// Number of threads sweeping stacks +_Atomic(int) gc_n_threads_sweeping_stacks; // Temporary for the `ptls->gc_tls.page_metadata_allocd` used during parallel sweeping (padded to avoid false sharing) _Atomic(jl_gc_padded_page_stack_t *) gc_allocd_scratch; // `tid` of mutator thread that triggered GC _Atomic(int) gc_master_tid; +// counter for sharing work when sweeping stacks +_Atomic(int) gc_ptls_sweep_idx; +// counter for round robin of giving back stack pages to 
the OS +_Atomic(int) gc_stack_free_idx = 0; // `tid` of first GC thread int gc_first_tid; // Mutex/cond used to synchronize wakeup of GC threads on parallel marking @@ -996,13 +1002,50 @@ STATIC_INLINE void gc_sweep_pool_page(gc_page_profiler_serializer_t *s, jl_gc_pa // sweep over all memory that is being used and not in a pool static void gc_sweep_other(jl_ptls_t ptls, int sweep_full) JL_NOTSAFEPOINT { - sweep_stack_pools(); gc_sweep_foreign_objs(); sweep_malloced_memory(); sweep_big(ptls); jl_engine_sweep(gc_all_tls_states); } +// wake up all threads to sweep the stacks +void gc_sweep_wake_all_stacks(jl_ptls_t ptls) JL_NOTSAFEPOINT +{ + uv_mutex_lock(&gc_threads_lock); + int first = gc_first_parallel_collector_thread_id(); + int last = gc_last_parallel_collector_thread_id(); + for (int i = first; i <= last; i++) { + jl_ptls_t ptls2 = gc_all_tls_states[i]; + gc_check_ptls_of_parallel_collector_thread(ptls2); + jl_atomic_fetch_add(&ptls2->gc_tls.gc_stack_sweep_requested, 1); + } + uv_cond_broadcast(&gc_threads_cond); + uv_mutex_unlock(&gc_threads_lock); + return; +} + +void gc_sweep_wait_for_all_stacks(void) JL_NOTSAFEPOINT +{ + while ((jl_atomic_load_acquire(&gc_ptls_sweep_idx) >= 0 ) || jl_atomic_load_acquire(&gc_n_threads_sweeping_stacks) != 0) { + jl_cpu_pause(); + } +} + +void sweep_stack_pools(jl_ptls_t ptls) JL_NOTSAFEPOINT +{ + // initialize ptls index for parallel sweeping of stack pools + assert(gc_n_threads); + int stack_free_idx = jl_atomic_load_relaxed(&gc_stack_free_idx); + if (stack_free_idx + 1 == gc_n_threads) + jl_atomic_store_relaxed(&gc_stack_free_idx, 0); + else + jl_atomic_store_relaxed(&gc_stack_free_idx, stack_free_idx + 1); + jl_atomic_store_release(&gc_ptls_sweep_idx, gc_n_threads - 1); // idx == gc_n_threads = release stacks to the OS so it's serial + gc_sweep_wake_all_stacks(ptls); + sweep_stack_pool_loop(); + gc_sweep_wait_for_all_stacks(); +} + static void gc_pool_sync_nfree(jl_gc_pagemeta_t *pg, jl_taggedvalue_t *last) JL_NOTSAFEPOINT { assert(pg->fl_begin_offset != UINT16_MAX); @@ -1078,7 +1121,7 @@ int gc_sweep_prescan(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_sc } // wake up all threads to sweep the pages -void gc_sweep_wake_all(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch) +void gc_sweep_wake_all_pages(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_scratch) { int parallel_sweep_worthwhile = gc_sweep_prescan(ptls, new_gc_allocd_scratch); if (parallel_sweep_worthwhile && !page_profile_enabled) { @@ -1114,10 +1157,10 @@ void gc_sweep_wake_all(jl_ptls_t ptls, jl_gc_padded_page_stack_t *new_gc_allocd_ } // wait for all threads to finish sweeping -void gc_sweep_wait_for_all(void) +void gc_sweep_wait_for_all_pages(void) { jl_atomic_store(&gc_allocd_scratch, NULL); - while (jl_atomic_load_acquire(&gc_n_threads_sweeping) != 0) { + while (jl_atomic_load_acquire(&gc_n_threads_sweeping_pools) != 0) { jl_cpu_pause(); } } @@ -1125,7 +1168,7 @@ void gc_sweep_wait_for_all(void) // sweep all pools void gc_sweep_pool_parallel(jl_ptls_t ptls) { - jl_atomic_fetch_add(&gc_n_threads_sweeping, 1); + jl_atomic_fetch_add(&gc_n_threads_sweeping_pools, 1); jl_gc_padded_page_stack_t *allocd_scratch = jl_atomic_load(&gc_allocd_scratch); if (allocd_scratch != NULL) { gc_page_profiler_serializer_t serializer = gc_page_serializer_create(); @@ -1170,7 +1213,7 @@ void gc_sweep_pool_parallel(jl_ptls_t ptls) } gc_page_serializer_destroy(&serializer); } - jl_atomic_fetch_add(&gc_n_threads_sweeping, -1); + 
jl_atomic_fetch_add(&gc_n_threads_sweeping_pools, -1); } // free all pages (i.e. through `madvise` on Linux) that were lazily freed @@ -1260,9 +1303,9 @@ static void gc_sweep_pool(void) // the actual sweeping jl_gc_padded_page_stack_t *new_gc_allocd_scratch = (jl_gc_padded_page_stack_t *) calloc_s(n_threads * sizeof(jl_gc_padded_page_stack_t)); jl_ptls_t ptls = jl_current_task->ptls; - gc_sweep_wake_all(ptls, new_gc_allocd_scratch); + gc_sweep_wake_all_pages(ptls, new_gc_allocd_scratch); gc_sweep_pool_parallel(ptls); - gc_sweep_wait_for_all(); + gc_sweep_wait_for_all_pages(); // reset half-pages pointers for (int t_i = 0; t_i < n_threads; t_i++) { @@ -3073,6 +3116,11 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection) #endif current_sweep_full = sweep_full; sweep_weak_refs(); + uint64_t stack_pool_time = jl_hrtime(); + sweep_stack_pools(ptls); + stack_pool_time = jl_hrtime() - stack_pool_time; + gc_num.total_stack_pool_sweep_time += stack_pool_time; + gc_num.stack_pool_sweep_time = stack_pool_time; gc_sweep_other(ptls, sweep_full); gc_scrub(); gc_verify_tags(); @@ -3491,6 +3539,10 @@ STATIC_INLINE int may_sweep(jl_ptls_t ptls) JL_NOTSAFEPOINT return (jl_atomic_load(&ptls->gc_tls.gc_sweeps_requested) > 0); } +STATIC_INLINE int may_sweep_stack(jl_ptls_t ptls) JL_NOTSAFEPOINT +{ + return (jl_atomic_load(&ptls->gc_tls.gc_stack_sweep_requested) > 0); +} // parallel gc thread function void jl_parallel_gc_threadfun(void *arg) { @@ -3513,12 +3565,17 @@ void jl_parallel_gc_threadfun(void *arg) while (1) { uv_mutex_lock(&gc_threads_lock); - while (!may_mark() && !may_sweep(ptls)) { + while (!may_mark() && !may_sweep(ptls) && !may_sweep_stack(ptls)) { uv_cond_wait(&gc_threads_cond, &gc_threads_lock); } uv_mutex_unlock(&gc_threads_lock); assert(jl_atomic_load_relaxed(&ptls->gc_state) == JL_GC_PARALLEL_COLLECTOR_THREAD); gc_mark_loop_parallel(ptls, 0); + if (may_sweep_stack(ptls)) { + assert(jl_atomic_load_relaxed(&ptls->gc_state) == JL_GC_PARALLEL_COLLECTOR_THREAD); + sweep_stack_pool_loop(); + jl_atomic_fetch_add(&ptls->gc_tls.gc_stack_sweep_requested, -1); + } if (may_sweep(ptls)) { assert(jl_atomic_load_relaxed(&ptls->gc_state) == JL_GC_PARALLEL_COLLECTOR_THREAD); gc_sweep_pool_parallel(ptls); diff --git a/src/gc-stock.h b/src/gc-stock.h index 46f7d3e11e105..76cecf68067bf 100644 --- a/src/gc-stock.h +++ b/src/gc-stock.h @@ -524,7 +524,10 @@ extern uv_mutex_t gc_threads_lock; extern uv_cond_t gc_threads_cond; extern uv_sem_t gc_sweep_assists_needed; extern _Atomic(int) gc_n_threads_marking; -extern _Atomic(int) gc_n_threads_sweeping; +extern _Atomic(int) gc_n_threads_sweeping_pools; +extern _Atomic(int) gc_n_threads_sweeping_stacks; +extern _Atomic(int) gc_ptls_sweep_idx; +extern _Atomic(int) gc_stack_free_idx; extern _Atomic(int) n_threads_running; extern uv_barrier_t thread_init_done; void gc_mark_queue_all_roots(jl_ptls_t ptls, jl_gc_markqueue_t *mq); @@ -535,7 +538,7 @@ void gc_mark_loop_serial(jl_ptls_t ptls); void gc_mark_loop_parallel(jl_ptls_t ptls, int master); void gc_sweep_pool_parallel(jl_ptls_t ptls); void gc_free_pages(void); -void sweep_stack_pools(void) JL_NOTSAFEPOINT; +void sweep_stack_pool_loop(void) JL_NOTSAFEPOINT; void jl_gc_debug_init(void); // GC pages diff --git a/src/gc-tls.h b/src/gc-tls.h index 9e4b09404db84..3c2cc029a6183 100644 --- a/src/gc-tls.h +++ b/src/gc-tls.h @@ -82,6 +82,7 @@ typedef struct { jl_gc_markqueue_t mark_queue; jl_gc_mark_cache_t gc_cache; _Atomic(size_t) gc_sweeps_requested; + _Atomic(size_t) gc_stack_sweep_requested; 
arraylist_t sweep_objs;
 } jl_gc_tls_states_t;

From 12aa9dea03a8a6bbe2891d3ca6ce34a21ec84734 Mon Sep 17 00:00:00 2001
From: Simon Byrne
Date: Wed, 16 Oct 2024 13:54:03 -0700
Subject: [PATCH 224/537] add fenv cache to task struct (#51288)

Fixes #51277, though we give no guarantee that it keeps working this way, or that calling `setrounding_raw` won't lead to other undefined behavior. To give some examples:

```julia
julia> t = Base.Rounding.setrounding_raw(Float64, Base.Rounding.to_fenv(RoundDown)) do
           Task(() -> println(rounding(Float64)))
       end
Task (runnable) @0x000000010dff04c0

julia> rounding(Float64)
RoundingMode{:Nearest}()

julia> wait(schedule(t))
RoundingMode{:Down}() # currently gives RoundingMode{:Nearest}()

julia> rounding(Float64)
RoundingMode{:Nearest}()

julia> Base.Rounding.setrounding_raw(Float64, Base.Rounding.to_fenv(RoundDown)) do
           Threads.@threads :static for i = 1:Threads.nthreads()
               println(Threads.threadid() => rounding(Float64))
           end
       end
1 => RoundingMode{:Down}()
2 => RoundingMode{:Down}() # currently gives RoundingMode{:Nearest}()
4 => RoundingMode{:Down}() # currently gives RoundingMode{:Nearest}()
3 => RoundingMode{:Down}() # currently gives RoundingMode{:Nearest}()
```
---
 src/julia.h | 4 ++++
 src/task.c | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/src/julia.h b/src/julia.h
index ed3d9bf825658..46679da9714dc 100644
--- a/src/julia.h
+++ b/src/julia.h
@@ -20,6 +20,7 @@
 #include "libsupport.h"
 #include
 #include
+#include
 #include "htable.h"
 #include "arraylist.h"
@@ -2282,6 +2283,9 @@ typedef struct _jl_task_t {
 uint16_t priority;
 // hidden state:
+    // cached floating point environment
+    // only updated at task switch
+    fenv_t fenv;
 // id of owning thread - does not need to be defined until the task runs
 _Atomic(int16_t) tid;
diff --git a/src/task.c b/src/task.c
index be2631347e82e..5e1172a96a409 100644
--- a/src/task.c
+++ b/src/task.c
@@ -534,6 +534,7 @@ JL_NO_ASAN static void ctx_switch(jl_task_t *lastt)
 jl_set_pgcstack(&t->gcstack);
 jl_signal_fence();
 lastt->ptls = NULL;
+    fegetenv(&lastt->fenv);
 #ifdef MIGRATE_TASKS
 ptls->previous_task = lastt;
 #endif
@@ -726,6 +727,7 @@ JL_DLLEXPORT void jl_switch(void) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER
 0 == ptls->finalizers_inhibited);
 ptls->finalizers_inhibited = finalizers_inhibited;
 jl_timing_block_task_enter(ct, ptls, blk); (void)blk;
+    fesetenv(&ct->fenv);
 sig_atomic_t other_defer_signal = ptls->defer_signal;
 ptls->defer_signal = defer_signal;
@@ -1138,6 +1140,7 @@ JL_DLLEXPORT jl_task_t *jl_new_task(jl_function_t *start, jl_value_t *completion
 t->excstack = NULL;
 t->ctx.started = 0;
 t->priority = 0;
+    fegetenv(&t->fenv);
 jl_atomic_store_relaxed(&t->tid, -1);
 t->threadpoolid = ct->threadpoolid;
 t->ptls = NULL;
@@ -1239,6 +1242,7 @@ CFI_NORETURN
 if (!pt->sticky && !pt->ctx.copy_stack)
 jl_atomic_store_release(&pt->tid, -1);
 #endif
+    fesetenv(&ct->fenv);
 ct->ctx.started = 1;
 JL_PROBE_RT_START_TASK(ct);

From 5c3f477fe0570760a34eb9ea91b0ee5dfabe69b5 Mon Sep 17 00:00:00 2001
From: spaette <111918424+spaette@users.noreply.github.com>
Date: Thu, 17 Oct 2024 03:23:10 +0200
Subject: [PATCH 225/537] url relocation LinearAlgebra markdown (#56202)

_cf_: https://github.com/JuliaLang/julia/issues/56147

The .html and .pdf rendering may be resolved for this content; the ticket's mentioned build.md file was left unattended.
---
 stdlib/LinearAlgebra/docs/src/index.md | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md
index
967ef8237f03d..e3e79b7034969 100644
--- a/stdlib/LinearAlgebra/docs/src/index.md
+++ b/stdlib/LinearAlgebra/docs/src/index.md
@@ -683,11 +683,9 @@ and the complexity of the operation.
 ### Level 1 BLAS functions
-The level 1 BLAS functions were first proposed in [(Lawson, 1979)][Lawson-1979] and
+The level 1 BLAS functions were first proposed in ([Lawson, 1979](https://dl.acm.org/doi/10.1145/355841.355847)) and
 define operations between scalars and vectors.
-[Lawson-1979]: https://dl.acm.org/doi/10.1145/355841.355847
-
 ```@docs
 # xROTG
 # xROTMG
@@ -710,11 +708,9 @@ LinearAlgebra.BLAS.scal!
 LinearAlgebra.BLAS.iamax
 ### Level 2 BLAS functions
-The level 2 BLAS functions were published in [(Dongarra, 1988)][Dongarra-1988],
+The level 2 BLAS functions were published in ([Dongarra, 1988](https://dl.acm.org/doi/10.1145/42288.42291)) and
 define matrix-vector operations.
-[Dongarra-1988]: https://dl.acm.org/doi/10.1145/42288.42291
-
 **return a vector**
 ```@docs
@@ -763,11 +759,9 @@ LinearAlgebra.BLAS.spr!
 ### Level 3 BLAS functions
-The level 3 BLAS functions were published in [(Dongarra, 1990)][Dongarra-1990],
+The level 3 BLAS functions were published in ([Dongarra, 1990](https://dl.acm.org/doi/10.1145/77626.79170)) and
 define matrix-matrix operations.
-[Dongarra-1990]: https://dl.acm.org/doi/10.1145/77626.79170
-
 ```@docs
 LinearAlgebra.BLAS.gemmt!
 LinearAlgebra.BLAS.gemmt(::Any, ::Any, ::Any, ::Any, ::Any, ::Any)

From e25287781adb6fec59d9cecccd1a47b3edcd3d03 Mon Sep 17 00:00:00 2001
From: TacHawkes
Date: Thu, 17 Oct 2024 06:51:45 +0200
Subject: [PATCH 226/537] Update stable release tag to 1.11.1 in README.md (#56205)

This still points to 1.10 and should be updated.
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 465adcf049922..cfa2111600f22 100644
--- a/README.md
+++ b/README.md
@@ -92,7 +92,7 @@ and then use the command prompt to change into the resulting julia directory. By
 Julia. However, most users should use the [most recent stable version](https://github.com/JuliaLang/julia/releases) of Julia. You can get this version by running:
- git checkout v1.10.5
+ git checkout v1.11.1
 To build the `julia` executable, run `make` from within the julia directory.

From 222cde9c092e143def9ae8238b529aa8e63f902d Mon Sep 17 00:00:00 2001
From: Keno Fischer
Date: Thu, 17 Oct 2024 01:19:46 -0400
Subject: [PATCH 227/537] Split reflection into compiler-dependent and compiler-independent pieces (#56185)

The `reflection.jl` file provides a large amount of functionality covering everything from helpers for access to core runtime data structures to setting up particular inference problems. It is included by both Base and Core.Compiler, but the functions that use the compiler don't really make sense in the latter. In preparation for #56128, stop including the compiler-dependent pieces in Core.Compiler. While we're here, also move a few generically useful reflection functions out of Core.Compiler, so users that access them don't have to load the compiler.

Split out from #56128, but doesn't make any semantic changes by itself, so should be quick/easy to merge.
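As a rough illustration of where the line is drawn (not an exhaustive or authoritative list; the moved definitions are visible in the diff below), queries that only read runtime data structures end up in the new `runtime_internals.jl`, which both Base and Core.Compiler include, while reflection that needs to run inference stays in `reflection.jl`:

```julia
# compiler-independent: plain runtime-data-structure queries
fieldcount(Complex{Float64})        # == 2
Base.datatype_pointerfree(Int)      # layout query, no inference machinery needed

# compiler-dependent: has to actually run type inference, stays Base-only
Base.return_types(sin, (Float64,))  # returns [Float64]
```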
--- base/Base.jl | 1 + base/compiler/compiler.jl | 2 +- base/compiler/typeutils.jl | 2 - base/compiler/utilities.jl | 84 -- base/expr.jl | 14 + base/reflection.jl | 1566 ++---------------------------------- base/runtime_internals.jl | 1530 +++++++++++++++++++++++++++++++++++ test/compiler/inference.jl | 2 +- 8 files changed, 1601 insertions(+), 1600 deletions(-) create mode 100644 base/runtime_internals.jl diff --git a/base/Base.jl b/base/Base.jl index 84e10ca788ba2..1e780bb15141a 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -173,6 +173,7 @@ include("essentials.jl") include("ctypes.jl") include("gcutils.jl") include("generator.jl") +include("runtime_internals.jl") include("reflection.jl") include("options.jl") diff --git a/base/compiler/compiler.jl b/base/compiler/compiler.jl index 5cc01391267d7..dbfc9d7d57140 100644 --- a/base/compiler/compiler.jl +++ b/base/compiler/compiler.jl @@ -84,7 +84,7 @@ const NUM_EFFECTS_OVERRIDES = 11 # sync with julia.h include("essentials.jl") include("ctypes.jl") include("generator.jl") -include("reflection.jl") +include("runtime_internals.jl") include("options.jl") ntuple(f, ::Val{0}) = () diff --git a/base/compiler/typeutils.jl b/base/compiler/typeutils.jl index 577452a873b5e..4af8fed0e40c3 100644 --- a/base/compiler/typeutils.jl +++ b/base/compiler/typeutils.jl @@ -202,8 +202,6 @@ function typesubtract(@nospecialize(a), @nospecialize(b), max_union_splitting::I return a # TODO: improve this bound? end -hasintersect(@nospecialize(a), @nospecialize(b)) = typeintersect(a, b) !== Bottom - _typename(@nospecialize a) = Union{} _typename(a::TypeVar) = Core.TypeName function _typename(a::Union) diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl index b3dfd73d53452..f9202788b6360 100644 --- a/base/compiler/utilities.jl +++ b/base/compiler/utilities.jl @@ -48,24 +48,6 @@ anymap(f::Function, a::Array{Any,1}) = Any[ f(a[i]) for i in 1:length(a) ] _topmod(m::Module) = ccall(:jl_base_relative_to, Any, (Any,), m)::Module -####### -# AST # -####### - -# Meta expression head, these generally can't be deleted even when they are -# in a dead branch but can be ignored when analyzing uses/liveness. -is_meta_expr_head(head::Symbol) = head === :boundscheck || head === :meta || head === :loopinfo -is_meta_expr(@nospecialize x) = isa(x, Expr) && is_meta_expr_head(x.head) - -function is_self_quoting(@nospecialize(x)) - return isa(x,Number) || isa(x,AbstractString) || isa(x,Tuple) || isa(x,Type) || - isa(x,Char) || x === nothing || isa(x,Function) -end - -function quoted(@nospecialize(x)) - return is_self_quoting(x) ? 
x : QuoteNode(x) -end - ############ # inlining # ############ @@ -116,10 +98,6 @@ function is_inlineable_constant(@nospecialize(x)) return count_const_size(x) <= MAX_INLINE_CONST_SIZE end -is_nospecialized(method::Method) = method.nospecialize ≠ 0 - -is_nospecializeinfer(method::Method) = method.nospecializeinfer && is_nospecialized(method) - ########################### # MethodInstance/CodeInfo # ########################### @@ -192,74 +170,12 @@ function get_compileable_sig(method::Method, @nospecialize(atype), sparams::Simp mt, atype, sparams, method, #=int return_if_compileable=#1) end -function get_nospecializeinfer_sig(method::Method, @nospecialize(atype), sparams::SimpleVector) - isa(atype, DataType) || return method.sig - mt = ccall(:jl_method_get_table, Any, (Any,), method) - mt === nothing && return method.sig - return ccall(:jl_normalize_to_compilable_sig, Any, (Any, Any, Any, Any, Cint), - mt, atype, sparams, method, #=int return_if_compileable=#0) -end isa_compileable_sig(@nospecialize(atype), sparams::SimpleVector, method::Method) = !iszero(ccall(:jl_isa_compileable_sig, Int32, (Any, Any, Any), atype, sparams, method)) -# eliminate UnionAll vars that might be degenerate due to having identical bounds, -# or a concrete upper bound and appearing covariantly. -function subst_trivial_bounds(@nospecialize(atype)) - if !isa(atype, UnionAll) - return atype - end - v = atype.var - if isconcretetype(v.ub) || v.lb === v.ub - subst = try - atype{v.ub} - catch - # Note in rare cases a var bound might not be valid to substitute. - nothing - end - if subst !== nothing - return subst_trivial_bounds(subst) - end - end - return UnionAll(v, subst_trivial_bounds(atype.body)) -end - has_typevar(@nospecialize(t), v::TypeVar) = ccall(:jl_has_typevar, Cint, (Any, Any), t, v) != 0 -# If removing trivial vars from atype results in an equivalent type, use that -# instead. Otherwise we can get a case like issue #38888, where a signature like -# f(x::S) where S<:Int -# gets cached and matches a concrete dispatch case. -function normalize_typevars(method::Method, @nospecialize(atype), sparams::SimpleVector) - at2 = subst_trivial_bounds(atype) - if at2 !== atype && at2 == atype - atype = at2 - sp_ = ccall(:jl_type_intersection_with_env, Any, (Any, Any), at2, method.sig)::SimpleVector - sparams = sp_[2]::SimpleVector - end - return Pair{Any,SimpleVector}(atype, sparams) -end - -# get a handle to the unique specialization object representing a particular instantiation of a call -@inline function specialize_method(method::Method, @nospecialize(atype), sparams::SimpleVector; preexisting::Bool=false) - if isa(atype, UnionAll) - atype, sparams = normalize_typevars(method, atype, sparams) - end - if is_nospecializeinfer(method) - atype = get_nospecializeinfer_sig(method, atype, sparams) - end - if preexisting - # check cached specializations - # for an existing result stored there - return ccall(:jl_specializations_lookup, Any, (Any, Any), method, atype)::Union{Nothing,MethodInstance} - end - return ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any), method, atype, sparams) -end - -function specialize_method(match::MethodMatch; kwargs...) - return specialize_method(match.method, match.spec_types, match.sparams; kwargs...) 
-end - """ is_declared_inline(method::Method) -> Bool diff --git a/base/expr.jl b/base/expr.jl index 478ccd7d7cc20..723b6b5b636c8 100644 --- a/base/expr.jl +++ b/base/expr.jl @@ -1560,3 +1560,17 @@ function make_atomiconce(success_order, fail_order, ex) end error("@atomiconce expression missing field access or indexing") end + +# Meta expression head, these generally can't be deleted even when they are +# in a dead branch but can be ignored when analyzing uses/liveness. +is_meta_expr_head(head::Symbol) = head === :boundscheck || head === :meta || head === :loopinfo +is_meta_expr(@nospecialize x) = isa(x, Expr) && is_meta_expr_head(x.head) + +function is_self_quoting(@nospecialize(x)) + return isa(x,Number) || isa(x,AbstractString) || isa(x,Tuple) || isa(x,Type) || + isa(x,Char) || x === nothing || isa(x,Function) +end + +function quoted(@nospecialize(x)) + return is_self_quoting(x) ? x : QuoteNode(x) +end diff --git a/base/reflection.jl b/base/reflection.jl index 49d640ea40bab..8fe8d324eb792 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -1,1186 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -# name and module reflection - -""" - parentmodule(m::Module) -> Module - -Get a module's enclosing `Module`. `Main` is its own parent. - -See also: [`names`](@ref), [`nameof`](@ref), [`fullname`](@ref), [`@__MODULE__`](@ref). - -# Examples -```jldoctest -julia> parentmodule(Main) -Main - -julia> parentmodule(Base.Broadcast) -Base -``` -""" -parentmodule(m::Module) = (@_total_meta; ccall(:jl_module_parent, Ref{Module}, (Any,), m)) - -is_root_module(m::Module) = parentmodule(m) === m || (isdefined(Main, :Base) && m === Main.Base) - -""" - moduleroot(m::Module) -> Module - -Find the root module of a given module. This is the first module in the chain of -parent modules of `m` which is either a registered root module or which is its -own parent module. -""" -function moduleroot(m::Module) - @_total_meta - while true - is_root_module(m) && return m - p = parentmodule(m) - p === m && return m - m = p - end -end - -""" - @__MODULE__ -> Module - -Get the `Module` of the toplevel eval, -which is the `Module` code is currently being read from. -""" -macro __MODULE__() - return __module__ -end - -""" - fullname(m::Module) - -Get the fully-qualified name of a module as a tuple of symbols. For example, - -# Examples -```jldoctest -julia> fullname(Base.Iterators) -(:Base, :Iterators) - -julia> fullname(Main) -(:Main,) -``` -""" -function fullname(m::Module) - @_total_meta - mn = nameof(m) - if m === Main || m === Base || m === Core - return (mn,) - end - mp = parentmodule(m) - if mp === m - return (mn,) - end - return (fullname(mp)..., mn) -end - -""" - moduleloc(m::Module) -> LineNumberNode - -Get the location of the `module` definition. -""" -function moduleloc(m::Module) - line = Ref{Int32}(0) - file = ccall(:jl_module_getloc, Ref{Symbol}, (Any, Ref{Int32}), m, line) - return LineNumberNode(Int(line[]), file) -end - -""" - names(x::Module; all::Bool=false, imported::Bool=false, usings::Bool=false) -> Vector{Symbol} - -Get a vector of the public names of a `Module`, excluding deprecated names. -If `all` is true, then the list also includes non-public names defined in the module, -deprecated names, and compiler-generated names. -If `imported` is true, then names explicitly imported from other modules -are also included. -If `usings` is true, then names explicitly imported via `using` are also included. -Names are returned in sorted order. 
- -As a special case, all names defined in `Main` are considered \"public\", -since it is not idiomatic to explicitly mark names from `Main` as public. - -!!! note - `sym ∈ names(SomeModule)` does *not* imply `isdefined(SomeModule, sym)`. - `names` may return symbols marked with `public` or `export`, even if - they are not defined in the module. - -!!! warning - `names` may return duplicate names. The duplication happens, e.g. if an `import`ed name - conflicts with an already existing identifier. - -See also: [`Base.isexported`](@ref), [`Base.ispublic`](@ref), [`Base.@locals`](@ref), [`@__MODULE__`](@ref). -""" -names(m::Module; kwargs...) = sort!(unsorted_names(m; kwargs...)) -unsorted_names(m::Module; all::Bool=false, imported::Bool=false, usings::Bool=false) = - ccall(:jl_module_names, Array{Symbol,1}, (Any, Cint, Cint, Cint), m, all, imported, usings) - -""" - isexported(m::Module, s::Symbol) -> Bool - -Returns whether a symbol is exported from a module. - -See also: [`ispublic`](@ref), [`names`](@ref) - -```jldoctest -julia> module Mod - export foo - public bar - end -Mod - -julia> Base.isexported(Mod, :foo) -true - -julia> Base.isexported(Mod, :bar) -false - -julia> Base.isexported(Mod, :baz) -false -``` -""" -isexported(m::Module, s::Symbol) = ccall(:jl_module_exports_p, Cint, (Any, Any), m, s) != 0 - -""" - ispublic(m::Module, s::Symbol) -> Bool - -Returns whether a symbol is marked as public in a module. - -Exported symbols are considered public. - -!!! compat "Julia 1.11" - This function and the notion of publicity were added in Julia 1.11. - -See also: [`isexported`](@ref), [`names`](@ref) - -```jldoctest -julia> module Mod - export foo - public bar - end -Mod - -julia> Base.ispublic(Mod, :foo) -true - -julia> Base.ispublic(Mod, :bar) -true - -julia> Base.ispublic(Mod, :baz) -false -``` -""" -ispublic(m::Module, s::Symbol) = ccall(:jl_module_public_p, Cint, (Any, Any), m, s) != 0 - -# TODO: this is vaguely broken because it only works for explicit calls to -# `Base.deprecate`, not the @deprecated macro: -isdeprecated(m::Module, s::Symbol) = ccall(:jl_is_binding_deprecated, Cint, (Any, Any), m, s) != 0 - -""" - isbindingresolved(m::Module, s::Symbol) -> Bool - -Returns whether the binding of a symbol in a module is resolved. 
- -See also: [`isexported`](@ref), [`ispublic`](@ref), [`isdeprecated`](@ref) - -```jldoctest -julia> module Mod - foo() = 17 - end -Mod - -julia> Base.isbindingresolved(Mod, :foo) -true - -julia> Base.isbindingresolved(Mod, :bar) -false -``` -""" -isbindingresolved(m::Module, var::Symbol) = ccall(:jl_binding_resolved_p, Cint, (Any, Any), m, var) != 0 - -function binding_module(m::Module, s::Symbol) - p = ccall(:jl_get_module_of_binding, Ptr{Cvoid}, (Any, Any), m, s) - p == C_NULL && return m - return unsafe_pointer_to_objref(p)::Module -end - -const _NAMEDTUPLE_NAME = NamedTuple.body.body.name - -function _fieldnames(@nospecialize t) - if t.name === _NAMEDTUPLE_NAME - if t.parameters[1] isa Tuple - return t.parameters[1] - else - throw(ArgumentError("type does not have definite field names")) - end - end - return t.name.names -end - -const BINDING_KIND_GLOBAL = 0x0 -const BINDING_KIND_CONST = 0x1 -const BINDING_KIND_CONST_IMPORT = 0x2 -const BINDING_KIND_IMPLICIT = 0x3 -const BINDING_KIND_EXPLICIT = 0x4 -const BINDING_KIND_IMPORTED = 0x5 -const BINDING_KIND_FAILED = 0x6 -const BINDING_KIND_DECLARED = 0x7 -const BINDING_KIND_GUARD = 0x8 - -function lookup_binding_partition(world::UInt, b::Core.Binding) - ccall(:jl_get_binding_partition, Ref{Core.BindingPartition}, (Any, UInt), b, world) -end - -function lookup_binding_partition(world::UInt, gr::Core.GlobalRef) - ccall(:jl_get_globalref_partition, Ref{Core.BindingPartition}, (Any, UInt), gr, world) -end - -binding_kind(bpart::Core.BindingPartition) = ccall(:jl_bpart_get_kind, UInt8, (Any,), bpart) -binding_kind(m::Module, s::Symbol) = binding_kind(lookup_binding_partition(tls_world_age(), GlobalRef(m, s))) - -""" - fieldname(x::DataType, i::Integer) - -Get the name of field `i` of a `DataType`. - -The return type is `Symbol`, except when `x <: Tuple`, in which case the index of the field is returned, of type `Int`. - -# Examples -```jldoctest -julia> fieldname(Rational, 1) -:num - -julia> fieldname(Rational, 2) -:den - -julia> fieldname(Tuple{String,Int}, 2) -2 -``` -""" -function fieldname(t::DataType, i::Integer) - throw_not_def_field() = throw(ArgumentError("type does not have definite field names")) - function throw_field_access(t, i, n_fields) - field_label = n_fields == 1 ? "field" : "fields" - throw(ArgumentError("Cannot access field $i since type $t only has $n_fields $field_label.")) - end - throw_need_pos_int(i) = throw(ArgumentError("Field numbers must be positive integers. $i is invalid.")) - - isabstracttype(t) && throw_not_def_field() - names = _fieldnames(t) - n_fields = length(names)::Int - i > n_fields && throw_field_access(t, i, n_fields) - i < 1 && throw_need_pos_int(i) - return @inbounds names[i]::Symbol -end - -fieldname(t::UnionAll, i::Integer) = fieldname(unwrap_unionall(t), i) -fieldname(t::Type{<:Tuple}, i::Integer) = - i < 1 || i > fieldcount(t) ? throw(BoundsError(t, i)) : Int(i) - -""" - fieldnames(x::DataType) - -Get a tuple with the names of the fields of a `DataType`. - -Each name is a `Symbol`, except when `x <: Tuple`, in which case each name (actually the -index of the field) is an `Int`. - -See also [`propertynames`](@ref), [`hasfield`](@ref). 
- -# Examples -```jldoctest -julia> fieldnames(Rational) -(:num, :den) - -julia> fieldnames(typeof(1+im)) -(:re, :im) - -julia> fieldnames(Tuple{String,Int}) -(1, 2) -``` -""" -fieldnames(t::DataType) = (fieldcount(t); # error check to make sure type is specific enough - (_fieldnames(t)...,))::Tuple{Vararg{Symbol}} -fieldnames(t::UnionAll) = fieldnames(unwrap_unionall(t)) -fieldnames(::Core.TypeofBottom) = - throw(ArgumentError("The empty type does not have field names since it does not have instances.")) -fieldnames(t::Type{<:Tuple}) = ntuple(identity, fieldcount(t)) - -""" - hasfield(T::Type, name::Symbol) - -Return a boolean indicating whether `T` has `name` as one of its own fields. - -See also [`fieldnames`](@ref), [`fieldcount`](@ref), [`hasproperty`](@ref). - -!!! compat "Julia 1.2" - This function requires at least Julia 1.2. - -# Examples -```jldoctest -julia> struct Foo - bar::Int - end - -julia> hasfield(Foo, :bar) -true - -julia> hasfield(Foo, :x) -false -``` -""" -hasfield(T::Type, name::Symbol) = fieldindex(T, name, false) > 0 - -""" - nameof(t::DataType) -> Symbol - -Get the name of a (potentially `UnionAll`-wrapped) `DataType` (without its parent module) -as a symbol. - -# Examples -```jldoctest -julia> module Foo - struct S{T} - end - end -Foo - -julia> nameof(Foo.S{T} where T) -:S -``` -""" -nameof(t::DataType) = t.name.name -nameof(t::UnionAll) = nameof(unwrap_unionall(t))::Symbol - -""" - parentmodule(t::DataType) -> Module - -Determine the module containing the definition of a (potentially `UnionAll`-wrapped) `DataType`. - -# Examples -```jldoctest -julia> module Foo - struct Int end - end -Foo - -julia> parentmodule(Int) -Core - -julia> parentmodule(Foo.Int) -Foo -``` -""" -parentmodule(t::DataType) = t.name.module -parentmodule(t::UnionAll) = parentmodule(unwrap_unionall(t)) - -""" - isconst(m::Module, s::Symbol) -> Bool - -Determine whether a global is declared `const` in a given module `m`. -""" -isconst(m::Module, s::Symbol) = - ccall(:jl_is_const, Cint, (Any, Any), m, s) != 0 - -function isconst(g::GlobalRef) - return ccall(:jl_globalref_is_const, Cint, (Any,), g) != 0 -end - -""" - isconst(t::DataType, s::Union{Int,Symbol}) -> Bool - -Determine whether a field `s` is declared `const` in a given type `t`. -""" -function isconst(@nospecialize(t::Type), s::Symbol) - @_foldable_meta - t = unwrap_unionall(t) - isa(t, DataType) || return false - return isconst(t, fieldindex(t, s, false)) -end -function isconst(@nospecialize(t::Type), s::Int) - @_foldable_meta - t = unwrap_unionall(t) - # TODO: what to do for `Union`? - isa(t, DataType) || return false # uncertain - ismutabletype(t) || return true # immutable structs are always const - 1 <= s <= length(t.name.names) || return true # OOB reads are "const" since they always throw - constfields = t.name.constfields - constfields === C_NULL && return false - s -= 1 - return unsafe_load(Ptr{UInt32}(constfields), 1 + s÷32) & (1 << (s%32)) != 0 -end - -""" - isfieldatomic(t::DataType, s::Union{Int,Symbol}) -> Bool - -Determine whether a field `s` is declared `@atomic` in a given type `t`. -""" -function isfieldatomic(@nospecialize(t::Type), s::Symbol) - @_foldable_meta - t = unwrap_unionall(t) - isa(t, DataType) || return false - return isfieldatomic(t, fieldindex(t, s, false)) -end -function isfieldatomic(@nospecialize(t::Type), s::Int) - @_foldable_meta - t = unwrap_unionall(t) - # TODO: what to do for `Union`? 
- isa(t, DataType) || return false # uncertain - ismutabletype(t) || return false # immutable structs are never atomic - 1 <= s <= length(t.name.names) || return false # OOB reads are not atomic (they always throw) - atomicfields = t.name.atomicfields - atomicfields === C_NULL && return false - s -= 1 - return unsafe_load(Ptr{UInt32}(atomicfields), 1 + s÷32) & (1 << (s%32)) != 0 -end - -""" - @locals() - -Construct a dictionary of the names (as symbols) and values of all local -variables defined as of the call site. - -!!! compat "Julia 1.1" - This macro requires at least Julia 1.1. - -# Examples -```jldoctest -julia> let x = 1, y = 2 - Base.@locals - end -Dict{Symbol, Any} with 2 entries: - :y => 2 - :x => 1 - -julia> function f(x) - local y - show(Base.@locals); println() - for i = 1:1 - show(Base.@locals); println() - end - y = 2 - show(Base.@locals); println() - nothing - end; - -julia> f(42) -Dict{Symbol, Any}(:x => 42) -Dict{Symbol, Any}(:i => 1, :x => 42) -Dict{Symbol, Any}(:y => 2, :x => 42) -``` -""" -macro locals() - return Expr(:locals) -end - -# concrete datatype predicates - -datatype_fieldtypes(x::DataType) = ccall(:jl_get_fieldtypes, Core.SimpleVector, (Any,), x) - -struct DataTypeLayout - size::UInt32 - nfields::UInt32 - npointers::UInt32 - firstptr::Int32 - alignment::UInt16 - flags::UInt16 - # haspadding : 1; - # fielddesc_type : 2; - # arrayelem_isboxed : 1; - # arrayelem_isunion : 1; -end - -""" - Base.datatype_alignment(dt::DataType) -> Int - -Memory allocation minimum alignment for instances of this type. -Can be called on any `isconcretetype`, although for Memory it will give the -alignment of the elements, not the whole object. -""" -function datatype_alignment(dt::DataType) - @_foldable_meta - dt.layout == C_NULL && throw(UndefRefError()) - alignment = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).alignment - return Int(alignment) -end - -function uniontype_layout(@nospecialize T::Type) - sz = RefValue{Csize_t}(0) - algn = RefValue{Csize_t}(0) - isinline = ccall(:jl_islayout_inline, Cint, (Any, Ptr{Csize_t}, Ptr{Csize_t}), T, sz, algn) != 0 - (isinline, Int(sz[]), Int(algn[])) -end - -LLT_ALIGN(x, sz) = (x + sz - 1) & -sz - -# amount of total space taken by T when stored in a container -function aligned_sizeof(@nospecialize T::Type) - @_foldable_meta - if isa(T, Union) - if allocatedinline(T) - # NOTE this check is equivalent to `isbitsunion(T)`, we can improve type - # inference in the second branch with the outer `isa(T, Union)` check - _, sz, al = uniontype_layout(T) - return LLT_ALIGN(sz, al) - end - elseif allocatedinline(T) - al = datatype_alignment(T) - return LLT_ALIGN(Core.sizeof(T), al) - end - return Core.sizeof(Ptr{Cvoid}) -end - -gc_alignment(sz::Integer) = Int(ccall(:jl_alignment, Cint, (Csize_t,), sz)) -gc_alignment(T::Type) = gc_alignment(Core.sizeof(T)) - -""" - Base.datatype_haspadding(dt::DataType) -> Bool - -Return whether the fields of instances of this type are packed in memory, -with no intervening padding bits (defined as bits whose value does not impact -the semantic value of the instance itself). -Can be called on any `isconcretetype`. 
-""" -function datatype_haspadding(dt::DataType) - @_foldable_meta - dt.layout == C_NULL && throw(UndefRefError()) - flags = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).flags - return flags & 1 == 1 -end - -""" - Base.datatype_isbitsegal(dt::DataType) -> Bool - -Return whether egality of the (non-padding bits of the) in-memory representation -of an instance of this type implies semantic egality of the instance itself. -This may not be the case if the type contains to other values whose egality is -independent of their identity (e.g. immutable structs, some types, etc.). -""" -function datatype_isbitsegal(dt::DataType) - @_foldable_meta - dt.layout == C_NULL && throw(UndefRefError()) - flags = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).flags - return (flags & (1<<5)) != 0 -end - -""" - Base.datatype_nfields(dt::DataType) -> UInt32 - -Return the number of fields known to this datatype's layout. This may be -different from the number of actual fields of the type for opaque types. -Can be called on any `isconcretetype`. -""" -function datatype_nfields(dt::DataType) - @_foldable_meta - dt.layout == C_NULL && throw(UndefRefError()) - return unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).nfields -end - -""" - Base.datatype_npointers(dt::DataType) -> Int - -Return the number of pointers in the layout of a datatype. -""" -function datatype_npointers(dt::DataType) - @_foldable_meta - dt.layout == C_NULL && throw(UndefRefError()) - return unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).npointers -end - -""" - Base.datatype_pointerfree(dt::DataType) -> Bool - -Return whether instances of this type can contain references to gc-managed memory. -Can be called on any `isconcretetype`. -""" -function datatype_pointerfree(dt::DataType) - @_foldable_meta - return datatype_npointers(dt) == 0 -end - -""" - Base.datatype_fielddesc_type(dt::DataType) -> Int - -Return the size in bytes of each field-description entry in the layout array, -located at `(dt.layout + sizeof(DataTypeLayout))`. -Can be called on any `isconcretetype`. - -See also [`fieldoffset`](@ref). -""" -function datatype_fielddesc_type(dt::DataType) - @_foldable_meta - dt.layout == C_NULL && throw(UndefRefError()) - flags = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).flags - return (flags >> 1) & 3 -end - -""" - Base.datatype_arrayelem(dt::DataType) -> Int - -Return the behavior of the trailing array types allocations. -Can be called on any `isconcretetype`, but only meaningful on `Memory`. 
- -0 = inlinealloc -1 = isboxed -2 = isbitsunion -""" -function datatype_arrayelem(dt::DataType) - @_foldable_meta - dt.layout == C_NULL && throw(UndefRefError()) - flags = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).flags - return (flags >> 3) & 3 -end - -function datatype_layoutsize(dt::DataType) - @_foldable_meta - dt.layout == C_NULL && throw(UndefRefError()) - size = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).size - return size % Int -end - - -# For type stability, we only expose a single struct that describes everything -struct FieldDesc - isforeign::Bool - isptr::Bool - size::UInt32 - offset::UInt32 -end - -struct FieldDescStorage{T} - ptrsize::T - offset::T -end -FieldDesc(fd::FieldDescStorage{T}) where {T} = - FieldDesc(false, fd.ptrsize & 1 != 0, - fd.ptrsize >> 1, fd.offset) - -struct DataTypeFieldDesc - dt::DataType - function DataTypeFieldDesc(dt::DataType) - dt.layout == C_NULL && throw(UndefRefError()) - new(dt) - end -end - -function getindex(dtfd::DataTypeFieldDesc, i::Int) - layout_ptr = convert(Ptr{DataTypeLayout}, dtfd.dt.layout) - fd_ptr = layout_ptr + Core.sizeof(DataTypeLayout) - layout = unsafe_load(layout_ptr) - fielddesc_type = (layout.flags >> 1) & 3 - nfields = layout.nfields - @boundscheck ((1 <= i <= nfields) || throw(BoundsError(dtfd, i))) - if fielddesc_type == 0 - return FieldDesc(unsafe_load(Ptr{FieldDescStorage{UInt8}}(fd_ptr), i)) - elseif fielddesc_type == 1 - return FieldDesc(unsafe_load(Ptr{FieldDescStorage{UInt16}}(fd_ptr), i)) - elseif fielddesc_type == 2 - return FieldDesc(unsafe_load(Ptr{FieldDescStorage{UInt32}}(fd_ptr), i)) - else - # fielddesc_type == 3 - return FieldDesc(true, true, 0, 0) - end -end - -""" - ismutable(v) -> Bool - -Return `true` if and only if value `v` is mutable. See [Mutable Composite Types](@ref) -for a discussion of immutability. Note that this function works on values, so if you -give it a `DataType`, it will tell you that a value of the type is mutable. - -!!! note - For technical reasons, `ismutable` returns `true` for values of certain special types - (for example `String` and `Symbol`) even though they cannot be mutated in a permissible way. - -See also [`isbits`](@ref), [`isstructtype`](@ref). - -# Examples -```jldoctest -julia> ismutable(1) -false - -julia> ismutable([1,2]) -true -``` - -!!! compat "Julia 1.5" - This function requires at least Julia 1.5. -""" -ismutable(@nospecialize(x)) = (@_total_meta; (typeof(x).name::Core.TypeName).flags & 0x2 == 0x2) -# The type assertion above is required to fix some invalidations. -# See also https://github.com/JuliaLang/julia/issues/52134 - -""" - ismutabletype(T) -> Bool - -Determine whether type `T` was declared as a mutable type -(i.e. using `mutable struct` keyword). -If `T` is not a type, then return `false`. - -!!! compat "Julia 1.7" - This function requires at least Julia 1.7. -""" -function ismutabletype(@nospecialize t) - @_total_meta - t = unwrap_unionall(t) - # TODO: what to do for `Union`? - return isa(t, DataType) && ismutabletypename(t.name) -end - -ismutabletypename(tn::Core.TypeName) = tn.flags & 0x2 == 0x2 - -""" - isstructtype(T) -> Bool - -Determine whether type `T` was declared as a struct type -(i.e. using the `struct` or `mutable struct` keyword). -If `T` is not a type, then return `false`. -""" -function isstructtype(@nospecialize t) - @_total_meta - t = unwrap_unionall(t) - # TODO: what to do for `Union`? 
- isa(t, DataType) || return false - return !isprimitivetype(t) && !isabstracttype(t) -end - -""" - isprimitivetype(T) -> Bool - -Determine whether type `T` was declared as a primitive type -(i.e. using the `primitive type` syntax). -If `T` is not a type, then return `false`. -""" -function isprimitivetype(@nospecialize t) - @_total_meta - t = unwrap_unionall(t) - # TODO: what to do for `Union`? - isa(t, DataType) || return false - return (t.flags & 0x0080) == 0x0080 -end - -""" - isbitstype(T) - -Return `true` if type `T` is a "plain data" type, -meaning it is immutable and contains no references to other values, -only `primitive` types and other `isbitstype` types. -Typical examples are numeric types such as [`UInt8`](@ref), -[`Float64`](@ref), and [`Complex{Float64}`](@ref). -This category of types is significant since they are valid as type parameters, -may not track [`isdefined`](@ref) / [`isassigned`](@ref) status, -and have a defined layout that is compatible with C. -If `T` is not a type, then return `false`. - -See also [`isbits`](@ref), [`isprimitivetype`](@ref), [`ismutable`](@ref). - -# Examples -```jldoctest -julia> isbitstype(Complex{Float64}) -true - -julia> isbitstype(Complex) -false -``` -""" -isbitstype(@nospecialize t) = (@_total_meta; isa(t, DataType) && (t.flags & 0x0008) == 0x0008) - -""" - isbits(x) - -Return `true` if `x` is an instance of an [`isbitstype`](@ref) type. -""" -isbits(@nospecialize x) = isbitstype(typeof(x)) - -""" - objectid(x) -> UInt - -Get a hash value for `x` based on object identity. - -If `x === y` then `objectid(x) == objectid(y)`, and usually when `x !== y`, `objectid(x) != objectid(y)`. - -See also [`hash`](@ref), [`IdDict`](@ref). -""" -function objectid(@nospecialize(x)) - @_total_meta - return ccall(:jl_object_id, UInt, (Any,), x) -end - -""" - isdispatchtuple(T) - -Determine whether type `T` is a tuple "leaf type", -meaning it could appear as a type signature in dispatch -and has no subtypes (or supertypes) which could appear in a call. -If `T` is not a type, then return `false`. -""" -isdispatchtuple(@nospecialize(t)) = (@_total_meta; isa(t, DataType) && (t.flags & 0x0004) == 0x0004) - -datatype_ismutationfree(dt::DataType) = (@_total_meta; (dt.flags & 0x0100) == 0x0100) - -""" - Base.ismutationfree(T) - -Determine whether type `T` is mutation free in the sense that no mutable memory -is reachable from this type (either in the type itself) or through any fields. -Note that the type itself need not be immutable. For example, an empty mutable -type is `ismutabletype`, but also `ismutationfree`. -If `T` is not a type, then return `false`. -""" -function ismutationfree(@nospecialize(t)) - t = unwrap_unionall(t) - if isa(t, DataType) - return datatype_ismutationfree(t) - elseif isa(t, Union) - return ismutationfree(t.a) && ismutationfree(t.b) - end - # TypeVar, etc. - return false -end - -datatype_isidentityfree(dt::DataType) = (@_total_meta; (dt.flags & 0x0200) == 0x0200) - -""" - Base.isidentityfree(T) - -Determine whether type `T` is identity free in the sense that this type or any -reachable through its fields has non-content-based identity. -If `T` is not a type, then return `false`. -""" -function isidentityfree(@nospecialize(t)) - t = unwrap_unionall(t) - if isa(t, DataType) - return datatype_isidentityfree(t) - elseif isa(t, Union) - return isidentityfree(t.a) && isidentityfree(t.b) - end - # TypeVar, etc. 
- return false -end - -iskindtype(@nospecialize t) = (t === DataType || t === UnionAll || t === Union || t === typeof(Bottom)) -isconcretedispatch(@nospecialize t) = isconcretetype(t) && !iskindtype(t) - -using Core: has_free_typevars - -# equivalent to isa(v, Type) && isdispatchtuple(Tuple{v}) || v === Union{} -# and is thus perhaps most similar to the old (pre-1.0) `isleaftype` query -function isdispatchelem(@nospecialize v) - return (v === Bottom) || (v === typeof(Bottom)) || isconcretedispatch(v) || - (isType(v) && !has_free_typevars(v)) -end - -const _TYPE_NAME = Type.body.name -isType(@nospecialize t) = isa(t, DataType) && t.name === _TYPE_NAME - -""" - isconcretetype(T) - -Determine whether type `T` is a concrete type, meaning it could have direct instances -(values `x` such that `typeof(x) === T`). -Note that this is not the negation of `isabstracttype(T)`. -If `T` is not a type, then return `false`. - -See also: [`isbits`](@ref), [`isabstracttype`](@ref), [`issingletontype`](@ref). - -# Examples -```jldoctest -julia> isconcretetype(Complex) -false - -julia> isconcretetype(Complex{Float32}) -true - -julia> isconcretetype(Vector{Complex}) -true - -julia> isconcretetype(Vector{Complex{Float32}}) -true - -julia> isconcretetype(Union{}) -false - -julia> isconcretetype(Union{Int,String}) -false -``` -""" -isconcretetype(@nospecialize(t)) = (@_total_meta; isa(t, DataType) && (t.flags & 0x0002) == 0x0002) - -""" - isabstracttype(T) - -Determine whether type `T` was declared as an abstract type -(i.e. using the `abstract type` syntax). -Note that this is not the negation of `isconcretetype(T)`. -If `T` is not a type, then return `false`. - -# Examples -```jldoctest -julia> isabstracttype(AbstractArray) -true - -julia> isabstracttype(Vector) -false -``` -""" -function isabstracttype(@nospecialize(t)) - @_total_meta - t = unwrap_unionall(t) - # TODO: what to do for `Union`? - return isa(t, DataType) && (t.name.flags & 0x1) == 0x1 -end - -function is_datatype_layoutopaque(dt::DataType) - datatype_nfields(dt) == 0 && !datatype_pointerfree(dt) -end - -function is_valid_intrinsic_elptr(@nospecialize(ety)) - ety === Any && return true - isconcretetype(ety) || return false - ety <: Array && return false - return !is_datatype_layoutopaque(ety) -end - -""" - Base.issingletontype(T) - -Determine whether type `T` has exactly one possible instance; for example, a -struct type with no fields except other singleton values. -If `T` is not a concrete type, then return `false`. -""" -issingletontype(@nospecialize(t)) = (@_total_meta; isa(t, DataType) && isdefined(t, :instance) && datatype_layoutsize(t) == 0 && datatype_pointerfree(t)) - -""" - typeintersect(T::Type, S::Type) - -Compute a type that contains the intersection of `T` and `S`. Usually this will be the -smallest such type or one close to it. - -A special case where exact behavior is guaranteed: when `T <: S`, -`typeintersect(S, T) == T == typeintersect(T, S)`. -""" -typeintersect(@nospecialize(a), @nospecialize(b)) = (@_total_meta; ccall(:jl_type_intersection, Any, (Any, Any), a::Type, b::Type)) - -morespecific(@nospecialize(a), @nospecialize(b)) = (@_total_meta; ccall(:jl_type_morespecific, Cint, (Any, Any), a::Type, b::Type) != 0) -morespecific(a::Method, b::Method) = ccall(:jl_method_morespecific, Cint, (Any, Any), a, b) != 0 - -""" - fieldoffset(type, i) - -The byte offset of field `i` of a type relative to the data start. 
For example, we could -use it in the following manner to summarize information about a struct: - -```jldoctest -julia> structinfo(T) = [(fieldoffset(T,i), fieldname(T,i), fieldtype(T,i)) for i = 1:fieldcount(T)]; - -julia> structinfo(Base.Filesystem.StatStruct) -14-element Vector{Tuple{UInt64, Symbol, Type}}: - (0x0000000000000000, :desc, Union{RawFD, String}) - (0x0000000000000008, :device, UInt64) - (0x0000000000000010, :inode, UInt64) - (0x0000000000000018, :mode, UInt64) - (0x0000000000000020, :nlink, Int64) - (0x0000000000000028, :uid, UInt64) - (0x0000000000000030, :gid, UInt64) - (0x0000000000000038, :rdev, UInt64) - (0x0000000000000040, :size, Int64) - (0x0000000000000048, :blksize, Int64) - (0x0000000000000050, :blocks, Int64) - (0x0000000000000058, :mtime, Float64) - (0x0000000000000060, :ctime, Float64) - (0x0000000000000068, :ioerrno, Int32) -``` -""" -fieldoffset(x::DataType, idx::Integer) = (@_foldable_meta; ccall(:jl_get_field_offset, Csize_t, (Any, Cint), x, idx)) - -""" - fieldtype(T, name::Symbol | index::Int) - -Determine the declared type of a field (specified by name or index) in a composite DataType `T`. - -# Examples -```jldoctest -julia> struct Foo - x::Int64 - y::String - end - -julia> fieldtype(Foo, :x) -Int64 - -julia> fieldtype(Foo, 2) -String -``` -""" -fieldtype - -""" - Base.fieldindex(T, name::Symbol, err:Bool=true) - -Get the index of a named field, throwing an error if the field does not exist (when err==true) -or returning 0 (when err==false). - -# Examples -```jldoctest -julia> struct Foo - x::Int64 - y::String - end - -julia> Base.fieldindex(Foo, :z) -ERROR: FieldError: type Foo has no field `z`, available fields: `x`, `y` -Stacktrace: -[...] - -julia> Base.fieldindex(Foo, :z, false) -0 -``` -""" -function fieldindex(T::DataType, name::Symbol, err::Bool=true) - return err ? _fieldindex_maythrow(T, name) : _fieldindex_nothrow(T, name) -end - -function _fieldindex_maythrow(T::DataType, name::Symbol) - @_foldable_meta - @noinline - return Int(ccall(:jl_field_index, Cint, (Any, Any, Cint), T, name, true)+1) -end - -function _fieldindex_nothrow(T::DataType, name::Symbol) - @_total_meta - @noinline - return Int(ccall(:jl_field_index, Cint, (Any, Any, Cint), T, name, false)+1) -end - -function fieldindex(t::UnionAll, name::Symbol, err::Bool=true) - t = argument_datatype(t) - if t === nothing - err && throw(ArgumentError("type does not have definite fields")) - return 0 - end - return fieldindex(t, name, err) -end - -function argument_datatype(@nospecialize t) - @_total_meta - @noinline - return ccall(:jl_argument_datatype, Any, (Any,), t)::Union{Nothing,DataType} -end - -function datatype_fieldcount(t::DataType) - if t.name === _NAMEDTUPLE_NAME - names, types = t.parameters[1], t.parameters[2] - if names isa Tuple - return length(names) - end - if types isa DataType && types <: Tuple - return fieldcount(types) - end - return nothing - elseif isabstracttype(t) - return nothing - end - if t.name === Tuple.name - isvatuple(t) && return nothing - return length(t.types) - end - # Equivalent to length(t.types), but `t.types` is lazy and we do not want - # to be forced to compute it. - return length(t.name.names) -end - -""" - fieldcount(t::Type) - -Get the number of fields that an instance of the given type would have. -An error is thrown if the type is too abstract to determine this. 
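A minimal sketch of the `fieldcount` contract described above, using a toy struct `Point` invented purely for illustration (results follow from the documented behavior, not a captured session):

```julia
# Toy type for illustration.
struct Point{T}
    x::T
    y::T
end

fieldcount(Point)           # 2, the wrapped DataType has a definite field count
fieldcount(Point{Float64})  # 2
fieldcount(AbstractArray)   # throws ArgumentError: no definite number of fields
```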
-""" -function fieldcount(@nospecialize t) - @_foldable_meta - if t isa UnionAll || t isa Union - t = argument_datatype(t) - if t === nothing - throw(ArgumentError("type does not have a definite number of fields")) - end - elseif t === Union{} - throw(ArgumentError("The empty type does not have a well-defined number of fields since it does not have instances.")) - end - if !(t isa DataType) - throw(TypeError(:fieldcount, DataType, t)) - end - fcount = datatype_fieldcount(t) - if fcount === nothing - throw(ArgumentError("type does not have a definite number of fields")) - end - return fcount -end - -""" - fieldtypes(T::Type) - -The declared types of all fields in a composite DataType `T` as a tuple. - -!!! compat "Julia 1.1" - This function requires at least Julia 1.1. - -# Examples -```jldoctest -julia> struct Foo - x::Int64 - y::String - end - -julia> fieldtypes(Foo) -(Int64, String) -``` -""" -fieldtypes(T::Type) = (@_foldable_meta; ntupleany(i -> fieldtype(T, i), fieldcount(T))) - -# return all instances, for types that can be enumerated - -""" - instances(T::Type) - -Return a collection of all instances of the given type, if applicable. Mostly used for -enumerated types (see `@enum`). - -# Examples -```jldoctest -julia> @enum Color red blue green - -julia> instances(Color) -(red, blue, green) -``` -""" -function instances end - -function to_tuple_type(@nospecialize(t)) - if isa(t, Tuple) || isa(t, AbstractArray) || isa(t, SimpleVector) - t = Tuple{t...} - end - if isa(t, Type) && t <: Tuple - for p in (unwrap_unionall(t)::DataType).parameters - if isa(p, Core.TypeofVararg) - p = unwrapva(p) - end - if !(isa(p, Type) || isa(p, TypeVar)) - error("argument tuple type must contain only types") - end - end - else - error("expected tuple type") - end - t -end - -function signature_type(@nospecialize(f), @nospecialize(argtypes)) - argtypes = to_tuple_type(argtypes) - ft = Core.Typeof(f) - u = unwrap_unionall(argtypes)::DataType - return rewrap_unionall(Tuple{ft, u.parameters...}, argtypes) -end +const Compiler = Core.Compiler """ code_lowered(f, types; generated=true, debuginfo=:default) @@ -1228,102 +48,8 @@ function code_lowered(@nospecialize(f), @nospecialize(t=Tuple); generated::Bool= return ret end -hasgenerator(m::Method) = isdefined(m, :generator) -hasgenerator(m::Core.MethodInstance) = hasgenerator(m.def::Method) - -# low-level method lookup functions used by the compiler - -unionlen(@nospecialize(x)) = x isa Union ? 
unionlen(x.a) + unionlen(x.b) : 1 - -function _uniontypes(@nospecialize(x), ts::Array{Any,1}) - if x isa Union - _uniontypes(x.a, ts) - _uniontypes(x.b, ts) - else - push!(ts, x) - end - return ts -end -uniontypes(@nospecialize(x)) = _uniontypes(x, Any[]) - -function _methods(@nospecialize(f), @nospecialize(t), lim::Int, world::UInt) - tt = signature_type(f, t) - return _methods_by_ftype(tt, lim, world) -end - -function _methods_by_ftype(@nospecialize(t), lim::Int, world::UInt) - return _methods_by_ftype(t, nothing, lim, world) -end -function _methods_by_ftype(@nospecialize(t), mt::Union{Core.MethodTable, Nothing}, lim::Int, world::UInt) - return _methods_by_ftype(t, mt, lim, world, false, RefValue{UInt}(typemin(UInt)), RefValue{UInt}(typemax(UInt)), Ptr{Int32}(C_NULL)) -end -function _methods_by_ftype(@nospecialize(t), mt::Union{Core.MethodTable, Nothing}, lim::Int, world::UInt, ambig::Bool, min::Ref{UInt}, max::Ref{UInt}, has_ambig::Ref{Int32}) - return ccall(:jl_matching_methods, Any, (Any, Any, Cint, Cint, UInt, Ptr{UInt}, Ptr{UInt}, Ptr{Int32}), t, mt, lim, ambig, world, min, max, has_ambig)::Union{Vector{Any},Nothing} -end - # high-level, more convenient method lookup functions -# type for reflecting and pretty-printing a subset of methods -mutable struct MethodList <: AbstractArray{Method,1} - ms::Array{Method,1} - mt::Core.MethodTable -end - -size(m::MethodList) = size(m.ms) -getindex(m::MethodList, i::Integer) = m.ms[i] - -function MethodList(mt::Core.MethodTable) - ms = Method[] - visit(mt) do m - push!(ms, m) - end - return MethodList(ms, mt) -end - -""" - methods(f, [types], [module]) - -Return the method table for `f`. - -If `types` is specified, return an array of methods whose types match. -If `module` is specified, return an array of methods defined in that module. -A list of modules can also be specified as an array. - -!!! compat "Julia 1.4" - At least Julia 1.4 is required for specifying a module. - -See also: [`which`](@ref), [`@which`](@ref Main.InteractiveUtils.@which) and [`methodswith`](@ref Main.InteractiveUtils.methodswith). 
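A short sketch of the lookup forms just described; the choice of `sum` and `Base` is an arbitrary example:

```julia
methods(sum)                          # every method of sum
methods(sum, (AbstractArray,))        # only methods applicable to one AbstractArray argument
methods(sum, (AbstractArray,), Base)  # additionally restricted to methods defined in Base (Julia >= 1.4)
```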
-""" -function methods(@nospecialize(f), @nospecialize(t), - mod::Union{Tuple{Module},AbstractArray{Module},Nothing}=nothing) - world = get_world_counter() - world == typemax(UInt) && error("code reflection cannot be used from generated functions") - # Lack of specialization => a comprehension triggers too many invalidations via _collect, so collect the methods manually - ms = Method[] - for m in _methods(f, t, -1, world)::Vector - m = m::Core.MethodMatch - (mod === nothing || parentmodule(m.method) ∈ mod) && push!(ms, m.method) - end - MethodList(ms, typeof(f).name.mt) -end -methods(@nospecialize(f), @nospecialize(t), mod::Module) = methods(f, t, (mod,)) - -function methods_including_ambiguous(@nospecialize(f), @nospecialize(t)) - tt = signature_type(f, t) - world = get_world_counter() - world == typemax(UInt) && error("code reflection cannot be used from generated functions") - min = RefValue{UInt}(typemin(UInt)) - max = RefValue{UInt}(typemax(UInt)) - ms = _methods_by_ftype(tt, nothing, -1, world, true, min, max, Ptr{Int32}(C_NULL))::Vector - return MethodList(Method[(m::Core.MethodMatch).method for m in ms], typeof(f).name.mt) -end - -function methods(@nospecialize(f), - mod::Union{Module,AbstractArray{Module},Nothing}=nothing) - # return all matches - return methods(f, Tuple{Vararg{Any}}, mod) -end - function visit(f, mt::Core.MethodTable) mt.defs !== nothing && visit(f, mt.defs) nothing @@ -1560,84 +286,6 @@ struct EmissionParams end end -const SLOT_USED = 0x8 -ast_slotflag(@nospecialize(code), i) = ccall(:jl_ir_slotflag, UInt8, (Any, Csize_t), code, i - 1) - -""" - may_invoke_generator(method, atype, sparams) -> Bool - -Computes whether or not we may invoke the generator for the given `method` on -the given `atype` and `sparams`. For correctness, all generated function are -required to return monotonic answers. However, since we don't expect users to -be able to successfully implement this criterion, we only call generated -functions on concrete types. The one exception to this is that we allow calling -generators with abstract types if the generator does not use said abstract type -(and thus cannot incorrectly use it to break monotonicity). This function -computes whether we are in either of these cases. - -Unlike normal functions, the compilation heuristics still can't generate good dispatch -in some cases, but this may still allow inference not to fall over in some limited cases. -""" -function may_invoke_generator(mi::MethodInstance) - return may_invoke_generator(mi.def::Method, mi.specTypes, mi.sparam_vals) -end -function may_invoke_generator(method::Method, @nospecialize(atype), sparams::SimpleVector) - # If we have complete information, we may always call the generator - isdispatchtuple(atype) && return true - - # We don't have complete information, but it is possible that the generator - # syntactically doesn't make use of the information we don't have. Check - # for that. 
- - # For now, only handle the (common, generated by the frontend case) that the - # generator only has one method - generator = method.generator - isa(generator, Core.GeneratedFunctionStub) || return false - tt = Tuple{typeof(generator.gen), Vararg{Any}} - gen_mthds = _methods_by_ftype(tt, #=lim=#1, method.primary_world) - gen_mthds isa Vector || return false - length(gen_mthds) == 1 || return false - - generator_method = (first(gen_mthds)::Core.MethodMatch).method - nsparams = length(sparams) - isdefined(generator_method, :source) || return false - code = generator_method.source - nslots = ccall(:jl_ir_nslots, Int, (Any,), code) - at = unwrap_unionall(atype) - at isa DataType || return false - (nslots >= 1 + length(sparams) + length(at.parameters)) || return false - - firstarg = 1 - for i = 1:nsparams - if isa(sparams[i], TypeVar) - if (ast_slotflag(code, firstarg + i) & SLOT_USED) != 0 - return false - end - end - end - nargs = Int(method.nargs) - non_va_args = method.isva ? nargs - 1 : nargs - for i = 1:non_va_args - if !isdispatchelem(at.parameters[i]) - if (ast_slotflag(code, firstarg + i + nsparams) & SLOT_USED) != 0 - return false - end - end - end - if method.isva - # If the va argument is used, we need to ensure that all arguments that - # contribute to the va tuple are dispatchelemes - if (ast_slotflag(code, firstarg + nargs + nsparams) & SLOT_USED) != 0 - for i = (non_va_args+1):length(at.parameters) - if !isdispatchelem(at.parameters[i]) - return false - end - end - end - end - return true -end - """ code_typed(f, types; kw...) @@ -1710,7 +358,7 @@ function code_typed_by_type(@nospecialize(tt::Type); optimize::Bool=true, debuginfo::Symbol=:default, world::UInt=get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world)) + interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) (ccall(:jl_is_in_pure_context, Bool, ()) || world == typemax(UInt)) && error("code reflection cannot be used from generated functions") if @isdefined(IRShow) @@ -1722,12 +370,12 @@ function code_typed_by_type(@nospecialize(tt::Type); throw(ArgumentError("'debuginfo' must be either :source or :none")) end tt = to_tuple_type(tt) - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = Compiler.findall(tt, Compiler.method_table(interp)) matches === nothing && raise_match_failure(:code_typed, tt) asts = [] for match in matches.matches match = match::Core.MethodMatch - code = Core.Compiler.typeinf_code(interp, match, optimize) + code = Compiler.typeinf_code(interp, match, optimize) if code === nothing push!(asts, match.method => Any) else @@ -1747,9 +395,9 @@ function get_oc_code_rt(oc::Core.OpaqueClosure, types, optimize::Bool) if isdefined(m, :source) if optimize tt = Tuple{typeof(oc.captures), to_tuple_type(types).parameters...} - mi = Core.Compiler.specialize_method(m, tt, Core.svec()) - interp = Core.Compiler.NativeInterpreter(m.primary_world) - code = Core.Compiler.typeinf_code(interp, mi, optimize) + mi = Compiler.specialize_method(m, tt, Core.svec()) + interp = Compiler.NativeInterpreter(m.primary_world) + code = Compiler.typeinf_code(interp, mi, optimize) if code isa CodeInfo return Pair{CodeInfo, Any}(code, code.rettype) end @@ -1839,18 +487,18 @@ a full signature to query. 
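For orientation, a hedged usage sketch of the two entry points above; both are non-exported internals, so the exact printed form varies between Julia versions:

```julia
# Inspect Core.Compiler IRCode for one concrete signature.
square(x) = x * x

Base.code_ircode(square, (Float64,))                      # convenience form: function plus argument types
Base.code_ircode_by_type(Tuple{typeof(square), Float64})  # the same query phrased as a full signature
# Both accept `optimize_until` to stop the optimization pipeline early at a chosen pass.
```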
function code_ircode_by_type( @nospecialize(tt::Type); world::UInt=get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world), + interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world), optimize_until::Union{Integer,AbstractString,Nothing}=nothing, ) (ccall(:jl_is_in_pure_context, Bool, ()) || world == typemax(UInt)) && error("code reflection cannot be used from generated functions") tt = to_tuple_type(tt) - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = Compiler.findall(tt, Compiler.method_table(interp)) matches === nothing && raise_match_failure(:code_ircode, tt) asts = [] for match in matches.matches match = match::Core.MethodMatch - (code, ty) = Core.Compiler.typeinf_ircode(interp, match, optimize_until) + (code, ty) = Compiler.typeinf_ircode(interp, match, optimize_until) if code === nothing push!(asts, match.method => Any) else @@ -1860,24 +508,24 @@ function code_ircode_by_type( return asts end -function _builtin_return_type(interp::Core.Compiler.AbstractInterpreter, +function _builtin_return_type(interp::Compiler.AbstractInterpreter, @nospecialize(f::Core.Builtin), @nospecialize(types)) argtypes = Any[to_tuple_type(types).parameters...] - rt = Core.Compiler.builtin_tfunction(interp, f, argtypes, nothing) - return Core.Compiler.widenconst(rt) + rt = Compiler.builtin_tfunction(interp, f, argtypes, nothing) + return Compiler.widenconst(rt) end -function _builtin_effects(interp::Core.Compiler.AbstractInterpreter, +function _builtin_effects(interp::Compiler.AbstractInterpreter, @nospecialize(f::Core.Builtin), @nospecialize(types)) argtypes = Any[to_tuple_type(types).parameters...] - rt = Core.Compiler.builtin_tfunction(interp, f, argtypes, nothing) - return Core.Compiler.builtin_effects(Core.Compiler.typeinf_lattice(interp), f, argtypes, rt) + rt = Compiler.builtin_tfunction(interp, f, argtypes, nothing) + return Compiler.builtin_effects(Compiler.typeinf_lattice(interp), f, argtypes, rt) end -function _builtin_exception_type(interp::Core.Compiler.AbstractInterpreter, +function _builtin_exception_type(interp::Compiler.AbstractInterpreter, @nospecialize(f::Core.Builtin), @nospecialize(types)) effects = _builtin_effects(interp, f, types) - return Core.Compiler.is_nothrow(effects) ? Union{} : Any + return Compiler.is_nothrow(effects) ? 
Union{} : Any end check_generated_context(world::UInt) = @@ -1933,7 +581,7 @@ julia> Base.return_types(sum, (Union{Vector{Int},UnitRange{Int}},)) """ function return_types(@nospecialize(f), @nospecialize(types=default_tt(f)); world::UInt=get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world)) + interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) check_generated_context(world) if isa(f, Core.OpaqueClosure) _, rt = only(code_typed_opaque_closure(f, types)) @@ -1942,11 +590,11 @@ function return_types(@nospecialize(f), @nospecialize(types=default_tt(f)); return Any[_builtin_return_type(interp, f, types)] end tt = signature_type(f, types) - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = Compiler.findall(tt, Compiler.method_table(interp)) matches === nothing && raise_match_failure(:return_types, tt) rts = Any[] for match in matches.matches - ty = Core.Compiler.typeinf_type(interp, match::Core.MethodMatch) + ty = Compiler.typeinf_type(interp, match::Core.MethodMatch) push!(rts, something(ty, Any)) end return rts @@ -2001,7 +649,7 @@ On the other hand `Base.infer_return_type` returns one collective result that su """ function infer_return_type(@nospecialize(f), @nospecialize(types=default_tt(f)); world::UInt=get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world)) + interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) check_generated_context(world) if isa(f, Core.OpaqueClosure) return last(only(code_typed_opaque_closure(f, types))) @@ -2009,12 +657,12 @@ function infer_return_type(@nospecialize(f), @nospecialize(types=default_tt(f)); return _builtin_return_type(interp, f, types) end tt = signature_type(f, types) - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = Compiler.findall(tt, Compiler.method_table(interp)) matches === nothing && raise_match_failure(:infer_return_type, tt) rt = Union{} for match in matches.matches - ty = Core.Compiler.typeinf_type(interp, match::Core.MethodMatch) - rt = Core.Compiler.tmerge(rt, something(ty, Any)) + ty = Compiler.typeinf_type(interp, match::Core.MethodMatch) + rt = Compiler.tmerge(rt, something(ty, Any)) end return rt end @@ -2071,7 +719,7 @@ julia> Base.infer_exception_types(throw_if_number, (Any,)) """ function infer_exception_types(@nospecialize(f), @nospecialize(types=default_tt(f)); world::UInt=get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world)) + interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) check_generated_context(world) if isa(f, Core.OpaqueClosure) return Any[Any] # TODO @@ -2079,15 +727,15 @@ function infer_exception_types(@nospecialize(f), @nospecialize(types=default_tt( return Any[_builtin_exception_type(interp, f, types)] end tt = signature_type(f, types) - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = Compiler.findall(tt, Compiler.method_table(interp)) matches === nothing && raise_match_failure(:infer_exception_types, tt) excts = Any[] for match in matches.matches - frame = Core.Compiler.typeinf_frame(interp, match::Core.MethodMatch, #=run_optimizer=#false) + frame = Compiler.typeinf_frame(interp, match::Core.MethodMatch, #=run_optimizer=#false) if frame === nothing exct = Any else - exct = Core.Compiler.widenconst(frame.result.exc_result) + exct = Compiler.widenconst(frame.result.exc_result) end push!(excts, exct) end @@ 
-2150,7 +798,7 @@ signature, the exception type is widened to `MethodError`. """ function infer_exception_type(@nospecialize(f), @nospecialize(types=default_tt(f)); world::UInt=get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world)) + interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) check_generated_context(world) if isa(f, Core.OpaqueClosure) return Any # TODO @@ -2158,18 +806,18 @@ function infer_exception_type(@nospecialize(f), @nospecialize(types=default_tt(f return _builtin_exception_type(interp, f, types) end tt = signature_type(f, types) - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = Compiler.findall(tt, Compiler.method_table(interp)) matches === nothing && raise_match_failure(:infer_exception_type, tt) exct = Union{} if _may_throw_methoderror(matches) # account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. - exct = Core.Compiler.tmerge(exct, MethodError) + exct = Compiler.tmerge(exct, MethodError) end for match in matches.matches match = match::Core.MethodMatch - frame = Core.Compiler.typeinf_frame(interp, match, #=run_optimizer=#false) + frame = Compiler.typeinf_frame(interp, match, #=run_optimizer=#false) frame === nothing && return Any - exct = Core.Compiler.tmerge(exct, Core.Compiler.widenconst(frame.result.exc_result)) + exct = Compiler.tmerge(exct, Compiler.widenconst(frame.result.exc_result)) end return exct end @@ -2236,24 +884,24 @@ signature, the `:nothrow` bit gets tainted. function infer_effects(@nospecialize(f), @nospecialize(types=default_tt(f)); optimize::Bool=true, world::UInt=get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world)) + interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) check_generated_context(world) if isa(f, Core.Builtin) return _builtin_effects(interp, f, types) end tt = signature_type(f, types) - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = Compiler.findall(tt, Compiler.method_table(interp)) matches === nothing && raise_match_failure(:infer_effects, tt) - effects = Core.Compiler.EFFECTS_TOTAL + effects = Compiler.EFFECTS_TOTAL if _may_throw_methoderror(matches) # account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. 
- effects = Core.Compiler.Effects(effects; nothrow=false) + effects = Compiler.Effects(effects; nothrow=false) end for match in matches.matches match = match::Core.MethodMatch - frame = Core.Compiler.typeinf_frame(interp, match, #=run_optimizer=#optimize) - frame === nothing && return Core.Compiler.Effects() - effects = Core.Compiler.merge_effects(effects, frame.result.ipo_effects) + frame = Compiler.typeinf_frame(interp, match, #=run_optimizer=#optimize) + frame === nothing && return Compiler.Effects() + effects = Compiler.merge_effects(effects, frame.result.ipo_effects) end return effects end @@ -2271,24 +919,24 @@ end function print_statement_costs(io::IO, @nospecialize(tt::Type); world::UInt=get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world)) + interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) tt = to_tuple_type(tt) world == typemax(UInt) && error("code reflection cannot be used from generated functions") - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = Compiler.findall(tt, Compiler.method_table(interp)) matches === nothing && raise_match_failure(:print_statement_costs, tt) - params = Core.Compiler.OptimizationParams(interp) + params = Compiler.OptimizationParams(interp) cst = Int[] for match in matches.matches match = match::Core.MethodMatch println(io, match.method) - code = Core.Compiler.typeinf_code(interp, match, true) + code = Compiler.typeinf_code(interp, match, true) if code === nothing println(io, " inference not successful") else empty!(cst) resize!(cst, length(code.code)) - sptypes = Core.Compiler.VarState[Core.Compiler.VarState(sp, false) for sp in match.sparams] - maxcost = Core.Compiler.statement_costs!(cst, code.code, code, sptypes, params) + sptypes = Compiler.VarState[Compiler.VarState(sp, false) for sp in match.sparams] + maxcost = Compiler.statement_costs!(cst, code.code, code, sptypes, params) nd = ndigits(maxcost) irshow_config = IRShow.IRShowConfig() do io, linestart, idx print(io, idx > 0 ? lpad(cst[idx], nd+1) : " "^(nd+1), " ") @@ -2303,18 +951,18 @@ end print_statement_costs(args...; kwargs...) = print_statement_costs(stdout, args...; kwargs...) 
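A small usage sketch for `print_statement_costs`, a non-exported debugging helper whose printed layout is an implementation detail:

```julia
# Print the inliner's per-statement cost estimates next to the optimized IR.
relu(x) = x < zero(x) ? zero(x) : x

Base.print_statement_costs(relu, (Float64,))                     # defaults to stdout
Base.print_statement_costs(stdout, Tuple{typeof(relu), Float64}) # full-signature form
```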
function _which(@nospecialize(tt::Type); - method_table::Union{Nothing,Core.MethodTable,Core.Compiler.MethodTableView}=nothing, + method_table::Union{Nothing,Core.MethodTable,Compiler.MethodTableView}=nothing, world::UInt=get_world_counter(), raise::Bool=true) world == typemax(UInt) && error("code reflection cannot be used from generated functions") if method_table === nothing - table = Core.Compiler.InternalMethodTable(world) + table = Compiler.InternalMethodTable(world) elseif method_table isa Core.MethodTable - table = Core.Compiler.OverlayMethodTable(world, method_table) + table = Compiler.OverlayMethodTable(world, method_table) else table = method_table end - match, = Core.Compiler.findsup(tt, table) + match, = Compiler.findsup(tt, table) if match === nothing raise && error("no unique matching method found for the specified argument types") return nothing @@ -2334,7 +982,7 @@ See also: [`parentmodule`](@ref), [`@which`](@ref Main.InteractiveUtils.@which), function which(@nospecialize(f), @nospecialize(t)) tt = signature_type(f, t) world = get_world_counter() - match, _ = Core.Compiler._findsup(tt, nothing, world) + match, _ = Compiler._findsup(tt, nothing, world) if match === nothing me = MethodError(f, t, world) ee = ErrorException(sprint(io -> begin @@ -2653,92 +1301,6 @@ function isambiguous(m1::Method, m2::Method; ambiguous_bottom::Bool=false) return true end -""" - delete_method(m::Method) - -Make method `m` uncallable and force recompilation of any methods that use(d) it. -""" -function delete_method(m::Method) - ccall(:jl_method_table_disable, Cvoid, (Any, Any), get_methodtable(m), m) -end - -function get_methodtable(m::Method) - mt = ccall(:jl_method_get_table, Any, (Any,), m) - if mt === nothing - return nothing - end - return mt::Core.MethodTable -end - -""" - has_bottom_parameter(t) -> Bool - -Determine whether `t` is a Type for which one or more of its parameters is `Union{}`. -""" -function has_bottom_parameter(t::DataType) - for p in t.parameters - has_bottom_parameter(p) && return true - end - return false -end -has_bottom_parameter(t::typeof(Bottom)) = true -has_bottom_parameter(t::UnionAll) = has_bottom_parameter(unwrap_unionall(t)) -has_bottom_parameter(t::Union) = has_bottom_parameter(t.a) & has_bottom_parameter(t.b) -has_bottom_parameter(t::TypeVar) = has_bottom_parameter(t.ub) -has_bottom_parameter(::Any) = false - -min_world(m::Core.CodeInstance) = m.min_world -max_world(m::Core.CodeInstance) = m.max_world -min_world(m::Core.CodeInfo) = m.min_world -max_world(m::Core.CodeInfo) = m.max_world - -""" - get_world_counter() - -Returns the current maximum world-age counter. This counter is global and monotonically -increasing. -""" -get_world_counter() = ccall(:jl_get_world_counter, UInt, ()) - -""" - tls_world_age() - -Returns the world the [current_task()](@ref) is executing within. -""" -tls_world_age() = ccall(:jl_get_tls_world_age, UInt, ()) - -""" - propertynames(x, private=false) - -Get a tuple or a vector of the properties (`x.property`) of an object `x`. -This is typically the same as [`fieldnames(typeof(x))`](@ref), but types -that overload [`getproperty`](@ref) should generally overload `propertynames` -as well to get the properties of an instance of the type. - -`propertynames(x)` may return only "public" property names that are part -of the documented interface of `x`. If you want it to also return "private" -property names intended for internal use, pass `true` for the optional second argument. 
-REPL tab completion on `x.` shows only the `private=false` properties. - -See also: [`hasproperty`](@ref), [`hasfield`](@ref). -""" -propertynames(x) = fieldnames(typeof(x)) -propertynames(m::Module) = names(m) -propertynames(x, private::Bool) = propertynames(x) # ignore private flag by default -propertynames(x::Array) = () # hide the fields from tab completion to discourage calling `x.size` instead of `size(x)`, even though they are equivalent - -""" - hasproperty(x, s::Symbol) - -Return a boolean indicating whether the object `x` has `s` as one of its own properties. - -!!! compat "Julia 1.2" - This function requires at least Julia 1.2. - -See also: [`propertynames`](@ref), [`hasfield`](@ref). -""" -hasproperty(x, s::Symbol) = s in propertynames(x) - """ @invoke f(arg::T, ...; kwargs...) @@ -2786,7 +1348,7 @@ julia> @macroexpand @invoke (xs::Xs)[i::I] = v::V The additional syntax is supported as of Julia 1.10. """ macro invoke(ex) - topmod = Core.Compiler._topmod(__module__) # well, except, do not get it via CC but define it locally + topmod = Compiler._topmod(__module__) # well, except, do not get it via CC but define it locally f, args, kwargs = destructure_callex(topmod, ex) types = Expr(:curly, :Tuple) out = Expr(:call, GlobalRef(Core, :invoke)) @@ -2845,7 +1407,7 @@ julia> @macroexpand @invokelatest xs[i] = v The additional `x.f` and `xs[i]` syntax requires Julia 1.10. """ macro invokelatest(ex) - topmod = Core.Compiler._topmod(__module__) # well, except, do not get it via CC but define it locally + topmod = Compiler._topmod(__module__) # well, except, do not get it via CC but define it locally f, args, kwargs = destructure_callex(topmod, ex) out = Expr(:call, GlobalRef(Base, :invokelatest)) isempty(kwargs) || push!(out.args, Expr(:parameters, kwargs...)) @@ -2902,23 +1464,3 @@ function destructure_callex(topmod::Module, @nospecialize(ex)) end return f, args, kwargs end - -""" - Base.generating_output([incremental::Bool])::Bool - -Return `true` if the current process is being used to pre-generate a -code cache via any of the `--output-*` command line arguments. The optional -`incremental` argument further specifies the precompilation mode: when set -to `true`, the function will return `true` only for package precompilation; -when set to `false`, it will return `true` only for system image generation. - -!!! compat "Julia 1.11" - This function requires at least Julia 1.11. -""" -function generating_output(incremental::Union{Bool,Nothing}=nothing) - ccall(:jl_generating_output, Cint, ()) == 0 && return false - if incremental !== nothing - JLOptions().incremental == incremental || return false - end - return true -end diff --git a/base/runtime_internals.jl b/base/runtime_internals.jl new file mode 100644 index 0000000000000..645aa55c538b4 --- /dev/null +++ b/base/runtime_internals.jl @@ -0,0 +1,1530 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +# name and module reflection + +""" + parentmodule(m::Module) -> Module + +Get a module's enclosing `Module`. `Main` is its own parent. + +See also: [`names`](@ref), [`nameof`](@ref), [`fullname`](@ref), [`@__MODULE__`](@ref). 
+ +# Examples +```jldoctest +julia> parentmodule(Main) +Main + +julia> parentmodule(Base.Broadcast) +Base +``` +""" +parentmodule(m::Module) = (@_total_meta; ccall(:jl_module_parent, Ref{Module}, (Any,), m)) + +is_root_module(m::Module) = parentmodule(m) === m || (isdefined(Main, :Base) && m === Main.Base) + +""" + moduleroot(m::Module) -> Module + +Find the root module of a given module. This is the first module in the chain of +parent modules of `m` which is either a registered root module or which is its +own parent module. +""" +function moduleroot(m::Module) + @_total_meta + while true + is_root_module(m) && return m + p = parentmodule(m) + p === m && return m + m = p + end +end + +""" + @__MODULE__ -> Module + +Get the `Module` of the toplevel eval, +which is the `Module` code is currently being read from. +""" +macro __MODULE__() + return __module__ +end + +""" + fullname(m::Module) + +Get the fully-qualified name of a module as a tuple of symbols. For example, + +# Examples +```jldoctest +julia> fullname(Base.Iterators) +(:Base, :Iterators) + +julia> fullname(Main) +(:Main,) +``` +""" +function fullname(m::Module) + @_total_meta + mn = nameof(m) + if m === Main || m === Base || m === Core + return (mn,) + end + mp = parentmodule(m) + if mp === m + return (mn,) + end + return (fullname(mp)..., mn) +end + +""" + moduleloc(m::Module) -> LineNumberNode + +Get the location of the `module` definition. +""" +function moduleloc(m::Module) + line = Ref{Int32}(0) + file = ccall(:jl_module_getloc, Ref{Symbol}, (Any, Ref{Int32}), m, line) + return LineNumberNode(Int(line[]), file) +end + +""" + names(x::Module; all::Bool=false, imported::Bool=false, usings::Bool=false) -> Vector{Symbol} + +Get a vector of the public names of a `Module`, excluding deprecated names. +If `all` is true, then the list also includes non-public names defined in the module, +deprecated names, and compiler-generated names. +If `imported` is true, then names explicitly imported from other modules +are also included. +If `usings` is true, then names explicitly imported via `using` are also included. +Names are returned in sorted order. + +As a special case, all names defined in `Main` are considered \"public\", +since it is not idiomatic to explicitly mark names from `Main` as public. + +!!! note + `sym ∈ names(SomeModule)` does *not* imply `isdefined(SomeModule, sym)`. + `names` may return symbols marked with `public` or `export`, even if + they are not defined in the module. + +!!! warning + `names` may return duplicate names. The duplication happens, e.g. if an `import`ed name + conflicts with an already existing identifier. + +See also: [`Base.isexported`](@ref), [`Base.ispublic`](@ref), [`Base.@locals`](@ref), [`@__MODULE__`](@ref). +""" +names(m::Module; kwargs...) = sort!(unsorted_names(m; kwargs...)) +unsorted_names(m::Module; all::Bool=false, imported::Bool=false, usings::Bool=false) = + ccall(:jl_module_names, Array{Symbol,1}, (Any, Cint, Cint, Cint), m, all, imported, usings) + +""" + isexported(m::Module, s::Symbol) -> Bool + +Returns whether a symbol is exported from a module. 
+ +See also: [`ispublic`](@ref), [`names`](@ref) + +```jldoctest +julia> module Mod + export foo + public bar + end +Mod + +julia> Base.isexported(Mod, :foo) +true + +julia> Base.isexported(Mod, :bar) +false + +julia> Base.isexported(Mod, :baz) +false +``` +""" +isexported(m::Module, s::Symbol) = ccall(:jl_module_exports_p, Cint, (Any, Any), m, s) != 0 + +""" + ispublic(m::Module, s::Symbol) -> Bool + +Returns whether a symbol is marked as public in a module. + +Exported symbols are considered public. + +!!! compat "Julia 1.11" + This function and the notion of publicity were added in Julia 1.11. + +See also: [`isexported`](@ref), [`names`](@ref) + +```jldoctest +julia> module Mod + export foo + public bar + end +Mod + +julia> Base.ispublic(Mod, :foo) +true + +julia> Base.ispublic(Mod, :bar) +true + +julia> Base.ispublic(Mod, :baz) +false +``` +""" +ispublic(m::Module, s::Symbol) = ccall(:jl_module_public_p, Cint, (Any, Any), m, s) != 0 + +# TODO: this is vaguely broken because it only works for explicit calls to +# `Base.deprecate`, not the @deprecated macro: +isdeprecated(m::Module, s::Symbol) = ccall(:jl_is_binding_deprecated, Cint, (Any, Any), m, s) != 0 + +""" + isbindingresolved(m::Module, s::Symbol) -> Bool + +Returns whether the binding of a symbol in a module is resolved. + +See also: [`isexported`](@ref), [`ispublic`](@ref), [`isdeprecated`](@ref) + +```jldoctest +julia> module Mod + foo() = 17 + end +Mod + +julia> Base.isbindingresolved(Mod, :foo) +true + +julia> Base.isbindingresolved(Mod, :bar) +false +``` +""" +isbindingresolved(m::Module, var::Symbol) = ccall(:jl_binding_resolved_p, Cint, (Any, Any), m, var) != 0 + +function binding_module(m::Module, s::Symbol) + p = ccall(:jl_get_module_of_binding, Ptr{Cvoid}, (Any, Any), m, s) + p == C_NULL && return m + return unsafe_pointer_to_objref(p)::Module +end + +const _NAMEDTUPLE_NAME = NamedTuple.body.body.name + +function _fieldnames(@nospecialize t) + if t.name === _NAMEDTUPLE_NAME + if t.parameters[1] isa Tuple + return t.parameters[1] + else + throw(ArgumentError("type does not have definite field names")) + end + end + return t.name.names +end + +const BINDING_KIND_GLOBAL = 0x0 +const BINDING_KIND_CONST = 0x1 +const BINDING_KIND_CONST_IMPORT = 0x2 +const BINDING_KIND_IMPLICIT = 0x3 +const BINDING_KIND_EXPLICIT = 0x4 +const BINDING_KIND_IMPORTED = 0x5 +const BINDING_KIND_FAILED = 0x6 +const BINDING_KIND_DECLARED = 0x7 +const BINDING_KIND_GUARD = 0x8 + +function lookup_binding_partition(world::UInt, b::Core.Binding) + ccall(:jl_get_binding_partition, Ref{Core.BindingPartition}, (Any, UInt), b, world) +end + +function lookup_binding_partition(world::UInt, gr::Core.GlobalRef) + ccall(:jl_get_globalref_partition, Ref{Core.BindingPartition}, (Any, UInt), gr, world) +end + +binding_kind(bpart::Core.BindingPartition) = ccall(:jl_bpart_get_kind, UInt8, (Any,), bpart) +binding_kind(m::Module, s::Symbol) = binding_kind(lookup_binding_partition(tls_world_age(), GlobalRef(m, s))) + +""" + fieldname(x::DataType, i::Integer) + +Get the name of field `i` of a `DataType`. + +The return type is `Symbol`, except when `x <: Tuple`, in which case the index of the field is returned, of type `Int`. 
+ +# Examples +```jldoctest +julia> fieldname(Rational, 1) +:num + +julia> fieldname(Rational, 2) +:den + +julia> fieldname(Tuple{String,Int}, 2) +2 +``` +""" +function fieldname(t::DataType, i::Integer) + throw_not_def_field() = throw(ArgumentError("type does not have definite field names")) + function throw_field_access(t, i, n_fields) + field_label = n_fields == 1 ? "field" : "fields" + throw(ArgumentError("Cannot access field $i since type $t only has $n_fields $field_label.")) + end + throw_need_pos_int(i) = throw(ArgumentError("Field numbers must be positive integers. $i is invalid.")) + + isabstracttype(t) && throw_not_def_field() + names = _fieldnames(t) + n_fields = length(names)::Int + i > n_fields && throw_field_access(t, i, n_fields) + i < 1 && throw_need_pos_int(i) + return @inbounds names[i]::Symbol +end + +fieldname(t::UnionAll, i::Integer) = fieldname(unwrap_unionall(t), i) +fieldname(t::Type{<:Tuple}, i::Integer) = + i < 1 || i > fieldcount(t) ? throw(BoundsError(t, i)) : Int(i) + +""" + fieldnames(x::DataType) + +Get a tuple with the names of the fields of a `DataType`. + +Each name is a `Symbol`, except when `x <: Tuple`, in which case each name (actually the +index of the field) is an `Int`. + +See also [`propertynames`](@ref), [`hasfield`](@ref). + +# Examples +```jldoctest +julia> fieldnames(Rational) +(:num, :den) + +julia> fieldnames(typeof(1+im)) +(:re, :im) + +julia> fieldnames(Tuple{String,Int}) +(1, 2) +``` +""" +fieldnames(t::DataType) = (fieldcount(t); # error check to make sure type is specific enough + (_fieldnames(t)...,))::Tuple{Vararg{Symbol}} +fieldnames(t::UnionAll) = fieldnames(unwrap_unionall(t)) +fieldnames(::Core.TypeofBottom) = + throw(ArgumentError("The empty type does not have field names since it does not have instances.")) +fieldnames(t::Type{<:Tuple}) = ntuple(identity, fieldcount(t)) + +""" + hasfield(T::Type, name::Symbol) + +Return a boolean indicating whether `T` has `name` as one of its own fields. + +See also [`fieldnames`](@ref), [`fieldcount`](@ref), [`hasproperty`](@ref). + +!!! compat "Julia 1.2" + This function requires at least Julia 1.2. + +# Examples +```jldoctest +julia> struct Foo + bar::Int + end + +julia> hasfield(Foo, :bar) +true + +julia> hasfield(Foo, :x) +false +``` +""" +hasfield(T::Type, name::Symbol) = fieldindex(T, name, false) > 0 + +""" + nameof(t::DataType) -> Symbol + +Get the name of a (potentially `UnionAll`-wrapped) `DataType` (without its parent module) +as a symbol. + +# Examples +```jldoctest +julia> module Foo + struct S{T} + end + end +Foo + +julia> nameof(Foo.S{T} where T) +:S +``` +""" +nameof(t::DataType) = t.name.name +nameof(t::UnionAll) = nameof(unwrap_unionall(t))::Symbol + +""" + parentmodule(t::DataType) -> Module + +Determine the module containing the definition of a (potentially `UnionAll`-wrapped) `DataType`. + +# Examples +```jldoctest +julia> module Foo + struct Int end + end +Foo + +julia> parentmodule(Int) +Core + +julia> parentmodule(Foo.Int) +Foo +``` +""" +parentmodule(t::DataType) = t.name.module +parentmodule(t::UnionAll) = parentmodule(unwrap_unionall(t)) + +""" + isconst(m::Module, s::Symbol) -> Bool + +Determine whether a global is declared `const` in a given module `m`. 
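An illustrative sketch of the module form of `isconst`, assuming a throwaway module `M` defined only for the example:

```julia
module M
    const A = 1   # declared const
    B = 2         # plain global binding
end

isconst(M, :A)  # true
isconst(M, :B)  # false
```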
+""" +isconst(m::Module, s::Symbol) = + ccall(:jl_is_const, Cint, (Any, Any), m, s) != 0 + +function isconst(g::GlobalRef) + return ccall(:jl_globalref_is_const, Cint, (Any,), g) != 0 +end + +""" + isconst(t::DataType, s::Union{Int,Symbol}) -> Bool + +Determine whether a field `s` is declared `const` in a given type `t`. +""" +function isconst(@nospecialize(t::Type), s::Symbol) + @_foldable_meta + t = unwrap_unionall(t) + isa(t, DataType) || return false + return isconst(t, fieldindex(t, s, false)) +end +function isconst(@nospecialize(t::Type), s::Int) + @_foldable_meta + t = unwrap_unionall(t) + # TODO: what to do for `Union`? + isa(t, DataType) || return false # uncertain + ismutabletype(t) || return true # immutable structs are always const + 1 <= s <= length(t.name.names) || return true # OOB reads are "const" since they always throw + constfields = t.name.constfields + constfields === C_NULL && return false + s -= 1 + return unsafe_load(Ptr{UInt32}(constfields), 1 + s÷32) & (1 << (s%32)) != 0 +end + +""" + isfieldatomic(t::DataType, s::Union{Int,Symbol}) -> Bool + +Determine whether a field `s` is declared `@atomic` in a given type `t`. +""" +function isfieldatomic(@nospecialize(t::Type), s::Symbol) + @_foldable_meta + t = unwrap_unionall(t) + isa(t, DataType) || return false + return isfieldatomic(t, fieldindex(t, s, false)) +end +function isfieldatomic(@nospecialize(t::Type), s::Int) + @_foldable_meta + t = unwrap_unionall(t) + # TODO: what to do for `Union`? + isa(t, DataType) || return false # uncertain + ismutabletype(t) || return false # immutable structs are never atomic + 1 <= s <= length(t.name.names) || return false # OOB reads are not atomic (they always throw) + atomicfields = t.name.atomicfields + atomicfields === C_NULL && return false + s -= 1 + return unsafe_load(Ptr{UInt32}(atomicfields), 1 + s÷32) & (1 << (s%32)) != 0 +end + +""" + @locals() + +Construct a dictionary of the names (as symbols) and values of all local +variables defined as of the call site. + +!!! compat "Julia 1.1" + This macro requires at least Julia 1.1. + +# Examples +```jldoctest +julia> let x = 1, y = 2 + Base.@locals + end +Dict{Symbol, Any} with 2 entries: + :y => 2 + :x => 1 + +julia> function f(x) + local y + show(Base.@locals); println() + for i = 1:1 + show(Base.@locals); println() + end + y = 2 + show(Base.@locals); println() + nothing + end; + +julia> f(42) +Dict{Symbol, Any}(:x => 42) +Dict{Symbol, Any}(:i => 1, :x => 42) +Dict{Symbol, Any}(:y => 2, :x => 42) +``` +""" +macro locals() + return Expr(:locals) +end + +# concrete datatype predicates + +datatype_fieldtypes(x::DataType) = ccall(:jl_get_fieldtypes, Core.SimpleVector, (Any,), x) + +struct DataTypeLayout + size::UInt32 + nfields::UInt32 + npointers::UInt32 + firstptr::Int32 + alignment::UInt16 + flags::UInt16 + # haspadding : 1; + # fielddesc_type : 2; + # arrayelem_isboxed : 1; + # arrayelem_isunion : 1; +end + +""" + Base.datatype_alignment(dt::DataType) -> Int + +Memory allocation minimum alignment for instances of this type. +Can be called on any `isconcretetype`, although for Memory it will give the +alignment of the elements, not the whole object. 
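A sketch of typical results for these layout queries on a 64-bit platform; they are internal helpers and the exact numbers are platform- and version-dependent:

```julia
Base.datatype_alignment(ComplexF64)   # 8, the alignment of the Float64 fields
Base.datatype_haspadding(ComplexF64)  # false, two tightly packed Float64s
Base.aligned_sizeof(ComplexF64)       # 16, the size as stored in a container
```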
+""" +function datatype_alignment(dt::DataType) + @_foldable_meta + dt.layout == C_NULL && throw(UndefRefError()) + alignment = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).alignment + return Int(alignment) +end + +function uniontype_layout(@nospecialize T::Type) + sz = RefValue{Csize_t}(0) + algn = RefValue{Csize_t}(0) + isinline = ccall(:jl_islayout_inline, Cint, (Any, Ptr{Csize_t}, Ptr{Csize_t}), T, sz, algn) != 0 + (isinline, Int(sz[]), Int(algn[])) +end + +LLT_ALIGN(x, sz) = (x + sz - 1) & -sz + +# amount of total space taken by T when stored in a container +function aligned_sizeof(@nospecialize T::Type) + @_foldable_meta + if isa(T, Union) + if allocatedinline(T) + # NOTE this check is equivalent to `isbitsunion(T)`, we can improve type + # inference in the second branch with the outer `isa(T, Union)` check + _, sz, al = uniontype_layout(T) + return LLT_ALIGN(sz, al) + end + elseif allocatedinline(T) + al = datatype_alignment(T) + return LLT_ALIGN(Core.sizeof(T), al) + end + return Core.sizeof(Ptr{Cvoid}) +end + +gc_alignment(sz::Integer) = Int(ccall(:jl_alignment, Cint, (Csize_t,), sz)) +gc_alignment(T::Type) = gc_alignment(Core.sizeof(T)) + +""" + Base.datatype_haspadding(dt::DataType) -> Bool + +Return whether the fields of instances of this type are packed in memory, +with no intervening padding bits (defined as bits whose value does not impact +the semantic value of the instance itself). +Can be called on any `isconcretetype`. +""" +function datatype_haspadding(dt::DataType) + @_foldable_meta + dt.layout == C_NULL && throw(UndefRefError()) + flags = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).flags + return flags & 1 == 1 +end + +""" + Base.datatype_isbitsegal(dt::DataType) -> Bool + +Return whether egality of the (non-padding bits of the) in-memory representation +of an instance of this type implies semantic egality of the instance itself. +This may not be the case if the type contains to other values whose egality is +independent of their identity (e.g. immutable structs, some types, etc.). +""" +function datatype_isbitsegal(dt::DataType) + @_foldable_meta + dt.layout == C_NULL && throw(UndefRefError()) + flags = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).flags + return (flags & (1<<5)) != 0 +end + +""" + Base.datatype_nfields(dt::DataType) -> UInt32 + +Return the number of fields known to this datatype's layout. This may be +different from the number of actual fields of the type for opaque types. +Can be called on any `isconcretetype`. +""" +function datatype_nfields(dt::DataType) + @_foldable_meta + dt.layout == C_NULL && throw(UndefRefError()) + return unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).nfields +end + +""" + Base.datatype_npointers(dt::DataType) -> Int + +Return the number of pointers in the layout of a datatype. +""" +function datatype_npointers(dt::DataType) + @_foldable_meta + dt.layout == C_NULL && throw(UndefRefError()) + return unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).npointers +end + +""" + Base.datatype_pointerfree(dt::DataType) -> Bool + +Return whether instances of this type can contain references to gc-managed memory. +Can be called on any `isconcretetype`. +""" +function datatype_pointerfree(dt::DataType) + @_foldable_meta + return datatype_npointers(dt) == 0 +end + +""" + Base.datatype_fielddesc_type(dt::DataType) -> Int + +Return the size in bytes of each field-description entry in the layout array, +located at `(dt.layout + sizeof(DataTypeLayout))`. +Can be called on any `isconcretetype`. 
+ +See also [`fieldoffset`](@ref). +""" +function datatype_fielddesc_type(dt::DataType) + @_foldable_meta + dt.layout == C_NULL && throw(UndefRefError()) + flags = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).flags + return (flags >> 1) & 3 +end + +""" + Base.datatype_arrayelem(dt::DataType) -> Int + +Return the behavior of the trailing array types allocations. +Can be called on any `isconcretetype`, but only meaningful on `Memory`. + +0 = inlinealloc +1 = isboxed +2 = isbitsunion +""" +function datatype_arrayelem(dt::DataType) + @_foldable_meta + dt.layout == C_NULL && throw(UndefRefError()) + flags = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).flags + return (flags >> 3) & 3 +end + +function datatype_layoutsize(dt::DataType) + @_foldable_meta + dt.layout == C_NULL && throw(UndefRefError()) + size = unsafe_load(convert(Ptr{DataTypeLayout}, dt.layout)).size + return size % Int +end + + +# For type stability, we only expose a single struct that describes everything +struct FieldDesc + isforeign::Bool + isptr::Bool + size::UInt32 + offset::UInt32 +end + +struct FieldDescStorage{T} + ptrsize::T + offset::T +end +FieldDesc(fd::FieldDescStorage{T}) where {T} = + FieldDesc(false, fd.ptrsize & 1 != 0, + fd.ptrsize >> 1, fd.offset) + +struct DataTypeFieldDesc + dt::DataType + function DataTypeFieldDesc(dt::DataType) + dt.layout == C_NULL && throw(UndefRefError()) + new(dt) + end +end + +function getindex(dtfd::DataTypeFieldDesc, i::Int) + layout_ptr = convert(Ptr{DataTypeLayout}, dtfd.dt.layout) + fd_ptr = layout_ptr + Core.sizeof(DataTypeLayout) + layout = unsafe_load(layout_ptr) + fielddesc_type = (layout.flags >> 1) & 3 + nfields = layout.nfields + @boundscheck ((1 <= i <= nfields) || throw(BoundsError(dtfd, i))) + if fielddesc_type == 0 + return FieldDesc(unsafe_load(Ptr{FieldDescStorage{UInt8}}(fd_ptr), i)) + elseif fielddesc_type == 1 + return FieldDesc(unsafe_load(Ptr{FieldDescStorage{UInt16}}(fd_ptr), i)) + elseif fielddesc_type == 2 + return FieldDesc(unsafe_load(Ptr{FieldDescStorage{UInt32}}(fd_ptr), i)) + else + # fielddesc_type == 3 + return FieldDesc(true, true, 0, 0) + end +end + +""" + ismutable(v) -> Bool + +Return `true` if and only if value `v` is mutable. See [Mutable Composite Types](@ref) +for a discussion of immutability. Note that this function works on values, so if you +give it a `DataType`, it will tell you that a value of the type is mutable. + +!!! note + For technical reasons, `ismutable` returns `true` for values of certain special types + (for example `String` and `Symbol`) even though they cannot be mutated in a permissible way. + +See also [`isbits`](@ref), [`isstructtype`](@ref). + +# Examples +```jldoctest +julia> ismutable(1) +false + +julia> ismutable([1,2]) +true +``` + +!!! compat "Julia 1.5" + This function requires at least Julia 1.5. +""" +ismutable(@nospecialize(x)) = (@_total_meta; (typeof(x).name::Core.TypeName).flags & 0x2 == 0x2) +# The type assertion above is required to fix some invalidations. +# See also https://github.com/JuliaLang/julia/issues/52134 + +""" + ismutabletype(T) -> Bool + +Determine whether type `T` was declared as a mutable type +(i.e. using `mutable struct` keyword). +If `T` is not a type, then return `false`. + +!!! compat "Julia 1.7" + This function requires at least Julia 1.7. +""" +function ismutabletype(@nospecialize t) + @_total_meta + t = unwrap_unionall(t) + # TODO: what to do for `Union`? 
+ return isa(t, DataType) && ismutabletypename(t.name) +end + +ismutabletypename(tn::Core.TypeName) = tn.flags & 0x2 == 0x2 + +""" + isstructtype(T) -> Bool + +Determine whether type `T` was declared as a struct type +(i.e. using the `struct` or `mutable struct` keyword). +If `T` is not a type, then return `false`. +""" +function isstructtype(@nospecialize t) + @_total_meta + t = unwrap_unionall(t) + # TODO: what to do for `Union`? + isa(t, DataType) || return false + return !isprimitivetype(t) && !isabstracttype(t) +end + +""" + isprimitivetype(T) -> Bool + +Determine whether type `T` was declared as a primitive type +(i.e. using the `primitive type` syntax). +If `T` is not a type, then return `false`. +""" +function isprimitivetype(@nospecialize t) + @_total_meta + t = unwrap_unionall(t) + # TODO: what to do for `Union`? + isa(t, DataType) || return false + return (t.flags & 0x0080) == 0x0080 +end + +""" + isbitstype(T) + +Return `true` if type `T` is a "plain data" type, +meaning it is immutable and contains no references to other values, +only `primitive` types and other `isbitstype` types. +Typical examples are numeric types such as [`UInt8`](@ref), +[`Float64`](@ref), and [`Complex{Float64}`](@ref). +This category of types is significant since they are valid as type parameters, +may not track [`isdefined`](@ref) / [`isassigned`](@ref) status, +and have a defined layout that is compatible with C. +If `T` is not a type, then return `false`. + +See also [`isbits`](@ref), [`isprimitivetype`](@ref), [`ismutable`](@ref). + +# Examples +```jldoctest +julia> isbitstype(Complex{Float64}) +true + +julia> isbitstype(Complex) +false +``` +""" +isbitstype(@nospecialize t) = (@_total_meta; isa(t, DataType) && (t.flags & 0x0008) == 0x0008) + +""" + isbits(x) + +Return `true` if `x` is an instance of an [`isbitstype`](@ref) type. +""" +isbits(@nospecialize x) = isbitstype(typeof(x)) + +""" + objectid(x) -> UInt + +Get a hash value for `x` based on object identity. + +If `x === y` then `objectid(x) == objectid(y)`, and usually when `x !== y`, `objectid(x) != objectid(y)`. + +See also [`hash`](@ref), [`IdDict`](@ref). +""" +function objectid(@nospecialize(x)) + @_total_meta + return ccall(:jl_object_id, UInt, (Any,), x) +end + +""" + isdispatchtuple(T) + +Determine whether type `T` is a tuple "leaf type", +meaning it could appear as a type signature in dispatch +and has no subtypes (or supertypes) which could appear in a call. +If `T` is not a type, then return `false`. +""" +isdispatchtuple(@nospecialize(t)) = (@_total_meta; isa(t, DataType) && (t.flags & 0x0004) == 0x0004) + +datatype_ismutationfree(dt::DataType) = (@_total_meta; (dt.flags & 0x0100) == 0x0100) + +""" + Base.ismutationfree(T) + +Determine whether type `T` is mutation free in the sense that no mutable memory +is reachable from this type (either in the type itself) or through any fields. +Note that the type itself need not be immutable. For example, an empty mutable +type is `ismutabletype`, but also `ismutationfree`. +If `T` is not a type, then return `false`. +""" +function ismutationfree(@nospecialize(t)) + t = unwrap_unionall(t) + if isa(t, DataType) + return datatype_ismutationfree(t) + elseif isa(t, Union) + return ismutationfree(t.a) && ismutationfree(t.b) + end + # TypeVar, etc. 
+ return false +end + +datatype_isidentityfree(dt::DataType) = (@_total_meta; (dt.flags & 0x0200) == 0x0200) + +""" + Base.isidentityfree(T) + +Determine whether type `T` is identity free in the sense that this type or any +reachable through its fields has non-content-based identity. +If `T` is not a type, then return `false`. +""" +function isidentityfree(@nospecialize(t)) + t = unwrap_unionall(t) + if isa(t, DataType) + return datatype_isidentityfree(t) + elseif isa(t, Union) + return isidentityfree(t.a) && isidentityfree(t.b) + end + # TypeVar, etc. + return false +end + +iskindtype(@nospecialize t) = (t === DataType || t === UnionAll || t === Union || t === typeof(Bottom)) +isconcretedispatch(@nospecialize t) = isconcretetype(t) && !iskindtype(t) + +using Core: has_free_typevars + +# equivalent to isa(v, Type) && isdispatchtuple(Tuple{v}) || v === Union{} +# and is thus perhaps most similar to the old (pre-1.0) `isleaftype` query +function isdispatchelem(@nospecialize v) + return (v === Bottom) || (v === typeof(Bottom)) || isconcretedispatch(v) || + (isType(v) && !has_free_typevars(v)) +end + +const _TYPE_NAME = Type.body.name +isType(@nospecialize t) = isa(t, DataType) && t.name === _TYPE_NAME + +""" + isconcretetype(T) + +Determine whether type `T` is a concrete type, meaning it could have direct instances +(values `x` such that `typeof(x) === T`). +Note that this is not the negation of `isabstracttype(T)`. +If `T` is not a type, then return `false`. + +See also: [`isbits`](@ref), [`isabstracttype`](@ref), [`issingletontype`](@ref). + +# Examples +```jldoctest +julia> isconcretetype(Complex) +false + +julia> isconcretetype(Complex{Float32}) +true + +julia> isconcretetype(Vector{Complex}) +true + +julia> isconcretetype(Vector{Complex{Float32}}) +true + +julia> isconcretetype(Union{}) +false + +julia> isconcretetype(Union{Int,String}) +false +``` +""" +isconcretetype(@nospecialize(t)) = (@_total_meta; isa(t, DataType) && (t.flags & 0x0002) == 0x0002) + +""" + isabstracttype(T) + +Determine whether type `T` was declared as an abstract type +(i.e. using the `abstract type` syntax). +Note that this is not the negation of `isconcretetype(T)`. +If `T` is not a type, then return `false`. + +# Examples +```jldoctest +julia> isabstracttype(AbstractArray) +true + +julia> isabstracttype(Vector) +false +``` +""" +function isabstracttype(@nospecialize(t)) + @_total_meta + t = unwrap_unionall(t) + # TODO: what to do for `Union`? + return isa(t, DataType) && (t.name.flags & 0x1) == 0x1 +end + +function is_datatype_layoutopaque(dt::DataType) + datatype_nfields(dt) == 0 && !datatype_pointerfree(dt) +end + +function is_valid_intrinsic_elptr(@nospecialize(ety)) + ety === Any && return true + isconcretetype(ety) || return false + ety <: Array && return false + return !is_datatype_layoutopaque(ety) +end + +""" + Base.issingletontype(T) + +Determine whether type `T` has exactly one possible instance; for example, a +struct type with no fields except other singleton values. +If `T` is not a concrete type, then return `false`. +""" +issingletontype(@nospecialize(t)) = (@_total_meta; isa(t, DataType) && isdefined(t, :instance) && datatype_layoutsize(t) == 0 && datatype_pointerfree(t)) + +""" + typeintersect(T::Type, S::Type) + +Compute a type that contains the intersection of `T` and `S`. Usually this will be the +smallest such type or one close to it. + +A special case where exact behavior is guaranteed: when `T <: S`, +`typeintersect(S, T) == T == typeintersect(T, S)`. 
+""" +typeintersect(@nospecialize(a), @nospecialize(b)) = (@_total_meta; ccall(:jl_type_intersection, Any, (Any, Any), a::Type, b::Type)) + +morespecific(@nospecialize(a), @nospecialize(b)) = (@_total_meta; ccall(:jl_type_morespecific, Cint, (Any, Any), a::Type, b::Type) != 0) +morespecific(a::Method, b::Method) = ccall(:jl_method_morespecific, Cint, (Any, Any), a, b) != 0 + +""" + fieldoffset(type, i) + +The byte offset of field `i` of a type relative to the data start. For example, we could +use it in the following manner to summarize information about a struct: + +```jldoctest +julia> structinfo(T) = [(fieldoffset(T,i), fieldname(T,i), fieldtype(T,i)) for i = 1:fieldcount(T)]; + +julia> structinfo(Base.Filesystem.StatStruct) +14-element Vector{Tuple{UInt64, Symbol, Type}}: + (0x0000000000000000, :desc, Union{RawFD, String}) + (0x0000000000000008, :device, UInt64) + (0x0000000000000010, :inode, UInt64) + (0x0000000000000018, :mode, UInt64) + (0x0000000000000020, :nlink, Int64) + (0x0000000000000028, :uid, UInt64) + (0x0000000000000030, :gid, UInt64) + (0x0000000000000038, :rdev, UInt64) + (0x0000000000000040, :size, Int64) + (0x0000000000000048, :blksize, Int64) + (0x0000000000000050, :blocks, Int64) + (0x0000000000000058, :mtime, Float64) + (0x0000000000000060, :ctime, Float64) + (0x0000000000000068, :ioerrno, Int32) +``` +""" +fieldoffset(x::DataType, idx::Integer) = (@_foldable_meta; ccall(:jl_get_field_offset, Csize_t, (Any, Cint), x, idx)) + +""" + fieldtype(T, name::Symbol | index::Int) + +Determine the declared type of a field (specified by name or index) in a composite DataType `T`. + +# Examples +```jldoctest +julia> struct Foo + x::Int64 + y::String + end + +julia> fieldtype(Foo, :x) +Int64 + +julia> fieldtype(Foo, 2) +String +``` +""" +fieldtype + +""" + Base.fieldindex(T, name::Symbol, err:Bool=true) + +Get the index of a named field, throwing an error if the field does not exist (when err==true) +or returning 0 (when err==false). + +# Examples +```jldoctest +julia> struct Foo + x::Int64 + y::String + end + +julia> Base.fieldindex(Foo, :z) +ERROR: FieldError: type Foo has no field `z`, available fields: `x`, `y` +Stacktrace: +[...] + +julia> Base.fieldindex(Foo, :z, false) +0 +``` +""" +function fieldindex(T::DataType, name::Symbol, err::Bool=true) + return err ? 
_fieldindex_maythrow(T, name) : _fieldindex_nothrow(T, name) +end + +function _fieldindex_maythrow(T::DataType, name::Symbol) + @_foldable_meta + @noinline + return Int(ccall(:jl_field_index, Cint, (Any, Any, Cint), T, name, true)+1) +end + +function _fieldindex_nothrow(T::DataType, name::Symbol) + @_total_meta + @noinline + return Int(ccall(:jl_field_index, Cint, (Any, Any, Cint), T, name, false)+1) +end + +function fieldindex(t::UnionAll, name::Symbol, err::Bool=true) + t = argument_datatype(t) + if t === nothing + err && throw(ArgumentError("type does not have definite fields")) + return 0 + end + return fieldindex(t, name, err) +end + +function argument_datatype(@nospecialize t) + @_total_meta + @noinline + return ccall(:jl_argument_datatype, Any, (Any,), t)::Union{Nothing,DataType} +end + +function datatype_fieldcount(t::DataType) + if t.name === _NAMEDTUPLE_NAME + names, types = t.parameters[1], t.parameters[2] + if names isa Tuple + return length(names) + end + if types isa DataType && types <: Tuple + return fieldcount(types) + end + return nothing + elseif isabstracttype(t) + return nothing + end + if t.name === Tuple.name + isvatuple(t) && return nothing + return length(t.types) + end + # Equivalent to length(t.types), but `t.types` is lazy and we do not want + # to be forced to compute it. + return length(t.name.names) +end + +""" + fieldcount(t::Type) + +Get the number of fields that an instance of the given type would have. +An error is thrown if the type is too abstract to determine this. +""" +function fieldcount(@nospecialize t) + @_foldable_meta + if t isa UnionAll || t isa Union + t = argument_datatype(t) + if t === nothing + throw(ArgumentError("type does not have a definite number of fields")) + end + elseif t === Union{} + throw(ArgumentError("The empty type does not have a well-defined number of fields since it does not have instances.")) + end + if !(t isa DataType) + throw(TypeError(:fieldcount, DataType, t)) + end + fcount = datatype_fieldcount(t) + if fcount === nothing + throw(ArgumentError("type does not have a definite number of fields")) + end + return fcount +end + +""" + fieldtypes(T::Type) + +The declared types of all fields in a composite DataType `T` as a tuple. + +!!! compat "Julia 1.1" + This function requires at least Julia 1.1. + +# Examples +```jldoctest +julia> struct Foo + x::Int64 + y::String + end + +julia> fieldtypes(Foo) +(Int64, String) +``` +""" +fieldtypes(T::Type) = (@_foldable_meta; ntupleany(i -> fieldtype(T, i), fieldcount(T))) + +# return all instances, for types that can be enumerated + +""" + instances(T::Type) + +Return a collection of all instances of the given type, if applicable. Mostly used for +enumerated types (see `@enum`). 
+ +# Examples +```jldoctest +julia> @enum Color red blue green + +julia> instances(Color) +(red, blue, green) +``` +""" +function instances end + +function to_tuple_type(@nospecialize(t)) + if isa(t, Tuple) || isa(t, AbstractArray) || isa(t, SimpleVector) + t = Tuple{t...} + end + if isa(t, Type) && t <: Tuple + for p in (unwrap_unionall(t)::DataType).parameters + if isa(p, Core.TypeofVararg) + p = unwrapva(p) + end + if !(isa(p, Type) || isa(p, TypeVar)) + error("argument tuple type must contain only types") + end + end + else + error("expected tuple type") + end + t +end + +function signature_type(@nospecialize(f), @nospecialize(argtypes)) + argtypes = to_tuple_type(argtypes) + ft = Core.Typeof(f) + u = unwrap_unionall(argtypes)::DataType + return rewrap_unionall(Tuple{ft, u.parameters...}, argtypes) +end + +function get_methodtable(m::Method) + mt = ccall(:jl_method_get_table, Any, (Any,), m) + if mt === nothing + return nothing + end + return mt::Core.MethodTable +end + +""" + has_bottom_parameter(t) -> Bool + +Determine whether `t` is a Type for which one or more of its parameters is `Union{}`. +""" +function has_bottom_parameter(t::DataType) + for p in t.parameters + has_bottom_parameter(p) && return true + end + return false +end +has_bottom_parameter(t::typeof(Bottom)) = true +has_bottom_parameter(t::UnionAll) = has_bottom_parameter(unwrap_unionall(t)) +has_bottom_parameter(t::Union) = has_bottom_parameter(t.a) & has_bottom_parameter(t.b) +has_bottom_parameter(t::TypeVar) = has_bottom_parameter(t.ub) +has_bottom_parameter(::Any) = false + +min_world(m::Core.CodeInstance) = m.min_world +max_world(m::Core.CodeInstance) = m.max_world +min_world(m::Core.CodeInfo) = m.min_world +max_world(m::Core.CodeInfo) = m.max_world + +""" + get_world_counter() + +Returns the current maximum world-age counter. This counter is global and monotonically +increasing. +""" +get_world_counter() = ccall(:jl_get_world_counter, UInt, ()) + +""" + tls_world_age() + +Returns the world the [current_task()](@ref) is executing within. +""" +tls_world_age() = ccall(:jl_get_tls_world_age, UInt, ()) + +""" + propertynames(x, private=false) + +Get a tuple or a vector of the properties (`x.property`) of an object `x`. +This is typically the same as [`fieldnames(typeof(x))`](@ref), but types +that overload [`getproperty`](@ref) should generally overload `propertynames` +as well to get the properties of an instance of the type. + +`propertynames(x)` may return only "public" property names that are part +of the documented interface of `x`. If you want it to also return "private" +property names intended for internal use, pass `true` for the optional second argument. +REPL tab completion on `x.` shows only the `private=false` properties. + +See also: [`hasproperty`](@ref), [`hasfield`](@ref). +""" +propertynames(x) = fieldnames(typeof(x)) +propertynames(m::Module) = names(m) +propertynames(x, private::Bool) = propertynames(x) # ignore private flag by default +propertynames(x::Array) = () # hide the fields from tab completion to discourage calling `x.size` instead of `size(x)`, even though they are equivalent + +""" + hasproperty(x, s::Symbol) + +Return a boolean indicating whether the object `x` has `s` as one of its own properties. + +!!! compat "Julia 1.2" + This function requires at least Julia 1.2. + +See also: [`propertynames`](@ref), [`hasfield`](@ref). 
+""" +hasproperty(x, s::Symbol) = s in propertynames(x) + +""" + delete_method(m::Method) + +Make method `m` uncallable and force recompilation of any methods that use(d) it. +""" +function delete_method(m::Method) + ccall(:jl_method_table_disable, Cvoid, (Any, Any), get_methodtable(m), m) +end + + +# type for reflecting and pretty-printing a subset of methods +mutable struct MethodList <: AbstractArray{Method,1} + ms::Array{Method,1} + mt::Core.MethodTable +end + +size(m::MethodList) = size(m.ms) +getindex(m::MethodList, i::Integer) = m.ms[i] + +function MethodList(mt::Core.MethodTable) + ms = Method[] + visit(mt) do m + push!(ms, m) + end + return MethodList(ms, mt) +end + +""" + methods(f, [types], [module]) + +Return the method table for `f`. + +If `types` is specified, return an array of methods whose types match. +If `module` is specified, return an array of methods defined in that module. +A list of modules can also be specified as an array. + +!!! compat "Julia 1.4" + At least Julia 1.4 is required for specifying a module. + +See also: [`which`](@ref), [`@which`](@ref Main.InteractiveUtils.@which) and [`methodswith`](@ref Main.InteractiveUtils.methodswith). +""" +function methods(@nospecialize(f), @nospecialize(t), + mod::Union{Tuple{Module},AbstractArray{Module},Nothing}=nothing) + world = get_world_counter() + world == typemax(UInt) && error("code reflection cannot be used from generated functions") + # Lack of specialization => a comprehension triggers too many invalidations via _collect, so collect the methods manually + ms = Method[] + for m in _methods(f, t, -1, world)::Vector + m = m::Core.MethodMatch + (mod === nothing || parentmodule(m.method) ∈ mod) && push!(ms, m.method) + end + MethodList(ms, typeof(f).name.mt) +end +methods(@nospecialize(f), @nospecialize(t), mod::Module) = methods(f, t, (mod,)) + +function methods_including_ambiguous(@nospecialize(f), @nospecialize(t)) + tt = signature_type(f, t) + world = get_world_counter() + world == typemax(UInt) && error("code reflection cannot be used from generated functions") + min = RefValue{UInt}(typemin(UInt)) + max = RefValue{UInt}(typemax(UInt)) + ms = _methods_by_ftype(tt, nothing, -1, world, true, min, max, Ptr{Int32}(C_NULL))::Vector + return MethodList(Method[(m::Core.MethodMatch).method for m in ms], typeof(f).name.mt) +end + +function methods(@nospecialize(f), + mod::Union{Module,AbstractArray{Module},Nothing}=nothing) + # return all matches + return methods(f, Tuple{Vararg{Any}}, mod) +end + +# low-level method lookup functions used by the compiler + +unionlen(@nospecialize(x)) = x isa Union ? 
unionlen(x.a) + unionlen(x.b) : 1 + +function _uniontypes(@nospecialize(x), ts::Array{Any,1}) + if x isa Union + _uniontypes(x.a, ts) + _uniontypes(x.b, ts) + else + push!(ts, x) + end + return ts +end +uniontypes(@nospecialize(x)) = _uniontypes(x, Any[]) + +function _methods(@nospecialize(f), @nospecialize(t), lim::Int, world::UInt) + tt = signature_type(f, t) + return _methods_by_ftype(tt, lim, world) +end + +function _methods_by_ftype(@nospecialize(t), lim::Int, world::UInt) + return _methods_by_ftype(t, nothing, lim, world) +end +function _methods_by_ftype(@nospecialize(t), mt::Union{Core.MethodTable, Nothing}, lim::Int, world::UInt) + return _methods_by_ftype(t, mt, lim, world, false, RefValue{UInt}(typemin(UInt)), RefValue{UInt}(typemax(UInt)), Ptr{Int32}(C_NULL)) +end +function _methods_by_ftype(@nospecialize(t), mt::Union{Core.MethodTable, Nothing}, lim::Int, world::UInt, ambig::Bool, min::Ref{UInt}, max::Ref{UInt}, has_ambig::Ref{Int32}) + return ccall(:jl_matching_methods, Any, (Any, Any, Cint, Cint, UInt, Ptr{UInt}, Ptr{UInt}, Ptr{Int32}), t, mt, lim, ambig, world, min, max, has_ambig)::Union{Vector{Any},Nothing} +end + +hasgenerator(m::Method) = isdefined(m, :generator) +hasgenerator(m::Core.MethodInstance) = hasgenerator(m.def::Method) + +""" + Base.generating_output([incremental::Bool])::Bool + +Return `true` if the current process is being used to pre-generate a +code cache via any of the `--output-*` command line arguments. The optional +`incremental` argument further specifies the precompilation mode: when set +to `true`, the function will return `true` only for package precompilation; +when set to `false`, it will return `true` only for system image generation. + +!!! compat "Julia 1.11" + This function requires at least Julia 1.11. +""" +function generating_output(incremental::Union{Bool,Nothing}=nothing) + ccall(:jl_generating_output, Cint, ()) == 0 && return false + if incremental !== nothing + JLOptions().incremental == incremental || return false + end + return true +end + +const SLOT_USED = 0x8 +ast_slotflag(@nospecialize(code), i) = ccall(:jl_ir_slotflag, UInt8, (Any, Csize_t), code, i - 1) + +""" + may_invoke_generator(method, atype, sparams) -> Bool + +Computes whether or not we may invoke the generator for the given `method` on +the given `atype` and `sparams`. For correctness, all generated function are +required to return monotonic answers. However, since we don't expect users to +be able to successfully implement this criterion, we only call generated +functions on concrete types. The one exception to this is that we allow calling +generators with abstract types if the generator does not use said abstract type +(and thus cannot incorrectly use it to break monotonicity). This function +computes whether we are in either of these cases. + +Unlike normal functions, the compilation heuristics still can't generate good dispatch +in some cases, but this may still allow inference not to fall over in some limited cases. +""" +function may_invoke_generator(mi::MethodInstance) + return may_invoke_generator(mi.def::Method, mi.specTypes, mi.sparam_vals) +end +function may_invoke_generator(method::Method, @nospecialize(atype), sparams::SimpleVector) + # If we have complete information, we may always call the generator + isdispatchtuple(atype) && return true + + # We don't have complete information, but it is possible that the generator + # syntactically doesn't make use of the information we don't have. Check + # for that. 
+ + # For now, only handle the (common, generated by the frontend case) that the + # generator only has one method + generator = method.generator + isa(generator, Core.GeneratedFunctionStub) || return false + tt = Tuple{typeof(generator.gen), Vararg{Any}} + gen_mthds = _methods_by_ftype(tt, #=lim=#1, method.primary_world) + gen_mthds isa Vector || return false + length(gen_mthds) == 1 || return false + + generator_method = (first(gen_mthds)::Core.MethodMatch).method + nsparams = length(sparams) + isdefined(generator_method, :source) || return false + code = generator_method.source + nslots = ccall(:jl_ir_nslots, Int, (Any,), code) + at = unwrap_unionall(atype) + at isa DataType || return false + (nslots >= 1 + length(sparams) + length(at.parameters)) || return false + + firstarg = 1 + for i = 1:nsparams + if isa(sparams[i], TypeVar) + if (ast_slotflag(code, firstarg + i) & SLOT_USED) != 0 + return false + end + end + end + nargs = Int(method.nargs) + non_va_args = method.isva ? nargs - 1 : nargs + for i = 1:non_va_args + if !isdispatchelem(at.parameters[i]) + if (ast_slotflag(code, firstarg + i + nsparams) & SLOT_USED) != 0 + return false + end + end + end + if method.isva + # If the va argument is used, we need to ensure that all arguments that + # contribute to the va tuple are dispatchelemes + if (ast_slotflag(code, firstarg + nargs + nsparams) & SLOT_USED) != 0 + for i = (non_va_args+1):length(at.parameters) + if !isdispatchelem(at.parameters[i]) + return false + end + end + end + end + return true +end + +# get a handle to the unique specialization object representing a particular instantiation of a call +# eliminate UnionAll vars that might be degenerate due to having identical bounds, +# or a concrete upper bound and appearing covariantly. +function subst_trivial_bounds(@nospecialize(atype)) + if !isa(atype, UnionAll) + return atype + end + v = atype.var + if isconcretetype(v.ub) || v.lb === v.ub + subst = try + atype{v.ub} + catch + # Note in rare cases a var bound might not be valid to substitute. + nothing + end + if subst !== nothing + return subst_trivial_bounds(subst) + end + end + return UnionAll(v, subst_trivial_bounds(atype.body)) +end + +# If removing trivial vars from atype results in an equivalent type, use that +# instead. Otherwise we can get a case like issue #38888, where a signature like +# f(x::S) where S<:Int +# gets cached and matches a concrete dispatch case. 
+function normalize_typevars(method::Method, @nospecialize(atype), sparams::SimpleVector) + at2 = subst_trivial_bounds(atype) + if at2 !== atype && at2 == atype + atype = at2 + sp_ = ccall(:jl_type_intersection_with_env, Any, (Any, Any), at2, method.sig)::SimpleVector + sparams = sp_[2]::SimpleVector + end + return Pair{Any,SimpleVector}(atype, sparams) +end + +function get_nospecializeinfer_sig(method::Method, @nospecialize(atype), sparams::SimpleVector) + isa(atype, DataType) || return method.sig + mt = ccall(:jl_method_get_table, Any, (Any,), method) + mt === nothing && return method.sig + return ccall(:jl_normalize_to_compilable_sig, Any, (Any, Any, Any, Any, Cint), + mt, atype, sparams, method, #=int return_if_compileable=#0) +end + +is_nospecialized(method::Method) = method.nospecialize ≠ 0 +is_nospecializeinfer(method::Method) = method.nospecializeinfer && is_nospecialized(method) +function specialize_method(method::Method, @nospecialize(atype), sparams::SimpleVector; preexisting::Bool=false) + @inline + if isa(atype, UnionAll) + atype, sparams = normalize_typevars(method, atype, sparams) + end + if is_nospecializeinfer(method) + atype = get_nospecializeinfer_sig(method, atype, sparams) + end + if preexisting + # check cached specializations + # for an existing result stored there + return ccall(:jl_specializations_lookup, Any, (Any, Any), method, atype)::Union{Nothing,MethodInstance} + end + return ccall(:jl_specializations_get_linfo, Ref{MethodInstance}, (Any, Any, Any), method, atype, sparams) +end + +function specialize_method(match::Core.MethodMatch; kwargs...) + return specialize_method(match.method, match.spec_types, match.sparams; kwargs...) +end + +hasintersect(@nospecialize(a), @nospecialize(b)) = typeintersect(a, b) !== Bottom diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index e3b1ac499e986..71f9da04baa4a 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -2647,7 +2647,7 @@ g26826(x) = getfield26826(x, :a, :b) # If this test is broken (especially if inference is getting a correct, but loose result, # like a Union) then it's potentially an indication that the optimizer isn't hitting the # InferenceResult cache properly for varargs methods. -let ct = Core.Compiler.code_typed(f26826, (Float64,))[1] +let ct = code_typed(f26826, (Float64,))[1] typed_code, retty = ct.first, ct.second found_poorly_typed_getfield_call = false for i = 1:length(typed_code.code) From 7ee3ba912e5232b2e73186c9c28597d4db3550b6 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 17 Oct 2024 01:20:32 -0400 Subject: [PATCH 228/537] Move EffectsOverride to expr.jl (#56187) It makes sense that we originally added this to the compiler, but these annotations are really a runtime feature that the compiler simply reads to allow it to make additional assumptions. The runtime should not semantically depend on the compiler for this, so move these definitions to expr.jl. The practical effect of this right now is that Base gains a second copy of this code. Post #56128, the compiler will use the Base copy of this. Split out from #56128. 
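For reference, the relocated definitions can be exercised directly from `Base` once this change is in place; a rough sketch (illustrative only, with the bit positions simply restating `encode_effects_override` in the diff below):

```julia
# Illustrative sketch, assuming a build that includes this patch: the override
# struct and its bit-packing helpers are reachable from Base, not Core.Compiler.
override  = Base.EffectsOverride(; nothrow = true, noub = true)
bits      = Base.encode_effects_override(override)        # packed UInt16 mask
roundtrip = Base.decode_effects_override(bits)
@assert roundtrip.nothrow && roundtrip.noub
@assert bits == ((0x0001 << 2) | (0x0001 << 7))   # bit 2 = nothrow, bit 7 = noub
```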
--- base/compiler/compiler.jl | 41 -------------------- base/compiler/effects.jl | 31 --------------- base/experimental.jl | 2 +- base/expr.jl | 79 +++++++++++++++++++++++++++++++++++++-- src/julia.h | 2 +- test/strings/basic.jl | 2 +- 6 files changed, 79 insertions(+), 78 deletions(-) diff --git a/base/compiler/compiler.jl b/base/compiler/compiler.jl index dbfc9d7d57140..7d1dba88c9011 100644 --- a/base/compiler/compiler.jl +++ b/base/compiler/compiler.jl @@ -38,47 +38,6 @@ convert(::Type{T}, x::T) where {T} = x # Note that `@assume_effects` is available only after loading namedtuple.jl. abstract type MethodTableView end abstract type AbstractInterpreter end -struct EffectsOverride - consistent::Bool - effect_free::Bool - nothrow::Bool - terminates_globally::Bool - terminates_locally::Bool - notaskstate::Bool - inaccessiblememonly::Bool - noub::Bool - noub_if_noinbounds::Bool - consistent_overlay::Bool - nortcall::Bool -end -function EffectsOverride( - override::EffectsOverride = - EffectsOverride(false, false, false, false, false, false, false, false, false, false, false); - consistent::Bool = override.consistent, - effect_free::Bool = override.effect_free, - nothrow::Bool = override.nothrow, - terminates_globally::Bool = override.terminates_globally, - terminates_locally::Bool = override.terminates_locally, - notaskstate::Bool = override.notaskstate, - inaccessiblememonly::Bool = override.inaccessiblememonly, - noub::Bool = override.noub, - noub_if_noinbounds::Bool = override.noub_if_noinbounds, - consistent_overlay::Bool = override.consistent_overlay, - nortcall::Bool = override.nortcall) - return EffectsOverride( - consistent, - effect_free, - nothrow, - terminates_globally, - terminates_locally, - notaskstate, - inaccessiblememonly, - noub, - noub_if_noinbounds, - consistent_overlay, - nortcall) -end -const NUM_EFFECTS_OVERRIDES = 11 # sync with julia.h # essential files and libraries include("essentials.jl") diff --git a/base/compiler/effects.jl b/base/compiler/effects.jl index fb35162134ffa..3d9b69360b317 100644 --- a/base/compiler/effects.jl +++ b/base/compiler/effects.jl @@ -355,36 +355,5 @@ function decode_effects(e::UInt32) _Bool((e >> 14) & 0x01)) end -function encode_effects_override(eo::EffectsOverride) - e = 0x0000 - eo.consistent && (e |= (0x0001 << 0)) - eo.effect_free && (e |= (0x0001 << 1)) - eo.nothrow && (e |= (0x0001 << 2)) - eo.terminates_globally && (e |= (0x0001 << 3)) - eo.terminates_locally && (e |= (0x0001 << 4)) - eo.notaskstate && (e |= (0x0001 << 5)) - eo.inaccessiblememonly && (e |= (0x0001 << 6)) - eo.noub && (e |= (0x0001 << 7)) - eo.noub_if_noinbounds && (e |= (0x0001 << 8)) - eo.consistent_overlay && (e |= (0x0001 << 9)) - eo.nortcall && (e |= (0x0001 << 10)) - return e -end - -function decode_effects_override(e::UInt16) - return EffectsOverride( - !iszero(e & (0x0001 << 0)), - !iszero(e & (0x0001 << 1)), - !iszero(e & (0x0001 << 2)), - !iszero(e & (0x0001 << 3)), - !iszero(e & (0x0001 << 4)), - !iszero(e & (0x0001 << 5)), - !iszero(e & (0x0001 << 6)), - !iszero(e & (0x0001 << 7)), - !iszero(e & (0x0001 << 8)), - !iszero(e & (0x0001 << 9)), - !iszero(e & (0x0001 << 10))) -end - decode_statement_effects_override(ssaflag::UInt32) = decode_effects_override(UInt16((ssaflag >> NUM_IR_FLAGS) & (1 << NUM_EFFECTS_OVERRIDES - 1))) diff --git a/base/experimental.jl b/base/experimental.jl index 648b5da0ed9a1..982ed5e78aa8c 100644 --- a/base/experimental.jl +++ b/base/experimental.jl @@ -420,7 +420,7 @@ macro consistent_overlay(mt, def) inner = 
Base.unwrap_macrocalls(def) is_function_def(inner) || error("@consistent_overlay requires a function definition") overlay_def!(mt, inner) - override = Core.Compiler.EffectsOverride(; consistent_overlay=true) + override = Base.EffectsOverride(; consistent_overlay=true) Base.pushmeta!(def::Expr, Base.form_purity_expr(override)) return esc(def) end diff --git a/base/expr.jl b/base/expr.jl index 723b6b5b636c8..e281d9b677297 100644 --- a/base/expr.jl +++ b/base/expr.jl @@ -757,7 +757,7 @@ macro assume_effects(args...) return esc(pushmeta!(lastex::Expr, form_purity_expr(override))) elseif isexpr(lastex, :macrocall) && lastex.args[1] === Symbol("@ccall") lastex.args[1] = GlobalRef(Base, Symbol("@ccall_effects")) - insert!(lastex.args, 3, Core.Compiler.encode_effects_override(override)) + insert!(lastex.args, 3, encode_effects_override(override)) return esc(lastex) end override′ = compute_assumed_setting(override, lastex) @@ -784,7 +784,49 @@ function compute_assumed_settings(settings) return override end -using Core.Compiler: EffectsOverride +struct EffectsOverride + consistent::Bool + effect_free::Bool + nothrow::Bool + terminates_globally::Bool + terminates_locally::Bool + notaskstate::Bool + inaccessiblememonly::Bool + noub::Bool + noub_if_noinbounds::Bool + consistent_overlay::Bool + nortcall::Bool +end + +function EffectsOverride( + override::EffectsOverride = + EffectsOverride(false, false, false, false, false, false, false, false, false, false, false); + consistent::Bool = override.consistent, + effect_free::Bool = override.effect_free, + nothrow::Bool = override.nothrow, + terminates_globally::Bool = override.terminates_globally, + terminates_locally::Bool = override.terminates_locally, + notaskstate::Bool = override.notaskstate, + inaccessiblememonly::Bool = override.inaccessiblememonly, + noub::Bool = override.noub, + noub_if_noinbounds::Bool = override.noub_if_noinbounds, + consistent_overlay::Bool = override.consistent_overlay, + nortcall::Bool = override.nortcall) + return EffectsOverride( + consistent, + effect_free, + nothrow, + terminates_globally, + terminates_locally, + notaskstate, + inaccessiblememonly, + noub, + noub_if_noinbounds, + consistent_overlay, + nortcall) +end + +const NUM_EFFECTS_OVERRIDES = 11 # sync with julia.h function compute_assumed_setting(override::EffectsOverride, @nospecialize(setting), val::Bool=true) if isexpr(setting, :call) && setting.args[1] === :(!) 
@@ -826,9 +868,40 @@ function compute_assumed_setting(override::EffectsOverride, @nospecialize(settin return nothing end +function encode_effects_override(eo::EffectsOverride) + e = 0x0000 + eo.consistent && (e |= (0x0001 << 0)) + eo.effect_free && (e |= (0x0001 << 1)) + eo.nothrow && (e |= (0x0001 << 2)) + eo.terminates_globally && (e |= (0x0001 << 3)) + eo.terminates_locally && (e |= (0x0001 << 4)) + eo.notaskstate && (e |= (0x0001 << 5)) + eo.inaccessiblememonly && (e |= (0x0001 << 6)) + eo.noub && (e |= (0x0001 << 7)) + eo.noub_if_noinbounds && (e |= (0x0001 << 8)) + eo.consistent_overlay && (e |= (0x0001 << 9)) + eo.nortcall && (e |= (0x0001 << 10)) + return e +end + +function decode_effects_override(e::UInt16) + return EffectsOverride( + !iszero(e & (0x0001 << 0)), + !iszero(e & (0x0001 << 1)), + !iszero(e & (0x0001 << 2)), + !iszero(e & (0x0001 << 3)), + !iszero(e & (0x0001 << 4)), + !iszero(e & (0x0001 << 5)), + !iszero(e & (0x0001 << 6)), + !iszero(e & (0x0001 << 7)), + !iszero(e & (0x0001 << 8)), + !iszero(e & (0x0001 << 9)), + !iszero(e & (0x0001 << 10))) +end + function form_purity_expr(override::EffectsOverride) ex = Expr(:purity) - for i = 1:Core.Compiler.NUM_EFFECTS_OVERRIDES + for i = 1:NUM_EFFECTS_OVERRIDES push!(ex.args, getfield(override, i)) end return ex diff --git a/src/julia.h b/src/julia.h index 46679da9714dc..f42ac2a23aaeb 100644 --- a/src/julia.h +++ b/src/julia.h @@ -256,7 +256,7 @@ typedef struct _jl_debuginfo_t { jl_value_t *codelocs; // String // Memory{UInt8} // compressed info } jl_debuginfo_t; -// the following mirrors `struct EffectsOverride` in `base/compiler/effects.jl` +// the following mirrors `struct EffectsOverride` in `base/expr.jl` typedef union __jl_purity_overrides_t { struct { uint16_t ipo_consistent : 1; diff --git a/test/strings/basic.jl b/test/strings/basic.jl index 874607f3c1b20..de04055d047af 100644 --- a/test/strings/basic.jl +++ b/test/strings/basic.jl @@ -1236,7 +1236,7 @@ end end @test_throws ArgumentError Symbol("a\0a") - @test Base._string_n_override == Core.Compiler.encode_effects_override(Base.compute_assumed_settings((:total, :(!:consistent)))) + @test Base._string_n_override == Base.encode_effects_override(Base.compute_assumed_settings((:total, :(!:consistent)))) end @testset "Ensure UTF-8 DFA can never leave invalid state" begin From bbd81580cdcd059176792982308fbf908771847e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Ram=C3=ADrez?= <15837247+mofeing@users.noreply.github.com> Date: Thu, 17 Oct 2024 08:07:25 +0200 Subject: [PATCH 229/537] Fix some grammatical errors on docstring of `GenericMemory` (#56197) --- base/genericmemory.jl | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/base/genericmemory.jl b/base/genericmemory.jl index 5fe070a73628d..89861444d9652 100644 --- a/base/genericmemory.jl +++ b/base/genericmemory.jl @@ -9,8 +9,7 @@ Fixed-size [`DenseVector{T}`](@ref DenseVector). `kind` can currently be either `:not_atomic` or `:atomic`. For details on what `:atomic` implies, see [`AtomicMemory`](@ref) -`addrspace` can currently only be set to Core.CPU. It is designed to to permit extension by other systems -such as GPUs, which might define values such as: +`addrspace` can currently only be set to `Core.CPU`. 
It is designed to permit extension by other systems such as GPUs, which might define values such as: ``` module CUDA const Generic = bitcast(Core.AddrSpace{CUDA}, 0) From 727a57ed4fcbf99916678d719e131de2c59cf054 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 17 Oct 2024 12:00:26 +0530 Subject: [PATCH 230/537] Read views of destination in adjoint * adjoint (#56138) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also, add an aggressive constprop annotation to `generic_matvecmul!`. Together, these improve performance: ```julia julia> A = rand(Int,100,100); julia> @btime $A' * $A'; 290.203 μs (405 allocations: 175.98 KiB) # v"1.12.0-DEV.1364" 270.008 μs (5 allocations: 79.11 KiB) # This PR ``` --- stdlib/LinearAlgebra/src/matmul.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl index 02ecd74152531..f64422fd9cb8a 100644 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -825,7 +825,7 @@ end # NOTE: the generic version is also called as fallback for # strides != 1 cases -generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, alpha::Number, beta::Number) = +Base.@constprop :aggressive generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, alpha::Number, beta::Number) = @stable_muladdmul generic_matvecmul!(C, tA, A, B, MulAddMul(alpha, beta)) @inline function generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, _add::MulAddMul = MulAddMul()) @@ -957,7 +957,7 @@ Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::A ta = t(_add.alpha) for i in AxM mul!(tmp, pB, view(pA, :, i)) - C[ci,:] .+= t.(ta .* tmp) + @views C[ci,:] .+= t.(ta .* tmp) ci += 1 end else From c4effc384fed9c910cd390758f2f3066ddceaa83 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 17 Oct 2024 16:43:22 +0530 Subject: [PATCH 231/537] Add context to `errorshow` `except_str` tests (#56199) With this, the error message in https://buildkite.com/julialang/julia-master/builds/41054#019294ca-e2c5-41f2-a897-e2959715f154 would become ```julia Error in testset errorshow: Test Failed at /home/jishnu/juliaPR/usr/share/julia/test/errorshow.jl:226 Expression: typeof(err) === $(Expr(:escape, :MethodError)) Evaluated: StackOverflowError === MethodError Context: expr = :(+()) ``` Having the failing expression displayed makes it easier to locate the source of the error. 
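The attached context comes from the `@testset let` form used in the diff below, which reports the `let`-bound values as `Context:` when an enclosed test fails. A minimal standalone sketch of the same pattern (illustrative, not part of the diff):

```julia
using Test
# The bound `expr` is shown under "Context:" if the enclosed @test fails.
err = try
    +()                       # no zero-argument method for `+`
catch e
    e
end
@testset let expr = repr(:(+()))
    @test err isa MethodError
end
```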
--- test/errorshow.jl | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/test/errorshow.jl b/test/errorshow.jl index db22fea1131d1..7a3d50d599f2e 100644 --- a/test/errorshow.jl +++ b/test/errorshow.jl @@ -215,6 +215,7 @@ Base.show_method_candidates(buf, try bad_vararg_decl("hello", 3) catch e e end) @test occursin("bad_vararg_decl(!Matched::$Int, ::Any...)", String(take!(buf))) macro except_str(expr, err_type) + source_info = __source__ return quote let err = nothing try @@ -222,7 +223,9 @@ macro except_str(expr, err_type) catch err end err === nothing && error("expected failure, but no exception thrown") - @test typeof(err) === $(esc(err_type)) + @testset let expr=$(repr(expr)) + $(Expr(:macrocall, Symbol("@test"), source_info, :(typeof(err) === $(esc(err_type))))) + end buf = IOBuffer() showerror(buf, err) String(take!(buf)) @@ -231,6 +234,7 @@ macro except_str(expr, err_type) end macro except_strbt(expr, err_type) + source_info = __source__ errmsg = "expected failure, but no exception thrown for $expr" return quote let err = nothing @@ -239,7 +243,9 @@ macro except_strbt(expr, err_type) catch err end err === nothing && error($errmsg) - @test typeof(err) === $(esc(err_type)) + @testset let expr=$(repr(expr)) + $(Expr(:macrocall, Symbol("@test"), source_info, :(typeof(err) === $(esc(err_type))))) + end buf = IOBuffer() showerror(buf, err, catch_backtrace()) String(take!(buf)) @@ -248,6 +254,7 @@ macro except_strbt(expr, err_type) end macro except_stackframe(expr, err_type) + source_info = __source__ return quote let err = nothing local st @@ -257,7 +264,9 @@ macro except_stackframe(expr, err_type) st = stacktrace(catch_backtrace()) end err === nothing && error("expected failure, but no exception thrown") - @test typeof(err) === $(esc(err_type)) + @testset let expr=$(repr(expr)) + $(Expr(:macrocall, Symbol("@test"), source_info, :(typeof(err) === $(esc(err_type))))) + end sprint(show, st[1]) end end From af51bcc563e15023d9dacca099e323b32b8a266f Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Thu, 17 Oct 2024 07:27:24 -0400 Subject: [PATCH 232/537] Include default user depot when JULIA_DEPOT_PATH has leading empty entry (#56195) --- base/initdefs.jl | 11 +++++++---- doc/src/manual/environment-variables.md | 26 ++++++++++++++++--------- test/loading.jl | 2 +- 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/base/initdefs.jl b/base/initdefs.jl index 707c96a2444d6..85b708433c0ef 100644 --- a/base/initdefs.jl +++ b/base/initdefs.jl @@ -112,20 +112,23 @@ function init_depot_path() # otherwise, populate the depot path with the entries in JULIA_DEPOT_PATH, # expanding empty strings to the bundled depot - populated = false - for path in eachsplit(str, Sys.iswindows() ? ';' : ':') + pushfirst_default = true + for (i, path) in enumerate(eachsplit(str, Sys.iswindows() ? 
';' : ':')) if isempty(path) append_bundled_depot_path!(DEPOT_PATH) else path = expanduser(path) path in DEPOT_PATH || push!(DEPOT_PATH, path) - populated = true + if i == 1 + # if a first entry is given, don't add the default depot at the start + pushfirst_default = false + end end end # backwards compatibility: if JULIA_DEPOT_PATH only contains empty entries # (e.g., JULIA_DEPOT_PATH=':'), make sure to use the default depot - if !populated + if pushfirst_default pushfirst!(DEPOT_PATH, joinpath(homedir(), ".julia")) end else diff --git a/doc/src/manual/environment-variables.md b/doc/src/manual/environment-variables.md index b86822e0be4b7..5aa0701c9aafe 100644 --- a/doc/src/manual/environment-variables.md +++ b/doc/src/manual/environment-variables.md @@ -130,17 +130,19 @@ environment variable or if it must have a value, set it to the string `:`. ### [`JULIA_DEPOT_PATH`](@id JULIA_DEPOT_PATH) -The [`JULIA_DEPOT_PATH`](@ref JULIA_DEPOT_PATH) environment variable is used to populate the global Julia -[`DEPOT_PATH`](@ref) variable, which controls where the package manager, as well -as Julia's code loading mechanisms, look for package registries, installed -packages, named environments, repo clones, cached compiled package images, -configuration files, and the default location of the REPL's history file. +The [`JULIA_DEPOT_PATH`](@ref JULIA_DEPOT_PATH) environment variable is used to populate the +global Julia [`DEPOT_PATH`](@ref) variable, which controls where the package manager, as well +as Julia's code loading mechanisms, look for package registries, installed packages, named +environments, repo clones, cached compiled package images, configuration files, and the default +location of the REPL's history file. Unlike the shell `PATH` variable but similar to [`JULIA_LOAD_PATH`](@ref JULIA_LOAD_PATH), -empty entries in [`JULIA_DEPOT_PATH`](@ref JULIA_DEPOT_PATH) are expanded to the default -value of `DEPOT_PATH`, excluding the user depot. This allows easy overriding of the user -depot, while still retaining access to resources that are bundled with Julia, like cache -files, artifacts, etc. For example, to switch the user depot to `/foo/bar` just do +empty entries in [`JULIA_DEPOT_PATH`](@ref JULIA_DEPOT_PATH) have special behavior: +- At the end, it is expanded to the default value of `DEPOT_PATH`, *excluding* the user depot. +- At the start, it is expanded to the default value of `DEPOT_PATH`, *including* the user depot. +This allows easy overriding of the user depot, while still retaining access to resources that +are bundled with Julia, like cache files, artifacts, etc. For example, to switch the user depot +to `/foo/bar` use a trailing `:` ```sh export JULIA_DEPOT_PATH="/foo/bar:" ``` @@ -150,6 +152,12 @@ resources will still be available. If you really only want to use the depot at ` and not load any bundled resources, simply set the environment variable to `/foo/bar` without the trailing colon. +To append a depot at the end of the full default list, including the default user depot, use a +leading `:` +```sh +export JULIA_DEPOT_PATH=":/foo/bar" +``` + There are two exceptions to the above rule. First, if [`JULIA_DEPOT_PATH`](@ref JULIA_DEPOT_PATH) is set to the empty string, it expands to an empty `DEPOT_PATH` array. 
In other words, the empty string is interpreted as a zero-element array, not a one-element diff --git a/test/loading.jl b/test/loading.jl index 4877b256a6ad9..9e7e40ff3b50a 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -731,7 +731,7 @@ end "" => [], "$s" => [default; bundled], "$tmp$s" => [tmp; bundled], - "$s$tmp" => [bundled; tmp], + "$s$tmp" => [default; bundled; tmp], ) for (env, result) in pairs(cases) script = "DEPOT_PATH == $(repr(result)) || error(\"actual depot \" * join(DEPOT_PATH,':') * \" does not match expected depot \" * join($(repr(result)), ':'))" From 4329422fac14ae4daf0a2fd3c71fe4e0df169899 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 17 Oct 2024 17:56:11 +0530 Subject: [PATCH 233/537] Add news entry for `matprod_dest` (#56160) This was missed out in https://github.com/JuliaLang/julia/pull/55537 --- NEWS.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/NEWS.md b/NEWS.md index 9aebf5d42d954..b33f75063eb93 100644 --- a/NEWS.md +++ b/NEWS.md @@ -139,7 +139,9 @@ Standard library changes * The number of default BLAS threads now respects process affinity, instead of using total number of logical threads available on the system ([#55574]). * A new function `zeroslike` is added that is used to generate the zero elements for matrix-valued banded matrices. - Custom array types may specialize this function to return an appropriate result. ([#55252]) + Custom array types may specialize this function to return an appropriate result ([#55252]). +* The matrix multiplication `A * B` calls `matprod_dest(A, B, T::Type)` to generate the destination. + This function is now public ([#55537]). #### Logging From d32cc269c100cc8f26972c6cd622dc7d5d4243b2 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Thu, 17 Oct 2024 10:09:45 -0300 Subject: [PATCH 234/537] Improve type inference of nonmissingtype, nonnothingtype and of Ryu (#56120) Co-authored-by: Cody Tapscott <84105208+topolarity@users.noreply.github.com> --- base/missing.jl | 2 +- base/ryu/Ryu.jl | 2 +- base/some.jl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/base/missing.jl b/base/missing.jl index 1f34195efed88..6a8c09dc02aff 100644 --- a/base/missing.jl +++ b/base/missing.jl @@ -36,7 +36,7 @@ Any !!! compat "Julia 1.3" This function is exported as of Julia 1.3. """ -nonmissingtype(::Type{T}) where {T} = typesplit(T, Missing) +nonmissingtype(@nospecialize(T::Type)) = typesplit(T, Missing) function nonmissingtype_checked(T::Type) R = nonmissingtype(T) diff --git a/base/ryu/Ryu.jl b/base/ryu/Ryu.jl index 89589aa4ab668..e44e240baafda 100644 --- a/base/ryu/Ryu.jl +++ b/base/ryu/Ryu.jl @@ -112,7 +112,7 @@ end function Base.show(io::IO, x::T, forceuntyped::Bool=false, fromprint::Bool=false) where {T <: Base.IEEEFloat} compact = get(io, :compact, false)::Bool buf = Base.StringVector(neededdigits(T)) - typed = !forceuntyped && !compact && Base.nonnothing_nonmissing_typeinfo(io) != typeof(x) + typed = !forceuntyped && !compact && Base.nonnothing_nonmissing_typeinfo(io) !== typeof(x) pos = writeshortest(buf, 1, x, false, false, true, -1, (x isa Float32 && !fromprint) ? 
UInt8('f') : UInt8('e'), false, UInt8('.'), typed, compact) write(io, resize!(buf, pos - 1)) diff --git a/base/some.jl b/base/some.jl index 7d7089bf76655..4269b2d78aedd 100644 --- a/base/some.jl +++ b/base/some.jl @@ -16,7 +16,7 @@ Some(::Type{T}) where {T} = Some{Type{T}}(T) promote_rule(::Type{Some{T}}, ::Type{Some{S}}) where {T, S<:T} = Some{T} -nonnothingtype(::Type{T}) where {T} = typesplit(T, Nothing) +nonnothingtype(@nospecialize(T::Type)) = typesplit(T, Nothing) promote_rule(T::Type{Nothing}, S::Type) = Union{S, Nothing} function promote_rule(T::Type{>:Nothing}, S::Type) R = nonnothingtype(T) From 6b95ac0163e6fa77e5167d33e6d23198a381a630 Mon Sep 17 00:00:00 2001 From: Eric Hanson <5846501+ericphanson@users.noreply.github.com> Date: Thu, 17 Oct 2024 16:10:50 +0200 Subject: [PATCH 235/537] Limit implicit `show` in REPL to printing 20 KiB by default (#53959) closes https://github.com/JuliaLang/julia/issues/40735 --------- Co-authored-by: Jameson Nash --- NEWS.md | 2 ++ stdlib/REPL/src/REPL.jl | 62 +++++++++++++++++++++++++++++++++++++++- stdlib/REPL/test/repl.jl | 40 ++++++++++++++++++++++++++ 3 files changed, 103 insertions(+), 1 deletion(-) diff --git a/NEWS.md b/NEWS.md index b33f75063eb93..05f21c5092643 100644 --- a/NEWS.md +++ b/NEWS.md @@ -167,6 +167,8 @@ Standard library changes - the REPL will now warn if it detects a name is being accessed from a module which does not define it (nor has a submodule which defines it), and for which the name is not public in that module. For example, `map` is defined in Base, and executing `LinearAlgebra.map` in the REPL will now issue a warning the first time occurs. ([#54872]) +- When an object is printed automatically (by being returned in the REPL), its display is now truncated after printing 20 KiB. + This does not affect manual calls to `show`, `print`, and so forth. ([#53959]) #### SuiteSparse diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl index 272b907165341..88458f7de4666 100644 --- a/stdlib/REPL/src/REPL.jl +++ b/stdlib/REPL/src/REPL.jl @@ -484,10 +484,70 @@ function repl_backend_loop(backend::REPLBackend, get_module::Function) return nothing end +SHOW_MAXIMUM_BYTES::Int = 20480 + +# Limit printing during REPL display +mutable struct LimitIO{IO_t <: IO} <: IO + io::IO_t + maxbytes::Int + n::Int # max bytes to write +end +LimitIO(io::IO, maxbytes) = LimitIO(io, maxbytes, 0) + +struct LimitIOException <: Exception + maxbytes::Int +end + +function Base.showerror(io::IO, e::LimitIOException) + print(io, "$LimitIOException: aborted printing after attempting to print more than $(Base.format_bytes(e.maxbytes)) within a `LimitIO`.") +end + +function Base.write(io::LimitIO, v::UInt8) + io.n > io.maxbytes && throw(LimitIOException(io.maxbytes)) + n_bytes = write(io.io, v) + io.n += n_bytes + return n_bytes +end + +# Semantically, we only need to override `Base.write`, but we also +# override `unsafe_write` for performance. +function Base.unsafe_write(limiter::LimitIO, p::Ptr{UInt8}, nb::UInt) + # already exceeded? 
throw + limiter.n > limiter.maxbytes && throw(LimitIOException(limiter.maxbytes)) + remaining = limiter.maxbytes - limiter.n # >= 0 + + # Not enough bytes left; we will print up to the limit, then throw + if remaining < nb + if remaining > 0 + Base.unsafe_write(limiter.io, p, remaining) + end + throw(LimitIOException(limiter.maxbytes)) + end + + # We won't hit the limit so we'll write the full `nb` bytes + bytes_written = Base.unsafe_write(limiter.io, p, nb) + limiter.n += bytes_written + return bytes_written +end + struct REPLDisplay{Repl<:AbstractREPL} <: AbstractDisplay repl::Repl end +function show_limited(io::IO, mime::MIME, x) + try + # We wrap in a LimitIO to limit the amount of printing. + # We unpack `IOContext`s, since we will pass the properties on the outside. + inner = io isa IOContext ? io.io : io + wrapped_limiter = IOContext(LimitIO(inner, SHOW_MAXIMUM_BYTES), io) + # `show_repl` to allow the hook with special syntax highlighting + show_repl(wrapped_limiter, mime, x) + catch e + e isa LimitIOException || rethrow() + printstyled(io, """…[printing stopped after displaying $(Base.format_bytes(e.maxbytes)); call `show(stdout, MIME"text/plain"(), ans)` to print without truncation]"""; color=:light_yellow, bold=true) + end +end + function display(d::REPLDisplay, mime::MIME"text/plain", x) x = Ref{Any}(x) with_repl_linfo(d.repl) do io @@ -504,7 +564,7 @@ function display(d::REPLDisplay, mime::MIME"text/plain", x) # this can override the :limit property set initially io = foldl(IOContext, d.repl.options.iocontext, init=io) end - show_repl(io, mime, x[]) + show_limited(io, mime, x[]) println(io) end return nothing diff --git a/stdlib/REPL/test/repl.jl b/stdlib/REPL/test/repl.jl index 85a8137fa003e..809913502c3d7 100644 --- a/stdlib/REPL/test/repl.jl +++ b/stdlib/REPL/test/repl.jl @@ -1964,6 +1964,46 @@ end @test undoc == [:AbstractREPL, :BasicREPL, :LineEditREPL, :StreamREPL] end +struct A40735 + str::String +end + +# https://github.com/JuliaLang/julia/issues/40735 +@testset "Long printing" begin + previous = REPL.SHOW_MAXIMUM_BYTES + try + REPL.SHOW_MAXIMUM_BYTES = 1000 + str = string(('a':'z')...)^50 + @test length(str) > 1100 + # For a raw string, we correctly get the standard abbreviated output + output = sprint(REPL.show_limited, MIME"text/plain"(), str; context=:limit => true) + hint = """call `show(stdout, MIME"text/plain"(), ans)` to print without truncation""" + suffix = "[printing stopped after displaying 1000 bytes; $hint]" + @test !endswith(output, suffix) + @test contains(output, "bytes ⋯") + # For a struct without a custom `show` method, we don't hit the abbreviated + # 3-arg show on the inner string, so here we check that the REPL print-limiting + # feature is correctly kicking in. 
+ a = A40735(str) + output = sprint(REPL.show_limited, MIME"text/plain"(), a; context=:limit => true) + @test endswith(output, suffix) + @test length(output) <= 1200 + # We also check some extreme cases + REPL.SHOW_MAXIMUM_BYTES = 1 + output = sprint(REPL.show_limited, MIME"text/plain"(), 1) + @test output == "1" + output = sprint(REPL.show_limited, MIME"text/plain"(), 12) + @test output == "1…[printing stopped after displaying 1 byte; $hint]" + REPL.SHOW_MAXIMUM_BYTES = 0 + output = sprint(REPL.show_limited, MIME"text/plain"(), 1) + @test output == "…[printing stopped after displaying 0 bytes; $hint]" + @test sprint(io -> show(REPL.LimitIO(io, 5), "abc")) == "\"abc\"" + @test_throws REPL.LimitIOException(1) sprint(io -> show(REPL.LimitIO(io, 1), "abc")) + finally + REPL.SHOW_MAXIMUM_BYTES = previous + end +end + @testset "Dummy Pkg prompt" begin # do this in an empty depot to test default for new users withenv("JULIA_DEPOT_PATH" => mktempdir() * (Sys.iswindows() ? ";" : ":"), "JULIA_LOAD_PATH" => nothing) do From c2e3498215abee63a4f75be3ba7a25d0a829880a Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Thu, 17 Oct 2024 11:11:13 -0300 Subject: [PATCH 236/537] Add inferFunctionAttrsPass to the pipeline so that libfuncs get attributes (#52946) I doubt this will make too much of a difference since we don't use too many libfuncs, but it's also quite a cheap pass if it makes any difference --------- Co-authored-by: Valentin Churavy --- src/pipeline.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/pipeline.cpp b/src/pipeline.cpp index f300e4d7757b2..09d51598ea8b7 100644 --- a/src/pipeline.cpp +++ b/src/pipeline.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -332,6 +333,7 @@ static void buildEarlySimplificationPipeline(ModulePassManager &MPM, PassBuilder MPM.addPass(ForceFunctionAttrsPass()); invokePipelineStartCallbacks(MPM, PB, O); MPM.addPass(Annotation2MetadataPass()); + MPM.addPass(InferFunctionAttrsPass()); MPM.addPass(ConstantMergePass()); { FunctionPassManager FPM; From afb65fabe0c68c9e7a579017613208d812324e88 Mon Sep 17 00:00:00 2001 From: Alex Arslan Date: Thu, 17 Oct 2024 08:11:09 -0700 Subject: [PATCH 237/537] Fix printing of `AbstractDict`s with unknown length (#56009) Also fix interacting with them at the REPL. 
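The guard used in the diff below is `Base.haslength`, so the descriptive one-pair-per-line display is only attempted when `length` is actually defined. A small illustration (not part of the diff):

```julia
# `haslength` is true only for iterators with a cheap, defined `length`.
Base.haslength(Dict(1 => 2))           # true  -> keep the multi-line dict display
Base.haslength(Iterators.repeated(1))  # false -> fall back to plain `show`, as the
                                       #          dict path now does for SizeUnknown
```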
Fixes #55931 --- base/show.jl | 2 +- stdlib/REPL/src/REPLCompletions.jl | 2 +- stdlib/REPL/test/replcompletions.jl | 19 +++++++++++++++++-- test/show.jl | 18 ++++++++++++++++++ 4 files changed, 37 insertions(+), 4 deletions(-) diff --git a/base/show.jl b/base/show.jl index a147c2037d70e..fb932838ac69a 100644 --- a/base/show.jl +++ b/base/show.jl @@ -152,7 +152,7 @@ function show(io::IO, ::MIME"text/plain", iter::Union{KeySet,ValueIterator}) end function show(io::IO, ::MIME"text/plain", t::AbstractDict{K,V}) where {K,V} - isempty(t) && return show(io, t) + (isempty(t) || !haslength(t)) && return show(io, t) # show more descriptively, with one line per key/value pair recur_io = IOContext(io, :SHOWN_SET => t) limit = get(io, :limit, false)::Bool diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index 77f7fdf15cc9c..5e80e17036559 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -995,7 +995,7 @@ function dict_identifier_key(str::String, tag::Symbol, context_module::Module=Ma isa(objt, Core.Const) || return (nothing, nothing, nothing) obj = objt.val isa(obj, AbstractDict) || return (nothing, nothing, nothing) - length(obj)::Int < 1_000_000 || return (nothing, nothing, nothing) + (Base.haslength(obj) && length(obj)::Int < 1_000_000) || return (nothing, nothing, nothing) begin_of_key = something(findnext(!isspace, str, nextind(str, end_of_identifier) + 1), # +1 for [ lastindex(str)+1) return (obj, str[begin_of_key:end], begin_of_key) diff --git a/stdlib/REPL/test/replcompletions.jl b/stdlib/REPL/test/replcompletions.jl index 3f8addcace73b..8bee70226755f 100644 --- a/stdlib/REPL/test/replcompletions.jl +++ b/stdlib/REPL/test/replcompletions.jl @@ -68,6 +68,16 @@ let ex = quote Base.keys(d::CustomDict) = collect(keys(d.mydict)) Base.length(d::CustomDict) = length(d.mydict) + # Support AbstractDict with unknown length, #55931 + struct NoLengthDict{K,V} <: AbstractDict{K,V} + dict::Dict{K,V} + NoLengthDict{K,V}() where {K,V} = new(Dict{K,V}()) + end + Base.iterate(d::NoLengthDict, s...) = iterate(d.dict, s...) 
+ Base.IteratorSize(::Type{<:NoLengthDict}) = Base.SizeUnknown() + Base.eltype(::Type{NoLengthDict{K,V}}) where {K,V} = Pair{K,V} + Base.setindex!(d::NoLengthDict, v, k) = d.dict[k] = v + test(x::T, y::T) where {T<:Real} = pass test(x::Real, y::Real) = pass test(x::AbstractArray{T}, y) where {T<:Real} = pass @@ -151,6 +161,7 @@ let ex = quote test_repl_comp_dict = CompletionFoo.test_dict test_repl_comp_customdict = CompletionFoo.test_customdict test_dict_ℂ = Dict(1=>2) + test_dict_no_length = CompletionFoo.NoLengthDict{Int,Int}() end ex.head = :toplevel Core.eval(Main, ex) @@ -1486,8 +1497,12 @@ test_dict_completion("CompletionFoo.test_customdict") test_dict_completion("test_repl_comp_dict") test_dict_completion("test_repl_comp_customdict") -# Issue #23004: this should not throw: -@test REPLCompletions.dict_identifier_key("test_dict_ℂ[\\", :other) isa Tuple +@testset "dict_identifier_key" begin + # Issue #23004: this should not throw: + @test REPLCompletions.dict_identifier_key("test_dict_ℂ[\\", :other) isa Tuple + # Issue #55931: neither should this: + @test REPLCompletions.dict_identifier_key("test_dict_no_length[", :other) isa NTuple{3,Nothing} +end @testset "completion of string/cmd macros (#22577)" begin c, r, res = test_complete("ra") diff --git a/test/show.jl b/test/show.jl index d9c3585b7c1df..976141f1ebb17 100644 --- a/test/show.jl +++ b/test/show.jl @@ -2773,3 +2773,21 @@ end do_expr1 = :(foo() do; bar(); end) @test !contains(sprint(show, do_expr1), " \n") end + +struct NoLengthDict{K,V} <: AbstractDict{K,V} + dict::Dict{K,V} + NoLengthDict{K,V}() where {K,V} = new(Dict{K,V}()) +end +Base.iterate(d::NoLengthDict, s...) = iterate(d.dict, s...) +Base.IteratorSize(::Type{<:NoLengthDict}) = Base.SizeUnknown() +Base.eltype(::Type{NoLengthDict{K,V}}) where {K,V} = Pair{K,V} +Base.setindex!(d::NoLengthDict, v, k) = d.dict[k] = v + +# Issue 55931 +@testset "show AbstractDict with unknown length" begin + x = NoLengthDict{Int,Int}() + x[1] = 2 + str = sprint(io->show(io, MIME("text/plain"), x)) + @test contains(str, "NoLengthDict") + @test contains(str, "1 => 2") +end From eb3ed5e8d137f2fd32a5624fa50652364df56b1b Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 17 Oct 2024 12:03:24 -0400 Subject: [PATCH 238/537] module: Prepare `usings` array for world age partition (#55357) This is a relatively independent part of the bindings partition branch, extending the module usings list to gain `min_world` and `max_world` `size_t`s. These are always `0` and `(size_t)-1` respectively in this PR, which handles the GC and serialization implications of this change. The semantic part will come later. 
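As a purely illustrative picture of the new entry layout (a Julia mirror of the C struct added to `julia.h`; not an API, and the field values below just restate the defaults described above):

```julia
# Hypothetical Julia-side mirror of `struct _jl_module_using`; real entries
# live in the C arraylist and are only manipulated by the runtime.
struct ModuleUsing
    mod::Module       # the used module; kept as the first word, as the GC scan assumes
    min_world::UInt   # always 0 in this PR
    max_world::UInt   # always typemax(UInt), i.e. (size_t)-1, in this PR
end
entry = ModuleUsing(Base, UInt(0), typemax(UInt))
```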
--- src/gc-debug.c | 3 ++- src/gc-stock.c | 12 ++++++----- src/julia.h | 8 +++++++- src/julia_internal.h | 22 ++++++++++++++++++++ src/module.c | 37 ++++++++++++++------------------- src/staticdata.c | 41 ++++++++++++++++++++++++++----------- test/clangsa/MissingRoots.c | 2 +- 7 files changed, 83 insertions(+), 42 deletions(-) diff --git a/src/gc-debug.c b/src/gc-debug.c index 19dd93af5f236..5c150aba68e10 100644 --- a/src/gc-debug.c +++ b/src/gc-debug.c @@ -1125,7 +1125,8 @@ int gc_slot_to_arrayidx(void *obj, void *_slot) JL_NOTSAFEPOINT if (vt == jl_module_type) { jl_module_t *m = (jl_module_t*)obj; start = (char*)m->usings.items; - len = m->usings.len; + len = module_usings_length(m); + elsize = sizeof(struct _jl_module_using); } else if (vt == jl_simplevector_type) { start = (char*)jl_svec_data(obj); diff --git a/src/gc-stock.c b/src/gc-stock.c index 50a3896d4f9aa..fb5acabed3d5c 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -2053,16 +2053,18 @@ STATIC_INLINE void gc_mark_module_binding(jl_ptls_t ptls, jl_module_t *mb_parent gc_heap_snapshot_record_module_to_binding(mb_parent, bindings, bindingkeyset); gc_assert_parent_validity((jl_value_t *)mb_parent, (jl_value_t *)mb_parent->parent); gc_try_claim_and_push(mq, (jl_value_t *)mb_parent->parent, &nptr); - size_t nusings = mb_parent->usings.len; + size_t nusings = module_usings_length(mb_parent); if (nusings > 0) { // this is only necessary because bindings for "using" modules // are added only when accessed. therefore if a module is replaced // after "using" it but before accessing it, this array might // contain the only reference. jl_value_t *obj_parent = (jl_value_t *)mb_parent; - jl_value_t **objary_begin = (jl_value_t **)mb_parent->usings.items; - jl_value_t **objary_end = objary_begin + nusings; - gc_mark_objarray(ptls, obj_parent, objary_begin, objary_end, 1, nptr); + struct _jl_module_using *objary_begin = (struct _jl_module_using *)mb_parent->usings.items; + struct _jl_module_using *objary_end = objary_begin + nusings; + static_assert(sizeof(struct _jl_module_using) == 3*sizeof(void *), "Mismatch in _jl_module_using size"); + static_assert(offsetof(struct _jl_module_using, mod) == 0, "Expected `mod` at the beginning of _jl_module_using"); + gc_mark_objarray(ptls, obj_parent, (jl_value_t**)objary_begin, (jl_value_t**)objary_end, 3, nptr); } else { gc_mark_push_remset(ptls, (jl_value_t *)mb_parent, nptr); @@ -2175,7 +2177,7 @@ FORCE_INLINE void gc_mark_outrefs(jl_ptls_t ptls, jl_gc_markqueue_t *mq, void *_ if (update_meta) gc_setmark(ptls, o, bits, sizeof(jl_module_t)); jl_module_t *mb_parent = (jl_module_t *)new_obj; - uintptr_t nptr = ((mb_parent->usings.len + 1) << 2) | (bits & GC_OLD); + uintptr_t nptr = ((module_usings_length(mb_parent) + 1) << 2) | (bits & GC_OLD); gc_mark_module_binding(ptls, mb_parent, nptr, bits); } else if (vtag == jl_task_tag << 4) { diff --git a/src/julia.h b/src/julia.h index f42ac2a23aaeb..7bb5f31eda708 100644 --- a/src/julia.h +++ b/src/julia.h @@ -714,7 +714,7 @@ typedef struct _jl_module_t { jl_sym_t *file; int32_t line; // hidden fields: - arraylist_t usings; // modules with all bindings potentially imported + arraylist_t usings; /* arraylist of struct jl_module_using */ // modules with all bindings potentially imported jl_uuid_t build_id; jl_uuid_t uuid; _Atomic(uint32_t) counter; @@ -728,6 +728,12 @@ typedef struct _jl_module_t { intptr_t hash; } jl_module_t; +struct _jl_module_using { + jl_module_t *mod; + size_t min_world; + size_t max_world; +}; + struct _jl_globalref_t { JL_DATA_TYPE 
jl_module_t *mod; diff --git a/src/julia_internal.h b/src/julia_internal.h index c09bfc5c3eb42..b2026c671553b 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -819,6 +819,28 @@ void jl_eval_global_expr(jl_module_t *m, jl_expr_t *ex, int set_type); JL_DLLEXPORT void jl_declare_global(jl_module_t *m, jl_value_t *arg, jl_value_t *set_type); JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *m, jl_value_t *e, int fast, int expanded, const char **toplevel_filename, int *toplevel_lineno); +STATIC_INLINE struct _jl_module_using *module_usings_getidx(jl_module_t *m JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT; +STATIC_INLINE jl_module_t *module_usings_getmod(jl_module_t *m JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT; + +#ifndef __clang_gcanalyzer__ +// The analyzer doesn't like looking through the arraylist, so just model the +// access for it using this function +STATIC_INLINE struct _jl_module_using *module_usings_getidx(jl_module_t *m JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT { + return (struct _jl_module_using *)&(m->usings.items[3*i]); +} +STATIC_INLINE jl_module_t *module_usings_getmod(jl_module_t *m JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT { + return module_usings_getidx(m, i)->mod; +} +#endif + +STATIC_INLINE size_t module_usings_length(jl_module_t *m) JL_NOTSAFEPOINT { + return m->usings.len/3; +} + +STATIC_INLINE size_t module_usings_max(jl_module_t *m) JL_NOTSAFEPOINT { + return m->usings.max/3; +} + jl_value_t *jl_eval_global_var(jl_module_t *m JL_PROPAGATES_ROOT, jl_sym_t *e); jl_value_t *jl_interpret_opaque_closure(jl_opaque_closure_t *clos, jl_value_t **args, size_t nargs); jl_value_t *jl_interpret_toplevel_thunk(jl_module_t *m, jl_code_info_t *src); diff --git a/src/module.c b/src/module.c index 36c35f50b44af..8dbac950235ee 100644 --- a/src/module.c +++ b/src/module.c @@ -373,16 +373,6 @@ JL_DLLEXPORT jl_binding_t *jl_get_binding_for_method_def(jl_module_t *m, jl_sym_ return b; } -static inline jl_module_t *module_usings_getidx(jl_module_t *m JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT; - -#ifndef __clang_gcanalyzer__ -// The analyzer doesn't like looking through the arraylist, so just model the -// access for it using this function -static inline jl_module_t *module_usings_getidx(jl_module_t *m JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT { - return (jl_module_t*)m->usings.items[i]; -} -#endif - static int eq_bindings(jl_binding_partition_t *owner, jl_binding_t *alias, size_t world) { jl_ptr_kind_union_t owner_pku = jl_atomic_load_relaxed(&owner->restriction); @@ -407,11 +397,11 @@ static jl_binding_t *using_resolve_binding(jl_module_t *m JL_PROPAGATES_ROOT, jl jl_binding_partition_t *bpart = NULL; jl_module_t *owner = NULL; JL_LOCK(&m->lock); - int i = (int)m->usings.len - 1; + int i = (int)module_usings_length(m) - 1; JL_UNLOCK(&m->lock); for (; i >= 0; --i) { JL_LOCK(&m->lock); - jl_module_t *imp = module_usings_getidx(m, i); + jl_module_t *imp = module_usings_getmod(m, i); JL_UNLOCK(&m->lock); jl_binding_t *tempb = jl_get_module_binding(imp, var, 0); if (tempb != NULL && tempb->exportp) { @@ -746,19 +736,24 @@ JL_DLLEXPORT void jl_module_use_as(jl_module_t *to, jl_module_t *from, jl_sym_t module_import_(to, from, asname, s, 0); } - JL_DLLEXPORT void jl_module_using(jl_module_t *to, jl_module_t *from) { if (to == from) return; JL_LOCK(&to->lock); - for (size_t i = 0; i < to->usings.len; i++) { - if (from == to->usings.items[i]) { + for (size_t i = 0; i < module_usings_length(to); i++) { + if (from == module_usings_getmod(to, i)) { 
JL_UNLOCK(&to->lock); return; } } - arraylist_push(&to->usings, from); + struct _jl_module_using new_item = { + .mod = from, + .min_world = 0, + .max_world = (size_t)-1 + }; + arraylist_grow(&to->usings, sizeof(struct _jl_module_using)/sizeof(void*)); + memcpy(&to->usings.items[to->usings.len-3], &new_item, sizeof(struct _jl_module_using)); jl_gc_wb(to, from); JL_UNLOCK(&to->lock); @@ -1096,12 +1091,12 @@ JL_DLLEXPORT jl_value_t *jl_checked_assignonce(jl_binding_t *b, jl_module_t *mod JL_DLLEXPORT jl_value_t *jl_module_usings(jl_module_t *m) { JL_LOCK(&m->lock); - int j = m->usings.len; + int j = module_usings_length(m); jl_array_t *a = jl_alloc_array_1d(jl_array_any_type, j); JL_GC_PUSH1(&a); for (int i = 0; j > 0; i++) { j--; - jl_module_t *imp = (jl_module_t*)m->usings.items[i]; + jl_module_t *imp = module_usings_getmod(m, i); jl_array_ptr_set(a, j, (jl_value_t*)imp); } JL_UNLOCK(&m->lock); // may gc @@ -1156,10 +1151,8 @@ JL_DLLEXPORT jl_value_t *jl_module_names(jl_module_t *m, int all, int imported, if (usings) { // If `usings` is specified, traverse the list of `using`-ed modules and incorporate // the names exported by those modules into the list. - for(int i=(int)m->usings.len-1; i >= 0; --i) { - jl_module_t *usinged = module_usings_getidx(m, i); - append_exported_names(a, usinged, all); - } + for (int i = module_usings_length(m)-1; i >= 0; i--) + append_exported_names(a, module_usings_getmod(m, i), all); } JL_GC_POP(); return (jl_value_t*)a; diff --git a/src/staticdata.c b/src/staticdata.c index 0a8cbe6db7c67..89122653758e5 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -811,8 +811,8 @@ static void jl_queue_module_for_serialization(jl_serializer_state *s, jl_module_ } } - for (size_t i = 0; i < m->usings.len; i++) { - jl_queue_for_serialization(s, (jl_value_t*)m->usings.items[i]); + for (size_t i = 0; i < module_usings_length(m); i++) { + jl_queue_for_serialization(s, module_usings_getmod(m, i)); } } @@ -1266,27 +1266,44 @@ static void jl_write_module(jl_serializer_state *s, uintptr_t item, jl_module_t // write out the usings list memset(&newm->usings._space, 0, sizeof(newm->usings._space)); if (m->usings.items == &m->usings._space[0]) { - newm->usings.items = (void**)offsetof(jl_module_t, usings._space); + // Push these relocations here, to keep them in order. This pairs with the `newm->usings.items = ` below. 
arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, usings.items))); arraylist_push(&s->relocs_list, (void*)(((uintptr_t)DataRef << RELOC_TAG_OFFSET) + item)); size_t i; - for (i = 0; i < m->usings.len; i++) { - arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, usings._space[i]))); - arraylist_push(&s->relocs_list, (void*)backref_id(s, m->usings._space[i], s->link_ids_relocs)); + for (i = 0; i < module_usings_length(m); i++) { + struct _jl_module_using *newm_data = module_usings_getidx(newm, i); + struct _jl_module_using *data = module_usings_getidx(m, i); + // TODO: Remove dead entries + newm_data->min_world = data->min_world; + newm_data->max_world = data->max_world; + if (s->incremental) { + if (data->max_world != (size_t)-1) + newm_data->max_world = 0; + newm_data->min_world = 0; + } + arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, usings._space[3*i]))); + arraylist_push(&s->relocs_list, (void*)backref_id(s, data->mod, s->link_ids_relocs)); } + newm->usings.items = (void**)offsetof(jl_module_t, usings._space); } else { newm->usings.items = (void**)tot; arraylist_push(&s->relocs_list, (void*)(reloc_offset + offsetof(jl_module_t, usings.items))); arraylist_push(&s->relocs_list, (void*)(((uintptr_t)DataRef << RELOC_TAG_OFFSET) + item)); size_t i; - for (i = 0; i < m->usings.len; i++) { - write_pointerfield(s, (jl_value_t*)m->usings.items[i]); - tot += sizeof(void*); - } - for (; i < m->usings.max; i++) { + for (i = 0; i < module_usings_length(m); i++) { + struct _jl_module_using *data = module_usings_getidx(m, i); + write_pointerfield(s, (jl_value_t*)data->mod); + write_uint(s->s, data->min_world); + write_uint(s->s, data->max_world); + static_assert(sizeof(struct _jl_module_using) == 3*sizeof(void*), "_jl_module_using mismatch"); + tot += sizeof(struct _jl_module_using); + } + for (; i < module_usings_max(m); i++) { write_pointer(s->s); - tot += sizeof(void*); + write_uint(s->s, 0); + write_uint(s->s, 0); + tot += sizeof(struct _jl_module_using); } } assert(ios_pos(s->s) - reloc_offset == tot); diff --git a/test/clangsa/MissingRoots.c b/test/clangsa/MissingRoots.c index f402dc30eb33e..0a0d5369eba44 100644 --- a/test/clangsa/MissingRoots.c +++ b/test/clangsa/MissingRoots.c @@ -328,7 +328,7 @@ void scopes() { jl_module_t *propagation(jl_module_t *m JL_PROPAGATES_ROOT); void module_member(jl_module_t *m) { - for(int i=(int)m->usings.len-1; i >= 0; --i) { + for(int i=(int)m->usings.len-1; i >= 0; i -= 3) { jl_module_t *imp = propagation(m); jl_gc_safepoint(); look_at_value((jl_value_t*)imp); From 1f935afc07edde9f8c2e1a0f05d4772e18a55e97 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 17 Oct 2024 15:50:28 -0400 Subject: [PATCH 239/537] [REPL] fix lock ordering mistake in load_pkg (#56215) Fixes #56206 --- base/loading.jl | 1 + stdlib/REPL/src/Pkg_beforeload.jl | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index b396c7897c1fd..49d0cb52cd37b 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -2093,6 +2093,7 @@ debug_loading_deadlocks::Bool = true # Enable a slightly more expensive, but mor function start_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool) # handle recursive and concurrent calls to require assert_havelock(require_lock) + require_lock.reentrancy_cnt == 1 || throw(ConcurrencyViolationError("recursive call to start_loading")) while true loaded = stalecheck ? 
maybe_root_module(modkey) : nothing loaded isa Module && return loaded diff --git a/stdlib/REPL/src/Pkg_beforeload.jl b/stdlib/REPL/src/Pkg_beforeload.jl index 472fbc924668d..e110910bafc2f 100644 --- a/stdlib/REPL/src/Pkg_beforeload.jl +++ b/stdlib/REPL/src/Pkg_beforeload.jl @@ -1,17 +1,16 @@ ## Pkg stuff needed before Pkg has loaded const Pkg_pkgid = Base.PkgId(Base.UUID("44cfe95a-1eb2-52ea-b672-e2afdf69b78f"), "Pkg") -const Pkg_REPLExt_pkgid = Base.PkgId(Base.UUID("ceef7b17-42e7-5b1c-81d4-4cc4a2494ccf"), "REPLExt") function load_pkg() + REPLExt = Base.require_stdlib(Pkg_pkgid, "REPLExt") @lock Base.require_lock begin - REPLExt = Base.require_stdlib(Pkg_pkgid, "REPLExt") # require_stdlib does not guarantee that the `__init__` of the package is done when loading is done async # but we need to wait for the repl mode to be set up - lock = get(Base.package_locks, Pkg_REPLExt_pkgid.uuid, nothing) + lock = get(Base.package_locks, Base.PkgId(REPLExt), nothing) lock !== nothing && wait(lock[2]) - return REPLExt end + return REPLExt end ## Below here copied/tweaked from Pkg Types.jl so that the dummy Pkg prompt From 1cf384222a8b70a1f9cafbefd56cc0940fbb66c1 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 17 Oct 2024 17:16:25 -0400 Subject: [PATCH 240/537] REPL: fix unsafe_write return type (#56220) Fixes: #56219 I am not really sure why we have a test for this, but we need to make the test happy --- stdlib/REPL/src/REPL.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl index 88458f7de4666..b8f850c3e9ff9 100644 --- a/stdlib/REPL/src/REPL.jl +++ b/stdlib/REPL/src/REPL.jl @@ -525,7 +525,7 @@ function Base.unsafe_write(limiter::LimitIO, p::Ptr{UInt8}, nb::UInt) end # We won't hit the limit so we'll write the full `nb` bytes - bytes_written = Base.unsafe_write(limiter.io, p, nb) + bytes_written = Base.unsafe_write(limiter.io, p, nb)::Union{Int,UInt} limiter.n += bytes_written return bytes_written end From f1990e2e3d8d31087b23288f640ba16909f7ec7b Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 18 Oct 2024 02:49:19 +0530 Subject: [PATCH 241/537] Fix triu/tril for partly initialized matrices (#55312) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes ```julia julia> using LinearAlgebra, StaticArrays julia> M = Matrix{BigInt}(undef, 2, 2); M[1,1] = M[2,2] = M[1,2] = 3; julia> S = SizedMatrix{2,2}(M) 2×2 SizedMatrix{2, 2, BigInt, 2, Matrix{BigInt}} with indices SOneTo(2)×SOneTo(2): 3 3 #undef 3 julia> triu(S) ERROR: UndefRefError: access to undefined reference Stacktrace: [1] getindex @ ./essentials.jl:907 [inlined] [2] getindex @ ~/.julia/packages/StaticArrays/MSJcA/src/SizedArray.jl:92 [inlined] [3] copyto_unaliased! @ ./abstractarray.jl:1086 [inlined] [4] copyto!(dest::SizedMatrix{2, 2, BigInt, 2, Matrix{BigInt}}, src::SizedMatrix{2, 2, BigInt, 2, Matrix{BigInt}}) @ Base ./abstractarray.jl:1066 [5] copymutable @ ./abstractarray.jl:1200 [inlined] [6] triu(M::SizedMatrix{2, 2, BigInt, 2, Matrix{BigInt}}) @ LinearAlgebra ~/.julia/juliaup/julia-nightly/share/julia/stdlib/v1.12/LinearAlgebra/src/generic.jl:413 [7] top-level scope @ REPL[11]:1 ``` After this PR: ```julia julia> triu(S) 2×2 SizedMatrix{2, 2, BigInt, 2, Matrix{BigInt}} with indices SOneTo(2)×SOneTo(2): 3 3 0 3 ``` Only the indices that need to be copied are accessed, and the others are written to without being read. 
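For nonzero offsets the column loop shown below does the same thing; a hypothetical example (not taken verbatim from the added tests) with a partly initialized matrix:

```julia
julia> M = Matrix{BigInt}(undef, 2, 2); M[1,2] = 3;

julia> triu(M, 1)   # only M[1,2] is read; the unassigned entries are never accessed
2×2 Matrix{BigInt}:
 0  3
 0  0
```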
--- stdlib/LinearAlgebra/src/generic.jl | 80 ++++++++++------------------ stdlib/LinearAlgebra/test/generic.jl | 55 +++++++++++++++++++ 2 files changed, 83 insertions(+), 52 deletions(-) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index e5f23b4981616..6c65c49add74b 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -389,55 +389,7 @@ function cross(a::AbstractVector, b::AbstractVector) end """ - triu(M) - -Upper triangle of a matrix. - -# Examples -```jldoctest -julia> a = fill(1.0, (4,4)) -4×4 Matrix{Float64}: - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - -julia> triu(a) -4×4 Matrix{Float64}: - 1.0 1.0 1.0 1.0 - 0.0 1.0 1.0 1.0 - 0.0 0.0 1.0 1.0 - 0.0 0.0 0.0 1.0 -``` -""" -triu(M::AbstractMatrix) = triu!(copymutable(M)) - -""" - tril(M) - -Lower triangle of a matrix. - -# Examples -```jldoctest -julia> a = fill(1.0, (4,4)) -4×4 Matrix{Float64}: - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - -julia> tril(a) -4×4 Matrix{Float64}: - 1.0 0.0 0.0 0.0 - 1.0 1.0 0.0 0.0 - 1.0 1.0 1.0 0.0 - 1.0 1.0 1.0 1.0 -``` -""" -tril(M::AbstractMatrix) = tril!(copymutable(M)) - -""" - triu(M, k::Integer) + triu(M, k::Integer = 0) Return the upper triangle of `M` starting from the `k`th superdiagonal. @@ -465,10 +417,22 @@ julia> triu(a,-3) 1.0 1.0 1.0 1.0 ``` """ -triu(M::AbstractMatrix,k::Integer) = triu!(copymutable(M),k) +function triu(M::AbstractMatrix, k::Integer = 0) + d = similar(M) + A = triu!(d,k) + if iszero(k) + copytrito!(A, M, 'U') + else + for col in axes(A,2) + rows = firstindex(A,1):min(col-k, lastindex(A,1)) + A[rows, col] = @view M[rows, col] + end + end + return A +end """ - tril(M, k::Integer) + tril(M, k::Integer = 0) Return the lower triangle of `M` starting from the `k`th superdiagonal. 
@@ -496,7 +460,19 @@ julia> tril(a,-3) 1.0 0.0 0.0 0.0 ``` """ -tril(M::AbstractMatrix,k::Integer) = tril!(copymutable(M),k) +function tril(M::AbstractMatrix,k::Integer=0) + d = similar(M) + A = tril!(d,k) + if iszero(k) + copytrito!(A, M, 'L') + else + for col in axes(A,2) + rows = max(firstindex(A,1),col-k):lastindex(A,1) + A[rows, col] = @view M[rows, col] + end + end + return A +end """ triu!(M) diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl index e0a1704913f78..2bf9c75141700 100644 --- a/stdlib/LinearAlgebra/test/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -18,6 +18,9 @@ using .Main.DualNumbers isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl")) using .Main.FillArrays +isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) +using .Main.SizedArrays + Random.seed!(123) n = 5 # should be odd @@ -725,4 +728,56 @@ end @test det(A) == det(M) end +@testset "tril/triu" begin + @testset "with partly initialized matrices" begin + function test_triu(M, k=nothing) + M[1,1] = M[2,2] = M[1,2] = M[1,3] = M[2,3] = 3 + if isnothing(k) + MU = triu(M) + else + MU = triu(M, k) + end + @test iszero(MU[2,1]) + @test MU[1,1] == MU[2,2] == MU[1,2] == MU[1,3] == MU[2,3] == 3 + end + test_triu(Matrix{BigInt}(undef, 2, 3)) + test_triu(Matrix{BigInt}(undef, 2, 3), 0) + test_triu(SizedArrays.SizedArray{(2,3)}(Matrix{BigInt}(undef, 2, 3))) + test_triu(SizedArrays.SizedArray{(2,3)}(Matrix{BigInt}(undef, 2, 3)), 0) + + function test_tril(M, k=nothing) + M[1,1] = M[2,2] = M[2,1] = 3 + if isnothing(k) + ML = tril(M) + else + ML = tril(M, k) + end + @test ML[1,2] == ML[1,3] == ML[2,3] == 0 + @test ML[1,1] == ML[2,2] == ML[2,1] == 3 + end + test_tril(Matrix{BigInt}(undef, 2, 3)) + test_tril(Matrix{BigInt}(undef, 2, 3), 0) + test_tril(SizedArrays.SizedArray{(2,3)}(Matrix{BigInt}(undef, 2, 3))) + test_tril(SizedArrays.SizedArray{(2,3)}(Matrix{BigInt}(undef, 2, 3)), 0) + end + + @testset "block arrays" begin + for nrows in 0:3, ncols in 0:3 + M = [randn(2,2) for _ in 1:nrows, _ in 1:ncols] + Mu = triu(M) + for col in axes(M,2) + rowcutoff = min(col, size(M,1)) + @test @views Mu[1:rowcutoff, col] == M[1:rowcutoff, col] + @test @views Mu[rowcutoff+1:end, col] == zero.(M[rowcutoff+1:end, col]) + end + Ml = tril(M) + for col in axes(M,2) + @test @views Ml[col:end, col] == M[col:end, col] + rowcutoff = min(col-1, size(M,1)) + @test @views Ml[1:rowcutoff, col] == zero.(M[1:rowcutoff, col]) + end + end + end +end + end # module TestGeneric From e33c6a8551e070e7936a2ac95180a6c834f56549 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 18 Oct 2024 17:04:44 +0530 Subject: [PATCH 242/537] Specialize adding/subtracting mixed Upper/LowerTriangular (#56149) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes https://github.com/JuliaLang/julia/issues/56134 After this, ```julia julia> using LinearAlgebra julia> A = hermitianpart(rand(4, 4)) 4×4 Hermitian{Float64, Matrix{Float64}}: 0.387617 0.277226 0.67629 0.60678 0.277226 0.894101 0.388416 0.489141 0.67629 0.388416 0.100907 0.619955 0.60678 0.489141 0.619955 0.452605 julia> B = UpperTriangular(A) 4×4 UpperTriangular{Float64, Hermitian{Float64, Matrix{Float64}}}: 0.387617 0.277226 0.67629 0.60678 ⋅ 0.894101 0.388416 0.489141 ⋅ ⋅ 0.100907 0.619955 ⋅ ⋅ ⋅ 0.452605 julia> B - B' 4×4 Matrix{Float64}: 0.0 0.277226 0.67629 0.60678 -0.277226 0.0 0.388416 0.489141 
-0.67629 -0.388416 0.0 0.619955 -0.60678 -0.489141 -0.619955 0.0 ``` This preserves the band structure of the parent, if any: ```julia julia> U = UpperTriangular(Diagonal(ones(4))) 4×4 UpperTriangular{Float64, Diagonal{Float64, Vector{Float64}}}: 1.0 0.0 0.0 0.0 ⋅ 1.0 0.0 0.0 ⋅ ⋅ 1.0 0.0 ⋅ ⋅ ⋅ 1.0 julia> U - U' 4×4 Diagonal{Float64, Vector{Float64}}: 0.0 ⋅ ⋅ ⋅ ⋅ 0.0 ⋅ ⋅ ⋅ ⋅ 0.0 ⋅ ⋅ ⋅ ⋅ 0.0 ``` This doesn't fully work with partly initialized matrices, and would need https://github.com/JuliaLang/julia/pull/55312 for that. The abstract triangular methods now construct matrices using `similar(parent(U), size(U))` so that the destinations are fully mutable. ```julia julia> @invoke B::LinearAlgebra.AbstractTriangular - B'::LinearAlgebra.AbstractTriangular 4×4 Matrix{Float64}: 0.0 0.277226 0.67629 0.60678 -0.277226 0.0 0.388416 0.489141 -0.67629 -0.388416 0.0 0.619955 -0.60678 -0.489141 -0.619955 0.0 ``` --------- Co-authored-by: Daniel Karrasch --- stdlib/LinearAlgebra/src/triangular.jl | 19 +++++++++-- stdlib/LinearAlgebra/test/triangular.jl | 43 +++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 2 deletions(-) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 71660bc5ca28c..83ef221329d33 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -142,6 +142,7 @@ UnitUpperTriangular const UpperOrUnitUpperTriangular{T,S} = Union{UpperTriangular{T,S}, UnitUpperTriangular{T,S}} const LowerOrUnitLowerTriangular{T,S} = Union{LowerTriangular{T,S}, UnitLowerTriangular{T,S}} const UpperOrLowerTriangular{T,S} = Union{UpperOrUnitUpperTriangular{T,S}, LowerOrUnitLowerTriangular{T,S}} +const UnitUpperOrUnitLowerTriangular{T,S} = Union{UnitUpperTriangular{T,S}, UnitLowerTriangular{T,S}} uppertriangular(M) = UpperTriangular(M) lowertriangular(M) = LowerTriangular(M) @@ -181,6 +182,16 @@ copy(A::UpperOrLowerTriangular{<:Any, <:StridedMaybeAdjOrTransMat}) = copyto!(si # then handle all methods that requires specific handling of upper/lower and unit diagonal +function full(A::Union{UpperTriangular,LowerTriangular}) + return _triangularize(A)(parent(A)) +end +function full(A::UnitUpperOrUnitLowerTriangular) + isupper = A isa UnitUpperTriangular + Ap = _triangularize(A)(parent(A), isupper ? 1 : -1) + Ap[diagind(Ap, IndexStyle(Ap))] = @view A[diagind(A, IndexStyle(A))] + return Ap +end + function full!(A::LowerTriangular) B = A.data tril!(B) @@ -571,6 +582,8 @@ end return A end +_triangularize(::UpperOrUnitUpperTriangular) = triu +_triangularize(::LowerOrUnitLowerTriangular) = tril _triangularize!(::UpperOrUnitUpperTriangular) = triu! _triangularize!(::LowerOrUnitLowerTriangular) = tril! 
@@ -880,7 +893,8 @@ function +(A::UnitLowerTriangular, B::UnitLowerTriangular) (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B LowerTriangular(tril(A.data, -1) + tril(B.data, -1) + 2I) end -+(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A)), A) + copyto!(similar(parent(B)), B) ++(A::UpperOrLowerTriangular, B::UpperOrLowerTriangular) = full(A) + full(B) ++(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A), size(A)), A) + copyto!(similar(parent(B), size(B)), B) function -(A::UpperTriangular, B::UpperTriangular) (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B @@ -914,7 +928,8 @@ function -(A::UnitLowerTriangular, B::UnitLowerTriangular) (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B LowerTriangular(tril(A.data, -1) - tril(B.data, -1)) end --(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A)), A) - copyto!(similar(parent(B)), B) +-(A::UpperOrLowerTriangular, B::UpperOrLowerTriangular) = full(A) - full(B) +-(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A), size(A)), A) - copyto!(similar(parent(B), size(B)), B) function kron(A::UpperTriangular{T,<:StridedMaybeAdjOrTransMat}, B::UpperTriangular{S,<:StridedMaybeAdjOrTransMat}) where {T,S} C = UpperTriangular(Matrix{promote_op(*, T, S)}(undef, _kronsize(A, B))) diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index ec9a3079e2643..7acb3cbfc0c57 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -1322,4 +1322,47 @@ end end end +@testset "addition/subtraction of mixed triangular" begin + for A in (Hermitian(rand(4, 4)), Diagonal(rand(5))) + for T in (UpperTriangular, LowerTriangular, + UnitUpperTriangular, UnitLowerTriangular) + B = T(A) + M = Matrix(B) + R = B - B' + if A isa Diagonal + @test R isa Diagonal + end + @test R == M - M' + R = B + B' + if A isa Diagonal + @test R isa Diagonal + end + @test R == M + M' + C = MyTriangular(B) + @test C - C' == M - M' + @test C + C' == M + M' + end + end + @testset "unfilled parent" begin + @testset for T in (UpperTriangular, LowerTriangular, + UnitUpperTriangular, UnitLowerTriangular) + F = Matrix{BigFloat}(undef, 2, 2) + B = T(F) + isupper = B isa Union{UpperTriangular, UnitUpperTriangular} + B[1+!isupper, 1+isupper] = 2 + if !(B isa Union{UnitUpperTriangular, UnitLowerTriangular}) + B[1,1] = B[2,2] = 3 + end + M = Matrix(B) + @test B - B' == M - M' + @test B + B' == M + M' + @test B - copy(B') == M - M' + @test B + copy(B') == M + M' + C = MyTriangular(B) + @test C - C' == M - M' + @test C + C' == M + M' + end + end +end + end # module TestTriangular From 6317e02035e250ad071275da1caa3240ca6a7390 Mon Sep 17 00:00:00 2001 From: Fredrik Ekre Date: Fri, 18 Oct 2024 16:20:25 +0200 Subject: [PATCH 243/537] juliac: remove call to jl_set_newly_inferred (#56222) Moved in #56186 --- contrib/juliac-buildscript.jl | 1 - src/precompile_utils.c | 10 ++++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/contrib/juliac-buildscript.jl b/contrib/juliac-buildscript.jl index 50f96198c416b..490bca86e1cba 100644 --- a/contrib/juliac-buildscript.jl +++ b/contrib/juliac-buildscript.jl @@ -17,7 +17,6 @@ task.rngState3 = 0x3a77f7189200c20b task.rngState4 = 0x5502376d099035ae uuid_tuple = (UInt64(0), UInt64(0)) ccall(:jl_set_module_uuid, Cvoid, (Any, NTuple{2, UInt64}), Base.__toplevel__, uuid_tuple) -ccall(:jl_set_newly_inferred, 
Cvoid, (Any,), Core.Compiler.newly_inferred) # Patch methods in Core and Base diff --git a/src/precompile_utils.c b/src/precompile_utils.c index a78d1e66dbb51..fc361d8b88e6f 100644 --- a/src/precompile_utils.c +++ b/src/precompile_utils.c @@ -312,10 +312,12 @@ static void *jl_precompile_worklist(jl_array_t *worklist, jl_array_t *extext_met } } } - n = jl_array_nrows(new_ext_cis); - for (i = 0; i < n; i++) { - jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(new_ext_cis, i); - precompile_enq_specialization_(ci->def, m); + if (new_ext_cis) { + n = jl_array_nrows(new_ext_cis); + for (i = 0; i < n; i++) { + jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(new_ext_cis, i); + precompile_enq_specialization_(ci->def, m); + } } void *native_code = jl_precompile_(m, 1); JL_GC_POP(); From a64ffa308f04a5bb35dda785caaff85d52296bad Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Fri, 18 Oct 2024 10:24:36 -0400 Subject: [PATCH 244/537] Fix `goto` insertion when dom-sorting IR in `slot2ssa` pass (#56189) Fix-up this pass a bit to correctly handle fall-through terminators that cannot have their BasicBlock extended (e.g. `Expr(:leave, ...)`) --- base/compiler/ssair/slot2ssa.jl | 75 ++++++++++++++++++++------------- test/compiler/irpasses.jl | 29 +++++++++++++ 2 files changed, 75 insertions(+), 29 deletions(-) diff --git a/base/compiler/ssair/slot2ssa.jl b/base/compiler/ssair/slot2ssa.jl index e70633ffecf6a..2eacdf0f56cfe 100644 --- a/base/compiler/ssair/slot2ssa.jl +++ b/base/compiler/ssair/slot2ssa.jl @@ -339,43 +339,58 @@ RPO traversal and in particular, any use of an SSA value must come after (by linear order) its definition. """ function domsort_ssa!(ir::IRCode, domtree::DomTree) - # First compute the new order of basic blocks + # Mapping from new → old BB index + # An "old" index of 0 means that this was a BB inserted as part of a fixup (see below) result_order = Int[] - stack = Int[] + + # Mapping from old → new BB index bb_rename = fill(-1, length(ir.cfg.blocks)) - node = 1 - ncritbreaks = 0 - nnewfallthroughs = 0 - while node !== -1 - push!(result_order, node) - bb_rename[node] = length(result_order) - cs = domtree.nodes[node].children - terminator = ir[SSAValue(last(ir.cfg.blocks[node].stmts))][:stmt] - next_node = node + 1 - node = -1 + + # The number of GotoNodes we need to insert to preserve control-flow after sorting + nfixupstmts = 0 + + # node queued up for scheduling (-1 === nothing) + node_to_schedule = 1 + worklist = Int[] + while node_to_schedule !== -1 + # First assign a new BB index to `node_to_schedule` + push!(result_order, node_to_schedule) + bb_rename[node_to_schedule] = length(result_order) + cs = domtree.nodes[node_to_schedule].children + terminator = ir[SSAValue(last(ir.cfg.blocks[node_to_schedule].stmts))][:stmt] + fallthrough = node_to_schedule + 1 + node_to_schedule = -1 + # Adding the nodes in reverse sorted order attempts to retain # the original source order of the nodes as much as possible. 
# This is not required for correctness, but is easier on the humans - for child in Iterators.Reverse(cs) - if child == next_node + for node in Iterators.Reverse(cs) + if node == fallthrough # Schedule the fall through node first, # so we can retain the fall through - node = next_node + node_to_schedule = node else - push!(stack, child) + push!(worklist, node) end end - if node == -1 && !isempty(stack) - node = pop!(stack) + if node_to_schedule == -1 && !isempty(worklist) + node_to_schedule = pop!(worklist) end - if node != next_node && !isa(terminator, Union{GotoNode, ReturnNode}) + # If a fallthrough successor is no longer the fallthrough after sorting, we need to + # add a GotoNode (and either extend or split the basic block as necessary) + if node_to_schedule != fallthrough && !isa(terminator, Union{GotoNode, ReturnNode}) if isa(terminator, GotoIfNot) # Need to break the critical edge - ncritbreaks += 1 + push!(result_order, 0) + elseif isa(terminator, EnterNode) || isexpr(terminator, :leave) + # Cannot extend the BasicBlock with a goto, have to split it push!(result_order, 0) else - nnewfallthroughs += 1 + # No need for a new block, just extend + @assert !isterminator(terminator) end + # Reserve space for the fixup goto + nfixupstmts += 1 end end new_bbs = Vector{BasicBlock}(undef, length(result_order)) @@ -385,7 +400,7 @@ function domsort_ssa!(ir::IRCode, domtree::DomTree) nstmts += length(ir.cfg.blocks[i].stmts) end end - result = InstructionStream(nstmts + ncritbreaks + nnewfallthroughs) + result = InstructionStream(nstmts + nfixupstmts) inst_rename = Vector{SSAValue}(undef, length(ir.stmts) + length(ir.new_nodes)) @inbounds for i = 1:length(ir.stmts) inst_rename[i] = SSAValue(-1) @@ -394,7 +409,6 @@ function domsort_ssa!(ir::IRCode, domtree::DomTree) inst_rename[i + length(ir.stmts)] = SSAValue(i + length(result)) end bb_start_off = 0 - crit_edge_breaks_fixup = Tuple{Int, Int}[] for (new_bb, bb) in pairs(result_order) if bb == 0 nidx = bb_start_off + 1 @@ -426,8 +440,8 @@ function domsort_ssa!(ir::IRCode, domtree::DomTree) else result[inst_range[end]][:stmt] = GotoNode(bb_rename[terminator.label]) end - elseif isa(terminator, GotoIfNot) - # Check if we need to break the critical edge + elseif isa(terminator, GotoIfNot) || isa(terminator, EnterNode) || isexpr(terminator, :leave) + # Check if we need to break the critical edge or split the block if bb_rename[bb + 1] != new_bb + 1 @assert result_order[new_bb + 1] == 0 # Add an explicit goto node in the next basic block (we accounted for this above) @@ -435,11 +449,14 @@ function domsort_ssa!(ir::IRCode, domtree::DomTree) node = result[nidx] node[:stmt], node[:type], node[:line] = GotoNode(bb_rename[bb + 1]), Any, NoLineUpdate end - result[inst_range[end]][:stmt] = GotoIfNot(terminator.cond, bb_rename[terminator.dest]) - elseif !isa(terminator, ReturnNode) - if isa(terminator, EnterNode) + if isa(terminator, GotoIfNot) + result[inst_range[end]][:stmt] = GotoIfNot(terminator.cond, bb_rename[terminator.dest]) + elseif isa(terminator, EnterNode) result[inst_range[end]][:stmt] = EnterNode(terminator, terminator.catch_dest == 0 ? 
0 : bb_rename[terminator.catch_dest]) + else + @assert isexpr(terminator, :leave) end + elseif !isa(terminator, ReturnNode) if bb_rename[bb + 1] != new_bb + 1 # Add an explicit goto node nidx = inst_range[end] + 1 @@ -452,7 +469,7 @@ function domsort_ssa!(ir::IRCode, domtree::DomTree) local new_preds, new_succs let bb = bb, bb_rename = bb_rename, result_order = result_order new_preds = Int[bb for bb in (rename_incoming_edge(i, bb, result_order, bb_rename) for i in ir.cfg.blocks[bb].preds) if bb != -1] - new_succs = Int[ rename_outgoing_edge(i, bb, result_order, bb_rename) for i in ir.cfg.blocks[bb].succs] + new_succs = Int[ rename_outgoing_edge(i, bb, result_order, bb_rename) for i in ir.cfg.blocks[bb].succs] end new_bbs[new_bb] = BasicBlock(inst_range, new_preds, new_succs) end diff --git a/test/compiler/irpasses.jl b/test/compiler/irpasses.jl index 740ac5f4958e4..13ef05db2f23a 100644 --- a/test/compiler/irpasses.jl +++ b/test/compiler/irpasses.jl @@ -1967,3 +1967,32 @@ let f = (x)->nothing, mi = Base.method_instance(f, (Base.RefValue{Nothing},)), c ir = Core.Compiler.sroa_pass!(ir, inlining) Core.Compiler.verify_ir(ir) end + +let code = Any[ + # block 1 + GotoNode(4), # skip + # block 2 + Expr(:leave, SSAValue(1)), # not domsorted - make sure we move it correctly + # block 3 + ReturnNode(2), + # block 4 + EnterNode(7), + # block 5 + GotoIfNot(Argument(1), 2), + # block 6 + Expr(:leave, SSAValue(1)), + # block 7 + ReturnNode(1), + # block 8 + ReturnNode(nothing), + ] + ir = make_ircode(code; ssavaluetypes=Any[Any, Any, Union{}, Any, Any, Any, Union{}, Union{}]) + @test length(ir.cfg.blocks) == 8 + Core.Compiler.verify_ir(ir) + + # The IR should remain valid after domsorting + # (esp. including the insertion of new BasicBlocks for any fix-ups) + domtree = Core.Compiler.construct_domtree(ir) + ir = Core.Compiler.domsort_ssa!(ir, domtree) + Core.Compiler.verify_ir(ir) +end From ca3713e7ac8489acd1afe0a47dc8ceeaafb4a292 Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Fri, 18 Oct 2024 16:29:42 +0200 Subject: [PATCH 245/537] fix infinite recursion in `promote_type` for `Irrational` (#55870) Fixes #51001 --- base/irrationals.jl | 11 ++++++++++- test/numbers.jl | 8 ++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/base/irrationals.jl b/base/irrationals.jl index c51b66045723f..76222997865c0 100644 --- a/base/irrationals.jl +++ b/base/irrationals.jl @@ -45,7 +45,16 @@ promote_rule(::Type{<:AbstractIrrational}, ::Type{Float16}) = Float16 promote_rule(::Type{<:AbstractIrrational}, ::Type{Float32}) = Float32 promote_rule(::Type{<:AbstractIrrational}, ::Type{<:AbstractIrrational}) = Float64 promote_rule(::Type{<:AbstractIrrational}, ::Type{T}) where {T<:Real} = promote_type(Float64, T) -promote_rule(::Type{S}, ::Type{T}) where {S<:AbstractIrrational,T<:Number} = promote_type(promote_type(S, real(T)), T) + +function promote_rule(::Type{S}, ::Type{T}) where {S<:AbstractIrrational,T<:Number} + U = promote_type(S, real(T)) + if S <: U + # prevent infinite recursion + promote_type(Float64, T) + else + promote_type(U, T) + end +end AbstractFloat(x::AbstractIrrational) = Float64(x)::Float64 Float16(x::AbstractIrrational) = Float16(Float32(x)::Float32) diff --git a/test/numbers.jl b/test/numbers.jl index fc3dc2c06bb7c..dc4f2cb613d77 100644 --- a/test/numbers.jl +++ b/test/numbers.jl @@ -2937,6 +2937,14 @@ end @test log(π,ComplexF32(2)) isa ComplexF32 end +@testset "irrational promotion shouldn't recurse without bound, issue #51001" begin + for s ∈ (:π, :ℯ) + T = Irrational{s} + @test 
promote_type(Complex{T}, T) <: Complex + @test promote_type(T, Complex{T}) <: Complex + end +end + @testset "printing non finite floats" begin let float_types = Set() allsubtypes!(Base, AbstractFloat, float_types) From e5aff12f63f7a2141f1f4a3eb69ff049618d6fbc Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Fri, 18 Oct 2024 12:01:45 -0300 Subject: [PATCH 246/537] codegen: replace store of freeze in allocop and in emit new struct with memset since aggregate stores are bad (#55879) This fixes the issues found in slack in the reinterprets of ```julia julia> split128_v2(x::UInt128) = (first(reinterpret(NTuple{2, UInt}, x)), last(reinterpret(NTuple{2, UInt}, x))) split128_v2 (generic function with 1 method) julia> split128(x::UInt128) = reinterpret(NTuple{2, UInt}, x) split128 (generic function with 1 method) @code_native split128(UInt128(5)) push rbp mov rbp, rsp mov rax, rdi mov qword ptr [rdi + 8], rdx mov qword ptr [rdi], rsi pop rbp ret @code_native split128_v2(UInt128(5)) push rbp mov rbp, rsp mov rax, rdi mov qword ptr [rdi], rsi mov qword ptr [rdi + 8], rdx pop rbp ret ``` vs on master where ```julia julia> @code_native split128(UInt128(5)) push rbp mov rbp, rsp mov eax, esi shr eax, 8 mov ecx, esi shr ecx, 16 mov r8, rsi mov r9, rsi vmovd xmm0, esi vpinsrb xmm0, xmm0, eax, 1 mov rax, rsi vpinsrb xmm0, xmm0, ecx, 2 mov rcx, rsi shr esi, 24 vpinsrb xmm0, xmm0, esi, 3 shr r8, 32 vpinsrb xmm0, xmm0, r8d, 4 shr r9, 40 vpinsrb xmm0, xmm0, r9d, 5 shr rax, 48 vpinsrb xmm0, xmm0, eax, 6 shr rcx, 56 vpinsrb xmm0, xmm0, ecx, 7 vpinsrb xmm0, xmm0, edx, 8 mov eax, edx shr eax, 8 vpinsrb xmm0, xmm0, eax, 9 mov eax, edx shr eax, 16 vpinsrb xmm0, xmm0, eax, 10 mov eax, edx shr eax, 24 vpinsrb xmm0, xmm0, eax, 11 mov rax, rdx shr rax, 32 vpinsrb xmm0, xmm0, eax, 12 mov rax, rdx shr rax, 40 vpinsrb xmm0, xmm0, eax, 13 mov rax, rdx shr rax, 48 vpinsrb xmm0, xmm0, eax, 14 mov rax, rdi shr rdx, 56 vpinsrb xmm0, xmm0, edx, 15 vmovdqu xmmword ptr [rdi], xmm0 pop rbp ret ``` --- src/cgutils.cpp | 31 ++++++++++++------------------- src/llvm-alloc-opt.cpp | 11 +++-------- test/llvmpasses/alloc-opt-pass.ll | 11 ++++------- 3 files changed, 19 insertions(+), 34 deletions(-) diff --git a/src/cgutils.cpp b/src/cgutils.cpp index 4547e693755cd..a166b0a2c4800 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -4213,7 +4213,7 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg else { strct = UndefValue::get(lt); if (nargs < nf) - strct = ctx.builder.CreateFreeze(strct); + strct = ctx.builder.CreateFreeze(strct); // Change this to zero initialize instead? 
} } else if (tracked.second) { @@ -4380,25 +4380,18 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg ctx.builder.restoreIP(savedIP); } } - for (size_t i = nargs; i < nf; i++) { - if (!jl_field_isptr(sty, i) && jl_is_uniontype(jl_field_type(sty, i))) { - ssize_t offs = jl_field_offset(sty, i); - ssize_t ptrsoffs = -1; - if (!inline_roots.empty()) - std::tie(offs, ptrsoffs) = split_value_field(sty, i); - assert(ptrsoffs < 0 && offs >= 0); - int fsz = jl_field_size(sty, i) - 1; - if (init_as_value) { + if (init_as_value) { + for (size_t i = nargs; i < nf; i++) { + if (!jl_field_isptr(sty, i) && jl_is_uniontype(jl_field_type(sty, i))) { + ssize_t offs = jl_field_offset(sty, i); + ssize_t ptrsoffs = -1; + if (!inline_roots.empty()) + std::tie(offs, ptrsoffs) = split_value_field(sty, i); + assert(ptrsoffs < 0 && offs >= 0); + int fsz = jl_field_size(sty, i) - 1; unsigned llvm_idx = convert_struct_offset(ctx, cast(lt), offs + fsz); strct = ctx.builder.CreateInsertValue(strct, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0), ArrayRef(llvm_idx)); } - else { - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_unionselbyte); - Instruction *dest = cast(emit_ptrgep(ctx, strct, offs + fsz)); - if (promotion_point == nullptr) - promotion_point = dest; - ai.decorateInst(ctx.builder.CreateAlignedStore(ctx.builder.getInt8(0), dest, Align(1))); - } } } if (nargs < nf) { @@ -4407,9 +4400,9 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg if (promotion_point) ctx.builder.SetInsertPoint(promotion_point); if (strct) { - promotion_point = cast(ctx.builder.CreateFreeze(UndefValue::get(lt))); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_stack); - ai.decorateInst(ctx.builder.CreateStore(promotion_point, strct)); + promotion_point = ai.decorateInst(ctx.builder.CreateMemSet(strct, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0), + jl_datatype_size(ty), MaybeAlign(jl_datatype_align(ty)))); } ctx.builder.restoreIP(savedIP); } diff --git a/src/llvm-alloc-opt.cpp b/src/llvm-alloc-opt.cpp index 0ec88c9d56356..a9e1b1e02da42 100644 --- a/src/llvm-alloc-opt.cpp +++ b/src/llvm-alloc-opt.cpp @@ -646,14 +646,9 @@ void Optimizer::initializeAlloca(IRBuilder<> &prolog_builder, AllocaInst *buff, return; assert(!buff->isArrayAllocation()); Type *T = buff->getAllocatedType(); - Value *Init = UndefValue::get(T); - if ((allockind & AllocFnKind::Zeroed) != AllocFnKind::Unknown) - Init = Constant::getNullValue(T); // zero, as described - else if (allockind == AllocFnKind::Unknown) - Init = Constant::getNullValue(T); // assume zeroed since we didn't find the attribute - else - Init = prolog_builder.CreateFreeze(UndefValue::get(T)); // assume freeze, since LLVM does not natively support this case - prolog_builder.CreateStore(Init, buff); + const DataLayout &DL = F.getParent()->getDataLayout(); + prolog_builder.CreateMemSet(buff, ConstantInt::get(Type::getInt8Ty(prolog_builder.getContext()), 0), DL.getTypeAllocSize(T), buff->getAlign()); + } // This function should not erase any safepoint so that the lifetime marker can find and cache diff --git a/test/llvmpasses/alloc-opt-pass.ll b/test/llvmpasses/alloc-opt-pass.ll index b962157120456..665687e86835d 100644 --- a/test/llvmpasses/alloc-opt-pass.ll +++ b/test/llvmpasses/alloc-opt-pass.ll @@ -76,7 +76,7 @@ L3: ; preds = %L2, %L1, %0 ; CHECK-LABEL: @legal_int_types ; CHECK: alloca [12 x i8] ; CHECK-NOT: alloca i96 -; CHECK: store [12 x i8] zeroinitializer, +; CHECK: call 
void @llvm.memset.p0.i64(ptr align 16 %var1, ; CHECK: ret void define void @legal_int_types() { %pgcstack = call ptr @julia.get_pgcstack() @@ -140,7 +140,7 @@ L2: ; preds = %0 ; CHECK: alloca ; CHECK-NOT: call token(...) @llvm.julia.gc_preserve_begin ; CHECK: call void @llvm.lifetime.start -; CHECK: store [8 x i8] zeroinitializer, +; CHECK: call void @llvm.memset.p0.i64(ptr align 16 %v, ; CHECK-NOT: call void @llvm.lifetime.end define void @lifetime_no_preserve_end(ptr noalias nocapture noundef nonnull sret({}) %0) { %pgcstack = call ptr @julia.get_pgcstack() @@ -164,11 +164,8 @@ define void @lifetime_no_preserve_end(ptr noalias nocapture noundef nonnull sret ; CHECK: alloca [1 x i8] ; CHECK-DAG: alloca [2 x i8] ; CHECK-DAG: alloca [3 x i8] -; CHECK-DAG: freeze [1 x i8] undef -; CHECK-DAG: store [1 x i8] % -; CHECK-DAG: store [3 x i8] zeroinitializer, -; CHECK-NOT: store -; CHECK-NOT: zeroinitializer +; CHECK-DAG: call void @llvm.memset.p0.i64(ptr align 1 %var1, +; CHECK-DAG: call void @llvm.memset.p0.i64(ptr align 4 %var7, ; CHECK: ret void define void @initializers() { %pgcstack = call ptr @julia.get_pgcstack() From 1157c6f9c400d2409bd27225e252203800c46bbf Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Fri, 18 Oct 2024 15:13:33 -0400 Subject: [PATCH 247/537] fix reporting of precompile configs on CI (#56232) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the header doesn't print for `Pkg.test` with coverage on ``` [8dfed614] Test v1.11.0 1077.2 ms ✓ RequiredInterfaces 1 dependency successfully precompiled in 1 seconds. 8 already precompiled. ``` --- base/precompilation.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/precompilation.jl b/base/precompilation.jl index a39563178632f..359d2b61800b1 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -800,8 +800,8 @@ function precompilepkgs(pkgs::Vector{String}=String[]; name *= color_string(" $(config_str)", :light_black) end lock(print_lock) do - if !fancyprint && target === nothing && isempty(pkg_queue) - printpkgstyle(io, :Precompiling, "packages...") + if !fancyprint && isempty(pkg_queue) + printpkgstyle(io, :Precompiling, something(target, "packages...")) end end push!(pkg_queue, pkg_config) From fc40e629b1d2ddc94ad9ecb87bc308b2892044e5 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 18 Oct 2024 15:33:29 -0400 Subject: [PATCH 248/537] stream: fix reading LibuvStream into array (#56092) Adds a new internal function `_take!(dst::Array{T,N}, src::Array{T,N})` for doing an efficient `copyto!` equivalent. Previously it was assumed that `compact` did this automatically, which wasn't a great assumption. Fixes #56078 --- base/array.jl | 11 +++++++++++ base/stream.jl | 4 +++- test/read.jl | 22 +++++++++++++++------- 3 files changed, 29 insertions(+), 8 deletions(-) diff --git a/base/array.jl b/base/array.jl index a628c1212659d..40907b2b00317 100644 --- a/base/array.jl +++ b/base/array.jl @@ -355,6 +355,17 @@ copy return $(Expr(:new, :(typeof(a)), :(memoryref(newmem)), :(a.size))) end +# a mutating version of copyto! 
that results in dst aliasing src afterwards +function _take!(dst::Array{T,N}, src::Array{T,N}) where {T,N} + if getfield(dst, :ref) !== getfield(src, :ref) + setfield!(dst, :ref, getfield(src, :ref)) + end + if getfield(dst, :size) !== getfield(src, :size) + setfield!(dst, :size, getfield(src, :size)) + end + return dst +end + ## Constructors ## similar(a::Array{T,1}) where {T} = Vector{T}(undef, size(a,1)) diff --git a/base/stream.jl b/base/stream.jl index 3ca5717be29db..2f00538ad0e96 100644 --- a/base/stream.jl +++ b/base/stream.jl @@ -941,6 +941,7 @@ function readbytes!(s::LibuvStream, a::Vector{UInt8}, nb::Int) if bytesavailable(sbuf) >= nb nread = readbytes!(sbuf, a, nb) else + initsize = length(a) newbuf = PipeBuffer(a, maxsize=nb) newbuf.size = newbuf.offset # reset the write pointer to the beginning nread = try @@ -951,7 +952,8 @@ function readbytes!(s::LibuvStream, a::Vector{UInt8}, nb::Int) finally s.buffer = sbuf end - compact(newbuf) + _take!(a, _unsafe_take!(newbuf)) + length(a) >= initsize || resize!(a, initsize) end iolock_end() return nread diff --git a/test/read.jl b/test/read.jl index 34224c146864e..99903d92d270f 100644 --- a/test/read.jl +++ b/test/read.jl @@ -268,13 +268,27 @@ for (name, f) in l n2 = readbytes!(s2, a2) @test n1 == n2 @test length(a1) == length(a2) - @test a1[1:n1] == a2[1:n2] + let l = min(l, n) + @test a1[1:l] == a2[1:l] + end @test n <= length(text) || eof(s1) @test n <= length(text) || eof(s2) cleanup() end + # Test growing output array + let x = UInt8[], + io = io() + n = readbytes!(io, x) + @test n == 0 + @test isempty(x) + n = readbytes!(io, x, typemax(Int)) + @test n == length(x) + @test x == codeunits(text) + cleanup() + end + verbose && println("$name read!...") l = length(text) for n = [1, 2, l-2, l-1, l] @@ -477,12 +491,6 @@ let s = "qwerty" @test read(IOBuffer(s)) == codeunits(s) @test read(IOBuffer(s), 10) == codeunits(s) @test read(IOBuffer(s), 1) == codeunits(s)[1:1] - - # Test growing output array - x = UInt8[] - n = readbytes!(IOBuffer(s), x, 10) - @test x == codeunits(s) - @test n == length(x) end From 82b150645e318bcb6bcb450e945b3b689a1eb264 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 18 Oct 2024 15:37:36 -0400 Subject: [PATCH 249/537] fix precompile process flag propagation (#56214) CacheFlags could get set, but were never propagated to the target process, so the result would be unusable. Additionally, the debug and optimization levels were not synchronized with the sysimg, causing a regression in pkgimage usability after moving out stdlibs. 
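As an illustration of the fix, the new `Base.translate_cache_flags` (added in `base/loading.jl` below) computes the extra command-line options handed to the precompile worker; for example (hypothetical REPL session, output assuming the defaults encoded in `DefaultCacheFlags`):

```julia
julia> flags = Base.CacheFlags(Base.DefaultCacheFlags; check_bounds=1, opt_level=3);

julia> Base.translate_cache_flags(flags, Base.DefaultCacheFlags)
2-element Vector{String}:
 "--check-bounds=yes"
 "-O3"
```

Only the options that differ from the defaults are emitted, so the worker's command line stays minimal.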
Fixes #56207 Fixes #56054 Fixes #56206 --- base/Base.jl | 2 +- base/loading.jl | 61 ++++++++++++++++++++++++++++------------ base/precompilation.jl | 63 +++++++++++++++++++++++++++--------------- base/show.jl | 5 +++- base/util.jl | 3 +- base/uuid.jl | 2 ++ pkgimage.mk | 3 +- src/staticdata_utils.c | 15 +++++----- test/loading.jl | 22 +++++++-------- 9 files changed, 112 insertions(+), 64 deletions(-) diff --git a/base/Base.jl b/base/Base.jl index 1e780bb15141a..9800462f855f9 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -533,6 +533,7 @@ include("deepcopy.jl") include("download.jl") include("summarysize.jl") include("errorshow.jl") +include("util.jl") include("initdefs.jl") Filesystem.__postinit__() @@ -549,7 +550,6 @@ include("loading.jl") # misc useful functions & macros include("timing.jl") -include("util.jl") include("client.jl") include("asyncmap.jl") diff --git a/base/loading.jl b/base/loading.jl index 49d0cb52cd37b..78c584d00852b 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1683,6 +1683,8 @@ function CacheFlags(cf::CacheFlags=CacheFlags(ccall(:jl_cache_flags, UInt8, ())) opt_level === nothing ? cf.opt_level : opt_level ) end +# reflecting jloptions.c defaults +const DefaultCacheFlags = CacheFlags(use_pkgimages=true, debug_level=isdebugbuild() ? 2 : 1, check_bounds=0, inline=true, opt_level=2) function _cacheflag_to_uint8(cf::CacheFlags)::UInt8 f = UInt8(0) @@ -1694,12 +1696,29 @@ function _cacheflag_to_uint8(cf::CacheFlags)::UInt8 return f end +function translate_cache_flags(cacheflags::CacheFlags, defaultflags::CacheFlags) + opts = String[] + cacheflags.use_pkgimages != defaultflags.use_pkgimages && push!(opts, cacheflags.use_pkgimages ? "--pkgimages=yes" : "--pkgimages=no") + cacheflags.debug_level != defaultflags.debug_level && push!(opts, "-g$(cacheflags.debug_level)") + cacheflags.check_bounds != defaultflags.check_bounds && push!(opts, ("--check-bounds=auto", "--check-bounds=yes", "--check-bounds=no")[cacheflags.check_bounds + 1]) + cacheflags.inline != defaultflags.inline && push!(opts, cacheflags.inline ? 
"--inline=yes" : "--inline=no") + cacheflags.opt_level != defaultflags.opt_level && push!(opts, "-O$(cacheflags.opt_level)") + return opts +end + function show(io::IO, cf::CacheFlags) - print(io, "use_pkgimages = ", cf.use_pkgimages) - print(io, ", debug_level = ", cf.debug_level) - print(io, ", check_bounds = ", cf.check_bounds) - print(io, ", inline = ", cf.inline) - print(io, ", opt_level = ", cf.opt_level) + print(io, "CacheFlags(") + print(io, "; use_pkgimages=") + print(io, cf.use_pkgimages) + print(io, ", debug_level=") + print(io, cf.debug_level) + print(io, ", check_bounds=") + print(io, cf.check_bounds) + print(io, ", inline=") + print(io, cf.inline) + print(io, ", opt_level=") + print(io, cf.opt_level) + print(io, ")") end struct ImageTarget @@ -2953,7 +2972,8 @@ end const PRECOMPILE_TRACE_COMPILE = Ref{String}() function create_expr_cache(pkg::PkgId, input::String, output::String, output_o::Union{Nothing, String}, - concrete_deps::typeof(_concrete_dependencies), flags::Cmd=``, internal_stderr::IO = stderr, internal_stdout::IO = stdout, isext::Bool=false) + concrete_deps::typeof(_concrete_dependencies), flags::Cmd=``, cacheflags::CacheFlags=CacheFlags(), + internal_stderr::IO = stderr, internal_stdout::IO = stdout, isext::Bool=false) @nospecialize internal_stderr internal_stdout rm(output, force=true) # Remove file if it exists output_o === nothing || rm(output_o, force=true) @@ -2996,24 +3016,29 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o:: deps = deps_eltype * "[" * join(deps_strs, ",") * "]" precomp_stack = "Base.PkgId[$(join(map(pkg_str, vcat(Base.precompilation_stack, pkg)), ", "))]" + if output_o === nothing + # remove options that make no difference given the other cache options + cacheflags = CacheFlags(cacheflags, opt_level=0) + end + opts = translate_cache_flags(cacheflags, CacheFlags()) # julia_cmd is generated for the running system, and must be fixed if running for precompile instead if output_o !== nothing @debug "Generating object cache file for $(repr("text/plain", pkg))" cpu_target = get(ENV, "JULIA_CPU_TARGET", nothing) - opts = `--output-o $(output_o) --output-ji $(output) --output-incremental=yes` + push!(opts, "--output-o", output_o) else @debug "Generating cache file for $(repr("text/plain", pkg))" cpu_target = nothing - opts = `-O0 --output-ji $(output) --output-incremental=yes` end + push!(opts, "--output-ji", output) + isassigned(PRECOMPILE_TRACE_COMPILE) && push!(opts, "--trace-compile=$(PRECOMPILE_TRACE_COMPILE[])") - trace = isassigned(PRECOMPILE_TRACE_COMPILE) ? `--trace-compile=$(PRECOMPILE_TRACE_COMPILE[]) --trace-compile-timing` : `` io = open(pipeline(addenv(`$(julia_cmd(;cpu_target)::Cmd) - $(flags) - $(opts) - --startup-file=no --history-file=no --warn-overwrite=yes - --color=$(have_color === nothing ? "auto" : have_color ? "yes" : "no") - $trace - -`, + $(flags) + $(opts) + --output-incremental=yes + --startup-file=no --history-file=no --warn-overwrite=yes + $(have_color === nothing ? "--color=auto" : have_color ? 
"--color=yes" : "--color=no") + -`, "OPENBLAS_NUM_THREADS" => 1, "JULIA_NUM_THREADS" => 1), stderr = internal_stderr, stdout = internal_stdout), @@ -3131,7 +3156,7 @@ function compilecache(pkg::PkgId, path::String, internal_stderr::IO = stderr, in close(tmpio_o) close(tmpio_so) end - p = create_expr_cache(pkg, path, tmppath, tmppath_o, concrete_deps, flags, internal_stderr, internal_stdout, isext) + p = create_expr_cache(pkg, path, tmppath, tmppath_o, concrete_deps, flags, cacheflags, internal_stderr, internal_stdout, isext) if success(p) if cache_objects @@ -4136,5 +4161,5 @@ end precompile(include_package_for_output, (PkgId, String, Vector{String}, Vector{String}, Vector{String}, typeof(_concrete_dependencies), Nothing)) || @assert false precompile(include_package_for_output, (PkgId, String, Vector{String}, Vector{String}, Vector{String}, typeof(_concrete_dependencies), String)) || @assert false -precompile(create_expr_cache, (PkgId, String, String, String, typeof(_concrete_dependencies), Cmd, IO, IO)) || @assert false -precompile(create_expr_cache, (PkgId, String, String, Nothing, typeof(_concrete_dependencies), Cmd, IO, IO)) || @assert false +precompile(create_expr_cache, (PkgId, String, String, String, typeof(_concrete_dependencies), Cmd, CacheFlags, IO, IO)) || @assert false +precompile(create_expr_cache, (PkgId, String, String, Nothing, typeof(_concrete_dependencies), Cmd, CacheFlags, IO, IO)) || @assert false diff --git a/base/precompilation.jl b/base/precompilation.jl index 359d2b61800b1..f597acef9b57f 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -362,7 +362,7 @@ function printpkgstyle(io, header, msg; color=:green) end const Config = Pair{Cmd, Base.CacheFlags} -const PkgConfig = Tuple{Base.PkgId,Config} +const PkgConfig = Tuple{PkgId,Config} function precompilepkgs(pkgs::Vector{String}=String[]; internal_call::Bool=false, @@ -375,8 +375,22 @@ function precompilepkgs(pkgs::Vector{String}=String[]; # asking for timing disables fancy mode, as timing is shown in non-fancy mode fancyprint::Bool = can_fancyprint(io) && !timing, manifest::Bool=false,) + # monomorphize this to avoid latency problems + _precompilepkgs(pkgs, internal_call, strict, warn_loaded, timing, _from_loading, + configs isa Vector{Config} ? configs : [configs], + IOContext{IO}(io), fancyprint, manifest) +end - configs = configs isa Config ? [configs] : configs +function _precompilepkgs(pkgs::Vector{String}, + internal_call::Bool, + strict::Bool, + warn_loaded::Bool, + timing::Bool, + _from_loading::Bool, + configs::Vector{Config}, + io::IOContext{IO}, + fancyprint::Bool, + manifest::Bool) requested_pkgs = copy(pkgs) # for understanding user intent time_start = time_ns() @@ -393,17 +407,32 @@ function precompilepkgs(pkgs::Vector{String}=String[]; if _from_loading && !Sys.isinteractive() && Base.get_bool_env("JULIA_TESTS", false) # suppress passive loading printing in julia test suite. 
`JULIA_TESTS` is set in Base.runtests - io = devnull + io = IOContext{IO}(devnull) end + nconfigs = length(configs) hascolor = get(io, :color, false)::Bool color_string(cstr::String, col::Union{Int64, Symbol}) = _color_string(cstr, col, hascolor) stale_cache = Dict{StaleCacheKey, Bool}() - exts = Dict{Base.PkgId, String}() # ext -> parent + exts = Dict{PkgId, String}() # ext -> parent # make a flat map of each dep and its direct deps - depsmap = Dict{Base.PkgId, Vector{Base.PkgId}}() - pkg_exts_map = Dict{Base.PkgId, Vector{Base.PkgId}}() + depsmap = Dict{PkgId, Vector{PkgId}}() + pkg_exts_map = Dict{PkgId, Vector{PkgId}}() + + function describe_pkg(pkg::PkgId, is_direct_dep::Bool, flags::Cmd, cacheflags::Base.CacheFlags) + name = haskey(exts, pkg) ? string(exts[pkg], " → ", pkg.name) : pkg.name + name = is_direct_dep ? name : color_string(name, :light_black) + if nconfigs > 1 && !isempty(flags) + config_str = join(flags, " ") + name *= color_string(" `$config_str`", :light_black) + end + if nconfigs > 1 + config_str = join(Base.translate_cache_flags(cacheflags, Base.DefaultCacheFlags), " ") + name *= color_string(" $config_str", :light_black) + end + return name + end for (dep, deps) in env.deps pkg = Base.PkgId(dep, env.names[dep]) @@ -569,7 +598,6 @@ function precompilepkgs(pkgs::Vector{String}=String[]; end end - nconfigs = length(configs) target = nothing if nconfigs == 1 if !isempty(only(configs)[1]) @@ -584,7 +612,7 @@ function precompilepkgs(pkgs::Vector{String}=String[]; failed_deps = Dict{PkgConfig, String}() precomperr_deps = PkgConfig[] # packages that may succeed after a restart (i.e. loaded packages with no cache file) - print_lock = io isa Base.LibuvStream ? io.lock::ReentrantLock : ReentrantLock() + print_lock = io.io isa Base.LibuvStream ? io.io.lock::ReentrantLock : ReentrantLock() first_started = Base.Event() printloop_should_exit::Bool = !fancyprint # exit print loop immediately if not fancy printing interrupted_or_done = Base.Event() @@ -677,7 +705,7 @@ function precompilepkgs(pkgs::Vector{String}=String[]; n_print_rows = 0 while !printloop_should_exit lock(print_lock) do - term_size = Base.displaysize_(io) + term_size = displaysize(io) num_deps_show = max(term_size[1] - 3, 2) # show at least 2 deps pkg_queue_show = if !interrupted_or_done.set && length(pkg_queue) > num_deps_show last(pkg_queue, num_deps_show) @@ -692,7 +720,7 @@ function precompilepkgs(pkgs::Vector{String}=String[]; bar.max = n_total - n_already_precomp # when sizing to the terminal width subtract a little to give some tolerance to resizing the # window between print cycles - termwidth = Base.displaysize_(io)[2] - 4 + termwidth = displaysize(io)[2] - 4 if !final_loop str = sprint(io -> show_progress(io, bar; termwidth, carriagereturn=false); context=io) print(iostr, Base._truncate_at_width_or_chars(true, str, termwidth), "\n") @@ -700,12 +728,8 @@ function precompilepkgs(pkgs::Vector{String}=String[]; for pkg_config in pkg_queue_show dep, config = pkg_config loaded = warn_loaded && haskey(Base.loaded_modules, dep) - _name = haskey(exts, dep) ? string(exts[dep], " → ", dep.name) : dep.name - name = dep in direct_deps ? _name : string(color_string(_name, :light_black)) - if nconfigs > 1 && !isempty(config[1]) - config_str = "$(join(config[1], " "))" - name *= color_string(" $(config_str)", :light_black) - end + flags, cacheflags = config + name = describe_pkg(dep, dep in direct_deps, flags, cacheflags) line = if pkg_config in precomperr_deps string(color_string(" ? 
", Base.warn_color()), name) elseif haskey(failed_deps, pkg_config) @@ -793,12 +817,7 @@ function precompilepkgs(pkgs::Vector{String}=String[]; std_pipe = Base.link_pipe!(Pipe(); reader_supports_async=true, writer_supports_async=true) t_monitor = @async monitor_std(pkg_config, std_pipe; single_requested_pkg) - _name = haskey(exts, pkg) ? string(exts[pkg], " → ", pkg.name) : pkg.name - name = is_direct_dep ? _name : string(color_string(_name, :light_black)) - if nconfigs > 1 && !isempty(flags) - config_str = "$(join(flags, " "))" - name *= color_string(" $(config_str)", :light_black) - end + name = describe_pkg(pkg, is_direct_dep, flags, cacheflags) lock(print_lock) do if !fancyprint && isempty(pkg_queue) printpkgstyle(io, :Precompiling, something(target, "packages...")) diff --git a/base/show.jl b/base/show.jl index fb932838ac69a..25ed99f50b5b0 100644 --- a/base/show.jl +++ b/base/show.jl @@ -324,8 +324,11 @@ end convert(::Type{IOContext}, io::IOContext) = io convert(::Type{IOContext}, io::IO) = IOContext(io, ioproperties(io))::IOContext +convert(::Type{IOContext{IO_t}}, io::IOContext{IO_t}) where {IO_t} = io +convert(::Type{IOContext{IO_t}}, io::IO) where {IO_t} = IOContext{IO_t}(io, ioproperties(io))::IOContext{IO_t} IOContext(io::IO) = convert(IOContext, io) +IOContext{IO_t}(io::IO) where {IO_t} = convert(IOContext{IO_t}, io) function IOContext(io::IO, KV::Pair) d = ioproperties(io) @@ -427,7 +430,7 @@ get(io::IO, key, default) = default keys(io::IOContext) = keys(io.dict) keys(io::IO) = keys(ImmutableDict{Symbol,Any}()) -displaysize(io::IOContext) = haskey(io, :displaysize) ? io[:displaysize]::Tuple{Int,Int} : Base.displaysize_(io.io) +displaysize(io::IOContext) = haskey(io, :displaysize) ? io[:displaysize]::Tuple{Int,Int} : displaysize(io.io) show_circular(io::IO, @nospecialize(x)) = false function show_circular(io::IOContext, @nospecialize(x)) diff --git a/base/util.jl b/base/util.jl index 95d62c4a16e1d..3ce64e50f7e29 100644 --- a/base/util.jl +++ b/base/util.jl @@ -249,7 +249,7 @@ function julia_cmd(julia=joinpath(Sys.BINDIR, julia_exename()); cpu_target::Unio end function julia_exename() - if !Base.isdebugbuild() + if !isdebugbuild() return @static Sys.iswindows() ? "julia.exe" : "julia" else return @static Sys.iswindows() ? 
"julia-debug.exe" : "julia-debug" @@ -530,7 +530,6 @@ function _crc32c(io::IO, nb::Integer, crc::UInt32=0x00000000) end _crc32c(io::IO, crc::UInt32=0x00000000) = _crc32c(io, typemax(Int64), crc) _crc32c(io::IOStream, crc::UInt32=0x00000000) = _crc32c(io, filesize(io)-position(io), crc) -_crc32c(uuid::UUID, crc::UInt32=0x00000000) = _crc32c(uuid.value, crc) _crc32c(x::UInt128, crc::UInt32=0x00000000) = ccall(:jl_crc32c, UInt32, (UInt32, Ref{UInt128}, Csize_t), crc, x, 16) _crc32c(x::UInt64, crc::UInt32=0x00000000) = diff --git a/base/uuid.jl b/base/uuid.jl index 9b2da3c6409db..56f3a6aa417e7 100644 --- a/base/uuid.jl +++ b/base/uuid.jl @@ -36,6 +36,8 @@ let Base.hash(uuid::UUID, h::UInt) = hash(uuid_hash_seed, hash(convert(NTuple{2, UInt64}, uuid), h)) end +_crc32c(uuid::UUID, crc::UInt32=0x00000000) = _crc32c(uuid.value, crc) + let @inline function uuid_kernel(s, i, u) _c = UInt32(@inbounds codeunit(s, i)) diff --git a/pkgimage.mk b/pkgimage.mk index 0bc035ee03b08..78b2618be549f 100644 --- a/pkgimage.mk +++ b/pkgimage.mk @@ -25,7 +25,8 @@ print-depot-path: @$(call PRINT_JULIA, $(call spawn,$(JULIA_EXECUTABLE)) --startup-file=no -e '@show Base.DEPOT_PATH') $(BUILDDIR)/stdlib/%.image: $(JULIAHOME)/stdlib/Project.toml $(JULIAHOME)/stdlib/Manifest.toml $(INDEPENDENT_STDLIBS_SRCS) $(JULIA_DEPOT_PATH)/compiled - @$(call PRINT_JULIA, JULIA_CPU_TARGET="$(JULIA_CPU_TARGET)" $(call spawn,$(JULIA_EXECUTABLE)) --startup-file=no -e 'Base.Precompilation.precompilepkgs(;configs=[``=>Base.CacheFlags(), `--check-bounds=yes`=>Base.CacheFlags(;check_bounds=1)])') + @$(call PRINT_JULIA, JULIA_CPU_TARGET="$(JULIA_CPU_TARGET)" $(call spawn,$(JULIA_EXECUTABLE)) --startup-file=no -e \ + 'Base.Precompilation.precompilepkgs(configs=[``=>Base.CacheFlags(debug_level=2, opt_level=3), ``=>Base.CacheFlags(check_bounds=1, debug_level=2, opt_level=3)])') touch $@ $(BUILDDIR)/stdlib/release.image: $(build_private_libdir)/sys.$(SHLIB_EXT) diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index 5f1095fec9168..9a7653972ea7c 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -605,15 +605,15 @@ static void write_mod_list(ios_t *s, jl_array_t *a) write_int32(s, 0); } -// OPT_LEVEL should always be the upper bits #define OPT_LEVEL 6 +#define DEBUG_LEVEL 1 JL_DLLEXPORT uint8_t jl_cache_flags(void) { // OOICCDDP uint8_t flags = 0; flags |= (jl_options.use_pkgimages & 1); // 0-bit - flags |= (jl_options.debug_level & 3) << 1; // 1-2 bit + flags |= (jl_options.debug_level & 3) << DEBUG_LEVEL; // 1-2 bit flags |= (jl_options.check_bounds & 3) << 3; // 3-4 bit flags |= (jl_options.can_inline & 1) << 5; // 5-bit flags |= (jl_options.opt_level & 3) << OPT_LEVEL; // 6-7 bit @@ -636,14 +636,13 @@ JL_DLLEXPORT uint8_t jl_match_cache_flags(uint8_t requested_flags, uint8_t actua actual_flags &= ~1; } - // 2. Check all flags, except opt level must be exact - uint8_t mask = (1 << OPT_LEVEL)-1; + // 2. Check all flags, except opt level and debug level must be exact + uint8_t mask = (~(3u << OPT_LEVEL) & ~(3u << DEBUG_LEVEL)) & 0x7f; if ((actual_flags & mask) != (requested_flags & mask)) return 0; - // 3. allow for higher optimization flags in cache - actual_flags >>= OPT_LEVEL; - requested_flags >>= OPT_LEVEL; - return actual_flags >= requested_flags; + // 3. 
allow for higher optimization and debug level flags in cache to minimize required compile option combinations + return ((actual_flags >> OPT_LEVEL) & 3) >= ((requested_flags >> OPT_LEVEL) & 3) && + ((actual_flags >> DEBUG_LEVEL) & 3) >= ((requested_flags >> DEBUG_LEVEL) & 3); } JL_DLLEXPORT uint8_t jl_match_cache_flags_current(uint8_t flags) diff --git a/test/loading.jl b/test/loading.jl index 9e7e40ff3b50a..ec4a0391a412a 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -1225,10 +1225,7 @@ end @test cf.check_bounds == 3 @test cf.inline @test cf.opt_level == 3 - - io = PipeBuffer() - show(io, cf) - @test read(io, String) == "use_pkgimages = true, debug_level = 3, check_bounds = 3, inline = true, opt_level = 3" + @test repr(cf) == "CacheFlags(; use_pkgimages=true, debug_level=3, check_bounds=3, inline=true, opt_level=3)" end empty!(Base.DEPOT_PATH) @@ -1420,13 +1417,16 @@ end "JULIA_DEPOT_PATH" => depot_path, "JULIA_DEBUG" => "loading") - out = Pipe() - proc = run(pipeline(cmd, stdout=out, stderr=out)) - close(out.in) - - log = @async String(read(out)) - @test success(proc) - fetch(log) + out = Base.PipeEndpoint() + log = @async read(out, String) + try + proc = run(pipeline(cmd, stdout=out, stderr=out)) + @test success(proc) + catch + @show fetch(log) + rethrow() + end + return fetch(log) end log = load_package("Parent", `--compiled-modules=no --pkgimages=no`) From f36f34298e5154b14fea1ec680b4e6c369b61d2b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Fri, 18 Oct 2024 22:42:50 +0100 Subject: [PATCH 250/537] Do not call `rand` during sysimage precompilation (#56227) This change by itself doesn't do anything significant on `master`, but when backported to the v1.11 branch it'll address #56177. However it'd be great if someone could tell _why_ this fixes that issue, because it looks very unrelated. --------- Co-authored-by: Ian Butterworth --- contrib/generate_precompile.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/generate_precompile.jl b/contrib/generate_precompile.jl index 04d13011d6223..037e8926d5003 100644 --- a/contrib/generate_precompile.jl +++ b/contrib/generate_precompile.jl @@ -183,10 +183,10 @@ for match = Base._methods(+, (Int, Int), -1, Base.get_world_counter()) # interactive startup uses this write(IOBuffer(), "") - # not critical, but helps hide unrelated compilation from @time when using --trace-compile - foo() = rand(2,2) * rand(2,2) - @time foo() - @time foo() + # Not critical, but helps hide unrelated compilation from @time when using --trace-compile. + f55729() = Base.Experimental.@force_compile + @time @eval f55729() + @time @eval f55729() break # only actually need to do this once end From cd99cfc4d39c09b3edbb6040639b4baa47882f6e Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 18 Oct 2024 22:03:58 -0400 Subject: [PATCH 251/537] Split up the one big codegen lock into per-function locks and dependency edge tracking (#56179) Disjoint content can be LLVM optimized in parallel now, since codegen no longer has any ability to handle recursion, and compilation should even be able to run in parallel with the GC also. Removes any remaining global state, since that is unsafe. Adds a C++ shim for concurrent gc support in conjunction with using a `std::unique_lock` to DRY code. 
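(Illustration only, not part of this patch: the "GC-safe lock" idea mentioned above can be sketched as a `std::unique_lock` wrapper that marks the thread GC-safe while it blocks, so a thread waiting on a codegen lock never holds up a collection. The `gc_state_shim` helpers below are hypothetical stand-ins for the runtime's real GC state-transition calls, not the actual code added here.)

```cpp
#include <mutex>

// Hypothetical stand-ins for the runtime's GC state transitions.
struct gc_state_shim {
    static int enter_safe() { return 0; }    // e.g. mark this thread GC-safe
    static void leave_safe(int prior) {}     // e.g. restore the previous state
};

// RAII lock that blocks while the thread is GC-safe, then restores the state.
class gc_safe_unique_lock {
    std::unique_lock<std::mutex> native; // unlocks automatically on destruction
public:
    explicit gc_safe_unique_lock(std::mutex &m) : native(m, std::defer_lock) {
        int prior = gc_state_shim::enter_safe(); // GC may run while we wait here
        native.lock();
        gc_state_shim::leave_safe(prior);
    }
};
```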
Fix RuntimeDyld implementation: Since we use the ForwardingMemoryManager instead of making a new RTDyldMemoryManager object every time, we need to reference count the finalizeMemory calls so that we only call that at the end of relocating everything when everything is ready. We already happen to conveniently have a shared_ptr here, so just use that instead of inventing a duplicate counter. Fixes many OC bugs, including mostly fixing #55035, since this bug is just that much harder to express in the more constrained API. --- src/aotcompile.cpp | 164 ++++- src/cgmemmgr.cpp | 128 ++-- src/clangsa/GCChecker.cpp | 11 +- src/codegen.cpp | 528 ++++++---------- src/debug-registry.h | 14 +- src/debuginfo.cpp | 31 +- src/engine.cpp | 87 ++- src/gf.c | 17 + src/jitlayers.cpp | 1216 +++++++++++++++++++++++-------------- src/jitlayers.h | 94 +-- src/julia.h | 12 +- src/julia_atomics.h | 4 +- src/julia_internal.h | 7 +- src/julia_locks.h | 27 + src/opaque_closure.c | 11 +- src/pipeline.cpp | 6 + src/stackwalk.c | 4 +- 17 files changed, 1336 insertions(+), 1025 deletions(-) diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index 279686c387e1b..a3ffdf1d051a9 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -295,12 +295,12 @@ jl_code_instance_t *jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_ jl_value_t *ci = cgparams.lookup(mi, world, world); JL_GC_PROMISE_ROOTED(ci); jl_code_instance_t *codeinst = NULL; - JL_GC_PUSH1(&codeinst); if (ci != jl_nothing && jl_atomic_load_relaxed(&((jl_code_instance_t *)ci)->inferred) != jl_nothing) { codeinst = (jl_code_instance_t*)ci; } else { if (cgparams.lookup != jl_rettype_inferred_addr) { + // XXX: This will corrupt and leak a lot of memory which may be very bad jl_error("Refusing to automatically run type inference with custom cache lookup."); } else { @@ -309,15 +309,129 @@ jl_code_instance_t *jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_ * it into the cache here, since it was explicitly requested and is * otherwise not reachable from anywhere in the system image. */ - if (!jl_mi_cache_has_ci(mi, codeinst)) + if (codeinst && !jl_mi_cache_has_ci(mi, codeinst)) { + JL_GC_PUSH1(&codeinst); jl_mi_cache_insert(mi, codeinst); + JL_GC_POP(); + } } } - JL_GC_POP(); return codeinst; } -arraylist_t new_invokes; +typedef DenseMap<jl_code_instance_t*, std::pair<orc::ThreadSafeModule, jl_llvm_functions_t>> jl_compiled_functions_t; +static void compile_workqueue(jl_codegen_params_t &params, CompilationPolicy policy, jl_compiled_functions_t &compiled_functions) +{ + decltype(params.workqueue) workqueue; + std::swap(params.workqueue, workqueue); + jl_code_info_t *src = NULL; + jl_code_instance_t *codeinst = NULL; + JL_GC_PUSH2(&src, &codeinst); + assert(!params.cache); + while (!workqueue.empty()) { + auto it = workqueue.pop_back_val(); + codeinst = it.first; + auto &proto = it.second; + // try to emit code for this item from the workqueue + StringRef invokeName = ""; + StringRef preal_decl = ""; + bool preal_specsig = false; + { + auto it = compiled_functions.find(codeinst); + if (it == compiled_functions.end()) { + // Reinfer the function. The JIT came along and removed the inferred + // method body.
See #34993 + if ((policy != CompilationPolicy::Default || params.params->trim) && + jl_atomic_load_relaxed(&codeinst->inferred) == jl_nothing) { + // XXX: SOURCE_MODE_FORCE_SOURCE is wrong here (neither sufficient nor necessary) + codeinst = jl_type_infer(codeinst->def, jl_atomic_load_relaxed(&codeinst->max_world), SOURCE_MODE_FORCE_SOURCE); + } + if (codeinst) { + orc::ThreadSafeModule result_m = + jl_create_ts_module(name_from_method_instance(codeinst->def), + params.tsctx, params.DL, params.TargetTriple); + auto decls = jl_emit_codeinst(result_m, codeinst, NULL, params); + if (result_m) + it = compiled_functions.insert(std::make_pair(codeinst, std::make_pair(std::move(result_m), std::move(decls)))).first; + } + } + if (it != compiled_functions.end()) { + auto &decls = it->second.second; + invokeName = decls.functionObject; + if (decls.functionObject == "jl_fptr_args") { + preal_decl = decls.specFunctionObject; + } + else if (decls.functionObject != "jl_fptr_sparam" && decls.functionObject != "jl_f_opaque_closure_call") { + preal_decl = decls.specFunctionObject; + preal_specsig = true; + } + } + } + // patch up the prototype we emitted earlier + Module *mod = proto.decl->getParent(); + assert(proto.decl->isDeclaration()); + Function *pinvoke = nullptr; + if (preal_decl.empty()) { + if (invokeName.empty() && params.params->trim) { + errs() << "Bailed out to invoke when compiling:"; + jl_(codeinst->def); + abort(); + } + pinvoke = emit_tojlinvoke(codeinst, invokeName, mod, params); + if (!proto.specsig) + proto.decl->replaceAllUsesWith(pinvoke); + } + if (proto.specsig && !preal_specsig) { + // get or build an fptr1 that can invoke codeinst + if (pinvoke == nullptr) + pinvoke = get_or_emit_fptr1(preal_decl, mod); + // emit specsig-to-(jl)invoke conversion + proto.decl->setLinkage(GlobalVariable::InternalLinkage); + //protodecl->setAlwaysInline(); + jl_init_function(proto.decl, params.TargetTriple); + jl_method_instance_t *mi = codeinst->def; + size_t nrealargs = jl_nparams(mi->specTypes); // number of actual arguments being passed + bool is_opaque_closure = jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure; + // TODO: maybe this can be cached in codeinst->specfptr? 
+ emit_specsig_to_fptr1(proto.decl, proto.cc, proto.return_roots, mi->specTypes, codeinst->rettype, is_opaque_closure, nrealargs, params, pinvoke, 0, 0); + preal_decl = ""; // no need to fixup the name + } + if (!preal_decl.empty()) { + // merge and/or rename this prototype to the real function + if (Value *specfun = mod->getNamedValue(preal_decl)) { + if (proto.decl != specfun) + proto.decl->replaceAllUsesWith(specfun); + } + else { + proto.decl->setName(preal_decl); + } + } + if (proto.oc) { // additionally, if we are dealing with an oc, then we might also need to fix up the fptr1 reference too + assert(proto.specsig); + StringRef ocinvokeDecl = invokeName; + // if OC expected a specialized specsig dispatch, but we don't have it, use the inner trampoline here too + // XXX: this invoke translation logic is supposed to exactly match new_opaque_closure + if (!preal_specsig || ocinvokeDecl == "jl_f_opaque_closure_call" || ocinvokeDecl == "jl_fptr_interpret_call" || ocinvokeDecl == "jl_fptr_const_return") + ocinvokeDecl = pinvoke->getName(); + assert(!ocinvokeDecl.empty()); + assert(ocinvokeDecl != "jl_fptr_args"); + assert(ocinvokeDecl != "jl_fptr_sparam"); + // merge and/or rename this prototype to the real function + if (Value *specfun = mod->getNamedValue(ocinvokeDecl)) { + if (proto.oc != specfun) + proto.oc->replaceAllUsesWith(specfun); + } + else { + proto.oc->setName(ocinvokeDecl); + } + } + workqueue.append(params.workqueue); + params.workqueue.clear(); + } + JL_GC_POP(); +} + + // takes the running content that has collected in the shadow module and dump it to disk // this builds the object file portion of the sysimage files for fast startup, and can // also be used be extern consumers like GPUCompiler.jl to obtain a module containing @@ -346,7 +460,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm orc::ThreadSafeContext ctx; orc::ThreadSafeModule backing; if (!llvmmod) { - ctx = jl_ExecutionEngine->acquireContext(); + ctx = jl_ExecutionEngine->makeContext(); backing = jl_create_ts_module("text", ctx); } orc::ThreadSafeModule &clone = llvmmod ? 
*unwrap(llvmmod) : backing; @@ -367,11 +481,11 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm params.imaging_mode = imaging; params.debug_level = cgparams->debug_info_level; params.external_linkage = _external_linkage; - arraylist_new(&new_invokes, 0); size_t compile_for[] = { jl_typeinf_world, _world }; int worlds = 0; if (jl_options.trim != JL_TRIM_NO) worlds = 1; + jl_compiled_functions_t compiled_functions; for (; worlds < 2; worlds++) { JL_TIMING(NATIVE_AOT, NATIVE_Codegen); size_t this_world = compile_for[worlds]; @@ -391,7 +505,6 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm continue; } mi = (jl_method_instance_t*)item; -compile_mi: src = NULL; // if this method is generally visible to the current compilation world, // and this is either the primary world, or not applicable in the primary world @@ -406,7 +519,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm jl_(mi); abort(); } - if (codeinst && !params.compiled_functions.count(codeinst) && !data->jl_fvar_map.count(codeinst)) { + if (codeinst && !compiled_functions.count(codeinst) && !data->jl_fvar_map.count(codeinst)) { // now add it to our compilation results // Const returns do not do codegen, but juliac inspects codegen results so make a dummy fvar entry to represent it if (jl_options.trim != JL_TRIM_NO && jl_atomic_load_relaxed(&codeinst->invoke) == jl_fptr_const_return_addr) { @@ -418,7 +531,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm Triple(clone.getModuleUnlocked()->getTargetTriple())); jl_llvm_functions_t decls = jl_emit_codeinst(result_m, codeinst, NULL, params); if (result_m) - params.compiled_functions[codeinst] = {std::move(result_m), std::move(decls)}; + compiled_functions[codeinst] = {std::move(result_m), std::move(decls)}; else if (jl_options.trim != JL_TRIM_NO) { // if we're building a small image, we need to compile everything // to ensure that we have all the information we need. @@ -428,26 +541,19 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm } } } - } else if (this_world != jl_typeinf_world) { + } + else if (this_world != jl_typeinf_world) { /* jl_safe_printf("Codegen could not find requested codeinstance to be compiled\n"); jl_(mi); abort(); */ } - // TODO: is goto the best way to do this? 
- jl_compile_workqueue(params, policy); - mi = (jl_method_instance_t*)arraylist_pop(&new_invokes); - if (mi != NULL) { - goto compile_mi; - } } - - // finally, make sure all referenced methods also get compiled or fixed up - jl_compile_workqueue(params, policy); } JL_GC_POP(); - arraylist_free(&new_invokes); + // finally, make sure all referenced methods also get compiled or fixed up + compile_workqueue(params, policy, compiled_functions); // process the globals array, before jl_merge_module destroys them SmallVector gvars(params.global_targets.size()); @@ -464,7 +570,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm data->jl_value_to_llvm[idx] = global.first; idx++; } - CreateNativeMethods += params.compiled_functions.size(); + CreateNativeMethods += compiled_functions.size(); size_t offset = gvars.size(); data->jl_external_to_llvm.resize(params.external_fns.size()); @@ -489,7 +595,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm { JL_TIMING(NATIVE_AOT, NATIVE_Merge); Linker L(*clone.getModuleUnlocked()); - for (auto &def : params.compiled_functions) { + for (auto &def : compiled_functions) { jl_merge_module(clone, std::move(std::get<0>(def.second))); jl_code_instance_t *this_code = def.first; jl_llvm_functions_t decls = std::get<1>(def.second); @@ -573,9 +679,6 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm } ct->reentrant_timing &= ~1ull; } - if (ctx.getContext()) { - jl_ExecutionEngine->releaseContext(std::move(ctx)); - } return (void*)data; } @@ -1975,11 +2078,6 @@ void jl_dump_native_impl(void *native_code, } } -void addTargetPasses(legacy::PassManagerBase *PM, const Triple &triple, TargetIRAnalysis analysis) -{ - PM->add(new TargetLibraryInfoWrapperPass(triple)); - PM->add(createTargetTransformInfoWrapperPass(std::move(analysis))); -} // sometimes in GDB you want to find out what code would be created from a mi extern "C" JL_DLLEXPORT_CODEGEN jl_code_info_t *jl_gdbdumpcode(jl_method_instance_t *mi) @@ -2037,8 +2135,8 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, jl_ dump->F = nullptr; dump->TSM = nullptr; if (src && jl_is_code_info(src)) { - auto ctx = jl_ExecutionEngine->getContext(); - orc::ThreadSafeModule m = jl_create_ts_module(name_from_method_instance(mi), *ctx); + auto ctx = jl_ExecutionEngine->makeContext(); + orc::ThreadSafeModule m = jl_create_ts_module(name_from_method_instance(mi), ctx); uint64_t compiler_start_time = 0; uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled); if (measure_compile_time_enabled) @@ -2046,7 +2144,7 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, jl_ auto target_info = m.withModuleDo([&](Module &M) { return std::make_pair(M.getDataLayout(), Triple(M.getTargetTriple())); }); - jl_codegen_params_t output(*ctx, std::move(target_info.first), std::move(target_info.second)); + jl_codegen_params_t output(ctx, std::move(target_info.first), std::move(target_info.second)); output.params = ¶ms; output.imaging_mode = imaging_default(); // This would be nice, but currently it causes some assembly regressions that make printed output diff --git a/src/cgmemmgr.cpp b/src/cgmemmgr.cpp index 8557698a4e513..c257d2a2e3331 100644 --- a/src/cgmemmgr.cpp +++ b/src/cgmemmgr.cpp @@ -32,14 +32,14 @@ namespace { -static size_t get_block_size(size_t size) +static size_t get_block_size(size_t size) JL_NOTSAFEPOINT { return (size > jl_page_size * 256 ? 
LLT_ALIGN(size, jl_page_size) : jl_page_size * 256); } // Wrapper function to mmap/munmap/mprotect pages... -static void *map_anon_page(size_t size) +static void *map_anon_page(size_t size) JL_NOTSAFEPOINT { #ifdef _OS_WINDOWS_ char *mem = (char*)VirtualAlloc(NULL, size + jl_page_size, @@ -54,7 +54,7 @@ static void *map_anon_page(size_t size) return mem; } -static void unmap_page(void *ptr, size_t size) +static void unmap_page(void *ptr, size_t size) JL_NOTSAFEPOINT { #ifdef _OS_WINDOWS_ VirtualFree(ptr, size, MEM_DECOMMIT); @@ -71,7 +71,7 @@ enum class Prot : int { NO = PAGE_NOACCESS }; -static void protect_page(void *ptr, size_t size, Prot flags) +static void protect_page(void *ptr, size_t size, Prot flags) JL_NOTSAFEPOINT { DWORD old_prot; if (!VirtualProtect(ptr, size, (DWORD)flags, &old_prot)) { @@ -89,7 +89,7 @@ enum class Prot : int { NO = PROT_NONE }; -static void protect_page(void *ptr, size_t size, Prot flags) +static void protect_page(void *ptr, size_t size, Prot flags) JL_NOTSAFEPOINT { int ret = mprotect(ptr, size, (int)flags); if (ret != 0) { @@ -98,7 +98,7 @@ static void protect_page(void *ptr, size_t size, Prot flags) } } -static bool check_fd_or_close(int fd) +static bool check_fd_or_close(int fd) JL_NOTSAFEPOINT { if (fd == -1) return false; @@ -129,7 +129,7 @@ static intptr_t anon_hdl = -1; // Also, creating big file mapping and then map pieces of it seems to // consume too much global resources. Therefore, we use each file mapping // as a block on windows -static void *create_shared_map(size_t size, size_t id) +static void *create_shared_map(size_t size, size_t id) JL_NOTSAFEPOINT { void *addr = MapViewOfFile((HANDLE)id, FILE_MAP_ALL_ACCESS, 0, 0, size); @@ -137,13 +137,13 @@ static void *create_shared_map(size_t size, size_t id) return addr; } -static intptr_t init_shared_map() +static intptr_t init_shared_map() JL_NOTSAFEPOINT { anon_hdl = 0; return 0; } -static void *alloc_shared_page(size_t size, size_t *id, bool exec) +static void *alloc_shared_page(size_t size, size_t *id, bool exec) JL_NOTSAFEPOINT { assert(size % jl_page_size == 0); DWORD file_mode = exec ? 
PAGE_EXECUTE_READWRITE : PAGE_READWRITE; @@ -162,7 +162,7 @@ static void *alloc_shared_page(size_t size, size_t *id, bool exec) } #else // _OS_WINDOWS_ // For shared mapped region -static intptr_t get_anon_hdl(void) +static intptr_t get_anon_hdl(void) JL_NOTSAFEPOINT { int fd = -1; @@ -228,7 +228,7 @@ static struct _make_shared_map_lock { }; } shared_map_lock; -static size_t get_map_size_inc() +static size_t get_map_size_inc() JL_NOTSAFEPOINT { rlimit rl; if (getrlimit(RLIMIT_FSIZE, &rl) != -1) { @@ -242,7 +242,7 @@ static size_t get_map_size_inc() return map_size_inc_default; } -static void *create_shared_map(size_t size, size_t id) +static void *create_shared_map(size_t size, size_t id) JL_NOTSAFEPOINT { void *addr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, anon_hdl, id); @@ -250,7 +250,7 @@ static void *create_shared_map(size_t size, size_t id) return addr; } -static intptr_t init_shared_map() +static intptr_t init_shared_map() JL_NOTSAFEPOINT { anon_hdl = get_anon_hdl(); if (anon_hdl == -1) @@ -265,7 +265,7 @@ static intptr_t init_shared_map() return anon_hdl; } -static void *alloc_shared_page(size_t size, size_t *id, bool exec) +static void *alloc_shared_page(size_t size, size_t *id, bool exec) JL_NOTSAFEPOINT { assert(size % jl_page_size == 0); size_t off = jl_atomic_fetch_add(&map_offset, size); @@ -292,7 +292,7 @@ static void *alloc_shared_page(size_t size, size_t *id, bool exec) #ifdef _OS_LINUX_ // Using `/proc/self/mem`, A.K.A. Keno's remote memory manager. -ssize_t pwrite_addr(int fd, const void *buf, size_t nbyte, uintptr_t addr) +ssize_t pwrite_addr(int fd, const void *buf, size_t nbyte, uintptr_t addr) JL_NOTSAFEPOINT { static_assert(sizeof(off_t) >= 8, "off_t is smaller than 64bits"); #ifdef _P64 @@ -319,7 +319,7 @@ ssize_t pwrite_addr(int fd, const void *buf, size_t nbyte, uintptr_t addr) // Do not call this directly. // Use `get_self_mem_fd` which has a guard to call this only once. 
-static int _init_self_mem() +static int _init_self_mem() JL_NOTSAFEPOINT { struct utsname kernel; uname(&kernel); @@ -359,13 +359,13 @@ static int _init_self_mem() return fd; } -static int get_self_mem_fd() +static int get_self_mem_fd() JL_NOTSAFEPOINT { static int fd = _init_self_mem(); return fd; } -static void write_self_mem(void *dest, void *ptr, size_t size) +static void write_self_mem(void *dest, void *ptr, size_t size) JL_NOTSAFEPOINT { while (size > 0) { ssize_t ret = pwrite_addr(get_self_mem_fd(), ptr, size, (uintptr_t)dest); @@ -424,7 +424,7 @@ struct Block { Block(const Block&) = delete; Block &operator=(const Block&) = delete; - Block(Block &&other) + Block(Block &&other) JL_NOTSAFEPOINT : ptr(other.ptr), total(other.total), avail(other.avail) @@ -433,9 +433,9 @@ struct Block { other.total = other.avail = 0; } - Block() = default; + Block() JL_NOTSAFEPOINT = default; - void *alloc(size_t size, size_t align) + void *alloc(size_t size, size_t align) JL_NOTSAFEPOINT { size_t aligned_avail = avail & (-align); if (aligned_avail < size) @@ -444,7 +444,7 @@ struct Block { avail = aligned_avail - size; return p; } - void reset(void *addr, size_t size) + void reset(void *addr, size_t size) JL_NOTSAFEPOINT { if (avail >= jl_page_size) { uintptr_t end = uintptr_t(ptr) + total; @@ -462,7 +462,8 @@ class RWAllocator { static constexpr int nblocks = 8; Block blocks[nblocks]{}; public: - void *alloc(size_t size, size_t align) + RWAllocator() JL_NOTSAFEPOINT = default; + void *alloc(size_t size, size_t align) JL_NOTSAFEPOINT { size_t min_size = (size_t)-1; int min_id = 0; @@ -498,9 +499,9 @@ struct SplitPtrBlock : public Block { uintptr_t wr_ptr{0}; uint32_t state{0}; - SplitPtrBlock() = default; + SplitPtrBlock() JL_NOTSAFEPOINT = default; - void swap(SplitPtrBlock &other) + void swap(SplitPtrBlock &other) JL_NOTSAFEPOINT { std::swap(ptr, other.ptr); std::swap(total, other.total); @@ -509,7 +510,7 @@ struct SplitPtrBlock : public Block { std::swap(state, other.state); } - SplitPtrBlock(SplitPtrBlock &&other) + SplitPtrBlock(SplitPtrBlock &&other) JL_NOTSAFEPOINT : SplitPtrBlock() { swap(other); @@ -534,11 +535,12 @@ class ROAllocator { // but might not have all the permissions set or data copied yet. SmallVector completed; virtual void *get_wr_ptr(SplitPtrBlock &block, void *rt_ptr, - size_t size, size_t align) = 0; - virtual SplitPtrBlock alloc_block(size_t size) = 0; + size_t size, size_t align) JL_NOTSAFEPOINT = 0; + virtual SplitPtrBlock alloc_block(size_t size) JL_NOTSAFEPOINT = 0; public: - virtual ~ROAllocator() {} - virtual void finalize() + ROAllocator() JL_NOTSAFEPOINT = default; + virtual ~ROAllocator() JL_NOTSAFEPOINT {} + virtual void finalize() JL_NOTSAFEPOINT { for (auto &alloc: allocations) { // ensure the mapped pages are consistent @@ -552,7 +554,7 @@ class ROAllocator { } // Allocations that have not been finalized yet. 
SmallVector allocations; - void *alloc(size_t size, size_t align) + void *alloc(size_t size, size_t align) JL_NOTSAFEPOINT { size_t min_size = (size_t)-1; int min_id = 0; @@ -603,7 +605,7 @@ class ROAllocator { template class DualMapAllocator : public ROAllocator { protected: - void *get_wr_ptr(SplitPtrBlock &block, void *rt_ptr, size_t, size_t) override + void *get_wr_ptr(SplitPtrBlock &block, void *rt_ptr, size_t, size_t) override JL_NOTSAFEPOINT { assert((char*)rt_ptr >= block.ptr && (char*)rt_ptr < (block.ptr + block.total)); @@ -618,7 +620,7 @@ class DualMapAllocator : public ROAllocator { } return (char*)rt_ptr + (block.wr_ptr - uintptr_t(block.ptr)); } - SplitPtrBlock alloc_block(size_t size) override + SplitPtrBlock alloc_block(size_t size) override JL_NOTSAFEPOINT { SplitPtrBlock new_block; // use `wr_ptr` to record the id initially @@ -626,7 +628,7 @@ class DualMapAllocator : public ROAllocator { new_block.reset(ptr, size); return new_block; } - void finalize_block(SplitPtrBlock &block, bool reset) + void finalize_block(SplitPtrBlock &block, bool reset) JL_NOTSAFEPOINT { // This function handles setting the block to the right mode // and free'ing maps that are not needed anymore. @@ -662,11 +664,11 @@ class DualMapAllocator : public ROAllocator { } } public: - DualMapAllocator() + DualMapAllocator() JL_NOTSAFEPOINT { assert(anon_hdl != -1); } - void finalize() override + void finalize() override JL_NOTSAFEPOINT { for (auto &block : this->blocks) { finalize_block(block, false); @@ -685,7 +687,7 @@ class SelfMemAllocator : public ROAllocator { SmallVector temp_buff; protected: void *get_wr_ptr(SplitPtrBlock &block, void *rt_ptr, - size_t size, size_t align) override + size_t size, size_t align) override JL_NOTSAFEPOINT { assert(!(block.state & SplitPtrBlock::InitAlloc)); for (auto &wr_block: temp_buff) { @@ -699,13 +701,13 @@ class SelfMemAllocator : public ROAllocator { new_block.reset(map_anon_page(block_size), block_size); return new_block.alloc(size, align); } - SplitPtrBlock alloc_block(size_t size) override + SplitPtrBlock alloc_block(size_t size) override JL_NOTSAFEPOINT { SplitPtrBlock new_block; new_block.reset(map_anon_page(size), size); return new_block; } - void finalize_block(SplitPtrBlock &block, bool reset) + void finalize_block(SplitPtrBlock &block, bool reset) JL_NOTSAFEPOINT { if (!(block.state & SplitPtrBlock::Alloc)) return; @@ -718,13 +720,13 @@ class SelfMemAllocator : public ROAllocator { } } public: - SelfMemAllocator() + SelfMemAllocator() JL_NOTSAFEPOINT : ROAllocator(), temp_buff() { assert(get_self_mem_fd() != -1); } - void finalize() override + void finalize() override JL_NOTSAFEPOINT { for (auto &block : this->blocks) { finalize_block(block, false); @@ -770,17 +772,15 @@ class RTDyldMemoryManagerJL : public SectionMemoryManager { RWAllocator rw_alloc; std::unique_ptr> ro_alloc; std::unique_ptr> exe_alloc; - bool code_allocated; size_t total_allocated; public: - RTDyldMemoryManagerJL() + RTDyldMemoryManagerJL() JL_NOTSAFEPOINT : SectionMemoryManager(), pending_eh(), rw_alloc(), ro_alloc(), exe_alloc(), - code_allocated(false), total_allocated(0) { #ifdef _OS_LINUX_ @@ -794,12 +794,12 @@ class RTDyldMemoryManagerJL : public SectionMemoryManager { exe_alloc.reset(new DualMapAllocator()); } } - ~RTDyldMemoryManagerJL() override + ~RTDyldMemoryManagerJL() override JL_NOTSAFEPOINT { } - size_t getTotalBytes() { return total_allocated; } + size_t getTotalBytes() JL_NOTSAFEPOINT { return total_allocated; } void registerEHFrames(uint8_t *Addr, uint64_t 
LoadAddr, - size_t Size) override; + size_t Size) override JL_NOTSAFEPOINT; #if 0 // Disable for now since we are not actually using this. void deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr, @@ -807,16 +807,16 @@ class RTDyldMemoryManagerJL : public SectionMemoryManager { #endif uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, - StringRef SectionName) override; + StringRef SectionName) override JL_NOTSAFEPOINT; uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, - bool isReadOnly) override; + bool isReadOnly) override JL_NOTSAFEPOINT; using SectionMemoryManager::notifyObjectLoaded; void notifyObjectLoaded(RuntimeDyld &Dyld, - const object::ObjectFile &Obj) override; - bool finalizeMemory(std::string *ErrMsg = nullptr) override; + const object::ObjectFile &Obj) override JL_NOTSAFEPOINT; + bool finalizeMemory(std::string *ErrMsg = nullptr) override JL_NOTSAFEPOINT; template - void mapAddresses(DL &Dyld, Alloc &&allocator) + void mapAddresses(DL &Dyld, Alloc &&allocator) JL_NOTSAFEPOINT { for (auto &alloc: allocator->allocations) { if (alloc.rt_addr == alloc.wr_addr || alloc.relocated) @@ -826,7 +826,7 @@ class RTDyldMemoryManagerJL : public SectionMemoryManager { } } template - void mapAddresses(DL &Dyld) + void mapAddresses(DL &Dyld) JL_NOTSAFEPOINT { if (!ro_alloc) return; @@ -838,14 +838,9 @@ class RTDyldMemoryManagerJL : public SectionMemoryManager { uint8_t *RTDyldMemoryManagerJL::allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, - StringRef SectionName) + StringRef SectionName) JL_NOTSAFEPOINT { // allocating more than one code section can confuse libunwind. -#if !defined(_COMPILER_MSAN_ENABLED_) && !defined(_COMPILER_ASAN_ENABLED_) - // TODO: Figure out why msan and now asan too need this. 
- assert(!code_allocated); - code_allocated = true; -#endif total_allocated += Size; jl_timing_counter_inc(JL_TIMING_COUNTER_JITSize, Size); jl_timing_counter_inc(JL_TIMING_COUNTER_JITCodeSize, Size); @@ -859,7 +854,7 @@ uint8_t *RTDyldMemoryManagerJL::allocateDataSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName, - bool isReadOnly) + bool isReadOnly) JL_NOTSAFEPOINT { total_allocated += Size; jl_timing_counter_inc(JL_TIMING_COUNTER_JITSize, Size); @@ -873,7 +868,7 @@ uint8_t *RTDyldMemoryManagerJL::allocateDataSection(uintptr_t Size, } void RTDyldMemoryManagerJL::notifyObjectLoaded(RuntimeDyld &Dyld, - const object::ObjectFile &Obj) + const object::ObjectFile &Obj) JL_NOTSAFEPOINT { if (!ro_alloc) { assert(!exe_alloc); @@ -884,9 +879,8 @@ void RTDyldMemoryManagerJL::notifyObjectLoaded(RuntimeDyld &Dyld, mapAddresses(Dyld); } -bool RTDyldMemoryManagerJL::finalizeMemory(std::string *ErrMsg) +bool RTDyldMemoryManagerJL::finalizeMemory(std::string *ErrMsg) JL_NOTSAFEPOINT { - code_allocated = false; if (ro_alloc) { ro_alloc->finalize(); assert(exe_alloc); @@ -904,7 +898,7 @@ bool RTDyldMemoryManagerJL::finalizeMemory(std::string *ErrMsg) void RTDyldMemoryManagerJL::registerEHFrames(uint8_t *Addr, uint64_t LoadAddr, - size_t Size) + size_t Size) JL_NOTSAFEPOINT { if (uintptr_t(Addr) == LoadAddr) { register_eh_frames(Addr, Size); @@ -917,7 +911,7 @@ void RTDyldMemoryManagerJL::registerEHFrames(uint8_t *Addr, #if 0 void RTDyldMemoryManagerJL::deregisterEHFrames(uint8_t *Addr, uint64_t LoadAddr, - size_t Size) + size_t Size) JL_NOTSAFEPOINT { deregister_eh_frames((uint8_t*)LoadAddr, Size); } @@ -925,12 +919,12 @@ void RTDyldMemoryManagerJL::deregisterEHFrames(uint8_t *Addr, } -RTDyldMemoryManager* createRTDyldMemoryManager() +RTDyldMemoryManager* createRTDyldMemoryManager() JL_NOTSAFEPOINT { return new RTDyldMemoryManagerJL(); } -size_t getRTDyldMemoryManagerTotalBytes(RTDyldMemoryManager *mm) +size_t getRTDyldMemoryManagerTotalBytes(RTDyldMemoryManager *mm) JL_NOTSAFEPOINT { return ((RTDyldMemoryManagerJL*)mm)->getTotalBytes(); } diff --git a/src/clangsa/GCChecker.cpp b/src/clangsa/GCChecker.cpp index 31631eb70a4ad..4892ebdabd110 100644 --- a/src/clangsa/GCChecker.cpp +++ b/src/clangsa/GCChecker.cpp @@ -31,7 +31,7 @@ namespace { using namespace clang; using namespace ento; -#define PDP std::shared_ptr +typedef std::shared_ptr PDP; #define MakePDP make_unique static const Stmt *getStmtForDiagnostics(const ExplodedNode *N) @@ -394,13 +394,18 @@ PDP GCChecker::SafepointBugVisitor::VisitNode(const ExplodedNode *N, } else { PathDiagnosticLocation Pos = PathDiagnosticLocation::createDeclBegin( N->getLocationContext(), BRC.getSourceManager()); - return MakePDP(Pos, "Tracking JL_NOT_SAFEPOINT annotation here."); + if (Pos.isValid()) + return MakePDP(Pos, "Tracking JL_NOT_SAFEPOINT annotation here."); + //N->getLocation().dump(); } } else if (NewSafepointDisabled == (unsigned)-1) { PathDiagnosticLocation Pos = PathDiagnosticLocation::createDeclBegin( N->getLocationContext(), BRC.getSourceManager()); - return MakePDP(Pos, "Safepoints re-enabled here"); + if (Pos.isValid()) + return MakePDP(Pos, "Safepoints re-enabled here"); + //N->getLocation().dump(); } + // n.b. 
there may be no position here to report if they were disabled by julia_notsafepoint_enter/leave } return nullptr; } diff --git a/src/codegen.cpp b/src/codegen.cpp index 3f69f4789493a..0ab26a65fcaaa 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -233,7 +233,6 @@ STATISTIC(EmittedSpecfunCalls, "Number of specialized calls emitted"); STATISTIC(EmittedInvokes, "Number of invokes emitted"); STATISTIC(EmittedCalls, "Number of calls emitted"); STATISTIC(EmittedUndefVarErrors, "Number of undef var errors emitted"); -STATISTIC(EmittedOpaqueClosureFunctions, "Number of opaque closures emitted"); STATISTIC(EmittedToJLInvokes, "Number of tojlinvoke calls emitted"); STATISTIC(EmittedCFuncInvalidates, "Number of C function invalidates emitted"); STATISTIC(GeneratedCFuncWrappers, "Number of C function wrappers generated"); @@ -1009,6 +1008,11 @@ static const auto jlinvoke_func = new JuliaFunction<>{ {AttributeSet(), Attributes(C, {Attribute::ReadOnly, Attribute::NoCapture})}); }, }; +static const auto jlopaque_closure_call_func = new JuliaFunction<>{ + XSTR(jl_f_opaque_closure_call), + get_func_sig, + get_func_attrs, +}; static const auto jlmethod_func = new JuliaFunction<>{ XSTR(jl_method_def), [](LLVMContext &C) { @@ -1606,7 +1610,7 @@ static const auto jltuple_func = new JuliaFunction<>{XSTR(jl_f_tuple), get_func_ static const auto jlintrinsic_func = new JuliaFunction<>{XSTR(jl_f_intrinsic_call), get_func3_sig, get_func_attrs}; static const auto &builtin_func_map() { - static std::map*> builtins = { + static auto builtins = new DenseMap*> { { jl_f_is_addr, new JuliaFunction<>{XSTR(jl_f_is), get_func_sig, get_func_attrs} }, { jl_f_typeof_addr, new JuliaFunction<>{XSTR(jl_f_typeof), get_func_sig, get_func_attrs} }, { jl_f_sizeof_addr, new JuliaFunction<>{XSTR(jl_f_sizeof), get_func_sig, get_func_attrs} }, @@ -1649,18 +1653,18 @@ static const auto &builtin_func_map() { { jl_f__svec_ref_addr, new JuliaFunction<>{XSTR(jl_f__svec_ref), get_func_sig, get_func_attrs} }, { jl_f_current_scope_addr, new JuliaFunction<>{XSTR(jl_f_current_scope), get_func_sig, get_func_attrs} }, }; - return builtins; + return *builtins; } static const auto &may_dispatch_builtins() { - static std::unordered_set builtins( + static auto builtins = new DenseSet( {jl_f__apply_iterate_addr, jl_f__apply_pure_addr, jl_f__call_in_world_addr, jl_f__call_in_world_total_addr, jl_f__call_latest_addr, }); - return builtins; + return *builtins; } static const auto jl_new_opaque_closure_jlcall_func = new JuliaFunction<>{XSTR(jl_new_opaque_closure_jlcall), get_func_sig, get_func_attrs}; @@ -2976,7 +2980,7 @@ static void jl_name_jlfuncparams_args(jl_codegen_params_t ¶ms, Function *F) F->getArg(3)->setName("sparams::Any"); } -static void jl_init_function(Function *F, const Triple &TT) +void jl_init_function(Function *F, const Triple &TT) { // set any attributes that *must* be set on all functions AttrBuilder attr(F->getContext()); @@ -3023,6 +3027,7 @@ static bool uses_specsig(jl_value_t *sig, bool needsparams, jl_value_t *rettype, if (jl_vararg_kind(jl_tparam(sig, jl_nparams(sig) - 1)) == JL_VARARG_UNBOUND) return false; // not invalid, consider if specialized signature is worthwhile + // n.b. 
sig is sometimes wrong for OC (tparam0 might be the captures type of the specialization, even though what gets passed in that slot is an OC object), so prefer_specsig is always set (instead of recomputing tparam0 using get_oc_type) if (prefer_specsig) return true; if (!deserves_retbox(rettype) && !jl_is_datatype_singleton((jl_datatype_t*)rettype) && rettype != (jl_value_t*)jl_bool_type) @@ -5236,7 +5241,15 @@ static CallInst *emit_jlcall(jl_codectx_t &ctx, FunctionCallee theFptr, Value *t if (theF) theArgs.push_back(theF); for (size_t i = 0; i < nargs; i++) { - Value *arg = boxed(ctx, argv[i]); + Value *arg; + if (i == 0 && trampoline == julia_call3) { + const jl_cgval_t &f = argv[i]; + arg = f.inline_roots.empty() && f.ispointer() ? data_pointer(ctx, f) : value_to_pointer(ctx, f).V; + arg = decay_derived(ctx, arg); + } + else { + arg = boxed(ctx, argv[i]); + } theArgs.push_back(arg); } CallInst *result = ctx.builder.CreateCall(TheTrampoline, theArgs); @@ -5283,13 +5296,13 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos idx++; } for (size_t i = 0; i < nargs; i++) { - jl_value_t *jt = jl_nth_slot_type(specTypes, i); // n.b.: specTypes is required to be a datatype by construction for specsig if (is_opaque_closure && i == 0) { // Special implementation for opaque closures: their jt and thus - // julia_type_to_llvm values are likely wrong, so override the - // behavior here to directly pass the expected pointer based instead - // just on passing arg as a pointer + // julia_type_to_llvm values are likely wrong (based on captures instead of the OC), so override the + // behavior here to directly pass the expected pointer directly instead of + // computing it from the available information + // jl_value_t *oc_type = (jl_value_t*)jl_any_type; // more accurately: get_oc_type(specTypes, jlretty) jl_cgval_t arg = argv[i]; if (arg.isghost) { argvals[idx] = Constant::getNullValue(ctx.builder.getPtrTy(AddressSpace::Derived)); @@ -5302,6 +5315,7 @@ static jl_cgval_t emit_call_specfun_other(jl_codectx_t &ctx, bool is_opaque_clos idx++; continue; } + jl_value_t *jt = jl_nth_slot_type(specTypes, i); jl_cgval_t arg = update_julia_type(ctx, argv[i], jt); if (arg.typ == jl_bottom_type) return jl_cgval_t(); @@ -5519,6 +5533,7 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR // Check if we already queued this up auto it = ctx.call_targets.find(codeinst); if (need_to_emit && it != ctx.call_targets.end()) { + assert(it->second.specsig == specsig); protoname = it->second.decl->getName(); need_to_emit = cache_valid = false; } @@ -5559,7 +5574,7 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR handled = true; if (need_to_emit) { Function *trampoline_decl = cast(jl_Module->getNamedValue(protoname)); - ctx.call_targets[codeinst] = {cc, return_roots, trampoline_decl, specsig}; + ctx.call_targets[codeinst] = {cc, return_roots, trampoline_decl, nullptr, specsig}; if (trim_may_error(ctx.params->trim)) push_frames(ctx, ctx.linfo, mi); } @@ -5570,9 +5585,9 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR if (!handled) { if (trim_may_error(ctx.params->trim)) { if (lival.constant) { - arraylist_push(&new_invokes, lival.constant); push_frames(ctx, ctx.linfo, (jl_method_instance_t*)lival.constant); - } else { + } + else { errs() << "Dynamic call to unknown function"; errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << 
ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; @@ -5728,20 +5743,16 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo } } FunctionCallee fptr; - Value *F; JuliaFunction<> *cc; if (f.typ == (jl_value_t*)jl_intrinsic_type) { fptr = prepare_call(jlintrinsic_func); - F = f.inline_roots.empty() && f.ispointer() ? data_pointer(ctx, f) : value_to_pointer(ctx, f).V; - F = decay_derived(ctx, F); cc = julia_call3; } else { fptr = FunctionCallee(get_func_sig(ctx.builder.getContext()), ctx.builder.CreateCall(prepare_call(jlgetbuiltinfptr_func), {emit_typeof(ctx, f)})); - F = boxed(ctx, f); cc = julia_call; } - Value *ret = emit_jlcall(ctx, fptr, F, ArrayRef(argv).drop_front(), nargs - 1, cc); + Value *ret = emit_jlcall(ctx, fptr, nullptr, argv, nargs, cc); setName(ctx.emission_context, ret, "Builtin_ret"); return mark_julia_type(ctx, ret, true, rt); } @@ -5758,52 +5769,12 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo JL_GC_POP(); return r; } + // TODO: else emit_oc_call } } int failed_dispatch = !argv[0].constant; if (ctx.params->trim != JL_TRIM_NO) { - size_t min_valid = 1; - size_t max_valid = ~(size_t)0; - size_t latest_world = jl_get_world_counter(); // TODO: marshal the world age of the compilation here. - - // Find all methods matching the call signature - jl_array_t *matches = NULL; - jl_value_t *tup = NULL; - JL_GC_PUSH2(&tup, &matches); - if (!failed_dispatch) { - SmallVector argtypes; - for (auto& arg: argv) - argtypes.push_back(arg.typ); - tup = jl_apply_tuple_type_v(argtypes.data(), argtypes.size()); - matches = (jl_array_t*)jl_matching_methods((jl_tupletype_t*)tup, jl_nothing, 10 /*TODO: make global*/, 1, - latest_world, &min_valid, &max_valid, NULL); - if ((jl_value_t*)matches == jl_nothing) - failed_dispatch = 1; - } - - // Expand each matching method to its unique specialization, if it has exactly one - if (!failed_dispatch) { - size_t k; - size_t len = new_invokes.len; - for (k = 0; k < jl_array_nrows(matches); k++) { - jl_method_match_t *match = (jl_method_match_t *)jl_array_ptr_ref(matches, k); - jl_method_instance_t *mi = jl_method_match_to_mi(match, latest_world, min_valid, max_valid, 0); - if (!mi) { - if (jl_array_nrows(matches) == 1) { - // if the method match is not compileable, but there is only one, fall back to - // unspecialized implementation - mi = jl_get_unspecialized(match->method); - } - else { - new_invokes.len = len; - failed_dispatch = 1; - break; - } - } - arraylist_push(&new_invokes, mi); - } - } - JL_GC_POP(); + abort(); // this code path is unsound, unsafe, and probably bad } if (failed_dispatch && trim_may_error(ctx.params->trim)) { @@ -6634,66 +6605,73 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met assert(jl_is_method_instance(mi)); ci = jl_atomic_load_relaxed(&mi->cache); } - - if (ci == NULL || (jl_value_t*)ci == jl_nothing) { - JL_GC_POP(); - return std::make_pair((Function*)NULL, (Function*)NULL); - } - auto inferred = jl_atomic_load_relaxed(&ci->inferred); - if (!inferred || inferred == jl_nothing) { + if (ci == NULL || (jl_value_t*)ci == jl_nothing || ci->rettype != rettype || !jl_egal(sigtype, mi->specTypes)) { // TODO: correctly handle the ABI conversion if rettype != ci->rettype JL_GC_POP(); return std::make_pair((Function*)NULL, (Function*)NULL); } - auto it = ctx.emission_context.compiled_functions.find(ci); - - if (it == ctx.emission_context.compiled_functions.end()) { - ++EmittedOpaqueClosureFunctions; - jl_code_info_t *ir = 
jl_uncompress_ir(closure_method, ci, (jl_value_t*)inferred); - JL_GC_PUSH1(&ir); - // TODO: Emit this inline and outline it late using LLVM's coroutine support. - orc::ThreadSafeModule closure_m = jl_create_ts_module( - name_from_method_instance(mi), ctx.emission_context.tsctx, - jl_Module->getDataLayout(), Triple(jl_Module->getTargetTriple())); - jl_llvm_functions_t closure_decls = emit_function(closure_m, mi, ir, rettype, ctx.emission_context); - JL_GC_POP(); - it = ctx.emission_context.compiled_functions.insert(std::make_pair(ci, std::make_pair(std::move(closure_m), std::move(closure_decls)))).first; + // method lookup code (similar to emit_invoke, and the inverse of emit_specsig_oc_call) + bool specsig = uses_specsig(sigtype, false, rettype, true); + std::string name; + std::string oc; + StringRef protoname; + StringRef proto_oc; + + // Check if we already queued this up + auto it = ctx.call_targets.find(ci); + bool need_to_emit = it == ctx.call_targets.end(); + if (!need_to_emit) { + assert(specsig == it->second.specsig); + if (specsig) { + protoname = it->second.decl->getName(); + proto_oc = it->second.oc->getName(); + } + else { + proto_oc = it->second.decl->getName(); + } + need_to_emit = false; + } + else { + if (specsig) { + raw_string_ostream(name) << "j_" << name_from_method_instance(mi) << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1); + protoname = StringRef(name); + } + raw_string_ostream(oc) << "j1_" << name_from_method_instance(mi) << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1); + proto_oc = StringRef(oc); } - auto &closure_m = it->second.first; - auto &closure_decls = it->second.second; - - assert(closure_decls.functionObject != "jl_fptr_sparam"); - bool isspecsig = closure_decls.functionObject != "jl_fptr_args"; - - Function *F = NULL; - std::string fname = isspecsig ? 
- closure_decls.functionObject : - closure_decls.specFunctionObject; - if (GlobalValue *V = jl_Module->getNamedValue(fname)) { + // Get the fptr1 OC + Function *F = nullptr; + if (GlobalValue *V = jl_Module->getNamedValue(proto_oc)) { F = cast(V); } else { F = Function::Create(get_func_sig(ctx.builder.getContext()), Function::ExternalLinkage, - fname, jl_Module); + proto_oc, jl_Module); jl_init_function(F, ctx.emission_context.TargetTriple); jl_name_jlfunc_args(ctx.emission_context, F); F->setAttributes(AttributeList::get(ctx.builder.getContext(), {get_func_attrs(ctx.builder.getContext()), F->getAttributes()})); } - Function *specF = NULL; - if (!isspecsig) { - specF = F; + + // Get the specsig (if applicable) + Function *specF = nullptr; + jl_returninfo_t::CallingConv cc = jl_returninfo_t::CallingConv::Boxed; + unsigned return_roots = 0; + bool is_opaque_closure = jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure; + assert(is_opaque_closure); + if (specsig) { + bool gcstack_arg = JL_FEAT_TEST(ctx, gcstack_arg); + jl_returninfo_t returninfo = get_specsig_function(ctx, jl_Module, nullptr, protoname, mi->specTypes, rettype, is_opaque_closure, gcstack_arg); + cc = returninfo.cc; + return_roots = returninfo.return_roots; + specF = cast(returninfo.decl.getCallee()); } - else { - //emission context holds context lock so can get module - specF = closure_m.getModuleUnlocked()->getFunction(closure_decls.specFunctionObject); - if (specF) { - jl_returninfo_t returninfo = get_specsig_function(ctx, jl_Module, NULL, - closure_decls.specFunctionObject, sigtype, rettype, true, JL_FEAT_TEST(ctx,gcstack_arg)); - specF = cast(returninfo.decl.getCallee()); - } + + if (need_to_emit) { + ctx.call_targets[ci] = {cc, return_roots, specsig ? specF : F, specsig ? F : nullptr, specsig}; } + JL_GC_POP(); return std::make_pair(F, specF); } @@ -7173,7 +7151,12 @@ static Value *get_scope_field(jl_codectx_t &ctx) return emit_ptrgep(ctx, ct, offsetof(jl_task_t, scope), "current_scope"); } -static Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, Module *M, jl_codegen_params_t ¶ms) +Function *get_or_emit_fptr1(StringRef preal_decl, Module *M) +{ + return cast(M->getOrInsertFunction(preal_decl, get_func_sig(M->getContext()), get_func_attrs(M->getContext())).getCallee()); +} + +Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, Module *M, jl_codegen_params_t ¶ms) JL_NOTSAFEPOINT { ++EmittedToJLInvokes; jl_codectx_t ctx(M->getContext(), params, codeinst); @@ -7184,7 +7167,6 @@ static Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptr name, M); jl_init_function(f, params.TargetTriple); if (trim_may_error(params.params->trim)) { - arraylist_push(&new_invokes, codeinst->def); // Try t compile this invoke // TODO: Debuginfo! 
push_frames(ctx, ctx.linfo, codeinst->def, 1); } @@ -7213,7 +7195,17 @@ static Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptr return f; } -static void emit_cfunc_invalidate( +static jl_value_t *get_oc_type(jl_value_t *calltype, jl_value_t *rettype) JL_ALWAYS_LEAFTYPE +{ + jl_value_t *argtype = jl_argtype_without_function((jl_value_t*)calltype); + JL_GC_PUSH1(&argtype); + jl_value_t *oc_type JL_ALWAYS_LEAFTYPE = jl_apply_type2((jl_value_t*)jl_opaque_closure_type, argtype, rettype); + JL_GC_PROMISE_ROOTED(oc_type); + JL_GC_POP(); + return oc_type; +} + +void emit_specsig_to_fptr1( Function *gf_thunk, jl_returninfo_t::CallingConv cc, unsigned return_roots, jl_value_t *calltype, jl_value_t *rettype, bool is_for_opaque_closure, size_t nargs, @@ -7240,14 +7232,18 @@ static void emit_cfunc_invalidate( ++AI; // gcstack_arg } for (size_t i = 0; i < nargs; i++) { - // n.b. calltype is required to be a datatype by construction for specsig - jl_value_t *jt = jl_nth_slot_type(calltype, i); if (i == 0 && is_for_opaque_closure) { + // `jt` would be wrong here (it is the captures type), so is not used used for + // the ABI decisions, but the argument actually will require boxing as its real type + // which can be exactly recomputed from the specialization, as that defined the ABI + jl_value_t *oc_type = get_oc_type(calltype, rettype); Value *arg_v = &*AI; ++AI; - myargs[i] = mark_julia_slot(arg_v, jt, NULL, ctx.tbaa().tbaa_const); + myargs[i] = mark_julia_slot(arg_v, (jl_value_t*)oc_type, NULL, ctx.tbaa().tbaa_const); continue; } + // n.b. calltype is required to be a datatype by construction for specsig + jl_value_t *jt = jl_nth_slot_type(calltype, i); bool isboxed = false; Type *et; if (deserves_argbox(jt)) { @@ -7335,16 +7331,6 @@ static void emit_cfunc_invalidate( } } -static void emit_cfunc_invalidate( - Function *gf_thunk, jl_returninfo_t::CallingConv cc, unsigned return_roots, - jl_value_t *calltype, jl_value_t *rettype, bool is_for_opaque_closure, - size_t nargs, jl_codegen_params_t ¶ms, - size_t min_world, size_t max_world) -{ - emit_cfunc_invalidate(gf_thunk, cc, return_roots, calltype, rettype, is_for_opaque_closure, nargs, params, - prepare_call_in(gf_thunk->getParent(), jlapplygeneric_func), min_world, max_world); -} - static Function* gen_cfun_wrapper( Module *into, jl_codegen_params_t ¶ms, const function_sig_t &sig, jl_value_t *ff, const char *aliasname, @@ -7712,11 +7698,11 @@ static Function* gen_cfun_wrapper( GlobalVariable::InternalLinkage, funcName, M); jl_init_function(gf_thunk, ctx.emission_context.TargetTriple); gf_thunk->setAttributes(AttributeList::get(M->getContext(), {returninfo.attrs, gf_thunk->getAttributes()})); - // build a specsig -> jl_apply_generic converter thunk + // build a specsig -> jl_apply_generic converter thunk // this builds a method that calls jl_apply_generic (as a closure over a singleton function pointer), // but which has the signature of a specsig - emit_cfunc_invalidate(gf_thunk, returninfo.cc, returninfo.return_roots, lam->specTypes, codeinst->rettype, is_opaque_closure, nargs + 1, ctx.emission_context, - min_world, max_world); + emit_specsig_to_fptr1(gf_thunk, returninfo.cc, returninfo.return_roots, lam->specTypes, codeinst->rettype, is_opaque_closure, nargs + 1, ctx.emission_context, + prepare_call_in(gf_thunk->getParent(), jlapplygeneric_func), min_world, max_world); returninfo.decl = FunctionCallee(returninfo.decl.getFunctionType(), ctx.builder.CreateSelect(age_ok, returninfo.decl.getCallee(), gf_thunk)); } retval = 
emit_call_specfun_other(ctx, is_opaque_closure, lam->specTypes, codeinst->rettype, returninfo, nullptr, inputargs, nargs + 1); @@ -8026,7 +8012,8 @@ const char *jl_generate_ccallable(LLVMOrcThreadSafeModuleRef llvmmod, void *sysi } // generate a julia-callable function that calls f (AKA lam) -static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlretty, jl_returninfo_t &f, unsigned nargs, int retarg, StringRef funcName, +// if is_opaque_closure, then generate the OC invoke, rather than a real invoke +static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlretty, jl_returninfo_t &f, unsigned nargs, int retarg, bool is_opaque_closure, StringRef funcName, Module *M, jl_codegen_params_t ¶ms) { ++GeneratedInvokeWrappers; @@ -8055,11 +8042,14 @@ static void gen_invoke_wrapper(jl_method_instance_t *lam, jl_value_t *jlretty, j allocate_gc_frame(ctx, b0); SmallVector argv(nargs); - bool is_opaque_closure = jl_is_method(lam->def.value) && lam->def.method->is_for_opaque_closure; jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); for (size_t i = 0; i < nargs; ++i) { - jl_value_t *ty = ((i == 0) && is_opaque_closure) ? (jl_value_t*)jl_any_type : - jl_nth_slot_type(lam->specTypes, i); + if (i == 0 && is_opaque_closure) { + jl_value_t *oc_type = (jl_value_t*)jl_any_type; // more accurately: get_oc_type(lam->specTypes, jlretty) + argv[i] = mark_julia_slot(funcArg, oc_type, NULL, ctx.tbaa().tbaa_const); + continue; + } + jl_value_t *ty = jl_nth_slot_type(lam->specTypes, i); Value *theArg; if (i == 0) { theArg = funcArg; @@ -8455,6 +8445,7 @@ static jl_llvm_functions_t // specTypes is required to be a datatype by construction for specsig, but maybe not otherwise // OpaqueClosure implicitly loads the env if (i == 0 && ctx.is_opaque_closure) { + // n.b. this is not really needed, because ty was already supposed to be correct if (jl_is_array(src->slottypes)) { ty = jl_array_ptr_ref((jl_array_t*)src->slottypes, i); } @@ -8554,7 +8545,7 @@ static jl_llvm_functions_t raw_string_ostream(wrapName) << "jfptr_" << ctx.name << "_" << jl_atomic_fetch_add_relaxed(&globalUniqueGeneratedNames, 1); declarations.functionObject = wrapName; size_t nparams = jl_nparams(lam->specTypes); - gen_invoke_wrapper(lam, jlrettype, returninfo, nparams, retarg, declarations.functionObject, M, ctx.emission_context); + gen_invoke_wrapper(lam, jlrettype, returninfo, nparams, retarg, ctx.is_opaque_closure, declarations.functionObject, M, ctx.emission_context); // TODO: add attributes: maybe_mark_argument_dereferenceable(Arg, argType) // TODO: add attributes: dereferenceable // TODO: (if needsparams) add attributes: dereferenceable, readonly, nocapture @@ -8564,11 +8555,10 @@ static jl_llvm_functions_t GlobalVariable::ExternalLinkage, declarations.specFunctionObject, M); jl_init_function(f, ctx.emission_context.TargetTriple); - if (needsparams) { + if (needsparams) jl_name_jlfuncparams_args(ctx.emission_context, f); - } else { + else jl_name_jlfunc_args(ctx.emission_context, f); - } f->setAttributes(AttributeList::get(ctx.builder.getContext(), {get_func_attrs(ctx.builder.getContext()), f->getAttributes()})); returninfo.decl = f; declarations.functionObject = needsparams ? "jl_fptr_sparam" : "jl_fptr_args"; @@ -8940,76 +8930,73 @@ static jl_llvm_functions_t } for (i = 0; i < nreq && i < vinfoslen; i++) { jl_sym_t *s = slot_symbol(ctx, i); - jl_value_t *argType = jl_nth_slot_type(lam->specTypes, i); - // TODO: jl_nth_slot_type should call jl_rewrap_unionall? 
- // specTypes is required to be a datatype by construction for specsig, but maybe not otherwise - bool isboxed = deserves_argbox(argType); - Type *llvmArgType = NULL; - if (i == 0 && ctx.is_opaque_closure) { - isboxed = false; - llvmArgType = ctx.builder.getPtrTy(AddressSpace::Derived); - argType = (jl_value_t*)jl_any_type; - } - else { - llvmArgType = isboxed ? ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, argType); - } jl_varinfo_t &vi = ctx.slots[i]; - if (s == jl_unused_sym || vi.value.constant) { - assert(vi.boxroot == NULL); - if (specsig && !type_is_ghost(llvmArgType) && !is_uniquerep_Type(argType)) { - ++AI; - auto tracked = CountTrackedPointers(llvmArgType); - if (tracked.count && !tracked.all) - ++AI; - } - continue; - } jl_cgval_t theArg; - // If this is an opaque closure, implicitly load the env and switch - // the world age. if (i == 0 && ctx.is_opaque_closure) { + // If this is an opaque closure, implicitly load the env and switch + // the world age. The specTypes value is wrong for this field, so + // this needs to be handled first. + // jl_value_t *oc_type = get_oc_type(calltype, rettype); + Value *oc_this = decay_derived(ctx, &*AI); + ++AI; // both specsig (derived) and fptr1 (box) pass this argument as a distinct argument // Load closure world - Value *oc_this = decay_derived(ctx, &*AI++); - Value *argaddr = oc_this; - Value *worldaddr = emit_ptrgep(ctx, argaddr, offsetof(jl_opaque_closure_t, world)); - + Value *worldaddr = emit_ptrgep(ctx, oc_this, offsetof(jl_opaque_closure_t, world)); jl_cgval_t closure_world = typed_load(ctx, worldaddr, NULL, (jl_value_t*)jl_long_type, nullptr, nullptr, false, AtomicOrdering::NotAtomic, false, ctx.types().alignof_ptr.value()); ctx.world_age_at_entry = closure_world.V; // The tls world in a OC is the world of the closure emit_unbox_store(ctx, closure_world, world_age_field, ctx.tbaa().tbaa_gcframe, ctx.types().alignof_ptr); - // Load closure env - Value *envaddr = emit_ptrgep(ctx, argaddr, offsetof(jl_opaque_closure_t, captures)); + if (s == jl_unused_sym || vi.value.constant) + continue; - jl_cgval_t closure_env = typed_load(ctx, envaddr, NULL, (jl_value_t*)jl_any_type, - nullptr, nullptr, true, AtomicOrdering::NotAtomic, false, sizeof(void*)); - theArg = update_julia_type(ctx, closure_env, vi.value.typ); - } - else if (specsig) { - theArg = get_specsig_arg(argType, llvmArgType, isboxed); + // Load closure env, which is always a boxed value (usually some Tuple) currently + Value *envaddr = emit_ptrgep(ctx, oc_this, offsetof(jl_opaque_closure_t, captures)); + theArg = typed_load(ctx, envaddr, NULL, (jl_value_t*)vi.value.typ, + nullptr, nullptr, /*isboxed*/true, AtomicOrdering::NotAtomic, false, sizeof(void*)); } else { - if (i == 0) { - // first (function) arg is separate in jlcall - theArg = mark_julia_type(ctx, fArg, true, vi.value.typ); + jl_value_t *argType = jl_nth_slot_type(lam->specTypes, i); + // TODO: jl_nth_slot_type should call jl_rewrap_unionall? + // specTypes is required to be a datatype by construction for specsig, but maybe not otherwise + bool isboxed = deserves_argbox(argType); + Type *llvmArgType = NULL; + llvmArgType = isboxed ? 
ctx.types().T_prjlvalue : julia_type_to_llvm(ctx, argType); + if (s == jl_unused_sym || vi.value.constant) { + assert(vi.boxroot == NULL); + if (specsig && !type_is_ghost(llvmArgType) && !is_uniquerep_Type(argType)) { + ++AI; + auto tracked = CountTrackedPointers(llvmArgType); + if (tracked.count && !tracked.all) + ++AI; + } + continue; + } + if (specsig) { + theArg = get_specsig_arg(argType, llvmArgType, isboxed); } else { - Value *argPtr = emit_ptrgep(ctx, argArray, (i - 1) * ctx.types().sizeof_ptr); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); - Value *load = ai.decorateInst(maybe_mark_load_dereferenceable( - ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, argPtr, Align(sizeof(void*))), - false, vi.value.typ)); - theArg = mark_julia_type(ctx, load, true, vi.value.typ); - if (debug_enabled && vi.dinfo && !vi.boxroot) { - SmallVector addr; - addr.push_back(llvm::dwarf::DW_OP_deref); - addr.push_back(llvm::dwarf::DW_OP_plus_uconst); - addr.push_back((i - 1) * sizeof(void*)); - if ((Metadata*)vi.dinfo->getType() != debugcache.jl_pvalue_dillvmt) + if (i == 0) { + // first (function) arg is separate in jlcall + theArg = mark_julia_type(ctx, fArg, true, vi.value.typ); + } + else { + Value *argPtr = emit_ptrgep(ctx, argArray, (i - 1) * ctx.types().sizeof_ptr); + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_const); + Value *load = ai.decorateInst(maybe_mark_load_dereferenceable( + ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, argPtr, Align(sizeof(void*))), + false, vi.value.typ)); + theArg = mark_julia_type(ctx, load, true, vi.value.typ); + if (debug_enabled && vi.dinfo && !vi.boxroot) { + SmallVector addr; addr.push_back(llvm::dwarf::DW_OP_deref); - dbuilder.insertDeclare(pargArray, vi.dinfo, dbuilder.createExpression(addr), - topdebugloc, - ctx.builder.GetInsertBlock()); + addr.push_back(llvm::dwarf::DW_OP_plus_uconst); + addr.push_back((i - 1) * sizeof(void*)); + if ((Metadata*)vi.dinfo->getType() != debugcache.jl_pvalue_dillvmt) + addr.push_back(llvm::dwarf::DW_OP_deref); + dbuilder.insertDeclare(pargArray, vi.dinfo, dbuilder.createExpression(addr), + topdebugloc, + ctx.builder.GetInsertBlock()); + } } } } @@ -9996,7 +9983,6 @@ jl_llvm_functions_t jl_emit_code( { JL_TIMING(CODEGEN, CODEGEN_LLVM); jl_timing_show_func_sig((jl_value_t *)li->specTypes, JL_TIMING_DEFAULT_BLOCK); - // caller must hold codegen_lock jl_llvm_functions_t decls = {}; assert((params.params == &jl_default_cgparams /* fast path */ || !params.cache || compare_cgparams(params.params, &jl_default_cgparams)) && @@ -10031,33 +10017,38 @@ jl_llvm_functions_t jl_emit_code( return decls; } +static int effects_foldable(uint32_t effects) +{ + // N.B.: This needs to be kept in sync with Core.Compiler.is_foldable(effects, true) + return ((effects & 0x7) == 0) && // is_consistent(effects) + (((effects >> 10) & 0x03) == 0) && // is_noub(effects) + (((effects >> 3) & 0x03) == 0) && // is_effect_free(effects) + ((effects >> 6) & 0x01); // is_terminates(effects) +} + static jl_llvm_functions_t jl_emit_oc_wrapper(orc::ThreadSafeModule &m, jl_codegen_params_t ¶ms, jl_method_instance_t *mi, jl_value_t *rettype) { - Module *M = m.getModuleUnlocked(); - jl_codectx_t ctx(M->getContext(), params, 0, 0); - ctx.name = M->getModuleIdentifier().data(); - std::string funcName = get_function_name(true, false, ctx.name, ctx.emission_context.TargetTriple); jl_llvm_functions_t declarations; declarations.functionObject = "jl_f_opaque_closure_call"; if (uses_specsig(mi->specTypes, false, 
rettype, true)) { + // context lock is held by params + Module *M = m.getModuleUnlocked(); + jl_codectx_t ctx(M->getContext(), params, 0, 0); + ctx.name = M->getModuleIdentifier().data(); + std::string funcName = get_function_name(true, false, ctx.name, ctx.emission_context.TargetTriple); jl_returninfo_t returninfo = get_specsig_function(ctx, M, NULL, funcName, mi->specTypes, rettype, true, JL_FEAT_TEST(ctx,gcstack_arg)); Function *gf_thunk = cast(returninfo.decl.getCallee()); jl_init_function(gf_thunk, ctx.emission_context.TargetTriple); size_t nrealargs = jl_nparams(mi->specTypes); - emit_cfunc_invalidate(gf_thunk, returninfo.cc, returninfo.return_roots, mi->specTypes, rettype, true, nrealargs, ctx.emission_context, ctx.min_world, ctx.max_world); + emit_specsig_to_fptr1(gf_thunk, returninfo.cc, returninfo.return_roots, + mi->specTypes, rettype, true, nrealargs, ctx.emission_context, + prepare_call_in(gf_thunk->getParent(), jlopaque_closure_call_func), // TODO: this could call emit_oc_call directly + ctx.min_world, ctx.max_world); declarations.specFunctionObject = funcName; } return declarations; } -static int effects_foldable(uint32_t effects) -{ - // N.B.: This needs to be kept in sync with Core.Compiler.is_foldable(effects, true) - return ((effects & 0x7) == 0) && // is_consistent(effects) - (((effects >> 10) & 0x03) == 0) && // is_noub(effects) - (((effects >> 3) & 0x03) == 0) && // is_effect_free(effects) - ((effects >> 6) & 0x01); // is_terminates(effects) -} jl_llvm_functions_t jl_emit_codeinst( orc::ThreadSafeModule &m, @@ -10070,12 +10061,14 @@ jl_llvm_functions_t jl_emit_codeinst( JL_GC_PUSH1(&src); if (!src) { src = (jl_code_info_t*)jl_atomic_load_relaxed(&codeinst->inferred); - jl_method_t *def = codeinst->def->def.method; + jl_method_instance_t *mi = codeinst->def; + jl_method_t *def = mi->def.method; // Check if this is the generic method for opaque closure wrappers - - // if so, generate the specsig -> invoke converter. + // if so, this must compile specptr such that it holds the specptr -> invoke wrapper + // to satisfy the dispatching implementation requirements of jl_f_opaque_closure_call if (def == jl_opaque_closure_method) { JL_GC_POP(); - return jl_emit_oc_wrapper(m, params, codeinst->def, codeinst->rettype); + return jl_emit_oc_wrapper(m, params, mi, codeinst->rettype); } if (src && (jl_value_t*)src != jl_nothing && jl_is_method(def)) src = jl_uncompress_ir(def, codeinst, (jl_value_t*)src); @@ -10149,135 +10142,15 @@ jl_llvm_functions_t jl_emit_codeinst( return decls; } - -void jl_compile_workqueue( - jl_codegen_params_t ¶ms, - CompilationPolicy policy) -{ - JL_TIMING(CODEGEN, CODEGEN_Workqueue); - jl_code_info_t *src = NULL; - JL_GC_PUSH1(&src); - while (!params.workqueue.empty()) { - jl_code_instance_t *codeinst; - auto it = params.workqueue.back(); - codeinst = it.first; - auto proto = it.second; - params.workqueue.pop_back(); - // try to emit code for this item from the workqueue - StringRef preal_decl = ""; - bool preal_specsig = false; - jl_callptr_t invoke = NULL; - if (params.cache) { - // WARNING: this correctness is protected by an outer lock - uint8_t specsigflags; - void *fptr; - jl_read_codeinst_invoke(codeinst, &specsigflags, &invoke, &fptr, 0); - //if (specsig ? 
specsigflags & 0b1 : invoke == jl_fptr_args_addr) - if (invoke == jl_fptr_args_addr) { - preal_decl = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)fptr, invoke, codeinst); - } - else if (specsigflags & 0b1) { - preal_decl = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)fptr, invoke, codeinst); - preal_specsig = true; - } - } - if (preal_decl.empty()) { - auto it = params.compiled_functions.find(codeinst); - if (it == params.compiled_functions.end()) { - // Reinfer the function. The JIT came along and removed the inferred - // method body. See #34993 - if ((policy != CompilationPolicy::Default || params.params->trim) && - jl_atomic_load_relaxed(&codeinst->inferred) == jl_nothing) { - // XXX: SOURCE_MODE_FORCE_SOURCE is wrong here (neither sufficient nor necessary) - codeinst = jl_type_infer(codeinst->def, jl_atomic_load_relaxed(&codeinst->max_world), SOURCE_MODE_FORCE_SOURCE); - } - if (codeinst) { - orc::ThreadSafeModule result_m = - jl_create_ts_module(name_from_method_instance(codeinst->def), - params.tsctx, params.DL, params.TargetTriple); - auto decls = jl_emit_codeinst(result_m, codeinst, NULL, params); - if (result_m) - it = params.compiled_functions.insert(std::make_pair(codeinst, std::make_pair(std::move(result_m), std::move(decls)))).first; - } - } - if (it != params.compiled_functions.end()) { - auto &decls = it->second.second; - if (decls.functionObject == "jl_fptr_args") { - preal_decl = decls.specFunctionObject; - } - else if (decls.functionObject != "jl_fptr_sparam") { - preal_decl = decls.specFunctionObject; - preal_specsig = true; - } - } - } - // patch up the prototype we emitted earlier - Module *mod = proto.decl->getParent(); - assert(proto.decl->isDeclaration()); - if (proto.specsig) { - // expected specsig - if (!preal_specsig) { - if (params.params->trim) { - auto it = params.compiled_functions.find(codeinst); //TODO: What to do about this - errs() << "Bailed out to invoke when compiling:"; - jl_(codeinst->def); - if (it != params.compiled_functions.end()) { - errs() << it->second.second.functionObject << "\n"; - errs() << it->second.second.specFunctionObject << "\n"; - } else - errs() << "codeinst not in compile_functions\n"; - } - // emit specsig-to-(jl)invoke conversion - StringRef invokeName; - if (invoke != NULL) - invokeName = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)invoke, invoke, codeinst); - Function *preal = emit_tojlinvoke(codeinst, invokeName, mod, params); - proto.decl->setLinkage(GlobalVariable::InternalLinkage); - //protodecl->setAlwaysInline(); - jl_init_function(proto.decl, params.TargetTriple); - size_t nrealargs = jl_nparams(codeinst->def->specTypes); // number of actual arguments being passed - // TODO: maybe this can be cached in codeinst->specfptr? 
- emit_cfunc_invalidate(proto.decl, proto.cc, proto.return_roots, codeinst->def->specTypes, codeinst->rettype, false, nrealargs, params, preal, 0, 0); - preal_decl = ""; // no need to fixup the name - } - else { - assert(!preal_decl.empty()); - } - } - else { - // expected non-specsig - if (preal_decl.empty() || preal_specsig) { - // emit jlcall1-to-(jl)invoke conversion - StringRef invokeName; - if (invoke != NULL) - invokeName = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)invoke, invoke, codeinst); - preal_decl = emit_tojlinvoke(codeinst, invokeName, mod, params)->getName(); - } - } - if (!preal_decl.empty()) { - // merge and/or rename this prototype to the real function - if (Value *specfun = mod->getNamedValue(preal_decl)) { - if (proto.decl != specfun) - proto.decl->replaceAllUsesWith(specfun); - } - else { - proto.decl->setName(preal_decl); - } - } - } - JL_GC_POP(); -} - - // --- initialization --- -SmallVector, 0> gv_for_global; +static auto gv_for_global = new SmallVector, 0>(); static void global_jlvalue_to_llvm(JuliaVariable *var, jl_value_t **addr) { - gv_for_global.push_back(std::make_pair(addr, var)); + gv_for_global->push_back(std::make_pair(addr, var)); } static JuliaVariable *julia_const_gv(jl_value_t *val) { - for (auto &kv : gv_for_global) { + for (auto &kv : *gv_for_global) { if (*kv.first == val) return kv.second; } @@ -10286,6 +10159,9 @@ static JuliaVariable *julia_const_gv(jl_value_t *val) static void init_jit_functions(void) { + add_named_global("jl_fptr_args", jl_fptr_args_addr); + add_named_global("jl_fptr_sparam", jl_fptr_sparam_addr); + add_named_global("jl_f_opaque_closure_call", &jl_f_opaque_closure_call); add_named_global(jl_small_typeof_var, &jl_small_typeof); add_named_global(jlstack_chk_guard_var, &__stack_chk_guard); add_named_global(jlRTLD_DEFAULT_var, &jl_RTLD_DEFAULT_handle); diff --git a/src/debug-registry.h b/src/debug-registry.h index 4c9e13d8cd72d..4d0b7a44f19e5 100644 --- a/src/debug-registry.h +++ b/src/debug-registry.h @@ -32,7 +32,7 @@ class JITDebugInfoRegistry std::unique_lock lock; CResourceT &resource; - Lock(std::mutex &mutex, CResourceT &resource) JL_NOTSAFEPOINT : lock(mutex), resource(resource) {} + Lock(std::mutex &mutex, CResourceT &resource) JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER : lock(mutex), resource(resource) {} Lock(Lock &&) JL_NOTSAFEPOINT = default; Lock &operator=(Lock &&) JL_NOTSAFEPOINT = default; @@ -56,7 +56,7 @@ class JITDebugInfoRegistry return resource; } - ~Lock() JL_NOTSAFEPOINT = default; + ~Lock() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_LEAVE = default; }; private: @@ -68,15 +68,15 @@ class JITDebugInfoRegistry Locked(ResourceT resource = ResourceT()) JL_NOTSAFEPOINT : mutex(), resource(std::move(resource)) {} - LockT operator*() JL_NOTSAFEPOINT { + LockT operator*() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER { return LockT(mutex, resource); } - ConstLockT operator*() const JL_NOTSAFEPOINT { + ConstLockT operator*() const JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER { return ConstLockT(mutex, resource); } - ~Locked() JL_NOTSAFEPOINT = default; + ~Locked() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_LEAVE = default; }; struct image_info_t { @@ -105,6 +105,7 @@ class JITDebugInfoRegistry std::unique_ptr object; std::unique_ptr context; LazyObjectInfo() = delete; + ~LazyObjectInfo() JL_NOTSAFEPOINT = default; }; struct SectionInfo { @@ -113,6 +114,7 @@ class JITDebugInfoRegistry ptrdiff_t slide; uint64_t SectionIndex; SectionInfo() = delete; + ~SectionInfo() JL_NOTSAFEPOINT = default; }; template @@ -145,7 +147,7 @@ class 
JITDebugInfoRegistry void add_code_in_flight(llvm::StringRef name, jl_code_instance_t *codeinst, const llvm::DataLayout &DL) JL_NOTSAFEPOINT; jl_method_instance_t *lookupLinfo(size_t pointer) JL_NOTSAFEPOINT; void registerJITObject(const llvm::object::ObjectFile &Object, - std::function getLoadAddress); + std::function getLoadAddress) JL_NOTSAFEPOINT; objectmap_t& getObjectMap() JL_NOTSAFEPOINT; void add_image_info(image_info_t info) JL_NOTSAFEPOINT; bool get_image_info(uint64_t base, image_info_t *info) const JL_NOTSAFEPOINT; diff --git a/src/debuginfo.cpp b/src/debuginfo.cpp index f6fca47e9a889..31f1ba8281a89 100644 --- a/src/debuginfo.cpp +++ b/src/debuginfo.cpp @@ -58,7 +58,7 @@ extern "C" void __register_frame(void*) JL_NOTSAFEPOINT; extern "C" void __deregister_frame(void*) JL_NOTSAFEPOINT; template -static void processFDEs(const char *EHFrameAddr, size_t EHFrameSize, callback f) +static void processFDEs(const char *EHFrameAddr, size_t EHFrameSize, callback f) JL_NOTSAFEPOINT { const char *P = EHFrameAddr; const char *End = P + EHFrameSize; @@ -164,6 +164,12 @@ static void jl_profile_atomic(T f) JL_NOTSAFEPOINT // --- storing and accessing source location metadata --- void jl_add_code_in_flight(StringRef name, jl_code_instance_t *codeinst, const DataLayout &DL) { + // Non-opaque-closure MethodInstances are considered globally rooted + // through their methods, but for OC, we need to create a global root + // here. + jl_method_instance_t *mi = codeinst->def; + if (jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure) + jl_as_global_root((jl_value_t*)mi, 1); getJITDebugRegistry().add_code_in_flight(name, codeinst, DL); } @@ -369,11 +375,6 @@ void JITDebugInfoRegistry::registerJITObject(const object::ObjectFile &Object, if (codeinst) { JL_GC_PROMISE_ROOTED(codeinst); mi = codeinst->def; - // Non-opaque-closure MethodInstances are considered globally rooted - // through their methods, but for OC, we need to create a global root - // here. - if (jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure) - mi = (jl_method_instance_t*)jl_as_global_root((jl_value_t*)mi, 1); } jl_profile_atomic([&]() JL_NOTSAFEPOINT { if (mi) @@ -1281,14 +1282,14 @@ void register_eh_frames(uint8_t *Addr, size_t Size) { // On OS X OS X __register_frame takes a single FDE as an argument. // See http://lists.cs.uiuc.edu/pipermail/llvmdev/2013-April/061768.html - processFDEs((char*)Addr, Size, [](const char *Entry) { + processFDEs((char*)Addr, Size, [](const char *Entry) JL_NOTSAFEPOINT { getJITDebugRegistry().libc_frames.libc_register_frame(Entry); }); } void deregister_eh_frames(uint8_t *Addr, size_t Size) { - processFDEs((char*)Addr, Size, [](const char *Entry) { + processFDEs((char*)Addr, Size, [](const char *Entry) JL_NOTSAFEPOINT { getJITDebugRegistry().libc_frames.libc_deregister_frame(Entry); }); } @@ -1300,7 +1301,7 @@ void deregister_eh_frames(uint8_t *Addr, size_t Size) // Skip over an arbitrary long LEB128 encoding. // Return the pointer to the first unprocessed byte. -static const uint8_t *consume_leb128(const uint8_t *Addr, const uint8_t *End) +static const uint8_t *consume_leb128(const uint8_t *Addr, const uint8_t *End) JL_NOTSAFEPOINT { const uint8_t *P = Addr; while ((*P >> 7) != 0 && P < End) @@ -1312,7 +1313,7 @@ static const uint8_t *consume_leb128(const uint8_t *Addr, const uint8_t *End) // bytes than what there are more bytes than what the type can store. // Adjust the pointer to the first unprocessed byte. 
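(Reference sketch, not part of the patch: consume_leb128 above and the parse_leb128 template that follows operate on the LEB128 variable-length integers used throughout DWARF CIE/FDE records. The standalone decoder below shows the unsigned form with illustrative names; signed LEB128 additionally sign-extends from the final byte, and overflow past 64 bits is ignored here.)

    #include <cstdint>

    // Decode one unsigned LEB128 value starting at p, without reading past end.
    // Each byte contributes its low 7 bits; a set high bit means "more bytes follow".
    static uint64_t decode_uleb128(const uint8_t *&p, const uint8_t *end)
    {
        uint64_t value = 0;
        unsigned shift = 0;
        while (p < end) {
            uint8_t byte = *p++;
            value |= (uint64_t)(byte & 0x7f) << shift;
            if ((byte & 0x80) == 0)
                break;      // continuation bit clear: last byte of this value
            shift += 7;
        }
        return value;
    }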
template static T parse_leb128(const uint8_t *&Addr, - const uint8_t *End) + const uint8_t *End) JL_NOTSAFEPOINT { typedef typename std::make_unsigned::type uT; uT v = 0; @@ -1335,7 +1336,7 @@ template static T parse_leb128(const uint8_t *&Addr, } template -static U safe_trunc(T t) +static U safe_trunc(T t) JL_NOTSAFEPOINT { assert((t >= static_cast(std::numeric_limits::min())) && (t <= static_cast(std::numeric_limits::max()))); @@ -1375,7 +1376,7 @@ enum DW_EH_PE : uint8_t { }; // Parse the CIE and return the type of encoding used by FDE -static DW_EH_PE parseCIE(const uint8_t *Addr, const uint8_t *End) +static DW_EH_PE parseCIE(const uint8_t *Addr, const uint8_t *End) JL_NOTSAFEPOINT { // https://www.airs.com/blog/archives/460 // Length (4 bytes) @@ -1481,7 +1482,7 @@ void register_eh_frames(uint8_t *Addr, size_t Size) // Now first count the number of FDEs size_t nentries = 0; - processFDEs((char*)Addr, Size, [&](const char*){ nentries++; }); + processFDEs((char*)Addr, Size, [&](const char*) JL_NOTSAFEPOINT { nentries++; }); if (nentries == 0) return; @@ -1510,7 +1511,7 @@ void register_eh_frames(uint8_t *Addr, size_t Size) // CIE's (may not happen) without parsing it every time. const uint8_t *cur_cie = nullptr; DW_EH_PE encoding = DW_EH_PE_omit; - processFDEs((char*)Addr, Size, [&](const char *Entry) { + processFDEs((char*)Addr, Size, [&](const char *Entry) JL_NOTSAFEPOINT { // Skip Length (4bytes) and CIE offset (4bytes) uint32_t fde_size = *(const uint32_t*)Entry; uint32_t cie_id = ((const uint32_t*)Entry)[1]; @@ -1631,7 +1632,7 @@ void deregister_eh_frames(uint8_t *Addr, size_t Size) #endif extern "C" JL_DLLEXPORT_CODEGEN -uint64_t jl_getUnwindInfo_impl(uint64_t dwAddr) +uint64_t jl_getUnwindInfo_impl(uint64_t dwAddr) JL_NOTSAFEPOINT { // Might be called from unmanaged thread jl_lock_profile(); diff --git a/src/engine.cpp b/src/engine.cpp index 6db4dce44e48e..2b68de731c4dd 100644 --- a/src/engine.cpp +++ b/src/engine.cpp @@ -45,8 +45,8 @@ template<> struct llvm::DenseMapInfo { } }; -static std::mutex engine_lock; -static std::condition_variable engine_wait; +static std::mutex engine_lock; // n.b. 
this lock is only ever held briefly +static std::condition_variable engine_wait; // but it may be waiting a while in this state // map from MethodInstance to threadid that owns it currently for inference static DenseMap Reservations; // vector of which threads are blocked and which lease they need @@ -63,55 +63,51 @@ jl_code_instance_t *jl_engine_reserve(jl_method_instance_t *m, jl_value_t *owner ct->ptls->engine_nqueued++; // disables finalizers until inference is finished on this method graph jl_code_instance_t *ci = jl_new_codeinst_uninit(m, owner); // allocate a placeholder JL_GC_PUSH1(&ci); - int8_t gc_state = jl_gc_safe_enter(ct->ptls); - InferKey key = {m, owner}; - std::unique_lock lock(engine_lock); auto tid = jl_atomic_load_relaxed(&ct->tid); - if ((signed)Awaiting.size() < tid + 1) - Awaiting.resize(tid + 1); - while (1) { - auto record = Reservations.find(key); - if (record == Reservations.end()) { - Reservations[key] = ReservationInfo{tid, ci}; - lock.unlock(); - jl_gc_safe_leave(ct->ptls, gc_state); // contains jl_gc_safepoint - JL_GC_POP(); - return ci; - } - // before waiting, need to run deadlock/cycle detection - // there is a cycle if the thread holding our lease is blocked - // and waiting for (transitively) any lease that is held by this thread - auto wait_tid = record->second.tid; - while (1) { - if (wait_tid == tid) { - lock.unlock(); - jl_gc_safe_leave(ct->ptls, gc_state); // contains jl_gc_safepoint - JL_GC_POP(); - ct->ptls->engine_nqueued--; - return ci; // break the cycle + if (([tid, m, owner, ci] () -> bool { // necessary scope block / lambda for unique_lock + jl_unique_gcsafe_lock lock(engine_lock); + InferKey key{m, owner}; + if ((signed)Awaiting.size() < tid + 1) + Awaiting.resize(tid + 1); + while (1) { + auto record = Reservations.find(key); + if (record == Reservations.end()) { + Reservations[key] = ReservationInfo{tid, ci}; + return false; + } + // before waiting, need to run deadlock/cycle detection + // there is a cycle if the thread holding our lease is blocked + // and waiting for (transitively) any lease that is held by this thread + auto wait_tid = record->second.tid; + while (1) { + if (wait_tid == tid) + return true; + if ((signed)Awaiting.size() <= wait_tid) + break; // no cycle, since it is running (and this should be unreachable) + auto key2 = Awaiting[wait_tid]; + if (key2.mi == nullptr) + break; // no cycle, since it is running + auto record2 = Reservations.find(key2); + if (record2 == Reservations.end()) + break; // no cycle, since it is about to resume + assert(wait_tid != record2->second.tid); + wait_tid = record2->second.tid; + } + Awaiting[tid] = key; + lock.wait(engine_wait); + Awaiting[tid] = InferKey{}; } - if ((signed)Awaiting.size() <= wait_tid) - break; // no cycle, since it is running (and this should be unreachable) - auto key2 = Awaiting[wait_tid]; - if (key2.mi == nullptr) - break; // no cycle, since it is running - auto record2 = Reservations.find(key2); - if (record2 == Reservations.end()) - break; // no cycle, since it is about to resume - assert(wait_tid != record2->second.tid); - wait_tid = record2->second.tid; - } - Awaiting[tid] = key; - engine_wait.wait(lock); - Awaiting[tid] = InferKey{}; - } + })()) + ct->ptls->engine_nqueued--; + JL_GC_POP(); + return ci; } int jl_engine_hasreserved(jl_method_instance_t *m, jl_value_t *owner) { jl_task_t *ct = jl_current_task; InferKey key = {m, owner}; - std::unique_lock lock(engine_lock); + std::unique_lock lock(engine_lock); auto record = Reservations.find(key); return record != 
Reservations.end() && record->second.tid == jl_atomic_load_relaxed(&ct->tid); } @@ -123,7 +119,7 @@ STATIC_INLINE int gc_marked(uintptr_t bits) JL_NOTSAFEPOINT void jl_engine_sweep(jl_ptls_t *gc_all_tls_states) { - std::unique_lock lock(engine_lock); + std::unique_lock lock(engine_lock); bool any = false; for (auto I = Reservations.begin(); I != Reservations.end(); ++I) { jl_code_instance_t *ci = I->second.ci; @@ -142,7 +138,7 @@ void jl_engine_sweep(jl_ptls_t *gc_all_tls_states) void jl_engine_fulfill(jl_code_instance_t *ci, jl_code_info_t *src) { jl_task_t *ct = jl_current_task; - std::unique_lock lock(engine_lock); + std::unique_lock lock(engine_lock); auto record = Reservations.find(InferKey{ci->def, ci->owner}); if (record == Reservations.end() || record->second.ci != ci) return; @@ -152,7 +148,6 @@ void jl_engine_fulfill(jl_code_instance_t *ci, jl_code_info_t *src) engine_wait.notify_all(); } - #ifdef __cplusplus } #endif diff --git a/src/gf.c b/src/gf.c index fc2e62ebff96b..e77c950c38ae4 100644 --- a/src/gf.c +++ b/src/gf.c @@ -3222,6 +3222,23 @@ jl_value_t *jl_argtype_with_function_type(jl_value_t *ft JL_MAYBE_UNROOTED, jl_v return tt; } +// undo jl_argtype_with_function transform +jl_value_t *jl_argtype_without_function(jl_value_t *ftypes) +{ + jl_value_t *types = jl_unwrap_unionall(ftypes); + size_t l = jl_nparams(types); + if (l == 1 && jl_is_vararg(jl_tparam0(types))) + return ftypes; + jl_value_t *tt = (jl_value_t*)jl_alloc_svec(l - 1); + JL_GC_PUSH1(&tt); + for (size_t i = 1; i < l; i++) + jl_svecset(tt, i - 1, jl_tparam(types, i)); + tt = (jl_value_t*)jl_apply_tuple_type((jl_svec_t*)tt, 0); + tt = jl_rewrap_unionall_(tt, types); + JL_GC_POP(); + return tt; +} + #ifdef JL_TRACE static int trace_en = 0; static int error_en = 1; diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index 313449dda5557..8b8004af03616 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -64,9 +64,6 @@ using namespace llvm; #define DEBUG_TYPE "julia_jitlayers" STATISTIC(LinkedGlobals, "Number of globals linked"); -STATISTIC(CompiledCodeinsts, "Number of codeinsts compiled directly"); -STATISTIC(MaxWorkqueueSize, "Maximum number of elements in the workqueue"); -STATISTIC(IndirectCodeinsts, "Number of dependent codeinsts compiled"); STATISTIC(SpecFPtrCount, "Number of specialized function pointers compiled"); STATISTIC(UnspecFPtrCount, "Number of specialized function pointers compiled"); STATISTIC(ModulesAdded, "Number of modules added to the JIT"); @@ -151,13 +148,6 @@ void jl_dump_llvm_opt_impl(void *s) **jl_ExecutionEngine->get_dump_llvm_opt_stream() = (ios_t*)s; } -#ifndef JL_USE_JITLINK -static int jl_add_to_ee( - orc::ThreadSafeModule &M, - const StringMap &NewExports, - DenseMap &Queued, - SmallVectorImpl &Stack) JL_NOTSAFEPOINT; -#endif static void jl_decorate_module(Module &M) JL_NOTSAFEPOINT; void jl_link_global(GlobalVariable *GV, void *addr) JL_NOTSAFEPOINT @@ -187,214 +177,536 @@ void jl_jit_globals(std::map &globals) JL_NOTSAFEPOINT } } -// this generates llvm code for the lambda info -// and adds the result to the jitlayers -// (and the shadow module), -// and generates code for it -static jl_callptr_t _jl_compile_codeinst( - jl_code_instance_t *codeinst, - jl_code_info_t *src, - orc::ThreadSafeContext context) + // lock for places where only single threaded behavior is implemented, so we need GC support +static jl_mutex_t jitlock; + // locks for adding external code to the JIT atomically +static std::mutex extern_c_lock; + // locks and barriers for this state +static std::mutex 
engine_lock; +static std::condition_variable engine_wait; +static int threads_in_compiler_phase; + // the TSM for each codeinst +static SmallVector sharedmodules; +static DenseMap emittedmodules; + // the invoke and specsig function names in the JIT +static DenseMap invokenames; + // everything that any thread wants to compile right now +static DenseSet compileready; + // everything that any thread has compiled recently +static DenseSet linkready; + // a map from a codeinst to the outgoing edges needed before linking it +static DenseMap> complete_graph; + // the state for each codeinst and the number of unresolved edges (we don't + // really need this once JITLink is available everywhere, since every module + // is automatically complete, and we can emit any required fixups later as a + // separate module) +static DenseMap> incompletemodules; + // the set of incoming unresolved edges resolved by a codeinstance +static DenseMap> incomplete_rgraph; + +// Lock hierarchy here: +// jitlock is outermost, can contain others and allows GC +// engine_lock is next +// ThreadSafeContext locks are next, they should not be nested (unless engine_lock is also held, but this may make TSAN sad anyways) +// extern_c_lock is next +// jl_ExecutionEngine internal locks are exclusive to this list, since OrcJIT promises to never hold a lock over a materialization unit: +// construct a query object from a query set and query handler +// lock the session +// lodge query against requested symbols, collect required materializers (if any) +// unlock the session +// dispatch materializers (if any) +// However, this guarantee relies on Julia releasing all TSC locks before causing any materialization units to be dispatched +// as materialization may need to acquire TSC locks. + + +static void finish_params(Module *M, jl_codegen_params_t ¶ms) JL_NOTSAFEPOINT { - // caller must hold codegen_lock - // and have disabled finalizers - uint64_t start_time = 0; - bool timed = !!*jl_ExecutionEngine->get_dump_compiles_stream(); - if (timed) - start_time = jl_hrtime(); + if (params._shared_module) { + sharedmodules.push_back(orc::ThreadSafeModule(std::move(params._shared_module), params.tsctx)); + } + + // In imaging mode, we can't inline global variable initializers in order to preserve + // the fiction that we don't know what loads from the global will return. Thus, we + // need to emit a separate module for the globals before any functions are compiled, + // to ensure that the globals are defined when they are compiled. 
+ if (params.imaging_mode) { + if (!params.global_targets.empty()) { + void **globalslots = new void*[params.global_targets.size()]; + void **slot = globalslots; + for (auto &global : params.global_targets) { + auto GV = global.second; + *slot = global.first; + jl_ExecutionEngine->addGlobalMapping(GV->getName(), (uintptr_t)slot); + slot++; + } +#ifdef __clang_analyzer__ + static void **leaker = globalslots; // for the purpose of the analyzer, we need to expressly leak this variable or it thinks we forgot to free it +#endif + } + } + else { + StringMap NewGlobals; + for (auto &global : params.global_targets) { + NewGlobals[global.second->getName()] = global.first; + } + for (auto &GV : M->globals()) { + auto InitValue = NewGlobals.find(GV.getName()); + if (InitValue != NewGlobals.end()) { + jl_link_global(&GV, InitValue->second); + } + } + } +} - assert(jl_is_code_instance(codeinst)); - JL_TIMING(CODEINST_COMPILE, CODEINST_COMPILE); - jl_callptr_t fptr = NULL; - // emit the code in LLVM IR form - jl_codegen_params_t params(std::move(context), jl_ExecutionEngine->getDataLayout(), jl_ExecutionEngine->getTargetTriple()); // Locks the context - params.cache = true; - params.imaging_mode = imaging_default(); - params.debug_level = jl_options.debug_level; - { - orc::ThreadSafeModule result_m = - jl_create_ts_module(name_from_method_instance(codeinst->def), params.tsctx, params.DL, params.TargetTriple); - jl_llvm_functions_t decls = jl_emit_codeinst(result_m, codeinst, src, params); - if (result_m) - params.compiled_functions[codeinst] = {std::move(result_m), std::move(decls)}; - jl_compile_workqueue(params, CompilationPolicy::Default); - - if (params._shared_module) { - jl_ExecutionEngine->optimizeDLSyms(*params._shared_module); - jl_ExecutionEngine->addModule(orc::ThreadSafeModule(std::move(params._shared_module), params.tsctx)); +static int jl_analyze_workqueue(jl_code_instance_t *callee, jl_codegen_params_t ¶ms, bool forceall=false) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER +{ + jl_task_t *ct = jl_current_task; + decltype(params.workqueue) edges; + std::swap(params.workqueue, edges); + for (auto &it : edges) { + jl_code_instance_t *codeinst = it.first; + auto &proto = it.second; + // try to emit code for this item from the workqueue + StringRef invokeName = ""; + StringRef preal_decl = ""; + bool preal_specsig = false; + jl_callptr_t invoke = nullptr; + bool isedge = false; + assert(params.cache); + // Checking the cache here is merely an optimization and not strictly required + // But it must be consistent with the following invokenames lookup, which is protected by the engine_lock + uint8_t specsigflags; + void *fptr; + jl_read_codeinst_invoke(codeinst, &specsigflags, &invoke, &fptr, 0); + //if (specsig ? specsigflags & 0b1 : invoke == jl_fptr_args_addr) + if (invoke == jl_fptr_args_addr) { + preal_decl = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)fptr, invoke, codeinst); } - - // In imaging mode, we can't inline global variable initializers in order to preserve - // the fiction that we don't know what loads from the global will return. Thus, we - // need to emit a separate module for the globals before any functions are compiled, - // to ensure that the globals are defined when they are compiled. 
- if (params.imaging_mode) { - // Won't contain any PLT/dlsym calls, so no need to optimize those - if (!params.global_targets.empty()) { - void **globalslots = new void*[params.global_targets.size()]; - void **slot = globalslots; - for (auto &global : params.global_targets) { - auto GV = global.second; - *slot = global.first; - jl_ExecutionEngine->addGlobalMapping(GV->getName(), (uintptr_t)slot); - slot++; + else if (specsigflags & 0b1) { + preal_decl = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)fptr, invoke, codeinst); + preal_specsig = true; + } + bool force = forceall || invoke != nullptr; + if (preal_decl.empty()) { + auto it = invokenames.find(codeinst); + if (it != invokenames.end()) { + auto &decls = it->second; + invokeName = decls.functionObject; + if (decls.functionObject == "jl_fptr_args") { + preal_decl = decls.specFunctionObject; + isedge = true; } -#ifdef __clang_analyzer__ - static void **leaker = globalslots; // for the purpose of the analyzer, we need to expressly leak this variable or it thinks we forgot to free it -#endif + else if (decls.functionObject != "jl_fptr_sparam" && decls.functionObject != "jl_f_opaque_closure_call") { + preal_decl = decls.specFunctionObject; + preal_specsig = true; + isedge = true; + } + force = true; } } - else { - StringMap NewGlobals; - for (auto &global : params.global_targets) { - NewGlobals[global.second->getName()] = global.first; + if (!preal_decl.empty() || force) { + // if we have a prototype emitted, compare it to what we emitted earlier + Module *mod = proto.decl->getParent(); + assert(proto.decl->isDeclaration()); + Function *pinvoke = nullptr; + if (preal_decl.empty()) { + if (invoke != nullptr && invokeName.empty()) { + assert(invoke != jl_fptr_args_addr); + if (invoke == jl_fptr_sparam_addr) + invokeName = "jl_fptr_sparam"; + else if (invoke == jl_f_opaque_closure_call_addr) + invokeName = "jl_f_opaque_closure_call"; + else + invokeName = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)invoke, invoke, codeinst); + } + pinvoke = emit_tojlinvoke(codeinst, invokeName, mod, params); + if (!proto.specsig) + proto.decl->replaceAllUsesWith(pinvoke); + isedge = false; } - for (auto &def : params.compiled_functions) { - auto M = std::get<0>(def.second).getModuleUnlocked(); - for (auto &GV : M->globals()) { - auto InitValue = NewGlobals.find(GV.getName()); - if (InitValue != NewGlobals.end()) { - jl_link_global(&GV, InitValue->second); - } + if (proto.specsig && !preal_specsig) { + // get or build an fptr1 that can invoke codeinst + if (pinvoke == nullptr) + pinvoke = get_or_emit_fptr1(preal_decl, mod); + // emit specsig-to-(jl)invoke conversion + proto.decl->setLinkage(GlobalVariable::InternalLinkage); + //protodecl->setAlwaysInline(); + jl_init_function(proto.decl, params.TargetTriple); + // TODO: maybe this can be cached in codeinst->specfptr? 
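(Conceptual sketch, not Julia's actual ABI: the emit_specsig_to_fptr1 call just below generates, in IR, an adapter whose exterior is the specialized signature but whose body boxes every argument and forwards to a generic fptr1-style entry point such as the pinvoke trampoline obtained above. The C++ analogue here uses hypothetical Box and fptr1_t stand-ins purely to make the shape of that thunk concrete.)

    #include <cstddef>

    // Stand-ins for the two conventions involved: a boxed value, and the generic
    // "fptr1" convention (function object plus an array of boxed arguments),
    // loosely mirroring jl_value_t *(*)(jl_value_t*, jl_value_t**, uint32_t).
    struct Box { double v; };
    typedef Box *(*fptr1_t)(Box *func, Box **args, size_t nargs);

    // The generated thunk keeps the specialized (unboxed) signature on the
    // outside, boxes its arguments, and delegates to the generic entry point.
    static double specsig_thunk(fptr1_t generic, Box *func, double a, double b)
    {
        Box ba{a}, bb{b};
        Box *argv[2] = { &ba, &bb };
        Box *ret = generic(func, argv, 2);
        return ret->v;
    }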
+ int8_t gc_state = jl_gc_unsafe_enter(ct->ptls); // codegen may contain safepoints (such as jl_subtype calls) + jl_method_instance_t *mi = codeinst->def; + size_t nrealargs = jl_nparams(mi->specTypes); // number of actual arguments being passed + bool is_opaque_closure = jl_is_method(mi->def.value) && mi->def.method->is_for_opaque_closure; + emit_specsig_to_fptr1(proto.decl, proto.cc, proto.return_roots, mi->specTypes, codeinst->rettype, is_opaque_closure, nrealargs, params, pinvoke, 0, 0); + jl_gc_unsafe_leave(ct->ptls, gc_state); + preal_decl = ""; // no need to fixup the name + } + if (!preal_decl.empty()) { + // merge and/or rename this prototype to the real function + if (Value *specfun = mod->getNamedValue(preal_decl)) { + if (proto.decl != specfun) + proto.decl->replaceAllUsesWith(specfun); + } + else { + proto.decl->setName(preal_decl); } } - } - -#ifndef JL_USE_JITLINK - // Collect the exported functions from the params.compiled_functions modules, - // which form dependencies on which functions need to be - // compiled first. Cycles of functions are compiled together. - // (essentially we compile a DAG of SCCs in reverse topological order, - // if we treat declarations of external functions as edges from declaration - // to definition) - StringMap NewExports; - for (auto &def : params.compiled_functions) { - orc::ThreadSafeModule &TSM = std::get<0>(def.second); - //The underlying context object is still locked because params is not destroyed yet - auto M = TSM.getModuleUnlocked(); - jl_ExecutionEngine->optimizeDLSyms(*M); - for (auto &F : M->global_objects()) { - if (!F.isDeclaration() && F.getLinkage() == GlobalValue::ExternalLinkage) { - NewExports[F.getName()] = &TSM; + if (proto.oc) { // additionally, if we are dealing with an OC constructor, then we might also need to fix up the fptr1 reference too + assert(proto.specsig); + StringRef ocinvokeDecl = invokeName; + if (invoke != nullptr && ocinvokeDecl.empty()) { + // check for some special tokens used by opaque_closure.c and convert those to their real functions + assert(invoke != jl_fptr_args_addr); + assert(invoke != jl_fptr_sparam_addr); + if (invoke == jl_fptr_interpret_call_addr) + ocinvokeDecl = "jl_fptr_interpret_call"; + else if (invoke == jl_fptr_const_return_addr) + ocinvokeDecl = "jl_fptr_const_return"; + else if (invoke == jl_f_opaque_closure_call_addr) + ocinvokeDecl = "jl_f_opaque_closure_call"; + //else if (invoke == jl_interpret_opaque_closure_addr) + else + ocinvokeDecl = jl_ExecutionEngine->getFunctionAtAddress((uintptr_t)invoke, invoke, codeinst); + } + // if OC expected a specialized specsig dispatch, but we don't have it, use the inner trampoline here too + // XXX: this invoke translation logic is supposed to exactly match new_opaque_closure + if (!preal_specsig || ocinvokeDecl == "jl_f_opaque_closure_call" || ocinvokeDecl == "jl_fptr_interpret_call" || ocinvokeDecl == "jl_fptr_const_return") { + if (pinvoke == nullptr) + ocinvokeDecl = get_or_emit_fptr1(preal_decl, mod)->getName(); + else + ocinvokeDecl = pinvoke->getName(); + } + assert(!ocinvokeDecl.empty()); + assert(ocinvokeDecl != "jl_fptr_args"); + assert(ocinvokeDecl != "jl_fptr_sparam"); + // merge and/or rename this prototype to the real function + if (Value *specfun = mod->getNamedValue(ocinvokeDecl)) { + if (proto.oc != specfun) + proto.oc->replaceAllUsesWith(specfun); + } + else { + proto.oc->setName(ocinvokeDecl); } } } - DenseMap Queued; - SmallVector Stack; - for (auto &def : params.compiled_functions) { - // Add the results to the 
execution engine now - orc::ThreadSafeModule &M = std::get<0>(def.second); - jl_add_to_ee(M, NewExports, Queued, Stack); - assert(Queued.empty() && Stack.empty() && !M); + else { + isedge = true; + params.workqueue.push_back(it); + incomplete_rgraph[codeinst].push_back(callee); } -#else - for (auto &def : params.compiled_functions) { - // Add the results to the execution engine now - orc::ThreadSafeModule &M = std::get<0>(def.second); - if (M) - jl_ExecutionEngine->addModule(std::move(M)); + if (isedge) + complete_graph[callee].push_back(codeinst); + } + return params.workqueue.size(); +} + +// test whether codeinst->invoke is usable already without further compilation needed +static bool jl_is_compiled_codeinst(jl_code_instance_t *codeinst) JL_NOTSAFEPOINT +{ + auto invoke = jl_atomic_load_relaxed(&codeinst->invoke); + if (invoke == nullptr || invoke == jl_fptr_wait_for_compiled_addr) + return false; + return true; +} + +// move codeinst (and deps) from incompletemodules to emitted modules +// and populate compileready from complete_graph +static void prepare_compile(jl_code_instance_t *codeinst) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER +{ + SmallVector workqueue; + workqueue.push_back(codeinst); + while (!workqueue.empty()) { + codeinst = workqueue.pop_back_val(); + if (!invokenames.count(codeinst)) { + // this means it should be compiled already while the callee was in stasis + assert(jl_is_compiled_codeinst(codeinst)); + continue; } -#endif - ++CompiledCodeinsts; - MaxWorkqueueSize.updateMax(params.compiled_functions.size()); - IndirectCodeinsts += params.compiled_functions.size() - 1; - } - - // batch compile job for all new functions - SmallVector NewDefs; - for (auto &def : params.compiled_functions) { - jl_llvm_functions_t &decls = std::get<1>(def.second); - if (decls.functionObject != "jl_fptr_args" && - decls.functionObject != "jl_fptr_sparam" && - decls.functionObject != "jl_f_opaque_closure_call") - NewDefs.push_back(decls.functionObject); - if (!decls.specFunctionObject.empty()) - NewDefs.push_back(decls.specFunctionObject); - } - auto Addrs = jl_ExecutionEngine->findSymbols(NewDefs); - - size_t i = 0; - size_t nextaddr = 0; - for (auto &def : params.compiled_functions) { - jl_code_instance_t *this_code = def.first; - if (i < jl_timing_print_limit) - jl_timing_show_func_sig(this_code->def->specTypes, JL_TIMING_DEFAULT_BLOCK); - - jl_llvm_functions_t &decls = std::get<1>(def.second); - jl_callptr_t addr; - bool isspecsig = false; - if (decls.functionObject == "jl_fptr_args") { - addr = jl_fptr_args_addr; + // if this was incomplete, force completion now of it + auto it = incompletemodules.find(codeinst); + if (it != incompletemodules.end()) { + int waiting = 0; + auto &edges = complete_graph[codeinst]; + auto edges_end = std::remove_if(edges.begin(), edges.end(), [&waiting, codeinst] (jl_code_instance_t *edge) JL_NOTSAFEPOINT -> bool { + auto &redges = incomplete_rgraph[edge]; + // waiting += std::erase(redges, codeinst); + auto redges_end = std::remove(redges.begin(), redges.end(), codeinst); + if (redges_end != redges.end()) { + waiting += redges.end() - redges_end; + redges.erase(redges_end, redges.end()); + assert(!invokenames.count(edge)); + } + return !invokenames.count(edge); + }); + edges.erase(edges_end, edges.end()); + assert(waiting == std::get<1>(it->second)); + std::get<1>(it->second) = 0; + auto ¶ms = std::get<0>(it->second); + params.tsctx_lock = params.tsctx.getLock(); + waiting = jl_analyze_workqueue(codeinst, params, true); // may safepoint + 
assert(!waiting); (void)waiting; + Module *M = emittedmodules[codeinst].getModuleUnlocked(); + finish_params(M, params); + incompletemodules.erase(it); + } + // and then indicate this should be compiled now + if (!linkready.count(codeinst) && compileready.insert(codeinst).second) { + auto edges = complete_graph.find(codeinst); + if (edges != complete_graph.end()) { + workqueue.append(edges->second); + } + } + } +} + +// notify any other pending work that this edge now has code defined +static void complete_emit(jl_code_instance_t *edge) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER +{ + auto notify = incomplete_rgraph.find(edge); + if (notify == incomplete_rgraph.end()) + return; + auto redges = std::move(notify->second); + incomplete_rgraph.erase(notify); + for (size_t i = 0; i < redges.size(); i++) { + jl_code_instance_t *callee = redges[i]; + auto it = incompletemodules.find(callee); + assert(it != incompletemodules.end()); + if (--std::get<1>(it->second) == 0) { + auto ¶ms = std::get<0>(it->second); + params.tsctx_lock = params.tsctx.getLock(); + assert(callee == it->first); + int waiting = jl_analyze_workqueue(callee, params); // may safepoint + assert(!waiting); (void)waiting; + Module *M = emittedmodules[callee].getModuleUnlocked(); + finish_params(M, params); + incompletemodules.erase(it); } - else if (decls.functionObject == "jl_fptr_sparam") { - addr = jl_fptr_sparam_addr; + } +} + + +// set the invoke field for codeinst (and all deps, and assist with other pending work from other threads) now +static void jl_compile_codeinst_now(jl_code_instance_t *codeinst) +{ + jl_unique_gcsafe_lock lock(engine_lock); + if (!invokenames.count(codeinst)) + return; + threads_in_compiler_phase++; + prepare_compile(codeinst); // may safepoint + while (1) { + // TODO: split up this work by ThreadSafeContext, so two threads don't need to get the same locks and stall + if (!sharedmodules.empty()) { + auto TSM = sharedmodules.pop_back_val(); + lock.native.unlock(); + { + auto Lock = TSM.getContext().getLock(); + jl_ExecutionEngine->optimizeDLSyms(*TSM.getModuleUnlocked()); // may safepoint + } + jl_ExecutionEngine->addModule(std::move(TSM)); + lock.native.lock(); } - else if (decls.functionObject == "jl_f_opaque_closure_call") { - addr = jl_f_opaque_closure_call_addr; + else if (!compileready.empty()) { + // move a function from compileready to linkready then compile it + auto compilenext = compileready.begin(); + codeinst = *compilenext; + compileready.erase(compilenext); + auto TSMref = emittedmodules.find(codeinst); + assert(TSMref != emittedmodules.end()); + auto TSM = std::move(TSMref->second); + linkready.insert(codeinst); + emittedmodules.erase(TSMref); + lock.native.unlock(); + uint64_t start_time = jl_hrtime(); + { + auto Lock = TSM.getContext().getLock(); + jl_ExecutionEngine->optimizeDLSyms(*TSM.getModuleUnlocked()); // may safepoint + } + jl_ExecutionEngine->addModule(std::move(TSM)); // may safepoint + // If logging of the compilation stream is enabled, + // then dump the method-instance specialization type to the stream + jl_method_instance_t *mi = codeinst->def; + if (jl_is_method(mi->def.method)) { + auto stream = *jl_ExecutionEngine->get_dump_compiles_stream(); + if (stream) { + uint64_t end_time = jl_hrtime(); + ios_printf(stream, "%" PRIu64 "\t\"", end_time - start_time); + jl_static_show((JL_STREAM*)stream, mi->specTypes); + ios_printf(stream, "\"\n"); + } + } + lock.native.lock(); } else { - assert(NewDefs[nextaddr] == decls.functionObject); - addr = (jl_callptr_t)Addrs[nextaddr++]; 
- assert(addr); - isspecsig = true; + break; } - if (!decls.specFunctionObject.empty()) { - void *prev_specptr = NULL; - assert(NewDefs[nextaddr] == decls.specFunctionObject); - void *spec = (void*)Addrs[nextaddr++]; - assert(spec); - if (jl_atomic_cmpswap_acqrel(&this_code->specptr.fptr, &prev_specptr, spec)) { - // only set specsig and invoke if we were the first to set specptr - jl_atomic_store_relaxed(&this_code->specsigflags, (uint8_t) isspecsig); - // we might overwrite invokeptr here; that's ok, anybody who relied on the identity of invokeptr - // either assumes that specptr was null, doesn't care about specptr, - // or will wait until specsigflags has 0b10 set before reloading invoke - jl_atomic_store_release(&this_code->invoke, addr); - jl_atomic_store_release(&this_code->specsigflags, (uint8_t) (0b10 | isspecsig)); - } else { - //someone else beat us, don't commit any results - while (!(jl_atomic_load_acquire(&this_code->specsigflags) & 0b10)) { - jl_cpu_pause(); + } + codeinst = nullptr; + // barrier until all threads have finished calling addModule + if (--threads_in_compiler_phase == 0) { + // the last thread out will finish linking everything + // then release all of the other threads + // move the function pointers out from invokenames to the codeinst + + // batch compile job for all new functions + SmallVector NewDefs; + for (auto &this_code : linkready) { + auto it = invokenames.find(this_code); + assert(it != invokenames.end()); + jl_llvm_functions_t &decls = it->second; + assert(!decls.functionObject.empty()); + if (decls.functionObject != "jl_fptr_args" && + decls.functionObject != "jl_fptr_sparam" && + decls.functionObject != "jl_f_opaque_closure_call") + NewDefs.push_back(decls.functionObject); + if (!decls.specFunctionObject.empty()) + NewDefs.push_back(decls.specFunctionObject); + } + auto Addrs = jl_ExecutionEngine->findSymbols(NewDefs); + + size_t nextaddr = 0; + for (auto &this_code : linkready) { + auto it = invokenames.find(this_code); + assert(it != invokenames.end()); + jl_llvm_functions_t &decls = it->second; + jl_callptr_t addr; + bool isspecsig = false; + if (decls.functionObject == "jl_fptr_args") { + addr = jl_fptr_args_addr; + } + else if (decls.functionObject == "jl_fptr_sparam") { + addr = jl_fptr_sparam_addr; + } + else if (decls.functionObject == "jl_f_opaque_closure_call") { + addr = jl_f_opaque_closure_call_addr; + } + else { + assert(NewDefs[nextaddr] == decls.functionObject); + addr = (jl_callptr_t)Addrs[nextaddr++]; + assert(addr); + isspecsig = true; + } + if (!decls.specFunctionObject.empty()) { + void *prev_specptr = nullptr; + assert(NewDefs[nextaddr] == decls.specFunctionObject); + void *spec = (void*)Addrs[nextaddr++]; + assert(spec); + if (jl_atomic_cmpswap_acqrel(&this_code->specptr.fptr, &prev_specptr, spec)) { + // only set specsig and invoke if we were the first to set specptr + jl_atomic_store_relaxed(&this_code->specsigflags, (uint8_t) isspecsig); + // we might overwrite invokeptr here; that's ok, anybody who relied on the identity of invokeptr + // either assumes that specptr was null, doesn't care about specptr, + // or will wait until specsigflags has 0b10 set before reloading invoke + jl_atomic_store_release(&this_code->invoke, addr); + jl_atomic_store_release(&this_code->specsigflags, (uint8_t) (0b10 | isspecsig)); + } + else { + //someone else beat us, don't commit any results + while (!(jl_atomic_load_acquire(&this_code->specsigflags) & 0b10)) { + jl_cpu_pause(); + } + addr = jl_atomic_load_relaxed(&this_code->invoke); } - 
addr = jl_atomic_load_relaxed(&this_code->invoke); } - } else { - jl_callptr_t prev_invoke = NULL; - // Allow replacing addr if it is either NULL or our special waiting placeholder. - if (!jl_atomic_cmpswap_acqrel(&this_code->invoke, &prev_invoke, addr)) { - if (prev_invoke == jl_fptr_wait_for_compiled_addr && !jl_atomic_cmpswap_acqrel(&this_code->invoke, &prev_invoke, addr)) { - addr = prev_invoke; - //TODO do we want to potentially promote invoke anyways? (e.g. invoke is jl_interpret_call or some other - //known lesser function) + else { + jl_callptr_t prev_invoke = nullptr; + // Allow replacing addr if it is either nullptr or our special waiting placeholder. + if (!jl_atomic_cmpswap_acqrel(&this_code->invoke, &prev_invoke, addr)) { + if (prev_invoke == jl_fptr_wait_for_compiled_addr && !jl_atomic_cmpswap_acqrel(&this_code->invoke, &prev_invoke, addr)) { + addr = prev_invoke; + //TODO do we want to potentially promote invoke anyways? (e.g. invoke is jl_interpret_call or some other + //known lesser function) + } } } + invokenames.erase(it); + complete_graph.erase(this_code); } - if (this_code == codeinst) - fptr = addr; - i++; + linkready.clear(); + engine_wait.notify_all(); + } + else while (threads_in_compiler_phase) { + lock.wait(engine_wait); } - if (i > jl_timing_print_limit) - jl_timing_printf(JL_TIMING_DEFAULT_BLOCK, "... <%d methods truncated>", i - 10); +} - uint64_t end_time = 0; - if (timed) - end_time = jl_hrtime(); - - // If logging of the compilation stream is enabled, - // then dump the method-instance specialization type to the stream - jl_method_instance_t *mi = codeinst->def; - if (jl_is_method(mi->def.method)) { - auto stream = *jl_ExecutionEngine->get_dump_compiles_stream(); - if (stream) { - ios_printf(stream, "%" PRIu64 "\t\"", end_time - start_time); - jl_static_show((JL_STREAM*)stream, mi->specTypes); - ios_printf(stream, "\"\n"); +static void jl_emit_codeinst_to_jit( + jl_code_instance_t *codeinst, + jl_code_info_t *src) +{ + { // lock scope + jl_unique_gcsafe_lock lock(engine_lock); + if (invokenames.count(codeinst) || jl_is_compiled_codeinst(codeinst)) + return; + } + JL_TIMING(CODEINST_COMPILE, CODEINST_COMPILE); + // emit the code in LLVM IR form to the new context + jl_codegen_params_t params(std::make_unique(), jl_ExecutionEngine->getDataLayout(), jl_ExecutionEngine->getTargetTriple()); // Locks the context + params.cache = true; + params.imaging_mode = imaging_default(); + params.debug_level = jl_options.debug_level; + orc::ThreadSafeModule result_m = + jl_create_ts_module(name_from_method_instance(codeinst->def), params.tsctx, params.DL, params.TargetTriple); + jl_llvm_functions_t decls = jl_emit_codeinst(result_m, codeinst, src, params); // contains safepoints + if (!result_m) + return; + { // drop lock before acquiring engine_lock + auto release = std::move(params.tsctx_lock); + } + jl_unique_gcsafe_lock lock(engine_lock); + if (invokenames.count(codeinst) || jl_is_compiled_codeinst(codeinst)) + return; // destroy everything + invokenames[codeinst] = std::move(decls); + complete_emit(codeinst); + params.tsctx_lock = params.tsctx.getLock(); // re-acquire lock + int waiting = jl_analyze_workqueue(codeinst, params); + if (waiting) { + auto release = std::move(params.tsctx_lock); // unlock again before moving from it + incompletemodules.insert(std::pair(codeinst, std::tuple(std::move(params), waiting))); + } + else { + finish_params(result_m.getModuleUnlocked(), params); + } + emittedmodules[codeinst] = std::move(result_m); +} + +static void 
recursive_compile_graph( + jl_code_instance_t *codeinst, + jl_code_info_t *src) +{ + jl_emit_codeinst_to_jit(codeinst, src); + DenseSet Seen; + SmallVector workqueue; + workqueue.push_back(codeinst); + // if any edges were incomplete, try to complete them now + while (!workqueue.empty()) { + auto this_code = workqueue.pop_back_val(); + if (Seen.insert(this_code).second) { + if (this_code != codeinst) + jl_emit_codeinst_to_jit(this_code, nullptr); // contains safepoints + jl_unique_gcsafe_lock lock(engine_lock); + auto edges = complete_graph.find(this_code); + if (edges != complete_graph.end()) { + workqueue.append(edges->second); + } } } - return fptr; } +// this generates llvm code for the lambda info +// and adds the result to the jitlayers +// (and the shadow module), +// and generates code for it +static jl_callptr_t _jl_compile_codeinst( + jl_code_instance_t *codeinst, + jl_code_info_t *src) +{ + recursive_compile_graph(codeinst, src); + jl_compile_codeinst_now(codeinst); + return jl_atomic_load_acquire(&codeinst->invoke); +} + + const char *jl_generate_ccallable(LLVMOrcThreadSafeModuleRef llvmmod, void *sysimg_handle, jl_value_t *declrt, jl_value_t *sigt, jl_codegen_params_t ¶ms); // compile a C-callable alias @@ -415,42 +727,40 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void * orc::ThreadSafeModule backing; if (into == NULL) { if (!pparams) { - ctx = jl_ExecutionEngine->acquireContext(); + ctx = jl_ExecutionEngine->makeContext(); } backing = jl_create_ts_module("cextern", pparams ? pparams->tsctx : ctx, pparams ? pparams->DL : jl_ExecutionEngine->getDataLayout(), pparams ? pparams->TargetTriple : jl_ExecutionEngine->getTargetTriple()); into = &backing; } - auto target_info = into->withModuleDo([&](Module &M) { - return std::make_pair(M.getDataLayout(), Triple(M.getTargetTriple())); - }); - jl_codegen_params_t params(into->getContext(), std::move(target_info.first), std::move(target_info.second)); - params.imaging_mode = imaging_default(); - params.debug_level = jl_options.debug_level; - if (pparams == NULL) - pparams = ¶ms; - assert(pparams->tsctx.getContext() == into->getContext().getContext()); - const char *name = jl_generate_ccallable(wrap(into), sysimg, declrt, sigt, *pparams); bool success = true; - if (!sysimg) { - JL_LOCK(&jl_ExecutionEngine->jitlock); - if (jl_ExecutionEngine->getGlobalValueAddress(name)) { - success = false; - } - if (success && p == NULL) { - jl_jit_globals(params.global_targets); - assert(params.workqueue.empty()); - if (params._shared_module) { - jl_ExecutionEngine->optimizeDLSyms(*params._shared_module); - jl_ExecutionEngine->addModule(orc::ThreadSafeModule(std::move(params._shared_module), params.tsctx)); + { + auto Lock = into->getContext().getLock(); + Module *M = into->getModuleUnlocked(); + jl_codegen_params_t params(into->getContext(), M->getDataLayout(), Triple(M->getTargetTriple())); + params.imaging_mode = imaging_default(); + params.debug_level = jl_options.debug_level; + if (pparams == NULL) + pparams = ¶ms; + assert(pparams->tsctx.getContext() == into->getContext().getContext()); + const char *name = jl_generate_ccallable(wrap(into), sysimg, declrt, sigt, *pparams); + if (!sysimg) { + jl_unique_gcsafe_lock lock(extern_c_lock); + if (jl_ExecutionEngine->getGlobalValueAddress(name)) { + success = false; + } + if (success && p == NULL) { + jl_jit_globals(params.global_targets); + assert(params.workqueue.empty()); + if (params._shared_module) { + jl_ExecutionEngine->optimizeDLSyms(*params._shared_module); // 
safepoint + jl_ExecutionEngine->addModule(orc::ThreadSafeModule(std::move(params._shared_module), params.tsctx)); + } + } + if (success && llvmmod == NULL) { + jl_ExecutionEngine->optimizeDLSyms(*M); // safepoint + jl_ExecutionEngine->addModule(std::move(*into)); } } - if (success && llvmmod == NULL) { - into->withModuleDo([&](Module &M) { - jl_ExecutionEngine->optimizeDLSyms(M); - }); - jl_ExecutionEngine->addModule(std::move(*into)); - } - JL_UNLOCK(&jl_ExecutionEngine->jitlock); // Might GC } if (timed) { if (measure_compile_time_enabled) { @@ -459,9 +769,6 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void * } ct->reentrant_timing &= ~1ull; } - if (ctx.getContext()) { - jl_ExecutionEngine->releaseContext(std::move(ctx)); - } return success; } @@ -512,18 +819,13 @@ extern "C" JL_DLLEXPORT_CODEGEN int jl_compile_codeinst_impl(jl_code_instance_t *ci) { int newly_compiled = 0; - if (jl_atomic_load_relaxed(&ci->invoke) != NULL) { - return newly_compiled; - } - JL_LOCK(&jl_ExecutionEngine->jitlock); if (jl_atomic_load_relaxed(&ci->invoke) == NULL) { ++SpecFPtrCount; uint64_t start = jl_typeinf_timing_begin(); - _jl_compile_codeinst(ci, NULL, *jl_ExecutionEngine->getContext()); + _jl_compile_codeinst(ci, NULL); jl_typeinf_timing_end(start, 0); newly_compiled = 1; } - JL_UNLOCK(&jl_ExecutionEngine->jitlock); // Might GC return newly_compiled; } @@ -541,38 +843,39 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec) uint8_t measure_compile_time_enabled = jl_atomic_load_relaxed(&jl_measure_compile_time_enabled); if (measure_compile_time_enabled) compiler_start_time = jl_hrtime(); - JL_LOCK(&jl_ExecutionEngine->jitlock); - if (jl_atomic_load_relaxed(&unspec->invoke) == NULL) { - jl_code_info_t *src = NULL; - JL_GC_PUSH1(&src); - jl_method_t *def = unspec->def->def.method; - if (jl_is_method(def)) { - src = (jl_code_info_t*)def->source; - if (src && (jl_value_t*)src != jl_nothing) - src = jl_uncompress_ir(def, NULL, (jl_value_t*)src); - } - else { - jl_method_instance_t *mi = unspec->def; - jl_code_instance_t *uninferred = jl_cached_uninferred( - jl_atomic_load_relaxed(&mi->cache), 1); - assert(uninferred); - src = (jl_code_info_t*)jl_atomic_load_relaxed(&uninferred->inferred); - assert(src); - } - if (src) { + jl_code_info_t *src = NULL; + JL_GC_PUSH1(&src); + jl_method_t *def = unspec->def->def.method; + if (jl_is_method(def)) { + src = (jl_code_info_t*)def->source; + if (src && (jl_value_t*)src != jl_nothing) + src = jl_uncompress_ir(def, NULL, (jl_value_t*)src); + } + else { + jl_method_instance_t *mi = unspec->def; + jl_code_instance_t *uninferred = jl_cached_uninferred( + jl_atomic_load_relaxed(&mi->cache), 1); + assert(uninferred); + src = (jl_code_info_t*)jl_atomic_load_relaxed(&uninferred->inferred); + assert(src); + } + if (src) { + // TODO: first prepare recursive_compile_graph(unspec, src) before taking this lock to avoid recursion? + JL_LOCK(&jitlock); // TODO: use a better lock + if (jl_atomic_load_relaxed(&unspec->invoke) == NULL) { assert(jl_is_code_info(src)); ++UnspecFPtrCount; jl_debuginfo_t *debuginfo = src->debuginfo; jl_atomic_store_release(&unspec->debuginfo, debuginfo); // n.b. 
this assumes the field was previously NULL, which is not entirely true jl_gc_wb(unspec, debuginfo); - _jl_compile_codeinst(unspec, src, *jl_ExecutionEngine->getContext()); + _jl_compile_codeinst(unspec, src); } - jl_callptr_t null = nullptr; - // if we hit a codegen bug (or ran into a broken generated function or llvmcall), fall back to the interpreter as a last resort - jl_atomic_cmpswap(&unspec->invoke, &null, jl_fptr_interpret_call_addr); - JL_GC_POP(); + JL_UNLOCK(&jitlock); // Might GC } - JL_UNLOCK(&jl_ExecutionEngine->jitlock); // Might GC + JL_GC_POP(); + jl_callptr_t null = nullptr; + // if we hit a codegen bug (or ran into a broken generated function or llvmcall), fall back to the interpreter as a last resort + jl_atomic_cmpswap(&unspec->invoke, &null, jl_fptr_interpret_call_addr); if (timed) { if (measure_compile_time_enabled) { auto end = jl_hrtime(); @@ -634,8 +937,8 @@ static auto countBasicBlocks(const Function &F) JL_NOTSAFEPOINT static constexpr size_t N_optlevels = 4; -static Expected selectOptLevel(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) { - TSM.withModuleDo([](Module &M) { +static orc::ThreadSafeModule selectOptLevel(orc::ThreadSafeModule TSM) JL_NOTSAFEPOINT { + TSM.withModuleDo([](Module &M) JL_NOTSAFEPOINT { size_t opt_level = std::max(static_cast(jl_options.opt_level), 0); do { if (jl_generating_output()) { @@ -661,7 +964,10 @@ static Expected selectOptLevel(orc::ThreadSafeModule TSM, opt_level = std::min(opt_level, N_optlevels - 1); M.addModuleFlag(Module::Warning, "julia.optlevel", opt_level); }); - return std::move(TSM); + return TSM; +} +static orc::ThreadSafeModule selectOptLevel(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) JL_NOTSAFEPOINT { + return selectOptLevel(std::move(TSM)); } void jl_register_jit_object(const object::ObjectFile &debugObj, @@ -699,8 +1005,8 @@ class JLDebuginfoPlugin : public ObjectLinkingLayer::Plugin { { std::lock_guard lock(PluginMutex); assert(PendingObjs.count(&MR) == 0); - PendingObjs[&MR] = std::unique_ptr( - new JITObjectInfo{std::move(NewBuffer), std::move(NewObj), {}}); + PendingObjs[&MR] = std::unique_ptr(new JITObjectInfo{ + std::move(NewBuffer), std::move(NewObj), {}}); } } @@ -870,7 +1176,7 @@ class JLMemoryUsagePlugin : public ObjectLinkingLayer::Plugin { // TODO: Port our memory management optimisations to JITLink instead of using the // default InProcessMemoryManager. 
-std::unique_ptr createJITLinkMemoryManager() { +std::unique_ptr createJITLinkMemoryManager() JL_NOTSAFEPOINT { #if JL_LLVM_VERSION < 160000 return cantFail(orc::MapperJITLinkMemoryManager::CreateWithMapper()); #else @@ -900,7 +1206,7 @@ class JLEHFrameRegistrar final : public jitlink::EHFrameRegistrar { } }; -RTDyldMemoryManager* createRTDyldMemoryManager(void); +RTDyldMemoryManager *createRTDyldMemoryManager(void) JL_NOTSAFEPOINT; // A simple forwarding class, since OrcJIT v2 needs a unique_ptr, while we have a shared_ptr class ForwardingMemoryManager : public RuntimeDyld::MemoryManager { @@ -909,7 +1215,10 @@ class ForwardingMemoryManager : public RuntimeDyld::MemoryManager { public: ForwardingMemoryManager(std::shared_ptr MemMgr) : MemMgr(MemMgr) {} - virtual ~ForwardingMemoryManager() = default; + ForwardingMemoryManager(ForwardingMemoryManager &) = delete; + virtual ~ForwardingMemoryManager() { + assert(!MemMgr); + } virtual uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment, unsigned SectionID, StringRef SectionName) override { @@ -947,7 +1256,11 @@ class ForwardingMemoryManager : public RuntimeDyld::MemoryManager { return MemMgr->deregisterEHFrames(); } virtual bool finalizeMemory(std::string *ErrMsg = nullptr) override { - return MemMgr->finalizeMemory(ErrMsg); + bool b = false; + if (MemMgr.use_count() == 2) + b = MemMgr->finalizeMemory(ErrMsg); + MemMgr.reset(); + return b; } virtual void notifyObjectLoaded(RuntimeDyld &RTDyld, const object::ObjectFile &Obj) override { @@ -955,10 +1268,10 @@ class ForwardingMemoryManager : public RuntimeDyld::MemoryManager { } }; - -void registerRTDyldJITObject(const object::ObjectFile &Object, - const RuntimeDyld::LoadedObjectInfo &L, - const std::shared_ptr &MemMgr) +#ifndef JL_USE_JITLINK +static void registerRTDyldJITObject(orc::MaterializationResponsibility &MR, + const object::ObjectFile &Object, + const RuntimeDyld::LoadedObjectInfo &L) { StringMap loadedSections; for (const object::SectionRef &lSection : Object.sections()) { @@ -980,6 +1293,8 @@ void registerRTDyldJITObject(const object::ObjectFile &Object, auto DebugObject = L.getObjectForDebug(Object); // ELF requires us to make a copy to mutate the header with the section load addresses. On other platforms this is a no-op. jl_register_jit_object(DebugObject.getBinary() ? 
*DebugObject.getBinary() : Object, getLoadAddress); } +#endif + namespace { static std::unique_ptr createTargetMachine() JL_NOTSAFEPOINT { TargetOptions options = TargetOptions(); @@ -1078,9 +1393,6 @@ namespace { fixupTM(*TM); return std::unique_ptr(TM); } -} // namespace - -namespace { typedef NewPM PassManager; @@ -1131,14 +1443,14 @@ namespace { }; template - struct OptimizerT { - OptimizerT(TargetMachine &TM, SmallVector, 0> &printers, std::mutex &llvm_printing_mutex) JL_NOTSAFEPOINT { + struct sizedOptimizerT { + sizedOptimizerT(TargetMachine &TM, SmallVector, 0> &printers, std::mutex &llvm_printing_mutex) JL_NOTSAFEPOINT { for (size_t i = 0; i < N; i++) { PMs[i] = std::make_unique>>(PMCreator(TM, i, printers, llvm_printing_mutex)); } } - OptimizerResultT operator()(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) JL_NOTSAFEPOINT { + orc::ThreadSafeModule operator()(orc::ThreadSafeModule TSM) JL_NOTSAFEPOINT { TSM.withModuleDo([&](Module &M) JL_NOTSAFEPOINT { auto PoolIdx = cast(cast(M.getModuleFlag("julia.optlevel"))->getValue())->getZExtValue(); assert(PoolIdx < N && "Invalid optimization pool index"); @@ -1243,12 +1555,23 @@ namespace { llvm_unreachable("optlevel is between 0 and 3!"); } }); - return Expected{std::move(TSM)}; + return TSM; } private: std::array>>, N> PMs; }; + // shim for converting a unique_ptr to a TransformFunction to a TransformFunction + template + struct IRTransformRef { + IRTransformRef(T &transform) : transform(transform) {} + OptimizerResultT operator()(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) JL_NOTSAFEPOINT { + return transform(std::move(TSM), R); + } + private: + T &transform; + }; + template struct CompilerT : orc::IRCompileLayer::IRCompiler { @@ -1264,7 +1587,8 @@ namespace { size_t PoolIdx; if (auto opt_level = M.getModuleFlag("julia.optlevel")) { PoolIdx = cast(cast(opt_level)->getValue())->getZExtValue(); - } else { + } + else { PoolIdx = jl_options.opt_level; } assert(PoolIdx < N && "Invalid optimization level for compiler!"); @@ -1273,74 +1597,89 @@ namespace { std::array>>, N> TMs; }; +} - struct JITPointersT { - - JITPointersT(SharedBytesT &SharedBytes, std::mutex &Lock) JL_NOTSAFEPOINT - : SharedBytes(SharedBytes), Lock(Lock) {} +struct JuliaOJIT::OptimizerT { + OptimizerT(TargetMachine &TM, SmallVector, 0> &printers, std::mutex &llvm_printing_mutex) + : opt(TM, printers, llvm_printing_mutex) {} + orc::ThreadSafeModule operator()(orc::ThreadSafeModule TSM) JL_NOTSAFEPOINT { + return opt(std::move(TSM)); + } + OptimizerResultT operator()(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) JL_NOTSAFEPOINT { + return opt(std::move(TSM)); + } +private: + struct sizedOptimizerT opt; +}; - Expected operator()(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) JL_NOTSAFEPOINT { - TSM.withModuleDo([&](Module &M) JL_NOTSAFEPOINT { - std::lock_guard locked(Lock); - for (auto &GV : make_early_inc_range(M.globals())) { - if (auto *Shared = getSharedBytes(GV)) { - ++InternedGlobals; - GV.replaceAllUsesWith(Shared); - GV.eraseFromParent(); - } +struct JuliaOJIT::JITPointersT { + JITPointersT(SharedBytesT &SharedBytes, std::mutex &Lock) JL_NOTSAFEPOINT + : SharedBytes(SharedBytes), Lock(Lock) {} + + orc::ThreadSafeModule operator()(orc::ThreadSafeModule TSM) JL_NOTSAFEPOINT { + TSM.withModuleDo([&](Module &M) JL_NOTSAFEPOINT { + std::lock_guard locked(Lock); + for (auto &GV : make_early_inc_range(M.globals())) { + if (auto *Shared = getSharedBytes(GV)) { + ++InternedGlobals; + 
GV.replaceAllUsesWith(Shared); + GV.eraseFromParent(); } + } - // Windows needs some inline asm to help - // build unwind tables, if they have any functions to decorate - if (!M.functions().empty()) - jl_decorate_module(M); - }); - return std::move(TSM); - } + // Windows needs some inline asm to help + // build unwind tables, if they have any functions to decorate + if (!M.functions().empty()) + jl_decorate_module(M); + }); + return TSM; + } + Expected operator()(orc::ThreadSafeModule TSM, orc::MaterializationResponsibility &R) JL_NOTSAFEPOINT { + return operator()(std::move(TSM)); + } - private: - // optimize memory by turning long strings into memoized copies, instead of - // making a copy per object file of output. - // we memoize them using a StringSet with a custom-alignment allocator - // to ensure they are properly aligned - Constant *getSharedBytes(GlobalVariable &GV) JL_NOTSAFEPOINT { - // We could probably technically get away with - // interning even external linkage globals, - // as long as they have global unnamedaddr, - // but currently we shouldn't be emitting those - // except in imaging mode, and we don't want to - // do this optimization there. - if (GV.hasExternalLinkage() || !GV.hasGlobalUnnamedAddr()) { - return nullptr; - } - if (!GV.hasInitializer()) { - return nullptr; - } - if (!GV.isConstant()) { - return nullptr; - } - auto CDS = dyn_cast(GV.getInitializer()); - if (!CDS) { - return nullptr; - } - StringRef Data = CDS->getRawDataValues(); - if (Data.size() < 16) { - // Cutoff, since we don't want to intern small strings - return nullptr; - } - Align Required = GV.getAlign().valueOrOne(); - Align Preferred = MaxAlignedAlloc::alignment(Data.size()); - if (Required > Preferred) - return nullptr; - StringRef Interned = SharedBytes.insert(Data).first->getKey(); - assert(llvm::isAddrAligned(Preferred, Interned.data())); - return literal_static_pointer_val(Interned.data(), GV.getType()); +private: + // optimize memory by turning long strings into memoized copies, instead of + // making a copy per object file of output. + // we memoize them using a StringSet with a custom-alignment allocator + // to ensure they are properly aligned + Constant *getSharedBytes(GlobalVariable &GV) JL_NOTSAFEPOINT { + // We could probably technically get away with + // interning even external linkage globals, + // as long as they have global unnamedaddr, + // but currently we shouldn't be emitting those + // except in imaging mode, and we don't want to + // do this optimization there. 
+ if (GV.hasExternalLinkage() || !GV.hasGlobalUnnamedAddr()) { + return nullptr; } + if (!GV.hasInitializer()) { + return nullptr; + } + if (!GV.isConstant()) { + return nullptr; + } + auto CDS = dyn_cast(GV.getInitializer()); + if (!CDS) { + return nullptr; + } + StringRef Data = CDS->getRawDataValues(); + if (Data.size() < 16) { + // Cutoff, since we don't want to intern small strings + return nullptr; + } + Align Required = GV.getAlign().valueOrOne(); + Align Preferred = MaxAlignedAlloc::alignment(Data.size()); + if (Required > Preferred) + return nullptr; + StringRef Interned = SharedBytes.insert(Data).first->getKey(); + assert(llvm::isAddrAligned(Preferred, Interned.data())); + return literal_static_pointer_val(Interned.data(), GV.getType()); + } - SharedBytesT &SharedBytes; - std::mutex &Lock; - }; -} + SharedBytesT &SharedBytes; + std::mutex &Lock; +}; struct JuliaOJIT::DLSymOptimizer { @@ -1362,20 +1701,24 @@ struct JuliaOJIT::DLSymOptimizer { #undef INIT_RUNTIME_LIBRARY } + ~DLSymOptimizer() JL_NOTSAFEPOINT = default; - void *lookup_symbol(void *libhandle, const char *fname) { + void *lookup_symbol(void *libhandle, const char *fname) JL_NOTSAFEPOINT { void *addr; jl_dlsym(libhandle, fname, &addr, 0); return addr; } - void *lookup(const char *libname, const char *fname) { + void *lookup(const char *libname, const char *fname) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER { StringRef lib(libname); StringRef f(fname); std::lock_guard lock(symbols_mutex); auto uit = user_symbols.find(lib); if (uit == user_symbols.end()) { + jl_task_t *ct = jl_current_task; + int8_t gc_state = jl_gc_unsafe_enter(ct->ptls); void *handle = jl_get_library_(libname, 0); + jl_gc_unsafe_leave(ct->ptls, gc_state); if (!handle) return nullptr; uit = user_symbols.insert(std::make_pair(lib, std::make_pair(handle, StringMap()))).first; @@ -1390,7 +1733,7 @@ struct JuliaOJIT::DLSymOptimizer { return handle; } - void *lookup(uintptr_t libidx, const char *fname) { + void *lookup(uintptr_t libidx, const char *fname) JL_NOTSAFEPOINT { std::lock_guard lock(symbols_mutex); runtime_symbols.resize(std::max(runtime_symbols.size(), libidx + 1)); auto it = runtime_symbols[libidx].second.find(fname); @@ -1402,7 +1745,7 @@ struct JuliaOJIT::DLSymOptimizer { return handle; } - void operator()(Module &M) { + void operator()(Module &M) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER { for (auto &GV : M.globals()) { auto Name = GV.getName(); if (Name.starts_with("jlplt") && Name.ends_with("got")) { @@ -1518,7 +1861,7 @@ struct JuliaOJIT::DLSymOptimizer { bool named; }; -void optimizeDLSyms(Module &M) { +void optimizeDLSyms(Module &M) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER { JuliaOJIT::DLSymOptimizer(true)(M); } @@ -1552,10 +1895,6 @@ llvm::DataLayout jl_create_datalayout(TargetMachine &TM) { return jl_data_layout; } -#ifdef _COMPILER_ASAN_ENABLED_ -int64_t ___asan_globals_registered; -#endif - JuliaOJIT::JuliaOJIT() : TM(createTargetMachine()), DL(jl_create_datalayout(*TM)), @@ -1564,34 +1903,27 @@ JuliaOJIT::JuliaOJIT() JD(ES.createBareJITDylib("JuliaOJIT")), ExternalJD(ES.createBareJITDylib("JuliaExternal")), DLSymOpt(std::make_unique(false)), - ContextPool([](){ - auto ctx = std::make_unique(); - #if JL_LLVM_VERSION < 170000 - SetOpaquePointer(*ctx); - #endif - return orc::ThreadSafeContext(std::move(ctx)); - }), #ifdef JL_USE_JITLINK MemMgr(createJITLinkMemoryManager()), ObjectLayer(ES, *MemMgr), - CompileLayer(ES, ObjectLayer, std::make_unique>(orc::irManglingOptionsFromTargetOptions(TM->Options), *TM)), #else 
MemMgr(createRTDyldMemoryManager()), - ObjectLayer( + UnlockedObjectLayer( ES, [this]() { std::unique_ptr result(new ForwardingMemoryManager(MemMgr)); return result; } ), - LockLayer(ObjectLayer), - CompileLayer(ES, LockLayer, std::make_unique>(orc::irManglingOptionsFromTargetOptions(TM->Options), *TM)), + ObjectLayer(UnlockedObjectLayer), #endif - JITPointersLayer(ES, CompileLayer, orc::IRTransformLayer::TransformFunction(JITPointersT(SharedBytes, RLST_mutex))), - OptimizeLayer(ES, JITPointersLayer, orc::IRTransformLayer::TransformFunction(OptimizerT(*TM, PrintLLVMTimers, llvm_printing_mutex))), - OptSelLayer(ES, OptimizeLayer, orc::IRTransformLayer::TransformFunction(selectOptLevel)) + CompileLayer(ES, ObjectLayer, std::make_unique>(orc::irManglingOptionsFromTargetOptions(TM->Options), *TM)), + JITPointers(std::make_unique(SharedBytes, RLST_mutex)), + JITPointersLayer(ES, CompileLayer, IRTransformRef(*JITPointers)), + Optimizers(std::make_unique(*TM, PrintLLVMTimers, llvm_printing_mutex)), + OptimizeLayer(ES, JITPointersLayer, IRTransformRef(*Optimizers)), + OptSelLayer(ES, OptimizeLayer, static_cast(selectOptLevel)) { - JL_MUTEX_INIT(&this->jitlock, "JuliaOJIT"); #ifdef JL_USE_JITLINK # if defined(LLVM_SHLIB) // When dynamically linking against LLVM, use our custom EH frame registration code @@ -1606,12 +1938,7 @@ JuliaOJIT::JuliaOJIT() ObjectLayer.addPlugin(std::make_unique()); ObjectLayer.addPlugin(std::make_unique(jit_bytes_size)); #else - ObjectLayer.setNotifyLoaded( - [this](orc::MaterializationResponsibility &MR, - const object::ObjectFile &Object, - const RuntimeDyld::LoadedObjectInfo &LO) { - registerRTDyldJITObject(Object, LO, MemMgr); - }); + UnlockedObjectLayer.setNotifyLoaded(registerRTDyldJITObject); #endif std::string ErrorStr; @@ -1741,19 +2068,34 @@ JuliaOJIT::JuliaOJIT() #endif cantFail(GlobalJD.define(orc::absoluteSymbols(msan_crt))); #endif +#if JL_LLVM_VERSION < 190000 #ifdef _COMPILER_ASAN_ENABLED_ + // this is a hack to work around a bad assertion: + // /workspace/srcdir/llvm-project/llvm/lib/ExecutionEngine/Orc/Core.cpp:3028: llvm::Error llvm::orc::ExecutionSession::OL_notifyResolved(llvm::orc::MaterializationResponsibility&, const SymbolMap&): Assertion `(KV.second.getFlags() & ~JITSymbolFlags::Common) == (I->second & ~JITSymbolFlags::Common) && "Resolving symbol with incorrect flags"' failed. 
+ // hopefully fixed upstream by e7698a13e319a9919af04d3d693a6f6ea7168a44 + static int64_t jl___asan_globals_registered; orc::SymbolMap asan_crt; #if JL_LLVM_VERSION >= 170000 - asan_crt[mangle("___asan_globals_registered")] = {ExecutorAddr::fromPtr(&___asan_globals_registered), JITSymbolFlags::Exported}; + asan_crt[mangle("___asan_globals_registered")] = {ExecutorAddr::fromPtr(&jl___asan_globals_registered), JITSymbolFlags::Common | JITSymbolFlags::Exported}; #else - asan_crt[mangle("___asan_globals_registered")] = JITEvaluatedSymbol::fromPointer(&___asan_globals_registered, JITSymbolFlags::Exported); + asan_crt[mangle("___asan_globals_registered")] = JITEvaluatedSymbol::fromPointer(&jl___asan_globals_registered, JITSymbolFlags::Common | JITSymbolFlags::Exported); #endif cantFail(JD.define(orc::absoluteSymbols(asan_crt))); #endif +#endif } JuliaOJIT::~JuliaOJIT() = default; +ThreadSafeContext JuliaOJIT::makeContext() +{ + auto ctx = std::make_unique(); + #if JL_LLVM_VERSION < 170000 + SetOpaquePointer(*ctx); + #endif + return orc::ThreadSafeContext(std::move(ctx)); +} + orc::SymbolStringPtr JuliaOJIT::mangle(StringRef Name) { std::string MangleName = getMangledName(Name); @@ -1773,40 +2115,32 @@ void JuliaOJIT::addModule(orc::ThreadSafeModule TSM) { JL_TIMING(LLVM_JIT, JIT_Total); ++ModulesAdded; -#ifndef JL_USE_JITLINK - orc::SymbolLookupSet NewExports; - TSM.withModuleDo([&](Module &M) JL_NOTSAFEPOINT { - for (auto &F : M.global_values()) { - if (!F.isDeclaration() && F.getLinkage() == GlobalValue::ExternalLinkage) { - auto Name = ES.intern(getMangledName(F.getName())); - NewExports.add(std::move(Name)); - } - } - assert(!verifyLLVMIR(M)); - }); -#endif - - auto Err = OptSelLayer.add(JD, std::move(TSM)); + TSM = selectOptLevel(std::move(TSM)); + TSM = (*Optimizers)(std::move(TSM)); + TSM = (*JITPointers)(std::move(TSM)); + auto Lock = TSM.getContext().getLock(); + Module &M = *TSM.getModuleUnlocked(); + // Treat this as if one of the passes might contain a safepoint + // even though that shouldn't be the case and might be unwise + Expected> Obj = CompileLayer.getCompiler()(M); + if (!Obj) { + ES.reportError(Obj.takeError()); + errs() << "Failed to add module to JIT!\n"; + errs() << "Dumping failing module\n" << M << "\n"; + return; + } + { auto release = std::move(Lock); } + auto Err = JuliaOJIT::addObjectFile(JD, std::move(*Obj)); if (Err) { ES.reportError(std::move(Err)); - errs() << "Failed to add module to JIT!\n"; + errs() << "Failed to add objectfile to JIT!\n"; abort(); } -#ifndef JL_USE_JITLINK - // force eager compilation (for now), due to memory management specifics - // (can't handle compilation recursion) - auto Lookups = ES.lookup({{&JD, orc::JITDylibLookupFlags::MatchExportedSymbolsOnly}}, NewExports); - if (!Lookups) { - ES.reportError(Lookups.takeError()); - errs() << "Failed to lookup symbols in module!\n"; - } -#endif } Error JuliaOJIT::addExternalModule(orc::JITDylib &JD, orc::ThreadSafeModule TSM, bool ShouldOptimize) { - if (auto Err = TSM.withModuleDo([&](Module &M) JL_NOTSAFEPOINT -> Error - { + if (auto Err = TSM.withModuleDo([&](Module &M) JL_NOTSAFEPOINT -> Error { if (M.getDataLayout().isDefault()) M.setDataLayout(DL); if (M.getDataLayout() != DL) @@ -1815,24 +2149,29 @@ Error JuliaOJIT::addExternalModule(orc::JITDylib &JD, orc::ThreadSafeModule TSM, M.getDataLayout().getStringRepresentation() + " (module) vs " + DL.getStringRepresentation() + " (jit)", inconvertibleErrorCode()); - + // OrcJIT requires that all modules / files have unique names: + 
M.setModuleIdentifier((M.getModuleIdentifier() + Twine("-") + Twine(jl_atomic_fetch_add_relaxed(&jitcounter, 1))).str()); return Error::success(); - })) + })) return Err; + //if (ShouldOptimize) + // return OptimizeLayer.add(JD, std::move(TSM)); return CompileLayer.add(JD.getDefaultResourceTracker(), std::move(TSM)); } Error JuliaOJIT::addObjectFile(orc::JITDylib &JD, std::unique_ptr Obj) { assert(Obj && "Can not add null object"); -#ifdef JL_USE_JITLINK + // OrcJIT requires that all modules / files have unique names: + // https://llvm.org/doxygen/namespacellvm_1_1orc.html#a1f5a1bc60c220cdccbab0f26b2a425e1 + // so we have to force a copy here + std::string Name = ("jitted-" + Twine(jl_atomic_fetch_add_relaxed(&jitcounter, 1))).str(); + Obj = Obj->getMemBufferCopy(Obj->getBuffer(), Name); return ObjectLayer.add(JD.getDefaultResourceTracker(), std::move(Obj)); -#else - return LockLayer.add(JD.getDefaultResourceTracker(), std::move(Obj)); -#endif } SmallVector JuliaOJIT::findSymbols(ArrayRef Names) { + // assert(MemMgr.use_count() == 1); (true single-threaded, but slightly race-y to assert it with concurrent threads) DenseMap Unmangled; orc::SymbolLookupSet Exports; for (StringRef Name : Names) { @@ -1978,6 +2317,7 @@ void JuliaOJIT::enableJITDebuggingSupport() addAbsoluteToMap(GDBFunctions,llvm_orc_registerJITLoaderGDBAllocAction); auto registerJITLoaderGDBWrapper = addAbsoluteToMap(GDBFunctions,llvm_orc_registerJITLoaderGDBWrapper); cantFail(JD.define(orc::absoluteSymbols(GDBFunctions))); + (void)registerJITLoaderGDBWrapper; if (TM->getTargetTriple().isOSBinFormatMachO()) ObjectLayer.addPlugin(cantFail(orc::GDBJITDebugInfoRegistrationPlugin::Create(ES, JD, TM->getTargetTriple()))); #ifndef _COMPILER_ASAN_ENABLED_ // TODO: Fix duplicated sections spam #51794 @@ -2013,12 +2353,12 @@ void JuliaOJIT::enableOProfileJITEventListener() void JuliaOJIT::enablePerfJITEventListener() { #if JL_LLVM_VERSION >= 180000 - orc::SymbolMap PerfFunctions; - auto StartAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfStart); - auto EndAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfEnd); - auto ImplAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfImpl); - cantFail(JD.define(orc::absoluteSymbols(PerfFunctions))); if (TM->getTargetTriple().isOSBinFormatELF()) { + orc::SymbolMap PerfFunctions; + auto StartAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfStart); + auto EndAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfEnd); + auto ImplAddr = addAbsoluteToMap(PerfFunctions,llvm_orc_registerJITLoaderPerfImpl); + cantFail(JD.define(orc::absoluteSymbols(PerfFunctions))); ObjectLayer.addPlugin(cantFail(DebugInfoPreservationPlugin::Create())); //ObjectLayer.addPlugin(cantFail(PerfSupportPlugin::Create( // ES.getExecutorProcessControl(), *JD, true, true))); @@ -2032,7 +2372,7 @@ void JuliaOJIT::enablePerfJITEventListener() void JuliaOJIT::RegisterJITEventListener(JITEventListener *L) { if (L) - ObjectLayer.registerJITEventListener(*L); + UnlockedObjectLayer.registerJITEventListener(*L); } void JuliaOJIT::enableJITDebuggingSupport() { @@ -2071,7 +2411,7 @@ std::string JuliaOJIT::getMangledName(const GlobalValue *GV) size_t JuliaOJIT::getTotalBytes() const { - auto bytes = jit_bytes_size.load(std::memory_order_relaxed); + auto bytes = jl_atomic_load_relaxed(&jit_bytes_size); #ifndef JL_USE_JITLINK size_t getRTDyldMemoryManagerTotalBytes(RTDyldMemoryManager *mm) JL_NOTSAFEPOINT; bytes += 
getRTDyldMemoryManagerTotalBytes(MemMgr.get()); @@ -2081,7 +2421,7 @@ size_t JuliaOJIT::getTotalBytes() const void JuliaOJIT::addBytes(size_t bytes) { - jit_bytes_size.fetch_add(bytes, std::memory_order_relaxed); + jl_atomic_fetch_add_relaxed(&jit_bytes_size, bytes); } void JuliaOJIT::printTimers() @@ -2326,74 +2666,6 @@ static void jl_decorate_module(Module &M) { #undef ASM_USES_ELF } -#ifndef JL_USE_JITLINK -// Implements Tarjan's SCC (strongly connected components) algorithm, simplified to remove the count variable -static int jl_add_to_ee( - orc::ThreadSafeModule &M, - const StringMap &NewExports, - DenseMap &Queued, - SmallVectorImpl &Stack) -{ - // First check if the TSM is empty (already compiled) - if (!M) - return 0; - // Next check and record if it is on the stack somewhere - { - auto &Id = Queued[&M]; - if (Id) - return Id; - Stack.push_back(&M); - Id = Stack.size(); - } - // Finally work out the SCC - int depth = Stack.size(); - int MergeUp = depth; - SmallVector Children; - M.withModuleDo([&](Module &m) JL_NOTSAFEPOINT { - for (auto &F : m.global_objects()) { - if (F.isDeclaration() && F.getLinkage() == GlobalValue::ExternalLinkage) { - auto Callee = NewExports.find(F.getName()); - if (Callee != NewExports.end()) { - auto *CM = Callee->second; - if (*CM && CM != &M) { - auto Down = Queued.find(CM); - if (Down != Queued.end()) - MergeUp = std::min(MergeUp, Down->second); - else - Children.push_back(CM); - } - } - } - } - }); - assert(MergeUp > 0); - for (auto *CM : Children) { - int Down = jl_add_to_ee(*CM, NewExports, Queued, Stack); - assert(Down <= (int)Stack.size()); - if (Down) - MergeUp = std::min(MergeUp, Down); - } - if (MergeUp < depth) - return MergeUp; - while (1) { - // Not in a cycle (or at the top of it) - // remove SCC state and merge every CM from the cycle into M - orc::ThreadSafeModule *CM = Stack.back(); - auto it = Queued.find(CM); - assert(it->second == (int)Stack.size()); - Queued.erase(it); - Stack.pop_back(); - if ((int)Stack.size() < depth) { - assert(&M == CM); - break; - } - jl_merge_module(M, std::move(*CM)); - } - jl_ExecutionEngine->addModule(std::move(M)); - return 0; -} -#endif - // helper function for adding a DLLImport (dlsym) address to the execution engine void add_named_global(StringRef name, void *addr) { diff --git a/src/jitlayers.h b/src/jitlayers.h index f4b9a6ea5395a..ba4ac3081795e 100644 --- a/src/jitlayers.h +++ b/src/jitlayers.h @@ -69,7 +69,6 @@ using namespace llvm; extern "C" jl_cgparams_t jl_default_cgparams; -extern arraylist_t new_invokes; DEFINE_SIMPLE_CONVERSION_FUNCTIONS(orc::ThreadSafeContext, LLVMOrcThreadSafeContextRef) DEFINE_SIMPLE_CONVERSION_FUNCTIONS(orc::ThreadSafeModule, LLVMOrcThreadSafeModuleRef) @@ -154,11 +153,11 @@ struct jl_locked_stream { std::unique_lock lck; ios_t *&stream; - lock(std::mutex &mutex, ios_t *&stream) JL_NOTSAFEPOINT + lock(std::mutex &mutex, ios_t *&stream) JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER : lck(mutex), stream(stream) {} lock(lock&) = delete; lock(lock&&) JL_NOTSAFEPOINT = default; - ~lock() JL_NOTSAFEPOINT = default; + ~lock() JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT = default; ios_t *&operator*() JL_NOTSAFEPOINT { return stream; @@ -177,8 +176,8 @@ struct jl_locked_stream { } }; - jl_locked_stream() JL_NOTSAFEPOINT = default; - ~jl_locked_stream() JL_NOTSAFEPOINT = default; + jl_locked_stream() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER = default; + ~jl_locked_stream() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_LEAVE = default; lock operator*() JL_NOTSAFEPOINT { return lock(mutex, stream); @@ -210,12 
+209,12 @@ struct jl_codegen_call_target_t { jl_returninfo_t::CallingConv cc; unsigned return_roots; llvm::Function *decl; + llvm::Function *oc; bool specsig; }; typedef SmallVector, 0> jl_workqueue_t; -// TODO DenseMap? -typedef std::map> jl_compiled_functions_t; + typedef std::list> CallFrames; struct jl_codegen_params_t { orc::ThreadSafeContext tsctx; @@ -229,7 +228,6 @@ struct jl_codegen_params_t { typedef StringMap SymMapGV; // outputs jl_workqueue_t workqueue; - jl_compiled_functions_t compiled_functions; std::map global_targets; std::map, GlobalVariable*> external_fns; std::map ditypes; @@ -292,13 +290,20 @@ enum CompilationPolicy { Extern = 1, }; -void jl_compile_workqueue( - jl_codegen_params_t ¶ms, - CompilationPolicy policy); - Function *jl_cfunction_object(jl_function_t *f, jl_value_t *rt, jl_tupletype_t *argt, jl_codegen_params_t ¶ms); +Function *emit_tojlinvoke(jl_code_instance_t *codeinst, StringRef theFptrName, Module *M, jl_codegen_params_t ¶ms) JL_NOTSAFEPOINT; +void emit_specsig_to_fptr1( + Function *gf_thunk, jl_returninfo_t::CallingConv cc, unsigned return_roots, + jl_value_t *calltype, jl_value_t *rettype, bool is_for_opaque_closure, + size_t nargs, + jl_codegen_params_t ¶ms, + Function *target, + size_t min_world, size_t max_world) JL_NOTSAFEPOINT; +Function *get_or_emit_fptr1(StringRef Name, Module *M) JL_NOTSAFEPOINT; +void jl_init_function(Function *F, const Triple &TT) JL_NOTSAFEPOINT; + void add_named_global(StringRef name, void *addr) JL_NOTSAFEPOINT; static inline Constant *literal_static_pointer_val(const void *p, Type *T) JL_NOTSAFEPOINT @@ -371,6 +376,11 @@ using OptimizerResultT = Expected; using SharedBytesT = StringSet::MapEntryTy)>>; class JuliaOJIT { +private: + // any verification the user wants to do when adding an OwningResource to the pool + template + static void verifyResource(AnyT &resource) JL_NOTSAFEPOINT { } + static void verifyResource(orc::ThreadSafeContext &context) JL_NOTSAFEPOINT { assert(context.getContext()); } public: #ifdef JL_USE_JITLINK typedef orc::ObjectLinkingLayer ObjLayerT; @@ -385,13 +395,13 @@ class JuliaOJIT { std::unique_ptr O) override { JL_TIMING(LLVM_JIT, JIT_Link); #ifndef JL_USE_JITLINK - std::lock_guard lock(EmissionMutex); + std::lock_guard lock(EmissionMutex); #endif BaseLayer.emit(std::move(R), std::move(O)); } private: orc::ObjectLayer &BaseLayer; - std::mutex EmissionMutex; + std::recursive_mutex EmissionMutex; }; #endif typedef orc::IRCompileLayer CompileLayerT; @@ -420,11 +430,16 @@ class JuliaOJIT { : pool(pool), resource(std::move(resource)) {} OwningResource(const OwningResource &) = delete; OwningResource &operator=(const OwningResource &) = delete; - OwningResource(OwningResource &&) JL_NOTSAFEPOINT = default; + OwningResource(OwningResource &&other) JL_NOTSAFEPOINT + : pool(other.pool), resource(std::move(other.resource)) { + other.resource.reset(); + } OwningResource &operator=(OwningResource &&) JL_NOTSAFEPOINT = default; ~OwningResource() JL_NOTSAFEPOINT { // _LEAVE - if (resource) + if (resource) { + verifyResource(*resource); pool.release(std::move(*resource)); + } } ResourceT release() JL_NOTSAFEPOINT { ResourceT res(std::move(*resource)); @@ -510,7 +525,11 @@ class JuliaOJIT { std::unique_ptr mutex; }; + typedef ResourcePool> ContextPoolT; + struct DLSymOptimizer; + struct OptimizerT; + struct JITPointersT; #ifndef JL_USE_JITLINK void RegisterJITEventListener(JITEventListener *L) JL_NOTSAFEPOINT; @@ -528,7 +547,7 @@ class JuliaOJIT { orc::SymbolStringPtr mangle(StringRef Name) JL_NOTSAFEPOINT; 
void addGlobalMapping(StringRef Name, uint64_t Addr) JL_NOTSAFEPOINT; - void addModule(orc::ThreadSafeModule M) JL_NOTSAFEPOINT; + void addModule(orc::ThreadSafeModule M) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER; //Methods for the C API Error addExternalModule(orc::JITDylib &JD, orc::ThreadSafeModule TSM, @@ -552,15 +571,7 @@ class JuliaOJIT { uint64_t getGlobalValueAddress(StringRef Name) JL_NOTSAFEPOINT; uint64_t getFunctionAddress(StringRef Name) JL_NOTSAFEPOINT; StringRef getFunctionAtAddress(uint64_t Addr, jl_callptr_t invoke, jl_code_instance_t *codeinst) JL_NOTSAFEPOINT; - auto getContext() JL_NOTSAFEPOINT { - return *ContextPool; - } - orc::ThreadSafeContext acquireContext() { // JL_NOTSAFEPOINT_ENTER? - return ContextPool.acquire(); - } - void releaseContext(orc::ThreadSafeContext &&ctx) { // JL_NOTSAFEPOINT_LEAVE? - ContextPool.release(std::move(ctx)); - } + orc::ThreadSafeContext makeContext() JL_NOTSAFEPOINT; const DataLayout& getDataLayout() const JL_NOTSAFEPOINT; // TargetMachine pass-through methods @@ -576,22 +587,21 @@ class JuliaOJIT { void addBytes(size_t bytes) JL_NOTSAFEPOINT; void printTimers() JL_NOTSAFEPOINT; - jl_locked_stream &get_dump_emitted_mi_name_stream() JL_NOTSAFEPOINT { + jl_locked_stream &get_dump_emitted_mi_name_stream() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER { return dump_emitted_mi_name_stream; } - jl_locked_stream &get_dump_compiles_stream() JL_NOTSAFEPOINT { + jl_locked_stream &get_dump_compiles_stream() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER { return dump_compiles_stream; } - jl_locked_stream &get_dump_llvm_opt_stream() JL_NOTSAFEPOINT { + jl_locked_stream &get_dump_llvm_opt_stream() JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER { return dump_llvm_opt_stream; } std::string getMangledName(StringRef Name) JL_NOTSAFEPOINT; std::string getMangledName(const GlobalValue *GV) JL_NOTSAFEPOINT; - // Note that this is a safepoint due to jl_get_library_ and jl_dlsym calls - void optimizeDLSyms(Module &M); - - jl_mutex_t jitlock; + // Note that this is a potential safepoint due to jl_get_library_ and jl_dlsym calls + // but may be called from inside safe-regions due to jit compilation locks + void optimizeDLSyms(Module &M) JL_NOTSAFEPOINT_LEAVE JL_NOTSAFEPOINT_ENTER; private: @@ -618,20 +628,20 @@ class JuliaOJIT { std::mutex llvm_printing_mutex{}; SmallVector, 0> PrintLLVMTimers; - ResourcePool> ContextPool; - - std::atomic jit_bytes_size{0}; -#ifndef JL_USE_JITLINK - const std::shared_ptr MemMgr; -#else + _Atomic(size_t) jit_bytes_size{0}; + _Atomic(size_t) jitcounter{0}; +#ifdef JL_USE_JITLINK const std::unique_ptr MemMgr; -#endif ObjLayerT ObjectLayer; -#ifndef JL_USE_JITLINK - LockLayerT LockLayer; +#else + const std::shared_ptr MemMgr; // shared_ptr protected by LockLayerT.EmissionMutex + ObjLayerT UnlockedObjectLayer; + LockLayerT ObjectLayer; #endif CompileLayerT CompileLayer; + std::unique_ptr JITPointers; JITPointersLayerT JITPointersLayer; + std::unique_ptr Optimizers; OptimizeLayerT OptimizeLayer; OptSelLayerT OptSelLayer; }; diff --git a/src/julia.h b/src/julia.h index 7bb5f31eda708..168ba0deff1ec 100644 --- a/src/julia.h +++ b/src/julia.h @@ -426,8 +426,8 @@ typedef struct _jl_opaque_closure_t { jl_value_t *captures; size_t world; jl_method_t *source; - jl_fptr_args_t invoke; - void *specptr; + jl_fptr_args_t invoke; // n.b. despite the similar name, this is not an invoke ABI (jl_call_t / julia.call2), but rather the fptr1 (jl_fptr_args_t / julia.call) ABI + void *specptr; // n.b. 
despite the similarity in field name, this is not arbitrary private data for jlcall, but rather the codegen ABI for specsig, and is mandatory if specsig is valid } jl_opaque_closure_t; // This type represents an executable operation @@ -475,7 +475,7 @@ typedef struct _jl_code_instance_t { // & 0b100 == From image _Atomic(uint8_t) precompile; // if set, this will be added to the output system image uint8_t relocatability; // nonzero if all roots are built into sysimg or tagged by module key - _Atomic(jl_callptr_t) invoke; // jlcall entry point + _Atomic(jl_callptr_t) invoke; // jlcall entry point usually, but if this codeinst belongs to an OC Method, then this is an jl_fptr_args_t fptr1 instead, unless it is not, because it is a special token object instead union _jl_generic_specptr_t { _Atomic(void*) fptr; _Atomic(jl_fptr_args_t) fptr1; @@ -2339,7 +2339,13 @@ JL_DLLEXPORT JL_CONST_FUNC jl_gcframe_t **(jl_get_pgcstack)(void) JL_GLOBALLY_RO extern JL_DLLIMPORT int jl_task_gcstack_offset; extern JL_DLLIMPORT int jl_task_ptls_offset; +#ifdef __cplusplus +} +#endif #include "julia_locks.h" // requires jl_task_t definition +#ifdef __cplusplus +extern "C" { +#endif // Return the exception currently being handled, or `jl_nothing`. // diff --git a/src/julia_atomics.h b/src/julia_atomics.h index c094afcc54cd5..d05f0fafab28f 100644 --- a/src/julia_atomics.h +++ b/src/julia_atomics.h @@ -103,12 +103,12 @@ enum jl_memory_order { // this wrong thus we include the correct definitions here (with implicit // conversion), instead of using the macro version template -T jl_atomic_load(std::atomic *ptr) +T jl_atomic_load(const std::atomic *ptr) { return std::atomic_load(ptr); } template -T jl_atomic_load_explicit(std::atomic *ptr, std::memory_order order) +T jl_atomic_load_explicit(const std::atomic *ptr, std::memory_order order) { return std::atomic_load_explicit(ptr, order); } diff --git a/src/julia_internal.h b/src/julia_internal.h index 82c91c6d073af..bb8169c6e5f9e 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -1715,13 +1715,14 @@ JL_DLLEXPORT int jl_tupletype_length_compat(jl_value_t *v, size_t nargs) JL_NOTS JL_DLLEXPORT jl_value_t *jl_argtype_with_function(jl_value_t *f, jl_value_t *types0); JL_DLLEXPORT jl_value_t *jl_argtype_with_function_type(jl_value_t *ft JL_MAYBE_UNROOTED, jl_value_t *types0); +JL_DLLEXPORT jl_value_t *jl_argtype_without_function(jl_value_t *ftypes); JL_DLLEXPORT unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *field_type); -void register_eh_frames(uint8_t *Addr, size_t Size); -void deregister_eh_frames(uint8_t *Addr, size_t Size); +void register_eh_frames(uint8_t *Addr, size_t Size) JL_NOTSAFEPOINT; +void deregister_eh_frames(uint8_t *Addr, size_t Size) JL_NOTSAFEPOINT; -STATIC_INLINE void *jl_get_frame_addr(void) +STATIC_INLINE void *jl_get_frame_addr(void) JL_NOTSAFEPOINT { #ifdef __GNUC__ return __builtin_frame_address(0); diff --git a/src/julia_locks.h b/src/julia_locks.h index 5774ddada60c6..4d1345177f965 100644 --- a/src/julia_locks.h +++ b/src/julia_locks.h @@ -103,6 +103,33 @@ JL_DLLEXPORT void jl_unlock_field(jl_mutex_t *v) JL_NOTSAFEPOINT; #ifdef __cplusplus } + +#include +#include +// simple C++ shim around a std::unique_lock + gc-safe + disabled finalizers region +// since we nearly always want that combination together +class jl_unique_gcsafe_lock { +public: + int8_t gc_state; + std::unique_lock native; + explicit jl_unique_gcsafe_lock(std::mutex &native) JL_NOTSAFEPOINT_ENTER + { + jl_task_t *ct = jl_current_task; + gc_state = 
jl_gc_safe_enter(ct->ptls); + this->native = std::unique_lock(native); + ct->ptls->engine_nqueued++; // disables finalizers until inference is finished on this method graph + } + jl_unique_gcsafe_lock(jl_unique_gcsafe_lock &&native) = delete; + jl_unique_gcsafe_lock(jl_unique_gcsafe_lock &native) = delete; + ~jl_unique_gcsafe_lock() JL_NOTSAFEPOINT_LEAVE { + jl_task_t *ct = jl_current_task; + jl_gc_safe_leave(ct->ptls, gc_state); + ct->ptls->engine_nqueued--; // enable finalizers (but don't run them until the next gc) + } + void wait(std::condition_variable& cond) JL_NOTSAFEPOINT { + cond.wait(native); + } +}; #endif #endif diff --git a/src/opaque_closure.c b/src/opaque_closure.c index 0bf3a729cbcb1..9fe36f32d2030 100644 --- a/src/opaque_closure.c +++ b/src/opaque_closure.c @@ -80,14 +80,16 @@ static jl_opaque_closure_t *new_opaque_closure(jl_tupletype_t *argt, jl_value_t if (!jl_subtype(rt_lb, selected_rt)) { // TODO: It would be better to try to get a specialization with the // correct rt check here (or we could codegen a wrapper). - specptr = NULL; invoke = (jl_fptr_args_t)jl_interpret_opaque_closure; + specptr = NULL; // this will force codegen of the unspecialized version + invoke = (jl_fptr_args_t)jl_interpret_opaque_closure; jl_value_t *ts[2] = {rt_lb, (jl_value_t*)ci->rettype}; selected_rt = jl_type_union(ts, 2); } if (!jl_subtype(ci->rettype, rt_ub)) { // TODO: It would be better to try to get a specialization with the // correct rt check here (or we could codegen a wrapper). - specptr = NULL; invoke = (jl_fptr_args_t)jl_interpret_opaque_closure; + specptr = NULL; // this will force codegen of the unspecialized version + invoke = (jl_fptr_args_t)jl_interpret_opaque_closure; selected_rt = jl_type_intersection(rt_ub, selected_rt); } @@ -108,8 +110,7 @@ static jl_opaque_closure_t *new_opaque_closure(jl_tupletype_t *argt, jl_value_t jl_value_t *oc_type JL_ALWAYS_LEAFTYPE = jl_apply_type2((jl_value_t*)jl_opaque_closure_type, (jl_value_t*)argt, selected_rt); JL_GC_PROMISE_ROOTED(oc_type); - if (!specptr) { - sigtype = jl_argtype_with_function_type((jl_value_t*)oc_type, (jl_value_t*)argt); + if (specptr == NULL) { jl_method_instance_t *mi_generic = jl_specializations_get_linfo(jl_opaque_closure_method, sigtype, jl_emptysvec); // OC wrapper methods are not world dependent @@ -197,7 +198,7 @@ int jl_tupletype_length_compat(jl_value_t *v, size_t nargs) JL_CALLABLE(jl_f_opaque_closure_call) { - jl_opaque_closure_t* oc = (jl_opaque_closure_t*)F; + jl_opaque_closure_t *oc = (jl_opaque_closure_t*)F; jl_value_t *argt = jl_tparam0(jl_typeof(oc)); if (!jl_tupletype_length_compat(argt, nargs)) jl_method_error(F, args, nargs + 1, oc->world); diff --git a/src/pipeline.cpp b/src/pipeline.cpp index 09d51598ea8b7..f8935070bb001 100644 --- a/src/pipeline.cpp +++ b/src/pipeline.cpp @@ -980,3 +980,9 @@ extern "C" JL_DLLEXPORT_CODEGEN ::llvm::PassPluginLibraryInfo llvmGetPassPluginInfo() JL_NOTSAFEPOINT { return {LLVM_PLUGIN_API_VERSION, "Julia", "1", registerCallbacks}; } + +void addTargetPasses(legacy::PassManagerBase *PM, const Triple &triple, TargetIRAnalysis analysis) +{ + PM->add(new TargetLibraryInfoWrapperPass(triple)); + PM->add(createTargetTransformInfoWrapperPass(std::move(analysis))); +} diff --git a/src/stackwalk.c b/src/stackwalk.c index 6784e601bcfba..7c6f946fe73c5 100644 --- a/src/stackwalk.c +++ b/src/stackwalk.c @@ -642,13 +642,13 @@ void jl_print_native_codeloc(uintptr_t ip) JL_NOTSAFEPOINT for (i = 0; i < n; i++) { jl_frame_t frame = frames[i]; if (!frame.func_name) { - 
jl_safe_printf("unknown function (ip: %p)\n", (void*)ip); + jl_safe_printf("unknown function (ip: %p) at %s\n", (void*)ip, frame.file_name ? frame.file_name : "(unknown file)"); } else { jl_safe_print_codeloc(frame.func_name, frame.file_name, frame.line, frame.inlined); free(frame.func_name); - free(frame.file_name); } + free(frame.file_name); } free(frames); } From aa51abe6400bc29c6093dca5a395fc03806ab511 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Sat, 19 Oct 2024 01:20:59 -0400 Subject: [PATCH 252/537] rename: invalid -> incompatible cache header (#56240) Falling back to the older serial precompilation process is basically a bug (except for if a manifest hasn't been resolved) so https://github.com/JuliaLang/julia/pull/52619 added more info on why it's been hit so we have a chance of fixing issues that are otherwise very difficult to recreate. However "invalid header" which usually just means it was made by a different julia version appears to sound too alarming to users. https://discourse.julialang.org/t/cache-misses-when-using-packages-since-upgrading-to-1-11/121445 So soften it there and in error messages, given it seems a better description. Suggested by @giordano in https://discourse.julialang.org/t/cache-misses-when-using-packages-since-upgrading-to-1-11/121445/4?u=ianshmean --- base/loading.jl | 22 +++++++++++----------- stdlib/Logging/docs/src/index.md | 4 ++-- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 78c584d00852b..db6a681bb2a5b 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1247,7 +1247,7 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No else io = open(path, "r") try - iszero(isvalid_cache_header(io)) && return ArgumentError("Invalid header in cache file $path.") + iszero(isvalid_cache_header(io)) && return ArgumentError("Incompatible header in cache file $path.") _, (includes, _, _), _, _, _, _, _, _ = parse_cache_header(io, path) ignore_native = pkg_tracked(includes) finally @@ -1887,7 +1887,7 @@ function isrelocatable(pkg::PkgId) isnothing(path) && return false io = open(path, "r") try - iszero(isvalid_cache_header(io)) && throw(ArgumentError("Invalid header in cache file $cachefile.")) + iszero(isvalid_cache_header(io)) && throw(ArgumentError("Incompatible header in cache file $cachefile.")) _, (includes, includes_srcfiles, _), _... 
= _parse_cache_header(io, path) for inc in includes !startswith(inc.filename, "@depot") && return false @@ -1962,7 +1962,7 @@ function _tryrequire_from_serialized(pkg::PkgId, path::String, ocachepath::Union io = open(path, "r") ignore_native = false try - iszero(isvalid_cache_header(io)) && return ArgumentError("Invalid header in cache file $path.") + iszero(isvalid_cache_header(io)) && return ArgumentError("Incompatible header in cache file $path.") _, (includes, _, _), depmodnames, _, _, _, clone_targets, _ = parse_cache_header(io, path) ignore_native = pkg_tracked(includes) @@ -3179,7 +3179,7 @@ function compilecache(pkg::PkgId, path::String, internal_stderr::IO = stderr, in # append extra crc to the end of the .ji file: open(tmppath, "r+") do f if iszero(isvalid_cache_header(f)) - error("Invalid header for $(repr("text/plain", pkg)) in new cache file $(repr(tmppath)).") + error("Incompatible header for $(repr("text/plain", pkg)) in new cache file $(repr(tmppath)).") end seekend(f) write(f, crc_so) @@ -3503,7 +3503,7 @@ end function parse_cache_header(cachefile::String) io = open(cachefile, "r") try - iszero(isvalid_cache_header(io)) && throw(ArgumentError("Invalid header in cache file $cachefile.")) + iszero(isvalid_cache_header(io)) && throw(ArgumentError("Incompatible header in cache file $cachefile.")) ret = parse_cache_header(io, cachefile) return ret finally @@ -3516,7 +3516,7 @@ function preferences_hash(cachefile::String) io = open(cachefile, "r") try if iszero(isvalid_cache_header(io)) - throw(ArgumentError("Invalid header in cache file $cachefile.")) + throw(ArgumentError("Incompatible header in cache file $cachefile.")) end return preferences_hash(io, cachefile) finally @@ -3532,7 +3532,7 @@ end function cache_dependencies(cachefile::String) io = open(cachefile, "r") try - iszero(isvalid_cache_header(io)) && throw(ArgumentError("Invalid header in cache file $cachefile.")) + iszero(isvalid_cache_header(io)) && throw(ArgumentError("Incompatible header in cache file $cachefile.")) return cache_dependencies(io, cachefile) finally close(io) @@ -3572,7 +3572,7 @@ end function read_dependency_src(cachefile::String, filename::AbstractString) io = open(cachefile, "r") try - iszero(isvalid_cache_header(io)) && throw(ArgumentError("Invalid header in cache file $cachefile.")) + iszero(isvalid_cache_header(io)) && throw(ArgumentError("Incompatible header in cache file $cachefile.")) return read_dependency_src(io, cachefile, filename) finally close(io) @@ -3856,9 +3856,9 @@ end try checksum = isvalid_cache_header(io) if iszero(checksum) - @debug "Rejecting cache file $cachefile due to it containing an invalid cache header" - record_reason(reasons, "invalid header") - return true # invalid cache file + @debug "Rejecting cache file $cachefile due to it containing an incompatible cache header" + record_reason(reasons, "incompatible header") + return true # incompatible cache file end modules, (includes, _, requires), required_modules, srctextpos, prefs, prefs_hash, clone_targets, actual_flags = parse_cache_header(io, cachefile) if isempty(modules) diff --git a/stdlib/Logging/docs/src/index.md b/stdlib/Logging/docs/src/index.md index 17d4e71328ac4..a2bfd499e4586 100644 --- a/stdlib/Logging/docs/src/index.md +++ b/stdlib/Logging/docs/src/index.md @@ -191,10 +191,10 @@ module. 
Loading julia with `JULIA_DEBUG=loading` will activate
```
$ JULIA_DEBUG=loading julia -e 'using OhMyREPL'
-┌ Debug: Rejecting cache file /home/user/.julia/compiled/v0.7/OhMyREPL.ji due to it containing an invalid cache header
+┌ Debug: Rejecting cache file /home/user/.julia/compiled/v0.7/OhMyREPL.ji due to it containing an incompatible cache header
 └ @ Base loading.jl:1328
 [ Info: Recompiling stale cache file /home/user/.julia/compiled/v0.7/OhMyREPL.ji for module OhMyREPL
-┌ Debug: Rejecting cache file /home/user/.julia/compiled/v0.7/Tokenize.ji due to it containing an invalid cache header
+┌ Debug: Rejecting cache file /home/user/.julia/compiled/v0.7/Tokenize.ji due to it containing an incompatible cache header
 └ @ Base loading.jl:1328
...
```

From b0c1525f1731186767ae42e7d625bf0909e49af8 Mon Sep 17 00:00:00 2001
From: Nick Robinson
Date: Sat, 19 Oct 2024 09:27:56 +0100
Subject: [PATCH 253/537] Restore support for checking for UndefVarError variable name in at-test_throws (#56231)

Fix https://github.com/JuliaLang/julia/issues/54082

Arguably this was a breaking change (as a consequence of https://github.com/JuliaLang/julia/pull/51979). But regardless, it seems like useful functionality to have a public API for testing that an `UndefVarError` was thrown for the expected variable name (regardless of scope). This is particularly useful if you don't know what the scope is (for example, in my use case I want to test that a specific `UndefVarError` is thrown from a module with a `gensym`'d name).

Pre-v1.11 the syntax for this was
```julia
@test_throws UndefVarError(:x) foo()
```
but that stopped working in v1.11 when `UndefVarError` got a second field (in fact, in v1.11.1 this is an error, whereas before it would pass).

This PR restores that functionality. We might want to backport it to v1.11.x so that v1.11 isn't the only version that doesn't support this.
---
 stdlib/Test/src/Test.jl      |  6 +++++-
 stdlib/Test/test/runtests.jl | 17 +++++++++++++++++
 2 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/stdlib/Test/src/Test.jl b/stdlib/Test/src/Test.jl
index 46bc2d8790cec..cf906591b9962 100644
--- a/stdlib/Test/src/Test.jl
+++ b/stdlib/Test/src/Test.jl
@@ -812,7 +812,11 @@ function do_test_throws(result::ExecutionResult, orig_expr, extype)
             if extype isa LoadError && !(exc isa LoadError) && typeof(extype.error) == typeof(exc)
                 extype = extype.error # deprecated
             end
-            if isa(exc, typeof(extype))
+            # Support `UndefVarError(:x)` meaning `UndefVarError(:x, scope)` for any `scope`.
+            # Retains the behaviour from pre-v1.11 when `UndefVarError` didn't have `scope`.
+            if isa(extype, UndefVarError) && !isdefined(extype, :scope)
+                success = exc isa UndefVarError && exc.var == extype.var
+            elseif isa(exc, typeof(extype))
                 success = true
                 for fld in 1:nfields(extype)
                     if !isequal(getfield(extype, fld), getfield(exc, fld))
diff --git a/stdlib/Test/test/runtests.jl b/stdlib/Test/test/runtests.jl
index 3ddcd7d5de0fd..0c08f78ef356f 100644
--- a/stdlib/Test/test/runtests.jl
+++ b/stdlib/Test/test/runtests.jl
@@ -1736,3 +1736,20 @@ end
         This is deprecated and may error in the future."""
     @test_deprecated msg2 @macroexpand @testset DefaultTestSet DefaultTestSet begin end
 end
+
+# Issue #54082
+module M54082 end
+@testset "@test_throws UndefVarError(:var)" begin
+    # Single-arg `UndefVarError` should match all `UndefVarError` for the
+    # same variable name, regardless of scope, to keep pre-v1.11 behaviour.
+    f54082() = var
+    @test_throws UndefVarError(:var) f54082()
+    # But if scope is set, then it has to match.
+ @test_throws UndefVarError(:var, M54082) M54082.var + let result = @testset NoThrowTestSet begin + # Wrong module scope + @test_throws UndefVarError(:var, Main) M54082.var + end + @test only(result) isa Test.Fail + end +end From 877de9839809e769d4f9707a61df3400d087d8d3 Mon Sep 17 00:00:00 2001 From: Eduardo Souza Date: Sun, 20 Oct 2024 00:46:03 +1100 Subject: [PATCH 254/537] Refactoring to be considered before adding MMTk (#55608) This PR contains some refactoring of common functions that were moved to `gc-common.c` and should be shared between MMTk and Julia's stock GC. --- src/Makefile | 2 +- src/gc-common.c | 205 +++++++++++++++++++++++++++ src/gc-common.h | 12 ++ src/gc-debug.c | 42 ------ src/gc-interface.h | 37 ++--- src/gc-stacks.c | 22 +-- src/gc-stock.c | 325 ++++++++++++++----------------------------- src/gc-stock.h | 6 - src/gc-tls-common.h | 52 +++++++ src/gc-tls.h | 25 ---- src/julia_internal.h | 26 +--- src/julia_threads.h | 2 + src/scheduler.c | 4 - src/stackwalk.c | 12 +- 14 files changed, 402 insertions(+), 370 deletions(-) create mode 100644 src/gc-tls-common.h diff --git a/src/Makefile b/src/Makefile index 75635c2e6c062..3458f51fa5548 100644 --- a/src/Makefile +++ b/src/Makefile @@ -103,7 +103,7 @@ ifeq ($(USE_SYSTEM_LIBUV),0) UV_HEADERS += uv.h UV_HEADERS += uv/*.h endif -PUBLIC_HEADERS := $(BUILDDIR)/julia_version.h $(wildcard $(SRCDIR)/support/*.h) $(addprefix $(SRCDIR)/,work-stealing-queue.h gc-interface.h gc-tls.h julia.h julia_assert.h julia_threads.h julia_fasttls.h julia_locks.h julia_atomics.h jloptions.h) +PUBLIC_HEADERS := $(BUILDDIR)/julia_version.h $(wildcard $(SRCDIR)/support/*.h) $(addprefix $(SRCDIR)/,work-stealing-queue.h gc-interface.h gc-tls.h gc-tls-common.h julia.h julia_assert.h julia_threads.h julia_fasttls.h julia_locks.h julia_atomics.h jloptions.h) ifeq ($(OS),WINNT) PUBLIC_HEADERS += $(addprefix $(SRCDIR)/,win32_ucontext.h) endif diff --git a/src/gc-common.c b/src/gc-common.c index ee461b576ea9e..b552afb8228f0 100644 --- a/src/gc-common.c +++ b/src/gc-common.c @@ -20,6 +20,11 @@ extern "C" { jl_gc_num_t gc_num = {0}; +JL_DLLEXPORT uint64_t jl_gc_total_hrtime(void) +{ + return gc_num.total_time; +} + // =========================================================================== // // GC Callbacks // =========================================================================== // @@ -485,10 +490,210 @@ JL_DLLEXPORT void jl_finalize(jl_value_t *o) int gc_n_threads; jl_ptls_t* gc_all_tls_states; +// =========================================================================== // +// Allocation +// =========================================================================== // + +JL_DLLEXPORT void * jl_gc_alloc_typed(jl_ptls_t ptls, size_t sz, void *ty) +{ + return jl_gc_alloc(ptls, sz, ty); +} + +JL_DLLEXPORT jl_value_t *jl_gc_allocobj(size_t sz) +{ + jl_ptls_t ptls = jl_current_task->ptls; + return jl_gc_alloc(ptls, sz, NULL); +} + +// allocation wrappers that save the size of allocations, to allow using +// jl_gc_counted_* functions with a libc-compatible API. 
+ +JL_DLLEXPORT void *jl_malloc(size_t sz) +{ + int64_t *p = (int64_t *)jl_gc_counted_malloc(sz + JL_SMALL_BYTE_ALIGNMENT); + if (p == NULL) + return NULL; + p[0] = sz; + return (void *)(p + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 +} + +//_unchecked_calloc does not check for potential overflow of nm*sz +STATIC_INLINE void *_unchecked_calloc(size_t nm, size_t sz) { + size_t nmsz = nm*sz; + int64_t *p = (int64_t *)jl_gc_counted_calloc(nmsz + JL_SMALL_BYTE_ALIGNMENT, 1); + if (p == NULL) + return NULL; + p[0] = nmsz; + return (void *)(p + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 +} + +JL_DLLEXPORT void *jl_calloc(size_t nm, size_t sz) +{ + if (nm > SSIZE_MAX/sz - JL_SMALL_BYTE_ALIGNMENT) + return NULL; + return _unchecked_calloc(nm, sz); +} + +JL_DLLEXPORT void jl_free(void *p) +{ + if (p != NULL) { + int64_t *pp = (int64_t *)p - 2; + size_t sz = pp[0]; + jl_gc_counted_free_with_size(pp, sz + JL_SMALL_BYTE_ALIGNMENT); + } +} + +JL_DLLEXPORT void *jl_realloc(void *p, size_t sz) +{ + int64_t *pp; + size_t szold; + if (p == NULL) { + pp = NULL; + szold = 0; + } + else { + pp = (int64_t *)p - 2; + szold = pp[0] + JL_SMALL_BYTE_ALIGNMENT; + } + int64_t *pnew = (int64_t *)jl_gc_counted_realloc_with_old_size(pp, szold, sz + JL_SMALL_BYTE_ALIGNMENT); + if (pnew == NULL) + return NULL; + pnew[0] = sz; + return (void *)(pnew + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 +} + +// allocator entry points + +JL_DLLEXPORT jl_value_t *(jl_gc_alloc)(jl_ptls_t ptls, size_t sz, void *ty) +{ + return jl_gc_alloc_(ptls, sz, ty); +} + +// =========================================================================== // +// Generic Memory +// =========================================================================== // + +size_t jl_genericmemory_nbytes(jl_genericmemory_t *m) JL_NOTSAFEPOINT +{ + const jl_datatype_layout_t *layout = ((jl_datatype_t*)jl_typetagof(m))->layout; + size_t sz = layout->size * m->length; + if (layout->flags.arrayelem_isunion) + // account for isbits Union array selector bytes + sz += m->length; + return sz; +} + +// tracking Memorys with malloc'd storage +void jl_gc_track_malloced_genericmemory(jl_ptls_t ptls, jl_genericmemory_t *m, int isaligned){ + // This is **NOT** a GC safe point. 
+ mallocmemory_t *ma; + if (ptls->gc_tls_common.heap.mafreelist == NULL) { + ma = (mallocmemory_t*)malloc_s(sizeof(mallocmemory_t)); + } + else { + ma = ptls->gc_tls_common.heap.mafreelist; + ptls->gc_tls_common.heap.mafreelist = ma->next; + } + ma->a = (jl_genericmemory_t*)((uintptr_t)m | !!isaligned); + ma->next = ptls->gc_tls_common.heap.mallocarrays; + ptls->gc_tls_common.heap.mallocarrays = ma; +} + +// =========================================================================== // +// GC Debug +// =========================================================================== // + +int gc_slot_to_fieldidx(void *obj, void *slot, jl_datatype_t *vt) JL_NOTSAFEPOINT +{ + int nf = (int)jl_datatype_nfields(vt); + for (int i = 1; i < nf; i++) { + if (slot < (void*)((char*)obj + jl_field_offset(vt, i))) + return i - 1; + } + return nf - 1; +} + +int gc_slot_to_arrayidx(void *obj, void *_slot) JL_NOTSAFEPOINT +{ + char *slot = (char*)_slot; + jl_datatype_t *vt = (jl_datatype_t*)jl_typeof(obj); + char *start = NULL; + size_t len = 0; + size_t elsize = sizeof(void*); + if (vt == jl_module_type) { + jl_module_t *m = (jl_module_t*)obj; + start = (char*)m->usings.items; + len = module_usings_length(m); + elsize = sizeof(struct _jl_module_using); + } + else if (vt == jl_simplevector_type) { + start = (char*)jl_svec_data(obj); + len = jl_svec_len(obj); + } + if (slot < start || slot >= start + elsize * len) + return -1; + return (slot - start) / elsize; +} + +// =========================================================================== // +// GC Control +// =========================================================================== // + +JL_DLLEXPORT uint32_t jl_get_gc_disable_counter(void) { + return jl_atomic_load_acquire(&jl_gc_disable_counter); +} + +JL_DLLEXPORT int jl_gc_is_enabled(void) +{ + jl_ptls_t ptls = jl_current_task->ptls; + return !ptls->disable_gc; +} + +int gc_logging_enabled = 0; + +JL_DLLEXPORT void jl_enable_gc_logging(int enable) { + gc_logging_enabled = enable; +} + +JL_DLLEXPORT int jl_is_gc_logging_enabled(void) { + return gc_logging_enabled; +} + + +// collector entry point and control +_Atomic(uint32_t) jl_gc_disable_counter = 1; + +JL_DLLEXPORT int jl_gc_enable(int on) +{ + jl_ptls_t ptls = jl_current_task->ptls; + int prev = !ptls->disable_gc; + ptls->disable_gc = (on == 0); + if (on && !prev) { + // disable -> enable + if (jl_atomic_fetch_add(&jl_gc_disable_counter, -1) == 1) { + gc_num.allocd += gc_num.deferred_alloc; + gc_num.deferred_alloc = 0; + } + } + else if (prev && !on) { + // enable -> disable + jl_atomic_fetch_add(&jl_gc_disable_counter, 1); + // check if the GC is running and wait for it to finish + jl_gc_safepoint_(ptls); + } + return prev; +} + // =========================================================================== // // MISC // =========================================================================== // +JL_DLLEXPORT jl_weakref_t *jl_gc_new_weakref(jl_value_t *value) +{ + jl_ptls_t ptls = jl_current_task->ptls; + return jl_gc_new_weakref_th(ptls, value); +} + const uint64_t _jl_buff_tag[3] = {0x4eadc0004eadc000ull, 0x4eadc0004eadc000ull, 0x4eadc0004eadc000ull}; // aka 0xHEADER00 JL_DLLEXPORT uintptr_t jl_get_buff_tag(void) JL_NOTSAFEPOINT { diff --git a/src/gc-common.h b/src/gc-common.h index 4d53830442a7d..32b7470b13a58 100644 --- a/src/gc-common.h +++ b/src/gc-common.h @@ -53,6 +53,12 @@ extern jl_gc_callback_list_t *gc_cblist_notify_gc_pressure; // malloc wrappers, aligned allocation // 
=========================================================================== // +// data structure for tracking malloc'd genericmemory. +typedef struct _mallocmemory_t { + jl_genericmemory_t *a; // lowest bit is tagged if this is aligned memory + struct _mallocmemory_t *next; +} mallocmemory_t; + #if defined(_OS_WINDOWS_) STATIC_INLINE void *jl_malloc_aligned(size_t sz, size_t align) { @@ -173,4 +179,10 @@ JL_DLLEXPORT void jl_finalize_th(jl_task_t *ct, jl_value_t *o); extern int gc_n_threads; extern jl_ptls_t* gc_all_tls_states; +// =========================================================================== // +// Logging +// =========================================================================== // + +extern int gc_logging_enabled; + #endif // JL_GC_COMMON_H diff --git a/src/gc-debug.c b/src/gc-debug.c index 5c150aba68e10..7c479484cde45 100644 --- a/src/gc-debug.c +++ b/src/gc-debug.c @@ -1105,48 +1105,6 @@ void gc_count_pool(void) jl_safe_printf("************************\n"); } -int gc_slot_to_fieldidx(void *obj, void *slot, jl_datatype_t *vt) JL_NOTSAFEPOINT -{ - int nf = (int)jl_datatype_nfields(vt); - for (int i = 1; i < nf; i++) { - if (slot < (void*)((char*)obj + jl_field_offset(vt, i))) - return i - 1; - } - return nf - 1; -} - -int gc_slot_to_arrayidx(void *obj, void *_slot) JL_NOTSAFEPOINT -{ - char *slot = (char*)_slot; - jl_datatype_t *vt = (jl_datatype_t*)jl_typeof(obj); - char *start = NULL; - size_t len = 0; - size_t elsize = sizeof(void*); - if (vt == jl_module_type) { - jl_module_t *m = (jl_module_t*)obj; - start = (char*)m->usings.items; - len = module_usings_length(m); - elsize = sizeof(struct _jl_module_using); - } - else if (vt == jl_simplevector_type) { - start = (char*)jl_svec_data(obj); - len = jl_svec_len(obj); - } - if (slot < start || slot >= start + elsize * len) - return -1; - return (slot - start) / elsize; -} - -static int gc_logging_enabled = 0; - -JL_DLLEXPORT void jl_enable_gc_logging(int enable) { - gc_logging_enabled = enable; -} - -JL_DLLEXPORT int jl_is_gc_logging_enabled(void) { - return gc_logging_enabled; -} - void _report_gc_finished(uint64_t pause, uint64_t freed, int full, int recollect, int64_t live_bytes) JL_NOTSAFEPOINT { if (!gc_logging_enabled) { return; diff --git a/src/gc-interface.h b/src/gc-interface.h index bb2abbe2d36ac..0b5df17a3b8c5 100644 --- a/src/gc-interface.h +++ b/src/gc-interface.h @@ -96,6 +96,8 @@ JL_DLLEXPORT void jl_gc_set_max_memory(uint64_t max_mem); // should run a collection cycle again (e.g. a full mark right after a full sweep to ensure // we do a full heap traversal). JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection); +// Returns whether the thread with `tid` is a collector thread +JL_DLLEXPORT int gc_is_collector_thread(int tid) JL_NOTSAFEPOINT; // ========================================================================= // // Metrics @@ -130,6 +132,13 @@ JL_DLLEXPORT uint64_t jl_gc_total_hrtime(void); // Allocation // ========================================================================= // +// On GCC, this function is inlined when sz is constant (see julia_internal.h) +// In general, this function should implement allocation and should use the specific GC's logic +// to decide whether to allocate a small or a large object. Finally, note that this function +// **must** also set the type of the returning object to be `ty`. The type `ty` may also be used to record +// an allocation of that type in the allocation profiler. 
+struct _jl_value_t *jl_gc_alloc_(struct _jl_tls_states_t * ptls, size_t sz, void *ty); + // Allocates small objects and increments Julia allocation counterst. Size of the object // header must be included in the object size. The (possibly unused in some implementations) // offset to the arena in which we're allocating is passed in the second parameter, and the @@ -157,26 +166,6 @@ JL_DLLEXPORT void *jl_gc_counted_calloc(size_t nm, size_t sz); JL_DLLEXPORT void jl_gc_counted_free_with_size(void *p, size_t sz); // Wrapper around Libc realloc that updates Julia allocation counters. JL_DLLEXPORT void *jl_gc_counted_realloc_with_old_size(void *p, size_t old, size_t sz); -// Wrapper around Libc malloc that allocates a memory region with a few additional machine -// words before the actual payload that are used to record the size of the requested -// allocation. Also updates Julia allocation counters. The function returns a pointer to the -// payload as a result of the allocation. -JL_DLLEXPORT void *jl_malloc(size_t sz); -// Wrapper around Libc calloc that allocates a memory region with a few additional machine -// words before the actual payload that are used to record the size of the requested -// allocation. Also updates Julia allocation counters. The function returns a pointer to the -// payload as a result of the allocation. -JL_DLLEXPORT void *jl_calloc(size_t nm, size_t sz); -// Wrapper around Libc free that takes a pointer to the payload of a memory region allocated -// with jl_malloc or jl_calloc, and uses the size information stored in the first machine -// words of the memory buffer update Julia allocation counters, and then frees the -// corresponding memory buffer. -JL_DLLEXPORT void jl_free(void *p); -// Wrapper around Libc realloc that takes a memory region allocated with jl_malloc or -// jl_calloc, and uses the size information stored in the first machine words of the memory -// buffer to update Julia allocation counters, reallocating the corresponding memory buffer -// in the end. -JL_DLLEXPORT void *jl_realloc(void *p, size_t sz); // Wrapper around Libc malloc that's used to dynamically allocate memory for Arrays and // Strings. It increments Julia allocation counters and should check whether we're close to // the Julia heap target, and therefore, whether we should run a collection. Note that this @@ -190,14 +179,6 @@ JL_DLLEXPORT void *jl_gc_managed_malloc(size_t sz); // thread-local allocator of the thread referenced by the first jl_ptls_t argument. JL_DLLEXPORT struct _jl_weakref_t *jl_gc_new_weakref_th(struct _jl_tls_states_t *ptls, struct _jl_value_t *value); -// Allocates a new weak-reference, assigns its value and increments Julia allocation -// counters. If thread-local allocators are used, then this function should allocate in the -// thread-local allocator of the current thread. -JL_DLLEXPORT struct _jl_weakref_t *jl_gc_new_weakref(struct _jl_value_t *value); -// Allocates an object whose size is specified by the function argument and increments Julia -// allocation counters. If thread-local allocators are used, then this function should -// allocate in the thread-local allocator of the current thread. -JL_DLLEXPORT struct _jl_value_t *jl_gc_allocobj(size_t sz); // Permanently allocates a memory slot of the size specified by the first parameter. This // block of memory is allocated in an immortal region that is never swept. The second // parameter specifies whether the memory should be filled with zeros. 
The third and fourth diff --git a/src/gc-stacks.c b/src/gc-stacks.c index f6e787a4c1d2d..a2d3862dc9501 100644 --- a/src/gc-stacks.c +++ b/src/gc-stacks.c @@ -47,7 +47,7 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT } -static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT +void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT { VirtualFree(stkbuf, 0, MEM_RELEASE); jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1); @@ -82,7 +82,7 @@ static void *malloc_stack(size_t bufsz) JL_NOTSAFEPOINT return stk; } -static void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT +void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT { munmap(stkbuf, bufsz); jl_atomic_fetch_add_relaxed(&num_stack_mappings, -1); @@ -132,7 +132,7 @@ void _jl_free_stack(jl_ptls_t ptls, void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT if (bufsz <= pool_sizes[JL_N_STACK_POOLS - 1]) { unsigned pool_id = select_pool(bufsz); if (pool_sizes[pool_id] == bufsz) { - small_arraylist_push(&ptls->gc_tls.heap.free_stacks[pool_id], stkbuf); + small_arraylist_push(&ptls->gc_tls_common.heap.free_stacks[pool_id], stkbuf); return; } } @@ -161,7 +161,7 @@ void jl_release_task_stack(jl_ptls_t ptls, jl_task_t *task) #ifdef _COMPILER_ASAN_ENABLED_ __asan_unpoison_stack_memory((uintptr_t)stkbuf, bufsz); #endif - small_arraylist_push(&ptls->gc_tls.heap.free_stacks[pool_id], stkbuf); + small_arraylist_push(&ptls->gc_tls_common.heap.free_stacks[pool_id], stkbuf); } } } @@ -176,7 +176,7 @@ JL_DLLEXPORT void *jl_malloc_stack(size_t *bufsz, jl_task_t *owner) JL_NOTSAFEPO if (ssize <= pool_sizes[JL_N_STACK_POOLS - 1]) { unsigned pool_id = select_pool(ssize); ssize = pool_sizes[pool_id]; - small_arraylist_t *pool = &ptls->gc_tls.heap.free_stacks[pool_id]; + small_arraylist_t *pool = &ptls->gc_tls_common.heap.free_stacks[pool_id]; if (pool->len > 0) { stk = small_arraylist_pop(pool); } @@ -197,7 +197,7 @@ JL_DLLEXPORT void *jl_malloc_stack(size_t *bufsz, jl_task_t *owner) JL_NOTSAFEPO } *bufsz = ssize; if (owner) { - small_arraylist_t *live_tasks = &ptls->gc_tls.heap.live_tasks; + small_arraylist_t *live_tasks = &ptls->gc_tls_common.heap.live_tasks; mtarraylist_push(live_tasks, owner); } return stk; @@ -228,7 +228,7 @@ void sweep_stack_pool_loop(void) JL_NOTSAFEPOINT // free half of stacks that remain unused since last sweep if (i == jl_atomic_load_relaxed(&gc_stack_free_idx)) { for (int p = 0; p < JL_N_STACK_POOLS; p++) { - small_arraylist_t *al = &ptls2->gc_tls.heap.free_stacks[p]; + small_arraylist_t *al = &ptls2->gc_tls_common.heap.free_stacks[p]; size_t n_to_free; if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) { n_to_free = al->len; // not alive yet or dead, so it does not need these anymore @@ -251,10 +251,10 @@ void sweep_stack_pool_loop(void) JL_NOTSAFEPOINT } } if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) { - small_arraylist_free(ptls2->gc_tls.heap.free_stacks); + small_arraylist_free(ptls2->gc_tls_common.heap.free_stacks); } - small_arraylist_t *live_tasks = &ptls2->gc_tls.heap.live_tasks; + small_arraylist_t *live_tasks = &ptls2->gc_tls_common.heap.live_tasks; size_t n = 0; size_t ndel = 0; size_t l = live_tasks->len; @@ -306,7 +306,7 @@ JL_DLLEXPORT jl_array_t *jl_live_tasks(void) jl_ptls_t ptls2 = allstates[i]; if (ptls2 == NULL) continue; - small_arraylist_t *live_tasks = &ptls2->gc_tls.heap.live_tasks; + small_arraylist_t *live_tasks = &ptls2->gc_tls_common.heap.live_tasks; size_t n = mtarraylist_length(live_tasks); l += n + (ptls2->root_task->ctx.stkbuf != NULL); } @@ -325,7 
+325,7 @@ JL_DLLEXPORT jl_array_t *jl_live_tasks(void) goto restart; jl_array_data(a,void*)[j++] = t; } - small_arraylist_t *live_tasks = &ptls2->gc_tls.heap.live_tasks; + small_arraylist_t *live_tasks = &ptls2->gc_tls_common.heap.live_tasks; size_t n = mtarraylist_length(live_tasks); for (size_t i = 0; i < n; i++) { jl_task_t *t = (jl_task_t*)mtarraylist_get(live_tasks, i); diff --git a/src/gc-stock.c b/src/gc-stock.c index f60aa89e6b11d..541c5b4ecc5c2 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -363,7 +363,7 @@ JL_DLLEXPORT jl_weakref_t *jl_gc_new_weakref_th(jl_ptls_t ptls, jl_value_t *valu jl_weakref_t *wr = (jl_weakref_t*)jl_gc_alloc(ptls, sizeof(void*), jl_weakref_type); wr->value = value; // NOTE: wb not needed here - small_arraylist_push(&ptls->gc_tls.heap.weak_refs, wr); + small_arraylist_push(&ptls->gc_tls_common.heap.weak_refs, wr); return wr; } @@ -373,8 +373,8 @@ static void clear_weak_refs(void) for (int i = 0; i < gc_n_threads; i++) { jl_ptls_t ptls2 = gc_all_tls_states[i]; if (ptls2 != NULL) { - size_t n, l = ptls2->gc_tls.heap.weak_refs.len; - void **lst = ptls2->gc_tls.heap.weak_refs.items; + size_t n, l = ptls2->gc_tls_common.heap.weak_refs.len; + void **lst = ptls2->gc_tls_common.heap.weak_refs.items; for (n = 0; n < l; n++) { jl_weakref_t *wr = (jl_weakref_t*)lst[n]; if (!gc_marked(jl_astaggedvalue(wr->value)->bits.gc)) @@ -392,8 +392,8 @@ static void sweep_weak_refs(void) if (ptls2 != NULL) { size_t n = 0; size_t ndel = 0; - size_t l = ptls2->gc_tls.heap.weak_refs.len; - void **lst = ptls2->gc_tls.heap.weak_refs.items; + size_t l = ptls2->gc_tls_common.heap.weak_refs.len; + void **lst = ptls2->gc_tls_common.heap.weak_refs.items; if (l == 0) continue; while (1) { @@ -408,7 +408,7 @@ static void sweep_weak_refs(void) lst[n] = lst[n + ndel]; lst[n + ndel] = tmp; } - ptls2->gc_tls.heap.weak_refs.len -= ndel; + ptls2->gc_tls_common.heap.weak_refs.len -= ndel; } } } @@ -416,18 +416,18 @@ static void sweep_weak_refs(void) STATIC_INLINE void jl_batch_accum_heap_size(jl_ptls_t ptls, uint64_t sz) JL_NOTSAFEPOINT { - uint64_t alloc_acc = jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.alloc_acc) + sz; + uint64_t alloc_acc = jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.alloc_acc) + sz; if (alloc_acc < 16*1024) - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.alloc_acc, alloc_acc); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.alloc_acc, alloc_acc); else { jl_atomic_fetch_add_relaxed(&gc_heap_stats.heap_size, alloc_acc); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.alloc_acc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.alloc_acc, 0); } } STATIC_INLINE void jl_batch_accum_free_size(jl_ptls_t ptls, uint64_t sz) JL_NOTSAFEPOINT { - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.free_acc, jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.free_acc) + sz); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.free_acc, jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.free_acc) + sz); } // big value list @@ -448,10 +448,10 @@ STATIC_INLINE jl_value_t *jl_gc_big_alloc_inner(jl_ptls_t ptls, size_t sz) jl_throw(jl_memory_exception); gc_invoke_callbacks(jl_gc_cb_notify_external_alloc_t, gc_cblist_notify_external_alloc, (v, allocsz)); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + allocsz); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.bigalloc, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.bigalloc) + 1); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, + 
jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + allocsz); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.bigalloc, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.bigalloc) + 1); jl_batch_accum_heap_size(ptls, allocsz); #ifdef MEMDEBUG memset(v, 0xee, allocsz); @@ -561,29 +561,11 @@ static void sweep_big(jl_ptls_t ptls) JL_NOTSAFEPOINT gc_time_big_end(); } -// tracking Memorys with malloc'd storage - -void jl_gc_track_malloced_genericmemory(jl_ptls_t ptls, jl_genericmemory_t *m, int isaligned){ - // This is **NOT** a GC safe point. - mallocmemory_t *ma; - if (ptls->gc_tls.heap.mafreelist == NULL) { - ma = (mallocmemory_t*)malloc_s(sizeof(mallocmemory_t)); - } - else { - ma = ptls->gc_tls.heap.mafreelist; - ptls->gc_tls.heap.mafreelist = ma->next; - } - ma->a = (jl_genericmemory_t*)((uintptr_t)m | !!isaligned); - ma->next = ptls->gc_tls.heap.mallocarrays; - ptls->gc_tls.heap.mallocarrays = ma; -} - - void jl_gc_count_allocd(size_t sz) JL_NOTSAFEPOINT { jl_ptls_t ptls = jl_current_task->ptls; - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + sz); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + sz); jl_batch_accum_heap_size(ptls, sz); } @@ -602,18 +584,18 @@ static void combine_thread_gc_counts(jl_gc_num_t *dest, int update_heap) JL_NOTS for (int i = 0; i < gc_n_threads; i++) { jl_ptls_t ptls = gc_all_tls_states[i]; if (ptls) { - dest->allocd += (jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + gc_num.interval); - dest->malloc += jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.malloc); - dest->realloc += jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.realloc); - dest->poolalloc += jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.poolalloc); - dest->bigalloc += jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.bigalloc); - dest->freed += jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.free_acc); + dest->allocd += (jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + gc_num.interval); + dest->malloc += jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.malloc); + dest->realloc += jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.realloc); + dest->poolalloc += jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.poolalloc); + dest->bigalloc += jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.bigalloc); + dest->freed += jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.free_acc); if (update_heap) { - uint64_t alloc_acc = jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.alloc_acc); - freed_in_runtime += jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.free_acc); + uint64_t alloc_acc = jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.alloc_acc); + freed_in_runtime += jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.free_acc); jl_atomic_store_relaxed(&gc_heap_stats.heap_size, alloc_acc + jl_atomic_load_relaxed(&gc_heap_stats.heap_size)); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.alloc_acc, 0); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.free_acc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.alloc_acc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.free_acc, 0); } } } @@ -629,13 +611,13 @@ static void reset_thread_gc_counts(void) JL_NOTSAFEPOINT jl_ptls_t ptls = gc_all_tls_states[i]; if (ptls != NULL) { // don't reset `pool_live_bytes` here - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, -(int64_t)gc_num.interval); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.malloc, 0); - 
jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.realloc, 0); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.poolalloc, 0); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.bigalloc, 0); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.alloc_acc, 0); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.free_acc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, -(int64_t)gc_num.interval); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.malloc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.realloc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.poolalloc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.bigalloc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.alloc_acc, 0); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.free_acc, 0); } } } @@ -655,17 +637,6 @@ void jl_gc_reset_alloc_count(void) JL_NOTSAFEPOINT reset_thread_gc_counts(); } -size_t jl_genericmemory_nbytes(jl_genericmemory_t *m) JL_NOTSAFEPOINT -{ - const jl_datatype_layout_t *layout = ((jl_datatype_t*)jl_typetagof(m))->layout; - size_t sz = layout->size * m->length; - if (layout->flags.arrayelem_isunion) - // account for isbits Union array selector bytes - sz += m->length; - return sz; -} - - static void jl_gc_free_memory(jl_value_t *v, int isaligned) JL_NOTSAFEPOINT { assert(jl_is_genericmemory(v)); @@ -689,8 +660,8 @@ static void sweep_malloced_memory(void) JL_NOTSAFEPOINT for (int t_i = 0; t_i < gc_n_threads; t_i++) { jl_ptls_t ptls2 = gc_all_tls_states[t_i]; if (ptls2 != NULL) { - mallocmemory_t *ma = ptls2->gc_tls.heap.mallocarrays; - mallocmemory_t **pma = &ptls2->gc_tls.heap.mallocarrays; + mallocmemory_t *ma = ptls2->gc_tls_common.heap.mallocarrays; + mallocmemory_t **pma = &ptls2->gc_tls_common.heap.mallocarrays; while (ma != NULL) { mallocmemory_t *nxt = ma->next; jl_value_t *a = (jl_value_t*)((uintptr_t)ma->a & ~1); @@ -702,8 +673,8 @@ static void sweep_malloced_memory(void) JL_NOTSAFEPOINT *pma = nxt; int isaligned = (uintptr_t)ma->a & 1; jl_gc_free_memory(a, isaligned); - ma->next = ptls2->gc_tls.heap.mafreelist; - ptls2->gc_tls.heap.mafreelist = ma; + ma->next = ptls2->gc_tls_common.heap.mafreelist; + ptls2->gc_tls_common.heap.mafreelist = ma; } gc_time_count_mallocd_memory(bits); ma = nxt; @@ -764,12 +735,12 @@ STATIC_INLINE jl_value_t *jl_gc_small_alloc_inner(jl_ptls_t ptls, int offset, return jl_gc_big_alloc(ptls, osize, NULL); #endif maybe_collect(ptls); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + osize); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.pool_live_bytes, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.pool_live_bytes) + osize); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.poolalloc, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.poolalloc) + 1); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + osize); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.pool_live_bytes, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.pool_live_bytes) + osize); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.poolalloc, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.poolalloc) + 1); // first try to use the freelist jl_taggedvalue_t *v = p->freelist; if (v != NULL) { @@ -824,6 +795,29 @@ jl_value_t *jl_gc_small_alloc_noinline(jl_ptls_t ptls, int offset, int osize) { return jl_gc_small_alloc_inner(ptls, offset, osize); } +// Size does NOT include the type tag!! 
+inline jl_value_t *jl_gc_alloc_(jl_ptls_t ptls, size_t sz, void *ty) +{ + jl_value_t *v; + const size_t allocsz = sz + sizeof(jl_taggedvalue_t); + if (sz <= GC_MAX_SZCLASS) { + int pool_id = jl_gc_szclass(allocsz); + jl_gc_pool_t *p = &ptls->gc_tls.heap.norm_pools[pool_id]; + int osize = jl_gc_sizeclasses[pool_id]; + // We call `jl_gc_small_alloc_noinline` instead of `jl_gc_small_alloc` to avoid double-counting in + // the Allocations Profiler. (See https://github.com/JuliaLang/julia/pull/43868 for more details.) + v = jl_gc_small_alloc_noinline(ptls, (char*)p - (char*)ptls, osize); + } + else { + if (allocsz < sz) // overflow in adding offs, size was "negative" + jl_throw(jl_memory_exception); + v = jl_gc_big_alloc_noinline(ptls, allocsz); + } + jl_set_typeof(v, ty); + maybe_record_alloc_to_profile(v, sz, (jl_datatype_t*)ty); + return v; +} + int jl_gc_classify_pools(size_t sz, int *osize) { if (sz > GC_MAX_SZCLASS) @@ -983,8 +977,8 @@ static void gc_sweep_page(gc_page_profiler_serializer_t *s, jl_gc_pool_t *p, jl_ // instead of adding it to the thread that originally allocated the page, so we can avoid // an atomic-fetch-add here. size_t delta = (GC_PAGE_SZ - GC_PAGE_OFFSET - nfree * osize); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.pool_live_bytes, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.pool_live_bytes) + delta); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.pool_live_bytes, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.pool_live_bytes) + delta); jl_atomic_fetch_add_relaxed((_Atomic(int64_t) *)&gc_num.freed, (nfree - old_nfree) * osize); } @@ -1277,7 +1271,7 @@ static void gc_sweep_pool(void) } continue; } - jl_atomic_store_relaxed(&ptls2->gc_tls.gc_num.pool_live_bytes, 0); + jl_atomic_store_relaxed(&ptls2->gc_tls_common.gc_num.pool_live_bytes, 0); for (int i = 0; i < JL_GC_N_POOLS; i++) { jl_gc_pool_t *p = &ptls2->gc_tls.heap.norm_pools[i]; jl_taggedvalue_t *last = p->freelist; @@ -2841,34 +2835,8 @@ static void sweep_finalizer_list(arraylist_t *list) list->len = j; } -// collector entry point and control -_Atomic(uint32_t) jl_gc_disable_counter = 1; - -JL_DLLEXPORT int jl_gc_enable(int on) -{ - jl_ptls_t ptls = jl_current_task->ptls; - int prev = !ptls->disable_gc; - ptls->disable_gc = (on == 0); - if (on && !prev) { - // disable -> enable - if (jl_atomic_fetch_add(&jl_gc_disable_counter, -1) == 1) { - gc_num.allocd += gc_num.deferred_alloc; - gc_num.deferred_alloc = 0; - } - } - else if (prev && !on) { - // enable -> disable - jl_atomic_fetch_add(&jl_gc_disable_counter, 1); - // check if the GC is running and wait for it to finish - jl_gc_safepoint_(ptls); - } - return prev; -} - -JL_DLLEXPORT int jl_gc_is_enabled(void) -{ - jl_ptls_t ptls = jl_current_task->ptls; - return !ptls->disable_gc; +int gc_is_collector_thread(int tid) JL_NOTSAFEPOINT { + return gc_is_parallel_collector_thread(tid) || gc_is_concurrent_collector_thread(tid); } JL_DLLEXPORT void jl_gc_get_total_bytes(int64_t *bytes) JL_NOTSAFEPOINT @@ -2879,11 +2847,6 @@ JL_DLLEXPORT void jl_gc_get_total_bytes(int64_t *bytes) JL_NOTSAFEPOINT *bytes = (num.total_allocd + num.deferred_alloc + num.allocd); } -JL_DLLEXPORT uint64_t jl_gc_total_hrtime(void) -{ - return gc_num.total_time; -} - JL_DLLEXPORT jl_gc_num_t jl_gc_num(void) { jl_gc_num_t num = gc_num; @@ -2918,7 +2881,7 @@ JL_DLLEXPORT int64_t jl_gc_pool_live_bytes(void) for (int i = 0; i < n_threads; i++) { jl_ptls_t ptls2 = all_tls_states[i]; if (ptls2 != NULL) { - pool_live_bytes += 
jl_atomic_load_relaxed(&ptls2->gc_tls.gc_num.pool_live_bytes); + pool_live_bytes += jl_atomic_load_relaxed(&ptls2->gc_tls_common.gc_num.pool_live_bytes); } } return pool_live_bytes; @@ -3271,13 +3234,13 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection) // free empty GC state for threads that have exited if (jl_atomic_load_relaxed(&ptls2->current_task) == NULL) { // GC threads should never exit - assert(!gc_is_parallel_collector_thread(t_i)); - assert(!gc_is_concurrent_collector_thread(t_i)); + assert(!gc_is_collector_thread(t_i)); + jl_thread_heap_common_t *common_heap = &ptls2->gc_tls_common.heap; jl_thread_heap_t *heap = &ptls2->gc_tls.heap; - if (heap->weak_refs.len == 0) - small_arraylist_free(&heap->weak_refs); - if (heap->live_tasks.len == 0) - small_arraylist_free(&heap->live_tasks); + if (common_heap->weak_refs.len == 0) + small_arraylist_free(&common_heap->weak_refs); + if (common_heap->live_tasks.len == 0) + small_arraylist_free(&common_heap->live_tasks); if (heap->remset.len == 0) arraylist_free(&heap->remset); if (ptls2->finalizers.len == 0) @@ -3346,8 +3309,8 @@ JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection) jl_task_t *ct = jl_current_task; jl_ptls_t ptls = ct->ptls; if (jl_atomic_load_acquire(&jl_gc_disable_counter)) { - size_t localbytes = jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + gc_num.interval; - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, -(int64_t)gc_num.interval); + size_t localbytes = jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + gc_num.interval; + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, -(int64_t)gc_num.interval); static_assert(sizeof(_Atomic(uint64_t)) == sizeof(gc_num.deferred_alloc), ""); jl_atomic_fetch_add_relaxed((_Atomic(uint64_t)*)&gc_num.deferred_alloc, localbytes); return; @@ -3449,16 +3412,10 @@ void gc_mark_queue_all_roots(jl_ptls_t ptls, jl_gc_markqueue_t *mq) gc_mark_roots(mq); } -// allocator entry points - -JL_DLLEXPORT jl_value_t *(jl_gc_alloc)(jl_ptls_t ptls, size_t sz, void *ty) -{ - return jl_gc_alloc_(ptls, sz, ty); -} - // Per-thread initialization void jl_init_thread_heap(jl_ptls_t ptls) { + jl_thread_heap_common_t *common_heap = &ptls->gc_tls_common.heap; jl_thread_heap_t *heap = &ptls->gc_tls.heap; jl_gc_pool_t *p = heap->norm_pools; for (int i = 0; i < JL_GC_N_POOLS; i++) { @@ -3466,12 +3423,12 @@ void jl_init_thread_heap(jl_ptls_t ptls) p[i].freelist = NULL; p[i].newpages = NULL; } - small_arraylist_new(&heap->weak_refs, 0); - small_arraylist_new(&heap->live_tasks, 0); + small_arraylist_new(&common_heap->weak_refs, 0); + small_arraylist_new(&common_heap->live_tasks, 0); for (int i = 0; i < JL_N_STACK_POOLS; i++) - small_arraylist_new(&heap->free_stacks[i], 0); - heap->mallocarrays = NULL; - heap->mafreelist = NULL; + small_arraylist_new(&common_heap->free_stacks[i], 0); + common_heap->mallocarrays = NULL; + common_heap->mafreelist = NULL; heap->young_generation_of_bigvals = (bigval_t*)calloc_s(sizeof(bigval_t)); // sentinel assert(gc_bigval_sentinel_tag != 0); // make sure the sentinel is initialized heap->young_generation_of_bigvals->header = gc_bigval_sentinel_tag; @@ -3497,8 +3454,8 @@ void jl_init_thread_heap(jl_ptls_t ptls) jl_atomic_store_relaxed(&q->array, wsa2); arraylist_new(&mq->reclaim_set, 32); - memset(&ptls->gc_tls.gc_num, 0, sizeof(ptls->gc_tls.gc_num)); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, -(int64_t)gc_num.interval); + memset(&ptls->gc_tls_common.gc_num, 0, sizeof(ptls->gc_tls_common.gc_num)); + 
jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, -(int64_t)gc_num.interval); } void jl_free_thread_gc_state(jl_ptls_t ptls) @@ -3685,10 +3642,10 @@ JL_DLLEXPORT void *jl_gc_counted_malloc(size_t sz) if (data != NULL && pgcstack != NULL && ct->world_age) { jl_ptls_t ptls = ct->ptls; maybe_collect(ptls); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + sz); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.malloc, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.malloc) + 1); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + sz); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.malloc, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.malloc) + 1); jl_batch_accum_heap_size(ptls, sz); } return data; @@ -3702,10 +3659,10 @@ JL_DLLEXPORT void *jl_gc_counted_calloc(size_t nm, size_t sz) if (data != NULL && pgcstack != NULL && ct->world_age) { jl_ptls_t ptls = ct->ptls; maybe_collect(ptls); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + nm*sz); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.malloc, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.malloc) + 1); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + nm*sz); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.malloc, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.malloc) + 1); jl_batch_accum_heap_size(ptls, sz * nm); } return data; @@ -3730,10 +3687,10 @@ JL_DLLEXPORT void *jl_gc_counted_realloc_with_old_size(void *p, size_t old, size jl_ptls_t ptls = ct->ptls; maybe_collect(ptls); if (!(sz < old)) - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + (sz - old)); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.realloc, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.realloc) + 1); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + (sz - old)); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.realloc, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.realloc) + 1); int64_t diff = sz - old; if (diff < 0) { @@ -3746,63 +3703,6 @@ JL_DLLEXPORT void *jl_gc_counted_realloc_with_old_size(void *p, size_t old, size return data; } -// allocation wrappers that save the size of allocations, to allow using -// jl_gc_counted_* functions with a libc-compatible API. 
- -JL_DLLEXPORT void *jl_malloc(size_t sz) -{ - int64_t *p = (int64_t *)jl_gc_counted_malloc(sz + JL_SMALL_BYTE_ALIGNMENT); - if (p == NULL) - return NULL; - p[0] = sz; - return (void *)(p + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 -} - -//_unchecked_calloc does not check for potential overflow of nm*sz -STATIC_INLINE void *_unchecked_calloc(size_t nm, size_t sz) { - size_t nmsz = nm*sz; - int64_t *p = (int64_t *)jl_gc_counted_calloc(nmsz + JL_SMALL_BYTE_ALIGNMENT, 1); - if (p == NULL) - return NULL; - p[0] = nmsz; - return (void *)(p + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 -} - -JL_DLLEXPORT void *jl_calloc(size_t nm, size_t sz) -{ - if (nm > SSIZE_MAX/sz - JL_SMALL_BYTE_ALIGNMENT) - return NULL; - return _unchecked_calloc(nm, sz); -} - -JL_DLLEXPORT void jl_free(void *p) -{ - if (p != NULL) { - int64_t *pp = (int64_t *)p - 2; - size_t sz = pp[0]; - jl_gc_counted_free_with_size(pp, sz + JL_SMALL_BYTE_ALIGNMENT); - } -} - -JL_DLLEXPORT void *jl_realloc(void *p, size_t sz) -{ - int64_t *pp; - size_t szold; - if (p == NULL) { - pp = NULL; - szold = 0; - } - else { - pp = (int64_t *)p - 2; - szold = pp[0] + JL_SMALL_BYTE_ALIGNMENT; - } - int64_t *pnew = (int64_t *)jl_gc_counted_realloc_with_old_size(pp, szold, sz + JL_SMALL_BYTE_ALIGNMENT); - if (pnew == NULL) - return NULL; - pnew[0] = sz; - return (void *)(pnew + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 -} - // allocating blocks for Arrays and Strings JL_DLLEXPORT void *jl_gc_managed_malloc(size_t sz) @@ -3821,10 +3721,10 @@ JL_DLLEXPORT void *jl_gc_managed_malloc(size_t sz) if (b == NULL) jl_throw(jl_memory_exception); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.allocd) + allocsz); - jl_atomic_store_relaxed(&ptls->gc_tls.gc_num.malloc, - jl_atomic_load_relaxed(&ptls->gc_tls.gc_num.malloc) + 1); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + allocsz); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.malloc, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.malloc) + 1); jl_batch_accum_heap_size(ptls, allocsz); #ifdef _OS_WINDOWS_ SetLastError(last_error); @@ -3936,18 +3836,6 @@ jl_value_t *jl_gc_permobj(size_t sz, void *ty) JL_NOTSAFEPOINT return jl_valueof(o); } -JL_DLLEXPORT jl_weakref_t *jl_gc_new_weakref(jl_value_t *value) -{ - jl_ptls_t ptls = jl_current_task->ptls; - return jl_gc_new_weakref_th(ptls, value); -} - -JL_DLLEXPORT jl_value_t *jl_gc_allocobj(size_t sz) -{ - jl_ptls_t ptls = jl_current_task->ptls; - return jl_gc_alloc(ptls, sz, NULL); -} - JL_DLLEXPORT int jl_gc_enable_conservative_gc_support(void) { if (jl_is_initialized()) { @@ -4075,11 +3963,6 @@ JL_DLLEXPORT size_t jl_gc_external_obj_hdr_size(void) } -JL_DLLEXPORT void * jl_gc_alloc_typed(jl_ptls_t ptls, size_t sz, void *ty) -{ - return jl_gc_alloc(ptls, sz, ty); -} - JL_DLLEXPORT void jl_gc_schedule_foreign_sweepfunc(jl_ptls_t ptls, jl_value_t *obj) { arraylist_push(&ptls->gc_tls.sweep_objs, obj); diff --git a/src/gc-stock.h b/src/gc-stock.h index 76cecf68067bf..b9a2e720f120a 100644 --- a/src/gc-stock.h +++ b/src/gc-stock.h @@ -106,12 +106,6 @@ JL_EXTENSION typedef struct _bigval_t { // must be 64-byte aligned here, in 32 & 64 bit modes } bigval_t; -// data structure for tracking malloc'd genericmemory. 
-typedef struct _mallocmemory_t { - jl_genericmemory_t *a; // lowest bit is tagged if this is aligned memory - struct _mallocmemory_t *next; -} mallocmemory_t; - // pool page metadata typedef struct _jl_gc_pagemeta_t { // next metadata structure in per-thread list diff --git a/src/gc-tls-common.h b/src/gc-tls-common.h new file mode 100644 index 0000000000000..ba36f5c1c238e --- /dev/null +++ b/src/gc-tls-common.h @@ -0,0 +1,52 @@ +// This file is a part of Julia. License is MIT: https://julialang.org/license + +// Meant to be included in "julia_threads.h" +#ifndef JL_GC_TLS_COMMON_H +#define JL_GC_TLS_COMMON_H + +#include "julia_atomics.h" + +// GC threading ------------------------------------------------------------------ + +#include "arraylist.h" + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct { + // variable for tracking weak references + small_arraylist_t weak_refs; + // live tasks started on this thread + // that are holding onto a stack from the pool + small_arraylist_t live_tasks; + + // variables for tracking malloc'd arrays + struct _mallocmemory_t *mallocarrays; + struct _mallocmemory_t *mafreelist; + +#define JL_N_STACK_POOLS 16 + small_arraylist_t free_stacks[JL_N_STACK_POOLS]; +} jl_thread_heap_common_t; + +typedef struct { + _Atomic(int64_t) allocd; + _Atomic(int64_t) pool_live_bytes; + _Atomic(uint64_t) malloc; + _Atomic(uint64_t) realloc; + _Atomic(uint64_t) poolalloc; + _Atomic(uint64_t) bigalloc; + _Atomic(int64_t) free_acc; + _Atomic(uint64_t) alloc_acc; +} jl_thread_gc_num_common_t; + +typedef struct { + jl_thread_heap_common_t heap; + jl_thread_gc_num_common_t gc_num; +} jl_gc_tls_states_common_t; + +#ifdef __cplusplus +} +#endif + +#endif // JL_GC_TLS_COMMON_H diff --git a/src/gc-tls.h b/src/gc-tls.h index 3c2cc029a6183..d82506383c501 100644 --- a/src/gc-tls.h +++ b/src/gc-tls.h @@ -21,16 +21,6 @@ typedef struct { } jl_gc_pool_t; typedef struct { - // variable for tracking weak references - small_arraylist_t weak_refs; - // live tasks started on this thread - // that are holding onto a stack from the pool - small_arraylist_t live_tasks; - - // variables for tracking malloc'd arrays - struct _mallocmemory_t *mallocarrays; - struct _mallocmemory_t *mafreelist; - // variable for tracking young (i.e. not in `GC_OLD_MARKED`/last generation) large objects struct _bigval_t *young_generation_of_bigvals; @@ -42,22 +32,8 @@ typedef struct { // variables for allocating objects from pools #define JL_GC_N_MAX_POOLS 51 // conservative. 
must be kept in sync with `src/julia_internal.h` jl_gc_pool_t norm_pools[JL_GC_N_MAX_POOLS]; - -#define JL_N_STACK_POOLS 16 - small_arraylist_t free_stacks[JL_N_STACK_POOLS]; } jl_thread_heap_t; -typedef struct { - _Atomic(int64_t) allocd; - _Atomic(int64_t) pool_live_bytes; - _Atomic(uint64_t) malloc; - _Atomic(uint64_t) realloc; - _Atomic(uint64_t) poolalloc; - _Atomic(uint64_t) bigalloc; - _Atomic(int64_t) free_acc; - _Atomic(uint64_t) alloc_acc; -} jl_thread_gc_num_t; - typedef struct { ws_queue_t chunk_queue; ws_queue_t ptr_queue; @@ -78,7 +54,6 @@ typedef struct { typedef struct { jl_thread_heap_t heap; jl_gc_page_stack_t page_metadata_allocd; - jl_thread_gc_num_t gc_num; jl_gc_markqueue_t mark_queue; jl_gc_mark_cache_t gc_cache; _Atomic(size_t) gc_sweeps_requested; diff --git a/src/julia_internal.h b/src/julia_internal.h index bb8169c6e5f9e..4f735029da444 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -372,6 +372,8 @@ extern jl_function_t *jl_typeinf_func JL_GLOBALLY_ROOTED; extern JL_DLLEXPORT size_t jl_typeinf_world; extern _Atomic(jl_typemap_entry_t*) call_cache[N_CALL_CACHE] JL_GLOBALLY_ROOTED; +void free_stack(void *stkbuf, size_t bufsz) JL_NOTSAFEPOINT; + JL_DLLEXPORT extern int jl_lineno; JL_DLLEXPORT extern const char *jl_filename; @@ -518,30 +520,6 @@ STATIC_INLINE uint8_t JL_CONST_FUNC jl_gc_szclass_align8(unsigned sz) JL_NOTSAFE #define GC_MAX_SZCLASS (2032-sizeof(void*)) static_assert(ARRAY_CACHE_ALIGN_THRESHOLD > GC_MAX_SZCLASS, ""); - -// Size does NOT include the type tag!! -STATIC_INLINE jl_value_t *jl_gc_alloc_(jl_ptls_t ptls, size_t sz, void *ty) -{ - jl_value_t *v; - const size_t allocsz = sz + sizeof(jl_taggedvalue_t); - if (sz <= GC_MAX_SZCLASS) { - int pool_id = jl_gc_szclass(allocsz); - jl_gc_pool_t *p = &ptls->gc_tls.heap.norm_pools[pool_id]; - int osize = jl_gc_sizeclasses[pool_id]; - // We call `jl_gc_small_alloc_noinline` instead of `jl_gc_small_alloc` to avoid double-counting in - // the Allocations Profiler. (See https://github.com/JuliaLang/julia/pull/43868 for more details.) - v = jl_gc_small_alloc_noinline(ptls, (char*)p - (char*)ptls, osize); - } - else { - if (allocsz < sz) // overflow in adding offs, size was "negative" - jl_throw(jl_memory_exception); - v = jl_gc_big_alloc_noinline(ptls, allocsz); - } - jl_set_typeof(v, ty); - maybe_record_alloc_to_profile(v, sz, (jl_datatype_t*)ty); - return v; -} - /* Programming style note: When using jl_gc_alloc, do not JL_GC_PUSH it into a * gc frame, until it has been fully initialized. An uninitialized value in a * gc frame can crash upon encountering the first safepoint. 
By delaying use of diff --git a/src/julia_threads.h b/src/julia_threads.h index 17e8d7d466044..67da2978b4267 100644 --- a/src/julia_threads.h +++ b/src/julia_threads.h @@ -5,6 +5,7 @@ #define JL_THREADS_H #include "gc-tls.h" +#include "gc-tls-common.h" #include "julia_atomics.h" #ifndef _OS_WINDOWS_ #include "pthread.h" @@ -155,6 +156,7 @@ typedef struct _jl_tls_states_t { // Counter to disable finalizer **on the current thread** int finalizers_inhibited; jl_gc_tls_states_t gc_tls; // this is very large, and the offset of the first member is baked into codegen + jl_gc_tls_states_common_t gc_tls_common; // common tls for both GCs volatile sig_atomic_t defer_signal; _Atomic(struct _jl_task_t*) current_task; struct _jl_task_t *next_task; diff --git a/src/scheduler.c b/src/scheduler.c index bb2f85b52283f..7e23f654c2566 100644 --- a/src/scheduler.c +++ b/src/scheduler.c @@ -80,10 +80,6 @@ JL_DLLEXPORT int jl_set_task_threadpoolid(jl_task_t *task, int8_t tpid) JL_NOTSA return 1; } -// GC functions used -extern int jl_gc_mark_queue_obj_explicit(jl_gc_mark_cache_t *gc_cache, - jl_gc_markqueue_t *mq, jl_value_t *obj) JL_NOTSAFEPOINT; - // initialize the threading infrastructure // (called only by the main thread) void jl_init_threadinginfra(void) diff --git a/src/stackwalk.c b/src/stackwalk.c index 7c6f946fe73c5..770daa8bf17a6 100644 --- a/src/stackwalk.c +++ b/src/stackwalk.c @@ -5,7 +5,7 @@ utilities for walking the stack and looking up information about code addresses */ #include -#include "gc-stock.h" +#include "gc-common.h" #include "julia.h" #include "julia_internal.h" #include "threading.h" @@ -1340,18 +1340,14 @@ JL_DLLEXPORT void jl_print_task_backtraces(int show_done) JL_NOTSAFEPOINT jl_ptls_t *allstates = jl_atomic_load_relaxed(&jl_all_tls_states); for (size_t i = 0; i < nthreads; i++) { jl_ptls_t ptls2 = allstates[i]; - if (gc_is_parallel_collector_thread(i)) { - jl_safe_printf("==== Skipping backtrace for parallel GC thread %zu\n", i + 1); - continue; - } - if (gc_is_concurrent_collector_thread(i)) { - jl_safe_printf("==== Skipping backtrace for concurrent GC thread %zu\n", i + 1); + if (gc_is_collector_thread(i)) { + jl_safe_printf("==== Skipping backtrace for parallel/concurrent GC thread %zu\n", i + 1); continue; } if (ptls2 == NULL) { continue; } - small_arraylist_t *live_tasks = &ptls2->gc_tls.heap.live_tasks; + small_arraylist_t *live_tasks = &ptls2->gc_tls_common.heap.live_tasks; size_t n = mtarraylist_length(live_tasks); int t_state = JL_TASK_STATE_DONE; jl_task_t *t = ptls2->root_task; From e08280a24fba1b5f1ecf5dce7d8b974d880dae5a Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Sat, 19 Oct 2024 21:04:19 -0400 Subject: [PATCH 255/537] Few more tests for AbstractChar (#56249) --- test/char.jl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/test/char.jl b/test/char.jl index 3100add0e81c5..5523125529031 100644 --- a/test/char.jl +++ b/test/char.jl @@ -288,6 +288,10 @@ Base.codepoint(c::ASCIIChar) = reinterpret(UInt8, c) @test string(ASCIIChar('x')) == "x" @test length(ASCIIChar('x')) == 1 @test !isempty(ASCIIChar('x')) + @test ndims(ASCIIChar('x')) == 0 + @test ndims(ASCIIChar) == 0 + @test firstindex(ASCIIChar('x')) == 1 + @test lastindex(ASCIIChar('x')) == 1 @test eltype(ASCIIChar) == ASCIIChar @test_throws MethodError write(IOBuffer(), ASCIIChar('x')) @test_throws MethodError read(IOBuffer('x'), ASCIIChar) From 1fd7ada972911c181750d104604487a35ae3bea9 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Sun, 20 Oct 2024 13:12:44 -0400 Subject: [PATCH 256/537] REPL: 
run repl hint generation for modeswitch chars when not switching (#56251) Fixes https://github.com/JuliaLang/julia/issues/56003 --- stdlib/REPL/src/REPL.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl index b8f850c3e9ff9..ac791327e2d75 100644 --- a/stdlib/REPL/src/REPL.jl +++ b/stdlib/REPL/src/REPL.jl @@ -1431,6 +1431,7 @@ function setup_interface( end else edit_insert(s, ';') + LineEdit.check_for_hint(s) && LineEdit.refresh_line(s) end end, '?' => function (s::MIState,o...) @@ -1441,6 +1442,7 @@ function setup_interface( end else edit_insert(s, '?') + LineEdit.check_for_hint(s) && LineEdit.refresh_line(s) end end, ']' => function (s::MIState,o...) @@ -1477,6 +1479,7 @@ function setup_interface( Base.errormonitor(t_replswitch) else edit_insert(s, ']') + LineEdit.check_for_hint(s) && LineEdit.refresh_line(s) end end, From a4a4b954450467d40e6d4dc22b17269da1eb5337 Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Sun, 20 Oct 2024 21:01:52 -0400 Subject: [PATCH 257/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=2027c1b1ee5=20to=20799dc2d54=20(#56257)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stdlib: Pkg URL: https://github.com/JuliaLang/Pkg.jl.git Stdlib branch: master Julia branch: master Old commit: 27c1b1ee5 New commit: 799dc2d54 Julia version: 1.12.0-DEV Pkg version: 1.12.0 Bump invoked by: @IanButterworth Powered by: [BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl) Diff: https://github.com/JuliaLang/Pkg.jl/compare/27c1b1ee5cf15571eb5e54707e812d646ac1dde3...799dc2d54c4e809b9779de8c604564a5b3befaa0 ``` $ git log --oneline 27c1b1ee5..799dc2d54 799dc2d54 REPLExt: use Base.isaccessibledir rather than isdir in completions (#4053) 3fde94ee9 REPLExt: run repl hint generation for modeswitch chars when not switching (#4054) ``` Co-authored-by: Dilum Aluthge --- .../Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 | 1 - .../Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 | 1 - .../Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 | 1 + .../Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 | 1 + stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 create mode 100644 deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 create mode 100644 deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 diff --git a/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 b/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 deleted file mode 100644 index 137460d1a05a1..0000000000000 --- a/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -74d656c054c1406a7e88910d673019f7 diff --git a/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 b/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 deleted file mode 100644 index 0b8463176a867..0000000000000 --- a/deps/checksums/Pkg-27c1b1ee5cf15571eb5e54707e812d646ac1dde3.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -a8e589ce68cc14883a7a21f68862695bfaa9ab38dfa0e704c32aaa801667708af0d851a41199ad09ae81a4c0b928befb680d639c1eca3377ce2db2dcc34b98e5 diff --git 
a/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 b/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 new file mode 100644 index 0000000000000..7c0bfbf62bd6e --- /dev/null +++ b/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 @@ -0,0 +1 @@ +6fce8506a1701acdcbc4888250eeb86a diff --git a/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 b/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 new file mode 100644 index 0000000000000..06e3ea9c8dfa7 --- /dev/null +++ b/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 @@ -0,0 +1 @@ +e251745da221a82f3ec5e21a76c29df0b695dc4028ee2c719373c08637050318db7b543c9d40074314fc3495738d39fd8af5a7954e8b72695df44e25e395f883 diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index 470acefbc6c83..c29c83fce4046 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = 27c1b1ee5cf15571eb5e54707e812d646ac1dde3 +PKG_SHA1 = 799dc2d54c4e809b9779de8c604564a5b3befaa0 PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From fee8090fa86fb0529ae59fe3e3d339c60e2b90a8 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Mon, 21 Oct 2024 02:45:47 -0300 Subject: [PATCH 258/537] Make isbitstypes use memmove instead of the runtime function in copyto! (#56237) This might help llvm understand whats going on. Also enzyme really wants this to look like this to trace through it better. --------- Co-authored-by: Jameson Nash --- base/genericmemory.jl | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/base/genericmemory.jl b/base/genericmemory.jl index 89861444d9652..de1fc668333f5 100644 --- a/base/genericmemory.jl +++ b/base/genericmemory.jl @@ -118,7 +118,17 @@ function unsafe_copyto!(dest::MemoryRef{T}, src::MemoryRef{T}, n) where {T} @_terminates_globally_notaskstate_meta n == 0 && return dest @boundscheck memoryref(dest, n), memoryref(src, n) - ccall(:jl_genericmemory_copyto, Cvoid, (Any, Ptr{Cvoid}, Any, Ptr{Cvoid}, Int), dest.mem, dest.ptr_or_offset, src.mem, src.ptr_or_offset, Int(n)) + if isbitstype(T) + tdest = @_gc_preserve_begin dest + tsrc = @_gc_preserve_begin src + pdest = unsafe_convert(Ptr{Cvoid}, dest) + psrc = unsafe_convert(Ptr{Cvoid}, src) + memmove(pdest, psrc, aligned_sizeof(T) * n) + @_gc_preserve_end tdest + @_gc_preserve_end tsrc + else + ccall(:jl_genericmemory_copyto, Cvoid, (Any, Ptr{Cvoid}, Any, Ptr{Cvoid}, Int), dest.mem, dest.ptr_or_offset, src.mem, src.ptr_or_offset, Int(n)) + end return dest end From b01095e0274bf078e162379a9f243197710053ff Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 21 Oct 2024 11:28:56 +0530 Subject: [PATCH 259/537] Fix kron indexing for types without a unique zero (#56229) This fixes a bug introduced in https://github.com/JuliaLang/julia/pull/55941. We may also take this opportunity to limit the scope of the `@inbounds` annotations, and also use `axes` to compute the bounds instead of hard-coding them. The real "fix" here is on line 767, where `l in 1:nA` should have been `l in 1:mB`. Using `axes` avoids such errors, and makes the operation safer as well. 
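For illustration, a minimal sketch of the case the corrected bounds have to handle, reusing the block-matrix setup from the test updated below (nothing here goes beyond what that test exercises; the enlarged `Diagonal(1:4)` makes `nA` and `mB` differ, which is what exposes the old `1:nA` bound):

```julia
using LinearAlgebra

# Blocks of unequal sizes: the elements have no unique, size-independent zero,
# so the structurally-zero blocks of the result cannot simply be zero-filled and
# are instead computed as `A[i,j] * B[l,k]`, with `l` running over `axes(B, 1)`.
D = Diagonal(1:4)
B = reshape([ones(2,2), ones(3,2), ones(2,3), ones(3,3)], 2, 2)

# The updated tests check that both orders agree with the dense fallback:
kron(D, B) == kron(Array(D), B)
kron(B, D) == kron(B, Array(D))
```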
--- stdlib/LinearAlgebra/src/diagonal.jl | 50 +++++++++++++-------------- stdlib/LinearAlgebra/test/diagonal.jl | 4 +-- 2 files changed, 27 insertions(+), 27 deletions(-) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 17ff232f5b262..6e8ce96259fc1 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -700,16 +700,16 @@ end zerofilled = true end end - @inbounds for i = 1:nA, j = 1:nB + for i in eachindex(valA), j in eachindex(valB) idx = (i-1)*nB+j - C[idx, idx] = valA[i] * valB[j] + @inbounds C[idx, idx] = valA[i] * valB[j] end if !zerofilled - for j in 1:nA, i in 1:mA + for j in axes(A,2), i in axes(A,1) Δrow, Δcol = (i-1)*mB, (j-1)*nB - for k in 1:nB, l in 1:mB + for k in axes(B,2), l in axes(B,1) i == j && k == l && continue - C[Δrow + l, Δcol + k] = A[i,j] * B[l,k] + @inbounds C[Δrow + l, Δcol + k] = A[i,j] * B[l,k] end end end @@ -749,24 +749,24 @@ end end end m = 1 - @inbounds for j = 1:nA - A_jj = A[j,j] - for k = 1:nB - for l = 1:mB - C[m] = A_jj * B[l,k] + for j in axes(A,2) + A_jj = @inbounds A[j,j] + for k in axes(B,2) + for l in axes(B,1) + @inbounds C[m] = A_jj * B[l,k] m += 1 end m += (nA - 1) * mB end if !zerofilled # populate the zero elements - for i in 1:mA + for i in axes(A,1) i == j && continue - A_ij = A[i, j] + A_ij = @inbounds A[i, j] Δrow, Δcol = (i-1)*mB, (j-1)*nB - for k in 1:nB, l in 1:nA - B_lk = B[l, k] - C[Δrow + l, Δcol + k] = A_ij * B_lk + for k in axes(B,2), l in axes(B,1) + B_lk = @inbounds B[l, k] + @inbounds C[Δrow + l, Δcol + k] = A_ij * B_lk end end end @@ -792,23 +792,23 @@ end end end m = 1 - @inbounds for j = 1:nA - for l = 1:mB - Bll = B[l,l] - for i = 1:mA - C[m] = A[i,j] * Bll + for j in axes(A,2) + for l in axes(B,1) + Bll = @inbounds B[l,l] + for i in axes(A,1) + @inbounds C[m] = A[i,j] * Bll m += nB end m += 1 end if !zerofilled - for i in 1:mA - A_ij = A[i, j] + for i in axes(A,1) + A_ij = @inbounds A[i, j] Δrow, Δcol = (i-1)*mB, (j-1)*nB - for k in 1:nB, l in 1:mB + for k in axes(B,2), l in axes(B,1) l == k && continue - B_lk = B[l, k] - C[Δrow + l, Δcol + k] = A_ij * B_lk + B_lk = @inbounds B[l, k] + @inbounds C[Δrow + l, Δcol + k] = A_ij * B_lk end end end diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 8b56ee15e56e3..1c3a9dfa676ac 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -353,7 +353,7 @@ Random.seed!(1) D3 = Diagonal(convert(Vector{elty}, rand(n÷2))) DM3= Matrix(D3) @test Matrix(kron(D, D3)) ≈ kron(DM, DM3) - M4 = rand(elty, n÷2, n÷2) + M4 = rand(elty, size(D3,1) + 1, size(D3,2) + 2) # choose a different size from D3 @test kron(D3, M4) ≈ kron(DM3, M4) @test kron(M4, D3) ≈ kron(M4, DM3) X = [ones(1,1) for i in 1:2, j in 1:2] @@ -1392,7 +1392,7 @@ end end @testset "zeros in kron with block matrices" begin - D = Diagonal(1:2) + D = Diagonal(1:4) B = reshape([ones(2,2), ones(3,2), ones(2,3), ones(3,3)], 2, 2) @test kron(D, B) == kron(Array(D), B) @test kron(B, D) == kron(B, Array(D)) From 04259daf8a339f99d7cd8503d9c1f154b247e4e1 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 21 Oct 2024 11:31:36 +0530 Subject: [PATCH 260/537] Reroute` (Upper/Lower)Triangular * Diagonal` through `__muldiag` (#55984) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, `::Diagonal * ::AbstractMatrix` calls the method `LinearAlgebra.__muldiag!` in general that scales the rows, and similarly for the diagonal 
on the right. The implementation of `__muldiag` was duplicating the logic in `LinearAlgebra.modify!` and the methods for `MulAddMul`. This PR replaces the various branches with calls to `modify!` instead. I've also extracted the multiplication logic into its own function `__muldiag_nonzeroalpha!` so that this may be specialized for matrix types, such as triangular ones. Secondly, `::Diagonal * ::UpperTriangular` (and similarly, other triangular matrices) was specialized to forward the multiplication to the parent of the triangular. For strided matrices, however, it makes more sense to use the structure and scale only the filled half of the matrix. Firstly, this improves performance, and secondly, this avoids errors in case the parent isn't fully initialized corresponding to the structural zero elements. Performance improvement: ```julia julia> D = Diagonal(1:400); julia> U = UpperTriangular(zeros(size(D))); julia> @btime $D * $U; 314.944 μs (3 allocations: 1.22 MiB) # v"1.12.0-DEV.1288" 195.960 μs (3 allocations: 1.22 MiB) # This PR ``` Fix: ```julia julia> M = Matrix{BigFloat}(undef, 2, 2); julia> M[1,1] = M[2,2] = M[1,2] = 3; julia> U = UpperTriangular(M) 2×2 UpperTriangular{BigFloat, Matrix{BigFloat}}: 3.0 3.0 ⋅ 3.0 julia> D = Diagonal(1:2); julia> U * D # works after this PR 2×2 UpperTriangular{BigFloat, Matrix{BigFloat}}: 3.0 6.0 ⋅ 6.0 ``` --- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 + stdlib/LinearAlgebra/src/diagonal.jl | 158 +++++++++++++--------- stdlib/LinearAlgebra/test/diagonal.jl | 40 +++++- 3 files changed, 134 insertions(+), 66 deletions(-) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 15354603943c2..88fc3476c9d7f 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -655,6 +655,8 @@ matprod_dest(A::StructuredMatrix, B::Diagonal, TS) = _matprod_dest_diag(A, TS) matprod_dest(A::Diagonal, B::StructuredMatrix, TS) = _matprod_dest_diag(B, TS) matprod_dest(A::Diagonal, B::Diagonal, TS) = _matprod_dest_diag(B, TS) _matprod_dest_diag(A, TS) = similar(A, TS) +_matprod_dest_diag(A::UnitUpperTriangular, TS) = UpperTriangular(similar(parent(A), TS)) +_matprod_dest_diag(A::UnitLowerTriangular, TS) = LowerTriangular(similar(parent(A), TS)) function _matprod_dest_diag(A::SymTridiagonal, TS) n = size(A, 1) ev = similar(A, TS, max(0, n-1)) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 6e8ce96259fc1..8ba4c3d457e83 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -396,82 +396,120 @@ function lmul!(D::Diagonal, T::Tridiagonal) return T end -function __muldiag!(out, D::Diagonal, B, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} +@inline function __muldiag_nonzeroalpha!(out, D::Diagonal, B, _add::MulAddMul) + @inbounds for j in axes(B, 2) + @simd for i in axes(B, 1) + _modify!(_add, D.diag[i] * B[i,j], out, (i,j)) + end + end + out +end +_maybe_unwrap_tri(out, A) = out, A +_maybe_unwrap_tri(out::UpperTriangular, A::UpperOrUnitUpperTriangular) = parent(out), parent(A) +_maybe_unwrap_tri(out::LowerTriangular, A::LowerOrUnitLowerTriangular) = parent(out), parent(A) +@inline function __muldiag_nonzeroalpha!(out, D::Diagonal, B::UpperOrLowerTriangular, _add::MulAddMul) + isunit = B isa Union{UnitUpperTriangular, UnitLowerTriangular} + # if both B and out have the same upper/lower triangular structure, + # we may directly read and write from the parents + out_maybeparent, B_maybeparent = 
_maybe_unwrap_tri(out, B) + for j in axes(B, 2) + if isunit + _modify!(_add, D.diag[j] * B[j,j], out, (j,j)) + end + rowrange = B isa UpperOrUnitUpperTriangular ? (1:min(j-isunit, size(B,1))) : (j+isunit:size(B,1)) + @inbounds @simd for i in rowrange + _modify!(_add, D.diag[i] * B_maybeparent[i,j], out_maybeparent, (i,j)) + end + end + out +end +function __muldiag!(out, D::Diagonal, B, _add::MulAddMul) require_one_based_indexing(out, B) alpha, beta = _add.alpha, _add.beta if iszero(alpha) _rmul_or_fill!(out, beta) else - if bis0 - @inbounds for j in axes(B, 2) - @simd for i in axes(B, 1) - out[i,j] = D.diag[i] * B[i,j] * alpha - end - end - else - @inbounds for j in axes(B, 2) - @simd for i in axes(B, 1) - out[i,j] = D.diag[i] * B[i,j] * alpha + out[i,j] * beta - end - end - end + __muldiag_nonzeroalpha!(out, D, B, _add) end return out end -function __muldiag!(out, A, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} + +@inline function __muldiag_nonzeroalpha!(out, A, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} + beta = _add.beta + _add_aisone = MulAddMul{true,bis0,Bool,typeof(beta)}(true, beta) + @inbounds for j in axes(A, 2) + dja = _add(D.diag[j]) + @simd for i in axes(A, 1) + _modify!(_add_aisone, A[i,j] * dja, out, (i,j)) + end + end + out +end +@inline function __muldiag_nonzeroalpha!(out, A::UpperOrLowerTriangular, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} + isunit = A isa Union{UnitUpperTriangular, UnitLowerTriangular} + beta = _add.beta + # since alpha is multiplied to the diagonal element of D, + # we may skip alpha in the second multiplication by setting ais1 to true + _add_aisone = MulAddMul{true,bis0,Bool,typeof(beta)}(true, beta) + # if both A and out have the same upper/lower triangular structure, + # we may directly read and write from the parents + out_maybeparent, A_maybeparent = _maybe_unwrap_tri(out, A) + @inbounds for j in axes(A, 2) + dja = _add(D.diag[j]) + if isunit + _modify!(_add_aisone, A[j,j] * dja, out, (j,j)) + end + rowrange = A isa UpperOrUnitUpperTriangular ? 
(1:min(j-isunit, size(A,1))) : (j+isunit:size(A,1)) + @simd for i in rowrange + _modify!(_add_aisone, A_maybeparent[i,j] * dja, out_maybeparent, (i,j)) + end + end + out +end +function __muldiag!(out, A, D::Diagonal, _add::MulAddMul) require_one_based_indexing(out, A) alpha, beta = _add.alpha, _add.beta if iszero(alpha) _rmul_or_fill!(out, beta) else - if bis0 - @inbounds for j in axes(A, 2) - dja = D.diag[j] * alpha - @simd for i in axes(A, 1) - out[i,j] = A[i,j] * dja - end - end - else - @inbounds for j in axes(A, 2) - dja = D.diag[j] * alpha - @simd for i in axes(A, 1) - out[i,j] = A[i,j] * dja + out[i,j] * beta - end - end - end + __muldiag_nonzeroalpha!(out, A, D, _add) end return out end -function __muldiag!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} + +@inline function __muldiag_nonzeroalpha!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul) d1 = D1.diag d2 = D2.diag + outd = out.diag + @inbounds @simd for i in eachindex(d1, d2, outd) + _modify!(_add, d1[i] * d2[i], outd, i) + end + out +end +function __muldiag!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul) alpha, beta = _add.alpha, _add.beta if iszero(alpha) _rmul_or_fill!(out.diag, beta) else - if bis0 - @inbounds @simd for i in eachindex(out.diag) - out.diag[i] = d1[i] * d2[i] * alpha - end - else - @inbounds @simd for i in eachindex(out.diag) - out.diag[i] = d1[i] * d2[i] * alpha + out.diag[i] * beta - end - end + __muldiag_nonzeroalpha!(out, D1, D2, _add) end return out end -function __muldiag!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - require_one_based_indexing(out) - alpha, beta = _add.alpha, _add.beta - mA = size(D1, 1) +@inline function __muldiag_nonzeroalpha!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul) d1 = D1.diag d2 = D2.diag + @inbounds @simd for i in eachindex(d1, d2) + _modify!(_add, d1[i] * d2[i], out, (i,i)) + end + out +end +function __muldiag!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul{ais1}) where {ais1} + require_one_based_indexing(out) + alpha, beta = _add.alpha, _add.beta _rmul_or_fill!(out, beta) if !iszero(alpha) - @inbounds @simd for i in 1:mA - out[i,i] += d1[i] * d2[i] * alpha - end + _add_bis1 = MulAddMul{ais1,false,typeof(alpha),Bool}(alpha,true) + __muldiag_nonzeroalpha!(out, D1, D2, _add_bis1) end return out end @@ -658,31 +696,21 @@ for Tri in (:UpperTriangular, :LowerTriangular) @eval $fun(A::$Tri, D::Diagonal) = $Tri($fun(A.data, D)) @eval $fun(A::$UTri, D::Diagonal) = $Tri(_setdiag!($fun(A.data, D), $f, D.diag)) end + @eval *(A::$Tri{<:Any, <:StridedMaybeAdjOrTransMat}, D::Diagonal) = + @invoke *(A::AbstractMatrix, D::Diagonal) + @eval *(A::$UTri{<:Any, <:StridedMaybeAdjOrTransMat}, D::Diagonal) = + @invoke *(A::AbstractMatrix, D::Diagonal) for (fun, f) in zip((:*, :lmul!, :ldiv!, :\), (:identity, :identity, :inv, :inv)) @eval $fun(D::Diagonal, A::$Tri) = $Tri($fun(D, A.data)) @eval $fun(D::Diagonal, A::$UTri) = $Tri(_setdiag!($fun(D, A.data), $f, D.diag)) end + @eval *(D::Diagonal, A::$Tri{<:Any, <:StridedMaybeAdjOrTransMat}) = + @invoke *(D::Diagonal, A::AbstractMatrix) + @eval *(D::Diagonal, A::$UTri{<:Any, <:StridedMaybeAdjOrTransMat}) = + @invoke *(D::Diagonal, A::AbstractMatrix) # 3-arg ldiv! @eval ldiv!(C::$Tri, D::Diagonal, A::$Tri) = $Tri(ldiv!(C.data, D, A.data)) @eval ldiv!(C::$Tri, D::Diagonal, A::$UTri) = $Tri(_setdiag!(ldiv!(C.data, D, A.data), inv, D.diag)) - # 3-arg mul! is disambiguated in special.jl - # 5-arg mul! 
- @eval _mul!(C::$Tri, D::Diagonal, A::$Tri, _add) = $Tri(mul!(C.data, D, A.data, _add.alpha, _add.beta)) - @eval function _mul!(C::$Tri, D::Diagonal, A::$UTri, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - α, β = _add.alpha, _add.beta - iszero(α) && return _rmul_or_fill!(C, β) - diag′ = bis0 ? nothing : diag(C) - data = mul!(C.data, D, A.data, α, β) - $Tri(_setdiag!(data, _add, D.diag, diag′)) - end - @eval _mul!(C::$Tri, A::$Tri, D::Diagonal, _add) = $Tri(mul!(C.data, A.data, D, _add.alpha, _add.beta)) - @eval function _mul!(C::$Tri, A::$UTri, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - α, β = _add.alpha, _add.beta - iszero(α) && return _rmul_or_fill!(C, β) - diag′ = bis0 ? nothing : diag(C) - data = mul!(C.data, A.data, D, α, β) - $Tri(_setdiag!(data, _add, D.diag, diag′)) - end end @inline function kron!(C::AbstractMatrix, A::Diagonal, B::Diagonal) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 1c3a9dfa676ac..380a0465028d1 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -1188,7 +1188,7 @@ end @test oneunit(D3) isa typeof(D3) end -@testset "AbstractTriangular" for (Tri, UTri) in ((UpperTriangular, UnitUpperTriangular), (LowerTriangular, UnitLowerTriangular)) +@testset "$Tri" for (Tri, UTri) in ((UpperTriangular, UnitUpperTriangular), (LowerTriangular, UnitLowerTriangular)) A = randn(4, 4) TriA = Tri(A) UTriA = UTri(A) @@ -1218,6 +1218,44 @@ end @test outTri === mul!(outTri, D, UTriA, 2, 1)::Tri == mul!(out, D, Matrix(UTriA), 2, 1) @test outTri === mul!(outTri, TriA, D, 2, 1)::Tri == mul!(out, Matrix(TriA), D, 2, 1) @test outTri === mul!(outTri, UTriA, D, 2, 1)::Tri == mul!(out, Matrix(UTriA), D, 2, 1) + + # we may write to a Unit triangular if the diagonal is preserved + ID = Diagonal(ones(size(UTriA,2))) + @test mul!(copy(UTriA), UTriA, ID) == UTriA + @test mul!(copy(UTriA), ID, UTriA) == UTriA + + @testset "partly filled parents" begin + M = Matrix{BigFloat}(undef, 2, 2) + M[1,1] = M[2,2] = 3 + isupper = Tri == UpperTriangular + M[1+!isupper, 1+isupper] = 3 + D = Diagonal(1:2) + T = Tri(M) + TA = Array(T) + @test T * D == TA * D + @test D * T == D * TA + @test mul!(copy(T), T, D, 2, 3) == 2T * D + 3T + @test mul!(copy(T), D, T, 2, 3) == 2D * T + 3T + + U = UTri(M) + UA = Array(U) + @test U * D == UA * D + @test D * U == D * UA + @test mul!(copy(T), U, D, 2, 3) == 2 * UA * D + 3TA + @test mul!(copy(T), D, U, 2, 3) == 2 * D * UA + 3TA + + M2 = Matrix{BigFloat}(undef, 2, 2) + M2[1+!isupper, 1+isupper] = 3 + U = UTri(M2) + UA = Array(U) + @test U * D == UA * D + @test D * U == D * UA + ID = Diagonal(ones(size(U,2))) + @test mul!(copy(U), U, ID) == U + @test mul!(copy(U), ID, U) == U + @test mul!(copy(U), U, ID, 2, -1) == U + @test mul!(copy(U), ID, U, 2, -1) == U + end end struct SMatrix1{T} <: AbstractArray{T,2} From e3f2f6b9c69293da68c1019366f8397d2049c6d8 Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Fri, 18 Oct 2024 14:42:23 +0200 Subject: [PATCH 261/537] Revert "Extensions: make loading of extensions independent of what packages are in the sysimage (#52841)" This reverts commit 08d229f4a7cb0c3ef5becddd1b3bc4f8f178b8e4. 
--- base/Base.jl | 1 - base/loading.jl | 9 +++------ test/loading.jl | 19 ------------------- .../HasDepWithExtensions.jl/Manifest.toml | 7 +------ .../Extensions/HasExtensions.jl/Project.toml | 2 -- .../HasExtensions.jl/ext/LinearAlgebraExt.jl | 3 --- 6 files changed, 4 insertions(+), 37 deletions(-) delete mode 100644 test/project/Extensions/HasExtensions.jl/ext/LinearAlgebraExt.jl diff --git a/base/Base.jl b/base/Base.jl index 9800462f855f9..5fb764bd4cc01 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -646,7 +646,6 @@ function __init__() init_load_path() init_active_project() append!(empty!(_sysimage_modules), keys(loaded_modules)) - empty!(explicit_loaded_modules) empty!(loaded_precompiles) # If we load a packageimage when building the image this might not be empty for (mod, key) in module_keys push!(get!(Vector{Module}, loaded_precompiles, key), mod) diff --git a/base/loading.jl b/base/loading.jl index db6a681bb2a5b..19fcaba388d11 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1554,7 +1554,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} # TODO: Better error message if this lookup fails? uuid_trigger = UUID(totaldeps[trigger]::String) trigger_id = PkgId(uuid_trigger, trigger) - if !haskey(explicit_loaded_modules, trigger_id) || haskey(package_locks, trigger_id) + if !haskey(Base.loaded_modules, trigger_id) || haskey(package_locks, trigger_id) trigger1 = get!(Vector{ExtensionId}, EXT_DORMITORY, trigger_id) push!(trigger1, gid) else @@ -2430,9 +2430,8 @@ function __require_prelocked(uuidkey::PkgId, env=nothing) insert_extension_triggers(uuidkey) # After successfully loading, notify downstream consumers run_package_callbacks(uuidkey) - elseif !haskey(explicit_loaded_modules, uuidkey) - explicit_loaded_modules[uuidkey] = m - run_package_callbacks(uuidkey) + else + newm = root_module(uuidkey) end return m end @@ -2445,7 +2444,6 @@ end PkgOrigin() = PkgOrigin(nothing, nothing, nothing) const pkgorigins = Dict{PkgId,PkgOrigin}() -const explicit_loaded_modules = Dict{PkgId,Module}() # Emptied on Julia start const loaded_modules = Dict{PkgId,Module}() # available to be explicitly loaded const loaded_precompiles = Dict{PkgId,Vector{Module}}() # extended (complete) list of modules, available to be loaded const loaded_modules_order = Vector{Module}() @@ -2485,7 +2483,6 @@ end end maybe_loaded_precompile(key, module_build_id(m)) === nothing && push!(loaded_modules_order, m) loaded_modules[key] = m - explicit_loaded_modules[key] = m module_keys[m] = key end nothing diff --git a/test/loading.jl b/test/loading.jl index ec4a0391a412a..ecba64ca45a73 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -1129,25 +1129,6 @@ end run(cmd_proj_ext) end - # Sysimage extensions - # The test below requires that LinearAlgebra is in the sysimage and that it has not been loaded yet. - # if it gets moved out, this test will need to be updated. 
- # We run this test in a new process so we are not vulnerable to a previous test having loaded LinearAlgebra - sysimg_ext_test_code = """ - uuid_key = Base.PkgId(Base.UUID("37e2e46d-f89d-539d-b4ee-838fcccc9c8e"), "LinearAlgebra") - Base.in_sysimage(uuid_key) || error("LinearAlgebra not in sysimage") - haskey(Base.explicit_loaded_modules, uuid_key) && error("LinearAlgebra already loaded") - using HasExtensions - Base.get_extension(HasExtensions, :LinearAlgebraExt) === nothing || error("unexpectedly got an extension") - using LinearAlgebra - haskey(Base.explicit_loaded_modules, uuid_key) || error("LinearAlgebra not loaded") - Base.get_extension(HasExtensions, :LinearAlgebraExt) isa Module || error("expected extension to load") - """ - cmd = `$(Base.julia_cmd()) --startup-file=no -e $sysimg_ext_test_code` - cmd = addenv(cmd, "JULIA_LOAD_PATH" => join([proj, "@stdlib"], sep)) - run(cmd) - - # Extensions in implicit environments old_load_path = copy(LOAD_PATH) try diff --git a/test/project/Extensions/HasDepWithExtensions.jl/Manifest.toml b/test/project/Extensions/HasDepWithExtensions.jl/Manifest.toml index 5706aba59d1e0..f659a59e0910b 100644 --- a/test/project/Extensions/HasDepWithExtensions.jl/Manifest.toml +++ b/test/project/Extensions/HasDepWithExtensions.jl/Manifest.toml @@ -25,17 +25,12 @@ deps = ["ExtDep3"] path = "../HasExtensions.jl" uuid = "4d3288b3-3afc-4bb6-85f3-489fffe514c8" version = "0.1.0" +weakdeps = ["ExtDep", "ExtDep2"] [deps.HasExtensions.extensions] Extension = "ExtDep" ExtensionDep = "ExtDep3" ExtensionFolder = ["ExtDep", "ExtDep2"] - LinearAlgebraExt = "LinearAlgebra" - - [deps.HasExtensions.weakdeps] - ExtDep = "fa069be4-f60b-4d4c-8b95-f8008775090c" - ExtDep2 = "55982ee5-2ad5-4c40-8cfe-5e9e1b01500d" - LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.SomePackage]] path = "../SomePackage" diff --git a/test/project/Extensions/HasExtensions.jl/Project.toml b/test/project/Extensions/HasExtensions.jl/Project.toml index fe21a1423f543..a02f5662d602d 100644 --- a/test/project/Extensions/HasExtensions.jl/Project.toml +++ b/test/project/Extensions/HasExtensions.jl/Project.toml @@ -8,10 +8,8 @@ ExtDep3 = "a5541f1e-a556-4fdc-af15-097880d743a1" [weakdeps] ExtDep = "fa069be4-f60b-4d4c-8b95-f8008775090c" ExtDep2 = "55982ee5-2ad5-4c40-8cfe-5e9e1b01500d" -LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [extensions] Extension = "ExtDep" ExtensionDep = "ExtDep3" ExtensionFolder = ["ExtDep", "ExtDep2"] -LinearAlgebraExt = "LinearAlgebra" diff --git a/test/project/Extensions/HasExtensions.jl/ext/LinearAlgebraExt.jl b/test/project/Extensions/HasExtensions.jl/ext/LinearAlgebraExt.jl deleted file mode 100644 index 19f87cb849417..0000000000000 --- a/test/project/Extensions/HasExtensions.jl/ext/LinearAlgebraExt.jl +++ /dev/null @@ -1,3 +0,0 @@ -module LinearAlgebraExt - -end From ad1dc390e3123b65433ee06a651ca6de88c29914 Mon Sep 17 00:00:00 2001 From: KristofferC Date: Mon, 21 Oct 2024 14:18:48 +0200 Subject: [PATCH 262/537] fix lookup when extension is in `[deps]` --- base/loading.jl | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 19fcaba388d11..e08d03ad513c7 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -974,14 +974,14 @@ function explicit_manifest_deps_get(project_file::String, where::PkgId, name::St entry = entry::Dict{String, Any} uuid = get(entry, "uuid", nothing)::Union{String, Nothing} uuid === nothing && continue + # deps is either a list of names (deps = ["DepA", 
"DepB"]) or + # a table of entries (deps = {"DepA" = "6ea...", "DepB" = "55d..."} + deps = get(entry, "deps", nothing)::Union{Vector{String}, Dict{String, Any}, Nothing} if UUID(uuid) === where.uuid found_where = true - # deps is either a list of names (deps = ["DepA", "DepB"]) or - # a table of entries (deps = {"DepA" = "6ea...", "DepB" = "55d..."} - deps = get(entry, "deps", nothing)::Union{Vector{String}, Dict{String, Any}, Nothing} if deps isa Vector{String} found_name = name in deps - break + found_name && @goto done elseif deps isa Dict{String, Any} deps = deps::Dict{String, Any} for (dep, uuid) in deps @@ -1000,23 +1000,25 @@ function explicit_manifest_deps_get(project_file::String, where::PkgId, name::St return PkgId(UUID(uuid), name) end exts = extensions[where.name]::Union{String, Vector{String}} + weakdeps = get(entry, "weakdeps", nothing)::Union{Vector{String}, Dict{String, Any}, Nothing} if (exts isa String && name == exts) || (exts isa Vector{String} && name in exts) - weakdeps = get(entry, "weakdeps", nothing)::Union{Vector{String}, Dict{String, Any}, Nothing} - if weakdeps !== nothing - if weakdeps isa Vector{String} - found_name = name in weakdeps - break - elseif weakdeps isa Dict{String, Any} - weakdeps = weakdeps::Dict{String, Any} - for (dep, uuid) in weakdeps - uuid::String - if dep === name - return PkgId(UUID(uuid), name) + for deps′ in [weakdeps, deps] + if deps′ !== nothing + if deps′ isa Vector{String} + found_name = name in deps′ + found_name && @goto done + elseif deps′ isa Dict{String, Any} + deps′ = deps′::Dict{String, Any} + for (dep, uuid) in deps′ + uuid::String + if dep === name + return PkgId(UUID(uuid), name) + end + end end end end end - end # `name` is not an ext, do standard lookup as if this was the parent return identify_package(PkgId(UUID(uuid), dep_name), name) end @@ -1024,6 +1026,7 @@ function explicit_manifest_deps_get(project_file::String, where::PkgId, name::St end end end + @label done found_where || return nothing found_name || return PkgId(name) # Only reach here if deps was not a dict which mean we have a unique name for the dep From 1c67d0cfdc8ab109120dc3f0720053e509a10131 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Mon, 21 Oct 2024 08:21:44 -0400 Subject: [PATCH 263/537] REPL: fix brace detection when ' is used for transpose (#56252) --- stdlib/REPL/src/REPLCompletions.jl | 9 +++++++-- stdlib/REPL/test/replcompletions.jl | 6 ++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index 5e80e17036559..42480aea91605 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -480,6 +480,7 @@ function find_start_brace(s::AbstractString; c_start='(', c_end=')') i = firstindex(r) braces = in_comment = 0 in_single_quotes = in_double_quotes = in_back_ticks = false + num_single_quotes_in_string = count('\'', s) while i <= ncodeunits(r) c, i = iterate(r, i) if c == '#' && i <= ncodeunits(r) && iterate(r, i)[1] == '=' @@ -502,7 +503,9 @@ function find_start_brace(s::AbstractString; c_start='(', c_end=')') braces += 1 elseif c == c_end braces -= 1 - elseif c == '\'' + elseif c == '\'' && num_single_quotes_in_string % 2 == 0 + # ' can be a transpose too, so check if there are even number of 's in the string + # TODO: This probably needs to be more robust in_single_quotes = true elseif c == '"' in_double_quotes = true @@ -1197,7 +1200,9 @@ function complete_identifiers!(suggestions::Vector{Completion}, if !isinfix # Handle 
infix call argument completion of the form bar + foo(qux). frange, end_of_identifier = find_start_brace(@view s[1:prevind(s, end)]) - isinfix = Meta.parse(@view(s[frange[1]:end]), raise=false, depwarn=false) == prefix.args[end] + if !isempty(frange) # if find_start_brace fails to find the brace just continue + isinfix = Meta.parse(@view(s[frange[1]:end]), raise=false, depwarn=false) == prefix.args[end] + end end if isinfix prefix = prefix.args[end] diff --git a/stdlib/REPL/test/replcompletions.jl b/stdlib/REPL/test/replcompletions.jl index 8bee70226755f..cfb9a6137a287 100644 --- a/stdlib/REPL/test/replcompletions.jl +++ b/stdlib/REPL/test/replcompletions.jl @@ -340,6 +340,12 @@ end # inexistent completion inside a cmd @test_nocompletion("run(`lol") +# issue 55856: copy(A'). errors in the REPL +let + c, r = test_complete("copy(A').") + @test isempty(c) +end + # test latex symbol completions let s = "\\alpha" c, r = test_bslashcomplete(s) From 319ee70104fa4f50b08c57fc202d5ab8d26f0d6b Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Mon, 21 Oct 2024 14:40:37 +0200 Subject: [PATCH 264/537] remove new references to explicit_loaded_modules --- base/loading.jl | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index e08d03ad513c7..ef8837b36a1ca 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -2517,9 +2517,6 @@ loaded_modules_array() = @lock require_lock copy(loaded_modules_order) # after unreference_module, a subsequent require call will try to load a new copy of it, if stale # reload(m) = (unreference_module(m); require(m)) function unreference_module(key::PkgId) - if haskey(explicit_loaded_modules, key) - m = pop!(explicit_loaded_modules, key) - end if haskey(loaded_modules, key) m = pop!(loaded_modules, key) # need to ensure all modules are GC rooted; will still be referenced @@ -3125,7 +3122,7 @@ function compilecache(pkg::PkgId, path::String, internal_stderr::IO = stderr, in # build up the list of modules that we want the precompile process to preserve if keep_loaded_modules concrete_deps = copy(_concrete_dependencies) - for (pkgreq, modreq) in loaded_modules # TODO: convert all relevant staleness heuristics to use explicit_loaded_modules instead + for (pkgreq, modreq) in loaded_modules if !(pkgreq === Main || pkgreq === Core || pkgreq === Base) push!(concrete_deps, pkgreq => module_build_id(modreq)) end From 8bdacc341a740b73d5e11b3ba548cd97bebab6c6 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 21 Oct 2024 09:17:03 -0400 Subject: [PATCH 265/537] Add basic infrastructure for binding replacement (#56224) Now that I've had a few months to recover from the slog of adding `BindingPartition`, it's time to renew my quest to finish #54654. This adds the basic infrastructure for having multiple partitions, including making the lookup respect the `world` argument - on-demand allocation of missing partitions, `Base.delete_binding` and the `@world` macro. Not included is any inference or invalidation support, or any support for the runtime to create partitions itself (only `Base.delete_binding` does that for now), which will come in subsequent PRs. 
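To make the intended usage concrete, here is a small sketch pieced together from the `@world` docstring and the `test/rebinding.jl` file added below; it is illustrative only (`RebindingDemo` is a made-up module name), and as the `delete_binding` docstring warns, the implementation is still incomplete:

```julia
# Illustrative sketch of the new API: Base.delete_binding, Base.binding_kind,
# and the Base.@world macro added in this PR.
module RebindingDemo
    struct Foo
        x::Int
    end
    const old = Foo(1)                          # an instance of the original Foo
    const old_world = Base.get_world_counter()  # world in which Foo is still defined

    # Force the binding to be undefined again; this bumps the world counter.
    Base.delete_binding(@__MODULE__, :Foo)
    @assert Base.binding_kind(@__MODULE__, :Foo) == Base.BINDING_KIND_GUARD

    # `old` survives, but its type now prints world-qualified, e.g. @world(Foo, ...)(1).
    @assert contains(repr(old), "@world")

    # The old binding can still be resolved in the world where it was valid.
    const OldFoo = Base.@world(Foo, old_world)
    @assert old isa OldFoo
end
```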
--- base/essentials.jl | 44 +++++++++++++++++ base/range.jl | 11 +++++ base/runtime_internals.jl | 27 +++++++++-- base/show.jl | 18 +++++++ src/clangsa/GCChecker.cpp | 2 + src/julia.h | 11 +++-- src/julia_internal.h | 15 ++---- src/module.c | 99 +++++++++++++++++++++++++++++++-------- src/staticdata.c | 9 ++-- src/toplevel.c | 2 + test/choosetests.jl | 2 +- test/rebinding.jl | 18 +++++++ 12 files changed, 214 insertions(+), 44 deletions(-) create mode 100644 test/rebinding.jl diff --git a/base/essentials.jl b/base/essentials.jl index 0e7be924c908c..a07aaa6769ed2 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -1250,6 +1250,50 @@ function isiterable(T)::Bool return hasmethod(iterate, Tuple{T}) end +""" + @world(sym, world) + +Resolve the binding `sym` in world `world`. See [`invoke_in_world`](@ref) for running +arbitrary code in fixed worlds. `world` may be `UnitRange`, in which case the macro +will error unless the binding is valid and has the same value across the entire world +range. + +The `@world` macro is primarily used in the priniting of bindings that are no longer available +in the current world. + +## Example +``` +julia> struct Foo; a::Int; end +Foo + +julia> fold = Foo(1) + +julia> Int(Base.get_world_counter()) +26866 + +julia> struct Foo; a::Int; b::Int end +Foo + +julia> fold +@world(Foo, 26866)(1) +``` + +!!! compat "Julia 1.12" + This functionality requires at least Julia 1.12. +""" +macro world(sym, world) + if isa(sym, Symbol) + return :($(_resolve_in_world)($world, $(QuoteNode(GlobalRef(__module__, sym))))) + elseif isa(sym, GlobalRef) + return :($(_resolve_in_world)($world, $(QuoteNode(sym)))) + else + error("`@world` requires a symbol or GlobalRef") + end +end + +_resolve_in_world(world::Integer, gr::GlobalRef) = + invoke_in_world(UInt(world), Core.getglobal, gr.mod, gr.name) + # Special constprop heuristics for various binary opes typename(typeof(function + end)).constprop_heuristic = Core.SAMETYPE_HEURISTIC typename(typeof(function - end)).constprop_heuristic = Core.SAMETYPE_HEURISTIC diff --git a/base/range.jl b/base/range.jl index 4b5d076dcf436..3301335785878 100644 --- a/base/range.jl +++ b/base/range.jl @@ -1680,3 +1680,14 @@ function show(io::IO, r::LogRange{T}) where {T} show(io, length(r)) print(io, ')') end + +# Implementation detail of @world +# The rest of this is defined in essentials.jl, but UnitRange is not available +function _resolve_in_world(worlds::UnitRange, gr::GlobalRef) + # Validate that this binding's reference covers the entire world range + bpart = lookup_binding_partition(first(worlds), gr) + if bpart.max_world < last(world) + error("Binding does not cover the full world range") + end + _resolve_in_world(last(world), gr) +end diff --git a/base/runtime_internals.jl b/base/runtime_internals.jl index 645aa55c538b4..ab867f8fcae6d 100644 --- a/base/runtime_internals.jl +++ b/base/runtime_internals.jl @@ -218,9 +218,10 @@ function _fieldnames(@nospecialize t) return t.name.names end -const BINDING_KIND_GLOBAL = 0x0 -const BINDING_KIND_CONST = 0x1 -const BINDING_KIND_CONST_IMPORT = 0x2 +# N.B.: Needs to be synced with julia.h +const BINDING_KIND_CONST = 0x0 +const BINDING_KIND_CONST_IMPORT = 0x1 +const BINDING_KIND_GLOBAL = 0x2 const BINDING_KIND_IMPLICIT = 0x3 const BINDING_KIND_EXPLICIT = 0x4 const BINDING_KIND_IMPORTED = 0x5 @@ -228,6 +229,8 @@ const BINDING_KIND_FAILED = 0x6 const BINDING_KIND_DECLARED = 0x7 const BINDING_KIND_GUARD = 0x8 +is_some_const_binding(kind::UInt8) = (kind == BINDING_KIND_CONST || kind == 
BINDING_KIND_CONST_IMPORT) + function lookup_binding_partition(world::UInt, b::Core.Binding) ccall(:jl_get_binding_partition, Ref{Core.BindingPartition}, (Any, UInt), b, world) end @@ -236,9 +239,27 @@ function lookup_binding_partition(world::UInt, gr::Core.GlobalRef) ccall(:jl_get_globalref_partition, Ref{Core.BindingPartition}, (Any, UInt), gr, world) end +partition_restriction(bpart::Core.BindingPartition) = ccall(:jl_bpart_get_restriction_value, Any, (Any,), bpart) + binding_kind(bpart::Core.BindingPartition) = ccall(:jl_bpart_get_kind, UInt8, (Any,), bpart) binding_kind(m::Module, s::Symbol) = binding_kind(lookup_binding_partition(tls_world_age(), GlobalRef(m, s))) +""" + delete_binding(mod::Module, sym::Symbol) + +Force the binding `mod.sym` to be undefined again, allowing it be redefined. +Note that this operation is very expensive, requirinig a full scan of all code in the system, +as well as potential recompilation of any methods that (may) have used binding +information. + +!!! warning + The implementation of this functionality is currently incomplete. Do not use + this method on versions that contain this disclaimer except for testing. +""" +function delete_binding(mod::Module, sym::Symbol) + ccall(:jl_disable_binding, Cvoid, (Any,), GlobalRef(mod, sym)) +end + """ fieldname(x::DataType, i::Integer) diff --git a/base/show.jl b/base/show.jl index 25ed99f50b5b0..3aeb267b4a696 100644 --- a/base/show.jl +++ b/base/show.jl @@ -1035,6 +1035,21 @@ function is_global_function(tn::Core.TypeName, globname::Union{Symbol,Nothing}) return false end +function check_world_bounded(tn::Core.TypeName) + bnd = ccall(:jl_get_module_binding, Ref{Core.Binding}, (Any, Any, Cint), tn.module, tn.name, true) + isdefined(bnd, :partitions) || return nothing + partition = @atomic bnd.partitions + while true + if is_some_const_binding(binding_kind(partition)) && partition_restriction(partition) <: tn.wrapper + max_world = @atomic partition.max_world + max_world == typemax(UInt) && return nothing + return Int(partition.min_world):Int(max_world) + end + isdefined(partition, :next) || return nothing + partition = @atomic partition.next + end +end + function show_type_name(io::IO, tn::Core.TypeName) if tn === UnionAll.name # by coincidence, `typeof(Type)` is a valid representation of the UnionAll type. 
@@ -1063,7 +1078,10 @@ function show_type_name(io::IO, tn::Core.TypeName) end end end + world = check_world_bounded(tn) + world !== nothing && print(io, "@world(") show_sym(io, sym) + world !== nothing && print(io, ", ", world, ")") quo && print(io, ")") globfunc && print(io, ")") nothing diff --git a/src/clangsa/GCChecker.cpp b/src/clangsa/GCChecker.cpp index 4892ebdabd110..830fe322a0a38 100644 --- a/src/clangsa/GCChecker.cpp +++ b/src/clangsa/GCChecker.cpp @@ -824,6 +824,7 @@ bool GCChecker::isGCTrackedType(QualType QT) { Name.ends_with_insensitive("jl_tupletype_t") || Name.ends_with_insensitive("jl_gc_tracked_buffer_t") || Name.ends_with_insensitive("jl_binding_t") || + Name.ends_with_insensitive("jl_binding_partition_t") || Name.ends_with_insensitive("jl_ordereddict_t") || Name.ends_with_insensitive("jl_tvar_t") || Name.ends_with_insensitive("jl_typemap_t") || @@ -847,6 +848,7 @@ bool GCChecker::isGCTrackedType(QualType QT) { Name.ends_with_insensitive("jl_stenv_t") || Name.ends_with_insensitive("jl_varbinding_t") || Name.ends_with_insensitive("set_world") || + Name.ends_with_insensitive("jl_ptr_kind_union_t") || Name.ends_with_insensitive("jl_codectx_t")) { return true; } diff --git a/src/julia.h b/src/julia.h index 168ba0deff1ec..dd79dbb82c28d 100644 --- a/src/julia.h +++ b/src/julia.h @@ -620,6 +620,7 @@ typedef struct _jl_weakref_t { jl_value_t *value; } jl_weakref_t; +// N.B: Needs to be synced with runtime_internals.jl enum jl_partition_kind { // Constant: This binding partition is a constant declared using `const` // ->restriction holds the constant value @@ -684,7 +685,7 @@ typedef struct __attribute__((aligned(8))) _jl_binding_partition_t { _Atomic(jl_ptr_kind_union_t) restriction; size_t min_world; _Atomic(size_t) max_world; - _Atomic(struct _jl_binding_partition_t*) next; + _Atomic(struct _jl_binding_partition_t *) next; size_t reserved; // Reserved for ->kind. 
Currently this holds the low bits of ->restriction during serialization } jl_binding_partition_t; @@ -1845,8 +1846,8 @@ JL_DLLEXPORT jl_sym_t *jl_symbol_n(const char *str, size_t len) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_sym_t *jl_gensym(void); JL_DLLEXPORT jl_sym_t *jl_tagged_gensym(const char *str, size_t len); JL_DLLEXPORT jl_sym_t *jl_get_root_symbol(void); -JL_DLLEXPORT jl_value_t *jl_get_binding_value(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT; -JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_const(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT; +JL_DLLEXPORT jl_value_t *jl_get_binding_value(jl_binding_t *b JL_PROPAGATES_ROOT); +JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_const(jl_binding_t *b JL_PROPAGATES_ROOT); JL_DLLEXPORT jl_value_t *jl_declare_const_gf(jl_binding_t *b, jl_module_t *mod, jl_sym_t *name); JL_DLLEXPORT jl_method_t *jl_method_def(jl_svec_t *argdata, jl_methtable_t *mt, jl_code_info_t *f, jl_module_t *module); JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo, size_t world, jl_code_instance_t **cache); @@ -2008,8 +2009,8 @@ JL_DLLEXPORT jl_value_t *jl_checked_swap(jl_binding_t *b, jl_module_t *mod, jl_s JL_DLLEXPORT jl_value_t *jl_checked_replace(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *expected, jl_value_t *rhs); JL_DLLEXPORT jl_value_t *jl_checked_modify(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *op, jl_value_t *rhs); JL_DLLEXPORT jl_value_t *jl_checked_assignonce(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *rhs JL_MAYBE_UNROOTED); -JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val(jl_binding_t *b JL_ROOTING_ARGUMENT, jl_module_t *mod, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED) JL_NOTSAFEPOINT; -JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(jl_binding_t *b JL_ROOTING_ARGUMENT, jl_module_t *mod, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED, enum jl_partition_kind) JL_NOTSAFEPOINT; +JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val(jl_binding_t *b JL_ROOTING_ARGUMENT, jl_module_t *mod, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED); +JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(jl_binding_t *b JL_ROOTING_ARGUMENT, jl_module_t *mod, jl_sym_t *var, jl_value_t *val JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED, enum jl_partition_kind); JL_DLLEXPORT void jl_module_using(jl_module_t *to, jl_module_t *from); JL_DLLEXPORT void jl_module_use(jl_module_t *to, jl_module_t *from, jl_sym_t *s); JL_DLLEXPORT void jl_module_use_as(jl_module_t *to, jl_module_t *from, jl_sym_t *s, jl_sym_t *asname); diff --git a/src/julia_internal.h b/src/julia_internal.h index 4f735029da444..8c4ee9fca36e0 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -888,13 +888,10 @@ EXTERN_INLINE_DECLARE enum jl_partition_kind decode_restriction_kind(jl_ptr_kind #endif } -STATIC_INLINE jl_value_t *decode_restriction_value(jl_ptr_kind_union_t pku) JL_NOTSAFEPOINT +STATIC_INLINE jl_value_t *decode_restriction_value(jl_ptr_kind_union_t JL_PROPAGATES_ROOT pku) JL_NOTSAFEPOINT { #ifdef _P64 jl_value_t *val = (jl_value_t*)(pku & ~0x7); - // This is a little bit of a lie at the moment - it is one of the things that - // can go wrong with binding replacement. 
- JL_GC_PROMISE_ROOTED(val); return val; #else return pku.val; @@ -928,14 +925,8 @@ STATIC_INLINE int jl_bkind_is_some_guard(enum jl_partition_kind kind) JL_NOTSAFE return kind == BINDING_KIND_FAILED || kind == BINDING_KIND_GUARD || kind == BINDING_KIND_DECLARED; } -EXTERN_INLINE_DECLARE jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) JL_NOTSAFEPOINT { - if (!b) - return NULL; - assert(jl_is_binding(b)); - return jl_atomic_load_relaxed(&b->partitions); -} - -JL_DLLEXPORT jl_binding_partition_t *jl_get_globalref_partition(jl_globalref_t *gr, size_t world); +JL_DLLEXPORT jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b JL_PROPAGATES_ROOT, size_t world); +JL_DLLEXPORT jl_binding_partition_t *jl_get_globalref_partition(jl_globalref_t *gr JL_PROPAGATES_ROOT, size_t world); EXTERN_INLINE_DECLARE uint8_t jl_bpart_get_kind(jl_binding_partition_t *bpart) JL_NOTSAFEPOINT { return decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)); diff --git a/src/module.c b/src/module.c index 8dbac950235ee..f1098e22ff522 100644 --- a/src/module.c +++ b/src/module.c @@ -13,10 +13,51 @@ extern "C" { #endif // In this translation unit and this translation unit only emit this symbol `extern` for use by julia -EXTERN_INLINE_DEFINE jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) JL_NOTSAFEPOINT; EXTERN_INLINE_DEFINE uint8_t jl_bpart_get_kind(jl_binding_partition_t *bpart) JL_NOTSAFEPOINT; extern inline enum jl_partition_kind decode_restriction_kind(jl_ptr_kind_union_t pku) JL_NOTSAFEPOINT; +static jl_binding_partition_t *new_binding_partition(void) +{ + jl_binding_partition_t *bpart = (jl_binding_partition_t*)jl_gc_alloc(jl_current_task->ptls, sizeof(jl_binding_partition_t), jl_binding_partition_type); + jl_atomic_store_relaxed(&bpart->restriction, encode_restriction(NULL, BINDING_KIND_GUARD)); + bpart->min_world = 0; + jl_atomic_store_relaxed(&bpart->max_world, (size_t)-1); + jl_atomic_store_relaxed(&bpart->next, NULL); +#ifdef _P64 + bpart->reserved = 0; +#endif + return bpart; +} + +jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) { + if (!b) + return NULL; + assert(jl_is_binding(b)); + jl_value_t *parent = (jl_value_t*)b; + _Atomic(jl_binding_partition_t *)*insert = &b->partitions; + jl_binding_partition_t *bpart = jl_atomic_load_relaxed(insert); + size_t max_world = (size_t)-1; + while (1) { + while (bpart && world < bpart->min_world) { + insert = &bpart->next; + max_world = bpart->min_world - 1; + parent = (jl_value_t *)bpart; + bpart = jl_atomic_load_relaxed(&bpart->next); + } + if (bpart && world <= jl_atomic_load_relaxed(&bpart->max_world)) + return bpart; + jl_binding_partition_t *new_bpart = new_binding_partition(); + jl_atomic_store_relaxed(&new_bpart->next, bpart); + if (bpart) + new_bpart->min_world = jl_atomic_load_relaxed(&bpart->max_world) + 1; + jl_atomic_store_relaxed(&new_bpart->max_world, max_world); + if (jl_atomic_cmpswap(insert, &bpart, new_bpart)) { + jl_gc_wb(parent, new_bpart); + return new_bpart; + } + } +} + JL_DLLEXPORT jl_binding_partition_t *jl_get_globalref_partition(jl_globalref_t *gr, size_t world) { if (!gr) @@ -188,19 +229,6 @@ static jl_globalref_t *jl_new_globalref(jl_module_t *mod, jl_sym_t *name, jl_bin return g; } -static jl_binding_partition_t *new_binding_partition(void) -{ - jl_binding_partition_t *bpart = (jl_binding_partition_t*)jl_gc_alloc(jl_current_task->ptls, sizeof(jl_binding_partition_t), jl_binding_partition_type); - 
jl_atomic_store_relaxed(&bpart->restriction, encode_restriction(NULL, BINDING_KIND_GUARD)); - bpart->min_world = 0; - jl_atomic_store_relaxed(&bpart->max_world, (size_t)-1); - jl_atomic_store_relaxed(&bpart->next, NULL); -#ifdef _P64 - bpart->reserved = 0; -#endif - return bpart; -} - static jl_binding_t *new_binding(jl_module_t *mod, jl_sym_t *name) { jl_task_t *ct = jl_current_task; @@ -215,9 +243,7 @@ static jl_binding_t *new_binding(jl_module_t *mod, jl_sym_t *name) JL_GC_PUSH1(&b); b->globalref = jl_new_globalref(mod, name, b); jl_gc_wb(b, b->globalref); - jl_binding_partition_t *bpart = new_binding_partition(); - jl_atomic_store_relaxed(&b->partitions, bpart); - jl_gc_wb(b, bpart); + jl_atomic_store_relaxed(&b->partitions, NULL); JL_GC_POP(); return b; } @@ -324,6 +350,12 @@ JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_const(jl_binding_t *b) return decode_restriction_value(pku); } +JL_DLLEXPORT jl_value_t *jl_bpart_get_restriction_value(jl_binding_partition_t *bpart) +{ + jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); + return decode_restriction_value(pku); +} + typedef struct _modstack_t { jl_module_t *m; jl_sym_t *var; @@ -947,6 +979,28 @@ JL_DLLEXPORT void jl_set_const(jl_module_t *m JL_ROOTING_ARGUMENT, jl_sym_t *var jl_gc_wb(bpart, val); } +extern jl_mutex_t world_counter_lock; +JL_DLLEXPORT void jl_disable_binding(jl_globalref_t *gr) +{ + jl_binding_t *b = gr->binding; + b = jl_resolve_owner(b, gr->mod, gr->name, NULL); + jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age); + + if (decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)) == BINDING_KIND_GUARD) { + // Already guard + return; + } + + JL_LOCK(&world_counter_lock); + jl_task_t *ct = jl_current_task; + size_t new_max_world = jl_atomic_load_acquire(&jl_world_counter); + // TODO: Trigger invalidation here + (void)ct; + jl_atomic_store_release(&bpart->max_world, new_max_world); + jl_atomic_store_release(&jl_world_counter, new_max_world + 1); + JL_UNLOCK(&world_counter_lock); +} + JL_DLLEXPORT int jl_globalref_is_const(jl_globalref_t *gr) { jl_binding_t *b = gr->binding; @@ -1018,13 +1072,17 @@ void jl_binding_deprecation_warning(jl_module_t *m, jl_sym_t *s, jl_binding_t *b jl_value_t *jl_check_binding_wr(jl_binding_t *b JL_PROPAGATES_ROOT, jl_module_t *mod, jl_sym_t *var, jl_value_t *rhs JL_MAYBE_UNROOTED, int reassign) { + JL_GC_PUSH1(&rhs); // callee-rooted jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age); jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); assert(!jl_bkind_is_some_guard(decode_restriction_kind(pku)) && !jl_bkind_is_some_import(decode_restriction_kind(pku))); if (jl_bkind_is_some_constant(decode_restriction_kind(pku))) { jl_value_t *old = decode_restriction_value(pku); - if (jl_egal(rhs, old)) + JL_GC_PROMISE_ROOTED(old); + if (jl_egal(rhs, old)) { + JL_GC_POP(); return NULL; + } if (jl_typeof(rhs) == jl_typeof(old)) jl_errorf("invalid redefinition of constant %s.%s. 
This redefinition may be permitted using the `const` keyword.", jl_symbol_name(mod->name), jl_symbol_name(var)); @@ -1033,13 +1091,13 @@ jl_value_t *jl_check_binding_wr(jl_binding_t *b JL_PROPAGATES_ROOT, jl_module_t jl_symbol_name(mod->name), jl_symbol_name(var)); } jl_value_t *old_ty = decode_restriction_value(pku); + JL_GC_PROMISE_ROOTED(old_ty); if (old_ty != (jl_value_t*)jl_any_type && jl_typeof(rhs) != old_ty) { - JL_GC_PUSH1(&rhs); // callee-rooted if (!jl_isa(rhs, old_ty)) jl_errorf("cannot assign an incompatible value to the global %s.%s.", jl_symbol_name(mod->name), jl_symbol_name(var)); - JL_GC_POP(); } + JL_GC_POP(); return old_ty; } @@ -1076,6 +1134,7 @@ JL_DLLEXPORT jl_value_t *jl_checked_modify(jl_binding_t *b, jl_module_t *mod, jl jl_errorf("invalid redefinition of constant %s.%s", jl_symbol_name(mod->name), jl_symbol_name(var)); jl_value_t *ty = decode_restriction_value(pku); + JL_GC_PROMISE_ROOTED(ty); return modify_value(ty, &b->value, (jl_value_t*)b, op, rhs, 1, mod, var); } diff --git a/src/staticdata.c b/src/staticdata.c index af4527cbc143f..af3477a25128e 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -3883,9 +3883,12 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl if ((jl_value_t*)b == jl_nothing) continue; jl_binding_partition_t *bpart = jl_atomic_load_relaxed(&b->partitions); - jl_atomic_store_relaxed(&bpart->restriction, - encode_restriction((jl_value_t*)jl_atomic_load_relaxed(&bpart->restriction), bpart->reserved)); - bpart->reserved = 0; + while (bpart) { + jl_atomic_store_relaxed(&bpart->restriction, + encode_restriction((jl_value_t*)jl_atomic_load_relaxed(&bpart->restriction), bpart->reserved)); + bpart->reserved = 0; + bpart = jl_atomic_load_relaxed(&bpart->next); + } } #endif } diff --git a/src/toplevel.c b/src/toplevel.c index 8caa8b086ec00..6f2e0cf77568a 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -318,6 +318,7 @@ void jl_binding_set_type(jl_binding_t *b, jl_module_t *mod, jl_sym_t *sym, jl_va jl_symbol_name(mod->name), jl_symbol_name(sym)); } jl_value_t *old_ty = decode_restriction_value(pku); + JL_GC_PROMISE_ROOTED(old_ty); if (!jl_types_equal(ty, old_ty)) { jl_errorf("cannot set type for global %s.%s. It already has a value or is already set to a different type.", jl_symbol_name(mod->name), jl_symbol_name(sym)); @@ -738,6 +739,7 @@ JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(jl_binding_t *b, j if (!val) return bpart; jl_value_t *old = decode_restriction_value(pku); + JL_GC_PROMISE_ROOTED(old); if (jl_egal(val, old)) break; if (!did_warn) { diff --git a/test/choosetests.jl b/test/choosetests.jl index 96d230d185c71..affdee412bd86 100644 --- a/test/choosetests.jl +++ b/test/choosetests.jl @@ -29,7 +29,7 @@ const TESTNAMES = [ "channels", "iostream", "secretbuffer", "specificity", "reinterpretarray", "syntax", "corelogging", "missing", "asyncmap", "smallarrayshrink", "opaque_closure", "filesystem", "download", - "scopedvalues", "compileall" + "scopedvalues", "compileall", "rebinding" ] const INTERNET_REQUIRED_LIST = [ diff --git a/test/rebinding.jl b/test/rebinding.jl new file mode 100644 index 0000000000000..4066d91bc4b9b --- /dev/null +++ b/test/rebinding.jl @@ -0,0 +1,18 @@ +# This file is a part of Julia. 
License is MIT: https://julialang.org/license + +module Rebinding + using Test + + @test Base.binding_kind(@__MODULE__, :Foo) == Base.BINDING_KIND_GUARD + struct Foo + x::Int + end + x = Foo(1) + + @test Base.binding_kind(@__MODULE__, :Foo) == Base.BINDING_KIND_CONST + @test !contains(repr(x), "@world") + Base.delete_binding(@__MODULE__, :Foo) + + @test Base.binding_kind(@__MODULE__, :Foo) == Base.BINDING_KIND_GUARD + @test contains(repr(x), "@world") +end From 82e0e28d76621888f6e501033c59549cb0104bac Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 21 Oct 2024 19:48:49 +0530 Subject: [PATCH 266/537] Specialize `haszero` for `Union{Missing,<:Number}` (#56169) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since `zero(::Union{Missing,T})` calls `zero(T)` internally, we may use the same logic for `LinearAlgebra.haszero`. This helps with certain structured matrix operations: ```julia julia> M = Matrix{Union{Int,Missing}}(missing,2,2) 2×2 Matrix{Union{Missing, Int64}}: missing missing missing missing julia> triu(M) 2×2 Matrix{Union{Missing, Int64}}: missing missing 0 missing ``` whereas previously, this would have been ```julia julia> triu(M) 2×2 Matrix{Union{Missing, Int64}}: missing missing missing missing ``` --- stdlib/LinearAlgebra/src/dense.jl | 1 + stdlib/LinearAlgebra/test/triangular.jl | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index aacc5479bfa9d..d8f2513f5bfc8 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -110,6 +110,7 @@ norm2(x::Union{Array{T},StridedVector{T}}) where {T<:BlasFloat} = # Conservative assessment of types that have zero(T) defined for themselves haszero(::Type) = false haszero(::Type{T}) where {T<:Number} = isconcretetype(T) +haszero(::Type{Union{Missing,T}}) where {T<:Number} = haszero(T) @propagate_inbounds _zero(M::AbstractArray{T}, inds...) where {T} = haszero(T) ? 
zero(T) : zero(M[inds...]) """ diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 7acb3cbfc0c57..2ceda735dfd0a 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -1284,6 +1284,14 @@ end @test istril(U, k) == istril(A, k) end end + + @testset "Union eltype" begin + M = Matrix{Union{Int,Missing}}(missing,2,2) + U = triu(M) + @test iszero(U[2,1]) + U = tril(M) + @test iszero(U[1,2]) + end end @testset "indexing with a BandIndex" begin From 6d7e29f33018b8750ac1f2cf446a51910d25a1d7 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Mon, 21 Oct 2024 11:24:39 -0300 Subject: [PATCH 267/537] Add small tweaks to juliac script and misc fixes to juliac (#56119) This comments out an assert thats currently faulty and also marks apply_iterate as safe when we can special case it to not dispatch --- contrib/juliac-buildscript.jl | 22 ++++++++++++------ contrib/juliac.jl | 6 +++-- src/codegen.cpp | 42 ++++++++++++++++++++++++++++++----- 3 files changed, 56 insertions(+), 14 deletions(-) diff --git a/contrib/juliac-buildscript.jl b/contrib/juliac-buildscript.jl index 490bca86e1cba..0303e95f448b5 100644 --- a/contrib/juliac-buildscript.jl +++ b/contrib/juliac-buildscript.jl @@ -27,6 +27,7 @@ end (f::Base.RedirectStdStream)(io::Core.CoreSTDOUT) = Base._redirect_io_global(io, f.unix_fd) @eval Base begin + depwarn(msg, funcsym; force::Bool=false) = nothing _assert_tostring(msg) = "" reinit_stdio() = nothing JuliaSyntax.enable_in_core!() = nothing @@ -229,20 +230,15 @@ let loaded = Symbol.(Base.loaded_modules_array()) # TODO better way to do this using Artifacts @eval Artifacts begin function _artifact_str(__module__, artifacts_toml, name, path_tail, artifact_dict, hash, platform, _::Val{lazyartifacts}) where lazyartifacts - moduleroot = Base.moduleroot(__module__) - if haskey(Base.module_keys, moduleroot) - # Process overrides for this UUID, if we know what it is - process_overrides(artifact_dict, Base.module_keys[moduleroot].uuid) - end - # If the artifact exists, we're in the happy path and we can immediately # return the path to the artifact: - dirs = artifact_paths(hash; honor_overrides=true) + dirs = artifacts_dirs(bytes2hex(hash.bytes)) for dir in dirs if isdir(dir) return jointail(dir, path_tail) end end + error("Artifact not found") end end end @@ -258,6 +254,18 @@ let loaded = Symbol.(Base.loaded_modules_array()) # TODO better way to do this __init__() = rand() end end + if :Markdown in loaded + using Markdown + @eval Markdown begin + __init__() = rand() + end + end + if :JuliaSyntaxHighlighting in loaded + using JuliaSyntaxHighlighting + @eval JuliaSyntaxHighlighting begin + __init__() = rand() + end + end end empty!(Core.ARGS) diff --git a/contrib/juliac.jl b/contrib/juliac.jl index 61e0e91958667..0f008976d2b4f 100644 --- a/contrib/juliac.jl +++ b/contrib/juliac.jl @@ -8,6 +8,7 @@ trim = nothing outname = nothing file = nothing add_ccallables = false +verbose = false help = findfirst(x->x == "--help", ARGS) if help !== nothing @@ -39,6 +40,8 @@ let i = 1 end elseif arg == "--compile-ccallable" global add_ccallables = true + elseif arg == "--verbose" + global verbose = true else if arg[1] == '-' || !isnothing(file) println("Unexpected argument `$arg`") @@ -77,9 +80,8 @@ open(initsrc_path, "w") do io end static_call_graph_arg() = isnothing(trim) ? `` : `--trim=$(trim)` -is_verbose() = verbose ? 
`--verbose-compilation=yes` : `` cmd = addenv(`$cmd --project=$(Base.active_project()) --output-o $img_path --output-incremental=no --strip-ir --strip-metadata $(static_call_graph_arg()) $(joinpath(@__DIR__,"juliac-buildscript.jl")) $absfile $output_type $add_ccallables`, "OPENBLAS_NUM_THREADS" => 1, "JULIA_NUM_THREADS" => 1) - +verbose && println("Running: $cmd") if !success(pipeline(cmd; stdout, stderr)) println(stderr, "\nFailed to compile $file") exit(1) diff --git a/src/codegen.cpp b/src/codegen.cpp index 0ab26a65fcaaa..eaa3cc8176ad5 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -2247,7 +2247,10 @@ static jl_array_t* build_stack_crumbs(jl_codectx_t &ctx) JL_NOTSAFEPOINT break; } if (caller) { - assert(ctx.emission_context.enqueuers.count(caller) == 1); + + // assert(ctx.emission_context.enqueuers.count(caller) == 1); + // Each enqueuer should only be enqueued at least once and only once. Check why this assert is triggering + // This isn't a fatal error, just means that we may get a wrong backtrace if (jl_is_method_instance(caller)) { //TODO: Use a subrange when C++20 is a thing for (auto it2 = std::get(it->second).begin(); it2 != (std::prev(std::get(it->second).end())); ++it2) { @@ -5732,10 +5735,34 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo // special case for some known builtin not handled by emit_builtin_call auto it = builtin_func_map().find(builtin_fptr); if (it != builtin_func_map().end()) { - if (trim_may_error(ctx.params->trim) && may_dispatch_builtins().count(builtin_fptr)) { - errs() << "ERROR: Dynamic call to builtin" << jl_symbol_name(((jl_datatype_t*)jl_typeof(f.constant))->name->name); - errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; - print_stacktrace(ctx, ctx.params->trim); + if (trim_may_error(ctx.params->trim)) { + bool may_dispatch = may_dispatch_builtins().count(builtin_fptr); + if (may_dispatch && f.constant == jl_builtin__apply_iterate && nargs >= 4) { + if (jl_subtype(argv[2].typ, (jl_value_t*)jl_builtin_type)) { + static jl_value_t *jl_dispatchfree_apply_iterate_type = NULL; + if (!jl_dispatchfree_apply_iterate_type) { + jl_value_t *types[5] = { + (jl_value_t *)jl_simplevector_type, + (jl_value_t *)jl_genericmemory_type, + (jl_value_t *)jl_array_type, + (jl_value_t *)jl_tuple_type, + (jl_value_t *)jl_namedtuple_type, + }; + jl_dispatchfree_apply_iterate_type = jl_as_global_root(jl_type_union(types, 5), 1); + } + for (size_t i = 3; i < nargs; i++) { + auto ai = argv[i].typ; + if (!jl_subtype(ai, jl_dispatchfree_apply_iterate_type)) + break; + } + may_dispatch = false; + } + } + if (may_dispatch) { + errs() << "ERROR: Dynamic call to builtin " << jl_symbol_name(((jl_datatype_t*)jl_typeof(f.constant))->name->name); + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } } Value *ret = emit_jlcall(ctx, it->second, Constant::getNullValue(ctx.types().T_prjlvalue), ArrayRef(argv).drop_front(), nargs - 1, julia_call); setName(ctx.emission_context, ret, it->second->name + "_ret"); @@ -5752,6 +5779,11 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo fptr = FunctionCallee(get_func_sig(ctx.builder.getContext()), ctx.builder.CreateCall(prepare_call(jlgetbuiltinfptr_func), {emit_typeof(ctx, f)})); cc = julia_call; } + if (trim_may_error(ctx.params->trim)) { + errs() << 
"ERROR: Dynamic call to unknown builtin"; + errs() << "In " << ctx.builder.getCurrentDebugLocation()->getFilename() << ":" << ctx.builder.getCurrentDebugLocation()->getLine() << "\n"; + print_stacktrace(ctx, ctx.params->trim); + } Value *ret = emit_jlcall(ctx, fptr, nullptr, argv, nargs, cc); setName(ctx.emission_context, ret, "Builtin_ret"); return mark_julia_type(ctx, ret, true, rt); From 11ef8eb0be7b411ab8c2813789adf63af4793798 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 21 Oct 2024 20:13:53 +0530 Subject: [PATCH 268/537] Warn about negative size in array construction (#56262) After this, ```julia julia> zeros(-1) ERROR: ArgumentError: invalid GenericMemory size: the number of elements is either negative or too large for system address width [...] ``` The error message is updated to warn about possible negative sizes when creating arrays. Fixes https://github.com/JuliaLang/julia/issues/55446 --- src/genericmemory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/genericmemory.c b/src/genericmemory.c index ea52fca66ba48..5c48e3202493e 100644 --- a/src/genericmemory.c +++ b/src/genericmemory.c @@ -45,7 +45,7 @@ jl_genericmemory_t *_new_genericmemory_(jl_value_t *mtype, size_t nel, int8_t is prod += nel; } if (nel >= MAXINTVAL || prod >= (wideint_t) MAXINTVAL) - jl_exceptionf(jl_argumenterror_type, "invalid GenericMemory size: too large for system address width"); + jl_exceptionf(jl_argumenterror_type, "invalid GenericMemory size: the number of elements is either negative or too large for system address width"); size_t tot = (size_t)prod + LLT_ALIGN(sizeof(jl_genericmemory_t),JL_SMALL_BYTE_ALIGNMENT); int pooled = tot <= GC_MAX_SZCLASS; From cba1cc022b05eef93f7145aeb363a6ab2c9c3e6c Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 21 Oct 2024 20:28:39 +0530 Subject: [PATCH 269/537] Revert "Reroute` (Upper/Lower)Triangular * Diagonal` through `__muldiag`" (#56267) Reverts JuliaLang/julia#55984 This PR was buggy, but the test failures as seen in https://buildkite.com/julialang/julia-master/builds/41300#0192adab-9d07-4900-8592-2d46aff26905 were not caught in the CI run for the PR as the tests are run randomly. Let's revert this for now. 
--- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 - stdlib/LinearAlgebra/src/diagonal.jl | 158 +++++++++------------- stdlib/LinearAlgebra/test/diagonal.jl | 40 +----- 3 files changed, 66 insertions(+), 134 deletions(-) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 88fc3476c9d7f..15354603943c2 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -655,8 +655,6 @@ matprod_dest(A::StructuredMatrix, B::Diagonal, TS) = _matprod_dest_diag(A, TS) matprod_dest(A::Diagonal, B::StructuredMatrix, TS) = _matprod_dest_diag(B, TS) matprod_dest(A::Diagonal, B::Diagonal, TS) = _matprod_dest_diag(B, TS) _matprod_dest_diag(A, TS) = similar(A, TS) -_matprod_dest_diag(A::UnitUpperTriangular, TS) = UpperTriangular(similar(parent(A), TS)) -_matprod_dest_diag(A::UnitLowerTriangular, TS) = LowerTriangular(similar(parent(A), TS)) function _matprod_dest_diag(A::SymTridiagonal, TS) n = size(A, 1) ev = similar(A, TS, max(0, n-1)) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 8ba4c3d457e83..6e8ce96259fc1 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -396,120 +396,82 @@ function lmul!(D::Diagonal, T::Tridiagonal) return T end -@inline function __muldiag_nonzeroalpha!(out, D::Diagonal, B, _add::MulAddMul) - @inbounds for j in axes(B, 2) - @simd for i in axes(B, 1) - _modify!(_add, D.diag[i] * B[i,j], out, (i,j)) - end - end - out -end -_maybe_unwrap_tri(out, A) = out, A -_maybe_unwrap_tri(out::UpperTriangular, A::UpperOrUnitUpperTriangular) = parent(out), parent(A) -_maybe_unwrap_tri(out::LowerTriangular, A::LowerOrUnitLowerTriangular) = parent(out), parent(A) -@inline function __muldiag_nonzeroalpha!(out, D::Diagonal, B::UpperOrLowerTriangular, _add::MulAddMul) - isunit = B isa Union{UnitUpperTriangular, UnitLowerTriangular} - # if both B and out have the same upper/lower triangular structure, - # we may directly read and write from the parents - out_maybeparent, B_maybeparent = _maybe_unwrap_tri(out, B) - for j in axes(B, 2) - if isunit - _modify!(_add, D.diag[j] * B[j,j], out, (j,j)) - end - rowrange = B isa UpperOrUnitUpperTriangular ? 
(1:min(j-isunit, size(B,1))) : (j+isunit:size(B,1)) - @inbounds @simd for i in rowrange - _modify!(_add, D.diag[i] * B_maybeparent[i,j], out_maybeparent, (i,j)) - end - end - out -end -function __muldiag!(out, D::Diagonal, B, _add::MulAddMul) +function __muldiag!(out, D::Diagonal, B, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} require_one_based_indexing(out, B) alpha, beta = _add.alpha, _add.beta if iszero(alpha) _rmul_or_fill!(out, beta) else - __muldiag_nonzeroalpha!(out, D, B, _add) - end - return out -end - -@inline function __muldiag_nonzeroalpha!(out, A, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - beta = _add.beta - _add_aisone = MulAddMul{true,bis0,Bool,typeof(beta)}(true, beta) - @inbounds for j in axes(A, 2) - dja = _add(D.diag[j]) - @simd for i in axes(A, 1) - _modify!(_add_aisone, A[i,j] * dja, out, (i,j)) - end - end - out -end -@inline function __muldiag_nonzeroalpha!(out, A::UpperOrLowerTriangular, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - isunit = A isa Union{UnitUpperTriangular, UnitLowerTriangular} - beta = _add.beta - # since alpha is multiplied to the diagonal element of D, - # we may skip alpha in the second multiplication by setting ais1 to true - _add_aisone = MulAddMul{true,bis0,Bool,typeof(beta)}(true, beta) - # if both A and out have the same upper/lower triangular structure, - # we may directly read and write from the parents - out_maybeparent, A_maybeparent = _maybe_unwrap_tri(out, A) - @inbounds for j in axes(A, 2) - dja = _add(D.diag[j]) - if isunit - _modify!(_add_aisone, A[j,j] * dja, out, (j,j)) - end - rowrange = A isa UpperOrUnitUpperTriangular ? (1:min(j-isunit, size(A,1))) : (j+isunit:size(A,1)) - @simd for i in rowrange - _modify!(_add_aisone, A_maybeparent[i,j] * dja, out_maybeparent, (i,j)) + if bis0 + @inbounds for j in axes(B, 2) + @simd for i in axes(B, 1) + out[i,j] = D.diag[i] * B[i,j] * alpha + end + end + else + @inbounds for j in axes(B, 2) + @simd for i in axes(B, 1) + out[i,j] = D.diag[i] * B[i,j] * alpha + out[i,j] * beta + end + end end end - out + return out end -function __muldiag!(out, A, D::Diagonal, _add::MulAddMul) +function __muldiag!(out, A, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} require_one_based_indexing(out, A) alpha, beta = _add.alpha, _add.beta if iszero(alpha) _rmul_or_fill!(out, beta) else - __muldiag_nonzeroalpha!(out, A, D, _add) + if bis0 + @inbounds for j in axes(A, 2) + dja = D.diag[j] * alpha + @simd for i in axes(A, 1) + out[i,j] = A[i,j] * dja + end + end + else + @inbounds for j in axes(A, 2) + dja = D.diag[j] * alpha + @simd for i in axes(A, 1) + out[i,j] = A[i,j] * dja + out[i,j] * beta + end + end + end end return out end - -@inline function __muldiag_nonzeroalpha!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul) +function __muldiag!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} d1 = D1.diag d2 = D2.diag - outd = out.diag - @inbounds @simd for i in eachindex(d1, d2, outd) - _modify!(_add, d1[i] * d2[i], outd, i) - end - out -end -function __muldiag!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul) alpha, beta = _add.alpha, _add.beta if iszero(alpha) _rmul_or_fill!(out.diag, beta) else - __muldiag_nonzeroalpha!(out, D1, D2, _add) + if bis0 + @inbounds @simd for i in eachindex(out.diag) + out.diag[i] = d1[i] * d2[i] * alpha + end + else + @inbounds @simd for i in eachindex(out.diag) + out.diag[i] = d1[i] * d2[i] * alpha + out.diag[i] * beta + end + end end return out end -@inline function 
__muldiag_nonzeroalpha!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul) - d1 = D1.diag - d2 = D2.diag - @inbounds @simd for i in eachindex(d1, d2) - _modify!(_add, d1[i] * d2[i], out, (i,i)) - end - out -end -function __muldiag!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul{ais1}) where {ais1} +function __muldiag!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} require_one_based_indexing(out) alpha, beta = _add.alpha, _add.beta + mA = size(D1, 1) + d1 = D1.diag + d2 = D2.diag _rmul_or_fill!(out, beta) if !iszero(alpha) - _add_bis1 = MulAddMul{ais1,false,typeof(alpha),Bool}(alpha,true) - __muldiag_nonzeroalpha!(out, D1, D2, _add_bis1) + @inbounds @simd for i in 1:mA + out[i,i] += d1[i] * d2[i] * alpha + end end return out end @@ -696,21 +658,31 @@ for Tri in (:UpperTriangular, :LowerTriangular) @eval $fun(A::$Tri, D::Diagonal) = $Tri($fun(A.data, D)) @eval $fun(A::$UTri, D::Diagonal) = $Tri(_setdiag!($fun(A.data, D), $f, D.diag)) end - @eval *(A::$Tri{<:Any, <:StridedMaybeAdjOrTransMat}, D::Diagonal) = - @invoke *(A::AbstractMatrix, D::Diagonal) - @eval *(A::$UTri{<:Any, <:StridedMaybeAdjOrTransMat}, D::Diagonal) = - @invoke *(A::AbstractMatrix, D::Diagonal) for (fun, f) in zip((:*, :lmul!, :ldiv!, :\), (:identity, :identity, :inv, :inv)) @eval $fun(D::Diagonal, A::$Tri) = $Tri($fun(D, A.data)) @eval $fun(D::Diagonal, A::$UTri) = $Tri(_setdiag!($fun(D, A.data), $f, D.diag)) end - @eval *(D::Diagonal, A::$Tri{<:Any, <:StridedMaybeAdjOrTransMat}) = - @invoke *(D::Diagonal, A::AbstractMatrix) - @eval *(D::Diagonal, A::$UTri{<:Any, <:StridedMaybeAdjOrTransMat}) = - @invoke *(D::Diagonal, A::AbstractMatrix) # 3-arg ldiv! @eval ldiv!(C::$Tri, D::Diagonal, A::$Tri) = $Tri(ldiv!(C.data, D, A.data)) @eval ldiv!(C::$Tri, D::Diagonal, A::$UTri) = $Tri(_setdiag!(ldiv!(C.data, D, A.data), inv, D.diag)) + # 3-arg mul! is disambiguated in special.jl + # 5-arg mul! + @eval _mul!(C::$Tri, D::Diagonal, A::$Tri, _add) = $Tri(mul!(C.data, D, A.data, _add.alpha, _add.beta)) + @eval function _mul!(C::$Tri, D::Diagonal, A::$UTri, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} + α, β = _add.alpha, _add.beta + iszero(α) && return _rmul_or_fill!(C, β) + diag′ = bis0 ? nothing : diag(C) + data = mul!(C.data, D, A.data, α, β) + $Tri(_setdiag!(data, _add, D.diag, diag′)) + end + @eval _mul!(C::$Tri, A::$Tri, D::Diagonal, _add) = $Tri(mul!(C.data, A.data, D, _add.alpha, _add.beta)) + @eval function _mul!(C::$Tri, A::$UTri, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} + α, β = _add.alpha, _add.beta + iszero(α) && return _rmul_or_fill!(C, β) + diag′ = bis0 ? 
nothing : diag(C) + data = mul!(C.data, A.data, D, α, β) + $Tri(_setdiag!(data, _add, D.diag, diag′)) + end end @inline function kron!(C::AbstractMatrix, A::Diagonal, B::Diagonal) diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 380a0465028d1..1c3a9dfa676ac 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -1188,7 +1188,7 @@ end @test oneunit(D3) isa typeof(D3) end -@testset "$Tri" for (Tri, UTri) in ((UpperTriangular, UnitUpperTriangular), (LowerTriangular, UnitLowerTriangular)) +@testset "AbstractTriangular" for (Tri, UTri) in ((UpperTriangular, UnitUpperTriangular), (LowerTriangular, UnitLowerTriangular)) A = randn(4, 4) TriA = Tri(A) UTriA = UTri(A) @@ -1218,44 +1218,6 @@ end @test outTri === mul!(outTri, D, UTriA, 2, 1)::Tri == mul!(out, D, Matrix(UTriA), 2, 1) @test outTri === mul!(outTri, TriA, D, 2, 1)::Tri == mul!(out, Matrix(TriA), D, 2, 1) @test outTri === mul!(outTri, UTriA, D, 2, 1)::Tri == mul!(out, Matrix(UTriA), D, 2, 1) - - # we may write to a Unit triangular if the diagonal is preserved - ID = Diagonal(ones(size(UTriA,2))) - @test mul!(copy(UTriA), UTriA, ID) == UTriA - @test mul!(copy(UTriA), ID, UTriA) == UTriA - - @testset "partly filled parents" begin - M = Matrix{BigFloat}(undef, 2, 2) - M[1,1] = M[2,2] = 3 - isupper = Tri == UpperTriangular - M[1+!isupper, 1+isupper] = 3 - D = Diagonal(1:2) - T = Tri(M) - TA = Array(T) - @test T * D == TA * D - @test D * T == D * TA - @test mul!(copy(T), T, D, 2, 3) == 2T * D + 3T - @test mul!(copy(T), D, T, 2, 3) == 2D * T + 3T - - U = UTri(M) - UA = Array(U) - @test U * D == UA * D - @test D * U == D * UA - @test mul!(copy(T), U, D, 2, 3) == 2 * UA * D + 3TA - @test mul!(copy(T), D, U, 2, 3) == 2 * D * UA + 3TA - - M2 = Matrix{BigFloat}(undef, 2, 2) - M2[1+!isupper, 1+isupper] = 3 - U = UTri(M2) - UA = Array(U) - @test U * D == UA * D - @test D * U == D * UA - ID = Diagonal(ones(size(U,2))) - @test mul!(copy(U), U, ID) == U - @test mul!(copy(U), ID, U) == U - @test mul!(copy(U), U, ID, 2, -1) == U - @test mul!(copy(U), ID, U, 2, -1) == U - end end struct SMatrix1{T} <: AbstractArray{T,2} From 2188ba4d70a349594484c927bc7a6e71edaa5902 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 21 Oct 2024 11:57:10 -0400 Subject: [PATCH 270/537] precompile: add error for using require_stdlib during precompile (#56233) This function could accidentally add a dependency on the stdlib in the user's package, which would make it immediately stale. As pointed out to me by topolarity --- base/loading.jl | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index db6a681bb2a5b..190aca66f91ff 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1564,7 +1564,6 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} end end -precompiling_package::Bool = false loading_extension::Bool = false precompiling_extension::Bool = false function run_extension_callbacks(extid::ExtensionId) @@ -2288,11 +2287,6 @@ For more details regarding code loading, see the manual sections on [modules](@r [parallel computing](@ref code-availability). """ function require(into::Module, mod::Symbol) - if into === Base.__toplevel__ && precompiling_package - # this error type needs to match the error type compilecache throws for non-125 errors. - error("`using/import $mod` outside of a Module detected. 
Importing a package outside of a module \ - is not allowed during package precompilation.") - end if _require_world_age[] != typemax(UInt) Base.invoke_in_world(_require_world_age[], __require, into, mod) else @@ -2301,6 +2295,10 @@ function require(into::Module, mod::Symbol) end function __require(into::Module, mod::Symbol) + if into === Base.__toplevel__ && generating_output(#=incremental=#true) + error("`using/import $mod` outside of a Module detected. Importing a package outside of a module \ + is not allowed during package precompilation.") + end @lock require_lock begin LOADING_CACHE[] = LoadingCache() try @@ -2709,6 +2707,10 @@ end [2] https://github.com/JuliaLang/StyledStrings.jl/issues/91#issuecomment-2379602914 """ function require_stdlib(package_uuidkey::PkgId, ext::Union{Nothing, String}=nothing) + if generating_output(#=incremental=#true) + # Otherwise this would lead to awkward dependency issues by loading a package that isn't in the Project/Manifest + error("This interactive function requires a stdlib to be loaded, and package code should instead use it directly from that stdlib.") + end @lock require_lock begin # the PkgId of the ext, or package if not an ext this_uuidkey = ext isa String ? PkgId(uuid5(package_uuidkey.uuid, ext), ext) : package_uuidkey @@ -3048,7 +3050,6 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o:: empty!(Base.EXT_DORMITORY) # If we have a custom sysimage with `EXT_DORMITORY` prepopulated Base.track_nested_precomp($precomp_stack) Base.precompiling_extension = $(loading_extension | isext) - Base.precompiling_package = true Base.include_package_for_output($(pkg_str(pkg)), $(repr(abspath(input))), $(repr(depot_path)), $(repr(dl_load_path)), $(repr(load_path)), $deps, $(repr(source_path(nothing)))) """) From f9765410821dce0f2e5ce8a625fbdf9a52f02462 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 21 Oct 2024 22:26:09 +0530 Subject: [PATCH 271/537] Fix indexing for block triangular matrices (#56168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Assuming that block matrices contain tiled elements (i.e. all elements along a row have the same number of rows and similarly, all elements along a column have the same number of columns), we may generalize `diagzero` to accept arbitrary matrices. We may therefore use only the diagonal elements to generate the structural zeros. This was being assumed anyway in the individual methods. 
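Concretely, the size rule can be written as a tiny standalone sketch (the helper name here is made up for illustration; the actual change uses `zeroslike` in `diagonal.jl` below): the structural zero at position `(i, j)` takes its rows from the diagonal block `A[i,i]` and its columns from `A[j,j]`.

```julia
# Minimal illustration of the tiling assumption (not the Base definition):
# rows of the structural zero come from A[i,i], columns from A[j,j].
blockzero(A::AbstractMatrix{<:AbstractMatrix}, i, j) =
    zeros(eltype(eltype(A)), size(A[i,i], 1), size(A[j,j], 2))

M = reshape([ones(2,2), fill(2,4,2), fill(3,2,3), fill(4,4,3)], 2, 2)
size(blockzero(M, 2, 1)) == size(M[2,1])  # true
```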
We also now use `diagzero` in indexing triangular matrices, so the following would work correctly: ```julia julia> M = reshape([ones(2,2), fill(2,4,2), fill(3,2,3), fill(4,4,3)],2,2) 2×2 Matrix{Matrix{Float64}}: [1.0 1.0; 1.0 1.0] … [3.0 3.0 3.0; 3.0 3.0 3.0] [2.0 2.0; 2.0 2.0; 2.0 2.0; 2.0 2.0] [4.0 4.0 4.0; 4.0 4.0 4.0; 4.0 4.0 4.0; 4.0 4.0 4.0] julia> U = UpperTriangular(M) 2×2 UpperTriangular{Matrix{Float64}, Matrix{Matrix{Float64}}}: [1.0 1.0; 1.0 1.0] … [3.0 3.0 3.0; 3.0 3.0 3.0] ⋅ [4.0 4.0 4.0; 4.0 4.0 4.0; 4.0 4.0 4.0; 4.0 4.0 4.0] julia> U[2,1] 4×2 Matrix{Float64}: 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 julia> U[2,1] == zero(M[2,1]) true ``` This also changes ```julia julia> M = Matrix{Union{Int,Missing}}(missing,4,4) 4×4 Matrix{Union{Missing, Int64}}: missing missing missing missing missing missing missing missing missing missing missing missing missing missing missing missing julia> U = UpperTriangular(M) 4×4 UpperTriangular{Union{Missing, Int64}, Matrix{Union{Missing, Int64}}}: missing missing missing missing ⋅ missing missing missing ⋅ ⋅ missing missing ⋅ ⋅ ⋅ missing julia> U[3,1] # v"1.12.0-DEV.1373" missing ``` to ```julia julia> U[3,1] # this PR 0 ``` --- stdlib/LinearAlgebra/src/bidiag.jl | 11 ----------- stdlib/LinearAlgebra/src/diagonal.jl | 5 +++-- stdlib/LinearAlgebra/src/triangular.jl | 4 ++-- stdlib/LinearAlgebra/test/triangular.jl | 13 +++++++++++++ 4 files changed, 18 insertions(+), 15 deletions(-) diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index a34df37153cd2..b38a983296065 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -118,17 +118,6 @@ Bidiagonal(A::Bidiagonal) = A Bidiagonal{T}(A::Bidiagonal{T}) where {T} = A Bidiagonal{T}(A::Bidiagonal) where {T} = Bidiagonal{T}(A.dv, A.ev, A.uplo) -function diagzero(A::Bidiagonal{<:AbstractMatrix}, i, j) - Tel = eltype(A) - if i < j && A.uplo == 'U' #= top right zeros =# - return zeroslike(Tel, axes(A.ev[i], 1), axes(A.ev[j-1], 2)) - elseif j < i && A.uplo == 'L' #= bottom left zeros =# - return zeroslike(Tel, axes(A.ev[i-1], 1), axes(A.ev[j], 2)) - else - return zeroslike(Tel, axes(A.dv[i], 1), axes(A.dv[j], 2)) - end -end - _offdiagind(uplo) = uplo == 'U' ? 1 : -1 @inline function Base.isassigned(A::Bidiagonal, i::Int, j::Int) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 6e8ce96259fc1..417bcfa5715b1 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -191,8 +191,9 @@ end Return the appropriate zero element `A[i, j]` corresponding to a banded matrix `A`. """ diagzero(A::AbstractMatrix, i, j) = zero(eltype(A)) -diagzero(D::Diagonal{M}, i, j) where {M<:AbstractMatrix} = - zeroslike(M, axes(D.diag[i], 1), axes(D.diag[j], 2)) +diagzero(A::AbstractMatrix{M}, i, j) where {M<:AbstractMatrix} = + zeroslike(M, axes(A[i,i], 1), axes(A[j,j], 2)) +diagzero(A::AbstractMatrix, inds...) = diagzero(A, to_indices(A, inds)...) 
# dispatching on the axes permits specializing on the axis types to return something other than an Array zeroslike(M::Type, ax::Vararg{Union{AbstractUnitRange, Integer}}) = zeroslike(M, ax) """ diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 83ef221329d33..d6994f4b4dd58 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -233,7 +233,7 @@ Base.isstored(A::UpperOrLowerTriangular, i::Int, j::Int) = @propagate_inbounds getindex(A::Union{UnitLowerTriangular{T}, UnitUpperTriangular{T}}, i::Int, j::Int) where {T} = _shouldforwardindex(A, i, j) ? A.data[i,j] : ifelse(i == j, oneunit(T), zero(T)) @propagate_inbounds getindex(A::Union{LowerTriangular, UpperTriangular}, i::Int, j::Int) = - _shouldforwardindex(A, i, j) ? A.data[i,j] : _zero(A.data,j,i) + _shouldforwardindex(A, i, j) ? A.data[i,j] : diagzero(A,i,j) _shouldforwardindex(U::UpperTriangular, b::BandIndex) = b.band >= 0 _shouldforwardindex(U::LowerTriangular, b::BandIndex) = b.band <= 0 @@ -245,7 +245,7 @@ Base.@constprop :aggressive @propagate_inbounds function getindex(A::Union{UnitL _shouldforwardindex(A, b) ? A.data[b] : ifelse(b.band == 0, oneunit(T), zero(T)) end Base.@constprop :aggressive @propagate_inbounds function getindex(A::Union{LowerTriangular, UpperTriangular}, b::BandIndex) - _shouldforwardindex(A, b) ? A.data[b] : _zero(A.data, b) + _shouldforwardindex(A, b) ? A.data[b] : diagzero(A.data, b) end _zero_triangular_half_str(::Type{<:UpperOrUnitUpperTriangular}) = "lower" diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 2ceda735dfd0a..678827ceac720 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -1330,6 +1330,19 @@ end end end +@testset "indexing uses diagzero" begin + @testset "block matrix" begin + M = reshape([zeros(2,2), zeros(4,2), zeros(2,3), zeros(4,3)],2,2) + U = UpperTriangular(M) + @test [size(x) for x in U] == [size(x) for x in M] + end + @testset "Union eltype" begin + M = Matrix{Union{Int,Missing}}(missing,4,4) + U = UpperTriangular(M) + @test iszero(U[3,1]) + end +end + @testset "addition/subtraction of mixed triangular" begin for A in (Hermitian(rand(4, 4)), Diagonal(rand(5))) for T in (UpperTriangular, LowerTriangular, From 1ba035da42110313b84e8edf2dccd3aa9a2a5082 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Mon, 21 Oct 2024 15:16:53 -0400 Subject: [PATCH 272/537] trimming: don't abort where we used to resolve dynamic calls (#56271) This call resolution code was deleted in #56179 (rightfully so), but it should be a no-op until we implement this in inference. --- src/codegen.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index eaa3cc8176ad5..b0d5038024900 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5806,7 +5806,8 @@ static jl_cgval_t emit_call(jl_codectx_t &ctx, jl_expr_t *ex, jl_value_t *rt, bo } int failed_dispatch = !argv[0].constant; if (ctx.params->trim != JL_TRIM_NO) { - abort(); // this code path is unsound, unsafe, and probably bad + // TODO: Implement the last-minute call resolution that used to be here + // in inference instead. 
} if (failed_dispatch && trim_may_error(ctx.params->trim)) { From 08d11d041b22fe90380e56be4fb4d44aaf46ec85 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Tue, 22 Oct 2024 05:23:04 +0900 Subject: [PATCH 273/537] inference: fix inference error from constructing invalid `TypeVar` (#56264) - fixes JuliaLang/julia#56248 --- base/compiler/tfuncs.jl | 12 ++++++++++-- test/compiler/inference.jl | 8 ++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index a6b7e53c6f320..450cfdcfadf82 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -601,8 +601,16 @@ add_tfunc(svec, 0, INT_INF, @nospecs((𝕃::AbstractLattice, args...)->SimpleVec return TypeVar end end - tv = TypeVar(nval, lb, ub) - return PartialTypeVar(tv, lb_certain, ub_certain) + lb_valid = lb isa Type || lb isa TypeVar + ub_valid = ub isa Type || ub isa TypeVar + if lb_valid && ub_valid + tv = TypeVar(nval, lb, ub) + return PartialTypeVar(tv, lb_certain, ub_certain) + elseif !lb_valid && lb_certain + return Union{} + elseif !ub_valid && ub_certain + return Union{} + end end return TypeVar end diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index 71f9da04baa4a..dd62e329962c6 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -6055,3 +6055,11 @@ f55916(::Vararg{T,T}) where {T} = "2" g55916(x) = f55916(x) # this shouldn't error @test only(code_typed(g55916, (Any,); optimize=false))[2] == Int + +# JuliaLang/julia#56248 +@test Base.infer_return_type() do + TypeVar(:Issue56248, 1) +end === Union{} +@test Base.infer_return_type() do + TypeVar(:Issue56248, Any, 1) +end === Union{} From 36593fdd3a4e8112798059bb2310a39bbfcf96ed Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Mon, 21 Oct 2024 22:41:34 -0400 Subject: [PATCH 274/537] add Pkg 1.11 news to HISTORY.md (#56277) Backport already on https://github.com/JuliaLang/julia/pull/56228 --- HISTORY.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/HISTORY.md b/HISTORY.md index aa7f9f0ccdad6..c3ca212453d07 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -136,6 +136,14 @@ Standard library changes `AnnotatedString` with various faces or other attributes applied ([#49586]). #### Package Manager +* It is now possible to specify "sources" for packages in a `[sources]` section in Project.toml. + This can be used to add non-registered normal or test dependencies. +* Pkg now obeys `[compat]` bounds for `julia` and raises an error if the version of the running Julia binary is incompatible with the bounds in `Project.toml`. + Pkg has always obeyed this compat when working with Registry packages. This change affects mostly local packages +* `pkg> add` and `Pkg.add` will now add compat entries for new direct dependencies if the active environment is a + package (has a `name` and `uuid` entry). +* Dependencies can now be directly added as weak deps or extras via the `pkg> add --weak/extra Foo` or + `Pkg.add("Foo", target=:weakdeps/:extras)` forms. #### LinearAlgebra * `cbrt(::AbstractMatrix{<:Real})` is now defined and returns real-valued matrix cube roots of real-valued matrices ([#50661]). From 31f7df648f750897e245d169639cb1264ebc7404 Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Tue, 22 Oct 2024 07:36:17 +0200 Subject: [PATCH 275/537] Remove NewPM pass exports. (#56269) All ecosystem consumers have switched to the string-based API. 
--- src/codegen-stubs.c | 16 ---------------- src/jl_exported_funcs.inc | 15 --------------- src/llvm-julia-passes.inc | 30 +++++++++++++++--------------- src/llvm_api.cpp | 32 -------------------------------- src/pipeline.cpp | 22 +++++++++++----------- 5 files changed, 26 insertions(+), 89 deletions(-) diff --git a/src/codegen-stubs.c b/src/codegen-stubs.c index 7ddb68fd6b036..98ac063ba36d6 100644 --- a/src/codegen-stubs.c +++ b/src/codegen-stubs.c @@ -110,22 +110,6 @@ JL_DLLEXPORT uint64_t jl_getUnwindInfo_fallback(uint64_t dwAddr) JL_DLLEXPORT void jl_register_passbuilder_callbacks_fallback(void *PB) { } -#define MODULE_PASS(NAME, CLASS, CREATE_PASS) \ - JL_DLLEXPORT void LLVMExtraMPMAdd##CLASS##_fallback(void *PM) UNAVAILABLE -#define CGSCC_PASS(NAME, CLASS, CREATE_PASS) \ - JL_DLLEXPORT void LLVMExtraCGPMAdd##CLASS##_fallback(void *PM) UNAVAILABLE -#define FUNCTION_PASS(NAME, CLASS, CREATE_PASS) \ - JL_DLLEXPORT void LLVMExtraFPMAdd##CLASS##_fallback(void *PM) UNAVAILABLE -#define LOOP_PASS(NAME, CLASS, CREATE_PASS) \ - JL_DLLEXPORT void LLVMExtraLPMAdd##CLASS##_fallback(void *PM) UNAVAILABLE - -#include "llvm-julia-passes.inc" - -#undef MODULE_PASS -#undef CGSCC_PASS -#undef FUNCTION_PASS -#undef LOOP_PASS - //LLVM C api to the julia JIT JL_DLLEXPORT void* JLJITGetLLVMOrcExecutionSession_fallback(void* JIT) UNAVAILABLE diff --git a/src/jl_exported_funcs.inc b/src/jl_exported_funcs.inc index f712f154ed896..71a78b1c20fc7 100644 --- a/src/jl_exported_funcs.inc +++ b/src/jl_exported_funcs.inc @@ -547,21 +547,6 @@ YY(jl_getUnwindInfo) \ YY(jl_get_libllvm) \ YY(jl_register_passbuilder_callbacks) \ - YY(LLVMExtraMPMAddCPUFeaturesPass) \ - YY(LLVMExtraMPMAddRemoveNIPass) \ - YY(LLVMExtraMPMAddMultiVersioningPass) \ - YY(LLVMExtraMPMAddRemoveJuliaAddrspacesPass) \ - YY(LLVMExtraMPMAddRemoveAddrspacesPass) \ - YY(LLVMExtraMPMAddLowerPTLSPass) \ - YY(LLVMExtraFPMAddDemoteFloat16Pass) \ - YY(LLVMExtraFPMAddLateLowerGCPass) \ - YY(LLVMExtraFPMAddAllocOptPass) \ - YY(LLVMExtraFPMAddPropagateJuliaAddrspacesPass) \ - YY(LLVMExtraFPMAddLowerExcHandlersPass) \ - YY(LLVMExtraFPMAddGCInvariantVerifierPass) \ - YY(LLVMExtraFPMAddFinalLowerGCPass) \ - YY(LLVMExtraLPMAddJuliaLICMPass) \ - YY(LLVMExtraLPMAddLowerSIMDLoopPass) \ YY(JLJITGetLLVMOrcExecutionSession) \ YY(JLJITGetJuliaOJIT) \ YY(JLJITGetExternalJITDylib) \ diff --git a/src/llvm-julia-passes.inc b/src/llvm-julia-passes.inc index c41ecbba87b6a..523c9fbcd3402 100644 --- a/src/llvm-julia-passes.inc +++ b/src/llvm-julia-passes.inc @@ -1,26 +1,26 @@ //Module passes #ifdef MODULE_PASS -MODULE_PASS("CPUFeatures", CPUFeaturesPass, CPUFeaturesPass()) -MODULE_PASS("RemoveNI", RemoveNIPass, RemoveNIPass()) -MODULE_PASS("JuliaMultiVersioning", MultiVersioningPass, MultiVersioningPass()) -MODULE_PASS("RemoveJuliaAddrspaces", RemoveJuliaAddrspacesPass, RemoveJuliaAddrspacesPass()) -MODULE_PASS("RemoveAddrspaces", RemoveAddrspacesPass, RemoveAddrspacesPass()) -MODULE_PASS("LowerPTLSPass", LowerPTLSPass, LowerPTLSPass()) +MODULE_PASS("CPUFeatures", CPUFeaturesPass()) +MODULE_PASS("RemoveNI", RemoveNIPass()) +MODULE_PASS("JuliaMultiVersioning", MultiVersioningPass()) +MODULE_PASS("RemoveJuliaAddrspaces", RemoveJuliaAddrspacesPass()) +MODULE_PASS("RemoveAddrspaces", RemoveAddrspacesPass()) +MODULE_PASS("LowerPTLSPass", LowerPTLSPass()) #endif //Function passes #ifdef FUNCTION_PASS -FUNCTION_PASS("DemoteFloat16", DemoteFloat16Pass, DemoteFloat16Pass()) -FUNCTION_PASS("LateLowerGCFrame", LateLowerGCPass, LateLowerGCPass()) -FUNCTION_PASS("AllocOpt", AllocOptPass, 
AllocOptPass()) -FUNCTION_PASS("PropagateJuliaAddrspaces", PropagateJuliaAddrspacesPass, PropagateJuliaAddrspacesPass()) -FUNCTION_PASS("LowerExcHandlers", LowerExcHandlersPass, LowerExcHandlersPass()) -FUNCTION_PASS("GCInvariantVerifier", GCInvariantVerifierPass, GCInvariantVerifierPass()) -FUNCTION_PASS("FinalLowerGC", FinalLowerGCPass, FinalLowerGCPass()) +FUNCTION_PASS("DemoteFloat16", DemoteFloat16Pass()) +FUNCTION_PASS("LateLowerGCFrame", LateLowerGCPass()) +FUNCTION_PASS("AllocOpt", AllocOptPass()) +FUNCTION_PASS("PropagateJuliaAddrspaces", PropagateJuliaAddrspacesPass()) +FUNCTION_PASS("LowerExcHandlers", LowerExcHandlersPass()) +FUNCTION_PASS("GCInvariantVerifier", GCInvariantVerifierPass()) +FUNCTION_PASS("FinalLowerGC", FinalLowerGCPass()) #endif //Loop passes #ifdef LOOP_PASS -LOOP_PASS("JuliaLICM", JuliaLICMPass, JuliaLICMPass()) -LOOP_PASS("LowerSIMDLoop", LowerSIMDLoopPass, LowerSIMDLoopPass()) +LOOP_PASS("JuliaLICM", JuliaLICMPass()) +LOOP_PASS("LowerSIMDLoop", LowerSIMDLoopPass()) #endif diff --git a/src/llvm_api.cpp b/src/llvm_api.cpp index e98c375b711b3..8c48b5661f984 100644 --- a/src/llvm_api.cpp +++ b/src/llvm_api.cpp @@ -10,7 +10,6 @@ #endif #include "jitlayers.h" -#include "passes.h" #include #include @@ -58,14 +57,6 @@ DEFINE_SIMPLE_CONVERSION_FUNCTIONS(orc::IRCompileLayer, LLVMOrcIRCompileLayerRef DEFINE_SIMPLE_CONVERSION_FUNCTIONS(orc::MaterializationResponsibility, LLVMOrcMaterializationResponsibilityRef) -typedef struct LLVMOpaqueModulePassManager *LLVMModulePassManagerRef; -typedef struct LLVMOpaqueFunctionPassManager *LLVMFunctionPassManagerRef; -typedef struct LLVMOpaqueLoopPassManager *LLVMLoopPassManagerRef; - -DEFINE_SIMPLE_CONVERSION_FUNCTIONS(llvm::ModulePassManager, LLVMModulePassManagerRef) -DEFINE_SIMPLE_CONVERSION_FUNCTIONS(llvm::FunctionPassManager, LLVMFunctionPassManagerRef) -DEFINE_SIMPLE_CONVERSION_FUNCTIONS(llvm::LoopPassManager, LLVMLoopPassManagerRef) - extern "C" { JL_DLLEXPORT_CODEGEN JuliaOJITRef JLJITGetJuliaOJIT_impl(void) @@ -150,27 +141,4 @@ JLJITGetIRCompileLayer_impl(JuliaOJITRef JIT) return wrap(&unwrap(JIT)->getIRCompileLayer()); } -#define MODULE_PASS(NAME, CLASS, CREATE_PASS) \ - JL_DLLEXPORT_CODEGEN void LLVMExtraMPMAdd##CLASS##_impl(LLVMModulePassManagerRef PM) \ - { \ - unwrap(PM)->addPass(CREATE_PASS); \ - } -#define FUNCTION_PASS(NAME, CLASS, CREATE_PASS) \ - JL_DLLEXPORT_CODEGEN void LLVMExtraFPMAdd##CLASS##_impl(LLVMFunctionPassManagerRef PM) \ - { \ - unwrap(PM)->addPass(CREATE_PASS); \ - } -#define LOOP_PASS(NAME, CLASS, CREATE_PASS) \ - JL_DLLEXPORT_CODEGEN void LLVMExtraLPMAdd##CLASS##_impl(LLVMLoopPassManagerRef PM) \ - { \ - unwrap(PM)->addPass(CREATE_PASS); \ - } - -#include "llvm-julia-passes.inc" - -#undef MODULE_PASS -#undef CGSCC_PASS -#undef FUNCTION_PASS -#undef LOOP_PASS - } // extern "C" diff --git a/src/pipeline.cpp b/src/pipeline.cpp index f8935070bb001..f8976099ee53c 100644 --- a/src/pipeline.cpp +++ b/src/pipeline.cpp @@ -617,29 +617,29 @@ namespace { void adjustPIC(PassInstrumentationCallbacks &PIC) JL_NOTSAFEPOINT { //Borrowed from LLVM PassBuilder.cpp:386 -#define MODULE_PASS(NAME, CLASS, CREATE_PASS) \ +#define MODULE_PASS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); -#define MODULE_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \ +#define MODULE_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER, PARAMS) \ PIC.addClassToPassName(CLASS, NAME); #define MODULE_ANALYSIS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); -#define 
FUNCTION_PASS(NAME, CLASS, CREATE_PASS) \ +#define FUNCTION_PASS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); -#define FUNCTION_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \ +#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER, PARAMS) \ PIC.addClassToPassName(CLASS, NAME); #define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); #define LOOPNEST_PASS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); -#define LOOP_PASS(NAME, CLASS, CREATE_PASS) \ +#define LOOP_PASS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); -#define LOOP_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \ +#define LOOP_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER, PARAMS) \ PIC.addClassToPassName(CLASS, NAME); #define LOOP_ANALYSIS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); -#define CGSCC_PASS(NAME, CLASS, CREATE_PASS) \ +#define CGSCC_PASS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); -#define CGSCC_PASS_WITH_PARAMS(NAME, CLASS, CREATE_PASS, PARSER, PARAMS) \ +#define CGSCC_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER, PARAMS) \ PIC.addClassToPassName(CLASS, NAME); #define CGSCC_ANALYSIS(NAME, CREATE_PASS) \ PIC.addClassToPassName(decltype(CREATE_PASS)::name(), NAME); @@ -899,7 +899,7 @@ static void registerCallbacks(PassBuilder &PB) JL_NOTSAFEPOINT { PB.registerPipelineParsingCallback( [](StringRef Name, FunctionPassManager &PM, ArrayRef InnerPipeline) { -#define FUNCTION_PASS(NAME, CLASS, CREATE_PASS) if (Name == NAME) { PM.addPass(CREATE_PASS); return true; } +#define FUNCTION_PASS(NAME, CREATE_PASS) if (Name == NAME) { PM.addPass(CREATE_PASS); return true; } #include "llvm-julia-passes.inc" #undef FUNCTION_PASS if (Name.consume_front("GCInvariantVerifier")) { @@ -921,7 +921,7 @@ static void registerCallbacks(PassBuilder &PB) JL_NOTSAFEPOINT { PB.registerPipelineParsingCallback( [](StringRef Name, ModulePassManager &PM, ArrayRef InnerPipeline) { -#define MODULE_PASS(NAME, CLASS, CREATE_PASS) if (Name == NAME) { PM.addPass(CREATE_PASS); return true; } +#define MODULE_PASS(NAME, CREATE_PASS) if (Name == NAME) { PM.addPass(CREATE_PASS); return true; } #include "llvm-julia-passes.inc" #undef MODULE_PASS if (Name.consume_front("LowerPTLSPass")) { @@ -964,7 +964,7 @@ static void registerCallbacks(PassBuilder &PB) JL_NOTSAFEPOINT { PB.registerPipelineParsingCallback( [](StringRef Name, LoopPassManager &PM, ArrayRef InnerPipeline) { -#define LOOP_PASS(NAME, CLASS, CREATE_PASS) if (Name == NAME) { PM.addPass(CREATE_PASS); return true; } +#define LOOP_PASS(NAME, CREATE_PASS) if (Name == NAME) { PM.addPass(CREATE_PASS); return true; } #include "llvm-julia-passes.inc" #undef LOOP_PASS return false; From 7d4b2b78e66045a7249a96d195621f2b7d20c1fd Mon Sep 17 00:00:00 2001 From: Benjamin Lorenz Date: Tue, 22 Oct 2024 15:27:42 +0200 Subject: [PATCH 276/537] jitlayers: use std::make_tuple instead of tuple constructor (#56287) this should be safer for the type deduction and fixes a build error for macos on Yggdrasil (https://github.com/JuliaPackaging/Yggdrasil/pull/9660): ``` src/jitlayers.cpp:665:54: error: no viable constructor or deduction guide for deduction of template arguments of 'tuple' 665 | incompletemodules.insert(std::pair(codeinst, std::tuple(std::move(params), waiting))); ``` The Yggdrasil environment is a bit special with a rather new clang (version 17) but an old 
macos sdk and I don't know exactly in which circumstances this triggers. But I think `std::make_tuple` should be more reliable when the tuple types are not specified. cc: @fingolfin --- src/jitlayers.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index 8b8004af03616..c8d8356687dcf 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -662,7 +662,7 @@ static void jl_emit_codeinst_to_jit( int waiting = jl_analyze_workqueue(codeinst, params); if (waiting) { auto release = std::move(params.tsctx_lock); // unlock again before moving from it - incompletemodules.insert(std::pair(codeinst, std::tuple(std::move(params), waiting))); + incompletemodules.insert(std::pair(codeinst, std::make_tuple(std::move(params), waiting))); } else { finish_params(result_m.getModuleUnlocked(), params); From ab22f982427184b0a50ba407e4f1cbedbc862ced Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 22 Oct 2024 09:49:42 -0400 Subject: [PATCH 277/537] move time_imports and trace_* macros to Base but remain owned by InteractiveUtils (#56276) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This way all packages can be timed including InteractiveUtils and its deps (Base64, JuliaSyntaxHighlighting, Markdown, StyledStrings). With this PR ``` % ./julia --start=no -e "@time Base.@time_imports using REPL" 41.8 ms StyledStrings ┌ 0.1 ms JuliaSyntaxHighlighting.__init__() 14.2 ms JuliaSyntaxHighlighting 1.0 ms Base64 ┌ 0.0 ms Markdown.__init__() 9.6 ms Markdown 2.2 ms InteractiveUtils 0.3 ms Unicode ┌ 0.0 ms REPL.REPLCompletions.__init__() ├ 0.0 ms REPL.__init__() 95.7 ms REPL 0.225907 seconds (290.95 k allocations: 16.761 MiB) ``` Otherwise ``` % ./julia --start=no -e "using InteractiveUtils; @time @time_imports using REPL" 0.5 ms Unicode ┌ 0.0 ms REPL.REPLCompletions.__init__() ├ 0.1 ms REPL.__init__() 107.5 ms REPL 0.127016 seconds (164.18 k allocations: 9.199 MiB) ``` Also the `@trace_compile` and `@trace_dispatch` macros for the same reason. --- base/timing.jl | 35 +++++++++++++++++++++++++ stdlib/InteractiveUtils/src/macros.jl | 37 +++------------------------ 2 files changed, 39 insertions(+), 33 deletions(-) diff --git a/base/timing.jl b/base/timing.jl index 4880951f0a32d..1de3727756829 100644 --- a/base/timing.jl +++ b/base/timing.jl @@ -628,3 +628,38 @@ macro timed(ex) ) end end + +# Exported, documented, and tested in InteractiveUtils +# here so it's possible to time/trace all imports, including InteractiveUtils and its deps +macro time_imports(ex) + quote + try + Base.Threads.atomic_add!(Base.TIMING_IMPORTS, 1) + $(esc(ex)) + finally + Base.Threads.atomic_sub!(Base.TIMING_IMPORTS, 1) + end + end +end + +macro trace_compile(ex) + quote + try + ccall(:jl_force_trace_compile_timing_enable, Cvoid, ()) + $(esc(ex)) + finally + ccall(:jl_force_trace_compile_timing_disable, Cvoid, ()) + end + end +end + +macro trace_dispatch(ex) + quote + try + ccall(:jl_force_trace_dispatch_enable, Cvoid, ()) + $(esc(ex)) + finally + ccall(:jl_force_trace_dispatch_disable, Cvoid, ()) + end + end +end diff --git a/stdlib/InteractiveUtils/src/macros.jl b/stdlib/InteractiveUtils/src/macros.jl index 211687df47954..e338d8626fb0f 100644 --- a/stdlib/InteractiveUtils/src/macros.jl +++ b/stdlib/InteractiveUtils/src/macros.jl @@ -4,6 +4,10 @@ import Base: typesof, insert!, replace_ref_begin_end!, infer_effects +# defined in Base so it's possible to time all imports, including InteractiveUtils and its deps +# via. 
`Base.@time_imports` etc. +import Base: @time_imports, @trace_compile, @trace_dispatch + separate_kwargs(args...; kwargs...) = (args, values(kwargs)) """ @@ -245,39 +249,6 @@ macro code_lowered(ex0...) end end -macro time_imports(ex) - quote - try - Base.Threads.atomic_add!(Base.TIMING_IMPORTS, 1) - $(esc(ex)) - finally - Base.Threads.atomic_sub!(Base.TIMING_IMPORTS, 1) - end - end -end - -macro trace_compile(ex) - quote - try - ccall(:jl_force_trace_compile_timing_enable, Cvoid, ()) - $(esc(ex)) - finally - ccall(:jl_force_trace_compile_timing_disable, Cvoid, ()) - end - end -end - -macro trace_dispatch(ex) - quote - try - ccall(:jl_force_trace_dispatch_enable, Cvoid, ()) - $(esc(ex)) - finally - ccall(:jl_force_trace_dispatch_disable, Cvoid, ()) - end - end -end - """ @functionloc From 6de6b46b7e5f5438c04ced8510296e0a63507264 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Tue, 22 Oct 2024 12:07:54 -0400 Subject: [PATCH 278/537] lowering: split `finally` blocks for exceptional control-flow (#55876) This change duplicates `finally` blocks in lowered IR, so that they can have a static nesting depth in the `try-catch` hierarchy. Previously, `finally` control-flow looked like this: ``` error non-error \ / \ / | finally block | / \ / \ error non-error ``` This kind of flow is a problem, because in a couple places the compiler assumes that it can actually "color" the CFG such that there is a static nesting depth at each BasicBlock (i.e. each BasicBlock can be labeled w/ a unique enclosing `try` / `catch` scope). The above `finally` pattern violates that assumption. In an upcoming PR, I want to extend the lifetimes of our Event Handlers (`jl_handler_t`) until the end of a `catch` block (rather than the start) which noticeably breaks `llvm-lower-handlers.cpp`. (@keno was very clear about this assumption in the comments for that pass.) Behaviorally this was _mostly_ benign, except for some mis-handling of an erroring entry that turns into a non-erroring exit. That could be fixed by banning `break` and `return` within `finally` blocks or making the lowering more complicated, but this PR instead splits the `finally` block into an erroring and non-erroring path so that we can attach the `catch` handler appropriately. --- src/julia-syntax.scm | 10 +++++++--- test/exceptions.jl | 12 ++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index 4b3e6ae96898b..b48cb48bf0b79 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -4854,10 +4854,14 @@ f(x) = yt(x) ;; separate trycatch and tryfinally blocks earlier. (mark-label catch) (if finally - (begin (enter-finally-block catchcode #f) ;; enter block via exception + (begin (set! finally-handler last-finally-handler) + (set! catch-token-stack (cons handler-token catch-token-stack)) + (compile (caddr e) break-labels #f #f) ;; enter block via exception + (emit '(call (top rethrow))) + (emit-return tail '(null)) ; unreachable + (set! catch-token-stack (cdr catch-token-stack)) (mark-label endl) ;; non-exceptional control flow enters here - (set! 
finally-handler last-finally-handler) - (compile (caddr e) break-labels #f #f) + (compile (renumber-assigned-ssavalues (caddr e)) break-labels #f #f) ;; emit actions to be taken at exit of finally ;; block, depending on the tag variable `finally` (let loop ((actions (caddr my-finally-handler))) diff --git a/test/exceptions.jl b/test/exceptions.jl index eb0bbaec35090..1e52c7a2fe2c3 100644 --- a/test/exceptions.jl +++ b/test/exceptions.jl @@ -241,6 +241,18 @@ end end end)() @test length(Base.current_exceptions()) == 0 + + (()-> begin + while true + try + error("foo") + finally + break + end + end + @test length(Base.current_exceptions()) == 0 + end)() + @test length(Base.current_exceptions()) == 0 end @testset "Deep exception stacks" begin From e4101b71dbcd766b2e4f162320d1d64c0f03c6f3 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 22 Oct 2024 12:26:07 -0400 Subject: [PATCH 279/537] add recompile comment in --trace-compile in terminal color mode too (#56275) Update: Just adds the comment in color terminal mode too --- I didn't think adding the `# recompile` text to the end in the repl was a good idea as it increases likelihood of text wrapping. And the color should be sufficient for local review, but when people copy from a color terminal we lose the recompile info. So this just adds a zero-length change indicator, for people to look out for. --- src/gf.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/gf.c b/src/gf.c index e77c950c38ae4..285942cd157c5 100644 --- a/src/gf.c +++ b/src/gf.c @@ -2566,12 +2566,10 @@ static void record_precompile_statement(jl_method_instance_t *mi, double compila jl_static_show(s_precompile, mi->specTypes); jl_printf(s_precompile, ")"); if (is_recompile) { + jl_printf(s_precompile, " # recompile"); if (s_precompile == JL_STDERR && jl_options.color != JL_OPTIONS_COLOR_OFF) { jl_printf(s_precompile, "\e[0m"); } - else { - jl_printf(s_precompile, " # recompile"); - } } jl_printf(s_precompile, "\n"); if (s_precompile != JL_STDERR) From 7c1935d5bf2a7b13008a9494f626febbccd8bf7c Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 22 Oct 2024 17:32:46 -0400 Subject: [PATCH 280/537] Some usability follow-ups and fixes for the world macro (#56273) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. Allow fully qualified module names: `@world(Foo.Bar.baz, 1)` 2. Correct the printing order of the world macro and module qualifier 3. Add pretty printing for Binding(Partition). Example of the test: ``` julia> GlobalRef(Rebinding, :Foo).binding Binding Main.Rebinding.Foo 27497:∞ - undefined binding - guard entry 0:27496 - constant binding to @world(Main.Rebinding.Foo, 0:27496) ``` --------- Co-authored-by: Shuhei Kadowaki --- base/essentials.jl | 11 +++++--- base/range.jl | 6 ++--- base/show.jl | 63 ++++++++++++++++++++++++++++++++++++++++++++-- test/rebinding.jl | 6 +++++ 4 files changed, 77 insertions(+), 9 deletions(-) diff --git a/base/essentials.jl b/base/essentials.jl index a07aaa6769ed2..750ee0f9c434c 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -1258,8 +1258,8 @@ arbitrary code in fixed worlds. `world` may be `UnitRange`, in which case the ma will error unless the binding is valid and has the same value across the entire world range. -The `@world` macro is primarily used in the priniting of bindings that are no longer available -in the current world. +The `@world` macro is primarily used in the printing of bindings that are no longer +available in the current world. 
## Example ``` @@ -1283,9 +1283,12 @@ julia> fold """ macro world(sym, world) if isa(sym, Symbol) - return :($(_resolve_in_world)($world, $(QuoteNode(GlobalRef(__module__, sym))))) + return :($(_resolve_in_world)($(esc(world)), $(QuoteNode(GlobalRef(__module__, sym))))) elseif isa(sym, GlobalRef) - return :($(_resolve_in_world)($world, $(QuoteNode(sym)))) + return :($(_resolve_in_world)($(esc(world)), $(QuoteNode(sym)))) + elseif isa(sym, Expr) && sym.head === :(.) && + length(sym.args) == 2 && isa(sym.args[2], QuoteNode) && isa(sym.args[2].value, Symbol) + return :($(_resolve_in_world)($(esc(world)), $(GlobalRef)($(esc(sym.args[1])), $(sym.args[2])))) else error("`@world` requires a symbol or GlobalRef") end diff --git a/base/range.jl b/base/range.jl index 3301335785878..cee15db39b911 100644 --- a/base/range.jl +++ b/base/range.jl @@ -1685,9 +1685,9 @@ end # The rest of this is defined in essentials.jl, but UnitRange is not available function _resolve_in_world(worlds::UnitRange, gr::GlobalRef) # Validate that this binding's reference covers the entire world range - bpart = lookup_binding_partition(first(worlds), gr) - if bpart.max_world < last(world) + bpart = lookup_binding_partition(UInt(first(worlds)), gr) + if bpart.max_world < last(worlds) error("Binding does not cover the full world range") end - _resolve_in_world(last(world), gr) + _resolve_in_world(UInt(last(worlds)), gr) end diff --git a/base/show.jl b/base/show.jl index 3aeb267b4a696..93f0a58b7cdd6 100644 --- a/base/show.jl +++ b/base/show.jl @@ -1061,6 +1061,8 @@ function show_type_name(io::IO, tn::Core.TypeName) sym = (globfunc ? globname : tn.name)::Symbol globfunc && print(io, "typeof(") quo = false + world = check_world_bounded(tn) + world !== nothing && print(io, "@world(") if !(get(io, :compact, false)::Bool) # Print module prefix unless type is visible from module passed to # IOContext If :module is not set, default to Main. 
@@ -1078,8 +1080,6 @@ function show_type_name(io::IO, tn::Core.TypeName) end end end - world = check_world_bounded(tn) - world !== nothing && print(io, "@world(") show_sym(io, sym) world !== nothing && print(io, ", ", world, ")") quo && print(io, ")") @@ -3359,3 +3359,62 @@ end function show(io::IO, ::MIME"text/plain", oc::Core.OpaqueClosure{A, R}) where {A, R} show(io, oc) end + +# printing bindings and partitions +function print_partition(io::IO, partition::Core.BindingPartition) + print(io, partition.min_world) + print(io, ":") + max_world = @atomic partition.max_world + if max_world == typemax(UInt) + print(io, '∞') + else + print(io, max_world) + end + print(io, " - ") + kind = binding_kind(partition) + if is_some_const_binding(kind) + print(io, "constant binding to ") + print(io, partition_restriction(partition)) + elseif kind == BINDING_KIND_GUARD + print(io, "undefined binding - guard entry") + elseif kind == BINDING_KIND_FAILED + print(io, "ambiguous binding - guard entry") + elseif kind == BINDING_KIND_DECLARED + print(io, "undefined, but declared using `global` - guard entry") + elseif kind == BINDING_KIND_IMPLICIT + print(io, "implicit `using` from ") + print(io, partition_restriction(partition)) + elseif kind == BINDING_KIND_EXPLICIT + print(io, "explicit `using` from ") + print(io, partition_restriction(partition)) + elseif kind == BINDING_KIND_IMPORTED + print(io, "explicit `import` from ") + print(io, partition_restriction(partition)) + else + @assert kind == BINDING_KIND_GLOBAL + print(io, "global variable with type ") + print(io, partition_restriction(partition)) + end +end + +function show(io::IO, ::MIME"text/plain", partition::Core.BindingPartition) + print(io, "BindingPartition ") + print_partition(io, partition) +end + +function show(io::IO, ::MIME"text/plain", bnd::Core.Binding) + print(io, "Binding ") + print(io, bnd.globalref) + if !isdefined(bnd, :partitions) + print(io, "No partitions") + else + partition = @atomic bnd.partitions + while true + println(io) + print(io, " ") + print_partition(io, partition) + isdefined(partition, :next) || break + partition = @atomic partition.next + end + end +end diff --git a/test/rebinding.jl b/test/rebinding.jl index 4066d91bc4b9b..564be70e44913 100644 --- a/test/rebinding.jl +++ b/test/rebinding.jl @@ -7,6 +7,7 @@ module Rebinding struct Foo x::Int end + const defined_world_age = Base.tls_world_age() x = Foo(1) @test Base.binding_kind(@__MODULE__, :Foo) == Base.BINDING_KIND_CONST @@ -15,4 +16,9 @@ module Rebinding @test Base.binding_kind(@__MODULE__, :Foo) == Base.BINDING_KIND_GUARD @test contains(repr(x), "@world") + + # Tests for @world syntax + @test Base.@world(Foo, defined_world_age) == typeof(x) + @test Base.@world(Rebinding.Foo, defined_world_age) == typeof(x) + @test Base.@world((@__MODULE__).Foo, defined_world_age) == typeof(x) end From 049d92a2ac506316ca2413e103647f72ce847b56 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Tue, 22 Oct 2024 18:14:39 -0400 Subject: [PATCH 281/537] REPL: Don't search for ?( completions when hinting (#56278) --- stdlib/REPL/src/REPLCompletions.jl | 50 ++++++++++++++++-------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index 42480aea91605..3188a6ca42a12 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -1225,33 +1225,35 @@ function completions(string::String, pos::Int, context_module::Module=Main, shif partial = string[1:pos] inc_tag = 
Base.incomplete_tag(Meta.parse(partial, raise=false, depwarn=false)) - # ?(x, y)TAB lists methods you can call with these objects - # ?(x, y TAB lists methods that take these objects as the first two arguments - # MyModule.?(x, y)TAB restricts the search to names in MyModule - rexm = match(r"(\w+\.|)\?\((.*)$", partial) - if rexm !== nothing - # Get the module scope - if isempty(rexm.captures[1]) - callee_module = context_module - else - modname = Symbol(rexm.captures[1][1:end-1]) - if isdefined(context_module, modname) - callee_module = getfield(context_module, modname) - if !isa(callee_module, Module) + if !hint # require a tab press for completion of these + # ?(x, y)TAB lists methods you can call with these objects + # ?(x, y TAB lists methods that take these objects as the first two arguments + # MyModule.?(x, y)TAB restricts the search to names in MyModule + rexm = match(r"(\w+\.|)\?\((.*)$", partial) + if rexm !== nothing + # Get the module scope + if isempty(rexm.captures[1]) + callee_module = context_module + else + modname = Symbol(rexm.captures[1][1:end-1]) + if isdefined(context_module, modname) + callee_module = getfield(context_module, modname) + if !isa(callee_module, Module) + callee_module = context_module + end + else callee_module = context_module end - else - callee_module = context_module end - end - moreargs = !endswith(rexm.captures[2], ')') - callstr = "_(" * rexm.captures[2] - if moreargs - callstr *= ')' - end - ex_org = Meta.parse(callstr, raise=false, depwarn=false) - if isa(ex_org, Expr) - return complete_any_methods(ex_org, callee_module::Module, context_module, moreargs, shift), (0:length(rexm.captures[1])+1) .+ rexm.offset, false + moreargs = !endswith(rexm.captures[2], ')') + callstr = "_(" * rexm.captures[2] + if moreargs + callstr *= ')' + end + ex_org = Meta.parse(callstr, raise=false, depwarn=false) + if isa(ex_org, Expr) + return complete_any_methods(ex_org, callee_module::Module, context_module, moreargs, shift), (0:length(rexm.captures[1])+1) .+ rexm.offset, false + end end end From 73b85cfc04d83cb4b630dbd36ad2c270cf548330 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 22 Oct 2024 21:12:50 -0400 Subject: [PATCH 282/537] Use a curried helper for module-local eval/include (#55949) In https://github.com/JuliaLang/julia/pull/55466, the automatically added `include` method for non-bare modules was adjusted to conform the signature to the version of those methods in Main (defined in sysimg.jl, since `Main` is technically a bare module). Unfortunately, this broke some downstream packages which overload Base.include with additional functionality, because of the additional type restriction. The motivation in https://github.com/JuliaLang/julia/pull/55466 was to give a slightly nicer MethodError. While I don't think this is per se a particularly strong justification, I do agree that it's awkward for the `Main` version of these functions to have (marginally) different behavior than the version of these functions that gets introduced automatically in new modules (which has been the case ever since [1], which added the AbstractString restriction in `Main`, but not in the auto-generated versions). This is particularly true, because we use the `Main` version to document the auto-introduction of these methods, which has regularly been a point of confusion. This PR tries to address this problem once and for all, by just not generating special methods into every new module.
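The shape of the replacement is easiest to see as a small self-contained sketch (the names mirror the `IncludeInto`/`EvalInto` helpers added in the diff below; this is an illustration of the pattern, not the exact Base definitions):

```julia
# A callable struct that closes over the module: every new module gets a
# constant binding to an *instance* instead of its own generic function.
struct IncludeInto <: Function
    m::Module
end
(inc::IncludeInto)(fname::AbstractString) = Base.include(inc.m, fname)

module M end
const my_include = IncludeInto(M)
# `my_include("code.jl")` now behaves like the `include` that used to be
# auto-generated inside `module M ... end`.
```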
Instead, there are curried helpers for eval and include in Core and Base (respectively), which can be added to a module simply by doing `const include = IncludeInto(MyModule)` (and similarly for `eval`). As before, this happens automatically for non-bare modules. It thus conforms the behavior of the `Main` version of these functions and the auto-generated versions by construction. Additionally, it saves us having to generate all the additional code/types/objects, etc associated with having extra generic functions in each new module. The impact of this isn't huge, because there aren't that many modules, but it feels conceptually nicer. There is a little bit of extra work in this PR because we have special snowflake backtrace printing code for the `include` machinery, which needs adjusting, but other than that the change is straightforward. [1] https://github.com/JuliaLang/julia/commit/957848b899c7b5389af34cf815aa7bd2b6e2bf82 --------- Co-authored-by: Jameson Nash --- base/Base.jl | 14 ++++++++++++-- base/boot.jl | 8 ++++++-- base/docs/basedocs.jl | 2 +- base/errorshow.jl | 5 ++++- base/loading.jl | 8 ++++---- base/show.jl | 19 +++++++++++++++++++ base/sysimg.jl | 8 ++------ src/jlfrontend.scm | 22 ---------------------- src/toplevel.c | 14 ++++++++++---- test/docs.jl | 2 +- test/reflection.jl | 4 ++-- 11 files changed, 61 insertions(+), 45 deletions(-) diff --git a/base/Base.jl b/base/Base.jl index 5fb764bd4cc01..bfac74e5d7bab 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -25,6 +25,11 @@ function include(mod::Module, path::String) end include(path::String) = include(Base, path) +struct IncludeInto <: Function + m::Module +end +(this::IncludeInto)(fname::AbstractString) = include(this.m, fname) + # from now on, this is now a top-module for resolving syntax const is_primary_base_module = ccall(:jl_module_parent, Ref{Module}, (Any,), Base) === Core.Main ccall(:jl_set_istopmod, Cvoid, (Any, Bool), Base, is_primary_base_module) @@ -572,6 +577,9 @@ include("precompilation.jl") for m in methods(include) delete_method(m) end +for m in methods(IncludeInto(Base)) + delete_method(m) +end # This method is here only to be overwritten during the test suite to test # various sysimg related invalidation scenarios. @@ -579,8 +587,10 @@ a_method_to_overwrite_in_test() = inferencebarrier(1) # These functions are duplicated in client.jl/include(::String) for # nicer stacktraces. 
Modifications here have to be backported there -include(mod::Module, _path::AbstractString) = _include(identity, mod, _path) -include(mapexpr::Function, mod::Module, _path::AbstractString) = _include(mapexpr, mod, _path) +@noinline include(mod::Module, _path::AbstractString) = _include(identity, mod, _path) +@noinline include(mapexpr::Function, mod::Module, _path::AbstractString) = _include(mapexpr, mod, _path) +(this::IncludeInto)(fname::AbstractString) = include(identity, this.m, fname) +(this::IncludeInto)(mapexpr::Function, fname::AbstractString) = include(mapexpr, this.m, fname) # External libraries vendored into Base Core.println("JuliaSyntax/src/JuliaSyntax.jl") diff --git a/base/boot.jl b/base/boot.jl index 861c83a2edac5..ed3e22391f215 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -454,9 +454,13 @@ Nothing() = nothing # This should always be inlined getptls() = ccall(:jl_get_ptls_states, Ptr{Cvoid}, ()) -include(m::Module, fname::String) = ccall(:jl_load_, Any, (Any, Any), m, fname) +include(m::Module, fname::String) = (@noinline; ccall(:jl_load_, Any, (Any, Any), m, fname)) +eval(m::Module, @nospecialize(e)) = (@noinline; ccall(:jl_toplevel_eval_in, Any, (Any, Any), m, e)) -eval(m::Module, @nospecialize(e)) = ccall(:jl_toplevel_eval_in, Any, (Any, Any), m, e) +struct EvalInto <: Function + m::Module +end +(this::EvalInto)(@nospecialize(e)) = eval(this.m, e) mutable struct Box contents::Any diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index 0d5d5ac00e8d0..b080bf51e5e98 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -2580,7 +2580,7 @@ cases. See also [`setproperty!`](@ref Base.setproperty!) and [`getglobal`](@ref) # Examples -```jldoctest; filter = r"Stacktrace:(\\n \\[[0-9]+\\].*)*" +```jldoctest; filter = r"Stacktrace:(\\n \\[[0-9]+\\].*\\n.*)*" julia> module M; global a; end; julia> M.a # same as `getglobal(M, :a)` diff --git a/base/errorshow.jl b/base/errorshow.jl index 20bdee1de6ec0..7225a024f529e 100644 --- a/base/errorshow.jl +++ b/base/errorshow.jl @@ -850,7 +850,10 @@ function _simplify_include_frames(trace) for i in length(trace):-1:1 frame::StackFrame, _ = trace[i] mod = parentmodule(frame) - if first_ignored === nothing + if mod === Base && frame.func === :IncludeInto || + mod === Core && frame.func === :EvalInto + kept_frames[i] = false + elseif first_ignored === nothing if mod === Base && frame.func === :_include # Hide include() machinery by default first_ignored = i diff --git a/base/loading.jl b/base/loading.jl index dfcf22b2b0751..69bb332193519 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -2890,12 +2890,12 @@ julia> rm("testfile.jl") ``` """ function evalfile(path::AbstractString, args::Vector{String}=String[]) - return Core.eval(Module(:__anon__), + m = Module(:__anon__) + return Core.eval(m, Expr(:toplevel, :(const ARGS = $args), - :(eval(x) = $(Expr(:core, :eval))(__anon__, x)), - :(include(x::AbstractString) = $(Expr(:top, :include))(__anon__, x)), - :(include(mapexpr::Function, x::AbstractString) = $(Expr(:top, :include))(mapexpr, __anon__, x)), + :(const include = $(Base.IncludeInto(m))), + :(const eval = $(Core.EvalInto(m))), :(include($path)))) end evalfile(path::AbstractString, args::Vector) = evalfile(path, String[args...]) diff --git a/base/show.jl b/base/show.jl index 93f0a58b7cdd6..ee467ae90ff50 100644 --- a/base/show.jl +++ b/base/show.jl @@ -3418,3 +3418,22 @@ function show(io::IO, ::MIME"text/plain", bnd::Core.Binding) end end end + +# Special pretty printing for EvalInto/IncludeInto +function show(io::IO, 
ii::IncludeInto) + if getglobal(ii.m, :include) === ii + print(io, ii.m) + print(io, ".include") + else + show_default(io, ii) + end +end + +function show(io::IO, ei::Core.EvalInto) + if getglobal(ei.m, :eval) === ei + print(io, ei.m) + print(io, ".eval") + else + show_default(io, ei) + end +end diff --git a/base/sysimg.jl b/base/sysimg.jl index 966ed76751f28..ccc8ef38e81bc 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -32,11 +32,7 @@ Use [`Base.include`](@ref) to evaluate a file into another module. !!! compat "Julia 1.5" Julia 1.5 is required for passing the `mapexpr` argument. """ -include(mapexpr::Function, fname::AbstractString) = Base._include(mapexpr, Main, fname) -function include(fname::AbstractString) - isa(fname, String) || (fname = Base.convert(String, fname)::String) - Base._include(identity, Main, fname) -end +const include = Base.IncludeInto(Main) """ eval(expr) @@ -45,7 +41,7 @@ Evaluate an expression in the global scope of the containing module. Every `Module` (except those defined with `baremodule`) has its own 1-argument definition of `eval`, which evaluates expressions in that module. """ -eval(x) = Core.eval(Main, x) +const eval = Core.EvalInto(Main) # Ensure this file is also tracked pushfirst!(Base._included_files, (@__MODULE__, abspath(@__FILE__))) diff --git a/src/jlfrontend.scm b/src/jlfrontend.scm index 463e39c41d00a..808af18ebfdbd 100644 --- a/src/jlfrontend.scm +++ b/src/jlfrontend.scm @@ -199,28 +199,6 @@ (error-wrap (lambda () (julia-expand-macroscope expr)))) -;; construct default definitions of `eval` for non-bare modules -;; called by jl_eval_module_expr -(define (module-default-defs name file line) - (jl-expand-to-thunk - (let* ((loc (if (and (eq? file 'none) (eq? line 0)) '() `((line ,line ,file)))) - (x (if (eq? name 'x) 'y 'x)) - (mex (if (eq? name 'mapexpr) 'map_expr 'mapexpr))) - `(block - (= (call eval ,x) - (block - ,@loc - (call (core eval) ,name ,x))) - (= (call include (:: ,x (top AbstractString))) - (block - ,@loc - (call (core _call_latest) (top include) ,name ,x))) - (= (call include (:: ,mex (top Function)) (:: ,x (top AbstractString))) - (block - ,@loc - (call (core _call_latest) (top include) ,mex ,name ,x))))) - file line)) - ; run whole frontend on a string. useful for testing. 
(define (fe str) (expand-toplevel-expr (julia-parse str) 'none 0)) diff --git a/src/toplevel.c b/src/toplevel.c index 6f2e0cf77568a..c2fbc38d067eb 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -206,11 +206,17 @@ static jl_value_t *jl_eval_module_expr(jl_module_t *parent_module, jl_expr_t *ex if (std_imports) { if (jl_base_module != NULL) { jl_add_standard_imports(newm); + jl_datatype_t *include_into = (jl_datatype_t *)jl_get_global(jl_base_module, jl_symbol("IncludeInto")); + if (include_into) { + form = jl_new_struct(include_into, newm); + jl_set_const(newm, jl_symbol("include"), form); + } + } + jl_datatype_t *eval_into = (jl_datatype_t *)jl_get_global(jl_core_module, jl_symbol("EvalInto")); + if (eval_into) { + form = jl_new_struct(eval_into, newm); + jl_set_const(newm, jl_symbol("eval"), form); } - // add `eval` function - form = jl_call_scm_on_ast_and_loc("module-default-defs", (jl_value_t*)name, newm, filename, lineno); - jl_toplevel_eval_flex(newm, form, 0, 1, &filename, &lineno); - form = NULL; } newm->file = jl_symbol(filename); diff --git a/test/docs.jl b/test/docs.jl index 92d45fe05e397..8db9db30b8463 100644 --- a/test/docs.jl +++ b/test/docs.jl @@ -101,7 +101,7 @@ end @test Docs.undocumented_names(_ModuleWithUndocumentedNames) == [Symbol("@foo"), :f, :⨳] @test isempty(Docs.undocumented_names(_ModuleWithSomeDocumentedNames)) -@test Docs.undocumented_names(_ModuleWithSomeDocumentedNames; private=true) == [:eval, :g, :include] +@test Docs.undocumented_names(_ModuleWithSomeDocumentedNames; private=true) == [:g] # issue #11548 diff --git a/test/reflection.jl b/test/reflection.jl index 634390e0680d1..8c701acb9c09d 100644 --- a/test/reflection.jl +++ b/test/reflection.jl @@ -179,7 +179,7 @@ let @test Base.binding_module(TestMod7648.TestModSub9475, :b9475) == TestMod7648.TestModSub9475 defaultset = Set(Symbol[:Foo7648, :TestMod7648, :a9475, :c7648, :f9475, :foo7648, :foo7648_nomethods]) allset = defaultset ∪ Set(Symbol[ - Symbol("#eval"), Symbol("#foo7648"), Symbol("#foo7648_nomethods"), Symbol("#include"), + Symbol("#foo7648"), Symbol("#foo7648_nomethods"), :TestModSub9475, :d7648, :eval, :f7648, :include]) imported = Set(Symbol[:convert, :curmod_name, :curmod]) usings_from_Test = Set(Symbol[ @@ -265,7 +265,7 @@ let defaultset = Set((:A,)) imported = Set((:M2,)) usings_from_Base = delete!(Set(names(Module(); usings=true)), :anonymous) # the name of the anonymous module itself usings = Set((:A, :f, :C, :y, :M1, :m1_x)) ∪ usings_from_Base - allset = Set((:A, :B, :C, :eval, :include, Symbol("#eval"), Symbol("#include"))) + allset = Set((:A, :B, :C, :eval, :include)) @test Set(names(TestMod54609.A)) == defaultset @test Set(names(TestMod54609.A, imported=true)) == defaultset ∪ imported @test Set(names(TestMod54609.A, usings=true)) == defaultset ∪ usings From be0ce9dbf0597c1ff50fc73f3d197a19708c4cd3 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Wed, 23 Oct 2024 01:14:02 -0400 Subject: [PATCH 283/537] Don't try to allocate new binding partitions from static show (#56298) In particular static show is used inside the GC for profiling, which showed up as a segfault on CI, e.g. in https://buildkite.com/julialang/julia-master/builds/41407#0192b628-47f3-49f9-a081-cd2708eb6121. 
GC check didn't catch it because that file is explicitly exempt: https://github.com/JuliaLang/julia/blob/master/src/Makefile#L504 --- src/julia.h | 1 + src/module.c | 21 +++++++++++++++++++++ src/rtutils.c | 2 +- 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/src/julia.h b/src/julia.h index dd79dbb82c28d..1d36dba519700 100644 --- a/src/julia.h +++ b/src/julia.h @@ -1848,6 +1848,7 @@ JL_DLLEXPORT jl_sym_t *jl_tagged_gensym(const char *str, size_t len); JL_DLLEXPORT jl_sym_t *jl_get_root_symbol(void); JL_DLLEXPORT jl_value_t *jl_get_binding_value(jl_binding_t *b JL_PROPAGATES_ROOT); JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_const(jl_binding_t *b JL_PROPAGATES_ROOT); +JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved_and_const(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_value_t *jl_declare_const_gf(jl_binding_t *b, jl_module_t *mod, jl_sym_t *name); JL_DLLEXPORT jl_method_t *jl_method_def(jl_svec_t *argdata, jl_methtable_t *mt, jl_code_info_t *f, jl_module_t *module); JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *linfo, size_t world, jl_code_instance_t **cache); diff --git a/src/module.c b/src/module.c index f1098e22ff522..9b4d26cc7b000 100644 --- a/src/module.c +++ b/src/module.c @@ -48,6 +48,7 @@ jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) return bpart; jl_binding_partition_t *new_bpart = new_binding_partition(); jl_atomic_store_relaxed(&new_bpart->next, bpart); + jl_gc_wb_fresh(new_bpart, bpart); if (bpart) new_bpart->min_world = jl_atomic_load_relaxed(&bpart->max_world) + 1; jl_atomic_store_relaxed(&new_bpart->max_world, max_world); @@ -350,6 +351,26 @@ JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_const(jl_binding_t *b) return decode_restriction_value(pku); } +JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved_and_const(jl_binding_t *b) +{ + // Unlike jl_get_binding_value_if_const this doesn't try to allocate new binding partitions if they + // don't already exist, making this JL_NOTSAFEPOINT. + if (!b) + return NULL; + jl_binding_partition_t *bpart = jl_atomic_load_relaxed(&b->partitions); + if (!bpart) + return NULL; + size_t max_world = jl_atomic_load_relaxed(&bpart->max_world); + if (bpart->min_world > jl_current_task->world_age || jl_current_task->world_age > max_world) + return NULL; + jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); + if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) + return NULL; + if (!jl_bkind_is_some_constant(decode_restriction_kind(pku))) + return NULL; + return decode_restriction_value(pku); +} + JL_DLLEXPORT jl_value_t *jl_bpart_get_restriction_value(jl_binding_partition_t *bpart) { jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); diff --git a/src/rtutils.c b/src/rtutils.c index 85a9be5e0b1da..faa087dcb077d 100644 --- a/src/rtutils.c +++ b/src/rtutils.c @@ -661,7 +661,7 @@ static int is_globname_binding(jl_value_t *v, jl_datatype_t *dv) JL_NOTSAFEPOINT jl_sym_t *globname = dv->name->mt != NULL ? dv->name->mt->name : NULL; if (globname && dv->name->module) { jl_binding_t *b = jl_get_module_binding(dv->name->module, globname, 0); - jl_value_t *bv = jl_get_binding_value_if_const(b); + jl_value_t *bv = jl_get_binding_value_if_resolved_and_const(b); // The `||` makes this function work for both function instances and function types. 
if (bv && (bv == v || jl_typeof(bv) == v)) return 1; From 5e4fb519b5955b2f9ff9a26bd1dc5454561ecef7 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Wed, 23 Oct 2024 15:26:38 +0530 Subject: [PATCH 284/537] Fix fetching parent in symmetric algebra (#56286) We only need the `parent` of the result if it is a triangular matrix. For other structurally triangular matrices, such as `Diagonal`s, we may use these directly in the `Hermitian` constructor. The operation proceeds as (assuming `H isa Hermitian` with `H.uplo == 'U'`): ```julia function +(H::Hermitian, H::Hermitian) U = uppertriangular(parent(H)) Ures = U + U data = Ures isa UpperTriangular ? parent(Ures) : Ures # this PR Hermitian(data, :U) end ``` This accounts for the fact that `Ures` may not be an `UpperTriangular`, as `uppertriangular` might be specialized by the parent. In such cases we should not extract the parent. Currently, only `Diagonal` specializes `uppertriangular`, so this issue only exists for a `Hermitian` or a `Symmetric` that wraps a `Diagonal`. Fixes https://github.com/JuliaLang/julia/issues/56283 --- stdlib/LinearAlgebra/src/symmetric.jl | 4 +++- stdlib/LinearAlgebra/test/symmetric.jl | 7 +++++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index e17eb80d25453..265995d9e7806 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -307,7 +307,9 @@ function applytri(f, A::HermOrSym, B::HermOrSym) f(uppertriangular(_conjugation(A)(A.data)), uppertriangular(B.data)) end end -parentof_applytri(f, args...) = applytri(parent ∘ f, args...) +_parent_tri(U::UpperOrLowerTriangular) = parent(U) +_parent_tri(U) = U +parentof_applytri(f, args...) = _parent_tri(applytri(f, args...)) isdiag(A::HermOrSym) = applytri(isdiag, A) diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 7a51ab9d454af..3aef23617b942 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -1160,4 +1160,11 @@ end @test symT-s == Array(symT) - Array(s) end +@testset "issue #56283" begin + a = 1.0 + D = Diagonal(randn(10)) + H = Hermitian(D*D') + @test a*H == H +end + end # module TestSymmetric From 133051f20f5995d4128f9c7973efff62ed25e919 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Wed, 23 Oct 2024 07:59:06 -0400 Subject: [PATCH 285/537] REPL: fix closing quote on completing files in a ~ path (#56253) --- stdlib/REPL/src/REPLCompletions.jl | 18 +++++++----------- stdlib/REPL/test/replcompletions.jl | 27 ++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 12 deletions(-) diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index 3188a6ca42a12..d230b7b5fd232 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -935,17 +935,11 @@ function get_import_mode(s::String) return nothing end -function close_path_completion(dir, paths, str, pos) - length(paths) == 1 || return false # Only close if there's a single choice... - path = (paths[1]::PathCompletion).path +function close_path_completion(dir, path, str, pos) path = unescape_string(replace(path, "\\\$"=>"\$")) path = joinpath(dir, path) # ...except if it's a directory... - try - isdir(path) - catch e - e isa Base.IOError || rethrow() # `path` cannot be determined to be a file - end && return false + Base.isaccessibledir(path) && return false # ...and except if there's already a " at the cursor. 
return lastindex(str) <= pos || str[nextind(str, pos)] != '"' end @@ -1358,10 +1352,12 @@ function completions(string::String, pos::Int, context_module::Module=Main, shif if !isnothing(path) paths, dir, success = complete_path(path::String, string_escape=true) - if close_path_completion(dir, paths, path, pos) - p = (paths[1]::PathCompletion).path * "\"" + if length(paths) == 1 + p = (paths[1]::PathCompletion).path hint && was_expanded && (p = contractuser(p)) - paths[1] = PathCompletion(p) + if close_path_completion(dir, p, path, pos) + paths[1] = PathCompletion(p * "\"") + end end if success && !isempty(dir) diff --git a/stdlib/REPL/test/replcompletions.jl b/stdlib/REPL/test/replcompletions.jl index cfb9a6137a287..4fe32f47bc80c 100644 --- a/stdlib/REPL/test/replcompletions.jl +++ b/stdlib/REPL/test/replcompletions.jl @@ -1236,7 +1236,7 @@ let current_dir, forbidden e isa Base.IOError && occursin("ELOOP", e.msg) end c, r = test_complete("\"$(escape_string(path))/selfsym") - @test c == ["selfsymlink"] + @test c == ["selfsymlink\""] end end @@ -1357,6 +1357,31 @@ let (c, r, res) = test_complete("\"~/julia") c, r, res = test_complete("\"foo~bar") @test !res end +if !Sys.iswindows() + # create a dir and file temporarily in the home directory + path = mkpath(joinpath(homedir(), "Zx6Wa0GkC0")) + touch(joinpath(path, "my_file")) + try + let (c, r, res) = test_complete("\"~/Zx6Wa0GkC") + @test res + @test c == String["Zx6Wa0GkC0/"] + end + let (c, r, res) = test_complete("\"~/Zx6Wa0GkC0") + @test res + @test c == String[homedir() * "/Zx6Wa0GkC0"] + end + let (c, r, res) = test_complete("\"~/Zx6Wa0GkC0/my_") + @test res + @test c == String["my_file\""] + end + let (c, r, res) = test_complete("\"~/Zx6Wa0GkC0/my_file") + @test res + @test c == String[homedir() * "/Zx6Wa0GkC0/my_file"] + end + finally + rm(path, recursive=true) + end +end # Test the completion returns nothing when the folder do not exist let (c, r) = test_complete("cd(\"folder_do_not_exist_77/file") From 0b9fcb5db93e1fdd2ec0f319a8cd155b9f9508b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=B8=8C=E7=90=86?= Date: Wed, 23 Oct 2024 07:17:08 -0700 Subject: [PATCH 286/537] Implement faster `issubset` for `CartesianIndices{N}` (#56282) Co-authored-by: xili --- base/multidimensional.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/base/multidimensional.jl b/base/multidimensional.jl index 99f41f2404e47..c82f1c1ba75d7 100644 --- a/base/multidimensional.jl +++ b/base/multidimensional.jl @@ -615,6 +615,8 @@ module IteratorsMD # array operations Base.intersect(a::CartesianIndices{N}, b::CartesianIndices{N}) where N = CartesianIndices(intersect.(a.indices, b.indices)) + Base.issubset(a::CartesianIndices{N}, b::CartesianIndices{N}) where N = + isempty(a) || all(map(issubset, a.indices, b.indices)) # Views of reshaped CartesianIndices are used for partitions — ensure these are fast const CartesianPartition{T<:CartesianIndex, P<:CartesianIndices, R<:ReshapedArray{T,1,P}} = SubArray{T,1,R,<:Tuple{AbstractUnitRange{Int}},false} From 6c70bf784999b478bc176fe594738f9746a1dcfd Mon Sep 17 00:00:00 2001 From: Arno Strouwen Date: Wed, 23 Oct 2024 16:18:58 +0200 Subject: [PATCH 287/537] Improve doc example: Extracting the type parameter from a super-type (#55983) Documentation describes the correct way of extracting the element type of a supertype: https://docs.julialang.org/en/v1/manual/methods/#Extracting-the-type-parameter-from-a-super-type However, one of the examples to showcase this is nonsensical since it is a union of multiple element types. 
I have replaced this example with a union over the dimension. Now, the `eltype_wrong` function still gives a similar error, yet the correct way returns the unambiguous answer. --------- Co-authored-by: Lilith Orion Hafner --- doc/src/manual/methods.md | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/doc/src/manual/methods.md b/doc/src/manual/methods.md index 6be44dcf4fa13..3c234b17f10d8 100644 --- a/doc/src/manual/methods.md +++ b/doc/src/manual/methods.md @@ -698,11 +698,14 @@ While this works for declared types, it fails for types without supertypes: ```julia-repl -julia> eltype_wrong(Union{AbstractArray{Int}, AbstractArray{Float64}}) -ERROR: MethodError: no method matching supertype(::Type{Union{AbstractArray{Float64,N} where N, AbstractArray{Int64,N} where N}}) +julia> eltype_wrong(Union{Vector{Int}, Matrix{Int}}) +ERROR: MethodError: no method matching supertype(::Type{VecOrMat{Int64}}) + Closest candidates are: - supertype(::DataType) at operators.jl:43 - supertype(::UnionAll) at operators.jl:48 + supertype(::UnionAll) + @ Base operators.jl:44 + supertype(::DataType) + @ Base operators.jl:43 ``` ### Building a similar type with a different type parameter From 005608af2e134b274eeb1224b610b3004e2a832f Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Wed, 23 Oct 2024 18:46:31 +0200 Subject: [PATCH 288/537] llvmpasses: force vector width for compatibility with non-x86 hosts. (#56300) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The pipeline-prints test currently fails when running on an aarch64-macos device: ``` /Users/tim/Julia/src/julia/test/llvmpasses/pipeline-prints.ll:309:23: error: AFTERVECTORIZATION: expected string not found in input ; AFTERVECTORIZATION: vector.body ^ :2:40: note: scanning from here ; *** IR Dump Before AfterVectorizationMarkerPass on julia_f_199 *** ^ :47:27: note: possible intended match here ; *** IR Dump Before AfterVectorizationMarkerPass on jfptr_f_200 *** ^ Input file: Check file: /Users/tim/Julia/src/julia/test/llvmpasses/pipeline-prints.ll -dump-input=help explains the following input dump. Input was: <<<<<< 1: opt: WARNING: failed to create target machine for 'x86_64-unknown-linux-gnu': unable to get target for 'x86_64-unknown-linux-gnu', see --version and --triple. 2: ; *** IR Dump Before AfterVectorizationMarkerPass on julia_f_199 *** check:309'0 X~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ error: no match found 3: define i64 @julia_f_199(ptr addrspace(10) noundef nonnull align 16 dereferenceable(40) %0) #0 !dbg !4 { check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 4: top: check:309'0 ~~~~~ 5: %1 = call ptr @julia.get_pgcstack() check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 6: %ptls_field = getelementptr inbounds ptr, ptr %1, i64 2 check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 7: %ptls_load45 = load ptr, ptr %ptls_field, align 8, !tbaa !8 check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ . . . 
42: check:309'0 ~ 43: L41: ; preds = %L41.loopexit, %L17, %top check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 44: %value_phi10 = phi i64 [ 0, %top ], [ %7, %L17 ], [ %.lcssa, %L41.loopexit ] check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 45: ret i64 %value_phi10, !dbg !52 check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 46: } check:309'0 ~~ 47: ; *** IR Dump Before AfterVectorizationMarkerPass on jfptr_f_200 *** check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ check:309'1 ? possible intended match 48: ; Function Attrs: noinline optnone check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 49: define nonnull ptr addrspace(10) @jfptr_f_200(ptr addrspace(10) %0, ptr noalias nocapture noundef readonly %1, i32 %2) #1 { check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 50: top: check:309'0 ~~~~~ 51: %3 = call ptr @julia.get_pgcstack() check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 52: %4 = getelementptr inbounds ptr addrspace(10), ptr %1, i32 0 check:309'0 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ . . . >>>>>> -- ******************** Failed Tests (1): Julia :: pipeline-prints.ll ``` The problem is that these tests assume x86_64, which fails because the target isn't available, so it presumably uses the native target which has different vectorization characteristics: ``` ❯ ./usr/tools/opt --load-pass-plugin=libjulia-codegen.dylib -passes='julia' --print-before=AfterVectorization -o /dev/null ../../test/llvmpasses/pipeline-prints.ll ./usr/tools/opt: WARNING: failed to create target machine for 'x86_64-unknown-linux-gnu': unable to get target for 'x86_64-unknown-linux-gnu', see --version and --triple. ``` There's other tests that assume this (e.g. the `fma` cpufeatures one), but they don't fail, so I've left them as is. 
--- test/llvmpasses/pipeline-prints.ll | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/llvmpasses/pipeline-prints.ll b/test/llvmpasses/pipeline-prints.ll index ecb70953026c2..9c27885c5ca45 100644 --- a/test/llvmpasses/pipeline-prints.ll +++ b/test/llvmpasses/pipeline-prints.ll @@ -14,7 +14,7 @@ ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='julia' --print-before=BeforeScalarOptimization -o /dev/null %s 2>&1 | FileCheck %s --check-prefixes=BEFORESCALAROPTIMIZATION ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='julia' --print-before=AfterScalarOptimization -o /dev/null %s 2>&1 | FileCheck %s --check-prefixes=AFTERSCALAROPTIMIZATION ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='julia' --print-before=BeforeVectorization -o /dev/null %s 2>&1 | FileCheck %s --check-prefixes=BEFOREVECTORIZATION -; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='julia' --print-before=AfterVectorization -o /dev/null %s 2>&1 | FileCheck %s --check-prefixes=AFTERVECTORIZATION +; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='julia' --print-before=AfterVectorization -force-vector-width=2 -o /dev/null %s 2>&1 | FileCheck %s --check-prefixes=AFTERVECTORIZATION ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='julia' --print-before=BeforeIntrinsicLowering -o /dev/null %s 2>&1 | FileCheck %s --check-prefixes=BEFOREINTRINSICLOWERING ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='julia' --print-before=AfterIntrinsicLowering -o /dev/null %s 2>&1 | FileCheck %s --check-prefixes=AFTERINTRINSICLOWERING ; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='julia' --print-before=BeforeCleanup -o /dev/null %s 2>&1 | FileCheck %s --check-prefixes=BEFORECLEANUP @@ -311,4 +311,4 @@ attributes #2 = { inaccessiblemem_or_argmemonly } ; COM: Intrinsics are lowered and cleaned up by the time optimization is finished ; AFTEROPTIMIZATION-NOT: call void @julia.safepoint -; AFTEROPTIMIZATION: load volatile i64{{.*}}%safepoint \ No newline at end of file +; AFTEROPTIMIZATION: load volatile i64{{.*}}%safepoint From b9b4dfa072ddb8bbbda2a7c04889465e57dd4259 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 24 Oct 2024 00:04:08 +0530 Subject: [PATCH 289/537] Reduce generic matrix*vector latency (#56289) ```julia julia> using LinearAlgebra julia> A = rand(Int,4,4); x = rand(Int,4); y = similar(x); julia> @time mul!(y, A, x, 2, 2); 0.330489 seconds (792.22 k allocations: 41.519 MiB, 8.75% gc time, 99.99% compilation time) # master 0.134212 seconds (339.89 k allocations: 17.103 MiB, 15.23% gc time, 99.98% compilation time) # This PR ``` Main changes: - `generic_matvecmul!` and `_generic_matvecmul!` now accept `alpha` and `beta` arguments instead of `MulAddMul(alpha, beta)`. The methods that accept a `MulAddMul(alpha, beta)` are also retained for backward compatibility, but these now forward `alpha` and `beta`, instead of the other way around. - Narrow the scope of the `@stable_muladdmul` applications. We now construct the `MulAddMul(alpha, beta)` object only where it is needed in a function call, and we annotate the call site with `@stable_muladdmul`. This leads to smaller branches. - Create a new internal function with methods for the `'N'`, `'T'` and `'C'` cases, so that firstly, there's less code duplication, and secondly, the `_generic_matvecmul!` method is now simple enough to enable constant propagation. 
This eliminates the unnecessary branches, and only the one that is taken is compiled. Together, this reduces the TTFX substantially. --- stdlib/LinearAlgebra/src/matmul.jl | 126 ++++++++++++------------- stdlib/LinearAlgebra/src/triangular.jl | 2 +- 2 files changed, 61 insertions(+), 67 deletions(-) diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl index f64422fd9cb8a..a8205a1dde808 100644 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -78,17 +78,13 @@ _mul!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector, generic_matvecmul!(y::StridedVector{T}, tA, A::StridedVecOrMat{T}, x::StridedVector{T}, alpha::Number, beta::Number) where {T<:BlasFloat} = gemv!(y, tA, A, x, alpha, beta) -generic_matvecmul!(y::StridedVector{T}, tA, A::StridedVecOrMat{T}, x::StridedVector{T}, - _add::MulAddMul = MulAddMul()) where {T<:BlasFloat} = - gemv!(y, tA, A, x, _add.alpha, _add.beta) + # Real (possibly transposed) matrix times complex vector. # Multiply the matrix with the real and imaginary parts separately generic_matvecmul!(y::StridedVector{Complex{T}}, tA, A::StridedVecOrMat{T}, x::StridedVector{Complex{T}}, alpha::Number, beta::Number) where {T<:BlasReal} = gemv!(y, tA, A, x, alpha, beta) -generic_matvecmul!(y::StridedVector{Complex{T}}, tA, A::StridedVecOrMat{T}, x::StridedVector{Complex{T}}, - _add::MulAddMul = MulAddMul()) where {T<:BlasReal} = - gemv!(y, tA, A, x, _add.alpha, _add.beta) + # Complex matrix times real vector. # Reinterpret the matrix as a real matrix and do real matvec computation. # works only in cooperation with BLAS when A is untransposed (tA == 'N') @@ -96,9 +92,6 @@ generic_matvecmul!(y::StridedVector{Complex{T}}, tA, A::StridedVecOrMat{T}, x::S generic_matvecmul!(y::StridedVector{Complex{T}}, tA, A::StridedVecOrMat{Complex{T}}, x::StridedVector{T}, alpha::Number, beta::Number) where {T<:BlasReal} = gemv!(y, tA, A, x, alpha, beta) -generic_matvecmul!(y::StridedVector{Complex{T}}, tA, A::StridedVecOrMat{Complex{T}}, x::StridedVector{T}, - _add::MulAddMul = MulAddMul()) where {T<:BlasReal} = - gemv!(y, tA, A, x, _add.alpha, _add.beta) # Vector-Matrix multiplication (*)(x::AdjointAbsVec, A::AbstractMatrix) = (A'*x')' @@ -539,9 +532,9 @@ Base.@constprop :aggressive function gemv!(y::StridedVector{T}, tA::AbstractChar if tA_uc in ('S', 'H') # re-wrap again and use plain ('N') matvec mul algorithm, # because _generic_matvecmul! can't handle the HermOrSym cases specifically - return @stable_muladdmul _generic_matvecmul!(y, 'N', wrap(A, tA), x, MulAddMul(α, β)) + return _generic_matvecmul!(y, 'N', wrap(A, tA), x, α, β) else - return @stable_muladdmul _generic_matvecmul!(y, tA, A, x, MulAddMul(α, β)) + return _generic_matvecmul!(y, tA, A, x, α, β) end end @@ -564,7 +557,7 @@ Base.@constprop :aggressive function gemv!(y::StridedVector{Complex{T}}, tA::Abs return y else Anew, ta = tA_uc in ('S', 'H') ? (wrap(A, tA), oftype(tA, 'N')) : (A, tA) - return @stable_muladdmul _generic_matvecmul!(y, ta, Anew, x, MulAddMul(α, β)) + return _generic_matvecmul!(y, ta, Anew, x, α, β) end end @@ -591,9 +584,9 @@ Base.@constprop :aggressive function gemv!(y::StridedVector{Complex{T}}, tA::Abs elseif tA_uc in ('S', 'H') # re-wrap again and use plain ('N') matvec mul algorithm, # because _generic_matvecmul! 
can't handle the HermOrSym cases specifically - return @stable_muladdmul _generic_matvecmul!(y, 'N', wrap(A, tA), x, MulAddMul(α, β)) + return _generic_matvecmul!(y, 'N', wrap(A, tA), x, α, β) else - return @stable_muladdmul _generic_matvecmul!(y, tA, A, x, MulAddMul(α, β)) + return _generic_matvecmul!(y, tA, A, x, α, β) end end @@ -825,82 +818,83 @@ end # NOTE: the generic version is also called as fallback for # strides != 1 cases -Base.@constprop :aggressive generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, alpha::Number, beta::Number) = - @stable_muladdmul generic_matvecmul!(C, tA, A, B, MulAddMul(alpha, beta)) +# legacy method, retained for backward compatibility +generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, _add::MulAddMul = MulAddMul()) = + generic_matvecmul!(C, tA, A, B, _add.alpha, _add.beta) @inline function generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, - _add::MulAddMul = MulAddMul()) + alpha::Number, beta::Number) tA_uc = uppercase(tA) # potentially convert a WrapperChar to a Char Anew, ta = tA_uc in ('S', 'H') ? (wrap(A, tA), oftype(tA, 'N')) : (A, tA) - return _generic_matvecmul!(C, ta, Anew, B, _add) + return _generic_matvecmul!(C, ta, Anew, B, alpha, beta) end -function _generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, - _add::MulAddMul = MulAddMul()) - require_one_based_indexing(C, A, B) - @assert tA in ('N', 'T', 'C') - mB = length(B) - mA, nA = lapack_size(tA, A) - if mB != nA - throw(DimensionMismatch(lazy"matrix A has dimensions ($mA,$nA), vector B has length $mB")) - end - if mA != length(C) - throw(DimensionMismatch(lazy"result C has length $(length(C)), needs length $mA")) - end - +# legacy method, retained for backward compatibility +_generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, _add::MulAddMul = MulAddMul()) = + _generic_matvecmul!(C, tA, A, B, _add.alpha, _add.beta) +function __generic_matvecmul!(f::F, C::AbstractVector, A::AbstractVecOrMat, B::AbstractVector, + alpha::Number, beta::Number) where {F} Astride = size(A, 1) - @inbounds begin - if tA == 'T' # fastest case - if nA == 0 - for k = 1:mA - _modify!(_add, false, C, k) - end - else - for k = 1:mA - aoffs = (k-1)*Astride - firstterm = transpose(A[aoffs + 1])*B[1] - s = zero(firstterm + firstterm) - for i = 1:nA - s += transpose(A[aoffs+i]) * B[i] - end - _modify!(_add, s, C, k) - end - end - elseif tA == 'C' - if nA == 0 - for k = 1:mA - _modify!(_add, false, C, k) + if length(B) == 0 + for k = eachindex(C) + @stable_muladdmul _modify!(MulAddMul(alpha,beta), false, C, k) end else - for k = 1:mA + for k = eachindex(C) aoffs = (k-1)*Astride - firstterm = A[aoffs + 1]'B[1] + firstterm = f(A[aoffs + 1]) * B[1] s = zero(firstterm + firstterm) - for i = 1:nA - s += A[aoffs + i]'B[i] + for i = eachindex(B) + s += f(A[aoffs+i]) * B[i] end - _modify!(_add, s, C, k) + @stable_muladdmul _modify!(MulAddMul(alpha,beta), s, C, k) end end - else # tA == 'N' - for i = 1:mA - if !iszero(_add.beta) - C[i] *= _add.beta - elseif mB == 0 + end +end +function __generic_matvecmul!(::typeof(identity), C::AbstractVector, A::AbstractVecOrMat, B::AbstractVector, + alpha::Number, beta::Number) + Astride = size(A, 1) + @inbounds begin + for i = eachindex(C) + if !iszero(beta) + C[i] *= beta + elseif length(B) == 0 C[i] = false else C[i] = zero(A[i]*B[1] + A[i]*B[1]) end end - for k = 1:mB + for k = eachindex(B) aoffs = (k-1)*Astride - b = _add(B[k]) - for i = 1:mA 
+ b = @stable_muladdmul MulAddMul(alpha,beta)(B[k]) + for i = eachindex(C) C[i] += A[aoffs + i] * b end end end - end # @inbounds + return C +end +function _generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, + alpha::Number, beta::Number) + require_one_based_indexing(C, A, B) + @assert tA in ('N', 'T', 'C') + mB = length(B) + mA, nA = lapack_size(tA, A) + if mB != nA + throw(DimensionMismatch(lazy"matrix A has dimensions ($mA,$nA), vector B has length $mB")) + end + if mA != length(C) + throw(DimensionMismatch(lazy"result C has length $(length(C)), needs length $mA")) + end + + if tA == 'T' # fastest case + __generic_matvecmul!(transpose, C, A, B, alpha, beta) + elseif tA == 'C' + __generic_matvecmul!(adjoint, C, A, B, alpha, beta) + else # tA == 'N' + __generic_matvecmul!(identity, C, A, B, alpha, beta) + end C end diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index d6994f4b4dd58..1a7d04115c97d 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -1066,7 +1066,7 @@ for TC in (:AbstractVector, :AbstractMatrix) if isone(alpha) && iszero(beta) return _trimul!(C, A, B) else - return @stable_muladdmul generic_matvecmul!(C, 'N', A, B, MulAddMul(alpha, beta)) + return _generic_matvecmul!(C, 'N', A, B, alpha, beta) end end end From 28b0abd95064370cf5d7ef08beb5adb33710c18f Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Wed, 23 Oct 2024 16:33:40 -0500 Subject: [PATCH 290/537] Type `Base.is_interactive` as `Bool` (#56303) Before, typing `Base.is_interactive = 7` would cause weird internal REPL failures down the line. Now, it throws an InexactError and has no impact. --- base/initdefs.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/base/initdefs.jl b/base/initdefs.jl index 85b708433c0ef..f7693813239c6 100644 --- a/base/initdefs.jl +++ b/base/initdefs.jl @@ -30,14 +30,14 @@ exit() = exit(0) const roottask = current_task() -is_interactive = false +is_interactive::Bool = false """ isinteractive() -> Bool Determine whether Julia is running an interactive session. 
""" -isinteractive() = (is_interactive::Bool) +isinteractive() = is_interactive ## package depots (registries, packages, environments) ## From 4236a33bc5a33dd123a8ffaf2ed2b4fe5641bb87 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Wed, 23 Oct 2024 19:14:02 -0400 Subject: [PATCH 291/537] REPL: don't complete str and cmd macros when the input matches the internal name like `r_` to `r"` (#56254) --- stdlib/REPL/src/REPLCompletions.jl | 13 +++++++++++++ stdlib/REPL/test/replcompletions.jl | 10 ++++++++++ 2 files changed, 23 insertions(+) diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index d230b7b5fd232..d59a18e6d4f16 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -141,6 +141,19 @@ function append_filtered_mod_names!(ffunc::Function, suggestions::Vector{Complet ssyms = names(mod; all=true, imported, usings) filter!(ffunc, ssyms) macros = filter(x -> startswith(String(x), "@" * name), ssyms) + + # don't complete string and command macros when the input matches the internal name like `r_` to `r"` + if !startswith(name, "@") + filter!(macros) do m + s = String(m) + if endswith(s, "_str") || endswith(s, "_cmd") + occursin(name, first(s, length(s)-4)) + else + true + end + end + end + syms = String[sprint((io,s)->Base.show_sym(io, s; allow_macroname=true), s) for s in ssyms if completes_global(String(s), name)] appendmacro!(syms, macros, "_str", "\"") appendmacro!(syms, macros, "_cmd", "`") diff --git a/stdlib/REPL/test/replcompletions.jl b/stdlib/REPL/test/replcompletions.jl index 4fe32f47bc80c..7ee03cee940b8 100644 --- a/stdlib/REPL/test/replcompletions.jl +++ b/stdlib/REPL/test/replcompletions.jl @@ -1546,6 +1546,16 @@ end @test "testcmd`" in c c, r, res = test_complete("CompletionFoo.tϵsτc") @test "tϵsτcmδ`" in c + + # Issue #56071: don't complete string and command macros when the input matches the internal name like `r_` to `r"` + c, r, res = test_complete("CompletionFoo.teststr_") + @test isempty(c) + c, r, res = test_complete("CompletionFoo.teststr_s") + @test isempty(c) + c, r, res = test_complete("CompletionFoo.testcmd_") + @test isempty(c) + c, r, res = test_complete("CompletionFoo.testcmd_c") + @test isempty(c) end @testset "Keyword-argument completion" begin From 0a6277d4876b3d9cdf9e37de0812acf41ae1c1b3 Mon Sep 17 00:00:00 2001 From: Nathan Zimmerberg <39104088+nhz2@users.noreply.github.com> Date: Wed, 23 Oct 2024 19:14:51 -0400 Subject: [PATCH 292/537] fix REPL test if a "juliadev" directory exists in home (#56218) --- stdlib/REPL/test/replcompletions.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stdlib/REPL/test/replcompletions.jl b/stdlib/REPL/test/replcompletions.jl index 7ee03cee940b8..1355f74c9bfff 100644 --- a/stdlib/REPL/test/replcompletions.jl +++ b/stdlib/REPL/test/replcompletions.jl @@ -1347,9 +1347,9 @@ mktempdir() do path end # Test tilde path completion -let (c, r, res) = test_complete("\"~/julia") +let (c, r, res) = test_complete("\"~/ka8w5rsz") if !Sys.iswindows() - @test res && c == String[homedir() * "/julia"] + @test res && c == String[homedir() * "/ka8w5rsz"] else @test !res end From 894296bd2398c297a4d18f0f16c7601a3237ffef Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Wed, 23 Oct 2024 20:15:39 -0300 Subject: [PATCH 293/537] Fix trampoline warning on x86 as well (#56280) --- cli/trampolines/trampolines_x86_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/trampolines/trampolines_x86_64.S 
b/cli/trampolines/trampolines_x86_64.S index 3b800da56eee1..fcc8e40e1ddc9 100644 --- a/cli/trampolines/trampolines_x86_64.S +++ b/cli/trampolines/trampolines_x86_64.S @@ -6,9 +6,9 @@ #define XX(name) \ DEBUGINFO(name); \ .global CNAME(name); \ +CNAME(name)##:; \ .cfi_startproc; \ SEH_START1(name); \ -CNAME(name)##:; \ SEH_START2(); \ CET_START(); \ mov CNAMEADDR(name)(%rip),%r11; \ From 53ffe5630cff5d974722ec197c6f53b907ffd457 Mon Sep 17 00:00:00 2001 From: N5N3 <2642243996@qq.com> Date: Thu, 24 Oct 2024 07:32:33 +0800 Subject: [PATCH 294/537] typeintersect: more fastpath to skip intersect under circular env (#56304) fix #56040 --- src/subtype.c | 9 +++++++-- test/subtype.jl | 9 +++++++++ 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/src/subtype.c b/src/subtype.c index 5edcd100ee8e0..f5c13b77ea0cf 100644 --- a/src/subtype.c +++ b/src/subtype.c @@ -2464,8 +2464,10 @@ static jl_value_t *intersect_aside(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, return y; if (y == (jl_value_t*)jl_any_type && !jl_is_typevar(x)) return x; - // band-aid for #46736 - if (obviously_egal(x, y)) + // band-aid for #46736 #56040 + if (obviously_in_union(x, y)) + return y; + if (obviously_in_union(y, x)) return x; jl_varbinding_t *vars = NULL; @@ -2495,6 +2497,9 @@ static jl_value_t *intersect_aside(jl_value_t *x, jl_value_t *y, jl_stenv_t *e, static jl_value_t *intersect_union(jl_value_t *x, jl_uniontype_t *u, jl_stenv_t *e, int8_t R, int param) { + // band-aid for #56040 + if (!jl_is_uniontype(x) && obviously_in_union((jl_value_t *)u, x)) + return x; int no_free = !jl_has_free_typevars(x) && !jl_has_free_typevars((jl_value_t*)u); if (param == 2 || no_free) { jl_value_t *a=NULL, *b=NULL; diff --git a/test/subtype.jl b/test/subtype.jl index 7be869107b432..dfa1487eaa55d 100644 --- a/test/subtype.jl +++ b/test/subtype.jl @@ -2721,3 +2721,12 @@ let T1 = NTuple{12, Union{Val{1}, Val{2}, Val{3}, Val{4}, Val{5}, Val{6}}} @test !(T1 <: T2) @test Tuple{Union{Val{1},Val{2}}} <: Tuple{S} where {T, S<:Val{T}} end + +#issue 56040 +let S = Dict{V,V} where {V}, + T = Dict{Ref{Union{Set{A2}, Set{A3}, A3}}, Ref{Union{Set{A3}, Set{A2}, Set{A1}, Set{A4}, A4}}} where {A1, A2<:Set{A1}, A3<:Union{Set{A1}, Set{A2}}, A4<:Union{Set{A2}, Set{A1}, Set{A3}}}, + A = Dict{Ref{Set{Union{}}}, Ref{Set{Union{}}}} + @testintersect(S, T, !Union{}) + @test A <: typeintersect(S, T) + @test A <: typeintersect(T, S) +end From 2a06376c18afd7ec875335070743dcebcd85dee7 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 24 Oct 2024 08:31:24 +0530 Subject: [PATCH 295/537] Preserve type in `first` for `OneTo` (#56263) With this PR, ```julia julia> first(Base.OneTo(10), 4) Base.OneTo(4) ``` Previously, this would have used indexing to return a `UnitRange`. This is probably the only way to slice a `Base.OneTo` and obtain a `Base.OneTo` back. 
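A few more illustrative calls, written from the new definition and the tests below rather than copied from a session, so the exact REPL output is approximate:

```julia
julia> r = Base.OneTo(Int8(4));

julia> first(r, 10) === r   # n past the end clamps to the range and keeps the element type
true

julia> first(r, 2)
Base.OneTo(2)

julia> first(r, -1)
ERROR: ArgumentError: Number of elements must be non-negative
```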
--- base/range.jl | 5 +++++ test/ranges.jl | 6 ++++++ 2 files changed, 11 insertions(+) diff --git a/base/range.jl b/base/range.jl index cee15db39b911..c4435f2ff3e97 100644 --- a/base/range.jl +++ b/base/range.jl @@ -850,6 +850,11 @@ first(r::OneTo{T}) where {T} = oneunit(T) first(r::StepRangeLen) = unsafe_getindex(r, 1) first(r::LinRange) = r.start +function first(r::OneTo, n::Integer) + n < 0 && throw(ArgumentError("Number of elements must be non-negative")) + OneTo(oftype(r.stop, min(r.stop, n))) +end + last(r::OrdinalRange{T}) where {T} = convert(T, r.stop) # via steprange_last last(r::StepRangeLen) = unsafe_getindex(r, length(r)) last(r::LinRange) = r.stop diff --git a/test/ranges.jl b/test/ranges.jl index 86cd1c3f2345c..629c2966b2fa6 100644 --- a/test/ranges.jl +++ b/test/ranges.jl @@ -1539,6 +1539,9 @@ end @test size(r) == (3,) @test step(r) == 1 @test first(r) == 1 + @test first(r,2) === Base.OneTo(2) + @test first(r,20) === r + @test_throws ArgumentError first(r,-20) @test last(r) == 3 @test minimum(r) == 1 @test maximum(r) == 3 @@ -1570,6 +1573,9 @@ end @test findall(in(2:(length(r) - 1)), r) === 2:(length(r) - 1) @test findall(in(r), 2:(length(r) - 1)) === 1:(length(r) - 2) end + let r = Base.OneTo(Int8(4)) + @test first(r,4) === r + end @test convert(Base.OneTo, 1:2) === Base.OneTo{Int}(2) @test_throws ArgumentError("first element must be 1, got 2") convert(Base.OneTo, 2:3) @test_throws ArgumentError("step must be 1, got 2") convert(Base.OneTo, 1:2:5) From bf6da77c01872dbeffb764784a9a3feb49b7364c Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 24 Oct 2024 19:25:18 +0530 Subject: [PATCH 296/537] Matmul: dispatch on specific blas paths using an enum (#55002) This expands on the approach taken by https://github.com/JuliaLang/julia/pull/54552. We pass on more type information to `generic_matmatmul_wrapper!`, which lets us convert the branches to method dispatches. This helps spread the latency around, so that instead of compiling all the branches in the first call, we now compile the branches only when they are actually taken. While this reduces the latency in individual branches, there is no reduction in latency if all the branches are reachable. ```julia julia> A = rand(2,2); julia> @time A * A; 0.479805 seconds (809.66 k allocations: 40.764 MiB, 99.93% compilation time) # 1.12.0-DEV.806 0.346739 seconds (633.17 k allocations: 31.320 MiB, 99.90% compilation time) # This PR julia> @time A * A'; 0.030413 seconds (101.98 k allocations: 5.359 MiB, 98.54% compilation time) # v1.12.0-DEV.806 0.148118 seconds (219.51 k allocations: 11.652 MiB, 99.72% compilation time) # This PR ``` The latency is spread between the two calls here. In fresh sessions: ```julia julia> A = rand(2,2); julia> @time A * A'; 0.473630 seconds (825.65 k allocations: 41.554 MiB, 99.91% compilation time) # v1.12.0-DEV.806 0.490305 seconds (774.87 k allocations: 38.824 MiB, 99.90% compilation time) # This PR ``` In this case, both the `syrk` and `gemm` branches are reachable, so there is no reduction in latency. 
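(Aside: a minimal self-contained sketch of the branch-to-dispatch idea described above, with made-up names rather than the actual `BlasFlag` machinery:)

```julia
# Pick a tag once at the entry point, then let each case be its own method,
# so only the path that is actually taken gets compiled on first use.
@enum Path FAST SLOW

run_path(x) = _run(x, Val(x > 0 ? FAST : SLOW))
_run(x, ::Val{FAST}) = x + 1   # compiled only if the FAST path is hit
_run(x, ::Val{SLOW}) = x - 1   # compiled only if the SLOW path is hit

run_path(3)   # first call compiles run_path and only _run(::Int, ::Val{FAST})
```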
Analogously, there is a reduction in latency in the second set of matrix multiplications where we call `symm!/hemm!` or `_generic_matmatmul`: ```julia julia> using LinearAlgebra julia> A = rand(2,2); julia> @time Symmetric(A) * A; 0.711178 seconds (2.06 M allocations: 103.878 MiB, 2.20% gc time, 99.98% compilation time) # v1.12.0-DEV.806 0.540669 seconds (904.12 k allocations: 43.576 MiB, 2.60% gc time, 97.36% compilation time) # This PR ``` --- stdlib/LinearAlgebra/src/matmul.jl | 168 ++++++++++++++++++++--------- 1 file changed, 116 insertions(+), 52 deletions(-) diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl index a8205a1dde808..74cb50f955bbb 100644 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -294,16 +294,45 @@ true """ @inline mul!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat, α::Number, β::Number) = _mul!(C, A, B, α, β) # Add a level of indirection and specialize _mul! to avoid ambiguities in mul! -@inline _mul!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat, α::Number, β::Number) = +module BlasFlag +@enum BlasFunction SYRK HERK GEMM SYMM HEMM NONE +const SyrkHerkGemm = Union{Val{SYRK}, Val{HERK}, Val{GEMM}} +const SymmHemmGeneric = Union{Val{SYMM}, Val{HEMM}, Val{NONE}} +end +@inline function _mul!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat, α::Number, β::Number) + tA = wrapper_char(A) + tB = wrapper_char(B) + tA_uc = uppercase(tA) + tB_uc = uppercase(tB) + isntc = wrapper_char_NTC(A) & wrapper_char_NTC(B) + blasfn = if isntc + if (tA_uc == 'T' && tB_uc == 'N') || (tA_uc == 'N' && tB_uc == 'T') + BlasFlag.SYRK + elseif (tA_uc == 'C' && tB_uc == 'N') || (tA_uc == 'N' && tB_uc == 'C') + BlasFlag.HERK + else isntc + BlasFlag.GEMM + end + else + if (tA_uc == 'S' && tB_uc == 'N') || (tA_uc == 'N' && tB_uc == 'S') + BlasFlag.SYMM + elseif (tA_uc == 'H' && tB_uc == 'N') || (tA_uc == 'N' && tB_uc == 'H') + BlasFlag.HEMM + else + BlasFlag.NONE + end + end + generic_matmatmul_wrapper!( C, - wrapper_char(A), - wrapper_char(B), + tA, + tB, _unwrap(A), _unwrap(B), α, β, - Val(wrapper_char_NTC(A) & wrapper_char_NTC(B)) + Val(blasfn), ) +end # this indirection allows is to specialize on the types of the wrappers of A and B to some extent, # even though the wrappers are stripped off in mul! @@ -408,7 +437,7 @@ end # THE one big BLAS dispatch. This is split into two methods to improve latency Base.@constprop :aggressive function generic_matmatmul_wrapper!(C::StridedMatrix{T}, tA, tB, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}, - α::Number, β::Number, ::Val{true}) where {T<:BlasFloat} + α::Number, β::Number, val::BlasFlag.SyrkHerkGemm) where {T<:BlasFloat} mA, nA = lapack_size(tA, A) mB, nB = lapack_size(tB, B) if any(iszero, size(A)) || any(iszero, size(B)) || iszero(α) @@ -418,24 +447,31 @@ Base.@constprop :aggressive function generic_matmatmul_wrapper!(C::StridedMatrix return _rmul_or_fill!(C, β) end matmul2x2or3x3_nonzeroalpha!(C, tA, tB, A, B, α, β) && return C - # We convert the chars to uppercase to potentially unwrap a WrapperChar, - # and extract the char corresponding to the wrapper type - tA_uc, tB_uc = uppercase(tA), uppercase(tB) - # the map in all ensures constprop by acting on tA and tB individually, instead of looping over them. 
- if tA_uc == 'T' && tB_uc == 'N' && A === B - return syrk_wrapper!(C, 'T', A, α, β) - elseif tA_uc == 'N' && tB_uc == 'T' && A === B - return syrk_wrapper!(C, 'N', A, α, β) - elseif tA_uc == 'C' && tB_uc == 'N' && A === B - return herk_wrapper!(C, 'C', A, α, β) - elseif tA_uc == 'N' && tB_uc == 'C' && A === B - return herk_wrapper!(C, 'N', A, α, β) + _syrk_herk_gemm_wrapper!(C, tA, tB, A, B, α, β, val) + return C +end +Base.@constprop :aggressive function _syrk_herk_gemm_wrapper!(C, tA, tB, A, B, α, β, ::Val{BlasFlag.SYRK}) + if A === B + tA_uc = uppercase(tA) # potentially strip a WrapperChar + return syrk_wrapper!(C, tA_uc, A, α, β) else return gemm_wrapper!(C, tA, tB, A, B, α, β) end end +Base.@constprop :aggressive function _syrk_herk_gemm_wrapper!(C, tA, tB, A, B, α, β, ::Val{BlasFlag.HERK}) + if A === B + tA_uc = uppercase(tA) # potentially strip a WrapperChar + return herk_wrapper!(C, tA_uc, A, α, β) + else + return gemm_wrapper!(C, tA, tB, A, B, α, β) + end +end +Base.@constprop :aggressive function _syrk_herk_gemm_wrapper!(C, tA, tB, A, B, α, β, ::Val{BlasFlag.GEMM}) + return gemm_wrapper!(C, tA, tB, A, B, α, β) +end +_valtypeparam(v::Val{T}) where {T} = T Base.@constprop :aggressive function generic_matmatmul_wrapper!(C::StridedMatrix{T}, tA, tB, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}, - α::Number, β::Number, ::Val{false}) where {T<:BlasFloat} + α::Number, β::Number, val::BlasFlag.SymmHemmGeneric) where {T<:BlasFloat} mA, nA = lapack_size(tA, A) mB, nB = lapack_size(tB, B) if any(iszero, size(A)) || any(iszero, size(B)) || iszero(α) @@ -445,23 +481,48 @@ Base.@constprop :aggressive function generic_matmatmul_wrapper!(C::StridedMatrix return _rmul_or_fill!(C, β) end matmul2x2or3x3_nonzeroalpha!(C, tA, tB, A, B, α, β) && return C - # We convert the chars to uppercase to potentially unwrap a WrapperChar, - # and extract the char corresponding to the wrapper type - tA_uc, tB_uc = uppercase(tA), uppercase(tB) alpha, beta = promote(α, β, zero(T)) - if alpha isa Union{Bool,T} && beta isa Union{Bool,T} - if tA_uc == 'S' && tB_uc == 'N' - return BLAS.symm!('L', tA == 'S' ? 'U' : 'L', alpha, A, B, beta, C) - elseif tA_uc == 'N' && tB_uc == 'S' - return BLAS.symm!('R', tB == 'S' ? 'U' : 'L', alpha, B, A, beta, C) - elseif tA_uc == 'H' && tB_uc == 'N' - return BLAS.hemm!('L', tA == 'H' ? 'U' : 'L', alpha, A, B, beta, C) - elseif tA_uc == 'N' && tB_uc == 'H' - return BLAS.hemm!('R', tB == 'H' ? 'U' : 'L', alpha, B, A, beta, C) - end + blasfn = _valtypeparam(val) + if alpha isa Union{Bool,T} && beta isa Union{Bool,T} && blasfn ∈ (BlasFlag.SYMM, BlasFlag.HEMM) + _blasfn = blasfn + αβ = (alpha, beta) + else + _blasfn = BlasFlag.NONE + αβ = (α, β) end - return _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), MulAddMul(α, β)) + _symm_hemm_generic!(C, tA, tB, A, B, αβ..., Val(_blasfn)) + return C end +Base.@constprop :aggressive function _lrchar_ulchar(tA, tB) + if uppercase(tA) == 'N' + lrchar = 'R' + ulchar = isuppercase(tB) ? 'U' : 'L' + else + lrchar = 'L' + ulchar = isuppercase(tA) ? 
'U' : 'L' + end + return lrchar, ulchar +end +function _symm_hemm_generic!(C, tA, tB, A, B, alpha, beta, ::Val{BlasFlag.SYMM}) + lrchar, ulchar = _lrchar_ulchar(tA, tB) + if lrchar == 'L' + BLAS.symm!(lrchar, ulchar, alpha, A, B, beta, C) + else + BLAS.symm!(lrchar, ulchar, alpha, B, A, beta, C) + end +end +function _symm_hemm_generic!(C, tA, tB, A, B, alpha, beta, ::Val{BlasFlag.HEMM}) + lrchar, ulchar = _lrchar_ulchar(tA, tB) + if lrchar == 'L' + BLAS.hemm!(lrchar, ulchar, alpha, A, B, beta, C) + else + BLAS.hemm!(lrchar, ulchar, alpha, B, A, beta, C) + end +end +Base.@constprop :aggressive function _symm_hemm_generic!(C, tA, tB, A, B, alpha, beta, ::Val{BlasFlag.NONE}) + _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), alpha, beta) +end + # legacy method Base.@constprop :aggressive generic_matmatmul!(C::StridedMatrix{T}, tA, tB, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}, _add::MulAddMul = MulAddMul()) where {T<:BlasFloat} = @@ -472,8 +533,8 @@ function generic_matmatmul_wrapper!(C::StridedVecOrMat{Complex{T}}, tA, tB, A::S gemm_wrapper!(C, tA, tB, A, B, α, β) end Base.@constprop :aggressive function generic_matmatmul_wrapper!(C::StridedVecOrMat{Complex{T}}, tA, tB, A::StridedVecOrMat{Complex{T}}, B::StridedVecOrMat{T}, - α::Number, β::Number, ::Val{false}) where {T<:BlasReal} - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), MulAddMul(α, β)) + alpha::Number, beta::Number, ::Val{false}) where {T<:BlasReal} + _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), alpha, beta) end # legacy method Base.@constprop :aggressive generic_matmatmul!(C::StridedVecOrMat{Complex{T}}, tA, tB, A::StridedVecOrMat{Complex{T}}, B::StridedVecOrMat{T}, @@ -675,7 +736,7 @@ Base.@constprop :aggressive function gemm_wrapper(tA::AbstractChar, tB::Abstract if all(map(in(('N', 'T', 'C')), (tA_uc, tB_uc))) gemm_wrapper!(C, tA, tB, A, B, true, false) else - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), MulAddMul()) + _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), true, false) end end @@ -702,7 +763,7 @@ Base.@constprop :aggressive function gemm_wrapper!(C::StridedVecOrMat{T}, tA::Ab _fullstride2(A) && _fullstride2(B) && _fullstride2(C)) return BLAS.gemm!(tA, tB, alpha, A, B, beta, C) end - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), MulAddMul(α, β)) + _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), α, β) end # legacy method gemm_wrapper!(C::StridedVecOrMat{T}, tA::AbstractChar, tB::AbstractChar, @@ -737,7 +798,7 @@ Base.@constprop :aggressive function gemm_wrapper!(C::StridedVecOrMat{Complex{T} BLAS.gemm!(tA, tB, alpha, reinterpret(T, A), B, beta, reinterpret(T, C)) return C end - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), MulAddMul(α, β)) + _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), α, β) end # legacy method gemm_wrapper!(C::StridedVecOrMat{Complex{T}}, tA::AbstractChar, tB::AbstractChar, @@ -908,12 +969,16 @@ end # aggressive const prop makes mixed eltype mul!(C, A, B) invoke _generic_matmatmul! 
directly # legacy method Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::AbstractVecOrMat, B::AbstractVecOrMat, _add::MulAddMul = MulAddMul()) = - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), _add) -Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::AbstractVecOrMat, B::AbstractVecOrMat, α::Number, β::Number) = - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), MulAddMul(α, β)) + _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), _add.alpha, _add.beta) +Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::AbstractVecOrMat, B::AbstractVecOrMat, alpha::Number, beta::Number) = + _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), alpha, beta) + +# legacy method +_generic_matmatmul!(C::AbstractVecOrMat, A::AbstractVecOrMat, B::AbstractVecOrMat, _add::MulAddMul) = + _generic_matmatmul!(C, A, B, _add.alpha, _add.beta) -@noinline function _generic_matmatmul!(C::AbstractVecOrMat{R}, A::AbstractVecOrMat{T}, B::AbstractVecOrMat{S}, - _add::MulAddMul{ais1}) where {T,S,R,ais1} +@noinline function _generic_matmatmul!(C::AbstractVecOrMat{R}, A::AbstractVecOrMat, B::AbstractVecOrMat, + alpha::Number, beta::Number) where {R} AxM = axes(A, 1) AxK = axes(A, 2) # we use two `axes` calls in case of `AbstractVector` BxK = axes(B, 1) @@ -929,34 +994,33 @@ Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::A if BxN != CxN throw(DimensionMismatch(lazy"matrix B has axes ($BxK,$BxN), matrix C has axes ($CxM,$CxN)")) end - _rmul_alpha = MulAddMul{ais1,true,typeof(_add.alpha),Bool}(_add.alpha,false) if isbitstype(R) && sizeof(R) ≤ 16 && !(A isa Adjoint || A isa Transpose) - _rmul_or_fill!(C, _add.beta) - (iszero(_add.alpha) || isempty(A) || isempty(B)) && return C + _rmul_or_fill!(C, beta) + (iszero(alpha) || isempty(A) || isempty(B)) && return C @inbounds for n in BxN, k in BxK # Balpha = B[k,n] * alpha, but we skip the multiplication in case isone(alpha) - Balpha = _rmul_alpha(B[k,n]) + Balpha = @stable_muladdmul MulAddMul(alpha, false)(B[k,n]) @simd for m in AxM C[m,n] = muladd(A[m,k], Balpha, C[m,n]) end end elseif isbitstype(R) && sizeof(R) ≤ 16 && ((A isa Adjoint && B isa Adjoint) || (A isa Transpose && B isa Transpose)) - _rmul_or_fill!(C, _add.beta) - (iszero(_add.alpha) || isempty(A) || isempty(B)) && return C + _rmul_or_fill!(C, beta) + (iszero(alpha) || isempty(A) || isempty(B)) && return C t = wrapperop(A) pB = parent(B) pA = parent(A) tmp = similar(C, CxN) ci = first(CxM) - ta = t(_add.alpha) + ta = t(alpha) for i in AxM mul!(tmp, pB, view(pA, :, i)) @views C[ci,:] .+= t.(ta .* tmp) ci += 1 end else - if iszero(_add.alpha) || isempty(A) || isempty(B) - return _rmul_or_fill!(C, _add.beta) + if iszero(alpha) || isempty(A) || isempty(B) + return _rmul_or_fill!(C, beta) end a1 = first(AxK) b1 = first(BxK) @@ -966,7 +1030,7 @@ Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::A @simd for k in AxK Ctmp = muladd(A[i, k], B[k, j], Ctmp) end - _modify!(_add, Ctmp, C, (i,j)) + @stable_muladdmul _modify!(MulAddMul(alpha,beta), Ctmp, C, (i,j)) end end return C From c188e0c626149149e90a0d45eab4af570bc6b669 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 24 Oct 2024 19:26:19 +0530 Subject: [PATCH 297/537] Scaling `mul!` for generic `AbstractArray`s (#56313) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This improves performance in the scaling `mul!` for `StridedArray`s by using loops instead of broadcasting. 
```julia julia> using LinearAlgebra julia> A = zeros(200,200); C = similar(A); julia> @btime mul!($C, $A, 1, 2, 2); 19.180 μs (0 allocations: 0 bytes) # nightly v"1.12.0-DEV.1479" 11.361 μs (0 allocations: 0 bytes) # This PR ``` The latency is reduced as well for the same reason. ```julia julia> using LinearAlgebra julia> A = zeros(2,2); C = similar(A); julia> @time mul!(C, A, 1, 2, 2); 0.203034 seconds (522.94 k allocations: 27.011 MiB, 14.95% gc time, 99.97% compilation time) # nightly 0.034713 seconds (59.16 k allocations: 2.962 MiB, 99.91% compilation time) # This PR ``` Thirdly, I've replaced the `.*ₛ` calls by explicit branches. This fixes the following: ```julia julia> A = [zeros(2), zeros(2)]; C = similar(A); julia> mul!(C, A, 1) ERROR: MethodError: no method matching +(::Vector{Float64}, ::Bool) ``` After this, ```julia julia> mul!(C, A, 1) 2-element Vector{Vector{Float64}}: [0.0, 0.0] [0.0, 0.0] ``` Also, I've added `@stable_muladdmul` annotations to the `generic_mul!` call, but moved it within the loop to narrow its scope. This doesn't increase the latency, while making the call type-stable. ```julia julia> D = Diagonal(1:2); C = similar(D); julia> @time mul!(C, D, 1, 2, 2); 0.248385 seconds (898.18 k allocations: 47.027 MiB, 12.30% gc time, 99.96% compilation time) # nightly 0.249940 seconds (919.80 k allocations: 49.128 MiB, 11.36% gc time, 99.99% compilation time) # This PR ``` --- stdlib/LinearAlgebra/src/generic.jl | 49 +++++++++++---- stdlib/LinearAlgebra/test/generic.jl | 89 ++++++++++++++-------------- 2 files changed, 84 insertions(+), 54 deletions(-) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 6c65c49add74b..9c050a32bbda7 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -174,23 +174,23 @@ end end -function generic_mul!(C::AbstractArray, X::AbstractArray, s::Number, _add::MulAddMul) +function generic_mul!(C::AbstractArray, X::AbstractArray, s::Number, alpha::Number, beta::Number) if length(C) != length(X) throw(DimensionMismatch(lazy"first array has length $(length(C)) which does not match the length of the second, $(length(X)).")) end for (IC, IX) in zip(eachindex(C), eachindex(X)) - @inbounds _modify!(_add, X[IX] * s, C, IC) + @inbounds @stable_muladdmul _modify!(MulAddMul(alpha,beta), X[IX] * s, C, IC) end C end -function generic_mul!(C::AbstractArray, s::Number, X::AbstractArray, _add::MulAddMul) +function generic_mul!(C::AbstractArray, s::Number, X::AbstractArray, alpha::Number, beta::Number) if length(C) != length(X) - throw(DimensionMismatch(lazy"first array has length $(length(C)) which does not -match the length of the second, $(length(X)).")) + throw(DimensionMismatch(LazyString(lazy"first array has length $(length(C)) which does not", + lazy"match the length of the second, $(length(X))."))) end for (IC, IX) in zip(eachindex(C), eachindex(X)) - @inbounds _modify!(_add, s * X[IX], C, IC) + @inbounds @stable_muladdmul _modify!(MulAddMul(alpha,beta), s * X[IX], C, IC) end C end @@ -198,22 +198,51 @@ end @inline mul!(C::AbstractArray, s::Number, X::AbstractArray, alpha::Number, beta::Number) = _lscale_add!(C, s, X, alpha, beta) +_lscale_add!(C::StridedArray, s::Number, X::StridedArray, alpha::Number, beta::Number) = + generic_mul!(C, s, X, alpha, beta) @inline function _lscale_add!(C::AbstractArray, s::Number, X::AbstractArray, alpha::Number, beta::Number) if axes(C) == axes(X) - C .= (s .* X) .*ₛ alpha .+ C .*ₛ beta + if isone(alpha) + if iszero(beta) + @. 
C = s * X + else + @. C = s * X + C * beta + end + else + if iszero(beta) + @. C = s * X * alpha + else + @. C = s * X * alpha + C * beta + end + end else - generic_mul!(C, s, X, MulAddMul(alpha, beta)) + generic_mul!(C, s, X, alpha, beta) end return C end @inline mul!(C::AbstractArray, X::AbstractArray, s::Number, alpha::Number, beta::Number) = _rscale_add!(C, X, s, alpha, beta) +_rscale_add!(C::StridedArray, X::StridedArray, s::Number, alpha::Number, beta::Number) = + generic_mul!(C, X, s, alpha, beta) @inline function _rscale_add!(C::AbstractArray, X::AbstractArray, s::Number, alpha::Number, beta::Number) if axes(C) == axes(X) - C .= (X .* s) .*ₛ alpha .+ C .*ₛ beta + if isone(alpha) + if iszero(beta) + @. C = X * s + else + @. C = X * s + C * beta + end + else + s_alpha = s * alpha + if iszero(beta) + @. C = X * s_alpha + else + @. C = X * s_alpha + C * beta + end + end else - generic_mul!(C, X, s, MulAddMul(alpha, beta)) + generic_mul!(C, X, s, alpha, beta) end return C end diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl index 2bf9c75141700..725f9b3497db8 100644 --- a/stdlib/LinearAlgebra/test/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -131,53 +131,54 @@ end @testset "array and subarray" begin - aa = reshape([1.:6;], (2,3)) - for a in (aa, view(aa, 1:2, 1:2)) - am, an = size(a) - @testset "Scaling with rmul! and lmul" begin - @test rmul!(copy(a), 5.) == a*5 - @test lmul!(5., copy(a)) == a*5 - b = randn(2048) - subB = view(b, :, :) - @test rmul!(copy(b), 5.) == b*5 - @test rmul!(copy(subB), 5.) == subB*5 - @test lmul!(Diagonal([1.; 2.]), copy(a)) == a.*[1; 2] - @test lmul!(Diagonal([1; 2]), copy(a)) == a.*[1; 2] - @test rmul!(copy(a), Diagonal(1.:an)) == a.*Vector(1:an)' - @test rmul!(copy(a), Diagonal(1:an)) == a.*Vector(1:an)' - @test_throws DimensionMismatch lmul!(Diagonal(Vector{Float64}(undef,am+1)), a) - @test_throws DimensionMismatch rmul!(a, Diagonal(Vector{Float64}(undef,an+1))) - end + for aa in (reshape([1.:6;], (2,3)), fill(float.(rand(Int8,2,2)), 2,3)) + for a in (aa, view(aa, 1:2, 1:2)) + am, an = size(a) + @testset "Scaling with rmul! and lmul" begin + @test rmul!(copy(a), 5.) == a*5 + @test lmul!(5., copy(a)) == a*5 + b = randn(2048) + subB = view(b, :, :) + @test rmul!(copy(b), 5.) == b*5 + @test rmul!(copy(subB), 5.) == subB*5 + @test lmul!(Diagonal([1.; 2.]), copy(a)) == a.*[1; 2] + @test lmul!(Diagonal([1; 2]), copy(a)) == a.*[1; 2] + @test rmul!(copy(a), Diagonal(1.:an)) == a.*Vector(1:an)' + @test rmul!(copy(a), Diagonal(1:an)) == a.*Vector(1:an)' + @test_throws DimensionMismatch lmul!(Diagonal(Vector{Float64}(undef,am+1)), a) + @test_throws DimensionMismatch rmul!(a, Diagonal(Vector{Float64}(undef,an+1))) + end - @testset "Scaling with rdiv! and ldiv!" begin - @test rdiv!(copy(a), 5.) == a/5 - @test ldiv!(5., copy(a)) == a/5 - @test ldiv!(zero(a), 5., copy(a)) == a/5 - end + @testset "Scaling with rdiv! and ldiv!" begin + @test rdiv!(copy(a), 5.) == a/5 + @test ldiv!(5., copy(a)) == a/5 + @test ldiv!(zero(a), 5., copy(a)) == a/5 + end - @testset "Scaling with 3-argument mul!" begin - @test mul!(similar(a), 5., a) == a*5 - @test mul!(similar(a), a, 5.) 
== a*5 - @test mul!(similar(a), Diagonal([1.; 2.]), a) == a.*[1; 2] - @test mul!(similar(a), Diagonal([1; 2]), a) == a.*[1; 2] - @test_throws DimensionMismatch mul!(similar(a), Diagonal(Vector{Float64}(undef, am+1)), a) - @test_throws DimensionMismatch mul!(Matrix{Float64}(undef, 3, 2), a, Diagonal(Vector{Float64}(undef, an+1))) - @test_throws DimensionMismatch mul!(similar(a), a, Diagonal(Vector{Float64}(undef, an+1))) - @test mul!(similar(a), a, Diagonal(1.:an)) == a.*Vector(1:an)' - @test mul!(similar(a), a, Diagonal(1:an)) == a.*Vector(1:an)' - end + @testset "Scaling with 3-argument mul!" begin + @test mul!(similar(a), 5., a) == a*5 + @test mul!(similar(a), a, 5.) == a*5 + @test mul!(similar(a), Diagonal([1.; 2.]), a) == a.*[1; 2] + @test mul!(similar(a), Diagonal([1; 2]), a) == a.*[1; 2] + @test_throws DimensionMismatch mul!(similar(a), Diagonal(Vector{Float64}(undef, am+1)), a) + @test_throws DimensionMismatch mul!(Matrix{Float64}(undef, 3, 2), a, Diagonal(Vector{Float64}(undef, an+1))) + @test_throws DimensionMismatch mul!(similar(a), a, Diagonal(Vector{Float64}(undef, an+1))) + @test mul!(similar(a), a, Diagonal(1.:an)) == a.*Vector(1:an)' + @test mul!(similar(a), a, Diagonal(1:an)) == a.*Vector(1:an)' + end - @testset "Scaling with 5-argument mul!" begin - @test mul!(copy(a), 5., a, 10, 100) == a*150 - @test mul!(copy(a), a, 5., 10, 100) == a*150 - @test mul!(vec(copy(a)), 5., a, 10, 100) == vec(a*150) - @test mul!(vec(copy(a)), a, 5., 10, 100) == vec(a*150) - @test_throws DimensionMismatch mul!([vec(copy(a)); 0], 5., a, 10, 100) - @test_throws DimensionMismatch mul!([vec(copy(a)); 0], a, 5., 10, 100) - @test mul!(copy(a), Diagonal([1.; 2.]), a, 10, 100) == 10a.*[1; 2] .+ 100a - @test mul!(copy(a), Diagonal([1; 2]), a, 10, 100) == 10a.*[1; 2] .+ 100a - @test mul!(copy(a), a, Diagonal(1.:an), 10, 100) == 10a.*Vector(1:an)' .+ 100a - @test mul!(copy(a), a, Diagonal(1:an), 10, 100) == 10a.*Vector(1:an)' .+ 100a + @testset "Scaling with 5-argument mul!" begin + @test mul!(copy(a), 5., a, 10, 100) == a*150 + @test mul!(copy(a), a, 5., 10, 100) == a*150 + @test mul!(vec(copy(a)), 5., a, 10, 100) == vec(a*150) + @test mul!(vec(copy(a)), a, 5., 10, 100) == vec(a*150) + @test_throws DimensionMismatch mul!([vec(copy(a)); 0], 5., a, 10, 100) + @test_throws DimensionMismatch mul!([vec(copy(a)); 0], a, 5., 10, 100) + @test mul!(copy(a), Diagonal([1.; 2.]), a, 10, 100) == 10a.*[1; 2] .+ 100a + @test mul!(copy(a), Diagonal([1; 2]), a, 10, 100) == 10a.*[1; 2] .+ 100a + @test mul!(copy(a), a, Diagonal(1.:an), 10, 100) == 10a.*Vector(1:an)' .+ 100a + @test mul!(copy(a), a, Diagonal(1:an), 10, 100) == 10a.*Vector(1:an)' .+ 100a + end end end end From 20f933ae785423d3b94474e11453794c1fadc188 Mon Sep 17 00:00:00 2001 From: David Gleich Date: Thu, 24 Oct 2024 11:07:58 -0400 Subject: [PATCH 298/537] InteractiveUtils.jl: fixes issue where subtypes resolves bindings and causes deprecation warnings (#56306) The current version of `subtypes` will throw deprecation errors even if no one is using the deprecated bindings. A similar bug was fixed in Aqua.jl - https://github.com/JuliaTesting/Aqua.jl/pull/89/files See discussion here: - https://github.com/JuliaIO/ImageMagick.jl/issues/235 (for identifying the problem) - https://github.com/simonster/Reexport.jl/issues/42 (for pointing to the issue in Aqua.jl) - https://github.com/JuliaTesting/Aqua.jl/pull/89/files (for the fix in Aqua.jl) This adds the `isbindingresolved` test to the `subtypes` function to avoid throwing deprecation warnings. 
It also adds a test to check that this doesn't happen. --- On the current master branch (before the fix), the added test shows: ``` WARNING: using deprecated binding InternalModule.MyOldType in OuterModule. , use MyType instead. Subtypes and deprecations: Test Failed at /home/dgleich/devextern/julia/usr/share/julia/stdlib/v1.12/Test/src/Test.jl:932 Expression: isempty(stderr_content) Evaluated: isempty("WARNING: using deprecated binding InternalModule.MyOldType in OuterModule.\n, use MyType instead.\n") Test Summary: | Fail Total Time Subtypes and deprecations | 1 1 2.8s ERROR: LoadError: Some tests did not pass: 0 passed, 1 failed, 0 errored, 0 broken. in expression starting at /home/dgleich/devextern/julia/stdlib/InteractiveUtils/test/runtests.jl:841 ERROR: Package InteractiveUtils errored during testing ``` --- Using the results of this pull request: ``` @test_nowarn subtypes(Integer); ``` passes without error. The other tests pass too. --- .../InteractiveUtils/src/InteractiveUtils.jl | 4 ++-- stdlib/InteractiveUtils/test/runtests.jl | 19 +++++++++++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/stdlib/InteractiveUtils/src/InteractiveUtils.jl b/stdlib/InteractiveUtils/src/InteractiveUtils.jl index f3c1ff7fba59f..f14e2f7de2f49 100644 --- a/stdlib/InteractiveUtils/src/InteractiveUtils.jl +++ b/stdlib/InteractiveUtils/src/InteractiveUtils.jl @@ -16,7 +16,7 @@ export apropos, edit, less, code_warntype, code_llvm, code_native, methodswith, import Base.Docs.apropos using Base: unwrap_unionall, rewrap_unionall, isdeprecated, Bottom, summarysize, - signature_type, format_bytes + signature_type, format_bytes, isbindingresolved using Base.Libc using Markdown @@ -262,7 +262,7 @@ function _subtypes_in!(mods::Array, x::Type) m = pop!(mods) xt = xt::DataType for s in names(m, all = true) - if isdefined(m, s) && !isdeprecated(m, s) + if isbindingresolved(m, s) && !isdeprecated(m, s) && isdefined(m, s) t = getfield(m, s) dt = isa(t, UnionAll) ? unwrap_unionall(t) : t if isa(dt, DataType) diff --git a/stdlib/InteractiveUtils/test/runtests.jl b/stdlib/InteractiveUtils/test/runtests.jl index 851391ec6c249..e729ae67bde19 100644 --- a/stdlib/InteractiveUtils/test/runtests.jl +++ b/stdlib/InteractiveUtils/test/runtests.jl @@ -823,3 +823,22 @@ end @testset "Docstrings" begin @test isempty(Docs.undocumented_names(InteractiveUtils)) end + +# issue https://github.com/JuliaIO/ImageMagick.jl/issues/235 +module OuterModule + module InternalModule + struct MyType + x::Int + end + + Base.@deprecate_binding MyOldType MyType + + export MyType + end + using .InternalModule + export MyType, MyOldType +end # module +@testset "Subtypes and deprecations" begin + using .OuterModule + @test_nowarn subtypes(Integer); +end From f285de58f470cc3f0ac263842a055ce9bddb2451 Mon Sep 17 00:00:00 2001 From: Nathan Zimmerberg <39104088+nhz2@users.noreply.github.com> Date: Thu, 24 Oct 2024 11:10:18 -0400 Subject: [PATCH 299/537] [CRC32c] Support AbstractVector{UInt8} as input (#56164) This is a similar PR to https://github.com/JuliaIO/CRC32.jl/pull/12 I added a generic fallback method for `AbstractVector{UInt8}` similar to the existing generic `IO` method. Co-authored-by: Steven G. 
Johnson --- stdlib/CRC32c/src/CRC32c.jl | 25 +++++++++++++++++++------ stdlib/CRC32c/test/runtests.jl | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 49 insertions(+), 8 deletions(-) diff --git a/stdlib/CRC32c/src/CRC32c.jl b/stdlib/CRC32c/src/CRC32c.jl index 03bef027bde16..923f7333b4c17 100644 --- a/stdlib/CRC32c/src/CRC32c.jl +++ b/stdlib/CRC32c/src/CRC32c.jl @@ -7,7 +7,6 @@ See [`CRC32c.crc32c`](@ref) for more information. """ module CRC32c -import Base.FastContiguousSubArray import Base: DenseBytes export crc32c @@ -16,9 +15,9 @@ export crc32c crc32c(data, crc::UInt32=0x00000000) Compute the CRC-32c checksum of the given `data`, which can be -an `Array{UInt8}`, a contiguous subarray thereof, or a `String`. Optionally, you can pass -a starting `crc` integer to be mixed in with the checksum. The `crc` parameter -can be used to compute a checksum on data divided into chunks: performing +an `Array{UInt8}`, a contiguous subarray thereof, an `AbstractVector{UInt8}`, or a `String`. +Optionally, you can pass a starting `crc` integer to be mixed in with the checksum. +The `crc` parameter can be used to compute a checksum on data divided into chunks: performing `crc32c(data2, crc32c(data1))` is equivalent to the checksum of `[data1; data2]`. (Technically, a little-endian checksum is computed.) @@ -30,11 +29,26 @@ calling [`take!`](@ref). For a `String`, note that the result is specific to the UTF-8 encoding (a different checksum would be obtained from a different Unicode encoding). -To checksum an `a::Array` of some other bitstype, you can do `crc32c(reinterpret(UInt8,a))`, +To checksum an `a::AbstractArray` of some other bitstype without padding, +you can do `crc32c(vec(reinterpret(UInt8,a)))`, but note that the result may be endian-dependent. """ function crc32c end +function crc32c(a::AbstractVector{UInt8}, crc::UInt32=0x00000000) + # use block size 24576=8192*3, since that is the threshold for + # 3-way parallel SIMD code in the underlying jl_crc32c C function. + last = lastindex(a) + nb = length(a) + buf = Memory{UInt8}(undef, Int(min(nb, 24576))) + while nb > 0 + n = min(nb, 24576) + copyto!(buf, 1, a, last - nb + 1, n) + crc = Base.unsafe_crc32c(buf, n % Csize_t, crc) + nb -= n + end + return crc +end function crc32c(a::DenseBytes, crc::UInt32=0x00000000) Base._crc32c(a, crc) @@ -51,6 +65,5 @@ mixed with a starting `crc` integer. 
If `nb` is not supplied, then """ crc32c(io::IO, nb::Integer, crc::UInt32=0x00000000) = Base._crc32c(io, nb, crc) crc32c(io::IO, crc::UInt32=0x00000000) = Base._crc32c(io, crc) -crc32c(io::IOStream, crc::UInt32=0x00000000) = Base._crc32c(io, crc) end diff --git a/stdlib/CRC32c/test/runtests.jl b/stdlib/CRC32c/test/runtests.jl index e1bd75d0e15f6..37b447e6d999a 100644 --- a/stdlib/CRC32c/test/runtests.jl +++ b/stdlib/CRC32c/test/runtests.jl @@ -3,12 +3,23 @@ using Test, Random using CRC32c +const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") +isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl")) +using .Main.OffsetArrays: Origin + +isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl")) +using .Main.FillArrays: Fill + function test_crc32c(crc32c) # CRC32c checksum (test data generated from @andrewcooke's CRC.jl package) for (n,crc) in [(0,0x00000000),(1,0xa016d052),(2,0x03f89f52),(3,0xf130f21e),(4,0x29308cf4),(5,0x53518fab),(6,0x4f4dfbab),(7,0xbd3a64dc),(8,0x46891f81),(9,0x5a14b9f9),(10,0xb219db69),(11,0xd232a91f),(12,0x51a15563),(13,0x9f92de41),(14,0x4d8ae017),(15,0xc8b74611),(16,0xa0de6714),(17,0x672c992a),(18,0xe8206eb6),(19,0xc52fd285),(20,0x327b0397),(21,0x318263dd),(22,0x08485ccd),(23,0xea44d29e),(24,0xf6c0cb13),(25,0x3969bba2),(26,0x6a8810ec),(27,0x75b3d0df),(28,0x82d535b1),(29,0xbdf7fc12),(30,0x1f836b7d),(31,0xd29f33af),(32,0x8e4acb3e),(33,0x1cbee2d1),(34,0xb25f7132),(35,0xb0fa484c),(36,0xb9d262b4),(37,0x3207fe27),(38,0xa024d7ac),(39,0x49a2e7c5),(40,0x0e2c157f),(41,0x25f7427f),(42,0x368c6adc),(43,0x75efd4a5),(44,0xa84c5c31),(45,0x0fc817b2),(46,0x8d99a881),(47,0x5cc3c078),(48,0x9983d5e2),(49,0x9267c2db),(50,0xc96d4745),(51,0x058d8df3),(52,0x453f9cf3),(53,0xb714ade1),(54,0x55d3c2bc),(55,0x495710d0),(56,0x3bddf494),(57,0x4f2577d0),(58,0xdae0f604),(59,0x3c57c632),(60,0xfe39bbb0),(61,0x6f5d1d41),(62,0x7d996665),(63,0x68c738dc),(64,0x8dfea7ae)] s = String(UInt8[1:n;]) ss = SubString(String(UInt8[0:(n+1);]), 2:(n+1)) @test crc32c(UInt8[1:n;]) == crc == crc32c(s) == crc32c(ss) + @test crc == crc32c(UInt8(1):UInt8(n)) + m = Memory{UInt8}(undef, n) + m .= 1:n + @test crc == crc32c(m) end # test that crc parameter is equivalent to checksum of concatenated data, @@ -50,9 +61,24 @@ function test_crc32c(crc32c) LONG = 8192 # from crc32c.c SHORT = 256 # from crc32c.c n = LONG*3+SHORT*3+SHORT*2+64+7 - big = vcat(reinterpret(UInt8, hton.(0x74d7f887 .^ (1:n÷4))), UInt8[1:n%4;]) + bigg = vcat(reinterpret(UInt8, hton.(0x74d7f887 .^ (1:n÷4))), UInt8[1:n%4;]) for (offset,crc) in [(0, 0x13a5ecd5), (1, 0xecf34b7e), (2, 0xfa71b596), (3, 0xbfd24745), (4, 0xf0cb3370), (5, 0xb0ec88b5), (6, 0x258c20a8), (7, 0xa9bd638d)] - @test crc == crc32c(@view big[1+offset:end]) + @test crc == crc32c(@view bigg[1+offset:end]) + end + + # test crc of AbstractVector{UInt8} + @test crc32c(Origin(0)(b"hello")) == crc32c(b"hello") + weird_vectors = [ + view(rand(UInt8, 300000), 1:2:300000), + vec(reinterpret(UInt8, collect(Int64(1):Int64(4)))), + vec(reinterpret(UInt8, Int64(1):Int64(4))), + view([0x01, 0x02], UInt(1):UInt(2)), + Fill(0x00, UInt(100)), + Fill(0x00, big(100)), + reinterpret(UInt8, BitVector((true, false, true, false))), + ] + for a in weird_vectors + @test crc32c(a) == crc32c(collect(a)) end end unsafe_crc32c_sw(a, n, crc) = @@ -64,6 +90,8 @@ function crc32c_sw(s::Union{String, SubString{String}}, crc::UInt32=0x00000000) unsafe_crc32c_sw(s, sizeof(s), crc) end 
+crc32c_sw(a::AbstractVector{UInt8}, crc::UInt32=0x00000000) = + crc32c_sw(copyto!(Vector{UInt8}(undef, length(a)), a)) function crc32c_sw(io::IO, nb::Integer, crc::UInt32=0x00000000) nb < 0 && throw(ArgumentError("number of bytes to checksum must be ≥ 0")) buf = Vector{UInt8}(undef, min(nb, 24576)) From 5cdf3789d8058d137b62259d2ab12f6eb456911e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lars=20G=C3=B6ttgens?= Date: Thu, 24 Oct 2024 22:43:27 +0200 Subject: [PATCH 300/537] Put `jl_gc_new_weakref` in a header file again (#56319) --- src/gc-common.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/gc-common.h b/src/gc-common.h index 32b7470b13a58..bbac5f30c6755 100644 --- a/src/gc-common.h +++ b/src/gc-common.h @@ -185,4 +185,13 @@ extern jl_ptls_t* gc_all_tls_states; extern int gc_logging_enabled; +// =========================================================================== // +// Misc +// =========================================================================== // + +// Allocates a new weak-reference, assigns its value and increments Julia allocation +// counters. If thread-local allocators are used, then this function should allocate in the +// thread-local allocator of the current thread. +JL_DLLEXPORT jl_weakref_t *jl_gc_new_weakref(jl_value_t *value); + #endif // JL_GC_COMMON_H From bc660477fd4a82e61e9dc70dafa12613df656bba Mon Sep 17 00:00:00 2001 From: "Steven G. Johnson" Date: Thu, 24 Oct 2024 19:17:06 -0400 Subject: [PATCH 301/537] use textwidth for string display truncation (#55442) It makes a big difference when displaying strings that have width-2 or width-0 characters. --- base/strings/io.jl | 32 +++++++++++++------------------- base/strings/util.jl | 21 ++++++++++++--------- test/show.jl | 14 +++++++------- 3 files changed, 32 insertions(+), 35 deletions(-) diff --git a/base/strings/io.jl b/base/strings/io.jl index 82dd128240a92..116bcf71eeb7a 100644 --- a/base/strings/io.jl +++ b/base/strings/io.jl @@ -214,35 +214,29 @@ function show( # one line in collection, seven otherwise get(io, :typeinfo, nothing) === nothing && (limit *= 7) end + limit = max(0, limit-2) # quote chars # early out for short strings - len = ncodeunits(str) - len ≤ limit - 2 && # quote chars - return show(io, str) + check_textwidth(str, limit) && return show(io, str) # these don't depend on string data units = codeunit(str) == UInt8 ? 
"bytes" : "code units" skip_text(skip) = " ⋯ $skip $units ⋯ " - short = length(skip_text("")) + 4 # quote chars - chars = max(limit, short + 1) - short # at least 1 digit - # figure out how many characters to print in elided case - chars -= d = ndigits(len - chars) # first adjustment - chars += d - ndigits(len - chars) # second if needed - chars = max(0, chars) + # longest possible replacement string for omitted chars + max_replacement = skip_text(ncodeunits(str) * 100) # *100 for 2 inner quote chars - # find head & tail, avoiding O(length(str)) computation - head = nextind(str, 0, 1 + (chars + 1) ÷ 2) - tail = prevind(str, len + 1, chars ÷ 2) + head, tail = string_truncate_boundaries(str, limit, max_replacement, Val(:center)) # threshold: min chars skipped to make elision worthwhile - t = short + ndigits(len - chars) - 1 - n = tail - head # skipped code units - if 4t ≤ n || t ≤ n && t ≤ length(str, head, tail-1) - skip = skip_text(n) - show(io, SubString(str, 1:prevind(str, head))) - printstyled(io, skip; color=:light_yellow, bold=true) - show(io, SubString(str, tail)) + afterhead = nextind(str, head) + n = tail - afterhead # skipped code units + replacement = skip_text(n) + t = ncodeunits(replacement) # length of replacement (textwidth == ncodeunits here) + @views if 4t ≤ n || t ≤ n && t ≤ textwidth(str[afterhead:prevind(str,tail)]) + show(io, str[begin:head]) + printstyled(io, replacement; color=:light_yellow, bold=true) + show(io, str[tail:end]) else show(io, str) end diff --git a/base/strings/util.jl b/base/strings/util.jl index 0ba76e1c76fa0..04d451a4fd288 100644 --- a/base/strings/util.jl +++ b/base/strings/util.jl @@ -613,22 +613,25 @@ function ctruncate(str::AbstractString, maxwidth::Integer, replacement::Union{Ab end end +# return whether textwidth(str) <= maxwidth +function check_textwidth(str::AbstractString, maxwidth::Integer) + # check efficiently for early return if str is wider than maxwidth + total_width = 0 + for c in str + total_width += textwidth(c) + total_width > maxwidth && return false + end + return true +end + function string_truncate_boundaries( str::AbstractString, maxwidth::Integer, replacement::Union{AbstractString,AbstractChar}, ::Val{mode}, prefer_left::Bool = true) where {mode} - maxwidth >= 0 || throw(ArgumentError("maxwidth $maxwidth should be non-negative")) - - # check efficiently for early return if str is less wide than maxwidth - total_width = 0 - for c in str - total_width += textwidth(c) - total_width > maxwidth && break - end - total_width <= maxwidth && return nothing + check_textwidth(str, maxwidth) && return nothing l0, _ = left, right = firstindex(str), lastindex(str) width = textwidth(replacement) diff --git a/test/show.jl b/test/show.jl index 976141f1ebb17..de5cf32b726ee 100644 --- a/test/show.jl +++ b/test/show.jl @@ -928,19 +928,19 @@ end # string show with elision @testset "string show with elision" begin @testset "elision logic" begin - strs = ["A", "∀", "∀A", "A∀", "😃"] + strs = ["A", "∀", "∀A", "A∀", "😃", "x̂"] for limit = 0:100, len = 0:100, str in strs str = str^len str = str[1:nextind(str, 0, len)] out = sprint() do io show(io, MIME"text/plain"(), str; limit) end - lower = length("\"\" ⋯ $(ncodeunits(str)) bytes ⋯ \"\"") + lower = textwidth("\"\" ⋯ $(ncodeunits(str)) bytes ⋯ \"\"") limit = max(limit, lower) - if length(str) + 2 ≤ limit + if textwidth(str) + 2 ≤ limit+1 && !contains(out, '⋯') @test eval(Meta.parse(out)) == str else - @test limit-!isascii(str) <= length(out) <= limit + @test limit-2 <= textwidth(out) <= limit re = 
r"(\"[^\"]*\") ⋯ (\d+) bytes ⋯ (\"[^\"]*\")" m = match(re, out) head = eval(Meta.parse(m.captures[1])) @@ -956,11 +956,11 @@ end @testset "default elision limit" begin r = replstr("x"^1000) - @test length(r) == 7*80 - @test r == repr("x"^271) * " ⋯ 459 bytes ⋯ " * repr("x"^270) + @test length(r) == 7*80-1 + @test r == repr("x"^270) * " ⋯ 460 bytes ⋯ " * repr("x"^270) r = replstr(["x"^1000]) @test length(r) < 120 - @test r == "1-element Vector{String}:\n " * repr("x"^31) * " ⋯ 939 bytes ⋯ " * repr("x"^30) + @test r == "1-element Vector{String}:\n " * repr("x"^30) * " ⋯ 940 bytes ⋯ " * repr("x"^30) end end From dc6072726ea721c21b7bef82e0a1c2392462ffdf Mon Sep 17 00:00:00 2001 From: Fredrik Ekre Date: Fri, 25 Oct 2024 01:41:35 +0200 Subject: [PATCH 302/537] Use `pwd()` as the default directory to walk in `walkdir` (#55550) --- base/file.jl | 7 +++++-- test/file.jl | 7 +++++++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/base/file.jl b/base/file.jl index 567783c4b1e5b..c69a598f42623 100644 --- a/base/file.jl +++ b/base/file.jl @@ -1100,7 +1100,7 @@ function _readdir(dir::AbstractString; return_objects::Bool=false, join::Bool=fa end """ - walkdir(dir; topdown=true, follow_symlinks=false, onerror=throw) + walkdir(dir = pwd(); topdown=true, follow_symlinks=false, onerror=throw) Return an iterator that walks the directory tree of a directory. @@ -1117,6 +1117,9 @@ resume where the last left off, like [`Iterators.Stateful`](@ref). See also: [`readdir`](@ref). +!!! compat "Julia 1.12" + `pwd()` as the default directory was added in Julia 1.12. + # Examples ```julia for (path, dirs, files) in walkdir(".") @@ -1146,7 +1149,7 @@ julia> (path, dirs, files) = first(itr) ("my/test/dir", String[], String[]) ``` """ -function walkdir(path; topdown=true, follow_symlinks=false, onerror=throw) +function walkdir(path = pwd(); topdown=true, follow_symlinks=false, onerror=throw) function _walkdir(chnl, path) tryf(f, p) = try f(p) diff --git a/test/file.jl b/test/file.jl index a4262c4eaaa21..498761d6a624b 100644 --- a/test/file.jl +++ b/test/file.jl @@ -1728,6 +1728,13 @@ cd(dirwalk) do @test dirs == [] @test files == ["foo"] end + + # pwd() as default directory + for ((r1, d1, f1), (r2, d2, f2)) in zip(walkdir(), walkdir(pwd())) + @test r1 == r2 + @test d1 == d2 + @test f1 == f2 + end end rm(dirwalk, recursive=true) From beda6322f18dd5cb7c7f687afcd9e8b8122989aa Mon Sep 17 00:00:00 2001 From: Zentrik Date: Fri, 25 Oct 2024 00:43:24 +0100 Subject: [PATCH 303/537] Reset mtime of BOLTed files to prevent make rebuilding targets (#55587) This simplifies the `finish_stage` rule. Co-authored-by: Zentrik --- contrib/bolt/Makefile | 12 +++++++----- contrib/pgo-lto-bolt/Makefile | 12 +++++++----- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/contrib/bolt/Makefile b/contrib/bolt/Makefile index ea92ba9ff936a..76833b9865020 100644 --- a/contrib/bolt/Makefile +++ b/contrib/bolt/Makefile @@ -79,22 +79,21 @@ copy_originals: stage1 # I don't think there's any particular reason to have -no-huge-pages here, perhaps slightly more accurate profile data # as the final build uses -no-huge-pages +# We reset the mtime of the files to prevent make from rebuilding targets depending on them. 
bolt_instrument: copy_originals for file in $(FILES_TO_OPTIMIZE); do \ abs_file=$(STAGE1_BUILD)/usr/lib/$$file; \ + old_time=$$(stat -c %Y $$abs_file); \ $(LLVM_BOLT) "$$abs_file.original" -o $$abs_file --instrument --instrumentation-file-append-pid --instrumentation-file="$(PROFILE_DIR)/$$file-prof" -no-huge-pages; \ mkdir -p $$(dirname "$(PROFILE_DIR)/$$file-prof"); \ + touch -d "@$$old_time" $$abs_file; \ printf "\n"; \ done && \ touch $@ @echo $(AFTER_INSTRUMENT_MESSAGE) -# We don't want to rebuild julia-src as then we lose the bolt instrumentation -# So we have to manually build the sysimage and package image finish_stage1: stage1 - $(MAKE) -C $(STAGE1_BUILD) julia-base-cache && \ - $(MAKE) -C $(STAGE1_BUILD) -f sysimage.mk sysimg-release && \ - $(MAKE) -C $(STAGE1_BUILD) -f pkgimage.mk release + $(MAKE) -C $(STAGE1_BUILD) merge_data: bolt_instrument for file in $(FILES_TO_OPTIMIZE); do \ @@ -108,10 +107,13 @@ merge_data: bolt_instrument # It tries to reuse old text segments to reduce binary size # BOLT doesn't fully support computed gotos https://github.com/llvm/llvm-project/issues/89117, so we cannot use --use-old-text on libjulia-internal # That flag saves less than 1 MiB for libjulia-internal so oh well. +# We reset the mtime of the files to prevent make from rebuilding targets depending on them. bolt: merge_data for file in $(FILES_TO_OPTIMIZE); do \ abs_file=$(STAGE1_BUILD)/usr/lib/$$file; \ + old_time=$$(stat -c %Y $$abs_file); \ $(LLVM_BOLT) "$$abs_file.original" -data "$(PROFILE_DIR)/$$file-prof.merged.fdata" -o $$abs_file $(BOLT_ARGS) $$(if [ "$$file" != $(shell readlink $(STAGE1_BUILD)/usr/lib/libjulia-internal.so) ]; then echo "--use-old-text -split-strategy=cdsplit"; fi); \ + touch -d "@$$old_time" $$abs_file; \ done && \ touch $@ diff --git a/contrib/pgo-lto-bolt/Makefile b/contrib/pgo-lto-bolt/Makefile index 2114b14991184..ce1b8b04f68c9 100644 --- a/contrib/pgo-lto-bolt/Makefile +++ b/contrib/pgo-lto-bolt/Makefile @@ -123,25 +123,24 @@ copy_originals: stage2 # I don't think there's any particular reason to have -no-huge-pages here, perhaps slightly more accurate profile data # as the final build uses -no-huge-pages +# We reset the mtime of the files to prevent make from rebuilding targets depending on them. 
bolt_instrument: copy_originals for file in $(FILES_TO_OPTIMIZE); do \ abs_file=$(STAGE2_BUILD)/usr/lib/$$file; \ + old_time=$$(stat -c %Y $$abs_file); \ $(LLVM_BOLT) "$$abs_file.original" -o $$abs_file --instrument --instrumentation-file-append-pid --instrumentation-file="$(BOLT_PROFILE_DIR)/$$file-prof" -no-huge-pages; \ mkdir -p $$(dirname "$(BOLT_PROFILE_DIR)/$$file-prof"); \ + touch -d "@$$old_time" $$abs_file; \ printf "\n"; \ done && \ touch $@ @echo $(AFTER_INSTRUMENT_MESSAGE) -# We don't want to rebuild julia-src as then we lose the bolt instrumentation -# So we have to manually build the sysimage and package image finish_stage2: PGO_CFLAGS:=-fprofile-use=$(PGO_PROFILE_FILE) finish_stage2: PGO_CXXFLAGS:=-fprofile-use=$(PGO_PROFILE_FILE) finish_stage2: PGO_LDFLAGS:=-flto=thin -fprofile-use=$(PGO_PROFILE_FILE) -Wl,--icf=safe finish_stage2: stage2 - $(MAKE) -C $(STAGE2_BUILD) $(TOOLCHAIN_FLAGS) julia-base-cache && \ - $(MAKE) -C $(STAGE2_BUILD) $(TOOLCHAIN_FLAGS) -f sysimage.mk sysimg-release && \ - $(MAKE) -C $(STAGE2_BUILD) $(TOOLCHAIN_FLAGS) -f pkgimage.mk release + $(MAKE) -C $(STAGE2_BUILD) $(TOOLCHAIN_FLAGS) merge_data: bolt_instrument for file in $(FILES_TO_OPTIMIZE); do \ @@ -155,10 +154,13 @@ merge_data: bolt_instrument # It tries to reuse old text segments to reduce binary size # BOLT doesn't fully support computed gotos https://github.com/llvm/llvm-project/issues/89117, so we cannot use --use-old-text on libjulia-internal # That flag saves less than 1 MiB for libjulia-internal so oh well. +# We reset the mtime of the files to prevent make from rebuilding targets depending on them. bolt: merge_data for file in $(FILES_TO_OPTIMIZE); do \ abs_file=$(STAGE2_BUILD)/usr/lib/$$file; \ + old_time=$$(stat -c %Y $$abs_file); \ $(LLVM_BOLT) "$$abs_file.original" -data "$(BOLT_PROFILE_DIR)/$$file-prof.merged.fdata" -o $$abs_file $(BOLT_ARGS) $$(if [ "$$file" != $(shell readlink $(STAGE2_BUILD)/usr/lib/libjulia-internal.so) ]; then echo "--use-old-text -split-strategy=cdsplit"; fi); \ + touch -d "@$$old_time" $$abs_file; \ done && \ touch $@ From c94102bea4443c8cab8c78589a96cbcddbea283e Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Thu, 24 Oct 2024 22:43:21 -0400 Subject: [PATCH 304/537] add docstring note about `displaysize` and `IOContext` with `context` (#55510) --- base/show.jl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/base/show.jl b/base/show.jl index ee467ae90ff50..dbdd85f0608da 100644 --- a/base/show.jl +++ b/base/show.jl @@ -339,6 +339,11 @@ end IOContext(io::IO, context::IOContext) Create an `IOContext` that wraps an alternate `IO` but inherits the properties of `context`. + +!!! note + Unless explicitly set in the wrapped `io` the `displaysize` of `io` will not be inherited. + This is because by default `displaysize` is not a property of IO objects themselves, but lazily inferred, + as the size of the terminal window can change during the lifetime of the IO object. """ IOContext(io::IO, context::IO) = IOContext(io, ioproperties(context)) From ac5bb668dafbfb0ce96449cfa32e64821a4cce15 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 25 Oct 2024 09:45:44 +0530 Subject: [PATCH 305/537] LinearAlgebra: replace some hardcoded loop ranges with axes (#56243) These are safer in general, as well as easier to read. Also, narrow the scopes of some `@inbounds` annotations. 
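As a quick illustration of why `axes` is preferable (a made-up example, not code from this patch): a hardcoded `1:size(A, d)` range silently assumes 1-based indexing, whereas `axes(A, d)` is valid for any `AbstractArray`, including offset-indexed ones, and it also justifies a tightly scoped `@inbounds` on the access itself.

```julia
# Hypothetical example contrasting the two styles
function mysum_hardcoded(A::AbstractMatrix)
    s = zero(eltype(A))
    for j in 1:size(A, 2), i in 1:size(A, 1)  # assumes indices start at 1
        s += A[i, j]
    end
    return s
end

function mysum_axes(A::AbstractMatrix)
    s = zero(eltype(A))
    for j in axes(A, 2), i in axes(A, 1)      # correct for any index range
        s += @inbounds A[i, j]                # @inbounds narrowed to this access
    end
    return s
end
```

With an offset-indexed matrix (axes starting at 0, say), the first version typically throws a `BoundsError` or visits the wrong elements, while the second returns the correct sum.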
--- stdlib/LinearAlgebra/src/generic.jl | 95 ++++++++++++++--------------- 1 file changed, 46 insertions(+), 49 deletions(-) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 9c050a32bbda7..20c58e593d3f6 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -734,18 +734,15 @@ norm(::Missing, p::Real=2) = missing # special cases of opnorm function opnorm1(A::AbstractMatrix{T}) where T require_one_based_indexing(A) - m, n = size(A) Tnorm = typeof(float(real(zero(T)))) Tsum = promote_type(Float64, Tnorm) nrm::Tsum = 0 - @inbounds begin - for j = 1:n - nrmj::Tsum = 0 - for i = 1:m - nrmj += norm(A[i,j]) - end - nrm = max(nrm,nrmj) + for j in axes(A,2) + nrmj::Tsum = 0 + for i in axes(A,1) + nrmj += norm(@inbounds A[i,j]) end + nrm = max(nrm,nrmj) end return convert(Tnorm, nrm) end @@ -761,18 +758,15 @@ end function opnormInf(A::AbstractMatrix{T}) where T require_one_based_indexing(A) - m,n = size(A) Tnorm = typeof(float(real(zero(T)))) Tsum = promote_type(Float64, Tnorm) nrm::Tsum = 0 - @inbounds begin - for i = 1:m - nrmi::Tsum = 0 - for j = 1:n - nrmi += norm(A[i,j]) - end - nrm = max(nrm,nrmi) + for i in axes(A,1) + nrmi::Tsum = 0 + for j in axes(A,2) + nrmi += norm(@inbounds A[i,j]) end + nrm = max(nrm,nrmi) end return convert(Tnorm, nrm) end @@ -967,7 +961,7 @@ function dot(x::AbstractArray, y::AbstractArray) end s = zero(dot(first(x), first(y))) for (Ix, Iy) in zip(eachindex(x), eachindex(y)) - @inbounds s += dot(x[Ix], y[Iy]) + s += dot(@inbounds(x[Ix]), @inbounds(y[Iy])) end s end @@ -1008,11 +1002,11 @@ function dot(x::AbstractVector, A::AbstractMatrix, y::AbstractVector) s = zero(T) i₁ = first(eachindex(x)) x₁ = first(x) - @inbounds for j in eachindex(y) - yj = y[j] + for j in eachindex(y) + yj = @inbounds y[j] if !iszero(yj) - temp = zero(adjoint(A[i₁,j]) * x₁) - @simd for i in eachindex(x) + temp = zero(adjoint(@inbounds A[i₁,j]) * x₁) + @inbounds @simd for i in eachindex(x) temp += adjoint(A[i,j]) * x[i] end s += dot(temp, yj) @@ -1625,10 +1619,12 @@ function rotate!(x::AbstractVector, y::AbstractVector, c, s) if n != length(y) throw(DimensionMismatch(lazy"x has length $(length(x)), but y has length $(length(y))")) end - @inbounds for i = 1:n - xi, yi = x[i], y[i] - x[i] = c *xi + s*yi - y[i] = -conj(s)*xi + c*yi + for i in eachindex(x,y) + @inbounds begin + xi, yi = x[i], y[i] + x[i] = c *xi + s*yi + y[i] = -conj(s)*xi + c*yi + end end return x, y end @@ -1648,10 +1644,12 @@ function reflect!(x::AbstractVector, y::AbstractVector, c, s) if n != length(y) throw(DimensionMismatch(lazy"x has length $(length(x)), but y has length $(length(y))")) end - @inbounds for i = 1:n - xi, yi = x[i], y[i] - x[i] = c *xi + s*yi - y[i] = conj(s)*xi - c*yi + for i in eachindex(x,y) + @inbounds begin + xi, yi = x[i], y[i] + x[i] = c *xi + s*yi + y[i] = conj(s)*xi - c*yi + end end return x, y end @@ -1662,18 +1660,16 @@ end require_one_based_indexing(x) n = length(x) n == 0 && return zero(eltype(x)) - @inbounds begin - ξ1 = x[1] - normu = norm(x) - if iszero(normu) - return zero(ξ1/normu) - end - ν = T(copysign(normu, real(ξ1))) - ξ1 += ν - x[1] = -ν - for i = 2:n - x[i] /= ξ1 - end + ξ1 = @inbounds x[1] + normu = norm(x) + if iszero(normu) + return zero(ξ1/normu) + end + ν = T(copysign(normu, real(ξ1))) + ξ1 += ν + @inbounds x[1] = -ν + for i in 2:n + @inbounds x[i] /= ξ1 end ξ1/ν end @@ -1684,16 +1680,16 @@ end Multiplies `A` in-place by a Householder reflection on the left. 
It is equivalent to `A .= (I - conj(τ)*[1; x[2:end]]*[1; x[2:end]]')*A`. """ @inline function reflectorApply!(x::AbstractVector, τ::Number, A::AbstractVecOrMat) - require_one_based_indexing(x) + require_one_based_indexing(x, A) m, n = size(A, 1), size(A, 2) if length(x) != m throw(DimensionMismatch(lazy"reflector has length $(length(x)), which must match the first dimension of matrix A, $m")) end m == 0 && return A - @inbounds for j = 1:n - Aj, xj = view(A, 2:m, j), view(x, 2:m) - vAj = conj(τ)*(A[1, j] + dot(xj, Aj)) - A[1, j] -= vAj + for j in axes(A,2) + Aj, xj = @inbounds view(A, 2:m, j), view(x, 2:m) + vAj = conj(τ)*(@inbounds(A[1, j]) + dot(xj, Aj)) + @inbounds A[1, j] -= vAj axpy!(-vAj, xj, Aj) end return A @@ -1828,9 +1824,10 @@ julia> LinearAlgebra.det_bareiss!(M) ``` """ function det_bareiss!(M) + Base.require_one_based_indexing(M) n = checksquare(M) sign, prev = Int8(1), one(eltype(M)) - for i in 1:n-1 + for i in axes(M,2)[begin:end-1] if iszero(M[i,i]) # swap with another col to make nonzero swapto = findfirst(!iszero, @view M[i,i+1:end]) isnothing(swapto) && return zero(prev) @@ -2020,12 +2017,12 @@ function copytrito!(B::AbstractMatrix, A::AbstractMatrix, uplo::AbstractChar) A = Base.unalias(B, A) if uplo == 'U' LAPACK.lacpy_size_check((m1, n1), (n < m ? n : m, n)) - for j in 1:n, i in 1:min(j,m) + for j in axes(A,2), i in axes(A,1)[begin : min(j,end)] @inbounds B[i,j] = A[i,j] end else # uplo == 'L' LAPACK.lacpy_size_check((m1, n1), (m, m < n ? m : n)) - for j in 1:n, i in j:m + for j in axes(A,2), i in axes(A,1)[j:end] @inbounds B[i,j] = A[i,j] end end From 29b509da2efb9fa499b567fd1d976532da5864fc Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 25 Oct 2024 13:18:31 +0900 Subject: [PATCH 306/537] inference: fix `[modifyfield!|replacefield!]_tfunc`s (#56310) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently the following code snippet results in an internal error: ```julia julia> func(x) = @atomic :monotonic x[].count += 1; julia> let;Base.Experimental.@force_compile x = Ref(nothing) func(x) end Internal error: during type inference of ... ``` This issue is caused by the incorrect use of `_fieldtype_tfunc(𝕃, o, f)` within `modifyfield!_tfunc`, specifically because `o` should be `widenconst`ed, but it isn’t. By using `_fieldtype_tfunc` correctly, we can avoid the error through error-catching in `abstract_modifyop!`. This commit also includes a similar fix for `replacefield!_tfunc` as well. 
--- base/compiler/tfuncs.jl | 6 ++++-- test/compiler/inference.jl | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index 450cfdcfadf82..a74146dcff552 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -1347,13 +1347,15 @@ end return getfield_tfunc(𝕃, o, f) end @nospecs function modifyfield!_tfunc(𝕃::AbstractLattice, o, f, op, v, order=Symbol) - T = _fieldtype_tfunc(𝕃, o, f, isconcretetype(o)) + o′ = widenconst(o) + T = _fieldtype_tfunc(𝕃, o′, f, isconcretetype(o′)) T === Bottom && return Bottom PT = Const(Pair) return instanceof_tfunc(apply_type_tfunc(𝕃, PT, T, T), true)[1] end @nospecs function replacefield!_tfunc(𝕃::AbstractLattice, o, f, x, v, success_order=Symbol, failure_order=Symbol) - T = _fieldtype_tfunc(𝕃, o, f, isconcretetype(o)) + o′ = widenconst(o) + T = _fieldtype_tfunc(𝕃, o′, f, isconcretetype(o′)) T === Bottom && return Bottom PT = Const(ccall(:jl_apply_cmpswap_type, Any, (Any,), T) where T) return instanceof_tfunc(apply_type_tfunc(𝕃, PT, T), true)[1] diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index dd62e329962c6..dab8e57aa2309 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -6063,3 +6063,18 @@ end === Union{} @test Base.infer_return_type() do TypeVar(:Issue56248, Any, 1) end === Union{} + +@test Base.infer_return_type((Nothing,)) do x + @atomic x.count += 1 +end == Union{} +@test Base.infer_return_type((Nothing,)) do x + @atomicreplace x.count 0 => 1 +end == Union{} +mutable struct AtomicModifySafety + @atomic count::Int +end +let src = code_typed((Union{Nothing,AtomicModifySafety},)) do x + @atomic x.count += 1 + end |> only |> first + @test any(@nospecialize(x)->Meta.isexpr(x, :invoke_modify), src.code) +end From e8aacbf45ea24dc4e4d016fbc16ea2a8cd8c1f1d Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 25 Oct 2024 13:20:05 +0900 Subject: [PATCH 307/537] inference: don't allow `SSAValue`s in assignment lhs (#56314) In `InferenceState` the lhs of a `:=` expression should only contain `GlobalRef` or `SlotNumber` and no other IR elements. Currently when `SSAValue` appears in `lhs`, the invalid assignment effect is somehow ignored, but this is incorrect anyway, so this commit removes that check. Since `SSAValue` should not appear in `lhs` in the first place, this is not a significant change though. --- base/compiler/abstractinterpretation.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index dbe79e19bf9b4..777240adf581b 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -3648,7 +3648,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr changes = StateUpdate(lhs, VarState(rt, false)) elseif isa(lhs, GlobalRef) handle_global_assignment!(interp, frame, lhs, rt) - elseif !isa(lhs, SSAValue) + else merge_effects!(interp, frame, EFFECTS_UNKNOWN) end end From ec2e1217fd816f53b8921d515634534930fa6e37 Mon Sep 17 00:00:00 2001 From: Nathan Zimmerberg <39104088+nhz2@users.noreply.github.com> Date: Fri, 25 Oct 2024 03:25:34 -0400 Subject: [PATCH 308/537] Fix `unsafe_read` for `IOBuffer` with non dense data (#55776) Fixes one part of #54636 It was only safe to use the following if `from.data` was a dense vector of bytes. 
```julia GC.@preserve from unsafe_copyto!(p, pointer(from.data, from.ptr), adv) ``` This PR adds a fallback suggested by @matthias314 in https://discourse.julialang.org/t/copying-bytes-from-abstractvector-to-ptr/119408/7 --- base/iobuffer.jl | 21 ++++++++++++++++++++- base/strings/search.jl | 8 -------- test/iobuffer.jl | 10 ++++++++++ 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/base/iobuffer.jl b/base/iobuffer.jl index bd924fd040496..7e309b9ad586c 100644 --- a/base/iobuffer.jl +++ b/base/iobuffer.jl @@ -194,7 +194,7 @@ function unsafe_read(from::GenericIOBuffer, p::Ptr{UInt8}, nb::UInt) from.readable || _throw_not_readable() avail = bytesavailable(from) adv = min(avail, nb) - GC.@preserve from unsafe_copyto!(p, pointer(from.data, from.ptr), adv) + unsafe_read!(p, from.data, from.ptr, adv) from.ptr += adv if nb > avail throw(EOFError()) @@ -202,6 +202,25 @@ function unsafe_read(from::GenericIOBuffer, p::Ptr{UInt8}, nb::UInt) nothing end +function unsafe_read!(dest::Ptr{UInt8}, src::AbstractVector{UInt8}, so::Integer, nbytes::UInt) + for i in 1:nbytes + unsafe_store!(dest, @inbounds(src[so+i-1]), i) + end +end + +# Note: Currently, CodeUnits <: DenseVector, which makes this union redundant w.r.t +# DenseArrayType{UInt8}, but this is a bug, and may be removed in future versions +# of Julia. See #54002 +const DenseBytes = Union{ + <:DenseArrayType{UInt8}, + CodeUnits{UInt8, <:Union{String, SubString{String}}}, +} + +function unsafe_read!(dest::Ptr{UInt8}, src::DenseBytes, so::Integer, nbytes::UInt) + GC.@preserve src unsafe_copyto!(dest, pointer(src, so), nbytes) + nothing +end + function peek(from::GenericIOBuffer, T::Union{Type{Int16},Type{UInt16},Type{Int32},Type{UInt32},Type{Int64},Type{UInt64},Type{Int128},Type{UInt128},Type{Float16},Type{Float32},Type{Float64}}) from.readable || _throw_not_readable() avail = bytesavailable(from) diff --git a/base/strings/search.jl b/base/strings/search.jl index a481b3af775e0..5f658e24526ba 100644 --- a/base/strings/search.jl +++ b/base/strings/search.jl @@ -61,14 +61,6 @@ function findnext(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:AbstractChar} end end -# Note: Currently, CodeUnits <: DenseVector, which makes this union redundant w.r.t -# DenseArrayType{UInt8}, but this is a bug, and may be removed in future versions -# of Julia. 
See #54002 -const DenseBytes = Union{ - <:DenseArrayType{UInt8}, - CodeUnits{UInt8, <:Union{String, SubString{String}}}, -} - function findfirst(pred::Fix2{<:Union{typeof(isequal),typeof(==)},<:Union{UInt8, Int8}}, a::Union{DenseInt8, DenseUInt8}) findnext(pred, a, firstindex(a)) end diff --git a/test/iobuffer.jl b/test/iobuffer.jl index b5b34a2dbed8c..933662f7e41d1 100644 --- a/test/iobuffer.jl +++ b/test/iobuffer.jl @@ -389,3 +389,13 @@ end b = pushfirst!([0x02], 0x01) @test take!(IOBuffer(b)) == [0x01, 0x02] end + +@testset "#54636 reading from non-dense vectors" begin + data = 0x00:0xFF + io = IOBuffer(data) + @test read(io) == data + + data = @view(collect(0x00:0x0f)[begin:2:end]) + io = IOBuffer(data) + @test read(io) == data +end From b38fde1ad42c977878d4f481c962b108a3ae20ab Mon Sep 17 00:00:00 2001 From: Neven Sajko Date: Fri, 25 Oct 2024 09:26:36 +0200 Subject: [PATCH 309/537] support `isless` for zero-dimensional `AbstractArray`s (#55772) Fixes #55771 --- NEWS.md | 1 + base/abstractarray.jl | 9 +++++++++ test/arrayops.jl | 8 ++++++++ 3 files changed, 18 insertions(+) diff --git a/NEWS.md b/NEWS.md index cf04fbf577248..658dcc7aa320e 100644 --- a/NEWS.md +++ b/NEWS.md @@ -117,6 +117,7 @@ New library features * `RegexMatch` objects can now be used to construct `NamedTuple`s and `Dict`s ([#50988]) * `Lockable` is now exported ([#54595]) * New `ltruncate`, `rtruncate` and `ctruncate` functions for truncating strings to text width, accounting for char widths ([#55351]) +* `isless` (and thus `cmp`, sorting, etc.) is now supported for zero-dimensional `AbstractArray`s ([#55772]) Standard library changes ------------------------ diff --git a/base/abstractarray.jl b/base/abstractarray.jl index e877a87c2cdd1..cbbae8e852b2e 100644 --- a/base/abstractarray.jl +++ b/base/abstractarray.jl @@ -3044,6 +3044,15 @@ function cmp(A::AbstractVector, B::AbstractVector) return cmp(length(A), length(B)) end +""" + isless(A::AbstractArray{<:Any,0}, B::AbstractArray{<:Any,0}) + +Return `true` when the only element of `A` is less than the only element of `B`. +""" +function isless(A::AbstractArray{<:Any,0}, B::AbstractArray{<:Any,0}) + isless(only(A), only(B)) +end + """ isless(A::AbstractVector, B::AbstractVector) diff --git a/test/arrayops.jl b/test/arrayops.jl index ec8f54828b965..49d51176dcf71 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -1409,6 +1409,14 @@ end end @testset "lexicographic comparison" begin + @testset "zero-dimensional" begin + vals = (0, 0.0, 1, 1.0) + for l ∈ vals + for r ∈ vals + @test cmp(fill(l), fill(r)) == cmp(l, r) + end + end + end @test cmp([1.0], [1]) == 0 @test cmp([1], [1.0]) == 0 @test cmp([1, 1], [1, 1]) == 0 From bf8f8142ea9b237533f53109b564b72cd9056682 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 25 Oct 2024 21:10:27 +0900 Subject: [PATCH 310/537] inference: don't add backdge when `applicable` inferred to return `Bool` (#56316) Also just as a minor backedge reduction optimization, this commit avoids adding backedges when `applicable` is inferred to return `::Bool`. 
--- base/compiler/tfuncs.jl | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index a74146dcff552..2f78348b79844 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -3008,14 +3008,16 @@ function abstract_applicable(interp::AbstractInterpreter, argtypes::Vector{Any}, else rt = Const(true) # has applicable matches end - for i in 1:napplicable - match = applicable[i]::MethodMatch - edge = specialize_method(match)::MethodInstance - add_backedge!(sv, edge) - end - # also need an edge to the method table in case something gets - # added that did not intersect with any existing method - add_uncovered_edges!(sv, matches, atype) + if rt !== Bool + for i in 1:napplicable + match = applicable[i]::MethodMatch + edge = specialize_method(match) + add_backedge!(sv, edge) + end + # also need an edge to the method table in case something gets + # added that did not intersect with any existing method + add_uncovered_edges!(sv, matches, atype) + end end return Future(CallMeta(rt, Union{}, EFFECTS_TOTAL, NoCallInfo())) end From b81e33fe522f8a8f060f885f83944f11192cd0db Mon Sep 17 00:00:00 2001 From: Adrian Hill Date: Fri, 25 Oct 2024 15:22:00 +0200 Subject: [PATCH 311/537] Mark `require_one_based_indexing` and `has_offset_axes` as public (#56196) The discussion here mentions `require_one_based_indexing` being part of the public API: https://github.com/JuliaLang/julia/pull/43263 Both functions are also documented (albeit in the dev docs): * `require_one_based_indexing`: https://docs.julialang.org/en/v1/devdocs/offset-arrays/#man-custom-indices * `has_offset_axes`: https://docs.julialang.org/en/v1/devdocs/offset-arrays/#For-objects-that-mimic-AbstractArray-but-are-not-subtypes Towards https://github.com/JuliaLang/julia/issues/51335. --------- Co-authored-by: Matt Bauman --- NEWS.md | 1 + base/public.jl | 4 ++++ doc/src/base/arrays.md | 6 ++++++ 3 files changed, 11 insertions(+) diff --git a/NEWS.md b/NEWS.md index 658dcc7aa320e..228d133bd8557 100644 --- a/NEWS.md +++ b/NEWS.md @@ -116,6 +116,7 @@ New library features the uniquing checking ([#53474]) * `RegexMatch` objects can now be used to construct `NamedTuple`s and `Dict`s ([#50988]) * `Lockable` is now exported ([#54595]) +* `Base.require_one_based_indexing` and `Base.has_offset_axes` are now public ([#56196]) * New `ltruncate`, `rtruncate` and `ctruncate` functions for truncating strings to text width, accounting for char widths ([#55351]) * `isless` (and thus `cmp`, sorting, etc.) 
is now supported for zero-dimensional `AbstractArray`s ([#55772]) diff --git a/base/public.jl b/base/public.jl index 1a23550485d84..8777a454c920a 100644 --- a/base/public.jl +++ b/base/public.jl @@ -28,6 +28,10 @@ public acquire, release, +# arrays + has_offset_axes, + require_one_based_indexing, + # collections IteratorEltype, IteratorSize, diff --git a/doc/src/base/arrays.md b/doc/src/base/arrays.md index 66fe5c78f1ee6..defe497daf00c 100644 --- a/doc/src/base/arrays.md +++ b/doc/src/base/arrays.md @@ -115,6 +115,12 @@ Base.checkindex Base.elsize ``` +While most code can be written in an index-agnostic manner (see, e.g., [`eachindex`](@ref)), it can sometimes be useful to explicitly check for offset axes: +```@docs +Base.require_one_based_indexing +Base.has_offset_axes +``` + ## Views (SubArrays and other view types) A “view” is a data structure that acts like an array (it is a subtype of `AbstractArray`), but the underlying data is actually From fb297af78ed661faf491fd496dda4d261db07384 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A1ll=20Haraldsson?= Date: Fri, 25 Oct 2024 14:06:00 +0000 Subject: [PATCH 312/537] Avoid some allocations in various `println` methods (#56308) --- base/coreio.jl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/base/coreio.jl b/base/coreio.jl index 7fc608111d5f2..b5c543a25d5ad 100644 --- a/base/coreio.jl +++ b/base/coreio.jl @@ -1,8 +1,13 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +print(x) = print(stdout, x) +print(x1, x2) = print(stdout, x1, x2) +println(x) = print(stdout, x, "\n") +println(x1, x2) = print(stdout, x1, x2, "\n") + print(xs...) = print(stdout, xs...) -println(xs...) = println(stdout, xs...) -println(io::IO) = print(io, '\n') +println(xs...) = print(stdout, xs..., "\n") # fewer allocations than `println(stdout, xs...)` +println(io::IO) = print(io, "\n") function show end function repr end From 49e3b873fe443d94d87830181655b04ed857a9f7 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 25 Oct 2024 19:39:22 +0530 Subject: [PATCH 313/537] Add a developer documentation section to the `LinearAlgebra` docs (#56324) Functions that are meant for package developers may go here, instead of the main section that is primarily for users. --- stdlib/LinearAlgebra/docs/src/index.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index e3e79b7034969..1e44bf5cb04d7 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -891,6 +891,12 @@ LinearAlgebra.LAPACK.trsyl! LinearAlgebra.LAPACK.hseqr! ``` +## Developer Documentation + +```@docs +LinearAlgebra.matprod_dest +``` + ```@meta DocTestSetup = nothing ``` From db3d816985cf307520e7e86ebedba2d674af1353 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 25 Oct 2024 10:30:28 -0400 Subject: [PATCH 314/537] drop require lock when not needed during loading to allow parallel precompile loading (#56291) Fixes `_require_search_from_serialized` to first acquire all start_loading locks (using a deadlock-free batch-locking algorithm) before doing stalechecks and the rest, so that all the global computations happen behind the require_lock, then the rest can happen behind module-specific locks, then (as before) extensions can be loaded in parallel eventually after `require` returns. 
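To make the "deadlock-free batch-locking" idea concrete, here is a self-contained toy sketch. All names below are invented for illustration; the real implementation in the diff uses `require_lock`, `package_locks`, and `canstart_loading`. The key invariants are that, within a batch, a task never waits while it holds any per-package slot, and that after every wait it rescans the whole batch, since the state may have changed while the global lock was released during the wait.

```julia
# Toy model of the batch-acquire-or-restart pattern (illustrative names only)
const toy_lock   = ReentrantLock()
const toy_cond   = Threads.Condition(toy_lock)
const toy_owners = Dict{Symbol,Task}()  # key => task currently loading it

function toy_claim_all!(keys::Vector{Symbol})
    lock(toy_lock) do
        i = 0
        while i < length(keys)
            i += 1
            owner = get(toy_owners, keys[i], nothing)
            (owner === nothing || owner === current_task()) && continue
            wait(toy_cond)  # releases toy_lock while blocked; reacquired on wakeup
            i = 0           # something may have changed: rescan the whole batch
        end
        for k in keys       # clean pass: claim every slot before doing any work
            toy_owners[k] = current_task()
        end
    end
    return keys
end

function toy_release_all!(keys::Vector{Symbol})
    lock(toy_lock) do
        foreach(k -> delete!(toy_owners, k), keys)
        notify(toy_cond)    # wake any task blocked in toy_claim_all!
    end
end
```

In this toy, `toy_claim_all!([:A, :B, :C])` and `toy_claim_all!([:B, :A, :D])` issued from two tasks can force each other to rescan, but neither ever blocks while holding a slot the other needs, which is the property the real change relies on.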
--- base/loading.jl | 270 ++++++++++++++++++++++++++++-------------------- 1 file changed, 157 insertions(+), 113 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 69bb332193519..6391e2511f8d5 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1261,47 +1261,52 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No assert_havelock(require_lock) timing_imports = TIMING_IMPORTS[] > 0 try - if timing_imports - t_before = time_ns() - cumulative_compile_timing(true) - t_comp_before = cumulative_compile_time_ns() - end + if timing_imports + t_before = time_ns() + cumulative_compile_timing(true) + t_comp_before = cumulative_compile_time_ns() + end - for i in eachindex(depmods) - dep = depmods[i] - dep isa Module && continue - _, depkey, depbuild_id = dep::Tuple{String, PkgId, UInt128} - dep = something(maybe_loaded_precompile(depkey, depbuild_id)) - @assert PkgId(dep) == depkey && module_build_id(dep) === depbuild_id - depmods[i] = dep - end + for i in eachindex(depmods) + dep = depmods[i] + dep isa Module && continue + _, depkey, depbuild_id = dep::Tuple{String, PkgId, UInt128} + dep = something(maybe_loaded_precompile(depkey, depbuild_id)) + @assert PkgId(dep) == depkey && module_build_id(dep) === depbuild_id + depmods[i] = dep + end - if ocachepath !== nothing - @debug "Loading object cache file $ocachepath for $(repr("text/plain", pkg))" - sv = ccall(:jl_restore_package_image_from_file, Any, (Cstring, Any, Cint, Cstring, Cint), ocachepath, depmods, false, pkg.name, ignore_native) - else - @debug "Loading cache file $path for $(repr("text/plain", pkg))" - sv = ccall(:jl_restore_incremental, Any, (Cstring, Any, Cint, Cstring), path, depmods, false, pkg.name) - end - if isa(sv, Exception) - return sv - end + unlock(require_lock) # temporarily _unlock_ during these operations + sv = try + if ocachepath !== nothing + @debug "Loading object cache file $ocachepath for $(repr("text/plain", pkg))" + ccall(:jl_restore_package_image_from_file, Any, (Cstring, Any, Cint, Cstring, Cint), ocachepath, depmods, false, pkg.name, ignore_native) + else + @debug "Loading cache file $path for $(repr("text/plain", pkg))" + ccall(:jl_restore_incremental, Any, (Cstring, Any, Cint, Cstring), path, depmods, false, pkg.name) + end + finally + lock(require_lock) + end + if isa(sv, Exception) + return sv + end - restored = register_restored_modules(sv, pkg, path) + restored = register_restored_modules(sv, pkg, path) - for M in restored - M = M::Module - if parentmodule(M) === M && PkgId(M) == pkg - register && register_root_module(M) - if timing_imports - elapsed_time = time_ns() - t_before - comp_time, recomp_time = cumulative_compile_time_ns() .- t_comp_before - print_time_imports_report(M, elapsed_time, comp_time, recomp_time) + for M in restored + M = M::Module + if parentmodule(M) === M && PkgId(M) == pkg + register && register_root_module(M) + if timing_imports + elapsed_time = time_ns() - t_before + comp_time, recomp_time = cumulative_compile_time_ns() .- t_comp_before + print_time_imports_report(M, elapsed_time, comp_time, recomp_time) + end + return M end - return M end - end - return ErrorException("Required dependency $(repr("text/plain", pkg)) failed to load from a cache file.") + return ErrorException("Required dependency $(repr("text/plain", pkg)) failed to load from a cache file.") finally timing_imports && cumulative_compile_timing(false) @@ -2020,13 +2025,46 @@ end if staledeps === true continue end - try - staledeps, ocachefile, newbuild_id = 
staledeps::Tuple{Vector{Any}, Union{Nothing, String}, UInt128} - # finish checking staledeps module graph - for i in eachindex(staledeps) + staledeps, ocachefile, newbuild_id = staledeps::Tuple{Vector{Any}, Union{Nothing, String}, UInt128} + startedloading = length(staledeps) + 1 + try # any exit from here (goto, break, continue, return) will end_loading + # finish checking staledeps module graph, while acquiring all start_loading locks + # so that concurrent require calls won't make any different decisions that might conflict with the decisions here + # note that start_loading will drop the loading lock if necessary + let i = 0 + # start_loading here has a deadlock problem if we try to load `A,B,C` and `B,A,D` at the same time: + # it will claim A,B have a cycle, but really they just have an ambiguous order and need to be batch-acquired rather than singly + # solve that by making sure we can start_loading everything before allocating each of those and doing all the stale checks + while i < length(staledeps) + i += 1 + dep = staledeps[i] + dep isa Module && continue + _, modkey, modbuild_id = dep::Tuple{String, PkgId, UInt128} + dep = canstart_loading(modkey, modbuild_id, stalecheck) + if dep isa Module + if PkgId(dep) == modkey && module_build_id(dep) === modbuild_id + staledeps[i] = dep + continue + else + @debug "Rejecting cache file $path_to_try because module $modkey got loaded at a different version than expected." + @goto check_next_path + end + continue + elseif dep === nothing + continue + end + wait(dep) # releases require_lock, so requires restarting this loop + i = 0 + end + end + for i in reverse(eachindex(staledeps)) dep = staledeps[i] dep isa Module && continue modpath, modkey, modbuild_id = dep::Tuple{String, PkgId, UInt128} + # inline a call to start_loading here + @assert canstart_loading(modkey, modbuild_id, stalecheck) === nothing + package_locks[modkey] = current_task() => Threads.Condition(require_lock) + startedloading = i modpaths = find_all_in_cache_path(modkey, DEPOT_PATH) for modpath_to_try in modpaths modstaledeps = stale_cachefile(modkey, modbuild_id, modpath, modpath_to_try; stalecheck) @@ -2054,37 +2092,22 @@ end end end # finish loading module graph into staledeps - # TODO: call all start_loading calls (in reverse order) before calling any _include_from_serialized, since start_loading will drop the loading lock + # n.b. this runs __init__ methods too early, so it is very unwise to have those, as they may see inconsistent loading state, causing them to fail unpredictably here for i in eachindex(staledeps) dep = staledeps[i] dep isa Module && continue modpath, modkey, modbuild_id, modcachepath, modstaledeps, modocachepath = dep::Tuple{String, PkgId, UInt128, String, Vector{Any}, Union{Nothing, String}} - dep = start_loading(modkey, modbuild_id, stalecheck) - while true - if dep isa Module - if PkgId(dep) == modkey && module_build_id(dep) === modbuild_id - break - else - @debug "Rejecting cache file $path_to_try because module $modkey got loaded at a different version than expected." - @goto check_next_path - end - end - if dep === nothing - try - set_pkgorigin_version_path(modkey, modpath) - dep = _include_from_serialized(modkey, modcachepath, modocachepath, modstaledeps; register = stalecheck) - finally - end_loading(modkey, dep) - end - if !isa(dep, Module) - @debug "Rejecting cache file $path_to_try because required dependency $modkey failed to load from cache file for $modcachepath." 
exception=dep - @goto check_next_path - else - push!(newdeps, modkey) - end - end + set_pkgorigin_version_path(modkey, modpath) + dep = _include_from_serialized(modkey, modcachepath, modocachepath, modstaledeps; register = stalecheck) + if !isa(dep, Module) + @debug "Rejecting cache file $path_to_try because required dependency $modkey failed to load from cache file for $modcachepath." exception=dep + @goto check_next_path + else + startedloading = i + 1 + end_loading(modkey, dep) + staledeps[i] = dep + push!(newdeps, modkey) end - staledeps[i] = dep end restored = maybe_loaded_precompile(pkg, newbuild_id) if !isa(restored, Module) @@ -2094,11 +2117,21 @@ end @debug "Deserialization checks failed while attempting to load cache from $path_to_try" exception=restored @label check_next_path finally + # cancel all start_loading locks that were taken but not fulfilled before failing + for i in startedloading:length(staledeps) + dep = staledeps[i] + dep isa Module && continue + if dep isa Tuple{String, PkgId, UInt128} + _, modkey, _ = dep + else + _, modkey, _ = dep::Tuple{String, PkgId, UInt128, String, Vector{Any}, Union{Nothing, String}} + end + end_loading(modkey, nothing) + end for modkey in newdeps insert_extension_triggers(modkey) stalecheck && run_package_callbacks(modkey) end - empty!(newdeps) end end end @@ -2111,66 +2144,76 @@ const package_locks = Dict{PkgId,Pair{Task,Threads.Condition}}() debug_loading_deadlocks::Bool = true # Enable a slightly more expensive, but more complete algorithm that can handle simultaneous tasks. # This only triggers if you have multiple tasks trying to load the same package at the same time, # so it is unlikely to make a performance difference normally. -function start_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool) - # handle recursive and concurrent calls to require + +function canstart_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool) assert_havelock(require_lock) require_lock.reentrancy_cnt == 1 || throw(ConcurrencyViolationError("recursive call to start_loading")) - while true - loaded = stalecheck ? maybe_root_module(modkey) : nothing + loaded = stalecheck ? 
maybe_root_module(modkey) : nothing + loaded isa Module && return loaded + if build_id != UInt128(0) + loaded = maybe_loaded_precompile(modkey, build_id) loaded isa Module && return loaded - if build_id != UInt128(0) - loaded = maybe_loaded_precompile(modkey, build_id) - loaded isa Module && return loaded + end + loading = get(package_locks, modkey, nothing) + loading === nothing && return nothing + # load already in progress for this module on the task + task, cond = loading + deps = String[modkey.name] + pkgid = modkey + assert_havelock(cond.lock) + if debug_loading_deadlocks && current_task() !== task + waiters = Dict{Task,Pair{Task,PkgId}}() # invert to track waiting tasks => loading tasks + for each in package_locks + cond2 = each[2][2] + assert_havelock(cond2.lock) + for waiting in cond2.waitq + push!(waiters, waiting => (each[2][1] => each[1])) + end end - loading = get(package_locks, modkey, nothing) - if loading === nothing - package_locks[modkey] = current_task() => Threads.Condition(require_lock) - return nothing + while true + running = get(waiters, task, nothing) + running === nothing && break + task, pkgid = running + push!(deps, pkgid.name) + task === current_task() && break end - # load already in progress for this module on the task - task, cond = loading - deps = String[modkey.name] - pkgid = modkey - assert_havelock(cond.lock) - if debug_loading_deadlocks && current_task() !== task - waiters = Dict{Task,Pair{Task,PkgId}}() # invert to track waiting tasks => loading tasks - for each in package_locks - cond2 = each[2][2] - assert_havelock(cond2.lock) - for waiting in cond2.waitq - push!(waiters, waiting => (each[2][1] => each[1])) - end - end - while true - running = get(waiters, task, nothing) - running === nothing && break - task, pkgid = running - push!(deps, pkgid.name) - task === current_task() && break + end + if current_task() === task + others = String[modkey.name] # repeat this to emphasize the cycle here + for each in package_locks # list the rest of the packages being loaded too + if each[2][1] === task + other = each[1].name + other == modkey.name || other == pkgid.name || push!(others, other) end end - if current_task() === task - others = String[modkey.name] # repeat this to emphasize the cycle here - for each in package_locks # list the rest of the packages being loaded too - if each[2][1] === task - other = each[1].name - other == modkey.name || other == pkgid.name || push!(others, other) - end - end - msg = sprint(deps, others) do io, deps, others - print(io, "deadlock detected in loading ") - join(io, deps, " -> ") - print(io, " -> ") - join(io, others, " && ") - end - throw(ConcurrencyViolationError(msg)) + msg = sprint(deps, others) do io, deps, others + print(io, "deadlock detected in loading ") + join(io, deps, " -> ") + print(io, " -> ") + join(io, others, " && ") + end + throw(ConcurrencyViolationError(msg)) + end + return cond +end + +function start_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool) + # handle recursive and concurrent calls to require + while true + loaded = canstart_loading(modkey, build_id, stalecheck) + if loaded === nothing + package_locks[modkey] = current_task() => Threads.Condition(require_lock) + return nothing + elseif loaded isa Module + return loaded end - loaded = wait(cond) + loaded = wait(loaded) loaded isa Module && return loaded end end function end_loading(modkey::PkgId, @nospecialize loaded) + assert_havelock(require_lock) loading = pop!(package_locks, modkey) notify(loading[2], loaded, all=true) nothing 
@@ -2650,6 +2693,7 @@ function _require(pkg::PkgId, env=nothing) end # load a serialized file directly, including dependencies (without checking staleness except for immediate conflicts) +# this does not call start_loading / end_loading, so can lead to some odd behaviors function _require_from_serialized(uuidkey::PkgId, path::String, ocachepath::Union{String, Nothing}, sourcepath::String) @lock require_lock begin set_pkgorigin_version_path(uuidkey, sourcepath) From 2b3a0f0ab41350e420934d288b68df7b3872b782 Mon Sep 17 00:00:00 2001 From: Nathan Zimmerberg <39104088+nhz2@users.noreply.github.com> Date: Fri, 25 Oct 2024 11:49:12 -0400 Subject: [PATCH 315/537] Make `String(::Memory)` copy (#54457) A more targeted fix of #54369 than #54372 Preserves the performance improvements added in #53962 by creating a new internal `_unsafe_takestring!(v::Memory{UInt8})` function that does what `String(::Memory{UInt8})` used to do. --- base/gmp.jl | 4 ++-- base/intfuncs.jl | 12 ++++++------ base/strings/string.jl | 13 +++++++------ base/strings/util.jl | 2 +- base/uuid.jl | 2 +- stdlib/FileWatching/src/pidfile.jl | 2 +- test/strings/basic.jl | 11 +++++++++++ 7 files changed, 29 insertions(+), 17 deletions(-) diff --git a/base/gmp.jl b/base/gmp.jl index 1eaa20d6baecf..df0d9fee49348 100644 --- a/base/gmp.jl +++ b/base/gmp.jl @@ -11,7 +11,7 @@ import .Base: *, +, -, /, <, <<, >>, >>>, <=, ==, >, >=, ^, (~), (&), (|), xor, bin, oct, dec, hex, isequal, invmod, _prevpow2, _nextpow2, ndigits0zpb, widen, signed, unsafe_trunc, trunc, iszero, isone, big, flipsign, signbit, sign, hastypemax, isodd, iseven, digits!, hash, hash_integer, top_set_bit, - clamp + clamp, unsafe_takestring if Clong == Int32 const ClongMax = Union{Int8, Int16, Int32} @@ -761,7 +761,7 @@ function string(n::BigInt; base::Integer = 10, pad::Integer = 1) sv[i] = '0' % UInt8 end isneg(n) && (sv[1] = '-' % UInt8) - String(sv) + unsafe_takestring(sv) end function digits!(a::AbstractVector{T}, n::BigInt; base::Integer = 10) where {T<:Integer} diff --git a/base/intfuncs.jl b/base/intfuncs.jl index ec450aff2dff2..e8d4b65305be7 100644 --- a/base/intfuncs.jl +++ b/base/intfuncs.jl @@ -792,7 +792,7 @@ function bin(x::Unsigned, pad::Int, neg::Bool) i -= 1 end neg && (@inbounds a[1] = 0x2d) # UInt8('-') - String(a) + unsafe_takestring(a) end function oct(x::Unsigned, pad::Int, neg::Bool) @@ -806,7 +806,7 @@ function oct(x::Unsigned, pad::Int, neg::Bool) i -= 1 end neg && (@inbounds a[1] = 0x2d) # UInt8('-') - String(a) + unsafe_takestring(a) end # 2-digit decimal characters ("00":"99") @@ -876,7 +876,7 @@ function dec(x::Unsigned, pad::Int, neg::Bool) a = StringMemory(n) append_c_digits_fast(n, x, a, 1) neg && (@inbounds a[1] = 0x2d) # UInt8('-') - String(a) + unsafe_takestring(a) end function hex(x::Unsigned, pad::Int, neg::Bool) @@ -897,7 +897,7 @@ function hex(x::Unsigned, pad::Int, neg::Bool) @inbounds a[i] = d + ifelse(d > 0x9, 0x57, 0x30) end neg && (@inbounds a[1] = 0x2d) # UInt8('-') - String(a) + unsafe_takestring(a) end const base36digits = UInt8['0':'9';'a':'z'] @@ -922,7 +922,7 @@ function _base(base::Integer, x::Integer, pad::Int, neg::Bool) i -= 1 end neg && (@inbounds a[1] = 0x2d) # UInt8('-') - String(a) + unsafe_takestring(a) end split_sign(n::Integer) = unsigned(abs(n)), n < 0 @@ -998,7 +998,7 @@ function bitstring(x::T) where {T} x = lshr_int(x, 4) i -= 4 end - return String(str) + return unsafe_takestring(str) end """ diff --git a/base/strings/string.jl b/base/strings/string.jl index a46ee60e4f023..9f3c3d00e4b81 100644 --- 
a/base/strings/string.jl +++ b/base/strings/string.jl @@ -61,12 +61,7 @@ by [`take!`](@ref) on a writable [`IOBuffer`](@ref) and by calls to In other cases, `Vector{UInt8}` data may be copied, but `v` is truncated anyway to guarantee consistent behavior. """ -String(v::AbstractVector{UInt8}) = String(copyto!(StringMemory(length(v)), v)) -function String(v::Memory{UInt8}) - len = length(v) - len == 0 && return "" - return ccall(:jl_genericmemory_to_string, Ref{String}, (Any, Int), v, len) -end +String(v::AbstractVector{UInt8}) = unsafe_takestring(copyto!(StringMemory(length(v)), v)) function String(v::Vector{UInt8}) #return ccall(:jl_array_to_string, Ref{String}, (Any,), v) len = length(v) @@ -83,6 +78,12 @@ function String(v::Vector{UInt8}) return str end +"Create a string re-using the memory, if possible. +Mutating or reading the memory after calling this function is undefined behaviour." +function unsafe_takestring(m::Memory{UInt8}) + isempty(m) ? "" : ccall(:jl_genericmemory_to_string, Ref{String}, (Any, Int), m, length(m)) +end + """ unsafe_string(p::Ptr{UInt8}, [length::Integer]) diff --git a/base/strings/util.jl b/base/strings/util.jl index 04d451a4fd288..fcccb9babadfd 100644 --- a/base/strings/util.jl +++ b/base/strings/util.jl @@ -1217,7 +1217,7 @@ function bytes2hex(itr) b[2i - 1] = hex_chars[1 + x >> 4] b[2i ] = hex_chars[1 + x & 0xf] end - return String(b) + return unsafe_takestring(b) end function bytes2hex(io::IO, itr) diff --git a/base/uuid.jl b/base/uuid.jl index 56f3a6aa417e7..4b9bae863d926 100644 --- a/base/uuid.jl +++ b/base/uuid.jl @@ -98,7 +98,7 @@ let groupings = [36:-1:25; 23:-1:20; 18:-1:15; 13:-1:10; 8:-1:1] u >>= 4 end @inbounds a[24] = a[19] = a[14] = a[9] = '-' - return String(a) + return unsafe_takestring(a) end end diff --git a/stdlib/FileWatching/src/pidfile.jl b/stdlib/FileWatching/src/pidfile.jl index 95b8f20face29..6862aaa9f8453 100644 --- a/stdlib/FileWatching/src/pidfile.jl +++ b/stdlib/FileWatching/src/pidfile.jl @@ -304,7 +304,7 @@ function open_exclusive(path::String; end function _rand_filename(len::Int=4) # modified from Base.Libc - slug = Base.StringMemory(len) + slug = Base.StringVector(len) chars = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" for i = 1:len slug[i] = chars[(Libc.rand() % length(chars)) + 1] diff --git a/test/strings/basic.jl b/test/strings/basic.jl index de04055d047af..ee92995bd2e11 100644 --- a/test/strings/basic.jl +++ b/test/strings/basic.jl @@ -1093,6 +1093,17 @@ let v = [0x40,0x41,0x42] @test String(view(v, 2:3)) == "AB" end +# issue #54369 +let v = Base.StringMemory(3) + v .= [0x41,0x42,0x43] + s = String(v) + @test s == "ABC" + @test v == [0x41,0x42,0x43] + v[1] = 0x43 + @test s == "ABC" + @test v == [0x43,0x42,0x43] +end + # make sure length for identical String and AbstractString return the same value, PR #25533 let rng = MersenneTwister(1), strs = ["∀εa∀aε"*String(rand(rng, UInt8, 100))*"∀εa∀aε", String(rand(rng, UInt8, 200))] From f1a90e076186e77792c77e2e2f0905e14b19f321 Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Fri, 25 Oct 2024 17:07:33 -0400 Subject: [PATCH 316/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=20799dc2d54=20to=20116ba910c=20(#56336)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stdlib: Pkg URL: https://github.com/JuliaLang/Pkg.jl.git Stdlib branch: master Julia branch: master Old commit: 799dc2d54 New commit: 116ba910c Julia version: 1.12.0-DEV Pkg version: 1.12.0 
Bump invoked by: @IanButterworth Powered by: [BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl) Diff: https://github.com/JuliaLang/Pkg.jl/compare/799dc2d54c4e809b9779de8c604564a5b3befaa0...116ba910c74ab565d348aa8a50d6dd10148f11ab ``` $ git log --oneline 799dc2d54..116ba910c 116ba910c fix Base.unreference_module call (#4057) 6ed1d2f40 do not show right hand progress without colors (#4047) ``` Co-authored-by: Dilum Aluthge --- .../Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 | 1 + .../Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 | 1 + .../Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 | 1 - .../Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 | 1 - stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 create mode 100644 deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 delete mode 100644 deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 diff --git a/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 b/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 new file mode 100644 index 0000000000000..61dca3054d58f --- /dev/null +++ b/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 @@ -0,0 +1 @@ +9905cd10c29974f3b0bb47f2e40951b0 diff --git a/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 b/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 new file mode 100644 index 0000000000000..3757366fd23cf --- /dev/null +++ b/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 @@ -0,0 +1 @@ +b99db15e6646b1eaa35df705ca39c7f3ddb05073293c779963231c22d17f4ae449739f4e8535a41ae9ae5fb1661f76c915fb2c7853a86fc695335b3e1ce3c06d diff --git a/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 b/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 deleted file mode 100644 index 7c0bfbf62bd6e..0000000000000 --- a/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -6fce8506a1701acdcbc4888250eeb86a diff --git a/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 b/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 deleted file mode 100644 index 06e3ea9c8dfa7..0000000000000 --- a/deps/checksums/Pkg-799dc2d54c4e809b9779de8c604564a5b3befaa0.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -e251745da221a82f3ec5e21a76c29df0b695dc4028ee2c719373c08637050318db7b543c9d40074314fc3495738d39fd8af5a7954e8b72695df44e25e395f883 diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index c29c83fce4046..24c73834eca22 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = 799dc2d54c4e809b9779de8c604564a5b3befaa0 +PKG_SHA1 = 116ba910c74ab565d348aa8a50d6dd10148f11ab PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From f6a38e00c009292e2b2730758bb99a154809c413 Mon Sep 17 00:00:00 2001 From: Diogo Netto <61364108+d-netto@users.noreply.github.com> Date: Fri, 25 Oct 2024 19:51:27 -0300 Subject: [PATCH 317/537] Wall-time/all tasks profiler (#55889) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit One limitation of sampling CPU/thread 
profiles, as is currently done in Julia, is that they primarily capture samples from CPU-intensive tasks. If many tasks are performing IO or contending for concurrency primitives like semaphores, these tasks won’t appear in the profile, as they aren't scheduled on OS threads sampled by the profiler. A wall-time profiler, like the one implemented in this PR, samples tasks regardless of OS thread scheduling. This enables profiling of IO-heavy tasks and detecting areas of heavy contention in the system. Co-developed with @nickrobinson251. --- NEWS.md | 2 + doc/src/manual/img/cpu-profile.png | Bin 0 -> 147343 bytes doc/src/manual/img/task-sampling-failure.png | Bin 0 -> 359826 bytes .../wall-time-profiler-channel-example.png | Bin 0 -> 191682 bytes ...ll-time-profiler-compute-bound-example.png | Bin 0 -> 435521 bytes doc/src/manual/profile.md | 214 ++++++++++++++++++ src/gc-stacks.c | 33 +++ src/gc-stock.c | 24 +- src/init.c | 4 + src/julia_internal.h | 29 +++ src/julia_threads.h | 1 + src/mtarraylist.c | 2 +- src/signal-handling.c | 135 +++++++++-- src/signals-mach.c | 178 ++++++++------- src/signals-unix.c | 170 ++++++++------ src/signals-win.c | 39 ++-- src/stackwalk.c | 58 +++-- stdlib/Profile/src/Profile.jl | 77 +++++-- stdlib/Profile/test/runtests.jl | 61 ++++- 19 files changed, 796 insertions(+), 231 deletions(-) create mode 100644 doc/src/manual/img/cpu-profile.png create mode 100644 doc/src/manual/img/task-sampling-failure.png create mode 100644 doc/src/manual/img/wall-time-profiler-channel-example.png create mode 100644 doc/src/manual/img/wall-time-profiler-compute-bound-example.png diff --git a/NEWS.md b/NEWS.md index 228d133bd8557..079625b1610aa 100644 --- a/NEWS.md +++ b/NEWS.md @@ -213,4 +213,6 @@ External dependencies Tooling Improvements -------------------- +- A wall-time profiler is now available for users who need a sampling profiler that captures tasks regardless of their scheduling or running state. This type of profiler enables profiling of I/O-heavy tasks and helps detect areas of heavy contention in the system ([#55889]). + diff --git a/doc/src/manual/img/cpu-profile.png b/doc/src/manual/img/cpu-profile.png new file mode 100644 index 0000000000000000000000000000000000000000..ec48b41f6e78b9975c1bf1fc670d8221514f249f GIT binary patch literal 147343 zcmb@t1yo$i5-tpcBm{yC8iIQW79cnb1Sbie0Kwhe-GU4b0fIBZ-Q6L0aDohO!QEw$ zw>kHmdvfl*|G(B->&=>Bn%=v+cXw4+fAv*OkerMJI_h&&1Ox|Ee^9I1cawTCL$toQX(Sca<*26CguhR2$Dgu%8ykPd+}4%zC;QcBO+(|%_Aey zBWC&yBT2IOhD9Qy&~*8I)0H5=Q*VHHqif*F?|8#Nd?DKVArBuKXfVQJx#+)bHy<@! 
[... base85-encoded GIT binary patch data for doc/src/manual/img/cpu-profile.png omitted ...]
zg~1g$f!-hQ|Y{%fdifw+h$8vIvQLyi?ZjR>ibknTt*m7-mCm)~c7cZ}zp`Hi(^&U2nK&(ry9KXa_UYrX4T@BRI}-~Vn{ zeU8f{?O?Ta4BWH@o1F+#VPf}X9jJ-T7o#-Jy7c+iSW%8JXOg-{A-&C~G8-O6#LjyY zvD^LVV=x(iMvq$YDk(pgH_s9jtl4m1BOYX}VmS8hwp$ftY0XWsXE)X+p4RoMI$9X_ zpNQPaoBm_+itFm)h6blC&hQ+Bd)Y`zm*PCu)4OdQiS23Q{r<>l=k+e`n}l283eDtP zAyhpiT`&HE{*rj1l9G}Y-lzAk%D$N8IR)Wc80CgB zGkeg-B!ze=sMt?KE$MX2v*h2WykiFCoT_XrG;OjXxmX(tfGMY%vrRWIeI zZy@QEnP{4Y63t+!v?yW1FzRjpUa>6Nio5El_b~f&df?+-@<~aC_J#7@$|~m&Rdrqw zR6#!`-3V^{`iou}EyUlJWPruSAE})}dCO!; zZ+vjv{4gj^(FUxpi8K27<86HQ$0L&So^~gRG#z$@!K2G}SEl9@UbHE8-c&9mwnGJC zvdO3RXEKEB4`I899xUq#QtgC9ownf;gZ7_{JDyg^&5fz@Lkw6=jfC$h9d8k-7vC~V zAF$~kkl?8IEi%Bi*OuQMrlRozMoU3zc>bQx()q*2M#I!__B=m)sxN;tw$Lv4)lBNE z79#^CFw6C{K=1}OTa5Fa2Ku#bSX=6&5NYPiAB>+TJv8Y9*BV}^OKrNaJp%7Z1GVzn z>kS98RC~dKclLeO51UniNKikkVn=;Zib@_JlaV1Qf40@ZPvHsKT!yDL3Jm)F-cCgR zmSc6!k3vOy_8xH@j|0da%#u4+yW?e|mXE4RizQR5l(9ohMistS+q zdO=Ne{`y(Ja|<3Nta+U=|In`$o|&G~y%ouB72`^U1|cC8mZjm+4MQ4M{rO(+T#&r+ zD)&stUv)#zr*3maNr34A_j^J=*q(tV8)_zM+WvwP~DKQk3r7 zSmX0vEEqFM(zMI8IU!QebU4>3Hh=Z`laEtNuru>L(2u{YjiVoy%Nv2NbCdnIzktAr zM&Yq=bWeMJKPk5_3DJ9G3U!r(4KD|n79YGN9I_6`MDw_UDT=ad`bx zqv`9A<@K8DkpMS`5K7EQjAwv%a)FYOF2H-?Pr!LMdX^lc))T&xu2`aL-axXkeo)G4 zOLx*EQzZ{`O&fr;%lzSLQ@S>+Oc^LJO$@S@pKVge@_-R+NY4_)L`@04lgfyFV zN-rytAWQ_!689TVrc58;e0yPb>3Qf^5Q?XQW~l?c>06>y=Hnelty~<5sFN31v@VLF zS;agSj9JkbKS5FH47PoycI^^sdbc#I^xCfQ!NXWd&7!;?a8+0KrK7TU2oAi^U{xpT z@5-w?-dsm$ko2njyvB{x^!p+DIqB^&ie0r_do^((90^}K7=&vm%8`=QOW@+M-E%RS$*Mt<~SchU4BFJl;; z6>n=Zo9Ywt(;_w**AJ|0%K2icv|&nYzEeW>>M?)a?rb$VsGm!#$uXkUYI6L8Ob(^m>A42zJ3Wtp=9)sWjNVKtf~iwvqwo|S^3QNYPI|--rMF9} zke_cdPe~@>pGd6E%a7BfUwwy!ecRk50nI^^q3Cb)wW4pA*XW5dV4c$Eq+EOvT)(6( z7uPdn>tkS0I~;K*IjB9LSx{F2LqL-&|NZu14n8f%JUO04t)&%2w~2w6+#s!7AJrtC z7Ub;GxXovs(bMU_XPD5PW1UEs`X72@|K`5j3B*MnTG7i-LG&DJ)1w~w(k|tT*NUFLdGy*dAuG_P zp-AUquxY(lwWP4F)+Q_?E-9%318xSAaX0vbaq#101L?NJkzs+_CCzA`+Q!O^F44)d zF!hTS_5Mx?`JSh;Pp;MphC{=j0ao5~p?Em;c6}wbaA<5t=|F2rmSgp`7l{mgDe{)G z!}=g>;55V#Q*$gSX-rJ*$@t#n=-qGv`RFJUD9xY7z`pfxChLDoBy}<0;gY^8m%$vib})q-Yp9vbExl%uYk3_1GLbq zX^+FF|8|c@&0@S`2oyXom;+RgFkHBi^!WqowV#A&-yx@1mh*Ec1B;kpNd*5*KhU=u z32wQMWPpWNd5b`a)UBF;D9wcX930(tJ(X!GmsnFl;%Qm+JgnwYY(WB+9#bw|#9>GA z5Tl-OnnRSL6osM`P&q}i8}dN16F)Ua_F<^(c!p?2siBm#K<`A9KH%pXh7t!cLCfj9 z#zhsY-;!qH<<$%&4(341NxYvP*8?=u2KjVC>s0ZRl1a^x&PY7GA|B``#gGTZ$w~^8 zD`cWa5WE+64VVCUa~quA1P76S`|x!eu_$` z!K;=zzl@{bvm5|QD^x)=80TFOnSPW(gpG&T3fp2ztd=Cut)Wjm1ADqaGkdp7C0TNF zB&`yo{o*>&U(^UxMVQ=9%fiJjLK9KaFG=gEZ8b%uAe=RbLjx2!f_RSitrKnEq8M1B11_pIC zXeLy~eIqNPWgI{NhYbz%g#sFbkR-Eyzcnuas|mt^K+T6C`8*f_HjB8?7O1584Wg+0 zRY&^1z{j9Brs)BaMFWPewq52i`nYL2s1&;N^yQBwu)F9ly02>QPTQQ43!7Izdm3!h z@T$_Txw?G0+xr*xF)tsKy?bOQ&#GvH2XuVvbm@85JXmtoSD2BT{GSRK1~*G*NVAVO zPpBUlTG*7BrP-uj9`rQosw^AhXp+Ib8eXg!fj(@`z8Bd;BXPm;n#3kD5d z0~(aZxOc&BxjtmI@$|LK8q=Zk&2E*sK!wQBcCwZ+(30R~I<2cDQF|n`WM1PV=y_MU zJs7`7w6X;Z20np+F&jichW^db_<#P(3b1}!p1&oQ6n%|ve>d2BD(zZjmHGXUiL+lY zoRoOXl~m07e?Ia@q?nIVzo?DE;VA7Hfn7)YX)%}3KL6P3Jw}U~$5hbltRDS+zb?Ex zK;usgS(^ZdwJLLuIFNfl+zkmS8nZOtW$()3q@>cJ8P=o!LTD==^)Q0B;BrkskKhLQBz7vR_s7NyRN24=+Uko479H*&!%)6i@Qj`FN5srZjk55SU61sv?T@7Ri1l_cLT*&kU&L%%VEd zr_&xjSdR-D4;e0MD^$j|4I79Mm<2f+d|%2^KzkO!fKi7CTuLC?8^U)zW`SbthRf~7 zHG1>O)P-J5_&Oml5JSX?#KhAF0UX%MX93wM94Fx5b>J7s%tx|R&rSi!{FiJ?BSH=K zLF3Q>8wMSH6t3E=!ay0(R=_Fsb}n|NBDqHYAw=GS)Ozp+A^m(sr zucW*j6tgSO*-QOl@nVr-o7yOo#yG({|A;x^9*vgCg8!umB<#$_x)MEIGimv9q0uCX zNyt3{`&4%T_p_#V6MZGT%Kd>Z#@Y$_PU|n$_RwisD}5CdFfbrm*eqzxP9x0HS`bHJ zm$nwf{i6!vChw_n`hW#MkepkK<7iEn*5Wv*e z+JV^YY&bGzU%q9)H?<|8riUA>5m^^a1e;B?(q`xanu$*bU+4^!MPErS$mbmUMvI4qJIfXA>neF*OyQSI9DvMIR3@6A9Z1wxF5xR 
zj*~yO+;664G0}byc)!39T`(RvZdqO^n(zNq+<(fR9~1WXNtD&W!ihl~7$bb;qjW6* z^fgDzed3*KaRfz}lH7O95QfjZNs4_2A^l{1rF$Mj=Tc8Zc6H{I5=`zJ!1&0VTfx$D ze-I=aCvUkwho9$?<}y^zcY(y}DxadH4}v?SxHHTp3ba8KrZA5{Lykbly~)B14nvJj z28;TFO{V;Ic>qX!hkz{l-<(8CZb*<{anx@OpIT#y7dX_%@rf?O>M2;gV_(Th1U3)? z+t%Ek0g~GTBr7Y3$nDD0z_zv56dt2*lxTtE79nqjNNqut>n{4-fgI~SXXnR``{(p8 zVhVcf$3?htR>#pz8M8etipj-9&XItq>EkRG5OtUvP6=@k(U+MnD zh+#D{9kLO6T*v(k{M_@tfB`CCHd6j(^6|@Op{`=)2|Ck1=|B|ig@2PBg3V5cI5B(r ziebVUF9>1iuID&B?snLB2PQo3eb5tXP#N>eTr?rP4no;Q zl)RZmEyws`cdUM?-c)u{I?35Z|rrmrkrf_W7IuDQR zrPkJSO)^PXkLQB>m(HPoZOSLRkL}$l-#giC)YXlN6$YVm_Q;#Mw}U9#Ee%PX0o~?S zPA|gP!II)>LB|ukN-dA;Tc?&q_48~d`ARunY40$GKER>RUfYuH~@kw}C9?Pt_ zaX7V`d{*2l{9MI^M9mE_mk$832f;I$27vIAAybp%3BHdl5+TCh4rY>P@Tmd#C$DM> z*u;O)=2U2F3ORAu_n;xZ@F|6JJ1^VT%v;{E4(NL=1 z{G|f*wv}H&hdoR*jn(Dz_Tl^wlIF~!J+krv=n=!%Z`*tv0Md(ImQO~J#I#3eH9wAu zOQ+9j`~XXG^YDU4D0e`CNtEl_;S5SU53-=Le*?j}sRq?cDsOb8cBd6Y`jD{tW4%e6 z^c7IuZ&->4!$lDTWtnw2gn(oZehzq-7)r@r;Jp0=W_|VxQa%1X2=8#DmH~&F=ZDMn z#tawyjo6Nsm z^Dlq@PiVlOhwmMJP&^7t#-bs`H0^0H!|InqpwThnIFPCzH&4k z=swH9Da$OJNi`9juh{|sdEf^HQBUllr@&wUwFF%WU#Fmab;pn3KCAmve^+A$T!vM- z_5=80g8%qh%iV{5trgZbsM&6U6;BEt&JPc!_dJxf@hEk6Z~*VONU=qNmLa`Mue=fB zQ^4CmDu4X&Q*H5&yLu?MQaZzaQPjhH`X#34KhsB4A zRReDr@P1O>lCty0VAY(kiQ_NWMNJrg(I+Eb+ED*X&Qe)Me5tg4PW^)4pTlH$g46y3 zecapOTLqN8q%SY#+b$R7JN3Ab``W@ezqv9ds>$T7o^x1KC!M9I7cPsp>NMdxm#irg zp=MEWyAMN6=?Ycj4v*{#nigDY8oXSpL;R)`bW5kMb4LoC)(##KB=JsG4^4kwRokPv zN#OP7X3Sl~#op5VtE9{3h`h_@Ax!J2b#?TG(LD15l1AUgtitBOGbK$K0p6Xj1jR&K zM_pZhW{|wcCXlpj=CbwJ0FAp~3fE(E13~KWRJIV1qo1tN( zjp080)caR57?OK?;QIts8y$r2GY<~(ym5W#!>TH?k(7*Bl1HXkkAUN8l9!>&5Qpi2 zeQOTAOw!eN;MWv%zD1ghD2zYyZ}{Ojst)9MkF;hp2LtlhJeUzHVjFU+TbuG$AN0DY#5RROuKV8Q_>}K46A4Nc)En)nhZf} zLtTRS%Og|oI$63fOe?U(L}THIFm9eAf&#KHHYu2IM z3v9*Vn=z%#M*w6_xY5lS$y5o~3otH%AqsOAL0N}0?Sjc0At=$h1JxNq;PE)BTmDV~ zmJ|U8v+>N25wtH75~MAH4QfmFokANJdTbKwnJ^?TU`08HA-NWRCYPy&n;=8K;##-| zBNlGK)WY2zZ*rI;$(i2=Bz4q^AcmazE|5!W>7_HO8iUXYZT;-A6=UyO;mo=q5Uw6+ z)SAMdv=}t3F1D@f(m>*6A)uGO<@Pd$o8rJ(FBv1|F5?5;!J_2}JTNKJCx~B1!5DW-x(Bx)NB177)oW zJgf{R5J@Gd=C&M&q&%?9(!>1<0^uRo!`%|lW=NXPAL3$a+g$xLd24=c(A^m8O{Icu z`jL^5SL*BQU)1#G0=EwVfl|mYlv=z}Zgj5Qqq#3_1!i!!6jd%EIx5aXpSqY!v_n}N z-O_rdIur6O{Y#2Fpreu-Y*PfF6d7BO&EA30g_T$BW5!v@vP>I{F4dLB-0gR%W^=^0 zs`D+zVb2d!xRD!Wf;H*}D@jk@lxqjByWZW&irU;T2S{t;gDToe$2gW!*bfi|F zVnvAmHnR*cfH@ns!>PqbC(@f&q1dSbm-fDedkJPtdqXyHY41TK?VZ7t_8to%Z2H5K zX*lK$q?*ECF1PZBjH5jD3z&kH&!3SoQqRznoViS`fYDBksH9w$A|#I5}V`HtgNW?BX0LcQ6Do1WUc~a|89oP*kcebX3iD6KqPrFP^lXw zy@-`SCK3#NP}BuWUXamaDbK#5B{%Fc3mi2vbe$fS7=wJK25-93uNL#LdSaUE0@Q)x z42Dc;?_2m>FqifgVQs1T%Ey-XZlAe#1h6rL=_D^N#m= zQ&U@vLSexXe?tRBr75r-{H;P~F)#^=S8FX*i674M!QW{b?>dQ|)1Ech@ z+Fos4ANtkudXnJdBm<1DJK@l-LSfp zDe=uiJMn~GM>j*U2OurfQ&;3{^7&!XE-Y6Kaw z6DDSWq-GwyII;EqUdqbA<8-1;oAa}(`hFv6 zdKb|!YKyMJvPA1`*Ot#a7k~mtK94x(b8}sK&=XBe#w-|ysKd{*#)CcFa<|Jsv!ZmcyJ2n}nLt4KNX&gfao+*)Frk&X z8V*v+37d(A=zKE5>TXL$zx!yhcL~jK_Qz zwts*HjOQHml(SpqF-*mn4O)i?+}2PNi(iDeWW^~^N92kNFDbt@hk-}>fm;3KU_7&- zLqgcVc$~1>Ne;%-2|s(Yf$?w(1a%64-m?!u%vFDw$=*^Q?V9e~(_xUhSNC4g|eky%vbr`g>C0Fix0@)txw1Eue zuDw7mLpd7HFL;wT=+Ss~lVx_Bag{!Q> z82a{R!>f%@dAtVX;}De;?{=o5g(`wYi? 
zA5jqgP>``VOHV54^b=01Al=Ix;HO_QgMro-ccf-@U_6YTaBHH;LD&UXsB9c?xI>gw||<;9eD4udM!PKT14rK8IH{#6?>nq05x zP4SDD8A8S zd*Ewjg$gym4%-77{jxS5xcs!stEQv*VS9=?$k3Wdr=(hIBE2<{{^xFmq&1O_JjArt zMEYk1v3YAEy)}{k*$~A)IFXLXn$Mfk!mYv0?{i-avGHy`{y&wICViY)gWzDIk2A{q zT04~SP#TyXYUNT{bdF=EN#0GVg)2*++LdZ+H=IrizRSA!2mLC$r@*V8vfD{n*qSP)cC4rdQ%GIMI>HCh`Hp*+&S z-(iXa%uQQht;$Ac6wsHmsr1Kgb${?6W1hOeGwdohq6Y%d*pJ+wN!WQqxKi>aVKEEV zY7D$8hZ3nPEIV#O#*^w1pB2d;IUo?}{0XtU;xNKRxc~tOpO{^RwDCmHxh>ABwrI}~ mcmy9~ZL}Z%pQu7;(8-RxS>`)c(_R7pn=xhX_D=k1! z2pvKTfdmLGgpl@4&OPVe_j~{MJ^B5@OlD^9*=6mu*Lt3ZSN9BcjvPFFkb{Hch@S2( zV-Ai(og5tdv=8h9u5|Ic=yGrz)^fRd^Pb+#n_~BTy&k)`J8^L6zDhOUZ(-7PBG3A5 zlGYPW?vF83+?*FVKgRTP>B>dNCvhK;XpedKNJmh>ruk*$5nF-#%aJ3;)MFozV)yQK zvb{W#s(kSsv2CN3+Sa&{0p&jH})(!m>4g8^o-oY z)z5iN3+F9y?yHJ>%b|Pt$C3t*GVknDj%kR^ z8x>KG_i^T?W^pEZ)3 zethEy8T2yunMM>J#}Tz35g{STA$$eb-4A)(;zMnnS^_URG7S{Mb(6n$34MFr*cp2# zN$XrUPM-xHSV!KqEA#Jvp;+Q-Ia{GJn)7hV6Q`e$^ic6-*t4bkzxdp4(yxC>9={o9 zT=ntctAk;KFE2g(n!~C2GB+gW-GP_!Vn>fHXy2BmZXLdU;LM|j8~rZP&_J#qyL@u9 zccbT0m9l>%&!x2W?;gGyTQraM`{9d}x99z+`bz&3lXw|qU6fcP_*g>xrhe2&l~&Rp zdn?s`7{1NfIq<1>@!Msy!FTZ%r=?=C$7faAu3Wp9pD5t?^5$&HwnWI=uhd7`Rou%L zB&Oi-RKCxhyhGn`opUezzK&6EJf3}b)Lvf6*IQO2;$Fq0RSRYNjFaVqRIzT}IFq=a z7vE2u{G{pGhdz?JzQEV4cW^eH{mIvr_-e>XTi?`g8y%SDxZ24Loi23Trz4IEi&;C(d+;lqZ#~NKiOsu~bl!tv ztbmsg8{zBETk5qy=k{wnQ{lZ8X%j4$nt1$Z$XzZYvGY4Gf{vEhob-|{sba#=yj9p! zl8)z6Y0%w;Bu= z#V7K_FC1t6fjimHL%x~PsLQ%4*q}J*-?7KXzVK+OkLCoQ%B*KUFF)l)-sD-7ak~ua zAVX>e**pEBAiw6kdh-2^E500~+xRzxZNlnJ=_(smhUo2*rJK~pN67SLIQiywNJae4 z=yB^9;V*p;3n~tc+%MtmujIElKlEBSc_tr2zSY*Wuk~lwTvwD@G;HS#w5m$~PL5k{ zSPt^S?gn+hk@VzC*5xeO6ONB@1DR{FUka+9*%Tb}H+MEy=5WLc{v;9&O6bi|i?tE> z1r}!{=~Yxz)Ol><8b8PKX3RKiEvPSz6D*A2$N~4HzCQGakZd#GwDpZWtJn5C;JO?s zlaqL9jRPdc!Tr>m@5phjgBb@tpreU4hd)PWJ`lOE@6!WSi#^g`v@7;5b2NXA=;ug? 
zHkrQMz&-Xgz+12^Pj-!KD=KJOAzDm4=5;^co*#cy-Vow5kBQnj`&C!`_=}HXF1nKE zZhh?3cLW)~F1aP0)T?`>idQu*=Z379aHq%o+22oFv_bhN5zh(l+6xucjyt{6zs6;t zCG{?I@sAol+eq2ls9&!Bf>w!nH{|k%e;ID=9ZTfY7Z~{7b44dEsR=K6Jhhi)^Bm72p@cbB`=8T>G*~k3OeCd3{{1iT; z-ufTPt%_eiCYqo+l9s-jXzhJ z;2OFGQk|vYU0IHos+3i;dd?e3$({F-P<|tq+}sJjX!XaL*K!h3Zw)eaE*1>l@4L5p zf2t6H$SMvkYDXM2#Us*+S0S3^yZ6W+vg)#0Pv7-u$ujjQ8VjsVcCE)u-77M6bLCJ> z%`&pRQ<|Um8T2mVUO|z1Q8gxn;5T>)L&TI9A_v9XEL^TAO1l2xn&KkV-%#?aSZ}Ci z$kE03nzNgktH@AG(P;VY0pDM*hq?!>-q)HCqs{7-^v12!EwT$*wZdYzT~<1-#XY|6 z@T;MQT2or1P$Mh+J|X_7wUl*%sdK$ywjsQ%t}NU0?drA3JChDgxuF?(wIbGX=3bdo z5v^xJ;m=R;Ivkzjwbm%{97%mhBEKN+U)&_$-J&eo53?M#ln*F%UbT4G|G*o|r(B~P zr1TRM~|(X3{(vyu8ghR4-2AQrrn{Tjut+B@$}2X)|(v%z8xq( z_U#zvyr&vMRbKdzkejfm+LG#RH33ycl~y4aRr!GV*KhFJ5q^O|eF0Mh=Qcu7zaV+ux9*`426-dYmzP{;y@5*It#7TScc zy=}P>{>W=cgNR(_W2O+xm8HXZg0m&kgA<(**yFw+yMWTdYxzv)f1zCV8oE5xC3gA3 zwLqB9%KMV{G53%B63Y^9mEMhrGw4alIA8f=Bx9s|q`*hk zM`5n-=fJGgPf{E#oiC%}@{H$ukN4LqRrSuhK#dK4)8^mJr*`E!=p^JSZX3UT_*(4K z)zp-%lqQ*4n9W*);#^aEbE2FG%ptlr#)V&2|5{Fl`ud1?yk3sp{@({&x?I!;+PvSZ zVse=NfuWF4(e$6TsPM0crb`#dYvI&ORJ1%)!R)cW^X}ua?&Yoxv^0t!P{Az_Z9e*N z^h`Ate+%!9KGkW^H_i_Yt&mkBwunohkbAfK4vZ09MWtJUOW(PW(cRI!Cdr>gGCnIBsFVgYF}8jVzy+TF!BpA<|C;+tW59i( zC6m|rs0*P`C|33A$7hVeOW%Z!t!D@5|GKOhv zU1E1xwym+>d8YmL&bhDe>wbPXQEp8yjWPGF9Biy9*+Ly(eH)8xqFXVEUw+4Z@<3^% zc~}_Q$l63WJY&5UzEo>=q72&N+Jo4|Y+)pdv1OSx3pX;YN84*tYJb<};@;rmiI<0E z>?xnl70-q`J;3Eje?a_%kcKyh>__=3@;yJSxBrfZEAc2zjtMn2wv9D4xgYYm`!%h= zI=L>77Qb%YtMuLD(|DVwpeJeKSpA1j?bV+j7yRmTs(n&gQ|COqW4qG)G*Ed+b;x7t zLw$k2+{mR#8q|KQ?F{YwwzB90p+fa10Tl$km3w-c#^Bbr^0USH)mJ}N&Ux?5EJ(sX z-_*%Ta0j~v)@)>~RyuWD><|l(qISznu5g5cP6=#`y{x$|># zjYeJYyzso;XIr1>#goIIy^dj5kIWxA{8Hsn0^OYBsX%Wh$22*Rqwpd}|1>pD-q44c zEO^W3aq9Usc*Sho1vwFp!LKaJh}}+*AAe-4_00Mqf?M2vvF8W1q6M{d;!h)&S)iX?g~k;Z6f7#cL-Ey~hRy9MZt~ z0S<1?(;Rz&GfvpzImdqB z*Kyz*{EqAIrw?_$+w=GNK5gJ0$90pNdV0Xo#KG6e$>!^!`lSg?n?r=MD| z=D9zgPy^1{m*vih{rQN$o8~zSgL`5(y?mX-lw_~SUO5LkC?+PR;p_NV&G?q~U&Vo6 zn&+PQ`+KX&$pr-k$p$INdigrbT~$?8mAfJ@CoeApJR##3;_3e|SjN-u{J$#syPjK4 zeh$7a-u^CLo?`5JA3pL5@Yg(dj{Ty4fBw}@r(l==yvft=uWkVylw;qKyDED{?%#C- zMK#!0)$X|jJGoola`6Dh40s3Vs?rrDjXw+gKezt#%KuT+;y*hZr*1twC+ z<(82ta0JYZ{n>LK_;cZ3N8p_Mewho(?Ft8n7Kh%g>!!h+%acJlHjqj1S~6FR)(z6_ z>)anNLhfDGu(($lRrr$Ypz$Ankfn45Qxl>)&)>eaH${^7-G#yng$D}7Aouxhg-UJl zy(>t28UuuBySecl%QJ9nAu6aX&8oVxWvQUU~2gm!4g#{#zZm_IJ7edh+Bz z9A6Rz^skz^5A?KhQ6|Fd1J;NotwR&B)15rPOCO{iJkW_Yw#()J`?0@T^1o`-;^pL| ziiqVM|Ld^@N#J+mlDoPY&6 zG%2mfFFA5RTk&uQ9o>I^QNB3vTuPs|s6(b&fUD|4hC{b1K4-wUG}YX$F(+s$M8>{4 z>F=S)j*NtQ$)~Y9rq#^NJwGl{B>I1k<)5uuOa#inQl5WNdm$WXp~|9fh6%rlgqB~k z`)iVRf$=5n>A1_DlKvAw59+QEV*WEB|8=f*FN%p_jS3CR>!=Ia={&%=D91e$v}?Fr zH}c4dic6Qn?zAg^4x8lqt8rTPz>I`11;?}N9XksYK6*R+zuVBi->e1O%RNuKg!xKd zZ6>vCC^MP9(k!VE=yb7#JC6ZFJrLX=MNE z)0KNbXQN&RJYd+fc88rvt(4U|{QVEwsq8tA69u=8`x^aS{~n-z(U)lff3AirKxdxb zuRii$hvx4R+y@SuJ?PvwZy&n!=`BkqK}dDPA=|Ur@)v%-D;c%;>qEh~dGTJkmbDup zv#+GO8+_)vxei_KcTG)0MKCwv>3euJD#qQ4%bHwO0aI&|+5CG>uH1_X+&d44Zgg!7 zni!?(&ssaax!q0QM1%YDE#U0oTVQugb?b-P<$tz~!}p^`MUud~iyu(JK@-2?9kM;@ zE@peRg@i2TzvvD^(k6XTGW99Vt4+l%JF~G)nNDdJd7gUJPX;2#Aema?pJH4iidpH^P4RcY61J z&{~x1n@Fh4oZMdv>C)bP{7FVC%#X>2gk6Iab!H0zQZ5bHj!i&NZWyVxe!_pi0GH{U zSMx$BCp&2Tv0E9{e|}zzk|g;FWHQnd!OIZig;JY9=p5W2mx*a zN#*uL5;k+C`e-K@7=GMM(cODp*r+jU(@%NR zB+@M2w=6r!UCN9cfNuk9Cq2f>k6{HYB!qg{;q)tR?Rj5TV|e)QARzdW+P1yVH>s<% zPAg;<`UkKs@7V!sx@zn9-E^mP+dA@VznR}r`{ot$*B3t|mPvOco>oLSZai9bRPdee z^bAF7QfwLlV{cj&s#(Z#OA`(p$sbo!pv^B8z-5Cb1Iz_}z-FEac4u65V0!@s@r!q3 z#H3G$u+zqTmlH14Tz0cZ0p0Uo*>A626f&V^SSLV4-uh;wbj3%ar z=pGe_v=}^cCzM!fZe|;Rk=75`>{T{@h|bHAgHcf^DjtrlZ77eV 
zmnoK2yNC6exnp(IkGL3Y6ROgQ6;r(HkD@e51)M@^t-mCY+|hT!AniuX;$Q( zqZiJ2HaujnoiJhB)uZ0PSfot`j+b7#Mfu7bQE{-1J=#wB&rTrf7&tkYmPM z(0*b6dm$a_Q}_4c>wxD8ISed|f%P9>uE$2;Hc)bMqSQ{E?!2(o(G_wG2X=R;KJB8( z=M-RBnmaM6c+&Q+H}rS*8_IC)yN;*a4%y{rHSI*|jN||l_uc4{ zN~e*KNaD%ke@r?i8z9t82jTECLDuHtOF*`LlZ*%iP|`L_;4Z9LlNNxT~uQ zpMeeoH%?$!A9NDSGMgB}i&Vp%hTP%)P`bkARF^Z|4^FeGpChbvks>I~zQBf)HtyM6 zIxfv~p43DvgETacAJWX5uYN;BNf(zG_W==zNI8?@QcN^#D^9oZ?W+2^?@UT*{Z!LD z1$EDT&yh~NIzte-b35PyHOHfkx)3fh6x6nv>q}dyHzzIw?hZrx1X5na#{wGz_>LD+ zK;i0zC_3Hj3_%8Ia6Ac34g{uIgz#Mv1bE}acL1Zl=dSdRHa*OvpSNKgf{mc0UfoQ% z0A><={U+CWDSIO5MHg|*4#{+h6C#i_tB(N@9)ZGhNj#lW4VfHPez%9SJ)4T48_DpJ z8#gttAD6u2G+#5twNZ^gvmKt(O6~Bf>PKmdtSbRr*}WcL_X?KW2q?0!uIK)PU!{fA zMkCy|g~>H7zDM}_wNB%~tPp`O<#0S{Hdby*z|hSOWGY%X3 z41bWJ8o<0I|3UD}8QvcnLGYl3f_oPkZx{=k*%ShM=g=rkka;*_?b-N=PPAts(j>&P z=R5;?i_A_wXVq+k5sC_$4B~WelfLtUp-r!+o~mMt8m9W(o@g@rdAD1Wuk3-$bxKwtnH+LZ`gJ(b4_Th#q>Q;_wM@#H>~x*;sCI%#SRP-#n8?4qK&6_;nJiKMzm8p#lP;}Jg_4dbmMuX za_%Wj1qlUFB#=i)IVVQm$FKS@c1(dTSqv%klXzs;OTLN}PKYYLRHuq+x3GatEYK|q?@ zg*fT;mzBM@mn*;dqLgccaYSzw$znpTurAqC?}Q9DPeFog<#|4gstNcwSl64j8e9o3 z$PJW+?+z9aetzHeVnh&J(mCe#3fkNYKj9bId%b_EZt2LQcSK4+0x|rfd?@0Ia{Z~j z^VwL1-tk_SME(QlTM67g{HG0ft_>I0=&N)I%N{>x4`Yx~7YlNJudET9?Jn028e_sV z1v-h_U-s~(=wEBZ=Y=7p$905!ZgtfvmV!FSO?yfp8Q?S03y|L$%lieY*^SJ zihwMgDgb=@BgIj1q_Di|W&Q=<7V@M5!9$%9B*sZ?sh5wEkm2u^H0Kmp!8%MTx)EuS3P;=TC6Yzt|YPssyA)5rtJdzF#Q8D3ZebUoI-;3rjIw7F&N9i{^4sHHnd{{-1mci_eg^LQ zRWbPMR-SRmn4PDh^mm|^GGqXU0<3<_wYUsddgDc{9&Aq?&w|uDRvV?Y+**+t^Nin2 zUqxl+cV6--`KdVd>s0cPb-A0qLg*Sh5}<>V(hfFIVP^Gh%f{;}+d+5h~)jgYzIZ7dXSk7KvRkdbEjx7 z1-V1=-Z^@@>ZBGwf!BSL=l5+;Pj}>kZ+lYNu~{l1Qgk=!+5{Oomjn;X^~F2;o-c>CM8>rMvdh znKj9G@OU^Zg7G_PM?EPLD%k@iT4NFIx$5yXO&<)4pEj+1JA{Im+d{%ch>_vN^9~&eGL6N$t9ieKs?hry zn|l}(tUUwNsdy!b*W%GD355Eu`2>ytl(TF;eWG%9DvVUxcGJ%}nGR1QK1p+~(v~cl z_f*LgzmF9tINFI^kM}BRC$D|yO{X(^6)z1FPfc>3$I$(TqEld+YmY)$ zXF5`yPMn#f8>`}^`0t#5ZV?zxf)&4DZ9m6%+q1|wLdKsTO3{(jzc|cvcoZ^S*@da| z8hzj%fq?Wh>pGZMn%ART>{2@9B>md%c-P@l-48QAfk-SH@iAc$pED zj~nUFE!$h^Z#Po{>{ab8>#t6Q=ok^_+F^#5VGUGjRrkiG8O?2VfhuSxjipgz^OdSPb+i@2tt{l4P=HH;g6^&4}MfNe}UP)GbJFHWyGshd^NJ2U&>${?LpC(uE)|I zS^9MSaw<*5U%D$R-vpiRW%U|?smp^KD6pmpbJPM)75tngke*lI-qbfz75e?tq=&9) z659Y#_jQCZ#7Yk!#)ohegglGMHP%!qg=j^XZBgGOLO*^Cb@!6tYymdQ@^KW8$*0#Y zUZt236FqRM*?)XVq}ewXkETS@h|P-X+1mORoQzX<%0*l{lI~Q`=U?3a(Bn5Ya^-xY zhB2hk!21&9H!+Q{&f_F;I8V$`dS>WrtLe#zf$S;My>+rk`i6X{KZQ49QpXFn|9C~N z?aWSGfOcR}O|Dyu(P(AUAH6pB% z2-3W|1iqqNT~7l*-z4(qVgDz(%5|xCD@ht`;b2eJ(-0?6emMrb$}8MT+lOl|s5AdCA1A?sY zl!;TS|A1~p<#E}AR(?s(Gi!4N<;p{Fu-FwR0LlyV%9O87iA}U=bF22sqXbhiwdhnu zJ0&mbII^6_=+{Y2&c2$?#)@qXIb{$|-W(YPGS2&q|!L0>q70F7VNv5JjSl+e0 zQRKB`Dk9dle>&^1NkBQHeH`KdoUi#`;-Kzu<7+$^< zQFJ)_3q<;TUdTcwrj393j*3R=J^mb{Nahi*;G5SY}Bob*=pS$E4C{odKFV# zhGSVF6#%Z8#tt5#fXk3A4_|Q$Yn!OV?*Y9mi>4Iu$H0n%K(BCF+8b4h=kUmLNhrUma9-^K`Wi0f#ck0@m3oX z=5{?2@|9d7_%L7 z(j5h};Sq-<7W2&Z&ntw8BR?Hauz&bWKpL(u%}yMx7tawja|z&5py=E;e=|S-0ptG@ zQ7>M+G7<^h4IzC#lveLmqt(dz)rF&DLAIVA+zaMUOwWMV58X9yUOJTolbzWy5tS*@ zZ~Il;o)QjjvdF}PQwNA^fw0|PwEzo^sO3bYUw+kBf8l{wUYmb2B_o`s5Cb!EGEPy%D1>J`$(^d+mk(I}-}2{8i;%)Z^s zwK0l6ekfi#%)M3D^8K+EWYv8fV?tXC=Ig-q6*MkmHPv63GiBbqP>xQ&X#^`l?B;cE zWyX19&&#EG*!~B%h(X?3gGR1-2(w7aYwed;I%ZI;jUTETQq)1BXc(TWrGid%Yr30~Ak zv>p<*25oOYJN@$yos@Nlf?jSFW`|LVJ(D_GwSL+`I{8gJ|9WhD1k8KM1QVgq_&_X` zOmtXYAp!nGY9gfb?29d>cIlizVrhL~nC98s4~d{At08GI@TJ|H`&w;B-IaK+Mdfi9 zgu20*TLsNrdSo>o+g#u7i{yc_D~`j_?N4V;CmFqydH91(j#<|-LXZDYMpApLmOf`I ziMpZ)zXHBgjlb5+qpv5zmd(hTGc9aX0)koME-`@w15muI`bv!nb+7;g*~Y?HzDu9d z&4}WM&CMS$X9bZKAo>h1WwDiIL_0Xrx9SbmwjGy9Nkbc2MF$swvgeGvsbC_U|41T@ 
zT~FMvx(eOiH~##$s)4|iDGiO6gu|n1Oa+lEb>j%0b4AnR)-(WTw5t^N9YlR4uNJQ}(OA$rQaC zOO4Dp;c-eLd7^a|0RItOn9Kv_M!s324!vIA>F~$^beg)hcv3E@n!Apl&=+K5<=3Ez z_7m3E1;NvEI!fSfZgqN*;jbjpI$a^CjTpkPg+qMRkd+$*1-Kl7)qbhJjQ5Y7O6OI&Tp?5(!+>eaY!GXbzAFuyK;mC>2bt9VbHQ*K1^=|H@cuj-Pbb0zuG7`m3MK zO)WTkxfHqFEnU3_4AxQGvN#dN_XoX^UPxMZMu<*|!f8xDuLk+e2Bldlb+}v;MxCvT zH6soR56-MGx3WEs?yi2o$<-Syp9n>76u`5sKz8D}nRdCjYz^D;J)%Iv z*t2Qw*+xF=l=hu=BObbV7ZW;nMqojKX_{#7`sTjG^_|LiZ|LMvs@;d+d8|~LW}VeX_w_NC zpBIS^#Bv|aS7Hz8F&o+Ac^x2ACZ_Nl*3b7#JS)dS(lE5hyODB+lU&)TY(%}e-cb5i+@G%9mK2}Ql%OSrUPJKse^k8HINZAl1e zgpyYZhaF;QsPriJ5PCDH0bpgexj&dVZIx8_A-?*t547K{bm*Z`PG9f&H3 z_&9sv4k5sP4`W>V!T2k0XuIt=Iq>iqkDB`smnKy{3w8(+%9z^zW%EjdxCNkNd>%u^ zQu*Rq8Ig}|2F%esf&s81ZVdotbR6(p5hexeKql+Pzj1<2N#nNwmuAoT4>!j8|8c!PIOpKhac@?9%`@|Y?z^lO z@F(sE-10Paxf)&eJgfN+cL1emJ*tkx%3qzX>=3!cV~P(=eGuMkg~%?r6C#~n@GUID zDt_32t5S%v61QtwGnUO1ka9It_q=*5m z&e2As2Ijzbm>1Rw@p{$Lte&4qdZjHuc1HDC%bHG6p8H^~y3=s9JLj$K`cxeoH@;ua`yhCLtB9)u*g>@c3 zEStv?%Y&PsthZ7q_0s~2UlT}g4;8sVT@6B7Lw+w)Z%8Le7LE(ecdMx{SKQP)Q0^I!Q4x`c z{H`hnTW!Rr0nkDfQN^vEd)fGGi?f@6T8Y8qKi;~pUW)qFt2ADMh|Z;qv&l^9wLk!5 zhdiu`AslbJgZr%?s9WJn-&uz!4lYFOqcx~L7%u{ryWRMwJ8n0kU3f}uq3b^}w-RDT zgqKvcjvV1~bzKe-@X0iX~Id@FuvO>@Xs!`tJgGJTG3+W;xdUPyMLS4HBB zZ>p^T#e~pu~UpW znk2P1uD{J~d~PeY)BOONI1S(@NjLyQzgz^;Q1G~rLyALg6N#GuN1$w$+%W`DM$$>S zD~}hT&a^<=o5Xc=Pgp^@1KL6~csdGE6(Aji9~&|+ma?7?JDcqBJ4>nsw`*D-rx~@J z$3MCeT1>QPwPh#=UFw=tGE|v~@q{!Hht)#Py-^3KL6#)<&6A;c zDIqujHlUk-n99|x!}XFSQ*8vj0IkOcK)0J~;;Z2Y7c7-Nq>)mfI#S+nqTV-3Dd6Lw zehuohvhuE8lrLf;JdF z@0D=ABYCdV;y9f|9j^^xX!HdcZ#MSbDsG_+7^{Lw&zd%%eg{CPj<{1MrRFS?{ae=0 zD8vA|gw}(_LjXs^@NtdJ z*(O|H^-0Ii;{Kr4%P``X0N8FgKuS8%Zi-&mj2F4z=Ve+<8kMCp25^J1XHNYlccis~ zR7xYU`2}FJGA42JPfS5$eJf?7wtdso&q#~eDGIXFoO0}?qK?KpScLx!L0yd}|0YDz zk;%h=Q+vkA!MYg*HzyMRvO+x{J*-%R@u3$)XpPvD6!?&~%CJ zvGR=a!8t6;AvV=uC<|#-kawWp8fMwHfWph@tb$f7_C3n+02U>9lRoIXb1E_Q%o%|# zV%lb<-rOpOXxR8p)!_U1-*gZ6o9D#fb9xN~T08>Lo1bCI1rYBZ#of7@iBxqg)lk5( zZ^Mj}df6)ot^gn^YkA!ticXA89B-fjjKqixU(SR3(kb@gWK3_dSz_2e0vjKA4XjXm z^EjhU>90Ta0~=PGh~0@1TOXFB-z;QUKRfD{eCnBacJGD?er?RHyi`(VK0KM{U~ls2 zs>@MTrc!9`IAEccc$nY)q9ilZY*;(B95@w%M;NaFdOeF)QcnQoq2WWxjP94N=^?F74K`ND_*7u&s2y{!1D{Yjxmc{p| ztTF(?@at185DFN)j|ouXKN{*5fFascZ!mR>(d!TTj9NWlV1OVJEz%8dKjrLiP*b!I zGYj9P&7_zZ79mvXEu6GspMPV+gn$aha`L_yWX_~g-Q#gzQ2by5oBy zLc02HFt8R6D?z>?87p4qknv3oez-A1gN3(BP73{o>%C4eP#XQ%zCk8k{KV9UJ58g|6dfzX6lDkyIGCr{$eBoJ;x7;P#I%|;Y9x0YuQ3v! 
z3f&AB0#mz;5Io|&*LPa#t2AVSb}RsqM)OJ)voyn%6rjcQ@Cvi8*W0IXCJ8bVcb<*>`iP!_-V1_U+k96>CV?xaX{ zfaC6-O}-mVmLHv13W-t6sINYcJ3+?>E{?1+-weM@+Avd)yqRIPT({ zV1}73v&s$7h2&Chg$zp`N)g6ZdXKJNqNKC64h71H%|sDJ51ImO-BFdMY?O{mKRKy5 zHTKZg0bWEv`kyb%iwqyrimQ&W2OyWcK+<=)H1MQa<-{6Vu=_~8QOT5CtRi9>zQsz* zTW!%Ff8@es5_<;-5p z&M+RLvWJEHIH^K9-x_#i zL?U)(oLCG>Iagn)MwRDZIKRiP9!fkeCR};s!BgoAFhFc*hA}Q^wdw}=IkRVT#NkAh z*_YD&7D$j3eWiDm9TR>-^PK){P`H&Hu4Eq6w`sU2q8~W<6c-9gea|AI3T#K@ol)I&^+Dgz1ixIB)4Ectv0-4^hCHAk~&Z~6-q3_GJ9!T8314h zNP)o`eaZL#R*e5wz%KF2!+7aw^T`b;giN~`KOH)`8s_giN?zPwT(AO&C=xGu2A3Ut zCpFc|+Kx?)KO-p65u@cmM9`{+>X_IjpGh|PJyApZ9 zR~wb3-H7jH!#}BsqtUxC`p7ZZmAW1X>Wde=01suw9nu4@jm#5N6t>Hi`cO@peRqeo_N_nA-8)ZH zXXafe0|Dd8ay34}nw-n=2w^>(dfmEoV7MxVe0SL0~Ub$Q#XQ9K3m5| zk+Zb=p>gCnHpKoB$qC5GxOM6Qc8cJFr~@Y7<|hSE`RE9U_Z=*?E2o|6gVCz)ep)hB zTD7!9mU~!P9`;BM*y_cN1Mssb5_pxs4+-20R>-zXE*-)*h z7!mw!tBB5MM2DA+eFkmsdaVww0Tg#zQ>?-2W0QJdIW9{@&6s{Mtc>%??EE#wa0O^opfrZm?cS&U@8>(!#SFO#!H7@Sw<$%(7O*2T_@c%-- z08s2*!I#7cvMKeKlTj+fJfmLR9TeEp`L>%>^f3b0i#9Jt1K3z98b-(95N&N?>oLAx zN$?^>tY~w`TPLanz9^0NT1$Ahkz@*-K9i~aZ0kPmOqeXBJux?}xM>IB+^r{QaN)yg zBP8vUQ}^s4gG*Dnxyyk)PjOQ^lIFB9HllCT7G^clax@zM%ru4d>nlphHw5GAaj4T- z|J3;Fy9u&7;jUeXN6>BW&0M*Zn{AXQ=Re%<&fG6MUP=hPv2Jd|veCP}-I14NJWK9w z4?&F2#XG@O=uz^G_b)~ymY%q1-4Yl@_@pPN)MxK74tNx56(+kqb)`PT>^AIOyPz%A zpW0^xzkEqf-H5v2oJNq>yG~yGwntinzcgp__t1} zIF|ZQXwOrIIy(V=EwPxT)f;E^O^GK54_lW6K7kV2LRk&U`xo+fI2M1BkyVFI<6JY@#tGl(YYLs zIY2l7G4^O|l;Fq|3mLlmBme_3FYODHj1_;7gJwsembL@OS4*s0k$>u4GecmGLaJq& zl!@w4x%nNt&8%V5;ioYco4Ic(+FTjVxdCSNuda}<9v(*4+Hc~oat^WgabnmOn-ru1 zqI!^RE$(X`X$&F!m+;jEv>obB#+tgK9g}Ke+!WrNo0*shmkiSCq|w=`WPy8qa}yWE zgF^IBVUpNm)NG%H>cMJ>>709O14dw5SwQtE&7BiKAX3P59^^v# ziNpD@$4X*Y>`lEz4dl!tei{@zpCI$prT5|TR_bQ9SOnsY6f_Te$_1eB0z)vxb`VX^ zGRHiukR@24o9LHCcY`bb{L!`dt33913em&Hw9>)Y5^Nkiy@;XkH`dK}^5qxsYM!a_b9X?i!j`=4^8I#~!$c_e$U8@fJ@(t9 zo|Ob2ri=MrYFrRJS%h7qGWsRr1|v599krB|XZm(=gb+X1Ej6Vm(`iSd?~Y3;m57w) zmGvJOg6}+Y5g-wVHz~@nkTU38PDk-=l&n(I<@z6sz8&Go-E;9tp^`_0XqgfyH;H8?}+BU10fX_hA`SE*H9d9G)=jOE2h+eS#P;5!fmIlrm zPV@4?y5be6Xf_cRoLAAyR-)l&N*ZQg;{bVWpn#pLT_qdcB4@hRFkGDTLLNhERipBm zH6HbobqKTU*O{8u!jkTGpou*js%bF+CSR4WPkDtCt$SY#udLIsET%6Y(>I6c6J1F> zWh=loGC4$~ibEpnCwx(PvBfJ@i~;~)dP^}xZ)3_MYHirToSUVyp- z_E4xs=dEn+)A*XS%|JllYsRlZaQOe&d+UHGx9xpgN~D#LP8E~}>Ba)2W9U*ul1M#7ONpVoYhY+*27WJ|OMMhPsH>MlZ$rS+M^3eWsZ@bJw?CHJ07!a^sR&;y_?+*>1AueSS>Xqe@;!JE$LXhO z0IDtxtv6WM*M+Bs1f3Jyp)fMv8Jr>9*fc+q(B*P27KnwNm7>hMQSyr3C-rGO5R?vA z-{IX!@~K{v%YuRI8T)lNj9~y}*HzWM9qA8bY=Nfty$POkNI^~oTiE2$A@jQ?UOQi@ z_2joyDptITR4$yI^Jz>=4igUV7k3>u)!g^*&|3f9A4WOYzjdq zkDn9m(vE)6=V3OgqEH)ub5{zE)E9=dal-*P8+;CDBT{Gu{tnJ=!Ug+$ zcKJbN3vDT%x4^XYmNkJ-`o2TK;r zuw?4?Uaa$4UmCY4#^o{9CH0{rJrQU;%%^Tpf z2Opd4C51RYUigIGZ5DG>AwKl|pEtRca_TgF%(Oj+g&5eXB4bnfo;KO4m(Ek%LsH&5vJ? 
z%6gv8*OzQVti+jNCjjPl;KKo0wt12X*{7R5ORX}_E8K<j5J(&PfjXHs=BHWIlN500MZx4B$wXnQTrk*fW z1YdN0h~h*k=2lyL0LX!7wh8d@s>i#O`5ImI-sg=IrCny>*CWcZcIJG=A$CqVO#tZ{ zWW&>4)$??SdY{DN=DAF&dX}ffes`J7IFMo;#!aUv?A{l!=o8jmH!&B3Af}?B`&oS-BQa;NEF%%%&tVCdmxQ%LnqULOXiZ1Sn$DdReH>gJN5 z35qo}w?pGIZ=1wRv63B6Fd;*?SNZ2x08Dwf3w3>{Yu)A{yX=xg;$^S{g{JpGoWRoA z1|RFuC}t}8o4~SuxmwUBtr13S`WCgZqr6+O;3|LI;=1m*RnVQFt>dPb7&#+$`a!CN z+oiYQy9&&H706z%jglR?LmNLJ9s2y*y0Kp+`gcix<;v!tZfifhW~y=3=-Hs_qs>v_ z7cE;H@&_Sh%Mgc?iFR{TO5C{g7>xrq=5<7J)8#|+!(`5Vb!pLT?y;3}akT8n>Dngw zAVh36Td+=GP#4oG3pR$*43Iu+Pf7C@NQ_N;d5XJ$KbE`yGu-VGEXRS3bm~dWx&A_H?hw# zG*sDpX#$XH@npTE#NAYHsBhuBPQUs%fTDCgGEnQPGj!w5S|tmCOC+&`JZ-FVokgzaGmCH@#21Fyxn(D|@t z)CQ0gaqFB|uEgJDf1ZzN;ImoTb)Ko^tvP-sg1=A=Ko2qqx}k&A^_UBRqwIh%<9VAe z|B`zQWN~v-U55OV$2cTpfzQ`HT@=Vz{g*-i1?wQtfR%q7 z2YF6uuRT3kuV?H605H=s-;Z{>d*UOdBT>JPk3R)Siw+wY6+BPICsqgAp7olF_)#?A zrycYZ##)s{G3f?OvI?WaY`zdM99#plP$emAWsaICxl zvd+=l;(8DML$<%T{CWG!xI%3|f!+7ofSAj%w^zjgsrX&ba~-y71qA=jeu1h5uYkT? z<_z3aejh?07|19+r0BFh|IGgk7_9uj=+A}w+10<_Y%iXpQI~|0e@^rgCGdaZiz*VN z=QCyke8}*zkBImieF465`2dVO#Z#A&;`h@v9RkRP2`K{@??5Yt0Fd49D)M(YHXvT0 z{CVaOb`Sf{;fUV{{!dN(T~sj8#a%t%!%7T32a)RH7W3C3I!b7xsv*yngy&td-=DOw9 z_P=EiozGZx8T;>D z{uScT-2hB=H~m`RPYt)lOY=qMBL@f6$;Nl+#ePgUb0Af`nf(#*u;PCj=*W2z^0>Cu z%+EDaio)g#T;nztqEiFksgQ$!l-^v9JG` zp?X>;`kxDn1qe%;K7QlRjsAxx6=r~4V{*Lzxv-?b(dThtlJ}o$I|k79P0^ui|3S1r z8}|Q9^OqL+|1|V}ruqM-;@5xt&ouvRRR8)U{ePzUYsL9_X#DTg{5cc9KBNA3YX0R7 z`iGqb{_oWMwc`AowrBsl(Egl?U!L`H|GUusz2EV_3+=xi=)Zsf{^x7{z2EWwGhb5` zTU~1Bp4U!?WOBUcKZ6m{9Rj_s8rE``wX^w^EP$|i0AJfEU4Kv}Fbq*wtWvW$c;J@D z@%QHXM`aBZH#JWS)#c=Be1s4`6=O929cySiFHp-)O!kj_&6D#YJ$&q3l>pM=5cpUV z1&B9pmH$r!YMx*NpxNq_)dS|-A2`bhPz826WR~^sRp^g*Bsmz+G3x7+?5lv^%cH?< z4LQebxc7gey}#V&|Lq$3-Uw)8IsW$K={)5Rj42uToXjuvkKFc^1fXNs`@+FHfX|N? zet(mKCLHV7A? zc6K*6&g(%mHyrKQHv0OJ!{49z|JDNrh^-)~-^CdM07JE0X`bIXTu?sv6Q}?4miFf- z=Jw~{w^6SvDq8yoykSee!o_k{K8-0yAL;q?NjmJWm3?6&ihW&NQ#9cYpSP^dZjIwZ zoW6i2n3-Tokin1%9UkWM0;PW0UptG(3yn+>`l9{t7StsU`4&fjq)pRG z5}5}(>F ze8g#M*{0<1ip0rjAqxGaWAPfwSl=E4&$PTZ4-45`uj*4v)*yDfnO0?vSLpY~4n*@w(8^DKigm>U_# z6|l4QQe+07sYY23pW0LvP@Mz4xP=el#F&Ws6`o}bbV_9^#M^*tEYg0&gZ}m+^sica zR|N>PyiARGB*cJq{K&QJ#k>ZJB1ZOEwQ83;!mwa`Hr2f)a|SZa1=7 zc~=eXcl@pI%rI)i21pR;(c{7~5M?B2_n6=NS;F_f5dbLS3532TZqbV1kjF5K)!D3I16$9z zhIMS80uTWHn=XI*&5INusuCl4(fjj~4oP1isR+wi_B<~iHTr~kF$-r}Q)GO_ za+3744R+!)dUnLs*=y989EH13@8o^w&&KnYPiUM0OT4=kOPq{F&`y@D zEi8pE0i;{KT0YHY@Mwi}%0Nc4eCIGFTkhM_VwmW~h23I4v(rKPt}29nCh?O=*Vq7b z`;}{*kiJvTYOplxe?L}$3`2L`_q-HwnNHKeKT>HXNNZEHD}gWkU!O~u8nb+OD|!fTw@Dz2W{x6JrBc(ZHnbp+#sq!)i5 z`23>*zn47Ug`;+*xT3TG5?J0Ez(@e<|GIvJzkzWk7<(xgJg*JyLW!CcsiaC&rl7iD z2`FZ8o$o@bO?h*9=Nv1|rRqfIB7 zzq@jx;C}Yn;DM)+@D~dCTW&4D*(iaJ_^b|WNJpvV z8G?_2V6-#)Qoj=rSIyuUe)Kc0`rpX(Peo}yvqH!;f2bL|d4XBtCCiA9!-x+;_`vkB zijcqq?TFPJ1yycMi-;syft>gEB5xc_)dSy~v;;M>tes81E;n$bb`K22>s=tImy~o_ z?v>=a-CR4Z5r9pZegW^M92Vgp|Ljl?z&c*==uXLQBW{=MdZUX?232TA%xbyoE@3*-}-qZYzcU!NxJ-bDEn2{m<1jrUx~d{_+MaKgOg|NA$V zE4aoJ4ciMH*JhmVvDro@|CE$P9@p4dyA45UJQ><`zgT<0<7Ue5LvW&s+0A)fpfKUj z(Ko%?21;Ak{N)h8$TxqT#0TEQ=A{3wAj=)=;(|7cqqzlDf{nB$B$KpvQf693cJj?n z?)i}MF_+g6TWS4lC-4jTcDs&-!`h>jZ(qi)SKs}8?&KKoCcaw+7w8!dj9poddY_j2 zg=0&cXnoS#7p?JdLVAnh$IM-S8(lS9hMwC?V z=O};n_gnLdj#&GLS1vA4qzHqA+JUkhV%JffZO&vpFV~Gf=4dDf-5L8}=j4JB zyBES#^+%DfINm?0y5{IH%_b#QBjhB~QS@`YFI!=Wo)vC8OsEh02W{IaZ?kyXr;I-& zgLGE%(*xVJmZ=x3Mb1Jxz3 zPY9?-m?-A;4ZY)Nt{_n;U2MNGQ9~a^44$Gn(Q>QFdo8MV-nlL1M-CQ$jH%eCYfP+- zCDaopCi_=q6050Xn6{3PwPhmq0?(;;{YqiJkUhDhezJE}*WB$f7-7ko<<-Dbkn&jV zQ=|#{;Pa)4H)oiI6W+*rALQhldt2g@S)L_;V;2#P~EA(np+Le0qr`%Qd^s}kdxVoYE~yYn5j3AFowWCszR 
z&_lz~tA*Ojxz32wOt|!ci|D{{{*e*IETom9<+DW6SR~?I=6Cq=#cuiF@3s6}k={<$ z_Uqp+bC0`SRP9{8KZ;JA8+?(eODE2OOqr?<-Ax;?ShIK|dO93@eVEJ+ag!~qBKb0e zYp9g$R6p7vIZ9!5{td+RJN&ukQzYKGKGEQA5VTh_!H6aMOumK`S87&k0O>Ao9CAKS zk5d@QKBM}qo3dQAKo`>|4Hgm1-uZMuog_x~6npQ#ExP&h#(O3R9}pCl&@9&tVqjAxPv%lYb@ zVO$)sGp2T@ipofV-Cp`d-;%>;tH{oo%C-9zMk$EjZTb(}O&}!?U2O9Lfs^owKg;sIPB)|Y%4;mIyWdNrZvnk@rCA(tdXim)-waN8y&N4R z<_^E2BI|6pL@jst3~}FppQs~j zb#b2#>5KpuB7PR;>Up5R!o)<)FRvdjO90_^s8kx)f!l?5wBLt~+S>r3(Lwyx>U#7C zmQ(UC*0ap$h3b zQxQjZpcPj;nJ#BXhFyu?>6&ZQ2VSxkW!4Z_A;aXIcr|V}<9;@p9~+5Nr`u9gKdaw> z5sFHIMR5l{@6UL^x@WCLZePr+IaaZJUuE1uSAf|&(g3Tndu7UP2GV-_;Q&3UaZD59 zZsg-p&Fp8o5$|hps0c0PAy>0iyLw1vq70B{MuH_D8mv@0H`j_#M@0^IC(krp+#gRi z?K%^0&9+YBE6%S*r5msAywyP#=}9D%CV_qJ_6W0f+}rZt);8l)7J6()Gxtr3svmx{ z&9MZ@>iCQOxIW2QQ@<2vnYJ4gw3YEDh-Q|Rk7F|s)-^YDKyO>~Fi23pKrC;?+Pq43 zo~q+mrla;N(x`r-wr8tj3_+1hrpb05cXZt?vFa|r8Z+%RC)mn65*)l-JlmRz-_A!> zHLlAOV80(U!#U2MQ%}am|M-Bk@QusDu}$?G;by(w=s^9tU1GZSW2XuF{fPT^V6@fd zl?bycvfR54tLvE?tA6`+wSp&!cY5I2>YuGPo8zys)?s_5b%XPmVo4CyoE)eX``xM! ze4fT89z0{OCpi}D)?d_ZAs=MeVm-S7ms&F{fS)a-fLF8@#4)R-8g>nn4aA(3-G#~J zl@28l20$Sc9ep44cfXMvXJu^7 zRE*67Kb|=4;_Nl5`xq_Fl5B>Z+N8?W8kwp!n&qC2Xa-s~Sv_ew%C@f`CG~4Clv8#F zS1orHbUj2i>bok->rRg1xNSVEY?_$?!!5URe9?kz1ndvt2N*Swm2H^%e`n z-*B)qkru;wHMm5KW`KxDl%;u`qU5!niXR5mtc0#uzNr@OaO(Q}&275{cjx2WmY0T$ z{D?1JDzTQ{al9ekwQT;=<_r*~8$>hxmC5>!$Odrt(t7an>xC90%h%1DtNW9|3enr& zKbSA5F28oLi5ITRWwv6Hew`m?`O#qXQ(oLPWSv6eqjeeRye#5K-1jERlvVlUrk#iB zN36sF&quD4?R|)Awd_f3<4N8tLTXbDJa9!hH33l5+HU%ZeIF%r^%1xjBs*k<@Hr;l zO6Utma^7q!4jzGVq8@|o?D}M!3<$3)1b(^H&J1&=kS&%BNL?;wy9a*IyTBzoVEHg? zID0o8Jj>wehBUO5qUAh02;lTOLzy0^O_HCe@!>D}o=k=}ih)7k)_92IkUA`J!Puz8 z(R~kUN=cg~ZLuceM1t)rwPTXp`MzrYx(I~y{(JOP&9itpZgJ16xeHbczO?9=Ym)NKH=i7lLlsJ?;ZL z8vNogx*5Z8eshgd=n+5Z4rp*;2%{V)vLbD#4|K+nwQ8-9D0EkWev*lY9r=xwvYEb- zOHS%FZ3-PW>-*f}GkY^l631EjXkDjWl_8$rSdHLS5zT}ypZ3{6q69+|_@07i7jsPH z^LMy4by4~e&Uk_rV;rjc8y^}^?|jK@li1r_S_{7;j(wTjUGL7`VT>yFv#yJk!j(?T zr*~ax$~3SMkJ=ex9-PE5B|I_MH^ew0+mGyX0vb+?ph>x78d$!S-PZ_^enZ)Urm_4c z#_UE_B|#f5BB}ker}pH!qL4xJ0AZiLd3Y0v2D=%%j<-iR=~fB-a7%@LeGAc(?H0?5 z0@KUWG9)sglTFd#2o?DqDO&#J*H^tqpJJA0Dn_W^&BrKVKkl?iD9k-@+E{nnF+P^c z5m9b9t(IY;hpmVBkTY!TZQfy!@=-lap9}gxPA8P3zM!usOw>)kd;FVQm#@kKl#cC^ z-@S3kUlrmK-ZjH5Y#B3i!0aIU#qibGP`p6=NY zdL-tuD?jp7-p%QhYvNZ_4ig{iU#4_5;=9V801QfA{@yfFY z3ypQd#ZRtL&AwkzE5UMutuX@^z85AZIrjW!I6P>8>Fc9^(g(5XSRAI z=7aDcK`||ZH5+*Xd)~9U$W_B_e?k2jN!kmVDl#6NG$qA8gIO1^7ozKUfGs@FEr~RR zE0`(GR9R`p*i1P!q9OjIRF6?gj~8j!sR`i{-hRCCY4XHSY+}+>09pHv7WSHe0mAtplV#qoS>?ND$d&z2XD#9r-g&#+`RHHTpU2qwkd>4#YK-D?wO{E-E31BC54(A zlG$qzO|AL)-&G&M0}}X?KmV5lPA@DKk!GqeCaTZBsNi<}A~RN-9CL zKHp^jI;tj7Ft3?9xn5{^7dBCYKYz${*PB*ILqPeWRLp#h`b9nCH=7v!61K6c;Sx2x z4=t{merfA@##;=rnJ@oP*wxj1p;En|@5NeQ4xNfX#Zy`f3g0ZbaB4?axl@%9J0a$3 zN>M~u=Dn)D_G`;u-vO^;zxXEK#R)a7H`AQ`u^qCSmX<(;UXJWaH*Dh8_uXdjRy8Pr zgJ#{;kG;GPk8LDu%>;eCySGN=dhOoGT2Rx7sX{CUj}IMJNCtLnOgKwIw(nl%#KIIJ z%fqff^|~-67H$L1RZ_ID9e(hrXS=mg)q$u8{rpJ_0C<0Xb-95JhiK6233^vpvs`YW z+0W>t&+-qx)o%_`x}5V>0EG}|Zp_*QQV*5ZA#>Gf4A@A{3YqP%(3>Yh#+ANs+PsJJ zt2c0~bg?HmS7e?}*`rzFy3F#7C~-_*7JknsO(V47nQ$j`5aP;?&NfsplC3ckc8%rC zYTY}D<|(^|;zHRH=xjf#gEpDRm};Li z8Rodd{P!imTANl~hpDr(TTq_3d?@(rQ9bemgJV^^I+K(zdI8t6AOduMbjLxozbl__ z3CqjkX`;-#nc1ZmMVS)xjPDKQyJ^AItb@)Rxy^b^inN==wZr=;Qqa$6+Vw@FM+X1H*VWHsYvyPI;xpSTG}=_SL!c} zjQQ^_K3>ZlD{i`Kt3}xOwbPa7Ce8_Id*zIS-&&^Ht9BdtYb+%f4Q?WG1X=hK7<}CH zu-qE#fIYS;zlx|m_0k7p<^1(&h_1Zf3$P!TV90WNUmW3{|H!?=H8G0wl*C z$&*-KMkJAT+iw8p)Bw8*cOg<*msOI=)a9O>sh|IrW;TD4(hz(?uzj_TxLEH|Q%IUE zNqa&!O?vQX_U-$mY}!<_pdnkTh_eq5m0WKnDVSO0g|F|QnPe0Msymnt44O~{@KUbe 
z^Bs?lzP}2o3rI+?y%RiPu;cO0+WTOc-hIB>ukD*L=RowE@c9SrCZM-^SX)jcoU4mE zE9h5fc&^Ho{I{n1dIc~(xDux^O#OskbF6@Gb4)T7=0{zb%LB)u$Pp=s>KqkZV;__Y zg^{I&M2BoiQg;L&4sZ0w?fb-Yzr-4PxN4q3s06F`mds%m4;JmBY+a619+`xj9WZ>> zYw_W)c|hA>Eae}&)kYbkM}-Mf1bJq9*F(9_PQrP#G-WTp9vdFo9HEb?wqmv;X)ll` z;mj(@ndyymOE;6&YdY&py+nwcHI(5c+?RH%*na2OxFC?eGKY5n;$v9|JuOh0WX)(D z%&uW)ogzu4XDuOioVgb`$TG7MfF>(Av{-;Pdgg1f`c7`ZAOs*q@abv!igv;Kr@S(r zFG!mZ8`R?IofD_fiLVP)Xycqu`>m9`!Cxg6KY|k`XrG#vJh*v7-b)xT1G+ti*c%rd z;J2d6#BVENF|ZV&3x)jPO1_Pr92XzT)+zbV-Nw-Qj-2mgaNXn;tmA$1C;rO$lg?Dix!{C0fK%w6i8xtBL2Y-M5oT#fAp z`ErJ5?`Z2DpI8%w6qr3OP{LaP;koVt)^)m4TfcXPx$$xugnewyIc_D$r05Br+$OCP zEKH-_e0?P*x=EHZPq*;UOp#+~0kkc}`;KQ^Z?lSqUoQ-myJr(b7|Q0rdJ4dYN18$Q zn5+*reamcTGg6P2NYM?B(Z!S{oULb7uCgyw#-RmmeIt&<^Vi>lU@E-6Tubv`Px}t( zGQ3OAph+FmrEZqKpj&z%=|;}80(s)&vZM7*RI7|2i-Y31(VT;=6fb?`rk!i@NL zKJ6x!i7FM8D}JSe(#`Or$~#+U6gRtQd7H;aXDfVnB0!v!s6F9rKwaH>@6sd2Or3DO zOS*<%AOE7tz3}dW0?g9^q&d25Q$9o_% zWs^$-9*i}G#o12+!syR{S|INx|l7)%B)S zm_@eCU0ljcs3gB*GU4;~L^miefs2S;IMPs{8o6+zZVgqcXSf#gq4X`(5xg=+l2QYv z&(PvA1`+GUb%8$W1?&H!BX0t`viX{@z2M=VwfOGykdPJFJCQKO z<_4dNfZAi{;j}ZhxE|PB!sU{GMP6c>N_>J2xB8Y%%sKlFoGV*+>}OM$0SgeEH^Y zSjP}}*WrO>J%9qcM?5njpdENH70F#^rvjXC22|nGe)8Z}()wT<4`%akf#oCaON0-q zOTu+;?5=#(k(1E~FSZ54;#NHnttSGZW097&eihk8BPTWmkzR5#ikusV?;A)>1-I8r zsBPAs;PcKs@J7Z}R|z`GB{!j;O&0OO7HfsR@`leGJ@#i{3(yc@TK8IX~5Ee%uk zCLcmx8)@OGfnIR2gT|B>deVdUZ|pwMy?L9|dZ1)!W&0qD403mo%|JjuXd0$`0ey|& zCG0?mr8vIwYRs87=U0Cos(IOU9Rfa4?iYY_qOpm|@6&lIT4g0=NYq|%*aa)Xwr%be zFp2nT2d;2KL0fLwM9;Y7vyVd`V)=5!ZGpsKIk*ctZUd3mB-Ub@4DAOplvgAkmy&+* zM>&&J&;!@M?6oa3gz$sN=$s6%ujr%GN<$4CDxI%KjG6WCp(({9Zdc9J2p=epm<;K} zDuKS4h%dlcm;fN#^C;+toj#@l?9?>1t?7M*68;=WQRZZCl1unoo%0jRZ!3>HD{c3@ z%rNBy6ne~2Q!24=sI?D1?K9n_RR5R7Iy1SYD;6T;UL#e(xEBkuNN+T`NU^L zmn~ZyOs6SL8ssES*s*uI>1`3O+{B@an}AP zB%F)Vh_aD8{SrBzCu4bQO;PR?@(knGkD@NL?^~uH&cID}cA) z(8b6klL}J>R(?h5LUD!;M5TV8| z-vtRWpSk-D^28}K!)&w+b~*CBo4{vy3^3r;Kq?@IJWd33tX@DhF6PLJoZNw%$|USC z@6{Ufy{S33M#7i5J3?2>0pH)~jwTNK8Fh@T&+*l@hj=H2&dCS+TJlq4lmN3Y2zQxD z+M7H#prL474^zic-B`qqDu7NF$f2t&SXZRovu|B;*u!L?H8WE}4sIX$+$Fi5H;FK7 zm@JDpJv(-wel(p}pf6j918r{(=nO4?9HbMY_wGvJ;hW1dUF1lprW8K%>A_uZSqEID zIJ7Y>ZH#Ed?|6vQ6~M5Xx;&IW9=G3!?ST-K4D!ABB#s?(gN5WYCeQ(zRN07iOvX_N z2hEwq63%6RAZfjQP*~A@c;I@sNZm@G*hx~5#b52tMx|x_*Bdo5hviET5>%o&^^38W zP^}FYAi0ku2a-jkG~p(kluf7RgX_GD8PL3f4#T5~sU`y=XhC{Y8OVboX?L|8_3X2~ z{${2je9ZmK&`oiR>Eq}UFei{W?9uO!24A040MC{B1+UtSs zSH=>y)1&O4JIdYhEoNYhsebaTYiCXnFeoGi;J~d3pdKwhBAzT`{>Bvxi4E`-=E6FD zaa;HCbliGQ0Qv<{!Z+}gM3$3>f;s^a{0+aW=~+cPOPtrP;*BFIbsS`_!<2lSe>_Wa z37NZVq#>=0b;$*rjNR!QEN2E%KzVgtyU#F%Uh@#0ZoNP|<$+Nn;5`M%$Fbcs0oj{t z(&dV^uU@<2VJOIB^a34xPLed_9ftBB`}e}{Uz{V4srF#rBxo;aDw1Rlhj=i5J?OWo zVGRTcTW%kGDvYnxP`s!(B6%Y=SsJq5R8Y0fxx?3$O4#|Nih-9W`IcYGLE98&8A<7Q z5laDoVMxX_F6h=d66)5*H#@SJoHLyb)5*WwWUp&_`wQ?=n7y`#wh~m~&b5Lc6ox+C zd@Fvz0OPwop7Z$#JR~D#1RS8JV~;Ix7kXs*q@h<^68i2peoi??`TnFV{NI%lxIAe= zfne0F{dNg3j3&mT!3jEb2RNXrkDtmMq}2sc1&_0-KRvUGnl&K^;+p~!nyF4?6;$I! 
zuY?3tn0nWu$Vp3BS)D-pdvOoG3V?THT=TVYaq4v=zeiOPBER7vqq9TwJ4D&n`i~n* z4A~T+aHAv!s+ejw)C0)Hhx;hlb>gFZ9qCDyj4A+Dv>0CQxzhL@41}y!59@}=f}lyT zy=0Mgh%h7j!<&ULof$u+r2_X8rA17dIv0X0jdSP-K&ILlIcJ7iZ$zlBwcU~C7-5L< zaC7f|j!o%V}Z!gB#B#Q?oXBZbMw20;0vRFd2=854h_?tEWKILXJs;Fx= zlKn;z6TMq?JPUzHft|yf^(&%!)B|M%SDu{u9b*wEHBp(l7KUs&+l#Qf`DvhwlqAw& zoAte#RRtHjIos~X zIn-p05O0uh$1_w~JbUAeA5f#h`Nbjs$5B*CxfPWk1Q-ofrK%hEkN~D8)!3}V4SKBe z@el%_wVASd!vF=*%C_w&8})(srof55;>Ej5?K7{3f=}y>Vx(U@ko@gd2rRBIe7MjX3EHm-3;vNfxw>80kj9uicYa9I6l<@d>#mne@dn6MkAN|&>rdQ&ZbSFkye_;Uwun4*6hSiX9G?mg+XEt z?$T6kUU&d63Lp)2S?|x6Mjh?v_#I7eU86B7!5*XPw3`xgMhg;&59G8g@kVBLi<;)u zlSIVW@x|8@?{R`IVG0akYQn>>PTvE!L)?Y74T*@B!>rpS_Yvf?N!so|l*M40uQl?FT`l?n-tmgv=(yQ#q*1v5HWIXJ+1i}5xHC6h z;Iz6Vy_avoz&u!f=2LPka-8xzALb7(-E~obHS=!#ic8w5APYjOjqU!yb$|juZ7&^K z-B$DljZ*|#L0+2Z=Y+#7lfZk7rDEE#%o42E1O^8pc(vY@r$C|#>M-rVO zwcU>4fCj@{RC~6*{|vV2bA1MgJM@_~``kAR#+wM|NsOp`^U=Iq z>%?#0vXby4=A$v$Q;lG{i2N_>{-4dD2M*}scLGHFvZ=3bYgInW47qeWdsm=m8Gwg> z@R{alfU|Q^WX)IPZrEV(BI(TsU-#Cv1Q3(c!;gz5{2t&Hg8B_rzfLFR^{I2N*r|M5 z-jw#EvU|}Dx9dI0N}Lc_gkkwO69nCtRLk%zCU)N1y<5<-TdE$n;v`rz5TQBZ2N`G%GIf%NG zI^eL{?8|Y5B$Ltn7peV&E&`CeZ@>-@dzp<_|6|Y#tZV z3@uIDfUY`-2-nAtd5}uC7WPAOwK|5_sJboU``bcfdc@$-I0Krmt#L(1+US zY1;{$n7yUfT0z`CzE805iBjEG~~7}{8d$g?VZ{RyC=L? zISqRaGPz**vePsM-#wG#r93w;-uY6eR?NN9V2(4KLevsNXW4JU`I7$K6(I+0e(F;_ zS)>Qg!JgaFw{lOHMO{~}c;>q{@d8Oh(K`UN^B{=5yRBzCY=rI6J_EU--azR1;dZ?3 z?V}IInNq;_#|Inp4qCNmF8LEB(5-?sl(iNtFp+iW2n0Hv05<{E{5Qd3PnjfNCtROF zJin?20-=VSd59Li)Le|)J2~)Gx+&@-1Fz=`%joC^Z6EG6f2wkOVye9%a)YOjWYoeYw<-w&R) zc`o$GTgPuz@)`}FF851^Bt8C~=q?HyyU_tNBfE$I^zXzWCmk?ka`uo)1AY}E#2q;$ z95h!(WE}R(9Yy(kM_GL260UZe1wk3cy6Da(j=M16S)1wJkB0qQCB#Fs07f z7qwA{Pn);6WG5cz;crk793~uYr63~yv_8tLm5HDv|CR^4Y#{^E45XRHJ!&rjI z`T*@JSd|1=V^si{JACXtc2y;&KHpP`Mh>q6q-YwtQ_V~?G@i7VW?ayIY*VVE=MPYV zOujZcjY=tUu?F!d+;x3#TuA{5XduiIJ>f+9PMY>EV`(xwKjK$3gHtFs8^qjGf*yaKW$*Wb6@|ZM6R+dCw1>*y=ff{6A}sTHyk*fi+$JoAnoZdcEvpQ zs}4ZDPN_P1oGc|LPuHz1r-t8oF2i%-T64)f8pYXa4;teQ_f-K1@ePE_2Fpvh zzpSHK{DiJfb96$Hk2S%r?T(|$Qo$=MubyT;%JhK5fO7l=e7?z0CwkKR1Z9nqE)-9< zWI_?+y7J@Sz4oUXwwE~zWv>Li;v16h!5;P(Q3aHjo8TU-^l<@cD!d~RZ*<_q+^1c5 z<844`Qa4u;b3K>{0H^~U#iOAt-r3x=mk(GNh}2D+ze*lDo>qzg!O}a+-guHt4LtIj ztK=iSGn=7Rq0X$0)g102rk^`+vwFgOFHiB9QQ~K-XeNRmDl6(NG5xXRD#v zqsryGPG(@yEcV@i25FS1(8kp@HM1xMa$UQ(eDzi1zN>_TzQVLv$J(NZ^fg)Km>xL( zz9lLY8|`>7%-9{r@3k_CI?8@zS#a3w=*da~Bf<1Q8^$Cb< zDxcbP-D@7f{<#|8_%l0AilK6=GAq3rZoIce0hu^RM0>6ML7H~njpYhK@!QSglZ?sg zVUPE}z0NbbDaq~{$)ISLLJ?esn{GP0p@GvX5!(~S>2*7E>ph;B)8bV^OQb?2+Q4z~ zz|XH(XJ~9l=wWZBU0Rxg1%Tekfia~-`x1o6ZNo z_aNPH_sInGV?JOou>x;WzHDu#WEqsTCzFkpeGw zG8mXm3F*-}GFJ5}Kboh{*MA-|KO_YLf%h6r#Mm?JEwtih>0>)0u}lis%0nM6^qlmy zJR*p~3|D910#%ZOA88<9Pa1{vckO&Kl`vvXL#%s{3Id2nF)oO9>OE;%SDtSW)ByKA=FzfFB2uVi*%yxI07AZ^S{??yKdXGt?(|DZ+bJ0AN0^0 zH2`#^ljp+N-@PAw-j0$Ou{v$qX&A#FaFsI&9Rt-&zRxL<3|Bv$2v~~CLT=bpMvk61 zt?bzynQ+cQp?9qbV`m1Fdz2sb5AxInp^@@$GI;k*J489wI_v2b(0Un%Z*^F_=i5Lz z8jXfq%hhYE7Wru|AAaLIb6*DB^cTbd_3ZxdW%;XYC~s0-)nj#|x~ccBPE7*~L9E57 ziNKsX)tY{qhOVlu(2c zGlffMrE?cvbTZoN^c$%H#LpEd;@a{?gr1yfW%iwu&rj{zg2UdxA3}kc%3N_yDhKa6 z;V$%Ty#Q5w{kHFPW;|2G)9&(Rr`am)qerLq1osu{w>VRc^h!mG>jzm{;QmqGYIIzL>k%K*2PI!N!|HGS~8EZKs+0yP& zH}<;(zPCvQMl{%<8IBHuUSe69m2ND|?Y0oNkp)KU@{kEFRf&thnGQhclE zz@p*9PaIrLQN*{QO=4>!>}t;*=#~tz->z_|)!Ggh}oPT$Cx=MtGPB7Sb`f_MtB1 zLXm1I-hu)`DuUJMz(OY#xpF3$J>7o^xrq+)pJdk z&Wxr$m2f1t$O>w37GXC!)Z{q}8#yGK%ETAVgC}P7@D;?~n;sf}ql|r=oW%x%nx4I4 z7!S?meShcJ+5nV)7EJh61(m%tDtRL$xe;QsZ^O z9?l&xO%~xxViGK}{%5oihL!4i(xOsBea4aD3xE^9_{IRNraSO8+2fl{r}u`8e~&)# zJf!5+gZnNs=Iw3`GH$`eVkJ{L>jlkz7sVlYflo8DgM15esTPQ!gvjqptsZA6CTnYr 
zG<4JIr9tuImVxQBg0+2UxSugQa2Gq@7TN72U#KtJeFm5fAFL|ba*FK%@Fn5Y^x8Cx z`H>3B`kCe&Y&YOJt4zipfR*1Pc(+u56b(vuISMBaV5_{hF<6to0Mx(y-Y8+Hcppxr zbwdZQ7j&s@%oR0tH-CbT_Ci;pKoO~#k)dvAhY|66BB~}cHr8wVMCg#DEu~Nzs;1p{ z=Bd#1t)--Z$npM;tf{88Ie_i!c!%@VNyiRn$~Atqiy5L@vvs%(paA|{*;=A&%ol@26-7JQk9-QR^3@%z9NbYBT^xhsgqFAY@+<P8{k~~j+-S|phmg$2dc%9In`&R9K9@* zMshN&D9__5oIJvvQ*w7c@)fc}eej{9C>u71!^F3)f|ydHJEkv3K3Z_1eP<6$^3BgQ zIMZ{#VLnzQL2XNdhK{eVlmtx?2;@7WX$ic%+be?!6F3%{3g#|w4*K7ZQqcJS_^}hUr$Y$?# zX3flc=9$`6tis_m5)H zMyu8mgFmX3cGBBPD{9cEK`(B9MjyYdTs>tq-F70D0jyc91YA5MU#`wr5h2*A)S}ycb z*({~@h4G80AD@}p`l^F}haf+HKprD?bMIU5?oUVB+n`v@UzmD940S!?|C}wn#U;h= zGf&KeGY~VytB#HR<}9!{>+r43LwUC@eUqvK$eY}@UjVhgYh*y}??WoGw%mkbowRCx zn-v_tsik$w4F6oX-dr3V+ufyoFVw?Q%O2(M&TC3FwJKU;ryaTtgo^L9)fZ9K1!S zt~2{M4R(cl3uUDua31}Jk9@8DFKP6;wcJZjjjU% zsFSes59ftl?4U#O<_Xs!o#V=}EtzukUYz~}?f6-@tB4|^;#qm~-*5-;I-nZ(j6E+m z8&1T)g`puz*tVfAOyVlK`y}=cNwrXBwZb#DMsNxOIPq+Y}V zJs%PJ+%Ev^Th-kf_s0Y|G6Awx%2|1u=J(%wVFNl;*lfl2TbdwP;77gYWwk%o|M>*J zD?OyBfL^;X!U+8_5pj{ZH+l5Tt)pe%fB{CKo`Ec zy1JQH527I#UNeRqma|Imx0C{60E4b`%4DrLUTe`9JB_?*@1P4^EbO8}&2C><<@?p-_l@=3UL#KCf*T9xkn!#67FqpM@R>e^Z%|Iy$57{9=82A0#=*7IV?h?8elbH7F4 z%>n=R?c1#i(m~?Dw%Hyd?8}$_ndQYGL&nLukKrJlVOt-tlA=Nmx#y z&*pC4L;u^##7Q6-h?1=OqR}iYERHTOrAb^n7R28JQpc2wpZf*D|4W0!WxPvDOr1*f zXo$?>Vp8_G)b#l5>ss^0u`~3du#C9h?4bdV@|LGafz8To<+wBqX-SuZlbvX^HTu^k z{2$pogTeSHfC!tp&yCN=$9HsbA=QpzD@XIxr5R~MUPJu1tp{O0wTL33aXH5Q($|P5 zz(Ah9`T1`T5ZHNY>9i-bgxjMBI+O(rzWwc%UXX!gK-s|xhb`SdzouyR;^Bw;AJ>us z*QPOLuZ05ZhzMu64E@J${!LmLR)8RZ=E04FU#jCJhXC}!%75`q{5$GVdiuz&W|+o9 zfYp$HviSVBd&+>8pW;U+xypZv3`-~hBVffF%>Fl-{yE>oWuEpjl7*G6)630-XJ_8% zuZAl0WT+%?5p};!p+~5B5*W_lx(x4m2ERS#)Uq(4a5&e6RMpbehN8v#D*n~Qge;X3 zVN*cO?NEwboLf!=cX_4=NBG}YO!9Ph_zw96SfJzV=E!#QuiJF;A^;$B7s_~_nkTu> zB`J3a&vWOnHAV738rs@ri)ao>zc;(es}SN^5i?qE^Ey9vIQPX4c7K**?NQZZE6}qz zcT@Sk5}qH1xD^~#_xgu1j2(>{XyyQp`>&b21kI=JUjDa(i-NFgiTCiOo`2tOMHpS6 znhBSQFbU}SJ67S&Hz)tX4tyw7u|W5y_EGQ&3{YZt8AlibMKWa%uGcMkkN+9i&ByDX zME`0l|3A9n4gKU^Td3tsg@N?@B-zmYulC;mbH%TJ(g*-5fn|kWgY;`zU^8goDQdGM zuxs?WRz!a@=oBvzV8iLltg`k2A3tZ&LGd?PKZ8kl56bd#n_5!RT|Gvxu70L)YV`WN z-m)PgCzc(vR$4jK>rE2wND*R|Ilni3wvhGyAXL3ZF|@>ncsP+~pY z{zZ}8m17seS&;%VE23fONm9CIiBEmH{><-!ylFCYHGzjY1=o_>6;`=#OKgP!>JW`L z_HIxoZ~*7i{@tC!`kb<;`&u<|iRIcQ-J@mCsZip%e&lB7xOuNC^p#VZt2Gx;njnA|r4KiK0MssO?6_~$)xHg*33#Sj~ z4gIa$5ec$Njha%Yk~zUH8bM@U4XS~(*pnHgg0)@n5$mKtE0sp!G+JGZ+ zrejJl&j(JbN(pPuzuAufL^fss4QR=1kMw0VE-cP9Bej}W?qM=3%@-4xvXq}Vaz81z zYwIz5_+Q#AoIT`(O$_HKOJVKuf0#sSPviUW{xFw{zhM5$Va#ERBVk^e#*Ev?McZ*C zpu3f#&*9e&HWPVIL!M~8nh7)CELyRfHlO!6LJ!t557AX29clr979*D(YJ;JfKvMG$ zuKI)Jjl8xilB$E$jD+G%JIiN|N=gmf*I(X?wXb&F)P#Q_Kr}q-n%Q-<0{X4XuCBH1 z53`QW?WPXH7p$JODIT)E&Jpm$M{|7HV1pT+%+rE{(wkzmPI_|>)L*hpS*A}uz5t93 zN^Q{s!P1dkj+;RGOWJ9>0(CMAnW25Y<@X_~|CHnJ6Ppb{*_KckIDzi~#g4VR`X4$h zhXmy8l?WMMYjg1|Xq2v&I}Gi8LF^XeaS2INfwG&pDw9>X2!vjCYv4%{azyUt7w*LS zZ6Uo+hDyLg+g${?kAMemB8{3p^b_0isxM6hxYMdfQy2%0>d)83Z{FsbZJ(&g+iqW9oI3cPhc+>FSvX8W3(?h93Xc7C<;>Su-$zs&$CsvG>g!#yZ&*}PGNU% zgtCLv8^+HyIc^CMw95l@sdC#nj-QQBH*L&fg07G>jWhDc)eyTaIk0^;sR}1sZYX#G z^HHd{`TnTpRUljF(H1|DTrH-0!hYUX+&+bE^P${1Yz~b@ zu*G?;`7}T+&h2TX_D#~a+S&R%E=zh4zw;)7TfG(CcdUWHuk^I(qgyorPItI~!3Eka zCedtRHf#J9LK8Pfp8JBMOMt__nY_K5~Wmxh(1Ai6M(=03Tp)KK~FkAk#=PaoOs zbSXw>sKZqGR+ME8<-I8@$diBND{9A5?ISub9Zt(oJZh?G4J7V-hEHy?+wZj0f9w#o z_7}h_J7`;flms?GO?H{*uin{Pp9DCB8V4Out!`%N&91BInXjG2X0ic=F==7edJK-o zuv2@~k<>>!n%LE>w#GS>nhO}95>AI+lh$m2!ERUbc46gpxq~(;TKJq@M`y#M!OpYt zdQhtg$`XxTUou9N(P$S3cFjC$#pbt8h(F0AIqKBtoOrWss1It690ZWdvaA|SIAMI{ zzNH)O6ogJnEsm_~#LR?oV=j7f$uDjyZfvM{Acv$?{WWlkmb=&#M$&+ZZ%GwicXz%< 
z$DRG5)UTp;VlK=IhE9Rq%K!YiV4e)(#<%tZ&UI~?5++vvRa^GJEB-qwhzJ!hgSfuTg45d={6_tq_1SsQhCug#>y|PfKka5;2yg9A_le&E zuXl4W0Iz{yd)@556{v5MRS)~+6JFAQ%?=Wj*MEs&XX5Q4vWMt!~6E>*)j;pA7>pdYf8oN98<2YoX!`|KDSK{@w2a zC3NZlYz+^_pLz{owMArK>Hi_Km?xP9%N{peX4p@=ALJQ#bv7v@q4?UQ4dcjxXNKSi z_)H93hVB9p$1cDBcu_YfT11D(`Gz4H2`__mKVv}bb(yfj5Bhj%xQ$>BR%~N(Wycgx zv4!@TlDQmCx1u8IrVV2OkFGenPq(MC^8N7$V_K%48grGCSwO^e&vAZ{S>A8aOMn3q zJ7?$0z!~+TX%_d%DJEePvO#$)5>4MY^l_Y@X>Zw|C6zv>S|4OGkyV&AeTUBLTg+~F z!0SI!n4e;Q{0J!3sl$QS4~)E~+?Y!nN8z~$=iw$DPB|Et^Uu%0jNlfuJX!av^e&fc zx9xkT3}YBM4$Xm=Rom#Wh0~#_ALsz6cIU!*${5)!zWPir!nDA;PQu~1I-|`zW}p4P@hfm`T+;qs|REBpR30GVduxC^4`4e=WIpu zjo~R~5aPS*&v&`PlmTBYmQ(iO*CR`mfH=l2o?l7(wubJHpE)!vU?`?-A+B-5bZjLi z4gV3(Ss317aEcbkaOnJGh}Ew)trQISisvUf7ADT))`6ntqm8KHJYZ^4OpE=O_ku9KT(|LLE8yIn)T_~LbrjQ=RqUp+UL6(WQhK!%LW zINA9?Ts!@=R`~T9#F?IeA6n+kO%_Y)7MlU#yt;{owv@cE;pa5ZU{~9{%X~ZG+Fu2oJSV#K&{BYV7TL zo@3+t(b5Z@_0nKvr>$39F+JgkoGhKS^_eg}(qDFVj!oPrQ2&RnIz8#Cv_R1yzzIP8 zeV2O_0Zgm!x;1V6d*41o0vWGw%M7J`f4_c^8o)&?L{a(^0OrCdUjMJD1mi5=iq_=)@e!u*8f8}Hqu*6Lu$Uwx_ z8&*9<7PO19ck^P+Ccf1ryFH-=Jg+X&>qRDc4P7xk5Sga5X2=w z4_H}1j9uG#Wd#B)#tewG%T|-Xb$woLLMCOu=Q90qGk+*t z@3+|SNOhl&1|-oNJ*3hs*h|?rCax%)qH=ufytgr3$L9YDS(?ewpFj6jtFl;549d*`&m{M6w!ePSOal!$&(%Ce!7fB5D&d*LA(A}(Ti5|u zR^HsqHJAS3;b`>@4Lw2-b%3O*;zQ{E^Gx~;_&I_BogcIzpGNzwX()hy$^M+)|BH$c z@b-J+yHd$#P@m-1^~5=Gs8zlPz$>T`xUfqJ49w6O8v1>!|9pTz*^}AL5OF#DF2IaP z1s>INl=6=fgY|(sP~q{iKi#g`5QtpNvsQeb^czD1(Ehhn($)m}0(>2WvIl?X8+o6W znxr?t`zv7Q*SuLf3+Ak_=|4>W%wUVYFQ909gY{PojHVugDM0dFk( zG`o=ID24Kb;Wa3^`{(?>hQ0lp=?b(ywd*%|Fuq(4Cn9mjXRNNP-3j%qw<2)knLko! zD?^yVgR5NofBHgLjm`GqchnG|0wm<_R>h>5u7VA!-B0;u?HEFPSx?&Ms|gSJoH6Wm zaAI{ z>5KIG<-6KAU^j+BFRx>dAbYQu-qn@ZuNu!s>c#M-GxFVMJwI4L5mV)7yvhJ3Flf}R zaJ(3t4R_G!P0_8GM_Bs$iQeP7Md<3vDVER3(c;VML1K=3P$f|IVEk~{c2`3WztI=Z zU-hHN<#GHtzuMuP!AC-05MN7LGFT+t>Q@_U{e4_UYwn=5VwE#Il6)&yhM;R6^>cKHf7i-28w!Lvhx5iM?PXl`Ne?5D>j zZoAr_9UhLLFhZuNa{!g1%-n~9AMY?P914BOv)IZ>q|ccFcHBll@E4;K-uC!n-# za%Xol{AMb=ba!K^tv^{ztA{C=Z^_BL=j7oY{w22^UN{l2Xlh!VS)*p~V)#~ZsJW=8 z+uap{6j4JEUCbfKu~8s@Gi$Dj=;!c*jDGK4Fwb@JJ+GbFqBXB)0=SVr$rXKbP+hF{ zndKS5aPtqOjmGF#ACUOmbsOEzE<>g^_G1|>`IeL???x0YPwo4Y3Fd;#xkriv`{%^B zHbs+lP1q|RI)7NWEdfL48hzB0A)Gy6|9F>wcUeFI6p-Wk4>t6&;{ULrFK!3D+{!rK z?~y-wLo7yZX@Q5sV>SDqwW|AWR|Ymlgo9G>DgNbXKO3eDMT~p<`ZT`yNJb5XCAsIN zs%OSREqqjzqF}sf;KZ>dcj9#K>7!O*yJc~8-`#+cf5jmLyLzpCx_o&wn9pJ_pwmyh zKU|_RVie78=mu1g7N|Kxo=!EuVrS*3UvY(ANbKwF#qP9r z2yQh7l-L`Qu9%nnOh4WqST4s566{`FOt{PWGK^nBp7M!o+aWW`wpueUHC)}Pl^QK% zo}w+EUcUIbaenJ^T2#c4GgV!3S7!!eiC2$4ra$DMYa-Ur4bVXHrOe6v~WA#sB)mS|uWOYF3=BN<_xqN7RUJg$&@6P=Y1`T9NF zdROSI*CAYSXUfL`gA{4wdrcvC>ocPtmf>;A%!eSuqAhP-igHT%D$I;Tmh9Hy>XS#A zd=J5p`1u~)c~WAk-&O{}(?;^yz`T_xJ?jKOmO=7ze7a2TfCrTdIa z(p*oFG4Q%!c3=T}d(G8;Le=^BWLH5Z?fu#z3kroKKPKKGUihJhDqO>2ZjqKKKTgg? zdEd`OSewM91|0HH`BAlkmmaDRo}&@( zI`6m?rX$1N{_JzP*;F+}=~%zrYt+CwHh)~7Zn}*is?M8b(BvX8eC0~fYdAh+v$MH( zo-ZZfzdeBk;wSLnuB0$wfAvX~jqR{Hfq(pb*R{dW7|lLZR_NyPwn^iRE!}fcL1vD> zS?9wkd8Hn$wfxiE z#%47eI4U)inRcHo#_%WQn-mCQaL9Sqds-cmguSc}$yYiM@L#7IaNUkWe`mIzb#|$M zUSitUWG6m5OMDt^MC_AL1686ww!8JXO#1%X7VaVGUN)nAEry7v1*p8_=#B#|r)|J87vL-8&$T>_5- z_J_Rtg3vGb(-;N@0^Qdv>?OL#&o!P`2T$8O*VG@F$il64g(G-mNmt@iUan%f^RJ#- zc1+qUpr4-?js2t{NsT6y*pZo&xK@8TkK;qjdaBzRf|KS**K4X)g=s)R$9Q$fiO~t< z>k%;?83b#>;n)N&7o!SuI?XF@JW?AibB4Tb%;w?i!iT|X)lLst7Bc>%R6~cKU)GGq zv4OE39TX;Rq&_@=`Kq-SXQy5gfgR6`#^ZqYRsly#_IFGE`W+X1-QO2ZIOi63eH0|a zR?cy=zOJWb=^Nq;_;;@}TUE?NNf|F(~GP@-E?YtovNXS4Z@29A80ySTJ-YT_)Fs=#Di%egLR? 
zyJE%dc~@|%(DhE3g&;-QPS81RJD=VAVCuQ;aeYXKzC>wxr$&o(pBpZngEM!hSL+>~ z$&(#+VjODq`<$LRJL?m{?!||0XH899TP)=bWV}I6A^EZ}k3SnF zWp*cBi@WyoFBr4Tz979gLKt##4NS$Ed&)x`CV^nHShL4DvmpE zSvja-E89+NuyOa(;f#lo*`|4i%C@uz0UAPMH3=pmS)+vbmnMI8{bw?OEHF z9j$UIZ6{hx%DZy54!g|hD9JEw>!aNfoQmy=^tK>Tst8wfDFtP>+bF!(s18gQthrDp zqWmi-vGd{IuKBt)mx6eA8%I5u*@}2`+q4Geu%|R+APBH!6(F`_X;tRR2~XY2FA^S; z$P9oY3Sv&&AhvT);pMcrjPIss)e`HYehDGNa%Z6EJZf&6VOleFTa21h*qs&wgpV{F zB#--CWk5oWKOJaaQCfbFJ-T;mZC?MNseigyUo6~4w z-9JpcI=T1f@+)fH?Zk?&;@u1n-n2-wsCkXCIOL;3r^U*qjRgTg@cicYF=WYQt_&dF}Ug831{1&g}SJBs&_(arXXW8 zkfWva{jAGtLCs)TPX{;%%I-Q}{H8W|CdiOyWX*s3hW-lON(4+?e-2I9blUxKuqEpB zW0~^Enj>X5xArpHpFi_5fG$8^8%n5_l$o>LsMIikHaJ0{8$y7&f*#-PV&17--Px(h zC(i6bTM|i!v&@!i2nu`*?mHVyu3?;j6h!rJb>mq6#;bYvNj}X?)#P45@P?;oXz|%s zevhm07el;KlI@S|MSL%U>>fYrrsg1Fh>Wpazsw5?K(gmub38cXP3^o@rDdNq11Ek* zz&TVs=W3sTMZe}ov{%Tv3dPv*dhLhgZ2qjqzIUrZK11H*MksPf>blA7O}H+i<7rc9 z5#_Fx)gekQ*G#F=>t1^K&g`%`F=5ZySIJW*u^RD(cgV7^ktkFT6JA)>GKV+!i;0`i zvJ{ZLu=S}#XqnSrn=HrQ4NQ76dYzLL56*1D>cHnc_qejh!zLmVZYu2{4EW9w38Sd91M!)JP1 zx%6m0+5=kN@(bsiHC7bjY6T1<3#5tGwcHh%WsF?EQZa-e{DCf{wiACU1iz1K--L2L zP`=yomFq)=$r#=8i4#Le4vsk{X~uoNw$$h@Ei*b_y3MT7c;uz)NfcFvSkYa48x_5- zWKiik5a(91H@3wNzpI!);U3rDsZucgwF6g0+;Cst9+`Gt&Hj}DW_v8J%NAS1^Iq_b z4Q1M)2aU8b42=rGUY6;b#d$(H>R|3ezUoIGfx(j(w#|*Uycsu@c~co5T|Z@jgsOIw z!8d1*Z#HYcp^d&VZQCbc{9JupP;zoMfhb56G{Q(Zf^z&h@*Ocgu*EI>m z=@jLgMdH%R!6lUYvo8J;(UNqVo@U>DFVKP67ch=t)tM<&iqTr-YL)LKM!9 z&2^I+9In46WdAhI@HkaxCVJ4;+#S~Uj^);q^DJdBZj%sX(BL|RuGp3pk3b!md>NHA z;fx^)i`g;nc5_8o1NZFR;la|-_Dz%+1Bx6te8C-JQsc+eeH|nGuIPA&k{*Fgam1z;vRWa)J<& zkL^?MxoI-qHi6gmsi;#W-7uGC)D|5%QQpd)Qr7y1Qux0sf~XVHgK$cYXV1!@?v_+5Ji@vrt_x8K1M8OU2X zChboe2C5IGYHDR$qedl+(S^Wizx6tQv8jT+*mNs??JxLda@yUfj(f1k&`C{&U!LeF z@)@b;0wt&M&u-c$&y`o~Xi^%$R>;dZ6f9$})FMUf5amI)Y;JHkXo}kAsbsCNXsDEL zFm@OSE>ayTNHQ%*x-N+BDX@SzPe3?<@Hd-&7lUx3lU-Po^~YcSx3~4wXN=Bex% zi@M+n9gHYucJX|36- z=4mx8iL*R%Xm3p}PS~asXCBUbh!*Y+q^h-Ib78GT?r?SsCaTd#?^J^r#6obBq!4}B z#b-gsLt0}Ikw}hFmU|o@X<|T;k8}agj9^;e+iy}=O=T)-RSevJ<~4d2v$pcydq$E; zW4vQnr0@Q4xabbf_ylfI_tzwh9_#JMzC%YpLiIRXEWKv_@O?}m;%YyVSR!DV|MqKuw-EtOov! z^g=SXQMESDO@1vSjR6$vZH3yZM=Z!;O3}STTWvxN-rRw2!$7x{DSCsqGFl^d2>_7R zbZOsiSlLbJ)IHD3L!(h@5)Yp(=IbymoT!Wu9jHq2v~bc;F0yW6gHn$U{_W9gPv|%< zW^Bw(Tp8_pnod+6q=MONid2RJXkNiZl?TJs0<(4G@r>E`G5h*P$dHa;*H$EjgQI2s zZ)4EU@G$@ech^~3$X>Imr^;b`<5FlLMpU68vB@fteimzrfLni*)x@I5-! zTJM+U1ua)Nq`y4d&B;6AoFq`X^;uV&`(E_h^?ALynHm>nm?CIHN28dF=bb{yKrE zHER!^ho6p2hY;qPrpH_P9;o-5@K~8VVXQ52WKRRXKU9TTI{W3aUSswne)TPpM z%HHIoYbCKI#6f>KlXg$CMeG~ZSrTNWVs$qG&)^!GGLC%x7rSBw(;ww4PiC`P)Qt%_Lhh&s!#gYxlM(~-N80uk z4RFl!BwlB-i5JBDE+V@Z_D504At~4C0Sv}42rd(Y4q;TDOD+-yr(tC1&pyIZkEqe} zV|?U~^{sw3;RGglz>M`$Mj?G86f+rY+?Hn>@eGPIWZ_weH?hz@vm7~Il~NP)4+Uyb zN8^fsN}_$C(v>T82CQx6=a>Xly~6jfp3FqgykSkk_UmxIZqrQM5W^~fsG&@K314Cf zB4IcQMGy{b*sL`zFS%D)hq6H#55m@Y`;}(}Ug3gXjGMcA zs%X~8>hXiDdAUg4-MFIwXYLwO$Ao2FQHBAMlq^B24U)YhG`QMxtMt2zn^sWcp6v8T z3nM>8qf6Mw>UR;s@knMafp4N9{PNJc(?MTrG0)zjx<4p(;hnIVP}S+7SNrCS7n$k^ zzNLK?io00vEFB8{`nZ5eNT!qeAOmToVZ_PS=(q3MiIW*AkmU|efKyPdrahlYaQ-%0 z&zc;fH{a7`Lh$*=aY0Alx8wo%6PYkrnnIo6c@Oy^1m3bqS z$~P6or9mu0afF>5AZNJtX%QWb0bk|pGuwdgF=wZ*ur5OcLX^7Sjm#AtvO@=`A270v1oGTJ>i=>Ef`K8CK*}%kNLNd>Gsdo zf0Q#awY4blIWS$9PN^Dq!y0Vq2ob6k76)M!P;?7Z;G2vDa675ncWFZG?CX6t%l`V! 
zola7X<_kU}ZktTU=NMp$YdxayOD78-36_gk5b_i?rwTs&LrryHh-rm60K#^@ldzz* z@*6@sOZQLYB;~V&mDk(^PS@!kq6)J+$&b?N=d(QT#O8U&fkfznWshshB}sgnZpl>) zcKxvbFr=bxv`w%emtlZgqf1=Wdbyy~$MO?6hvAc|_CoX~nnEuwyUC~rIrA)nK*fzn z*#;-`-lx*#mDhxtTvo8Ks3H)8ZeFDv6v{M6XU3g%Ct`yaSs2M;!?Tv8a9qP2z7mjd} zTKu6#^#2Ex;X%iOhz-Agr~AhNp(CT02!R>nY(qd9O_{zz{%jzhn~(T!xzN|1FCt-Z z%b%o7BCgoBMb~6Z7^ zro#7V3Bhp!CO*5@7E6)xRqNP6UVR(MAaq8&h(~jb zsJfQ~GlC-1w2(3laBbQ{&foE3?A(KTvIfwl^Sbkyr)zcsU*_!#j{D{{Ojv*B37zdZ zz=b}LQCXx7!TzDMH%$1(VN1&R2gFGP_xATt_BA7;f=_)$mkS-XVPlR9itkodo4+c! zQNXOQ%)ZE&ov7o(Th9@9=>1fIB0TG)sO)rOA_`V|%Sk=kDnJSHA?2+h)wMTk1XmW+ zfkO2JB9H@0gKKNP!yT*hf2sa%6eXBMdhQwlvb0Zrv`AS$mj1wG*6dcu zYUiH~-+%c@3%C0xGQ_JQO%Lk9uT<;T5S=12HdAoirv)ZZ=xh0it3CC)QYSA&nT`A$ z-#A?hHnpmt8wvTz)^wutlzD$jVk)&9br`zn`*a>LHfohE8!8QpmefpOY-d{jmiC}{ zabZxYN##CX&A=Ttp-lxmiQ`kcm)?&k}v* zWG%C;M_;VM&&`W%ii=#MglH*a=`Y5)H&h}mEMtAH?;ceYl}{T#b2SP?c9j#hC@ZQ9 zeV`)RZ`k$d{3TGBYfuEo!!g8|Yr3cvv%r%PVgWp6P&(qJ=PtkIOPhspCMp*Z9!UFZQx_pm+fB# z@7dmosI+@y5PE=MOVc&S5@g-cF5%8p#B8&OJH^_Dm2xsG&9s`2IEh}xI!E5PH;8V8 zG7IE3${&=5uFL=rF7+iN}c8}XCxr24h#AT-i9 z;^sw=;<8^D>WqG-QQ1(ZC6acNY0<4BABQ}-gg343lWV?beU1d=%P{RY(wV_!roM@# zY3^vr`}K9!=bSt9CZ&2(BUzbPYoYK;4kv=OinB}^p+@wYNq1&|3pfqjRk!}gwMm&T_)(fOo`m~AxV9bC?>V`fGS zq5_)wKE_(%0g`t2(YNW~=3Re2fdz*)@362rLnpE{(gzRjl9w&z0d=V+rTpX&byXns z)^8Ho(MbN;(L_@3E)_3fM0cO{elpV1{_LCYRUY^f;t5m^p%0wO6cE_10rRRxTsvdBLsnW8sg`#9C5DI@8aUz#BJaLO{hOC?Peo z+=lwON8y|(MmXRc9kVpFqQSQ})mKH6kozoPKf&&<2VUb7z zb@nmMdc$SccoJnLyA+e*LbQE@@JX~Tm~EbMNeM#7+I$&RRlb|FujH?DqV6+s{x!eh z<(j3F+U}x5?n}$r%N0alDe4C9Q&>MIhYb~6^UM8u*(5zrqUAX&S?9V@d?`i7*KrI9 z4UALhg`2Ev`B-bIPT|gPflLhwnS#jpm#@!|1+=w)zRGDt6?SUBrY=DJ*?^)`anuSQnm$L+41Su73JNhLE=Ocl8s#nli*Fb{unV;lNC``muiV=C!-?`P%*vJyTZmM8(E zHe3T%byi6~74qLf#6OXRw@mmmz&zbCZDH_!|Ci5m$d*J9S`OBAnqct!+1Rkec>SbV z?qUp!rEKm)G@l+%P%`yr@*jeNs*>QYTUST*^li+Gk=KIiGW_*j*F3a>C|Kl>=qLgl zpGLWgLBmw7bRLia=K;I4^&aP8K}N(B6T+#pE>24yZt^;W@+O$L@7Zd54C+Oi#OVS!Yb-0Gz4g*as49Sjt zM8+FHFB8{fbL=fI*~Rh`o3{D^^}7bAE(9K(l2V< zPidlhSfZJc(jllxcA@H-y|nH2Tr2~tJmF7hi=lOq?H(&L_~@>A)0Cj|Wa-Msj11Y& z*A$J&)iQM02GP5668FLc$>xoudEY|$uawFjT*%i_*LijZ)g2P?ItAk^#0h57W_(K` zltINtJ*W{JFd2;K#Alinuwo~U%2N4-+;`TuO~paodc3{yhSVZ~J0fs?BbWgVfaQ4? 
z^FospThb6>PILIy?sd10VN~5fwJkU<94R&RoSG%TR_Z#F zBS`R-?GV}1uIR)NvvO0OAtDQW2MSn8KpwYIad@z?DO~&Q^ z+@`LUGL3l8noJ;R;XAtaI{IExdw|eIQ)xLah0;*R)IKfLc#6qx0kvC82@pCiekYZ5 zA-qvxlVD+Yh~H_;Q2SU}1WUoMcr2)O@>E_Ov<%sEar%}yI^ypl;7G^O*&kcH5KO>H%A2cMBrBa67lF z;H?_wbXKKh{a0bI0pLR)5Qfc(sq}_oYDyp3TX72NYIkOoza@k6{p{ak{MyEr+@&7k zeilJCUMnb=(C^>{d7V_#SVGd|+NLQJjdeJInqns^D1$f|?~ae%X%QK}L?rJ|Pr=N8 z(E~*^wq=6_U$lJr)w<`Ve>)3eiS#o6Ql-+iCiIQBRd*6&gn^K2!LffrulE}o?4{=l`RDB3R@-08DWmn;-C<7$r>z4K5$d~}K62a*va5dex=g0+4BY`c$&c=g~`mjHwrx}b%AjPdyqT>X$ zJ)_f2^Ml8g_~-)sDgQ8reo>7PC1NPbV;>|weH?ApFlau9C+8Y_K zG?8gJ^W`FcJ|u4SWNX!Fdh`r}zfSUt<4;HMHK3g`WqA%oqmNLlPBQ^G6z9$#{g~yJ z4^2q2GvX+X&qU`@!QOJXk_K!bR{f^X`qvx1s`*WHto9I|rsIQgyCe1J8Ch4M8&HyI zg#AmBoE8=nJ_>Hhs_8abl6G|%cJy*(O+kC9Otrtw7}kaM_`(zUx}Gb$RBO)kH%j=& zPF=+Z6$Vv$Hf}D%&-~tV4%%p3ghug6ZwO1|r4a;6jw%!XI97bM&KA~);+!eI>5_NQ z`MLL{Ar|swQbe!I$gpBHZuPL^_bX|WZ{XxvWtRCh?WV}p+2g^nijOMStlD8O!ckG1 zg{^W;Ia|M$>kirqx+A?L+tbS6p{^SkOv&;)!|hYy`M>n*lcqx{M;ksEHf8Rfc^m`=Hi_usRDXHvw@~7O~^(PGI+?Z@ztsD zD55)@M{#8l)!l+0M$^~EVfW#F=q{u2wpV(;b*9RP`}7nT5W0!PdhW}O%=D>B!T@UL^`LlytNim z&oC4mTHi?y5LPEx|=cTRUj~RW0 zNc^k(^fPMRF7@Cdrn9_0)ihoK*N(Uj>H(#?>_R}ZalWFg-!l|=4V)L-RwpgclCuI` z2B8!Mw_P(dRU_DR7cui}$NRo&{c!Va6<4{087OH~hRpo(R&q(#3H-Zwi=@EOXZeIH zE=lzmd#*f{FC{zgOin_bf5UXVy@hVJUZWafppHI3dP4J&`BDr4wvkv;H$MYJqLN}H zWABFZ5!#4BC&0(f1L;zN)9}~xPjkQ!bxVED^8Nw`7K#Qp>?`o1W#GnYnsrVjS#iOD z5Z82y?RA_tkK`}CTFUN%+Oac65yMto|I%{zqr672E1<^i^y+3z`JrmH54b1W2ZnPX-N)TnN)HdL?->Ss+CXZ8_?VFpnH@-mmo0su3msu%{Nu$+W387%`$)&t=eP(Mi zk55S2OIF)exWz%tbtwWAiOefP4bH(%Rb!u0kLH^>(XAaiNuOI01kNf2#+zt3E^)CC zekLVUDe%8z4_NokxD6%rt^r9XDy=IJuCqGniTiMgJRG-$G)qDoo(wZ)?4!VH_`o^6 zHTc}J(eZuN-ixO#=)=zU@fOP`B1GqpQ! z-tlnCw&UXjba|^mY46)P*JgG$;_%4tol#$lvQ5UC^ZYt+<6>4-^sB`UtZ#(S(7ITY z57gV;nE^}tI^fmt)yIY1OZ!(Bx^t2zM{Y@k{`vPYB#~BHU6h(i^(>XXAn=NxQLy@u-k|? z2BsjUN6@2@nlUAa7t75iP~5SqlOo@9boYAmW3g6p`ErpaVGAJi-bDLC{sx?=@gQ}i zY2(+wJ_9rmH34R_8GapkC)0b4b&i-qv@o@UB54a@8`?MGAaHp)o6Qe8782oVQ(}!! 
ztw$F>BXrupKteiupt>6WbY@5?J-7q!f|@4WZ+Uj_7#wsns*TJY-e~S{o~PC62;{>Y zH6Sy2=+517ESth+B5otK?_SikG2Zzd7k!+B<(VStIO%m=g-h#_>UU>axrmN0l4v*e z?y}X@U6>4U_5$sK#fXxQ!JdsI;{(9lF9G^3-!3DrtgSZl{uxogD9wMQ?`spYc`;(p zU_P={HXd9MmVc}>Z;c8EK$+Ba_?%R3>95U}Loiq7!{;^|Wc`dYtXh|o&{QZZ>SOQ=(QH9mj-0ZM`I@z4i7e8 z*Y(%e9Q2asS;eo{PJK?b-F;Y^I|J z7#`GpHt_W1>gN;Z6mbzpuPSRSY?|KS_`tB{RCH`**(+2$g|&O=-fRa5=4=@+UxqUz zyuxvHvXHX8PzT~HW1<6)r4~vW4O0uDW`GYA#fHNkZuD&+_Tkr)jrQ05RkNiYev?B4 zf%i^ppCB$DdC@0>T)V38R(-z|w+a<2ueBI9U9AGg#|bh2HN1(R>H@BJyRx=9t+Qs8 zMRBv$&uf2J>;D!H{16`!9)_jY^hubS&5(;Emu1W+1ReHb@VB&#duK^8$Iyc7Gw@f# zhXn3v8~U1OFA|k;td54QY${|pBa53$NG8OF*H;T6LtGiQ)|@;xq=26-$H#GC?uNUNkF=Rc71zY5EH}hk)qtv|W!zBg6*<@% z8!;-C=+Gt?;#QI~G~mNE#(Y&S_h|CtW2~c}0FL3TNExK?yH3V@I^yg(PL;kH{gCZ8=lm*#UI<>a1N$Ed!u-6M^G$wix)8` zYtGD&B!CkP4xeP2$yhRxRivo7jf_6AJe$|Ue~|6Vn9Pzh^uQ5SaNge__$0&5C~*|f z9!uXf#~SDZm+M2<3>6Z*NqP_KgpyZZBz(vg#8dhKA2nl}qp{j+eJ7xbLnpd4+W8Ue z{SF@jPiy^T-Zm11iZjXd{gFDvXXaCcwv&UX<4ZC$glGh+*l>l2J567CN(&S*=zAxK z8roeBkEZJ~UyU6xrXxAs>#xayZ`$D%{Dgf#9~ivxu>}Xwt*m8H?fFVU+ZqW`ZS;C*zYV{Y(v$5J8fgpwK^bjLYs~gc5e8_ z2PsB0ea3#NU3zX3`tUf4v~H`DrdcoEn6#9YT=fu315pQWh)elK(RHN?3gQ6>_fZ>Y zl3B><3U9Yfj6CJ7-|7nqzpmRiF66?G)WiBH6qoZT9mJ3S-WcT)-jPN$1+Dh)J>bid^b)b^5=3rP_&*mL9|A%-2_|BN(pz(Ko?U4CGdPjKxLL zMcH+PzOvH<0_=)9AnM;wvaIgDXbKR1>SD`O`*x&M;q`zU@{XLagYu&CuTk(GJczIa7)SOyf`E&-Z^)368 zUN*gs&Mh6{S97iJM6vWLFuF9MLFV75vu9@!ldNAIyRZj{(vHoLsKzozy3;vA3QrhhMiKVQ{7R=fa}>K zINU1g4d>8H_~cd0;5^)O^LG95`Ov{Y&E*S;rJ}9tbb2Wr%T17`Om7j@&&Q`rrjj$f z+`q-E!;81Yay3mfhu5+Pei+UpUphwdh}Sf0Q%{}zG_a5#O~xZrd`!|5b}L9Ye7-CZ zAncN?uC_F_UPe-?9yt@s>FLz0R*Y;ApBcG5&2qef97iHluS#m8Ir;!YMAS@r31Bht z5l*TU!e3|R%I=!K5?7knVT2Li$otwo*o(7kq8 zuNBaYio0!#KcEzD9!x<~&A>l!+^F{|GJ*25LxL@rKS`rfc-ZDwv(2E^uLJWIaa!~e z3;?E^h+W|Nd*&UdFDnr8scqd>b;$9Abjpgh}Ib+qK_`F6A{XBX6G zFyT{Yid`4S2ie|H33Iv8PiRkkK57D>OI<$}2}=-IwtB6BLk~&dCzfsl{-dqeo2S$2 z2V5HU^TACUJguo=T%67w>2SfOVm$gmS7F}`(N=>1YM{9ujcyZ#;A{3UbM@ty@T{%g z9p|m=**4F^5Ty(mbD=l5K9*>nhYJ`(PQN;)I1SO>86SCYtxFsg*J8mMTVUj@aKN_- z2!?30>t>+%Yh8~oE@Ok@Y#c^?f)TBKM_gu`7%XxdUYA2L@$CLf(Wzx&W5h|m9F7?OEb^_qQh?`%+7aFZS~d$zCDkPhv;(s2mlZ6N}lkll4|{K zq5kp<-_7Jc zO$lLsVHieUr=MlC=|O}!@F6mzsrB;ROy$>^yt0l97PMmk)RM(<(<7S6;PJj`II=NE zMM_(2uN0<6YC%=^VUu8gF2AbBEMf5ZrAv!>&j@9#bM!SCnflntd5CG$%m@78=z~MA zH!*h=^00f57@l_FWgq}MmW?)SyrUt%@#YYe{|I)E6%ncY#>5%dC4td8j<6oViUEIa z0;iF>rwbiI?cj02o`mv**QbT=WiFXRc3VLw({}jC+0}TE_^kj`41S^~qhsF9=3i$r z$yH|*Ly!Y9nw(zDZV0UShzQjULIHbYfCtzcS_TbgxlaMjLY#X4wFCwV#J^GS;E)&p zl!!|sujxcbDXOIywH|Ia^f6HVS}Xe+LuWQ1-=ym({46bxnMZq1uCC~X)r+=&5}n0e z(7W>}DbjAdtB3vipX%KuCIh3_*!&LPOr$Dn(Lco528OfH*$G8lf_de}GEWW8E`EfJ zJ_#D;ewS~bKid$eP$=k9WLntvMV+lqf)OJ#1bZV>e#gtM=v!GeisiY_)*3^fw~+29 zXHv%-k(b{!0h;p({}`<28&48b$77%+QMhPqH- z)&K635(W*jghB7c&b2an{QkbwoJO#Nz^sWv$b2t>|JPpd8c{Myrb+TO&?vZWrst`)?UkALq83tNy&UITzAINkgVOhp%V${d9lUN<_s=d&77dzNB+mzH z6dLz6C2=4%>&G|E0DQSc%HA^~8g4qDaVLox=epc_G=di2jy7~=5M2&{T!INqD+^dY zcJyXdrXkT-TT2Q4EaVZC6r{GzFR1qbbeV<|SUCRd!>3W|RqYv1xSPe>er?#JVvC&GHnV%D3FIdg8!Cdl^%-5>Heqpq&5UA9nm zF#huy93KBbsy;apylHtxhp{M#@fvg&LeDzc){;=aZ6nQc6GY(&LmV|e(#_4< zum{f@iKs5blfi~(RW&)7=V|Q`Kvrp~q=Z_(38H~_39CJe`tL@x2^-Ot$u3B*GVdI# zon^<4iM1Pdzj*Qnx zNdf&-CH40Mp(%iSC++JYz<+RL1MiL`{Et-F$(4l$XLGSShrs2;8JH&?EDtv<8S^)LLsNdHetjxdfqVRTPPi|<#k5_VGkpL#HI27lAr=@y+kyW{gm z2bk`E_05VjrM44|V*K;Mce#>tKdsUG78jgESeZu^gK>V-^(a!H!yoil@82h0b)7g>KA$#ymyT(0p{ggadf zu5;4;rbZv3|6ow-bP-(JCSRE@4di=po^-!5JPv-I7gVEw!K7x##Wk1*pt{-0YC_qji>t>h%a5?|3t6wK*v?Ue0X>5yM~19&N+{AE^EoW`w2u`#a$KVdPO%06L!e~y zlcn2+JKhr=fwR*_;on}x<|w9RTfPx3_gxsQrRaskBXEdUf}_X_jST&CLBuW${cCH% z;=$*aG04nFn0yo+5i6g!b?S(Vo(`80@KRXy7B 
zE;Q&BQ<)EhI7zfF*D!MSK`kR`i_RilP1c=#a3E}pA?;?DVsFw_t=?Uo+v@soBd@zt z$X==WUHnw>{A>#PO^c;nH^F{euc2*vs!Bjw@cU2OHeq(8C3F~Wo&D;5yp4Q36$&Tn z??Z1)cv|~6yWp`JNr}n6!~_2`u{+1X z@%V%fpzxP&ZNptbCKgBD@J36mZn_`>QtqcaGJd)U9o>Awmb2sgemWDa9Kq!>REqWmTJ>BT9!!Ws!@Ae@o{hS!S-S;l0m5K-C*+( z{W#i%$uBp!Vi!W^Y@b5F#gpaG<8hmLZ+-HA?{K$K{HNT-?dbwHxi?AC_;Vy+Z>)gsKW*d5>BD;ST=`*?*Y6zlF{o0cJii7QWZtoszc6)(hL*+GY8 zuZQ6>1e4>IWcWt{|2kT@Gw0yPA2*>P2!Q1Ek7~ zM6>)=N8R5)@dfyNNtEkQ9%a+Pq#hU05vYTh!@JltyDW%4HK7*!b={a9526i~e<8sIrdclcArT#J z!K(LV0G6|C7g~i9us?tF2&0=2P+jh?8ygdsV)u^6*vyR&(%)Y|TQuOvFzK3G`#p}k zXB5bn6ukRAzZZj#q?0?h)f=NpCd*Ud*~?+W7bPkv+V;Ae!gIGwbt_xS-bb%Xojhvw zV&seCje1`xR?12vUcK0Dir=TU>%ND*?&S?of#o7z&!I}#vH&T+i7;U?~ zH4!1hX24g((aryL2tru>yIK3>62oiseefWN>ne1v0PK}uBGOFH4I~XBlMN9L3>b@8 zifL6}`9))~epnslfuPq@JF(;dV^2=w@nE~*A@pfM#b_v?-pIJrin*_@;U|?JCUBb> zlD7@p;zF0^B7p=FQs9s+=WZ)R&(x%xwGQ{jy``~0RXUtu9Y5O}zH0Igna#hT>}z^1 zp%!o*S#4w~X#Y^bR3%C~?Rn#OTDzGBOiNC;hhgTAyKM{O-kC?;hwzmCf${)%y);#i z7p|4A)w8-w>_`H*k5rU^<-{fHMiWAQ48u+kga6l%?h#Tt@+FwawkL1h;-D0?)W`@k zSF&mCm5$D+xTI_~Y<+}KF%<6@^6yLQNH980e*SALQ(gseYkcwu$bkIOU;9@)vK4_8 zfYz;<>Pn|x3>w7*EFJ|&1vJln{&B%OZlUz&oJ1rOSL-MAdmk36M9`OfA6u~L%j9;> zD&;MsZk(hjEuBAs`goXJ;C*k!i3Fyq1~c!x7bb!68JSJa-x0$=$z2#?}^Ld6)`7W2| zQb;T1P1wAcTQu9__`vIXD_Pr98@IIXv5CZJG2P77Q?FhH`E@8g$+ACvYx38rCe^j+ z(s`%WIlrL6ZzB{^>BSByUT)jM)Q#BCiQnI)A`hDIAvKZdOj2}q;uHc<#Lr^OWu-)x zMK7#;nkLZic$@L4Zw$%wT;z!-1zFz_kun0;!cu+-@y@DyXt~(pF?u9pWwI}SmgtNc zOwBt_wEBl*FFDneE$64iCvezz2Kl}mJ^At7wEylX<~al7P4$4Xk)T1@vEv{-$yvs8 zE5SL}y#KpL^V`>uzHfw$hGyTzbiGbNLkXc%bAGTQ-=qp3wsgC<(|&%sTZ3G&J7UGx z*QKu9ArjygRj=Q*QBTMFpZ2~vN^q-D3BDz+FR^%aUP0Yq(k4DIwyekk1p+{3OGd|9c{eF)|Lur zLgdulj0a^mChpokjodf@66%nX2_WJ06q3DjFz13}kij)*zO2jF<7u%OGT_3{HWCG(5>Ttx;cK?ugr8_`tFrM4o? z{?nYTq4*@{ZRFc;zC8UPxTif7l7?N*tQUOe_M+4M3FA5tG^c5v{K<6z+l9?Jves%y(YL8q7-|qzfg$i-tLhcOOu%#01?{hSx)w@eXp9V?^9-GN*wohs`9uV70FK{B&(QaCsX(AOo-k|6|ACUg#K~SM?sxDy= zjti-_5TEJb60UmBaXlZp7sFeVVO01I?t}|1ms{X=stBvrc#Eq`d*DaKB1XqqRqQ13 zV@{#vj<@*Mvk9e$Ac|SMQMY_{T|F8bqsiEGf!V({R~Z= zt0t$I4c$4sNrS87aic-*qDqDemv$D{59Kex3R^vSkMgs6`NgHPNgmCShHC9uvwuNF z|1Ri`!gyY!)p~OTFa$(6g`g_rCj~Uie(lewfxeD4-8xU&i7$2DQ~PP|oM0o#Nb0a| z{iwFxSBVyLJm=(o1L@ldHy4ZaM=QNj=B%neRNPpdFj{>hpaq8}?HKm{@SQMuqqDmi zwvuCMOrlP3+wtU4d*{d9&@m!rAoBuD;fL4pzv=2n&R5j#v^iLBA(Y7#~`s z?av(M-<9=1Pij*sQCr;3gctoTutxOi!^$%XnX*Af&PszV`~ze;g%ow+lTFZk+iv$b zKH`qX&&WBJl_QCv6Ie&1nn|7z8M~=u+nE0w!=f+Y5wNLay})O84d5NhUNw5 z)0CM9vDHN7l4dRhCoF`|Gjcl62E=lfbZI__gXy57!Ro8rRoO)ZB$hs!cX*G8yEr9^(3IoqAdzI5pvN4v5^Z9?onv zWhOTH&nZv(h`5yRerAjSkPFKX~=s4D}N z5?F;;6JWg^e8%z-{l?@xLQ+KJPZy(fDO3s4*8m$UvNoV zR0<2O+Y9;-K22{492em1Z)3}oEeL^;O8d-sUskBpjAeH(W-dy8?x9*60gF9^A>PGg zp7J~0a*l*+W$63S8J_wm{)qRkBQ)F~0QSnQ7Vz{o`wH*9OSuBny?RD@)N@(`USp1V_w)M~|I^R$w@>sEOS>>le zTaK)D|?Otu#@kfBYrhB?vq2`P}tHGB^OoMHptcN;k zuRf}#zGd++sD;<+TSyq7?B)5=CzU|)#cUtN7uN)avd^7IH>86c^Qm9{4TzFkZ9<6VNm`VV?XF` zaDnQ@9dYd(&DPA)uZgmI8#NV>4y1~DSP4CksDKOxe|0HEOx&Eu+U`N+dpz2{^(*Ta zKZ!3WwVi@~)eEYOZMhDm0}8Fm@hZbQpaZbbSY@5+*y5q`-1AhEA;$&LeaDkUuQ4U4 zs*e!bbB~qpi+Xzi1vnCtseS4&2pZV8+@__5gAleNKMM`|tVf!QDGm57WG{q}KW#UZ zZh3rm^5nRV@5xK24Yon`?R|h153xX|Xb&$PB;{C4bmn>|N7j1Lsa|dvUukHm{uGSS z^#H@Xo>6lfnFy2OGGsc{2s%F zyw8)jk1SonhM7I-_Ux}HlfbL&=yI9d`8DCYSEM(;w696x(FLhT*5ZZ=jrDD}wJl+( zhnw-Wymh-{oeQ1h92tpyxg-eX(aLNyjVWiS{s<_ZK_`^Y>{$+^)kBGh-&-a##up_l zn?7V$lD=K)q_ZxRlgI@CO#)SH^Jz&;@gEFNA8Bz#{M!qloX3!YWlIN{_inOF#}*W2 z-O^%=B?`!2&8+!4(Ff;k&l10%Brue`Ut{5#Z_c3F=21KEFO6)fd-*PQAFA^m#Z&)$Z?k2)&X`NI z`29oGZ=pWr<0Yqy`E0%GR1+Z9cP;r;A#V+6?wF{ipuPiV5wz*GTlcw=cp=!xKw`DV zo!U3eWPN-SU^#6E5CQ5&gpmApwfl=MuJwehEYpv7EqjZzT54W188*v>h9jm)@VJan 
z)rWyZ58!Cs*G-hYz%&_LjQ(oU2DSbxDuWC==MWY6Ku z*8O5{E~oMZN2DlUo&r8oZSnp|l!UcM3N2UfJzu0++p(usHp2F_Xnw}atFX(svr8sOY<%&B>HmoC=0R|Z@?cT}^BV%iPA zOrt2O_i+3i4Dg2^L%JXIKe&rC4WO84e7d_?i6U)jsP<;rlNSp?JMYjJZ^O@`4QbP^ zd-R3#M;B0#r0niX0J)p>#BpHOiBh$W@2cLg1rtm}rzX0^5tG0VIX_q}XddV5NjW z@jOF13PhCic1(}>vO>84o?8Z#US3LIBwTspDwq$3%!0YOn#OImKTB!a%#4k6pu~f7>?Qfk= zI)2>!eO;ioMWm_lEq3hz^`2?l<&-m_#$WSpEF6~;y*638Eo&7JXH|rH=vjXrum7BO zwC0%lml-}Gjh>TO*ZIl|sy_IQGm0$R(iaLt>k#vJm=Lc?!k*Xg?oH6XoZ@R-Q%uDK zd!|v!an#DAUvZS>!`_4QX^>*Og70OqS70_i&WEUsMBa=$NY@p|!Q|+G_e*oJ7W>uj zDoq@-_dpG3ALF;j7WFdTbk~KxPuCamI*HHKxa>eXz(+Ir;6vt+*NWD8n9}tE{KSy~ zf+X8AwIJ#YB-;cZ#~^bTeA?G@Aq+qPI@tf}9nq8A^NRyeaY1ffuUGwe26P~tMq#yQ zW|+vyg1~tnK}YFX#6>N=UX_D`}%ZI>;4QW-Eoj`nWdA%TL|s z(&5K3ysI%Q>bHf}sAJC;N~m4i>Fw@biyFcx*GYEfvK z#wj&DgTxJGZHoWpuKpKJ3xKzj$H4nCE+Sva{iR%RKp5RMlwz`+WgtqDnN;}UbQbwk?n(ihfl$^u?`7F+3`&TKezJuT> z37La>9EfX1%9J6>$|0U5hd2_e#(}Uux#5W9Cy+v1OsmR{9z!&X2q#(|@53ja)k{Dn zrFZ;HJh;lPw*o4i)E`nlc&0arwL2}adeBffFGv)wey(JNL!h}hoyKsRIP~)&-S@8cMoyY?5z^#FSW?_uILdYNW7i zkKlv9>m8A!ZI`QAsx19;INM&1Vf_N7hLt=l&*+096}tPt}I70?5S} zZSTP$P9`Pz{lEO<|2c{s0Iiwv0sDo}UmO-V#vV+4^r{J1du^c8bX+ey%&Dc{?z zuN-vF=W)t|9{_*QTBSn)SY7B4W zCK1zTpXI|Tfa5Fl);YdEq;*}N3V%BtaRZSza(wS#%LUOp8r8@ZXk+RS6uy5t5};e> zkTY}wdg)s9>>V6;mM*!)8VEtuqOoR^$1hE$>K2Eb+yj~kJXY)@(mDHVx^kqW@3fFF zpVovWeZXe&zG!m7%5W150wQ0l9!s<6I`@kaG?9erX>!ia7KPVLZHGZ)if)%K_JVbV z%klAbdNr+0dow_x=De0!Z%p-tN?EWc>C;paBA3lCM|-#oh)%g?|<$1o`hGG8t5G6j^mw2FDnvn@`~ zco*86#M^(i_yDjWn|?`4GMVaXd!=sK2y&pV@<3G3M1YV?^h^FAVNVDPqT;76{^~MkPIF|J*KGG(KaU(eIvt_D7ajk-&i6&{8AN z`}=Msa$p{1vXF@VOSH&io#>1<`2VBqt)lAMmMzd70>Kgp9zt;U;KAM9-Q6962MEC} zxD(vnHMqMhTo*12cX^YYecrq6-hDpaPg+}q31bdM)u`&dSJjvF57p?ngWx{bBa08) z$Ix!7дs>C)QDW5iGaK7fb+douUkh?2Ug^g*<`8l9BB9|;MV0*~(vMgL39KUMvnQon{xh^N!;A!&5uPh#00H74`U13x@29*m*I z>ZVfnrV@?Qbskc9ke(-5q6igDWM>*8{=?nJzOZB2*S(u|yD%m1gV`*9d=q;-B}vx6 zX#Z4?*=1tGpu1%sqW+p=j;Th|IqUpbx_Q*E1z`b~H_hVv1iaT`Cg0V?mG;mkF3|Ty zsL(ShT9?rmXfeWvWy9fT`eRg(Vc;82o2_L`r4~?f)o2zE;k3t7ysM34t=ctcFs%2s zg~P*R#&z?9V#xO4%sH)x589#E7MyBBBjLPeQD0dZEj<5i{mE zN!LK7b@o@5mu7KU{$-5nk-v$ycMU(h=VX`ji62|bvUnRnDrJt*hsrUliby5c?~N&s zA@sp?o-+(&K1~p_WbXV>mPu>&+ggmwHndIw&jTDQp7|ILHo1Ohx7-RGNbDDApTa1>>nws#%SpSYY~Dc;vI&2xIOv#CNj;4$W>@~*meWM?ar zFN2E#>r;>7b={-ND_Mv7+gwZ^?iD;Oo8iyXWy&D>zQ)SeOHR)*!}ojMlFWBG~3^-@?!TR@0RC2 zJrk^1p2x`7a$!iQ`_sPt?0p;n-z0e7y3@0yHBMz{*oMV_`vqrpR~cA4_f+-cQPz4^u953{8~c)17`iis6Fa5~BOVd=Su`X_=I z!;A;U6VZyhJYMYX15Nxg2QSt51k7G74#ZF%=}oS)NX2-6*A5c!pna9|dpJ;3HC)>E z|61&=eo>}97-812f(B$d!2@LN;2xE$t+Jf!Aym7$M@Ycj*9RK!Lpd~SiFuRKv)nRH zejQgXvksGo7Kgw+|EC-7=SEBlRHtiKqH4cClxo5YvrW28GQkRSOJbkMF81#jyI3mp zMhXEwU6uN2$7U3VF~XtuSUkh&Nuy-Jd6!ent@->=9j{d>H&N#@f=X+4gQlpH#5_Ni)^&5*mhQd#p&uNw8R=rey z5uPJtH|(>)K!2SDXQ2HLZ^l#e$M&&BBp-MSOHzY!i8E0FdI4b-j9;RyGoJu9ky+aT z6Rg~31-OEOISmTYH`6ppXh1~X8FEB!r_IP{0d!H~u2j_5>an-Z&o-K#{j23U{*d*E zJ#>FnqV5)-^q{4hz{ch4j&Lsc1vOsndw@YxL5l)iK! znBS%jZAp{wt-B}=BsQL!Jp$yyxyK5()gKttm|8QC5CcsXXBFDD@E#WOD^VWu2itV- zoh#>k(k2_D6zisd)}XL5DNPaT)5=I0L_h6;FU$XuQ*k_o0aPD7L{pY2#5(2%b*EcP zpX#lUS8yAL!%UCHMTf8MHV{=^vl;+M`|QO#t?BK*I(P1|K4t=f zkv!ybL^N(XcP=L@_mz@4`%kLni)F~RFmBevSa>EoI&BR&^|g&`_akxaX&|YUyHy1# ztx6DdeNCKRgcK?`Yo{FVdBvJ~ZHpBHpw??MRxW@f{o}>?nJnVS?MjV;cTSAeSx2*M z^6$6SeezMDzcS+Sr5Ud+N{c(bxTo(s(8|M~Y4k0=nDtCuZAo++@7b*sb9C4PZ*zY? 
z{*1dOHG{_`P&)2>47DGbWawexAaer6N)qpn5La%&ExO{kC^bNgZK zz!39!LrOB(I304|{ceh?p8Mc1szedRL#x6wIqc~|+7Cf){@#?mlYF@>0T#^z6jNl^ z74$+`?WiceKZ)}RV|=JeJW5~`zKD>_yV&|@J8v*^Na^@t!Z-q1QC&&TRotI~JNWOv z{ZYZ>wwz%rF)GPmY-hwRK05Kt%BMMOE7W=IP_h-ZeUknID*9(&rG{^@!lPFD@R?|m67v5-2_kwo)~gqGueFA!s7gH z#Zn20ewxZWzhnv-3y?3FKPVXNG}qB&ijpXL9zpWwpDLV1qjX&_-g1Pc1{S&be{REq zapUCk{-cEs0==kjWYe3&uza-iuzZ~P+Jh43)S!EQjy>u#64A$7O{7Z{d$Sfk_>{Av z*mFeY7x9#a7A>Px*BiWuR4C@7nk7_>XsJvhw3~1Q$!tH*rB3$9Ru*Ej_Hwau8FcmzOn&0{_vvNV6a^;4JLb}v29qWhsM+LK`I_s|Zh5t=cW zCsq!&Xnpg}Ok~dz^;3axUuMz>uB1{6EdbH)XXn$;`@dm^TUU%eD*gSsSi1ClZ+Awm z=9YrUh!w9GJzDA<(6k9$ySwI2zPlHY!gPqMEzMG&Z;u{6{n(-(7r1hVY)Y@wQ>Tkp zk2$}Z@nlmrpHO^mcbEM7tMMF>hxTJ;AAy*ObU$&GE2ybwqReDn%%C-0vwdWqY@;nz zNr5p}GCW}+wvP`x#srdnopfX>iZKu}_Ey4J1z<2&41J8~YpT2!xQRZ)6&B=_TsdrQ)7t1m`a%g(&X?;hi> zFpfa+bW>rw^!<+lFm|*YGI6EV&~MfQTNQ`GPYtE# zO0B=VSYB=@wQiZWZTgG1JZUMYffReOJ~4@zPPbM6=uKRRQP*Nxai+w?A)dOD$EB{L zSIP5}{CyqZ9Q;{p!cv<@mMlvwX*}|nzt(uedlLJyx#NTG8_6>Fwljb2T=XcMX}>0+Fe-r#eCwSR0Y6<8uL+Sat>5nf zFXQm*ESBGZ-ln#A&WIgR)oX5Lq`{u{W?h=;k|2BAXpI2HLigpd=R#Fun%A+zM2+nl zjnH?rt6@+`*kGTP8d#0%A@{iPY@=5mI9>IJJ-?L~?T+$}cy!1p{EZ6hadk%o%I9~V zZPanzoeO*O%;7{KmrJfb#qoG+wlgZ#nEro~9q+}$b2A15)?|#a@bxt7>eIKHwLqnj zZRc9N@~K#lz{JNU8(z=LvgYeE=BHiac`$EM^D7ueSc6jl1UxFbDC4Ex8cIaEAJzIi z^hhdKWH|qD(;!Ytyr3E9Rfse5B^fCrAdOJ}CRW-EzmK1c>IK2gA6#Rvr>Qca{IkxVa7qQVJdFOiyFBSdO=p@I*WLn)Z99lP{ zRNtQ?i`u~hjpns`h^m%2vXF)JGZ0We_9YX0OFLY3Yef*X-%eyMwm(L_D&C`whwErc z0}F?%q|VbV(xQEZal)&J%Uj?kV8Dx9R%NV@m-W8DT073tqi>yQTPME*Ctb$tUC^B4 zVGDd4MIyI_)d9@0As2O9ZO*AkqVF`$}pV)=!~J>+&p z*)t}K*hrcr&K0bPWwS2&QI3p)*u<&C)~`8ranbdO z=;H$$et__F>Qp1)!%tC@L#>QI5ojAKP(rHfz6O*VnKA%s+(^hm&v6)bJ}I9?IUHoq zcspW4ovGo$O?BdOjk5+vNcg(#f&<)%TL9BxJP zd{__r7KrOg;#Tyq$6FXD^Cb49XVi)X`FUpLstEs(HXq)f2uDR8+pPQ??gA8)J$>93 zS)%@^(aG4=Mr^Z3>=@=r`SkeL1-{Hg?<`KI%ko)K_nQ=V$}-JY;rl5%;H%#_@qK%K ze4OvWSpj;PC2(AuKu01U^5RtY8hJsk#olqv?jD!$pZ>Bv^oKwjI3S;T*)DDHql~Zl z^|d7j08yXbMRTc~dKH8-9$*t@!}oR?Fu{fkyt0cvCD{9<_Yc20woZ+(7p0I?y!gX_ z?o9?gm2y%xy2#3oMwKdm7%QM%sjT0F8$9&XS#J$XV)Olnl@e7#!U|q;SBGU z{hui}d|9OLL4Kb7x6}pKqiO)ox^PzBv_kA0snddJ>Gd9b&#N*8cP3w(qJ}(&e_m$@ zD42mE<#;jTEA0LbTH%YDEAaeL?G8t8k}uQXMjoCPi>2kOjYb{e>w|?o5p3r`bFgRG*!g94<5B3pRnyQd1!K@$D>vM zOLwN&NO$z_04eG{JV1(`ziYX-4OO4KF?@2QjZ$Ap3q?44z3oQd528)P#T~-5v+3P* z)y;9$-EUglD-2yPxZGrhzRCTa%wrwh3we68)O3KA@FMGH0QNIx?p5UDxSDw|M zO342!6JFy%IU(uiWjCQ@e3K*4MbtRnq_lEBo!v11j;X)zood}5nq>_K7+?nm(r)vr zKFhjQI%JIVQb#CiJbViA6Asmet=Q zs_@`mPUjGs>V`$PH2UaIJXvMx5I|;yCx-8w4;hAw_pqUASuOC4^*YHdYwlGUEJ;n~ zZj(qSa4MGT*Jh!60gZecUMf9oZpnOSDf`KN{f7zveYg~#!Anb9St-Ljf{ernvcA7N z-NGmEX(67$C%pCekCOEoRczPqp_O>!Z`GKX=ZgyowypVu@^Z>aJ_CO%w7FNIKFhfT z_|FmW9{;WEVhxyob*AI@vSq*;0T>Ggc+&6{QOci>VvK+bBTo_>@qHQlr1$buPdW@i zFUS}#@L=%shrUxd03>J7WBsocL9QVslh;3u(?JpzAF144t!}!O&pvyqrlNhGn zJzM=E=;AtM@w6u45cBUBmxmhoNvvKpEfze@4-LV_HP`hS%^Iw5aTMQ zIsf?@-e0fUON57Y%2#=)ydO$o$6p`+ko@7uS(a%9pt?2+TML(zc6ZbDPz{HK=lpL6o(cmV%4;J?Va zkq4LlGq-XM$jWw-{(ceQ_pQ88tlZSIFT|-YtnfF#2Y;fm$Cs1OHw`=}IugXf_3|xA z$e@2W2VhK@;mZVblIq?12K>L@9s!yH9Q!gLiOTOH>sA8;wFNz0y zzL)>vlQxhBI=C6Yrgr`NTL8UffbTQlNdEMvGYg+5IdEdy@CmOX|K4ZCeqa|eGLi8)0XvEkdf}UiV3WImZ4--nd9c3=vz$YkpGOB5$>qNd4SebP z0C%?2Rla#QlG2~ri%%It&NkA{X-Et8ppdB>6Vi5g>^pO5d-dgEXqxYbR1&@#euLpZ z%YEsj(TE-ah(c$b3Xgsh*(`U?Q-h+Ygb@JQD=_(YHKQ+~Ty*X15=U$Y63FaLdGr{u z7g(PUODwnWP+=dC$l7!Chlt!@c&R4{{H|Teea-}Z3%J))!>P% zKAGQ}spA|)lVsihf6KFyF`Cj0EB86+Hce5#%0YXgfK_RDzEK+16Qrfyd+}O+wA79m zW!Y#6cQpM}&a~4MagbVHz3*PM*)rn)z_YeIFSeKP3G&WtqYej6)V3GsSvQfTO^Hs2 z_Z`1Y&2O0B{Pe0Xe90!8IyRuEw`e)8{#nHitj^ z16DJ?%IJE`eVgjB6LFGyP34xw>Dr|uyfLP~vhg7E!+y#VNQ)HV8t&eXEMf^qqasPL 
z=(esjVu!UZmJ^@wQ&?GzFno2waK<~T+&!po;^UgrJF z*?w~u=iKqlq0HfPK0Z&N>Tlv{hrtj7^~onTfhiq_@aylbYJ7H8uG&)w4mN+5Cng7- zg$5p5`uf8Q>p0uw^b^I;MIQd}qnX8Pidm1^!}F0I+!?4}mHuiqJB(!3T%qsD%WnxssR`=j=b77{%ZLeResb9EkivCYy(#z-I;Fa?7O?6-ajr z0g3^75m-XE_{y6C^y9VSRMUdl5`ViA>6H-R169Blaq8c8_}A5H@?e&RcEW1$6k17Y zsVD&^)5>aCzDWzo=}-ruEI_$^Oeq4g;{NV+Q_<6M?%|a85pNAt(r>p>B3_4~l$ z>Cs4}qF!iAxcu}<8oqGGVViWcnw?6pPUD2 z1#&;SG1wX_jtI8k_Cepdth_D91y#EFn0~<;Z-*O8Wan{wc(uT)W?a#?H>tV4JLAo9 z&RpL3=@rOt&DCdIwJ-bSthNin_fZn*MZn0wyya`BOUb?A@lZEZ!T^<(fm^5WUpHBI z^dV>H)A9&v^+1K^;%F9DQ-SKM1Xm(^I{7rjpAH=;zsz_yc+w z9yodz7xp`UAHDx^YX5VDqZ;5p5cglNxI=-`0}KR8Hz=Z6TA?vl1GtXVjrny_;Mz_9 zKJ32u`~)2Q=|p89%{o~z2l0w>%XhT|KCCA&Z$b<9WUlHGrz&ci4o)Avxt_b7ty-Lr zk3x6&n++nWRzqcB^ui+XdsXPEu8Uy}JKrFlk^SB%d0C7vv=s1Dy$i3EA8MbNMQZ5+ zSmVObjyDh^*rJI9ZZbHxTD`9mf2?XpMOwR{v8Hwylv96LSbCRM)Ywh6-P!u&oAfng zG%1PJuyzHaJ)f3eE@wSwGPkHiA-Aj9*iR4#C$h#;Mp!aD zGOeBCpwt$o_La}zAuuxIJ?ayZDg<`38qQQxQ0&anT}f$Y=N;JzH~a_ce%X(;_>wzm z-4Mf~%&u}$!`e`mWvsR1QoUHJl-Jt=Dqhs-<96qFj^A56saKy{%U~p8NAqkPLGhL! zC`i;$9UVFLb!rxCtWl;dvoyfP>OO<911KL+35n%JMZAT`U@c3Q=U-tn6_dD;Le?d{ z^fFFEzm(f>rCX2A&|N8B=a z6Leo(J#LVYV&DrcepiziP-d>}*TS$51}ZOW#Yld>dq0AHWpR;W+~0OxzC@ZO`S=n8mGqy6=U;105{VBZMtVai7IRmk##yv`kGv|6wMJD^b==^O-QEPotGqeH4Orh_zFWZsAyUVR-Hy`%+n5`8*80OE!3p;Ji zlu+~7C}w2vBKA#Cd*z0eEcew8jp<)xwyL$XkxICMzgfngz7f_7S&~NCBuD(!<|1f1 zz7h0VhppmT$n~h`vJFs-(D32kpjz zx7?`N>-OeT)uO>;I#MFT(|b%Q-XFMa+R=UD7|>$o{yG`ZZ6ZBc)$8EoSDHvp6I?kf zv#wscZ6_b3i|Jzo9_JU^JC?W_n}#D-^!T4d$M`yJGk|c9H;@bJUv9_0{S1i))DP&} zU|9O~jhF-rRy4~vJ{3*sTb^I=ZZ!SH`O$2D?$kBdm%luuVI8hy_~($=X}wNMFC652 zA8#r~>n`CDHSSUq(w89=RPzoCGWZM${JyDs$`cxnF$k^@4McT2;o?F?>%M-)HetgX z?!u-4XZ5B`LdI}3?c6`FuRO`&Jr1sO z$Q2JCW{t0+@M_P5V8`-!-i6(gKdgJ~z}vtE5-KAwfu(K3@95@xo{K1KC7^FfrMW+! z@3N$NTtK1V^Zb&_WB#CB8q{AN(oIDT#{c7Mqx9fEpn^enZ}d)As2lJvKC&~Z5j!p@ zIiw=(DXfoO8CRTNXn)0P^wqU!AZl$`wc>QcA#pH8xlRAk(&1shns{poGpCjt*16Lk zC(4NGt`4HvRnW#@|03~*+m<5ay1e>2MadSV&QpON9c1cMoLP36whLNH!?NF1Ah6+u z574G^MDLHr^iPhD+EilPM+2|6-G3`g8SA=sAi#qOB+gF5NUh5Ay}rFK(E5Udc~us+ zJ137*zS7bKmfo@BSfhjzjy*rTDm1=0dleoM!s8QcCF5rP36_qhd)2Eb)8JQ0%|UU^ zb`)>R=yITAUCoE3ng;nk;_evdpuOM&S@4vz#MW2=9jMrb@PT%FHP-Er%MjnH$CHgd zLp7b#+ciBD!ozpUb@hQVU4XBI84Wk1x(gYE-xkZ|U{b?R>?&GQz7dwQVa_YT_+w@k zt)9UaB)sHyhQQu^E3rdJ8d9g*_RT*GTGwaTrNKltw3K+CQn^_768_9-g&oimR)fWb zzCWr|yr0;jybX2qx{E(=pw1eF4HOO{#~|V7z(G`^NmNb;nU{{tIQyWY*s|fa28Z|A z3?2k0$~{YP8VfWJ%2?3#rl;(OTv~nTQf~APzI?}ksi9@JDNn&Tr=C?Pisw|U{CQB| z?FNTW{S9t>Vzns|st<=H;un)T;YWo=xnd!i`IyT<+kP0h$B*dz(MwnI?jZf-`kT~> z-{4W>nfthNV*_$zD|F9MAY@C}XtoCeBY7#I=SW(-{Wm>77;TQmXz#RemPIbn3meQ- ztC1PzSphp)ae0m8zTEBSll)7dmP?lf4g2&J_k#Q2qhPPf3}v|j*c5{MWJeYCsy)2q zQ!bL%WYP1CshOO8Wf)lItmP*l+s$u^g`0Kze$&2br>frC)<$8ConKa8ezk1Co)4~Ht+a&GaUn+}H`NxD@P;a? z@IyXV;Pq-u}}SO^_025 zQNW8=p^UX6Yj?43L}IPO{MoTxi)Fvv{Tds)rSEqY#s2m;R|tcy@<;`TFbIm>VgKt-=UcS3z)lC6j4wG$Nvz%%1q`LC=~)HFQ;Buzg5{ zto&4AIwo#NmT{oL{$1=lgGz^WbLb$qY!5kaLO`s3fD~d>mK1C6bSRH{o{!fPP#wt! 
zQhmi>+x1lPM%2E{B5$+JwNkH_L96u!T0Dl#*Rdn)>)n^P*N~R(?Z>ort+k=nYt3!7 zYUu@Udd!MxR755`?zcT;83jjC1$b23Neaar)ZkVP)qkA*q9I`w0>5|1{Q~ zl6W??s7T|?;0g{Po?Op4kNR)%&{4~ul@NuwHj`S?^oz%)>AuFkRCMy zS|B5lFt-rt)J4NTIh6KG^27qEeBv)3_vxdIB5>I;{!p|tZ-H0GE2=CY>kyCE;Tb@|W9 zn8U$^&dcDZ1c5MmE-z{=rUiZ19SaBi;NwkXt?us{JrhM@HVH-TLt-v8S_8C?{V3CvtB-I20*{y?6v18Ya);I9uB-w$jUXerBep@#f{5%{k^g5#7jE+nRZF99` zvrcPa>2oV&9j;jk?KG2s!1OLcp=Io2PMw&qAyE_ia)NWNy``*v;}}cqZ~{s(?92mI zXxS!;fQS{3e*ODt|J=1S-JmClkCP?PVjOJ7c;~L#zL5rmc%-NUj;2-7XFdgLZ}n^~ z;WOGl7H&Pi6UciqoU!jX(nTMQ!!uV%+3cSYy*W8$(oh{PkBKx;4J0`FmQ`Uc+w7cQ0FNBrXS0HXai0rAZf?pTEKA@a58@BPgIm9HCdqqJkP+K% z)E{gtJpLZ>&ajaRL|-aMpwFxLk$Nsqg3{RSV?7yp6VJ!6Y%>w|OuKV;G{W&ImfxNv9j?&iXd}g}E}=oh^;tiN_S{a``KI){3l@ z*=Sg+$_z~SoLq0|fn#VdisreVkgC)mY8rv8gUvw8RMY|osVDsvKOji4`4()!*E z%Du~Nwrihse|(or$c)JbJZeHfCf)_}TRMRyo%Uw`rUwB+zL;+^(UrD0ynW=S>LQey zGMryC_6cF`&jL(-Wfpjhj9#53ZX=_vV2yCS(P-i8yq{ZbmCO15dY{=v-ZWe6(cj%l zv7N2P{>0PP$e|)$T`a;@$r%24*_*OY8j*m4;})YM{;rU+zx{nE)W^^k5{nqV3{G8+ z8(lZDfXlv}CZ#v)7jK5M^)IU*9jyFIQ)@#9ZY zNvePwf{xHR<3WRPZlaKO)Md|BH77=kalFO`zwWR5OyQ4vo1iKlYkrvY~=+)0%7Kh{M%rCi5KaQ+ZtQ4^-3pzRzFk#cv#}VjZqJ zzUE%^7=%Hnu@h3AboQFc3IZSu{j9GFP}*pU-7#pfLZMV)X`H!IR_#>;_=`elx?Vu$ z)yBSq@^{MgzhNvOP+!X@)Dw$ReKT)8#c+6vY{akgseXv`lIiJaZ2*AwJ>f}HELpKhMo_f>)hpQ=<41(Xbm-?T zB0n$EJ}E}}=RX*S?{~=;51dUjabJF!&mbZ6Y@r91zx)i5E>zWw%C8(x1ktmM`2qT+HJu!XgSJQbe)pO z1g-wp$ny6u`FGwY&2#NrH?)lf-!QS{vV!h$^F6Ya10Ft1sbFBM;f>!hcr@d)1_eE# zyJ<2RwEY|zKLsEUlvS3W!Ek%DaffF!N-HE>Q)*A3g2bS(W18E^M!x7yN{$B9*M2V2BrX7$QCopr4U=E&g;P zAcQ;E4NDgDHK|!VjW|598U3$4>&Mi3B5Q<&&5s&@Of{cl#oSE?`xQo3S zafwR9yjg+jI^k()YClhh>YR$2X<8*${Iy^4vCcEHp=>`cRw7Dq=(72%p%OpmrNt0aSUFiVy+NPx=-m)0kcSMlT7P}kc6dND0SWtW!{pjYF9D{78r%)wIk9JTs7GlL zlme{b^b5HK$8mZ>2JEB;Tdh;NAx|U^Dam4oc~_JyMJ*qUskeG z?1_QN#>F~i*IKe`OU-5)hKu3s}KZ<%ro z3<%yIN1c!7s+wa@XtNq@APdus?N}j)eAF#a!+fCb73zF^*b}X^dZMgBJM>0F*~Dnn zST7oWhGlp%Xe1koZ|&}UjXFsgz1u!=&gj6A7l()#>rm7Gtqk5G<|mO^KFY)%vequG z@D=Q}fL!yiz{!>!q_auUUXVf+#YE5z}{@Nw4A8F&nY6# zJq{F=u)%Y{iVP8IFMuTBTEz7a`=?eI7bZp#Wwe$6|~5Q2R`TnR?Netb(x?&E#Lwuf)G%JZRE_c7`cjEPJi% zbX)F`g9<(pj#V5fxQeyCTc=L+22=IUyx-7d5Y-lT!~erUkPtvs680bb+=1sG3U1Ct z=dL#;RiZEY*(vtdDn<9n-?)B@4FeU2XWn!%ZNDPPRXx$6a;fzGe2h(MFz?^nEdZUmDop#ylx4V&y zB`J|LTioM0`s97oR^R86fE4uonY4(WcV2#6Ecytxs9;L>`{;op@<|G9E-Iof*Tn5` ziGV5t`3x$x#%GS+687LLgI}HBe&}YMDLhRS%NkU_@7oE=N1uKlN++dnEEaWrv-*az zMmKRvV^1YkRhnOiF7^GHVNTGe2c`Z8^d4SYOwNb-6>Zv|)B)ll`TeK9^qr@SG^jc| z$LmX_1X5TN+Xo>9=jbRvp`ylj(YC(M6=m=Sf`(9LniM-*?GP12dc7!UW0TW8v#>7# zCXVEo(^Zo&y&?)V-?RWziUK4M;D?k*aF$=K0^cCfJZLFJ)V~;9a4DMa`DpZgZtd5F zz&d4x<9d0`sZCWJ@Ez)_1tSc8<3(|8a}@G8MbFGOagL>uKTuBtRH zOUeR}nEecK1gpSrEIeAwmQheNv5@9I`T8jN<;EoiP2xom-`j33Y~_o^age&)xG0MX zsh;7rO~YPcE6YBPmo1KaWN^O%NCaYugps1Qzi9u{3&7VnMJWS*-9TSv9*Sve&QrLR zS%fq?)8n?&5Sj;0SnNcTvN3}fH>eQtW2Or+R+2PI)pMd8ZYgIW<`>GXI1n+c&1M4T z!FZ)8F1IdX%#?dJTA7v(`f_PXo(Yv*|U(IvJ_o1Ur6nAXG-*uZRF_wn5 zly(|PiIPB+^%N4oyN!uY^0?>f#)_SyWm%C9bZ-Oq9iiZ0#nD#U6sb`4r za?43t@6+CO=Xs@pWB!UC;iM#vx&E;0%i}*pJr6!e?C_{GnP*2q(XKV~bD8`y8A|aC z8=zO7v@OLG#vT)?*WLZmY^L%9*Yjx?{%e;381e<#ce{&jOf4>2&k|9p`%KOeaSnqA zbA&o-tza6CU`J~?=AC2)KToUFW+hR8lrcts)b48%ZtRWqo+@_miXo7dwa)B;i#Fh2 znCjEd%r^!PWZ(giuE_d^y59bhc^z8HB29lE*+{oK7UEg3@VNczzjL%=d9%arr^ReZmO&E?1fxiv6H--SMBMd7vMf5&|O6f5vB z-DFcxZ-w6(5F||GIiS8MKSqjU!bTDPgWA(>%bcR@tyNGgQ57YrBJ^$M2^@MxBd*&d zeBm!mX?2}dzzn*{y1g3nYjYvQ7ZF@CwGtC`6i zwFkV>#klLm{Q>Q!`1Jq{objXO)kv%2Eb|;g5s%1|BUCZ+9nK+(@NBsvW4?IFTlg~> zfxA)KtXjx)M=D1a+*w~fYhY_>Zj&uZ!`P}9e6+XEfx*ClTXW*ukYiw30$PrHE*_r`0i@UTgB&4Gb_EoCqml|6!c3ERaE z+(Y-LMIJSN}%4DMeh0V6*bfBaN6UxZBQW=PeqD0DH{vjZfqELZ8fbi^&?+TKXn 
zut&l>>A5G1VsR}ps8#y10$n%kY&9<~m-rYvsefbcC9ef*b6$_jDV?NV@xo zA*}c@VA4CL%mb6uCI&e=JTo)%DNh=TaZm{kITh z297SU6@KwW4?9!}PO%2eOq{+O2|UaB#!j7_DKX;t;k12uuHp>Jgx)f&sP@7+A+@n>BXOByy&-O{kVhILhb!N2JKc=BR-M)O;amH$m|Xg7$|)>97-yF~1hV zubyEqmKya9t~-}NHoAZs>8g7(!8RwhWfk%&&z3z>*Ak=q`6E^+1F7Qv3_6&y(-meS z<1LZ<(eEEki?W0{j~q5dyG;vzT;iQuux>@9wj-|+iTbG@?FP~@7e%J0N+KZZ`X-s7@ zRPPBOD0eC|daiYd?qfjR4FIpWPvQ}#@Wi2PajH1SqipXjY1BcIWA^~hQM7q;Nv^ix z4CC$UcOoI?7dx55+NVLdM7_y;wGr_w4b1I^7icZ!eGmqJdNPq)>Zq{$ z$KOZf|}3&3VBhO zb-|eCtafLRZ?kdym;nr;cJ{V`JQit4k#umDwPF_Bjrn8|<{cN3XMlw84ne!*8!PTP zVLVlDGsLqy~ z7Bfjkf2zgdD0a>G;)y)8o$-COQS>YBN6PD|MDgU&G&WqoMGD>>{&lK#5=jO12x)cI zJGhf%B=YIm*G(_rkE4<`9oPEu%l6ZAP|(q)5ZGl)97%#RB&htjWp14GCH!VN$UQo? zW?RbyN#Djoxohw{4Se8F-w)brlzhup-WmiDzeE{dkjY02^REk0r)8W#uGl-+0ADOZ z$C$)%oGI=odf6vz@X}_Utc*iFN>=N9B-{AE1>Ca2F9Ej&huh~r#`S&-zFU-E2W`IA z^jD(G4MK}8S0_E``FbQ(z}EWJ0n+B$5&5nfP+SyDaRlpA8oXp)T6wla`*zhs=5UPo zYPEZOFyO?pke(=jkl#Bt=xjxtc<|oz)auMLhIKtH$e}4;(z(It1~ovwIEI;esJ_d&GB04q3mMX|7T5L~-S% z1;UvVKFypV2B47%VPeu8+&%p9QFTF%s6U3{m?kSKzwW*KSJN8wOA>i3Hj;kJ>TK_Z zrzV7LqPydyq|&Tuw9{i^giwl0mqLXN@C9a-_ZTtTXQNvNGf=h-7bYtOFlL)X+|Jv@ zGOd=9CM!kM%kQB>68UsU3V)hD4oA_4G7yG{=ss3@=W~TTgHyS-oM}qUl#&W?k7|59 zxmt#8HJrxc*V+5T?e&f1)E_P7w+4-q6B;Ue8X|dKZO|FB@BHH9XGNhD5GFXzefRRN zD}ga`EC4ps?6{qcT7R;!P(5M0u2QQo?AHMRE0@nB*)MrqwRxic7A4$IPh5bi+el5@ zb;I<5w3av?x;4{?Zr2priJd6^Y&-9nwp1=0OH>uyj2LnW-%I($H35GX(#`3lrCf8c zZ&Fb70e_0L6mQPsbt{L}_nVvFOBmAfzZS3>zvTI~5Ut_t7nHSq7%V@S3ni#DE9$q( z!9-LR8?;rlC8bhw?)IA*F?RV`Cdnx+NM-5xsdb4>0({x17@>ESlL&t%i`N>D>Yjwq zcyi4#v(r-cWDfDLqley+<&JF@MP5NfmTz+yX(5pJ+}WlklJK!6k<4qNpRw4@ia2=^ z=Yq-Bn}v7yL|%3&I{ADiCk%K@VR-sf>#W#SSuF^@u{{*+cM3Y9iN$&Kpa90lnr72! z4=)4uT21CYlKB<*=U4k}ILd|nV4q8D6IrgX#|_WJxmJ=lk!1#QQML-!lw~*EzSo6# z{qa*shyk;z(UvRYxqpDe+CL9B7=zdJ~i0Jt~1hZD%@$qW>wn*(mq^eB|;Bk>aDm6P4Rx=+8>Rv+8O^*(%l?GZvg!~&yF~)THW!jLpX?p zt8y$bQ|MO(+mG_)`Y5TlSsW7Xu`&>Tf57K$Jfi8oY!%SU7rH)1WvbtdiQ1z$+S7VR zhkXQQeFPkoZKDIeSACk$qCvz>wa7GFV=3rnqklPnmk9iT@;q>>gghWkJ+`8NcuCWb zh9UFfRD3fs_?-M6>SF2{P9R-o+6=rR^^~|UL4bu1-0Y8V#@?>F3$;CyxXxfs%$vFt zXMTz}{bS+NY5?#4#qN}FA*29AtP%H9A%8OXg+S?EEeOkA$`xA=A&}+737e0gzV>Hm zbX1R`kqOROXusC!0Ee5(H`=k%9BQi{vvrqsG23DXd&KH3h*T&~rs8@hR$fBw#}5bW zX)DWyu{&&`aL8QGvcpQSyDvK(k|F8571Dv_yeQ1q_-1-|Iw;7x-f<54LMOCy!HSvO z1FoxVafTlwQLIlp3NVmf?8-cBz4b>UjoR)6wt*tG|ki9&r0ke^E^Fq+2iUXr^swzOk#Q5 zeYxRqa!e-A<4jxFu*A9emU#&vp1=X<9)86#<@u;+-nZe4(=O(RH5DW&UKaBlv{`@_ zL~7Vk!3ZVv6-6r+q9T5m zS6@o79kVkdx=dE)(DH*QUi1~^ij#GtO#G@(85X1Q?b&U9nk?wvRJK#DZJdTiV-aeL zYNt2o`6xcR5n#?IHF#r=fPdB z6Tsx)7d2b%E+B7{B`l*RwLWhN>)@`>Ht`~tdTx%^;rAY^JJ@|5GWoJC8F|#!PEe?J z=&~+FpfRgMnDL44o~Bfx-$GE3NN&jMmDyj}iMId8 zLH!T-??1X<-y4B7QTQ{TqxK*e@<)ao1wtqK(mQ|7fv+T6BL%9c;RxWUAdOP?@7b;y z`p(5?6e&#m^k8ZIe!mvVnlJQ!kh^Y-m*jpG8D4bWtvO1ZMxZ#66YbMfUT>G`2J#&U zf2}bpcN>wttu6{oxeTz-%9uxSi~iJ@Lb9&pjncfa9?lvlr5B#hZ{&JllyGNB`&YfN ze@Gx+^}iiK*!o^7EK$jK%-sv3iCF9?mw&XSZ+X7J-f}b+IxX3IPlY*sbcx)JR%v#; zGHz1!3Gs}o?IGqRi8!f73j%PgS2cGhBdCIgkz5tksMoRe5Qe(6<_@^&rOM9O^@buYoz zQoIre21zw&YT~#cg8zrMw}7g$``$&BkWxC7?hXa%?r!OBk?sa5>F(}E zknT_#q)WQHa}yi(eNn&vx##~o-??|3JFa67hI{OK-?i49Yt3gqb3WeNy~;EQ$|SOa z%c$ItPSh!)HXVuHtnqvq!p+gt7dU~fZ4XlfG+HMFvkDBqls?=Vdd+mIe&Z+Iv>fig zN<#`Cst0FkLzF6C#g$mz}*pjK!-UPN?fI$9g=Lru9y@Xz9#Dj7C$5C zV)f9(9I!$`|AL~2AhH@}8$XzZ00Cc%E~?z6P0+vjgd~|xej6mj$mc=_?~>Ub>sAu} z+c!=&r1t@|s+=}z!8ci?{T>`jaa44VAHXPyvAGhqRQQR)pv(AB05 zfu#LK(l&wVZWBuNl}a^iqEmiNId^qPv~EaP!+x0CdgSI5S>ip5yEC%{X|uZBW9{4& zNy^WmsdeB1zeLV)H`jfs^=MTGozMXVItY>##apj3Nt zS;eAlm(7eseJD}(>y@j1O97OXtZJg-s%ZF)uGh3G4hi+2#6HIv<&Rb3Lg;FVa#9ZK zZk}hZmCsU_Gsy&al1t}d#*TJWvoEQ&Xc{n6Dv{>Wu(`&5gaIgmR7z9o0uKqJR-Sgk 
zV*ge$c2vMmnRRYnEdAdZqESfzK{=E|LSPOF-UnI3`R8Ae8yXB%M||x`H&t^<%6BUz zD=Ll>@x<-e;rDjMzu7Mfr26eaC!uogupWCO4g{;0mDcfd?@J+dm}Z3v@%XY-h6AN=&Z6)ui->hshw)3c1%F+4sfRX%A||z7Fr@a zSJ{ablv7!($6F3Rgwh>|dW7m`*DnsPhjGs5SujA^0O34G#C+N;xyk{Dru`mkgb!hR zdteQu|IPA#?LpxK!#g!^(wEav%EUcioD*jx4K46OU{62;6%J6;S860CsarNxG za(fa-x&lC>14+;u1L6EiIwz)VKkAv)%-#BFMv?kLGt}T_KWbimF)GseJF!04KYS%( z>?gMSL35ujz#mEtYg6FW!v-&P3onuBxJ|?5&B0@!DF+>K2DLr+4aOWNOE6~2++@y9 zr_;n5C^(F*q%gHgl6U5u$YZNsD9M$Z@OAQbdHZqp&6LB)7tU9CVkbJ(*j*1a*gdkq zkw?l)T7IST#y{!wxWiee8x+P15iKaHLSFGe-htK=01xX@y3flPRk>O_vQ7hvM)IdH zR~I6cT#zk=wsIa9=|lpRx?rw(&-j<%FMpv}OrUba4fmzwEGEIj65MoKIH{;|_bzM; zqru>|L-MZy=!n40uS{+k9Z{JMDA@`lm%rZg?0bpoy)nARQ?~IXEc>9?Uc{!RCRu&m z=u~98tCSs|od8YF)c|lT6Spn~bi&#y^nbiS+xj5#!o&Zp<~__$81u3Ur}{UE`M5 z9mpkDW~a`;>(WY$KkrZ?3h~brx7Pm>{F4y#PIc~;1E7|slwL{?si=B8@_G|&(2 zU}AZi`6x_3Ppwyi%Yc8b&};Cv*t+!sqVZ2sgW#I_`?VRPgk8D&kF6@*7#Ly=3=Dlj z_tXy#t6YzoK4M~WMWDTMhSQehAwlU#V(I1&pX|~jIi0jfhVJB)L@NrtQU@>zQ@37P z;o&wYjU+ph_3Y#vRMHucU@L29&Z=UoWYPZ&(f>y#w$|{-Oc*Z_rzt?Zm8#|l z*$S1A+wlV8Zvmv|)P}cDRSZQ{VLzo~z7|2;6vRqxe&y=NyVtIGf{C%|!l+Ma0*enE z>di1|^tdap?_)ZSd?8zA>KzTx)cb8gL*djhCv_KeCu;OO5d%io{_}{CzSdSfY_dU`NmM-3RrWOM1SL;`R90Gz!sKzT@4i4tquHj9t9GRFj>zrl-0< z77**Gxp{yqlgFK*GQuM112Y#xXYtY)tl;Pm+x=ZHOU^OaT4Di~9^*)UgpgLHD$eZP z3vx&jW0G*qU}Zg=^8zVOm8X-yVKyujz%H0}k{FSe8PzF0-M-qfCxOu+YtGykaC6jI zxx|Fm#8hl_#Rku>2qPZFC&gAdi0??RSz;I%n14Y6y%?w-j3cGaZ3XQS+7o3!7(OwG zSH)^nxr=i*F*Os>_lt`MM~RCA2;46Gnccr}```MyN_V~%Y+nm~;GiO}>kJ&o~4a-9LA0OP$@4klPX7MF7 zg;Qq%6b=qK@;?5TUeGnhl56@Fut#hw-Z#M>OkINZJfcmre1v^V8~cAfIw2}hGf-q5 zi%)bzDzm5@D>K0OK141Z5;hRvRxb| zglZ|^ONa0aE$Lsu!B?pRub0XDiyEprV+Mwe9WQ5XRwVfbdR)+iL*~hwl3imlD^I}8 zUlMYhsqgrCVf7782AStOa$0-w^FH3P2flx#JcpJDoniO+fRkY}1;HWx+{g)_yG$5MMpNmzCx?h?RmDhj7q)LZRVT$jjPgc^RQiAv4d&KLms6p(#D-I~ zM>#S{#r8OcLTsG2JLZQN z@(#Cpw@|wqYvna~2y5QuH5;wJ10~?>@|h3{phvieinrW*k6C;mXnwP2#jwgLH7|e(3S`6rlb%kMEnL&bY2qa5m<5IuvgfRTnWQ3 zwmhJ-N{xy~xXUG1HIAd~3CrMly#Zs#i5;;wH+tNr7teN2?}Dph6qTcG7E2W@KMXj5 z#aFdPmr5zN2VqhDdzA4$p80w8P9bD60BJ05M4y{y>d}xjzkhjnYSz4;q7gQs9*&^A46czvzZ zh>G3v>tsS6mOhb2xK1aa|9DVW>-dM!^7gsclae%1-5vQOr!4_A9+og@9yZU>e^WKK zJwld{QDbbCUvOEWt#>AA?7Ynj^n5zV=6l#In)-YtE>N#=XD$3T(Zv&I6X6$Pmc{Fq zYs^6$+vu20Rvar&_UuK4a@3o|(P;I(PM7j-b)dO(8mRcF3YE!kdG*AJL7~(#^5mDa z_2366j6mZ@ArY*s_m$qYt1M|38NL5(8k) zyUg3d)UU@(CE&BRU1>&?|3rA@6b-KnqVsl9-j&dLYq0s3v;M~tG={(2PyX* zubp+NeXErs*`X11)Rw0Lgb<*=xZueGSWBLwo!Gxy^oLv{Z~gZ+%qP4XO=EmTIe;lJN45C*s&O@SMRKc3?4b7&a#X!d78DDR$3Vjbb> z?E@mVf9lw{03JsrG~x+!2=F1wwe_F>n{X#YKtdw;aHQj8xwpu=$6YTrzc$`!er~SE zeMy_-pC2d0cI`z4-I2r@e7^_ZTN6q$Hu*cfBqjEy&-|Ond*3QzB2ah}?7y!E z4pLO~bM%GT|;9A!L|f6b#3nA)gHmaYfG-*f8=Xf4Cu-f++@Qa!yv-~;fh;#=QO zet-PzX`!B(v!R>n0Sw6Gi{7?>V?aK`Kn1>cXY!v`2%R7@;CDer!wkO-Az9!DRVN^i znfe46u zpKj+J7i0gQnF@&pt~kVY!mS?|R&oyDW~drzHjsfu6&(y%Usf(f8} z@IUK~*E4}{u^Ua+`e!(^fyvP&IlDSY0}-8VKfvWnnZJ;(+0-)&Udt4086mDKz5p`K zM(WTwf2x;!i9~_&b={RVviIw}%RYAj?-OZac+8dN9{d}!^zWzPKI(E%Ntx6@6or|8 zSu420#0EnqikR7xN8|nPZFqY}EKqMu8;ZTwVo=#mM)l;G}4T2 z^rpj0SCVBQk9e$VvE+&?2ku2WV%$-t1oLjJQRYd?=eUD;I@{vyME5N?K+w?>U8Qp6 zY&6s?&Ea%O_-V#P?AUYft=WV@zNA^8U>@xOgD_lh;15BE1^pZdGM^82WQh?zkAQYzT4H5v3;()LBW2t5rX=%3I!YrLN1i9kUfo4eSqA0_ zLiecuPklD@ri%_69avs_H0X)1`YZhUm1=alOFKy7CRerskQW|ieqQ7?0JYoWEYYV6 zW`NTDC`r^m%j+M+{lBpIkSDTDi1WVN>R)q$hVM=!=I;S9$A)xtUo-98L5rcW%SK+d ze>O3*&eqewd3j@_{Xh53=q+B;-rzN066uO-JLoOu!TD<;MB-0#Iosy%uI1B&!pMWz zd%nG;Fz_x8zY~n{_}XWclyZue(fDkU-&(<4(;MrS5Vp`dM*IZDg7LkZ%H52|W4h7o z+r2RpnTm}s+ky7`<{2)(y_}(3r@A2D`Vrg|ZOyY${~uK!Wsw?_qS{8uOOx_dDJY~f zG1=I*sdX6PfGRB5*&n4IzV@D=8H?9rDpx6G|Hq9jVt#V9a6U$PsaKuWYELONTgpu_ 
zwxGIY99AuXaDa9JJKZsS9QLco&V+lRr{7$tQ(R~Z=|57Fdr-BRyhm9)_le}%!p>2O zXiaXYF4-nojx>Vu;v-)`W1aof!c;c@@tYHr$L45g!8GOORv#8L$6L0+u5JhS^X55}0SI-B6F zWy*v*3gsM3#Qu|Q4*#nA;W$e20^n~3;3oavBfC+c0JcK&F~QWuvX=bne2oH47oFnd zMhk$LpB^wQP?5$^es8Z}odVcq%NIa&LA1jR@8FY32JzVco%qM6lTHLD06(9tFFq|4->e8A?cYkX5pm+z z+Fk{m*rHyZ0Ig1l+n?}+4m)1>S9DB*%iz5#b3HS1*+L#FGH0DT9s4wIGYo5Q#b6M4$!R&tq_e4Yv-$jD_T>X zdit#PFD}g10I01pKBn%z(fO7$oUj%rY_>s!CJNtnn0YxHUM8l6#t>{wGBs@J{I`G&g^6 z`hKW`W4AWqAkumt?8iA_AcDlcIbc~fg13OD+N1S@Cn(J|zf{LkZTRZXS>qYT+tklh ze&8)Cl4Ls4()+O81`@QB=xMg!{U9cVIN$v^;E7y85>i@=j~}$Iv~Kayz!CAh2>_C| z1_p`vlLv_bbxkREHyKF=H%2FJ$szq0EC)#e5pIZA?~K^^LTo?bF)k-SfE;aK$GR6b z9^G6eG9UuebmRSYZIzgATAi1lW)e#YBNDmUxYMRIwGsTnEcwhDvi*kmRq=Db_?j<^**9p(}qt&W$F8IFtNmRreuRt0+_~$ru7XGxB zuiK1r8&PyT&y{!)oAt)L{FN->&!0->Myd`@J@CVe4A3NOOd@&K`6k9#x%W9tdL&LR z8gJKGDW@5q!`VgUU!Gl9u@jTTnRB5mjE>^^nn=;rm5>zX*% zou0c0UiN55f1|hJ@}+*=oPG+9!VjFq8oGAXg7WJKg8E#rGWuN|MJmon2;s4u1ZD1w z!|z?TJN_xX1FFZf9Xp`J22Z}kD&4v*E$I(N*&UxzJ|<$WFO_pkAm#TsISo?weR*SL zyhZJ_8>jkE$tUa|8fHT761$UtiRZ^NT_EypgsdAv5Y#fbe|05lzA$!BF|1Z)J}mm7 zGzQM9x%hE7FU7}p^hn7#>wO2GUsgwq;u4L$_h}gWIRg3K(N~4`P%^G7`oP+~4veos?rO62`=kKda< z+pBd9ax4g=3u7{xJW8ykL4kp~k#cz__He9&AX%A?%-?$Zh^ zJ+BwEIlXdjvM^UGamxqpXwN zD?r^ql3zib4R`*v)Rekd#K8FYe{YU}g-%+4?_Bb>BZhk!AQZgFwZ_Ge3<6ps1eD*X zO8Kmx>@>1}z#@qSDu6H~AM;L#$-CpGBpyaAtwMyCEWY!&EqX$t3AX<>#z8fDI!Vog zB7w#+YnJ+NKa!|5Mt=u6mmu#zjuYnHWRIr&$84u`t5GV+R=QZ@2&%(+EL=pnYm+0$ z!CkzckOUv05;K~72b>niwCO+PIqqI#)M~k?TN9nAh%<~R>#3!;1s1I9%7QGV+^DQE z!xofQEmz#nmzP|(p0|O(%$&@fV=iBOw{*s5yF>v38t8d5|3us92q`Q$kX$}&rS}6< zC~{9nqDl#W`tMDz7xjr6!`WcGZ2km$wd33qr=UvIDgP>yi0@}nIo7SKV~}bTf8Zq} zxUsn5E7x|!wKl0X+Dc?oEFGizs>^`^)HOv4sXa6G5;W4KaQj`ktJhJBCiCOJjv%jeSWc-nC;gLp9KQ1IQ9L{qCE zG=*GV@oWz?-Mv|;pPxT)v7ZNUYyBSV7ZF{NEwmisza}HSCqg+1l0tLeiK(#dFT87L zvd^3^s1iXVnd!hWe7G7KB{wQ9v&K-PzR0N+G-040-sF&|CFE&F1OlZJ{6xdYp9t;t zmYClmas3WL!AB=O9UHSlM{P2Pv+AgV90Zo())OMW}%T)PfRq)sp=wUB#gFjfIt`rgX1+I>&X}`bK>F? 
zJwjj0(LL5Ajl?<;+(Qb0@O^tmpepYzH<@G|6jPe^M}p2rUiPt?wj%9p0VHup3Y}SY zhEO2$B zNW82FM});+4*-CDJHG_Fza)E%r*j&=`vbNL_A#0es6&0Jw3Ta}>)|;#>!QKf_{LPI z(cvjT>LzM=xbwN~Dw{}{jUiHSnd4i;Lfe2-q5iCEl0I&BAo)bt)sf!nIvos5C>QHc z%6BdrNHg|EeIoQJ3WIPdZ0v}M_Mde;HaywwuiF7Z&xsocSa0xx; zo(3UqBZOq4Bb?{MTsyjmxq!ex^D8&9z{M4X{wwkuCLFXwI!@DAx7)vYoo*(mLd0{a zlNKwkFT9VwRkZ!19QIg~$a;d@XenOl!Mzpa%`3;4@`8H|#|`&3e2I#ycsTvOd+W<0 zdp_LqLq(Ni*U?geV{MM!^CH7Ny7n=%TfPzc6xdyg6PTM@$Y;VUyeq#>UIMj`I8_Sz z+QaMAl-MhZ^WI0#-K{u%g;FN<#rsO_iT?B>=neqRYT$8ym$8CdO`5YvbhI9)0`|2N z#iBzYw~yPgz0~R->TD%gP8MmzzbT3c7JXb+#$lV{fzg@x^icxl&T*c+;MY54iyVyw z(tYMo;;cn_8hQpNr0FF%*J272oh#)J2PAC`Mh9PpK8}??qx@MybugQtw)71RFC(?$ zx#(49RU_J`DF>yb*ULUh_>h1(rzf^2jWz~5`euR%sa={oT|w(t5sxCH&}i8WtCeZl znKYGn4~o|@>T)6KZYP&Xx%>!_LQ3?O5-+I;S|+hMsiw1q;6aYY6UsZs#bAG&yH2)< zK%NTt`O7b)vvKsYVXr^_+{P{~%Kl#zs0aRf6`jsgdxFe~qT`;Bh_+-xM)|1FeeOAO zon3qu7;5sp!%&8NaWu;t6R2kq;{JZjj`mJBrVOiM5bT4x!IWrGZ8;}R&qGX-fgbX> zz{5mVWTe^jO@Kx`?B#B1HX99%pEE96#$#vTvXZe6K=!X2qK|+0^{pWf^M;0rmmo*< zE0zQ!cKRXj90AarbKH82V|Ve8GIt0vv7i$Q0(imW)id_mIAI9b73Pdt%%`N(+Vs8Z zpMx7ts-|jKrQD5^w)Xlezs_z%2Y;$$cq);Z#)34Uou4z5HVt=ik2CFin8H2CJB7(b z7twZH0X##H2h;QwkB1w^hp!3pa|Bx|P3;6d>*1K|_jnl>qp;G=iRWB%U6U<&cYE-K zBRcV449{Kxz5<>lMzzP!lQhGXknLZ6#fv9j@l-YPtGkeSzjQmf&m)r+sIsv!^3B6Y z{iy1n!VP|F(I*;it%7bvTWsu0{l;+Z>D!;hl>I?LeQhXFGl)AxL_QKRkk+{XUGm6E zcX8&f2JEHaY=VpTF`F{5bEjQ~BZq|yD}k^kk(G&#sIOv^vj9ZJ*rQZLILgGqNkH-`&)uV zp-uvJ<;M3XA8>SZTlg=5a^U+9+lsmKlaG5s_UVkq;<5OW&YORe4QEjTfSgpqJtjkG zDk*9AK{IG ztd{+1^7sVKY3Uc@r&Sgw^%j$(J2L6HhIhi{z$19mQGhOrpx*&x?kVd{(i$3}Ya)!& zJg1^{sd4H`apc8bCWe_DqbQ}oi>MoZo{2E+2N> z_bjQJGsqJc<~4rIR)W;zIjK`@a7nwHM3@^QzDk`J-DUEV4S4e5UlhQJ#sHdpI^h}H z7Ryr8HHg?sks>bc^1{70-ZsDIk}!y4%S`! zhFJ^~2rlEzRwRzs41K^5^%+UE{_fpZ?tD;ob^jG8|DDf{Lpzp=_Nytf5l|cIXF$8Z z_fjM3@nrcvSyfZnm$rbp*(S;2o+v8F=N`%J@lUV+KMmdg>#w5UXKue%vJxHc69z%= z#R=YBZaZi`tF0h8R>S}#HN^8ZzYBSEn}2!%Bn19xA@3^DnLrE~Uwj6x+d>tSxV*SS zf3u`vPtkwihRX?J4aug1=!R*R#2K3Q;Y!deE_S89k8KfioQ-F*gTk<@ZQKR(ordg0>#?o<2|vec-XM-;ef3(xU+-F4oUbqO^F?NxQJBi$j$|&jy8+;p%CuKQsdN$foiliw-6^|Hs5%Yw2B-4cj-DQIspgdn}< z1BRLjCH7fecWPy}E6|uWo(2?RJ%$YEecA}+blU=xAiBP~35YprJpyiuRUV}ME%zzk zQ-~|CN`6+-bsxD`rnH-oKDFe3i)KsFuf=TpQ8A_KL)DUGVr}Ys4bSfIH(kMJw2`yc z$wVtr-gks%2;FW<>jqDA6JLQixVF->dLZ!hb_2^Z?-eKTAR*VWX48gqY+kq9jq@R11t~l zu}15bJzNM4x2_4tK3*6|BIWa*IA$P19!^pRpk>P(WhFt=IvjyEcJ^g}J9M zSysn9C`Uh!;%tEQz{Ad|jv)Y9OpIkNXKSd(a{Npf55=P3^V)`HwwO)|2V_|r%8fO>rx26x(325^wAvu9R}VmGgE0 zMa{33%E(f$!kUQFbwI{*tREc4-WrlWuo3hV{jiPO4_u?&!ey=>E+aHH@SaOUkZSVd zLOO+6#|jtc`s~TqH*E!ps{nLsCk%4LzjtZic9iGb52`12eJGe7Xj zbGXx6zhTTxVgEj(Crdx|Ll7Ys)K%1O_CldeujH~vh7be61n=-OX; z-D(K;g^f1CiP_Hb`WS;!=zd!nXZ(6}%eBFt1f$I>x!jOhHn%V7XZn145kC*w=F-DY z)Kp@<%NBF=txUgGt7naZfH(vt6q;moL;j`C!pQ1YFNW>THS{Hc_NYiq!-=*!tXl$D zxj+skf+vecy1Ixo!h&D~M)Uqo$13W^nzfzi+Yf95Es6`TmQka%FYK1g-a&;v`dz6; z=p|e=AwRRkN!5ymqq`+WY4_Y|`z_KwV9z-Cyq?6b!-Ywz61KRg4Ouw5ZE=uI?5lPk zB#QxWP{F6Ce^snwHLtaj#eL3iQW{`-4=~D#RSh}Lv1EewhMJSTW}>3~z$F;73T>OV znAEX^$~Z3d)pNRKnD^r{w7=XBp|zIk{jDuIFowRmPaQr6EgHw+R^z*=EyG1>tMo^oh$YRG01T1Ov-}alhIS4=y(YKMy+W z?P)e{+h9I1QCn|X+rQ{?P>Zf=z}!ApW;59I%XoeMN%TDphW7fDD4R(U=q6_wc$Hi*MGj}zn(Ahy4Nt_&nb3LheklImh zYgu%%%NXXO#|`{N^OTt2US5re;PRU{$G;_kg5`;G5zeo>tp^Zknl*>0E}+AW3CHr# zl*ogFz7{d1VSPx&y8a#uU8Kk^ z6JwKr_X1e<0)orALu!C)KP6q2if$lZUe=oy394AW=QCU(5}(+gxV=A9?HdRtX_(9E ztrJPm6tuSk*1mLSLnZfSeO`UQ&yD?uo(p!xf}1FbSaQA(gFa2;t{+NLkUGF~^Xa7% zPIKTD61#~T^|n0mZ~X`SuXF>6-~wf%`5pF~W5$Nm27F^M78*Yl9^IVfsePM+1h}p2 zblnyc@2FL#!O@0}W;QG5Ue1b_tNV_&xXU8VleY4^-@F#Be);x6y`crikZ&LlHV|`@ zRm!!h96AErZY^J_ABW*c8c->cD<4DpT=it%+#-nzZ=~D)YeT>&vGPiD+sUu_W9vB& 
zmyE6`Xf?wS?Whc0rU;b#DR`A=8%2wk&epvCfsZ!C7T1_JCcC&V?@gP8b3Auvhh(PC znL56Pu~o*um9>8|wpO_Sg_1?|?W6k*iD;CA*9%E4-qVVXmXB1-rgC3y5AXbo^TZ{PJN0WVHRH8)~xQB`ewn3-JS@q^k~V>!HQ^SF+sHyhBX}jIJr+5 zyEadqZNf$@L~&>2M&B$t;`u_zGLz>upncOUr*Ew5Pz`q7tHeV84x(=kPKVrLaOy?z z#s>#(KqJowxFeopzia<3D6~oTba?Wj+>(uZd1qM@EBCAPZiolJ$+L^iKpKSgVBKt0 zkPyQ_o9(i~mE$WansV_RjP18;dn#4Sdt9#^icZ+Z_nCAXfz14;h(T8SlqFJyzJa1YD1 z&7$U5_b|e7ATA;3-2`TAyFTyeXP`ro(Ht9aepLbwX+Gqyg^(69?uA1IPHO3}RvNr9 z;#KUc4Gh0JCNT5YN&AlIa@wmSs4MT4XrVR=DFq3E-$@<6>B7x+YsqasDA_#Jy;yxD zV*!p{g!!Y%m_rTiM`ynXxIN#zGyYj~2lJS*SykShy@AHAOV3gwfajpM&&g0mvwH%x z$d}Cgl$A$-xk;e7#wQPGHGu-KFaAxwN`x9}l(e6S%i)#N{m-{UU9+^ASTv%hC>+Gn z!$aP(z-wCLd;FHV=sFf~k4BMFpvf7cAFFC~+2$z%O_g0^f$B9oFMF@bLl9x1ATdK| z`iSR|#kNGhkAR&Qlz@+)y}m?W^9&8%0S;-x8$xr@O1b0KoL&e+D4BwzQn2HBLV{!k zW$BsxR`wr;621ptd?gq11kP#WJ_BsvYHyow)R$UvIzks`(y}g8b(Nv9DasyGP6=mp zq48NH=0YEaQ=T^|QG5@vIFEvOL~i^TZh1{&@972bi;p^NU$J_p+0^rwYr@zh4%yUS ziMMnl8ePRW6xyVDunH>t)b5M{gv1S4(NI{;A+bnd2cY8(JhTTCo#x1SGMv@Nygt*3 z&o+rul=hFFA=@6?y#)A2OM~g+g3J1=C{La;I39*HmUu;((z1kHPm=Tp&S*U1nNgn~ zV{kr5YiqJ6cw2>-PILzDKn(?-c z7d&WEhdify@6!yw;j{2rX{*KnT$^Nd@6?r2TiQpz$Eva~lFU&_R&6YeEFCSML5Wtq zBUdn&DfLyC(Hi5YFdhG9i0X1K1v7~?m{@XtP%Y<^oa2EavNB}(%QCMi9^>QvauxD| zE_(%c`2*NzeOq#?)k(S(cda=Y?HB@jlo3u^P`RNwq}7ho1bF=E@upx8f`jy zibju%4wn9}V$JHgxwK*LsbmQpYeQ}fl7w?!EqyAVpBdQK^qtxLUrG)Le@I$4{ z(3^RPq>p9KUfgVEs_S|=5eckYM#FOBpajtScR5~5tb&9=-oz*28!IRJW*2j>W}=+! zxJ8~o%w=+QvBNjX7w(IE=9^Afd#n*eP0t}AK!>}AiS4%PnUEKe6F|hJnTfU5u5!O# zy)VcsuJY@cb&J+a9I9!XaF7;$cI(k;w^Q_@1xMVEAUtIe+1mvA#Naq`X^9&}fqsFw zJBkQTE6V5NKA+to?Wem3<7165ws!Mtt7TjCBiwLdyN;hX9wT(f3$N{|U~i*EZkCk+ zi8o$~Uhsbb8vpG-XkUQk$G3uq5zVF{R1Pj|>SB{uug7_yf?a*=>f}!^l1qI?R7V|x z(pw=&2}qbbu`IZa4Bi>t>Uy3f@$*^eAQF_pJ^Ff#P08LkJ<8Dx;Xuk%#^TSG)zLO* z34TPY(d9~};G6388H9cN1TvR#Ey5n$8qzPL+XUK40T-D0TitF|6xCKd+J_kO>#N`W zs0Cz3H^~Hi5oD_7bdAvx@ba7oXzx25XdQKZ#Y;Tki zx6kF5huxZwVIfi-`C0_zVMMRpMiNQlBk(|kt8s2=Z+%uI7roAZ&K}|d32`8)P~p1j zFh~32Bq)W0Dz;hd`)DF*RUIBi17R9$i zTzqxtMiy?kNpFVUI*g5o9(v_Z?A&S#RRQB>HX6wor)sqJsBDUbLATyg(|YV|Uk`sG ztajY(8#ZfIR9vVoo!fOlOLbk={sbrSrQ&@yJ?Fulp;p+Ph`ZEKx$e{8@OofNY*R(y zH2IO@HOT%EF}B~q(g-A0zXW)jI&!Es?yNW7v7;E!Tbx|^1~j$wu}i8I)|fELi zW)oi(`nHlbtW4cHWOH35&ocpkEaVw(mFHcR zN&`BYQpb>f37pvFpyGJKREAcrZa+37H(ZQ?Iq!1jFBUO-&^Q`&@y&IFZ?_a)b;5q- zF&q6u>x{AJrP|s#NiDjwXc34sn-IHd`dp9h+X9?LVfvy&&f`55am6IoIs{O#^S|BE zq!axpavO-Z{PzCx&PMsfC7wP5)@Nj2|GMc;P;eQGgwO?$-fX24;`v8D_P==|a#RC^ zM)b`k^P326klpH~3b{Nc%G@Lc5-U_V_g0`dA%L|CCYb!iSEV`u^p{o{GN%RGTy~y5 zO#8zFJwynPIRgMY)izzZ(5B<4Z1r4GG$LD0h(bR5h|T9KZcLI#+K>+RDPfSM#|@rQ zRV)kPhjgZCs0Wl^T>A%cxZf70pk^x`3lpO3Y*)GdiR5jcLRc47t4TZQZ<3VqU*khG z`J6H8(zigy5ZRRX&f1xhsg6PbR8>Q@)s)6wYt{~!+Tz!s+uydN40Q~D?%|v%9G03F z(_!ll=I)3SpQtN33X!{2q2_OdKv^%vpM;Z+mj)k2yl*^F3de0n&VyK43 zL=E{pwz(M7Tg>MZ`GhC*(OW3`eR#{C@3YfUqwQCw*gJ~)k4?0BphR>Nu&BP=97%g& zx6aolB=~WMo4b&-zMfV7v6k|@qsBAF00Qh-xLX}UIVZy+sjvrnpieGc7Ke-Gm`UG1w}C%LYxbAN6&K`V4o6>*coIcb%=%2_=jl3eV=-g^ zlis@OjP>v-NhG(iYH4IN{O`J0eFb%lcEwQR z*rLB40PN&d8Rl^98zw!>%Ky={zZaW>N$==LK> zHF`0ifb_%6<^6!n=Nko8tFZwj^GP_pBPwRULSmvf;tT*M#Lu%E7p4ARg;AlOHsa4c z<$v$)G#rLBlGq0xPfpvhgXx$|rCC2wCfkcj^c}UVptW)>~84=J7!yqpBB73FUJx zuw}_bdrcjCD;h$}x4YBJnAaqpUtdPde|phl&0eCNaQJbt`rE>7TTF~!D?2q8(}qUH z(lDOf`cY(@-Japn{7)>iQ-Rl4?NZcpDuSls%%JGJXbC6QX!LJuZ{QKjb#hON2I?UC+L$o;UrJH96| zHH0^N^4(`xF`duu6#_;N>jq{>D&&JQfLaFzaPyD@u#~X=&wq74c0d#r#bNY4@z3ro z;swlD-s9__ULlr+-*&-_0P;OB@2YRBFG}${Pg?|{eAzpyf7wIy$9>@T8F~<*`MF+ zyr4pbzSGn0j=;kSL8owke5D@|lL_3u1b@Kc|1vA^3_(N>Dv=nn3jR+0ibQT+yU+(1 z+sn5#4o_o?x`yxz=^Q2MB<%Fa1p3c#O00^a2WPNWmSm3p#Nk9NPkS04Au9bAwXB#9 
z20m|mcT>VYh^;G7T+k{_cjOTNX;eiVp-_nUfZJoY1+5UB?eAxqhlhYT5!GknW=411tmdh$XU<8t9zm1^v85C(h70}!a0i9d` zd~>EJ0IhCf{3P&P9YwtGFRJC^$-szy*Jz`0s_)V2-4l5fFRN> zQqs~5(%s!9Al+Tk&Cn?=-7s|LFbwg1MnC8L&N=V<{sq75;^LZX=9!si_Fil4weNM` zYXh*Z$w$CyIbZFZ>{A06y8ck?KXdjkv7j>vxXEuwzt;b`pgj_>Xg-3)zj2NX0U!8= z=11LKf2;rj?thQ`+Nt3d?acw)OIpmDClCCjYiykAnEzy0{J@12%RhdK{{3wViNIGD z^3@%@`ZH|a5a9g~0mXzr>&`MY5CaxO)Lz41bB+OShNUw+T~r}DDt%)_`)`@*|M*k% zhwo&-;7Yxq-~-AxaBG!(-oVhW)9OYFAr%lC^tZv}Gm@EXj>1DE6FQi^&UUSom{{1J z`9+~>wI0Rv%;x$`wmTB{Nm-bIc#J(ACIwP7wSv2b@Nz~yrsPIXhw)VZ7*YVypHKFI z|5kyw+t$lMFQ6>4>zUiB=Zi-qYPKhwM#lm^jy-nmEYgG!IX!0hBmlrY>e&z^t+({B zZ)}i=2l+B80o<(M;r2Jl@`~b{KJ!LgB!7na#dn2hApS~yT;cmUPpvx zXeoY#P!4^iAgPYMHl{eDVlN&de5>W2SykX{$fJvnNVk1iKhkae?cJeZy3vywtiHzEKuLvdoOg1rYpHNm-w@$Z}%PGt=Ve`5Wt6> zqOk4$qqly5d+TcEPsxA0HNm^t_g15NS|0)}A^5U@@0s)@FJ${of7D_FsYjw!k1Ch+ zYCP5&O_XzWI*)OA->>(5<8wSbP4R<4h&Fm_gK2I%{0k0q zCCEatP6EjU2favdn3qF6Yd7bpM?(vvISFlYD#Iv0o4#P&BfLmuV0ETn?+pzpYfbhq zO2Gkl`A5*ie5e5;?J|^#^&|MUC?zUWc#hvhhx+6dr|YHmMN0K64v>_c9qr~I^VT5KUisCy%0%Tryz?6?IQ0_ z97>XFYwqJ_U_8J_O0-_$IPP;O_WkV0@c!4l;|ptfO??pvv1u%C?zoDIriuFEGnM5R z>>ZgO0&yB(tC({AW_EpsdGBP8e>3_XO+BW;RCl>tv`z-9Ul^lpZx2kqIDL-BV0f1f z@dv`2Hp<^|{@4F83GnrU&)(wgpE&t}k`kWY6q>^kWdyTuwMP-;q$ftgxzefaHu*Ge zbp1i0LC@eWN!AJED93ZrKv)gOw*z9Pa}e-Fqi;V)M-gL!A?dB5H-aO=%k~3o`(?ai zCg8OgMr4pA8>!5SE6$uEO-GdzQJ}aoX10tpJ>;Q*Zh{K!_Wpi#T;HA5@O{4JJNlSg z4oS*~!MZwE@+!U|K1E7c`a@~Y#>0KrO;ie*$Amw3(lQaSdAR67Y9}HBNIXJp>WM-^ z0_qroeE(r7jY$DOuKOucJPMa=XM*3A{L~fig3+Fhtc5Z%z*O$C-+tSo-$Ei5t?2Dh z;1^H+O2&(>;s1L8>+81yl)5_=H7zr|?kKxD-Jvzq2D{@oq1U;++m=#qoO;1^Ez8Ri z;03%>52-h5p*YLT$(#R}zV`y6Ci2-6)r0oB>O{*;w9<_*#sRD($S8fkV64}RdnXv? zZSw|gq;{jvL+)=GN$f25x|$s&cAf-c-UuC@gE)Pj7z#tIcX$n@&C?Z!W1P-Ya;0x= z@wTm&J8GLTqCaKVZp)_-R}4bAYz;YGE`r4DGiV)JRp?nJLTdyZX4npJL*7@5*~ZMO%8L^<;s$#XI>TKE`L1HNQ#^TKjIvdbWJxr? zr>48=3dVzlAc~jiq+y8ON{%MN0?H#5ZPkirEKBY3`w(~t54778IgQ~{{>;hn0}buW zu*0!Y0IA>Mvy=+nj#^u0Sp!|C^WaEMRfRV(>W${KM}i=;q(*OTVel-uLjzx+`p>C~ z#L&%l7Azl^p(N?R#B#GSd6ifF(Ucd+$aTFoe(+SRQvyH-wDXEF_pkJN);Qi$LdLpn zHprqx2rL;P!zlnHICJS{Ia(KfMm+JI%7y%QqT3#etxQfLy5$!_$&J3s+Ov~QqHBwL zqwW}!gB2d;2m{+F2q#JGcJB$7+fU`AZDZ?2A;Q*-0?EMq0I+AeAZpu}hF8y+wfo3i z91T3rO1-YH*_ve3BkXOxNWP4Ys%%QRpqpx3E+&XprQD_6YXyi*T&>9ttw)MT95Rk( z*NTq{-?g&Kr-vk0s`mJv3}Ibs&f09Dc&gVwbEEoPQeGEJ<)Zf;A{ll?WtYSaV5^{V zMXZ^CrNvg7pU1acx$?0u%fbx%h&InC5=6?3;k+sYx5ohXaMO6xm&aFWv0_Th&yrEH zX&n*3k=Nbl5JvRoOV1SZhU$moUBTKCG6;ATvNZK$kzoSRO4>%i-5U51Ann zo=WMe^dF65w`1}d>9A60r&Yc~DhOd~H%qV|^@>MbwxcRp_3_qnE!vGAeu!F@9Iy~3 zbO#VQRoWlZ5jPUdqDOqQ9___$oA)H)*=#xU<`1LdbzXNqW0XsL5fOUR|1h`8=zBXa zynS+hrWH)YM~%eI;KwQXxX*IvH0)ZaJn{Wm{dUP&%U}#Io%;7*k&&BcgLce2?|z>` zHYJ!>#_wtL%b`4^i6E{OFaPW$MIl&`0+>_!eDNcLg?kgG?s5r?*(9?j7aW z5?mrzMo%vakkX! 
zuQV#6z-|^v2$SD7uRJVX>bCS$gvr=n7FEoA3@PV?oJrFnQfV`r z*is$Zu9d5A-Q#jq@HB3-)?%JL4q@^OLsPwTbXWu`U&==@qqGiQKa6u-a7W6(GOqQJ z3!uORNS=ap<<^*5xN{Nt9^N39m2XR3ACX7p@eGGGATlxXA8&nxayEX4^7Y~0r}t!7 z*g*My+rz^b8kA^Z8u#^~sH864bEc8pkrWiJ5azg+$WxqcR9RZ5-xO1_@YX;so?r;Z zQ-3&9^?Fe7he>xDkb)4txnG=GXmGwmXU~=K@3~7RZwq!@ivdujXUYkZe-iFhIs{eU z+k!A|{mCNfx2~7@F*kVj#fJX2W!q{`p)83O9XsAn$X)t`2V{!A>7)+;n0!ai0_4Om z*au$(4CH>=sfk{U@+^AVd;UJQzES;z6oiRfT4i+O^U9el-8 zf#Yb-jJTnmI8dvxK@wV8_S?3wnUhcVYdUcWQ*F-7iI+cqF?*VXTzcvFFKt&O-_*(` zf2Il#5>!{V?Ux1!To_zb(WAtTBK(3bhY1qCkcflUTQ3LEn<&f36)czRasQrKmWhba znE2{~J{8RFD4kZr5v_^FNXs+dgpv&F8ZW}(WvRhhQy+_Ijl>Fd$5xxL@E3}-9w^tr z4bCFY$1wuMzZ#3p-%n2K368MpgCAUvkT8y$L`u=f0dro#> z2qR4>D1>47YQCOuwi`N%XVKZ>Wz?y@l&{oS5PILjv&Xx=xIn!9Ybmp{di7xU7Zj#JxqlKn-ou6F-gbY%rD~;J^+Y zS)r_OIyTrLuaQoFlX%&3*ow+ghfC7iya_iT*2)NEC`tb6ek84@`b?ANthhbxW7|q0 zWd*xKhFF`ksKM1w$`XtH#b0z-1IEy|Z^)Q$sul+U-~$G7Nx&F7i#Wd0uzYO!EA=$HSN8&%4m+af?6E zVU8o>b!nGs6i73H)Nc{=Q>+M%Y6D?#$VuI>AZE4<#I^5Gh`HwQ*;_7i%yBtda>aFt zR>Mt5o9bB4gN7nRCS4IY8tcM2t^HH(sKMlSucqk_y9YwpRHav!X|2fm(+9#y#JjAg zUQ=aDb*gEs6O!eYC06F)^YJ4A_p*;clWIw2`sL+lcdf56F`rs>Wn&U!6KX1VO zLLT=qu%0OjNwg}UgzPw?&HrG5qqTtSLFU?-{ew`tD#D+P_+Rk`fZMtnNmlXMt?d_` z%x~uioQemNSc!X|)$Ml+@A1G7adtWts)!X+%>Ia{dK=}pe!@ctc9pPh!*JlPigD0! z_FK@t`?l1FRPp~oTzr1QtxvNp`3x=1EL3$sJ%C4+zFdCE;kSq| zWcWKFh3N17X8V$m{Cla8f*gh;oGk5}q`oi`&MeCgAH^MZ2ejhtU*0QJQjOHWp*u|2 z>rL02Vty@Z(aPWtxFywL-*<{%D+Z?s-g>4743U=;yH<$)q)-zi(sjt6 zK%xtesMzz_GZK{qE-3d-esVRl8eB5lM@u5=%+TxZM@gMkQ-)eYs-VLH@jFhzzZ98c`Fsp|vDY43I78W98-S7uz?Gq|pa%2SDH5-H99UT#v8Hu+ji79In$!WJ2 zD2HXIlw!4u{6UXJ0d8f5!fc-vzqcz^jK`OpiQoq?Gqk&rb+ESaGFR+FYRhoV{jr$eeWwG_yPLv|MQ1yCd(x)sps2iFrqD=&$ zm!e7@aL%O^9IhH#!FvVu+)8oyL}@2wN1K~#wdn+@T|0r!+U&8bw663A-_EC>Ju&_< zshqidYic)HAlfXy?uD(AOnXc^jqL9Ew%D!a=MIg`P3S>+%)((@Pp-F2`Au<122*GQ z)xDX)#VhOSI>DYp%My$D91tFqSLX)2r5O=hK0cvLr~}j0Ki3+?Zqpe-X+JiHD7x+)>A8)yjuCS;HzV3>s!u# zqSzd2G*NvtGe8(&*y%@AwBHsFGwht7HFPerx6;L>1n;vk>~?z-3UB|Y2T3TMWvhKw z3$mzKVVMgFA;RdKp+WPsP+YECnRnhlp8C;x*T7T^HEpd!8vYRLY*Tp}m*3Imq_MF* zQkylCr5*JoIbMtil&a#eLl46r^wi4B@?2Z(u(C+fv}o9?FWHDhbV&<7yU8f2)JD9w zHpmOA1GfKPX4>SOE2*o4EV9eNnI)WGTo!IN*%{csK<#}cbJkEPCE6(%z3O8cJySPp zM{Ksa_+jgEcUIf*+7Z2xy$n!Z)n{Im{*yHQsk8jAUH?nu=M5D71<@vA=16t!^y(u# zagX`&z>}2H-cwI}Q7u+R=R--2EQK>1^G^}(m>$B!@E z;MpWS?c=QQ-fLt}-ud|kwi?+`j%mmxb)y?|%e>gcfVp$5_d_IgIEFnWgtpvJ9Sf?; zB`Ri5Hs7o$-(UC6?YM|6L!}!7*1xZQ6iQ)ydIn-p5N-&^6-Y+$5?ty(XE69EWE(+1SWwm8KUxU0B=)9>XBEO8=IeD{>Asao@0^3ZQ{*pab z@@b~w6Sqt}%tW~nBg#^1T;j)@A-nCnUh&0Xt&8sdx%X%+?hJ@~{QA_`HO_6K;>N)WJya=y+}A)!4flqS|g5-ybVE>5kYmlvVDV z9o4_go|g!k-2PsH%I0sMbj1!b3`t?Rq7@Q@_AhYlqD*M?Znx2-=Bd?8>=-8w8v3*d@9~%whP2kN>F0~aNyoelhs$&7%ymW;1 zuz-w|+Jc`h7{Kkw@D~4U@&8SYE7$;D_a>1%oR_smKHEq*5?To%Ix%Gsv@cd5(|itK zgZQP4-~Se@gdej2)J-EZ183YxuA&P~P7f)im!?gYF~oH{F6G^qqR-@WAf2*r-8LfW z`tHTKLtcWKG*$)MfK&dYmVtss{iso$*3|iLvQ%s3))Ha6m9FGJBWLsyF1pHYqg{iC!@u^fNA{=g-}k3lIW{J*m;17(GGjUgu$kW5EHB47TMiQLAZBZRUj7xyl?j3V z%#WL~%DR$hhBb=pGBJMqnxgma6Q%IjUCX9_-ie?mg1Kn4;I~s=?WBeYkyafl?3nvy zTY@2-il8_w*{e;EY9>$$*io*`ERzI;Mx?`N?K&Fhy8a@+D_F*_(g2}R%a&@Aj%wV( zr8b9gft?`WtDIDTBFIZE_Yz3>k>kUct(`pQCcsehE}@I`x{m&GVU2hWZ2Lj9)X0+h z<1UbsV#?PFY%k@8^!oF+O6%xJ7n-<6T<>Ck4dU;M!7AAVz9axoAH$Q8SNBE(sKNa_;4X#*gNjw(-XeG2MQ z!Iu^~{X7{{EIuPxOqy|kn($!WV~4{$PWBt<4MT5Xe+A}Ny~lDVb&mNfV5tk6s$fwS zIrJULNOAT20^|%piNd)+Y|Ox0Z6QkcSF%T8EDwd}@s}QzmMLm+Y+_<9qS~P005Q3Y zYtSBT90f^fJh4FR6dLFJeV9~&Qo0g#R$uJEX8n;ME2yjKv5%bR^CW)Exw4iwL!TP& z(~?Cq*;~qK3kC-F(S!u}Z?I;iSnhJkj;W%@>j?lz!|7no?%#M0oalRj2yo;DJ?FG9 zpbHtk?zy1k*?&sNs)*XTWEGYg9tWu+L@QCl1uSdHBBhhx>2>*KtW?X$i*u3_P~hi{Lq0M)sSL8ssKa-)|E0-` 
g{u`$KvcNQF~FDxu<^aGs08MT|q{wyrq+8#POcl300MDO@R+&sKpSy*npNV3>(Y0`ci zVe>X#`#vjsPSiL%>jl=FsJ?x-J&G}S+%Fdb;UbwD6xL$n~Ryg#y0P4VmzPY z`-p8{AM0i9SwD$0ZntlXKXNT&KtqGsM9Vi&*|$kZ2D9q^8ts@I%PsCN^BP<#JX23t zCd_Q#ToGaU@T%rrn*3WQ%gET1oCns<2=tgvNC_BOOf>nYM_oDoMV6(Y;K7xt+blmv z#lZ3tZr2V|wW2ehYDDs~99H`R4-HKS<;}n9eaPoJZ|AzeJm{CpuAyT1t%T3*f}dX2 zeUH(P*FMuRdz%RwSi>0Dmptryu2ke{HC1+HIKye&clLH{ypvKi^y&QFeqJvf`qh?% zQJq)D9@Kxvw5LeXsUfpT}cZ(7rF* zyz)~9kEfHA)4wE4CpPzOb6<)noaqht;*U{q;K@;aaXWXHB%@_h7*}}0O+sAfcH~gG zcKmM~AJx7Xz0IH<$nF01Nyhx=``4DIBx4B2rmi$!ynH7sj^8C(XDV?+BJ^$R*1hy{ z_QmrO<49x@Z{ByF!B4Z_r=JJ3j%;0Xn|go5K|$HyPfi1Vr|jOcrHaG5lchhmL_2t1 znY{XX;ltR;TutAe-or_2bG!|D2d9#p*K%OBqWs$-&(4F+T|Fvk|E`-W?(UZ-^84RB z;&{v=bj!(bf5KH|-b$+S`X}`p-~$sZm%g*ZCJJ0Qb;UcOQLCqT4z?nB*CJi++rDp1 z;W;SE42(9j6}tMYu|`|#%zllhS9q>JvJH_>iaT~B)L?J#KdbVrdcs}uMSqsz4dR=HjfG_$sd8K9JCPej^Ez8@hpF^MBvof4 zwCpu)_?S(UP)mt`D=)vT zpW&4W&A^=BCT|V6tlV!&lSz|1?&3B(kh&Vvl3(%EHvi~D3wH|@78k;auOyOT5xpVu zR~4K%$7Fr9@**-a@+_flm5*hS5;e+P4em*11qq>8GC*BPFAx1DDA&L{VRMab`7)d1 zKAA_d8FA99ELx&0>`(l74aM_L2@PLv?zR`;z(22UtH78Q?r*~iQF<+qR51bHo@B57i+x5SS<&k^;w zb?(gdobR_?w2WUCU6+dQzIC{qNA*?4H91kC?>;kPKb|z+)XF-EdbaTXTY-|=G1vFE zFYhzdmVBT3>$gf?yGL?2I{Q5zp0JKXT$9hj^&72ojKs0t<{$Xnby4?K zTJ+PA`WMe!;cevd{$zRWoRd`6yE`|BZeF>5g>BRL#H7%hTR+c^@M=!l-xhP@bLUOr z#qn+Of{&?3$Gu>PA8UU!qfM5$k#HpkW_-y*$m7I3`~1PenDwhS%)Xj_HIuk5VRoay zILA1z++?=4gFne#3fZ3KB3-Vcn$~p|EGd5$BBAm|KB3_|@`Ckmr(eoTM7}jl)xD7a z^KQ?b<-6krC{!9ctneG^peYfRj9xa=EZx3C%}%RMYdU4%)0k%JQ#cY-mEc)}9luj( z>gCCzl#~Xx(=X0KCY`ne zpAM8B{d5$2)>jRssvvYo&`U@}ZC>?;8o#R2l_o(CRfWKrmv4wS;Q>LxJ%Qs3Fx-t% ziLu=8x$U!6=q71g)eq{(`Gtb`y8$ zHP1B*61tl+;SVvFF^2HWUAJ99rYBRE^*C$eBOlh@cR^j=b8>T?+C=RK6Im^rGLS); z!FExZ^Op<63uG>) zNjFMdlRcyL;mp+Coba40=U`o^OPe2xK1AI;+%K9Y)FibHe`VN}`0nh-FGKH! zI)?He$URV;?)f?}CHZybRYVH!yE2(c-w!??E`L^6HvF+O^L{3&J=0k?HdATC z_@&cJ(cDW(iD`-TvQ-hbtF=ng_1_xeQ#U#n!7!8-*zkMSZpWvN_8&q zm$CCdwitFAK3zc|UMG6@3Vb*0!6)RuO-T;6ERgLwT76qNRl~>s`S+`)G7A|~ny%f6 zS`VT^(%IkKj=wxdY7yBoFDza^G5>iV=5fbk9+QMT;dgmThF6LM>ly1`2ax>Fe%n=M zL$+r2YYceLHKy{oUvl?ByzVkW;x*dt^*mO!3(z4hztc@kQC!(PL;R3*145MSg;+Z_ zIxatM!$j>G+Z5T$$TipXxlew(K|9m>q55m~@lqRlag>Gs$DehTMeCi%mfyx;>gm?I zq?RAAa(y~Al6@?VY~^g>&QF;yg`}&@kC(t2J-blb*mbM~noyEjId?79X82oGV%3kT z%-J`yuSqgES%=NsGw7)>SI1d|R5t3X*$R#_=rGJ%mgSqh_U*@Oq%xN>enhaou6d-s z-uuu4gVyAHn}lk_&g(UsZspHDxuea#Cwy1Nj@D%7eyhlH%kS@Tt-4=aS?!La*)26Z z2~rtU9rPK`uE~EWKO~LcfjNvcpWZpUp(5faSfGACuxx>M>5iVJF{r7zR1BR}aVhuX zv>$D9?i?~tM>iwZ8{`>ONq)Ed(Y5VD+X+FDD|bUaFmryntQ2;c#HC9r5i~V7)~k|- zlB*Suy7wRtt3tO0DPu1v&BQqS)n-51ndy6m~ao zL!oT-Scf%?|AJ>9wgNYVle3GfAAfLm?OSpBG$d$z3ct{t3EjbFK2}Gr^fc+#TKGFq z;KOX$`>_#2jF0&P*_5Hr@1GaPO^M*cyTa*HFJ{_PN;trU;7&xU7J5EtNo$1f$G|RZ zj8byq!wDobhWT^5Vj*{_^GO5;9Xp~@T}n!#VOvNN5bkmB2+~G%9Vn04+Rzw`B&5JD z(#odKPEXf?+mVQH#P-wmTsj($%Yz&xEFYdZ%pHB@UM$^$?$n{#Q$R}b($!MW z&CrlV3OGK%!p?e%g#$QZ1wLA=r~f{_$$FlJ?f3KhSXiEWu(1Dmj}h>__ZJU*_Qw42 z&Gz;g%YNY3G2j#Oe&1hrANum1?XP1tI&h8Us)>%C9`J4A?CzdiJS-X^YQUk7>q95e z5Fc;f0JRX!Gr!-V1|095mOmr<`z;T>G|yNX-VxP-_`8ZK%UzVact-1>sHmuhzl)oi z@%5X34hMc|p1J?1lB%kz{6z(M1qE5)4%vWE--k{ivc3Ul z|Cr>jd9J$#IQx6}J@kP1itf$pbPp2vQ1i^0y@me%`{OxXLp=UxCEtKQw*_oae(#F> zCAo|8f6omJ)z~|$cE=;c)!XX2hYvtAU=6KH7ZomQ{664+UHYFT|7ED<{|r^UcuC=3 zhyKf@e;;ZV;Oegf@c|ZnsP#V#`*ZNWUi@>QhWy^%|AiKR5c>DC0Hw7KYRLc1HLZhn zX;+Q|j1=^^4mJh80WsVAW8DS*o&Vz-INrw*^XcJtEDMV^i{ACCrXj40c&xS`plMmO#f1A3Hl#?Hkd-a0hOZ_Rmb`95B zxsn*>Zqew-;BfE?v_~@-8QxpB$$&OxG;NM}!Ac4CP(n)YO2hGbN{_mT>?ziL><4&7 zwI8wkzdm^9xbG0x7jT)PTQJvuH&Rsl{W0EX+|mDXK~ZhTYX=W}0e>HG%m1%e)E2cD z`LDLOcXK4y1yNp*7&txi+_2Sx;$3$3Danr1a^&9<6V6{dKWEeNN5fF 
zUD|AI6*BpBryef-=S-Yu{kHfcP|D)Gppy_Av45I=9GJdsRNy1%@9Ei&(r{Fj#HYut z3%u*wzp)J?UjHd@&SwGsD3=WC{{yKVKO6vrCQ;E9^taG#OT}x$Mog6PIi;j3g+Cx|3Aqd87}^RrpcS5i@jJ22RkFJtr`FM+?3=(ll-3YFK={( ztq@E)u~z}l{uhrU+W=t`saSpWyE@%=0l4=9wy~)G5B!U)JHZ=`gno`Jnf!D}Ugo)Q z*!|Fn$o*ly7NX6PuT}oCX+FSI%1Mz33#k+EKGjFY`F~e@@N#PsA`9%%O^|O=DMdUhVYsO?K@BY^Ji>B`#B5ArJ z;gxif15?VvKGd~tI##`-(qo|Ghk?AExqav?c}5acS5!BbrlYdR|HmWrSpej0_O}-J zrzhf)1hyM#G)zG<>HeGe;NJ4Kd_pS^9WBA;5vdXIGFT?(D11AL3S80HR!92Y^) z{+-MK7i_J(imN&}PkY}nxAw@jO7<~2$wOiSq_Q>US|7B5$R?aKlsfHO>on}&DVyG) zI!@)*c7>zrlOE|qu6Rdl6k z(@$V%U&RJ+wgKJcw5b@L)H#m7IM#9}vX1MEBscT3$ZlEFrwb-|iSAmIRP4$KJ+B*r z0hZ3P4V%x^SzfL9d_+Bws6kX+n$`HzQ_R@|CUg04apXS{Abb@>sRGhp!GJGT6BTj>SOLe7@f?^(vUcnk)c!(~pMm=7}qc9GzwXd^`m57Ma zuTUe<=2$={A7V8Mu>$?mIRjx%Vn64U;uUhDiBX}{XQG^DFMOi{O{m9nijPMC&YVu^ zmB;MhSER`FT_|B^+=7GHHYKP*n|Lw{$gq2=TA*j$O7o6B+*!V9{fESp^OV!jGX%5q zY{mh%JLMV{%h){HJ`JkvSrtb*5E{i10-SsQmh=D10rxdW$4HuVN$m&m0!>>R_||ME zQERiVy#MCuZUxdJ;DqXS0mD<15jbcy`|UI&!xgEb}+YTja%H_?5;J!7VgWv8en8olD!3a%5PqCf?nkv1@yb+F%a2d91+2~hW{7c`d!Li zWwt2c!14sf?9X!Y!I)$m#;@+?AR(|LDf;*NrSkr8lY~x#pQd^W!tUTv} z@vWVF=IKxlrZ^k0C_cGTUzKETUFcmupEsM@3vIbf_IZOYhXhZw_+-m{bH#DjNcjP%j-^~w=O=~+fuxE0V1}(E2XJnI92+F%I_n+t6Q za&lGHzCx! z&>OzquWOMsO0kFUxNO0zxEtFcS@o`W?!rynAN{}w6Y$N-#D+z{p*xwmHX(vPXdL`I z&b=4{ILy}c(KCOU-+wop7b6bY`hR82mX(g*O?C*I^q;%x)kWA(`Xv>~ygp_OEEOjj<3Ejo)xmxg?mmO{tD*R#!Nm)VYOj6+v8E)?X~ zNUn#x^^t7MOb$HPwY$5C!8FbrLegAcux~Ke^Fd|<0k|iU8k1Z)b9%oPIpjmG^5415 zOPL3hFMgAla)K)_Hs{PTppFPBml2KS{*072zAJ&HP`j;({UA%9lJTC!?HhC53V7@7 ztN1qfE(K}9->gWuwJ9;VXkw(AJBlKrJ-FbKG-jU;PgR_>SQpHQl-w$(DuTb34fr#8 zl)BetBJ#3_G5=IiN-s(0h-%I@9XCK8(m26o9sz7#nrkZ9tIo~VuayH!R70-RV;MPc zJZY^BwEIAb_uLAu^WjEuV+zTXbC|yR9Msi`lDM342X+k?ND6X~-)`EP{}>wU7HlhU zeihhN{a);2=YHA;w6z?JvB`{EkKrEr)#!Eg)3N~VZSqRWqK>=zLP2)NushPut~bCA z>?MpF+i3D-6an5nJDNwG>?5J1xs4#P0%W*}`xKU&Ks^;~(`-iTd_F6;_mU4lol zs4xs27s0?qrwI9eQGjdwph;)6zE+})?N z7yjQCB>_p~+>6kzc-fr!mpJDC5X|G;fY@?OYpBGL;|F}@G0o&i@&ZxLX1pFx?yfoa zg%Y`cimy^%o3=3uk&oEwba-Z}wDg<$d``kM(-dJnQFsv!8GgIvesR?xDpm}8uUCW8 z_15p_a}a&9N2kEv9l^c|1X?82HTQj?5wy=D`cjQS)lph}2f$C4*s%*8+gx{nX!c`f zg+=KmK;4aXQ+cCXb&9Jvgj0rA-P*U)=!pmWvI{@af97jdkr2CpfE7HhQ0gTDc=``jxOY2FJe3VK_45CdwPbrOHkY?+8!+rIXw%rVdG$Y%$$6R@@Un}lTrlbeF zrAlbqSnxx76hRu0XJ<`@)meX4n%Wq(TqZ7{&?Trmp^%A4ollf(Rr&6vaA3o}kkS$D z>E4<41avR>9Cmb`JAH6K5v&!k#SL115ZBo$RxEIQi9Z^NZ_eRaKe%~hxQ`wF00>DF zYnrLWSobryuP|Z%VjG_-EXvE$+vKZSvCR-I&8wskg^~Q8zA$eE2Lc=^Fl>qs-EcEU zKDQ$bMJ3iJlXRA~?$o$pzE_%#ESn`o)N0fHa4*;|lDxd|&((I!H?qc!5X5&58|5ix zlQXt$5tMX->D4VJgTU+_t%#zaUr>gTbE=+jd*3GNAbE$tY~GemH6F9Z6d+g{cfO(4!NWIW>wp`UuTEj968#3ZaDPLWR>wr{(u- zZm-e>%=C{SLipy});DK;<|zWT3CVq7n!h}AIyckgBR9vI&SA34LFqo#!!i|frxFS8 zQdN*uWBl(cW&_?4)U3dqCfu8+-L}`~X8}uA%(>4sle$vwOX^nR8|3d-usiXn5SzZFmSFv#y)oOc4%nUZK$ToE`CXS??t`85 zx+8(rg6`H`YNg;|Ol8kJ_6w8#B6N2t>8i)2OnhVatcD~I##{%0G~_M!&gTD^r0`t@ zLgzCXxx35GLeUZJ#|=_6gKV=f(jqM*oTs9}U&q+L9ebu{@JJ%-lYx9wB`IsUk<#M| zL?jT5itX5A5j-Ar~N_0cQHY$*IKt$Sm*sA+do1n>4x#x$@9I+}`J0tdyt|BQbRz_F(KFF;* z*K+`W%@4X<=dNc)Bx+^-31I{tXn)hm>p$=YG(4A6bXz8HWv^xBB=yhZ|7V~g(v2$}X~iqxA$BNOiU1FDd+wFHT< z83&A|dBaV={b@C$e^f1rF-)^RO`+5Es7v@yKyE%_`#Q>SSO;d zP@R`W9!TP0npQJlk1Qz4 zacKLHqMh+Qb4YI9Y$_eG<@A}|$=j9rxf9=J8egijRv3be6v&jtI0OwjJ@=f@-}ZzX zbS;-0@17P~3SAs~zP1{6Ig&;%=?s)#(h=Fdqx8-_r|s>*CF;tv5aOwkxIGm~8aphu zqz4W&4wP#sR>6&?Lz?UA`LyAmf67MAM^P&x0$bF=r6=dPUAiCTx*2Gz;~Lgu!gd>x zk&aw-?i0r0HIdxB(R$W7=@peTH)3RhB{ z?(FgGE6&P26GaZM>AexqbapsjRjH1MfTAJ}{lrX?A&)GJ01C}^NS78VBUyR7FNSS^ zgfoM0Sq0+*(JP+?b$6Rg2@k{x0+-NFHip}{0-0)2tZK_Kba!;t-A? 
zuav=XZWF0D0kn5Z4^S1jT6*l$|HCQ$OCEJHs>RMkL@pQ{o>SEPdN;A=0>yi(N7sw+5BpPM`J=)vDSl!7OE!|I9I0zDNGP=_J0RszTwG;Ux%Y7X_>7JfR5#iFlXPJ%-uuusQ7tnAlVQ~?bQCl#GRsn2Jn21HMvJ-C%OfD#C!I0k6*f>! z300(rYz({e6w*r$?67_Kk%cz<(ZYPmcPTGB3jNZ1olzi_(Z+*c+(ph^6S;Lq zGpS;LJUkRM>Vqa`M)>=TJ!lg`zjqx_6`P5AGhj?rqpW)dI&|!GxA<2$h*xG@l_@l) zgP0VM4yU|p-BtD}(gL1K5k4#JFt2t;eCI7Cy%ofz7Q116>FD;Y*s9RjFpfe+hk*B% zl#$Um_}hl?8E7D2&f>ID%pK z^eIz9=cA&<&wdPPr|G?f;MDCOJ`?7#dw>TTm5xKEToJcTUzzd^uD&n4a=*uUNp)Np z@GW5GD+lWgN`p_1>?ARuSF27&Fww2So)4f>z#`Q;O zn$vpGS<-ETM`}+L(ivwax5nzkWRWjm&wi{dBu@k_*T138`0{QQCl1;|gra)`FJg`^ zI6{@2r@_ky6FoP!y3O`aord(1FFzSWkQ4K}6q;0>YQxh^5(IT(FCnvrt8k5TN=sh1 z=u?(8d4_qjv8Ht~+H}R*ix|Bh$6O*t1`y_&U{IGMvS*$^`=$~WX0BMX>(cirV;vu+ z8V5jFCrbg$ahlpSB>xwB`tP~Ld&0ha_~x?zc&{~wFtXZoc&AF|(3q-71iBl^q!iA^5bDqxRIY>$G@`*v(sGY&dwejJWAOqN-JV-LD+ z<%$^iZ65q0Sqc9o5S&P`r)u-(fR-8P`rPHN2kXbwp?Q~K}(zX8hIt~+TFgLX8W(hng+)kE|s;?c2%C;XGC%<+k-8j*nnCfHH`!z+ru>m%nuM=}}kfip2 zfJIT#aW#0VpXxFxI2mKBPSJ23Q7E1soAWS8sqt-qS;0QGZHTQLu0Jf!i|mNzvc!s& z_dL0MAm1vmVGAy$pQq+xU8+sDECnSgn@rYPbZRZv5a1P!nSOnH0e%pFTaUmoUpL;R`{n(@Z_)Z@CRo73Rsi$B!lVr_ZHV`q)Qw9&FBNrY8xL3{D z4|w71XA%%+Zf z^Bd4T7L;k>L|fds_2~^)iI86ltC@pQ1b9f==Q7b%d=zayP8-;B=%$&mPM^x>!I{AU z?@}UZg5N?`*w*x;RoSlj!_$%N-#iKs;{bZ)y7045qV?Oh-03~|SE*Ja$v2?5OR2)b z;G&{g5k;VrSW)s8zj$IEA|RO5d%s5g@~c2mEvFvXuE~6jqA7lt=g&7$>gG)osamAb}F4}|nTGYZYa{TnE>jZ;7Wrj54TqI-Lh=J9OI`IW zC8_MezK(8lo>hHq72N!olGr5EK0MgB*EwKxdVcHvzO^lUS^y%MAd8n;-BkNVPi}`a zR$5@Tv33!oRO_JH`8_DHV=v-9oGP0|q5JCRlIgf5UpWBz5<$1}SzW}0ssLEgLKuAd zwMw!Ba~o$?RxjWO7Ae|a`73AewFMV+Z~!xh~cRuKP`bAR=)2O~;x?VXO#R(1PBrnLGQZYC_1zX5ZHoUG6*sxz91RvV1h!!Sly8v%c-f`M6}g;#GfCD$a)@X()vp z3ZLC}oj)FpTwCqBNQYR9QDh?eZ1NKGwI0kdgG+RV0?J=|FYaKPO9h#)BWh~WJBXV# z8>PMt5DZM(wAq_VeiyJ@bd6{Oa&0eXp3eJOkw}T?KS)P{6v6Q<+smW9U4rh<)E1lO zrzMj<6I3a*n+>H)hl4cqc(r!SYw~AL@Pt72CIK(#x_)p*Oc$9@Hd5 z^E8c*d%RA!tT6iunq(C6bd9~pTj^>#MJGhEPXXlxW`khh&%2s&a;>RA5u%y2Q%Fu1 zz4fP!F@r%lHYyy5839JBQ_*M14{vqC-I>s;VWD96r-K|~ydwFgeBcR5h8T7#eaUVm z%!0pi1VB&S1#aEhp8@5V;?wRR9&b-0iW766{sDp!n zNfS}xdl|meqjXUxYt8RsW}18t(?*pW%1DjepNX|MkHymWARrFB+Dh0RWGT?x0z@SF z!A+U=j5Comc67jr=TS6chVm1Kg^6Fc_N{UK4B9}3fqq!Hoj6uXOvg`$EVFIBnzOEI zyG=?lwG}Rr5YP0i?RMM?O=eOeWwweW&(pGa!VByowMfDw zZJ3>p#;Dr%uWar$Jk(+YRTcSvVc%?{QdZ}3hHV?l1TF<{X|=2i__qr=q0#fa*Y zOHreW=W~bC%(@iMU%)_)*@KnVD^D~|a9~@?7jBTi+aAl6@#eh*ZlvUUq~{9tePWTc z6SNR!0^R_{hP@)mt`4p z7{!5w-Q-9%)t;WfpOG}~>X*TchI5_Y&YOGef%RzC1K2|mKtzAB73heNq;Xa5{9T>< zw}X~wjyZrZ_J{5Pn|#;spO}i4Jv#OLavWq5b5|^t_bM z)=BFo-Qh{?F^PPjT5GerpqPm5F7=MDDFg>)=u{55y#}OwKx`%a*O$=_q_rHgY0KCZ ziRXR1}(`EoY3xRsZun)5J>G*vjeXpW^-ZZ`V+4{r=BQjD@Sn{|vD zn$$4z!}sRP+hV0nNySWhZ`{g-eCCR#YhCVfIb;~F!!^_FgxBmDeg2$Y{Qe(WlxDMz zgh9k`#KJ7+))fGC2H3k1bRLas)dY}$Vku@&GxCoAr&At(7FNzXti7~&xNN->h)Uf9 z0evwgNb!sxvgaKz$oSijbqkLT4;x2nU@wxY-vbGPR5RDIxNTxtnDssrdFXH_s$~#J zCDjc=+-oNSmWGxtnQ8Q@uQZB5aut5r5Vi-UW|H*FJh&F!9Ci0u?$!6rl$5S6L?Mrr z-B#bFO?P_aXPQ$}*3;pYzD~GMp=Z4hls>;Hyl(salgZLLP{fu6(OP-*1qY63(I&{` z6ij0n0z}>N1pgxEP#S?G@nvBk|IfJ(h5l(}PWXkTE;(KdP~kFVZN@ckRQ--5?JMPK z&U-D+>i|KIPCGppGUw>8Z&HyQ_~s=t1Qgd$FB|ckWv^yva}T#{!4xZl+|dNER^7;T zzzij3c-^$?S7(S_Wvu5`PVQR+&^&ACxM$=yWRx_i?BTg!lFc5?P^=1}AhDh(gLGLj zEKEzyMyodvOYhmdxgFNH@n$iCB6#>jjuFQOw>Xf?UtOvM!kwZ$oCO@{qklk^4S!6n zTh;KEDuH!g5&|6^MZN41IFQ8aiBgihg>>}Yhnhbd_y;Q8<7L|PdT!c}fmVeeh_wr% zKAfSm^;R`NL2O3?yUOR=^FZQGx}(oEa_A!c@Y~t6HuW@$B?Q{284GBiGmTCJIp8JE z^-|Q82x6bBYV8BDW?1jNdXf^0OReio3cSaO(=M+g1P%(^$=piJ*U0q-s$8?xqrP4i z0$^7fcl7=0v81y}0ARm+^R&-TSVMiDtK$tV>CoFxw_z!Hfrq}}MvE=p9$Xb@e3PsW zedr5HA_Qm%gv3kz0^&kk7rn=~n!StfGI$hV#j`PwLRANZ3fo%d?QpuP}J{9xG?t zYUH(>=d-1eGtXQ8+3T8*9i!HwKoi-pep*8=j*LA*4yW*lM@`u?h^IR%U2$ 
zHj@D@J;HYC7xRWmp)t!7r%|2@ZI}qGC$F~h#k6pr70b4S`h5&Fw4Ak8MH|Y@bu%oP zra9KoMuDfusR0(FbEFCqyGqVij zW{Aq`f!w8xp~Kk9reOOl4d%R#bce`;L7`dqh(?TycB>d9J+$pnzc2x)U8^!dn5o_J zwXm2JFb0~m&5LdW*;5=lY5nE>VF?|_iVrd=sJ-myWi}UIPsvj4bgbEp&YsX?-eX~; zVs!f-*2VPFH~>bd2NyHykE~Wn(usPX?(P-2h%;iPe3jr|)fKtq`xW}c$H$_<+Ls2o z&gQE5327S^DDkJLOTWhX;y)FJmLd>k#zI3SDTN|@)=PqY@vF|u8u6l*;VF+bH^(Ya zT~?dVGZNG;rusRdLh`0){1&!%xA~(=Ha`jk3Go1#kcFb4)rvdO`LXSm;9^CumX1_a z>|En37pTCk2WwwX)C6ab8@UfvA`p}$ZJ71)?RYOeYdbttJdzeg4l~Q@CSnxJEPNb) zi}>a&ax0L*9_c_uS(qWKR5gpjwrrVvJwhenbBf3d0+bQz6M^SdvEe4z zodtBM99QNXC6o2H&m2?kHHp;$->@h96$)nDLJ1)q{LKgN_+|t zW5fPJg)7(lx5hIXh2z4-cL*A~pE(Lit=wD%gSc71dMvgHxM$ooqxNj-H#-w;j4f!TY9%cNAL(mv?4C{=ZN@F?S_Ri6uO!=6{5W<{*q8T4Ge%+P;?f}p z+O@s@6f|+mBmy~0Q=EA3TgN$PJ+!2V*BI~5XbU?%z4V|Gpu$`5mB}bi#~ZO1aJd%E zF2-Nl*7%?_(x*b@O=`oD7$6yyY7PRSVq#_fb^T!>VfNpcE zg&XYyOvBHj_gXG&cJ^PyF@X!t-!Z|)mzsyCc7dL!`ZUx1Q;PSct}@?L0>z0@%Hu9u0m3zL>`Nt@{_p+G-Yq@mqZT0$v-^ZejGOu0QXq$N8<3RguX+g!kPSQlHL8BIuxdePw30iD zQ!X?>n3jn5_A6GP+WZVEkR(jwEQ`c1BC9vofRv(2Y811Al}=3Yt$Voj8OxIxhwz7n ze&aMqMmAm3T`^qqzH{WbDyCDwArCJ-&H|$(v; zXb%$HhLN7S#C{}@tpK1fP6~!4o#cCLMEw+XQr)IP-{W^M1am|?c-e-b7FkgL)<;t| zj*BcghC|QR){Jt09Z;_zKzqcJ-XVQat^FCTEL?Ku;t?+wJ-C)68f$^xoMgM8 z;oP)A`haXk+9uQxA~#A3y*5@j5!%AK-;|$F0h-S)gxNo@D+h5k!+|`!^l@*X#H49b zDRI7PS2Yu{YGv&{+plT|#>@frKfdFJqB63r4eT>N)O6lqzYHRQlE&NyxvoR>c~5fq zE$V7Lb;L-sNg%pcA@JnoZ-v(psCp`1%vh{u+%3T2*`a5HJ3r#u)&}<4CUQiXfMFUW0aY^vKL<_a;uNff>T8-qX{9F*4=?CLvUM>wx)l%_kq`na} z0AhyC&`?~%xS5%?Z`x0ykMFkL##wAOh4FFfrL;M24F)0Z?EF=ke`kfd`derws$;KH zz>GuViG2NGzdvFIHxMc)rxQ8Hl4&Zr!p#;eXH1wE9|+Lw#NYm5{6L*`qLO>UGJ-(` z>_7tRjjdP%FJTjVmAK%`#7m@;aXx?n5Hx5!CQ~x1&?#er$BQx~pUjZ$LV%{O6rl3s zeT$~b7U*A-%02}>T!6x<{%FfUddoX44x7=9X4gy~fIA_5ra`4>6gcg2VYoZOchn9j zX`>=vaQPE^_H7BJCp+Kb*c6PALJ+sp2W5GY4UC*0P<+}>0xJhKGjd^5U4*0!@R~^z z3)cJ&%Rc-ic-f@l2mRJi09D+Igdp#CN^i5=6*QgoN3 zU-PH(7*h%;pWg^HVQA7Jz=Q^?ZnkYEpU?;|Bu8dibV;F=^LMP}aa-@4hbAA9&fMqOD>W@yI(}f~xY1$z(s`euVGTW3nkh4)-N4>BGoM|FPo~51 zX?-}!h9EUB!gW^SN@mOQv(vto$DU;pY)buHq$a2d@%#+laCk| zh{gbUK04g!jw+k=vA06tNZPOvsWC)8Yb@%iNh*kSSoLtw;FA{$eWI;DCX>Nc44}Oh z-MC9FEnQZNclO0yDO+VfE`PW>Emu+~6f(Wo>p=RsW7E z|73b(U;W#NW(*II(7bgF-M(w*qe;>SR8@4_o-=r+{<8D8SE%|B&X?UKnLtU_Oo9$8 zk*ojpVMFK;;Cf5Dg39chdgvvEPbjH_f4J!;z)b@#OEuYnK6S$EeDjg#z00&?o;@s4 z+^YsqATqJy$a&td(e0Z_h0b@@N#C^I_WLxCx4uRZSWKzjnoGCfA6A?|!x=e}nuTHG zbQ22&u%%|!3i~gVhAfhtrT`@O`0A9Fo z{IvJh%8-eB+ka~Rj#T|^571s*_zSH2G)rCX_4p10EE$GA-C9oIs!6&~Jh3@n%tC|4 z*45btcfRuP*hMF~rdZcD?oO*Dr?xVbY?81L*@!LTdO3k)A=4YaU`#EERl7KD-x`5< z$T@`^JXjN4-D$(Ws$vf@4qbLLFf+rFHcOl4v&!2()|fgmO9g9h0$QQlS@L|`^eG8E z=8i2Nlv0c#W^N8ZI}1dIY2TK4FplebP_;~OyF>~%*H4*S zk+cwTm$Z0ODK^Or*whP2^H!8|tZH5YIt>4N6%&{A@OIJXh(dDxUW@er7}V4VAmnmD z(*YFPV;XF9QcH}%z3^S=u$s=`QYx`RvI=|zttgnf`ruj~SC2$gMwxBJ>$%1*Uubur z3UYJL@s8GQ@0IrzXkwGYjSf7IPV(?irMD>yBP3^17DyhFnGb)+V8haP_DZJ~W-k;A z#3wP8o${E*MIax=Qug}jlC>SE-y(K%>X+5W*J2mUC_QZ5@O}aP9@Q@Q4He9mp&5tD z)n4`|Z3?^&79R=svtR2!ZfmmMke%G(rs)CC+`d(%)O(j$^&zwiXoT5BwJM}0FD!A5 z!12ulObhC<*Gs-5cLEzVh;+N6!KQ0`SA!y%gXsMNba*-RT5H+^ zy-UBB`4MsAyad|__bclBUDKy3>d(+c0RvL43bf5+s%aw|e*O%0K5z}y=8hy?>BR}WBO!hpVB?B8qf9}%d){2M&@cZ)#FeU9d_gSrvf{BM`dUxeS3Hf@^L zg`DT9@!f{EoX$@qde^&DrK%oAyJs0#V~2UX%-q2{#?syhAA1^l2ZbyA*32;1iJ`uB zI~{mqU1--U)jV*!9b2Cs(X-vEIa?$$t~g_9^YNYeFUB*4D*kU;^h{|BGc3;5qu>9> ze)r?=OK@8+@D@@cO!89k;|IAg>U*UQ|Kc(j{$@ofwLCL!2kGavMFHidWHO<{>d&M%4sWn<|4=TiDbkE!xth z+PK~YRs%L*el`a15%KMZtVOjZhu2ep_eshReqjQ^)p=dP2iNtU{_-t8q8eoGwXD=M zs#b4k5V*ScDuE1!PL@dOHQCbSpGcP_USiD+Lrgwy?RJ$UQ-+_OW-d&1pW`L`e7B{MH5w(RcwfS)87KjuZ>K7_CJUHo 
zs)&tsMJ#@7GdC+yG`SWFVD|}glmt;L=Xz3E-#vlDGxb53EP}&Xq!`A;Lm9W5X9em>&-ymRVA&!uQNdI zd`c z zJ#sCoi*FrJy417T14zoK@)Q1rLS0u4y*;5ul*XJGP+&QaO9~U!9S2uY zxs|)La$bf?weQ8tr&iyv!c~b}zi>~6`~?NxrxJeE8Gzw@vzOSeD@15zZ`ma~8{D82 zfg))|pyDGoD(D~XceFM8lqfS$u}MmZD_$V_XvNrJ@h48c&0BL_=7znSp8-_|T1j4= zz0^+UTHX_qB(EO#Wio*2#)_FyNPzpL;1j(Jt($TG4}0$!)#SRZ4J)DuD58i82n1=; z6$GRQR62y-i&B*;AiXOJ(t9r|y+(Qm3koDOL27`|0@8ag`R=T>*E(mMeb&2o&KTbq z=N<3*Lm(v2bC-F~a?NYb$FaIFHN=f+&*+lgu-kN)!bynhx#8~ubN61|-CT{hP}0dO zltJ7yI9E^BVtFi-LG?J+Uqa}*Q0~5vIxhBB~=(W+Z3ue+FUySq2gcG2S`td zG$-mQ3u8;7b+(d-J$R0FdJZpXOQ?u@AUd$HF%V#G6mMIK5Zn0fWdVeAF{Dbs{g;Rq zH;u$fY4ER1;wQfI^{#RDCpK2P<{P&#HM?-Vc-@UsQ}@RQnV2)JXtfjP#n6H>52HcSN2|vmO;|7u)!36A1_bL~w@7zi>V)c@k0fMr)U~$z4GgCHJp75;O z1$`V5iv#=Qs7+VXv~PF&9`t;f5kw1R77V1HHQEbJS`FLT1F#35puq<(o9g%9j!!(s zwj2))FD0-RSL*0Ge|v;FOh{}?xzt#CbT}^aCN*&9!Wl2#k5I30w#njvTZ1}`m}7cw z9RC)9%Zf+KzaG;gh@{{9L^NU#7)=>T->r_zN%;pvs=LJxH6-H>oHy6|GEQE&Pm&rB zrN`b;7{GDt>cxupXBg@CWi@RZglr{ATUZ~)K8790>Aif5#-8kYzHz~E6QCc38ix~- ztK5x@>^iAF}7x;XH1cbaHm0__-@=1?BO-J=hwHn0T zlU3UA`u(9%Ss#`@&^vhaGB)3;8SqD|I3m%2O4M&i;W#F{$p5s! z!%2L(h~C}(1KSSsaTJ;k z{dB~1 zXMCi)9>JnBvXbqUq?WGv?9-`$?#0K>vvMlJn?ENH6s@%&xJ=%_$}4w!;L{a-nGh=B zOz?Bj^C2~d%8g^?I6Be>0Mt>jh4Ok+=B2k&TiZ2R8qD5?vG-CwS;pyVhj#mk3)He9j|g_30dh-5A$CJ_&M5`!B_VKT-5qY^>TSnc z?5@12hr`|<;wl+`S@xX#SseXeIRuQ9vYIQ$$~DEw$ISYihDPO`W#-##A@POq45RS- zo8gXqX?%PM$3UgL0x;I(#>uoDm+R=}^Q4B3FPI@QZ7vS3X4y&}aN_55)P9IjE>n>B z9&)fGRBG!MjlO6NbzE>=En$yf-Sb`3*Jg*IAHTH{+21_AaG0p~M=t5r@~Z@k1HXp9;R4xQ6OEsO(;-mOo;2E}zQUUkc&^puSh zjawOfs#yekhfSrgEHf$g^BHLs31{eANj`W!yxjxf_U^?;`mx6Qzo^ZJrGd_qSc^p= z%(W$Su30xW?^f2P9Q8qLeP^w8Bq>3QJs=IH~RInrIk24S(AnDhg>S?+EV>Ks^ znUzaJy!H@b{>u4U_iSO6403BEewn(s$wjA-*VlX8cXN;k!K!SnIb~0al|hSUi&{@7 zxiaS^(9BFj&#Z6zXGfb!33LWGZhLmN22@@dDL_5<-}7klf%^h*TSI?I>sj4+E}<9& ziTtOPsSqCS{97Z%%zJ4N2dO)qRqxJDIV}-SF)OCTpHeLt-LZ21R13;-2OYP!C}5(c z1;KjFCG@^qne=hTJ1DrvFh@=DLPq^^l|Da=J%!G7)M0ySbK8{f=5Vpzb1Cioj#{+~rB^s9Afag9+_HBox#T8{I#zmuCA9;z9tzkgoT-u>Qe!ONql_mm9pAHj5aiYX3TCQWn9( zM{pI#co;-na0I9}Y86#3QTn52Aatiy zZmQ>#EfI3d5yFmc#msuRjJwZ8*I&!ZE^zu%mA2ft>TNHglj?WEl48>9B=Kj!4UC;+V(OTgd_j>}^=e(@X^VXJv;s6$i?((!e9m5c zz_ndAZXNEuY~2sQ(umIG(7?IPTjUwjUCxmA7#x$x_KlB5FzoEwg+$qR_4*3@8wP=k zP-ijNxmy+!a_5#o$h|~kTcPTFu`t&X&8IUzVht~5)r`nLN;j1?Zg+h;i*&XFRU7Qr z4PU78cO#8YHrOE&W}gQ_G+uV^qzoQy&2LR#C`0T@>AgIh|C;ZUA*s_Df1J;;iYu|w z{I)%N*g03Le5o4dWD@P%D&9Y4nz85;E-_e{$Xb&=Jq&WTBlS^exg;a)WjMOc3$!Tk z0Q6=JuWmykHFpUvx>`@Qv9j>QpL6vk2xFotC)*(Hg26$q!+2lg&Tdzh{D^+W(*&gP zpf2~Na}zDsf2BVD!%5JIJe$+)R~t>(S1ov=`QeCSGid@JhG+t$J)j)To{EHm=NKC#yz>$sr=kxr~{TV}SumST~{qd6rp zBs@1*S%3i*bl53C2kN6sC9X6>LMRW_FG|=}v<|x89PXOiG5S3(_yIkp9WJz6YHCpB z9I8-xFvf*VpKs`54~aM3)vM$jUm)Eq=!}6L$&v}YFV>*6tBN`|m4e*z5NBPamxG9- zaP&YAE$`Jz`m{cg!uG*co*>{G-#D)5Q|3sqt9#aY7db6SM61yR!gj6Gv$zoU*hj2U z?0stt&i5sub8bp|JVa`=_Of7P9#`M5d2uC`^-0#~X~<{vq|}=QVmG0F=YT}Og!nw}IZc5hxd#S(93CoczzZ@rKHXaqH)$du;9}{iP+%f6wUHEIy2% z0a&>3LY|fHhGFbU|M0@}f(&}mz;$lX0wTVet@dK2Axfn41sS&@R{qys+N#$ac{d>( zfD(s?KsUh4R!5bOg-?%NV3Jq(=mN@etbGR-+!tDLiL~MMwfk@oVDFTYe6wtvzpkFE zTzrBQLR8*=#cm;ZiWP}*g?;IFPdDG7S{}fbV~3?@mHa#rv1|`suFPlFqNL7w?9_2v z(sst%T4a=8BK~CQ%c_+8Gu3>eTgD{X-LVrEqM1I|OxSH2bH`I^Sty=xTSWgHlEsU@ zyo1kuV`ex#w+mC>SOOC2_aY}eeKIX*Y7gb54z^FdvO^~0*!urEFDA|Sd>62e8N2H; z4fZ4EF`u=d;Z_!PKqCR2Y5DvvX zj+P2T2MDd6<3@P8fl(EPw;$Dfuz8G$u^j!gWb%S;7FFu_x;rv{tje$nu#varefLm@ z2pD(DfOxuw6#Ds?9Hv=Ma$`AN%*l7beX(`-rjt|cN<3Bn-C3QK<;~*f2fKO<+k}1H zR?D82Tb6b^m9-%Es!@y`rq^rjaqM{ax&oJEYbcVH1+0KUE!)@>(8A%3hn!$7mxezir^4OD`9 zN%?~E3%mN>jpwaN^1=CJ%?OZ0O{)AIuj_>#fesD@J+q)`0KZwY%)u>9Yfy)XEh)#H 
z=n`_4E08-SXVp%NNlp?ns2h5uIS=F$_%n{pgHxVw05UoKhWDcZCr zjHUKK7XhV8{d}(sL^H9RRp#|X`os38_tdt9GHTtJVp9sx^QQR=#bJt%jleVlh3wPf zrqoY+wbHuY96p_Bola_|0zLOS+x(O^`@$d><*NE;u_wD;;(SmW@?t)6i6GmNb3lvF zRsxmYAbzImAFUVutLq2)a*+B{U6Rl%X||P?t6X72!j8VAZm$=W_I}u`=-nGlMF#3U z*-MQHm}KSQ6PRO~+#|}BP}52&d#d{)>SV4*&mn%}(^k^^6K2-Vx)Z_^`vz}mUAz3( z$Y5kx=iTtxn->630(B=C3#9jP+rD2vICGZWvcz z%9zxc%o@k1uImM=ffi~*p7%E3b$1-b2XTDz4MO zz96*V3mj1SH21{LXq5nMS|IFsZ3tvL3U=Rcic0fLm^x_>TeZDvQ~?NdhDlHL4AbgZLeYT1lxwqp+0~OAC{glhTs2? z3UpV5?V3b)xcI@ZvQ7b12d}pa1wev(H$Yi?`4Om8S=OcU-I!@N2aXvB+-JKG3D_Be z_5E>Fd=|g8h3GeR`VQo*(f~k@opLyY{demd3Vkd}I8~lPteF84`pm9!)EboQm)8=_ z0|;u%YkmU@PTRW;o28)mbEP@%w4)3l!4kFMDA`n z07`nl1E@SLX6~l2feo=q?gUJz%c=d@XX!@6CpY4jODAo%rDy_>uGR)6z}E6~>a;#d z=J9M)YxUr?X<)nNcXuqRn{aqCb)>#_Va8a)_;2i?TYw_aoGRBU`Bz{?hcbXOT!hDI zcnC-}AU+kp7%#R&xE#m-IRKsK0O!1(>a;BTTQ`Z5Yak|S?uKXn`GG%H`43scaVS$i ziRP0_j4E$?7(<#8Lg)X4NS$z;g290d{H%^?%6re_HiF7-BkW zga_0?#%p^w@BZP9etJj=wk^-H%jdU4;r|}2#sjg2zbv-~d-(Ak@%a{>r!%|IAPByxGBSTD-Z=TG0Qby{E5G^YVgy{1h4{RG zeT_ZEu53L(oJ{0*6@TpouExBP=)TV%lJ>_pep~t9R@(opA()cn>gn)|f4uq8k7V@4 zCYSm5P%3joFh@LSFjdUi0D3;;1$_N|$Q1hED<&Bdo_}dcaHF)tHDc51+4xgFDbyyK zK=Y$C*Aq{vmHIy#%~??Dcoy?pradM3=fVA_cQzY=)Oh&@(?b4u6UyUHlndk8RceGQ z)?-gLKtq+Ca`N8@@gJZ0`=0-5k^kRW+fPyvRU>~QviW}yA>6;9zTkKrEUx#_&*O|OB)JlYEB;jF$tdEJp+pO>B%cP|M{B#;+?;z`7gHh@7??t zqWHJa{)^N4_u2fzH2?Ry=x-JDw+h0oz~7qoZ%vC^fxivP-v$M@0)N}hzwPGVf!M!} z1pl_1f7{Lf4R&)4J6wNhFQ?!nVp>A@oq0mzt_#F-RqU+fx5h_Tj}L2lWWI^18r%+| zIP384-OHuM-E+g95mIbiV^}A$?u*alMwpaPS^J}dA+xjLPTlSqDKk}GtHcE7A%3Uu z`2iImCJ*Xnc}n&Qudn_2;R(aj1ncA5S@#IZp{Gvc{lnkp3WPjEspsZ7{J)F)#;1A$ z!l}kZxo7y!`yUe+W`aV*Q{D+(z-P_=oV=f(>)G=yo`W&H|D(yDCM=WlgL(-_I^Do~ z;eKDPRTA=`-4e?Z9xh&9wP+LA+<{9k6#r|jo{zn)-6wKZm4twd@O+C1^huPHf1iwN z`>-=(n*Aqg{I5v3L};~5XS*A!OW42xg^bsy*EZcFSg&mgc?Q9EsV6jnh#6tBKHdX+ z-RHM<&l~(bfB5kl-Xa63JmtUllTLKAkd>@A;Ow1MVDue5J=v8@KOqF<*Mm zlLx`heCgW)f9I}#)x&$G{C|Q;se7(<{4U5ziwcb78H@Fa=WgM5j!zL7Q9{U9HI+#e z7=cJ%JP+%;4o+6nCF{$0pG!4TFsbFwe1A$;Cq2P>Aj}|={$!l<5$SA<3=uzK+gs`k z-j_DM2F8eNoW?pclCNUD6N|al0-;AEwyl_*C+b;V`BJ>ADI#V4fv*_wo&EVNaOsSZ zSC-?rWiaeEFsAn3GeCd4M~<(QtQ7#^fIF(`fJZZ}KZCj@H$kQS_r6(Qk~7mNs<2T} zmA`s1p?uENyss<9@veX*bw3A(x%}0Sw2zN>rpr7Q?+!cPQIjvQUDV=XFx1zG@p6A< ziyG>#DLW27wlw=5-3Yq6Su=llo_MtTIlk%^BLDIIP%>0+`%)Q!uE zOg7e?46{7GowttCNBQpbQB(0dgxMM&Ce02eBskb_mm0T-;(J{Epx1!>bel|^;z@^k z`DL715kU7t8#L|Qbb@~7s~MPZGb0|Hv5OR6wnpSqXd&k$;Bh^X5GqG&Mt(;xDk1mT z51}W{X-i^W%IpbTxB8lD`illJHRZ-vJt;jmzg-h#Xfvdt9{7tbYp*K$2dnh#X(Gl2=ioLq#dee3#7)^odq* zc)JlrP26OevQXvx%YUD9c3FTjw|RkTwvbz)9z}Mr{-|@5XSKw&wF6Bd<1k3bC?pgO19!0_&CB#?zLaL8k$@iTP=K zB{k9%EVNUoID||QA`Yn=_6+N=f%r^G=wteSgk-l@nwuYq2pVaYwmZ4DZPIrqELz1! 
z%Q4ztajhOatLMMx$V`ejvR`djxWS3HiY0Fo2ij%Wk~T=@7CC)TWBOYGO9Ap`o3Br5 zS26il;&~ok%lqi@aPp%@rTf7v@W|LnyZOG79IrJ+*5Bl8){xbFLfu#Pht!j@ja*Ol zxNZu3xBHa;5^kXz$NgP^W%eJKPO`DLII}J%*WSBWYHW^k0*!$}ug7NKGJPZ8+y01LVpc4q%P;nbPh zU9qS2f&}XaAFZot6Q0m_#yn`vw5e>Pq2`Pj?Nxb`=PN0hAL!`ndT2<*h?&3aWY&7Z zSG9qc9D0q&twuB*84gIdJbZ)W`4s ze${8z$;dw6!e6coa8C7g=bTFdub~Z>T!MMD-$EOBcFCL{;2R-$!Tg{NtJ`vR1(!kN zfuAYEJ=egg@#|o~SK0gj-!UdO;KqO0Rocd|t5u&OKi{RT5gkLpeEWU|6RS~lY)go; zej4j-jh7qO;lYGs8_l+B=S&3MnOvIWedF?&)X=XGc>OT=7ONlqc)=9j?)^g%BedF z1#>$vDUUI$bQYPksJ$@KCGEQr|E7%o;S&x+RH{~A4L`q1b~z77>zT?qF`M0Ng#;!6 z0j&?SzKW3X2M=73seWWF*XOq?*qI(3BN^X@SjzQEGz)Xx+iJqfsuWw#^nNjBdeSHL zKU>1ywYR$Jcd~E21L2YrbqzBJak#hzx&Z%XUDr=O3P0Ot_8urc&X#P@X*TuD3fL6a z?vUa}poGQ`b7F(=S{$erhzPso4G|UrZe6FV!RYG0efzEp0SA&)h3yJnvf|Hq71)Q@ zHCH>&|Nk;BOl6oW-P>3}Un6Er$n{v)6O334y7s7(FnROA1RK>} zr$N3-g&*fJEQ5m*`K1=s+L@5;32O^HPHF!{Q{aJ-75YI|I7K+cMNEfB6z2bl970Zi z<2Y5l7?7XUH+(z{K&e5W1Mr_}D1unYeK9tV$?y@9PxL^<4j=IB+tU)a9NMEXvYp;+ ztx#D+I)LqhK z>PnWu`b)KHlYoj6Z6m1%FK5tSqspe&blI+^YYPv+-TF^FJ|k#bF>W)D@^!hT7Btx{ zs&p$bK@X*kd7X*L5R6U(^wD95%1NM|3>(84{lTk^qRX+V(YHdXZ)&!R6)yNl%2Rgo zRB9fhjI|{^nE5oj(m9b(h+qmEfUNxE4(pR+uP|%Z2Kj3xvragK1mM=AxXU zV`Dir<*&!bJ0s<`%N~hx^;#dAn?rmiSEkG6+o*)R#HE7+UMH(gpawZP;%<>7AJ|Jc zDCgLUF=cR#vVYr{iaFml%JFa!a$_=|5rKUEJb3~P{tKG2=iq)EX zw(CKeEnkW>*A-c?-TJlqjr5HiYQda|P3VzJswiu{@*}wb;qO$=%HE}B^-H4eM(0LV z9*t;lm@c!YStle&6{N2y>jJ4DqoP_^?xkVyIiBPz!*-liH|Vxzmv4$h!cP}^;Ug`l z36F607rUJ#(Vqxykl2pD^s_&KU|mbL;jvrZ&terttySSDd08HM{;XFdbqVEMSle}O z_w8KDD+!Ti`WBs6d9Jc>NKi8g+xQ#?QC*2;jp*>S^wYQ5qlP{nJ z3!drbh%sJajMJDHv$Fm{u%D&csmAO2^}R;QA_SZJ3WZvTw#ue~y||C9A4ZwY%s7*U zt*mHhzd;Xlgk5QDex^k@mC4QACN_N?QTHI1R%lrtVB3)y( z*E7!UT{-BLRWqr*t^pDKbjs!D{VId)x)7hwE2bMgE#IzWBywJ=EaKvEu6O+EUtaa` zCSHeb7ns{aANqZ#i{Q{Xtzw{K-gn92R2p4Fp?Tvq7SH5V7?ZITj{{RhuQM`-1R%fc zo|8A?M{F7KCwp{jpTs__Y_c!6vLJMCG>AAa;XZ69)&WoMV_T^Cp~mvTPqb&7nxYBqy_I^@Xq6#!f`)HtW)5C2KReYKz93URVy@UD zAscodjw(R*d_JzVH_nf!EEb8Fp2Kp$m35xD%%jWdD(b6V5`Tvedw;2rjL?&?cq-!> zyme5edpXG0YJ+6o`>PLM@Zs0u1=+OTN0^TX=lnE@V2F$qvbH{2c!CV-XB=1SEzIDy zr}-$tQ0<}jt54Im@X|_n1NCroF7;_6qM>>(al@^)jWCY*$g)Qp3N}(7Iu3S&*c4XR z3Q5WY)ic`{+eundSh0uiWuka=?-*Q(iOKdoX5SbKPAh|jC6Djc$vb-2PE{!#8tBU* zu98)YxjM6Oj7%21FUbUWOqn<9%=mf;6Zc)Y7xWE7+vl`XzPjLLF~qrnFVHH|)?FwA z8SfUkQ(w3m;_oJAb)XG<5r3|qhbXkN{YUxrY-EUU;+QY+V+&O{x zz-hS`dXXPjt-vo5#v%>mKXWZSZOF7 z5yjp%Hs9=<7`2+ADR}yVmf)Eon8&@tevt?ym#x(>PuI?V(fJlZHO9IOt!3Y8dy3j( z)ol%1;pYN+UBoFo6kDc@5!Qu1YjM~MVLCvn`0k6K^REYl`H6|Dl zI(TTnAGubK7@xfVT(hiBjF{ z$U6ubuh#`;IMpsChNJ8jIohd1akxO*cU3F*WkY{%DRr7y$1@5;;^@UR=i5GqZ=K2x zHDXgk!{f@YPb=({r>rqCq-mh>q-aTUX94LzN2d6?3;=bj&9fdjH-Q68GM4oH7DS4`;7aG8+j9~i;qh-X^HugwD=wjp8J8V zyO+n5Nxyh|{jFT@RKD?ymtw;cE(+C8q3pg7Dx-NnI(KR?)-7Iq;b%xROryoa+}!kB zDDUW}cE}7o&J!~$z>i7u)XScDHtq07FMx&gErNC5k|7S5Z=)>rASai^rNt*w-v=cG zj7<82hU#p*L$`85frd2OJ;Rzg6PQ~shpaR2<3P;UmGV`!9mWjEE_z^Q4s+{-$GCH~ z%^pE~I--|1x|YIno-uruR5K1cXWZ;{cU~>Gp`s}H;M^H)a64P^+xDG$Jgu1~w2l|m zGJBd6Sjdgb&7LIj{RGN*rQ$2?0}gUug|Ak;-YACJ=WLR6R=WB|3j?3tUnj-GH3Y_9 z9~gVz-3<6(IN~{{fMh~H@keyt_BTm|J?3Wgq)0_2igYts%7CUnwXf+%c+{o ztqGQoAVz`<5n+cIL&{(Y2L|V$MqNzw#t*-1YE7~}eLG<3(IrD{a3y2Qs$y*xsn-k0 zw{*vi8tz~E277cTqfZ(c{cIAeizuT*=W_qy&%~&40XKa#PO;w+LOlR?1FL|(D z#?A?7&3I@7+F7?tYY40tTv3OCViwD>>r%qXbI~f9WJSuR!DiG|Z(v23!Iu-oDAV;& zp{eJw?%fToACqp@b%$S*%4`nrQvIxokL57xAzVrsVCRw$8~i%bvhzzBVzm0xAWX|w zFAP#5V>*~wmMds313Ae(D`K{{hsl?2fx(r9D!B^@*6YL$=-!IuW}1DC;^VbGyqr|O z8N|er+m>m&fcV(8trSxX^%A0tG(OQl7vAs)Ph!)Lc!U5?R6{I^(Y*C;I*d81Z$5L> zNk=p#vt`XveJ{_W|E`$s#IlQy^Yd>#BrSo3gQKswtdPI5vGOKrj_lV9Me%edjd9N8 z?t7kU@XE6rercf7m|z`uIwxPFF%Qi`8=PGgYBkS}q$zo{?9RfDIp<8LFnynwvn^xT 
ztCTE@C2mLfh{L{YzL9Ne`0U6ntSo);8P6Dv;qdp7p)v48?p0WVrU3O;_5uK@Wjg z&lDkv1a(@@SFGc9f#qL?cq<6jd0ZML>FhLRa%_Ylu#?i!8nNEGl7=44Mki4h!b}Js zsM^mLKmxmt{r@>!}7({mFN!Hxn`a*u56IKxJkLv zmBI5+Jh{w>EF(M~yJZcbl)qXbw<21s#a$>L};m&E&dV$hJ}*%sLDdE z&?~@924aN1(X-euVZOS)*()EWgPosnQOo*duI~NnbIWTH^F@EYjg#YIB{5(928Axe z&=`Ti_yl3fTODN&5U$^ok6D05n61v4w@RwA*U5~%r-tR_a~C1cNKECll}KWNxz;Ej z48l1jT^`g)@Vmc0YI@Rquh>A%r@lERe`o%eLhsX-t9CYl z6~?F&FHddPv8C)Ft@#EN%oGg@qI#|`3vs(1e}q~U^c<&RU$5{ayui$bs1LU7_19-B zZS!t?HC(oF{)rjU+(nRacet0+u22K8U(i!9zCQWqoyudEZ_4hr2f}=y;8*xYcK|-{ zOr!{imgmBKw)(Zej`pXo|M1ucJE=bJ=V42U0Gp=a0sxWQ{3G>b0*mk5yRl(=CzCVI zydEPg<;7%4i@v#;Cgy!jeU1zQMjrP{Ze@t|p0Mxp{1j*GZ}RY{^L;?oyLeDo^@DU2 z0O#H-R6+{zb|M9q*+$tWAI|Kuc7JQTMkzYj2b}P@I?51p1&9&7JtlnGaW{~!v-9*L z%^OY1kIa(=H3nA1pGfztdOcvK;;YCU*Qjf{Hyki$T-%s5+Q07T<~}*gxbca#JZwF@ zbT@H%E5cJ#Bq7g5{O7SAmB7bR+gesW;GDxxM&LQ@%p`9>BIQ=r)02j>ZJn$oaoX&W zD@?u;qnZnGY*vLObPPrjAB~{iPSE3xZ_EM=sw9E03qE>ymVR0Em^QJxx=-?mv~8U4 zx}xkj`(u^Ng$*M#%WOV)(x|&g1j3u*TGI0N(``)1Oad)yH`4Nqe)7jA&64_vajQVG zmU`1)x5?p);wCFBRFzLto_+W9PAQ-vDyT&Rxxr zjc<#>sn)#@-}d|f>jvc)GT*1K%FP3g6dno^s- z1#A>kKGJ|3e!td_X{Vu)eZs%GaDM%nOieYyX>J{$BpV9>lx23ia&lE~#I(=;iW(!@ ztx*P$$A~}Hi+z)JmmhPX>051U zG4xBfF0x-0>=xUfEOx1}zP~re%xil1#Pl84Qtlj9kBee(<*s0bm#d8aGviH!pumuQ zD_&`6KP;eS>}E7=v?c?M*=mIREPQAJX_Bt}z&pL|<2d_;`K<;v_&Qn4dH&w&ayEv$ zy5Z?z@om5Ev%QnvE>UTC7cfWaP*p3s-G6J}8e>$OZ+_7odorM0ja&+*h`Q_eu)i>t z`ncYvl)xf(yCXg+>6nL^sl91>j#+n1RHatq3EK_2`;o(P_W8$8EOa; z1jTY+wL6)gs4btbIe6nA^P|*)X>a2ZNn&JOWihv90`-%o$b*5GddwlR49+FyF1_Am z*rl`Tp>*y|@7Pf^F@mVclJ|lOFe*HPkW&OrTp)a`K5Ru>gYXgUc_(EtLlts@8rB~x z_>sNGL~%607+f!mi4x__v*pq)l)lrvo2hGZcFJ%JowghTP5Ak z>}xb9=wG7=p<(<3#Q(Qu5a|z)@e6)BX>7Rc)D&q$8Xrl^ppRQB>bBJ#J4y#vd%d+4 z=&;k0S3J=aI;BFoDY^6!wFVDcb7K#(QGT{(083Rbd!jpSB+x3kpx(R!pvUkBzdZWg{k4U#BftD%~PqwWO7H#>)XG!0|UeWf16+fb@ofyOZ%GP;hJq6-gXH(f9tsa zsT=l^0SC)?F=|v3FwbfWC7xX*IL!by8B&)djTbR)<_b76wONeb6;%JbhtyU8Jxb@h z4#0k01Z5Y`+0EC=IJ(IH75;3wuyJ9Y9k^B8^kSN*?{!Io2mki;)2+n>39`q zyfXC0KzLU}8lycI9zw)-x0tykA&&{m*vWmr%BauSD5f?HRK6b_%*t6eaGA)uKrLZh zaE*hBG;pgU?zFlsRL^n)|pUKa~R|n~9eOb5sTgwriLOJZ-dvNQ$RVn$R%` z3VO(*m&RpR>@>dST@LCtXT8)ZiH}BUbHV9gWjYliCUSa}F(uQ>ZL|csltD0Mxw}wF z_l==<=UY&>6Y;tx^b=@0w@z1D8WRFU*)YI_kC1$g_O$)EDbp)g%)f^-=2H*W$+NL( z-WlcG1W$i@su5ar-%XzEp)@in1mvL^+TLXRvhBwjc>M1FUKGE4s9XN^P7At%MI@81mvH53(FyU+FEhdsKU6x4B`lQ=%&-uE6LYd%=l#!`Y7>Xwa@FN9;+&=Jg(ICzk zr^OOnhC&L@Hc$mcgeye8rP)bl_ieJNPYrt;Qv~{6YoDE}z8Miq1|GjvIrUoeo!G;j zv|5NUe1y$oYcHO? z^dmAG=ZxtL$4NkdhA4<^w}?uWO?GX>+eM#rVQ|I9>|P98;l0bmo5xDEHsFY zl$D60Cp{Y51dc0w;G)=WD>nF-L*YC0MK3oM@zbmK!IW1O-(bc;8hG{MBLwu6zYXP@ z1!TNwN~}4_oL~`mZd8e!uF?)N*6!}xRR!d$qN*hXKV7>65W7cTHxaJYRdLdo5LAKZ z!?V9o5hpTJS7K(qiYsAe7Ymb}wIDt{7yD-_OD-~tZXsmXcsg}BB#OptUoIg7cdFtr zx5=@YBP)+8z1NTS%`g<+w*H=t>Je~D!~n(I%TU!SJP26Fh8P7)hKlcIpN=18#}q!TX=zdIPPtDRq`XPy{$gdaKZ6L5Lmh;DOk z;7lM~RgE0XCIL4aM$_q;ecyq$r8F6)ySfqAeYdk7%$gfqXz{IZEA*;(8uFsh(5Q)e zXyj_V5paW;A6YeQru=H9y5th$ksXrkee>+pcJL(MVEeAMi(DoNv)P7NT1OR3QI|oz zijG{2;P|jHInf-?(L%(4o}8cm$XyQfBp*lH$$3cRGQr7>ph3oB*L|y;o2r>#z%h>Z z8Prho-#VXvQ&r{GPub<;rVK{mTdT^Dlk#koS@uaX62FGJWLPq{!&vZ<^wftB@` zlbp(#e(sBk?(=7-x?5W0UPa_2E?0?q$6O^N<=^ZA&_BkO;#ynpG3(6Ex&Cags3R6X zbdj3e{&GpUg)yE3jRb4&K?A$i;7)4o90in(Tn>aN2p3e@yi#($i! 
z3%Z_JU$=_g8OVUeO!F3J;%)>Et$c6=HNR%K^tZ%iWI-LLXHhjF5vFvx8&4kQJV0Gxut4V|97|SL zaz_T}%Ij;b(4)^zKYE>)6yM6p#=aAE&$4*?B|0(nmS^`vx!|;SnLu;XejxZ&sX!Tb z1d?;pKi%Kf#Xd$*^*6uXD)BV0aN!__@BVnQU+|3tF(2Ykwz_myPHS?<8u73Vz3Z%n zIXkb^5ew2J_G`nvw!9{vEy|ajmf)p=2x%E5M@P=j_TF@WVauwUiu_QbM=W*!7@Q>Gv%`U+48N8!i#jy&oLeP`qNM zXQ$vi`id@T;yk6qau(hvJ>>JghUQe`wf+HxDvihYo_-F*+KdiZzXC75REB z3dv4vcjc-IAA4#TywtcIZG3i_R75PCGH5Wfy&Kp9!g-)y%a zM6HF2a!!FIz~tJ!MOZJEMU?n3>#JaFts<~=Mq zde2Rz*;+P_f|NFJ3lBBvI_bd?E0$J@Yni#4@}Pv_1~+Wvx~jUaS|s@8H}9q6wcK**1{`(gG|T*CCx|~@2&t|tC`q{fU;Kl zl;Xc9Z+~o3=(<>tjRD?Aw$1yaDsv&Bm_Gl##DIs-4(5%oII~GgZ`(6?FKQy>$il<$ z!RaS*fi*j>m_Et^PbxXi%FJUSIB#$nticJ?tim8jArd&YvOm@uw8^HPv0%|of%dHLxD>LjAnj#-3>^M}ZsyPOyHo3JIaqHeue7P~>P{oGC3*{hx&JKVZ zwu>AMfWr%lLBG!>($ z`{c=!D|B=#6{CPyj>8)E%+;A4Hm~HOk|-J8N8gf+X1&XOyu376;Qer!ab?N0xtZEo zjQKu%Mow#4PPxMPD_#)oDS}$5cSoDXv%xMs=h3*BH04;T7#vty|F$++)JS9`Ih#ga zETxTMnZ`A`}PN1T`wBTwZhXcbe;xla~*E_UP%; zysMl#Jrly!!SiToUD2=bY5NW(6npdKL8ooZ&F1~XZIP*5LN7Q^&yu_QT13&n3 z0!e@guHM2ds0vEpaF97#+3m~cANuFz-39~DQ0iv<$D+qFFfd4d$jrn@Z5X^&8lM1T z`0%hTv!JEHk$rvoMRmEcF7FI6WmiN(pPcE^^?Ffyb_~<48G*NQdOSbaHVwujF5)Ls zYuEipn9*Aa1!^z}+~NnONylA2{{q+vf1h(bK6oJtzMzD671oIHb!H|9&03|yE}wS; z7dq`ra_Ub&=ijLYnUKMqy1eqEj|JfMV_zOGfj=NViT8A$8o`SqJ^{mBD8Kp$UIJd> z-FbHjPUA{CCSa#e;e}Tm;gk=(b3DCP9C&r1)|7_yoFxz=Z?^cxugC zkNU#Ct5KwLJo(<6^3P>^G`su8lH$af*G>wZ%nxU$H-7QiX32%fi)#vMb4(*` zYp$k1j3e~87qw{PV_Gw_RVKZ7_po~Mhb8=mw&^e%SCz)xh7aTVB$UK4ux8o`3TH%@ z{IA`5idJq}*{2^h@{Kw$4?OEKkVKEa781JHN-*0u-%~(na^AA{9z87Bu66&Mo|cR0 zylx>s@Aw)s)_stL(y;dm_tcYCGbvjdagg6x7B4JO50+DM9(xaGHb~<(l)q~Drs>>| z7@rI!dHGT?*UFn!NL~NYHNEX^fth&gNZ7dHZ5l25_bP zW|QZSdF*0MzHM4+TwVO{j|n6JN1M;4U1aP8Kr|z*BtIxEclL*Zl$gq=FxsZ*#`>kzLYleyfDY{176~MlWOT=a}d-j@?Sr*^ZSayiVUDUlcBc zbvwWiJ@B`K=_%a8Z5B#TCg0C`6rs5Wbwhf+y_QzcGscv4DfKRS!z=G4C6MpWMDd!s zAJ{5at7uu&L+#v(4;*P4yf5;uA6Az6cI(#8R~@L;8|B-G8NV6bF8oE~ZtUJ%Kk8zi zX(Lv}bF*W(@}abMZ|UT1A&FjAbAJ9TxiR-LFiTHq z?$x!d-Q9wrM8ewL9O5#EvCBUO6<5O-QmPjI!q)hox(o~ngf{I#Cm_&2Si1>|&_%DG zp8-fgl)}RZ-k~CA^L@Ks+ACLMTb*R-$DGhBe7%{N8*Hj&ew?Fw0e!04*Sp5KUajsu zS};)8%jxh{X*x7{-H#A)9mD+ zHQQ$Y%kN^+cJcE?W7}8GeQ(L4uyfybdJ7Es-v-w)oh8KpvhJ3ZYi=dSrFL3xpF4+o zk}9sqwHKZQ>j`JppFCkGo1C~qV=C=!@_MLu#gfC13QoG`2<=}CQ&N8?0uU6h@Ou64 zJkta19No%-l}$Tu%G7p8dKccjrzmrtzT2M3-8z0_SmE5b!-Hh4Y@g)R)LWg4p|LJ$ zR&7B)1e2qtQU(rsMtdlqqR&U_eaEI_-V7G-RDVVdLUu1ELXg*2tH&;+q*>ndcWWJl zP;I}Z4Dz*blW~9kG-PS1%ug)-x2>g}QihC=Ee2L>@6PB!7Dkzcd^qK2mC&`M-M`D^$AN?jR&Os-qj=KGtfXV_ z;7gP6y~0lm!rOV6kMaX~D+e5%&DdkI(w8aK=t0>x&4Xi;;sama?EZ<}#2=#>HwL)-EFVfwE-KPP(g=W+$%wuc z{lPb{{Y1JT^q^(fXI`b8J%*s^rb?$-AMfZ`8ni)BymgH2fw8Bvm8g6;ZFqD&oz*R# zrvCnc^ufT+$c!d^4oY^)xZy&2zDYL};vS!e*`5z=lfdMD%4&n8K(PpW^$ToQX!OFu z`ORm+GtbC~Xa3wPs@L%Hje8_p(1vpBx1k3Qn^zawPwJRC4&RnN^4fNdUiwAj3fPAR z688)0SqBNm^g{*Ej!%r!%b^Y9-%-A>Qu_U4U_gRS;u|Ys3Vq=Ro7J7XOQp>n=<*Xa z#;(BGi^}>}yZ=fX<8opc+SIR|pc%qf+E0_0p`X0{p@7uVv-6?WbEZiq1^E#u-GKV% zC(#|+jJb>XK3^TxYW400qUPaeBwU!}H!!48cHRp^Rl}vrS-*T=|2*GATi0K~Rh^;R zsJQHTPu}V$H^X)bhbiI*W*&P=tWf$OI7}(>!?z8jv(?tmlEwvKG9!;y92Xu|P`xj8 zdwbMd;LXohH{9h5YadU`?f03qNTRe`9cqmCTL0Razf+x#Gii1F^tRHP$9sz@W+xqn zAI`8+y>@lJ{UD-iB*&D0GwxTr99wVJED_>;$uBsqs{mJ0v$g*>|DzQ5&lW9`|TADE`MtK^_Mgq!_1A@wV7jRHai{ z9Pc5chm=3ar$7{Oaj!{t;X|o&Y;LFQcjc(EWx>*5=gx+iygLfTm~d*?{`(fk`t|klQ(9g`gj#fzP+1NdHW$VrpO=q~2XL5pwS)06CPV@02Jo@}gKDK&3l1-06B ziS(XkITuM2BZ{FDr#u|2DkIQJBQ3*Xj1?~CiCxif9h;LurG&KGc3Zh|jhSZ&)GXHr zIh!-ZGaX~1+d6gUt%-N1E5`57hU7?sKQ|f9`voNIFm62;9x|lIZ7-ET=Wn{~KILTr zq+xOgOc{eDj;@ife_60RU;?UMhLK?LKKNyXmCRR0ld&>04=jBv0#Cb(;P^e0+HjMO z69-fN*8otx@Az(=g$+x;8{7F^VR7urCcJs{b7W+S+h$a;dvjv5X$@KYe9lm%aY&Op 
zOdok$cK232=A(m_ux4lHp)S-VCS6AjeZY)$H#2KoFmtfpl2Gv&zn9%K)o|~#Tg!qN z2MYZM9(AXz;V_xRA-4a%M=VqqDQw~kRrZJ}CZ5beQuFny5}>7wmD(8>HGSmG{6~sA zgI*aKCir)@Tm)oLB0;+C65b!s?ejpk2vlEqVj*o{Ed@cvaPZ6^8E2=zJE%W>`yywt zb4>rD)h+7_Ad|sYd}VN?1spi>w1D>Qff;D~{M>Iwm=AsHNo!PPx`*+F1Gyfyr^R#< zN;C`dvf7el6=Cl(j@qS-&mHYc$E;+7W)qxt4Vi)d2_JM7BG(d|(d?F0q1vxzG+7n2 zUT=UFk8DMRa3Jln_bs08FMBAM+%mf|;f}-Jn}Ql)ar0tDc&p>Z(Mf|?c3B!Nuz3eznY5+Kq!G-8S?KA+{k@N7QDa5-EUwFdIhLh0YY zU(U+jxi(Y%=W0T7roXj&ex7@A8qkDBnBD#4<3|Ku(4z=xUv>pUvy*BX`m3CRIHmgZ zSjBL7JuVSt2bx_W^{uE~0_CU4ZF3x6n^>3JC?>tET)rrG%AkzG47J~4FMchX5v8@GR!3C|NmWCxVw zwPAs+`59Y$+DQ?-^T&a?w}>mMaQYwm9aqEpM%)|%_l_=`_3d;W^6#}2eQy)X4d4FL z<<#Cg5U4YLdKjKzn32|p>?=r5|ZS&jzhjI-{}Uk{nN{xgo#ozY1gJ4h8@>G2^AaH*}q$rgUqW~U#(-F34A zFTvpmjn&+Df_AewE#=!m6(ZXiuV>4CbdcU((8OAQJ}Ry3VlDymR>OOfyy;Pa;4|QC z?1N2cFixs|H~DK^$1a$7Q>5f5$T;G2Exc#g0l`>geBe0~W=9qMxLQdeMnBVfXR~Ow zQKHFrwCQBw162f+UGDHw=L6#&^ctW^&+{l-v>Un=3Tnq`t1KWDbTMrQDmgC}G(jT? z?z-@%lP=m5`A)_?{`frQPL6xtrS*9_ynItRw>3$!9bwP|$YhQ~oaB@)!?|YJ+q@B< zNvaDslPsugoondyZBZT}a!}rCK;5`l{TlQkw&66 zcDya0P5-8WENs1x*K@M2e`Th+m*Ip0InQZgw+4Xfn&}jzv`s6DCdpdAYSKoLxSURT zd1PU?h=oSb&|=qHPGu6_af5>xTBF}fnXZWcK$?e2`(|Ck`gP0VQlgE&i1@Uci_vYjnO^W8~-exli9sFlV^&^gGy8+x&zy_9ne7QS`N(; z&xv1;a$Tg-LT{&rGMDfzz4Oblp|1NXv-XzTb{&=x=#ky*K$yg^EH1h(@zIz&N2b}o)HrJ z#NUtYryI4bvGsD<1pLR1EVLa~?)B1uM z`;03x2dAQ2SbHr*r4aov za(-8Jy=3e*{Rb|yl8F6zSHr^66NNZZNpDz)mR`&$7=B06$45{2rL8)ev0s^#$DjVpf6Z2U*;I3M9^9$VjO-MHx-9!4 zNOm)x^Tv&r$>j%Lcf1-cqm`Wui(Ias0jYS{VL!=dwX&jVlTv&c?bu&UpBE~YFkm(} znL4i6zhq1|^6DpWI8~o|Al`Hqe)wUai7hM?$+A~!yW6{Ws(J2_P^Ud6tp|@-8}P0B zOB4G3SOR{LQI)QRd>nAFrIU`gxvl#N4-IyaNXpMWyIJAK^(-v_b0R~-h$fhsX4=zr z^gE#q4m3^4mixZj+Kb|ynw11mg&M`1o3nub{m{t zl3SUB$_L9&vvlk#!_~fl-KLQ`K5tKXCHGP@TI+J8Y+|{f>ft${#a+fzM9niO(_uEK zebKK(VGfu|{Zoh^*&lv3nBX+k=QQDO>s;0;8fZ08=apl={_1Gd5dFw$cBZ1WA}FwF zQ3Mp#S;~Vwt_Hv@MPcMwT83gE&b9sV0BF#!VOR(1FspQxM%1$x(&7{IB?$@2)^EOm z?H@Rho508{^fKUJ6z2*Q_Ih;o9{u)ol{lob4?X%9^MDSRj4x>4kPg}10Q7;`7!JAE zx#YjR_;_tC`agtR)CUYH(4@PaDCtc$0b<^9I8NB5twmiNNQPvjgSkX=0~j-p5F#hO z>7c)rsLkHtaXt^Pr+ev04HohMGDdJQ#-#_Jl9*S#b}kQ8vwXm(1Z`E7|vAmfBE( z2}bP`BnC8L8vUhk(CdlI+Q*+v!u35~FVE+PzdTSeLv;T??*RClG+MI90ch!g;>eh|Yr%L3*`DcrTY~To1&(5h)+n*5M3y zZ*Zx{1ztf8bQDcrv$Pc92Hi}0!-N&zHtqQe8J?i5V6?*fZ-gJLYf!S+;%$oM=gJM% z>Pp({NY3Ojq+O#*qK>iQlAFn`kMj*7E`j+(rn?iGXiJ<8?x(mjKMOAGQ}{wd*N6ZS{_mGO!n$HUXV&NM!}JYE;GgEto@R=l3`gl)gMYeqYv{lR=Gy&UYW zL@V}VwiR349h8;f2dr(;6wl2ez3ZWuH@kas`Ma@Y(1c7_;J1`(4wgOA?z+%$YxjuG zsVJlqf;#WQXF#2A+PWctEMA``LSCi#?0QTtLHz=?rxd=5juD;(AZS*qQp#uDqdaai z_Ad;d=$<4i;y*ERfAy7mb7;hYTjWv3S+Hf`+)^NTWKS0hp*2$NE=_b0&>B$>% zQ2iBUDU8qGIqV5dW?^VelEwcAfE?a>_~!vDAGHI;BZSqj-CKP~uHHG18+2ZaPU($~ zf*ct4CYHxs9i%T5Awxf4QD}9lvCaVRhc*|-Fdx_CzjCsB_jsQf+_G~}qzcT?Rx$-@ zkZDGDmxV7>=AnFWqJlP9(w&9Y*!t!J$ZyAqI zzCDgd0`#00{A_D3o^$8BGye?e(>54Y?ab5tJKw&-L^oS{zDbck2kNFdu$L{W+W!{nKJ(b6!tuzI6R?57v+uB?R-L zZ*|z*ZerjSEFg@EFYtgW)KTSaHA4RHUfE`*vK9_9f&Dz_u+72HwurGM|K@(Imi89HT){k$*7!Qt2Ot0gIBr>h~>1U+rh9MOvc&cdT-H?N2*>XyuU z93vP$`|c(EJ}{NGtQ(WWbF}iP%!risK2>53(A6(Tbie7ud6g^5#!9%qp#pnreWo1a zn+el$Buw|HFjrZrSeCoiZdt1g^$-^T*wY0@A9uRaL9OP~3MWx(_`Ix3!x z_;g@4rf-cfI8v zLHnL3G{Wzz>yU=pz0sY&A0iI;(>*G8#r`t+Tdh8$`c=2}Ofz)<)OkhX)p(s;57fqK z!~=pr8ZA{ordtY4^*n4mtL6aX?g8V1PeH3a==3TwKTQb!b!3`-M|raw6+c}Eh~(Tl znIVzwVwTmV7|sJaFhLF1D(&#h#Jaqndm%l&tV*iF4+;XN;dQx+Kc*8}odm?2&n*s7 zS;uogJLeeLa)3jft>SO5;{c1K_1*K80Rf458*?al#3QJK=*ax z#3_Tr%4Rybh#{LBn_+(#&{T5n$>bkBjK4;2VkFk0&|it6g$zr0gHKO|l@wMT$|Mg> z<67NF-ZMs2K}=|xueWkG-hc2b1#A9@1-FkH`!5?^G-GX(a?*go=LQD9z0qcxBNlIv zrv0#d&+7ti;e2q_?r02=2ef<>=gz8l9siRGcLS>by%)eXu=axkI-gy~rn70{_tm5` 
zp~*)oA7ik&EvXKgTK1@7EBRaaE2Zy^YQl|JeaJtWf=pN#OKE%18{2+D z&-Men)*tsQJKSvTUbF@vVUNBIhk3XsoJXLmpC<*c7)St$Ea){>LC(LM`5%ou__09m z-Oru9Ya1l@N(JWv|J9_43QNRu!eCCk2M>Zd=~WEADXEk_V!U&Qi|0wf$H`zVE;i<; zJm2$~zD}|!1)L0f#QKtPcw#b#VorvU5agk#xRCfs&iwp*?bwBe<8)&1+1$5L_pdjI zLgkqr+&%)bdjg3Asr*R|&4V7MNZ&f`_Y>fTSH8*;<=1XB$E#W;#*&@UnW&sO;oS3})b_Fg~XH-52Ovl!w!=NP0U9j|&?E zkxyxaDhd*-=-LBjTNdWSSI+VpsRk#y5q=8cu8KJ~>-`yOFRb{te!KgBBVk-Apf}Bv zJ0eGpMf}X5>3M+J!YTP$G+GSbFD9)ma?)8lNOLorUlM$GI^|it^l%Jif!u@2XM%iN zKX^O0=-<{_rOLZaVV1|OfsHPqn7rL862l}lhRox0J z(w4|6fDkR#YnS=sm_vbhmi=l{^q}_70x4P7p9du130N25`xcas(gLJv+kbA>X3qoU z;ry(*=)-8n3WnqMIon* zPm7}O3d9+l8-WhqoOits$!`+mJx8gs&G8B713sSPZne*Gl0p@n~Sr)e=?jh;(*iTIAtd`WzID5cJ(0bU0*eI@Tran(kpmha* zWsPKgNOn{x{UXh#wS;(gomIa|+6;{ueKzjj0|hpV(6|W6aDSn0v6TfwpIkZmcO1{a z+typtq`eTUA9z=?7;>@**Sc}OcLR{k91t@}>N&q}DXf~=^3)R}NSz!%RD!uM^P?G$ zz`W9`(qr)lOgH~=1>viF3_inO_~Dvw_^_uhKFnM|=_CF=MTER;+fj{}>!^?xHfUV3 zfFRE)`ByLTsFhKx682un#_C1*V3+h*KR=sKX`w_pi_Eqz^7|W~=JSV7zp7;QLR2T# z=2;;ywXXu&Tx_F{%i5b;D0^~V1jzb+QO}kr;nv$!%3}BakqiIex8fVMiJ-hdV~R9! zJQt^#iI<-Q9msY$k{XXC#pCwaI++t+eYANO5nS8o&MPP;m34UomGk zg$|Mrow(Irt{B?t&L!LC6Y1DS(Yo5`cZ59ln^HDhB8i98Ps-iS_SAFU=9sd{Bxp&E+1iTwPV?$gdwoB*Jm`81ef+ZNw{2Gs)EOJS>@Y>RmS<0kmbNOj z4A$XSS6;iR5SdRq9^|MBy)=6 z627}F8dQ>pIsL>-iF&b9)%j^%iou=%*(RvR9&1o_L4u8gcvTG1om)@xF_7)N=m>;A z^=|Iw)|@^tirW^A7nq%^Ir>7Cm^O2lzti`;VmJG2PSkBXQ;^+&iQlj#z{J1^M>|=d z`CTu-yHuU#L`Oze;)(Z|Z00HL#suokrkyuVmgkQYWb5lU7(e;PCTcvdI_9lX2fZui zTX3`7p$Hv&HJ61rHK3;x>UF+lq6Nc$Ne>8}W(Bq0cwi!6DArpwwBj3-MbJeY@*H`C=QpY7s zO}eXxD<>gG`6E6Me*e$w0oBdBYZSV^z4Z&P$r+)B zp_|M%9}uei&5-1!FMr&v%Iht2di|PNfj>AH1gk2+-!RD6Ng!Gf-^oR1u~r}FhX1fV z)Y$nZk@->|yL{a6^%*40)`lnjp#2N}QU5S^C&_o8ye+o8O)%nCsBBrv!PFb(0L}bR z4svUjpV}@D>O-+6=hqY(q}aCjjW@vo$Q#VRk*%@B_>RP#@TXDK`;%*)Ws0>je0$6t zfj_oovJMtZBw0p_VPl8 z%78~cOV@>O9(@jPlo@=iI>zyu-N zIz)JEu0pUE2V~BptL*B(*FrYW%n+MVc0BlDhF!s(S5&bVUd7(27gV}NU8T=pU8p?g zDxLTSlV{uA`c4IVC-7}tt%^i|`L&&7r&0#*r5Kd}+m0tQCT<^X zN&MN3J}0Q02XlZ=A3fpK>vbXTV@Va1u{0_(eUtDO>_751GA*BPhp-9yt=#4L2rs*L zFJ%~B(B~e#wfTq_;DBE znxeWhds68pX5&-VQBG9?#4L-0OxjRCEH>(xGY)yHzN||0rCJ<_Ghu4Q+L07P*Ar3Y zFR9t&oh@*0f0!bgiD682tie}?mh)vXy(5Doq6M_+QbY28MJ?n1idyf0`p2hh^Mnc` z9hI#vL~A2w2+Qn)LSBu9`JR{on+G6V*G_7f&kOZi-Fu*!HsUaA{>d5&KQt(x7zowT z=8q-e6tWVNp;+I~qI?Z!S>OHQgT8VbJeYU;k%IV=0WZU%(u?#O_fCCu;@$;w$Q^#U zLG-VS)fVqz56AXa40`YlIpd!CPdwn0FWAT8s2b z-pK3agrVfk?Bv#~G@g256%p!xyLQ@Z^}RuXOg#7TGH5Y>N;PT5j7<|22funFb^W0} zu_{roms4{H#!MG{RzU2jx|po}^7IQywO8|#TN~eZZ*Q!R?K%it-1>xdn3(eo+4Z89 z+(z()Rm}4BuR1aG-XIJzoz*;hpS4l|p55QsC>qJ~@B>`}U5-Ea+#n+J9S6nHnbT@a z$FKPLk|&(?XX%IHS=+%~jw_hUd&HA7somucG6Y;R@T?DDJ!iP1(@6U&)~;O==FKa&gv@*Go|-15u%j8prw58X#K2Pxe?Gk6hJZtye#!!$qtKS_;M zUY;F7{yjS$`(a7a0*2p2Wa%#p+!t&{N?HDdiX#!E6Z&#$b4O`a^+B&wOv#il4zIlv z8oDPQ7rn}It$8^H*0C>1;5r3-;XX#~brM}RpOZYp$;


i5Q9pf1KVexmvwj}F{Rg@9ia+Ez8BA+9VdJRoEE}VO}T*hP<8d9IAQXkIt67g&O zxYKyz>cC(Phe`l2P>qK#!93P`S+}m{FMaIMDd=s`cvb1^K-YRv7}wMsyZl2Cal){8 ztDb20{g!fS%KL!N_9=SM zi6h9v)sVLbL}#&<)}tOs_@U|;v$XzvWI^Y`qy5OJe?P}`yw|2{jz(kl%36Kh^U8dL z#n}C3QSg}yE<3p8=L)@o&AeMaMLC{r@G`ti&ChXF|E2<^|DEaCJ1=J~mN@=Joo_x+ zn#1Re(40GyehyXt9qV_i>TZ`Ixg@>nj1-^rV`|=OvSkS9_umFAEKPbvZ z_q`oKpj=Q+5NR&I#<7{bsY`Jr%;0?{OMw@P7nP`I({}m1D`JcJUPfnO2$k&WT)Z42 zAB?;$2$=@Fi-Wz^UFi=5&E@4v42mK~ zsjof3((L}G864m?OvFR(^vz^Kw)aq#P}Tky*u@G_(!_ygkEZ4N@PxC0uu5^1~mmUxw;%s|F=6? z*AQR&*5;1O$O*i9qDq6nond{%5Jf$o0L9?aw_Q%B{96JpW7j%Un9${2pUfB5?NkxL z=EcEhX=dPfJ+aYAjEGlwd(nsKIf5>?BAcRt!2;iUgqPPBP?w#Fniz)>ArpqFl{Y;&NN z#n?|w%J$KXlly@>GH@XqkE8Gbg)7 zkxk)%wbet*duvSizy0l3TVmM<)fc<`Q|u?aQ}oK@je-a)s17Y2LHoEYdfWy%?{}fB z#NEk3xH|}$K!2HKc=h0&U`%uLef-#Wly}N|%}JcG@xtv{{*4~B35~+BB}4Syf`fEc zkL;pZnbPS{R}W}JEekCB6IG}>Lb z?|0WM+}b9RT#m*@=I9PZ33YcqV*JW8pZln|k7Y98toHxpY<7dFcSY(>B-THWzW@95 zBKQ<5<1ujkZ+5>v694kTFM_sfZ2U3fgjQ$RPzPH0eiP}U>q(KBH-_B4Ym+QGlzZ=M z#SErHacwRb*)SAUQMhMYktq8zrtj{aO`^`A{)aW{z4~}dVAlNyQ>tfY4g=Jr5}J0% zwoI)cq>(;i>LGBlbcy3n=2wA`0lHm-@}NHQ2j{CaH_Ec|H4@ZXN`1M5tF8*kE-%Io zF1ymhGnzjNx6ebkBFLb~r!TId>!5+_mcSyttFF5;>~pyG!JfW3BOP3?`^LBd?%o~n z)jd`jynkKsazLHlZ?`vB{PgxwwQLrG6=467GOTykR(D!E9!$wP*7_OgDFd8OGYkl{ zb$q`1oRZtab5*8gU;MC}>vQN(>FXCf=)@b0uoui!;f*Ki8eQ#wP5_U&^9=95V=lWw?c(i@}5glqU7;W3vEEZ z@A|^6$+puM`zqbmn0^Aes}s+NleZl<5p>skZ9i#GcMp~&@#K3tb=HVk+`2?pV5P%|oEet)9+cqlAn!NG|+ z>CQ9ueK=(Bp(+?iRV)-qgXxL;?SsUix;e(@8A zQQM5kSG*5)XORg_eqtpUWJ{l$@43t+Pw#TX2!#x_yfIx%;Lyzd6!(Y#S_$I|C(E3{ zj4Q*X%c{f^+A~^G>ogkvXe{m?MH=?pi+7A~tyL+V9!OHWkgg}3U9wpIpW41j`IeOA z;@B6_sYbiEO;9Rbbg%zPBab4QieUC&rvxE{#$_5IGD! zYqvexH_X58Lvo<$2=93EXP=O6r)WrK3*X{$%y=jivcO)Ll>$IMp00D7Bat^5OurB~hUx9p*~(D{fEx6~&(B<5p8x zfOn;oweXE;v*JMKi+E))WUs;5^F&cXn|i^-@wYdu$R@1X!P zXreL!K~v;T{`YZp8?VoS7`~1`yn27q3D0@N)j0<*?#+$VMgfA|>x`c(bN|5&4g90+ zKz@ZZkoe&1#X(|>Mfttkutm~#d&vzi_>RP)x(OZiE*xI~t+rMuoeCV#hhPBNtqinT zE%Tv~5A_A|dfG)%rSDoj=_r;OC-}^pmiMy4h1tw-BZK0J=eLes#i{wNewNyVCwbAm zJHg6fMXAT!o8T-GQ>QoEe??rated?E(3EWmOTb9N4v{*Aw${cUyXJP`x!0!e)?8Ho zi50A0T!>zi=eR72@J>eupEZr&<-SGqOtec1$So_UTthRi4PW~bEu$evavsNYP ztz^C4coT;yni8(zZbyU?1pbh>4rtf$<*V&&E&F*ei9>U#jGyKpz$Y~wUL|yRV|@xM ziCi9nB^WOvZ=H4*&@ab?6zu>d0=`i4DcOF>_b;`*ixqArWWi2%gf-dysz;0N%kf|J z*5=OaIo1t31$-Q*!QtVC@gwforD#<>*05>qviS1>)JTXl#2D6v!}rgsPLFR zNBoMq_EA?+O3~v|6=Rzf_8?@vS6wM@e(YLna($A62ks^Rk$Pj*WwDYn-@ozmL}i6b z)P#$aS08f1S>gHVBR&3(7yp3~mdTH{J<&G{ulQN*4X#yrRKlR9hfev=lloeW!mvfukHHHXiX^wU9-EKLEnX&Dx!qRS}(T4|Ehen!r#dE(E9!8dGqNlK1Z!ElWr_u0;P(d zb9-Y~ab*_;hOkY=2njCaTg>{45rvt(#N4B|@?;!-8k3h1e-EV1#Pz3#{}0iK zwAc+k4S$z1^Mf?(g?{s$XX4%j<)p_Y_Igu=((;Ri2lcGQ@a?*X9*+%3h$ptZ;IROB#b1r5_c_S! 
zC(xx_Yd_QSsm2SYF;)sWV;GZ4Dx>}aRNRqAYKLW=%#}pnM;Vt!Zx5gE6(_y>a}nIR z(r%azX>+;E#*3Yyh9v}vCmnSP&z%FLB0OldF#)8y-1~bI5CJl(n%Vwc!9KN36uPWa-*b47#`mKkM$H)17dtVWL;7N#nEKd-8L?@a2F>x}NieOPDxJ^4gr{<)x=mJAN1;yTj=REZNJ)4nI1|s|qD{ z8KqnlBAp)Wh^UsgFQpgA+7$InvqnE4XxAxv zmc1Uo$owj*v_leq1W5UuWs2C!!Uy7o{GYFS>uXvH2_@Pz9)C#{ zbug60jg;ONNeHJSK}dU|+ALcn>luW~pOG)PpTclO zE80!(K5m|tAP@6|bsf;L0!}j0N9^3MY&WcVwL_BQHttEQAxWXkckY|WlhmsHOLQu$ zX1x4oPKzwSS`jNr&TA47hP~hlWpi_k8(FCiL@Jv+k*L-0GXtF*rKWStwIA}>f_Exu zb+7I|DptQNAcA5}5RNIM6-BCP$KD&y>Q!Dhy>uL_y)|>os(bFysLfZG({LFpi0{3V zijdOLVsYKjpS5!p0jI7;r!IxWXv{;ek{n^DG9cTxcIvG*24Qs+l_yyvtxjtif+?3D z{XfO(|MlXNs2jJB;%8uuEe#KQ;Pz`0j6`2#`thIWkdwJMrK=}fMV;D$N0&{@9taMd zC3Z!Z7}eSAg){PS%k>FICcFaR(thUIV~(Ni{MY1;UG5nPH*S68$Zmla>5rL70ZIdv zmlNc{UFsqB&1VRYudK2?F;2RTrJ6nAy`K*nBFLi4twQybA5nHua7a91`RRMM{k;sZ zk6tZIp?`VMZ!XmDHCirJCyyLTEiUms?TsSA%LI<=Xsi__MB1JD+8dky7I;zGzV^~+ z4k_x&!j2`?{TxKNgkQcpel<|C;6gn%qtElS@>HOq*-qG5j+uNA`P8}~d{L-qqD=kW zZy~d;QKXKh@R`bNo&h>m-Z+qJMd0t-x^y9VKCg^zqnc)= zLz0RNcfQ*f#Bf$^>(8**|NffbwmB%=t~G&qwzx9`U9)Gj$RKcd$IvME^8oo&ku)n| z%cHk+201#|*S)K{%>Emf%{vG9n5RbUf4$5NzC;6SUT2E(fLtQTJr=lVx^EUpz|Kw-n`=zC(Oyt*KVLLxSYJ;H^M^%sKOe7n&%h4th{q) zAI-O~H~mM$RRhx`#&yEW3N*7pvbiV_bdD4eA7n2*&`++~-gK1Vi_YWt5AVMmdzMnA9Hv&n z`s#B&QL%OT-M<=kX!y_SJVZUGR6Og~C2t}7xTDp9wB|1|mJ};=tTl@{^?;^DsCENZ zeJ$o@>K)$6npRtwR6ajt_s)6ct%M3*y!y(*%%0nt*R;LHj!=G`Fc^dJodM)Xlzm`i z0&DWH!KA#1{%rRD+tmX8^OH;fCGNv+zHh<;H^Ogjt%=7<9)K6m8$l-hW=rYcEcf5V zGEhVdC<4QiYB7}giZjB75AeSuAb?mG2T||EOZc~T|1V7+cwL?!@ZVUPj=-`rPa`^S zfAPKxJcK_FUbt^X8+Qhn*rZE%yD2C1Ub_vY@R=nOQ}gztX;U^n)7{hm@-XsvZ~b~{ zj)FfM{Vn79EJ~TRf>k|n3p1M^>y6?kh+FlfJVnK31*ujaQqFz@?L?pPiv zGd&>66!RF=KFC*2P-*$FBazb%;WcStrRFn5U8VmwfAPQnn)Cf2wKT!yT%n`q@k+qR zDmi-00xQErZD~OnqR6KF)0ots;z)4(Dxyr;?6cmK*?N_SCdd32>+`?;H6~f|{*-2u;9`Pj_d!6_ z!w6dZRPY3tvN9|C^R;dV3p$i6@%d_LT3Q7vo1ljKJ#vl#M_BgGBNrz3Nm8!94Y&Q> zZy%4!32IpnxIxaw$?MQCqt1r*95pwU!u*jdbbS8HtO4HYp~m}E z+eT~ldC;v_?(c)@RBMVW0GPJ)|KGqx_xx?V?y8ZLBxl@km^Hbj{E=jeAXy{ zs9D!`_}y8fMb3D=5Y5IO6u}`Q>(>LhiT7V_@4vhiq4@XC6=NvLqJTS>aC}achW{i> zCZBi>T5+W4ZN0m6*ytREIQaTkJbZ4a z-9PTC{RJ8^5G~X$Opg?6L*t3k9W9>lC zVlczrQ8|UV;NE$zYbNJZ1vh?gYhL)6@}#9QEBI>BF=xR^Y>~sfCxdVwdyv|8-Rn1y zCLYoTDjrquqd2Da^<+r-Yo5-#6{d9LOq1x(xGeQ8Z@C`qdd%0ar|vKNYmI0-Cj{HW zf|^5@T>zRc%a9tvWS-gXc@T!EUG{s#=SjpG>L6SRW&S=adk15}l`+fkrO3=nrc~-d z=a%Mb)8Nu5+tnE^-?-UNy=GIX2mxN@Bt9WjW8#%gz~}YeR=hYhheRk2l zo1xsyerPfQtcd*f zbd{J&0Nmqcx}>%t414zGVr5Nv_{yJZ0O+gu+f%Wz*;snPg9d12zFv{RR}&&cvZK$N zI~|Ss3inK(lIeD+cBIb!0*ld|Iqki%(|4+)S`xPPiMy=1`u5!aLph2OCSwn35jQtv zh&IAcmlV+eHZfodQ2>24M1StB_%+Y1Zd`E-KO;_BP$59)4#Di7!9_CFvQw)Hv-t&S zfc*Oj+gDijr>cmdifx~%H$B0%oJTzOIdgB8bboAcEs)&J);_o?iy=cmia8Yl=9h2cPXAnKrW}7;xzN4AQZ+3gh(+}$~Unvifu0-aq*V~C0TwSgw z4x3OmjouX6>@w0|Vm2;wZ6*a8X4C*d?({(PX-s-b;;-K|u*s{*tCA8tIT%Q~lx-xZ z9hsah&YL+5H-4g@@%EYITT=QLEX$@Q_QegzKlc!+QN=eJ&*Ie$(5$wF1;=0uEr(L3B&pvq8-*>0Tz z;qG&?#v_2JKIkb@O9Rd}4E!A;pr7&Reg;HL^Ue~Hcu*7fYZ70pcEJx%BFF%j*p5NF z*Xaw$3|KeW0%&7ebzUM?{2$&n;N0{dlJXtCLqL< zI+N6?su7>d##kG3Wr1#{tWn0cA*400%PERY)a|VN@~m5fW&xa*E!`$Qj%j0I574Cf zAGc8A^{U4C@ghfi*~U5DKBg2P(#c|!ii3bBV7Ua++=bUmL^jL)o8FNN{Kfgy!q}wG zK^8ev{jmzFT)cfsK`jC%8k?b7FmsOQj9_ zZQ`T#Al!CRo<6DqzecDk4ikVBaGcibH@}l+S7SBB%+u5d;d`D_-hRku44sf)70D)7 zBK#H8WEhSy&8d=X>CW&)=rP=FR>Y?;3-=4YK_lP7bk$1|PkUArJoAO?Z=6!!5GMPi z#KcSdR<7J|d;?I7SDWI*2X47n?PAE1x`ES|w4Da~_mDHFX+)OZ)?kmo-sWFPrE4M9 zkPa!jU4Lur4@qh4>>99mgPSb_ME8}$K4!vH>%b=!Gx(L!B2wmMlkU13{jdMDoW>3~ zKimoS3UrS&cM5i;?h*q>4tTzYJ#j^+ZRpm;P{_6=oQG7^w5AbHb6uyIZvINok(QNM z3%$TREJI*`yg^xBgP`BPG8I1A<2hF8Gr(fX4F=c!YBrSHYS(Q3OR<5k0dPKi*w1QX 
zNx-XXzUk!yd>HYcN`B&0Q~EGomws^I8CVBHbhwnJTBGl+p~z_YwQ>$k`MCD5q5}@0mq*+)X^5FgOMM z9i_**?o?6`dYo-o>v>-}ox57NnNHlt)YrC6(%rt~HLy9nbA$VZz_~xdHHkOrYBXZ8 zY=(=qyTeby(C1_|4z4-KvMDuXszVUt%nGFa2ZS2Ikr3v=kC0iFnd2R{i7K;JW~;lW z+H{}qCevf6lliVp8XP59!$~vaQ8ZdAylj4DZ+SX5I)w5#25MgUZcGnE9k0ki244IbyGQekbZJ;JvExs%iE_xA=k9^_25Q z*PVcU0#_$k5F6RP?kcd$#^oNygJ$bZ#u`B1(n(QkiT)I2h2A*uBruNWGd;^2(adS# zw3{sa0@Pc(0XN4kR|$1j6$U1oEqCHtt@b*HRi$0Ro%Nob+`7;l3KJ$^RUAcWZFNb2 zDBm5H!#9pM9kfi)?a%EJfc3KevqVA>-s2D#6Wn><(}8rJtcSeGuehX*Wy}I!1l5!3 z<8EeOi7E4wOeY-hbKMwz#4unQYrcEV#(QK*$U`&H0u-IeR!HkFz;W5ok|@G znDy^)USHEH@d@y!D*yJn@N#4+nDQo?z{rbaIKMYs4_>jV{PiUEo?Bq^(%OF->TVIO z@tL%65pOwC_g5a53Y;(qd#0OnfPqtZMCkrD)b+)gd`VKpsN50ulzf}_k+(EU{0_tu+bbqe};c{1W>p zT5|^gPm&cMtKXJ=bPj_OS+)7K!-dX-_n{DW*%4hbRfCb@ZCQ2v)LM1~Q?|=*_FB5s z?x!pH^7Aq;QUmuAfdyYvXDJ;K6u9q@PMP!Y$7`jUc(kjPJiE75C*FU~Fo(j-A12kO zTUzHfjSG&pD#7dyVmB#$k7Oy$dI-i7tr&@!-AFEgf| zlXEG>sd0AhFI9Ug0P{srf|Zxu4LweByzV)^TMY+(WAnx89pz?d$@CPbc`N+H=i%e% zM+^^!&htP+c+*Ur?LGF4>%qn>>O1c3+L5;hH4&-opRx?qkFiXjG+x4qL`Xtt$7i`o z&Pi`I?~G1L(pX-~eABeze{B|TvSQ)3HQPs<(1gMk0L?`ZX*Hq^WD^T{oX-%A-Ecr| zRyRSBH|drRX|{Ye7wiBFaRJHQPnV@PJ{qSKa8WQnU96wq*jaTZkjA4XgcRmHpW{g{ z&E6y~Jl=Y2OWN&|*9 z0*|aD6zMjR0O@C4*}|}TRj~Kh3!I0$FK*u|a_phU&3-}*uGHj3tF0#&oj_`nR(6h1 zeHPnk;-hDQ%jXctMntvOEXWfOchO@>zl=B=aD^=eN# z`yCB%yN45x`m{xEVOD1oW9Z9QVugEwmE@PauF&tqNqT8V^jd2p#VSAkx}*u{v)L^m zIfpZ}%w`GrDh&{m(+p=ZvD*9I{uJ~14bbbJ@|d>SeyfiOIc zB+eqdG$jD3ge(OAsjv|VrE|wAw_vBCps~ZtX@hdYntmWg{JeG})Mfo>>1>tN7`|Q`#y8eq(;kw>#?>O6=;-d&*?3+G0?C{Adwk{@QPe zF3WY$W;rWmf;T49 z7voO!pyeZj556m_=EO7RG&Qsss~!B!B&OerBbj%-X_N}+@E~%f9`Y!Kjkxz8Md+F`Pu(6}qpEs45 z9BD%SN7S(&$xoYsa8Peh=O3Jsr4$a^4u;(c>#R{wkoVmegXXlKS^MKh2yab*1L3JC zN@)SI{Ut-qPi_T@v|67dzpG{R*pv#>Is#hAoH`Bgp zJn~JPS#}2`PJnNMw{G5&kh}ci1|iq!)pb`midzZTuA(|L+cAG(;KL#H$L*5WAk4-e zPz*=QElzj41-!;LK1uDe^mpJcXp{+ZJU&&_82s8QTic$78kXgLM)pyMs?1>Ww6@~j z3@4P!UySPe2&$aG&_%zO!9dfLKiaddFrekipg4tF<%^D?Ng}`JOXwxPHJ3Dk!SalO!_>?zsCVL=R#ZS`Txio8C#z$Ga7~G!FEkOzKE{84|o z$}E;`16>s3JO0Hh_5!>v4W%UkLZ(lA!BxkV>c6Z&0W)uc`Ph?4Fb43vT+`^e3+%=` z>yAFEV=+N@(c#%~=OysL1N%BlB{+6wE;ubU`CK|#&yxo5;8rda?S+y|8-}g0{&DXZ zJr2Wr8puP45RD*aRsUeeM^V6=a*VFqe<0U>^%@?bntRldF1n}n)>A>DRG3bIyjs6H zzc)zhw zCJrPIS15_GY-F{;iP(VheJT}9$>nnA9ZCe)aY;9L%Cbl%iG47Rzvsz zIj}cDF=rUxH((v{Kqwgt?+O(1 z@dr9D9BsdbfubG-97@oKS^T1o`jT`%K5Og{cpu{zE1(t&C_7<`XLOmrS49h5om1}Q zUE9xKw6!OLzMCf&>tjU*wyIe^qkGb{*yMO5yPi~wx zwC{Y7*;l${H_ud&0~LUCXKP*hz$9x$77v%sB4NU{1mXmXqNhgDTX%-*!a=~ zv};oQ7y!XWDV(|4I4zyWgRh6%NxSw$54ccj@W(g(%Zt3v8SW^M)g96woV{aLTY!^G z>>nrB5ULzmsg6*fM;Goklxgc0c}edzU07)XnmjZPm>78t_64R{o}0^50@7$CQhkkA zd><)L?iyNEYKY9_DDD0ZyJJIOcXsnVA!z`1N1`2I3Sf5*e_47x#)P#zKZS)8iM#qg z7Ps$uXK{bvixfN2+7bSir3M3+J$!X>kUYu)KQQ3x!?U}J_qtM^t-CL^S})xX7n|p; z)y~}U3Ut;}z$+0@m{bpjaE1M!n<^@}I?d{U;Ly9nLvSlphaM}F>Hv8(`vvzQ) z{diTv#aTL+8keFET|v;L`&g(#7BurXX#4o&9hwEy1~2n%TM&?E3?=)~-PJegG2a6M zLvmY>IY7J3=EsRrK5;<*yvq*`Aa>YA-piiKc|plua5wc7DJLJ>9s3$jxI2shiId+* zxa9HMSAiakLukI0l99i_Y!Apyx9^`d@q zG^G6PrCTekf*ievcCVFAq&(8=iF;o5I5&iRz=hwWn+d5})0th}_3??KFDoFkjT ziR_%@Cm0A%aO(buUABnpiG7jZa^ajq|NJYAO%DLS1aQo)uc$EU*2a%LJ{Q%`)I((_ zNQ4Wc>%NM1Ab9s>p~$ljk4PVWeZUBuXxfp)dH8-Gn{X#LQcNU8^CVR45BW-?D}Wnn z+t?n(DfCcmZ03nJ2!sK?i4wsl@+)_QB|$020A4uBdl}95d$s{;x~LAw7iEm=6rk%2 znXdahxZK2@eLrS|l=L&ou3F&NK37(g+O`}#7A|!Aw4jdhYMKgH~ zj9IhoU|eUx1x7#J`F`%$4#x#4;`>Sot7#^qAKuqaZ?J z;J|AC0_yop^9+DzV0;C=6pHU`j~d7jHe$Na_ec6gsvz2#W|NF1XJ1UK zlj>llACOEu+{)Fxe(FDG1sunATeEh1M%Agg?;>~22@3DphrxrE;{^&Ud^L_J&Z4PL^#x-oc*5Ud3;O8Lde>~E;tJ@|d0-7i4>g|FuS!*067n1wr zXQywK*+H4I0Q-b6=H$r4)84@}BL8q#B8+{SGFo(U$2ypNkZ22`oJK?A;K86rF~X_Xj%9*& 
[Binary patch payload omitted: the original lines here contain only base85-encoded binary diff data, which is not human-readable and cannot be meaningfully edited.]
zeTU!1=a%L^e@s8XoyyU|M+knY`TgC8s}-L|Ka6&b!rX=2Mdo|I49@X<*?1X}%=DpL zVAdtqIajnoO8u&R&=buchd;A_CVk6zrWT(ew)Z5~GM2mGcGA1lca4HIAtsykV)Kn( zn-YXML!O2AJhwk5q#>IAL1t_8+AH;Rb($Zv_TTJf23y^7rH0dwy?g>70bI#nOuM0N zbTf#>-A(8L|3TlKAQ3$qFI$35S=ZXPUwwStt1RV|EaCcNmSgNym?hOEr@o7wn!Qtr zu(!$YeGQ5Pzn`hq5KobH_K5CO(iK=upOdrhc_;7w+%JvtjmB%yMeIke1HEFz4(rM% za#VO%IHOKtA?JreF-=K?cO!A<%U~8u6!Wn-MOItJfb5{tVsi?k?QL7<%vawZWlhO; zJnapaGWAqh!hKLnNfz0_Us=jcy6d{c)8}er(QJVW??632eqvl|CLq+>&~H2Y_3r+a zw%pn;dFPSFM~LV89u-3k)ulV#XK`=iP>n}M$JmcQUKTia%f5Fucw{1E68tQZ6wAS1 zqj$b6sM+B=oG`pIe7yuymQuZ_oMJrowdP&TkD82yHw&+@0%$>Ve8H8Hxd3a6g-pIY z_!r0qdV9otjHx`wC2#BNk5^gZr^KhmFEuu_jyEiyu zqSb}fWn=P8U0%W0szMvsK(BR;9iqC{7QJt}-W1^@F(NhMJe^ku^AaBApE?XOA8%zp zyt*gBWpSxU#?HHZm1$jDUG9lOODpnnNlw-6f{J;!{n^DES%oTU>G4hq4nEbtKHw^> zJ8pKcUgDB`?3YW*U$)vP{;rdd#v_K2liS;=c|ZETR^*IrZx+2&AmP&XIMNWIYPDd!J0gVY zXn0(r7Ku&TAO48F?s|IKDFnM$+n`WLYHMot&B7!H-P$jozdAqPp!F>)6Pihg+$lIJ znL-!3p26Vgf6||hk$f6|qz{T<>FET`?D~L;Zi4z}4qo1QzV@3al8;s;Ksg4IW`DcF^!dFJ|*n`OQ_w05j=* ziM#HLN0)I~#@ssI6bf}D^1H@CYIAf8Y(Pj6Yi*!zqp1nv1FmU7lw@omD&UF?_{fv7 z|Ks`|883+9_kD5@DB2!G`S*7o0pBM-uYu1=o482KvSDr#nr=_TU_Xt(5)--U~X=1Sr024=_ji9{@xt;OYVxDmzSHgu&}SMuaK{Z zkgJES@NFq6DdAgpgzwxD1l|zz^mp;H^b>UPy!y9J{`8}2?fJ~Z-p$M2)rI?{U(2Vi z-d=K7uAB_?kDtHI)7sDepCh?={(V`%0)2-+-E({7|R>zj*)l z4O~+?z<*}X?twsxAazwGT|ct5sZ*1>&qy6x=7+7wC;qRm#de(^%UnBqWoI-AeAiE) zc)+~6RmXXl(_*nwpQPs=cT$WsN8 zQF`1DJ+x&|s=P?aG>WCH55alE(kbqN!Y-YiIZjb|?Lf=nMHd?O%?_{qxYk7$l&N zZPKNF3QDH?nq;`FV*lx7F>>0Us8CeM+3Y@&r_xD_8cN!J*vLU{9WA%E8znFa@*S$B z^!{@|Bl?p@?wQNhxKlrEzKY`mG9-+9UG7N^JB!E!4@xI*`QHW%Yd*tU?J&$ozT^L_$LxQc!Jj$*dK?OZ%Fk8(f1blR zZpAF$k+oC*vkZRE01yOi|I$_Yrh9)T^uMj6Q-%y!zHLwD|JLjuTlW9^@_F6Jn|wEF zHPgf=qF%8(Pt-b7m4$fGo=C@{?@W&kNi#M++@Ipo#L*b{g3JW$CMQ_Pcdcpi8%N?|FafmS)ei1Mtc9% z6LD$*s)vGQJdUUNGvYtC_20GOzBACU7<#-c_h;DGg20`zxo6fVD+{9p>;O;b=PQ3C z^Jl>Sm=lQH{{|rNzM`=+e|pm11RAq8dnzZK^dzSl3L<9u+>_$`OObKc0J;wwJz$q` zN!R>vt1XyB^fj)sc?Bp<&x7{@_OI9_YUQDX{Lz7az)1dHTc;~9>jNnDr~)v0`4Syd zHfFo;wpz@8neX40l>ZjkduA!)CZF}nU?O46*tLbFs)Oc|TPQu3yzHXvjVpf?W(!zN zN1~d;`PzI&NJsYE1Cws?{QuVsDSQdVxBtA%~&-(b@ z=W|;5X?>J`)mo{T&AR_ zIHi4YP8`x+<;s!Zg0zl*j&U;m7rYgxe@n>+7{gXA!TZc#58`12tmRSMV*B3L1TI(K zNq4o+@8g(nKAr#is_NH5x0=Qh{U5!0{Sw$Rt7?Puzt_PJNVh8YtMva14Q;rtZ9%aE zSRrwue!bi5C7Hd+(ZzTUnNquf+*7;m*bzi+z^((oep!0bO`RJ#-qrmyTtOCqgN`+5 z1$0B)Yq>whzFFyyL4M29SqvhM*El*)HF_5+1p{KQd{Xe>`UJ~+wUc*7RRA^K$l-ba z>G#h5R~bCf>Rc;xw0OH8OuWn)I@oM|pr(a?>abOlc zD{t&b&0C+&~2h@DL zjMHp;@&2j!AKhZQ`Fa>cJUZB3 zuR)u|UEsdZuu$#GcFXSzi+(=4zy(Lc!pkT2U@ZmEI{f$TuE)Rs{6DPWKh?3APn(;3 zhjffbInRB5QOH-AQlq0KH%!=DXRk0Q1wmb$wtpsS)dE}D(6xVtgSO=q^)iaE%HOnJ zJ8@Q;6U70sX~C8?t>wlEc_h;I2VOk>yGc@X0b5;HL~b6@2{$txYv%J%b9i)KuH9%o zY=mtK7?ghu-k+tJD7TEj1`NX?h=<8SdW~Kyld{aUuTHF%wj!`6to`Ry>i+$_|Ht-( zwdiImQZ6UfgdA;kXj=MUo>DCnn^j3RzG?D`C;(|S{e1gSBP7uo?t*+!M#JIt`b@T# zUc$l|MMZMJuzF~<8dO@nfRyO<>`k~T41zLL#{u=w%+a4h@j>7}Gf%(ev((=;4VxBo z9Qgn@#g5uvq|5(x(7WIHv~Sq<>DJtr+CaQZiD6YCun_*e@W9z2D~cVq&Hd452@PG_ zcVn4v)4L2Mx`A1RF6GqO58hxGbI{%YrUIx;k*W7El2*Ar;ER+<2ai1|KX%&2}<_uu?uLjN57f0GzE zU6J?Lh8TsYp1VtDSQp37T7Nu{Bd3OIkf4K@WuN`o=q87~5`ARt${?(13=_2d>cnR~eH_`2Ly?X*Ma>VyWOG7W`a?rA zhWCh}kO8{PRK~X#P+?Mm@e(kz+7nX}a=guj1)ph!wBnRU^L22U9D*Eb$L zxNdRGLGk{LOw)y~B!v6?SDfE2Wwb?H-6OY)8wmRs0wlm<7)MakPu{pA-2qlX{p#W` z?DO!_F)&)%|46}DeOV>yx$WV4B#&x~wwt&gBJajSwhk2{);I%RPNolfWjEm zw@lZ_M9*KGtA|+`j%rz3#x4!l=j!KL4gm439V}(ljaAvI3FMZ0z#e6o?8`j8H^dGV z_X;W2M`pI{ju=1oB>A&rTk|$nSBFa`-l%cfvzqzktl%FiIenvE-px1$3{#WJz)~qN z^^2S%*-8;>b(mn!Zj%t=E)M@BivODAHrhO-#t+3J=5{{}O4Q?D)<70eA)A~^ptI@Z)NT<&chdA$!tW}z3hp9)# 
z)gIB15`)hUy0IDN{Jb3o7UVl%AHMAKrL>2i7!^jn(WXnvcLyxM?8TLpM$Lfza^7Bn zW!2t$#dg>C8dI&ZKBo&Nx2o3iVV24CQn9OEJmKQ%gb3~VUUnWRF|y%T#hRADw%bPT$!eM-(zV@-cG8U%SR7@(ZL*FNqXQ&wjq5*nY6kup^$slvYcN zX7pTYolfKy6t8(0{-&k<6&r8%2*OyRX)cz(JsOKv|7^^Ez48cH+$DxY8?X49mTqB` ztc`)*k}f(ayu3~6MsOf_qyxTKqFAC_5%^>flz^LRYQmZ>spt4ZaW>yS%)(W%CfENZrgx`GW=I}2~ZSg0q3eSC;Dam=jJKw+Hk^% zA$GposAGhx>9tiDZGXvw<*Gh;#}3XwC+VRam1rYWF!6^b3UHSuU)*|PShD+GIrG`! z0aw$3NSAh*tp5-u4~caJTu0-xeE?FJV)AUgR(^abYgkO({An@Tucc7~Y9SzEx8Sdd z7}7DCo^!Kfj`S#9a)9~sHe{dZ#r5DNOSYD%>R6r{BkN?yOc>1?fIzm=#OZ&~8kOXh zIYD)XO{k&)aONs8=@yZd{sYhfW@lLFe9_~)N!!OX7f#21HRj<|t?ah-iYLnJ8k$1V zBLu5J(9P6SWp%I?Jqr^GIp!fqk-@C)6mm$E`>s}}c4sqy`o!I4KE`~)YxMvbn;m-Mw!zFK2rx@9VnQZ(jX z)*xV|qxB&eh^a}$yPr92RDloTmg>A#o(yJlL2Xx?v4Lt!=qwSA5WPO%N7E`$67irD z;iwi7?zJ%zDn|cx&`}}#o)S(16ErBwU1s&|E$$mVo8;N4)}vn!CGLOHVm#j496Z7p zpjYzzz<{qbQd}Btt6Naqw%%P3_WJ=LvBc;B=W5%+4G=6mA)EK@oi_J1H`w)<#!8qo09Mr9@qUV~yvvm%iL;hIXfB5}|-26l%4KI&!2mcFG) zB(phm7ZTz(i;~YSMgbW3lvV=EgPj{MsNGm5^+QQOM+rrX$XgZdFTsrbL2PoPN<;{X zB_BWp_VrpREe0(CH7h8qRKDd^)TGPBvErwdYfC+8w$Q^p?;QJGo;>fTyAopyBGMhf zhtg`UExUT3ZEp%w>`)z~6R@HL!k!QKO=cjjp-|~8_!oh12Rv;KpdGzj)l-2h1<4xt z`sKn0y+-#YwC|g;G!Gw1?RE2b1|K!P2o8Q`%B4U$swy!nYh~n=m9*hDjCd#QwLGId z7(h_HlZ6ZqY8BZY{K##3&bZde9kL9OGZU0qlHu9NX(cs4*Nc8NQ^zmgWMMlxQ(~{3 zlu7uJuYPXO*UxJ(l$kz$(1XBGY>sNBn^qHs;Va5?Q0bZP-O#4KDb%|~PY9iFt`bYh zwowf4{6r&0n;a9 z+bcT7vy&eu(kpqj*)SsxaJo(2yBBYoHde0`Ukf1IyY(E&vg4HsK!gSiO(V_f3I%ZL zJ+&$a&QS#o+CIvG%56Pu3#Y=#=xW^?2ytjkaA>6Vv{Q`BEX-44J}#D)R}7hZ!P6Kl zXk7b!=TILnFohHe43EmB&W5=Zxe{8WY4Xyf9NB zH&BkAjPi*w5c@i0DFmE=&i`T@6}4wR-DF=&_%g8`ey)2AFlyAcV;_Cd2E0*r8NG)v z!v2zo_*g=SQ_Wxk-?MJ?T6HnXrQ!>}Z@sscTxB&!B}Snk)Q^ntD$RF{J*!ltN1^rc zO_^ph^@lc1ZI6z3mh=v1@rb>_tcg;OhW3&Dk7q}!gVjq6%I^DBC~Y9z4;yilv3k|_ zjRp)klCy$)##j)#FRE9J>^Bb`ox5vIM=n<%#d9>RCo!Up3i|iFzRwprS7)gl^0AE{ zwRy))_zQL!ZapQHvQOKmv!cFF*kB^R4{eMO#~CI@JTOSq4vAycocN0OtR2lyNkf>e z&$)!O^*Bv8PM95AykcRnmMNoyJQseemy8~99=9G09(#)EnSRP%x8^~uCeCYJQ*t-6 zPxV!u)5D3rwS$SZI;shT%WkgEDB1PWfP!3~fp4UrFtp30^)u9%g{N{i&~x|dh=7eC zTXHS+<>Y1;s$6nt_j|#0z{|Y~TsuK%bjy^i+7})SM-KxL4*=9j2Yn$L!?yEcR69|O zT%5O!E%_lzuh36-O_TN^#lv;W@Sk%}>jy{sx)ja{nuQ+7H zuUzBcf48i8mVQRaymb=t4O0FPG6L1v^^dG&KCiF+B*_a}uEi3oCB1TubvgBxWjKo< zoBy-Ti5NU^UR=x~mxj^b*K~`Gm$>)yL%)x_5Wnwp@Upi;-WTiyKkK!5WS8ZCF&ljh zUX_wtc&B6X@pfLDG+d)hd+vrF71 zs3JCUL>|BJhQ7RTyFK!g$V3$N$7Siqn+jD-a7-4-Si8^nO0Y2Ms4_5B6$tAlZ4Eo# zWo&08;k>dbQbe6#!8-%r&XcWp#YoB5(U&(rs&2-3W z9`wcq1RELL&GlSS*#RQsGWPP^6@pw|VHg$LJ5m2%R(OuZ8`Q^-QC_e3W;Q5x!p3Hk z3F|p+@8#cdW}flOdEMv~$13a+uo_1F=_UZ#T(5b4*D2I+pCx@PylMvLI=(3fUcqi> zL#fi@vCDZ0K1rluR=F{?2u7=d4Yz~U;M~5@RO*6GbrjvO*J(m2WtvHit6W?N1&b2?N0_|0aVR_3`V%{F-?`(txSE0H`$I=EmmfGWZ!#_ zFzS8myziRy>+aeXdx`OIto>zz3o&^-J#u1w09#`CDWrhvD8#(J#|4M_ju^zk`Fu1! 
zRm%$-w|pvFulij5Nm#-W8}?IFoAs01ipXPy!?mhH3mE)r$(?Ff zu)$4j*sYZIwYKvoN1B--F|hQ!sX72NYRkZ-d)E~p?;+d|Ss>A;FV5{DW|G_I;rVcj z7VKOK(9?URp2mxcdRW-I8unxY(@%fU6VwnfCqm!RkaK zV{rhq`Gy*ODXreGy(LNhr4A%JmhI(mv;n#U2*B*%cF;aKe0?aibl2mlk&+BXx={6~ z^$;h6hZc18!uyf$Gqw@Co~tJ@ZyGwD+93l#`?!@W_N1CSTJ@luN)md6hEUGtPX)<>nA=3GqrVxIIj zp75Gl;x3BV)MqybGcMaxI)KL=%R}EkuJNOf(JjEcqKG3LfmJp=?ONNLWB~}+f~aCS zz;NPqd{F+CVMu2Yiv#91iB-#FEOE<2ZCssM$7@#TSYv zV$DgFstJZ#nOjf@iytIY|4R_@{1Ya#9-3vbdR0^bb0$&;7Qa1^{fR5&Xh3vR5rYJ% zB_9gb9mAm+W-(`doRL0-x@e3lhTjNHvs{xNEQ2*!Exrj}alG}MkVnQK3XrI#0LN4u zh+CHWudW|NK0t0r-|r~1RKx3Nd&%_cdBmvYRRwcbg=q3!Z{^{xEEJ+}LencWII(?O zxa<~0T!HZV?FjJ~pWfO=7gl~UqtmD6C1WCgITUc)Yn{~BovnGl6GvcJ=fz1xnM0%U zBAkIVuI9MDehRzZCDJwVh4hI3-Nn;YSx~2oO|#+EPIIsv@U~WS%lIQY$RMi0dBH8z z?66cXC&s&>{4IozYMUnt{u#vq-rS(2zg53o+2x~L^ef<=b5}yz`Yc+n5xxa?LbieF zl3nYT?jb6Yus%j<{pUbsa@rSy8@Zl`lo&lslxXZHiA*V9Oh?h2z4ok`ZwTgl!^LfBcgUt-JywM8 zd92|1%3lS8_rN<`72{unn+D>Y4Su9BGi1ahs2oCf^SoV9Y(cLztb;>d8WN2GjuspO zP-{f@1f|&;fM=6>+Nvyj!pNtc=U2>PpZG4IxIvN87SFetX8~#UnSUh_Me#8BVq_&X z!=+_R)Tg(zr*c?}*|PO#G%deSFi3Le8lkwZZ2<&;aol;q@MyLwLN%ksJ%J0mi&!Ko zzK9LEInD0cJPX(34@JN~11XQ_8duD;OZ_Z0b|5brx-+2Waxroth-zuV6*VQ)vh$+| z!E-F02cUrHrC~=`SbwU#j01SJH~_X3%JUuZ^@2q`7fBz`gTiKhvsA40BZ^WAw0un+a7+|I zi)?MVkmnI^IbuVEs*!~-;+t-C>!=Lh$jGOP?2GHodyj5`V0mZE5JqBxi>ZbDUYK%A z6`8g2Pm`it+4^Gs9BEwH1#N1LE&%btC2?V6L-PY;Gk+(NcmOa6Y?lmjHmn8H4E-iu zwIN6ZDmQm! z*<@n$%a6YVh|%Bb_pJ**Zalx^|09w7$Tuz%myMxqrAp9@5c7(+16Y#O6P9GVdEl=K z6rP^45>rINSMa9BV$I(Sf#{&g%)kX)P1=}+BJAEM3(Y`}r|1w?TgrO@MX3Qu*5nD{ z?b@-eR}aS4*ZG>PkEny{W!|hiWCWTSSptCj?3q~f&Tg8|n5LFs%|gyCD5iIKt)KXC zG$E&T>f&k|-?Rb~ysNCRmCw(tw8*eQoA>C84BmYZs(?Gv;$;r;TZsV?&zZuFiPEH> z95~MU$>4oxiRrHm4M(OGrBK{C2edwAaKY13;Z7!udWyZ2J92FUUprV|# z8_{;$W9&O&%_sm~Q1NYA=!)hWDj~t;&GVj|(a~=8;+5T)APPsvK0L1c(pX(=Wy_hU z*ZhMEKn}sL!`cbpbsA$dPx3fd4&Tl|1WjvJ)t6eQ31>AiUY4$Rl<3YWG}RSFE>m|o zzfSNT-vzjvX8FzMi2gLA-^m)0DiCqIPr+5Vb^mF3nh>|B{g3?muq@ABsSb!0eOZ2J zCVZN(ND*<|a$M*TY6jJJGlD#bX5d(Dhn#SlIiG=mw!tkeJoE3U!GTWi^d9!+_ucOz zef9lCS%shvnO@m^tMEeN<|oFc{?zbaK)&1OP%owx$l#RyYJNW~-S%KD*!bJ=E0&Kv z37gG?_4PT96KZaq4?;r`5ey&);;*f}Go@B#)&RdQZ&k8z#LYr=nq}r)NiB;U_a|4a zq~~!g{DqLKy8&ZnI~kr)f2Uxu*)dop0Qv5{pKWS zwzMR7XV!daIv2{NnRwj;y6jo1?cz1;1~uCqi+7O=+^eP&s^<>yXy2ggI?#)5F~4`& zgUWz~Q`o4ws3ae8=-|^^x0m+&^skx7oSgktn)h~0Vd12UVhVl_SO~irrG*ZYjb^zt z=cc>{H48VqpeSAg4yWVRyf(uvYDYUcxuhF}n*wd?KRd8M1_-Rm$S(ff8=@su4`f5< ze2h^TQ8x>KFev<3gVP63FX`%ddqJf17d#;`Xjd@g9$qtoYlnlYv^;p|vD)3!gtt;5cZIsE&G;M{x*F#b)OA6$I1mgIf4X zvGUk7Tp#>nSsMtT3U(3)iMy9QBfB97Qwm``DbGf@&+a}%H9+JPMgdxU0p1UscIwy5 zH|4>fuy~*HZ;x%-+CbxF)aUFtFyVRt2Ejy|3rT8)1+FKa1O_*Oz(A~zz2z@Tf4JX= zGWsYC<-ki~IZYOuadb|$MeT$Stu>z#p|nlVA|nD-4t%B6zTx49sa=}N?8;?1d`0X+ z>~o48qs7<6*e38UTo$w41z%|&{^q1Vao9(Ub28->AbH@SCU^Zbx7Wu=2nL8H#*V`X zQogOlyA+=@1*H>wCs$X8!=P(jCKg8YS@yx!bQAmbX-+fiVhFwATpv@+ms+pgM}hO1 zMnVRtpq@ufi*LT@HL6~&bK&h}bGa&S?1~JkAL#`{%28Vz;Jw+D$HGRKnvd(|VLp)y z5$j?4`i*tSNO-NjkomQ0630kR-&wQ-y==d{LRQ5$P9R8t1a0_3#y>{ld0fkv)R68I zgFX?OEX%M+>I=ZpU%Kv?{y22n^`VgM^SvDH;0WRagBRLKgd;oPSR+HRo3o!xhr8{b z-_$AmT*^FEtiyvHt;jQ$)eDuEs7)~hg7-WiZ^%nvS_2aA9!ZjUwngE^;mvK?$?qFh z;UyZEm!Tgx91|mTu9!{t012v1T}#=C7@$-bQPI*nR{GR>Ulu`8;Xyr`nLSh=N*ukvEXyhW$aV4-GNY%Sj$e6@l)&5Aj@ zkGGRewzafXh<3>fNP7$2D*iYRr1Fx7awdH-P!|p$roL>ZV?Eb|YwU)~?~FmV{}vXc zWnfj%Ntq9rh(lmV-tk&hp9$A)-{%+S)|1VHE6iE~QsAt%FqY@rh~q%iOZfyKUE0EK zGE(&TD(mP-z?c-qNCAceh{3T2pF7e%yWT*)w(L$VUkG;k{p0P3+dD1UZITXcWbTa$ z2ZgQtZrkoOvmo)6rO7k&A-~-?pzxu_NX%m1hYs*&q+^?Y8DrE>u@-;@cab6`Fm85$ z@rTy?aUgOkbAZNcQ&8Js`conCABpte55iQbD%rA8>umk#N(}QT9)8#e zN#KVYY=3&tV11+*<~K38qe6mA63nuE1ioExoO(&V!!Ev2sb*$wkBR2nJS^NEklpbL 
z<9{UVD#jQ6;Bd*3pNTAh1KF;Rd=9UqRuO?;8waw=`UfK8R{hyZLd&Hq734dqnS&=l z8>!gLmkb}bE5G>~|CQZUl`@J&_DSUSw?rNqR&Qh;1zz6ig2}BUik;TEW@Q7gxWlc_ z)%JsHJrWoV{cpM*mrmmGJ9Lk!dv=VL+*XH*U9=<0*y$~383Ou6sd{y%OejQ|8+A6t zVX5Tx4&bE){Wx26jtY(b%HC=T(g$r*9oE*((1VC%-Y?x%3&h6tZ3?l^%gBK+GnZVnj{BXC{b6f0p ztYQMMgu#1D=IV?9531JnBCu#9#Kl|&e7JgeZ+Kbk0-xS?5`krz2qdE?E%=}cj#EGd z&KINWvYOpA3a%ApgPZL+tnhY(TP-1Z5iZtv=MI?3Ng{(ARth|n6{A_2IsYXti`>7UqNm#ia~l3 z&alSI7&&JMeMN5_0GjB|L|W@5F(xuvePX1-PPu?X1s4?X$WXN5g3$y2#Q9!4x>bCW8pI9u0_>7-)Fop z&BTWiK4Fupe_8*44<@8F4rH>mj`!M|eJztO$u-^kc!l4v`E|>AIL^HK{y0HJ-puin zys2wV2@5?QEwe_uuA@a&bP-(1ABuecvf_OouOjlmf$qI?oon#)Q;|idnu{oBcl5&~ zZ5ATyv9ZXMzq;{N-($gBz(JjEqv3J_ckpWG_4jpfg{&z*7>F(dC5B4^)i&-`L5g<* zq+0sEIoq5E*kpO^9q&Nz!bp4H$wMGC-B#y#NM-qnvrv@ zeLFZfOCD-6Gth@Qcl@;~P0F*7PK`gN$N?$8U-&G(?vZSQRdV0*CADa$nm!9}yOzE_ zP|IExrU&DztKJld$E`HxWfxuwIb5%ciCqWlnR)hz0hKB@@niiur*bENfZtx<%)cPe zD5AX*EC)aEOiu!WT+N+rb}Zq;?PpQ!_a8TU+3RyO0QD9FroNV%^l!&1o~eb=3Ba&U zXV2Z)9>l}SUI0+tXp=KJV|%?R`P|=mahK+;dxJVoMl}xBcuWXN;du;-O|jrGgM7G= zRI(yt5)2M^Jd*HeG4`kku_q z)c$QCm#MXB77Kl*4klI1(FQo)bovj`jcl7sX~Ag@OsY04%Qel6UnT7NKe9=#nqZ1uQN)6}V5B_Qs z2PlK3yuJiRG!6f(qhfe(7PZHUIzwYL4FwQw(eLb#6BqTHHrwewp@uYZKxQ{89;dQfL0`qSevXO1o0) zJE?`YvdFXA76D$`zL+zc1R%Dx+6fm=NEnL;P{o?YZowpbk0h)#5orc@PB6=jUON_eKJhs;*%Xr#F+CRxzf z=A?Xr2&ni5$j$+Bc$ZC*Oe_N-45)?o#*@;=qeOGLmDv$vS8uJhfkC&R zIPVMx1u%0-cx^ZVOy>ydw~Xdk)aPdksbb5Jp1I(^ml$Z(krLRLmSO*0A*c1tO5oz@#acF zREyqPBlyyE<~#ZrPku$==o3O(_oSTi(6sQd#bh@lx0L|94Me5Xw^kbUXwA%y)lais z%L%N+iAf7_WMz)$fSS`GQ|1)LVzf$2N*D>A46i4ufV0i9Dp0CqJIW7UX}XUZ_R`9` z9IChqX3Ex@@>J{li;C@-^M2cj-g}Gn-g>AB2@H@w9`{}3f6m`f;-U^E+%dDgnc>MdM}tN4A3!DSVAcle)p5 z-rCEu2eo{n>*H$YC*rjW4uqWf#!@=oxmVRXGiIRr8!<=KW8>+%y=Af@2yjT{*9{F5 z*NACjN17o%8!e3PRj1rog1$oY=sFhHuNd=bPpiZYW#27?Wxiw95}YhIT+=vQLew&c zW_e;=fs!Tu`-R-4=Y)bG@437H~Hh{4+IwGj)R4$$8c-Gm*-D;^5Z8z#;u1Jg}|MQ6tHXWf`n@ zi9slDv%b9!ywD23$u`5l&yKFF{LQ7=UO?%$X}_kD+Qp&+@3Xsm-5uj<0-s;@x|hC2 z)aJB0WRiz(k?*Xc$b$zEfR8!}XP3X-Bu=McxxIoK_gJ)bM|B`EEN(#nA+0%_8lg$1 zPSsPBu8i2lqJ5W4Bh;A{M{F1SRT#bTqLB*F?ZfV_gDtGh5N+Lo|bmM_b~wa>p@j!SoNy`4_>C0U4*JF zBMHTQPrP(MMme*|WwvbBpY^@z_ z<_Bn3$+1o|j6}fPLp`Hgr3mMhBfcV}!y*`l^^GzDO6%8Nt1!4DBa-@e(^R@k+8Y$q z2$Q_~NY4_?^;KB-PeMqY>K@tft_*nM3?vyRdYP5Qll=-mzIb_eXRGny-SxzyVfD_`>LM<|t`F!H3l zp99a66*O0y$8)CW7pfMq*`mDf02ml=yfgHD(6*kma6ngBY4wI5>KrlP=q@NMTu?Ez z)gp^4Ao?%LO74)OdM9iEHjByUMQ-F;KgEO5_Js*ki@=WFYQY7oPQ{sT`y(gnyU$0$vrDEt+^;*12?%Ou`Y@jd zM<6SFZxk+saGr)Zxt@hjmhE-St&i*$7vOeJy?Qv7kqmK1?rJ@PWb21)1X-q(D4XXR zT-Kir4&;qF-Zr1mco8qo^S;WlPo1vuFhh&v{IJs@GSHZZxjE#xwmSA8xb--gFm)qv z4gqm_g5bOBjB&WTj*pyhg7AJWQ=-zE)}3WlsO|v5x1TFB0bSAhOPkC2`^gP}BE=Xb~dCT-Ign$8i+2$4L4gk^g*;$}= z6b)2eNwK#>Mu?f}j*OygHhHa^0&)Tn#*LV)QK-@(d56Ugw0^l^u-U8rl zIC*(^YoF}4#^&Y~Gjq2#Dxgk6R-bBEKTf|Kp_agTru#1HG0QBFs=TY^gS96DxuF5C zb=5lHe^C1GC2+1U+Na1N{F_b+*L2;n)@L zdrJn8Jjf|&Jsl>U;abS>-g+H|L3pl?HIr#qnZljfZ*nlTcOp)>b(uPJ|Jk72-H5x) zeE$!7?;X};mWP1~2r7u6fCvcK00HSu>4<=I6(Mv)dY4{95dlS|DIgt@A}yi!B3S6X z6MBabdP@S~p3Lmb?#|Bc%Cd4JN(PH^jOG6mkZh%!sU+KpHJ9-x?Ta38NN`FhI zBzASf6iTvIv#mzE!o>ST+QVxH4EK97lxg<`&vt~nPCm4(@Y-Fm_Xg&@Twpn^WL#X z_}&Sspn(nT37(LZfd(z^8r(5&zeKw;DE(&1JA)AOj!oTd`v~V^uvC}Vl^gHtyyRX$ zvXRYGhgLcu9pQ{%f}`jAr2rMF%rs0#9d-avDPvb;^Rr|$M1P>S7pt*08wY~7pR~Sb zw;!3pSr6pQ!`y?WgeaBO??1HZNei+{jxUoiZafLmF`N{rvdDf^7ay}J<5N3StFP6t z{dpNb5thJDgxx!r`2L`Vr90yzY4WaiYg(7~;*b+fD|H#N3vL=tp=Tu~TlmHud-zV% z+kDO%dyw$f&AYnQt@nJ!bzwwZ*HgVJudG@}b*^%9%_4_BlaJYiM+k~^&D0@M(kgdk zeSb{;h*g##yIr|~?nu-KY;qZo#rVEIG)m!IR`I1Gr<0jF$IEy0q1o!2PuIV0>n>K0 z=&dHlYE|IQl558CTqK>qx@mq+tIDZyx6x}+lh{geg}}zGAiIXEjW{Cv@UsQsDP=v$ 
zKHjCJ)`o+kDJd6k?IT|ALTF}Bgw5apZd@9Lw#L=Gk6CbsRINdzRPr#IP_yBI zkI-BrNRrOPX!0XTr-1s&Wgnj{lswk1LD;>HG^DIgQ1EstG?!Fsah>#7SCI3E>Kg)=_#|eGUH*@YDTQ9}aNx6Qq8;;WIFH~u{iwCtt)Fzuo#O8lFxkRG^ z`Lfi)mQn&bof3`GG-8?=_yqd3j@)fSe@QXKDrgu3&&!p@+>omO?agfm>Rx(b+Ye+J zB!Nl3jzQ0*2bfKgZbaN?vfo-#jLkz{^uTNpt{!($=2$lV&2Vs5=0Zyf?eMQZvJexJr}Q@S}rf z$j}jwVW0F>iC6M%ob51~S1SrGeLh@S$a@1_N5BhQcTIVGUur@+ddV(39HWlaxGxEZ z*S3f0H0794`#>fSZJ=3i=!II7ILVbBn|V#vfSmK@Y4|$X2pdB1QUfG)=nLC;7bH)6 zy2h1>N=VJE{uV$qdg5_T4z(i;e_+m@ECuFY)yTe+QAkB)|NEh&R%Pr z4*=rVZn-yOt=tM!BwzKkf{yVR)v5Yq+eij3QR($l9CJVFhFJmMJ_QuLhs73M{|WH< zx46AbObUW9R74;->b}3^EEg~BFNN~381dG1X&x(yCu}JpCRnTVe zRsx_kvEN?T1CxRLho9u#0D>N)cn2qPDYYJS0t9?k9*3140;hZ@?jC^Y#fSnEcvec9zdKc>z{PLZ(IO{14&c zQQE-vT3vpN%U$2VC{Bg!2vq_}qO)e0Xt&EwTUd)c(x_2Rv$# z({rBwz7Ep!mwu@S10Dl5dkTj|s4sqL|15+N{LH<3;A_2Sbml+&!kr5^3HXO>r@ue_ z=_Yk9g6o=zYT>mretE~g`M+jjaH&fFR@VRB)c><9b&*TgR?Hn$L@$)1_jYA~O8@ou zpKs{CKCtN-ty`~tvB2No#rRhvuwZjKPkxa2^)19&Pf(+-W%frHLWGnkn1|yD{ipl0 zYuA&Vc`u*&L#y?dZpC*oMl=+fel?I)vek)Ngs|PHErYOq2>@uC`32e%{A6DNx4{j} ze&PLf!~feICw)a7SnMLZh}f^2_YcGJ*MzW$AY|K?-A{<-xSENik; zvA`egkY7LXPxlvah;LWZm%hS`7hNC{t^N~U z?)RQevo|mxPlryX|JHzHfB{LS7Ayak22_J*@a-?Gp8t0y@}Hge|9<}ep8o%g|9^Y` zzwti*bAA7B@ciBbOrTR~2d~)*#fwA?0@kPd>TFYZ`Tm4=7kA~{1)GT)&sOib@0;R) z`CbMX#>nw%_p%z$)HDO_jsnoa(*ek|cN2p^b_XwUPz3RkU=-u7NAGZs^hT?i~#RHy}xOAh(W?LcL!kgI4^IY zw*yjDJ9|C6s1HyA%F^2nIK{;4NpbmhH>_eN)Mfi!Qfjv^b`OObcb^oh)xt{`jX(QJ zZZ8!?N)tM4s>MZBiZ8u|z(^nTSZx{jp5FUb=lLf@tA4Xk5m1nLjEI<$(VYfD17dq% z^r_R8Eb*0Q$yw}KvW&ZvnU+0M7tEk~6$liZg+L({2oX)`dN7$?^*hFS%nwDn~2d8VrIeg=~j;pC&Ddfc8jPrfS0I zkGBjcpbO$c;Gmgy3|MyhEZ)QufN6x%N)WIJ1E$06MK!%o|KMZhmAaffAaB&>2K0NX z*e>*EYcwVMY$?)y_FIUvXhbf32Qaa*!X%eo#XZp9({`zlcshM@!giu&L=7mI^qmNj zJ$L6DV;@lIFn9(9bM8R^5A~i7qPQ%G2@_kpjaTu( z!@-vccferzr#=r}t>yWQGy>g%}^bQGXH(dc6Y}6FNm7%&&4z;L$W|9(bKD`t+$Ri|_oLZj;dc zBrz>=x9$_f^nYBd{btKbcj9%`?kJa0%TD#FOSv|nUUOG9YoZ6Q-J!V+F@ zQ$fRQ5A4Qau$3I5IW$T4?=>J5n4$f6QU+w+XQ}(+-gjCfZJ_aEaKgaYzV@7OP&NSB z*;McJDjTGFZ%j47bTN3qe9rq^Lhk}O=5US-KOmIl_U*jFFo44`==VQGPzw8wc$44@_~xSkW@`ncev--H zHfbEIZE_8cLV)w6i2=L6a%C^xt%=bcC>=?di_((f7VqH^9umO4;<1WV97hdyE86ww z1qL#$fUZz8EtJb!E+Ds}1-h`wmnl~n;D5bVDB=bkTclfFoWDPXBpzXp_w42JE>q$G z9qD@aWLEH_DdO@#0R}g+2x1DRR}QlqtLV|R zkV57a>o@DmxM_w%3Jf(71Aw9kr-||fT1%*sJ~d~8XF-iLQCgP_jm;*(OY1ZXbwDsI z4+LA`r{7Lj1M;Pc<{+DT^6K3!T~<+PxK)lMoU$S(Czrp@p)Rc^r_3G9;MBv=?qC|B zRq13#>klXpzU^j*-#iGsIvghJYIDbM>wd?Gl4Bq*SoHuyL?Ql!hU4+GkG|yMFNZ)V zBBceiVkr4d51{ZfmX>pGvck~>ME>&Jc4h~tR4t%B37fT2<00*2U$M(TQLhN}%$p~? zkX&L@{zR+&8tSe@XG^4#OmteG0w9@IvAJr!F6@X=%C`m?2;(F7zX|Nw0|b8a0kxs! 
z{PKwv5^nK>sle%@zsu46WhelUS$1DRc7F^s8UrBn#k(7%b2bU2F}dgyNBD#VPiqAcK6vQiXPxNs@zJ^; z3Zw|f7D3&dK|PvigJ`sXb&hnEa# z<^ZhN5(sKF0PBzEsOw4+8&c)g$b5r@02Q9`Col1g+aM&Kq-3!Vz9C^R+j_6TnQF{j z%ZLXbR3!Ga^?;Zo1Fs{9ZkdS3{J?_oa$|V_IXnn(jUtv;4cI>XCJp!#e@6Tg-tG}Q zU6*DaD{ou}SSS9Vf?W(iskt?50QH*c?>_!@Ky$Io_$vu*%{iDK4!u~tQBJr*ngdq1 zmZM(*a-0mZzTBI5M*!%7(1n532Uf6=meb7hDe#vMJ%?`bfr-JU*)&+o>a+PLt7ocC;n z@y;Rv;4w=rdm3G!Ve~>AY0Q6DmHelc-GAz{G*^MHejcLGoRF}RCKZfz_EBIO?PhF2 zoDhhYwkrcVogyu#mXJemhnyv*@hbOqdnjh75O;*t1A1=bw#=!105X>kAUfxBZ9&Qn zq*a&iB0vu5vJKQOgk)u$cQvlAgE;#l5Z!Zi=OyFG2}=XJ*;Yvl#Dur9FDy z)PS>q_@Q2f{-h9I2AE@C07Lvz0>7D36}pi1W;-l&AFp-^NFJ5l$1Jh>Pm458`+X0k z83f~wJ}QDVPGr-lp?M!bQLH}4(wC$jon;j52kMj5g}Pd|imew@GE=ROL2VMgbjsmR zFY2#e2xNCFh>W&s0+?+MjMIg29M5y~M+7qXiQal&vkRWmY#sR3$s8d8}0JO^Chp#NVkGsZ70F_Ga&vhnE=Y~Wyzjfth z#~%VUq}T^ETR@@(kN44R4W}J-=uQn_(B^erc?|Sjb=tXISBHB@$mvb->LhP3@m(9| z&q{KO&%FpIJrHGX$ZR0bu|LMkwBZp%9P_6Zo4lbNLPNtq>tlv+yKbg7XACK>4wS9( z0pR@GScI)S7oE?!jmLIqyM2qVig=1}ykMu4r{5K26pu@yi9cCr>rRV0v=SMit#Uu;T9=v2y zjW&=^O{ygk``qG;-kM(<3!+up3c|p*#Z8+jHA)ZoOny|bo&7ed@{=*^OEQMM%HcP-+%|F-#q|jsZ-NR z`KJfRKrGb;B;rm@@SFosa0&<2(S+jj20a&0Lvc6P16f@le4I{Jh_!QBpBM)NQIogy zLQXP1QU-qRs8KV#R32GHQ;r(5{$RZpr4jNy2zPk3%&^I>cL==$^qV! zJ10M!E#<1>~NP`})(*H19*^x9&JE4C7(HpzbaL zApglu5U4jI(I=oFLg)uMYx+Z)4K+t;HncL_c4e@TwwuyYKzv3FG!Vv|NP{`4lU4de z@l*4T1d%SlSE6zs0I8Se4Bn#c0H~Uq8eRu*6+J=Y10v)X9y|enL3|B9q6smWH$0WQ zg>OcPx~*D*;96eXDcT0mD3(UP=nPe`(*YJ~mZRJT(Xa=YRKoxuy%y#NyG#X4()#`NqJtkl04}6(E@r3Xl!k}S=jeme zd`QhKGP1oeN$wNNgloD+0c#C`r>YBp+bBhWG~Ow}pwv2=te)BN+j-a{47teI6F@z2 zZ9oGVl**|CKt?&=L(H7Y3b8e8fnQL!=C}bokY+@)!EjMI@N~c6!4$y73K91RHD9S>e~$yYlEcG=CitZNb#l_$ z(NfPR)N?ZKXV8H@f;0raF5JZexzx(6(>G0F&T}c3Q|C4B7#;OFT6fFJccd)7iRlr_ z+(zp0$M`ZHss2zK*mWhhF`}nTlOg+aF6F>)Z;enbbSr%JAe?)7ZEY=v#EO8OhJ~7d zSkIP#hU}I!Jq_OpnY-x>%l1~_zm4O=WNcSa`Pc-C$$QMn_ebpN+Ujb%zOF7uk@fHyQLxJsxWu8BIL6QndMd@6@HXV*1;u4>JqGx5CZ&c*gR$Jt;ZSBgxr zwt=vdx^XygKaYw`TOKf;NwiACXvxky&U`D3-WiY0KxQ-?gH*n?HJWogB{T|Kr(MM} z3=yl1zYC(k91z@z+?;Ky?s|^>6Ih+!yu}@GxpCYCo}R;mv^<6-G(ZA=og3(qz5|u$ z{p?2>Yab4#8UmxhUt&MJ8G5qRnL&hcuaST@9TX7mxU0!H z8U^-%GPs1D0%3~bD&IrkVa-5KRruRF*UtK8)bX5bR&fK64B-OhD-LSY4LVpoxlBvm z^6qsbqesB*PQ87(kvxu8>SpCo8Mrj57GJ{xhi zUdnQ}Yt3;2Ss?$H@6A$Qzw^o{JWD0+S_lPm47Wkm!EV)Y?>8VBSIj6h;W118ia}%z zTDbx`-4Q@Rz4cSLBuBPJRvph!fc&30hT>NQ#7{QVa0@hHK9rxc`!B%;r$2!5!2cmX z{zQm>NX!0HMe&dC3wQ$jF>||KNZB8Qrha4dKmU1B1HD6IEcSyd(yn%3Q1^#9KSpE@g^B-^eA0Dyv7XWoQ z^(Kn$zcrs?V7^Z#L`#4^=I`F%KN{=ryyeOhuzp1HEpPst4+C>~t^>@M*MV{LCjinv z{IFj=j|7_UC^I)}IQ1*9^&My#1H27l1!(DJ1Kwv-@?BaoX>A*u4M_@cNCXgn#)`1Qg&m zGF=fr`?n0@{|7hKa4GfJx}D<{qgafKUx~^Kl*M0Pwg7J)u<{cL$B>x&{sHq*QT9!$HAuEL)bGtyx%Q5BHG&bV zro6DDm;o8eadRt*Q9t;uKi zpMDQxnSX8W)G=FGW|r>q@hq%q>>o?SKetU@N&@@qv0(0tRq>b1beOg@tOtje%l^oZ z=JC~LCw9(=Wb{jk0d1y{LG!K;`sU;|oPkZSNS>y=s2i_l6oa0|%@5}LnJotgI-xMC zo^WJfZ<+7ap;0tHYMxVMt|$4ODPrQ?g|@>iV;_s9k?b^s;O!53fBm^E;WIqw9v`#? 
zG&(UC!xnb8S)`do)NHLLOFoD{W{EZJh*0S?sIcbzCbEQieWt4^*6}FeRq%@IaNYj< z65m~so}u>3qguV`_itdslWC2z20Y)}ODN5Jf6LY8XA7sG`|G*Ob9~kvVHa_!|1#kY zpQQ=OY|g=jRKId%8qYlA$Oj$Fy9g4rX+Qt+U}lNllj1-%%TS3o%x2+AOx4Az!)r zQEP2z1Tmivr)}2gP;Sm;uu)#=3B4R-2jA_A2vbTJVpWXUQ8Zimq7`-9SiD-xf=53R zW6;!k$2oV_*+P{ar;;m>jX+zB`0XU#LgUUTzTB=$p{DKL8|nZh?Owx$&1TCM^h<6yfrRm+CCRvicAy?5_Z0U*r0+^;i450a$f# z{Nqu1$6D+~s?>U;vVQ2O8sX0Va`2~-e|?<4mx%rS-~K;yS4nrA<#Dx+cd8N^{&m5= zXk2mg5m<=%7D}Ult96x~n@mX7*tqmBA#zPQK>Grxnh$!&XNUql;WzHHUhirIJM&il ziax=1wN~D+V@H=RSJayP$Ym#X@%#KOP&!?6^?LMqnS+lqZv^vCN1%1+zgWQI)Eu25>6 zoH1{)fXx5+n3Vahxt`CgRGZ>&SARy;y)8lDB|^DI^0#i#Lv6w)HHY`bB27WN)U*E* z->%RT_x|vdvDY8w*^HWh|2p?ab19L{r$V7y|Jnc~WT9l!kykXtYF2vgMSN3?)ZxaB zL2wd^{*n*s+Sxo#UJ&w@5q#`!{G~YaAy@&6w(h^3E=$Np$>uECGM4Q=W!{`fc~F0X zz{@S@CfGR488z*ffAa_e?t@NLxF93XZ-LuaKI2elk+&2(W7? zddaDX;^q7$No>D8F(m$}7x~*>$`?P-WH|EkL>908W)JY;yALW8Re1^3;NINH*M4pv zQVM`?Y-&f*pS1y3zEpPitUVEkv^9=8LjU|c1F+yY%_C?n+EVuv-;Dv`QZ9akE zq*rKx-TBd<9j5dYewilT-w6g+worfhh`@+wN11jlntfKJg{b91D*}p?w@OH}UTC|B zde<^5tn|7`>HbLQNTYtcL%r>L8s$C3qs*Wi?l?jIjXt?34%G{u`H+0xEO&Vv7JffZ zyYu?_uBP&o>51A49@y_zQGz;z%Oll$wYsM1u~aH{N8Vjl=y_~uk^8`2aj=K!B6 zU)-~L<{p0WtX-c~H0CuetaXp8iN&t!MwA4EqbsXcaxW3SXaeV%dv%lQbf)wDdz^GC zP28RC=k4;RJS0Et#Z6JZ!f1Uf*-i>wu3YJr8oD9#yyAk0;p&5z57CT#{m<8C2naCM zU$(b;Me2?D>6yuNigIP~UU^V^rs7?9868oqNG7Q~i^=*@W)9qo*#AXlk74RybPF zE?cj7V^ppXf3f50z?SJt3~|xl#{J^JCcOPOcz@}oHD1_Yk@iFPx>`N(e3^LL_hIl+ zzdVO#8{J^ubKfx>)FRyw?J(D%>uI{Ap8wT$U17aFW<3N}l5~%%JKV|KL1G3eCRs< z$=BGu^2HM?WTzcly5^CC+~js=_ihHq)**5rF=Qc&758H-w?_SRS~MewF={~LPD ze9D&GB=OdC>|0y5kk2|w8(q7Gqc%-ZhH$lV#vD<%TuPSPDK#Pr7rPMG>v`60(Ldu! zf!FUa^tQXKa9mp3I=n-%>|?ZYm~bnuUcr8x|J7ZA^!su5CFzFmo12i?;yTr+Kc3)>+OWAoYzB?}D zTjVx)*DK*$WK-+x;SDkN=V8r%ce~I;F`ShJXL2o%`=9ptv-OjQXS#WYUGF6y8h@nj zN>s{psWVvRK0Y7(|#85!%|!TY4?6Yx#G-+bA&vDH)p!6(L#1t$k*h&lcE`ltz&C; zvDTis_Ggy$EByuH|5i|~#eqJqxBO9x9;gY9$5`vjIaiha4T+{d)#-4<_p`ZB)$i-e z$D&>NhylrdXz{ ze0xfzMpYRDo3gMJkCn<3Y-H7^^aq7!d$Wv)EH2p3?-w}zq6P3tX3Ce8nnYR8$ag^` zZV#d5ndW?_$NyQ{H^=#vXoe{6uSx6Cmc!{X)y@2sSj8F-N7Io#+I1aT+{t3W^D0gxWM!)f3lNpcfQ5akj0qLE{YkI4JP&Zh?oZX9IjpV zT%7gV(r)5+SED30d2W{S>bd5&O-$nY-3nld27D>TLoo3xqsHb3{$@_SE+iCSOzs>O zy8hDFZt65Et28sggB!`L(tZr66waXxxk;-s!XxHdX1629+I#!VEQX1FQ}&#-LIFvS0r`#-n>Iq?>4?!5?u~12ma&y* z(b$n|(3&4?A|msv#R%z6an*o3Fp#1%jh7iUk<382Zn}=@hHv6<5zL~P7s328GC;6` z7Uen!j{Ugaz=bzcX%~F9&W-+1uMg5nKL!OPy*zd5WFe|TFjwf&7R`N5Ds%U!;mp>p zBXO043OU0y{uZtN!Pe2_uU(V*krVKai1Q{0r67(w_hhYH8E7Kl3Z)x~{ksQLeGN|< z@{;(WR>S?7kwsVtJQ4SVLH*y$Zw(pu_*Sg-Q5*0ahsR%Ea};Ql={pI zZRX{z_<-2?{m2;ZQO@gihOE&xb5UwtS`lz5otw%F>yfhxJgt?*u`G@= z2T}FUM6LE8>tsLVaj6z9Kaz{ex_^TiIZuc|4y{)Q3-M2X)z9eU_7w&T4}CHab!{ovQ^m+K|<15_+# zbE)%1J&#F+-zhC`>CB_TS1L36JaMH$rk>vJNl_YX$AuEDeVNMF2CNq&!+vnN#@`T) z^VwLqHDYr2#=2k61O5cFm1naUF3-J|%_vEeN6UT3Zl#A(w5ySkM6t@V3ZeIlg}<+s zs3nNHaPkCVc_LkNDPGwl(td2`Qy`U?`WmkB2f|U|?KMf#AKlC^@jZ*P)pZ{hjmeG} z&{A{F$uSbD-K>@rRx6rUVXp;;jqMw+z!T=njf7h@Bsi2ZPC1WemE6U?U>;Jg&T4Bt zM-9>PuX!_+@0qnMsUU~^$|}cr#UqiyV2OFGL*8|yHGgx zUUrer>ZSua3VMOl?H0Qcd$kQySDo%aorUX*PIb6_p=GCi4=<#(|5e7zY}`?6wGe8{ zfM;tqjOy-yNe9{)0v)9&T{#f>;ed@XaawCPYl&kk6(6hmFdl1?u!o##^X^TVbq|5f zXCSC%=i~Vfo_Yv)=eZcEO2ESTn<_bkoJVq*Dx_#9po?P{l53?F$;WAfp}SeO+TO#7 z7Tot&N)&IX<4Srf^`jM?Zt?fo$jJis9StreFm2| zy@c2wvO0g3bFtnnUb1)@eoDA-HVH-5ylr3_X7G`ou7T+2&G2y`de6tr@>s{&UY#X6 zXZ}6y#Anu5@4l6%5T(uf&_1kA9B^u928YDjj=QLss{>in^*eT*2)cH@q8e2Ho$o%v zbPi>olG~!hDJR{0JZs(UoCiL5blU1KcD$5S#(35BbxE7l8*yImpvm45$V(OzFgu49 z&rY#Q?26<=`DCTi=Ou6Sk<2DScDAb)ySGQ!oo0sR&>jz{5a-!NJKgJ;m`hQ0mlI4H z(P~4xRp{i(HlMX#pjtT@e>9{AwBg^8%!s8KXj30jjT2xdA#2f_%5JTn2DaN`>eX8g 
z-<~q->T!+|fbv_}{E*O+*kSp&TiM`e`<~oet+lW?_J(TFy(l&fCo5gaa>1JkvWe({ zn=x{uL)Z_PC1FcxJjAlx#;6=Jp@>AWSK=HV9qW8PV5)~OQs0Ds>l=Te_|i_gUO zHm=mni`+`kNT%b3xWvP&AIF+5xRdWscjbquB1Dv<@5ZIy4LIBDFr&-|J5k^>BAX0iI=Ah5BMfbOJU{1J*M`G0 zO-m+QM?80%-y4BORz?lGa!MD7DS(P%5GLihIX8dv*w2CKP zXKHHpo9KrK+sxHDc^>o3c=?pe?64>7wGEe6&&P2Pvy2m+nJ<6q*S2S3!D177V@u{! zMD7Ean1@oGZh>4Gqu6x|9;0{q+3#AevpC%kAw{oFGM4gn`i#cqpb{ujVk*VLn%CYf zc9ba27Z!P@C!r^y6$m$E4Q!%){q^DT;OVJFaMCnj7B^LM)mwhbq)_Myc8H-%n zL2qRgd%Q)ak@Hl#ex0vIsD&C!?MiHv&+=VgxF97ocWfUNt_{vVKXJmJ^Y}0 ziLxugb+co!t|PWq8SY;nMe*r07bT(F$J`#s$FCe9!c&SDi2Jf4ESPWI&~ysIB~NTN z$whrTX_z*8}QZ#=~uG8n)x1mcOjGl16Qh_A6<2jRNd1)u}OoolCNB)I% zpjZ2vO010ho{f;5#ldPqrZDxyK5B&DYgmbH7x@WxqEO&1$pgWA*+~R~Uk-3cfsFJs zEyC6hhvU9Y407$iU7d7i?XM_c5_Hhh$M_C3OU~aCG0XY(s7U`J!_9H?#)tGi?xUGh z)>xtT&$4&7n{Z?3-ewsp!oDxL!F3k5kWXcwU}{KmtK2_a8F5N4G0p@!O3kcPKl)Et z8dG&MVimk!G4Pc691Pw<8y|3>H`?TTXL=%}>UfgN^^)zkUw6tcz&tx?@`hY-j{Vkj zhq9cI#gkd8j_ow3XFjy0Sv|p3obgQ--wZcxPgKcFAZ<}&F%gqxKHe-nlI-N}H&sh; zO`rX2TvhyoPnNaM-1++n@&qe|myhm96*DrjcKC7hi<9i?-6E1SF#Y<1qMXAz#ktXs zHN@VpxqDYVl0On9{t^4h&B>!iqnSbp^L*1}P#F$6E`W1saTJqR+X;Ov@5|y=3{M_V zX$s?j*)drf8GI%wFVzSMUfszd6;WM`HY3}Q@8lFwQ3%KMF0?#ylhEy_WA}@DQVNs3 zx-HpvRWttEp7q(i0OsR^qRzUKHl6fhE}=Qb$&-uD2~V~I>bsiJ#gJ7uF_)>lT8u2L z#%dM2@@3M3MJMz{kq<%{)_Jzm&3Pe?MUS-lNT?eYu#wE_^V!&e(Px}(C2qFUMVRWhlM6*p){V-dSALm@wDEHYB#)9MP`Z(;>{6C;c(F zty)rY&`0;R8D}|C^gvW9DVqdih!h^l8c41$t2S+qY8SFI?UYq7#f`YhGuY&GA$_ML zOT33|$@*TIZ#~aq@%xC75|=l>Q%8RrTi6%2F%et0me2sArR$Dsii@~Ut!r4MDAZ$7 z3~HDwV)N2vYCrrw3WIiiZ*%hlPIg=8N>IDbQ6A%>Ewf*^0OqN~qL|;}vUMVb`;LI( z>(T3WP1F)hHWt9YhL40Pk2q&Uy>b>E=~Cav_$(?{?_ACgoSn^sZ6|kk6=UwyXt$^o z*$>EINA$%`E!|O(f>)<}-6=IjQ5i#Hc6NQTQUdB2YwpU&K^4BvjG}v&)3X^V>jjgH zT@Cp*eX-xSnV? zH@4y~b;P~EB@OSpGdZxfAeL5KBRwha?(e%%^0xQ5yc9Oj8nr;6n0O_5Rn$kzK2FNM zZX_qqC68eXivIG7Y-7D7^c*g4Cop7Jtt8+PC z7TocZW`+lHq!#L2?LBE!k*WVr4u!3^%Un;b!C}2pw-(Bkua%Y zRAGsqO{J}9T^VkUV#IB#=y2>8eP38Eom|XLpGP#@__+`)5D^AL7L-*XKuo zg3QZX-5|n?xTv|HtQ=nC3!-=ct2TCI>@BNh;v=`qQk){~`|^xV>0Z(oB`5Fu>J%$X z973ayBz5?etr;E}m0&;Xi%bp(QLn#PO+w$M)X`v#{KopFrtc(TcR#Qk8T~$sa~!9N zu}2(*Z+dtf*!C?rM~%Ih&7E(#f}}`}IRZa>%H?w0ku6b=@KQu|*X{Dji%%6Lsl+9% z?>4h_>@TaZ-jJ&Imc=3(H!}QM3NjG$6`tII?586}U%YpH@Y+FF^P&ag*P`Aq^4TF& zw8#o|EO(~Bb8NSDzdlvBfF#1pZUaPRGD_s;)gSGG4D_A>_9%ge*fh>)hGxqCw}#$z|(ZDHeZvxtvMD#1DSc^7&@xhFk50 zM=cfoSo7dBbqQ}?jKm7v@a?V9B3i`gHAf6aXZU_Z5EJ)A_zG6&0crqR)zhuxG~q&eGm0s1x@O4-zpoUY7-IH zC*c}9%^{qH(HE(o`@bi(L8IMYCN4?Y|3vNU=VA}9QW}D~p2%icji@X+x-zNk(0Man zFh{gs^3d9pjOs({oVc08S8qciX*&jAhn&MqzN@_!`fxEH&^_T6XeOz$h@VOeHZQjq zwGNzH8*pD#IaW^g-hyNsvW1^T7nUM-)hzE5`RlqG%054i=aHvZ>wSR2cVrz)jeDo2 z?{1+B%wa=lXN6p9>m{w+@Os=BwNBAIv0}`vkK{c|TV8wFb{4clFFJ(2z-TCy7cuQ& zeXhOTrRPLc=PO%gMMjjm>HCLd5OBZGzRUS5WxW|%qk|8k+&Hvr5B)89A-Uto)M;;_ z!4tItmIx*X{?TBUBqa=)IqE%kcsLcg^qpuxfX2D&ksBE;GTGW`Ilsuyt8;2<8&pXd z61JL}qSGinTwaT|-8g8CX6P+zaAy_wX&>m3$=bEpC_Tcp_r(d{QXn7C`D7UVQ{1LmQrqKpa5To{6nh@c$gC@wt5D(%B~hZ4nISGV?ajiO20_3SJ1g} zik;gtNblasS@&(->ns{EbNo&E=<8+6FFyq`iS2lNik>LFE@K+Ju?m%NxXozBBm{ZU zebh`RD3}dlS>AeTCe|wO;4?&N**q+l-g?0Kfljdzate9qqAL-;#HSasP%3|aAifec zK(){{!7d?s$gw@N8$q3YVPsK4j2-RG)Eg(RZ}I|q+{K%eT=KjGo4TkW0S~)8q{RWD z$nMPPQutd)(pgYs z(4<|)23ra{8)!e9FqKy_mR*mE(-g;@6y^jq@)j znLed5e#I!RbuoP4=9OAzGIVFoMf$X3lU>Q>!GaGNJwwY#tfY>ju4c#L%!;UGa=T%< z5s6DS_io!-5;xnnH55;Z$yaHX3eQfI`qcFmc86Ki^`Ui* zqzr$;InvP{Y53*Lr1Qcy1IOXak$L7dY8v^A@I4_#)!R?zY3*(#qi?O)zP4DQjfUvG z(ATj+;`*Y*Oq>*-l)V=CoKJ7Fdps?1*SENPag1oEvxKGU_~z!!w@6K+GDmsGUxiM8 zMl4Z2AuuA`>AWU%E0X?>a99VS*1Q3JVmj zBeG>^KQ`>2Vc1i&*LW|p&&f!-pLAujqs(xqI1ajs>x#Kj)GSj9T%s*FWRu13&WmP8KiMbzb|7uhI2Kur!YwTAlyLuY+zs9x@^3@}Tk)GH?Q 
zVrQ}D^)4jjJ;!zP{w^YrYuyb3((t5PqH!BFZixqbh1QQIT(+M44Di3g0j9?a8Ebyt z6DOCB5eTW)AxXgfCAlkjDQ#d_=nXYZU@)7|J@jlpTe=H6;#r|q+u%xnG7X{7ms)46 z<48HS1V5-$OQ|sp45cUR=S>x`w7eOV-COnyy3d=tfON@Mm9xyJueI#kF7v9`*7b9k z^fSrKGlJe{ZC|s@rH#dpD20kD`>nLE^ydjm8)bLXmMQIte9{Arb5FYh&i;GHeyOM- z4(zt-hau>Ja|6?I|B3~66tvbWrOb%J5(XK|qF01YgmXqqPvEh>;iqQ};WGDSc-^&d*hPD>r((rxG_Mg=#D<@h&>zYDwM zO!qaPqwhWoKZ=ZlmkWbKvaS2uY;sqz3{1|Ac54HNrBM1%o|s(|#pG+CyKwPBI?i`V z3B8mw(p#tONwrYT_+Uey*}<+>&nTZi&sZ_v2|lnk9wI4W(X2iRa}286sgyoR3e~aj zL(F$%wV9ZhHLMz>B@vB@ty?~LBvvMlRnlC>={AR7ohVCymAkQO*JDzl%*bel!*RAo zGDjrebkXvOPXw;xB8f7b2i9_k=HRBmmz9S*7-YSmh3AFhB2ZbaPn2|nJ%{bmmtB%y{wOg+>I6&CH+v8k46%&@3b;| z`F3AfNg>;71rz9M^@|Hoqb^y@3_Z)K=1AE^x!ju;&F=;K55craF;jKyn$cMY?%jlm zb1y}y(8ZUH$ZJqdRVrQk8GT_d_f(f1W;xfuS;~fi`F*-#))U$c3H<) zQ8#oIuG3}o;X|>ETO)_hbBR(VvuLK6Z(*WQHJ>44J=2Cs>n=LDlD32{b7$9&3M*k3 zP`tvci`_EvC>O7tW>1~!eIf7Yiw2qH(8lfJ^m-tW0cK@rpGB9^d~Kl+AM9}$DJ_qF zbdGs_Z)Yu;$X-FYHM7B@n1LHOOWi2EgKyd=i)k&8hrlaI^1F2DYzdV$cP23G;nM5QH?giIGE@UL&6?&v<0-dM|8Jh2h6> zEkr}CCE(oxMWcgC0nHHYhc6X4NA*+SOfX{CeU4R89FiT0I<8fDaO_AmJoM3YKV>cl zO>{8RrK@C3wYo2SAfWEBecGl5-sUjo9ZG(D+*@4tzJOXVc#%UiWlQZ^R3=`i|S*_zjKeV(?5Fksh>a%?0uCb+Fr zQ^ZOzIK#dG8P2^F{$m@qki_M8BdtfBqLiubH!9@*37)@}FU?E5^p*6DU-{Dz(CO4& zesXsOK6rMSdi``Qwuv4+aPsY`gw_di6*C7meVk68Q*TZ`>Y9XTEy=(T({1*8DGf-d zIVt-Zl~y+*uAHa*yHxVcSlDC)ai*6~=)E)gJa^nSi~0||bU%NoB5Cf6^G6rO##dza zi5ay|-ZJ7Iwv}z6R~u}IJpt9$!i>gb-mMCr3@OXLMnQ9SdG&ZDorb^japv)7M;*@+ z75k+2a}%TK14oFrhF3~ero7*u@v)IRq_BX+G7XTnwF$izuF!fmp7%2w)!ULW(JV_jgJ-K(LBXNm*qSHTZOg%iH#}Ga&O`feY(XoUvYpxMS~z^y2Eze>UE+es=4adW5~zr;**FX+rFb zG1Ol|wvUp3s`zSHQOTWprkQwy9;(zVsC*d@5D;#wIHr9Q*MR#N+eRohNA0_x2e0nF_>jYb?teQ{$c(p06D zp~X^+JP|l%iwXXSz(FnbCLU_o0fHH)aaZR%xNE;~Tg$DtDXJ5xu|)@#wv=I^jr^fC z3BUMR8wa%BP@i>(%;GH#f2<_5%Lz!U6&gI}xnWM;gWs$}LQ-2U6%Y*Vzi4(EyC-G70x)x}=6h1K+s_kPM_ z_WlTnPJQle5>}T)N~8#A?0kkBBCfXRzH@wt|sqV z7f{1^C8w39S3(qbW$cU6IH(!bNWqWnHcU8fLZ-wa2tY-9cG8lqD>i~Z^mqtdtNVcM zEo0zAfr-r8_G(RxUef*@d=9Z>&B+oWT0w`o_{01h|Lhz}DSpJ%MPsXVzHo5sxMxnu zbKUDH|hsLvWTa)Chd%SzPt~aUk6a2o!XP4>(U28oln=C0rKbzB4ryR6i)T z{wFhLUBF1dx6YBwJrKo$o?|1+1Glyo5Tq;setqli(ONE4zIH4DE_fogB`&0!IP4ro zE#Y$Dnt5=>7gG<&?j#mA6CT(kP_oJsW!7gVohWfj_}9yMwF*d!h#9oGp^G=Y(K{fw3wO+eADX&~(&6Q7u zG6fUA@GVrCWS#Qr+Aed1rPzanxe1VIb{+E)aCA&OBJ&kG_1!ru8L|K1ogUDQgk;&6 z*ZpRH2>W0wgng)WqAA_wyQV@^W;17kF}1h6g}asKUOjwh48u<{e`a;j$~i*^i8f|e z+IlnP35G|7do2&G~_QqsMB~7r(*` zEl>OOm+WcE@Qtf1Yk8bfq1MxFc_Z@m^)dCG~M0gJY zL5vXIgaAeuz4u)VIL6Y`kjK`+>nk}hBQe21WS`J)1pm&aI$LgtLxXo_j z@r6fwe-ZMldR!K1o%>WVhS8`erX9eIVF*fK5B6LrYh>R%WHZMe9%~k7gs2!XNt7a~ z*yHyHq`S9Kh!p&l@=5&7MdHNbdW(zZ>merw=cfQ~vRF%i2jjICk+PC%@|J^&H_h3rxIqOdw<-GYu@Y-kZ8S)(*s?pAPZ|q2D zKn@->zVjm+U6{pB^OXkW)Cf>%hhti$!v(tWhIL;!6rX0sKdpBNGZxPI&=oIUm)QH4 z3bp|R^$?%l$yDo?GpnSy&KI0WtZ&-wGE4y)Cpk}U1Rd*#7` zb+!$ry$eU?s25*Z9YqcBTAfY|zlvEYDJF=7JsdoWH6Psb2eekw+TlJ-Db>tLg%Ki` zOnUWmf_0zD;Tuhb4v%NGx*{M&>NY1@zR&h=z`yk+^tPf7maAkhS7zJ#EikFlPS-o+ z&2w6XWc@^Ovx<8@bRV%H*(?OG@t4s1HBxmtEmKQjmS*Ownf__vhk7NdaP0R?MqPP1 z+COT&8odLIi_x~OH-D1RE+0wE;I9e}GL8O8Y)GhkYbYfWTwqBj zEtjgA$(1D%sodS}G5+==^VAk}1x%`5F^?_zniMlF$We`@wPO_7Za-6ppFh*^L<9?+ zfl!MD$fMrNOu-r|rk2_T1B3m^vy(kddsoL&n^N+#`Fkw1E7Vu`GFih(uBY`zNFU=} z5xso1Uz{0OUIc}1-ljg^`)-!(>jk}35*GZXS-mXom0s^cvW5*pFNZ%mYZM<#DngKP zYAkop?6c06Q>fG27elPKdp|>>J@!JP&djL#lcPX%Nm<9~?jv7Qc3$)VILp%w zu@V;WdFz(-OxVMina5)WC+x&yyiuOXb948}kh$N2SM}LhOZJjF|3uNgw_8Syc1i6E zo71806T9Yg$!u?ilPylKE7UWel*~QnefU*~?9=zMcbRc-&DaNg^@-uB zw%He>D4znLd6=uQx3?+@#Xio+WWtCoc-|idZBuF@+?ynXGjl<1#k4by zIv^2DEq|W0Mxq^EQtssD-pPep(k9|?B+1NKMQEWg;&RazC|iUxH1Y5Zp8*iMfSP5> zNf-_}`_9B49%T4t?>LBq_m`F`xN5axN8D-1@Omcxnm142& 
z{Z`N-rB&UoUILVnO&CrOS+_eg>Pgm(2~uDaGq!sR^O5XvUU1>N%r-+yzpz3{8ss9E z)D>{?Wwd!*Y6+zL- zmA2PQHj^93Wv|P0%S}AzMa?&@=T!Il*86D}Qt*#OgxW4;*4)DhYws&2w5Kr1H|2fp z(^}>EWl>|!pCk^*w^*J3owkt4l&rY70lu~mD&;Mon2xX_EPS>PIJ?d^y>SA14OnPh z74p-=eEen|t*O2(fEYeLO|P%-zNebiaP(M{DX{SfO7`q5UBEA0Xf)4S05oIp7JW$$ zlI7UE$1dqVc7>zSS+jl6sLY!?E6r;=NiJgfbgx+mzI#^zOB&55bPt8FcZ zsfzGmow<-tn-X8N0JJ|GQXVtEN@S>KO}(WXt&#FG2N7#)9ix$#dMr0p>;mQvwfy>BgkA&xwpwA}qUycA3G z?M+1cDCD3c=hX!1v;;YVgL1f1Z3)^Ulm%Xf|f=;h#s6N(C__5R6`i}l1pMm_fKf>T(YZhq+gDYa{U z*iNa$~}yt9q)c`Xw<@9|C7I>clBdf&3FruI`m3@t;|o2*Ia2! zJOz9Po4)i$egJ=`UN{k2=+}BQ8TBSYPy$lU{`(Goziu!0{&6V_RR6}VJbd(T0r;F1 z$ril+Vt2t^ZZcKrRAhr(rzm;Iq;_kaZ@ta;=&ZYcV;it*!Cy@Qy%pV>+)JCay#ot*vbdh`_g;EQC1mh%|4A2 z%l2V*nZG`?xDmSCJjjLm_N{EPunqdvnD^gzb35DeS%);YT-*VUCA-iYT|hy}vd*p? z4!}YSP*J}(MQMb|imK;Y5^zFwwB~S}1K`v5TKn^TSu7kD*1n1vRe^MFzT$-=-d3w4pH|Q5!1O1}Yx6v4 z^P14)WYp!RQY-qUaI^V#UY{;Y&l)ekO_WFHYt$kCPT4C=&V1Le*XZ%HRE1ZX8075Y zwkQlHUUGQN2?j58YawXti#A$;tnF3fZpELpsGoOptcT9l{_%v}4Xez?wj=E*C* z-UPbFFDVf5^6Lh}jOWX);$o_+*9AA=#@79}nuGqQk33-(l%7S#J>;9|0_(nLQ0DF9 zO0&)??Oo`U)4ziyE%Dor?}&!|t3A?uc&!2n3T+cm2f+tL)q>O_$^z8lT=!M)ejwoA zCe|4SwOlky6fCse_d75C`NBWV;6~p$Z2wfy{%kY-G8QkURktf28GrAU6m;9$y z=4n1Jy#s4lW2F;!#4U@4!Ldv{0*POG9Ud!=zK7MmM@DswE_KTueW=_y(vAq{eK(@B zDd?WtqFw&krA+IG-WM3gWk~L6(RIZeO3xNMY7jT4E~FoyN0Md|9ircmNd%O{HY1$2 zN2-rHuT**aU8FpkH2gf9jTLYWZy%$*JM#roo7MM<-rXc_DhyA%uzsL%Iz}^TV4)=` z?i9iH01vkI>8Z^i`kwc-BhK%pLm8?=y2gN#Y~)U-{y(#+wouT0rUU|zqAwK z9%@PauiAi*e>(bDzgb%c>Tm#BB$D(aKq>tZ?iQ#>9*huvTb);@#&^EZRarfsYErF3 zybl;I%-K_UHi8nF-=kvrW|ELy*R(gymt-Gwa>xe~swOV*Is$mSByJugp+9%J8TM*) zRP9Gy0=P(EJ#7lA#d-VA4iIs-2QNYo;YgbXDGv3hw2@a)G_ZHdA75PZ~6(Gm-j|kJfwRcw~ zJH75=*rC!%bD3$z^SGS#Gs=*VerpeR#3YK&uUYBUd1S)cKN@dJh8*QPT?IMo+9eB` zCB`g<=4YQV0%KTKv zz%{k`aAK<5!1eVE(DuHDcMD)0m47oHy4Gx5mc05#mvag5kWi!j*?-Zk>x@J{&x!%p z6q_U~Q`|GH<@{KtH%_9If0VX`@Op<#pZq)kPkQ_EH7L7*$Y?e)?t;&~&kWj^%%L$( zhDwXF6>?kowEOb-!*&d2_~bK@drJ`K$+wI)o!y6HRY+Fn5(QQ-yTOcYw`7I;pN7(# zRPo6NX)9IV&Ssj0b93Y@*Q+f*Xlm8w>sA!w*~sdCA(k;exHyQ+M0JgW1(lJX-WzF8 z)rxtwlt7RSfF_iyb#fVp1b5p$O+?UVm=83w8rej% z;SKqPum@^_8dv9auVYG|M$bf<54}V>V1Vix(c^F?{ld4P@0jDuxH8m8Uggu>MxCVc z-GgUWMp5NB$Wt?baMc++wOV*khk|;ctJS^G^2b6YW_k4}sN36dXZWQRjToWI1^)^v zG=qm80MFH5;7*r0fP{6W5{ot>DXm{eLG>|qW%QNZewGu6vY|znVB23U6~{D36;!_E zL+(vOe`r{BTI^#$|DyH4f5;t;^qcV6gUrHfTF%~Y?Bl$^_!Q{yxL&JXC)a3?=Bd)N zs9!Ni`}1aA+Adr)-WUV!^o8i|Uz!&e??$3)pz+2f)f)-EpS+n2wVGvwGs3E%{!Vq` zZa1}2$EmXU_>MJvSu|`glkjsJ;G3FFh;P1d`M5F|FO`4dU~}Uk?0G@mnC%W%VL$&AFIR6KPK$R2Aei_1 z`P#1`!kNuB%5WFVtFI|VH_Dd_dk?j?2g9lsv|bw6h=}E>X7r@Q|3`C$kGJse0Fj~e zzH0E`*F3!6{qaMtj8-s^jhQ>_LVJJ zQMKcHT}sbx7IgBimrEa+U$l%i`)$7V_Xl}hS8+}|Yf-bsjtz}w-nz&GB&sN~L`4vp z!p)C6WApbQ+Z*D8dYY_RJ~zENLAPD34H?rO*y;89l6p&&2)uue9@$^(M@tv%*dEu~ z5~jzmF{~-*ICsKQ=C_ndHBF2{_>#($N`cMX7%qw#AJdyagSNJljOQghHnUpeNURNeAjaDA|*KA(A#ft-s?6$#?+fqe*ovZ2;=E`l(FeALh06 zmmX3pPt}3ljoWacBbY<%X>Rs%mFMN`h=hs_iv>rJA^uA?Cmm?u6QKuuLVYn*T%_EF2cn^&pN73OERQT1fNI?|{>RvF!Da*M zTX)?dnlfj#gZg4kpB&kY=!yw`4y9FU)*Q74%hxUkk|p|m+h&|?d&RMD!#~0hg6QXU z4#HyDxIFy9TxV0qJ!a6Lnt7k&>J06yX+GMZS{JCm;CvDgUr5@EGe%;1IA?UU1LXVa zlZriHTR-N}Hfb-Pm0hM7le}8ct#?3H=^7797Fn$)1Mu&f0xtms2S;@`vdog@`_2S- zRxfA07WOo&-c72Za`ibcPN%vDyjhzC7XH|R89X^q#|lsU(4IuMN_XfZ`-s`)rP#pr zZ-5NP>9Ae$2ONTUL=X+s9_VhREqW1> zn3T3OA?^L#^qk>1JE<jJp@l@jZq-Q&@p&*B_X-&sL% zE|@G*e^yyuV9ip>C6P?ug25+~%v!4lC9bMsYdl^aGud{CglzfvQq^dhp=KC%JkjsPbh z*K7oCf>879&JNa7$M>4A^{NYEQu1$?qx43dC1ASiAk5zC_Tk9Xzr6I#Qz752uU{xC z1{_icpGN&c7+DOse==RWDv*Bctes)b=5P}`7=XPJV6LJ4fWTG> zS`|1jF{;tCqfMuK`T_2oz?mlNmvNeRN#6mZ;lx7!Tyr+B&Cz4jEMlyh`n8_OKg?RIF9SQE_W~n{l1vVo;>N<^v^?Y 
zx6TlIOth!0&OYA$>}5=IPUg1Ye4PGri(j^EzJX1ob<}gLSJ*jhJH_@oG1rQ_H)@G) z?|J|gg%iZrKD7#}_Gnpnpa+o5zUS||*+*MOp-?9`YH^N}1*my}=QmHb&Cx|S+H1uz zlSTD06WWAEk*;vWY(qP?rtV(s;}EW|GF`1oODbPS?5azG3ea;A?_hZuRU!dK z`THt}@ztD=z=MpA&l_iHw@_Oc7IQ@J{Uhbe)Ex41=!Zs}mg4n6)|5DwSTW`~kb5`X z0QhmMX3RI1dQwSC{T==9jt%P8eTl|pmF z?wHnUkNe6Yj5GP%-#w?@-&Vz)xqe|RcL=bsdSr}BDUUosc4r~ zzpxnchqgh-Q@v~``k0Z=7175Mm@1TO|Gmnx5GzM}s(@$Lv}3Z2WsnMzFX7uPhdxd> znLUwNciQ7Q+=RF8p1se%$yAsvA*YFBQn#M!_q31n@{|R=XH<7=4$la-Z?4aL7w9Ge z6`6ehT2dqBU3PHjxYD2t1$si`;&;bIHv6wxmH~+bd_Out6>I<1p(N-Vr+KZ^XTsKv z4`?dJgYBR!FNJcoil&Y6i7Pq z>+O}}UgfGKNri)M%KiOPp6wm==9XQ5H4lA|^h1t>-U$_gPIphAbe9c%W3N*}yiK8O znO%hsJ%+GXN#%h$X8U91uf89&M4ee>3VHq7k)ZN(b=?}7B0|Hzjq}o3Q`JI>Vz>*S^b#j$nnyDf6F&Kb+1T!K1AoYOc3=RsBdQorFuECn1F%S9Kn|128w|k+ zFKmkXzg!PgM?#lH2yOUxb-gFP3O1=H4Cuu*f6BO~w(=1^y#%R)3DP$w%f-#hzvNLp zhSEVk){G*wGN#njhCc8B6uSx-km+>e{SJ0b2z}%#*ZT-X=`xt1YtQlw2y6j59{8D- zT0Whdv-N&$RA4h9pz4Hagb@A-X|{g>DRXDIkjYS5ZJt>@z6krk5@7QFPHeIPHkI}} z^nMG0^OUGnw5N&K{=U1+u8RTvEX$v4lV1HkKS+6f>((9=cJg*Ozs>y=brY?biEBGz z5_LcEmT%dE-qpq42!Zr6g27nv6rbU;uNF`g_J3{{+F-+^aA+8f4S1w zcAv5HHr@&Xp|6OIl1EkqE0uq##&RQo*`Ci+cJUzDAU-z*FWHkc;X8(dl=ct_>Q;TZ z!5Mn5ySJz`Qj6>m0_@B*QAY1Mh7lJBZg>m1@Hos+^1^%w}JCt|c*P z0GWvtJNB~>e(Uc>H&SU_tGBBzS3E&sZW`9cq9OLtIBn2ZT$e+}JTu1YN-mDeKNB7D zbI$Mchbuh89IoC-zb*Ty?{7sJ+epA8>EGt^+QW9nCguHHdLO3g!Mr6;%89LkNWE>W z{1CXRPVKpICIzytXq+oXPdvTWSdJ6Ze`26e^b;@7yS9DQs!Lzg-~4TO2< z4)PRh-4hJCJz@+C??^%k0t?3-XMV*MW%WW=k<}jRaur?d-kYYW+dq4v)HjxJ1d5v> zWKzGXcwIK%!Ups|FbB0aWUTz%i0q&CKIBS(-kz0^`i?#=P}x^Ecw@FNx|qw}#OlYY zNGpj;>;;^k6m=Z+kc2!>>7J}i$#+)OS9e~ezJ0wJdW`f2ZW3Zig&Yh*eIV1E(}S}W zX?9=vuFV(Vf{?W#|Fyv#O9agM?x2l|3_!O?$?mw-Qd#yjm*o^^YJ56kRP>GEb65Hq zK{GXAPl%c$^a)V9E5CKBYV>Qy#8}txfes0n2iz2BtNW?OLtItA^)owR}d#@q5`?7MhklSwX}e* zmYC>QQyRNf(ir-=rN46w@t&lv=*BuhiW?cRWQ1&Zgh9+Jjf5>1w+cs@30JdB;(0*y zp}0qFfh;{)GDRBxT6sSVh&~uk$8rYoj20QaD1oy6q;?mNn>RrGJd%}vt`esAjm9ux zr_$YWKE?=JW`~*0w@cSw&d4aE_$}qC!uVBgNEv;Q7moSv2YLMHkJX58pzeIo42@38 z(3jqdJ$IL(9WNG(P==t;fKE?(peBCCap2ALkRc!68A#Ju<|y1ba;J)}*Bmqnp0(3W*AcwhLhT8Y2dQZU2mI7ew@;HtM*M1N6LhBgeZ{m8 z&S3hsk=||ZV`{M)7?!)8h`V$6yQ$&>8MCY_#`L#qtp%o6dCNQ6iMA&}T<D0kIhvW=DZ zpmMWkVs<9cu<)zRY&-ib`MU8pjLF2lf+S?cKmU5zPcJg9I9#5c-(mFEmlq6~Eymih z>ku}}!h{|q`0m~3qpq&|j;k??Nk5)8lIQAPc*k&1Hnba0+b@+oiTlcnW|1O2@p%z@ z3oiz^0$5100+4ZD43^VUSiS>W>e05U9@t(lJbHBd4=HIi#sqv>Ff0mP@=I$C_`?`K}HfUa3^>d%=R5cl$*_=n5(D!j}kX(eG zuH8O$9~&!kB)=g27e`s|Bj-)ry5_Fp=04Z)N9wRg(3OjKfVCn(!xG7`M&hN zpGegh5MA_W{7r2~j-W+1o}I*{Y5jb~(c8>zaJdkvXY*QfNo@e!v(j#I;EE%ubDS|(@$`(^BrE&_L_oVI@k!)_ z1GWmIB=(<+I1Tr;Bu$zZ$Z1vy%Xj4{%C$CyWO$+VjN*j-OUFs^*>e+{`|*XzOmdDh z%_w^2C}fH3)WlZHuMDoDC!2*i1qU7~nKk!8b?Ntc(pJ&B3sVAISu~@N##Q(MVV`zy zxE$o+10I8#X71nco;6}}*QR1KQN8cmWmerroDBMV^_iS)60P>~<%puJv3sl1lr_T|LV_#7YQ5KwM$ZCD!L6abn683p31Wh3EQeKLPEK^+@8@=0I zmfV(yI)>;aVtb9%B**^Ov`FG6lMGv3+Z5f^!LQ}}b+qT|ddCBZ7z2A})!)rd+v-?7 zqw&*i{Vb2WkvrD2IRg|gRzi{GX@~Z%Bbn-}a#xH#Q4Y5jH(->7?!bayJZGl*3SMkc{V6&dzlo$(P ztJI3TPnUk_qdkcFk}Jzdtcq8n=l4u`jY_WCJv{4e;e!$2V3XOMk6=#e;dyx*hvIPa zUf*1IGi#VK?m?pqp4=lTY#x$DtKOs-nJv6B_tnqn_42{%Mp8dY0+37zmBjxBtpDKb*qt1GNyyA=OU8c( z-oNgD2oIpdf@$96<-_w}#R3Mn#?1QBzhLSXUPH{SyT{z0dvl}!z!HB*2zTTMK4GL9 zzxp?l{q;tFqna(SOZJ~caR&X@DhaqF8VNipW0Ik!+X#4R_s4pP%KzG0lGqv2zyd&Q zMkHI}-93I$Dlb+dR_`z`^_S^H(k|9F%FEw_)fgSw3%<4VMum>JyIWUq^&R={~nS*`2ByEq!!6RR9dfwD{!)x(f1moz##^I&JHMmz4B2L2~J0%%FwM5aK#e_OQi!$V`rBl*TW7zelnX3aph%-#b6APBkTIP=NLRXP4n`2;L_|!Ap|Nc7TVhZrDo(=eAYyYpC?7-+XhcoscY5{+E+aQLy{~qHHfBrF} z?ypQi%)roDQ~Ru&Vq4xlema%jK1X>O^NR-yPVsiEQ%6S4F>NxVD}B>@3wB;%^6$Q0stI3cf)Dfoli5(I=@gO|~ZR6NvIWM11xy3mHtjej7UUhfUFCVRowao)g- 
zZ&VpSgG=O~Yk@T^pZqJnZ)8F%36&T>GOtYl~k|NHpH<(;nK&E_Jk< zoF1!xU3|>*U5o2pm#QD%z(63}Fj%77enJh~g2Pf47QP(A0bw67LK!Dttgg(@61@>Q zi>46I#MRe>_Jxf(y&mR4Z;AUE6vkgs=^X!}JJQ-4fz|yTQ*PP#0 zZiJ#+(jLny<@pN-J{|JtOLmkVOK1POZG_8hH3_o)QMKmk;f#;=(I(h!4l?eT2Q=$V z*Scc)e_6i&fiwqp*h_dD^%?N+Z}$+n4j)xm6WM-LKV7Dq^yTxv=eOW-XJ3~qz>z02)}-%NyqcE|o0t^(;@?q2kS!^;o0J3gvom>dZ^by;*^bws0!3 z7-pldNj4OyWv@_4i;d)|H~AWmw0dsHloAkM6sry3@l!&THoY%g-UCwgv)1TaOurm- zP=YHQ@i>@}N&cz>etcXRmRH#Yzh;`p>^PHsiTB+&?~>rM97je2ntyuj)TGP=)(ws5 z(khwL?1Q855Y-TG+0@8~4}-I!R8C{*vLG71^!SfdLm+TX!Yj$uF+Yj}6s+XigII z5uoB5BetO_NH1XojamJ>53Q@`P9L~h#>#&7Q_g+p((L;3G<8iY)^K0ygK#yWm{z8V zyMx$0$K;d)6tki3(4q<6#y4|!A9Z!OEMK7jkN*kV!oXP)$7F)fy6W${<@k4y zdep0wStDn9Uic5B2A;c5u?-e$Q|{GJ36JkinaWAFVy;||jVcH{H80GxmaCd7lIcaG z*>)=yI$EUuwmX0D_wL>wQ_ky+`BBdt!Xl{X}}5Ed!8dqXX` zKXkJhIzy^AE1-H|b`O5x@xl*!`lwH<2b<*EL`mj727gdHFez3LHKY?daWUQ6Ea#`TDFoN#}x znLbf-0!V8rAXvFSA$xk~c<(%lqUg7-k2DsB1AFBkqx=6EX2!*(KUV30Fvqi8x z8#6Y++^8{#Ev*$YKD)2pT}0`yNJthcS}&SqUpO9H?r6t;($)cN9SQhBkwq_y1+_4R#zNt;AkchKY$8KQA%&fU zT^I4uE!-eHK;|pKf^XeJmW0ri*`<>h;|aFS&A4|iwqmyn$CzK;nDFGRLf-n4lF8f4 z1zO%6ZjkLsbPHCsZ%*t=HrcMUy}YZi@JLHkg61`uaD1Sw%vZfj28}v8)`wQXV0rs? zxY1+&Zc+>0t)4x3YCDMZyBU&A1|`oM&Hidq0XLJvWYAXSi2Rd$4xfyl1EU5>r~& z>FQ5W7p2p#t$V~jQ)RbA0`w#xmyd@!PEhn}((;SI!9%0Wl;GVob)9CublDFdo+~9G zav-XS^mM_IwssGf$8A0A6WiOX`@auU_yd?0Hez$N`(_!@56Y;Lh}i0%X?T3BFi2us zslChXxcb}396=Qzjw#A6X>wr9T|n9xJh*piJUIeWr*8hpIUScPn`Y`!tm??XF%T7I z%EY&E0YRb1LdgUPm6H1H*m4ar!_g@TWEvSe4wY^fcwB2riS3W?Y8~%KqqQea3>=(( z{~*rpGjv`*WzbxHofCDFb514x;ch)dOmgxW+W|T)==g0^@ea@kFU7_3+F|zF%O*JJ zb~Jh564{gO#?ht}-twFLwMKmH>P;h}l5$}YfyqbmJ5W7AbIva`l~~?x##-A0MnF$y zTw$jInLxk?DtKdTA1;Q4S#vFzfIVH>B_moW6xMO0f1#Q2&}((nXlKD!F^OlV(=rSQ zG5?roKSkg1ClsU>(i9ixpK+0^!J#G6ziFAHOnD*w&ny}Bh?G{JY026;7Wu|Ly~DPGgr&3|1AoHM@Dpy) z<}a3%0|sua9|JGT-c5h%5OO}s*RLP@E?iaPwv}Eio;L6r!)mLS^AkC9xEcp3DmF4} z`C|I6ByGtz@ntiz*DCA1|K3lTT80Fl;DlN}BXkTR?kJ_}{2ArgBnXSWeM>LyjCDwu zE_2FWbHTycaaV~LC)(jVRcwwaj1MFczH7O*mRx~UG|2qEpVB>XMx%H%cU9I^E`|m3 zVR7wrmoZ9tdYv7BBzUdI`2R(2|AcTJ#8~p!=w5?pv#Ta)A>U>0DxB{N85n85%fV#M z7dZUf-wa09q9+b?TcJeu`Z#0m=|3mObRAJhhT{_bTis>Iu1~&JhbAXdfVV-bRcwHP z;}?>${HPF=@(X#+y(i*M#h|;_ovr*4*XICt8vAOvEhLH@MIU4n5t}!tSw|%wg75j7Nms9wVQ&T`x}D_d>0v)1v;cL`@4vB8@1 z;ugl1TzZBPE~g_DH&sXFOq9_%KF{(-OkSjhJ zf7l8lM!-hjb5nZoA`BZ%F&djO^FSWki-a|C=!D_x`Ng}zjEvC75_`&n4c?b@N^8&Z z5jQ7bj(E-_HZ5xTsY{oXK+o#aR>qY$Y?H-KQWEU>rS^OL)>U(+l(-UUpy@;G| zyw6oHOmzqt|%pYFL4zRMllHxH(kO!cY?l6DSCGq7!j_XDISZTs7_Nsvfspx%u z{~|6)S9C|yRD~p9O&5Fq{ry>5IFL0t_hGwEtsnqA_2_)a<3t0%CC6)-GA$A}uFP$g zeamBiy*sm zxnp(}r9i)8U~cFpHhK5U&G9X)7Tdnv`qc)DgS-pSar=$Y77mh84^6P2p1%1;uel0<^GYUHc`z;ckiSATyU`(%%3+6iZ4 zeQPd7^8AA3$C@;aemdnBKfb)~_>39vgK8>UjFWi%lbpAJST28K-QnnlH5MNNVlxku7uWkj|Rmfww8>eH+8 zS_>!WBNnu`RM%v+#lL-zu&iH{`nw@xlg9 zSIsT3>#K*;+fd48^T!4GagPp>Xlg}?&9ALq{Fq6oz z3UTFBqx}`W;eE5ZML~Wy8BXq@q~N%GA=>M_bdzr-$qmS`+tRyLW-i{k`+J2Fx=%3- z+n%VI*4(VEBueEO`l3ltH7a+yQJA^JU5Uc za8!M;f9Xf3L2|`4aaVoG=hO>cilk%PNdyZ+pucI!HW{gwteV=q8+ImQy2xLhvq+|+ zm+a!*Zl$uv5hn$g^7>ErNMk}?lA_lj$j`2Vq6c`#tEJbIY}gmog4&CsOx8nb0+d4w z_!{(RmY$4JqUcPVZCk~J7|-=}Rj!=VPNf6nMW$;H&>Yr@`CttD3Z}H^vpRJ9p-Sen zYS5I7&k3k5uhN#t^x*eA#km(-ixGQ>xS-bw)>VZ|G+{-7|FY^Pinp>E*`LD<}#k-u7oQ zr?^ox=6^nJ9&hl>$7yTGUwy-Zh*+ne&7$-u`ZTDh^xfsL@VF4@v4^_wS5Y~e)so64 zXCp=pY#+s;m0J7m=^wOOXbZY4QV$bbM6d#}LUC{;&s}rXZ4EGjeMJPuzJnUTo<;D_Dq1{-uSNFXg={ z(!`CORlkKP%(=p0C3(-1t6(VV|8zq6%jBm$CZz_}oaw+zZdxsh^Q>@?Ue5t z4EMUOQTeNW(k~gYCBsB`=-bw}l*>coWVF0@hWTH97o~OuBSL7pt2V=!W2=Z5r@6H@@T-8~Rm5xX z*IzX9n6ssIZI?Xduo|u$Mo0Ev@QY_M$s8klOkwB76cGVP1yh6m;J6xvsnEPuFF;%G 
zAZ!%zBeJI|6D9$c??}2V2`Y^&zAj=YhKS7=@WpML#$C~6$A#-(8%I0odt4nXX|`ye z5iwE2O&wigT80>_PeGdbv4WgY^iAYZRU9Ab1f1nWnJu3Fu4`crZ^j*t#D$ac*SQx= zKO#w3fw^20ZWXReqdjv-ag~p~6_5P3@}LUe?~MO8U*d-UUU^e~B{UyB_PBmRjY*?b zh8FVXO#P>>f0lkr<9%$c8hgcr`_VtSn06MtugNgt^f$@xox`e9(yRIx$4)x%H->Xd zqT0C#ab5c(CkROriY(+?>1w&8jx%;GH?2_njr;n^Pch7hlmV+?NsrE)TrZa36{Xj3 zCtadl+}}INF;MoOo)%+E#yoUSk0zYBM11Pa*vQFSi=xtbQXY8A#9NET@yzcmes*k4 zab$fnw4F#(ysnW#CZoWv_>6dqHMmMTNwbbM*nYd59MqBU)j+P)`R z8j-e&mi=)pRZ%_ZT!o6Y;5U^vZZPlmB8%?f>RpwRnF+G%Ub3z2IyI%CGHH#T?Q%>} z=1phcrXsz90gIlxg%MN;!v!e82c6jHPm(R(eKF%he4DP^EZm0P;s-Hv9jH<#8|~*c zgyL=t4)9s7`RVO{5f{|V?_D#f^1W{Jp<-)sEzmg6^C!SCgzj9UrY}(k3&*}QALYgO z6~aqS)Mj?R7~y341Zjo|UOCO0dBM|Rh)=L-4}4$GiD#g61fpZ{5xxf+*YA0enOt~5&=&z$p@P)!62S@TL9 z$7>+g(%qt)Zq(uM)dc<@!jV-I4{QB9mla*$I_9ZcR ze3DFb@@GX7^;aaj)Tb+-XyAg184CwC9Mq~F3G*C_!xhwpydM_j+PB|RRN%rjM}aLK zyRVIA?H>GV>LannrEJMFM)DEv){ykT8}Yptym?jve_nlH$~ULaHf?E$1X|{1`7oqi zk%J>MIN0}r+A0j4?v$uF!g{YO;}S7t9Jq%Fc3S-WYHA&C6zDGnB&#|YpQ{FDJ<&n>RBhFaB&n?=%XiK?0c1@g=~*4}EUs#*Pt8*;`UA?8;!% zApB;?${m+WolR+F>54}KLSwCNTd~oh{l(gNy=P}&kc?cmPA?u+HV@`#`~bU_Sl@$@ zf}K)fqM~OebBShaFbvDcGZyPyBmZ@g>_5iK<#JE<+Lj!|ggc1#WYnI>Gy3+2RLv2C z+1VNSQV9?GHpW=_mQqP&(LQE9Shw+7BEK7uMb0uxa%%nvhZL;eQ6=GC@))OW+dJ+Ew%9auffWx=O@%xIzt zkG```)L?PgcHOVH7n{sgyILASH$o{i?Pejon!2&2;CHy?IpF$u)2r!1&y}u(`#ZFd z9jTIfAvx2$q2YO>2-$7Nyf4s-b&Tz2=G#P=lEDU2wGlK~Jw37Zm50z|Eo>9-rG=6G zZ8eD1H;6O`md!xQi2!F?pn!I_e-ycQDYz=Ez-dv(wH-ge^ReL{DA2EE+8}hkU-}6d zx<+Vs*7G`S)@&lbUNCOavy9}hoY!(d1D}tMw6a$ZM)<#p2m~j$YO=;?8?SKaGa4nR zvwIL#BMaY^PBx~#lH>M&i$+oNNevZ+qQL==BJD~7gXB$K4HC}Kl7;j1W{9x3bG2yi zFi8bpAcjN7o?=4d7Vo~r7vf~?fQ_x0;D=D)pfiCi>U;^jvjFSt(dE>~4sTC%y=z~} zf{7hJfyuffXcqxiI%NVvV=HYABTn3-dyF`K6k zM`&33xKMC4xeZsd+b$X;?N;r09E5iehP*HmJzU}#j&SkOxtS#eGN3Z(z4CZmfB{fO zG(T-9hMUbVFDFae`GEeg5OC3s9V`;QndbiUC3O&KhoZHpg-m1SFxUX#%LJp0{zKmW z1p!2QZDW1&^WsY^W&d<#X%RGSRRw8l*Um${@U-9xF5R{iTd=J^l@AjV#k{RvX(CFf zJ|_oku<%duFx1#I!W`CMfplgYc#FL|mx7eT2d%JJF$S|g4M?+LtmZQxF4D@8|H(WE zFMzU~F$dEX<@ZJ07=`W*O-jCOByWzDeKXef1t#z-8OdztpaW55u6d6V55+IwrdqAl z?*mBSTrVB`OhZbJEQH&9CEvRuLG*9JVae&!L;^J%5@iii+EUyi*y)mC6GOcCYQufM z=YPbH*#eKNfkd=cc67`|?jP)Qa4`P~)st!dqA2!Q{-5Pk=bI>#wO>{;5%jLtHP!zw=7Aj}A+HW{uj*{@( zYK!UPWo}U7h&bQsHdei9_p|%1k+ae)OJRR}-o7xOCgUkH>kpUC7*_VfkL7Y-l;bpl zH`$VtimgwmiN*LrtBk*8S3FSx0Qs(T=W_`_I`;PL#f-@;Cu=hFBF)J->!q*vTU9VVDNlyKYzb;AcafDPx><>GzbxUz8U>md!f?r1Tz!8jB52`sh%&)HL53wdtO-UN(o z-!&uVKOx`QepUK89}IF$mYpFG^4cqxCQft5i~klyxYf;X_CqOc4z&NGwD(jQIPiOF zMsisUCf>|&|1pI~;|~tC%qNps&TxMi!GU-A&;v>PxaA;b*Sfe}Fk(3nb8q~EgCZz_ zQ*~MC3s+iiHG!mf>z#!zJ#$1cbXdLd;1qLP-lieiL@`Z%-|&Er)I4aABziC$*@XW& zF!A+VKtDs63sm&68FZVn`56a31B1ovcWXYK(95NM(|p}**?;1LNvXgIEiNV(0jFjv z0>?j(14WxlTynjZmC4o7{tgbLgi}S@>A=dDGEW6GZJdq-5A$w=q&bZHIKp>ljPT5y zgBA{BXQICpVr8Wjk%^fq~1PUR&#+ zg@qW7&WNAzemH# ztkc|oXS$e$@gnTi>A_OP!z&H;8v;`QqEtIysEtKxHqf$zqK}Y;fO2Y0VS)4D+B}63Sk!~%M9}vu|b9 zNAX3Zj7J@M`7bSQUDqIP`5-kh`*?VhgW9(SXKiUiSnldsOxud^bDr_Fjv929vWt|) zvGQZxewm~h#|xGWzzi2)?OB{pL~)R4H8QQbc_$I!%+*i_u8s@5o9@FvQb%poBBf@X zFg7`?=*gFj=S*NX=eIz-04W^a9)-$9JrY3|@_Dzv0BlQSDiQf@ZKb!6E9eMy+bUb* zR9^g7nnZ`h?*{aZ*#U08J+@&JSFXjY@OcAnwAahWRkWFejyg!~zi21i;Y*ptm{=^=@4h@m>y>5P%-A^RS?>jq-i2;Oe(z zgDr%FX8}yYa;BC!2g9KAbW3K%w9tBhKit#w@$s2jJY8IqhMxR>yn);C+X6_a*nh*tGkbS1AkM*p%0{^xcc_?((O{Kc4msw@FV96KnFpP*lx+ zYRonbT=OB1f$>rNEfKWdGVzIP*wQqxtZgopj7qP*cD_oCfZE}z@25T$N4v{O`bj#N zR08}hdX1HD9U9wX@IyC<{HFOKNnyG%(X`@1z8cz7iM!Y|= zZ-sdSo&vMv4S&DFSB@L2xYjen`NR#0W2(*m>w zY(tysw6EQ=;0M)n4+}Z`T+!jF^>0t`Nm3j4TAMS~zseRE?n&D8%!RHn8va&od`x~y z8khov_3nPk&v%$FIHuEgt+ZMRuUP-wH{CH;O=*+HnVJc_HwRncl?Ml^6e})IJ_nI! 
[base85-encoded GIT binary patch data omitted — no human-readable content in this block]
zb!hpFhDB;U<@SMV&tNPfsc@8us;N5krW7b!ZVB*ys0UNDnsT=?+uX@5+}Ly#?J5s0 z+Gf@`Oaipk00-P5a6{8)_|Va?aqMom^NXOA1Hu6Pxc#Q4WNLt`m`u+v;0fu(Cmxa- z=#UJOl*~5dUtPi6bt*uMATJNo)Clv>6e7W#QzJSWYi6@J;vo(MNUO=TTk36TA*fgp zZ9u1oC>A(f`<2T*S2p( ztG^ZcOtTZwin$g*@N%0NB~s6){m?GQ9;#oWIg|Fa?C4~NGxkr!zrFZ%;!^$fdJ^~0 ztG<}-!sxz{I;=)5lC;&mAcVwj=2sZ{3P?vZd5a5tnas)0T4*IHV8{Tp2JgPZB=#Cf z-t$rdYNZUOJAUn%*1>!Mf~7fF_j`pR!pb=f?NrK3&7r3ce!(XilNLW0B!ttWB~^;u zImDHr{A0tB)oZRA-FJRfxq;O15sv5pNs_8>a`>o{Q6j?T7Y4gayejBe zh`T_Or<-0A;#ZBWx^`2kW$|DjRhdbNj3v6iQzZn9(S#z)8c=3`|9?$?=Hqb;$bz>X?)Z+3u@3Wt*{J`Bo+JKg}UH`fM)FrZS{1 z(~Chplw>w4ZU*_9JFoVXKjldE9cR_?a-6@63^d?CLp)?{AVWh@i=HjKXskA8!`wSc z$TdR>QzLNSqZg=d{5b7_FPK0cn~MO4b8$($Jw|K&oS7TTyR_;^IyPVxB0x&tj0kv` zq8?+J&3~lN8;y|}$CfCvBUAA@%)`uj2d~zW_*0@aUdx(3RS=ayrQqeFmhFNVI(Y0M zHu~;XJO51J{GLV%2G~Z0pFsjbqGqMxzoDcG0;~(?ABMhC0{wB%!Z2yk(llOKQlKSN z2@P*kY3tKxWTCX1I`h%RQ-o=!$D1ZB=Y9sR6COC&J6j-FT1?|`d~%{#AxjOGuYWcZNb)E0X2P)Y?oL@yYP< zB;ae3B+(ElD0EoS#haae+ZhmHgCc`IOZB}d%;cV3M#`&X4*|$gxJ}P@n6uZS02$l@ znhD>`$D%JPdX2y}ov?KI@#_b7cWe1!WnD?&GG_f$HJN(}T922#AKmy)0tY?+8^0@p{F&uOs>-(^$rS1+1V#XyM1o78SW6^G-$;5yBU-D zT}~m<$Cg!^1-Z}_J0V;l?8yRHfZ#;q_c6Fe-`(LG*LWoNqtUt2yR(P=5lg-PDT$o= zoM8;@Y9wL1`Jl{vuTynDdS~~{Yem7A2(6KWfKd#Egmc5&i8gAYsVY)F>P;_Ps__DB-<8T`VwH2E2LMMUoNs&B3_yv2?q`t?%mT$O0UAJ3 zGj0TP&}(of9y0wA6D84Z@iFZwu14&A-nIg9^?|nf^(+8aL$ARZeOb;mSHl~F2*>F1PN7+{O)-e9c96nD zYNy-xDmdvJV8TZ6Y~OwH2I^GoahkuEXOJSowxk!?44~>lc1_&A()!u6BxO8!;Xt-V zo)*R~RHonQ2$KfHsX&JJW$Qu&8oSiFwW1`pKLWVNUq#wDK*JHd9HRa?_$%t*dtV7^ zfDVw=uU^T6C4+|$)g+R|I8HM05#V($`GzTFD>arX>NClq1H~pn%xmh(CoI%Dhj9!@ zJXXj%mHuuJWvHGeA=cNFd;KX#SLFD0jDQ4-=g-60jq|a7B1FEeK#~P)^1UX{6^8Nw z0_RZfSleGQIr2UtB(WGunVM35y?9}(-yBPS*RPmw^DoxyKeqa9h)W+A8C5(^rW#jB z4j9<}$kT;>J96o-lx?$@%>vl7?5Qw2$#d>l&*z<~aiRZa&#LuQFLB5N{}v&Mr*p<% zv1gsIp4hXRENkv_A=C4+*Ypb5Uq@AkFCQ0D;N;^|07VtUuUcC9y*}R(PXz;~%SFtg zZXh{T{_@_}tL+%+D)_G-6MK_pfnYw2sRV#|r6Qg*hl~|0`XrxwdC3}lDrfxEMMR5I z`}N=5a`N6F%$DhkOGqG&vJgeSTLbvZ{S_4iAu{hle$PJrI$o;b&Q}AxM1#3{a1*NG zAM_xbQSVe~@*3*$uU<5I3J_{el6Kyvu}|`anjZMPFqLL5u&}qk;Lzi(pr2pp43&NV z3x#KZoc`q6FSf`vF)i>Za=QG+%KmOT6`zRJI}KHuF2DxY-bG$Z3J|OtDdc^HZumMm^n=W;twrv#&qe2U{xzAwf<8CzW4-HyUSsOV!QQj*nt*KB0x5H)V`yG{L5udYjPQ4Zf z^m(?%4o;Kb-X4QarzfCQ`u0W9*DV+{K&ROeWU*8C0(x2434)i>m{zml%GBu1Dk=n- z;DN)Q1qsH%Iy^8b`|@(&dq%=vib^IiWk3jUz~`Dw9Omd+t4|g5qYcn&-5`DJgTK0(9WtNMD%^H)IIYl@JXeXZ zpw3h{lit7UQpmo5Jtjs`xf!HWsK?Q);VsrSO}rJN8zpW8GK(}()#+ULEnnP(VH){| zv{gM3lVBF#Ing9XoffnGE!M7AnQ-UB1#~=Ho+N(wBs$na5mqEdv66j>QM?&qd%w&y zDt2tx%wn?8Tb(@KNWP%XfCKWg6L=-+2dT?W)MaMO0ZGaFSo6~K*}Z4_aJxXqi*ngc z+#cM{0{K0bAGYdqEJ+zV6_sfMU6!^?A44@Gx*8+HJX!fMxBz#>!;J7D)R3qpDgvEp z^n*Cg1ilB?pRCQ)ah;pqIOCmJ8esIkynJN5?(8;%{xGg>xdA!xC#UAmlu)lMHVeSd zz$Z-m8v!kuougRu8En~=L1=tXhoF?k5I#d|j*^-;^>&D$qp5rHg>8Hez2nJJNH-<= zVfEbm_G-ZeC#DE@g_p$`>VtKAdRQAjxz$dT_R&ocVVjP^eF=RxoCTHZ--Nv__X9~c zSV365Mhgy<6)P=p`(lSqV`OGJY+Ra+B@W0Pe5pw zav@!LP8J`wo6mQ(RH+1t-z5!mOLUt>h49lp2vmUd!ph0Vo6&FZ&8ewRQ)V8Dr#`+$ zfG=IlR<6MW9l--pWj*Jz)IR!6ArmyrMk;393*`$1XrQHmA+4`xFM2f`CGBPrbmqy~ zyqbwu3S*`)i{GhF4&G_By`>lz_PS5ayOOMs&3et>de^bgD=^Vi4-ON40iFL|GS8k} z5k|4ne)nzFVh!cg;}s@)n>qY@a&rsr08 zVF!OmiEr_Iq2i~j2EoImJT-(cjIt8-It3c6T?m8boY|*-=UX0Z3^~jqGax0f=O~&kg)@j(f#v)9IdHyH5Lu zphT6$!AMU#6VotGH8UJ6Vh#g+5#HP9o>i`%d+DxYyfF$zfe8RG|6nMq$YV_7dmQTg8+TzKrHDH}^P*NtxWlBZwQ?M}GcGt~INh2^c4tT9s}12#!}c!z!w_^zLt_9J8?^%X15 zm1MhDlp8-w=Mc*qZqfkqTm?Ryu?zxn!H*Q>CNHk&aJ_-{6Wzw4@*V(ZIyCd1v=)MC zvXCVHCtPE{%18O(-@Cem8`V{&2MRKaV8{Bi+D}uQK~=hNx^Cyrk9jdRgE`r8*U4`O z@e$z5H02+%D;dn~+JiAmmnU)+m9-%u#Zlu{+d{Yc;p&#%?>rTJy+>j|Hy%-0E#`7kmsypwJLdyjz187JP4(Nh?ccs3v%DcBiD=QbnW!>; 
z=J=y76r`J%j0g+ftxvAJC5qI0E%E8=TYb$xw6W~i*BQ-0I<}1!yFoe*&my%IU0k91W=paOe`|!I|>Bw`Zq(HAaz3yY55Cqm?gyv@}0n?$6Gq`&0FG z&$X`T4fpD6SOZ_KIkWWrhL5yVVmmfrpl2Od|Jm764tfp~xzoHC*4?q-@4*+i$}~`# z$o-F28xIFx;|*(*7HlJ?ATv|LFzPyV)XRq?l3cSRDq0QlcU(s&W%WwRI5Vw21?X&c zH#dA=nRDR7Axy$tR$d}W5j+)lso^hJ5Iv}<0!3K%_oco+44AmgpglJRg*hDO-EdHj zyQEZd|G8^K7M$w2@c059oReus^8`-junOAv!IN**+?gG7yhC$sUfC9D$tEk~7xo&E z{Y#=p$E*;($XlcVlp}S$^zk_F+{2#`a*&lG{=`Bt?&GC(+}JEJ{+birn|hLf@rxOR zo{PB+e0MG@Awv_a-JX;0NRBNq7g1XenEo5)EbzaNvk#Yym`Au5rze;4Lf|7-gsdR4 zdU1C(z?fXPo>3CqkV2cIXlx+t7?U~m93)q^A0G8?otJSg_>uwcOfQ4B&Y>!QJLv6o zas7>&sv0z-P7^^KSVpYB|4%-gna-D{!@XV9Llkm_6EWGX#m0?u;6QMpoi*mEg z>t&NvG8)uHa=QEOy5Rjx;j~hmGclQAa7}gWy}vwDtdOrkKR6$`RMK4zoSytg$4+fg zld?AH-YTNR}KM|$VNv`yErEm+c^yzNs%Qx_v+;O*OdIXEDl-t%+P^r43 z5jA7^*PjT<{IX|cDtaTECBm|KYmRe!^P1`VoAvf+alnn3ak*29+y=OCmb}OMc=cW}>Lj3QNruzsw7jm)3?;lgfhCW6b!YeUEp*8HB$% zj&>$?%z7^SOcq(wU2k3?>&%L=dgz)#xzDU*vaTD{zf36WfO z2-r?Y#}vPo7yf%UX!SUl;j!O*&2@b68oyp?)&_7Q^5QQGnr(ZRe5;AZVQs)MFO`jt zIcCzFg$6Ys%OYqbW?SWhlYE*)!D=WdxsbhV{4_1>_=}s1xZT%qmv^~Y)P5`eyt-?8 znFkDKO~fb*8l=qW@YA1sAiTo_i`8P6z?A)!2Un#-ZCu8aBUk^ z1SLgU1Vn~z5u_W2M!LHjB&0(+hL)B_q!~dP29$0XkZzK}=*kmF8-RESNg72n{N@3wjM$FZQ3Ra0lJ3y^3AE=u!0Uh}OP&o$sL2m?qYD zj}Nd}0T}%yr{sVJW!QPskn?ZRF=N`tDim0Wtgc%@?*xwgrqnRYwgt_EAPu}r8hDT6 z*KggxMp4i0WUDm@!Kn_4pvw5URAuwsofipaF=FgrjI|PnB>B1W&_W{E6XS2|1JZ8~b(Z(otOrA}at-&mW6AER-3$pT*k(_k>*XMiLuGQ@y0vc0kLJ#Y0`6hU*c$ zNa_pLce{n4VTpV130%B-n%Nd7OIXnwBb9XTUlSm*PlDgdOIOLrcD5szBR*O#;PO5C zP1|}sU3J##P0)u0Bc7G@U2F1OH0(y}gpa~2ZcMCEIxU-TY#TN@_xvZ;#c`Z(x&%gE zNveZ$Nv{^%V4%WS~p~$MceKd&D<9KP%5xC+V?V;#C+mY*(VKQ^m>BT zEm*A{YOs>L;qKrD z4ANS;fH}6wji5kDzx_+UaRFY=?9i-*G561r!Y=7-c(99g)J(uH)T zAnB(HQFea(xmy-g@2Urt?_;B%i#jk=8z9B&_;f6iC&f40475=qG zbztowO%4xfvj$uHoFdktp;MMc6wHs8?*?|!{RN9fqIt|)>sEi-CwQT_niDvJI*YFRU9S@B#C%9f`lDo)?F zt}}{t1dCEoOLVjpP;P-2EmbZUI0iE11-nitJR<$-xHga3Mo$(0r$yO}s`l+)?& z*KSitwQHVaP8F@0Upy?=B&HHd7XR(40u!-9^8+Gt7&}RPlxKbFwC#Y>rL`y7Sf0Le zO7r|3x90RxDV^b`Eq%`!_ayhh5Qm{M#)Z@z@eGoL&BHd#9;^%}_xYzmBfpIYq5HAK zXfFM4oQbxyj9`$ThMTYJ@|nGxiAVW!4pPm;l;D=lJl;g?ADxr+KYagm<^k-L5A}TL zxR9`sAu8E1T=4sm><(f!+G?sO)U_YiR1JRA&l^uHI}vC@A_ONyfvui>dorm9r#uB| z@zF3-SLqqU!aG$9*IX|$tq|3k8=rb3<7+7q$?==TLM0t+N+anq`_J!aIUJ=rXttrP za*D=SKI7TwZ4GGfde|8e2g4`?eQ$AGBrfV@Dxdseq}j}j{|?)wen5~H!WXK=RL{cL zc^ZQd{QQTKf(N}$Knz)d5}Q3+4|0B;eBZRo!uwo<>buaW-qL5*8TI?b<0pR>Pvp(F z<=5^xu(DWD_F+oE$h>+TV4;4lR^zD@$;BRvm$1!z*Yb2`zk({81mwW$PSKG{*#jk< zF+#tPO)v%Ua%1#*1#n|#vxd}e|16fb?zm~=DWZ;4DXRQXJmI=c-8Aq{KH;6;rU@|f ziQ&6Q`;Q2of7>@QE2z;#TSaaq5tGR^XpDh$Rj5d!a2ce558a~aMdz8Yw)YRC*px>Y z$V#s_1*Qr1))K0zg^eZNgaDeSA3Yw}^2D2gQG$ae6e59Zk<2cGP+h6+ds6IK2H!io zKhdF~3%uh1Q$V@g9!0J7hENn_^j0HLy=hBtQSgg*QwE9VY<_1IF^)pyy46sZ*J(aL ze#Pe3DD4$T6<-4qS0t*~3>9+xeG+};6IdscuBS?+vL@+}VMG!r<68t1`I5OktZ8g> zD?rI~Vx_DqTT@c>;GGlGDA7A3=~l76hh@ge!`Q8ELyYhYu*Fi7C_8f0bXO(Q8=;Sh zm@;bZ_GpeZQ^~64eQ`-xhH|j#dEMkG>f$R9g)Z_%0T27@JqV-3f4b^aHH`!x=9nRm zw=cv{4U+-Zv{hDf7^79Cu?_6RI{soj2yMYN;?P69L)m#qxCUqsy=v#rh-B-ou`v)` zSzlk}d>5+oK9frnW(aCdPsfmyyuwA$C23KqoahV0or&Fvr;7*xj2I=1>)BJC@qiR) zqeO|3YtA$=9P#97Mc5?G0HpXgh|!`A9B9b0%rB+Vbw+Ep$>P`3UFQ^cXn>s-@Z7@2 z-$3!S+|yC>W%Wx<;;*V3^|>5GvL3<&>-}S&-n$U{m z#$oFh_Y>3J9|}i|Qpi84YzR!UfJhRz1k56R)3>v~T`TYsO{vrxSyM?3D@@-VS3K!I z20{()$1j+^A}`IjF%Icp3U}0f7CYLVMQ>Fy%so3dyX=B7Ec#vIa{4_oM|fS7m-hC( zoQv%{v0z~GV~@f&;#GTWUR&3>D$m1lYyUtySF18HO$7MP4p=VUtxx|41YSEjje2fQbsq`j0%;<3!DMYH zoI^~pDa^cL?t}xCeYtbXfw2svrI#s*lRXS%KiaIr;(fpJ(EAr-6kde}JR=lvDLcH# zy#EffbRL}bfB3!L=oj-5eW56O{wtEAh$T8Y8)tV*JyJi-G829y(^Zd4msj4yd=ZxG zCnj{V^Pp{lttp?SA4kdbM}Ye6P*t5nhTz>v^>O3kk 
zVSgacZa+WYW{2?et75_^@9USz+pH)cZ=Z`MHf;dj@~g8)J7=jUoF}_D-*VOcImcHj z<tQ$vV%q!8`Ry&fpg{pB#fv?L&|8(SEeL6!9h-ka$MvDqZXE!%|iwJLRbJ8>3 zbv2(B9r>TSz#Mq*5wtHHg`+hIS^e)}(toIOEZfv%c(453cQSWEuce>;C?q+skbo(= z*M)cCQf=U>b%FTBdF<3Ct`MEsnw_Rl>-*-oW0czlxymh#mXAo!sw(7;0roln6F)Cq zG{91pTV(xxL2r_(KR_Os92hM)JO#BcEf2>0lpSOB8$*8PIfM_H5&0^KeDn#`*RIZc zG&rOI9|TjcQjanR`P*lWPNhuu(`#m9*qb=b60DHaze3j4FWQ35Ptp!H+hJO->d+D0 z!#dtd_QpkTv}%jBAN1T(f-W@(V-@T|%SkIHq?s(}+$tm7Couc2XKU|8xZ(;!N6OD1 z=RliWxA!VGIX?SK-^^CBG)*vm6^@&ZP|Th3ONrQQkzWch7Mn^LDSxVaYWs(}g*$zW z=H7n46X$+H4svOZSgzEI(|m+~Nn7VktijA;_M2kvw5scJzrpu{@9Dd9QO9s>4Eu`X zaDa;zWM(7fK>@h@7?M=WzowEztLfvF1O3W*0e`po5); z(r4)U(2%Jn@M-rA?pZC?V%lB@zCv4xf;V2Tn?2sn)MZff{J#4>#miR_L-=t09ny76 z^_BOKP(;u>_i4eFglH7z6G@$_`;7u+d&(J9u&agYQx2r-UFAU9iq})MCUwbOx#1j& zDW3x>@)pE{EIY#PV)u1Xwrc*!t^J-yFY_!yZ(MSO1BHW7TVgpcR#&q&t8?<({_KD_ zL$3(O@8<<(XX4N2@>*DmuBBz-gBhb5Wkh(GOJWzHNt{VgrChDpiJM>az#Y~|x&F_M z)n8JHQ;D4tao=tPAOHF}4_UGt;W zg3H*;xcy>QSPvb#t>)=<_jm1@>*{&hZl{q;;y3Nhmg$%FWNOe#1ml;k6oNW=41Z38 z?_T60eUR0~`QJWThCSl8>NK)oULps3VMnr?1GE8aq=ph!~b<0>`ag z^FIduyjs5duEonykToepdvYLr#hd-KJGv+`1)DiG&h8B`_wpyCB{h^e2CwA$g4ZDMVYgpa(BP)}_e z3ktt8^`Eq0O>wLcR+Z}FQ$R{VdQPp!50)qqVyUKAuM49-3x=OalP~#iI=dCN^znPi zk*>QI-cw);dB6PAW-j69jL^z8u3Z9zkkgzo9~<)VisX-1=G9XHM$%wd2m5V(|G_P9 z`HK^h!SacT=@YZWNeMt?u6S$F5D-qR#l;>)3ehdzT!ylaLh3?WB0~HunLoJQEN`xO zQoO(kFX#SNtqEIM-TpmQbGgh~&2RBBG_-`6V2HgN9Z|I~5{ByS%x1R1GQ>^VOaU!t5L=YHo*4$4PJhLZTNZL^InF*QBrZuoS2);;u6`g0LAniTL zV8h0`cs+h9m@&FqFCu*Rj&Fk`#4%uR{8~XZmSALs3Nepwql|T&S*KRLR+M)$-L0c; zjD47v9oOu8V7x5O&TCcQG3-3F&M6e)Uj+NYQBzcpVsUwCNnC}#O2jJY`lX&r8@@`L zkUi#<{T`|KiS^T4maY8<&WgYmPkx@tlkxxy(Ve*J@^4W3Kb)^#O8q&t+Q6n>!Bu(| zZpTfuu2cklDppQ{3GI!dN3^KkRN7OQA2|E!HH!@epPX3Lf|pNU?}1bug*~{ z>S+PZ9NOdXJi%J8iDmEV%*z6$ToSmWJdQ4w06sgcsBSVUq>TKMX%$EAu13n=KEyCL+OU@nlCoz!C_W8UY~fTepQBvZEw}}@bo7#yOkJy9rls|t zJN+G($&d|;$a0)pXLW*)AtPKQ_geB>^DQso{n(j$>w9h(3hde5lR*&{XiQU~OGj8f z{8~SN#llmizEV_V?XciTu%xM|G_^Ofh^DX(6x(%bDdpT`2>xZ!hL+Wp_spS~!=Ad+ z^JVs!+yQ}ut$-K39MSt9-nuP`@#ZqQBp)tjJzfYnK2eeMQoHnK6nf$j8AbS zG)g7DxQDt@7lbwny1$S;|D##xt)}Qr8zyhfdhtVY{3u!47ddWw%|VXq9rmcgE-wwX zUli?RJOIog?E3Lo(zat%)OHIb>c_l&5d2)22g97^iz3-s%Z4gl`i$gy{t75C-t)zM zl4mGw&nBDC*yHld)UhN916w5->C^Ol*-kUyrcs4t0=?rsnr&mk>uQXj`58n8S8ulwSw~EEV8$Q5J{yW7 z?I5PTrbn2E_I^U&MJ965NZ5!He)<+mGr%`?Yq-$#r^;`R>;~e?V-K+e`x;MXZ2B9K zTeeZcEi<1K{X+hXm<1_Ff^hE=dFZxI!0|b!y?~!y)RhlijWRnv!N~R)qK;g^o0#JB zB3<{UTM4CR-vRWl@L}hrHQg~bgjJ-@S#yV@Q_o8n^9rWxOmAKUcJ$b~a!Dgb{hAlR z9RBOm2&u}_FC44&IURo+jmI#u;!3?joN*sb@M#6jdGG2t^c^$+>*1?Tk}g7_F2Ovmkl-X@1V>r-^<(!-AaduQoc{=Jk+g z_sDo&-*x|QgL3j9JAx<|W}0g?nrf%}7|`e^1o4zPwglCWI3th8hN;0{UO_Vp9)OdH z1SQ@45`a$fW>!Ca{5Nr=EeJ!l34C?)d}h*s0-(H&S`IP4Dt8`5W-(cFY;kP9%A$WJ z|74ObeN>ikLN2IiE{-YCpEYvwpr%2Tle8Zy&`$tLyg3VHve7-&9@-zHISE&m)KwV{ z(Zr6ZVm`lZ`=%gmdYXA?4u4YfEFT%6_zZ6wH=reU_>|39Pv!5CicJeiBU*{xy|?mf?MK`PZ`u!+d~RO@f{#Cg?>b5- zhl=~m9kZ$@A>^&3%a<<3B@OoIbS{!#UqjzuJt;;tOSrhS;6;uIgvcWvqtZs+^t(AU zA`BRNPET&$z1jWgioX@^D9IKj6JT8}mn#ym%c!d~8~Mg9tAb)uxdddG102Qos)QH! 
zDufrBG%X`k#|E;i3#b-%3o+p)2}h1vPpGic`k%(pkq% zexF_I;%$-Ww*)%Vv;V%ZEgY?qd8wlRwJX)hbhU&z!XKJZp#NshG=Q0?C${Y`)^b@|?{;rg^fpJQai^ZTi138nu!<-Sa)*{N>&pk{ndFY!Uqa-_R z8!E>oWg*;}cZg$1iH`0i8cymG_5{?cMOD)^=iuBw=dj9uosT8k6~8ThbSbOTKOqcf z`titFDu;tVCzr!I=;F-!`{N#%quEUNp>m$>%SNRoNhHCx;aT95OoI_@^`}0+R^D~9#8JFKY zCUg?Qb9##hsY&|Ml5YH5$j5t9&7l6ifM>y;&zXP|fPLpGxmutz=$9@ZBAcJ$`UF@A zXWH)d7}_5wu&OT&%Xu}D;`j8yv>@a%qz%nCCod2wZzt`o^YQ}cQRdM=T~7d z?#+}jnD-fr0padvQjSEMK5_}$67Bgb##W4T}nj5;CgeZQ*y3dyt;mu$-KRgR=p1$6%lm*syM6oN7`(P4lVdrpt>~CI@ z)QqUz%3R<*ZeT!3L3HB#3S$w_i@B_X$0K7D9}*5VV89~9B0_xVYZPjBML2edrW&73 zan5SX*ep)_N21|8tVxq3Po1L*{wXX}=p&Um03XJzBjmfXqXCw|W!>(6IBvt;$=mj& zR*3`b$@ewY5w05I*=_esL6ucCy|SPde22wDFNPf_e$>=K_vwl1rbQo@<-G6HqSbdn zvlj3c(N)&b2v_=5ua7`MQ5X1jO}97aOmwPMCpCo3Ar|gBP4Z29`p^v3dp-VgaU_z% ziqT>8c262~4agslpI~e%%9zI4sb)h}2v-bP4#xdjyESvtB!Iz9{H@E;28ch$1e9^KN4-1k_7nG|ai4!cw4>2epo8Szh=P#BVqZqG3 z;omvZ_eiSTBvXp6^jDB}7Ahll?9_z9iN59s$pYq>x7{3swf=ZuXwE;O(-i+gWcbK( z)qYyEcUzqbZAFXII)ET`RN4u zkA?A~Bf#JDz+Mid+*DKYz&IX{FtGl>T1hZ~DAMAQU4i(DJ%){DwJFtQa#VQ9=BvY?pa@Xj`H zaWwN!4-!iDnTfVUcaF6>u~U1Al=ZTQ8#N&nU( zbRUWsVh4j2LylBu`eoI}7=KJ&Nv@M2Z;SAV&xj@TQpKdmu!8**8PQ?^f!_Ie=_UfY z-|t`LNZK*C>+P0sxv3*iAb`W z5U>!`gN{FMZPWu-1`@>oo{=#FqV+XL!b!zmm7sZ3XZ!%Ci6-I$_)_-x>rTH;uF^!u zNS4*#(om|Phd{+xEq2M_0{%BtmfFGzU}$}NQqho55pLkOz2#UYreOs>^eM&3wleF_ zodn2vkH5vUufX^bQBmT6Bj;#E=+5V!4t$;{+YJ$oQA4Af1fCYNEpf zqwD^Pm2u9%PI}{vvS&b9iA%Pl0ocM*i@#H5|6_*`N-git)uUr!b(Gz<{q|$PLNMBU zHVR1o_&R&>dO-Do2`6%#2lydRz$T~TGs7M`uwLA9nGhPjCu0&Pv~;NKWMDP%nv!=*--|Vp!|Ou<^W5Q6z4Gy5OxnN zW*Y5J7KOkFf50Rm2AEs!#^C^=X93s^e;i)_NW0IlvkDY`8z+2`_19y}gg=>1vG$3MOx;6u8tC5`UD>p9(WUHa>jkx&-+ri6Uu z^Ncva&u8~TX5};RKuZrT|Nr|q-W?rC=#++j!zp?2=VZw`g~>2n89Lh7OEi<_5sI41 zwP+VK3K6^H`g;k_?A91W%Et4B$~~*A7`0%2OUpZA|k>XBQf?O5@f=igxc$vFW{4`?ERZA%BhO)$do!j31N2@ zRaG)ImJ}^l&jfBLb0zwG-;boz!gmz+qExFzDPNuuufUxW7Td{FwuKFfy59)8@I4b4 z9`#k8A+^A=czga$lG&4f#>)2!|LF!c;^KRGJWub0-sltt+sxKflXkdgg%+k`NKG-O$6M0QRB;UL zV+gIV3;4xnXOpx|xXW8>`1*nffVG_v=<>l9SO{e*N00xu!2dv1eH{3UZggbc`z4*t z_;L?jH^k|mauT6`ar8*b$9GVee_B%?34ws7XmcrA?JNc9?b8oZKC1v3m~x|xl?<_* z>R0MbZ7L9S-gRcMuNZW;mt)q}OS93q2pohX3+nsdfPN#}hg}qEFlkpwBv5X*{5Gow zH~IZbRk201KB(l(K+La(?c(w!&lRQ@D`Mx{me({Cgx&DeUZhZ^3$``cyUd`+cn7a? 
z*7a`8U)QqQAWFGzdo9x9D+FW;0-SA5=YQX&ZG0eY<6(o;(Vld#kFx7d#)|?X0M`>; zG8u?TYb}@mkWD%7avWtw(@KwL%{_DhXwrV`C~WED?UxcER~L9RbMLLHaOv)vaEZ#+ zyNt@_e*RLRwWF{3`47vD(*%ra2mi$Tv4Fg}NB_s}muy?}0~D4Ubh<@0|L7CZCufVU zz%@hc)(!K8dTo)iFQ%=k#Qd#%YIb#BoMjMdQkEt;j5pGBD>@Y~=KLZ9=#;b#TzIAB z0PeD{^6QlPil_{<>b|eUnN4`4)?z&~l%MH7jQ`eXYi+z847Qr~VMWJ*tV!C?Uwck` zb~<^?JBm`MCc+gtb?mD=h6k*K?dh-Q8Nk82ul3*}=dY`mGpg#Jko#rZwDhlCGwJ)$ zF9(tsMwO@%+>cwCFKc52nx@!nxvOypcuDKn-qCaYR?rf-Wc0cnlb+my*6h{|C!f@GEn&Il>`^^$;hXp z0DKS$E~8VP%mE4v_@xfVr(PfYK#PWI$qiq*h^}6Sqak@wu7kRCa_@t_sm+uC<=^7b z@Pfr(BO_q|^x(uf19A6IG??V4<2jXHMry$Ths-Vb5>cR`WG)Yt&GnW!u6D-fQnC89 z8Umh|VXqR+of2B>M4^spcw~JBxp5^}K&9yQ9YFC@Ftgjca{@s8|IW6r4!Esf?FherzqTsMYbhU> z*1uE6!*M|xB}zHvO6w75XcVBGJ0(rKzGoIFydG`gvWtO*=yQ;45c(gWqE`=r;_);` zK>N_|5p1XN$InUp>>@`A^l835ht~(qC)z%H{O(yWh}e)^zIiK)ZpAio)8G87*;fxW z@B&f#f)(Y*tPlK%ay4vMcf-!Bxshi;DFakRX6nt=?}r_WG{UQl7C&V+B;<2C3HlRU zWVfQA5r(PJ*{L&R;utWPey^zILk2DqOqtX29hP&e{8O{#k`TGC;p9y5g=Z z7e3Jz%q6yM+E8{c6A7JW(~jh-R@5PM8+%D0aV4JHcD+XQn9IFuZ=ZxCzgEEemkPL1 z1kkeZ56#oJL8>rgJWnUkkXa9LzwO+xBAB$$-=0`?3~fSR+b;3F#l>^*QcgBK@i9e5 z)CU=v92`H;_k14R1Eo&rnnfAbv4^+Q^1LAFi)yHGh3W-4& z?Gbp%A)R*_f$(xO@LZ`2hh+U(kFkR#Q^AJx*oO(T4vx%*>!eDnrUTcqN>}Idtxx!} z$diK9H0gv(sA|Y|Y6861#wu9Oi+nT^bh7!a_Uk#)EMv+-q1@;NG=Ir-YT`-5ITxYM zxL#g`o*@FNjY_xLjqLB+DB_F2M1VUR&vI(N3`E0tNsnZtHrb6X$8YZ?)SB+IDB!^x z%p5$5`-Y@sHp8clcc|>``A2@g@t>Vsk(u(9Ieo~GAG4Y88)Qch7>8+gbzunEWOH%* z_Sol zv+~O2lHB&QO9t%S3{g9aZR3yAd3=2-cuB6@C?m3})_dqGaJQ=Qp17--yF~q;EHhHM zQ<)-?WM9Yh-4ZrvXcZl{-kk>5-ef2T-U%%{+GQVe!bS!hTBP}=@WFklH?k`ahI^$> z2dpz7PNSKE6$#Cmkj}^@zsrx=WGyRci}?9Ma{S)c9K_j8Oq*kqm0o!Q^-z8fEC|Cr zTNbm?S46L4nYUro9m83R-~B_=Y!R1hJV&&)u$V?tOv}EHmR&C6_xdO5a;Qk!3TCsE zdJ*5?Y>B~I+G>mT=_cBemWnZ&yPI|Z@!q%pP=jv+r4JlDWbpT;@5!HSe`Ao!(Q!&1 zwJ}K3-@^vnUhDnl!Vw+VZ+v{ng;=5t1WYs+3))@@S44UC0}zDP68I@<*JisHb8`Mjvi%0o|yefhzgXhnKz{vp2>YZ=4|BA z&|smKBLGmCtrSxoc1t*eH)UuqxY;wxfm^SKDN#C|eEgibUu$C?n7G0Aw{>vABE!^8 z78A?S^UuHE3#Mc@#wuuTEAvZRPPl&FXy05>Nt-BdAzYC@6o|7Bo;vpVU2Uxgf|k$h zPExxW`lvhRJRj9&orKg?^7;_S+!Z0@Osrw;k_H_+29&6^dV+izcGy^wMkG5(PyFAfHhOk_MQMv4*WK4E*k#< zbQ43CF#LUD*8;7!uy3zpt`DE<@ZHNk15gP;NE~i#D4<5D`mw_Dw^YnW3}46}{jZ|p zSiatVzO^<#kBg;joHBmhX}t9dN&wg>98QfA`7!E#r|p|iA#AKYOEOS(G&+%BRiBEe zw8tI?+*Ut6EZgcJMni~IJ4uW}_S)9hSl0$J9u~@!l9Zq(b8;#|zH`p?9rou~;W0%mk8QISj`LTQaqICTAu5tgt#2LY)=z9{92c&}464b8?qoxn5^go%k7Va9nw zyv@`TcUy!fR6tIa6{cZ;V*IWkmfh6HkcX~G5W@hI@tRhW$&ev3Kq$Iznm6mQEc zaW$0O(Bn8;l4$E*&>f944w=UZcFz%o&nLTuOWsltV%&$zmCD@4q#OiCU2xqP8l{H8)KK6W$gifma|;j5LW3ZurIF?t;U3KsKp28)L3{ z0$z?R`y;-EJZ{9fOl?U2gV0B2=>gkEL(xBNABFD+a?ng+@wK6(LD_9#&yKRThRFUn zKG+2@a>4wH-Hypvf$rn0ISq98*FUAE%9K9lNMePj95e@2a`E66Mx2^D64*S14VgY( z@L^>l{&86IQHI8^XCRs#(GxGaRQJ& zE*$aWrsz2+3ycJwRpt=vzeH0;f)yU($(ESr!wvt;eKJ>+s(SmA3^ABrZ|g}o5-gID z^2s7>kC(b&62jhLmm=DR8M=P=IN&h(dhX zG2rW%O9jJNQ{qnr{e;wq4f3vrU(w7!Xn=PL%~?;8+G{D7sX zKJXXIkquRtV8p#I9?<;<;0>0X&;Wk2ev{32fA1$t2mEBW&+&5w5-!U>1ENT)s@f19 z+2RbF$#bkU*e$5rogqASLPR9aQfI+|YP1>GF#L7Y4(#82DZ+Gt{#h3|(b`@pg;>1C zx9m7vxIaIF4X5{g*na}u1~ypIljt^X_87dU zYPxHrd~4upeEdej<~`}JusHt z?Jrgv#rX8pu(MrFtEx%jgZglpx0&nYdSSYo7!Hny4d!Zz^BwRf2|6o6fSf!&Gg-*|_WNVeQsn3vGFLo2<5whL` zBAe+}I4+z^$^eMf*JQ(o8P_{*j7Uq#D(@4|pJ)e+FnV=tKR@o;(lm!A3x-R!w&rM*J<|iH# z()wKee4z8Eo8zc5Lu*6ZRQ?a@TVE7nPCZ2YEx0Dp9_kBN-z|r9gJvu?~m~Np+^Z> z`>P>YaYud4w;o$BofPKqzbm-lZ4ucR%)3hEP*ZZaqg;uj~y%Lb0|w#ueH zNI@x?lsmoE)&|X<=n2SI@OP0AZ;4Rx`Cml(c62x98i$POnM+pJ%Z1})+2%j!+XD@q zpZ6fBQ;(&`U3g_75cf-wcqiae69oCbmY2!r41&}~cxfs+~!3ar>`YNlEBz5R*G2GO-@8Q@H-{PI4EsqNbKmH&dP zrrefQfPGYS%C>o3M78fF&}g`_M_3TfT)tm=-0PI{klD}4bnT_l?4iQ7ld)t4{+3?r 
zH&u&h6d{9i!V*IPxtkV(hkuUY9b&ikr3&V<-O&XZSrk8A%t!u?!yZirk}HeM&-4KG z_D4`&{eQDmh?KFj2}GA@F>>a)f@{h&W!PHY@89DpY$-k#{ys~VDc}|tCx4A!P)-%t z+B8Ql8PfJMoh+E(w_DCco|10B>4s2~S9#Fk_LKL}cr1Btw~viiMZKHA(jpSO|Nhu| zZ*+<{W{+l$E0A?h(Fe3M%HJYzSa>0bBwmgP0-#^2>qK_;nes3AyO(5YIxr%#s_}bS zbjz1#9j&`!TN;(m{LA*~;+l?*LVdBM2Ufk_A9YKp52O;g>Z!+Hd=!8sbPTtX>o5e} z7D}_fzuF{rQz{m}1#oj*9>ej%+x$+5m2;lW=V!MELhfpm5Wl^GR?F|ep(A%P4J^Eq z=iy1ynEx7t|K~&Ff2zL&)fHdmq1^UMJbd1Kjy{ZY3-+>hy> zB%loDPc9MgoKp{jZtM})w6Q{vu9nN)Uck?OT|Uatcmm>j)xLF($axx9_o`yx0woJA2#>O!9pN+gY5nP|~f)$(#1v}X^ z-0u;+8?-L^a1Z$tB^DkgT`(5^Ap&%)`$ekemAPilj*9?Q0I)N19xVBsKO}Fue2`^J z{F9+6r*!AXpz)#l1iQQDXWF6g z-)_@5-|R7&h;Pef8f$1523n^tXYr{C)@lKy2TNX&VM7qp?tEg~SR=h$5(g|VJdEo> z&7G~wpCcW|{9G@d>Npr%IIuO}0izgQ-y>q3tT#W}#n1Ls)2#jJ=mYQ2V38fC{X%Xm z8V@#VJ}Str#ykhE=ZJx5_uE)kl>vT%&6QREp8_Lv$H+aOw9U8oYKDf0s=E%M*(7jzm_*xKLenB0HpKeu~~ip*6IiO4e_rCVqTx$k%YrtHm!h^ z7;J_c4%t8NkAsR*%EG4&7+$oY_04N- zd`Q|<@8+h*I5Q;BfRFn*34=e|G^;SRJ>tf8$erN!GfMmVlXYC^It^ToJmy#3q*0))#MUU(si%b;5IJQq? z-jaBuc~gDCTP3@=EM4BwJRkjXlk#!L?gsUZdhK)n za(!$@T9n)Pwz&4wFFSWoLlAv-qWNMJ@qJREPtWr}L+oxD@8FJE>Z!E``!+PD^fQs0&-@De_sfL125NZr%}xvtW)|PBxe1$zt;@iuf`~yLbKFI)|GAt4y45>U-WtKW{;npuRV#0 zL(ynPctDYcvVJ@{KPo>y`6g=>hQ+nXI0mammq{Y~2l0AUBclj-Ac&bJzM#p2BKbyR zH=4EJPOlrqNar#AL%ksvt2jbt;H2v}e4NxYCphP$dg3o(v7HNe4r|UVqw?5&C%C|T z&qZ5RvH;-w@j805{zFsJzkTcn{XfhGK>p@`nGKjqL8Jlvy0vE`D`jalE9JUeBbh(( zJ9Lw~*#}}fWF>R&8H6EYq1X41p6D?HIfHo`Lv4my9NuR(Hs~a zd8rEi7{AR!F-d8y#NY~AfBT&PYe)-=W2cp~peG-yWB(gy0VZU4D;5HzMD4tJ^4LK!VKQ%Mg z^Z2mln^5JsM5Hj|NlYN1F={$$HY~=k^eW|Pxng?-Qxvp(>o?`k@qOuJ(YBOMKM!5~ z+;r^qDT;LrJlQ*>cydyiSNVp!4gQq`Lw-Gjl{oX`&S|3SJ0tC{oS{qq<~n!-x?PEz zf&(+o(SNuO@;nyUF-W#ZvYMhIpRk+CNqD$1q$@3SGAoIXE+4`G1-=#YTPG?-Q@39BI^nBW}Lp+1_ry_OkLJo**wih@xya= zgaP>&{-m*Yr%zY#VHIMsSWkY5JLul1eHp>zkC5>1)am-NCT$4{hlk)OlcP4jBn>$EnBQFOr2vAG0p2GQfI;NR$Zz_`3q0y zwm$}l!}@_!fxb7m*`Gv3+^AhtB#{!IG*E7@^pxPc-vMcMM#A^c zDt1|e?Mn8v+loFfB8}Rj!h8a^-yAMEWT||;q5rA77tI=wS|s9s07G+VPHLZC63O8K zT*;z5aT=ZyP9)E+^7T99u$-^McD!<-VqYz7R)7FkOVU!KQ_sTk4dI3TTj3?M+kRJV z!A%-}s=PLV$XgH&**Bc*T_CFV#?pGWAAGP2x5SC{S?I2gS;`Zi5Om3D6#?|N$PHW- ztr48XX%8WpRt$+2Jc%!9{rw7$0t}_wx}eQ|Rhzo6@`c${3Bn`zjy6l#bX}{a+mJV` z*mEC+dQNM4$uObR!AT1%t&>$evjl$4f zBQ1^8&^V+B(j6iq4Baqv4~TSwbPXUebmvg}Xt@Gzzi?v{}7JT^J z+;QFS>vdi4`M@p}P*beQH$)dms={;&|ATTOZ|0E4Qk;WWjWVMk7BG#hVDPO>2k2f_I+UIsFECf z_V@TGD-h-&%dSY(k0XL;iKj%K9SBX!ch5|k5*5x~;NQeNr-`&gWJELg-vwvYj8hsL zeBWgO=iuKD3-nLPwym(B45+*c2Q891_2AkNqMOVJy`QK}fL+$8PWZasnU}bF&<fb=OU?uO=S z((}e5S>8x9(xd;v-K{QZYHq z!FOK|tGqUf&Uvn-_TudmF!4Gv2z4Jxz^xqAQqa^o~s<2WKci%nCjD*G~te_ zIyhMs_qu$VxLlW+G2mHe`HNtEgy9D?HWoeD%`M=6>9(4PHaOyI-4Xw>PMNNo3Q;@A zgx9!xy+|zm(@_1z?&v55!2I&eP_g}8L-pqi%zmhtQVV;5Km(?dX@eCqgUdxmwO%67 zTrH6wCqqF4GCvS%Sb6OJ-OPq%Xl5qpYfr?tsyod^A_24rP4KA)XPYK{`*J%tP2B02 z)XB4Uqs+I~N?2RIZN<;EC(Wo|eQ*!=aRXD#=BY>=ls(fTX4Z=Hl~~*%?j+lgI23=N zE187i(YHICQy(hqU2>TzA)^(q!NE2BW6TL;b_FCO?RH}}P;WWpcTudX1O<1O(bJ-z z?)x2L4z3DcBN*gTQZg}QKyiUSO7#h`7bQbuxyj~K;To5-KPuX7IrA}>y<#%K(|{B- zVrp(|QHd2)q44w4erAq^RPp4`&|R=7mfr!o@|)!bKBEIHJ^%Y3erURA`n#Cp&Qflm zL2@aCNIQ*&m+di@cfN=f8X9W`hX!UCt}y$FDQ0N$Q@_HWfCeqq_&ieE{w$(KDgS!@ zH)yDtANpXGM#P5NTe&9u?OZOoQ5#0UV@&ZZT zT-sNu4Bz|f+>;u0nV3^6=LWT!hMrwYv_TPoO zkE+*FcE+A8qQ3-{S$Z$UY-6Bccn*_L!69?5auz^Ltpa6b;yz8ZS_LJZ+3AlR4ciqlKTR*WXC)c*xZIZQT!=#(RNq;ga zUv1uT+^4q##x(Bak{*o$91;d9I{(f$BU<7iFmpR^eb!+Ky#+AOyeWJ*l71-0R4Gw8 zKQXoa%?{utuBl3K1{eV6k2)_6=WW2#;Bd=%>#vMvSYjmr$BCb}1!y-;vlq={*hDoTStoLCIQLq4bI>}}M~tDz2hHyae?z{%purghBl=ojeGt2Hk1 zCbBOD9Rn>nkFhrRpL?Ec<(rmLnSUY#)?8Zc)YHG&u5R5IfPbVN5|4b?>aRzZGXGrf zxgR!TbW5*H&Dp{%*FG9}@0Zuub{yU9foE>6YrwoZXXCjZU9|9LxMt?!N48%aq= 
zn{5G+MkPP7c4Ze<>aQ{pO@cn z!W|nt{`&7M)a8PK$wJRVz7?wR=NI6O)9bJR-wHi;-=Ff~2ET)RT z3oHrJm+EJLLr0cy%l~=8zpp)(SYVXSlr}9#_*%ypMDI1LuOx+1i+O~{_(EG#_Ab^` zTXC+NV5uyJjpR`@;>cG@-*P+0BVY9Dv6<>bnZtT7zm{m3o3as&xZY2Y@k0r%7p5=D z(%r!7EXxg|*o#0Og}WYvCPfDGthQF7U#H~8rN$cA!} zDD3`&$Ejd6S=f)}H1|uP)quiXX|W%3T^A06UvJx!{}L|x97P7Clt<5Dtz^%%a|@XB z9~}9w+RHF5o;80Vp%Sw9WNq+3cYCib)?U2KaRcfNrP|{YXLp!3>Ez}t0S}>_bNAIX zV0FcDY%>1$PuD;0+EULY{KR~Qt}vH=FFMi`L zDrg|m(6_$weKdC%o7aH_IMIVM?S1yo)!vA5&I$Gq0?~rx!?<65nb?@J%@ewmAJnQd zoGg!a>n@zwK!oNU{bLhw@>v}o4~@`W0Vb$W0N3q{eb-*rHT{w<=hIynMIJq2sZmzUM{JP5^pE|nPNLy|{Lug^byJuvjns=|FRf2KjO#yS6{F2k_vD{TtcE=Q7~Tji!U|C$ha z9Sn$gJGyz74Vi<$`p69(vCA`*9 zcN)#MmGS0cVq^YC+9&qbq-#sU16izph9B!3)yOX3!XVXqAU%4S#R24-oSxon3rA2V zgl2kzX^_-8_QIjYg(vSm#Ni3uz)0@{Mh_%wyQqe!>UqS_@5_TzJ^BZK2678Fp2Llh zvpr*wHV*YQH;D{j(ZP_^{>8x=ZLN-Sh({BBBrzeVr(hEXjqF3oi%byVB0 zd7dV2Cv$iIs&YO`*@ZYDSi2{Zjd$djd5hi^;PLkb-q_|yz)OJ(*Wp@`hzvN*y-mn@ z{OL0t1YO5^(M5O6lA@d|5QfNABvp32@e_OBkK-x%cPGm}n@)taHN&5#|AzPDA<`=={iGYNzFM@O) zyetvj4^BaQZCU{e^jqa%k$o7MFY#bvzdj!aPT8@>nCtGfq!*LjJs?>fMq z+#M1ntR)>YtJBv{?yaddmdhdfQ52L@*tL@&|G`ui;`{7;j+rV>!go*c<@7^?Tg^WO zxuCK(nO5;CysI^g#p5u4qx?^V=M^*Fs4tbM)0wfibr(gE_YfE;y$tSALCKC0XX~nD zVjtxl+v9dC;WF`Ug0)?7udL7OuCEgvUY$LjHF%X403oepF~E&bSe8WSP?nw_X@zXy zpOJag58#ZlMl`#@+r}CQth5TA8^X%0;uj+gYG@ibu`|yf+#RZVQ^t(mI(K??#>WU1 z3Ku2wcRW`XXnbY)#t~6ug@)j=dp~$LpG|pYy)CB6vhex(7a3julrP0iw|GMC0Qou~ zxe!pf;g1MS1bS@#u(`39Sbez8RcF8((0gTU^Ih8P@ULL<*y zc5m20_)wYUgLN)#OF=`eca^0xY!4%N>S5;ODi&9~O;}fwFssXk)A(!mRf{|kNoqRy zo?4NIHbtbtpl<+?ST8 zEJtyTtWE>6S2t7P3k|1xo`dxmu`OP^XS1(zI%_zEPuZzY@Q$j-yXxZ-jqOsZqS4=V zGfNH>lZFZk_Pj8w32Bt>d*|hIUC*KWUq^FT%cYZr{UfRdvp$GVVdktf>eHx0D|VP{ zzN>qR0SZ79)^~m%yrTir-fw?*g?X-q3*ZbYI+gfK7Q3IIEmQ(-V%YfL9nJh`chVy2 zx!Xq96?d9n2!CYR<(0_9V>Ir2D!V`7D=DUej~%{lWw!X4KWEXeD@q=vl*m_1x{8>6 z*y>6^b-3IWo?zrA!IdT>JBHm*XFfNb3Z3;57t>u@)ueK8W-dQ>0jJh5Yy@6;V! z&AhOZp3IHOYhqq3e~T&-@MR~a1&I`_Z5K9KbCzeyJq(vigA4FKi6h$@F88jv*TnUB z(d{igYvW;}82?h7MV|UA_jsr3ftlEG&q#yF2td-^iPmP;m>kj11rBo*5yZG3<@|ND zhC^4AB^Cr4nosC|Xyx@x@6!9Z()_Q&KS&diJPsp@;<#gRtfZx%SzN3^6br3;VN}7GCB2IMSqX(Kg>V(V? 
z#O(FPJ^L)BDbCAdCyNTt-6)Hx5Hj3yOWRHUd+h4 z#NRwyy!uwowHhM9>G>$_Y^DEq;8qET0EiG7`!0w#|6^)G=%x_W6%(Hy%BitndoXeL z=fGi$>L<@U5xOtPVeIVGKB(7SOgh`0{mm28uEj3|B!`}$gBD4u___v;WOl}?{Rz-z zG>PAtT_=~ns9qx(ErT*P&p0MKr#E4t!4+Q7fo7Uejcu4fkVRa^m zkEVTW$N@?1)MN1=0ngICGyY`kxJ>jgH^7KtzfkG%hG203tO_Q1tk-WYF~8asFSawd z1~76-!pbIljoSy5hfh}?cv#zSf_)&dPLP}5Plr1ylO(#)hbP}VqP0o(zXczE$w%+r z#ue}&Xb$;mh%htcNvg!QG?P?ZZ=70O*$LfDC%_ePH>lxS8Pl~|h5Yb>d0}Ic%dgoY zzxwOz=$Ea;Rj&%o_%u^_4&3c!{Fw1{X4nWlA3Yy~M`zK>34Q#+1Cc)vrZThIu_aI{ zlNMzcQ5!-9wM+44qI5J&R1&mN*8gR5M5a@?A()oR4)nMYH61&Y6;1s8Eq3~fcxJa_ z{Wa%0tvn*%x zM)iderT^>&Fg!|ZF2Ly5v=j`UbUvV{R`wuI1!;D~??}(ejI=l(=XdXpO4enlWCj!} z>Y?hx_ zVfI|vhOk#=pFKjEO3deLY{sEMFxrDikc z417iU@v%9c>8B#mgnJy*C7I)eODda*lJnM_d6GQI_4?3*$s1!&Bsm>pi*JGKQLFZL zi^Ejb5ZyH6T~D;i{buXUR^c&)bm%czTEIGP%*{-XJL15 z>CMkawr6>~c*_n!8!8dQhpt!D2eWfmIyl}mCGjWo@h2DKG4mNYoEwMxb~7TuZy%cw zt!*37RgX{Eo!i+Q&&cPYM>}167Yty^$H_G$hmTWVLvLcwiu~U9mAUZttp7O~FB%q$ zE?DrM`)~oVHdMOh4i4~n_gz|1{M!E1D@z^-jF+A&LM)}2 zZjtlZuritr2+PInMI3I6b{0!qIo;jUxCyMcRmAe{$G;xK@>)1We4BAx_M@#u6D5rz z9F(PIee>wnaSZCS=O4aoQrWf)@B${5UKTR`cDnG%D*!kq==--Y27%;8zv(c@qQw-W0$YdE*14n zKJ2Nl3brvx%iN*d=uRGk;(SrainNZTSBd(M2M~I^v(I|QXBT7jdAct*R&g`EY~g{= zi_KQb2-9bE=cN&ILA!i3<1wpRW@tD)-g)qtr%OCq=R$cgQJHr*Pf9FY7fB{=EBcAt zU?amKM!)_XXiVKK%X7?0Ux1<1Quyurs92#iPy$M>Wx$)W>Gs3>|XUA@X#48c^3J&g4OS)oW%=>O-3Djot>$_StnGo z{5`-PN$2TLy7)%u)#1;V2!2#aPLP35tIS`f^VvQl*}oY*7OrKa)cZ6>c}tsSD7Go~ zAcEK^#JleRc_y}y7RiGE4cs&?zLe&;KRS%QnIQ?tpW94Jhl+XC@jMYP5f2yl9FtL^ z8A+D;JyXpUI4NfOuLY5`(aHr&4)S;qh_SO?g5WxvjmRQRV0(IKT@O=9+pBON!}~{a z+dk!|?yWkf8ZOe8_J9|3DqD*VpdSl|z1U>&$*W(>;<; z#Y~e)D-zP4O_<>xaUCCaq~L06M;18yW zaMw1+YI2a+Bi4A7=BhC?XTI40=2D{Zi)s$VoUDHrQwXYue5<_GE?V)<6>c#cM782+ zi{+gqoBHhfZ3L1Gy0+P5ZK%_>nxVE4+)G9x*VH}O>nO9l4k%$6E<3i&al*oeK4r{3 ztKG&W)`v-jAI=A>bidL}6yWJ99HCVSDiWBDj+5_N8Pq>-veSFCom2eNua{O}&sf-5 zY6qCk@E21dO2j-23-?KBD1}RMlIf*pV__JI|jL-eyQ(B2i-{%D3C z3t3DIh#)22P!wsO_y=vsp=l6>)yW$lJ*^H84uLJ|_~@p^QIhXZ>U6u&tjBrF{)PDYb47`xrjA>tgWUe;d%r)N*sc7uS~RSFuJVT=1o%{ za7`z+I|IhG%D{Y*FN9o_W!C1s7ljPZ*>2H{?~i;slM1;OgJcXs`gZd!awSUJH4`}*?oc0`)d0nAG4x=wHj;( zuWlU}nb=!BI<|lrkh}_T<4-*LfRvUS8$RD(qzN(ue!gl@MPKA~T+D4b@So{4NjF4O z=kb8u$KZB*wqoj5xf|tAFv^7x9YP1EH*2SvN=s{O(ue@I~MF05XVU%GDd^smKW#dk=BJtXfd+5O6v0k+WIJ+$dpb* zntb={nfJRT<|Qv`Kh%>WJ6E2Zrb$-#Q?!Wn5%``8O+cil-Z%dHYtrJQ)69Yv)ufJz zO~8H4S(Fkv&s>O&seJ#OVAeD;^?K7SWC;06wKpROF&n$~>wVcmm9$VjQccE7Y&Vpr zC_@O}uv$Z5O?Vy}u~kCun!sv(J{_01r@KOUrUd$o@Gc||UFrKI?w0byve!+{$FW;H z^7W&hMRs2eVyJ66FA%jj`qZ4i{BzcKFdP_L=YG=;0t7<~o}iL|cbAwuZymR=P(`|g z0K20`HcMwxDUUUQ?%C?CT!Kp4lC$ z6gu={+hmW=5uTsYp*2H|Li$pM^KGV5bK>oZz`XTV1t~mk$0WujY2sX$46N9rJ zZr7XaWI)@;ZL-$^r?|<=_x>rbwTC|L;*9Af(o2J+Y>2)m8Q&5NvVY{Y-we5m=~zzN zxHhaK4^#@`$nbtda|O7r7~bEXlT!U7Aje_G!C|a9R>wef%`=%7KU3o-O0SY3(XDiG zCEOTza97Z`s2-Z35|PlA2v++UA*hlr{(WT7<5sf8zOk8lU!wT}8){4SCu>2MWp`WL zK}Wjz28?j>?sc{ru%e3pCls~NZn_k+xaaX>{a0DwrtdpKDt1q59j-X^xpO3UwvzfM zrk&p@q(LT{g-lbVJ~W5(OIcsf6Xolyh<`Z3_1zLNCXKQ2ZGLKLA83NUu#>Wi6hb63 zIDMEPzum=+oxSN!rwaRpC8Einot2HG$aLA+!o0l-kxQ%L8vsy{%&W>V zhyLitlqmb#Q1ZeU&B}GQ^hL&VJ^?l#U?6@?ouPcm`|GQ`L3?)xed6|S4qlU3-aSfE;Pj< zpsggpn_;JLnN4jX@x&%4%3qRB6%1M}v`m%{o43j7%xvz0f1MU{y^8eVAoVzSI6Bx) znO@E%pBgr$IQXLrC(40#nd4QrXcALlvRca6U?sKSB5LJ&u$KRdn&tq_by;vN^h21X>&u#zFLGs|eu@@jnsGhSj0Hxz5dj{)h z{eZyvAV9Uu(_n1=(u<#h^s|%UNVg^6>^p0t7iRUlZfTX-WBX3D5(e8WE1vhI27-g1 z^ZfYNm$mt@YM_LXsc$M`mvpCn$bM|7>UzX#el=`)p&A!Xf;v~Q%y-io{m^T+*$6~* z1_(13hCQ~VCWwZ(*K}QZ$@r7CUXtTC(lP9rxewQWnM}(tu1{-U4I{XFJS-f+!y-DF z@bP)3CM_J))U2xKLbK{&8sAG7=vv|6X9dmUSKc{vaSB3IXJrio!tUFTH@q)t@VZvR zs#H*hrJHA3F4R>|NxT~OcNa*xs!iFs`TVZU_Uw4hVS*s@0j;B5`MV;n*em9Q+gHC* 
z9KKT@#g^!X49b5zIUWeah`}mfUdQ78KCiGCAN~TBneE)#IaU)hQZ~4b(4lm#+z)I& z{GFI+-C{BrSXCn3y~_!zB-FA|fNQ2CE%)D_8893HZgdyT?tI9*uMJ$brrCKe-zPO) z-)tnb3y&#zX-OxN-=6%if#+0j-nMhwF7t>Ei0r2Yt{!-peth|r{h_9T-Q}`RBJP;6o%~v%Q5Imovn$HuqNZo{42H@NK? zbEe*Ap0Jf9Zh61C8rx&6#yHGvaY9LErT@|TP`NEF`!Jlu*bALxgIn)L>DxSYYbofVfg)x3$+Fbt?5NZr5k`L)@;+BO>Dw0XGKz!e9CM^Rb3!9yi}S zX}Y!le1r{{3o$JTvJ4C+Qx+_ zt2{V;COznTl3B5UoKY){jZ2M%Yhh66xoEK~SBF=XBTN3A%E|RVaSs78H_MRoBY}up zcRg~`&D;N-Ir!gy9W8(jNG-l^8}UtVWM`K2j>A|h`vO&`=_54@6&zZ_6>2QbO-P)RBaQoe zbQZRfKbj78qxm;ZV%By=v-%+$Uq*H0ftCv!2|u2>tbZBM_ywEUXm`2MH~2-Fn9-*< zvIwAI{X~kfnL=f|@lqSq)lcNL;a4yPXPsVu;l&u{n`*hVn~RavJbh5XQ|ryke-Qw4 z^NXU2YDMguDA*-ly+WlIFU>|H1!U>9bDwG13md9jGUL;(QECiOUqCsLr;=G@Zg^zM z(8Bh`_1?hG&kL##*EbOJ*l9iavKQ7wpKLmgCT}G9z4HODs^B9VY4=f>A5%%jNLPEF zrU;qEW9yQ`tt%oR^4}2rz<>>BpZPpTXg)_wN6{6_az`khC7hE2GNfcsPZWr{vHOV% z&WU2C=c}Wq`$;AR-qOE04mief zxWw2wvoI|3e_2~*t*5H(O~KbG5pUWKD6- znG-v$wc1)k`L?6DM=MNyt==J9z6-4<9odhs{yqISPC4>H>$Pcxf9Nb!>headLr~pp z0r3qYop_+qzV(zX@(WA`^_J_yI>!rP4{8FH)Ubl!Zx#5~7or;T;XmTe%(|5;6OZ}I zT(z9-tl70M+-Nyak6Wi}zGZ#|W4qa0UASIwd9hsXioS;+vn7xuuNgd75tEP#7x9`# z%Hyfy&JrRFbx6~IYrESeU;`P)x|%W9?~{mpAmSNrg*=%zu8v3N#y5p&Lr1I6yR! zReMh>CwimAwHSUiZZyXpMy%}@aPU^&BqJXc_mKRo;Y>!2n^Y1Yw{Z#95;S)in|Xtp zUfL}PGGTeM6Rfef-^^LA)L8W3X>x3R*72qwAJe(2lu4_xjL5rZqHDQ3!CDNDxl8}% z`j?^75%=g?A7?p7^IP(~iSZ3Fr5N70Fh|z$!7LB)sl$JiOl_B203p5AexB|>pzT^sB>+2_M-UoJpo9nuW?N;V98Wf$#njMPwTOr5 zX~wABKQh!YC^Mc_sC8qbs@X+Jrt9mZ(K~yNG|U&bm#5_Jn>OFGQf*2@CYF6C!UZm6 zl>LEoz8^L zIj?dy9b3MzlmM~f53zPWpf!Sr%|uOdHAREjVVlR4m+GX+q2E7rG_m7Ftb@858ad0Y zW1w*|ZV44+7=*|1W};ri7_Bm4gp|Y20q(knYNo%`Q^0+xgcNlc%me4xt~gCMhv{`r ze7=$kd573pCoE5TF6KIA%_R7}LMDyB!+4M)#4d&0Rd9nlAEj~vTp&YlZ7;>oKd5dPg@~S8(HWBZVF*CZATOM| z^V0VbMUx&UtR8){Y3& zaEqsD>hbYwa*NTo zGRkoXO(C*;)CauW{{)in{Z}CQeB%0$jT%3Cljl0#U5h= z1%fm|#telSatv7B<)1nR7}@^tzhf>8g#+qoD%vCM1F^UM=5LkQ`osSY1T)0)|KXNB zuYdpY^t_9y9qlfUqy>;)E~+Js2LNC=v{cpY2atx#FBQ5gqJT%D8_OpC_i=#4zs>b` z3BXtuT73Y0egXiyZy0|?8mp$&+))RSq)WhM7udgLOF(b?2mJ#y0@$aVi7N1zHl9+V zzkAGr02ToO0WLm14z>8tC7e?#2$v1WkN(YbrF)RMxXx3vlqE;Pb&`g~hgk1l)vsIA z%s+6JJ}%;>#!elGsQ(A*opzCpYXS5BVR|k8Fufl=2FwOi=emnaF{82{ft2fa*T|&5 z*)yMmiC$u3I%AAKIBc4PQnTSC3f(u`I~cy>`{Cs4+97~cAF#?Al?FWId_?_S^jmCD z(u>Y7O&|M|D3X$$zbChS`@vH}EsIWq@DF;@ms$923&s75GbVCiElI}mW=-{aobp$H zk@^3^8JiC{ZBQrti!*M>yuic#!x{6MQ45g&ubgpGYc2mxr`gm^{Lp%(j`Pp$2!h(0 z4H}WjDn5pXA*Rp$n6ZC#GZu(I@uOlpIq4U0fob~eb&PBD-_1@!Aju$hU+G@MZ(`sV zquzFt;E*LyJD^LE@+2lYxk$3|;vvZu|(;LXYu-OwN5hW{sM zF;T>C5MBY5+C(BYv)|NaoJ6EpFQHi2`WEd_SPwdm4 zpPYWiy@&nDJ^Dch5m!QP-PS-({uJP90}4E&aTXab-Ypz$QB zHcPw?F4O%JjSs%^9|7e8VVNMd+?_;Td=-{qnfa^hIjGj$TEyz{Cyu%DzEGzkjGLJ? 
zehL}#+U5Pob4yal*>Zm<$FZTri2UdN2?}$g5}`aBQ~0vB(Fo)7vIka%<+oT@d)F)7noth6*3HViR4ZH> zXP%aR5WW_65Rw=5yr_Cz2VLE4thhOi&6~atr=lH#CMwZj}bcLUW@F9j|72q#`5c z9A(&Wx1DY4`i?XARYBsLHRQ#a>xAPGLJf~*oTqplT0_`9oLL5-04%p+_Ha>@w{}LS``47l>Aj;4; zAlMIG^0}v37~^&0wz6+;qd{zvX^R$Ocv)^4VY#7%M}C5PBs^f$c_SBKZiKKsE2(PU zaeH(BNPJfy(AehWom3{?np#We`z|l0fzM%5aoQQ4Ucb62-BnM~*LIk?CKP(S#Mr1N{G#O~9` zq7|J#;hOnRs=ukZyGRlEvUnIdXTyFFK?pd8 zV|q#V+t3D=Y9dB_&QrBi0F9|NfxN)qPpKp?-4#(beE8b!RFPSOo=I#+lN{`1$;EGZ zqz2Z4*B{+F3I19FYJjjVM$j5uxh}hBIw)dYwQ#psal72U#6!0=(iTT#sELhzo&Q#; zi+<#jjVHf6XnzXR`{gkYg_y4FR0vT37wH&aHz};I_eDNT#y5BceONk1mbu+_4U|j3 zahoD;it%{d)xdhxs+pD7J3v)K@-; z+0wh}Q9)QQZJr}Z_fF*HFO>vTkv`UO=xU42ja@*pf`>D)FQt~?N<2`cdab~}Kli0o zz=;8yzjZfV0_bSSycJe@P>)ihPqx)dVy47RW9Lvm5;_Lnn$+yl2gvz)xR8Hav{c+FW zGMNLlUdkvJTvMzQQ>&$eT2cx>UM7z93&`BODC>Ee!nuiK{y@4pfVIdVW}=XZ z(62L^gU^p&sIxR-Aq|%fyfE%J@9Ua(Bz)9YU`#gps~1(F?J-I_NrujR(_YFE36_iX z8i~ghN7`-PwQ`m2M!U8j3`!t})=Bk)(`lPjoTt6w7s{qHbvCuMrws^*_6uR37OyB7 z-&NglY`LHU@N~<`?BFQhUi#2l#jy>90bX=S)m^N6LR% zs{zF&p-gZ|roReClKx1zxi6-J94p*gYTiAj5~leS1hOsA>=J&U9kuh3^JdBqup<2t ze@c<2_W6_Aa}wVx{^i3Xi637hS1-v>UjuPK+&M%stOnDSx?h`}xE}&#%XBJ{eQ(dr zam<|LEIFXNuWhe1)ux0W8U-woy@}?ncwl|L0<@al6acap`rtEzR~hVhA7E>%;bH}x z8~SC9I*!-6%x|P-HHT*)-ec7E?)VQ#V%RnMUmW*9>b@Me zCQFjAJb@nCPHt1QlcTf4pNz|0mMxxT>wU?H8kYBNNMWhPuAt(E9vOQ9F<{5LmhevO z-M$+DNvAPPFt#5WTVGIo(*yFV6}0ZdUeofkk--5*J6p^zGEP`x7!rt$Bm%Y`A(NMK zpLI&5iGL~tO|uTqjj+erH&>iJL8hnE!bvMo5d5-VSEw($d1l4;E4OS9e0m!pGQh?`ki3+QQ|}ZlnVS z6x@GS{rIjR<(&}ElRw!GDUT?=nj8Q+pWPstaQ(k*jgB>bb) zQmRXd)8t$&Dat{VMiBfzjrK#3dp3egniu0|w_xxJ*KNxPM@!kpc!Zc^WIAWUTzuixh zHeK10=k4B;Zi*W|@u+wyAW^+CCfZV2R7B1(FNF;VP(O;em~ z4zjDSi&f_JNjEF}iUxnyl^z8*9=|(bB#wmzJJECry`cMfahdw4|MazspJDgm#`jPT;w2m`WDj=Fpl0X@*wg9(acIN3h3CaC0b8>cW} z_?t>VvVsbWhp7$HS8=UTPDN*=(mcD?wK)_Vlg&UD_DlQJm2aX4k4tg4zFf1eUvyTF z4L6$kaF2tGZgOwLCFdGg-!#TdCvfC3c-su>XX@L+H`b?rpC$;<4?2%EZN$k7niF-v zIme_Gn`t_@b;;tzhbICFH^(e)C5&QhMGeMtizty|VO^L;;uo8TCAPoig2s^FzVY+F zr7ZkCU7|^A07v?oB?=uPxebrXC4SaoWXE|hpvGv{RoKO*CBEbz0_e4h-t zaYdFTowJT5H4xo0&W#%I1J`EJ8!FMii7F)U=mD_`ZWz$||45Jb5B(=SPCi2SU+M9a zd~8cjC&{#$@j*52nx%;qoIe?!%Xx)_7+R#95Z}MyRj%>vmMAj>a@Fo5Xwlaktruzd-Pa73*wYRcNY_q{Mcl+!+*^X zg+oUPdHL-Z;nkZE)*s9ZpM0d73-L8!vN+7*$JKd!0-717=C(dSa0lIRn@kOCj@6&l^^CqoEJgplM@?L&0~GIuY%@E zRN{6UlWWiKXE$USqXb6XeDdal`|V#Od>7rGQe4++LSLK}Dm>GS=~`tKjgPqJyCarP z*V{~Qg3*lW4Jw`n;4m{W)+96SOtzYpCOZIn{vI2ANHf9i?{2;%(G1h$Qpw;flwE`K zei&x9KFuX;^lnZq?nU@E+&vJ3J@DI@unieEc#(?}ui31N?LY!<^2aS8(uItz;QR+c z&dF-+F>jr7ZpG(X_a*SuchazTW}`7%0?{->kt?*jW(IH@b_o>1GGW_5ouzlq`>Gg; z`|a{+{IT`;Lf2)w3cF#3Q2Ll9=QWKlSV-29i)N+r^+ga+v-)GX`r2aHr?#(bS~e|X z7@yS5+BsA{@D{XN2VQy@<%s-Nq1Mm3fxEt;&ahYb=;;PKv7T9lWXF-?JhO!*SrI9q z;`4K8CcBNUK9jkBWF67`(m1lj(2nZWcSD$wnW#Yq_#Z_B&7{ zSLN~6(-lHJpjXBw^SP4h@S&D2!{%Yt6s7qcEooa47J~?A?ImO0BW>g|IZy4*$nkp- zv@oXIIqkEYW)1InZR4X>6il76^`tj2u^(DBM0f*(Rcj}g%SN?J@{#R$BO;pM+Fi3p zv2`##A11Mc`LF<4;P&vzX~WhZ=&bwQJ@VV%Tuqec^^MOVw7WxiYN#U=H@=Bnsu=V8E{=f?y>Q8C~`u&P@U7?3%fTj5H7nN2sn z?!_?FIocV4!;yxrCCM`?x#hDlubK#S*hQSTH0yrEzr5CR7LF8oiGAuNIJXE9U;^!k zkH#GNbIkpM-g`Qqd&2#uiFPQF{dY(>A(GESp6Or>PQyPoC4EH_=9y{WN;4a4b)Jiu z?fAsb=n}Zd)Nrxf>XskH%q?oCd}3^UAijTuSD@>D?)uOdVWi9l8M#pqis)ccGf>Ave609QB=yUkZa)Se@$Xcc*2|&ICbA<8gx5 z#m?+mTEfuZU*jt=zXM=GziUFR5gA#Xnqh=pz2V7W`Ug2!4DZ+kYcB)NfpU7H1~c~M z1X;9#C-3<2nOq$n;fkkn)V);{y`>-OX+RS`$GxTRQj6NZ4hTaOn>9MF?Pjd40Cym* zSJ)VDnrb4LPo|Hlgy)n?8tQTMfQe5}Bs}b;F~v}gV7Y>(3!Ons4L;DA1)D*t(Qs_$ zNRek?>hL}8(HAHlP@u!MDGgRM0v%?do zB7JZ?eyq}q2{Y5rp>M<;M`+m>5uO!EN4IwPL)Z6 zG~=Z7l7^@|Q-&-w>5W&1j+TI`6O(vUn#s*mYz4X)p|USyJ%puG;E5Rlxs7*aGlmMy 
zY>GNuPrM2&V?+;mJm!29Q66t)zCj4kzSBO$trI(+g;E&?C3!m|%pzs(ZMEZ<%b81~ zh4L7nv9iHZeqyxDxdVJae}DlPI`6NDAoZ8G0TGbX2oa)Ol_71a{g#ShNJS5Z+iGb$ zVp2_5x%BN1!{>tn8W^!&U03U5H`v_UI&UiEFjWHl&ks4`sdn|A{nfRdQor;r$YQ; zd7aX5`IuIzi(j8zfO4o#ddmW=vXK1Y&FT};I=uf0 zm1ldqRz_PbkE;V&+D(8Ij#pxafc zVhV9U&)psW*9t3Q(fs^vE;Nd^OHn7~Nv|0ghJpg-C5u6IWhcuFJifHe7G`yIv~QO$ z4$9xRXsaK>tloL*flWj#0s8#I{^Pz`q5!z?xKv#1xk)1`DJg_usE{i^cy~i5H@3!; zF+R@nol;DJ5sMyA4@J%KcU_j4X`JrEy@M17gQykwn}h0z2=8yn74e!S(^HHDbD2zf zgR^VqrWZTRe%G1V<$Nb8KSxh^(?Fxm%#@F98LGtaPyYvJ?;Q( z5~8>0BBbbD^yu9XEy^fKkVLeI=!S^iqSrA&^fG!IjOcX+GYm6k=J)Zv@4f4;b^mz1 zzkjT(u*PxDK6^j=+0Wib(hXlFZfNTB*LyJ2eZe&#Q3Su{Kbk+mf!VP6HFNZd$_KA^gpW50PLYu z7@#B7vIO6-JZTtz1kA+S$}S)bKVI%dMO>EA|LWOkHj@9WuPE=+?+I!=Lu+<{sK<{O zygbmAZmwlbkBz7nAj!%fIdQPKGmh+^z&@DXi9(Eiswv8Wa}5>0BtT8S4`1D42=KpC ztg;+@?jB9R2jzZ<>R3keiqHh4M*e3EVEi0{i!!+ia4Dl~W8I~AG>Lkr@G z{rLc^5no(4v^dGEeKoCbxhJMUjtvl9FJ6IEZ(x~Est*ccw1fHDs_$~=n?CQ~G~`qr zG1^FrjubNdsi5U(&TKkp)&X$%SDs{*nt5@QpD>4>_N)HO=}NBumxV2)pRY1BeGS66L>;^X-TB#Av|rOCGTM4W!QYCzb+g)lCataR69tC4aE|6SLF+g+0HqC ze;x^&_#zg}*gT!KaV|H|ki!u6KY;e`~2l$aR1wF*7A9NTV|9!HilOrhd4!?j}Zy9NR^(J;cmTS^IJ6<^oq$>-Fp(q*r zzbg<$jQ^wq%qLoqi5H-1fav|r=d2UO`)lUt>gT>jtW}B%)ldDg*M!GjstWPr27=zb z{Pk}RUHhDt;c4Jv-iI5wTU1cp=F9S*OzUbBUH<5uiI zm)`T0y@14P@b3(CvC4gxMvIF(O}mM8^xH6Rri_(RD$BZ(ef8EH}*R{6l23o6RJ_ z?dSaYN*zVdF9JizxM)6Dvp$my+&nTBCu=+_WO^SD%wpK89S5~B*x0+s2ZrkfNh-X) z)cr#d`7d^S>Mq!1vQFyD^6u~E$4_GbDR|wjNTu$hUoO?rcWEV!?bXd#*G%ZKFw*R42a7=T~d|fbO;0f9i#>S76d@Nn%O))k*>;tw;AJ3Dfz*HZMP@OK{$Z5-$%SRx8cThqVOVh`8duBjSO?^GF~|2%kRvfQ^( z)>p_##af08cj@%*&X~L3n#%WS}8cdXG!_c=gdS@izUr@yj*w z4`81LcvV!DZJ3#74SuSv#a|%6(_B@ukdNLv6P$|0yBI-rp>>~DpX**-DgPMZauGjM5ow)HX;Gdr=y$&G}Us91HbrdZiOLL>_2`#Dkp6K;C1;3Kd+z&KxBLHu`hlp8f zG~=4SzpGZQK{ZShV7jEAnBM650+|$kzqd|x)juKnR>UWg_xwmtbIPhNYs^0_pf0t8 zX5I3s(7W$1h|Ax?n01?v9l6K1Tpp9cfbX#1oKLHx*|HS?W)-}UQ5XelTP+5C?6FVe z;`AGW-e`7Z%|lC5ieGv#%#I=9VeG$ECGKLsYw%f$6;eb&sLfiKVxsfQvTUZ?gunjv z{UII}T-7YRVE6e~IsWP$DsUr@f>O+?Yq1}IVvrY-#Dmu#H@3Wf+{*^x-)bTMbaOm- zU$Q4L?mby5moNXinWO4bdgqI4@lZ=~ON`JYFeu?R*==ZeUGh_q`+Za0h8l-5Pd77p zj=Mwb!w2J{Z_Uy60dCC_fn0_eaR-m!;a2;#-`2OhJ}6_*%SNyL#wT7ciTtSVPuN?% zZUDnan+m`jc+Hd6j30cKEze}-<=>gUaLQ-d9IWNd)=ApQ6-Yj_jYmo(j)~W;`0*#n zCWtoun)KiFFOkStNko29uW$_*W0gGy1_yqljpaBU?#Sg-Deylj#(61u{`lXs0D!_A z)#+`9G~al?3W0z4mdS4P_*eKIM^~BRO7VAnYJ60yUoOP!DqApT3yoKnX3B|)fP(iS zZ7oN`$4ZrEAz*e#BODm_en+X?@?ZKx%YM^XHzEo1Bt-5yFbQpUtxLm4>VOA$O{*<^ z?#TV0#l;;%M)NJ$8vz%851Gf;&MedK zVmf3T zEa9*T2y87|G8*jWU%->I0I{=e7Ok*vbrWfy+dL@hr*ZkAG}Qjc|8E88f5 z9z?CQ0)=nyq{KHCTE7AkEw1gEtK`BsJUJ~T3JdHA3HL-;asBVvXrMKY_5ZNURf=+j zb_UggZV<(CWrGJL_y9YV57z!QLS-`;!G&BzE$;Mznq9(W9qSWUe#P75q5EPG10`&1 z)B|g8d)G~yR5fOQW0f~$-{DMNO%ru>eDLp802XgMr~DGaZyf+kA9dtv^SLzn^+NNF zuc1syLoi>bX3MH-maZ-GuC4t2${>kHUIdH#E!Uq0k8MJathVAG>TI9;FG0HHe+kkd z8%d=XYf`d)bT9F$2FMw+@_xbmZ&iQz^EY3*G5pFuB}YuXBy4*Ad3gZOyuw5ChYO<_ zsfuQWuC^uKa8LQkfK{EXmCzIiJ?J{gU)z}?r=VJ^?q(MpXQWE{xdwHKfu5&1_j?73 z{dPsAECG7a8sj%H{n6#!^#|bn0Y;(% z^p*rifxi(u?HE|B*c7}RcMJpOH$(@UL}*XnGBBCHul-`GI9m3+Ao4Fu{9Ip;Tybxw z_)RLPiH(M3cP!Pl15$N=g^3?-#ZSe}Rh#{m{##lHQ=-P>Fw$2O83lpiG@G%i^4Bqs z1KRuBxYn>{npgc>a$jP3jRQ5!<24x30<%Su+G7IgrcvWfEAw-FAXDvEJ!|ZQDV`xj z!0Kl2;_jM1@E1v>CU%!g{!DN2f1!{xK66y_p@b3A+~`vy&)#dQgT;Rj{8`rQrc??) 
zx9|If-@y*N+7m`@92>Rc05E`EW{f#3oEG+Ya))f0A@XjLypb0nKrZ=!&(K}E7*wut z{W!!nRQ6dNM@Lqp-Edve208Yt6yzbyfg|%I^QKS#y4m@f<;*%sovgZ~>wO)701#h) z&Bp(K;u!#Mu=xM>5)>%q30Er^adsA@$YlNeSp3U&Q%iZ4^lp=O8RIRfHnf?JP8(gw z7-!t-^wqT-*no?_5c0;e1Wn}f0)tBI2J3KtGJ|<3ga3YYyD;Tv!La{)PW~DZ8d-g; zI`>Ul4H#Qe=oBWiKEj`o{|i$)9<@PgkpCQ4r1{<7MOs$*-Mnmel|VjeTjqht?(cBz zO8hq5G!?^t8TIn|#6BpE8V^%&i|Kvvlis>^P;fq&9<4<;MC6!nzn(Hvi8^i6Y*Ows z!ekE?dy3x{JKy>GaUWaP1~B#*7$!(!GbMJ4Z;n26Nns_vezmI_Q7-DiB0wE!4U8Zo za#I*+s>~RrT><*yx=5Uj8)~O|gkP4TGxyQ7ne|^Zxma^__so)|)(h|7gq9|^Ab=Lv zX14cvuu{c@#bcowFr)cEEF#wW?+o#k?-()tThGi`4t8{GdGq(aY(k z#;l(&|6>?7eVP3sCR;t$3nI?l8b6KfJR@ z2DI^UfFx+??a95BQ*nGwS~+U;y0K_OGQ7jr;{`RURn5<8dKjbK{Powp+z_BE-n{pP zd7vnj)?&f~nPH37ch*})&piv?!N*(7KSkY}F9R1#eIY6Y`f+fMC&D~W?kc|=T0o&t zLyKj({BDKX=-S-IIif+W5~7sBbQ5)*zGE%}_*K|$H&Xyt)IdBI_5Dq znh?ac&lZL#wq!V@?+2@;Acl=Pa3Bo`HdMpDC*>Mb%{g>wh>-J_$xbAWVtrGO8+knu=suY_w!*E% zta`aP=zQVMEiYO+GFCz{1lfI@T4@K-GnhS~jy@vSGRZ5oneOY4NvgM!&lx zH#!p3Wiw|fH#h1tmPz-OaE_>0*m~&UF5=F zR0AF4>7r>`kc8$74Vr^eBbEyd0o+(zyH9Lwl&$Zrs3Ss?6-#@wP3EnUQq@&NZ6UC)g{e1*@8!yu zWgn{bc~l^jFfFu#YR}hrM|jV)3un3}x$S%o8@^BLf+EyCJhAk;f%tHOoGy=Sl)JQc?z3x_HX1Z6abs)kvvV_)t>)ij>@XEZ~`?%U^ z=dlKj*CN^W?HCCmCkUvis>@ zmMdpJlE&}xIyaSf1d!O_;wKlW2%?|;hmlr$N<%x$a^#k$oZP@9f&vVII5=*!aHYCX zZP-QoIE$~zxU=kkOmi{oHiKEO-u|Ba${P2GfTbkp$H=bAn>4(!I3{$=^5}_l0haaX zCSL<#3Vn1*4=g{Wuz~KRA&`x?aey^=v_L+eFup?Fy0!&L-}*jvW1}X60vyoeKw7$% z1wWZg7F9mW5r;kIjx(XekJ++I1Xle>;oY3CHA1--#pz%C-f?LtP$u$-c!@({OCJnR z5`CF4zrLjZIu;p;n|$#{FO;M_=I?M#D~-NhuMRb-U^?#2%hm-+DHS@2C=A|qb1|;{ zgtsXCw6i0>F^rxxK-74nua;kytA7Sw`FcQ2TkhAa$pK{?%3tvgHn-Afhsyt5&}0mn z8QWrd^)siA5`RCUp^Q_8jMAX-0GX;`yg#*w_q{_98*ys9iXh0)Z%3_q6q~%!w1yCP zNe|3)vB0605COfCvUeMDvs_P??08{_=fU5#N6dGC8Rk-l&(5+Rl+;1`e^}1}OvB8t z+@1>na5NHyFbpC3i0xwsY$sdRbJ|V6^ghc=F>iyhTi@>HuRKz;+}u%^UDLmF|D`Aj zU2@X;6y;XNDq$w|E&}&-#*q7EpdRgoA#&y0rnYZa4})`ZK#@=|mm*9=C)HyVGR}4*_s}pcw864vcEY)|a4bi28UR_F}Z8u*brM4BLBMaVp_V32~Pzj`v>vQDfy zuh6X7+n7uQf4+AwXS~i%MZ8QGo7!o!_MPq>{g8sRef&=WGyM*#1|Q20y}apsFM2Pv zS;*gAV4w_ugzmP!vi(Z|y)0re`u!XI(B%`aOeHVsBDZksO^cc_agL~rt1mB8Qeii; zGsRxBXILk>FM)B|Uz*TJ}vYgt-kmjwM#d7?x2 z#Vhp^z^6~HcFligxRRwc8=>{z`?u4*3cL77F;xDUg#>rTMc@N^{`4-G(+v?&H1`!D zq{#>hFy8WIg<@qeZgB)jCxk#>}zW(=nTwJ+*N#4h84eI_e>G~5a z-7HtODa>Z8I)dPW4sKu7gAXNSoe(Qt4x+~?_RXVxEY}y+Q(Y2bCI6oD@5htBUR!B*djO0US?AlG2 z#T3Wd>9-#ruGw%tynkQ$=~LmvbW7t2iQc<72qc>K5jhy_Zf`j=2KuVp4Y4P z@0n0v7_yAA=$uKSfTAq~PuJ=OYztZZ4efa1wZ;OLN|KvX5iy!3dYVR}kU|Er&|)^4rl<+Q4=*)RJK+M z`K~V{X`sDbewDL;Ch+Y%Vt-EDoV6?8yeHI4bhaQnf_Lz;&|s!4EkXa7C<>~Z=BJwl zd*9D_CTDQv6VHMRCEmP?YYd;T=d4$e+yX20X`b$E2K)5VgKgTrmiv~K|FDZHm|J5q zsPVAS@GtOsPuzLV9#KM^e+UlH6ZI)IR0XM<<^lsso7=WC5J ztdGm3Ge*~b8Y~fYdRcE3jB6cCaOJQ#HyhjTb!ZHX=c@J;M0`0fvRmYmCpG%)LTC50!_OQ*^zc2>Z$lxZfNLsnzH+S?GoP_kdjtkQpE;42&$A zN7lU2_G9j2dOO}9*>7Hsvo9Tvlh+upG`l)Zep8*A_^8Cnuco1>+@-yGfZ&7v-C%O4 zFUZ`R_-qcAQk1Il?7*-tqD&PWx@twNRHfC zb%O;9akZw#T?7Lqhzi?cuMs*Q#gO)@@f~mp*7fOzcz%6rL;zPC1TPz4W%7A=PqmNs zK;J|0uvoL6)Rki(Z7D$X@sokd1V)o2Lq6om54|#=Ux5~t&%G(0KzzYG3lP_VZ{{9! 
zelwhRDbIGnx?fXeQeh>!lS1(3d7Fs@AZ|#cW09vM{#bCngOasKn5Fs9qKaEvH$$9KH*}AcktWq>0=_ zf7OSx<(3z5pL)3rsgTxb!cIzYqTq|H+s=jt?3W?lyi+hMUTW4Jj*@SAZ5}?NT8PdP zL@Cn9b+Ub+xafD`4O!;_&HRv_W~qbcleGBpAc;Y&f9FR6`}fBS+e>)ig`{asAHrpg zcOw#$uW=G(B^v*D#I4$IyNPrOI2UaqP3qD~!#`6i9il>f%Z6ov$B{CoWOwHKa2(@t zQd`xeA-W8q$sDED*-fLSbXtkB`>@wtKITz~Eu*0|1_|ffX>{c@OsDXDK&cn;i5BI{ z$BK|^E^vqB00`h;< zMwgZhIaqbMr;uf}=f*i8yG6<2qSNz_S9{okS0O=FBjIzHcSS0U`PWE+UIRq0uBTq) zuVXz0j^W9Qel=~!-sRIUa7R53-V_1%={(6YP2aYAK1jN=?_ph@wiPptX|39 zKlUpAo5=HS3RAt7mN@{d?Y3U79DeZucp|@HfBC{E(E9?!t+odIO$Ar-`Yhk&m>LU6G>INKV)%+~nXvgD_FMV_tK8YH6?j1WcF%&dx{ai9FO!0CosA8?#<<`zRySC>EfrSkfXH8pJ-DVf9WtZJFK$I>TYbb` z<~wx4Xa>Hs8d%dhi$p@I3~TYt8KW|*GD$oShoo9|LY^J}q~C{07ogerN<)C_X^@|{5K+~?sFuNr`lXRU9>l?@vYjGWTsg#TSK(b zV{XwZ6$!B0rw6-GMZqrB>+tt6dxE!5=HF2Ld#3SVAxUP!i)u?S#Tm<}KGozmAmWLy z!P_?brrdGT-C<#}4=I0PlaJuROl)POxb#acd#(EX`xgDJFJ&$LMN^NV5qadJ0IV@L zS2`t}rouF(wLM*k6vTwD8xa2y-Hp)sk|yNh_%ci4w)95wlm}jp?y>rB0{6I?4UYsI zsXjzL-c!q18WYs{b4&82S1qTvQjwoT!TMub4~}+=D25~fDUciU>K7V}BmA?d=K8#V zP_kXo;hI>~U01dL7`DTot=$h4L?6Y&_-8k7TV}G z$(9vjho=Wuc}MM^KVKMjjPsrU*gDA}UB+dvGZc4QW2{h@ZVub0?qDjBaOBG|d7IE7 z;gg;Pjs)?@wBk}#lrnY?;;VE9@7-7s`EbHd8A1Go7|yUPfWaY;0g&qDq<;T=HVC8mZLyF~%e?LX{i{QMf6nmu` z&};TcsaS8HGOeY%HU^U?9PIu^2;gDE8!e&2&G}X;{9!g;y00?Ulb!9$<(u`>U*rkH(sKr{L0Ny+)Oz^r!ysdSdmq=cwd zA~Ka06=%|A%#;L_-3eI{Ph51%J-oynhis=-iagXX-W`p2Ets-P`p6h~P+j-~w!L9t zzfEy)WS{CPn%>LuydXU9Vjt;#T?uWsL z04cN$`LzobE|GruvtD*kWg&M6x%=4J;r$X4M<9@#&S8Crqe`EUKNh~Rgw;bsEDQyz zhB^#eL;f$sa(H*-pa$?BmCzSsYh5>sY+2CiA-}5vGXu3U4f?dYWSE!S7Utxo+67df zcnQpa$(0#9#C)WdX!Qv7Mi--N`Ao;Yj7>cic91wL>z`v3`}+94`_~58W=Tn?Te2Y zU)$NE8#1L~1Co`vpwo0*(&qA+tV{7vJ`F%SG*RvHt1}-V)y3Z-?y0}4_!nuTJ~Rr~ z&qLqj*>&P8MeGt{a`JtS)PKcfO6A9{mgxC2saGfVn-4_Px``h?(87B2K5DA%BF$|f zoGP~3JH0U@#Tx*4(|CzRWN{dH<8R4=I0vUZ+vJ< z-gRnMEgL`~AJGlLTzFZK&PB6LGb&6arAMnCE27!G%y6u)o8|;a3v(ji8f77|Ci~md zPHr;KFx}HKwOXCaaA$07^`t3PsL<`uTrI$hRu4Ad|c~;^%&(Hl$gBCZc>-HhD(=SdQ4w=_b7K&o(!|l81H-BTsgp; z#W~wE3m-_3yyjfh4@Q+C*VbFP4Y1Zt(sVw1&Ilc*?Q-(4BiJQV(;9Z}Lw@<#EJvi+ z3Q5yqCqz#AY8LEv@2zt{uAOt5Yriv);yKg#caKoLqFEPA-E8EK_&OS<=OEsv&tEs* zA7avIAgFGZ7nx=-j>(g5DAt&3))G{Io%bQ1%RFrs*7tFBWVWlZ0m;KMK z4v%IW<;&RIuwbIqa&jRb->)fjETT$l&}`7X8nRLaWxBZ1^nPRS8M@3S|9R+qjKS8N zj%LTY5B$J~F{1>TxqAGwK|&$bWx0^prH_Omsh_*4ZJSKF%r7rh6uDz-czPUOThGd@F8l+C2n_Q(AM4 zmT;|C%Ot0wJb2(bn&zd1A}F@%C?18XFc%bY4kb-8BM#7?)yC`WZ-Q+zU9DN!#EG`) zn#nv4A5(ZyzOC_YzLb*~`@3cQXt*IP!H!njB0$aUu@kfJDB&E3LB_lx3+4(r(9~Ct$7fPkjkb2 ztCrfpdH5{>45wy}WRrnXVGXV3%bwxi_So|yL-ctx))BQ-cb~F(2C^GGzJdYqS5Oe& z&qexdfvVC8YsA$a*3v%nhLBWZU)t}WjmKBA6xhK}qa(hKOYwBi>sbZ1`H(*ypJGlZ z5?Zn36f%qNd(1!U^O{lh`ViRO${y*jsHi~WF zIvZa^cLB%*FUNj*%NxJeb2Up41?QKkx7~*1efdP+ zz5YUl_5B(W%`ghe6WLo2L;#^&JpjS&KE=oW2U53`1&re@t!t8etArIy>Foe8r%pca zgN$t6>-`)Kvw)p|G);5itnOnkplL;*(q!JQGO0u`@p57}`_JRpF8%OBF+&sDb%hMr zQ_#Tc(zThf5gY#Ero5zeG;nyMb=#AFkZVP;Qt;S;(u(mKKlC4YjX4`l+ z77_C6p(0teDWDPlaSxmuE=#NsK+R?q3`2ONbCdE*hWym()^mM*#6fF!2WEX0muPE^ zcC3#xz}syf8|`%r+%a{I(^;9QaP7nP*sr+rUb-w54>S}k{lmvlw;1aMooD+0&sqZX z2meS87pwDj;~a0YXk<+K=Mh|8jUnPo$82y}(HdC{cw5~jyw<^0E-NrNEvNF>%V{uY zZkEWz;MOE#=3ns^DSU9^TjXABG*8#sMV2kZNrb}w4beq6lz+IC5u=|I+qo{obOLf< zp}0RL8$RzwMz__j+cACrDhKJaCp^^qeAF`>WyiOl?-v?;K(g7~8_?&mm7YlVYdC3` z%XeOrwj$m>&1KA(2v3zJ+XUq62V`~Lk zu=ghOT%WS%dxDl`$yN+e03I>Hcz?jGex?l6O84ZIVeMmD#HtNm1~A8P?-Y0xz-W*6 z(@7>U({r1KpP1rEUULF+QtlN2`uw?Cf#P0oOu&h_TT{hrw-Fa1i$1zyz1^Ue4(@rB~IjRa8V-qab$WvkJI0!de&~PK)Uc1Q+K=8NeO@!i+1gz4P53T2KTq z>qLmm13`5V8Ei;e&{{*T{cv!9+lg>Hw{4jm0`birdM1qXG7W>G1x3exD?g298=BZi z8oCTD(-%g9^^d$akIc9$zdrn$=$zkkIjDYhLBor!>gAmem 
zDAX)rKDuJ=WENS=Kd?yQB7^1>u2n1Di50a*f&Cu1r!73aHuduVSeC_wD_JOprNVX8D7&{k(?z-z`$_l1J{aTVox8f)=*Hr9XuWG*ZZ;md*m5||N zSvmX9sx!Mwi3@f46`N3U(oYR-@Qwn3TY@L-(s^-mZCe(cB<$s;xNa&&19*5s(ym;PY5V?re<@r37t zli&FXV*YPl7zn1rr^!(d(y;h`gMcQlXLA<$LFvK?0bY2s>P$$bdV&NxfvDM(T6%@7 z-k0jxDf5OmQ9;PLMdk*G5IkBJE^WyB?-m(g%|nn19|nk34Gkfhk~pm<$47=V_4JM& zgHMmZ3~!w1ORmZ7kvJf0A@nkdMx@eXL22|@w4WpnGh^q5Q%*B~Vq4se2t)=ky}?yU zst6*;8fJM1tFL%+28ccK*}0@H8!WW&IfzyN{#be-QbxFzz5ImH!(h^nB~9)crfy{B z@VIqqCJ>qN8Nu!S^<(YTEUlN}Zl|_50@MW)Bz~Sh6F$BZmreZ*C`_#Wb$vHvXafgI zJqGHDE8D^Ikyn`5$;!+A;&{ie&MX7@fLr02*lBdH%QkLGz5}6a_#qgFp!2ktq`SZL zri%;i1m6=Kd>>)$|LnV*h0qx7y4#fBFq2%m%K6%FUehNH%jmVFuQ^BVM!Z1%yXMtS z3J_+{7|ccfIX`7DjXQI+qk6^WVhoY4it@AiiGsE)=b8IBg2rLulNFcJI^+(Kc2`Gv zAPq*1sim)BJ5x?8!dl%0Wha2i+GA3Yv`Xf;EGP^JG zR3Vv1noTun~x(1Y;L6hJ^Or4uV(@;c+>#c2VJLE|lTS|Bkzl>=P#u1MxR z+x9I)D>|QFhf3M2;xDq*ygUsG8<+iba+UmbMLxn&nBv0gFmJ8;NeugJL)cqdeBhiy zVNAt2?5l%;PY|Xo_$MQNJFmm*S+OneUPbl8s!n-nmP6N|BG7xvgB%C=2BTi|JYu5K z_3A;XPA3AjF#(8OR^_X3pC-FFXS0Kg<{flJ710N;?pH1OR=xYz3Fj#S$P#>sR+TMk zhcE|-4M^hF5f|<(qyc6D#xx4?ohjp0ywgE-^MN7^z*^uIj~x62Z%&MX>aU|9uxFQL z5@__B-28b{5r5)8xOh&6;&Zwo2eHZJ{-o2tX&tD;UO3?g3iluei!K z{_#Ap=-VD5(=UMEm9nibI=cZV2gqtbK29vB6a};)Ur)6mHWrfhXDv>i()T)S^4<4= zqZI84$1aLp7zzygu4xX`&Y6Nr+OK8BhPSGnZrIIJ9}uQubjGvhx}dD0@<>EPx6xZC zDF<%%@sa2Y>TeI?_#gh=;ACSQH{p#7Af&S^Cx-@)DehN_ek zb?_1Pq>|56G4B2Q_t&po^RmSLn-&oVe)}H>ptwlr&wX?6jFEKn0zmpnjJ*>M0&QOG zeR$z0E;OVkc|m|OfV1(TlUSmt!{GWzu9}mWG(g4w314@7WGZ1k+p=d8VTE+L(#?{t z7u=x=@I187d{rpIN=~=f#lmlp?MxL9kqX6XI`QYq$`7rl(i**OExG0OH2I6^sl_*o zvO4FKL*d(^C1&o*OBKL?3O}WcWCQ=9A43F=HR?2jUbLjpJew#$ z1aiu9`DsYy)C2ag6tH_K40|fsxV;H9bBD^`z5qH^SfGIu3S3hbJfp3&1IP$2l=S6u zo;G0V^#7(9!}PBMANnQv5yuyRuUMHnW$){O-~5^%C)f62LSqmzM^hwZWe=lB0-OSb|Gfk8HTx5B0mAOZ(^-oR|18s!Er>h+Gn1KnzJ z&33h8mldOBi#c@jyUx~^jxxJ~twupg{8sNk>y?+J5Q2ihAi)rNOXgu z7133=+Y^v9KR&HDp%x(Hu8mrv?MHc&TAg4(=#aQt^IhE=JlPq*L54K=@r-llP*0Le#ZzAm}V|}#C}-h60KWgS+pSj^?PuW zA$aHVO3&4T2k-;U;rqNTBI@C9SBBag0&^Y7F4G%Hze@wG)RQyO`bL*)OMLmCnNNc0 zhAfTTFGVQ6r)s&972-tV1w-xAGTpT`$gbDDE}I4|eBwsQW}#Np3asK!=mv?~;SA0P z1pRP`Tt1{xVv_LzLlXfi1WIf%Z_@D7=PSm!znpG+h0`ZymNT|Pxi%)wRqepLPnc~Wvf2xk2I)dwDdNf%&3XBl_m$o3N_DIrRH^_a#x zS1K%B({JXmtw(@35U*wEIWpj96@HuJwqP zH?2`Amnbhi?_%^7vj-^V#{K%u_=TUI2!q|##XfR6}aAtg+r#|Lfl4a(lQCw8ZePM7ODY7*;=mY&$W!*EwDU3Aua#;C^nHsW=~ z+NX$qlYGYWv0DJHOcYFhLEQW?DmTE}2hS{VvH<||Z##r|mZwVTT!@*d1bVV|S8wpv zd9fnL+OE>?5I{*Bix(eV^OAHNZ^;o!QjYFrdCQ9dG!NVTeWa;(yC3J%T1*hJ=@Y}E zI+daVw(=m;hWgvC#s)NRS3Phwf)m>cdGOVe$%tn>-izD*#-@Q8J5p0FB(*6+s1Ub0 z+^@*xuzYoX(t@Bh5FADd4ruaYV(w)YEt3pJJssHvRPJJb?Hs9_ukT*Z3h|uhb(8Wm zLCqv3Z=+@`ah7KB1$WmRox!G zu39e-4h-rugSg!!C2=aZ0eCKBlWnsIsnu_mry#L2(&fgs&LS!ZaRpG`ex=YOz+Hj? 
[binary patch data omitted: base85-encoded content from the original patch, not representable as text]
zV8>IfZ+D+94Yqyhzq zVcBm-=_*J~iGzdPL!N@8!^7V9?#;#Dl}6^k5Xhrc0h+J7R7>c zghO_&FXa*fjB@`1?2Vh{mcb)ZT_DnX9Q2 zUhtMkCRaO_%3UKE5{of;lf?z|W58j56h0HJ@MDqB?tx4OAUQ~R{(zo|y0|83#mCD& z=}petpXw&@d5y@?9DQ+3s%+W9vZh(Z&)GO#De-lsM|U)*XQQ|hzX&QMhts18$vA8u)?eoxYet z6TLq`<$CpN*rk@=#32L^zov)t^pu?1e8tf+Tgk4-j1|KLqO6~mAF(Ym6r)C(We`a|@pDDUy4gNPFo4UND}28k})dtuyg%X^+FHnE2O zm&07HY-UA7EqMTf_I7sEC>@bMExTvb(7xv0NE9=3B&UD6VuZggl1uOT~_E+T@ydAV8pQt3A7`3A$#ZiapReb%@%Q1M^;0^QWa^IaS6jqO34Id_L)`zTFkE0 zNlp0|t%qTLaXS9(X|lWk!}#P)r-NPbsLHob$W`1`S1rDsJy|pP){jNV_={)T-oBXa z8R703c{O4FMO=f^oDyQ3S*&<5Z9EzQ_FU42$nd_ToJqu>aQNJtgY!2 zso3+QiVD#I52Mu*{+@}-3gn}3Bf<+RneMzlcJk_WJ0WH*t$(hSnNHmqHq{_KGFARq z{M4=W(=R}@N^&t0JV>$%uD~FZLSVDs%Wt94M#;csc|z~WQ-t>F@mh2O=+(7<#HhBFh`VECT#xt3b5|ZxB|rz;J3KoypT`d-j{jnWuUH>wteTUQo`m^8bE2+)*1bL zCU5v%IeQp3r7c_%=q5DP)-OK(_@P z%oq4)25u5uHZ{*77E7L2EI?SzM7Fe_$*;&FOf9j?VvY#A5AEygx25 zv3GSs2&uxS@UfogVi%#xO9bK#!6LvwlR$DKMI9F+9@Zf?3FRjCIsN0s@8D=T0CfW`Z=-94_Q7HmO;0%X`a)Xzmf0q9RPR7o=MU2Ya#9Tr~)(!A&k*IkH6uc#itxZ57s*0*1BWPZhrS#D%5Lo9ORL@ z)#bH8n#`cSi#XmDdJh|1J5#u*zP`n{43p_# z{+@G5a7WK2KBj_AC%68-@SD#h#|pqE#VD^{jE7a%mA0X$N`Q>;T_$u(h@7i0Tl`>D z8xrXi4E)dVyD#@{RZZgVP12EotF-Mcm$;}H@jjE&Cg4w_jCSd#f;~ZHS5!ARC3q%?*i=NJi4LMz%#iGd87q+CGpUwR z=)S#}mh1^^-We;#>0}#Fy*ZlVdgh>=Nw;AJ<*SiEJBUpv6A28WG~P(S3jiMcksoy7 zHz$K`gix``t+N*l?wVB0z7Of*U6{>yF!P!scS6a=sg$>e3NXI`HRBZ~n{?4!y8HJ~ zagIFJR$6D*bFo0USf{ElLwDZIPtiV_$O)+U4S^o13V$>0VR;K~_Ii=A8pqDneV4$` z)0do5+tm@j{Iw>aqlw!rD_)6!mc_Ql`@m;sqm{-0JjXVKx^KO+@^hWCbG7!&{(sZp zG9vY!`ybwC1qwFbI>JGM#!pUtau?1!>6@v-iQBQ#g!mp>oMPY+Ipm)*OrkVUlS z`S@D`0!+_x({Yoxd=&&r{qN8eAU&w3R!A)M|4Ligxxt_RKAAOE)=gvo{{45i=Wjjy z8;A-dRFJ`3pvVFCvUmaT;QXp}TWPJx_Q_G-Ua4T~?~Zl_`kCmpy!`kSfLy^3 z3rFYyuCJgZ_3F`Y-e@E>51{kIJtvQPa4*Q#%k3OozgjBr>hAd^@K-V8j)W-D`B5`B zXik1MP6qHaFYkqY_+#fKfD&l%TAVFE9N$^_DY!`6N*ZsK^cPE}8iEnH^&SpokqlDrML8^wx_#a~T#Q)SAdYQ&Z0`&r63NV$}xSAQVHM!hBg< z2VJ$oe1?*#of^E`0w~@6M-nLsE2~d;PE(>55N*+(!N!T54cocJf9vesH9uBrrmFP* zv?=8>q%E_-t}EBm{f)RgS(RFyx!FXfLUQp@C3pEn`!h!&o+*5@y?-aD45O%pRe|bT zn5||zYB9$snt6qhsl3DxR2G2j^x8cvk_=V#fpwlmiCbn{%wmW{NzTV626DXaKfC^%TGzoyfr18+6lhP?u7rE1El`%zqdgWjrabZ=#8<1R74G^w~&{2NSQJy&aeOz4pGfpZon(n1uccjI0jwZ8edhE zSUfZ&^dedfK}fkc)1w7zA50A2`X26e(Hy+o*{Bf_{h9~HZlh_F<@s+Up@ii<7$HMt z{p8cb?#0tx@Nxu?TYPjxPU$49BQ~4)A@&FlS88f%yQO?|Mu|s*lN#|!t^%a^M$KZ+ zZH|@~b92R>MVm`^Kxt1(r?+W~SXzuJ76Yjn{Uos!GPCWG0cykGC7P=G!Nkqsl9775 zeoq6OZ0coeid;31nV5kyWw4YQYL(@$wggX2o$3R+Bnn=_H+}UH%fT6Tm$sLg$tBi zb@PKTQ>-UUgLithLU80d)8iup5p>@4mvm?b?_2~>0&jkl6x!|_p8VkKJFvXg_TLtVDYv z_+R?(nIZ85fDkn1nouhMBzwIay|Ng?w!SX-6ldIf+ex{5^J@R}J5v{+L{~gHyWwNz zsrl!*FLH{jBO@0DVEn|EX8YqSI1@ZNRN5=q&Rs*|px zue`3B@~CUoyx4>!m)YVhHP`KPf{TTvu?jfvsX(5XIehDY&K1|3kBodj{OZFE2fXP5AJMoqB}Td zqVZepnZKo{68BLZGQx$yy)&NG@}!#MiKPp}2u~MFQmWJGThWK6&!>hFYY8^x0vm!g z70Tvwl%Z_$KPVy5P9B>U?&x5M7BeS!7IBqcTa%UP!H&yE3;&t^iHGo_mRTw{1It{D zzH;1tvAla|bgFqa)A7qUNdef63D9MYyKJs=E~et*a!JI&=cCpYKd5ciG(TLbSoi>A z)AWWE|Do1bHE6199=Z9oX7QWdpp8Zp!P(m>8k95I#qS@-UiwTL>a|W-o+?{;xt~SC z$%52Z%gV>I393Z%?b3|h7)hOb#?%?TI2ZXh+jP{83QbS9@&#Vu-CFo&Y>J%yf&UVe zjY;o9`)z}?$n@e9);5&`_le&=`YbXsL+Ig%^@(SHOfeoNC)_%?Nu; zceg?LVRF-4bj6muVtjOk&Nz0yt1{Ko2PcdqVHwtfASYloh8+QK8LQ$Z7h>%jDU!Kr z&gPUF#I&-MdPui0)%2zy8*DiHA4YBr=W&|Fa8!sxEtxLG#!IUpZRq z&1058&DOiG8tm_X3@8`P(XJmGF4DYr$~>v`&ivebNv_8SFt} zDx#-(z?Cp@{6MJ`^%Cf+wg8Fy)Ija3Rw`vy)XSfi)iWCD;6(VbR_E$hgS32p5*yL& zR30?d<410avf_LPvz7UZAXFwyC6ot)nq>h}gyJE6(4-B<42RGYRn!Hy(BO{%o6Q16 zS7l%j6)xT?Ud0Cu?2KXP)XstN`z{Qy%#B5P;oqXb`~4xLWyG+CFs)E8QS}7g$3NiS zTo?x@$H%u=n}xN-{)Uc30X$S^nBLp9E13SEwtNwQhbC<|enS7C@z_f`1g&vV#f`4zm5cR4(A?r)gv 
ziVaQ)=V53;n0WSe;{RM=6VL6LPFTd~xH+fehGVIfp0;c5OyL(kN&v*QBF$5jc=fBu znj;LI!AOeDR-9Q!Tn8``-cA%7> z#@UoGdVf7~W^H`@d4Zf&9Jd@L<;(Nu#gqAd?JZ6>y86W>Jf1vDIt_?-o%D`ZON)m$ zuv{i?V`Y>H;$UUCn$M8uNtw8;Te57AZm*g+UMRg+hWc#itI)Zp6-Kto>77?htvN84&XZf= zF6GH{2`AcpLv@B>U8b^iP?}kxwmcx~~*b^pfU+A<{x!L7PN?AOE-Z z86F@BpTK$MUoAF%?#BL?L(B43>HeXP!BD&lOx7XRKILn~@W%-nYY&uEsDCVQVEpJy( zDO@lZQ49w2bg8^46}oV>>E$9c`Y_4js3RnwkPaG>Jt?+t8|t0(Hs;ghi_NM|Huf#2-H&+FXy9wRdcjm>2>9oL}Q0@wy6w%ixfyJDa3fEit zZ_Wu!Hw*RZ>nXiG)a8jo5%}&3{P`(#J>IF0Yv10ku&A#Dck?JQv`4$H0^8Q8e2oOQ z%9e7zoQpyt8kx4#nHk&#{DAm7&SuZ-K7=m^Z4wTWVv1F}&snWq2xnWu2252NJMe@+ zv=U&PnfEu7qOasV$3%4TGpwD@=-7>ULyrU1`$#*9?Kd6Vn+%Wkci<$iPsIg=x->>V z#N&M<)`v0M_X@PthgIhwi}zuw=r$~JeYVcSRH917@}R9zJ-m3fvL?fw9+K_zt=j4a zsfg_Rs(s*7sVwu{U)Gp;bn7MJ64T%@o`ieFQTI0$DgB)lQjCkZ_WrDT<-XwqwSzf{ z518;}mm8i3Wtym4M&fz}Sxcc-xo4GMZ^QNBrjXU{EG5|SJgK@t|BGh%d{OipBX0UV zgXYh&6U#BS+jkr;6n1TYYV*fuXb;Pa|1q#0&2b#3g$f**HiD$En&+naq@nsDYISW< z!rE~7m%9#PLSutDFur}AD2Zkxh*UyZB7*DGmgW7oC5@SmdSR5-bUE0oMTbZcB}lm~ z{ahoXHIw`572z2~4=P_?4px%lTw?8M(*7r?h%=O8So2ZVLZD;3+px}Haz*h;d0p#U z#2=IjzC@$7 z0uUu`*x)-tR567XOy0evQFRWj2hcI}P*FCwaFJLD-=g&psUbY{!zW8+rqvJ7Wg+N` zF;=4cu=|HsvSLb*p-~m6#eB}{+siB=)u{o>p@ZEYM8&Wv=|<4uTPst#8P9wW#--A( zcAl!}?Ww?8@)^s8GlBf)j;-U#ElBGQvU{KY>BBe}>GikXLnU;i*YR#h&tVoB2fa-x zkvH6uGpH+`XJ?{9z)Y!skB)szEqRNPDElo#Nx#QT2~370wkY=vR9Qq{>jEkhD;N6<#CF{7U&b~~$qa5R9c*Op zVlZJ$FU9DZd}mQ~0OwL$*xg+9(Zpxf7Tix^B)M|LU~1&NVzP8)FcMKGWT#i1p56aV zF+&Nrr1Cezj(0-u59R^j@>J@+{Q5C>uKES;2a_)o^4UFD=^H_s4res=*>2||y$y9m zEv?>!_Vd4dB0q5e8)-5gF^@F?!#8#L`QOmq{8K6)EN4Yb`uFrwD4qepc>%$}5l4=` zKgJn@KYHm8hVlzz5ool@H6VEf#9BYwiI@D?$uP_aQcxno6+A)D{b+cNq258eao&;tpWTxRTZibdr-@i8uWn6) ze$S_;%X+S1E92O+x%%|{FjS|=)gOn9hh%)(d>rI`v_)O2R8v^C9xhKGh_nEEJj0Q! zNE1_Es%)u0`^~0*phEg2=-%i!<9INk@KzmBywmqA7;MJ3M%=?8JeG{P0e?76TB}}b z-Qb8W951t$$KEP#+4G6uLbRba#JaHe;nkg2A;3mOP;dCR8T{7a6M%uUhbB6Kv@Je0 zit8=l;;p&U`pGp7g1B^beY4PW=jVT^k{(|J+hB}H2ldz^;$o*ky=ovIj%KessK;VN z0pa?4^V2S#0|w07-s@SlQIW3ddVKHaj>k+B=rs64M!ZQRF%wMkOSxh>r%fvP7P^?B zZxB+FPUg(uJe$*7{@ON(tK&N79}mRsMkIL^5Vtr_TTV=#1zU+Mk!ZHnnu}&xBjnfo zKlXcJVWxaP-22((>Ja78(d*-29B9eW`weqA@;HvPA0_(yuvn0F zGG1)>ZWnQ=LvgUz;7#JYuf!;b0pHc+{5Nio8oypj&ecm(V~^V(($&ez9*sBS#9Ta@ z2&P6n>U#Cu4pJD%2QwZQwUkr5AKuP$D1l2-3P%#caTmokhmD7R|e z^Wo3-NRE_~A6n2E)G*USBhdP&r+(22Wnq)t^mxQ}kr@XANk7B(nukXPw6arh?QYR* zghZ_hRb@>kRP%NhS#U9k?oTGf7cvE#Q1k2YEql;~6+dv;^k5RnP=eAzZ&P^Q|$~3Kx zEa69MU4w|$rIvbC$<144NgvB(ZV=I{W2qP4jPhJq7_y23@}E^`_2-U&3glAdA1c`H z^IP&?`-4e`wF7Y#T3Bsf-Iy6mhGX}x5FwX*=D){rOS{FurLwZLNioJ6Y>2`Q_^oyS z>pdwJ9+kt;Ff1s1zrp=zZO(E{3fZp0{n<-Q309d?FPJ$Rxf(RP#3@0d$QW}hw%HhS zBuWL;&HZS{<3DKgjt<@EDR!49la6F7dTGF%S$k^@BwXend|Kp^)+H;u`!pBw>gU_i zLp?k-AUX;8t3SSdOFfh8Y^a@lo$Qeu?NiAv#^DesM$TBbGe~)X!EUcku&EZiqtAQ# zj@a|8jb9PzbJ3~WlGLVD!t}Zj#Ks@dHFGZ-MjlvK*M&=f16|ngmNxgsuZqez7mb(k zq?s?88QfPpNb-v3Gg-A3o$Ho-%@r-hxGE@mOK9FMYjW{wPLK+Eu8`Rz z-IEkxP@2Wk!TBGZ*PmLV5Q$SvR;e-1S}$&%9&8+#<{9^As0-vja9~l(P;|&t)OfTs z)v;qkVrxN>;X@HQ(ITcwBS`3YH;$raAx(~278Y;pdy~z8lJ1y&Zi64gzR^q8yHDEm zGK9__Reng61b;_fdKG^!j$e5!Wl1(Orsn97x439&wI*4r3T!tn)cx6Z{ExM!(^Rj_ z-#kdW&W%!*{Gui#(zObUo(%&|cIdZmXGyuc)?*`CW8~FnMs67w)ZXHpGlkDk9#J2O zQb8q(L80YbNi}$GZ-f*6K#rA!jg2okFu#40Uqio5hdz4Mi}T>7W!%{;*>q9o|%g%MW?W8NWP({TV^hz1R0kFyGsu*ywRv>1Euu0L@zlrOGpUy z4rXrE^rra5JL(GepYZznpAkl1Z|~?8B?~GO9;*>cqN(nkOFx6-d|Oz7D~MR>(tqhC zt_}{r@yF%&m#XCDoFOEZbrr~kfeh`{l9`NN35ie8yo%tcWR%TC=nQgLKj(D~23A}4 z(!}$e-e-=0*}ozr^s=q)Ll23CDp-GZ{b2-(i`WzSCG}mJk9D?A$;_qPfvz>C))`2^ z3x!B+*iU%>z<^cgg5n&9zfn#NKf~)Gqra}pd`D*!(eKQO ziG*Qz-lI^CHH!V`LkCg0ALI9Byv@QbFsxmEvfs}7#pLNgmTk>fJw4bKS5wO<%cl<$ 
zfQpWnMl=tI{LLw8FG*%DyvCL0vrYjy_TZg?MI>03v>`BKU35 zEhHEl4WF+((@RuYgQivo2|>H_S7&8itM$(#7FW%GMB<187jM;P*1a}n2?@pwaaLs7 zEnhdOntC6>dfGTc7spa5-5Wiy5bxQ zlSSnfPi)4eZ26O!@E;5kGdRMZr%e$Xu|!01t(`FBZ-}bJf+NauuE*zZHLB-w?F>tX zarissCIu5t7Sb9=m{Zl;iVd zo~AQ@wj;E)l~^S^dic?aPwadMSc9@izo&`y57)tmd&l^p+U9KH2MZ+n6=j2?Q%E1b zad$}=X%^=^f=R*F&5U z=PN#;_x9otmxNYJHz5znF?>!j^;G+M;{CTBBkCtdZOX#ND{1;$L8f076}CaCf60pW z*?#aMY5a(uq(BY(?HYEuQv98{G%hfBa?d;%yQP1s15(dQOvPgk5X7*Ei7y=Hfq})D zqnuw8a3z-Y4T46e+}Nbt^o5OYTPVLKyJG*!yB+uDwL#Fkm*I*D*o6w?g-T6`4)(HB zRwkYmQlUu^_ChqexqpzRD#g`thaE8&5T!OnYDq(n{GL)g!_)=Fz&ZHlAun@FLRF(9 zJFldFg6}oF>qLp2-uV7WBD!EE4@jO7n-ntQsP5#ZG>Abr5e#&+5Sfb#blsP+*u`RK zva_?Rv=A03u9Ld8{S0gTtwXWaO`&!vJuQ9=Yab1hI958vH{qUkZ9J$68?UDiYp6<4 z%6jk4bJglomEi2x0@0n7Un{{vhITMpd;JD5Rz2woh?@t6kLyw@kyp1|W39o&wYbT2 zqjNja-`Hay1-y&5jSlau{3Lxvd)|Elp*x%22r2_`8#aHl(Dqt|ip!)6)>ZuM(f{X~ z-vO!#6F8YG9R%2yY0^T+X$11Cp!5vxFMH(pZtK8Yysq^63yYIOV9WRY2 z*U~c$Re5LYGws=Pb!^t?UqQ-gASG@S-S-`SlN)kt%QOHlymO0-^EIQ^_o^*`6e?@H;N2re{(9aHA*>1URkhS)A*q|O+EB^^vbjXj;jL1p}*pn)6^8D{jy)n}g5TPgbZS3*Rw@r(fm(PCw z@f%B1f+oIs?`-i!yncq!HPwd=y~#v}x>Lit$SghwZIY;tsG3=}6MHAs6}@=#Z`+=HETGrY<>D6(_hxYDNvXO%@CV)nhL3CL&6*vyWdp z6NVS7$&!1lY{`l13J$xk49U=l`a|X@Ukb4Q;&bH}GtJK`iIwtoe>%vFskR0?Q_uiu zaQFEl6eo33LCoH9WD6qG6?zK1Pp${!XLe8b4eA`01HYDC+Evb@AlT*S;K{Xsv|@Zt zUxhSbO|vs2yGq76UF0t4R9nPH6CN0b3x9_(H4FxQ_*lEGV6v(#>H#ksOeu*gOOL0- zBdK1V_hZ2};JQ&Z9+eC2AtH4;pRKGoV_lOqr^9VABBnQW<7aFUib#ev;N$kfTO|xd zw6z55a%Oe(vhE&V=}wBog(Z}x;fJKOuoP zq$Wo`Q7SDbGG;k}D^?0&l!u#t7$?E7Hu;dXBgVi%G5#Y+3%pW?ZMu8(H5xgc(_;%X zO@GA^@MuMhFfdsDe<2hE=o95}ueEojSBSne37>Ot0$+1)yRTkW;_0F!-qFZ7nJ85l z%SX0e#s|!^y-Zg{t+-YKfE5FRIq|M(Y~wV{ATC7w*`~Gch^zSQ9B~V zr9fyX23;Ve1Uu%BZHb$kyPPajUGAR_U50OOoiPDU^vLw_EWd2K*}nSQ!(vZUw}Y0p z-`mJ$KImlVU0IK<{-j_z)lyfdu4*xplcfA|dsSJ2+XJJ?&j22 z{s_C}c-?h6RKMhO<3p3YxlgYM4oPIU(ezK0wU8DG1a^NAs=n3BUldQmsS`--s2dg)}7DlUOsgQQUHm(?*C$GxB1 zl-%EBp24pst{a$Isl`TnUAna?Ctn08KD}uUhsDHUdnYw50`&SYMs34S%y>fLu{LZ) zS_C3Twj+R^ME|4n3e+9EFODLVLwqWY7NFssr6I+gQVncOuecOXoEZ9;V^LOWQBVz2 zA5L6`t`8#iTzW*hhLZG%xx6{_gFL4)Lp^_EgP{yShbF-8+|d?Dv23ZX-F%bbZMaDM*%0 z+h}n33$Kr#zwhqpK3z@Zm=B>s;mMN@-MZOOyW7}%F1%tYo@le)F2hQs89M?w>idMX zcwH;~neZ{pQ4wgm$=5HvC#?m%Tc0g7=hKlMo>o${&zyaKJ}ChnzsCYq^EUXo5~4}jf4Xk8M%=QB@I^^Cz=(2 zN$LOBz;9n9)$_$4wMOhbIn9Fq%>4_UqKdTC<-KILjN=OmDJs#1SXp`QuWbu^JNuW; zMV(qxI4C@7E55jwQlL{f9bWW%a({GP~%L_ zL=;dPznA1)p~Xj`f>br|m!);s@9kOS9)Qs&RD0?3D#m5sPPT(*chTFKql$)tCI~sY z_f0&n>fxK2?7BvAca%%WOVW6Xogc=a`gKi2)xa>ku{VSCI*RJKJiB{Lr%>ojP^fni zFrCmnJ-No_Ak||g|6JIZ092*sZ8be7%N*tKfb*B{n3TV|o>y{h zf|e-ur?(;HM%U*2>8&})f6cMY_M{-;WyTUHEHWR1wl$dQEG!0A7YSRp`$;JXQ z0w&%%EFB5uCsP0d{QlmBB>iF?;*)!_NKr%qwW?^n|H_q}KyhpqoFjA6i+=u{m7hrw zZEZr@uZLmnY}1|7a^#V~Ad><@QET%;Ny~v+#onX}K3i@pXaU0Yq=f3$jXEm)8gFOb zJ>obrFYjbbIZ>JODgMqA$qe2h6#YbsbwA(17?d$T7jW;; z_n3%w%1m&WCL6~Z{vFr7Qgf;eY4T~)??VsvTD=@qyfVpCV_n1XREpmnUN*IUVYA+TC#u3}6>~ji}w%r_#`Xyq;JSz`3SAhvZtc$)Pm86s|bI1=X(+ zeWy6;&=?S;nRk7W&v7x+pdhp=@Gxm12Oyd3TIVB4!wh^hBfcib9sLJvQwB{S6yKiVIZr>XrE}HyM z>kdc^yq_dXcP`i8fr>xuzE71rK1Tx)!PM4OClETVQuff%Y|0gCtKmB5s@K2NN;G-8 zn(AA~G_CldT)Hjv6y|P5BqP)Gjoh2U6UmQO@asL%Qt+?njfrL-suF~?9@u#A|8PB40+o-fgoG4C<6dv^8J3&;;t&x61_@d?#GHYT?8U zo>#+3?*a`DetZ?I9CG{f!fq+bFYl@;R!L1QFey1XLD#d_o{+?8Cl9awoH&L z`}RGq#pKg6m~`|iKuT~;X>|j?bQNgZ;dfwPAH&+6eC@ks?pGL3FR?HLi;NjpL>DVj zkp+!^Aq&*TM8WPtS^IzLd(eJx=W5dnB{Q?wKTegzmE|B*<`e>`mP6#N!)Ci(S;qIH>L$vcxSH~9oSi+N$5`xeCSRp zr;)mIeN%O_w4M9k-%D_W=(uyQh6CU!P3GcpTC1X~U?KqAU*^#9=f4zg+MQZi0A)3K zkn3|k6=Yi9%#+#K;QtS>Q!rm5qykrD?I@q>6NF5^^W!^L->KYBw`#-V00;I5gwC7x z#Cvq8pv60u--P0PkFs8c`}zzO{8#wyNDbeuW7b}so)Sl3@3`ccT7cx7b&Fi`BrME7 
zCWh>yiL&QX6nj4T-pRW6L#G=6DD;8}yZ?S)$13cXhk>S?4nL;OWyG?rRdRUZKWC}K zOI0l8yQ)k|VxJC|`h=CHRZ5~TQl<^))mT>R*DDCD1tN% zDEVqBl%WlNZjNcE8*$P!m=z0v8yxpk)5YD#_~oZcB@>3V{N??`2b~(!3ynpfne9A|Sr{L5wD?eDGWAE2#3cE6#bNej?bXtm zrA?}_IV~lXVMGrp&em3bZi3NEtO;3=czEmK+Ks84X+>4nM{|F%vY?J+CpBzP6GAr6 z13;^UXFld`N)XY(WJ}J|S)})emI^0DImdEC0DGv_p;7!%-8K2%stVV*>QCjeA#>9& z_hE#16VtBSW$F&EM3?+{P0b?5rG1$Icml6oIEH8V*^jA(jOj9WdHnd5Z0Qqr3&oGZ zkF1mAu^;2P4KG+z;s?|P6lrffDVIm7}amSG#YI2qFu!t=0mSt8V zA&GauX|mv=N`2c?Eo8Wc9Kb<~UYPdZL+<&0X3-piYLUY`RuJt2Fwx;MAT4 z0keXTqy?&>9YgMT%&~$8EeKMI7XH4)%l77dZe-ddszd)rz=o;citA~cbC+sDdJoK; z(PVQQJZ3pM>sJ+m?wN?J?lK?E=}U$M(xJGPM2(x!1JBDhSZjiQ{!+JGl+72?V7EE4MdYvaIiR^t+H&(5EeG=L8LMo2u z`amvrEQ;NKDn>>c3)L$0hB_YXsk(b-i_qiPI_9Gx%?A9i(F>!=PeO{iP{l20Oig}Z zee$;>Y6j{?6u#~os=4ffI&vheC(%oL&i}aZdV;pu^&~z!?oR~IVFz4Hm=x$-rV~?6 zlV~iJpJ_7n)seQc^Fn^U{VD$eb%gJc-CMZfC{@O&`);cRFPlA?4NDA9AvOl+C5 zsHty@Jado?N+rN0OVn*%PPfg@qDzpL#onb4auq%l-@&chW`QuNkRS%L_sq6@v86cg zUFz+S&_3#@Zz}bC^+v^BBxGF3ARLcU#^c*9txZ3JZXLy?A^L>u(TRnb9CaAJxb)$~ z*|gYhFt3D%FXd3OHP=?8^?}8#fLp9armTS6ynfndwhLn<;#2NfAck%l5nDM#PYg_7 z-W~tq3;I#UBbawp(62uIhld4QTErPJ;p;_RsL;;VVw>Fp;9ThyA9)!lv^JczzFDa; zJ(CcDq*7%zGr%nIrVm#oHXjG9c))Ez-YQFTP{7*OCq~s8O8$Og6}_N|#NX0K>uAu}_O7kI;eX^@82|4OF(sstaW(Rue_PDps+LS&ORrdGRx z2}EOWAic*{mc^qmO>F?sdAa=$pff-PbXqd+!Cb`wprc`)s~A*UMWe^qYkjpVVH)$t z|6{O51|%@Jw2S7&nV(foIKjCwtI%*3t>J=OSprqr%_!;H$_#5NpeVrSW(PHuy_9 zd-*8M1-U2;jL^f7h@jsyLKYlUZ08fZ0vneCl_&#R9+kDV45nW|jGA-UL-6%UaoiG! zn0PyVU8+pa**FyuS|5sdr&Ln;dc=b#O0}SD{z7HfwdFetUTN1?=SegYgb8+3c&C{T zCH-;phA)+T_`^?3#(gp1NZ7k97r8Ew=U?dWt3coOB~RDyD<)aJ0>P-;*1YC|3~vMo zkDNYBe~XQ$BdOM8=V!LYq=HD=B(;=W-eQ|;Qq_^&)=!QD(fd&_(eS?nevI0f$b8+l z|M%J}Pw4x}GEKK7dMtbloA|bv(+0zbm!^4Us!g3v!fYo~Su*C7*>4831PntlQd@ac zexv~|B}a_b#`qMloIqo1`c*8H_1y>YRAeXaAkZWmlQw$xRr;Gg(>(`4|L&d&DNo^A z(F7C&$&?_`Gp?LYnS}ns--E>{EDrW$n!!9H-qN8eD`QGft5So9xyY^ir$Q#}I{`&T zaO>K0htK*JTlxwB+g1b;Ilpg&moFPi?Q{x}aD0P@-oevDqR@FdqbL@+>LuY3>SF!|-C}Jzxm= z)$HZqBQiJ$v&*IS$`}0-Dxj~gbaR{4j_L7Lr6zbY#;wG5R(>6t_`Wys9TAuDn8_Rw z6M8Fp`T7*dIJruS(NT=v%)%4zgreG9yO+C-2l%;jPMJTaGS5n^=bUg+EyPB2OLj*^ z|2^xuHeb&~?fj7gA3^}hT7K5Bi0D)3wL`p_Ch&xJbbGdq_^2g_41Hv5FtLx69+fX{ z<_e$WL2jsrkF_lk8WE2AfY^<}WU0Xn(kXz|#(mihsSHdQQL;kYfl|hP27A@4E!$6g7gm;6w6;J6 z+pA@Zj4wVVlP9-8s@1YbjYc>qqY@_~WpyNJD>MV}vt9Qv{|9t>Yy8)?yU~lJ+%rbs zW}a1#l&H**#h-652i~20o4=fScsk)iQ~cuRuBGcBrK4dMPy~-SxMWD5F!yoY6e3Z# zYK?IgWn+Ve@F^FQvZ~u}-JApNN%*XrY++MCBiz3-3Xh)NP2$thXNNks@r*;A?P`@U4RB1YLV_EaiKh3rd| zeW~ojm^S+|vc@pTK7+vw!;G2vz2@FyeD1y7KE99N=a2I^)9XFn^LjmB&waJr9&3!; z9@{ynDzBgmi~$gD(!+P-mT#U!1f<@Y*Sq`#L|AuiY5_99Qd7yiTf<1{_$~Byp3V8E z2deB`+pELp-*t=s_^^$@(<{*ZD08Z_CYuY22F1#4mVJs~HrcU1hc+4TEI8QQzQNzW zo$<8j@Q5}uw2#}F?;JhjH@hx@k-g?20F2Ti&)2i83^4p!E&dyX?|}`2=lc8~NtME6 z@W44uLS#$6J|>nbdd{KQkA%}Z=y?LdXN(wHg(Qd}q3ia<%rUnAE5zGe`yVw>!JOQz z*1DhCP<;bNYHA@ZbCH~k*Y0$ncUYQTcJ$gBG6*-Yfux;pl{f1yC$}%IC2)t%?q85J ztV_Jl6zHQRtHMwapmYTRr`^AIV};=p`PBi&?}S!Xp1q`@fms@mej23|-5~d)R(#j0 z$-FS4nFyRr;;EAU3@6h^GrGr|B(t?10m3iFz+OC)li59ipb6X)CM&S9rxA7fE~iiR z@7h@T?Jk9{0T<`gaOi)8d(>9w1qUit8VsFy6Ze1h=`KAQQuJQ-)*gDQ*7@66jH6j= z({*Wqt!h8il4h7skd0RfikCP0--f-E}gRWZy!Mrg2CqsGSx|i#kT!wS* z*9J_W6^)-S;@R3$l1HFd2uAG>+$48*;OPr4*}k=0 znvf6<@B%QCkcIXPSINdg@EsrBoPhO701)ahbNJ;=3i{or5Ls|>;9bvX?p-BLpziB? 
z#^#*EPi+9JAiX17OUvQ*U=A&1ozA@RlQ8oj(z15>fo@8ae$%J*JcGmf9KAv&WDQ=z zgg8+&`V@+$=%2kJ#1)-Xo%FrpF%_L(o7g82U0?@Ae5*T#Nt!{G-auUyQ1P~)2HEZs z<4<>&$j`Hi5F7A4sckRn);-!4$Gcxpr0QzjE^@Ap$2ny`J7u>0OJ#bup3DA>TZRU6 zGeZ@hMxcQ{@jcAZY6Adjd+7CH%nn(M%l;KkZ?4@n6fOg9kyP9o##lg^$DOUi)nvEv z#csK|vjOMX*`EZ1$r4jh5~h!p5yf$dy(bF4efz5r9i$0TjtT9bULW+(YuoLHbMZNXL$SUh?;o(e+2{pO zVEe}5d_E3s&lkBs-tAXe?Mj`Z7!$HI7j6wTkyQ(NS59-N zLb!GQY467OHPgFi=lVaLCz9v6YLty8q1gf5M&5b*N(A?&SFl%6j{zyBYVX5{?>Nxg zy3M|FwB905|3_>!e4S$U@R3bVaJY{F9_BckuXN4$K92Nr+<}>db9V1rSi};v)3!s4 zf<^r8@RLJ0)0sHG>g7e*!k&EZG6!Vm(z{W6Rk7ZkU!}-Mru;vsDQM{nc*Q*_eNR01 z!uIXW^mC26iLccUBv~tZyBgF%W0@!^tTxyKVDk!kJ`9KY_*<~~ww#ME*Gah96cQ1abTNcl?j=b9oLNgqDv4oUeWX`ZmoLej-I~;U zfu|RL9Q*69y!No+mSLooD&yqdjcJJaoOauqJb3i>yfH*j*UPR^For@+2OHB{w{WxYj5={t!yAe}cpbFP17hL@ zLKbl`1f8H4RmRYHtHgm;>q|)x*r1x<3zEjW^n`}V2ecHpQQCTqu2Y#WUd-SxgkWVw zmWJM7&PrtDE`pR@u^L20cH2Do20CXKqGcB{$t_iI9txYXfv!L}3x#X@U%zxkevg0> zt4#(zsv2wT4Zw=U4_hf)vfY{PL1`l67ba>Q-^Qt>|p0usu>-hY9QTknGkRggNB6=xDJdf=AP%8LAm#&qbxHY*&UGnY~ z;X+VIYuyfsSaIn>cbw*0TZwL)m>P?nq zo1RwMvAZKpr=(qRn-$RKO7;st=yAFHox>aW&&=9MHx~;H2@HnyJM;&AH!cc;#wxn% zUuA4A(zWM9f1wx%2Kw7G1xlvN9^K;2q=1_&F^c;j0R9dxlFZ`DcoS%n&II>N<|`s>;hxjk1HrxYHAuAs^9j}t^2Wa z{Aoy1cCj1;G_)vcMZVp9Q#<%sv>)<$EQ9(spZ)67iCy%(0citGCI2IJDyNT(w6X~D z=3BJ@<+e4uabV+8LI>%6ofPL=x0ZFY(;;aiZGbXI*4(X};D$eSilyr42ydqjx^SKc zOgL9yo@C@hCYu4^;!_2-))!^9AMS}RWEhp_=*ceOpA#6R6pRb} zB@!{@cNN_QP@xyHwsB(SDLP}Wac*d4to&9Z3Ec-w_UuOWOb$97zWM6F7$L8=hhEKa z-6Ly@`23OHB*BM6Hprd+cXK?~f;N8mlOH{e0N4M4(LMIJUI{hfOU#!6wN)P!k+(0a zTOz-$oFtqsI=esbM0HfInu-*>uPAgG6UV=Di}uh%PTb6zf-%-cdJw$T0fy?{yLs}! z2F`kLgOxpZxlb~%_wSIixATnDw+O~Pb17wLX%Cq$7$Ww z@6^R&-ak^;4=T~m%#Gx+9RLvSOyF0y=EWLKn9e$Nn z6vt_2=i)8`ykU7Fu&uk$vLB(8{aKD40 zG*4nB{;?oL-$W5<)0?(K7t{FeN{^(idDuY{qqy{si65upgNA}GDqfH}fvCuyFEq5g z5SfPyvQ1ryjj}G&YI&Y>n3YVZ*lrNG0PE^XtmjpP@ca2jiDlyJMGR>kpvHa^Y() zPly};Xh6l6z*MZNt&()@j6vryOx*5{>bZkCme{El9_~FiU2;O~=CcY=o!g&ie2X9if=s1i zP5yu46JsZk7ccUUearD;R+aG2hz?LvE}%}XdpGEsF+-%>?!l7EbjfNJg9Nf$Zg2A0 zV!x=tOiPXQw3cQ1HKP+giZHG@n|{qPr|}H-^(z*=xpwZa7rjh@Lw_(I4AQ&MBdgNZ zQg9q%dLIP9A?115WUT3{>ZGKVWUfBAYMWWaQk~o)q;RhKRDRzM-V^BoG!9VaIw_#^ zF0{)mA8v4%iqLp>59M9W1dn;^JYSR>jJ6rtwb=0I`L$G@{jPD+D^YN(Jezme*vU3c zA{XU@I@hqtrLPLCs7E@B_-wauLj9Mvg99YV{EVQdDL1`b=mZ-WaFXHnXfy#URn5rp z%>(@IlUgswTRAn=kM<(V#YRoFA#-nlZ)obFi&gI1AdJ8bQ2(14tk{sZ2Z5tOn^0ee zs3)5$WySl1YWKato7oqL5$thAmW=zJ4tnB+LI=y`B~ETeqj&|BYuUc|EWu#Fnv1008r zSTPXq?p3-r@mW@4i+sR_Ct7+v~gzAb5)6sd3;=%vxnZAzrTf+ zoyTV<_(eJ zagxG7ePP@4Qi4JMRrFHvxRHeY8&Z#fd%xU%qhx6E&JMj{e`{pB?J+w&uChFH!*?93 zF8G8~Zi@U)Zu#ohg9}@~Fa;W0*I=Hf6n#-aThhGbfg~DwT}*?tl#;%kYx5r$>=83v zEvMWYsR6)BAz9T#3AQn<4KH#h>7-S+* zo03@`tBJ`*A71gnXSEWf?R@!dIo|q=a-7&)GC3(^_mD@e-dp6inhzQ*s_`v8I=f=tg z>M}0oUmmFK-v!)($S=8GDq})XOIQf?z5oPuWIa^cRZewaD(r8zG?|NIn zd-$MHjyi2U4J+qIl#ZMmFdlM^X_<|w!O^D~2oSEBGo~2;6A6HSRPwI%@r5xF8F&I_?45|K*V@kc@qb`!CMgl9B$!y#Ba}JahdI!pAdgFF|*_MEm1X z)YR265GOC9XuKLrl7l??T&=bbGC_7q5jTpQ3Sc|%UvOs*n`|l_DFTtE#*1Mh|Xi2Y?70N+`d-zjcvGmcDq^hzg^V0KJ zuFH9D6Ed)j>kkwgq6Wpmn~C!h;tb9GpXF>EvYTDrev2tD+E}WT)zaFMmD;%gP{^ZM z`iz2kZeakpZ4Q~o87?jcM1Bdc#vH&!pJ_@<>Vr4!z_c64Z%85FpaWMB#09>vaRjJm6%P3uUM2fDjqk4)=GJFfJA#2h**kW0mm6&6ck_YwQf}}cXEgTz{qYx4)B*x( zGBPsu;TL5!2M{g3JhD72K}Yn{?Nxbay`BK*Ha*g3OlSCi-!>h($E8JI(4o6DZ=lG4 zZV4T_y=mE7l%XoXeTVZj6eWOkDMC>B;TQ&`vC@m!nFda3GVI=8UNi#^GW2M$h~_hP zdN+X38vkxaoAmJl@TTPDshNLaOaB!WLy&ajOO9cu(8G&qZaXI!q{2)WI3Z^u47X_I zKHIr!A3|00Dp-pD*|Lwlsqv5YvrcqMs&K0QvWvH)%O4Db!d z@r(UxRK74t;|7X0R-+VEYDvHtK?(?$2#2`8@_~JPp0gy#U5m7Qe2U*MCcT zmRJCL7lDn#!o=-Tm4cH&~kn1ihyguHGI=|PP-S?%?%T0Ei!~4Bp0X-TjM$`rtLkn6 
zRWqYAxbKaje0lznyK;;CYG)Qd3$0tk;v@iMVeT( zP##pIi@TT|Sb4W~ciXU^J`ZI2&34faVgm9m!3(NjiBOPO>7_G)cfAvUSE8$V=qFG~kc!rCy&WV)3R}PT2;}A~n1uJM!|8vN*!x0oHAP%;)$bU@MQ zt7Ug@v`u14Py((>jc!8zxcHhOVaMrG;n4jlM$49ext;&GJ2g}GWY-Y%qY)_*LEdJc zfvuhGYX!AoY($f#=R$lhT3PK9$9|7@dgz+VQSrJsa>fm6d5jCd)%N!)XnX>&Dq#+p zkM!(bi=Cvk-!FsuwTlIIBjC`!yzvzILN3(vOSxmmcuuDWnZUb<_e&pnNPePU!s>QI zMsm;JZkn{qTf78|!yZMOcbq3_(AZ#VC=u)~moWy8TxRLplcW|1S(p zob4lcDdE`!>E_~gWgi2cq24_Ayb0q#w*urgLZ+E}&xPpyGfk3hr(0Q9kY26*chvfx zrOed#FCMC**XYipj7%B%e}Bi=C(r$orvCHR&agXN4xxq)?{}HSxIw4CXuziKlhCm` zD45xW`nEem)6QJjF=abCgD}kr**_fgglO&Em0NrJCAUQ7@(tWM^5Mcg1lfwYF7IJv zFK3$zrpjHyFjAG$k~5LC+!%8dx}K2*D>F&ylEN$V(GxbNlcEWGV$dMQVm^KY162Zc@-TZjU8TJlUYawb<#sRhfZoR1tP8yr>N zg^5CuP5^;4vt%RZlr=f0l>S7XpdTi4v{Rtr;V0lt7pnBXJh1^UHC?)Qdq!YnZDZy4 zcpnl)mq)?ApB3Pzo!{BwhAhoaKU+`5}OB z{5hAJHF$Hv9s(Y6btse?;h3~M-F+1LQA?K7)c_?s=v`hRFz@D8USIlrs6j7p=rSV5 z@IwcpEvffjJ6jeYZ%iM9DaYBof69#R|d5&UB#69g#f$ zw=!39KJoH6D8wWsznZGrKpwRbfKFWN$_{KBE58Qt##1%1(7{9L6&xw>Ktz2p>dlRZ zyKdf_a&n&&+3(gt`uftvs_I$M%|A!m7e$ElBt5u)OEZI-3g{)=JzrWkW1$Rg{wt_D zl&HCLbvVl&i^5-^Yn+uQViy z8QvzwV2~U8-B3FJ^+A9bH$Q)VeEgp$P8c-Z$qrDEk3KxY#p0)~ZgXICbd;*U=YJsW z|GVij!rpN#O(FMzNVV);4twsM$3t0oU8?`jtU#}y*>N%VVwm7KhlD0OR`tXc(<>#l zN7Qgqis}N4OCAA?kSiIUVYo#RaRHEx?6+^}OyGirJ|$gd*jN9I;f->REN;Ae1MI)L zaN&@UVjwihmN9GVI)DKn=5{VKz86Ywq3^NsRC9|K*KWEx$ITPQLeE}hf(X82NZo(N zI^{ze7%RWuTrIsa+SOnZFAs2sS}xMf&=6Ul(-~L4xY!nrF&VnmctIOY0DD9#L}k>qvdHg>5d48Y9w zb|}Mii>R`Vjw&HBkr!Bw#<*W)d1ngjxmB+C%ZE2kS_N$FKuMJTYX+1zkNK-l$rAM5 zHw8HAk6G%l&%%ROoEGmEj=0!W*F5qn$X`0DF7Q)FBrEsc&BHcbMdcdWc7={8F7b2c zEDBv?dZNQD{Tuo}W5JA1EYEA#me4VUL%jJ)lL(nQ(5?LZ(V@5)96K*b;sduQ;lf*XI z2O}G|AO6}rrn9FIA||!>uG`!>FENBJe6uvsHU?Lf6_T+GcA6wSTYk_9nvjL5B*Qes zeW)`(>J@BzH72wvD`$Dra*{y*vB#a)p_5O~q@sp!LVOT8a#>x$hfsy?+>!Q_h=#nB z%LHn-i4)@a%#S7`cdpr5JJg$U@@%g6xA*N(-%d@5exEDSi|j6jWigMqZWj(rL}cfQ zXV(H6IpLSeDAZ@Ir6ct41=Ee&*tqBaLbh-J0CJa_AhiG1c!BpWDZ?ug`Ej6v_LV74 z;pcrEB>sgj?RC#kz!`0~uX8KMtxq9bUW;ehglF0;3_PmMRvEN=^Khve%cB!ddH%A* zxaR@FHHO_NG{DbBKOorQ*+G*Nxp#;wra_+bxA0)dQHdQ4mckD2^CGreS6sfJnmnn3 zga(Ajo2#yqA0Cu*hO5A;N?>vY1vG7ocV@vBSxTZJkh|-7*X~n&II%kM=-PePE>Ps- z`Iy=#8gEjjWPB~3S4UmM-XxqL{_!OyT*@S8QQ1%Id?Z!ba#{QlHZELJcR8oxSW$Np zLZUA}4CShiJveXaq+Cl0j27gt{xpOtu)M{4!dNZ8wN^-4N@;L7J9mD*|A%*ag}=Mq z#l|@KVody}_1ea01+OE$etLm#5$e0fwM?vZ0}iXD@nDn~KFme|x`Ec9@^RPJ=;)KU zM;23H4f|zzDzUNTarC2_ z@WgfkN5!FzuW{ubLlbl6C~ta2c#n1n+VjUXstHcJb4ehaFt#nQyHfLnjujl`Wsx?o zK5}NadsJ|c?6CLOiZq({RnMW`vNx2RpknIJFN-ayh|Y&_<r{GM@1} zD}ERz*B$CR8n{jg9IR@k0%#|s^gY+>l9oJQ{*@*K<~x3;O2`#>Qatx`IDfN8e!saK zFynkwR1_Pb2&+OIx#n6zrY_fT&a<(jCWd=mXw9EehWKaBbbTM*PPW%^UT+yD>A3m` z^<-Cg893w3*zS^mZqcRq3BMs8LjOZ`{x3NEH&andWO3%`sDR!s$k45G1yq9>NdJhP zPVK#TM%za8Abec#j3^o2yx*Z zUl!VT_U(YGobKNg8q1f!l?q(del-PQF~kP?>}$-{6C&>$sSW=&2z0Ym5+b(yGKh3Hw&z>pOMw)M(m+RUWa zS@}WgH#*Pln3gp{h>P;5c6tuF+FqQ#W_$6basS!GB62NAW13$aF~bwSW=KFs^UCKh zJs~vf757Rx757Dt?FW@$T+i{Fhh(NJS=Y*+s&7!yuXW%JbOE!h96-t6-K;YI=NKKH zn`t#JagV%!qPB=8DwHst`T#74y+>z90mlI#fD!1Nu(?mib>p;|s zo*Y?m@?Drb6*_YbaPfqdd%?|O@TC@+<(tM9LVK{uF7q*Vo%yj$ftp8EHWfqGk3b-* zp_N@2Ww#q=|21SfxzB<^bs$hEmG|*(cM+)q^y$F_vQb>F2e81Y_M-7Y!#U>?%iNCh77kaQd6 zXh>Y~t?U+5x0h`BJ+Ff24c~6{j%_oRu6eBJtk|QvFkvn6qh{E9PBo5+0vMQg9g1fN ztt~&g{;_VCzuEgBob+6jQm*c@m{_6ZoSGf$bcKM@8>LB`$1am>%>I9g9(8RP<(VA5 zu3|!RExf~ibs+Gx=0lEgGe11v?oP$Ojf`DfTzZh5jH#{wepf(lXkg;WT&daky$ z^@Wb%ev26;;6In#jKswsOup?UG@yKa|KSg<^1MX>!tnL3sZCs!`8y`Y$j#e)^s7%S zSLmST`LGBLm}Switm@)#F4rm%Xu@HKjLjSw60lb;oP3v&!b3*4$v2*06+uu{hd)lR_yvcZ{x0wrqO zta>_J`^t%_@@+HTqg`Xx9s$i?QCDN|E8$Ws1PO=PY% zrTsP~A)%(|PN_|A1h3>s??hFF)z@Zd9qUi62m1-hGASeWUJZ>2mgSTeip~Q5XNvMV 
z>lgF=_>*JhN-jGo|7>gF7Rf67zQf2&oMc>+^p*Pl`f~8HYmup3b2CsjBwB-Rq!VG3 zg^6TN=ufgLVfB;g!GT=cJl6wjH|&^hE+8TMT&N14^WutpD()%J%;W@Soa2jDSIj@o zrAr38X5>4z4k-^+l?H)aZLNo%n+)~F*N&4C%nR_N&EB6sV84yv_(lD+@2apoNFdVPD>|F3YG;dlVT^FAAVrczc-_=V zXZE>;{Rs7P@TT+zU_bxE!cmyes@$`1YwfRuik-7|yUyN*t@h>rIS(Ub%gtLO`{VYl z2uAdNh`y6|!}j@3*uv5@r*g3E|;Vv?_gvzY{KkUpDe_=XTf}7pc!OH?2vu7-a2uBt3#D&U%!pO*DFyB zIv+b!K36+%a~t8@9FTze)H3~J8^1LY41S9O(aYJQ#e7zZ*^X@*jL4{d$}5=@Y}fg- z1~cl!8UQk-(R~@MjACw907247zpLria>LIb8Odh~eBtq-l?h~V3xyi}i-RJueTB$kC%> z=MGC65oP`iRpV|ndjDQrR!K>TyZN2Q=AKmP&66%Coj5Oi_*!y6lnD>MYOAQR$7U3|H<_o~&i{Tv(P<>h^6B%}gL;>T`3 z+btZroO59>#6hV@ke!QzhbpFgm*cZU>%xFT8emESrj7K0<^cW*XKq|%!z z(uj{YCsb@g<``!X94__Z#=cxngUM2!6l zznTwdHhAa&hT#obw*$MR;r4Dedi?z{6X@}f6O!9^qyyY#8a%DJ>9MERkfhyab=)Fn ztEJ;24L7S$6%ai?cJC1p)$yF-!stF@q(ff7kSc|0Dk(Gvkr3 zOGXFF8r-o)Cb-PWeYiN#K>Dc?FQjO~D6I47V2!ei@xpU_fcc$~9JEcPtt;0h%+1yl zCK9$61@dBgICWfdG(Q|!>h`|e0uwX)>s!yYh5=V8FSq%ugljI8dovoc4Ru5#E*x9k&xi&Pfg}F zmf26Y%jc!Kw8u>q_Pz=X_r6ou3`2L%=aL_Z%BA#m7y6gYG_uQ@8C0C{SDAQeAdjs1 zn%L6|`}&gPJ?2iVR{#D52D|=XUIyD9*C4#^{lmGo#3SYPykZ6g*ysWWGZcihppCFTLV3DAk*5X&P|4PmI)l3@>1>5jq`; zKk6TKT>6}a(6Z~){ngYDcEtmE!Fo17YtKm88D7#kKKuyJl_X5LFn17B%YkVzisM`= zmJeNg@YX?I<;#PrDfAC32<3z6nHqOo*=O=a&%l-E+n&mXuJXakJ+Av8@ttG=fwXij z4Lwg+3EP}>h08HrGCXI81rHQ{S;OBNSXQ=qAN+a#0&IzF;huQM%=+zXUd<}xaN$sq z|1nA9A;UTXzQ@LsG)UhROw% zg9xIVWs7oe@9-2hS0PADH?rkXzU7l6^Y0#)?P%lJIeCc<>l{TpX<0lqSS`k@@z)J?+{$?n@_0=R;H?u5D%w@1$4H zoPXc6Sv~Vx#&BOtIFp%F@2C|o?5))oIJB$3eHtQZj=hhM^1YqmsvU=~$@*F~$1*(h zV8ZQYV5?(FO?s`Fk-cot#YsVkR=QtU zUNN`7kB$pc9Ac&6T~;FD#`Rt{s$GlS1-u78IE1;M!K*qcwDUE_^>GrF$V;Rif@yK@ z{Gc;EgJed_1nm`&=H=!)C(n%6YAw0VN43-DEDx(yRw9-ItTg5%Iop!eL+3u>*cVZf3%;6ffU= zm#^b7Sv&(HLrvL&{lR9Bffu%*Z-YC!FMZ(3D7h$V^`7+D2G;7d|OO zF(me!ZBU!73?c9R_6LOnT)@noB zdvG@`%%Z$+Cu1y|r4*ws=NHbrwX3~(ouZhM4xcL;M!3huRLm5ybDR#M==uWUr6pi$ zgFBBzT9oLd5pn7BJ>m|{bK3YbGARU}S2Q4^>iYRu(O~qd)7+2Z$jiF%MN8#KxV3yR zs%Ka*d&ji0Xk^Rl4oFyawe*SLQqn5loSf8^+cL}~AWKp{pn@#r-X*uhrLlLA4Wk)- zSVKpE`Y2>gZgbg^qf&S$D?Nv)yI=xv!AwEikBb!*1<{&iOG(4;c6A_tB4C?btqIrO zv7)DQY=!V#maTr6YF}uZSYfFbKQ-htfS(4x zzR>amFAQ~M)tY$@ln26s)(`RRuXuCE|Ky;j`L=p(UxC&s&biEwm(**<`V#a>wIIK5 znW{ZHd&!?GpH|UjQf9tar>ZvmX?#0z#9Q#S4v1hH%(`(hl!zWY*SG%lr z&>zh%B=vU6+9b{d-??)uZKhPkzk%82dwOE^SD${wWVnTx+1vM`HmD)*SHUj5yLxtV zbrqZWngEr@-5TmmtGOQMPFwcvkucFVD#E_0@mo4B>kirv3PI^DxM2c$&Z%AL+U#c0 zl3lmU8tY6o=petR8D=~^_>-VO`t#*aXSt2z5 zKuM1&8KW+{E{7)2C(zv-Iyx7eWMO&nLeB^zf%to~bVH zSM{=Q4+i5B+_5V;rZqW}oD?wc8ZoSR=S?lh0)DrPuk#bs#N?yLd0Fb?tfWtrZEpP2 zKrzE=kKR@c=gdUuLv?trRUG!^=cYk>Oy?AWI@u6zOzXEB=6R0wxOF?EJdW@UT>`>O z=e-SRCmcVCQ^;`wN;HKxWhgmNo47ib{Zo$G-0osq`!#ID^5fwVh%I7?*uxhGP#d`=VmH{_jq3vpHaT1orIhCyb%>spDF`bzL{P{X{otnjx z$RB5A8%ib<(jtEY1V0N!>sN8Epz!6o{>D-U z#!|knGn`0&(WV?!f>L6@YRONXIak~?E~`vxe+`v<(CzHqLHwV5^Ei`|4BP2}FQ{c& zSRKDZ8tpFewR=kH=ZLlzE?-)?K1w80DcgD~0OeN;bT_mofW5!b{s3T7v*l<%*^oM{ zPkQRsQ5rs&f-}A@Z+qB zgz3fWleMja*?;hq&3D}C{}cdpe`8@gbgS-Y{jvDxSViq?N}`7iOtdBH+TZL~G-xiG z-G(uJId>N_CB}2`mAg%Xx0Y{4m*kIjl44r~_sVuZCXV0pEMq2)}p!acCy7#Eul#w#jHSJE^>RIbRUIx;*%D)eUj+b(AP)@<24!#XI; zg{#hOFLQVZwp5_+sLMFVUo(V$YoXgL3GXZ+WH?mR*_oJ#x8!;HcIVT&jEH?CqK z6!@Hkgk{*aB$I?k^jJQ8`0(_}lb5glh&|-noTiz%pB4CykNwr%#v>>=AH5d8*;`Wh z9(|KY+mfy2sqOrO8C=H+6%F=dE38d<7EY(w_noxpl89ppJT<4_%E;jg06|c+{PWiP z8`SYzTG+W7`FDkt*DO&!mb&y)R=r|edr zfxJ!Z$LnIIZ}szXolo|^v73T?HyOg@I>MK7i`bU<{!a?W2oN4Lp0tm$LN<*IzV+gu zu~aMu+M3I{Tnl-&M=;|1WiFy=xM-}Q4Y4kcU34F<{m0P(*i<~|J`OiBEjG7~^Ucj~ zX{8;FcsLy<^m=9)vLCais8EGRhcZ(^Dn|%Jc!B}ybH@P;b}agx-V#~~zu*OESfVbs zoa_t~;L|RlDr!wKIWpTqc_XJsZC%LFi3 zE4SK+9^gdlTK|sz|Ajd2CeSBfmq)A-yZT%QpK%V<;woQ7N1Nno#}Ayz%fb4~8~49@ 
z0~?#T`ZPkXJBjFRw3zYGkiE^eRx542R9zs+r1!m6;&fb9lAO-~bf}}^<(WjLwQp=z zW!I++c$W6`OxR#Brd{kqK3aNy1p{}lh}27Y+H^%-N->Q67_1WPfg}~Zjy0>4EbzXj2C%2HHkX7`kIcW+xm8MnbX*PQ%WiYO}_x98+b9m{Bz?0r`_{)Q| zPGPwm+h(hub5c6prFDvgQ3Nzst0fPOPv!cM{fAfehq49{`Et+m{zy48S-Wp85jH4i zX&`-vv^t8$fI%6}R`9gw)}Vn8qt_3k_Ym~ZgXfQBjI~$FV|8|rdNz#fjT}t8RDnOj10D!VCgLntKw`f?)Nbd7qKT`hHSiUHCU2}!A`Ic%7wo2?-G=xth?1d ztHEU+bGDHI1;q-dbH_Pvp`O8--1F6QkBD9Wg5uYXM?YFltIUNL$$kz%N;_c`IB7SZ zlLtzDEO^;Z2&2wX8W+un{7tlmye&A89lL2Ot+&$pT3Zj;f?O}{4po)ISHt|zyx@F+ z8vC30#4^jCr`DK$RZX@3I4*kDF0)ixeB5@=58tx`ZE-SckgvEx2uU6A8aZ;<=jJ%6 zV*co*hC4hrAaVpxUrfs}&^$D3*9j>sxCf{^)ct-z|BoqBHcSdGm0*E;%Z4IE^@--Y zRNZ)bN=j&7W15+`yg+wo=CPi}g1~2fPEsJC(x)UAhRcoEjcLoTW+Vci z4SLU#t4>EPO|>D{C)R_qG%zu5s}~Oox_E-m7-E~9tO%)&8K(7~mMgN*7)G4>l!Ed+ zt{Et_s=MNMCS*7-$Pb4{Bz)9;$T8XHXoIQrJV5y{>=lL!q?z{#pjD1#sNeC$E>0U^ zyob#NKUGrfGrTvoOjQmeCF(Vt4|&V{GWlkU`)zwFoYcW|;rTGxUMbaY%|E2GAhm0pZG zG5|4Ugus8R7MvFbDsmty>GBCNQC%FZBo0=ZgMod>uDcP|712B2RV_L_gulO2eteYZ zj3LkPTZf>wSIv#G8>t3gK4yb{SET41{NqJHrmLz6U^xVGLtiyMm3y@#f*(72*bQ!v zocfx>p1<7M=m&eFCjvxXzOj)qWk!i|$}+H1iBox1Y2gU5UB*#-Fdj0XftUyrpK69`LmzS-P58|kbL z5*(}6(tQNazR5gxL+*QlXA_A6rJ##2eQ&b(eI)VFNn#zyTB_A|*t4OD2%iOtz>cZf z5|@pY|H6L%se&}tu>c{2azrew;SR2~XdcrSRWjDmHe2+G6vs3dE$=ftjxcljdA|@w zE<54Ul@59ye@uHGuREVIziRpBFxzP?#dY5eSRG0PeyR-K3!UcE)YR?$iG1{<7#i^H zcA%?58^SwSe)*Q!*A&HJA-zBuGdmq?&-z=p&4(YY$QtF(t+;QaaoKiRR&d^a?LsYO zA*5CflM#ZL@1jTOa9JZv@j~x(-}l6*1(RAh%Tv3E^|i**>zP#_$?g{j*+RvGLzgIu zy?3*fUE3OKvXNsEINx$(~ZL*hf^Fz$~06Wh^`xF5l&yfG9WHCYNg>+Gdg1 zEr#ulO|Ya<(h?5J+lNu{`dr=+ftN05&@csQ2{V#MEF>lNNajl;m-S5a=%j?krjd&G z7ohVey_>7{kjnBOrYI_Vc#bRBJgcwPBFDWje})ncg-)GK;}UuNw=Ze=|tVruUK_&PBOyG<} z$of11G||r1Y}6iW8WJ>Hx(p?RSwIQ(J_kE144Ow()&drOT=lNS^Vc2wYo_ZmdHPFM zY*$L3PqdU}<+Vq4?q}U?Gb%sT%<7*mO`nXc)MZIS;-OOK*O;H?=nwkKowFd#5hYyo;iY-8Qm=C@v!&2)P_~IV3RyhWDe~XC zZ$}A`qp;3WH*?+M?al_i^lD}${4x~pHqy*;LMY1ajWpw5pJ1BZJGYlf)G zW9>de+evQk5%>KP_a^mo(2yu{n4_d<4$EDVi+iH2+#9 zWQ(=QAENC*CyG{=o8jg&v;g5Do2F}0f0oZh8l!9cjLWR0eVeJ?VJY<=)TjY@##uRv ze0*us)PeIL(w%GDvC#T*NnT*u$Yp`is83B;7AVBBnRVujyVb11%D=8zi7E{@JeW2TjEagcgk%?ZX4fnA*|oXE>G+*9 zdgJ9kraGMTW>Ho?&*1~N&E4w~55phcT+7;)Smu>Yy>p2JC?yfbc<1pJ&$PC_DMXRs zVZ7o0N8EddHJNSub>3aE(m-b0lt z1PB&DX`zKs6O`U-2qYx=?woUobMBeT^*;CaJiotu-ETI_7jjz)J$^^*E56w2rquKmT>7zaNQi1hJTz>NK{Nr1`A7NyKW%vSm$irg?5DgxT}PV5^wm62XGI8F8A!p zrYg^r2uU&ZLjH3+FQCigTGG+O@y=cm`cV%p;z;e7g@noR&|@Agc54#R)o#=DT*ah= zf5{7N2Yw;d@tWyRnKMj#nAQG~t9WK~ZLG;nax=mSSGeNoQ^B!SYf#Y>H9SLcDhFOn zH^e+XU#PtKy~bQN`^Hj;6>L5}7`EL0{8ElvqHgpcPELhYsH4_)?C_!w4>N5edz^cD z^r2GIXJ7Q-$QrOZH%$yY76T>xiY^M zaZ&tiLVzLdtu*y`3XPPj+Jgv~IkaMfbe=4Apg5CYwGijr18akeN$FWWCqH`r$v;#Y zbYR8Cx^`B57i|HX{l)WnWeD38v@fGXg;V@InQOl}GW)sjw-4qmtu+;S0g>|a`#UGW`sUXXBx_f^sARxEk9^*68VjR8W33D4}AS(^=Gp` zC8Gls16dP5b6O)6OYM?%-tgX&-5z!Be`;Z0DHMcHdu}Bbsr9}I9$X{Unqi8rtD}!j zQU-rPIVs9t1V|OpY&wRvA6ApPHC*RzzY&OUD-G`@x z^Upb{-XI66$t+c9`+N)hL62iJD%|;5j9ZJK7eGSD^soCjVPJ{b|frHv6qvt@+<&6_oBU9i2Y|4NyP?B5Ov?I`+o1)bnpvI?YD zp(jLuZA+_JU-0-{Shut>%4j><5OkNG`F9Z6%Dp|ZJ%fVW$`Ec=toLHxE3>ZiK_eP8i&<2QWpp>* zcZ}}9mvrYdD%Ac~4Q|`@k8Xk`b~t);h`j;-I?Eqm$nJLFoKk!^Kz-^@&2t0_HiF^+ z$!R3gcf_Un#rB^m7Sk`{-X>ZnZjbD^M~5$gzkh6<_t-NP{OdVu#6azL#ep~I_?N6d zc4)^Z8a-1HO1>&tN6(G*)v;!5WsDqenyh#Z zgvwxe3Ewt5|ErOWq$VTLbH^3#DNzzQyn9XRGIjcOSap%vZu!2tR5#K+>`C>ZG86w< zUHz8O%&xN1RI=FVGcy3hs_r-UR2hAd=!y5O30uUm29kZl+k?YhrTR+#U8w6%8?Ena ztrFp$;=ksjY(|ViAPP>o3MSRJAIJA}sM@d&C3}?Y;sN2xdZ`x=83*@&pB&75L6MdW z>7ADCR-!gqm%CptjSK$<04vr2Rtc=a#EtF3WVcLGV%0T%G2_MRVtiMLhVk`_{?4D% z*y753G!f)u^9mWC$L^OG>Kgl~W4+LwjefMr;4nETn(PG{SKQ7@5@~GhxVD=1fgS6v zP858+D%Q+?1~5TL{I%QLT&NgG=gXI2X+H}PmChXWTZ*QG6G-(xJpG+C 
zUcJy9eX1|1*Kg9TaM>3^PdTi>!DCk@+k6#t5|?qUph?#aoY?0*0$G|z*S<2oIrb}5(H4}uovc^DkEd@|@z-2F=G`aZn#esf+QX-3~Vc)9Ix@X|hJzHxcOVs6Zn+6vFRqpqU z7*LZh*?{mUWr|hanVgKb{pUtnc2Sn5Th_qQf|VPHm|=3NO&Sk!{Dz{E`xjPgOh~ZD zga~yr1UlNO>kAXFsrS$?-7XZnBt+C%`)f-|dIC^$QM^}Aa1&4T0j%a@Bn(-Fa{0J| zMjNMcsSD5sgd{InF8{)n0y)f^x1w~w_tW|j>q4Tq-5H+n%GC9LfneErnnMDF&Drq0 zi!&#D^=!%uOeb6hmokj-XRQ6_5pOzUL}Y36)sr`1OeDT!&4(vxH-!RSpofaZ_n+@~ zGyE48*Nh3WB>v-5JQ*OxG^uluZ(7fL@|v_g-@7ijo2*?wXW>8e9wbCb$-!sQtHaNk z*9C*v#hr>CHC`BBCKrMvrNQGfdU>xE7Vrw=nWp4e(9CPt_}J5JIexziMA*)snfS>> zZ^iZf8y6KJRxRdi6pL^%7cF&{HM`McD3Hf1!SvR&Joxy!?FoCTpZz%AIZXbgD8fPmmpAmX>th<}m197qH_Zlsz1a@S z@76JIE(ZxxlLG}-77VfUmW~tj(2{S*>qQ2if`)c<0njP*W_W6tU!7~?a2sme5&(PW z#DN$Fl4nVV(*~9#zT&nO0&L*&k3*R)lja) zlU(~2p+yK#pu3A=%K?MicAa8d98#06%uk&0F6G;3~VZEo&e;N>vh z5;?i{4Y&BXRh6~ZodEeI2P6yOR;vi03C>V;X+8tR=L-y2Ge$1096PJQ?YBXWoWQ`v z9Shj3lqkE4y~>q>_uhE#{F}7NBGDnQU-r-xkZ7j2{0zo!n~^E!bL;K=gzZvGbbDJg(B)e{@B`b$rXm(VE~+&Nn!Cw#Z(3{M$-P{HTi+m)LgaX z`vzvoer8_fm-MCz96oCvP^0Zb3R8FuF#5w61=R$}r?`;|?6tLydHX>+SnRvsHoLFw z-Mi5;0G+fS*KK<;p~}Lk9p)v3urC3n+Yh>Q@wARTG_HBsLWpLff#qvGqM+-YLx+4SmGkh>Ka;l+Sm}=Iz(p8!tJ7J{3XNIR0YC+5ye4xjUHQQf^zr+kEuz z&Pa;Dfid^MfKTsqxGy^=71|xns`3p-JZBAs_n-=Rce!kSJs5TVL{3LqaQSkdof8#% zSI1&l+2)=YVwwiD^}mkD>Eitnc93vX(A*`*; zo5B(ZH97Y+G|#dqP6&|*x7*A?H ziCUCL#Ges;m)9v?2ac2Te5P~kUCFa1UH#~X^Q7_f^YXm8MzpESr>TPTyZqmXhjcl$=(At;QqlgZH{G| zeZJ5=W*?ij_`m0?|4Qi~xg%euch#0pA+yD?;>bg}hIa#{aI&|8qJ^7H^w0$}lf48> z;zV_(k0=>OvIzdpj1LWKV3d+GiG`ncLL1qK7?f8Fvm2w zY*QMc-FR}-s!hUgi+dv1PaNZF?F2>c>;~v;BC+;uW3uOf@+Tq{C?y z`q)eRx{?0CGPKr%xR9sCWgaT19B{5RmQ>~RQW)fWpzoy)wKg!UF}U#614{|heYzo%dt;yE1s# z=}$7bCWIAFlNJ6+^}xChnY$>TUk&$EKa3rVm?v``bL_Ek@akux@fXcM>~~YX9!XQj zOiw)@Aa}TKlBvACmA#H(m5&yBt%3*nZB3x)_`3n+FD;GT2ceu(Hgj@0mC-KU{AbWt zvdZLP@hVh(4+@Yv^XqwCIluEK$7IN6i%LYTMC3fEI;;+?qSXTjdsCEciaIw$eF^%> z`s-Y53F1p5Jj`o+e(xRG7;I_<*tF_k>)OsTfBUVX`GGNzzJ~OD2HMbzad4wtFPQ5q zc`9oSMQ!FUZG9-^I(ip@GZ4GeW8@UzuyLF~b1vjXBnMn)-^{ubjEf@PuXMSg`@+l) z@?I-&=8f2^Q+JbL?O&chkh!~VX{Pt+IpY0sH^FGVZz!y^o9^k1_nVeiHCLVSBC&<_ zF9a2#cf&)28JVWeXFw{_eE5^S3>d2IY5u+9G7x%s=O1FBRL-ZVSc@%>|Lr(+vvXNV z=PFf4hl$#-(B`4KYP|XGiJxz_-fBRP<^j5D6AGWn(vK1=Do%EJ-ZIX`k%G3mo5cu~ z$k7|Rv;1sBcc#UoU(PB*NLW3io6|@2HL#al7k5#BaQC1+tjHH$!XKK=IHY&Ivrf&8pw!w}d(_kn8@Y2o^F_uKotA1U4h(d0hp zjQn{V+*HT&gqCaSF8RT7qnReYO&@&hE&t+&NWWG%+TAYg*Wlgq50j^zLU;MYE|&eU z4(@|czk+6e|8z$2bub6V&!5kJOV8*taW2vGbgdsS1_cS z2nd71_j(^>q=f9a^gTm&1jNKNUcGu1N+Jmg-aPTJNw7p{PvI?rVt@GF3jVXqYX+ac z+}&o3dJ#Yn-|=^)cH1NWRpV?+fPaJtm*}YLNq*IHZFIuof5iR%_k;z_CAuG57+s+= zNz>U_Mnc(=4EPCiuQA8p{_U6ly9lM68Q_fUt)Q$NhSrb^_+qT`7U zy4^GOI^~XWTO3mJ-6fQ7+*+v6nK!JxgU>4wYMs|$2;c5YWLfUW*06}t$D4^Hw-V*5 z03N>Mr};}`N|X-Rkj-+j$t1UxKMQP*o8_973}9!xzFF3+Cx`cZ%`wzFcu3pu)}2i< zDJ$lCZ2#=o;$)tUuas(M5*I%()q^iCGN>j;@6>0LB!c|8prQCNR{*Wf`FTOkI;$F! zpHg(wQ)TsjQZn*ta?wEMq_4CA0G00g1z9ef3U1x8X) zzX*L16X2jJS)kS2pTs4-XKM9YtQSMlB+(D01Uky>r-XkX*Mau{YgQ)o4qm*YnR3L~ z6TeIEtoc|;w-0<;%E)4s$WkfEMA9-N;4YOPIDpyUsW$J$cK$((I9=`Z@z(->*Oz7l z|N3Y*Opf2}+TO~{bk!tEg>VHs$-dAmh*~LIW7? 
zg^ZO%+(&#eU_0m8KjZ|0HlGrC`sWMR63eVmH#JRi?X5Ltp$E>IpH)0SJOT83d%rIU zrfcI-a0i#DCWnjzk51*l*#L0 zhvqO$k*H~ws*JqUwVRjQ^ycP#^)Sr7*Vx~Id4N|fu&&VoGN?BW;p?TA)P+Bdo-pE^ zjIgDe`EwrUu`vjQduj2oIU8a+1@Um?uPI(ukZ`T7sc^Q0tD9?5vnqHtWsT{j{sS`( z!Xz?j<8pRcd%MghDk0DIMefGp`)dD{yl~}4+xgv(b$_o%cP7lDKmbJMg7^=1)(7m| z#3#zbXhO21P;}u0+SDaDrO9O=&%9h@ruS{R_-Sr=|ICl-0wjNq8{#@;#W>@#hC;=F zEx)q%#?R-@c4!iS>wM2Q?#gSZbObd2wGuO-H&CBsH>yx@iL+T#sbS_S$BMId>Ko%a zNL!(8-58GAzJES^S)zi!r#3R$p|L=IzD|G1PwTO8w4M~Sdhva}{bDbH^r&r}ykM_m zAbYBPfO8I7R~_8UDU#nT7>c-B^?Ja4?Txtv#W+56E?rj2y+%VnlaMUEDB;A`T3XV+ z&Z15rL~>aY2wyZ-RtXR6XqD8m8%nz=gl{h^Lc>)%<{{#pCH~)po{5!9V%B)E*R6RA zT4DJu7|P`dSHYu;D$u5_TsXSl!_aH1MmMY_WGZ?Pc7-2aW0`Hf>7RaOwYR$Bwxo@( zGaiJOoGd1OST6>E-jhk9vOh;}aXP(GMBR+)06CpZ^~(Vm%I>kn*;(BZL%~ zgz$E)YV#1@SW8D^{+pL_3#HXx(%jdSB;j6&W-jqGP9mGDC+`+Ut^o3|>yW?6H<#Ix zkJL6b{7vg33Gv|1X8rI)nuf0Hw_ODnFn#IYzk9Svua?hm*lotBEc8YnI)Uw$j~mci zFF+SAG+hWeeH3mJFH>FWVdfUDAk3+IRtd8)x90KUHiaA(98dVrH9w=1{aS$s_hED6 z$pr@r)m<{rP$klNX2B6X4~*_A(blbPVgJZGS&P@U;_Q2p7aFkixYeJ?leecW&zW>39C)FJ(oaEJ}6KZ|=aF^k)W zXw12Wd8X0sDWV9x1ly0_1=TQ{PwmZf%YFsfp)tA5=4q}eyog0Yza!hFsBs8)SNoOr zsQBlsf37D;c%A6_)NNol|M+q15EJ*txSW|@?wh3P{??@S{cmLJPIS_jD_ z7W3YmM_ji_@1dq2_e6aUW-hZ>ySsN|v-yq{CCQVpcB4{g-OXSU z+pjVan_dLHwyuUqNOQR_AnnbT&)b9yYytsP;>Ky~$J--+L(m_=)e$G=*u7J{OdE$l ziF3#-vQLiQbHz^hPp%cBWYjcE*1py=v3#RN=%vLXu^C zz;(netrY!X>5jr1i+(TX8WCI0!CeON!t*DUm?PyM%&$Y-dhO;@BS{Uqp%5;PY@tQ9 z&KQgNp2`J0=Q6KEUEST%TxXP-e&(h_yl!{4h}TS}56?WAHGX}1)j<|vFdqsVCx(%{ z8`@ih$80=k{j43by;IEr`4@VR1N^H9;pnr7Q%nI9YL?}9(RUL6K()l|r|P&YSip_y zF?HdD%K>M87n&C1cv)Fz+EE4b@s~h!Tj?!u&0Wcg4=~MuSLBN1RR@@x{$I80{bMPb zf9QRa^;>%$p7i`8(?$To91-*VBh7dl3!u)d(nPb(I1f44i-{bj9lA^uFN`?ljY)32-wt-xE=~RW)f-+EZY?SKg}E zJ{VaLy1~Q8{o490yr1eC#$8!|sMX{o=_<5MFC!O0rFfY2Ogx_E?kPqn{5GV29hH_ueL~w$@HbJ(>4>MfV zowzZOYagIqr^Nbw7=h!RfBDw76O-rnuI4-TnBJ!)dTvX zc@KmPUCas>@ECwXO1vj7mdIb~$=3@xBc|uII{t`mKW2jrx#NWL_GvyH9~?pqU-Iy{ z|CO|Km7^Lo=s|CrWql(W{9=ABsTa8gW%}ZJvfmqk1@YJ({89f=41ATnn&0AdgX#HzZu|YtyejqMx zbUD^x?bB2FIG8HCWRaoFX4!EZt$OsO*yn~p&faWi`4lX0qmBA0lpHkWD=c6))ZG< zoIS0B3CNMRS>ZP*NgiAM9hGcp$`@08ZTE~pC+%s4Ds`%ayhh@aqz%+NK}0)yN3$`N z&Q`UM_hF@{XjYM?_QiF59XQ?8Mbp}Zy5pEDwa!<1HU5vjlmLJLZyAB=ymcT|JO+Et zA*AJ^cfL|MTDCAGyQmAeG}E-|OT84{8QC}jk~Ix)$vllw+e=c)vaB6NSRlGWj)^!L zxOtUj5_~_s(&1jin3`eppPDaXFBXU$uP|cTn*U~VY5r}J3@GK*U(}T5?2^q5*<@CP zMH7^Ei5zw@V{4A;$shC`B%VpL#LU#%fo?!05E{%^T=G1ZB_3H#19upS>Xs$gSR+VDKxh@OOK(KjL&JxgL#bO4eLdZjHy47>! z)rIP8GkJMj`8$f>-X__LBq#sd7OA<0s5vM_@51FT3osc{nuc-{LQTtJ(pYrW?E;rx zq2UouwzW#K(WI?jZDk|pIH3ZrJ1P?f*B;dk%Kv4` zfbUsHIlis3(y09Y9od76v-g@aq1E}qi^O>Q2_@z&HLK+n&-XsV-y#RR*WL`D2AKj} zHm4O`*cRkHD-I=9rAV?LeE2j+yjH3kny7ZRHaYhN^#)O>1d#)|Z3y3o3wYyjLm=1$*FsZZKz`kpOg5Wp)sJl^= zGn!RRewKy7RM;C$8kd&x&zkibl@}dD3}im$jDMK|t>7;~X=!CU76;T8RD%19d9U|f z*!^f=V)hkY#-BXjQ`I;o?NyPRekkiZd}bD#7~+ooYz52FxlV@9Zi)aGu}Q>pz{W#! 
z5Vn3e>bRJoIikb7`AIAPREHv(R$-1&;q3m1 zq!lVcR|zQL4sjYbuw~-*@KVDJWGx|fG>lqsscTPjDNHFwWm8nGVZ-I7hd!2joY0eB z4$SwmEc}2(_qlu(f_|o($eK$qjTaliL!1Fd*dXUH{!nZrL+jG%Ux#^#If zz6>}TXV+`r7T@_VyKcbB>^lbshV*#ab0c87mNFkm%UhMUYhB?zN3G+#NGthpV^y5z zclHka#-~%F1HNhyQ@miCYrstOkrYkQkADdj=F{t;5l1OI({=l85c8=Kl|Tg%6B&zZ zv+yLjqPrZ2icBh=Z29p@4wPP$nOCq$6Gud`nn(*9Xh*s%yz)FFZP)fEB}BP#%H2I5 zI6rB?w{p90qkot3rOK99mfs4o!IEP={atu&F?wRCa82Fq(Rryl5|XvGs4sP~aMqEQ zx7X;z#6xN3HL=JGKjKv*uR%33_2~A*7{~I*C;s}x&H*k`{GFHO{_qISa+FW;q170d zx9pkhb%>E>-@Be5Q&h-LL(qLvU`SW3gOs8Fn*q63P!aX*%Q=5XL=?Nj(JY*Yu+QKH zpSb{NT;-i36Hrla;4+(SqV!%$f#$(wX0|`VeP8HRloL1C7~b#BE8PD^@hWQv*IC_T zv-%+DJ3MpQ@Zfd;R4WA}Npj_kTE5%K()c016eqwpNJytCe*kSVpvCA23?sI=2?8q$ z5P}b*<0eC{{asW+;N=1EO>Rt4=Y~~)D`~>Z@|>a9_tX22v$L~P z+w#%t@0Jq$itKy##{_Do3ivjhJb7}jt zpdttRBgDo=X3q+=zUZdB?-5}7(#{()LY`V3bW)aa++$R|{2<%kJt+z?KX~vU^u>#w z2KFR_Uztd1W!|QT_ik0Qzk#DaoKl1y){Kz+L0u6*T{y*u(Q)&CpzbK0x}cfmFlew* z8_&D1W5sBUcVfADbwT72L-M=2F z|Df-e2S6QO(P)0+3}U>vwnI~G z+x)9YrAsR)9QQ*(>`1Dy_kqYgQxVDyUz+B`F69)f`l{Vh=_*x~o*rtr9G1_Ob%?WN zU=%V?tSYOSsN=ip^Tuv+#T+aI=es_4Vte85wZVvi@E7m@SoGf>()zYNp!XzPN=k!J zm*CuTqRKahH=|OarVbXr<>$9%+d`T~gbml2pwAEIv`1Yhn4#A9FA>Vg3kq)LyR*;h zg0#|OFXnRXKPYu4K4wXxgUyzrqlX!eL>2ukN_)gIfEoUF7L3jM!ouXWuS2=9A&-`^ zxpKi`bMrJzX7`#$U5qu$_e$JF0rl8>T$VjPTPHXV?CVIoQ`_o7J4!jmw;!8xT(ze_ z<%&NZbv@L^Z;6ji7q{LyM*jB~JNlOY1qf?SSIy9Xpo__Bp?08erl0Vp2x+Bw7S_rb84Le zX>rEM*mdcBzRI%Az0spbN3ws(RGnIHl*Gv9KXOa(aO*B=(e2TF$=_XCo;J$i-jY9V z{P6NMYwq$Q!`^=4tG5J9wV$1n!h@T&aH^Sl^V^;|hicj6?7Azi)JFs1@dd&S&+W(A z{IwwyrSvv+#7EbypEDR-j1CnmXqfdwA?&!jD*(4}7sZa_smmHcTo_tb+>7hOR(+;M z9YiDcv8L)mYBk4lxFTh{hr|Z92pY&mR z6j!te{TW|=?)wX~+FC^J_kbzCH^yP{Rz-4sdvKLQT~MaFy14Si9A`o?VO@6r?mw;kX zGfycOdV!jrpq6~xJZ=Vc8BT|b;{-(f;9lJW$34blS?IC3`s-GzjD6yx@6%~p^Pg;O zG!~_3%|tRVaK?~ly#}Ht3PM-;UAC^H#qo{?$^0lcR2}GtL(90>xr~~b2cjzOYmHGlT?IbDp51nOlv%GQXN;6 zVbagpLmWP@309Xr5UuSIl_ox@T|Sa#9N^DygUn+hA(A!Tz{u*ZSObPq+1`mC^5w5jKIdK<1uBSLxZ{C}DJ)Ui@9;fE!(P() zEuWJSG$Mu;qGp!ScEqM^o!=nuD^>ziXn9=L(4_!>q=tQkh)q(mNoQ)uqJD zY5j500{r+&OfAU_Fg0nTpHWujssyH0^ep1T&a>Hm8%SoQ2X*xQKDB0&DD@A9JKtwD zI^zl&(qXy!U%!*=XZhqUgj?~roGb-=i3z&QDZ@7&^GW3_1Ar0k{1gW-dPVpKiNU z4U#+RoE;u=8D0=DPz9wq4=b{RKBms40j;vGYSQhdSu=v6cKsf;iSw8y4ZT0>cR9V! zO)f2Upq00@{Zp1%@rTCal z&fIJ_Oj8Q^IDeRI&0$TfTD!lJ%;K>)gc1I}m|dvXoNL*6%9tlWh3BK@pgdwP7iMLa ze0lP=h$5V}=&-h0xZD2jHL`r(TkN}>-@*_)S)QCNQv}T5+QI-YtXxN8xU2Z7TT+=b z=s=sF8FG4V1Vz}pOKg!|nqoHD=FG6a5&$-Qa^zoN3wCDx_d)asSntJnxU5c-*yp}f zFOjW<_jUMpni1y>vrm=_JE)<1!>!500{|hGL#I|@Sv#1d9lTqzT1j3W=^ly5L?}Yt z<}U5!TEUXwTj2h3D%f^d$~RcT-9?==j+uA&DBtgCg1^0rk-Kz5BE&;=88^m%Ncmg@BiUN?mCEO|K1@6;n_dtL5 zRiP97Db*)GtPnkVa?qp~P3q&D-5^K$ z!*y=!^ap(VlC*z?sJUEirF_aAsJSp=e;cz>15d;4W#+D-?U$P6D$M2V17XDeVwsur{CKN z!X};VQDZ*h48M=wYMyIL;y%mvn*S;q|8q=nf6ybo6ir|mueM0d`k5x&>|xbGhFZ;0 z{A-rk=fMG)7k<Z4dcH#D5Pcv0I14p(Ib zy69kJr}H_j409LGT)Z-f2dsD-9^K}PkzSy!hDqLrC$>j+7LX&=fR(XrzuhUOJHPp> zmgU>C3Y4YKGrRk)==R9Y^Hki<@p5&bZ?M1I`9q8?|BsQKx7?MU0yJy=;;bQ9%z<8? 
zcgEXWP@f2)~*cZ`t({huTMgYXp_nPws_ zmEKvoK#LN+^V#Es#P?~Jx4E}cP)XbvneJtL8$LD6_te@_N1kq~Zl@aACj0s|%h%@A z2bt~4P1A9Q%6lrzgXh#n_3nWTEzE;bqwO^aDAPk4XU{}3)ZES_-xLRyPXM8Cl;lJS zAUGKT$jYY^v`1!!ANq5i+drUP_WjI%&m}4%;=p5Z4<~oD^nNkzTP--g*y#~+Q!BQ} zVcl+^^l?v$y&aM!8;Sjj#wxQPbWWPy$amj>ElAaFd{Xgn)Vgy|nfMx&?4tkFY9L!6 z+08osv|@7XVKn3T?djt92N(dub^jk2gozVan6!q30+`j7u)NoGli|MFfS`f#;NNsI zDdWeps5O^XgHtzuO>yzWd+lFZ?VcePA}x_i5_e`OYrk`foXF`HXVdHa$YdaweecmD zzxHtX3}XEMIY%)Pw2^sm)WA4M;mqzecE1`$34=vU2+^|}BZ?URyq@3&j)Au8Uz~tI zdg@i=BcAQ;+nELHCH7q0-$5&O&2DTTNTYn4)U7ow7Je&c?E(^nEn%Azht?_ci`1;w zN(ZJ1gEBLdf^b`~wKAN?)AQ_~+PCpHVG8UxLAmA!E5iuCid$#em1gHW8ZhcO`w+;!3$sQk!h-)S)*qap9 z+*28b!J_Rdc9yIgK!UFaNFSoAydZlwk9Pwa)MI3i5QEd~Q zauj`nzYbb_y%f7(@EDOv*eDm}ZEMGOa*bLQe60q@#@ItRxQ}=|(G5xBG+Xi9l7BI$ z-9#OHmk{~-{yGoBrVVAOrI3+BX|LJ3x|dYbGhw!UG6HRYcp~Pn-k07U*_rzS!6umK zeFgXS8~taa4zOxZe^+vRSKRTPG?L`J?q3TJ5QRYbgM)u@z<%r2=scsYNG@N_<2-RR zN6;lq_uDSWWGSa?_)Md;^)$jbl{Eb)wS7H*=){Z5MSPlStt&Nldq?2LWvXaDq#Em= zx9#=Q^8vO)euD|Sh?v41Y4^PBNf*aSQx)UUg8EH(t2h5@GG_w&IsGqlloz}ymA?!y zksb(As>4fbR?`-J)CyR0a$Q>t>FSIWeayO;0P`zYdZ>Lieq*Nzq@YisCp`raJz-m3RTFy-)ObMpe;438?M9;^avT zJ8&cg+kCh6C=(i-`$Zp{=5WTyPDi1>WiOrfm-OG0qL%a45_0(h?h#ua4~Eyf*EXN5 zcDXAdnP;Rg#Nr(Ck)LMyZlVy9VxU$|I+8ehIdgH-H7P)1UVk6TrNtf)95>|DXybwF zVFDn@={@A>+n$(>ujqrik~&8C>h?HSpT%sUnYB$rj+MHbdTr5r@ zbKiwCeSlf(edb?o7GGIieTTgxv{583dJyjOG~a$f1b19eIsTql(;|D8Q-@}j4E(@q z!;>CPVu1pe<#%_2MCAMeKWVMyFQ1tjamXPx9(GA95LJ|BIeK3& ztCH&R2p%uaKGd20YPHtq0SKarzwu#!g^$DnO(?%=+Ryn@YTf&tqQ;)7g8g%f6AacpBKbh3Ahg?*5bWT$qhcKs&VO8f}k~VZPtBd>+pcQg|G$?zR zEUlcvBE9vly*? zrY3jyQn)MZrnuk3`wqjs$kbs$0>Gv=%@l_Z+8Y?6knqT(-S2-*fs^8`NDsbg_^*Km zg>HI=p+5PZvzzkPRAn_muUP~fi$xgKJz0Ie@7ynA^&u~S)3#f->g- zp_v7dzRaSZKsX2T*M~!sen_!%z!=D08~JPL+K-q2@uW4f_o1vJ1gGl=Z`O5O>^`OD zGK9O$%eQ%Zne|WOmEK!X`}Vh1K=mipc?02ds|}t}n>F+e)|e+HJDY z%yI<$-3^XtYU%|dL91v&Pv?b~pyHxoh+fz*qB&_RuGl*|X~RBup{DvBAF3jgau&7w zUn|quL1Bo1qEZ`2exqM^+j4+ALtle)D1-K z^twWrww$k9m$Cm=i_qYgk`l2zn6Iwmkkx9&Z{BM&ReBIgQ zE6y5}JnQ~_OYM!==SBb-nkIZ=#3sUHoP`NyP&W0W0_G+CUiBf{!Ve2|M+L=xm>%(5 z+_*xx5ac{topsMC_^4=x+Z`?fKyRO~C}Zwke|#Sjo>ZMu7@?^JOnDiA((MeB6v1UXF;lxP_h|sD{U~-TTM0S0sSUUI#;a36J*`tty|OC!l&3BFcvtlavi-dY zKB6JMrP^@P6GIuDTVHH*b6^V>DRrwLSvCAq-|MCqciEdu^eC(udIoR*hsdG}hNJF5;EJf1nqOR{=P%c*`$uIVDeD?}&4z{M0i@@C z(IXkbwAx;30xp7zrgF;A{M^Mo>`XyUlvt&Wl*_0usywev+}9z)%~@m*#9~>jR@S37 ze`X>)&ow2vTDkyn=4M$*UZTmo9Vo3E^MYU2G$AIJg^=P(NV^AnIvz-1LwS{*cv`+x zhbPvh_(Di#@#{&&5EF<(T~P>3s>q&Yq2TbQ7s{pb@kFl#90DC4F8Xspnsbcer(Q)> zjh4hA67Xd(BjieLy8Py%L1R%Dms`a^D%FLbBAc@?<%ZMsbP2#inMnM6i`n`f%~$yP zQ4%sX%1x~`1-ohqMF(}ynV9;9CzXd7L3N8iE@?usjYjYAu#p^Fn#-uaa`H`2JdN!4 zPDq@r;}W}zGNQ^vGQlOK3!1Rc@|0$;PaHT-Vf|Bw9$ z4B2;IP|?K+vmebb^T3=bAOK+R8HT!gSsxu_Pw1+j%pQp9wLx8KJ!EJjB_x7&v&4bZ zFJrsTshwZU8>h)#6pe@$doxeT&o&>s-4Z zK1?iZRSLv$ijsb0Q~plxE@=ALcPD#uTk#)Otlzx{gc(t<3uF+X5U+c(!+izjp}CFX z1DWOU6PlfGJ3}U&9rIGI!Gkdpank8m_Rp05%d8Z(W8Nq=KY=(EytV2sVzjKmmxAtY zuv)svymowKiBa05C~zHc{qdD0MlXXMO_9TAy_u_cQ`jLKnPNbqyi$aV`n!!;r7rSw zE%a~$@DlN@^hDkiv?K13*RMt<%UHL{=>~p%gz9LsryvZDD$B#zI922mn-S9BMnAsm z2ij9Tg&wS;9e;Q2Cn+NsG<2w=Xvjw`GMbWmto)jXQ?`s)foOVrm88qfMfVUJ;KaZ@ zwuF@$Kie6SjqS}U{gTd$DgyzR?96LC z45r*s9%u#9truvZondtd{hsvYKjEf&E{+Fkqvc2EGe?NNscy5l1xxVG+)H2!5P(`L=`y8>*pm~;O zAU{Ll#*dB!W08Pdevc*@0l$AE=6{e{(HZ#LXi=jH4=%9Wg@?c9@7|sc;A#-2!^OU1 zO#Lsj+WydA4#xJO>DUc;#kc-s$=>ZH3nbISbsTal?JqbP893Xo{(hau=~*W)LqhmKUmM)+L$5>`6xjS7I3UR*K8$N_hGEP2 z$X~zur3t;XK-}~NHu(7iVg0j;SDC2oR-exZ&FJdpGgG#7OKzoG?^O-ph;V({b-4vQ50dBYM5> zo{I-MAhI?9rJ+_eT~@LORH0q5I5ww%pjQE8ajM#p3ThMPJkNqgIE+m^}!+lt13j8$#^Lr#+bDsq>6V}W8yIA?cfR+Gh^H-n 
z`WEJ_o++p5p8o6f>KrV&ykv3o*Iy;AHxG#px#=P`lMLR~@4}qp$^XFisgZ1kg1Vy9bV;sl1;8mlu6m*)wQI-ea)K<-MkC9kRa|UnSv^+nPIf zQIO9oXXcem+;yHC!@laed71jEYr&u9K7qqSe(}_yiBH(Bc6J++ikLi$sU7_X>}lT) zl8u(HF%2wr90rjr`}V?5)%QOBI^11Ymo$a@8~~anxk!dfo+NQiICnf3nhJ13q>xLq z2B^__G%bh@g*trINw*J=PHIBDq4jF4gCTgv@hF%?Y$RF-+;E`|M zp47N}NLk50??U=6|4-6q&PgeJDD+Im!9eb(bZeZlRKbF&GjQSTYo)FVs!NgxLOtjx z2#^%3O^KBZ`!b0x{5dpU$?tr)8E?Pt89E1Wab7R3_B$pu9~qTf0CR*ZhGFO76cVYPPO2V`dq*rYOeHw$NeIik6?dV*4wsh6%6i9v zBcmyMXZYh~NhuCR&T)3q zQ-&7eGB>@5pmOj7$$CB+2J1EMHXkgbrJnJY8ASk97>D_(kof$Ma~2hl{_tej*-Fm)yEnovv<1$+nSD zE@fRiGZQZf9|!`ak{ZVFmcjSd+|?)yp|8(ZspJhGS0W5-Q?h3JA(`A(fbm=P2x}*Nijqz14ZtPQs)!dKvdf(neV`U3Bj^=Z2}KC-^PI<^*Z-Xw}Jy z-BQ%SzS2)=J47ur9_9`dI}qt7gq04Es+UP-=I7)laz&M9+U&3yo$;GZh{LU(&7RXo z%4o=J$7pcA^7-ufZJvw=$!RonbXu>KuOAS~Ow~K513L1f?=^{_9j@rLESKwwO7~@T zHFaY)7j)+9rKNa77rl~?HXEno7CwLA#UuZ-)|3HrYQ{ml-k&whe=moBv{p=W0CvTFkcYDxN7yCnN?8GQFV?Y;bK!uXh4dwbe{WYs$W6LH}-) zY^1^d6Mvl*izr#T8Q+S|jf^jDCA+5EWI~CWwVNFT{jvVxo%=)1Em~NH%90%3CIr$Q z)Dw0FIx``fqVUEwWQ=B(044@(VEf0{u}HX^vJGVRu>tkB^#*(@&R#wt(e%A~r zz;cM)jY!s=f8@xJ9vIZJ?|EhJXz+Kjzh`Z^=2ITf;(q$s$-j{EqmN2VFs>c^(eO5N zcSY8RU)N489AX|IS2psp=a-+#%Xov_fRCGs3!xuuA0?=Hd9KwZ{`MOXqc{I{dJv2A ze@|0ID8PdyZPz|LzXA+2N-E>x+LAf%An*>f@(sl`(EN{DI>?N|Y3=2p?wEi~&3p5= zY@lFd3a+hMJe-zeS>5!eWyyJb)t<6#8r^Em>~Q!oolUg#{=JhBle1J zfshKhbZJ6cm@E|Z&%v=J+I3aKae}n2>=$uoX`q3vGJ|8LHQ%} zz#hRCu`Xtp$FZ#mTXb!Go%^k7tCJgKmbl5;rFhw)VqW^kPObNjGq%lI1m$SRYNeBg(x}$ zQv9>Gwr8wuhdufdv~rFYW&hkkQJP}bff<`7NBz!vzGh>SI&}W*DPvG4T7e{NSvZmE ziBdz`2C}CaDVpCGdhp&WR=3!%(pI`pKn5!M-o-q1(VEM{F%>@TDMdZ~WV2#w+Vty= zTN=5pgI5YIrgx!?u|6huMokoq)Eer8e|z@}Ef0>SG3f2jbbm!jMIBAs)p6S6LV9n= z!zXP{i%Rc3qH+XXI!6kRxD2MCni!@r#74NfvPY1hcTcb_T`^ z$8O5!#kh-I3cGWbgHvpS{K98$ za@W6;8U$Z~h=#YJE{4C@Idyj8MW|inlO8#rFH7bT<~EG)&){FB4Qi%&lC`5GO%m)H zv^g@Mgs#Jm#_58%QsVbgd@~Oz)HOz_i9pb0C%Kck!gfg<^Cx(b!hICd1>({tn^wa^ z9|{FOd340XNed(Wx|$mF*!F9cgWO3Dk}SlztGfiB+(!IDv2D+rJq4MudP(|_huR}i z$RG-1!X8sdjeRNahN$|{gF=07y~-jgt9MJKDPJl?QTz7^2!Bxz!}ls?38|@M37=3o z()1>>r6vzP)EA{W@&5cTQOE7{%V(PeJ3~_p zAip@8pa~8%9hbDr#FTvS@W#+K?y+ez>=}9+wGw4*3YL)2Is5i_w6mg!!aW^1&`wFGN z$wV;|<#bd`6Z_z<2w0NO#*`>yTXuG`E^?(n|6iZ~flbeoE7HIwf1~66Hru^meEl)f znT0lSXS2bv&2(|D9gkGC!$RXSV`{66Khi#Z&mG0#230*B+Dl8noA$imn9L2Cm!FV+ zC=}KsPjbRq7)wmNho}>Euh1%m5J{^UBUFeX$0`mof7`~7-fUJ=vlEIt-?v;iSU~SLeN5D)YSYwK``r zr%;d*6?Sq*wxz{gdQEgq)oOUyLmX*VP<**x5sD305ZE0Lbw4A|qFFK>I81wW1O@Er z{8x9G#^gV32DqbZ3c5$9Imp|Er;9!@-8o$|WE@!Dou3Ar&I}j9Pi|HSq}Q7Ab?_ov z+%Q_%5>j86sfCXaCac4u{(|ij%`GSgON?nhf?D1t!@A%@*V~VQ}Qc)&BQga;3x~uR)+he6@2i}Cyq%@ z@eiJ$>T6e6q6JG;qb`HU_Sd*#br1~wpewAe9+tiYtb=!5#N^yWiS1yemfXM{PEyyOuxPgZ_kW3wN#!>?rbLEN+(SB+v|z33g_C7bih4WYpDH#j+C}xn-V!w0FKTx6+WU7r7bE@bcBUZ>g}|PRd8#o>W@s z_0<>N-;7VVk)IY+JL~Upg__D3nrzSZpyEH-UGIzzN?+t7{3W(1B zNjQRG{=EF?Q|pcT8e%HIu47v?>N#6+i@m;ZlHdK26<7R7<^5v5nj1=d`P}i)Fqc@y zK}~G)qcYZDUdjRH`mk@FA~?wAZzc6lmKd7`ns7|wR+n;>=ZbY~=?T^I`7Vl~vzal~ zc_$Y~hm~q;YvsqQ3ZWvA?e=ep$4UpYwGz8A*6A;m-Szjk+fR4fM*6f()YOjT7IBVb z0e_bT{9P^T_JlhkX@%D3#;>$L7xr$1nC@YJjizpN6KnFRINPAlv@Ofw{>oMIjQ3FJ z@@V8M_tDq9agY;1letcf;_Y^f0JS$bhBHcdw9!U*kuo#d-4_=&QKSDP*~6)N&V3iu zRd|eJvR41q9tzzL9E(?g)%I|#hAocQigL}0Fxk?w+SwP(M)OG|9b`GJx@}uqqx+xr z?p}u{yTvZ`aG3JNw;;9k-taR8afc}6qz1Y}r0s)esIp^l=12fn_epW8cg>745!+Jx zIXs#799q>Uyl|%IvYcb(S7oeM*Xew+bH!58;^@${Xy|-9th&W0mG;E`G(KosNPpu? 
zjF`W|RdY2pA7rC-Ve0W5t+d+#vo?LT+140|JbNW|V8D2ch8R8ayF#Ufx@4CI=nEzD z5Uz4wno9no^sw&C*0_Nys*_EHbB~_E+YYF|h#}M*up0?CDC1syQI2s>bA()VFrkE) z=a)@3f9_lz;UXVfE(e+wtBSsv-!F#EC;(>}IYuJZMC9sw5T8RhehiCPY}0(S&G;8h zXu((yCx1<$g#6+h(sZ@VGD@~H6!l!P#Xyg5$}0J;ccO|ygNQW@A|P$SEgGPxidMmu z`r}Iv`XQz*`#3*WE{a}^;ZsO636&P?pxo2a%IGA$ZZsoNkrfG^##O>-B6SI`U+$LCnPyyV48l7S; z*Ii@64=&1WH$Y?AXsUAG?$_~k7R`yVlegQMECg;?ilTB=kXY0@i2-6ady9bWNb4yc zl8>WYlAVlB2vvxB0Q&6kCefPJGsWM)&^)-btiS$PRH*QRP6QVw({;CQZ4Eda_VFN# zQ$k7LDcT5Ue!X>x*==3~zFY6I^!)#gl$WYru8ptc#ck&k%yX^|svslZZ*$%oD49Ps zF6-J~IRC)nE(QeQ|Dwk^4_GN7e4B+bOR#>-vv_1+4jV0^DA%s-9ap{RyQm87W0%VG zf{{e%@SklKGc4)}ur7 zZiNazkh0o}WZV#Q%&1$Bu|(=a+jhY4JRR8Zx(=5uaDhV-_^U#1_{CT15JywAV5no) zv5QNKIuEpCr#eP46uj|G_r;=$?)UPeoqODBON?%46CF)Pgj8LHRSTTCP14SuK%13R zEVZ-CYJIX)7*V49dMrL;Oo*MFzr#`GkA7$#<_3y>HrOS7U6M4%c7tUG^zUBcRr6jN zJl2*9kGAM{W7*p2BP9vf@PcwPjs8Ie`F`@(sp9jWUviB;-IbZxH?G%3S_)dFFU&Yy zmQLzx8Ax=0i*FcwLci{_M8S&NlI{fsN$5}a9LN&>RfR#Lp@exPbWl($#u&CO!5_aD z>4Qp=_RnwuOq??95g9#VZ7@>LsE-eL&(v*vQDax4>o|jX! z^IxjKY~CQ37ZJDU^O0O&loqwk~~afuT0>2f-;fE`~q} zUeVl;zvM_RooSg7lAi982x$Fc$~XCVAtMURm*iL8%Kr!f|HlC z6-)aLgAa}#+TKUV*2-0eEjI_@y1;^gn{^R|qfy!L+VoM|{;&J8e`zX3eDcYrypSGw z=ZYVFAgUJN~&+EoP#BGtfy;aKtkSjVFrOPSq7-D z*5AcyED9IXk!fP?WV(k=TFN3U&+L{qOr_0UCRE2rd@FajuObJ+OmBMG`OEpbRjw|U zym57;*Z86YvjVys19o)>c_`An)cgFBF6DDpjfdk{6vco{aKDVXK^#k4i3TfT2)P%o z$gec*U7de^?pRx{M{em9J%pV)Eb1p1jh*u;jU#EeG*fTJTnQVzmcHV;kY;(<%b0XF*t?zg{AekZvCTb|0Z^sP_!rL^yZ6XTU^&-yG^y>d^mDC=!2 zcle~7v-qM7R?<0N|Cr3mP2m)H9*ws@MwvRP;{?KQu(<3g~z$GqWe+F`TfYH zxXbEiptaF#G?mSY>VH_S_}_qiy0CJ~C3Ia|milHxHCPc_L4C6&xN3%jbMYxT5U&(I z9o}EYmY>W{UPSHPqqM_M>h-(32uxE<4OQ#0-Pfx3h`scX@{%g?+;OmbB1X<(ykBi{ zFvAs39k5c!67AYwV6|hfyoP?+L{N9mWt!SmXrg6@+TFOIQm^hj_!7h)%iEgu8sa|o z*sThMLMW(UZt^CI8-xc*48>VnDW(bRbnIxvSsmtRRZXJjOm|W4)P17bW6@JE#nHD6 z;n`~c((8}&7 zx`s|67+9ThA*}ax^Xu+}Qj7L}m!ZOK*u6?VWZ2dJhcX3z0VZ4)znHJO zuDOrr-VU@Y8h#xtw@}jHc9Pdb1(jn;mS(AZ7F&IDlRCz7uTlmQYsLasW@D5dPumFc zV0{R@x+fkQT|Cap9Xtn^+s^@o9st>XmIwX8MbxFMXEslkyh|JV zJ-HL!dx+$Ov3R$CRUpfjot*R0;->%g?TuKJBnI}8nDHy^!l-zyiJw>KCzbrV;3$sC z0AT;kD>t8Pe9|8u0PAj$O2^2);%R}5*sowoo= zQXymy!zY|ZM+m{oA!k&bpLyA{lYzOac`8p%mZXl=$*6v+C!qIHZ%~K7o7v(DGDpx` zmLHB2H5jsNQX2Cqf#@2O`-7jQnqYhr47uxfuFQS?{CSpu^tj?*Q`T-HgY>yh^_!b4 z#0xBmqU-*TlmpLw0{ z7+F5|cFGN3sQ2~#Wob8A5aL%ZW4-T~tDW#cUOzzQLpM-nxOF@Se|3}LY=s?|bDJq7 zhMd9PW+#UmRj8Tz6=u;$xjRLui>+(efqNizRuEhOp$q!fAE`G~_7?lyH0)#w?^ zr{xu`s|&sf76ZL-9hH)SFTG6E^d=Wzlqsk|63AukMQWqESJUX(PSfKE4l?y4aSp(5 zk*8&#bjI(JaVdeB)bROG04DF&fzHM@fjfocp{8`=`F6167a6dW$u;`b_&iPNeS(h# z2YGRMT2S1}4Z1fj2w!pq4n2`-a_9jg9{M%aG{4oWXEAXBVXCBwYC1lpqr*GCnxmv{ zSWT3WxI=F?2(xhwAqn94b~<%Eyappl<)M#F#`$FrSz)-Id@>p%NrC_ATgvtc>_i@> z!_dMb!6hEY)UY`*QB5T|^L#F-sI+>>i5tkYhmd3ZGBy!}BzWxW)lu>ORcAHgZ0$Ta ze`~bnnp?F|%KkSSXgfz<47=5$MTA7elSna!6f|j;8;46eqXjLh83nPy%hpXS^R6oe z(3(T|V9gp_W5t1gRii*dKBm@4xQ|ztp|PU1_^*nNGCclRr?0;AiCZ@7;P95UxLtkH zvj|$5@-+P;9NAScAH1=cFKqb!_uPaooMddo@jrJ zC2bYVv|58>;re>G_4(b32VkC5$>qij9DfF}*9Tb21eeGM2u(7bcN61!XMf?5pb5&m z-G2O8gW^H?xzjIsku7Pl0g7IysPh%Bh=-I|3Hb!S+y~jdG5m;2lpZ+Gp>%zeb-GcY z1}7diObcsLdPM7W<>!Gl*WoZgX;!bKFJ>3BleHv{V*%7ntmSVy$e98mk#wG!jN;(! 
zN8pTYl%!h--RP8Qs3>@UhN?U@%kNHsYSnge>uWR3$kfaO45G+8uvp0_oYHsV%O9D-C`e}ae@RDL^7y| ztN44H@(0>Doc63lz_WiiqGT~-5(FYvH^JoF69nf-boj2v!L`V}?gBEBYMmfDTbUPO zCgBP|67KChWEb-AxU_JziCT*!3j3_nqqj`<+X0I6E&lrEdxK4e=TtdAL3_NqluM0b zka6jo%k~5at~fq7xzi#Py}IIE^JZmGotYj?;k9%-=c_J;M|hyEvk+n$dL~^s8^Nuz zzU69Y7djc28YMZWlwmEeL3Z}+Y3;@i+<?`mmzG%1b8i!nodFanyU~=5HX;7PeN`PaPieJ?SU<{RLP+uZ9O|Tdcw=7!y+qUW znMRCw$pgO?hyDK3Jgd<=X6XfNeqkf`Sp#r27F0__yW7gH3uZQL&431mfU}0{hb=hf z52?89Gg}(4&y=}!Ijt}*s0(tQ4;Rygjx>9RF31}p=g8JqE?Rsj*4`fDmVLhLs8H^4 z6F1Sw%7NT**~X21Qk#pTV-pW9N8V@-eeXW5)v(#)1t;3OumVy4ABbS zC-_IIibZ^{c~ZQ941TSA-tZkG~=%ZcgqtDYt|5A z26JLD;UR8ayUxBrs7Mn`8cst7E_YkE7w3=E8SS!kuwM!&qq>%!phs4&m<(L;*2IJ! z1f}sKWzmjOL3DM@;E6DD$d<_q4iJtNlYTIu8_p@M>6|Wz@XT}**M|JZ-1q=z1`W*A z0?20{?NxBuE$-E)X!#Xg26z3ECY28`Op=Cw49j+AJOjxm!Fj|>mN5PJ$#We%V4{XG zmDw4xH3OuFX;U!>7AVjha%X4v3E1{mAIIvzpHC0u<^l0f_Qw2JS!SX%@GPkR_pe`R za)3@sZMEOay7A|a{(9%C-JYYIt+KCFRrw7BvDwG^RJIwL$6Lm@EM|uaDP_&cZ)SnE zt^b=@(W}FiL2|I>H{5+u_}wcBn)f&FEbmSQ3xHS@?g6QygUZD}^60GZt^I&?5q}3v z&0d%|h6l@%{&?2UUW5QH34Cl@A8fJGi7ev&n(_Sk7hgIHa;96uWd4_wZry!U81U!I z$qTA$Q_??Sk$$H;@F380ty_22wC%6G`H^60eh2)CZno&%_VfMw1HSWuJfG~n#K%87 z5-i4U($xX{xzqebf9=lCUc^5H>bGaV^^bb%3Zezt#qyyECVfPpvZfE{@M%U?eKYi|6fU#$6G)%-_Z{Z}>r ukv(FX|5eR@R@VQj<~p|Q{~NEl5)hm!7gccM+IKea@4S}&*{su7@BI&l6dUaT literal 0 HcmV?d00001 diff --git a/doc/src/manual/profile.md b/doc/src/manual/profile.md index a3baec593a53d..718b79815473e 100644 --- a/doc/src/manual/profile.md +++ b/doc/src/manual/profile.md @@ -297,6 +297,220 @@ Of course, you can decrease the delay as well as increase it; however, the overh grows once the delay becomes similar to the amount of time needed to take a backtrace (~30 microseconds on the author's laptop). +## Wall-time Profiler + +### Introduction & Problem Motivation + +The profiler described in the previous section is a sampling CPU profiler. At a high level, the profiler periodically stops all Julia compute threads to collect their backtraces and estimates the time spent in each function based on the number of backtrace samples that include a frame from that function. However, note that only tasks currently running on system threads just before the profiler stops them will have their backtraces collected. + +While this profiler is typically well-suited for workloads where the majority of tasks are compute-bound, it is less helpful for systems where most tasks are IO-heavy or for diagnosing contention on synchronization primitives in your code. + +Let's consider this simple workload: + +```Julia +using Base.Threads +using Profile +using PProf + +ch = Channel(1) + +const N_SPAWNED_TASKS = (1 << 10) +const WAIT_TIME_NS = 10_000_000 + +function spawn_a_bunch_of_tasks_waiting_on_channel() + for i in 1:N_SPAWNED_TASKS + Threads.@spawn begin + take!(ch) + end + end +end + +function busywait() + t0 = time_ns() + while true + if time_ns() - t0 > WAIT_TIME_NS + break + end + end +end + +function main() + spawn_a_bunch_of_tasks_waiting_on_channel() + for i in 1:N_SPAWNED_TASKS + put!(ch, i) + busywait() + end +end + +Profile.@profile main() +``` + +Our goal is to detect whether there is contention on the `ch` channel—i.e., whether the number of waiters is excessive given the rate at which work items are being produced in the channel. + +If we run this, we obtain the following [PProf](https://github.com/JuliaPerf/PProf.jl) flame graph: + +![CPU Profile](./img/cpu-profile.png)() + +This profile provides no information to help determine where contention occurs in the system’s synchronization primitives. 
+### Wall-time Profiler
+
+Instead of sampling threads—and thus only sampling tasks that are running—a wall-time task profiler samples tasks independently of their scheduling state. For example, tasks that are sleeping on a synchronization primitive at the time the profiler is running will be sampled with the same probability as tasks that were actively running when the profiler attempted to capture backtraces.
+
+This approach allows us to construct a profile where backtraces from tasks blocked on the `ch` channel, as in the example above, are actually represented.
+
+Let's run the same example, but now with a wall-time profiler:
+
+
+```Julia
+using Base.Threads
+using Profile
+using PProf
+
+ch = Channel(1)
+
+const N_SPAWNED_TASKS = (1 << 10)
+const WAIT_TIME_NS = 10_000_000
+
+function spawn_a_bunch_of_tasks_waiting_on_channel()
+    for i in 1:N_SPAWNED_TASKS
+        Threads.@spawn begin
+            take!(ch)
+        end
+    end
+end
+
+function busywait()
+    t0 = time_ns()
+    while true
+        if time_ns() - t0 > WAIT_TIME_NS
+            break
+        end
+    end
+end
+
+function main()
+    spawn_a_bunch_of_tasks_waiting_on_channel()
+    for i in 1:N_SPAWNED_TASKS
+        put!(ch, i)
+        busywait()
+    end
+end
+
+Profile.@profile_walltime main()
+```
+
+We obtain the following flame graph:
+
+![Wall-time Profile Channel](./img/wall-time-profiler-channel-example.png)()
+
+We see that a large number of samples come from channel-related `take!` functions, which allows us to determine that there is indeed an excessive number of waiters in `ch`.
+
+### A Compute-Bound Workload
+
+Despite the wall-time profiler sampling all live tasks in the system and not just the currently running ones, it can still be helpful for identifying performance hotspots, even if your code is compute-bound. Let’s consider a simple example:
+
+```Julia
+using Base.Threads
+using Profile
+using PProf
+
+ch = Channel(1)
+
+const MAX_ITERS = (1 << 22)
+const N_TASKS = (1 << 12)
+
+function spawn_a_task_waiting_on_channel()
+    Threads.@spawn begin
+        take!(ch)
+    end
+end
+
+function sum_of_sqrt()
+    sum_of_sqrt = 0.0
+    for i in 1:MAX_ITERS
+        sum_of_sqrt += sqrt(i)
+    end
+    return sum_of_sqrt
+end
+
+function spawn_a_bunch_of_compute_heavy_tasks()
+    Threads.@sync begin
+        for i in 1:N_TASKS
+            Threads.@spawn begin
+                sum_of_sqrt()
+            end
+        end
+    end
+end
+
+function main()
+    spawn_a_task_waiting_on_channel()
+    spawn_a_bunch_of_compute_heavy_tasks()
+end
+
+Profile.@profile_walltime main()
+```
+
+After collecting a wall-time profile, we get the following flame graph:
+
+![Wall-time Profile Compute-Bound](./img/wall-time-profiler-compute-bound-example.png)()
+
+Notice how many of the samples contain `sum_of_sqrt`, which is the expensive compute function in our example.
+
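Each example in this section collects samples into the same global profile buffer. When running them back to back in a single session, it is worth clearing that buffer between runs so each flame graph reflects only one workload. A small sketch of this, not part of the original patch, using the stdlib's `Profile.clear()`:

```julia
# Sketch: drop previously collected samples before profiling the next workload.
using Profile

Profile.clear()                   # discard samples from earlier runs
Profile.@profile_walltime main()  # profile only the current workload
```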
+ +Let's consider this simple example: + +```Julia +using Base.Threads +using Profile +using PProf + +const N_SPAWNED_TASKS = (1 << 16) +const WAIT_TIME_NS = 100_000 + +function spawn_a_bunch_of_short_lived_tasks() + for i in 1:N_SPAWNED_TASKS + Threads.@spawn begin + # Do nothing + end + end +end + +function busywait() + t0 = time_ns() + while true + if time_ns() - t0 > WAIT_TIME_NS + break + end + end +end + +function main() + GC.enable(false) + spawn_a_bunch_of_short_lived_tasks() + for i in 1:N_SPAWNED_TASKS + busywait() + end + GC.enable(true) +end + +Profile.@profile_walltime main() +``` + +Notice that the tasks spawned in `spawn_a_bunch_of_short_lived_tasks` are extremely short-lived. Since these tasks constitute the majority in the system, we will likely miss capturing a backtrace for most sampled tasks. + +After collecting a wall-time profile, we obtain the following flame graph: + +![Task Sampling Failure](./img/task-sampling-failure.png)() + +The large number of samples from `failed_to_stop_thread_fun` confirms that we have a significant number of short-lived tasks in the system. + ## Memory allocation analysis One of the most common techniques to improve performance is to reduce memory allocation. Julia diff --git a/src/gc-stacks.c b/src/gc-stacks.c index a2d3862dc9501..a0ca2561c5cf9 100644 --- a/src/gc-stacks.c +++ b/src/gc-stacks.c @@ -296,6 +296,39 @@ void sweep_stack_pool_loop(void) JL_NOTSAFEPOINT jl_atomic_fetch_add(&gc_n_threads_sweeping_stacks, -1); } +// Builds a list of the live tasks. Racy: `live_tasks` can expand at any time. +arraylist_t *jl_get_all_tasks_arraylist(void) JL_NOTSAFEPOINT +{ + arraylist_t *tasks = (arraylist_t*)malloc_s(sizeof(arraylist_t)); + arraylist_new(tasks, 0); + size_t nthreads = jl_atomic_load_acquire(&jl_n_threads); + jl_ptls_t *allstates = jl_atomic_load_relaxed(&jl_all_tls_states); + for (size_t i = 0; i < nthreads; i++) { + // skip GC threads... 
+ if (gc_is_collector_thread(i)) { + continue; + } + jl_ptls_t ptls2 = allstates[i]; + if (ptls2 == NULL) { + continue; + } + jl_task_t *t = ptls2->root_task; + if (t->ctx.stkbuf != NULL) { + arraylist_push(tasks, t); + } + small_arraylist_t *live_tasks = &ptls2->gc_tls_common.heap.live_tasks; + size_t n = mtarraylist_length(live_tasks); + for (size_t i = 0; i < n; i++) { + jl_task_t *t = (jl_task_t*)mtarraylist_get(live_tasks, i); + assert(t != NULL); + if (t->ctx.stkbuf != NULL) { + arraylist_push(tasks, t); + } + } + } + return tasks; +} + JL_DLLEXPORT jl_array_t *jl_live_tasks(void) { size_t nthreads = jl_atomic_load_acquire(&jl_n_threads); diff --git a/src/gc-stock.c b/src/gc-stock.c index 541c5b4ecc5c2..3a2027f9190a7 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -1025,7 +1025,22 @@ void gc_sweep_wait_for_all_stacks(void) JL_NOTSAFEPOINT } } -void sweep_stack_pools(jl_ptls_t ptls) JL_NOTSAFEPOINT +void sweep_mtarraylist_buffers(void) JL_NOTSAFEPOINT +{ + for (int i = 0; i < gc_n_threads; i++) { + jl_ptls_t ptls = gc_all_tls_states[i]; + if (ptls == NULL) { + continue; + } + small_arraylist_t *buffers = &ptls->lazily_freed_mtarraylist_buffers; + void *buf; + while ((buf = small_arraylist_pop(buffers)) != NULL) { + free(buf); + } + } +} + +void sweep_stack_pools_and_mtarraylist_buffers(jl_ptls_t ptls) JL_NOTSAFEPOINT { // initialize ptls index for parallel sweeping of stack pools assert(gc_n_threads); @@ -1035,9 +1050,12 @@ void sweep_stack_pools(jl_ptls_t ptls) JL_NOTSAFEPOINT else jl_atomic_store_relaxed(&gc_stack_free_idx, stack_free_idx + 1); jl_atomic_store_release(&gc_ptls_sweep_idx, gc_n_threads - 1); // idx == gc_n_threads = release stacks to the OS so it's serial + uv_mutex_lock(&live_tasks_lock); gc_sweep_wake_all_stacks(ptls); sweep_stack_pool_loop(); gc_sweep_wait_for_all_stacks(); + sweep_mtarraylist_buffers(); + uv_mutex_unlock(&live_tasks_lock); } static void gc_pool_sync_nfree(jl_gc_pagemeta_t *pg, jl_taggedvalue_t *last) JL_NOTSAFEPOINT @@ -3084,7 +3102,7 @@ static int _jl_gc_collect(jl_ptls_t ptls, jl_gc_collection_t collection) current_sweep_full = sweep_full; sweep_weak_refs(); uint64_t stack_pool_time = jl_hrtime(); - sweep_stack_pools(ptls); + sweep_stack_pools_and_mtarraylist_buffers(ptls); stack_pool_time = jl_hrtime() - stack_pool_time; gc_num.total_stack_pool_sweep_time += stack_pool_time; gc_num.stack_pool_sweep_time = stack_pool_time; @@ -3453,6 +3471,8 @@ void jl_init_thread_heap(jl_ptls_t ptls) jl_atomic_store_relaxed(&q->bottom, 0); jl_atomic_store_relaxed(&q->array, wsa2); arraylist_new(&mq->reclaim_set, 32); + // Initialize `lazily_freed_mtarraylist_buffers` + small_arraylist_new(&ptls->lazily_freed_mtarraylist_buffers, 0); memset(&ptls->gc_tls_common.gc_num, 0, sizeof(ptls->gc_tls_common.gc_num)); jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, -(int64_t)gc_num.interval); diff --git a/src/init.c b/src/init.c index 413d4e8055e54..b3ca33344d258 100644 --- a/src/init.c +++ b/src/init.c @@ -744,6 +744,10 @@ JL_DLLEXPORT void julia_init(JL_IMAGE_SEARCH rel) // initialize symbol-table lock uv_mutex_init(&symtab_lock); + // initialize the live tasks lock + uv_mutex_init(&live_tasks_lock); + // initialize the profiler buffer lock + uv_mutex_init(&bt_data_prof_lock); // initialize backtraces jl_init_profile_lock(); diff --git a/src/julia_internal.h b/src/julia_internal.h index 8c4ee9fca36e0..ade5940f30687 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -211,6 +211,35 @@ JL_DLLEXPORT void jl_unlock_profile_wr(void) 
JL_NOTSAFEPOINT JL_NOTSAFEPOINT_LEA int jl_lock_stackwalk(void) JL_NOTSAFEPOINT JL_NOTSAFEPOINT_ENTER; void jl_unlock_stackwalk(int lockret) JL_NOTSAFEPOINT JL_NOTSAFEPOINT_LEAVE; +arraylist_t *jl_get_all_tasks_arraylist(void) JL_NOTSAFEPOINT; +typedef struct { + size_t bt_size; + int tid; +} jl_record_backtrace_result_t; +JL_DLLEXPORT jl_record_backtrace_result_t jl_record_backtrace(jl_task_t *t, struct _jl_bt_element_t *bt_data, + size_t max_bt_size, int all_tasks_profiler) JL_NOTSAFEPOINT; +extern volatile struct _jl_bt_element_t *profile_bt_data_prof; +extern volatile size_t profile_bt_size_max; +extern volatile size_t profile_bt_size_cur; +extern volatile int profile_running; +extern volatile int profile_all_tasks; +// Ensures that we can safely read the `live_tasks`field of every TLS when profiling. +// We want to avoid the case that a GC gets interleaved with `jl_profile_task` and shrinks +// the `live_tasks` array while we are reading it or frees tasks that are being profiled. +// Because of that, this lock must be held in `jl_profile_task` and `sweep_stack_pools_and_mtarraylist_buffers`. +extern uv_mutex_t live_tasks_lock; +// Ensures that we can safely write to `profile_bt_data_prof` and `profile_bt_size_cur`. +// We want to avoid the case that: +// - We start to profile a task very close to the profiling time window end. +// - The profiling time window ends and we start to read the profile data in a compute thread. +// - We write to the profile in a profiler thread while the compute thread is reading it. +// Locking discipline: `bt_data_prof_lock` must be held inside the scope of `live_tasks_lock`. +extern uv_mutex_t bt_data_prof_lock; +#define PROFILE_STATE_THREAD_NOT_SLEEPING (1) +#define PROFILE_STATE_THREAD_SLEEPING (2) +#define PROFILE_STATE_WALL_TIME_PROFILING (3) +void jl_profile_task(void); + // number of cycles since power-on static inline uint64_t cycleclock(void) JL_NOTSAFEPOINT { diff --git a/src/julia_threads.h b/src/julia_threads.h index 67da2978b4267..faa8ab9e0aaf4 100644 --- a/src/julia_threads.h +++ b/src/julia_threads.h @@ -157,6 +157,7 @@ typedef struct _jl_tls_states_t { int finalizers_inhibited; jl_gc_tls_states_t gc_tls; // this is very large, and the offset of the first member is baked into codegen jl_gc_tls_states_common_t gc_tls_common; // common tls for both GCs + small_arraylist_t lazily_freed_mtarraylist_buffers; volatile sig_atomic_t defer_signal; _Atomic(struct _jl_task_t*) current_task; struct _jl_task_t *next_task; diff --git a/src/mtarraylist.c b/src/mtarraylist.c index 8bad44797dab4..7af265a86ab63 100644 --- a/src/mtarraylist.c +++ b/src/mtarraylist.c @@ -37,7 +37,7 @@ static void mtarraylist_resizeto(small_mtarraylist_t *a, size_t len, size_t newl a->max = nm; if (olditems != (void*)&a->_space[0]) { jl_task_t *ct = jl_current_task; - jl_gc_add_quiescent(ct->ptls, (void**)olditems, free); + small_arraylist_push(&ct->ptls->lazily_freed_mtarraylist_buffers, olditems); } } } diff --git a/src/signal-handling.c b/src/signal-handling.c index ce7e8ba57af19..ff073cc82a0a5 100644 --- a/src/signal-handling.c +++ b/src/signal-handling.c @@ -18,46 +18,48 @@ extern "C" { #include // Profiler control variables -// Note: these "static" variables are also used in "signals-*.c" -static volatile jl_bt_element_t *bt_data_prof = NULL; -static volatile size_t bt_size_max = 0; -static volatile size_t bt_size_cur = 0; +uv_mutex_t live_tasks_lock; +uv_mutex_t bt_data_prof_lock; +volatile jl_bt_element_t *profile_bt_data_prof = NULL; +volatile size_t profile_bt_size_max 
= 0; +volatile size_t profile_bt_size_cur = 0; static volatile uint64_t nsecprof = 0; -static volatile int running = 0; -static const uint64_t GIGA = 1000000000ULL; +volatile int profile_running = 0; +volatile int profile_all_tasks = 0; +static const uint64_t GIGA = 1000000000ULL; // Timers to take samples at intervals JL_DLLEXPORT void jl_profile_stop_timer(void); -JL_DLLEXPORT int jl_profile_start_timer(void); +JL_DLLEXPORT int jl_profile_start_timer(uint8_t); /////////////////////// // Utility functions // /////////////////////// JL_DLLEXPORT int jl_profile_init(size_t maxsize, uint64_t delay_nsec) { - bt_size_max = maxsize; + profile_bt_size_max = maxsize; nsecprof = delay_nsec; - if (bt_data_prof != NULL) - free((void*)bt_data_prof); - bt_data_prof = (jl_bt_element_t*) calloc(maxsize, sizeof(jl_bt_element_t)); - if (bt_data_prof == NULL && maxsize > 0) + if (profile_bt_data_prof != NULL) + free((void*)profile_bt_data_prof); + profile_bt_data_prof = (jl_bt_element_t*) calloc(maxsize, sizeof(jl_bt_element_t)); + if (profile_bt_data_prof == NULL && maxsize > 0) return -1; - bt_size_cur = 0; + profile_bt_size_cur = 0; return 0; } JL_DLLEXPORT uint8_t *jl_profile_get_data(void) { - return (uint8_t*) bt_data_prof; + return (uint8_t*) profile_bt_data_prof; } JL_DLLEXPORT size_t jl_profile_len_data(void) { - return bt_size_cur; + return profile_bt_size_cur; } JL_DLLEXPORT size_t jl_profile_maxlen_data(void) { - return bt_size_max; + return profile_bt_size_max; } JL_DLLEXPORT uint64_t jl_profile_delay_nsec(void) @@ -67,12 +69,12 @@ JL_DLLEXPORT uint64_t jl_profile_delay_nsec(void) JL_DLLEXPORT void jl_profile_clear_data(void) { - bt_size_cur = 0; + profile_bt_size_cur = 0; } JL_DLLEXPORT int jl_profile_is_running(void) { - return running; + return profile_running; } // Any function that acquires this lock must be either a unmanaged thread @@ -184,7 +186,102 @@ JL_DLLEXPORT int jl_profile_is_buffer_full(void) // Declare buffer full if there isn't enough room to sample even just the // thread metadata and one max-sized frame. The `+ 6` is for the two block // terminator `0`'s plus the 4 metadata entries. 
- return bt_size_cur + ((JL_BT_MAX_ENTRY_SIZE + 1) + 6) > bt_size_max; + return profile_bt_size_cur + ((JL_BT_MAX_ENTRY_SIZE + 1) + 6) > profile_bt_size_max; +} + +NOINLINE int failed_to_sample_task_fun(jl_bt_element_t *bt_data, size_t maxsize, int skip) JL_NOTSAFEPOINT; +NOINLINE int failed_to_stop_thread_fun(jl_bt_element_t *bt_data, size_t maxsize, int skip) JL_NOTSAFEPOINT; + +#define PROFILE_TASK_DEBUG_FORCE_SAMPLING_FAILURE (0) +#define PROFILE_TASK_DEBUG_FORCE_STOP_THREAD_FAILURE (0) + +void jl_profile_task(void) +{ + if (jl_profile_is_buffer_full()) { + // Buffer full: Delete the timer + jl_profile_stop_timer(); + return; + } + + jl_task_t *t = NULL; + int got_mutex = 0; + if (uv_mutex_trylock(&live_tasks_lock) != 0) { + goto collect_backtrace; + } + got_mutex = 1; + + arraylist_t *tasks = jl_get_all_tasks_arraylist(); + uint64_t seed = jl_rand(); + const int n_max_random_attempts = 4; + // randomly select a task that is not done + for (int i = 0; i < n_max_random_attempts; i++) { + t = (jl_task_t*)tasks->items[cong(tasks->len, &seed)]; + assert(t == NULL || jl_is_task(t)); + if (t == NULL) { + continue; + } + int t_state = jl_atomic_load_relaxed(&t->_state); + if (t_state == JL_TASK_STATE_DONE) { + continue; + } + break; + } + arraylist_free(tasks); + free(tasks); + +collect_backtrace: + + uv_mutex_lock(&bt_data_prof_lock); + if (profile_running == 0) { + uv_mutex_unlock(&bt_data_prof_lock); + if (got_mutex) { + uv_mutex_unlock(&live_tasks_lock); + } + return; + } + + jl_record_backtrace_result_t r = {0, INT16_MAX}; + jl_bt_element_t *bt_data_prof = (jl_bt_element_t*)(profile_bt_data_prof + profile_bt_size_cur); + size_t bt_size_max = profile_bt_size_max - profile_bt_size_cur - 1; + if (t == NULL || PROFILE_TASK_DEBUG_FORCE_SAMPLING_FAILURE) { + // failed to find a task + r.bt_size = failed_to_sample_task_fun(bt_data_prof, bt_size_max, 0); + } + else { + if (!PROFILE_TASK_DEBUG_FORCE_STOP_THREAD_FAILURE) { + r = jl_record_backtrace(t, bt_data_prof, bt_size_max, 1); + } + // we failed to get a backtrace + if (r.bt_size == 0) { + r.bt_size = failed_to_stop_thread_fun(bt_data_prof, bt_size_max, 0); + } + } + + // update the profile buffer size + profile_bt_size_cur += r.bt_size; + + // store threadid but add 1 as 0 is preserved to indicate end of block + profile_bt_data_prof[profile_bt_size_cur++].uintptr = (uintptr_t)r.tid + 1; + + // store task id (never null) + profile_bt_data_prof[profile_bt_size_cur++].jlvalue = (jl_value_t*)t; + + // store cpu cycle clock + profile_bt_data_prof[profile_bt_size_cur++].uintptr = cycleclock(); + + // the thread profiler uses this block to record whether the thread is not sleeping (1) or sleeping (2) + // let's use a dummy value which is not 1 or 2 to + // indicate that we are profiling a task, and therefore, this block is not about the thread state + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 3; + + // Mark the end of this block with two 0's + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 0; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 0; + + uv_mutex_unlock(&bt_data_prof_lock); + if (got_mutex) { + uv_mutex_unlock(&live_tasks_lock); + } } static uint64_t jl_last_sigint_trigger = 0; diff --git a/src/signals-mach.c b/src/signals-mach.c index a939e4df71ae0..24508a8902d5e 100644 --- a/src/signals-mach.c +++ b/src/signals-mach.c @@ -724,6 +724,84 @@ void jl_unlock_stackwalk(int lockret) jl_unlock_profile_mach(1, lockret); } +// assumes holding `jl_lock_profile_mach` +void jl_profile_thread_mach(int tid) +{ + // if there 
is no space left, return early + if (jl_profile_is_buffer_full()) { + jl_profile_stop_timer(); + return; + } + if (_dyld_dlopen_atfork_prepare != NULL && _dyld_dlopen_atfork_parent != NULL) + _dyld_dlopen_atfork_prepare(); + if (_dyld_atfork_prepare != NULL && _dyld_atfork_parent != NULL) + _dyld_atfork_prepare(); // briefly acquire the dlsym lock + host_thread_state_t state; + int valid_thread = jl_thread_suspend_and_get_state2(tid, &state); + unw_context_t *uc = (unw_context_t*)&state; + if (_dyld_atfork_prepare != NULL && _dyld_atfork_parent != NULL) + _dyld_atfork_parent(); // quickly release the dlsym lock + if (_dyld_dlopen_atfork_prepare != NULL && _dyld_dlopen_atfork_parent != NULL) + _dyld_dlopen_atfork_parent(); + if (!valid_thread) + return; + if (profile_running) { +#ifdef LLVMLIBUNWIND + /* + * Unfortunately compact unwind info is incorrectly generated for quite a number of + * libraries by quite a large number of compilers. We can fall back to DWARF unwind info + * in some cases, but in quite a number of cases (especially libraries not compiled in debug + * mode, only the compact unwind info may be available). Even more unfortunately, there is no + * way to detect such bogus compact unwind info (other than noticing the resulting segfault). + * What we do here is ugly, but necessary until the compact unwind info situation improves. + * We try to use the compact unwind info and if that results in a segfault, we retry with DWARF info. + * Note that in a small number of cases this may result in bogus stack traces, but at least the topmost + * entry will always be correct, and the number of cases in which this is an issue is rather small. + * Other than that, this implementation is not incorrect as the other thread is paused while we are profiling + * and during stack unwinding we only ever read memory, but never write it. + */ + + forceDwarf = 0; + unw_getcontext(&profiler_uc); // will resume from this point if the next lines segfault at any point + + if (forceDwarf == 0) { + // Save the backtrace + profile_bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)profile_bt_data_prof + profile_bt_size_cur, profile_bt_size_max - profile_bt_size_cur - 1, uc, NULL); + } + else if (forceDwarf == 1) { + profile_bt_size_cur += rec_backtrace_ctx_dwarf((jl_bt_element_t*)profile_bt_data_prof + profile_bt_size_cur, profile_bt_size_max - profile_bt_size_cur - 1, uc, NULL); + } + else if (forceDwarf == -1) { + jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n"); + } + + forceDwarf = -2; +#else + profile_bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)profile_bt_data_prof + profile_bt_size_cur, profile_bt_size_max - profile_bt_size_cur - 1, uc, NULL); +#endif + jl_ptls_t ptls = jl_atomic_load_relaxed(&jl_all_tls_states)[tid]; + + // store threadid but add 1 as 0 is preserved to indicate end of block + profile_bt_data_prof[profile_bt_size_cur++].uintptr = ptls->tid + 1; + + // store task id (never null) + profile_bt_data_prof[profile_bt_size_cur++].jlvalue = (jl_value_t*)jl_atomic_load_relaxed(&ptls->current_task); + + // store cpu cycle clock + profile_bt_data_prof[profile_bt_size_cur++].uintptr = cycleclock(); + + // store whether thread is sleeping (don't ever encode a state as `0` since is preserved to indicate end of block) + int state = jl_atomic_load_relaxed(&ptls->sleep_check_state) == 0 ? 
PROFILE_STATE_THREAD_NOT_SLEEPING : PROFILE_STATE_THREAD_SLEEPING; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = state; + + // Mark the end of this block with two 0's + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 0; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 0; + } + // We're done! Resume the thread. + jl_thread_resume(tid); +} + void *mach_profile_listener(void *arg) { (void)arg; @@ -741,88 +819,21 @@ void *mach_profile_listener(void *arg) // sample each thread, round-robin style in reverse order // (so that thread zero gets notified last) int keymgr_locked = jl_lock_profile_mach(0); - int nthreads = jl_atomic_load_acquire(&jl_n_threads); - int *randperm = profile_get_randperm(nthreads); - for (int idx = nthreads; idx-- > 0; ) { - // Stop the threads in the random or reverse round-robin order. - int i = randperm[idx]; - // if there is no space left, break early - if (jl_profile_is_buffer_full()) { - jl_profile_stop_timer(); - break; - } - - if (_dyld_dlopen_atfork_prepare != NULL && _dyld_dlopen_atfork_parent != NULL) - _dyld_dlopen_atfork_prepare(); - if (_dyld_atfork_prepare != NULL && _dyld_atfork_parent != NULL) - _dyld_atfork_prepare(); // briefly acquire the dlsym lock - host_thread_state_t state; - int valid_thread = jl_thread_suspend_and_get_state2(i, &state); - unw_context_t *uc = (unw_context_t*)&state; - if (_dyld_atfork_prepare != NULL && _dyld_atfork_parent != NULL) - _dyld_atfork_parent(); // quickly release the dlsym lock - if (_dyld_dlopen_atfork_prepare != NULL && _dyld_dlopen_atfork_parent != NULL) - _dyld_dlopen_atfork_parent(); - if (!valid_thread) - continue; - if (running) { -#ifdef LLVMLIBUNWIND - /* - * Unfortunately compact unwind info is incorrectly generated for quite a number of - * libraries by quite a large number of compilers. We can fall back to DWARF unwind info - * in some cases, but in quite a number of cases (especially libraries not compiled in debug - * mode, only the compact unwind info may be available). Even more unfortunately, there is no - * way to detect such bogus compact unwind info (other than noticing the resulting segfault). - * What we do here is ugly, but necessary until the compact unwind info situation improves. - * We try to use the compact unwind info and if that results in a segfault, we retry with DWARF info. - * Note that in a small number of cases this may result in bogus stack traces, but at least the topmost - * entry will always be correct, and the number of cases in which this is an issue is rather small. - * Other than that, this implementation is not incorrect as the other thread is paused while we are profiling - * and during stack unwinding we only ever read memory, but never write it. 
- */ - - forceDwarf = 0; - unw_getcontext(&profiler_uc); // will resume from this point if the next lines segfault at any point - - if (forceDwarf == 0) { - // Save the backtrace - bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc, NULL); - } - else if (forceDwarf == 1) { - bt_size_cur += rec_backtrace_ctx_dwarf((jl_bt_element_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc, NULL); - } - else if (forceDwarf == -1) { - jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n"); - } - - forceDwarf = -2; -#else - bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc, NULL); -#endif - jl_ptls_t ptls = jl_atomic_load_relaxed(&jl_all_tls_states)[i]; - - // META_OFFSET_THREADID store threadid but add 1 as 0 is preserved to indicate end of block - bt_data_prof[bt_size_cur++].uintptr = ptls->tid + 1; - - // META_OFFSET_TASKID store task id (never null) - bt_data_prof[bt_size_cur++].jlvalue = (jl_value_t*)jl_atomic_load_relaxed(&ptls->current_task); - - // META_OFFSET_CPUCYCLECLOCK store cpu cycle clock - bt_data_prof[bt_size_cur++].uintptr = cycleclock(); - - // META_OFFSET_SLEEPSTATE store whether thread is sleeping but add 1 as 0 is preserved to indicate end of block - bt_data_prof[bt_size_cur++].uintptr = jl_atomic_load_relaxed(&ptls->sleep_check_state) + 1; - - // Mark the end of this block with two 0's - bt_data_prof[bt_size_cur++].uintptr = 0; - bt_data_prof[bt_size_cur++].uintptr = 0; + if (profile_all_tasks) { + // Don't take the stackwalk lock here since it's already taken in `jl_rec_backtrace` + jl_profile_task(); + } + else { + int *randperm = profile_get_randperm(nthreads); + for (int idx = nthreads; idx-- > 0; ) { + // Stop the threads in random order. + int i = randperm[idx]; + jl_profile_thread_mach(i); } - // We're done! Resume the thread. 
- jl_thread_resume(i); } jl_unlock_profile_mach(0, keymgr_locked); - if (running) { + if (profile_running) { jl_check_profile_autostop(); // Reset the alarm kern_return_t ret = clock_alarm(clk, TIME_RELATIVE, timerprof, profile_port); @@ -831,7 +842,8 @@ void *mach_profile_listener(void *arg) } } -JL_DLLEXPORT int jl_profile_start_timer(void) + +JL_DLLEXPORT int jl_profile_start_timer(uint8_t all_tasks) { kern_return_t ret; if (!profile_started) { @@ -860,7 +872,8 @@ JL_DLLEXPORT int jl_profile_start_timer(void) timerprof.tv_sec = nsecprof/GIGA; timerprof.tv_nsec = nsecprof%GIGA; - running = 1; + profile_running = 1; + profile_all_tasks = all_tasks; // ensure the alarm is running ret = clock_alarm(clk, TIME_RELATIVE, timerprof, profile_port); HANDLE_MACH_ERROR("clock_alarm", ret); @@ -870,5 +883,8 @@ JL_DLLEXPORT int jl_profile_start_timer(void) JL_DLLEXPORT void jl_profile_stop_timer(void) { - running = 0; + uv_mutex_lock(&bt_data_prof_lock); + profile_running = 0; + profile_all_tasks = 0; + uv_mutex_unlock(&bt_data_prof_lock); } diff --git a/src/signals-unix.c b/src/signals-unix.c index caf0e977929c5..301b875018c1c 100644 --- a/src/signals-unix.c +++ b/src/signals-unix.c @@ -9,6 +9,10 @@ #include #include #include + +#include "julia.h" +#include "julia_internal.h" + #if defined(_OS_DARWIN_) && !defined(MAP_ANONYMOUS) #define MAP_ANONYMOUS MAP_ANON #endif @@ -646,7 +650,7 @@ int timer_graceperiod_elapsed(void) static timer_t timerprof; static struct itimerspec itsprof; -JL_DLLEXPORT int jl_profile_start_timer(void) +JL_DLLEXPORT int jl_profile_start_timer(uint8_t all_tasks) { struct sigevent sigprof; @@ -655,10 +659,12 @@ JL_DLLEXPORT int jl_profile_start_timer(void) sigprof.sigev_notify = SIGEV_SIGNAL; sigprof.sigev_signo = SIGUSR1; sigprof.sigev_value.sival_ptr = &timerprof; - // Because SIGUSR1 is multipurpose, set `running` before so that we know that the first SIGUSR1 came from the timer - running = 1; + // Because SIGUSR1 is multipurpose, set `profile_running` before so that we know that the first SIGUSR1 came from the timer + profile_running = 1; + profile_all_tasks = all_tasks; if (timer_create(CLOCK_REALTIME, &sigprof, &timerprof) == -1) { - running = 0; + profile_running = 0; + profile_all_tasks = 0; return -2; } @@ -668,7 +674,8 @@ JL_DLLEXPORT int jl_profile_start_timer(void) itsprof.it_value.tv_sec = nsecprof / GIGA; itsprof.it_value.tv_nsec = nsecprof % GIGA; if (timer_settime(timerprof, 0, &itsprof, NULL) == -1) { - running = 0; + profile_running = 0; + profile_all_tasks = 0; return -3; } return 0; @@ -676,11 +683,13 @@ JL_DLLEXPORT int jl_profile_start_timer(void) JL_DLLEXPORT void jl_profile_stop_timer(void) { - if (running) { + uv_mutex_lock(&bt_data_prof_lock); + if (profile_running) { timer_delete(timerprof); last_timer_delete_time = jl_hrtime(); - running = 0; + profile_running = 0; } + uv_mutex_unlock(&bt_data_prof_lock); } #elif defined(__OpenBSD__) @@ -797,7 +806,7 @@ void trigger_profile_peek(void) jl_safe_printf("\n======================================================================================\n"); jl_safe_printf("Information request received. 
A stacktrace will print followed by a %.1f second profile\n", profile_peek_duration); jl_safe_printf("======================================================================================\n"); - if (bt_size_max == 0){ + if (profile_bt_size_max == 0){ // If the buffer hasn't been initialized, initialize with default size // Keep these values synchronized with Profile.default_init() if (jl_profile_init(10000000, 1000000) == -1) { @@ -805,13 +814,62 @@ void trigger_profile_peek(void) return; } } - bt_size_cur = 0; // clear profile buffer - if (jl_profile_start_timer() < 0) + profile_bt_size_cur = 0; // clear profile buffer + if (jl_profile_start_timer(0) < 0) jl_safe_printf("ERROR: Could not start profile timer\n"); else profile_autostop_time = jl_hrtime() + (profile_peek_duration * 1e9); } +// assumes holding `jl_lock_stackwalk` +void jl_profile_thread_unix(int tid, bt_context_t *signal_context) +{ + if (jl_profile_is_buffer_full()) { + // Buffer full: Delete the timer + jl_profile_stop_timer(); + return; + } + // notify thread to stop + if (!jl_thread_suspend_and_get_state(tid, 1, signal_context)) + return; + // unwinding can fail, so keep track of the current state + // and restore from the SEGV handler if anything happens. + jl_jmp_buf *old_buf = jl_get_safe_restore(); + jl_jmp_buf buf; + + jl_set_safe_restore(&buf); + if (jl_setjmp(buf, 0)) { + jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n"); + } else { + // Get backtrace data + profile_bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)profile_bt_data_prof + profile_bt_size_cur, + profile_bt_size_max - profile_bt_size_cur - 1, signal_context, NULL); + } + jl_set_safe_restore(old_buf); + + jl_ptls_t ptls2 = jl_atomic_load_relaxed(&jl_all_tls_states)[tid]; + + // store threadid but add 1 as 0 is preserved to indicate end of block + profile_bt_data_prof[profile_bt_size_cur++].uintptr = ptls2->tid + 1; + + // store task id (never null) + profile_bt_data_prof[profile_bt_size_cur++].jlvalue = (jl_value_t*)jl_atomic_load_relaxed(&ptls2->current_task); + + // store cpu cycle clock + profile_bt_data_prof[profile_bt_size_cur++].uintptr = cycleclock(); + + // store whether thread is sleeping (don't ever encode a state as `0` since is preserved to indicate end of block) + int state = jl_atomic_load_relaxed(&ptls2->sleep_check_state) == 0 ? 
PROFILE_STATE_THREAD_NOT_SLEEPING : PROFILE_STATE_THREAD_SLEEPING; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = state; + + // Mark the end of this block with two 0's + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 0; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 0; + + // notify thread to resume + jl_thread_resume(tid); +} + static void *signal_listener(void *arg) { static jl_bt_element_t bt_data[JL_MAX_BT_SIZE + 1]; @@ -911,13 +969,13 @@ static void *signal_listener(void *arg) int doexit = critical; #ifdef SIGINFO if (sig == SIGINFO) { - if (running != 1) + if (profile_running != 1) trigger_profile_peek(); doexit = 0; } #else if (sig == SIGUSR1) { - if (running != 1 && timer_graceperiod_elapsed()) + if (profile_running != 1 && timer_graceperiod_elapsed()) trigger_profile_peek(); doexit = 0; } @@ -951,78 +1009,46 @@ static void *signal_listener(void *arg) bt_size = 0; #if !defined(JL_DISABLE_LIBUNWIND) bt_context_t signal_context; - // sample each thread, round-robin style in reverse order - // (so that thread zero gets notified last) - if (critical || profile) { + if (critical) { int lockret = jl_lock_stackwalk(); - int *randperm; - if (profile) - randperm = profile_get_randperm(nthreads); - for (int idx = nthreads; idx-- > 0; ) { - // Stop the threads in the random or reverse round-robin order. - int i = profile ? randperm[idx] : idx; + // sample each thread, round-robin style in reverse order + // (so that thread zero gets notified last) + for (int i = nthreads; i-- > 0; ) { // notify thread to stop if (!jl_thread_suspend_and_get_state(i, 1, &signal_context)) continue; // do backtrace on thread contexts for critical signals // this part must be signal-handler safe - if (critical) { - bt_size += rec_backtrace_ctx(bt_data + bt_size, - JL_MAX_BT_SIZE / nthreads - 1, - &signal_context, NULL); - bt_data[bt_size++].uintptr = 0; - } - - // do backtrace for profiler - if (profile && running) { - if (jl_profile_is_buffer_full()) { - // Buffer full: Delete the timer - jl_profile_stop_timer(); - } - else { - // unwinding can fail, so keep track of the current state - // and restore from the SEGV handler if anything happens. 
- jl_jmp_buf *old_buf = jl_get_safe_restore(); - jl_jmp_buf buf; - - jl_set_safe_restore(&buf); - if (jl_setjmp(buf, 0)) { - jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n"); - } else { - // Get backtrace data - bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)bt_data_prof + bt_size_cur, - bt_size_max - bt_size_cur - 1, &signal_context, NULL); - } - jl_set_safe_restore(old_buf); - - jl_ptls_t ptls2 = jl_atomic_load_relaxed(&jl_all_tls_states)[i]; - - // META_OFFSET_THREADID store threadid but add 1 as 0 is preserved to indicate end of block - bt_data_prof[bt_size_cur++].uintptr = ptls2->tid + 1; - - // META_OFFSET_TASKID store task id (never null) - bt_data_prof[bt_size_cur++].jlvalue = (jl_value_t*)jl_atomic_load_relaxed(&ptls2->current_task); - - // META_OFFSET_CPUCYCLECLOCK store cpu cycle clock - bt_data_prof[bt_size_cur++].uintptr = cycleclock(); - - // META_OFFSET_SLEEPSTATE store whether thread is sleeping but add 1 as 0 is preserved to indicate end of block - bt_data_prof[bt_size_cur++].uintptr = jl_atomic_load_relaxed(&ptls2->sleep_check_state) + 1; - - // Mark the end of this block with two 0's - bt_data_prof[bt_size_cur++].uintptr = 0; - bt_data_prof[bt_size_cur++].uintptr = 0; - } - } - - // notify thread to resume + bt_size += rec_backtrace_ctx(bt_data + bt_size, + JL_MAX_BT_SIZE / nthreads - 1, + &signal_context, NULL); + bt_data[bt_size++].uintptr = 0; jl_thread_resume(i); } jl_unlock_stackwalk(lockret); } + else if (profile) { + if (profile_all_tasks) { + // Don't take the stackwalk lock here since it's already taken in `jl_rec_backtrace` + jl_profile_task(); + } + else { + int lockret = jl_lock_stackwalk(); + int *randperm = profile_get_randperm(nthreads); + for (int idx = nthreads; idx-- > 0; ) { + // Stop the threads in the random order. + int i = randperm[idx]; + // do backtrace for profiler + if (profile_running) { + jl_profile_thread_unix(i, &signal_context); + } + } + jl_unlock_stackwalk(lockret); + } + } #ifndef HAVE_MACH - if (profile && running) { + if (profile_running) { jl_check_profile_autostop(); #if defined(HAVE_TIMER) timer_settime(timerprof, 0, &itsprof, NULL); diff --git a/src/signals-win.c b/src/signals-win.c index b5f8dd8bd79d9..2a594bc92b9b7 100644 --- a/src/signals-win.c +++ b/src/signals-win.c @@ -404,12 +404,16 @@ static DWORD WINAPI profile_bt( LPVOID lparam ) while (1) { DWORD timeout_ms = nsecprof / (GIGA / 1000); Sleep(timeout_ms > 0 ? 
timeout_ms : 1); - if (running) { + if (profile_running) { if (jl_profile_is_buffer_full()) { jl_profile_stop_timer(); // does not change the thread state SuspendThread(GetCurrentThread()); continue; } + else if (profile_all_tasks) { + // Don't take the stackwalk lock here since it's already taken in `jl_rec_backtrace` + jl_profile_task(); + } else { // TODO: bring this up to parity with other OS by adding loop over tid here int lockret = jl_lock_stackwalk(); @@ -421,26 +425,27 @@ static DWORD WINAPI profile_bt( LPVOID lparam ) break; } // Get backtrace data - bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)bt_data_prof + bt_size_cur, - bt_size_max - bt_size_cur - 1, &ctxThread, NULL); + profile_bt_size_cur += rec_backtrace_ctx((jl_bt_element_t*)profile_bt_data_prof + profile_bt_size_cur, + profile_bt_size_max - profile_bt_size_cur - 1, &ctxThread, NULL); jl_ptls_t ptls = jl_atomic_load_relaxed(&jl_all_tls_states)[0]; // given only profiling hMainThread // META_OFFSET_THREADID store threadid but add 1 as 0 is preserved to indicate end of block - bt_data_prof[bt_size_cur++].uintptr = ptls->tid + 1; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = ptls->tid + 1; // META_OFFSET_TASKID store task id (never null) - bt_data_prof[bt_size_cur++].jlvalue = (jl_value_t*)jl_atomic_load_relaxed(&ptls->current_task); + profile_bt_data_prof[profile_bt_size_cur++].jlvalue = (jl_value_t*)jl_atomic_load_relaxed(&ptls->current_task); // META_OFFSET_CPUCYCLECLOCK store cpu cycle clock - bt_data_prof[bt_size_cur++].uintptr = cycleclock(); + profile_bt_data_prof[profile_bt_size_cur++].uintptr = cycleclock(); - // META_OFFSET_SLEEPSTATE store whether thread is sleeping but add 1 as 0 is preserved to indicate end of block - bt_data_prof[bt_size_cur++].uintptr = jl_atomic_load_relaxed(&ptls->sleep_check_state) + 1; + // store whether thread is sleeping (don't ever encode a state as `0` since is preserved to indicate end of block) + int state = jl_atomic_load_relaxed(&ptls->sleep_check_state) == 0 ? PROFILE_STATE_THREAD_NOT_SLEEPING : PROFILE_STATE_THREAD_SLEEPING; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = state; // Mark the end of this block with two 0's - bt_data_prof[bt_size_cur++].uintptr = 0; - bt_data_prof[bt_size_cur++].uintptr = 0; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 0; + profile_bt_data_prof[profile_bt_size_cur++].uintptr = 0; jl_unlock_stackwalk(lockret); jl_thread_resume(0); jl_check_profile_autostop(); @@ -455,7 +460,7 @@ static DWORD WINAPI profile_bt( LPVOID lparam ) static volatile TIMECAPS timecaps; -JL_DLLEXPORT int jl_profile_start_timer(void) +JL_DLLEXPORT int jl_profile_start_timer(uint8_t all_tasks) { if (hBtThread == NULL) { @@ -483,20 +488,24 @@ JL_DLLEXPORT int jl_profile_start_timer(void) return -2; } } - if (running == 0) { + if (profile_running == 0) { // Failure to change the timer resolution is not fatal. However, it is important to // ensure that the timeBeginPeriod/timeEndPeriod is paired. 
if (TIMERR_NOERROR != timeBeginPeriod(timecaps.wPeriodMin)) timecaps.wPeriodMin = 0; } - running = 1; // set `running` finally + profile_all_tasks = all_tasks; + profile_running = 1; // set `profile_running` finally return 0; } JL_DLLEXPORT void jl_profile_stop_timer(void) { - if (running && timecaps.wPeriodMin) + uv_mutex_lock(&bt_data_prof_lock); + if (profile_running && timecaps.wPeriodMin) timeEndPeriod(timecaps.wPeriodMin); - running = 0; + profile_running = 0; + profile_all_tasks = 0; + uv_mutex_unlock(&bt_data_prof_lock); } void jl_install_default_signal_handlers(void) diff --git a/src/stackwalk.c b/src/stackwalk.c index 770daa8bf17a6..251e408c7fd2d 100644 --- a/src/stackwalk.c +++ b/src/stackwalk.c @@ -208,7 +208,7 @@ NOINLINE size_t rec_backtrace_ctx(jl_bt_element_t *bt_data, size_t maxsize, // // The first `skip` frames are omitted, in addition to omitting the frame from // `rec_backtrace` itself. -NOINLINE size_t rec_backtrace(jl_bt_element_t *bt_data, size_t maxsize, int skip) +NOINLINE size_t rec_backtrace(jl_bt_element_t *bt_data, size_t maxsize, int skip) JL_NOTSAFEPOINT { bt_context_t context; memset(&context, 0, sizeof(context)); @@ -224,6 +224,24 @@ NOINLINE size_t rec_backtrace(jl_bt_element_t *bt_data, size_t maxsize, int skip return bt_size; } +NOINLINE int failed_to_sample_task_fun(jl_bt_element_t *bt_data, size_t maxsize, int skip) JL_NOTSAFEPOINT +{ + if (maxsize < 1) { + return 0; + } + bt_data[0].uintptr = (uintptr_t) &failed_to_sample_task_fun; + return 1; +} + +NOINLINE int failed_to_stop_thread_fun(jl_bt_element_t *bt_data, size_t maxsize, int skip) JL_NOTSAFEPOINT +{ + if (maxsize < 1) { + return 0; + } + bt_data[0].uintptr = (uintptr_t) &failed_to_stop_thread_fun; + return 1; +} + static jl_value_t *array_ptr_void_type JL_ALWAYS_LEAFTYPE = NULL; // Return backtrace information as an svec of (bt1, bt2, [sp]) // @@ -1225,24 +1243,34 @@ return 0; #endif } -JL_DLLEXPORT size_t jl_record_backtrace(jl_task_t *t, jl_bt_element_t *bt_data, size_t max_bt_size) JL_NOTSAFEPOINT +JL_DLLEXPORT jl_record_backtrace_result_t jl_record_backtrace(jl_task_t *t, jl_bt_element_t *bt_data, size_t max_bt_size, int all_tasks_profiler) JL_NOTSAFEPOINT { - jl_task_t *ct = jl_current_task; - jl_ptls_t ptls = ct->ptls; + int16_t tid = INT16_MAX; + jl_record_backtrace_result_t result = {0, tid}; + jl_task_t *ct = NULL; + jl_ptls_t ptls = NULL; + if (!all_tasks_profiler) { + ct = jl_current_task; + ptls = ct->ptls; + ptls->bt_size = 0; + tid = ptls->tid; + } if (t == ct) { - return rec_backtrace(bt_data, max_bt_size, 0); + result.bt_size = rec_backtrace(bt_data, max_bt_size, 0); + result.tid = tid; + return result; } bt_context_t *context = NULL; bt_context_t c; int16_t old; - for (old = -1; !jl_atomic_cmpswap(&t->tid, &old, ptls->tid) && old != ptls->tid; old = -1) { + for (old = -1; !jl_atomic_cmpswap(&t->tid, &old, tid) && old != tid; old = -1) { int lockret = jl_lock_stackwalk(); // if this task is already running somewhere, we need to stop the thread it is running on and query its state if (!jl_thread_suspend_and_get_state(old, 1, &c)) { jl_unlock_stackwalk(lockret); if (jl_atomic_load_relaxed(&t->tid) != old) continue; - return 0; + return result; } jl_unlock_stackwalk(lockret); if (jl_atomic_load_relaxed(&t->tid) == old) { @@ -1277,13 +1305,16 @@ JL_DLLEXPORT size_t jl_record_backtrace(jl_task_t *t, jl_bt_element_t *bt_data, #endif } size_t bt_size = 0; - if (context) - bt_size = rec_backtrace_ctx(bt_data, max_bt_size, context, t->gcstack); + if (context) { + bt_size = 
rec_backtrace_ctx(bt_data, max_bt_size, context, all_tasks_profiler ? NULL : t->gcstack); + } if (old == -1) jl_atomic_store_relaxed(&t->tid, old); - else if (old != ptls->tid) + else if (old != tid) jl_thread_resume(old); - return bt_size; + result.bt_size = bt_size; + result.tid = old; + return result; } //-------------------------------------------------- @@ -1317,7 +1348,8 @@ JL_DLLEXPORT void jlbacktracet(jl_task_t *t) JL_NOTSAFEPOINT jl_ptls_t ptls = ct->ptls; ptls->bt_size = 0; jl_bt_element_t *bt_data = ptls->bt_data; - size_t bt_size = jl_record_backtrace(t, bt_data, JL_MAX_BT_SIZE); + jl_record_backtrace_result_t r = jl_record_backtrace(t, bt_data, JL_MAX_BT_SIZE, 0); + size_t bt_size = r.bt_size; size_t i; for (i = 0; i < bt_size; i += jl_bt_entry_size(bt_data + i)) { jl_print_bt_entry_codeloc(bt_data + i); @@ -1331,8 +1363,6 @@ JL_DLLEXPORT void jl_print_backtrace(void) JL_NOTSAFEPOINT jlbacktrace(); } -extern int gc_first_tid; - // Print backtraces for all live tasks, for all threads, to jl_safe_printf stderr JL_DLLEXPORT void jl_print_task_backtraces(int show_done) JL_NOTSAFEPOINT { diff --git a/stdlib/Profile/src/Profile.jl b/stdlib/Profile/src/Profile.jl index b753c9ca88f24..bea8f288937d0 100644 --- a/stdlib/Profile/src/Profile.jl +++ b/stdlib/Profile/src/Profile.jl @@ -23,7 +23,7 @@ Profiling support. module Profile global print -export @profile +export @profile, @profile_walltime public clear, print, fetch, @@ -63,6 +63,28 @@ macro profile(ex) end end +""" + @profile_walltime + +`@profile_walltime ` runs your expression while taking periodic backtraces of a sample of all live tasks (both running and not running). +These are appended to an internal buffer of backtraces. + +It can be configured via `Profile.init`, same as the `Profile.@profile`, and that you can't use `@profile` simultaneously with `@profile_walltime`. + +As mentioned above, since this tool sample not only running tasks, but also sleeping tasks and tasks performing IO, +it can be used to diagnose performance issues such as lock contention, IO bottlenecks, and other issues that are not visible in the CPU profile. +""" +macro profile_walltime(ex) + return quote + try + start_timer(true) + $(esc(ex)) + finally + stop_timer() + end + end +end + # An internal function called to show the report after an information request (SIGINFO or SIGUSR1). function _peek_report() iob = Base.AnnotatedIOBuffer() @@ -403,9 +425,10 @@ end function has_meta(data) for i in 6:length(data) - data[i] == 0 || continue # first block end null - data[i - 1] == 0 || continue # second block end null - data[i - META_OFFSET_SLEEPSTATE] in 1:2 || continue + data[i] == 0 || continue # first block end null + data[i - 1] == 0 || continue # second block end null + data[i - META_OFFSET_SLEEPSTATE] in 1:3 || continue # 1 for not sleeping, 2 for sleeping, 3 for task profiler fake state + # See definition in `src/julia_internal.h` data[i - META_OFFSET_CPUCYCLECLOCK] != 0 || continue data[i - META_OFFSET_TASKID] != 0 || continue data[i - META_OFFSET_THREADID] != 0 || continue @@ -608,9 +631,9 @@ Julia, and examine the resulting `*.mem` files. 
clear_malloc_data() = ccall(:jl_clear_malloc_data, Cvoid, ()) # C wrappers -function start_timer() +function start_timer(all_tasks::Bool=false) check_init() # if the profile buffer hasn't been initialized, initialize with default size - status = ccall(:jl_profile_start_timer, Cint, ()) + status = ccall(:jl_profile_start_timer, Cint, (Bool,), all_tasks) if status < 0 error(error_codes[status]) end @@ -722,12 +745,16 @@ function parse_flat(::Type{T}, data::Vector{UInt64}, lidict::Union{LineInfoDict, startframe = length(data) skip = false nsleeping = 0 + is_task_profile = false for i in startframe:-1:1 (startframe - 1) >= i >= (startframe - (nmeta + 1)) && continue # skip metadata (its read ahead below) and extra block end NULL IP ip = data[i] if is_block_end(data, i) # read metadata - thread_sleeping = data[i - META_OFFSET_SLEEPSTATE] - 1 # subtract 1 as state is incremented to avoid being equal to 0 + thread_sleeping_state = data[i - META_OFFSET_SLEEPSTATE] - 1 # subtract 1 as state is incremented to avoid being equal to 0 + if thread_sleeping_state == 2 + is_task_profile = true + end # cpu_cycle_clock = data[i - META_OFFSET_CPUCYCLECLOCK] taskid = data[i - META_OFFSET_TASKID] threadid = data[i - META_OFFSET_THREADID] @@ -735,7 +762,7 @@ function parse_flat(::Type{T}, data::Vector{UInt64}, lidict::Union{LineInfoDict, skip = true continue end - if thread_sleeping == 1 + if thread_sleeping_state == 1 nsleeping += 1 end skip = false @@ -769,14 +796,14 @@ function parse_flat(::Type{T}, data::Vector{UInt64}, lidict::Union{LineInfoDict, end end @assert length(lilist) == length(n) == length(m) == length(lilist_idx) - return (lilist, n, m, totalshots, nsleeping) + return (lilist, n, m, totalshots, nsleeping, is_task_profile) end const FileNameMap = Dict{Symbol,Tuple{String,String,String}} function flat(io::IO, data::Vector{UInt64}, lidict::Union{LineInfoDict, LineInfoFlatDict}, cols::Int, fmt::ProfileFormat, threads::Union{Int,AbstractVector{Int}}, tasks::Union{UInt,AbstractVector{UInt}}, is_subsection::Bool) - lilist, n, m, totalshots, nsleeping = parse_flat(fmt.combine ? StackFrame : UInt64, data, lidict, fmt.C, threads, tasks) + lilist, n, m, totalshots, nsleeping, is_task_profile = parse_flat(fmt.combine ? StackFrame : UInt64, data, lidict, fmt.C, threads, tasks) if false # optional: drop the "non-interpretable" ones keep = map(frame -> frame != UNKNOWN && frame.line != 0, lilist) lilist = lilist[keep] @@ -796,11 +823,15 @@ function flat(io::IO, data::Vector{UInt64}, lidict::Union{LineInfoDict, LineInfo return true end is_subsection || print_flat(io, lilist, n, m, cols, filenamemap, fmt) - Base.print(io, "Total snapshots: ", totalshots, ". Utilization: ", round(Int, util_perc), "%") + if is_task_profile + Base.print(io, "Total snapshots: ", totalshots, "\n") + else + Base.print(io, "Total snapshots: ", totalshots, ". Utilization: ", round(Int, util_perc), "%") + end if is_subsection println(io) print_flat(io, lilist, n, m, cols, filenamemap, fmt) - else + elseif !is_task_profile Base.print(io, " across all threads and tasks. 
Use the `groupby` kwarg to break down by thread and/or task.\n") end return false @@ -1034,12 +1065,16 @@ function tree!(root::StackFrameTree{T}, all::Vector{UInt64}, lidict::Union{LineI startframe = length(all) skip = false nsleeping = 0 + is_task_profile = false for i in startframe:-1:1 (startframe - 1) >= i >= (startframe - (nmeta + 1)) && continue # skip metadata (it's read ahead below) and extra block end NULL IP ip = all[i] if is_block_end(all, i) # read metadata - thread_sleeping = all[i - META_OFFSET_SLEEPSTATE] - 1 # subtract 1 as state is incremented to avoid being equal to 0 + thread_sleeping_state = all[i - META_OFFSET_SLEEPSTATE] - 1 # subtract 1 as state is incremented to avoid being equal to 0 + if thread_sleeping_state == 2 + is_task_profile = true + end # cpu_cycle_clock = all[i - META_OFFSET_CPUCYCLECLOCK] taskid = all[i - META_OFFSET_TASKID] threadid = all[i - META_OFFSET_THREADID] @@ -1048,7 +1083,7 @@ function tree!(root::StackFrameTree{T}, all::Vector{UInt64}, lidict::Union{LineI skip = true continue end - if thread_sleeping == 1 + if thread_sleeping_state == 1 nsleeping += 1 end skip = false @@ -1154,7 +1189,7 @@ function tree!(root::StackFrameTree{T}, all::Vector{UInt64}, lidict::Union{LineI nothing end cleanup!(root) - return root, nsleeping + return root, nsleeping, is_task_profile end function maxstats(root::StackFrameTree) @@ -1223,9 +1258,9 @@ end function tree(io::IO, data::Vector{UInt64}, lidict::Union{LineInfoFlatDict, LineInfoDict}, cols::Int, fmt::ProfileFormat, threads::Union{Int,AbstractVector{Int}}, tasks::Union{UInt,AbstractVector{UInt}}, is_subsection::Bool) if fmt.combine - root, nsleeping = tree!(StackFrameTree{StackFrame}(), data, lidict, fmt.C, fmt.recur, threads, tasks) + root, nsleeping, is_task_profile = tree!(StackFrameTree{StackFrame}(), data, lidict, fmt.C, fmt.recur, threads, tasks) else - root, nsleeping = tree!(StackFrameTree{UInt64}(), data, lidict, fmt.C, fmt.recur, threads, tasks) + root, nsleeping, is_task_profile = tree!(StackFrameTree{UInt64}(), data, lidict, fmt.C, fmt.recur, threads, tasks) end util_perc = (1 - (nsleeping / root.count)) * 100 is_subsection || print_tree(io, root, cols, fmt, is_subsection) @@ -1239,11 +1274,15 @@ function tree(io::IO, data::Vector{UInt64}, lidict::Union{LineInfoFlatDict, Line end return true end - Base.print(io, "Total snapshots: ", root.count, ". Utilization: ", round(Int, util_perc), "%") + if is_task_profile + Base.print(io, "Total snapshots: ", root.count, "\n") + else + Base.print(io, "Total snapshots: ", root.count, ". Utilization: ", round(Int, util_perc), "%") + end if is_subsection Base.println(io) print_tree(io, root, cols, fmt, is_subsection) - else + elseif !is_task_profile Base.print(io, " across all threads and tasks. 
Use the `groupby` kwarg to break down by thread and/or task.\n") end return false diff --git a/stdlib/Profile/test/runtests.jl b/stdlib/Profile/test/runtests.jl index 1769cbd12da3e..352d07086f25b 100644 --- a/stdlib/Profile/test/runtests.jl +++ b/stdlib/Profile/test/runtests.jl @@ -25,19 +25,64 @@ end end end -busywait(0, 0) # compile -@profile busywait(1, 20) +@noinline function sleeping_tasks(ch::Channel) + for _ in 1:100 + Threads.@spawn take!(ch) + end + sleep(10) +end -let r = Profile.retrieve() - mktemp() do path, io - serialize(io, r) - close(io) - open(path) do io - @test isa(deserialize(io), Tuple{Vector{UInt},Dict{UInt64,Vector{Base.StackTraces.StackFrame}}}) +function test_profile() + let r = Profile.retrieve() + mktemp() do path, io + serialize(io, r) + close(io) + open(path) do io + @test isa(deserialize(io), Tuple{Vector{UInt},Dict{UInt64,Vector{Base.StackTraces.StackFrame}}}) + end + end + end +end + +function test_has_task_profiler_sample_in_buffer() + let r = Profile.retrieve() + mktemp() do path, io + serialize(io, r) + close(io) + open(path) do io + all = deserialize(io) + data = all[1] + startframe = length(data) + for i in startframe:-1:1 + (startframe - 1) >= i >= (startframe - (Profile.nmeta + 1)) && continue # skip metadata (its read ahead below) and extra block end NULL IP + if Profile.is_block_end(data, i) + thread_sleeping_state = data[i - Profile.META_OFFSET_SLEEPSTATE] + @test thread_sleeping_state == 0x3 + end + end + end end end end +busywait(0, 0) # compile + +@profile_walltime busywait(1, 20) +test_profile() + +Profile.clear() + +ch = Channel(1) +@profile_walltime sleeping_tasks(ch) +test_profile() +close(ch) +test_has_task_profiler_sample_in_buffer() + +Profile.clear() + +@profile busywait(1, 20) +test_profile() + # test printing options for options in ((format=:tree, C=true), (format=:tree, maxdepth=2), From ee09ae70d9f4a04ed8b745f36d3c5d9d578d2887 Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Sat, 26 Oct 2024 03:01:53 +0200 Subject: [PATCH 318/537] recommend explicit `using Foo: Foo, ...` in package code (was: "using considered harmful") (#42080) I feel we are heading up against a "`using` crisis" where any new feature that is implemented by exporting a new name (either in Base or a package) becomes a breaking change. This is already happening (https://github.com/JuliaGPU/CUDA.jl/pull/1097, https://github.com/JuliaWeb/HTTP.jl/pull/745) and as projects get bigger and more names are exported, the likelihood of this rapidly increases. The flaw in `using Foo` is fundamental in that you cannot lexically see where a name comes from so when two packages export the same name, you are screwed. Any code that relies on `using Foo` and then using an exported name from `Foo` is vulnerable to another dependency exporting the same name. Therefore, I think we should start to strongly discourage the use of `using Foo` and only recommend `using Foo` for ephemeral work (e.g. REPL work). --------- Co-authored-by: Dilum Aluthge Co-authored-by: Mason Protter Co-authored-by: Max Horn Co-authored-by: Matt Bauman Co-authored-by: Alex Arslan Co-authored-by: Ian Butterworth Co-authored-by: Neven Sajko --- base/docs/basedocs.jl | 8 ++++++++ doc/src/manual/modules.md | 9 ++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index b080bf51e5e98..7441f5b993bf4 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -37,6 +37,14 @@ kw"help", kw"Julia", kw"julia", kw"" available for direct use. 
Names can also be used via dot syntax (e.g. `Foo.foo` to access the name `foo`), whether they are `export`ed or not. See the [manual section about modules](@ref modules) for details. + +!!! note + When two or more packages/modules export a name and that name does not refer to the + same thing in each of the packages, and the packages are loaded via `using` without + an explicit list of names, it is an error to reference that name without qualification. + It is thus recommended that code intended to be forward-compatible with future versions + of its dependencies and of Julia, e.g., code in released packages, list the names it + uses from each loaded package, e.g., `using Foo: Foo, f` rather than `using Foo`. """ kw"using" diff --git a/doc/src/manual/modules.md b/doc/src/manual/modules.md index b4f0fd78c816a..cf24474916bef 100644 --- a/doc/src/manual/modules.md +++ b/doc/src/manual/modules.md @@ -116,7 +116,7 @@ VERSION >= v"1.11.0-DEV.469" && eval(Meta.parse("public a, b, c")) ### Standalone `using` and `import` -Possibly the most common way of loading a module is `using ModuleName`. This [loads](@ref +For interactive use, the most common way of loading a module is `using ModuleName`. This [loads](@ref code-loading) the code associated with `ModuleName`, and brings 1. the module name @@ -172,6 +172,13 @@ Importantly, the module name `NiceStuff` will *not* be in the namespace. If you julia> using .NiceStuff: nice, DOG, NiceStuff ``` +When two or more packages/modules export a name and that name does not refer to the +same thing in each of the packages, and the packages are loaded via `using` without +an explicit list of names, it is an error to reference that name without qualification. +It is thus recommended that code intended to be forward-compatible with future versions +of its dependencies and of Julia, e.g., code in released packages, list the names it +uses from each loaded package, e.g., `using Foo: Foo, f` rather than `using Foo`. + Julia has two forms for seemingly the same thing because only `import ModuleName: f` allows adding methods to `f` *without a module path*. That is to say, the following example will give an error: From 446d20fb001fdf304cbe506d5fe0cfc9a7b88178 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sun, 27 Oct 2024 07:46:08 +0530 Subject: [PATCH 319/537] Change some hardcoded loop ranges to axes in dense linalg functions (#56348) These should be safer in general, and are also easier to reason about. 
--- stdlib/LinearAlgebra/src/dense.jl | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index d8f2513f5bfc8..19fc7e9d422a8 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -18,14 +18,14 @@ function isone(A::AbstractMatrix) m, n = size(A) m != n && return false # only square matrices can satisfy x == one(x) if sizeof(A) < ISONE_CUTOFF - _isone_triacheck(A, m) + _isone_triacheck(A) else - _isone_cachefriendly(A, m) + _isone_cachefriendly(A) end end -@inline function _isone_triacheck(A::AbstractMatrix, m::Int) - @inbounds for i in 1:m, j in i:m +@inline function _isone_triacheck(A::AbstractMatrix) + @inbounds for i in axes(A,2), j in axes(A,1) if i == j isone(A[i,i]) || return false else @@ -36,8 +36,8 @@ end end # Inner loop over rows to be friendly to the CPU cache -@inline function _isone_cachefriendly(A::AbstractMatrix, m::Int) - @inbounds for i in 1:m, j in 1:m +@inline function _isone_cachefriendly(A::AbstractMatrix) + @inbounds for i in axes(A,2), j in axes(A,1) if i == j isone(A[i,i]) || return false else @@ -198,7 +198,7 @@ function fillband!(A::AbstractMatrix{T}, x, l, u) where T require_one_based_indexing(A) m, n = size(A) xT = convert(T, x) - for j in 1:n + for j in axes(A,2) for i in max(1,j-u):min(m,j-l) @inbounds A[i, j] = xT end @@ -553,7 +553,7 @@ function (^)(A::AbstractMatrix{T}, p::Real) where T if isdiag(A) TT = promote_op(^, T, typeof(p)) retmat = copymutable_oftype(A, TT) - for i in 1:n + for i in axes(retmat,1) retmat[i, i] = retmat[i, i] ^ p end return retmat @@ -792,10 +792,10 @@ end ## Swap rows i and j and columns i and j in X function rcswap!(i::Integer, j::Integer, X::AbstractMatrix{<:Number}) - for k = 1:size(X,1) + for k = axes(X,1) X[k,i], X[k,j] = X[k,j], X[k,i] end - for k = 1:size(X,2) + for k = axes(X,2) X[i,k], X[j,k] = X[j,k], X[i,k] end end From fcf7ec081509c62967122e0949640e63a3d07571 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sun, 27 Oct 2024 07:59:28 +0530 Subject: [PATCH 320/537] Make `LinearAlgebra.haszero` public (#56223) The trait `haszero` is used to check if a type `T` has a unique zero defined using `zero(T)`. This lets us dispatch to optimized paths without losing generality. This PR makes the function public so that this may be extended by packages (such as `StaticArrays`). --- NEWS.md | 2 ++ stdlib/LinearAlgebra/docs/src/index.md | 1 + stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 + stdlib/LinearAlgebra/src/dense.jl | 12 ++++++++++++ 4 files changed, 16 insertions(+) diff --git a/NEWS.md b/NEWS.md index 079625b1610aa..5e066ffd9cdcf 100644 --- a/NEWS.md +++ b/NEWS.md @@ -150,6 +150,8 @@ Standard library changes Custom array types may specialize this function to return an appropriate result ([#55252]). * The matrix multiplication `A * B` calls `matprod_dest(A, B, T::Type)` to generate the destination. This function is now public ([#55537]). +* The function `haszero(T::Type)` is used to check if a type `T` has a unique zero element defined as `zero(T)`. + This is now public. #### Logging diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md index 1e44bf5cb04d7..3e18a45752aeb 100644 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ b/stdlib/LinearAlgebra/docs/src/index.md @@ -895,6 +895,7 @@ LinearAlgebra.LAPACK.hseqr! 
```@docs LinearAlgebra.matprod_dest +LinearAlgebra.haszero ``` ```@meta diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 15354603943c2..330df503485cb 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -169,6 +169,7 @@ export public AbstractTriangular, Givens, checksquare, + haszero, hermitian, hermitian_type, isbanded, diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 19fc7e9d422a8..b8d5c84c3db53 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -108,6 +108,18 @@ norm2(x::Union{Array{T},StridedVector{T}}) where {T<:BlasFloat} = length(x) < NRM2_CUTOFF ? generic_norm2(x) : BLAS.nrm2(x) # Conservative assessment of types that have zero(T) defined for themselves +""" + haszero(T::Type) + +Return whether a type `T` has a unique zero element defined using `zero(T)`. +If a type `M` specializes `zero(M)`, it may also choose to set `haszero(M)` to `true`. +By default, `haszero` is assumed to be `false`, in which case the zero elements +are deduced from values rather than the type. + +!!! note + `haszero` is a conservative check that is used to dispatch to + optimized paths. Extending it is optional, but encouraged. +""" haszero(::Type) = false haszero(::Type{T}) where {T<:Number} = isconcretetype(T) haszero(::Type{Union{Missing,T}}) where {T<:Number} = haszero(T) From 87ecf8f820f30aec981ffcf4c24668bd5858e901 Mon Sep 17 00:00:00 2001 From: Diogo Netto <61364108+d-netto@users.noreply.github.com> Date: Sun, 27 Oct 2024 17:02:42 -0300 Subject: [PATCH 321/537] remove spurious parens in profiler docs (#56357) --- doc/src/manual/profile.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/src/manual/profile.md b/doc/src/manual/profile.md index 718b79815473e..49b58ba9671c2 100644 --- a/doc/src/manual/profile.md +++ b/doc/src/manual/profile.md @@ -349,7 +349,7 @@ Our goal is to detect whether there is contention on the `ch` channel—i.e., wh If we run this, we obtain the following [PProf](https://github.com/JuliaPerf/PProf.jl) flame graph: -![CPU Profile](./img/cpu-profile.png)() +![CPU Profile](./img/cpu-profile.png) This profile provides no information to help determine where contention occurs in the system’s synchronization primitives. Waiters on a channel will be blocked and descheduled, meaning no system thread will be running the tasks assigned to those waiters, and as a result, they won't be sampled by the profiler. @@ -402,7 +402,7 @@ Profile.@profile_walltime main() We obtain the following flame graph: -![Wall-time Profile Channel](./img/wall-time-profiler-channel-example.png)() +![Wall-time Profile Channel](./img/wall-time-profiler-channel-example.png) We see that a large number of samples come from channel-related `take!` functions, which allows us to determine that there is indeed an excessive number of waiters in `ch`. @@ -454,7 +454,7 @@ Profile.@profile_walltime main() After collecting a wall-time profile, we get the following flame graph: -![Wall-time Profile Compute-Bound](./img/wall-time-profiler-compute-bound-example.png)() +![Wall-time Profile Compute-Bound](./img/wall-time-profiler-compute-bound-example.png) Notice how many of the samples contain `sum_of_sqrt`, which is the expensive compute function in our example. 
@@ -507,7 +507,7 @@ Notice that the tasks spawned in `spawn_a_bunch_of_short_lived_tasks` are extrem After collecting a wall-time profile, we obtain the following flame graph: -![Task Sampling Failure](./img/task-sampling-failure.png)() +![Task Sampling Failure](./img/task-sampling-failure.png) The large number of samples from `failed_to_stop_thread_fun` confirms that we have a significant number of short-lived tasks in the system. From 2cdfe062952c3a1168da7545a10bfa0ec205b4db Mon Sep 17 00:00:00 2001 From: Aravindh Krishnamoorthy Date: Mon, 28 Oct 2024 12:32:21 +0100 Subject: [PATCH 322/537] Fix `log_quasitriu` for internal scaling `s=0` (#56311) This PR is a potential fix for #54833. ## Description The function https://github.com/JuliaLang/julia/blob/2a06376c18afd7ec875335070743dcebcd85dee7/stdlib/LinearAlgebra/src/triangular.jl#L2220 computes $\boldsymbol{A}^{\dfrac{1}{2^s}} - \boldsymbol{I}$ for a real-valued $2\times 2$ matrix $\boldsymbol{A}$ using Algorithm 5.1 in [R1]. However, the algorithm in [R1] as well as the above function do not handle the case $s=0.$ This fix extends the function to compute $\boldsymbol{A}^{\dfrac{1}{2^s}} - \boldsymbol{I} \Bigg|_{s=0} = \boldsymbol{A} - \boldsymbol{I}.$ ## Checklist - [X] Fix code: `stdlib\LinearAlgebra\src\triangular.jl` in function `_sqrt_pow_diag_block_2x2!(A, A0, s)`. - [X] Add test case: `stdlib\LinearAlgebra\test\triangular.jl`. - [X] Update `NEWS.md`. - [X] Testing and self review. | Tag | Reference | | --- | --- | | [R1] | Al-Mohy, Awad H. and Higham, Nicholas J. "Improved Inverse Scaling and Squaring Algorithms for the Matrix Logarithm", 2011, url: https://eprints.maths.manchester.ac.uk/1687/1/paper11.pdf | --------- Co-authored-by: Daniel Karrasch Co-authored-by: Oscar Smith --- stdlib/LinearAlgebra/src/triangular.jl | 5 +++++ stdlib/LinearAlgebra/test/triangular.jl | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 1a7d04115c97d..a032041a4116c 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -2218,6 +2218,11 @@ end # SIAM J. Sci. Comput., 34(4), (2012) C153–C169. doi: 10.1137/110852553 # Algorithm 5.1 Base.@propagate_inbounds function _sqrt_pow_diag_block_2x2!(A, A0, s) + if iszero(s) + A[1,1] -= 1 + A[2,2] -= 1 + return A + end _sqrt_real_2x2!(A, A0) if isone(s) A[1,1] -= 1 diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 678827ceac720..2c8dd4db7fc2b 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -1386,4 +1386,14 @@ end end end +@testset "log_quasitriu with internal scaling s=0 (issue #54833)" begin + M = [0.9949357359852791 -0.015567763143324862 -0.09091193493947397 -0.03994428739762443 0.07338356301650806; + 0.011813655598647289 0.9968988574699793 -0.06204555000202496 0.04694097614450692 0.09028834462782365; + 0.092737943594701 0.059546719185135925 0.9935850721633324 0.025348893985651405 -0.018530261590167685; + 0.0369187299165628 -0.04903571106913449 -0.025962938675946543 0.9977767446862031 0.12901494726320517; + 0.0 0.0 0.0 0.0 1.0] + + @test exp(log(M)) ≈ M +end + end # module TestTriangular From 9dbdeb4fb323dfc78fde96a82bbb8fdefeb61a70 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 28 Oct 2024 12:51:55 -0400 Subject: [PATCH 323/537] loading: clean up more concurrency issues (#56329) Guarantee that `__init__` runs before `using` returns. 
Could be slightly breaking for people that do crazy things inside `__init__`, but just don't do that. Since extensions then probably load after `__init__` (or at least, run their `__init__` after), this is a partial step towards changing things so that extensions are guaranteed to load if using all of their triggers before the corresponding `using` returns Fixes #55556 --- base/Base.jl | 4 +- base/loading.jl | 135 +++++++++++++----------------- base/methodshow.jl | 12 +-- contrib/generate_precompile.jl | 4 +- stdlib/REPL/src/Pkg_beforeload.jl | 12 +-- 5 files changed, 70 insertions(+), 97 deletions(-) diff --git a/base/Base.jl b/base/Base.jl index bfac74e5d7bab..c5e318ffe5e38 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -657,8 +657,8 @@ function __init__() init_active_project() append!(empty!(_sysimage_modules), keys(loaded_modules)) empty!(loaded_precompiles) # If we load a packageimage when building the image this might not be empty - for (mod, key) in module_keys - push!(get!(Vector{Module}, loaded_precompiles, key), mod) + for mod in loaded_modules_order + push!(get!(Vector{Module}, loaded_precompiles, PkgId(mod)), mod) end if haskey(ENV, "JULIA_MAX_NUM_PRECOMPILE_FILES") MAX_NUM_PRECOMPILE_FILES[] = parse(Int, ENV["JULIA_MAX_NUM_PRECOMPILE_FILES"]) diff --git a/base/loading.jl b/base/loading.jl index 6391e2511f8d5..8edc1fe79c617 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -524,8 +524,7 @@ See also [`pkgdir`](@ref). """ function pathof(m::Module) @lock require_lock begin - pkgid = get(module_keys, m, nothing) - pkgid === nothing && return nothing + pkgid = PkgId(m) origin = get(pkgorigins, pkgid, nothing) origin === nothing && return nothing path = origin.path @@ -1652,7 +1651,7 @@ get_extension(parent::Module, ext::Symbol) = get_extension(PkgId(parent), ext) function get_extension(parentid::PkgId, ext::Symbol) parentid.uuid === nothing && return nothing extid = PkgId(uuid5(parentid.uuid, string(ext)), string(ext)) - return get(loaded_modules, extid, nothing) + return maybe_root_module(extid) end # End extensions @@ -1811,7 +1810,7 @@ function show(io::IO, it::ImageTarget) end # should sync with the types of arguments of `stale_cachefile` -const StaleCacheKey = Tuple{Base.PkgId, UInt128, String, String} +const StaleCacheKey = Tuple{PkgId, UInt128, String, String} function compilecache_path(pkg::PkgId; ignore_loaded::Bool=false, @@ -2063,7 +2062,7 @@ end modpath, modkey, modbuild_id = dep::Tuple{String, PkgId, UInt128} # inline a call to start_loading here @assert canstart_loading(modkey, modbuild_id, stalecheck) === nothing - package_locks[modkey] = current_task() => Threads.Condition(require_lock) + package_locks[modkey] = (current_task(), Threads.Condition(require_lock), modbuild_id) startedloading = i modpaths = find_all_in_cache_path(modkey, DEPOT_PATH) for modpath_to_try in modpaths @@ -2139,7 +2138,7 @@ end end # to synchronize multiple tasks trying to import/using something -const package_locks = Dict{PkgId,Pair{Task,Threads.Condition}}() +const package_locks = Dict{PkgId,Tuple{Task,Threads.Condition,UInt128}}() debug_loading_deadlocks::Bool = true # Enable a slightly more expensive, but more complete algorithm that can handle simultaneous tasks. 
# This only triggers if you have multiple tasks trying to load the same package at the same time, @@ -2148,14 +2147,21 @@ debug_loading_deadlocks::Bool = true # Enable a slightly more expensive, but mor function canstart_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool) assert_havelock(require_lock) require_lock.reentrancy_cnt == 1 || throw(ConcurrencyViolationError("recursive call to start_loading")) - loaded = stalecheck ? maybe_root_module(modkey) : nothing - loaded isa Module && return loaded - if build_id != UInt128(0) + loading = get(package_locks, modkey, nothing) + if loading === nothing + loaded = stalecheck ? maybe_root_module(modkey) : nothing + loaded isa Module && return loaded + if build_id != UInt128(0) + loaded = maybe_loaded_precompile(modkey, build_id) + loaded isa Module && return loaded + end + return nothing + end + if !stalecheck && build_id != UInt128(0) && loading[3] != build_id + # don't block using an existing specific loaded module on needing a different concurrently loaded one loaded = maybe_loaded_precompile(modkey, build_id) loaded isa Module && return loaded end - loading = get(package_locks, modkey, nothing) - loading === nothing && return nothing # load already in progress for this module on the task task, cond = loading deps = String[modkey.name] @@ -2202,7 +2208,7 @@ function start_loading(modkey::PkgId, build_id::UInt128, stalecheck::Bool) while true loaded = canstart_loading(modkey, build_id, stalecheck) if loaded === nothing - package_locks[modkey] = current_task() => Threads.Condition(require_lock) + package_locks[modkey] = (current_task(), Threads.Condition(require_lock), build_id) return nothing elseif loaded isa Module return loaded @@ -2333,15 +2339,15 @@ For more details regarding code loading, see the manual sections on [modules](@r [parallel computing](@ref code-availability). """ function require(into::Module, mod::Symbol) - if _require_world_age[] != typemax(UInt) - Base.invoke_in_world(_require_world_age[], __require, into, mod) - else - @invokelatest __require(into, mod) + world = _require_world_age[] + if world == typemax(UInt) + world = get_world_counter() end + return invoke_in_world(world, __require, into, mod) end function __require(into::Module, mod::Symbol) - if into === Base.__toplevel__ && generating_output(#=incremental=#true) + if into === __toplevel__ && generating_output(#=incremental=#true) error("`using/import $mod` outside of a Module detected. 
Importing a package outside of a module \ is not allowed during package precompilation.") end @@ -2445,24 +2451,22 @@ function collect_manifest_warnings() return msg end -require(uuidkey::PkgId) = @lock require_lock _require_prelocked(uuidkey) - -function _require_prelocked(uuidkey::PkgId, env=nothing) - if _require_world_age[] != typemax(UInt) - Base.invoke_in_world(_require_world_age[], __require_prelocked, uuidkey, env) - else - @invokelatest __require_prelocked(uuidkey, env) +function require(uuidkey::PkgId) + world = _require_world_age[] + if world == typemax(UInt) + world = get_world_counter() end + return invoke_in_world(world, __require, uuidkey) end - -function __require_prelocked(uuidkey::PkgId, env=nothing) +__require(uuidkey::PkgId) = @lock require_lock _require_prelocked(uuidkey) +function _require_prelocked(uuidkey::PkgId, env=nothing) assert_havelock(require_lock) m = start_loading(uuidkey, UInt128(0), true) if m === nothing last = toplevel_load[] try toplevel_load[] = false - m = _require(uuidkey, env) + m = __require_prelocked(uuidkey, env) if m === nothing error("package `$(uuidkey.name)` did not define the expected \ module `$(uuidkey.name)`, check for typos in package module name") @@ -2474,8 +2478,6 @@ function __require_prelocked(uuidkey::PkgId, env=nothing) insert_extension_triggers(uuidkey) # After successfully loading, notify downstream consumers run_package_callbacks(uuidkey) - else - newm = root_module(uuidkey) end return m end @@ -2491,9 +2493,8 @@ const pkgorigins = Dict{PkgId,PkgOrigin}() const loaded_modules = Dict{PkgId,Module}() # available to be explicitly loaded const loaded_precompiles = Dict{PkgId,Vector{Module}}() # extended (complete) list of modules, available to be loaded const loaded_modules_order = Vector{Module}() -const module_keys = IdDict{Module,PkgId}() # the reverse of loaded_modules -root_module_key(m::Module) = @lock require_lock module_keys[m] +root_module_key(m::Module) = PkgId(m) function maybe_loaded_precompile(key::PkgId, buildid::UInt128) @lock require_lock begin @@ -2527,7 +2528,6 @@ end end maybe_loaded_precompile(key, module_build_id(m)) === nothing && push!(loaded_modules_order, m) loaded_modules[key] = m - module_keys[m] = key end nothing end @@ -2544,24 +2544,27 @@ using Base end # get a top-level Module from the given key +# this is similar to `require`, but worse in almost every possible way root_module(key::PkgId) = @lock require_lock loaded_modules[key] function root_module(where::Module, name::Symbol) key = identify_package(where, String(name)) key isa PkgId || throw(KeyError(name)) return root_module(key) end +root_module_exists(key::PkgId) = @lock require_lock haskey(loaded_modules, key) maybe_root_module(key::PkgId) = @lock require_lock get(loaded_modules, key, nothing) -root_module_exists(key::PkgId) = @lock require_lock haskey(loaded_modules, key) loaded_modules_array() = @lock require_lock copy(loaded_modules_order) # after unreference_module, a subsequent require call will try to load a new copy of it, if stale # reload(m) = (unreference_module(m); require(m)) function unreference_module(key::PkgId) + @lock require_lock begin if haskey(loaded_modules, key) m = pop!(loaded_modules, key) # need to ensure all modules are GC rooted; will still be referenced - # in module_keys + # in loaded_modules_order + end end end @@ -2582,7 +2585,7 @@ const PKG_PRECOMPILE_HOOK = Ref{Function}() disable_parallel_precompile::Bool = false # Returns `nothing` or the new(ish) module -function _require(pkg::PkgId, env=nothing) +function 
__require_prelocked(pkg::PkgId, env) assert_havelock(require_lock) # perform the search operation to select the module file require intends to load @@ -2682,7 +2685,7 @@ function _require(pkg::PkgId, env=nothing) unlock(require_lock) try include(__toplevel__, path) - loaded = get(loaded_modules, pkg, nothing) + loaded = maybe_root_module(pkg) finally lock(require_lock) if uuid !== old_uuid @@ -2755,38 +2758,18 @@ function require_stdlib(package_uuidkey::PkgId, ext::Union{Nothing, String}=noth @lock require_lock begin # the PkgId of the ext, or package if not an ext this_uuidkey = ext isa String ? PkgId(uuid5(package_uuidkey.uuid, ext), ext) : package_uuidkey - newm = maybe_root_module(this_uuidkey) - if newm isa Module - return newm - end - # first since this is a stdlib, try to look there directly first env = Sys.STDLIB - #sourcepath = "" - if ext === nothing - sourcepath = normpath(env, this_uuidkey.name, "src", this_uuidkey.name * ".jl") - else - sourcepath = find_ext_path(normpath(joinpath(env, package_uuidkey.name)), ext) - end - #mbypath = manifest_uuid_path(env, this_uuidkey) - #if mbypath isa String && isfile_casesensitive(mbypath) - # sourcepath = mbypath - #else - # # if the user deleted the stdlib folder, we next try using their environment - # sourcepath = locate_package_env(this_uuidkey) - # if sourcepath !== nothing - # sourcepath, env = sourcepath - # end - #end - #if sourcepath === nothing - # throw(ArgumentError(""" - # Package $(repr("text/plain", this_uuidkey)) is required but does not seem to be installed. - # """)) - #end - set_pkgorigin_version_path(this_uuidkey, sourcepath) - depot_path = append_bundled_depot_path!(empty(DEPOT_PATH)) newm = start_loading(this_uuidkey, UInt128(0), true) newm === nothing || return newm try + # first since this is a stdlib, try to look there directly first + if ext === nothing + sourcepath = normpath(env, this_uuidkey.name, "src", this_uuidkey.name * ".jl") + else + sourcepath = find_ext_path(normpath(joinpath(env, package_uuidkey.name)), ext) + end + depot_path = append_bundled_depot_path!(empty(DEPOT_PATH)) + set_pkgorigin_version_path(this_uuidkey, sourcepath) newm = _require_search_from_serialized(this_uuidkey, sourcepath, UInt128(0), false; DEPOT_PATH=depot_path) finally end_loading(this_uuidkey, newm) @@ -3968,32 +3951,32 @@ end if M !== nothing @assert PkgId(M) == req_key && module_build_id(M) === req_build_id depmods[i] = M - elseif root_module_exists(req_key) - M = root_module(req_key) + continue + end + M = maybe_root_module(req_key) + if M isa Module if PkgId(M) == req_key && module_build_id(M) === req_build_id depmods[i] = M + continue elseif M == Core @debug "Rejecting cache file $cachefile because it was made with a different julia version" record_reason(reasons, "wrong julia version") return true # Won't be able to fulfill dependency elseif ignore_loaded || !stalecheck # Used by Pkg.precompile given that there it's ok to precompile different versions of loaded packages - @goto locate_branch else @debug "Rejecting cache file $cachefile because module $req_key is already loaded and incompatible." record_reason(reasons, "wrong dep version loaded") return true # Won't be able to fulfill dependency end - else - @label locate_branch - path = locate_package(req_key) # TODO: add env and/or skip this when stalecheck is false - if path === nothing - @debug "Rejecting cache file $cachefile because dependency $req_key not found." 
- record_reason(reasons, "dep missing source") - return true # Won't be able to fulfill dependency - end - depmods[i] = (path, req_key, req_build_id) end + path = locate_package(req_key) # TODO: add env and/or skip this when stalecheck is false + if path === nothing + @debug "Rejecting cache file $cachefile because dependency $req_key not found." + record_reason(reasons, "dep missing source") + return true # Won't be able to fulfill dependency + end + depmods[i] = (path, req_key, req_build_id) end # check if this file is going to provide one of our concrete dependencies diff --git a/base/methodshow.jl b/base/methodshow.jl index 477acd2960c48..a2158cb9180e4 100644 --- a/base/methodshow.jl +++ b/base/methodshow.jl @@ -378,7 +378,6 @@ function url(m::Method) line = m.line line <= 0 || occursin(r"In\[[0-9]+\]"a, file) && return "" Sys.iswindows() && (file = replace(file, '\\' => '/')) - libgit2_id = PkgId(UUID((0x76f85450_5226_5b5a,0x8eaa_529ad045b433)), "LibGit2") if inbase(M) if isempty(Base.GIT_VERSION_INFO.commit) # this url will only work if we're on a tagged release @@ -386,8 +385,10 @@ function url(m::Method) else return "https://github.com/JuliaLang/julia/tree/$(Base.GIT_VERSION_INFO.commit)/base/$file#L$line" end - elseif root_module_exists(libgit2_id) - LibGit2 = root_module(libgit2_id) + end + libgit2_id = PkgId(UUID((0x76f85450_5226_5b5a,0x8eaa_529ad045b433)), "LibGit2") + LibGit2 = maybe_root_module(libgit2_id) + if LibGit2 isa Module try d = dirname(file) return LibGit2.with(LibGit2.GitRepoExt(d)) do repo @@ -404,11 +405,10 @@ function url(m::Method) end end catch - return fileurl(file) + # oops, this was a bad idea end - else - return fileurl(file) end + return fileurl(file) end function show(io::IO, ::MIME"text/html", m::Method) diff --git a/contrib/generate_precompile.jl b/contrib/generate_precompile.jl index 037e8926d5003..55de3492e9447 100644 --- a/contrib/generate_precompile.jl +++ b/contrib/generate_precompile.jl @@ -35,8 +35,8 @@ precompile(Base.unsafe_string, (Ptr{UInt8},)) precompile(Base.unsafe_string, (Ptr{Int8},)) # loading.jl -precompile(Base.__require_prelocked, (Base.PkgId, Nothing)) -precompile(Base._require, (Base.PkgId, Nothing)) +precompile(Base.__require, (Module, Symbol)) +precompile(Base.__require, (Base.PkgId,)) precompile(Base.indexed_iterate, (Pair{Symbol, Union{Nothing, String}}, Int)) precompile(Base.indexed_iterate, (Pair{Symbol, Union{Nothing, String}}, Int, Int)) precompile(Tuple{typeof(Base.Threads.atomic_add!), Base.Threads.Atomic{Int}, Int}) diff --git a/stdlib/REPL/src/Pkg_beforeload.jl b/stdlib/REPL/src/Pkg_beforeload.jl index e110910bafc2f..86b5cd35abd2f 100644 --- a/stdlib/REPL/src/Pkg_beforeload.jl +++ b/stdlib/REPL/src/Pkg_beforeload.jl @@ -1,17 +1,7 @@ ## Pkg stuff needed before Pkg has loaded const Pkg_pkgid = Base.PkgId(Base.UUID("44cfe95a-1eb2-52ea-b672-e2afdf69b78f"), "Pkg") - -function load_pkg() - REPLExt = Base.require_stdlib(Pkg_pkgid, "REPLExt") - @lock Base.require_lock begin - # require_stdlib does not guarantee that the `__init__` of the package is done when loading is done async - # but we need to wait for the repl mode to be set up - lock = get(Base.package_locks, Base.PkgId(REPLExt), nothing) - lock !== nothing && wait(lock[2]) - end - return REPLExt -end +load_pkg() = Base.require_stdlib(Pkg_pkgid, "REPLExt") ## Below here copied/tweaked from Pkg Types.jl so that the dummy Pkg prompt # can populate the env correctly before Pkg loads From e802eff6dd910a8321b45c6c629dc4300f5aef1e Mon Sep 17 00:00:00 2001 From: Oscar Smith 
Date: Mon, 28 Oct 2024 17:57:04 -0400 Subject: [PATCH 324/537] make `_unsetindex` fast for isbits eltype (#56364) fixes https://github.com/JuliaLang/julia/issues/56359#issuecomment-2441537634 ``` using Plots function f(n) a = Vector{Int}(undef, n) s = time_ns() resize!(a, 8) time_ns() - s end x = 8:10:1000000 y = f.(x) plot(x, y) ``` ![image](https://github.com/user-attachments/assets/5a1fb963-7d44-4cac-bedd-6f0733d4cf56) --- base/genericmemory.jl | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/base/genericmemory.jl b/base/genericmemory.jl index de1fc668333f5..f814aa4d84bdd 100644 --- a/base/genericmemory.jl +++ b/base/genericmemory.jl @@ -84,17 +84,16 @@ function _unsetindex!(A::MemoryRef{T}) where T MemT = typeof(mem) arrayelem = datatype_arrayelem(MemT) elsz = datatype_layoutsize(MemT) - isboxed = 1; isunion = 2 + isbits = 0; isboxed = 1; isunion = 2 + arrayelem == isbits && datatype_pointerfree(T::DataType) && return A t = @_gc_preserve_begin mem p = Ptr{Ptr{Cvoid}}(@inbounds pointer(A)) if arrayelem == isboxed Intrinsics.atomic_pointerset(p, C_NULL, :monotonic) elseif arrayelem != isunion - if !datatype_pointerfree(T::DataType) - for j = 1:Core.sizeof(Ptr{Cvoid}):elsz - # XXX: this violates memory ordering, since it writes more than one C_NULL to each - Intrinsics.atomic_pointerset(p + j - 1, C_NULL, :monotonic) - end + for j = 1:Core.sizeof(Ptr{Cvoid}):elsz + # XXX: this violates memory ordering, since it writes more than one C_NULL to each + Intrinsics.atomic_pointerset(p + j - 1, C_NULL, :monotonic) end end @_gc_preserve_end t From 4c076c80af9d9c8439cfa20e2efd5c884d88b64d Mon Sep 17 00:00:00 2001 From: matthias314 <56549971+matthias314@users.noreply.github.com> Date: Mon, 28 Oct 2024 18:02:41 -0400 Subject: [PATCH 325/537] improved `eltype` for `flatten` with tuple argument (#55946) We have always had ``` julia> t = (Int16[1,2], Int32[3,4]); eltype(Iterators.flatten(t)) Any ``` With this PR, the result is `Signed` (`promote_typejoin` applied to the element types of the tuple elements). The same applies to `NamedTuple`: ``` julia> nt = (a = [1,2], b = (3,4)); eltype(Iterators.flatten(nt)) Any # old Int64 # new ``` --- base/iterators.jl | 3 ++- test/iterators.jl | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/base/iterators.jl b/base/iterators.jl index 8bd30991319b6..1a0d42ed7447f 100644 --- a/base/iterators.jl +++ b/base/iterators.jl @@ -30,7 +30,7 @@ end import .Base: first, last, isempty, length, size, axes, ndims, - eltype, IteratorSize, IteratorEltype, + eltype, IteratorSize, IteratorEltype, promote_typejoin, haskey, keys, values, pairs, getindex, setindex!, get, iterate, popfirst!, isdone, peek, intersect @@ -1213,6 +1213,7 @@ julia> [(x,y) for x in 0:1 for y in 'a':'c'] # collects generators involving It flatten(itr) = Flatten(itr) eltype(::Type{Flatten{I}}) where {I} = eltype(eltype(I)) +eltype(::Type{Flatten{I}}) where {I<:Union{Tuple,NamedTuple}} = promote_typejoin(map(eltype, fieldtypes(I))...) 
eltype(::Type{Flatten{Tuple{}}}) = eltype(Tuple{}) IteratorEltype(::Type{Flatten{I}}) where {I} = _flatteneltype(I, IteratorEltype(I)) IteratorEltype(::Type{Flatten{Tuple{}}}) = IteratorEltype(Tuple{}) diff --git a/test/iterators.jl b/test/iterators.jl index 0df4d9afd371a..d1e7525c43465 100644 --- a/test/iterators.jl +++ b/test/iterators.jl @@ -513,6 +513,8 @@ end @test collect(flatten(Any[flatten(Any[1:2, 6:5]), flatten(Any[6:7, 8:9])])) == Any[1,2,6,7,8,9] @test collect(flatten(Any[2:1])) == Any[] @test eltype(flatten(UnitRange{Int8}[1:2, 3:4])) == Int8 +@test eltype(flatten(([1, 2], [3.0, 4.0]))) == Real +@test eltype(flatten((a = [1, 2], b = Int8[3, 4]))) == Signed @test length(flatten(zip(1:3, 4:6))) == 6 @test length(flatten(1:6)) == 6 @test collect(flatten(Any[])) == Any[] From 710e1f99a75357f9b85d1c517926d083b5cba704 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 29 Oct 2024 13:29:21 +0530 Subject: [PATCH 326/537] Reland "Reroute (Upper/Lower)Triangular * Diagonal through __muldiag #55984" (#56270) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This relands #55984 which was reverted in #56267. Previously, in #55984, the destination in multiplying triangular matrices with diagonals was also assumed to be triangular, which is not necessarily the case in `mul!`. Tests for this case, however, were being run non-deterministically, so this wasn't caught by the CI runs. This improves performance: ```julia julia> U = UpperTriangular(rand(100,100)); D = Diagonal(rand(size(U,2))); C = similar(U); julia> @btime mul!($C, $D, $U); 1.517 μs (0 allocations: 0 bytes) # nightly 1.116 μs (0 allocations: 0 bytes) # This PR ``` --- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 2 + stdlib/LinearAlgebra/src/diagonal.jl | 191 ++++++++++++---------- stdlib/LinearAlgebra/test/addmul.jl | 22 +++ stdlib/LinearAlgebra/test/diagonal.jl | 53 +++++- 4 files changed, 184 insertions(+), 84 deletions(-) diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 330df503485cb..6e560428a7011 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -656,6 +656,8 @@ matprod_dest(A::StructuredMatrix, B::Diagonal, TS) = _matprod_dest_diag(A, TS) matprod_dest(A::Diagonal, B::StructuredMatrix, TS) = _matprod_dest_diag(B, TS) matprod_dest(A::Diagonal, B::Diagonal, TS) = _matprod_dest_diag(B, TS) _matprod_dest_diag(A, TS) = similar(A, TS) +_matprod_dest_diag(A::UnitUpperTriangular, TS) = UpperTriangular(similar(parent(A), TS)) +_matprod_dest_diag(A::UnitLowerTriangular, TS) = LowerTriangular(similar(parent(A), TS)) function _matprod_dest_diag(A::SymTridiagonal, TS) n = size(A, 1) ev = similar(A, TS, max(0, n-1)) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 417bcfa5715b1..1ed599fbb120e 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -397,89 +397,124 @@ function lmul!(D::Diagonal, T::Tridiagonal) return T end -function __muldiag!(out, D::Diagonal, B, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - require_one_based_indexing(out, B) - alpha, beta = _add.alpha, _add.beta - if iszero(alpha) - _rmul_or_fill!(out, beta) - else - if bis0 - @inbounds for j in axes(B, 2) - @simd for i in axes(B, 1) - out[i,j] = D.diag[i] * B[i,j] * alpha - end - end - else - @inbounds for j in axes(B, 2) - @simd for i in axes(B, 1) - out[i,j] = D.diag[i] * B[i,j] * alpha + out[i,j] * beta - end - end 
+@inline function __muldiag_nonzeroalpha!(out, D::Diagonal, B, _add::MulAddMul) + @inbounds for j in axes(B, 2) + @simd for i in axes(B, 1) + _modify!(_add, D.diag[i] * B[i,j], out, (i,j)) end end - return out -end -function __muldiag!(out, A, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - require_one_based_indexing(out, A) - alpha, beta = _add.alpha, _add.beta - if iszero(alpha) - _rmul_or_fill!(out, beta) - else - if bis0 - @inbounds for j in axes(A, 2) - dja = D.diag[j] * alpha - @simd for i in axes(A, 1) - out[i,j] = A[i,j] * dja - end - end - else - @inbounds for j in axes(A, 2) - dja = D.diag[j] * alpha - @simd for i in axes(A, 1) - out[i,j] = A[i,j] * dja + out[i,j] * beta - end + out +end +_has_matching_zeros(out::UpperOrUnitUpperTriangular, A::UpperOrUnitUpperTriangular) = true +_has_matching_zeros(out::LowerOrUnitLowerTriangular, A::LowerOrUnitLowerTriangular) = true +_has_matching_zeros(out, A) = false +function _rowrange_tri_stored(B::UpperOrUnitUpperTriangular, col) + isunit = B isa UnitUpperTriangular + 1:min(col-isunit, size(B,1)) +end +function _rowrange_tri_stored(B::LowerOrUnitLowerTriangular, col) + isunit = B isa UnitLowerTriangular + col+isunit:size(B,1) +end +_rowrange_tri_zeros(B::UpperOrUnitUpperTriangular, col) = col+1:size(B,1) +_rowrange_tri_zeros(B::LowerOrUnitLowerTriangular, col) = 1:col-1 +function __muldiag_nonzeroalpha!(out, D::Diagonal, B::UpperOrLowerTriangular, _add::MulAddMul) + isunit = B isa UnitUpperOrUnitLowerTriangular + out_maybeparent, B_maybeparent = _has_matching_zeros(out, B) ? (parent(out), parent(B)) : (out, B) + for j in axes(B, 2) + # store the diagonal separately for unit triangular matrices + if isunit + @inbounds _modify!(_add, D.diag[j] * B[j,j], out, (j,j)) + end + # The indices of out corresponding to the stored indices of B + rowrange = _rowrange_tri_stored(B, j) + @inbounds @simd for i in rowrange + _modify!(_add, D.diag[i] * B_maybeparent[i,j], out_maybeparent, (i,j)) + end + # Fill the indices of out corresponding to the zeros of B + # we only fill these if out and B don't have matching zeros + if !_has_matching_zeros(out, B) + rowrange = _rowrange_tri_zeros(B, j) + @inbounds @simd for i in rowrange + _modify!(_add, D.diag[i] * B[i,j], out, (i,j)) end end end return out end -function __muldiag!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - d1 = D1.diag - d2 = D2.diag - alpha, beta = _add.alpha, _add.beta - if iszero(alpha) - _rmul_or_fill!(out.diag, beta) - else - if bis0 - @inbounds @simd for i in eachindex(out.diag) - out.diag[i] = d1[i] * d2[i] * alpha - end - else - @inbounds @simd for i in eachindex(out.diag) - out.diag[i] = d1[i] * d2[i] * alpha + out.diag[i] * beta + +@inline function __muldiag_nonzeroalpha!(out, A, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} + beta = _add.beta + _add_aisone = MulAddMul{true,bis0,Bool,typeof(beta)}(true, beta) + @inbounds for j in axes(A, 2) + dja = _add(D.diag[j]) + @simd for i in axes(A, 1) + _modify!(_add_aisone, A[i,j] * dja, out, (i,j)) + end + end + out +end +function __muldiag_nonzeroalpha!(out, A::UpperOrLowerTriangular, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} + isunit = A isa UnitUpperOrUnitLowerTriangular + beta = _add.beta + # since alpha is multiplied to the diagonal element of D, + # we may skip alpha in the second multiplication by setting ais1 to true + _add_aisone = MulAddMul{true,bis0,Bool,typeof(beta)}(true, beta) + # if both A and out have the same upper/lower triangular 
structure, + # we may directly read and write from the parents + out_maybeparent, A_maybeparent = _has_matching_zeros(out, A) ? (parent(out), parent(A)) : (out, A) + for j in axes(A, 2) + dja = _add(@inbounds D.diag[j]) + # store the diagonal separately for unit triangular matrices + if isunit + @inbounds _modify!(_add_aisone, A[j,j] * dja, out, (j,j)) + end + # indices of out corresponding to the stored indices of A + rowrange = _rowrange_tri_stored(A, j) + @inbounds @simd for i in rowrange + _modify!(_add_aisone, A_maybeparent[i,j] * dja, out_maybeparent, (i,j)) + end + # Fill the indices of out corresponding to the zeros of A + # we only fill these if out and A don't have matching zeros + if !_has_matching_zeros(out, A) + rowrange = _rowrange_tri_zeros(A, j) + @inbounds @simd for i in rowrange + _modify!(_add_aisone, A[i,j] * dja, out, (i,j)) end end end - return out + out end -function __muldiag!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - require_one_based_indexing(out) - alpha, beta = _add.alpha, _add.beta - mA = size(D1, 1) + +@inline function __muldiag_nonzeroalpha!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul) d1 = D1.diag d2 = D2.diag - _rmul_or_fill!(out, beta) - if !iszero(alpha) - @inbounds @simd for i in 1:mA - out[i,i] += d1[i] * d2[i] * alpha - end + outd = out.diag + @inbounds @simd for i in eachindex(d1, d2, outd) + _modify!(_add, d1[i] * d2[i], outd, i) end - return out + out +end + +# ambiguity resolution +@inline function __muldiag_nonzeroalpha!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul) + @inbounds for j in axes(D2, 2), i in axes(D2, 1) + _modify!(_add, D1.diag[i] * D2[i,j], out, (i,j)) + end + out end +# muldiag mainly handles the zero-alpha case, so that we need only +# specialize the non-trivial case function _mul_diag!(out, A, B, _add) + require_one_based_indexing(out, A, B) _muldiag_size_check(size(out), size(A), size(B)) - __muldiag!(out, A, B, _add) + alpha, beta = _add.alpha, _add.beta + if iszero(alpha) + _rmul_or_fill!(out, beta) + else + __muldiag_nonzeroalpha!(out, A, B, _add) + end return out end @@ -659,31 +694,21 @@ for Tri in (:UpperTriangular, :LowerTriangular) @eval $fun(A::$Tri, D::Diagonal) = $Tri($fun(A.data, D)) @eval $fun(A::$UTri, D::Diagonal) = $Tri(_setdiag!($fun(A.data, D), $f, D.diag)) end + @eval *(A::$Tri{<:Any, <:StridedMaybeAdjOrTransMat}, D::Diagonal) = + @invoke *(A::AbstractMatrix, D::Diagonal) + @eval *(A::$UTri{<:Any, <:StridedMaybeAdjOrTransMat}, D::Diagonal) = + @invoke *(A::AbstractMatrix, D::Diagonal) for (fun, f) in zip((:*, :lmul!, :ldiv!, :\), (:identity, :identity, :inv, :inv)) @eval $fun(D::Diagonal, A::$Tri) = $Tri($fun(D, A.data)) @eval $fun(D::Diagonal, A::$UTri) = $Tri(_setdiag!($fun(D, A.data), $f, D.diag)) end + @eval *(D::Diagonal, A::$Tri{<:Any, <:StridedMaybeAdjOrTransMat}) = + @invoke *(D::Diagonal, A::AbstractMatrix) + @eval *(D::Diagonal, A::$UTri{<:Any, <:StridedMaybeAdjOrTransMat}) = + @invoke *(D::Diagonal, A::AbstractMatrix) # 3-arg ldiv! @eval ldiv!(C::$Tri, D::Diagonal, A::$Tri) = $Tri(ldiv!(C.data, D, A.data)) @eval ldiv!(C::$Tri, D::Diagonal, A::$UTri) = $Tri(_setdiag!(ldiv!(C.data, D, A.data), inv, D.diag)) - # 3-arg mul! is disambiguated in special.jl - # 5-arg mul! 
- @eval _mul!(C::$Tri, D::Diagonal, A::$Tri, _add) = $Tri(mul!(C.data, D, A.data, _add.alpha, _add.beta)) - @eval function _mul!(C::$Tri, D::Diagonal, A::$UTri, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - α, β = _add.alpha, _add.beta - iszero(α) && return _rmul_or_fill!(C, β) - diag′ = bis0 ? nothing : diag(C) - data = mul!(C.data, D, A.data, α, β) - $Tri(_setdiag!(data, _add, D.diag, diag′)) - end - @eval _mul!(C::$Tri, A::$Tri, D::Diagonal, _add) = $Tri(mul!(C.data, A.data, D, _add.alpha, _add.beta)) - @eval function _mul!(C::$Tri, A::$UTri, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - α, β = _add.alpha, _add.beta - iszero(α) && return _rmul_or_fill!(C, β) - diag′ = bis0 ? nothing : diag(C) - data = mul!(C.data, A.data, D, α, β) - $Tri(_setdiag!(data, _add, D.diag, diag′)) - end end @inline function kron!(C::AbstractMatrix, A::Diagonal, B::Diagonal) diff --git a/stdlib/LinearAlgebra/test/addmul.jl b/stdlib/LinearAlgebra/test/addmul.jl index 208fa930e8ee1..fcd0b51b2e4c0 100644 --- a/stdlib/LinearAlgebra/test/addmul.jl +++ b/stdlib/LinearAlgebra/test/addmul.jl @@ -239,4 +239,26 @@ end end end +@testset "Diagonal scaling of a triangular matrix with a non-triangular destination" begin + for MT in (UpperTriangular, UnitUpperTriangular, LowerTriangular, UnitLowerTriangular) + U = MT(reshape([1:9;],3,3)) + M = Array(U) + D = Diagonal(1:3) + A = reshape([1:9;],3,3) + @test mul!(copy(A), U, D, 2, 3) == M * D * 2 + A * 3 + @test mul!(copy(A), D, U, 2, 3) == D * M * 2 + A * 3 + + # nan values with iszero(alpha) + D = Diagonal(fill(NaN,3)) + @test mul!(copy(A), U, D, 0, 3) == A * 3 + @test mul!(copy(A), D, U, 0, 3) == A * 3 + + # nan values with iszero(beta) + A = fill(NaN,3,3) + D = Diagonal(1:3) + @test mul!(copy(A), U, D, 2, 0) == M * D * 2 + @test mul!(copy(A), D, U, 2, 0) == D * M * 2 + end +end + end # module diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 1c3a9dfa676ac..16f3d2287f317 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -822,6 +822,19 @@ end @test @inferred(D[1,2]) isa typeof(s) @test all(iszero, D[1,2]) end + + @testset "mul!" 
begin + D1 = Diagonal(fill(ones(2,3), 2)) + D2 = Diagonal(fill(ones(3,2), 2)) + C = similar(D1, size(D1)) + mul!(C, D1, D2) + @test all(x -> size(x) == (2,2), C) + @test C == D1 * D2 + D = similar(D1) + mul!(D, D1, D2) + @test all(x -> size(x) == (2,2), D) + @test D == D1 * D2 + end end @testset "Eigensystem for block diagonal (issue #30681)" begin @@ -1188,7 +1201,7 @@ end @test oneunit(D3) isa typeof(D3) end -@testset "AbstractTriangular" for (Tri, UTri) in ((UpperTriangular, UnitUpperTriangular), (LowerTriangular, UnitLowerTriangular)) +@testset "$Tri" for (Tri, UTri) in ((UpperTriangular, UnitUpperTriangular), (LowerTriangular, UnitLowerTriangular)) A = randn(4, 4) TriA = Tri(A) UTriA = UTri(A) @@ -1218,6 +1231,44 @@ end @test outTri === mul!(outTri, D, UTriA, 2, 1)::Tri == mul!(out, D, Matrix(UTriA), 2, 1) @test outTri === mul!(outTri, TriA, D, 2, 1)::Tri == mul!(out, Matrix(TriA), D, 2, 1) @test outTri === mul!(outTri, UTriA, D, 2, 1)::Tri == mul!(out, Matrix(UTriA), D, 2, 1) + + # we may write to a Unit triangular if the diagonal is preserved + ID = Diagonal(ones(size(UTriA,2))) + @test mul!(copy(UTriA), UTriA, ID) == UTriA + @test mul!(copy(UTriA), ID, UTriA) == UTriA + + @testset "partly filled parents" begin + M = Matrix{BigFloat}(undef, 2, 2) + M[1,1] = M[2,2] = 3 + isupper = Tri == UpperTriangular + M[1+!isupper, 1+isupper] = 3 + D = Diagonal(1:2) + T = Tri(M) + TA = Array(T) + @test T * D == TA * D + @test D * T == D * TA + @test mul!(copy(T), T, D, 2, 3) == 2T * D + 3T + @test mul!(copy(T), D, T, 2, 3) == 2D * T + 3T + + U = UTri(M) + UA = Array(U) + @test U * D == UA * D + @test D * U == D * UA + @test mul!(copy(T), U, D, 2, 3) == 2 * UA * D + 3TA + @test mul!(copy(T), D, U, 2, 3) == 2 * D * UA + 3TA + + M2 = Matrix{BigFloat}(undef, 2, 2) + M2[1+!isupper, 1+isupper] = 3 + U = UTri(M2) + UA = Array(U) + @test U * D == UA * D + @test D * U == D * UA + ID = Diagonal(ones(size(U,2))) + @test mul!(copy(U), U, ID) == U + @test mul!(copy(U), ID, U) == U + @test mul!(copy(U), U, ID, 2, -1) == U + @test mul!(copy(U), ID, U, 2, -1) == U + end end struct SMatrix1{T} <: AbstractArray{T,2} From cb757c41102e4cdc899d76464acde0938c36de9e Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Tue, 29 Oct 2024 13:30:18 +0530 Subject: [PATCH 327/537] Add one-arg `norm` method (#56330) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reduces the latency of `norm` calls, as the single-argument method lacks branches and doesn't use aggressive constant propagation, and is therefore simpler to compile. Given that a lot of `norm` calls use `p==2`, it makes sense for us to reduce the latency on this call. 
```julia julia> using LinearAlgebra julia> A = rand(2,2); julia> @time norm(A); 0.247515 seconds (390.09 k allocations: 19.993 MiB, 33.57% gc time, 99.99% compilation time) # master 0.067201 seconds (121.24 k allocations: 6.067 MiB, 99.98% compilation time) # this PR ``` An example of an improvement in ttfx because of this: ```julia julia> A = rand(2,2); julia> @time A ≈ A; 0.556475 seconds (1.16 M allocations: 59.949 MiB, 24.14% gc time, 100.00% compilation time) # master 0.333114 seconds (899.85 k allocations: 46.574 MiB, 8.11% gc time, 99.99% compilation time) # this PR ``` --- stdlib/LinearAlgebra/src/generic.jl | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 20c58e593d3f6..21719c0c50127 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -669,11 +669,9 @@ julia> norm(hcat(v,v), Inf) == norm(vcat(v,v), Inf) != norm([v,v], Inf) true ``` """ -Base.@constprop :aggressive function norm(itr, p::Real=2) +Base.@constprop :aggressive function norm(itr, p::Real) isempty(itr) && return float(norm(zero(eltype(itr)))) - v, s = iterate(itr) - !isnothing(s) && !ismissing(v) && v == itr && throw(ArgumentError( - "cannot evaluate norm recursively if the type of the initial element is identical to that of the container")) + norm_recursive_check(itr) if p == 2 return norm2(itr) elseif p == 1 @@ -688,6 +686,18 @@ Base.@constprop :aggressive function norm(itr, p::Real=2) normp(itr, p) end end +# Split into a separate method to reduce latency in norm(x) calls (#56330) +function norm(itr) + isempty(itr) && return float(norm(zero(eltype(itr)))) + norm_recursive_check(itr) + norm2(itr) +end +function norm_recursive_check(itr) + v, s = iterate(itr) + !isnothing(s) && !ismissing(v) && v == itr && throw(ArgumentError( + "cannot evaluate norm recursively if the type of the initial element is identical to that of the container")) + return nothing +end """ norm(x::Number, p::Real=2) @@ -808,7 +818,7 @@ julia> opnorm(A, 1) 5.0 ``` """ -Base.@constprop :aggressive function opnorm(A::AbstractMatrix, p::Real=2) +Base.@constprop :aggressive function opnorm(A::AbstractMatrix, p::Real) if p == 2 return opnorm2(A) elseif p == 1 @@ -819,6 +829,7 @@ Base.@constprop :aggressive function opnorm(A::AbstractMatrix, p::Real=2) throw(ArgumentError(lazy"invalid p-norm p=$p. 
Valid: 1, 2, Inf")) end end +opnorm(A::AbstractMatrix) = opnorm2(A) """ opnorm(x::Number, p::Real=2) From 07530bcb23d1a576b8260500833b767b81442517 Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Tue, 29 Oct 2024 15:42:48 +0100 Subject: [PATCH 328/537] fix a forgotten rename `readuntil` -> `copyuntil` (#56380) Fixes https://github.com/JuliaLang/julia/issues/56352, with the repro in that issue: ``` Master: 1.114874 seconds (13.01 M allocations: 539.592 MiB, 3.80% gc time) After: 0.369492 seconds (12.99 M allocations: 485.031 MiB, 10.73% gc time) 1.10: 0.341114 seconds (8.36 M allocations: 454.242 MiB, 2.69% gc time) ``` --- base/stream.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/base/stream.jl b/base/stream.jl index 2f00538ad0e96..488acd41d2a9e 100644 --- a/base/stream.jl +++ b/base/stream.jl @@ -1028,7 +1028,7 @@ function readavailable(this::LibuvStream) return bytes end -function readuntil(x::LibuvStream, c::UInt8; keep::Bool=false) +function copyuntil(out::IO, x::LibuvStream, c::UInt8; keep::Bool=false) iolock_begin() buf = x.buffer @assert buf.seekable == false @@ -1058,9 +1058,9 @@ function readuntil(x::LibuvStream, c::UInt8; keep::Bool=false) end end end - bytes = readuntil(buf, c, keep=keep) + copyuntil(out, buf, c; keep) iolock_end() - return bytes + return out end uv_write(s::LibuvStream, p::Vector{UInt8}) = GC.@preserve p uv_write(s, pointer(p), UInt(sizeof(p))) From e3d5aa38a2e43726c42723b1da894e39e9f99ecd Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Tue, 29 Oct 2024 23:44:07 +0900 Subject: [PATCH 329/537] remove unnecessary operations from `typejoin_union_tuple` (#56379) Removes the unnecessary call to `unwrap_unionall` and type assertion. --- base/promotion.jl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/base/promotion.jl b/base/promotion.jl index 689a4e4be8f39..1004c64433ec1 100644 --- a/base/promotion.jl +++ b/base/promotion.jl @@ -199,9 +199,8 @@ end function typejoin_union_tuple(T::DataType) @_foldable_meta - u = Base.unwrap_unionall(T) - p = (u::DataType).parameters - lr = length(p)::Int + p = T.parameters + lr = length(p) if lr == 0 return Tuple{} end From e4dc9d357a1a46ac4078ae81265f6edd9b593bdf Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 29 Oct 2024 13:18:01 -0400 Subject: [PATCH 330/537] precompile: fix performance issues with IO (#56370) The string API here rapidly becomes unusably slow if dumping much debug output during precompile. Fix the design here to use an intermediate IO instead to prevent that. 
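The pattern at issue, sketched roughly (illustrative only, not the exact code touched by this change): repeatedly rebuilding a `String` grows quadratically with the amount of output, while appending to an `IOBuffer` and materializing the `String` once stays linear:

```julia
# Quadratic: every append copies all previously accumulated output into a new String.
function collect_output_strings(chunks)
    out = ""
    for c in chunks
        out = string(out, c)
    end
    return out
end

# Linear: the IOBuffer appends in place; convert to a String once at the end.
function collect_output_iobuffer(chunks)
    io = IOBuffer()
    for c in chunks
        write(io, c)
    end
    return String(take!(io))
end

chunks = fill("some debug line\n", 50_000)
collect_output_strings(chunks) == collect_output_iobuffer(chunks)  # true, but the String version is far slower
```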
--- base/precompilation.jl | 51 +++++++++++++++++++++++++----------------- 1 file changed, 30 insertions(+), 21 deletions(-) diff --git a/base/precompilation.jl b/base/precompilation.jl index f597acef9b57f..92f4980c2b889 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -641,7 +641,7 @@ function _precompilepkgs(pkgs::Vector{String}, return false end end - std_outputs = Dict{PkgConfig,String}() + std_outputs = Dict{PkgConfig,IOBuffer}() taskwaiting = Set{PkgConfig}() pkgspidlocked = Dict{PkgConfig,String}() pkg_liveprinted = nothing @@ -663,7 +663,7 @@ function _precompilepkgs(pkgs::Vector{String}, print(io, ansi_cleartoendofline, str) end end - std_outputs[pkg_config] = string(get(std_outputs, pkg_config, ""), str) + write(get!(IOBuffer, std_outputs, pkg_config), str) if !in(pkg_config, taskwaiting) && occursin("waiting for IO to finish", str) !fancyprint && lock(print_lock) do println(io, pkg.name, color_string(" Waiting for background task / IO / timer.", Base.warn_color())) @@ -857,8 +857,9 @@ function _precompilepkgs(pkgs::Vector{String}, close(std_pipe.in) # close pipe to end the std output monitor wait(t_monitor) if err isa ErrorException || (err isa ArgumentError && startswith(err.msg, "Invalid header in cache file")) - failed_deps[pkg_config] = (strict || is_direct_dep) ? string(sprint(showerror, err), "\n", strip(get(std_outputs, pkg_config, ""))) : "" + errmsg = String(take!(get(IOBuffer, std_outputs, pkg_config))) delete!(std_outputs, pkg_config) # so it's not shown as warnings, given error report + failed_deps[pkg_config] = (strict || is_direct_dep) ? string(sprint(showerror, err), "\n", strip(errmsg)) : "" !fancyprint && lock(print_lock) do println(io, " "^9, color_string(" ✗ ", Base.error_color()), name) end @@ -936,20 +937,22 @@ function _precompilepkgs(pkgs::Vector{String}, end # show any stderr output, even if Pkg.precompile has been interrupted (quick_exit=true), given user may be # interrupting a hanging precompile job with stderr output. julia#48371 - filter!(kv -> !isempty(strip(last(kv))), std_outputs) # remove empty output - if !isempty(std_outputs) - plural1 = length(std_outputs) == 1 ? "y" : "ies" - plural2 = length(std_outputs) == 1 ? "" : "s" - print(iostr, "\n ", color_string("$(length(std_outputs))", Base.warn_color()), " dependenc$(plural1) had output during precompilation:") - for (pkg_config, err) in std_outputs - pkg, config = pkg_config - err = if pkg == pkg_liveprinted - "[Output was shown above]" - else - join(split(strip(err), "\n"), color_string("\n│ ", Base.warn_color())) + let std_outputs = Tuple{PkgConfig,SubString{String}}[(pkg_config, strip(String(take!(io)))) for (pkg_config,io) in std_outputs] + filter!(kv -> !isempty(last(kv)), std_outputs) + if !isempty(std_outputs) + plural1 = length(std_outputs) == 1 ? "y" : "ies" + plural2 = length(std_outputs) == 1 ? "" : "s" + print(iostr, "\n ", color_string("$(length(std_outputs))", Base.warn_color()), " dependenc$(plural1) had output during precompilation:") + for (pkg_config, err) in std_outputs + pkg, config = pkg_config + err = if pkg == pkg_liveprinted + "[Output was shown above]" + else + join(split(err, "\n"), color_string("\n│ ", Base.warn_color())) + end + name = haskey(exts, pkg) ? string(exts[pkg], " → ", pkg.name) : pkg.name + print(iostr, color_string("\n┌ ", Base.warn_color()), name, color_string("\n│ ", Base.warn_color()), err, color_string("\n└ ", Base.warn_color())) end - name = haskey(exts, pkg) ? 
string(exts[pkg], " → ", pkg.name) : pkg.name - print(iostr, color_string("\n┌ ", Base.warn_color()), name, color_string("\n│ ", Base.warn_color()), err, color_string("\n└ ", Base.warn_color())) end end end @@ -959,20 +962,26 @@ function _precompilepkgs(pkgs::Vector{String}, end end quick_exit && return - err_str = "" + err_str = IOBuffer() n_direct_errs = 0 for (pkg_config, err) in failed_deps dep, config = pkg_config if strict || (dep in direct_deps) - config_str = isempty(config[1]) ? "" : "$(join(config[1], " ")) " - err_str = string(err_str, "\n$(dep.name) $config_str\n\n$err", (n_direct_errs > 0 ? "\n" : "")) + print(err_str, "\n", dep.name, " ") + for cfg in config[1] + print(err_str, cfg, " ") + end + print(err_str, "\n\n", err) + n_direct_errs > 0 && write(err_str, "\n") n_direct_errs += 1 end end - if err_str != "" + if position(err_str) > 0 + skip(err_str, -1) + truncate(err_str, position(err_str)) pluralde = n_direct_errs == 1 ? "y" : "ies" direct = strict ? "" : "direct " - err_msg = "The following $n_direct_errs $(direct)dependenc$(pluralde) failed to precompile:\n$(err_str[1:end-1])" + err_msg = "The following $n_direct_errs $(direct)dependenc$(pluralde) failed to precompile:\n$(String(take!(err_str)))" if internal_call # aka. auto-precompilation if isinteractive() && !get(ENV, "CI", false) plural1 = length(failed_deps) == 1 ? "y" : "ies" From 9850a3881221a57a382e98c9b9ae2bf97ac3966d Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Tue, 29 Oct 2024 22:02:37 +0100 Subject: [PATCH 331/537] cache the `find_all_in_cache_path` call during parallel precompilation (#56369) Before (in an environment with DifferentialEquations.jl): ```julia julia> @time Pkg.precompile() 0.733576 seconds (3.44 M allocations: 283.676 MiB, 6.24% gc time) julia> isfile_calls[1:10] 10-element Vector{Pair{String, Int64}}: "/home/kc/.julia/juliaup/julia-nightly/share/julia/compiled/v1.12/Printf/3FQLY_zHycD.ji" => 178 "/home/kc/.julia/juliaup/julia-nightly/share/julia/compiled/v1.12/Printf/3FQLY_xxrt3.ji" => 178 "/home/kc/.julia/juliaup/julia-nightly/share/julia/compiled/v1.12/Dates/p8See_xxrt3.ji" => 158 "/home/kc/.julia/juliaup/julia-nightly/share/julia/compiled/v1.12/Dates/p8See_zHycD.ji" => 158 "/home/kc/.julia/juliaup/julia-nightly/share/julia/compiled/v1.12/TOML/mjrwE_zHycD.ji" => 155 "/home/kc/.julia/juliaup/julia-nightly/share/julia/compiled/v1.12/TOML/mjrwE_xxrt3.ji" => 155 "/home/kc/.julia/compiled/v1.12/Preferences/pWSk8_4Qv86.ji" => 152 "/home/kc/.julia/compiled/v1.12/Preferences/pWSk8_juhqb.ji" => 152 "/home/kc/.julia/juliaup/julia-nightly/share/julia/compiled/v1.12/StyledStrings/UcVoM_zHycD.ji" => 144 "/home/kc/.julia/juliaup/julia-nightly/share/julia/compiled/v1.12/StyledStrings/UcVoM_xxrt3.ji" => 144 ``` After: ```julia julia> @time Pkg.precompile() 0.460077 seconds (877.59 k allocations: 108.075 MiB, 4.77% gc time) julia> isfile_calls[1:10] 10-element Vector{Pair{String, Int64}}: "/tmp/jl_a5xFWK/Project.toml" => 15 "/tmp/jl_a5xFWK/Manifest.toml" => 7 "/home/kc/.julia/registries/General.toml" => 6 "/home/kc/.julia/juliaup/julia-nightly/share/julia/stdlib/v1.12/Markdown/src/Markdown.jl" => 3 "/home/kc/.julia/juliaup/julia-nightly/share/julia/stdlib/v1.12/Serialization/src/Serialization.jl" => 3 "/home/kc/.julia/juliaup/julia-nightly/share/julia/stdlib/v1.12/Distributed/src/Distributed.jl" => 3 "/home/kc/.julia/juliaup/julia-nightly/share/julia/stdlib/v1.12/UUIDs/src/UUIDs.jl" => 3 "/home/kc/.julia/juliaup/julia-nightly/share/julia/stdlib/v1.12/LibCURL/src/LibCURL.jl" => 3 ``` 
Performance is improved and we are not calling `isfile` on a bunch of the same ji files hundreds times. Benchmark is made on a linux machine so performance diff should be a lot better on Windows where these `isfile_casesensitive` call is much more expensive. Fixes https://github.com/JuliaLang/julia/issues/56366 --------- Co-authored-by: KristofferC Co-authored-by: Ian Butterworth --- base/loading.jl | 10 ++++++---- base/precompilation.jl | 6 ++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 8edc1fe79c617..f7f749e334ed1 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1815,7 +1815,8 @@ const StaleCacheKey = Tuple{PkgId, UInt128, String, String} function compilecache_path(pkg::PkgId; ignore_loaded::Bool=false, stale_cache::Dict{StaleCacheKey,Bool}=Dict{StaleCacheKey, Bool}(), - cachepaths::Vector{String}=Base.find_all_in_cache_path(pkg), + cachepath_cache::Dict{PkgId, Vector{String}}=Dict{PkgId, Vector{String}}(), + cachepaths::Vector{String}=get!(() -> find_all_in_cache_path(pkg), cachepath_cache, pkg), sourcepath::Union{String,Nothing}=Base.locate_package(pkg), flags::CacheFlags=CacheFlags()) path = nothing @@ -1830,7 +1831,7 @@ function compilecache_path(pkg::PkgId; for dep in staledeps dep isa Module && continue modpath, modkey, modbuild_id = dep::Tuple{String, PkgId, UInt128} - modpaths = find_all_in_cache_path(modkey) + modpaths = get!(() -> find_all_in_cache_path(modkey), cachepath_cache, modkey) for modpath_to_try in modpaths::Vector{String} stale_cache_key = (modkey, modbuild_id, modpath, modpath_to_try)::StaleCacheKey if get!(() -> stale_cachefile(stale_cache_key...; ignore_loaded, requested_flags=flags) === true, @@ -1872,10 +1873,11 @@ fresh julia session specify `ignore_loaded=true`. 
function isprecompiled(pkg::PkgId; ignore_loaded::Bool=false, stale_cache::Dict{StaleCacheKey,Bool}=Dict{StaleCacheKey, Bool}(), - cachepaths::Vector{String}=Base.find_all_in_cache_path(pkg), + cachepath_cache::Dict{PkgId, Vector{String}}=Dict{PkgId, Vector{String}}(), + cachepaths::Vector{String}=get!(() -> find_all_in_cache_path(pkg), cachepath_cache, pkg), sourcepath::Union{String,Nothing}=Base.locate_package(pkg), flags::CacheFlags=CacheFlags()) - path = compilecache_path(pkg; ignore_loaded, stale_cache, cachepaths, sourcepath, flags) + path = compilecache_path(pkg; ignore_loaded, stale_cache, cachepath_cache, cachepaths, sourcepath, flags) return !isnothing(path) end diff --git a/base/precompilation.jl b/base/precompilation.jl index 92f4980c2b889..54f13d298a462 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -415,6 +415,8 @@ function _precompilepkgs(pkgs::Vector{String}, color_string(cstr::String, col::Union{Int64, Symbol}) = _color_string(cstr, col, hascolor) stale_cache = Dict{StaleCacheKey, Bool}() + cachepath_cache = Dict{PkgId, Vector{String}}() + exts = Dict{PkgId, String}() # ext -> parent # make a flat map of each dep and its direct deps depsmap = Dict{PkgId, Vector{PkgId}}() @@ -785,7 +787,7 @@ function _precompilepkgs(pkgs::Vector{String}, ## precompilation loop for (pkg, deps) in depsmap - cachepaths = Base.find_all_in_cache_path(pkg) + cachepaths = get!(() -> Base.find_all_in_cache_path(pkg), cachepath_cache, pkg) sourcepath = Base.locate_package(pkg) single_requested_pkg = length(requested_pkgs) == 1 && only(requested_pkgs) == pkg.name for config in configs @@ -808,7 +810,7 @@ function _precompilepkgs(pkgs::Vector{String}, wait(was_processed[(dep,config)]) end circular = pkg in circular_deps - is_stale = !Base.isprecompiled(pkg; ignore_loaded=true, stale_cache, cachepaths, sourcepath, flags=cacheflags) + is_stale = !Base.isprecompiled(pkg; ignore_loaded=true, stale_cache, cachepath_cache, cachepaths, sourcepath, flags=cacheflags) if !circular && is_stale Base.acquire(parallel_limiter) is_direct_dep = pkg in direct_deps From a9342d679979b0f19522949b9b00200c5b2fc010 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Wed, 30 Oct 2024 10:50:25 +0000 Subject: [PATCH 332/537] [docs] Fix note admonition in llvm-passes.md (#56392) At the moment this is rendered incorrectly: https://docs.julialang.org/en/v1.11.1/devdocs/llvm-passes/#JuliaLICM --- doc/src/devdocs/llvm-passes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/devdocs/llvm-passes.md b/doc/src/devdocs/llvm-passes.md index 736faf54c219b..da600f73fd696 100644 --- a/doc/src/devdocs/llvm-passes.md +++ b/doc/src/devdocs/llvm-passes.md @@ -144,6 +144,6 @@ This pass is used to hoist Julia-specific intrinsics out of loops. Specifically, 3. Hoist allocations out of loops when they do not escape the loop 1. We use a very conservative definition of escape here, the same as the one used in `AllocOptPass`. This transformation can reduce the number of allocations in the IR, even when an allocation escapes the function altogether. -!!!note +!!! 
note This pass is required to preserve LLVM's [MemorySSA](https://llvm.org/docs/MemorySSA.html) ([Short Video](https://www.youtube.com/watch?v=bdxWmryoHak), [Longer Video](https://www.youtube.com/watch?v=1e5y6WDbXCQ)) and [ScalarEvolution](https://baziotis.cs.illinois.edu/compilers/introduction-to-scalar-evolution.html) ([Newer Slides](https://llvm.org/devmtg/2018-04/slides/Absar-ScalarEvolution.pdf) [Older Slides](https://llvm.org/devmtg/2009-10/ScalarEvolutionAndLoopOptimization.pdf)) analyses. From 2fe65625cf787c205571a52e26b8ef970dc4f65b Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Wed, 30 Oct 2024 18:23:46 +0530 Subject: [PATCH 333/537] structure-preserving broadcast for `SymTridiagonal` (#56001) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With this PR, certain broadcasting operations preserve the structure of a `SymTridiagonal`: ```julia julia> S = SymTridiagonal([1,2,3,4], [1,2,3]) 4×4 SymTridiagonal{Int64, Vector{Int64}}: 1 1 ⋅ ⋅ 1 2 2 ⋅ ⋅ 2 3 3 ⋅ ⋅ 3 4 julia> S .* 2 4×4 SymTridiagonal{Int64, Vector{Int64}}: 2 2 ⋅ ⋅ 2 4 4 ⋅ ⋅ 4 6 6 ⋅ ⋅ 6 8 ``` This was deliberately disabled on master, but I couldn't find any test that fails if this is enabled. --- stdlib/LinearAlgebra/src/structuredbroadcast.jl | 5 +++-- stdlib/LinearAlgebra/test/structuredbroadcast.jl | 15 +++++++++++++-- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/stdlib/LinearAlgebra/src/structuredbroadcast.jl b/stdlib/LinearAlgebra/src/structuredbroadcast.jl index 0c06f84116fc7..9a4d55fd58bf0 100644 --- a/stdlib/LinearAlgebra/src/structuredbroadcast.jl +++ b/stdlib/LinearAlgebra/src/structuredbroadcast.jl @@ -171,7 +171,7 @@ end function Base.similar(bc::Broadcasted{StructuredMatrixStyle{T}}, ::Type{ElType}) where {T,ElType} inds = axes(bc) fzerobc = fzeropreserving(bc) - if isstructurepreserving(bc) || (fzerobc && !(T <: Union{SymTridiagonal,UnitLowerTriangular,UnitUpperTriangular})) + if isstructurepreserving(bc) || (fzerobc && !(T <: Union{UnitLowerTriangular,UnitUpperTriangular})) return structured_broadcast_alloc(bc, T, ElType, length(inds[1])) elseif fzerobc && T <: UnitLowerTriangular return similar(convert(Broadcasted{StructuredMatrixStyle{LowerTriangular}}, bc), ElType) @@ -240,7 +240,8 @@ function copyto!(dest::SymTridiagonal, bc::Broadcasted{<:StructuredMatrixStyle}) end for i = 1:size(dest, 1)-1 v = @inbounds bc[BandIndex(1, i)] - v == (@inbounds bc[BandIndex(-1, i)]) || throw(ArgumentError(lazy"broadcasted assignment breaks symmetry between locations ($i, $(i+1)) and ($(i+1), $i)")) + v == transpose(@inbounds bc[BandIndex(-1, i)]) || + throw(ArgumentError(lazy"broadcasted assignment breaks symmetry between locations ($i, $(i+1)) and ($(i+1), $i)")) dest.ev[i] = v end return dest diff --git a/stdlib/LinearAlgebra/test/structuredbroadcast.jl b/stdlib/LinearAlgebra/test/structuredbroadcast.jl index 384ed5b3b60cf..71494aedcbef5 100644 --- a/stdlib/LinearAlgebra/test/structuredbroadcast.jl +++ b/stdlib/LinearAlgebra/test/structuredbroadcast.jl @@ -3,6 +3,10 @@ module TestStructuredBroadcast using Test, LinearAlgebra +const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") +isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) +using .Main.SizedArrays + @testset "broadcast[!] 
over combinations of scalars, structured matrices, and dense vectors/matrices" begin N = 10 s = rand() @@ -12,10 +16,11 @@ using Test, LinearAlgebra D = Diagonal(rand(N)) B = Bidiagonal(rand(N), rand(N - 1), :U) T = Tridiagonal(rand(N - 1), rand(N), rand(N - 1)) + S = SymTridiagonal(rand(N), rand(N - 1)) U = UpperTriangular(rand(N,N)) L = LowerTriangular(rand(N,N)) M = Matrix(rand(N,N)) - structuredarrays = (D, B, T, U, L, M) + structuredarrays = (D, B, T, U, L, M, S) fstructuredarrays = map(Array, structuredarrays) for (X, fX) in zip(structuredarrays, fstructuredarrays) @test (Q = broadcast(sin, X); typeof(Q) == typeof(X) && Q == broadcast(sin, fX)) @@ -166,10 +171,11 @@ end D = Diagonal(rand(N)) B = Bidiagonal(rand(N), rand(N - 1), :U) T = Tridiagonal(rand(N - 1), rand(N), rand(N - 1)) + S = SymTridiagonal(rand(N), rand(N - 1)) U = UpperTriangular(rand(N,N)) L = LowerTriangular(rand(N,N)) M = Matrix(rand(N,N)) - structuredarrays = (M, D, B, T, U, L) + structuredarrays = (M, D, B, T, S, U, L) fstructuredarrays = map(Array, structuredarrays) for (X, fX) in zip(structuredarrays, fstructuredarrays) @test (Q = map(sin, X); typeof(Q) == typeof(X) && Q == map(sin, fX)) @@ -363,6 +369,11 @@ end U = UpperTriangular([(i+j)*A for i in 1:3, j in 1:3]) standardbroadcastingtests(U, UpperTriangular) end + @testset "SymTridiagonal" begin + m = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) + S = SymTridiagonal(fill(m,4), fill(m,3)) + standardbroadcastingtests(S, SymTridiagonal) + end end end From 599b7ec51acfc7e88c7668e615ec85b285ff81ba Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Wed, 30 Oct 2024 10:39:56 -0400 Subject: [PATCH 334/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=20116ba910c=20to=209f8e11a4c=20(#56386)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stdlib: Pkg URL: https://github.com/JuliaLang/Pkg.jl.git Stdlib branch: master Julia branch: master Old commit: 116ba910c New commit: 9f8e11a4c Julia version: 1.12.0-DEV Pkg version: 1.12.0 Bump invoked by: @IanButterworth Powered by: [BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl) Diff: https://github.com/JuliaLang/Pkg.jl/compare/116ba910c74ab565d348aa8a50d6dd10148f11ab...9f8e11a4c0efb3b68a1e25a33f372f398c89cd66 ``` $ git log --oneline 116ba910c..9f8e11a4c 9f8e11a4c strip out tree_hash for stdlibs that have have been freed in newer julia versions (#4062) c0df25a47 rm dead code (#4061) ``` Co-authored-by: Dilum Aluthge --- .../Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 | 1 - .../Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 | 1 - .../Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 | 1 + .../Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 | 1 + stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 create mode 100644 deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 create mode 100644 deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 diff --git a/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 b/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 deleted file mode 100644 index 61dca3054d58f..0000000000000 --- 
a/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -9905cd10c29974f3b0bb47f2e40951b0 diff --git a/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 b/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 deleted file mode 100644 index 3757366fd23cf..0000000000000 --- a/deps/checksums/Pkg-116ba910c74ab565d348aa8a50d6dd10148f11ab.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -b99db15e6646b1eaa35df705ca39c7f3ddb05073293c779963231c22d17f4ae449739f4e8535a41ae9ae5fb1661f76c915fb2c7853a86fc695335b3e1ce3c06d diff --git a/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 b/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 new file mode 100644 index 0000000000000..1a0000a9d806e --- /dev/null +++ b/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 @@ -0,0 +1 @@ +f8a63ab3677f5df71a93d6d0a1f6333d diff --git a/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 b/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 new file mode 100644 index 0000000000000..99020c2fa7a32 --- /dev/null +++ b/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 @@ -0,0 +1 @@ +3351c068974d2520a8f8fa9030d90c73cce69c87feae95c6ac6f166d3970a8096ed443280bef80b3409238a988aaea98f267bbec8978ad79594cedb0d59a37e5 diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index 24c73834eca22..32c6a094005f9 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = 116ba910c74ab565d348aa8a50d6dd10148f11ab +PKG_SHA1 = 9f8e11a4c0efb3b68a1e25a33f372f398c89cd66 PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From 717bf54848376e93b13151c879ecb811077c2acc Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Wed, 30 Oct 2024 16:01:07 +0100 Subject: [PATCH 335/537] load extensions with fewer triggers earlier (#49891) Aimed to support the use case in https://github.com/JuliaLang/julia/issues/48734#issuecomment-1554626135. https://github.com/KristofferC/ExtSquared.jl is an example, see specifically https://github.com/KristofferC/ExtSquared.jl/blob/ded7c57d6f799674e3310b8174dfb07591bbe025/ext/BExt.jl#L4. I think this makes sense, happy for a second pair of eyes though. 
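For concreteness, here is a minimal self-contained sketch (all names below are invented for illustration and do not come from this PR) of why the ordering matters: an extension with two triggers may want to reuse an extension of the same parent package that has only one trigger, e.g. by calling `Base.get_extension(Parent, :AExt)` at load time, and loading extensions with fewer triggers first makes that lookup succeed. The ordering itself is just a sort on the trigger count:

```julia
# Sketch of the new rule, using an invented stand-in for the internal ExtensionId:
# among extensions that become loadable, fewer total triggers means loaded earlier.
struct ExtSketch
    name::Symbol
    n_total_triggers::Int
end

ready = [ExtSketch(:ABExt, 2), ExtSketch(:AExt, 1)]
sort!(ready; by = ext -> ext.n_total_triggers)
@assert first(ready).name === :AExt  # the single-trigger extension comes first
```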
cc @termi-official --------- Co-authored-by: KristofferC Co-authored-by: Cody Tapscott <84105208+topolarity@users.noreply.github.com> --- base/loading.jl | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index f7f749e334ed1..28875b8713b35 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1459,6 +1459,7 @@ end mutable struct ExtensionId const id::PkgId const parentid::PkgId # just need the name, for printing + const n_total_triggers::Int ntriggers::Int # how many more packages must be defined until this is loaded end @@ -1554,7 +1555,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} continue # extension is already primed or loaded, don't add it again end EXT_PRIMED[id] = parent - gid = ExtensionId(id, parent, 1 + length(triggers)) + gid = ExtensionId(id, parent, 1 + length(triggers), 1 + length(triggers)) trigger1 = get!(Vector{ExtensionId}, EXT_DORMITORY, parent) push!(trigger1, gid) for trigger in triggers @@ -1598,25 +1599,22 @@ function run_extension_callbacks(pkgid::PkgId) # take ownership of extids that depend on this pkgid extids = pop!(EXT_DORMITORY, pkgid, nothing) extids === nothing && return + extids_to_load = Vector{ExtensionId}() for extid in extids - if extid.ntriggers > 0 - # indicate pkgid is loaded - extid.ntriggers -= 1 - end - if extid.ntriggers < 0 - # indicate pkgid is loaded - extid.ntriggers += 1 - succeeded = false - else - succeeded = true - end + @assert extid.ntriggers > 0 + extid.ntriggers -= 1 if extid.ntriggers == 0 - # actually load extid, now that all dependencies are met, - # and record the result - succeeded = succeeded && run_extension_callbacks(extid) - succeeded || push!(EXT_DORMITORY_FAILED, extid) + push!(extids_to_load, extid) end end + # Load extensions with the fewest triggers first + sort!(extids_to_load, by=extid->extid.n_total_triggers) + for extid in extids_to_load + # actually load extid, now that all dependencies are met, + succeeded = run_extension_callbacks(extid) + succeeded || push!(EXT_DORMITORY_FAILED, extid) + end + return end From c0ce290b8b72ccabfa15aadc8576ab4c14e27d8f Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Wed, 30 Oct 2024 22:45:13 +0530 Subject: [PATCH 336/537] Dispatch in generic_matmatmul (#56384) Replacing the branches by dispatch reduces latency, presumably because there's less dead code in the method. ```julia julia> using LinearAlgebra julia> A = rand(Int,2,2); B = copy(A); C = similar(A); julia> @time mul!(C, A, B, 1, 2); 0.363944 seconds (1.65 M allocations: 84.584 MiB, 37.57% gc time, 99.99% compilation time) # master 0.102676 seconds (176.55 k allocations: 8.904 MiB, 27.04% gc time, 99.97% compilation time) # this PR ``` The latency is now distributed between the different branches: ```julia julia> @time mul!(C, A, B, 1, 2); 0.072441 seconds (176.55 k allocations: 8.903 MiB, 99.97% compilation time) julia> @time mul!(C, A', B, 1, 2); 0.085817 seconds (116.44 k allocations: 5.913 MiB, 99.96% compilation time: 4% of which was recompilation) julia> @time mul!(C, A', B', 1, 2); 0.345337 seconds (1.07 M allocations: 54.773 MiB, 25.77% gc time, 99.99% compilation time: 40% of which was recompilation) ``` It would be good to look into why there's recompilation in the last case, but the branch is less commonly taken than the others that have significantly lower latency after this PR. 
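As a rough illustration of the pattern applied here (a simplified sketch, not the actual `LinearAlgebra` implementation), replacing a branch over the wrapper types with dispatch means each flavor is compiled only when it is first called, which is where the latency saving comes from:

```julia
using LinearAlgebra

# Sketch only: one small helper method per wrapper type instead of a single large
# method containing every branch, so branches that are never taken are never compiled.
scaled_mul!(C, A, B, α, β) = _scaled_mul!(C, A, B, α, β)
_scaled_mul!(C, A::Union{Adjoint,Transpose}, B, α, β) = (C .= α .* (Matrix(A) * B) .+ β .* C; C)
_scaled_mul!(C, A, B, α, β) = (C .= α .* (A * B) .+ β .* C; C)

A = rand(Int, 2, 2); B = copy(A); C = zeros(Int, 2, 2);
scaled_mul!(C, A, B, 1, 2)   # compiles only the plain-matrix helper
scaled_mul!(C, A', B, 1, 2)  # the adjoint/transpose helper is compiled only on first use
```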
--- stdlib/LinearAlgebra/src/matmul.jl | 87 +++++++++++++++++------------- 1 file changed, 49 insertions(+), 38 deletions(-) diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl index 74cb50f955bbb..8e90b21a4b7ce 100644 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -994,49 +994,60 @@ _generic_matmatmul!(C::AbstractVecOrMat, A::AbstractVecOrMat, B::AbstractVecOrMa if BxN != CxN throw(DimensionMismatch(lazy"matrix B has axes ($BxK,$BxN), matrix C has axes ($CxM,$CxN)")) end - if isbitstype(R) && sizeof(R) ≤ 16 && !(A isa Adjoint || A isa Transpose) - _rmul_or_fill!(C, beta) - (iszero(alpha) || isempty(A) || isempty(B)) && return C - @inbounds for n in BxN, k in BxK - # Balpha = B[k,n] * alpha, but we skip the multiplication in case isone(alpha) - Balpha = @stable_muladdmul MulAddMul(alpha, false)(B[k,n]) - @simd for m in AxM - C[m,n] = muladd(A[m,k], Balpha, C[m,n]) - end - end - elseif isbitstype(R) && sizeof(R) ≤ 16 && ((A isa Adjoint && B isa Adjoint) || (A isa Transpose && B isa Transpose)) - _rmul_or_fill!(C, beta) - (iszero(alpha) || isempty(A) || isempty(B)) && return C - t = wrapperop(A) - pB = parent(B) - pA = parent(A) - tmp = similar(C, CxN) - ci = first(CxM) - ta = t(alpha) - for i in AxM - mul!(tmp, pB, view(pA, :, i)) - @views C[ci,:] .+= t.(ta .* tmp) - ci += 1 - end - else - if iszero(alpha) || isempty(A) || isempty(B) - return _rmul_or_fill!(C, beta) + __generic_matmatmul!(C, A, B, alpha, beta, Val(isbitstype(R) && sizeof(R) ≤ 16)) + return C +end +__generic_matmatmul!(C, A::Adjoint, B::Adjoint, alpha, beta, ::Val{true}) = _generic_matmatmul_adjtrans!(C, A, B, alpha, beta) +__generic_matmatmul!(C, A::Transpose, B::Transpose, alpha, beta, ::Val{true}) = _generic_matmatmul_adjtrans!(C, A, B, alpha, beta) +__generic_matmatmul!(C, A::Union{Adjoint, Transpose}, B, alpha, beta, ::Val{true}) = _generic_matmatmul_generic!(C, A, B, alpha, beta) +__generic_matmatmul!(C, A, B, alpha, beta, ::Val{true}) = _generic_matmatmul_nonadjtrans!(C, A, B, alpha, beta) +__generic_matmatmul!(C, A, B, alpha, beta, ::Val{false}) = _generic_matmatmul_generic!(C, A, B, alpha, beta) + +function _generic_matmatmul_nonadjtrans!(C, A, B, alpha, beta) + _rmul_or_fill!(C, beta) + (iszero(alpha) || isempty(A) || isempty(B)) && return C + @inbounds for n in axes(B, 2), k in axes(B, 1) + # Balpha = B[k,n] * alpha, but we skip the multiplication in case isone(alpha) + Balpha = @stable_muladdmul MulAddMul(alpha, false)(B[k,n]) + @simd for m in axes(A, 1) + C[m,n] = muladd(A[m,k], Balpha, C[m,n]) end - a1 = first(AxK) - b1 = first(BxK) - @inbounds for i in AxM, j in BxN - z2 = zero(A[i, a1]*B[b1, j] + A[i, a1]*B[b1, j]) - Ctmp = convert(promote_type(R, typeof(z2)), z2) - @simd for k in AxK - Ctmp = muladd(A[i, k], B[k, j], Ctmp) - end - @stable_muladdmul _modify!(MulAddMul(alpha,beta), Ctmp, C, (i,j)) + end + C +end +function _generic_matmatmul_adjtrans!(C, A, B, alpha, beta) + _rmul_or_fill!(C, beta) + (iszero(alpha) || isempty(A) || isempty(B)) && return C + t = wrapperop(A) + pB = parent(B) + pA = parent(A) + tmp = similar(C, axes(C, 2)) + ci = firstindex(C, 1) + ta = t(alpha) + for i in axes(A, 1) + mul!(tmp, pB, view(pA, :, i)) + @views C[ci,:] .+= t.(ta .* tmp) + ci += 1 + end + C +end +function _generic_matmatmul_generic!(C, A, B, alpha, beta) + if iszero(alpha) || isempty(A) || isempty(B) + return _rmul_or_fill!(C, beta) + end + a1 = firstindex(A, 2) + b1 = firstindex(B, 1) + @inbounds for i in axes(A, 1), j in axes(B, 2) + z2 = 
zero(A[i, a1]*B[b1, j] + A[i, a1]*B[b1, j]) + Ctmp = convert(promote_type(eltype(C), typeof(z2)), z2) + @simd for k in axes(A, 2) + Ctmp = muladd(A[i, k], B[k, j], Ctmp) end + @stable_muladdmul _modify!(MulAddMul(alpha,beta), Ctmp, C, (i,j)) end - return C + C end - # multiply 2x2 matrices Base.@constprop :aggressive function matmul2x2(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} matmul2x2!(similar(B, promote_op(matprod, T, S), 2, 2), tA, tB, A, B) From db6e95e20095cc41307dd6b80aa66320e98cb64b Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Wed, 30 Oct 2024 22:54:36 +0530 Subject: [PATCH 337/537] Add `atol` to addmul tests (#56210) This avoids the issues as in https://github.com/JuliaLang/julia/issues/55781 and https://github.com/JuliaLang/julia/issues/55779 where we compare small numbers using a relative tolerance. Also, in this PR, I have added an extra test, so now we compare both `A * B * alpha + C * beta` and `A * B * alpha - C * beta` with the corresponding in-place versions. The idea is that if the terms `A * B * alpha` and ` C * beta` have similar magnitudes, at least one of the two expressions will usually result in a large enough number that may be compared using a relative tolerance. I am unsure if the `atol` chosen here is optimal, as I have ballparked it to use the maximum `eps` by looking at all the `eltype`s involved. Fixes #55781 Fixes #55779 --- stdlib/LinearAlgebra/test/addmul.jl | 69 ++++++++++++++++------------- 1 file changed, 39 insertions(+), 30 deletions(-) diff --git a/stdlib/LinearAlgebra/test/addmul.jl b/stdlib/LinearAlgebra/test/addmul.jl index fcd0b51b2e4c0..903e3b17f0ef1 100644 --- a/stdlib/LinearAlgebra/test/addmul.jl +++ b/stdlib/LinearAlgebra/test/addmul.jl @@ -130,6 +130,29 @@ for cmat in mattypes, push!(testdata, (cmat{celt}, amat{aelt}, bmat{belt})) end +strongzero(α) = iszero(α) ? false : α +function compare_matmul(C, A, B, α, β, + rtol = max(rtoldefault.(real.(eltype.((C, A, B))))..., + rtoldefault.(real.(typeof.((α, β))))...); + Ac = collect(A), Bc = collect(B), Cc = collect(C)) + @testset let A=A, B=B, C=C, α=α, β=β + Ccopy = copy(C) + returned_mat = mul!(Ccopy, A, B, α, β) + @test returned_mat === Ccopy + atol = max(maximum(eps∘real∘float∘eltype, (C,A,B)), + maximum(eps∘real∘float∘typeof, (α,β))) + exp_val = Ac * Bc * strongzero(α) + Cc * strongzero(β) + @test collect(returned_mat) ≈ exp_val rtol=rtol atol=atol + rtol_match = isapprox(collect(returned_mat), exp_val, rtol=rtol) + if !(rtol_match || β isa Bool || isapprox(β, 0, atol=eps(typeof(β)))) + negβ = -β + returned_mat = mul!(copy(C), A, B, α, negβ) + exp_val = Ac * Bc * strongzero(α) + Cc * negβ + @test collect(returned_mat) ≈ exp_val rtol=rtol atol=atol + end + end +end + @testset "mul!(::$TC, ::$TA, ::$TB, α, β)" for (TC, TA, TB) in testdata if needsquare(TA) na1 = na2 = rand(sizecandidates) @@ -147,32 +170,29 @@ end bsize = (na2, nb2) csize = (na1, nb2) + C = _rand(TC, csize) + A = _rand(TA, asize) + B = _rand(TB, bsize) + Cc = Matrix(C) + Ac = Matrix(A) + Bc = Matrix(B) + @testset for α in Any[true, eltype(TC)(1), _rand(eltype(TC))], β in Any[false, eltype(TC)(0), _rand(eltype(TC))] - C = _rand(TC, csize) - A = _rand(TA, asize) - B = _rand(TB, bsize) # This is similar to how `isapprox` choose `rtol` (when # `atol=0`) but consider all number types involved: rtol = max(rtoldefault.(real.(eltype.((C, A, B))))..., rtoldefault.(real.(typeof.((α, β))))...) 
- Cc = copy(C) - Ac = Matrix(A) - Bc = Matrix(B) - returned_mat = mul!(C, A, B, α, β) - @test returned_mat === C - @test collect(returned_mat) ≈ α * Ac * Bc + β * Cc rtol=rtol + compare_matmul(C, A, B, α, β, rtol; Ac, Bc, Cc) y = C[:, 1] x = B[:, 1] yc = Vector(y) xc = Vector(x) - returned_vec = mul!(y, A, x, α, β) - @test returned_vec === y - @test collect(returned_vec) ≈ α * Ac * xc + β * yc rtol=rtol + compare_matmul(y, A, x, α, β, rtol; Ac, Bc=xc, Cc=yc) if TC <: Matrix @testset "adjoint and transpose" begin @@ -183,35 +203,24 @@ end Af = fa === identity ? A : fa(_rand(TA, reverse(asize))) Bf = fb === identity ? B : fb(_rand(TB, reverse(bsize))) - Ac = collect(Af) - Bc = collect(Bf) - Cc = collect(C) - - returned_mat = mul!(C, Af, Bf, α, β) - @test returned_mat === C - @test collect(returned_mat) ≈ α * Ac * Bc + β * Cc rtol=rtol + compare_matmul(C, Af, Bf, α, β, rtol) end end end if isnanfillable(C) @testset "β = 0 ignores C .= NaN" begin - parent(C) .= NaN - Ac = Matrix(A) - Bc = Matrix(B) - returned_mat = mul!(C, A, B, α, zero(eltype(C))) - @test returned_mat === C - @test collect(returned_mat) ≈ α * Ac * Bc rtol=rtol + Ccopy = copy(C) + parent(Ccopy) .= NaN + compare_matmul(Ccopy, A, B, α, zero(eltype(C)), rtol; Ac, Bc, Cc) end end if isnanfillable(A) @testset "α = 0 ignores A .= NaN" begin - parent(A) .= NaN - Cc = copy(C) - returned_mat = mul!(C, A, B, zero(eltype(A)), β) - @test returned_mat === C - @test collect(returned_mat) ≈ β * Cc rtol=rtol + Acopy = copy(A) + parent(Acopy) .= NaN + compare_matmul(C, Acopy, B, zero(eltype(A)), β, rtol; Ac, Bc, Cc) end end end From c6e7f83a6b6e9b7ec34a083983ab812278fa74a5 Mon Sep 17 00:00:00 2001 From: Max Horn Date: Thu, 31 Oct 2024 06:49:58 +0100 Subject: [PATCH 338/537] Export jl_gc_new_weakref again via julia.h (#56373) This is how it was exported for at least Julia 1.0 - 1.11. Closes #56367 --- src/gc-common.h | 9 --------- src/julia.h | 5 +++++ 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/src/gc-common.h b/src/gc-common.h index bbac5f30c6755..32b7470b13a58 100644 --- a/src/gc-common.h +++ b/src/gc-common.h @@ -185,13 +185,4 @@ extern jl_ptls_t* gc_all_tls_states; extern int gc_logging_enabled; -// =========================================================================== // -// Misc -// =========================================================================== // - -// Allocates a new weak-reference, assigns its value and increments Julia allocation -// counters. If thread-local allocators are used, then this function should allocate in the -// thread-local allocator of the current thread. -JL_DLLEXPORT jl_weakref_t *jl_gc_new_weakref(jl_value_t *value); - #endif // JL_GC_COMMON_H diff --git a/src/julia.h b/src/julia.h index 1d36dba519700..ffd669cd828a4 100644 --- a/src/julia.h +++ b/src/julia.h @@ -1128,6 +1128,11 @@ JL_DLLEXPORT void jl_finalize(jl_value_t *o); JL_DLLEXPORT void *jl_malloc_stack(size_t *bufsz, struct _jl_task_t *owner) JL_NOTSAFEPOINT; JL_DLLEXPORT void jl_free_stack(void *stkbuf, size_t bufsz); +// Allocates a new weak-reference, assigns its value and increments Julia allocation +// counters. If thread-local allocators are used, then this function should allocate in the +// thread-local allocator of the current thread.
+JL_DLLEXPORT jl_weakref_t *jl_gc_new_weakref(jl_value_t *value); + // GC write barriers STATIC_INLINE void jl_gc_wb(const void *parent, const void *ptr) JL_NOTSAFEPOINT From 2a24b8f4425560f8a1a53fe11397fbf4bf5a41d9 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Thu, 31 Oct 2024 14:53:17 +0900 Subject: [PATCH 339/537] InteractiveUtils: define `InteractiveUtils.@code_ircode` (#56390) --- stdlib/InteractiveUtils/src/macros.jl | 10 +++++++++- stdlib/InteractiveUtils/test/runtests.jl | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/stdlib/InteractiveUtils/src/macros.jl b/stdlib/InteractiveUtils/src/macros.jl index e338d8626fb0f..0c272940b82d9 100644 --- a/stdlib/InteractiveUtils/src/macros.jl +++ b/stdlib/InteractiveUtils/src/macros.jl @@ -2,7 +2,7 @@ # macro wrappers for various reflection functions -import Base: typesof, insert!, replace_ref_begin_end!, infer_effects +import Base: typesof, insert!, replace_ref_begin_end!, infer_effects, code_ircode # defined in Base so it's possible to time all imports, including InteractiveUtils and its deps # via. `Base.@time_imports` etc. @@ -249,6 +249,14 @@ macro code_lowered(ex0...) end end +macro code_ircode(ex0...) + thecall = gen_call_with_extracted_types_and_kwargs(__module__, :code_ircode, ex0) + quote + local results = $thecall + length(results) == 1 ? results[1] : results + end +end + """ @functionloc diff --git a/stdlib/InteractiveUtils/test/runtests.jl b/stdlib/InteractiveUtils/test/runtests.jl index e729ae67bde19..bb64153818c1d 100644 --- a/stdlib/InteractiveUtils/test/runtests.jl +++ b/stdlib/InteractiveUtils/test/runtests.jl @@ -819,6 +819,8 @@ end end @test Base.infer_effects(sin, (Int,)) == InteractiveUtils.@infer_effects sin(42) +@test first(InteractiveUtils.@code_ircode sin(42)) isa Core.Compiler.IRCode +@test first(InteractiveUtils.@code_ircode optimize_until="Inlining" sin(42)) isa Core.Compiler.IRCode @testset "Docstrings" begin @test isempty(Docs.undocumented_names(InteractiveUtils)) From f8c6d1c106648f53874c9b0ebee1ae7382766dd0 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 31 Oct 2024 13:21:46 -0400 Subject: [PATCH 340/537] Fix some missing write barriers and add some helpful comments (#56396) I was trying some performance optimization which didn't end up working out, but in the process I found two missing write barriers and added some helpful comments for future readers, so that part is probably still useful. --- base/runtime_internals.jl | 2 +- src/gc-interface.h | 5 +++++ src/julia.h | 6 +++--- src/module.c | 10 +++++++--- 4 files changed, 16 insertions(+), 7 deletions(-) diff --git a/base/runtime_internals.jl b/base/runtime_internals.jl index ab867f8fcae6d..dd526a24d6494 100644 --- a/base/runtime_internals.jl +++ b/base/runtime_internals.jl @@ -248,7 +248,7 @@ binding_kind(m::Module, s::Symbol) = binding_kind(lookup_binding_partition(tls_w delete_binding(mod::Module, sym::Symbol) Force the binding `mod.sym` to be undefined again, allowing it be redefined. -Note that this operation is very expensive, requirinig a full scan of all code in the system, +Note that this operation is very expensive, requiring a full scan of all code in the system, as well as potential recompilation of any methods that (may) have used binding information. 
diff --git a/src/gc-interface.h b/src/gc-interface.h index 0b5df17a3b8c5..eb6687d52d9ab 100644 --- a/src/gc-interface.h +++ b/src/gc-interface.h @@ -192,6 +192,11 @@ JL_DLLEXPORT void *jl_gc_perm_alloc(size_t sz, int zero, unsigned align, // object header must be included in the object size. This object is allocated in an // immortal region that is never swept. The second parameter specifies the type of the // object being allocated and will be used to set the object header. +// +// !!! warning: Because permanently allocated objects are not swept, the GC will not +// necessarily mark any objects that would have ordinarily been rooted by +// the allocated object. All objects stored in fields of this object +// must be either permanently allocated or have other roots. struct _jl_value_t *jl_gc_permobj(size_t sz, void *ty) JL_NOTSAFEPOINT; // ========================================================================= // diff --git a/src/julia.h b/src/julia.h index ffd669cd828a4..a710192d5756c 100644 --- a/src/julia.h +++ b/src/julia.h @@ -1138,15 +1138,15 @@ JL_DLLEXPORT jl_weakref_t *jl_gc_new_weakref(jl_value_t *value); STATIC_INLINE void jl_gc_wb(const void *parent, const void *ptr) JL_NOTSAFEPOINT { // parent and ptr isa jl_value_t* - if (__unlikely(jl_astaggedvalue(parent)->bits.gc == 3 && // parent is old and not in remset - (jl_astaggedvalue(ptr)->bits.gc & 1) == 0)) // ptr is young + if (__unlikely(jl_astaggedvalue(parent)->bits.gc == 3 /* GC_OLD_MARKED */ && // parent is old and not in remset + (jl_astaggedvalue(ptr)->bits.gc & 1 /* GC_MARKED */) == 0)) // ptr is young jl_gc_queue_root((jl_value_t*)parent); } STATIC_INLINE void jl_gc_wb_back(const void *ptr) JL_NOTSAFEPOINT // ptr isa jl_value_t* { // if ptr is old - if (__unlikely(jl_astaggedvalue(ptr)->bits.gc == 3)) { + if (__unlikely(jl_astaggedvalue(ptr)->bits.gc == 3 /* GC_OLD_MARKED */)) { jl_gc_queue_root((jl_value_t*)ptr); } } diff --git a/src/module.c b/src/module.c index 9b4d26cc7b000..bdacd487e978d 100644 --- a/src/module.c +++ b/src/module.c @@ -37,6 +37,7 @@ jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) _Atomic(jl_binding_partition_t *)*insert = &b->partitions; jl_binding_partition_t *bpart = jl_atomic_load_relaxed(insert); size_t max_world = (size_t)-1; + jl_binding_partition_t *new_bpart = NULL; while (1) { while (bpart && world < bpart->min_world) { insert = &bpart->next; @@ -46,11 +47,11 @@ jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) } if (bpart && world <= jl_atomic_load_relaxed(&bpart->max_world)) return bpart; - jl_binding_partition_t *new_bpart = new_binding_partition(); + if (!new_bpart) + new_bpart = new_binding_partition(); jl_atomic_store_relaxed(&new_bpart->next, bpart); jl_gc_wb_fresh(new_bpart, bpart); - if (bpart) - new_bpart->min_world = jl_atomic_load_relaxed(&bpart->max_world) + 1; + new_bpart->min_world = bpart ? jl_atomic_load_relaxed(&bpart->max_world) + 1 : 0; jl_atomic_store_relaxed(&new_bpart->max_world, max_world); if (jl_atomic_cmpswap(insert, &bpart, new_bpart)) { jl_gc_wb(parent, new_bpart); @@ -548,6 +549,7 @@ static jl_binding_t *jl_resolve_owner(jl_binding_t *b/*optional*/, jl_module_t * // changing, for example if this var is assigned to later. 
if (!jl_atomic_cmpswap(&bpart->restriction, &pku, encode_restriction((jl_value_t*)b2, BINDING_KIND_IMPLICIT))) goto retry; + jl_gc_wb(bpart, b2); if (b2->deprecated) { b->deprecated = 1; // we will warn about this below, but we might want to warn at the use sites too if (m != jl_main_module && m != jl_base_module && @@ -740,6 +742,7 @@ static void module_import_(jl_module_t *to, jl_module_t *from, jl_sym_t *asname, jl_ptr_kind_union_t new_pku = encode_restriction((jl_value_t*)b, (explici != 0) ? BINDING_KIND_IMPORTED : BINDING_KIND_EXPLICIT); if (!jl_atomic_cmpswap(&btopart->restriction, &bto_pku, new_pku)) goto retry; + jl_gc_wb(btopart, b); bto->deprecated |= b->deprecated; // we already warned about this above, but we might want to warn at the use sites too } else { @@ -749,6 +752,7 @@ static void module_import_(jl_module_t *to, jl_module_t *from, jl_sym_t *asname, jl_ptr_kind_union_t new_pku = encode_restriction(decode_restriction_value(bto_pku), (explici != 0) ? BINDING_KIND_IMPORTED : BINDING_KIND_EXPLICIT); if (!jl_atomic_cmpswap(&btopart->restriction, &bto_pku, new_pku)) goto retry; + // No wb, because the value is unchanged } } else if (jl_bkind_is_some_import(decode_restriction_kind(bto_pku))) { From da74ef1933b12410b217748e0f7fbcbe52e10d29 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 31 Oct 2024 14:15:39 -0400 Subject: [PATCH 341/537] compiler: fix specialization mistake introduced by #40985 (#56404) Hopefully there aren't any others like this hiding around? Not useful to make a new closure for every method that we inline, since we just called `===` inside it --- base/compiler/ssair/inlining.jl | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index 5017b619469ff..dfdd317f74d87 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -939,7 +939,6 @@ function analyze_method!(match::MethodMatch, argtypes::Vector{Any}, allow_typevars::Bool, invokesig::Union{Nothing,Vector{Any}}=nothing, volatile_inf_result::Union{Nothing,VolatileInferenceResult}=nothing) method = match.method - spec_types = match.spec_types # Check that we have the correct number of arguments na = Int(method.nargs) @@ -954,6 +953,7 @@ function analyze_method!(match::MethodMatch, argtypes::Vector{Any}, if !match.fully_covers # type-intersection was not able to give us a simple list of types, so # ir_inline_unionsplit won't be able to deal with inlining this + spec_types = match.spec_types if !(spec_types isa DataType && length(spec_types.parameters) == npassedargs && !isvarargtype(spec_types.parameters[end])) return nothing @@ -1428,14 +1428,13 @@ function handle_match!(cases::Vector{InliningCase}, match::MethodMatch, argtypes::Vector{Any}, @nospecialize(info::CallInfo), flag::UInt32, state::InliningState; allow_typevars::Bool, volatile_inf_result::Union{Nothing,VolatileInferenceResult}) - spec_types = match.spec_types # We may see duplicated dispatch signatures here when a signature gets widened # during abstract interpretation: for the purpose of inlining, we can just skip # processing this dispatch candidate (unless unmatched type parameters are present) - !allow_typevars && any(case::InliningCase->case.sig === spec_types, cases) && return true + !allow_typevars && any(case::InliningCase->case.sig === match.spec_types, cases) && return true item = analyze_method!(match, argtypes, info, flag, state; allow_typevars, volatile_inf_result) item === nothing && return false - 
push!(cases, InliningCase(spec_types, item)) + push!(cases, InliningCase(match.spec_types, item)) return true end @@ -1443,13 +1442,12 @@ function handle_const_prop_result!(cases::Vector{InliningCase}, result::ConstPro match::MethodMatch, @nospecialize(info::CallInfo), flag::UInt32, state::InliningState; allow_typevars::Bool) mi = result.result.linfo - spec_types = match.spec_types if !validate_sparams(mi.sparam_vals) (allow_typevars && !may_have_fcalls(mi.def::Method)) || return false end item = resolve_todo(mi, result.result, info, flag, state) item === nothing && return false - push!(cases, InliningCase(spec_types, item)) + push!(cases, InliningCase(match.spec_types, item)) return true end @@ -1479,11 +1477,10 @@ end function handle_semi_concrete_result!(cases::Vector{InliningCase}, result::SemiConcreteResult, match::MethodMatch, @nospecialize(info::CallInfo), flag::UInt32, state::InliningState) mi = result.mi - spec_types = match.spec_types validate_sparams(mi.sparam_vals) || return false item = semiconcrete_result_item(result, info, flag, state) item === nothing && return false - push!(cases, InliningCase(spec_types, item)) + push!(cases, InliningCase(match.spec_types, item)) return true end From 7715cf287a9920ba86cf7405f636b18b85eede47 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 31 Oct 2024 19:07:10 -0400 Subject: [PATCH 342/537] Avoid racy double-load of binding restriction in `import_module` (#56395) Fixes #56333 --- src/toplevel.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/toplevel.c b/src/toplevel.c index c2fbc38d067eb..6dcab3095e320 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -665,11 +665,16 @@ static void import_module(jl_module_t *JL_NONNULL m, jl_module_t *import, jl_sym jl_sym_t *name = asname ? asname : import->name; // TODO: this is a bit race-y with what error message we might print jl_binding_t *b = jl_get_module_binding(m, name, 1); - if (jl_get_binding_value_if_const(b) == (jl_value_t*)import) - return; jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age); jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); if (decode_restriction_kind(pku) != BINDING_KIND_GUARD && decode_restriction_kind(pku) != BINDING_KIND_FAILED) { + // Unlike regular constant declaration, we allow this as long as we eventually end up at a constant. + pku = jl_walk_binding_inplace(&b, &bpart, jl_current_task->world_age); + if (decode_restriction_kind(pku) == BINDING_KIND_CONST || decode_restriction_kind(pku) == BINDING_KIND_CONST_IMPORT) { + // Already declared (e.g. on another thread) or imported. + if (decode_restriction_value(pku) == (jl_value_t*)import) + return; + } jl_errorf("importing %s into %s conflicts with an existing global", jl_symbol_name(name), jl_symbol_name(m->name)); } From 040174c446ba63b89a13b6de267fced9ad270848 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 1 Nov 2024 14:56:36 +0900 Subject: [PATCH 343/537] define `InteractiveUtils.@infer_[return|exception]_type` (#56398) Also simplifies the definitions of `@code_typed` and the other similar macros. 
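A quick usage sketch of the new macros, mirroring the tests added below (the macros forward to `Base.infer_return_type` / `Base.infer_exception_type`, in the same way the existing `@infer_effects` forwards to `Base.infer_effects`):

```julia
using InteractiveUtils

InteractiveUtils.@infer_return_type sin(42)     # inferred return type of sin(::Int)
InteractiveUtils.@infer_exception_type sin(42)  # inferred exception type of sin(::Int)
```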
--- stdlib/InteractiveUtils/src/macros.jl | 40 ++++++++---------------- stdlib/InteractiveUtils/test/runtests.jl | 2 ++ 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/stdlib/InteractiveUtils/src/macros.jl b/stdlib/InteractiveUtils/src/macros.jl index 0c272940b82d9..a21bf30dbcd6c 100644 --- a/stdlib/InteractiveUtils/src/macros.jl +++ b/stdlib/InteractiveUtils/src/macros.jl @@ -2,7 +2,8 @@ # macro wrappers for various reflection functions -import Base: typesof, insert!, replace_ref_begin_end!, infer_effects, code_ircode +using Base: typesof, insert!, replace_ref_begin_end!, + infer_return_type, infer_exception_type, infer_effects, code_ircode # defined in Base so it's possible to time all imports, including InteractiveUtils and its deps # via. `Base.@time_imports` etc. @@ -225,35 +226,20 @@ macro which(ex0::Symbol) return :(which($__module__, $ex0)) end -for fname in [:code_warntype, :code_llvm, :code_native, :infer_effects] - @eval begin - macro ($fname)(ex0...) - gen_call_with_extracted_types_and_kwargs(__module__, $(Expr(:quote, fname)), ex0) - end - end -end - -macro code_typed(ex0...) - thecall = gen_call_with_extracted_types_and_kwargs(__module__, :code_typed, ex0) - quote - local results = $thecall - length(results) == 1 ? results[1] : results +for fname in [:code_warntype, :code_llvm, :code_native, + :infer_return_type, :infer_effects, :infer_exception_type] + @eval macro ($fname)(ex0...) + gen_call_with_extracted_types_and_kwargs(__module__, $(QuoteNode(fname)), ex0) end end -macro code_lowered(ex0...) - thecall = gen_call_with_extracted_types_and_kwargs(__module__, :code_lowered, ex0) - quote - local results = $thecall - length(results) == 1 ? results[1] : results - end -end - -macro code_ircode(ex0...) - thecall = gen_call_with_extracted_types_and_kwargs(__module__, :code_ircode, ex0) - quote - local results = $thecall - length(results) == 1 ? results[1] : results +for fname in [:code_typed, :code_lowered, :code_ircode] + @eval macro ($fname)(ex0...) + thecall = gen_call_with_extracted_types_and_kwargs(__module__, $(QuoteNode(fname)), ex0) + quote + local results = $thecall + length(results) == 1 ? results[1] : results + end end end diff --git a/stdlib/InteractiveUtils/test/runtests.jl b/stdlib/InteractiveUtils/test/runtests.jl index bb64153818c1d..0de67fea69dea 100644 --- a/stdlib/InteractiveUtils/test/runtests.jl +++ b/stdlib/InteractiveUtils/test/runtests.jl @@ -819,6 +819,8 @@ end end @test Base.infer_effects(sin, (Int,)) == InteractiveUtils.@infer_effects sin(42) +@test Base.infer_return_type(sin, (Int,)) == InteractiveUtils.@infer_return_type sin(42) +@test Base.infer_exception_type(sin, (Int,)) == InteractiveUtils.@infer_exception_type sin(42) @test first(InteractiveUtils.@code_ircode sin(42)) isa Core.Compiler.IRCode @test first(InteractiveUtils.@code_ircode optimize_until="Inlining" sin(42)) isa Core.Compiler.IRCode From 671e1d87595d706ec2fa1659d990c507596f8fbc Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 1 Nov 2024 15:02:11 +0900 Subject: [PATCH 344/537] irinterp: set `IR_FLAG_REFINED` for narrowed `PhiNode`s (#56391) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit `adce_pass!` can transform a `Union`-type `PhiNode` into a narrower `PhiNode`, but in such cases, the `IR_FLAG_REFINED` flag isn’t set on that `PhiNode` statement. 
By setting this flag, irinterp can perform statement reprocessing using the narrowed `PhiNode`, enabling type stability in cases like JuliaLang/julia#56387. - fixes JuliaLang/julia#56387 --- base/compiler/ssair/passes.jl | 12 +++++++----- test/compiler/inference.jl | 12 ++++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl index e3f294c4e91fe..4ad5dcfb2a3c8 100644 --- a/base/compiler/ssair/passes.jl +++ b/base/compiler/ssair/passes.jl @@ -2114,18 +2114,19 @@ function adce_pass!(ir::IRCode, inlining::Union{Nothing,InliningState}=nothing) unionphi = unionphis[i] phi = unionphi[1] t = unionphi[2] + inst = compact.result[phi] if t === Union{} - stmt = compact[SSAValue(phi)][:stmt]::PhiNode + stmt = inst[:stmt]::PhiNode kill_phi!(compact, phi_uses, 1:length(stmt.values), SSAValue(phi), stmt, true) made_changes = true continue elseif t === Any continue - elseif ⊑(𝕃ₒ, compact.result[phi][:type], t) - continue end + ⊏ = strictpartialorder(𝕃ₒ) + t ⊏ inst[:type] || continue to_drop = Int[] - stmt = compact[SSAValue(phi)][:stmt] + stmt = inst[:stmt] stmt === nothing && continue stmt = stmt::PhiNode for i = 1:length(stmt.values) @@ -2137,7 +2138,8 @@ function adce_pass!(ir::IRCode, inlining::Union{Nothing,InliningState}=nothing) push!(to_drop, i) end end - compact.result[phi][:type] = t + inst[:type] = t + add_flag!(inst, IR_FLAG_REFINED) # t ⊏ inst[:type] kill_phi!(compact, phi_uses, to_drop, SSAValue(phi), stmt, false) made_changes = true end diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index dab8e57aa2309..2fc7e917186f4 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -6078,3 +6078,15 @@ let src = code_typed((Union{Nothing,AtomicModifySafety},)) do x end |> only |> first @test any(@nospecialize(x)->Meta.isexpr(x, :invoke_modify), src.code) end + +function issue56387(nt::NamedTuple, field::Symbol=:a) + NT = typeof(nt) + names = fieldnames(NT) + types = fieldtypes(NT) + index = findfirst(==(field), names) + if index === nothing + throw(ArgumentError("Field $field not found")) + end + types[index] +end +@test Base.infer_return_type(issue56387, (typeof((;a=1)),)) == Type{Int} From dc57caf2470fcdc21a9bb11c0f97924577d7c5c8 Mon Sep 17 00:00:00 2001 From: Micah Rufsvold <86363075+mrufsvold@users.noreply.github.com> Date: Fri, 1 Nov 2024 02:03:42 -0400 Subject: [PATCH 345/537] document isopen(::Channel) (#56376) This PR has two purposes -- 1) Add some documentation for public API 2) Add a small note about a footgun I've hit a few times: `!isopen(ch)` does not mean that you are "done" with the channel because buffered channels can still have items left in them that need to be taken. --------- Co-authored-by: CY Han --- base/channels.jl | 50 ++++++++++++++++++++++++++++++++++++---- doc/src/base/parallel.md | 1 + 2 files changed, 46 insertions(+), 5 deletions(-) diff --git a/base/channels.jl b/base/channels.jl index 3acbf37246a58..8882171095e7a 100644 --- a/base/channels.jl +++ b/base/channels.jl @@ -212,11 +212,51 @@ function close(c::Channel, @nospecialize(excp::Exception)) nothing end -# Use acquire here to pair with release store in `close`, so that subsequent `isready` calls -# are forced to see `isready == true` if they see `isopen == false`. This means users must -# call `isopen` before `isready` if you are using the race-y APIs (or call `iterate`, which -# does this right for you). 
-isopen(c::Channel) = ((@atomic :acquire c.state) === :open) +""" + isopen(c::Channel) +Determines whether a [`Channel`](@ref) is open for new [`put!`](@ref) operations. +Notice that a `Channel`` can be closed and still have +buffered elements which can be consumed with [`take!`](@ref). + +# Examples + +Buffered channel with task: +```jldoctest +julia> c = Channel(ch -> put!(ch, 1), 1); + +julia> isopen(c) # The channel is closed to new `put!`s +false + +julia> isready(c) # The channel is closed but still contains elements +true + +julia> take!(c) +1 + +julia> isready(c) +false +``` + +Unbuffered channel: +```jldoctest +julia> c = Channel{Int}(); + +julia> isopen(c) +true + +julia> close(c) + +julia> isopen(c) +false +``` +""" +function isopen(c::Channel) + # Use acquire here to pair with release store in `close`, so that subsequent `isready` calls + # are forced to see `isready == true` if they see `isopen == false`. This means users must + # call `isopen` before `isready` if you are using the race-y APIs (or call `iterate`, which + # does this right for you). + return ((@atomic :acquire c.state) === :open) +end """ empty!(c::Channel) diff --git a/doc/src/base/parallel.md b/doc/src/base/parallel.md index 9f24db176b538..cd5c95f17994a 100644 --- a/doc/src/base/parallel.md +++ b/doc/src/base/parallel.md @@ -67,6 +67,7 @@ Base.put!(::Channel, ::Any) Base.take!(::Channel) Base.isfull(::Channel) Base.isready(::Channel) +Base.isopen(::Channel) Base.fetch(::Channel) Base.close(::Channel) Base.bind(c::Channel, task::Task) From 706a4f6c5d159366bed25e8217ce80748e3963fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Fri, 1 Nov 2024 06:13:06 +0000 Subject: [PATCH 346/537] Make build system respect `FORCE_COLOR` and `NO_COLOR` settings (#56346) Follow up to #53742, but for the build system. CC: @omus. --- Make.inc | 63 ++++++++++++++++++++++++++++++++++++++++++-------------- Makefile | 2 +- NEWS.md | 4 ++-- 3 files changed, 50 insertions(+), 19 deletions(-) diff --git a/Make.inc b/Make.inc index cb79e3ca1b5a9..9f6535ae05885 100644 --- a/Make.inc +++ b/Make.inc @@ -118,6 +118,51 @@ SPACE:=$(eval) $(eval) export LC_ALL=C export LANG=C +# Respect `FORCE_COLOR` environment variable: . +ifndef FORCE_COLOR +FORCE_COLOR := "" +endif + +# Respect `NO_COLOR` environment variable: . +ifndef NO_COLOR +NO_COLOR := "" +endif + +# When both `FORCE_COLOR` and `NO_COLOR` are defined, the former has precedence. +ifneq ($(FORCE_COLOR), "") +NO_COLOR = "" +endif + +WARNCOLOR:="\033[33;1m" +ENDCOLOR:="\033[0m" + +CCCOLOR:="\033[34m" +LINKCOLOR:="\033[34;1m" +PERLCOLOR:="\033[35m" +FLISPCOLOR:="\033[32m" +JULIACOLOR:="\033[32;1m" +DTRACECOLOR:="\033[32;1m" + +SRCCOLOR:="\033[33m" +BINCOLOR:="\033[37;1m" +JULCOLOR:="\033[34;1m" + +ifneq ($(NO_COLOR), "") +WARNCOLOR:="" +ENDCOLOR:="" + +CCCOLOR:="" +LINKCOLOR:="" +PERLCOLOR:="" +FLISPCOLOR:="" +JULIACOLOR:="" +DTRACECOLOR:="" + +SRCCOLOR:="" +BINCOLOR:="" +JULCOLOR:="" +endif + # We need python for things like BB triplet recognition and relative path computation. 
# We don't really care about version, generally, so just find something that works: PYTHON := "$(shell which python 2>/dev/null || which python3 2>/dev/null || which python2 2>/dev/null || echo "{python|python3|python2} not found")" @@ -140,7 +185,7 @@ ifeq ($(BUILDROOT),) ifeq ("$(origin O)", "command line") BUILDROOT := $(abspath $O) BUILDDIR := $(abspath $(BUILDROOT)/$(call rel_path,$(JULIAHOME),$(SRCDIR))) - $(info $(shell printf '\033[32;1mBuilding into $(BUILDROOT)\033[0m')) # use printf to expand the escape sequences + $(info $(shell printf '$(JULIACOLOR)Building into $(BUILDROOT)$(ENDCOLOR)')) # use printf to expand the escape sequences else BUILDROOT:=$(JULIAHOME) endif @@ -1759,24 +1804,10 @@ ifndef VERBOSE VERBOSE := 0 endif -WARNCOLOR:="\033[33;1m" -ENDCOLOR:="\033[0m" - ifeq ($(VERBOSE), 0) QUIET_MAKE = -s -CCCOLOR:="\033[34m" -LINKCOLOR:="\033[34;1m" -PERLCOLOR:="\033[35m" -FLISPCOLOR:="\033[32m" -JULIACOLOR:="\033[32;1m" -DTRACECOLOR:="\033[32;1m" - -SRCCOLOR:="\033[33m" -BINCOLOR:="\033[37;1m" -JULCOLOR:="\033[34;1m" - GOAL=$(subst ','\'',$(subst $(abspath $(JULIAHOME))/,,$(abspath $@))) PRINT_CC = printf ' %b %b\n' $(CCCOLOR)CC$(ENDCOLOR) $(SRCCOLOR)$(GOAL)$(ENDCOLOR); $(1) @@ -1797,7 +1828,7 @@ PRINT_FLISP = echo '$(subst ','\'',$(1))'; $(1) PRINT_JULIA = echo '$(subst ','\'',$(1))'; $(1) PRINT_DTRACE = echo '$(subst ','\'',$(1))'; $(1) -endif +endif # VERBOSE # Makefile debugging trick: # call print-VARIABLE to see the runtime value of any variable diff --git a/Makefile b/Makefile index 4fd8b878c5d1f..d1e5b31f85b1c 100644 --- a/Makefile +++ b/Makefile @@ -650,7 +650,7 @@ testall1: check-whitespace $(JULIA_BUILD_MODE) test-%: check-whitespace $(JULIA_BUILD_MODE) .FORCE @([ $$(( $$(date +%s) - $$(date -r $(build_private_libdir)/sys.$(SHLIB_EXT) +%s) )) -le 100 ] && \ - printf '\033[93m HINT The system image was recently rebuilt. Are you aware of the test-revise-* targets? See CONTRIBUTING.md. \033[0m\n') || true + printf '$(WARNCOLOR) HINT The system image was recently rebuilt. Are you aware of the test-revise-* targets? See CONTRIBUTING.md. $(ENDCOLOR)\n') || true @$(MAKE) $(QUIET_MAKE) -C $(BUILDROOT)/test $* JULIA_BUILD_MODE=$(JULIA_BUILD_MODE) test-revise-%: .FORCE diff --git a/NEWS.md b/NEWS.md index 5e066ffd9cdcf..ba9ca1c521c55 100644 --- a/NEWS.md +++ b/NEWS.md @@ -57,8 +57,8 @@ Command-line option changes * The `-m/--module` flag can be passed to run the `main` function inside a package with a set of arguments. This `main` function should be declared using `@main` to indicate that it is an entry point. * Enabling or disabling color text in Julia can now be controlled with the -[`NO_COLOR`](https://no-color.org/) or [`FORCE_COLOR`](https://force-color.org/) environment -variables. ([#53742]). + [`NO_COLOR`](https://no-color.org/) or [`FORCE_COLOR`](https://force-color.org/) environment + variables. These variables are also honored by Julia's build system ([#53742], [#56346]). * `--project=@temp` starts Julia with a temporary environment. * New `--trace-compile-timing` option to report how long each method reported by `--trace-compile` took to compile, in ms. ([#54662]) From 76351909e2daf7cb07cffe9fa76d3e8e4d38137f Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 1 Nov 2024 06:15:16 -0400 Subject: [PATCH 347/537] Add `edges` vector to CodeInstance/CodeInfo to keep backedges as edges (#54894) Appears to add about 11MB (128MB to 139MB) to the system image, and to decrease the stdlib size by 55 MB (325MB to 270MB), so seems overall favorable right now. 
The edges are computed following the encoding to correctly reflect the backedges. Co-authored-by: Shuhei Kadowaki --- base/Base.jl | 1 - base/boot.jl | 6 +- base/compiler/abstractinterpretation.jl | 215 +++--- base/compiler/inferencestate.jl | 62 +- base/compiler/optimize.jl | 4 +- base/compiler/ssair/inlining.jl | 92 ++- base/compiler/ssair/irinterp.jl | 6 - base/compiler/ssair/passes.jl | 4 +- base/compiler/stmtinfo.jl | 220 ++++++- base/compiler/tfuncs.jl | 29 +- base/compiler/typeinfer.jl | 268 ++++---- base/compiler/types.jl | 53 +- base/compiler/utilities.jl | 37 +- base/expr.jl | 2 +- base/show.jl | 6 +- src/common_symbols1.inc | 2 - src/common_symbols2.inc | 4 +- src/gf.c | 126 ++-- src/ircode.c | 31 +- src/jltypes.c | 14 +- src/julia.h | 5 +- src/julia_internal.h | 15 +- src/method.c | 70 +- src/opaque_closure.c | 12 +- src/serialize.h | 90 +-- src/staticdata.c | 75 +-- src/staticdata_utils.c | 827 +++++++++--------------- src/toplevel.c | 2 +- stdlib/REPL/src/REPLCompletions.jl | 6 +- test/compiler/AbstractInterpreter.jl | 11 +- test/compiler/EscapeAnalysis/EAUtils.jl | 5 +- test/compiler/contextual.jl | 4 +- test/compiler/invalidation.jl | 11 +- test/core.jl | 2 +- test/precompile.jl | 32 +- test/precompile_absint1.jl | 10 +- test/precompile_absint2.jl | 17 +- test/stacktraces.jl | 5 +- 38 files changed, 1159 insertions(+), 1222 deletions(-) diff --git a/base/Base.jl b/base/Base.jl index c5e318ffe5e38..3b56dca166cee 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -204,7 +204,6 @@ function Core._hasmethod(@nospecialize(f), @nospecialize(t)) # this function has return Core._hasmethod(tt) end - # core operations & types include("promotion.jl") include("tuple.jl") diff --git a/base/boot.jl b/base/boot.jl index ed3e22391f215..5d40191ecab21 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -535,11 +535,11 @@ function CodeInstance( mi::MethodInstance, owner, @nospecialize(rettype), @nospecialize(exctype), @nospecialize(inferred_const), @nospecialize(inferred), const_flags::Int32, min_world::UInt, max_world::UInt, effects::UInt32, @nospecialize(analysis_results), - relocatability::UInt8, edges::Union{DebugInfo,Nothing}) + relocatability::UInt8, di::Union{DebugInfo,Nothing}, edges::SimpleVector) return ccall(:jl_new_codeinst, Ref{CodeInstance}, - (Any, Any, Any, Any, Any, Any, Int32, UInt, UInt, UInt32, Any, UInt8, Any), + (Any, Any, Any, Any, Any, Any, Int32, UInt, UInt, UInt32, Any, UInt8, Any, Any), mi, owner, rettype, exctype, inferred_const, inferred, const_flags, min_world, max_world, - effects, analysis_results, relocatability, edges) + effects, analysis_results, relocatability, di, edges) end GlobalRef(m::Module, s::Symbol) = ccall(:jl_module_globalref, Ref{GlobalRef}, (Any, Any), m, s) Module(name::Symbol=:anonymous, std_imports::Bool=true, default_names::Bool=true) = ccall(:jl_f_new_module, Ref{Module}, (Any, Bool, Bool), name, std_imports, default_names) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 777240adf581b..e20b74454bb22 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -57,7 +57,6 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), gfresult = Future{CallMeta}() # intermediate work for computing gfresult rettype = exctype = Bottom - edges = MethodInstance[] conditionals = nothing # keeps refinement information of call argument types when the return type is boolean seenall = true const_results = nothing # or 
const_results::Vector{Union{Nothing,ConstResult}} if any const results are available @@ -73,7 +72,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), napplicable = length(applicable) multiple_matches = napplicable > 1 while i <= napplicable - match = applicable[i]::MethodMatch + (; match, edges, edge_idx) = applicable[i] method = match.method sig = match.spec_types if bail_out_toplevel_call(interp, InferenceLoopState(sig, rettype, all_effects), sv) @@ -95,7 +94,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), #end mresult = abstract_call_method(interp, method, sig, match.sparams, multiple_matches, si, sv)::Future function handle1(interp, sv) - local (; rt, exct, edge, effects, volatile_inf_result) = mresult[] + local (; rt, exct, effects, edge, volatile_inf_result) = mresult[] this_conditional = ignorelimited(rt) this_rt = widenwrappedconditional(rt) this_exct = exct @@ -109,6 +108,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), if const_call_result !== nothing this_const_conditional = ignorelimited(const_call_result.rt) this_const_rt = widenwrappedconditional(const_call_result.rt) + const_edge = nothing if this_const_rt ⊑ₚ this_rt # As long as the const-prop result we have is not *worse* than # what we found out on types, we'd like to use it. Even if the @@ -119,9 +119,9 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), # e.g. in cases when there are cycles but cached result is still accurate this_conditional = this_const_conditional this_rt = this_const_rt - (; effects, const_result, edge) = const_call_result + (; effects, const_result, const_edge) = const_call_result elseif is_better_effects(const_call_result.effects, effects) - (; effects, const_result, edge) = const_call_result + (; effects, const_result, const_edge) = const_call_result else add_remark!(interp, sv, "[constprop] Discarded because the result was wider than inference") end @@ -129,10 +129,13 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), # because consistent-cy does not apply to exceptions. if const_call_result.exct ⋤ this_exct this_exct = const_call_result.exct - (; const_result, edge) = const_call_result + (; const_result, const_edge) = const_call_result else add_remark!(interp, sv, "[constprop] Discarded exception type because result was wider than inference") end + if const_edge !== nothing + edge = const_edge + end end all_effects = merge_effects(all_effects, effects) @@ -142,7 +145,6 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), end const_results[i] = const_result end - edge === nothing || push!(edges, edge) @assert !(this_conditional isa Conditional || this_rt isa MustAlias) "invalid lattice element returned from inter-procedural context" if can_propagate_conditional(this_conditional, argtypes) # The only case where we need to keep this in rt is where @@ -165,6 +167,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), conditionals[2][i] = conditionals[2][i] ⊔ᵢ cnd.elsetype end end + edges[edge_idx] = edge if i < napplicable && bail_out_call(interp, InferenceLoopState(sig, rettype, all_effects), sv) add_remark!(interp, sv, "Call inference reached maximally imprecise information. 
Bailing on.") seenall = false @@ -172,7 +175,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), end i += 1 return true - end + end # function handle1 if isready(mresult) && handle1(interp, sv) continue else @@ -208,7 +211,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), if (isa(sv, InferenceState) && infer_compilation_signature(interp) && (seenall && 1 == napplicable) && rettype !== Any && rettype !== Bottom && !is_removable_if_unused(all_effects)) - match = applicable[1]::MethodMatch + (; match) = applicable[1] method = match.method sig = match.spec_types mi = specialize_method(match; preexisting=true) @@ -230,8 +233,6 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), # and avoid keeping track of a more complex result type. rettype = Any end - any_slot_refined = slotrefinements !== nothing - add_call_backedges!(interp, rettype, all_effects, any_slot_refined, edges, matches, atype.contents, sv) if isa(sv, InferenceState) # TODO (#48913) implement a proper recursion handling for irinterp: # This works just because currently the `:terminate` condition guarantees that @@ -247,7 +248,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), gfresult[] = CallMeta(rettype, exctype, all_effects, info, slotrefinements) return true - end # infercalls + end # function infercalls # start making progress on the first call infercalls(interp, sv) || push!(sv.tasks, infercalls) return gfresult @@ -257,8 +258,14 @@ struct FailedMethodMatch reason::String end +struct MethodMatchTarget + match::MethodMatch + edges::Vector{Union{Nothing,CodeInstance}} + edge_idx::Int +end + struct MethodMatches - applicable::Vector{Any} + applicable::Vector{MethodMatchTarget} info::MethodMatchInfo valid_worlds::WorldRange end @@ -267,15 +274,9 @@ any_ambig(info::MethodMatchInfo) = any_ambig(info.results) any_ambig(m::MethodMatches) = any_ambig(m.info) fully_covering(info::MethodMatchInfo) = info.fullmatch fully_covering(m::MethodMatches) = fully_covering(m.info) -function add_uncovered_edges!(sv::AbsIntState, info::MethodMatchInfo, @nospecialize(atype)) - fully_covering(info) || add_mt_backedge!(sv, info.mt, atype) - nothing -end -add_uncovered_edges!(sv::AbsIntState, matches::MethodMatches, @nospecialize(atype)) = - add_uncovered_edges!(sv, matches.info, atype) struct UnionSplitMethodMatches - applicable::Vector{Any} + applicable::Vector{MethodMatchTarget} applicable_argtypes::Vector{Vector{Any}} info::UnionSplitInfo valid_worlds::WorldRange @@ -284,23 +285,14 @@ any_ambig(info::UnionSplitInfo) = any(any_ambig, info.split) any_ambig(m::UnionSplitMethodMatches) = any_ambig(m.info) fully_covering(info::UnionSplitInfo) = all(fully_covering, info.split) fully_covering(m::UnionSplitMethodMatches) = fully_covering(m.info) -function add_uncovered_edges!(sv::AbsIntState, info::UnionSplitInfo, @nospecialize(atype)) - all(fully_covering, info.split) && return nothing - # add mt backedges with removing duplications - for mt in uncovered_method_tables(info) - add_mt_backedge!(sv, mt, atype) - end -end -add_uncovered_edges!(sv::AbsIntState, matches::UnionSplitMethodMatches, @nospecialize(atype)) = - add_uncovered_edges!(sv, matches.info, atype) -function uncovered_method_tables(info::UnionSplitInfo) - mts = MethodTable[] + +nmatches(info::MethodMatchInfo) = length(info.results) +function nmatches(info::UnionSplitInfo) + n = 0 for mminfo in info.split - fully_covering(mminfo) && continue - 
any(mt′::MethodTable->mt′===mminfo.mt, mts) && continue - push!(mts, mminfo.mt) + n += nmatches(mminfo) end - return mts + return n end function find_method_matches(interp::AbstractInterpreter, argtypes::Vector{Any}, @nospecialize(atype); @@ -320,7 +312,7 @@ function find_union_split_method_matches(interp::AbstractInterpreter, argtypes:: @nospecialize(atype), max_methods::Int) split_argtypes = switchtupleunion(typeinf_lattice(interp), argtypes) infos = MethodMatchInfo[] - applicable = Any[] + applicable = MethodMatchTarget[] applicable_argtypes = Vector{Any}[] # arrays like `argtypes`, including constants, for each match valid_worlds = WorldRange() for i in 1:length(split_argtypes) @@ -333,14 +325,14 @@ function find_union_split_method_matches(interp::AbstractInterpreter, argtypes:: if thismatches === nothing return FailedMethodMatch("For one of the union split cases, too many methods matched") end - for m in thismatches - push!(applicable, m) - push!(applicable_argtypes, arg_n) - end valid_worlds = intersect(valid_worlds, thismatches.valid_worlds) thisfullmatch = any(match::MethodMatch->match.fully_covers, thismatches) - thisinfo = MethodMatchInfo(thismatches, mt, thisfullmatch) + thisinfo = MethodMatchInfo(thismatches, mt, sig_n, thisfullmatch) push!(infos, thisinfo) + for idx = 1:length(thismatches) + push!(applicable, MethodMatchTarget(thismatches[idx], thisinfo.edges, idx)) + push!(applicable_argtypes, arg_n) + end end info = UnionSplitInfo(infos) return UnionSplitMethodMatches( @@ -360,8 +352,9 @@ function find_simple_method_matches(interp::AbstractInterpreter, @nospecialize(a return FailedMethodMatch("Too many methods matched") end fullmatch = any(match::MethodMatch->match.fully_covers, matches) - info = MethodMatchInfo(matches, mt, fullmatch) - return MethodMatches(matches.matches, info, matches.valid_worlds) + info = MethodMatchInfo(matches, mt, atype, fullmatch) + applicable = MethodMatchTarget[MethodMatchTarget(matches[idx], info.edges, idx) for idx = 1:length(matches)] + return MethodMatches(applicable, info, matches.valid_worlds) end """ @@ -532,7 +525,7 @@ function conditional_argtype(𝕃ᵢ::AbstractLattice, @nospecialize(rt), @nospe end end -function collect_slot_refinements(𝕃ᵢ::AbstractLattice, applicable::Vector{Any}, +function collect_slot_refinements(𝕃ᵢ::AbstractLattice, applicable::Vector{MethodMatchTarget}, argtypes::Vector{Any}, fargs::Vector{Any}, sv::InferenceState) ⊏, ⊔ = strictpartialorder(𝕃ᵢ), join(𝕃ᵢ) slotrefinements = nothing @@ -546,7 +539,7 @@ function collect_slot_refinements(𝕃ᵢ::AbstractLattice, applicable::Vector{A end sigt = Bottom for j = 1:length(applicable) - match = applicable[j]::MethodMatch + (;match) = applicable[j] valid_as_lattice(match.spec_types, true) || continue sigt = sigt ⊔ fieldtype(match.spec_types, i) end @@ -561,31 +554,6 @@ function collect_slot_refinements(𝕃ᵢ::AbstractLattice, applicable::Vector{A return slotrefinements end -function add_call_backedges!(interp::AbstractInterpreter, @nospecialize(rettype), - all_effects::Effects, any_slot_refined::Bool, edges::Vector{MethodInstance}, - matches::Union{MethodMatches,UnionSplitMethodMatches}, @nospecialize(atype), - sv::AbsIntState) - # don't bother to add backedges when both type and effects information are already - # maximized to the top since a new method couldn't refine or widen them anyway - if rettype === Any - # ignore the `:nonoverlayed` property if `interp` doesn't use overlayed method table - # since it will never be tainted anyway - if !isoverlayed(method_table(interp)) - 
all_effects = Effects(all_effects; nonoverlayed=ALWAYS_FALSE) - end - if all_effects === Effects() && !any_slot_refined - return nothing - end - end - for edge in edges - add_backedge!(sv, edge) - end - # also need an edge to the method table in case something gets - # added that did not intersect with any existing method - add_uncovered_edges!(sv, matches, atype) - return nothing -end - const RECURSION_UNUSED_MSG = "Bounded recursion detected with unused result. Annotated return type may be wider than true result." const RECURSION_MSG = "Bounded recursion detected. Call was widened to force convergence." const RECURSION_MSG_HARDLIMIT = "Bounded recursion detected under hardlimit. Call was widened to force convergence." @@ -595,9 +563,9 @@ function abstract_call_method(interp::AbstractInterpreter, hardlimit::Bool, si::StmtInfo, sv::AbsIntState) sigtuple = unwrap_unionall(sig) sigtuple isa DataType || - return Future(MethodCallResult(Any, Any, false, false, nothing, Effects())) + return Future(MethodCallResult(Any, Any, Effects(), nothing, false, false)) all(@nospecialize(x) -> valid_as_lattice(unwrapva(x), true), sigtuple.parameters) || - return Future(MethodCallResult(Union{}, Any, false, false, nothing, EFFECTS_THROWS)) # catch bad type intersections early + return Future(MethodCallResult(Union{}, Any, EFFECTS_THROWS, nothing, false, false)) # catch bad type intersections early if is_nospecializeinfer(method) sig = get_nospecializeinfer_sig(method, sig, sparams) @@ -622,7 +590,7 @@ function abstract_call_method(interp::AbstractInterpreter, # we have a self-cycle in the call-graph, but not in the inference graph (typically): # break this edge now (before we record it) by returning early # (non-typically, this means that we lose the ability to detect a guaranteed StackOverflow in some cases) - return Future(MethodCallResult(Any, Any, true, true, nothing, Effects())) + return Future(MethodCallResult(Any, Any, Effects(), nothing, true, true)) end topmost = nothing edgecycle = true @@ -677,7 +645,7 @@ function abstract_call_method(interp::AbstractInterpreter, # since it's very unlikely that we'll try to inline this, # or want make an invoke edge to its calling convention return type. # (non-typically, this means that we lose the ability to detect a guaranteed StackOverflow in some cases) - return Future(MethodCallResult(Any, Any, true, true, nothing, Effects())) + return Future(MethodCallResult(Any, Any, Effects(), nothing, true, true)) end add_remark!(interp, sv, washardlimit ? 
RECURSION_MSG_HARDLIMIT : RECURSION_MSG) # TODO (#48913) implement a proper recursion handling for irinterp: @@ -803,9 +771,9 @@ function matches_sv(parent::AbsIntState, sv::AbsIntState) method_for_inference_limit_heuristics(sv) === method_for_inference_limit_heuristics(parent)) end -function is_edge_recursed(edge::MethodInstance, caller::AbsIntState) +function is_edge_recursed(edge::CodeInstance, caller::AbsIntState) return any(AbsIntStackUnwind(caller)) do sv::AbsIntState - return edge === frame_instance(sv) + return edge.def === frame_instance(sv) end end @@ -832,18 +800,15 @@ end struct MethodCallResult rt exct + effects::Effects + edge::Union{Nothing,CodeInstance} edgecycle::Bool edgelimited::Bool - edge::Union{Nothing,MethodInstance} - effects::Effects volatile_inf_result::Union{Nothing,VolatileInferenceResult} - function MethodCallResult(@nospecialize(rt), @nospecialize(exct), - edgecycle::Bool, - edgelimited::Bool, - edge::Union{Nothing,MethodInstance}, - effects::Effects, + function MethodCallResult(@nospecialize(rt), @nospecialize(exct), effects::Effects, + edge::Union{Nothing,CodeInstance}, edgecycle::Bool, edgelimited::Bool, volatile_inf_result::Union{Nothing,VolatileInferenceResult}=nothing) - return new(rt, exct, edgecycle, edgelimited, edge, effects, volatile_inf_result) + return new(rt, exct, effects, edge, edgecycle, edgelimited, volatile_inf_result) end end @@ -853,18 +818,17 @@ struct InvokeCall InvokeCall(@nospecialize(types), @nospecialize(lookupsig)) = new(types, lookupsig) end -struct ConstCallResults +struct ConstCallResult rt::Any exct::Any const_result::ConstResult effects::Effects - edge::MethodInstance - function ConstCallResults( + const_edge::Union{Nothing,CodeInstance} + function ConstCallResult( @nospecialize(rt), @nospecialize(exct), - const_result::ConstResult, - effects::Effects, - edge::MethodInstance) - return new(rt, exct, const_result, effects, edge) + const_result::ConstResult, effects::Effects, + const_edge::Union{Nothing,CodeInstance}) + return new(rt, exct, const_result, effects, const_edge) end end @@ -947,8 +911,7 @@ function concrete_eval_eligible(interp::AbstractInterpreter, return :none end end - mi = result.edge - if mi !== nothing && is_foldable(effects, #=check_rtcall=#true) + if result.edge !== nothing && is_foldable(effects, #=check_rtcall=#true) if f !== nothing && is_all_const_arg(arginfo, #=start=#2) if (is_nonoverlayed(interp) || is_nonoverlayed(effects) || # Even if overlay methods are involved, when `:consistent_overlay` is @@ -1010,15 +973,17 @@ function concrete_eval_call(interp::AbstractInterpreter, f = invoke end world = get_inference_world(interp) - edge = result.edge::MethodInstance + edge = result.edge::CodeInstance value = try Core._call_in_world_total(world, f, args...) catch e # The evaluation threw. By :consistent-cy, we're guaranteed this would have happened at runtime. 
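`ConstCallResult` above replaces the old `edge::MethodInstance` field with `const_edge::Union{Nothing,CodeInstance}`, and its consumers adopt an "override only when present" idiom: keep the generic `edge` unless const-prop supplied a more precise one. A minimal sketch of that idiom with placeholder types:

```julia
# Placeholder result type; the real one carries a CodeInstance edge.
struct ToyConstResult
    rt::Any
    const_edge::Union{Nothing,String}
end

function pick_edge(edge::String, const_call_result::Union{Nothing,ToyConstResult})
    const_edge = nothing
    if const_call_result !== nothing
        (; const_edge) = const_call_result
    end
    if const_edge !== nothing
        edge = const_edge          # prefer the const-prop edge when available
    end
    return edge
end

@assert pick_edge("generic", ToyConstResult(1, "constprop")) == "constprop"
@assert pick_edge("generic", ToyConstResult(1, nothing)) == "generic"
@assert pick_edge("generic", nothing) == "generic"
```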
# Howevever, at present, :consistency does not mandate the type of the exception - return ConstCallResults(Bottom, Any, ConcreteResult(edge, result.effects), result.effects, edge) + concrete_result = ConcreteResult(edge, result.effects) + return ConstCallResult(Bottom, Any, concrete_result, result.effects, #=const_edge=#nothing) end - return ConstCallResults(Const(value), Union{}, ConcreteResult(edge, EFFECTS_TOTAL, value), EFFECTS_TOTAL, edge) + concrete_result = ConcreteResult(edge, EFFECTS_TOTAL, value) + return ConstCallResult(Const(value), Bottom, concrete_result, EFFECTS_TOTAL, #=const_edge=#nothing) end # check if there is a cycle and duplicated inference of `mi` @@ -1262,9 +1227,9 @@ function semi_concrete_eval_call(interp::AbstractInterpreter, mi::MethodInstance, result::MethodCallResult, arginfo::ArgInfo, sv::AbsIntState) world = frame_world(sv) mi_cache = WorldView(code_cache(interp), world) - code = get(mi_cache, mi, nothing) - if code !== nothing - irsv = IRInterpretationState(interp, code, mi, arginfo.argtypes, world) + codeinst = get(mi_cache, mi, nothing) + if codeinst !== nothing + irsv = IRInterpretationState(interp, codeinst, mi, arginfo.argtypes, world) if irsv !== nothing assign_parentchild!(irsv, sv) rt, (nothrow, noub) = ir_abstract_constant_propagation(interp, irsv) @@ -1283,16 +1248,21 @@ function semi_concrete_eval_call(interp::AbstractInterpreter, effects = Effects(effects; noub=ALWAYS_TRUE) end exct = refine_exception_type(result.exct, effects) - return ConstCallResults(rt, exct, SemiConcreteResult(mi, ir, effects, spec_info(irsv)), effects, mi) + semi_concrete_result = SemiConcreteResult(codeinst, ir, effects, spec_info(irsv)) + const_edge = nothing # TODO use the edges from irsv? + return ConstCallResult(rt, exct, semi_concrete_result, effects, const_edge) end end end return nothing end -const_prop_result(inf_result::InferenceResult) = - ConstCallResults(inf_result.result, inf_result.exc_result, ConstPropResult(inf_result), - inf_result.ipo_effects, inf_result.linfo) +function const_prop_result(inf_result::InferenceResult) + @assert isdefined(inf_result, :ci_as_edge) "InferenceResult without ci_as_edge" + const_prop_result = ConstPropResult(inf_result) + return ConstCallResult(inf_result.result, inf_result.exc_result, const_prop_result, + inf_result.ipo_effects, inf_result.ci_as_edge) +end # return cached result of constant analysis return_localcache_result(::AbstractInterpreter, inf_result::InferenceResult, ::AbsIntState) = @@ -1305,7 +1275,7 @@ end function const_prop_call(interp::AbstractInterpreter, mi::MethodInstance, result::MethodCallResult, arginfo::ArgInfo, sv::AbsIntState, - concrete_eval_result::Union{Nothing, ConstCallResults}=nothing) + concrete_eval_result::Union{Nothing,ConstCallResult}=nothing) inf_cache = get_inference_cache(interp) 𝕃ᵢ = typeinf_lattice(interp) forwarded_argtypes = compute_forwarded_argtypes(interp, arginfo, sv) @@ -1353,6 +1323,7 @@ function const_prop_call(interp::AbstractInterpreter, pop!(callstack) return nothing end + inf_result.ci_as_edge = codeinst_as_edge(interp, frame) @assert frame.frameid != 0 && frame.cycleid == frame.frameid @assert frame.parentid == sv.frameid @assert inf_result.result !== nothing @@ -1691,7 +1662,7 @@ function abstract_iteration(interp::AbstractInterpreter, @nospecialize(itft), @n end iterateresult[] = AbstractIterationResult(ret, AbstractIterationInfo(calls, false)) return true - end # inferiterate_2arg + end # function inferiterate_2arg # continue making progress as much as possible, on 
iterate(arg, state) inferiterate_2arg(interp, sv) || push!(sv.tasks, inferiterate_2arg) return true @@ -1861,7 +1832,7 @@ function abstract_apply(interp::AbstractInterpreter, argtypes::Vector{Any}, si:: # For now, only propagate info if we don't also union-split the iteration applyresult[] = CallMeta(res, exctype, all_effects, retinfo) return true - end + end # function infercalls # start making progress on the first call infercalls(interp, sv) || push!(sv.tasks, infercalls) return applyresult @@ -2230,7 +2201,7 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt mresult = abstract_call_method(interp, method, ti, env, false, si, sv)::Future match = MethodMatch(ti, env, method, argtype <: method.sig) return Future{CallMeta}(mresult, interp, sv) do result, interp, sv - (; rt, exct, edge, effects, volatile_inf_result) = result + (; rt, exct, effects, edge, volatile_inf_result) = result res = nothing sig = match.spec_types argtypes′ = invoke_rewrite(argtypes) @@ -2250,16 +2221,19 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt result, f, arginfo, si, match, sv, invokecall) const_result = volatile_inf_result if const_call_result !== nothing + const_edge = nothing if const_call_result.rt ⊑ rt - (; rt, effects, const_result, edge) = const_call_result + (; rt, effects, const_result, const_edge) = const_call_result end if const_call_result.exct ⋤ exct - (; exct, const_result, edge) = const_call_result + (; exct, const_result, const_edge) = const_call_result + end + if const_edge !== nothing + edge = const_edge end end rt = from_interprocedural!(interp, rt, sv, arginfo, sig) - info = InvokeCallInfo(match, const_result) - edge !== nothing && add_invoke_backedge!(sv, lookupsig, edge) + info = InvokeCallInfo(edge, match, const_result, lookupsig) if !match.fully_covers effects = Effects(effects; nothrow=false) exct = exct ⊔ TypeError @@ -2328,7 +2302,8 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), return abstract_apply(interp, argtypes, si, sv, max_methods) elseif f === invoke return abstract_invoke(interp, arginfo, si, sv) - elseif f === modifyfield! || f === Core.modifyglobal! || f === Core.memoryrefmodify! || f === atomic_pointermodify + elseif f === modifyfield! || f === Core.modifyglobal! || + f === Core.memoryrefmodify! 
|| f === atomic_pointermodify return abstract_modifyop!(interp, f, argtypes, si, sv) elseif f === Core.finalizer return abstract_finalizer(interp, argtypes, sv) @@ -2455,19 +2430,23 @@ function abstract_call_opaque_closure(interp::AbstractInterpreter, mresult = abstract_call_method(interp, ocmethod, sig, Core.svec(), false, si, sv) ocsig_box = Core.Box(ocsig) return Future{CallMeta}(mresult, interp, sv) do result, interp, sv - (; rt, exct, edge, effects, volatile_inf_result, edgecycle) = result + (; rt, exct, effects, volatile_inf_result, edge, edgecycle) = result 𝕃ₚ = ipo_lattice(interp) ⊑, ⋤, ⊔ = partialorder(𝕃ₚ), strictneqpartialorder(𝕃ₚ), join(𝕃ₚ) const_result = volatile_inf_result if !edgecycle const_call_result = abstract_call_method_with_const_args(interp, result, - nothing, arginfo, si, match, sv) + #=f=#nothing, arginfo, si, match, sv) if const_call_result !== nothing + const_edge = nothing if const_call_result.rt ⊑ rt - (; rt, effects, const_result, edge) = const_call_result + (; rt, effects, const_result, const_edge) = const_call_result end if const_call_result.exct ⋤ exct - (; exct, const_result, edge) = const_call_result + (; exct, const_result, const_edge) = const_call_result + end + if const_edge !== nothing + edge = const_edge end end end @@ -2481,8 +2460,7 @@ function abstract_call_opaque_closure(interp::AbstractInterpreter, end end rt = from_interprocedural!(interp, rt, sv, arginfo, match.spec_types) - info = OpaqueClosureCallInfo(match, const_result) - edge !== nothing && add_backedge!(sv, edge) + info = OpaqueClosureCallInfo(edge, match, const_result) return CallMeta(rt, exct, effects, info) end end @@ -3432,7 +3410,6 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr while currpc < bbend currpc += 1 frame.currpc = currpc - empty_backedges!(frame, currpc) stmt = frame.src.code[currpc] # If we're at the end of the basic block ... if currpc == bbend diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl index a200d5ced4d93..43ada89f23133 100644 --- a/base/compiler/inferencestate.jl +++ b/base/compiler/inferencestate.jl @@ -247,7 +247,7 @@ mutable struct InferenceState # TODO: Could keep this sparsely by doing structural liveness analysis ahead of time. 
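The opaque-closure hunk above binds lattice operations to operator-shaped names (`⊑, ⋤, ⊔ = partialorder(𝕃ₚ), strictneqpartialorder(𝕃ₚ), join(𝕃ₚ)`) so the subsequent checks read infix. Any identifier Julia parses as a binary operator can be rebound locally this way; a toy version using sets instead of lattice elements:

```julia
# Bind operator-shaped names to ordinary functions; they then parse infix.
⊑ = issubset            # "is at most as precise as"
⊔ = union               # "join"

a, b = Set([1]), Set([1, 2])
@assert a ⊑ b
@assert (a ⊔ Set([3])) == Set([1, 3])
```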
bb_vartables::Vector{Union{Nothing,VarTable}} # nothing if not analyzed yet ssavaluetypes::Vector{Any} - stmt_edges::Vector{Vector{Any}} + edges::Vector{Any} stmt_info::Vector{CallInfo} #= intermediate states for interprocedural abstract interpretation =# @@ -302,7 +302,7 @@ mutable struct InferenceState nssavalues = src.ssavaluetypes::Int ssavalue_uses = find_ssavalue_uses(code, nssavalues) nstmts = length(code) - stmt_edges = Vector{Vector{Any}}(undef, nstmts) + edges = [] stmt_info = CallInfo[ NoCallInfo() for i = 1:nstmts ] nslots = length(src.slotflags) @@ -327,7 +327,7 @@ mutable struct InferenceState unreachable = BitSet() pclimitations = IdSet{InferenceState}() limitations = IdSet{InferenceState}() - cycle_backedges = Vector{Tuple{InferenceState,Int}}() + cycle_backedges = Tuple{InferenceState,Int}[] callstack = AbsIntState[] tasks = WorkThunk[] @@ -350,10 +350,12 @@ mutable struct InferenceState restrict_abstract_call_sites = isa(def, Module) + parentid = frameid = cycleid = 0 + this = new( mi, world, mod, sptypes, slottypes, src, cfg, spec_info, - currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, ssavaluetypes, stmt_edges, stmt_info, - tasks, pclimitations, limitations, cycle_backedges, callstack, 0, 0, 0, + currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, ssavaluetypes, edges, stmt_info, + tasks, pclimitations, limitations, cycle_backedges, callstack, parentid, frameid, cycleid, result, unreachable, valid_worlds, bestguess, exc_bestguess, ipo_effects, restrict_abstract_call_sites, cache_mode, insert_coverage, interp) @@ -754,30 +756,6 @@ function record_ssa_assign!(𝕃ᵢ::AbstractLattice, ssa_id::Int, @nospecialize return nothing end -function add_cycle_backedge!(caller::InferenceState, frame::InferenceState) - update_valid_age!(caller, frame.valid_worlds) - backedge = (caller, caller.currpc) - contains_is(frame.cycle_backedges, backedge) || push!(frame.cycle_backedges, backedge) - add_backedge!(caller, frame.linfo) - return frame -end - -function get_stmt_edges!(caller::InferenceState, currpc::Int=caller.currpc) - stmt_edges = caller.stmt_edges - if !isassigned(stmt_edges, currpc) - return stmt_edges[currpc] = Any[] - else - return stmt_edges[currpc] - end -end - -function empty_backedges!(frame::InferenceState, currpc::Int=frame.currpc) - if isassigned(frame.stmt_edges, currpc) - empty!(frame.stmt_edges[currpc]) - end - return nothing -end - function narguments(sv::InferenceState, include_va::Bool=true) nargs = Int(sv.src.nargs) if !include_va @@ -1008,32 +986,6 @@ function callers_in_cycle(sv::InferenceState) end callers_in_cycle(sv::IRInterpretationState) = AbsIntCycle(sv.callstack::Vector{AbsIntState}, 0, 0) -# temporarily accumulate our edges to later add as backedges in the callee -function add_backedge!(caller::InferenceState, mi::MethodInstance) - isa(caller.linfo.def, Method) || return nothing # don't add backedges to toplevel method instance - return push!(get_stmt_edges!(caller), mi) -end -function add_backedge!(irsv::IRInterpretationState, mi::MethodInstance) - return push!(irsv.edges, mi) -end - -function add_invoke_backedge!(caller::InferenceState, @nospecialize(invokesig::Type), mi::MethodInstance) - isa(caller.linfo.def, Method) || return nothing # don't add backedges to toplevel method instance - return push!(get_stmt_edges!(caller), invokesig, mi) -end -function add_invoke_backedge!(irsv::IRInterpretationState, @nospecialize(invokesig::Type), mi::MethodInstance) - return push!(irsv.edges, invokesig, mi) -end - -# used to temporarily 
accumulate our no method errors to later add as backedges in the callee method table -function add_mt_backedge!(caller::InferenceState, mt::MethodTable, @nospecialize(typ)) - isa(caller.linfo.def, Method) || return nothing # don't add backedges to toplevel method instance - return push!(get_stmt_edges!(caller), mt, typ) -end -function add_mt_backedge!(irsv::IRInterpretationState, mt::MethodTable, @nospecialize(typ)) - return push!(irsv.edges, mt, typ) -end - get_curr_ssaflag(sv::InferenceState) = sv.src.ssaflags[sv.currpc] get_curr_ssaflag(sv::IRInterpretationState) = sv.ir.stmts[sv.curridx][:flag] diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index c5606f80468c0..e8508ade88b6c 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -141,8 +141,7 @@ struct InliningState{Interp<:AbstractInterpreter} interp::Interp end function InliningState(sv::InferenceState, interp::AbstractInterpreter) - edges = sv.stmt_edges[1] - return InliningState(edges, sv.world, interp) + return InliningState(sv.edges, sv.world, interp) end function InliningState(interp::AbstractInterpreter) return InliningState(Any[], get_inference_world(interp), interp) @@ -225,6 +224,7 @@ include("compiler/ssair/irinterp.jl") function ir_to_codeinf!(opt::OptimizationState) (; linfo, src) = opt src = ir_to_codeinf!(src, opt.ir::IRCode) + src.edges = opt.inlining.edges opt.ir = nothing maybe_validate_code(linfo, src, "optimized") return src diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index dfdd317f74d87..ae4c04241fa13 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -28,7 +28,8 @@ end struct ConstantCase val::Any - ConstantCase(@nospecialize val) = new(val) + edge::CodeInstance + ConstantCase(@nospecialize(val), edge::CodeInstance) = new(val, edge) end struct SomeCase @@ -68,11 +69,12 @@ struct InliningEdgeTracker new(state.edges, invokesig) end -function add_inlining_backedge!((; edges, invokesig)::InliningEdgeTracker, mi::MethodInstance) +function add_inlining_edge!(et::InliningEdgeTracker, edge::Union{CodeInstance,MethodInstance}) + (; edges, invokesig) = et if invokesig === nothing - push!(edges, mi) + add_one_edge!(edges, edge) else # invoke backedge - push!(edges, invoke_signature(invokesig), mi) + add_invoke_edge!(edges, invoke_signature(invokesig), edge) end return nothing end @@ -784,10 +786,10 @@ function rewrite_apply_exprargs!(todo::Vector{Pair{Int,Any}}, end function compileable_specialization(mi::MethodInstance, effects::Effects, - et::InliningEdgeTracker, @nospecialize(info::CallInfo); compilesig_invokes::Bool=true) + et::InliningEdgeTracker, @nospecialize(info::CallInfo), state::InliningState) mi_invoke = mi method, atype, sparams = mi.def::Method, mi.specTypes, mi.sparam_vals - if compilesig_invokes + if OptimizationParams(state.interp).compilesig_invokes new_atype = get_compileable_sig(method, atype, sparams) new_atype === nothing && return nothing if atype !== new_atype @@ -805,43 +807,41 @@ function compileable_specialization(mi::MethodInstance, effects::Effects, return nothing end end - add_inlining_backedge!(et, mi) # to the dispatch lookup - mi_invoke !== mi && push!(et.edges, method.sig, mi_invoke) # add_inlining_backedge to the invoke call, if that is different + add_inlining_edge!(et, mi) # to the dispatch lookup + if mi_invoke !== mi + add_invoke_edge!(et.edges, method.sig, mi_invoke) # add_inlining_edge to the invoke call, if that is different + end return InvokeCase(mi_invoke, effects, info) 
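A recurring change in these hunks is replacing the per-statement `stmt_edges::Vector{Vector{Any}}` with a single flat `edges::Vector{Any}` per frame, which `InliningState` and the final `CodeInfo` (`src.edges = opt.inlining.edges`) then reuse directly. A toy contrast of the two layouts, with strings standing in for real edge objects:

```julia
nstmts = 3

# Old shape: one (lazily allocated) edge list per statement.
stmt_edges = Vector{Vector{Any}}(undef, nstmts)
stmt_edges[1] = Any["edge A"]
stmt_edges[3] = Any["edge B", "edge C"]

# New shape: a single flat list per frame, filled once at the end
# by walking the per-statement call info.
edges = Any[]
for i in 1:nstmts
    isassigned(stmt_edges, i) || continue
    append!(edges, stmt_edges[i])
end
@assert edges == Any["edge A", "edge B", "edge C"]
```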
end -function compileable_specialization(match::MethodMatch, effects::Effects, - et::InliningEdgeTracker, @nospecialize(info::CallInfo); compilesig_invokes::Bool=true) - mi = specialize_method(match) - return compileable_specialization(mi, effects, et, info; compilesig_invokes) -end - struct InferredResult src::Any # CodeInfo or IRCode effects::Effects - InferredResult(@nospecialize(src), effects::Effects) = new(src, effects) + edge::CodeInstance + InferredResult(@nospecialize(src), effects::Effects, edge::CodeInstance) = new(src, effects, edge) end @inline function get_cached_result(state::InliningState, mi::MethodInstance) code = get(code_cache(state), mi, nothing) if code isa CodeInstance if use_const_api(code) # in this case function can be inlined to a constant - return ConstantCase(quoted(code.rettype_const)) + return ConstantCase(quoted(code.rettype_const), code) end return code end return nothing end @inline function get_local_result(inf_result::InferenceResult) + @assert isdefined(inf_result, :ci_as_edge) "InferenceResult without ci_as_edge" effects = inf_result.ipo_effects if is_foldable_nothrow(effects) res = inf_result.result if isa(res, Const) && is_inlineable_constant(res.val) # use constant calling convention - return ConstantCase(quoted(res.val)) + return ConstantCase(quoted(res.val), inf_result.ci_as_edge) end end - return InferredResult(inf_result.src, effects) + return InferredResult(inf_result.src, effects, inf_result.ci_as_edge) end # the general resolver for usual and const-prop'ed calls @@ -861,30 +861,28 @@ function resolve_todo(mi::MethodInstance, result::Union{Nothing,InferenceResult, inferred_result = get_cached_result(state, mi) end if inferred_result isa ConstantCase - add_inlining_backedge!(et, mi) + add_inlining_edge!(et, inferred_result.edge) return inferred_result elseif inferred_result isa InferredResult - (; src, effects) = inferred_result + (; src, effects, edge) = inferred_result elseif inferred_result isa CodeInstance src = @atomic :monotonic inferred_result.inferred effects = decode_effects(inferred_result.ipo_purity_bits) + edge = inferred_result else # there is no cached source available, bail out - return compileable_specialization(mi, Effects(), et, info; - compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) + return compileable_specialization(mi, Effects(), et, info, state) end # the duplicated check might have been done already within `analyze_method!`, but still # we need it here too since we may come here directly using a constant-prop' result if !OptimizationParams(state.interp).inlining || is_stmt_noinline(flag) - return compileable_specialization(mi, effects, et, info; - compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) + return compileable_specialization(edge.def, effects, et, info, state) end src_inlining_policy(state.interp, src, info, flag) || - return compileable_specialization(mi, effects, et, info; - compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) + return compileable_specialization(edge.def, effects, et, info, state) - add_inlining_backedge!(et, mi) + add_inlining_edge!(et, edge) if inferred_result isa CodeInstance ir, spec_info, debuginfo = retrieve_ir_for_inlining(inferred_result, src) else @@ -904,7 +902,7 @@ function resolve_todo(mi::MethodInstance, @nospecialize(info::CallInfo), flag::U cached_result = get_cached_result(state, mi) if cached_result isa ConstantCase - add_inlining_backedge!(et, mi) + add_inlining_edge!(et, cached_result.edge) return cached_result 
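`get_local_result` and `const_prop_result` assert `isdefined(inf_result, :ci_as_edge)`: the field is a mutable-struct slot that is only assigned once the const-prop frame has produced its `CodeInstance` (see `inf_result.ci_as_edge = codeinst_as_edge(interp, frame)` earlier). A self-contained sketch of that deferred-initialization idiom; the struct here is illustrative, not `InferenceResult` itself:

```julia
mutable struct ToyResult
    rt::Any
    ci_as_edge::String        # assigned only after "inference" finishes
    ToyResult(rt) = new(rt)   # leave ci_as_edge undefined for now
end

r = ToyResult(Int)
@assert !isdefined(r, :ci_as_edge)

# ... later, once the edge object exists:
r.ci_as_edge = "codeinstance for Int"
@assert isdefined(r, :ci_as_edge)
```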
elseif cached_result isa CodeInstance src = @atomic :monotonic cached_result.inferred @@ -915,7 +913,7 @@ function resolve_todo(mi::MethodInstance, @nospecialize(info::CallInfo), flag::U src_inlining_policy(state.interp, src, info, flag) || return nothing ir, spec_info, debuginfo = retrieve_ir_for_inlining(cached_result, src) - add_inlining_backedge!(et, mi) + add_inlining_edge!(et, cached_result) return InliningTodo(mi, ir, spec_info, debuginfo, effects) end @@ -1119,7 +1117,7 @@ function inline_apply!(todo::Vector{Pair{Int,Any}}, # e.g. rewrite `((t::Tuple)...,)` to `t` nonempty_idx = 0 𝕃ₒ = optimizer_lattice(state.interp) - for i = (arg_start + 1):length(argtypes) + for i = (arg_start+1):length(argtypes) ti = argtypes[i] ⊑(𝕃ₒ, ti, Tuple{}) && continue if ⊑(𝕃ₒ, ti, Tuple) && nonempty_idx == 0 @@ -1137,7 +1135,7 @@ function inline_apply!(todo::Vector{Pair{Int,Any}}, # Try to figure out the signature of the function being called # and if rewrite_apply_exprargs can deal with this form arginfos = MaybeAbstractIterationInfo[] - for i = (arg_start + 1):length(argtypes) + for i = (arg_start+1):length(argtypes) thisarginfo = nothing if !is_valid_type_for_apply_rewrite(argtypes[i], OptimizationParams(state.interp)) isa(info, ApplyCallInfo) || return nothing @@ -1403,9 +1401,7 @@ function compute_inlining_cases(@nospecialize(info::CallInfo), flag::UInt32, sig result, match, argtypes, info, flag, state; allow_typevars=true) end if !fully_covered - atype = argtypes_to_type(sig.argtypes) - # We will emit an inline MethodError so we need a backedge to the MethodTable - add_uncovered_edges!(state.edges, info, atype) + # We will emit an inline MethodError in this case, but that info already came inference, so we must already have the uncovered edge for it end elseif !isempty(cases) # if we've not seen all candidates, union split is valid only for dispatch tuples @@ -1453,7 +1449,7 @@ end function semiconcrete_result_item(result::SemiConcreteResult, @nospecialize(info::CallInfo), flag::UInt32, state::InliningState) - mi = result.mi + mi = result.edge.def et = InliningEdgeTracker(state) if (!OptimizationParams(state.interp).inlining || is_stmt_noinline(flag) || @@ -1461,14 +1457,12 @@ function semiconcrete_result_item(result::SemiConcreteResult, # a `@noinline`-declared method when it's marked as `@constprop :aggressive`. # Suppress the inlining here (unless inlining is requested at the callsite). 
(is_declared_noinline(mi.def::Method) && !is_stmt_inline(flag))) - return compileable_specialization(mi, result.effects, et, info; - compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) + return compileable_specialization(mi, result.effects, et, info, state) end src_inlining_policy(state.interp, result.ir, info, flag) || - return compileable_specialization(mi, result.effects, et, info; - compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) + return compileable_specialization(mi, result.effects, et, info, state) - add_inlining_backedge!(et, mi) + add_inlining_edge!(et, result.edge) preserve_local_sources = OptimizationParams(state.interp).preserve_local_sources ir, _, debuginfo = retrieve_ir_for_inlining(mi, result.ir, preserve_local_sources) return InliningTodo(mi, ir, result.spec_info, debuginfo, result.effects) @@ -1476,7 +1470,7 @@ end function handle_semi_concrete_result!(cases::Vector{InliningCase}, result::SemiConcreteResult, match::MethodMatch, @nospecialize(info::CallInfo), flag::UInt32, state::InliningState) - mi = result.mi + mi = result.edge.def validate_sparams(mi.sparam_vals) || return false item = semiconcrete_result_item(result, info, flag, state) item === nothing && return false @@ -1499,11 +1493,10 @@ function concrete_result_item(result::ConcreteResult, @nospecialize(info::CallIn invokesig::Union{Nothing,Vector{Any}}=nothing) if !may_inline_concrete_result(result) et = InliningEdgeTracker(state, invokesig) - return compileable_specialization(result.mi, result.effects, et, info; - compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) + return compileable_specialization(result.edge.def, result.effects, et, info, state) end @assert result.effects === EFFECTS_TOTAL - return ConstantCase(quoted(result.result)) + return ConstantCase(quoted(result.result), result.edge) end function handle_cases!(todo::Vector{Pair{Int,Any}}, ir::IRCode, idx::Int, stmt::Expr, @@ -1552,11 +1545,16 @@ function handle_modifyop!_call!(ir::IRCode, idx::Int, stmt::Expr, info::ModifyOp info isa MethodResultPure && (info = info.info) info isa ConstCallInfo && (info = info.call) info isa MethodMatchInfo || return nothing - length(info.results) == 1 || return nothing + length(info.edges) == length(info.results) == 1 || return nothing match = info.results[1]::MethodMatch match.fully_covers || return nothing - case = compileable_specialization(match, Effects(), InliningEdgeTracker(state), info; - compilesig_invokes=OptimizationParams(state.interp).compilesig_invokes) + edge = info.edges[1] + if edge === nothing + edge = specialize_method(match) + else + edge = edge.def + end + case = compileable_specialization(edge, Effects(), InliningEdgeTracker(state), info, state) case === nothing && return nothing stmt.head = :invoke_modify pushfirst!(stmt.args, case.invoke) diff --git a/base/compiler/ssair/irinterp.jl b/base/compiler/ssair/irinterp.jl index f9565f3971733..0a8239dc590db 100644 --- a/base/compiler/ssair/irinterp.jl +++ b/base/compiler/ssair/irinterp.jl @@ -450,12 +450,6 @@ function ir_abstract_constant_propagation(interp::AbstractInterpreter, irsv::IRI (nothrow | noub) || break end - if last(irsv.valid_worlds) >= get_world_counter() - # if we aren't cached, we don't need this edge - # but our caller might, so let's just make it anyways - store_backedges(frame_instance(irsv), irsv.edges) - end - if irsv.frameid != 0 callstack = irsv.callstack::Vector{AbsIntState} @assert callstack[end] === irsv && length(callstack) == irsv.frameid diff --git 
a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl index 4ad5dcfb2a3c8..bfe33c23871fe 100644 --- a/base/compiler/ssair/passes.jl +++ b/base/compiler/ssair/passes.jl @@ -1529,7 +1529,7 @@ function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, if code isa CodeInstance if use_const_api(code) # No code in the function - Nothing to do - add_inlining_backedge!(et, mi) + add_inlining_edge!(et, code) return true end src = @atomic :monotonic code.inferred @@ -1544,7 +1544,7 @@ function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, length(src.cfg.blocks) == 1 || return false # Ok, we're committed to inlining the finalizer - add_inlining_backedge!(et, mi) + add_inlining_edge!(et, code) # TODO: Should there be a special line number node for inlined finalizers? inline_at = ir[SSAValue(idx)][:line] diff --git a/base/compiler/stmtinfo.jl b/base/compiler/stmtinfo.jl index 9dba7a4459f9e..4cbd2ab39fd46 100644 --- a/base/compiler/stmtinfo.jl +++ b/base/compiler/stmtinfo.jl @@ -22,27 +22,114 @@ struct CallMeta end struct NoCallInfo <: CallInfo end +add_edges_impl(::Vector{Any}, ::NoCallInfo) = nothing """ info::MethodMatchInfo <: CallInfo -Captures the result of a `:jl_matching_methods` lookup for the given call (`info.results`). -This info may then be used by the optimizer to inline the matches, without having -to re-consult the method table. This info is illegal on any statement that is -not a call to a generic function. +Captures the essential arguments and result of a `:jl_matching_methods` lookup +for the given call (`info.results`). This info may then be used by the +optimizer, without having to re-consult the method table. +This info is illegal on any statement that is not a call to a generic function. """ struct MethodMatchInfo <: CallInfo results::MethodLookupResult mt::MethodTable + atype fullmatch::Bool + edges::Vector{Union{Nothing,CodeInstance}} + function MethodMatchInfo( + results::MethodLookupResult, mt::MethodTable, @nospecialize(atype), fullmatch::Bool) + edges = fill!(Vector{Union{Nothing,CodeInstance}}(undef, length(results)), nothing) + return new(results, mt, atype, fullmatch, edges) + end +end +add_edges_impl(edges::Vector{Any}, info::MethodMatchInfo) = _add_edges_impl(edges, info) +function _add_edges_impl(edges::Vector{Any}, info::MethodMatchInfo, mi_edge::Bool=false) + if !fully_covering(info) + # add legacy-style missing backedge info also + exists = false + for i in 1:length(edges) + if edges[i] === info.mt && edges[i+1] == info.atype + exists = true + break + end + end + if !exists + push!(edges, info.mt, info.atype) + end + end + nmatches = length(info.results) + if nmatches == length(info.edges) == 1 + # try the optimized format for the representation, if possible and applicable + # if this doesn't succeed, the backedge will be less precise, + # but the forward edge will maintain the precision + edge = info.edges[1] + m = info.results[1] + if edge === nothing + mi = specialize_method(m) # don't allow `Method`-edge for this optimized format + edge = mi + else + mi = edge.def + end + if mi.specTypes === m.spec_types + add_one_edge!(edges, edge) + return nothing + end + end + # add check for whether this lookup already existed in the edges list + for i in 1:length(edges) + if edges[i] === nmatches && edges[i+1] == info.atype + # TODO: must also verify the CodeInstance match too + return nothing + end + end + push!(edges, nmatches, info.atype) + for i = 1:nmatches + edge = info.edges[i] + m = info.results[i] + if edge === 
nothing + edge = mi_edge ? specialize_method(m) : m.method + else + @assert edge.def.def === m.method + end + push!(edges, edge) + end + nothing +end +function add_one_edge!(edges::Vector{Any}, edge::MethodInstance) + for i in 1:length(edges) + edgeᵢ = edges[i] + edgeᵢ isa CodeInstance && (edgeᵢ = edgeᵢ.def) + edgeᵢ isa MethodInstance || continue + if edgeᵢ === edge && !(i > 1 && edges[i-1] isa Type) + return # found existing covered edge + end + end + push!(edges, edge) + nothing +end +function add_one_edge!(edges::Vector{Any}, edge::CodeInstance) + for i in 1:length(edges) + edgeᵢ_orig = edgeᵢ = edges[i] + edgeᵢ isa CodeInstance && (edgeᵢ = edgeᵢ.def) + edgeᵢ isa MethodInstance || continue + if edgeᵢ === edge.def && !(i > 1 && edges[i-1] isa Type) + if edgeᵢ_orig isa MethodInstance + # found edge we can upgrade + edges[i] = edge + return + elseif true # XXX compare `CodeInstance` identify? + return + end + end + end + push!(edges, edge) + nothing end nsplit_impl(info::MethodMatchInfo) = 1 getsplit_impl(info::MethodMatchInfo, idx::Int) = (@assert idx == 1; info.results) getresult_impl(::MethodMatchInfo, ::Int) = nothing -function add_uncovered_edges_impl(edges::Vector{Any}, info::MethodMatchInfo, @nospecialize(atype)) - fully_covering(info) || push!(edges, info.mt, atype) - nothing -end """ info::UnionSplitInfo <: CallInfo @@ -56,25 +143,13 @@ This info is illegal on any statement that is not a call to a generic function. struct UnionSplitInfo <: CallInfo split::Vector{MethodMatchInfo} end - -nmatches(info::MethodMatchInfo) = length(info.results) -function nmatches(info::UnionSplitInfo) - n = 0 - for mminfo in info.split - n += nmatches(mminfo) - end - return n -end +add_edges_impl(edges::Vector{Any}, info::UnionSplitInfo) = + _add_edges_impl(edges, info) +_add_edges_impl(edges::Vector{Any}, info::UnionSplitInfo, mi_edge::Bool=false) = + for split in info.split; _add_edges_impl(edges, split, mi_edge); end nsplit_impl(info::UnionSplitInfo) = length(info.split) getsplit_impl(info::UnionSplitInfo, idx::Int) = getsplit(info.split[idx], 1) getresult_impl(::UnionSplitInfo, ::Int) = nothing -function add_uncovered_edges_impl(edges::Vector{Any}, info::UnionSplitInfo, @nospecialize(atype)) - all(fully_covering, info.split) && return nothing - # add mt backedges with removing duplications - for mt in uncovered_method_tables(info) - push!(edges, mt, atype) - end -end abstract type ConstResult end @@ -83,15 +158,15 @@ struct ConstPropResult <: ConstResult end struct ConcreteResult <: ConstResult - mi::MethodInstance + edge::CodeInstance effects::Effects result - ConcreteResult(mi::MethodInstance, effects::Effects) = new(mi, effects) - ConcreteResult(mi::MethodInstance, effects::Effects, @nospecialize val) = new(mi, effects, val) + ConcreteResult(edge::CodeInstance, effects::Effects) = new(edge, effects) + ConcreteResult(edge::CodeInstance, effects::Effects, @nospecialize val) = new(edge, effects, val) end struct SemiConcreteResult <: ConstResult - mi::MethodInstance + edge::CodeInstance ir::IRCode effects::Effects spec_info::SpecInfo @@ -116,17 +191,15 @@ struct ConstCallInfo <: CallInfo call::Union{MethodMatchInfo,UnionSplitInfo} results::Vector{Union{Nothing,ConstResult}} end +add_edges_impl(edges::Vector{Any}, info::ConstCallInfo) = add_edges!(edges, info.call) nsplit_impl(info::ConstCallInfo) = nsplit(info.call) getsplit_impl(info::ConstCallInfo, idx::Int) = getsplit(info.call, idx) getresult_impl(info::ConstCallInfo, idx::Int) = info.results[idx] -add_uncovered_edges_impl(edges::Vector{Any}, 
info::ConstCallInfo, @nospecialize(atype)) = add_uncovered_edges!(edges, info.call, atype) """ info::MethodResultPure <: CallInfo -This struct represents a method result constant was proven to be -effect-free, including being no-throw (typically because the value was computed -by calling an `@pure` function). +This struct represents a method result constant was proven to be effect-free. """ struct MethodResultPure <: CallInfo info::CallInfo @@ -135,6 +208,7 @@ let instance = MethodResultPure(NoCallInfo()) global MethodResultPure MethodResultPure() = instance end +add_edges_impl(edges::Vector{Any}, info::MethodResultPure) = add_edges!(edges, info.info) """ ainfo::AbstractIterationInfo @@ -161,10 +235,19 @@ not an `_apply_iterate` call. """ struct ApplyCallInfo <: CallInfo # The info for the call itself - call::Any + call::CallInfo # AbstractIterationInfo for each argument, if applicable arginfo::Vector{MaybeAbstractIterationInfo} end +function add_edges_impl(edges::Vector{Any}, info::ApplyCallInfo) + add_edges!(edges, info.call) + for arg in info.arginfo + arg === nothing && continue + for edge in arg.each + add_edges!(edges, edge.info) + end + end +end """ info::UnionSplitApplyCallInfo <: CallInfo @@ -175,6 +258,8 @@ This info is illegal on any statement that is not an `_apply_iterate` call. struct UnionSplitApplyCallInfo <: CallInfo infos::Vector{ApplyCallInfo} end +add_edges_impl(edges::Vector{Any}, info::UnionSplitApplyCallInfo) = + for split in info.infos; add_edges!(edges, split); end """ info::InvokeCallInfo @@ -184,8 +269,56 @@ the method that has been processed. Optionally keeps `info.result::InferenceResult` that keeps constant information. """ struct InvokeCallInfo <: CallInfo + edge::Union{Nothing,CodeInstance} match::MethodMatch result::Union{Nothing,ConstResult} + atype # ::Type +end +add_edges_impl(edges::Vector{Any}, info::InvokeCallInfo) = + _add_edges_impl(edges, info) +function _add_edges_impl(edges::Vector{Any}, info::InvokeCallInfo, mi_edge::Bool=false) + edge = info.edge + if edge === nothing + edge = mi_edge ? specialize_method(info.match) : info.match.method + end + add_invoke_edge!(edges, info.atype, edge) + nothing +end +function add_invoke_edge!(edges::Vector{Any}, @nospecialize(atype), edge::Union{MethodInstance,Method}) + for i in 2:length(edges) + edgeᵢ = edges[i] + edgeᵢ isa CodeInstance && (edgeᵢ = edgeᵢ.def) + edgeᵢ isa MethodInstance || edgeᵢ isa Method || continue + if edgeᵢ === edge + edge_minus_1 = edges[i-1] + if edge_minus_1 isa Type && edge_minus_1 == atype + return # found existing covered edge + end + end + end + push!(edges, atype, edge) + nothing +end +function add_invoke_edge!(edges::Vector{Any}, @nospecialize(atype), edge::CodeInstance) + for i in 2:length(edges) + edgeᵢ_orig = edgeᵢ = edges[i] + edgeᵢ isa CodeInstance && (edgeᵢ = edgeᵢ.def) + if ((edgeᵢ isa MethodInstance && edgeᵢ === edge.def) || + (edgeᵢ isa Method && edgeᵢ === edge.def.def)) + edge_minus_1 = edges[i-1] + if edge_minus_1 isa Type && edge_minus_1 == atype + if edgeᵢ_orig isa MethodInstance || edgeᵢ_orig isa Method + # found edge we can upgrade + edges[i] = edge + return + elseif true # XXX compare `CodeInstance` identify? + return + end + end + end + end + push!(edges, atype, edge) + nothing end """ @@ -196,9 +329,17 @@ the method that has been processed. Optionally keeps `info.result::InferenceResult` that keeps constant information. 
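`add_one_edge!` and `add_invoke_edge!` above keep the flat edges list deduplicated and upgrade an already-recorded `MethodInstance` (or `Method`) entry in place when a matching `CodeInstance` arrives later. A simplified toy of that upgrade-in-place behaviour, with plain strings and pairs in place of the real edge types:

```julia
# Toy model: a String stands in for a MethodInstance, and a Pair
# `mi => ci` stands in for a CodeInstance whose `.def` is that mi.
const ToyEdge = Union{String, Pair{String,String}}

toy_def(e::String) = e
toy_def(e::Pair{String,String}) = e.first

function add_one_toy_edge!(edges::Vector{ToyEdge}, edge::Pair{String,String})
    for i in 1:length(edges)
        toy_def(edges[i]) == toy_def(edge) || continue
        if edges[i] isa String
            edges[i] = edge        # upgrade the bare entry in place
        end
        return edges               # in either case, append no duplicate
    end
    push!(edges, edge)
    return edges
end

edges = ToyEdge["f(::Int)"]                     # recorded before a CodeInstance existed
add_one_toy_edge!(edges, "f(::Int)" => "ci#1")
@assert edges == ToyEdge["f(::Int)" => "ci#1"]  # upgraded in place, not duplicated
```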
""" struct OpaqueClosureCallInfo <: CallInfo + edge::Union{Nothing,CodeInstance} match::MethodMatch result::Union{Nothing,ConstResult} end +function add_edges_impl(edges::Vector{Any}, info::OpaqueClosureCallInfo) + edge = info.edge + if edge !== nothing + add_one_edge!(edges, edge) + end + nothing +end """ info::OpaqueClosureCreateInfo <: CallInfo @@ -215,6 +356,9 @@ struct OpaqueClosureCreateInfo <: CallInfo return new(unspec) end end +# merely creating the object implies edges for OC, unlike normal objects, +# since calling them doesn't normally have edges in contrast +add_edges_impl(edges::Vector{Any}, info::OpaqueClosureCreateInfo) = add_edges!(edges, info.unspec.info) # Stmt infos that are used by external consumers, but not by optimization. # These are not produced by default and must be explicitly opted into by @@ -230,6 +374,7 @@ was supposed to analyze. struct ReturnTypeCallInfo <: CallInfo info::CallInfo end +add_edges_impl(edges::Vector{Any}, info::ReturnTypeCallInfo) = add_edges!(edges, info.info) """ info::FinalizerInfo <: CallInfo @@ -241,6 +386,8 @@ struct FinalizerInfo <: CallInfo info::CallInfo # the callinfo for the finalizer call effects::Effects # the effects for the finalizer call end +# merely allocating a finalizer does not imply edges (unless it gets inlined later) +add_edges_impl(::Vector{Any}, ::FinalizerInfo) = nothing """ info::ModifyOpInfo <: CallInfo @@ -256,5 +403,12 @@ Represents a resolved call of one of: struct ModifyOpInfo <: CallInfo info::CallInfo # the callinfo for the `op(getval(), x)` call end +add_edges_impl(edges::Vector{Any}, info::ModifyOpInfo) = add_edges!(edges, info.info) + +struct VirtualMethodMatchInfo <: CallInfo + info::Union{MethodMatchInfo,UnionSplitInfo,InvokeCallInfo} +end +add_edges_impl(edges::Vector{Any}, info::VirtualMethodMatchInfo) = + _add_edges_impl(edges, info.info, #=mi_edge=#true) @specialize diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index 2f78348b79844..80e252dde3a02 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -1432,7 +1432,7 @@ end return Future{CallMeta}(callinfo, interp, sv) do callinfo, interp, sv TF = TF.contents RT = RT.contents - TF2 = tmeet(callinfo.rt, widenconst(TF)) + TF2 = tmeet(ipo_lattice(interp), callinfo.rt, widenconst(TF)) if TF2 === Bottom RT = Bottom elseif isconcretetype(RT) && has_nontrivial_extended_info(𝕃ᵢ, TF2) # isconcrete condition required to form a PartialStruct @@ -2959,7 +2959,7 @@ function return_type_tfunc(interp::AbstractInterpreter, argtypes::Vector{Any}, s if isa(sv, InferenceState) sv.restrict_abstract_call_sites = old_restrict end - info = verbose_stmt_info(interp) ? 
MethodResultPure(ReturnTypeCallInfo(call.info)) : MethodResultPure() + info = MethodResultPure(ReturnTypeCallInfo(call.info)) rt = widenslotwrapper(call.rt) if isa(rt, Const) # output was computed to be constant @@ -2989,11 +2989,12 @@ end # a simplified model of abstract_call_gf_by_type for applicable function abstract_applicable(interp::AbstractInterpreter, argtypes::Vector{Any}, sv::AbsIntState, max_methods::Int) - length(argtypes) < 2 && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) - isvarargtype(argtypes[2]) && return Future(CallMeta(Bool, Any, EFFECTS_THROWS, NoCallInfo())) + length(argtypes) < 2 && return Future(CallMeta(Bottom, ArgumentError, EFFECTS_THROWS, NoCallInfo())) + isvarargtype(argtypes[2]) && return Future(CallMeta(Bool, ArgumentError, EFFECTS_THROWS, NoCallInfo())) argtypes = argtypes[2:end] atype = argtypes_to_type(argtypes) matches = find_method_matches(interp, argtypes, atype; max_methods) + info = NoCallInfo() if isa(matches, FailedMethodMatch) rt = Bool # too many matches to analyze else @@ -3009,17 +3010,10 @@ function abstract_applicable(interp::AbstractInterpreter, argtypes::Vector{Any}, rt = Const(true) # has applicable matches end if rt !== Bool - for i in 1:napplicable - match = applicable[i]::MethodMatch - edge = specialize_method(match) - add_backedge!(sv, edge) - end - # also need an edge to the method table in case something gets - # added that did not intersect with any existing method - add_uncovered_edges!(sv, matches, atype) + info = VirtualMethodMatchInfo(matches.info) end end - return Future(CallMeta(rt, Union{}, EFFECTS_TOTAL, NoCallInfo())) + return Future(CallMeta(rt, Union{}, EFFECTS_TOTAL, info)) end add_tfunc(applicable, 1, INT_INF, @nospecs((𝕃::AbstractLattice, f, args...)->Bool), 40) @@ -3053,13 +3047,14 @@ function _hasmethod_tfunc(interp::AbstractInterpreter, argtypes::Vector{Any}, sv update_valid_age!(sv, valid_worlds) if match === nothing rt = Const(false) - add_mt_backedge!(sv, mt, types) # this should actually be an invoke-type backedge + vresults = MethodLookupResult(Any[], valid_worlds, true) + vinfo = MethodMatchInfo(vresults, mt, types, false) # XXX: this should actually be an info with invoke-type edge else rt = Const(true) - edge = specialize_method(match)::MethodInstance - add_invoke_backedge!(sv, types, edge) + vinfo = InvokeCallInfo(nothing, match, nothing, types) end - return CallMeta(rt, Any, EFFECTS_TOTAL, NoCallInfo()) + info = VirtualMethodMatchInfo(vinfo) + return CallMeta(rt, Union{}, EFFECTS_TOTAL, info) end # N.B.: typename maps type equivalence classes to a single value diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index 2a3bbf3854302..11337d5a4d047 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -95,26 +95,34 @@ const __measure_typeinf__ = RefValue{Bool}(false) function finish!(interp::AbstractInterpreter, caller::InferenceState; can_discard_trees::Bool=may_discard_trees(interp)) result = caller.result - valid_worlds = result.valid_worlds - if last(valid_worlds) >= get_world_counter() - # if we aren't cached, we don't need this edge - # but our caller might, so let's just make it anyways - store_backedges(result, caller.stmt_edges[1]) - end opt = result.src if opt isa OptimizationState - result.src = opt = ir_to_codeinf!(opt) + result.src = ir_to_codeinf!(opt) end + #@assert last(result.valid_worlds) <= get_world_counter() || isempty(caller.edges) if isdefined(result, :ci) ci = result.ci + # if we aren't cached, we don't need this edge + # but 
our caller might, so let's just make it anyways + if last(result.valid_worlds) >= get_world_counter() + # TODO: this should probably come after all store_backedges (after optimizations) for the entire graph in finish_cycle + # since we should be requiring that all edges first get their backedges set, as a batch + result.valid_worlds = WorldRange(first(result.valid_worlds), typemax(UInt)) + end + if last(result.valid_worlds) == typemax(UInt) + # if we can record all of the backedges in the global reverse-cache, + # we can now widen our applicability in the global cache too + store_backedges(ci, caller.edges) + end inferred_result = nothing relocatability = 0x1 const_flag = is_result_constabi_eligible(result) if !can_discard_trees || (is_cached(caller) && !const_flag) - inferred_result = transform_result_for_cache(interp, result.linfo, result.valid_worlds, result, can_discard_trees) + inferred_result = transform_result_for_cache(interp, result) + # TODO: do we want to augment edges here with any :invoke targets that we got from inlining (such that we didn't have a direct edge to it already)? relocatability = 0x0 if inferred_result isa CodeInfo - edges = inferred_result.debuginfo + di = inferred_result.debuginfo uncompressed = inferred_result inferred_result = maybe_compress_codeinfo(interp, result.linfo, inferred_result, can_discard_trees) result.is_src_volatile |= uncompressed !== inferred_result @@ -129,14 +137,12 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState; end end # n.b. relocatability = isa(inferred_result, String) && inferred_result[end] - if !@isdefined edges - edges = DebugInfo(result.linfo) + if !@isdefined di + di = DebugInfo(result.linfo) end - ccall(:jl_update_codeinst, Cvoid, (Any, Any, Int32, UInt, UInt, UInt32, Any, UInt8, Any), - ci, inferred_result, const_flag, - first(result.valid_worlds), last(result.valid_worlds), - encode_effects(result.ipo_effects), result.analysis_results, - relocatability, edges) + ccall(:jl_update_codeinst, Cvoid, (Any, Any, Int32, UInt, UInt, UInt32, Any, UInt8, Any, Any), + ci, inferred_result, const_flag, first(result.valid_worlds), last(result.valid_worlds), encode_effects(result.ipo_effects), + result.analysis_results, relocatability, di, Core.svec(caller.edges...)) engine_reject(interp, ci) end return nothing @@ -160,8 +166,8 @@ end function finish_cycle(::AbstractInterpreter, frames::Vector{AbsIntState}, cycleid::Int) cycle_valid_worlds = WorldRange() cycle_valid_effects = EFFECTS_TOTAL - for caller in cycleid:length(frames) - caller = frames[caller]::InferenceState + for frameid = cycleid:length(frames) + caller = frames[frameid]::InferenceState @assert caller.cycleid == cycleid # converge the world age range and effects for this cycle here: # all frames in the cycle should have the same bits of `valid_worlds` and `effects` @@ -170,20 +176,20 @@ function finish_cycle(::AbstractInterpreter, frames::Vector{AbsIntState}, cyclei cycle_valid_worlds = intersect(cycle_valid_worlds, caller.valid_worlds) cycle_valid_effects = merge_effects(cycle_valid_effects, caller.ipo_effects) end - for caller in cycleid:length(frames) - caller = frames[caller]::InferenceState + for frameid = cycleid:length(frames) + caller = frames[frameid]::InferenceState adjust_cycle_frame!(caller, cycle_valid_worlds, cycle_valid_effects) finishinfer!(caller, caller.interp) end - for caller in cycleid:length(frames) - caller = frames[caller]::InferenceState + for frameid = cycleid:length(frames) + caller = frames[frameid]::InferenceState opt = 
caller.result.src if opt isa OptimizationState # implies `may_optimize(caller.interp) === true` optimize(caller.interp, opt, caller.result) end end - for caller in cycleid:length(frames) - caller = frames[caller]::InferenceState + for frameid = cycleid:length(frames) + caller = frames[frameid]::InferenceState finish!(caller.interp, caller) end resize!(frames, cycleid - 1) @@ -210,12 +216,7 @@ function is_result_constabi_eligible(result::InferenceResult) return isa(result_type, Const) && is_foldable_nothrow(result.ipo_effects) && is_inlineable_constant(result_type.val) end - -function transform_result_for_cache(interp::AbstractInterpreter, - ::MethodInstance, valid_worlds::WorldRange, result::InferenceResult, - can_discard_trees::Bool=may_discard_trees(interp)) - return result.src -end +transform_result_for_cache(::AbstractInterpreter, result::InferenceResult) = result.src function maybe_compress_codeinfo(interp::AbstractInterpreter, mi::MethodInstance, ci::CodeInfo, can_discard_trees::Bool=may_discard_trees(interp)) @@ -239,29 +240,20 @@ function maybe_compress_codeinfo(interp::AbstractInterpreter, mi::MethodInstance end end -function cache_result!(interp::AbstractInterpreter, result::InferenceResult) - if last(result.valid_worlds) == get_world_counter() - # if we've successfully recorded all of the backedges in the global reverse-cache, - # we can now widen our applicability in the global cache too - result.valid_worlds = WorldRange(first(result.valid_worlds), typemax(UInt)) - end - @assert isdefined(result.ci, :inferred) +function cache_result!(interp::AbstractInterpreter, result::InferenceResult, ci::CodeInstance) + @assert isdefined(ci, :inferred) # check if the existing linfo metadata is also sufficient to describe the current inference result # to decide if it is worth caching this right now mi = result.linfo - cache_results = true cache = WorldView(code_cache(interp), result.valid_worlds) - if cache_results && haskey(cache, mi) + if haskey(cache, mi) ci = cache[mi] # n.b.: accurate edge representation might cause the CodeInstance for this to be constructed later @assert isdefined(ci, :inferred) - cache_results = false + return false end - - if cache_results - code_cache(interp)[mi] = result.ci - end - return cache_results + code_cache(interp)[mi] = ci + return true end function cycle_fix_limited(@nospecialize(typ), sv::InferenceState) @@ -387,21 +379,13 @@ function refine_exception_type(@nospecialize(exc_bestguess), ipo_effects::Effect return exc_bestguess end +const empty_edges = Core.svec() + # inference completed on `me` # update the MethodInstance function finishinfer!(me::InferenceState, interp::AbstractInterpreter) # prepare to run optimization passes on fulltree @assert isempty(me.ip) - s_edges = get_stmt_edges!(me, 1) - for i = 2:length(me.stmt_edges) - isassigned(me.stmt_edges, i) || continue - edges = me.stmt_edges[i] - append!(s_edges, edges) - empty!(edges) - end - if me.src.edges !== nothing - append!(s_edges, me.src.edges::Vector) - end # inspect whether our inference had a limited result accuracy, # else it may be suitable to cache bestguess = me.bestguess = cycle_fix_limited(me.bestguess, me) @@ -426,6 +410,8 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter) me.src.rettype = widenconst(ignorelimited(bestguess)) me.src.min_world = first(me.valid_worlds) me.src.max_world = last(me.valid_worlds) + istoplevel = !(me.linfo.def isa Method) + istoplevel || compute_edges!(me) # don't add backedges to toplevel method instance if limited_ret # a parent 
may be cached still, but not this intermediate work: @@ -481,17 +467,19 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter) const_flags = 0x2 else rettype_const = nothing - const_flags = 0x00 + const_flags = 0x0 end relocatability = 0x0 - edges = nothing - ccall(:jl_fill_codeinst, Cvoid, (Any, Any, Any, Any, Int32, UInt, UInt, UInt32, Any, Any), - result.ci, widenconst(result_type), widenconst(result.exc_result), rettype_const, const_flags, - first(result.valid_worlds), last(result.valid_worlds), - encode_effects(result.ipo_effects), result.analysis_results, edges) + di = nothing + edges = empty_edges # `edges` will be updated within `finish!` + ci = result.ci + ccall(:jl_fill_codeinst, Cvoid, (Any, Any, Any, Any, Int32, UInt, UInt, UInt32, Any, Any, Any), + ci, widenconst(result_type), widenconst(result.exc_result), rettype_const, const_flags, + first(result.valid_worlds), last(result.valid_worlds), + encode_effects(result.ipo_effects), result.analysis_results, di, edges) if is_cached(me) - cached_results = cache_result!(me.interp, me.result) - if !cached_results + cached_result = cache_result!(me.interp, result, ci) + if !cached_result me.cache_mode = CACHE_MODE_NULL end end @@ -500,19 +488,29 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter) end # record the backedges -store_backedges(caller::InferenceResult, edges::Vector{Any}) = store_backedges(caller.linfo, edges) -function store_backedges(caller::MethodInstance, edges::Vector{Any}) - isa(caller.def, Method) || return nothing # don't add backedges to toplevel method instance +function store_backedges(caller::CodeInstance, edges::Vector{Any}) + isa(caller.def.def, Method) || return # don't add backedges to toplevel method instance for itr in BackedgeIterator(edges) callee = itr.caller if isa(callee, MethodInstance) ccall(:jl_method_instance_add_backedge, Cvoid, (Any, Any, Any), callee, itr.sig, caller) else - typeassert(callee, MethodTable) ccall(:jl_method_table_add_backedge, Cvoid, (Any, Any, Any), callee, itr.sig, caller) end end - return nothing + nothing +end + +function compute_edges!(sv::InferenceState) + edges = sv.edges + for i in 1:length(sv.stmt_info) + add_edges!(edges, sv.stmt_info[i]) + end + user_edges = sv.src.edges + if user_edges !== nothing && user_edges !== empty_edges + append!(edges, user_edges) + end + nothing end function record_slot_assign!(sv::InferenceState) @@ -617,7 +615,7 @@ function type_annotate!(interp::AbstractInterpreter, sv::InferenceState) return nothing end -function merge_call_chain!(interp::AbstractInterpreter, parent::InferenceState, child::InferenceState) +function merge_call_chain!(::AbstractInterpreter, parent::InferenceState, child::InferenceState) # add backedge of parent <- child # then add all backedges of parent <- parent.parent frames = parent.callstack::Vector{AbsIntState} @@ -630,14 +628,21 @@ function merge_call_chain!(interp::AbstractInterpreter, parent::InferenceState, parent = frame_parent(child)::InferenceState end # ensure that walking the callstack has the same cycleid (DAG) - for frame = reverse(ancestorid:length(frames)) - frame = frames[frame]::InferenceState + for frameid = reverse(ancestorid:length(frames)) + frame = frames[frameid]::InferenceState frame.cycleid == ancestorid && break @assert frame.cycleid > ancestorid frame.cycleid = ancestorid end end +function add_cycle_backedge!(caller::InferenceState, frame::InferenceState) + update_valid_age!(caller, frame.valid_worlds) + backedge = (caller, caller.currpc) + 
contains_is(frame.cycle_backedges, backedge) || push!(frame.cycle_backedges, backedge) + return frame +end + function is_same_frame(interp::AbstractInterpreter, mi::MethodInstance, frame::InferenceState) return mi === frame_instance(frame) && cache_owner(interp) === cache_owner(frame.interp) end @@ -661,8 +666,8 @@ function resolve_call_cycle!(interp::AbstractInterpreter, mi::MethodInstance, pa parent isa InferenceState || return false frames = parent.callstack::Vector{AbsIntState} uncached = false - for frame = reverse(1:length(frames)) - frame = frames[frame] + for frameid = reverse(1:length(frames)) + frame = frames[frameid] isa(frame, InferenceState) || break uncached |= !is_cached(frame) # ensure we never add an uncached frame to a cycle if is_same_frame(interp, mi, frame) @@ -682,31 +687,20 @@ end ipo_effects(code::CodeInstance) = decode_effects(code.ipo_purity_bits) -struct EdgeCallResult - rt - exct - edge::Union{Nothing,MethodInstance} - effects::Effects - volatile_inf_result::Union{Nothing,VolatileInferenceResult} - function EdgeCallResult(@nospecialize(rt), @nospecialize(exct), - edge::Union{Nothing,MethodInstance}, - effects::Effects, - volatile_inf_result::Union{Nothing,VolatileInferenceResult} = nothing) - return new(rt, exct, edge, effects, volatile_inf_result) - end -end - # return cached result of regular inference function return_cached_result(interp::AbstractInterpreter, method::Method, codeinst::CodeInstance, caller::AbsIntState, edgecycle::Bool, edgelimited::Bool) rt = cached_return_type(codeinst) + exct = codeinst.exctype effects = ipo_effects(codeinst) + edge = codeinst update_valid_age!(caller, WorldRange(min_world(codeinst), max_world(codeinst))) - return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, EdgeCallResult(rt, codeinst.exctype, codeinst.def, effects), edgecycle, edgelimited)) + return Future(MethodCallResult(interp, caller, method, rt, exct, effects, edge, edgecycle, edgelimited)) end -function EdgeCall_to_MethodCall_Result(interp::AbstractInterpreter, sv::AbsIntState, method::Method, result::EdgeCallResult, edgecycle::Bool, edgelimited::Bool) - (; rt, exct, edge, effects, volatile_inf_result) = result - +function MethodCallResult(::AbstractInterpreter, sv::AbsIntState, method::Method, + @nospecialize(rt), @nospecialize(exct), effects::Effects, + edge::Union{Nothing,CodeInstance}, edgecycle::Bool, edgelimited::Bool, + volatile_inf_result::Union{Nothing,VolatileInferenceResult}=nothing) if edge === nothing edgecycle = edgelimited = true end @@ -729,14 +723,34 @@ function EdgeCall_to_MethodCall_Result(interp::AbstractInterpreter, sv::AbsIntSt end end - return MethodCallResult(rt, exct, edgecycle, edgelimited, edge, effects, volatile_inf_result) + return MethodCallResult(rt, exct, effects, edge, edgecycle, edgelimited, volatile_inf_result) +end + +# allocate a dummy `edge::CodeInstance` to be added by `add_edges!` +function codeinst_as_edge(interp::AbstractInterpreter, sv::InferenceState) + mi = sv.linfo + owner = cache_owner(interp) + min_world, max_world = first(sv.valid_worlds), last(sv.valid_worlds) + if max_world >= get_world_counter() + max_world = typemax(UInt) + end + ci = CodeInstance(mi, owner, Any, Any, nothing, nothing, zero(Int32), + min_world, max_world, zero(UInt32), nothing, zero(UInt8), nothing, Core.svec(sv.edges...)) + if max_world == typemax(UInt) + # if we can record all of the backedges in the global reverse-cache, + # we can now widen our applicability in the global cache too + # TODO: this should probably come after we 
decide this edge is even useful + store_backedges(ci, sv.edges) + end + return ci end # compute (and cache) an inferred AST and return the current best estimate of the result type function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize(atype), sparams::SimpleVector, caller::AbsIntState, edgecycle::Bool, edgelimited::Bool) - mi = specialize_method(method, atype, sparams)::MethodInstance + mi = specialize_method(method, atype, sparams) cache_mode = CACHE_MODE_GLOBAL # cache edge targets globally by default force_inline = is_stmt_inline(get_curr_ssaflag(caller)) + edge_ci = nothing let codeinst = get(code_cache(interp), mi, nothing) if codeinst isa CodeInstance # return existing rettype if the code is already inferred inferred = @atomic :monotonic codeinst.inferred @@ -745,6 +759,7 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize # nevertheless we re-infer it here again in order to propagate the re-inferred # source to the inliner as a volatile result cache_mode = CACHE_MODE_VOLATILE + edge_ci = codeinst else @assert codeinst.def === mi "MethodInstance for cached edge does not match" return return_cached_result(interp, method, codeinst, caller, edgecycle, edgelimited) @@ -753,7 +768,7 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize end if ccall(:jl_get_module_infer, Cint, (Any,), method.module) == 0 && !generating_output(#=incremental=#false) add_remark!(interp, caller, "[typeinf_edge] Inference is disabled for the target module") - return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, EdgeCallResult(Any, Any, nothing, Effects()), edgecycle, edgelimited)) + return Future(MethodCallResult(interp, caller, method, Any, Any, Effects(), nothing, edgecycle, edgelimited)) end if !is_cached(caller) && frame_parent(caller) === nothing # this caller exists to return to the user @@ -765,32 +780,36 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize if frame === false # completely new, but check again after reserving in the engine if cache_mode == CACHE_MODE_GLOBAL - ci = engine_reserve(interp, mi) - let codeinst = get(code_cache(interp), mi, nothing) - if codeinst isa CodeInstance # return existing rettype if the code is already inferred - engine_reject(interp, ci) - inferred = @atomic :monotonic codeinst.inferred - if inferred === nothing && force_inline - cache_mode = CACHE_MODE_VOLATILE - else - @assert codeinst.def === mi "MethodInstance for cached edge does not match" - return return_cached_result(interp, method, codeinst, caller, edgecycle, edgelimited) - end + ci_from_engine = engine_reserve(interp, mi) + edge_ci = ci_from_engine + codeinst = get(code_cache(interp), mi, nothing) + if codeinst isa CodeInstance # return existing rettype if the code is already inferred + engine_reject(interp, ci_from_engine) + ci_from_engine = nothing + inferred = @atomic :monotonic codeinst.inferred + if inferred === nothing && force_inline + cache_mode = CACHE_MODE_VOLATILE + edge_ci = codeinst + else + @assert codeinst.def === mi "MethodInstance for cached edge does not match" + return return_cached_result(interp, method, codeinst, caller, edgecycle, edgelimited) end end + else + ci_from_engine = nothing end result = InferenceResult(mi, typeinf_lattice(interp)) - if cache_mode == CACHE_MODE_GLOBAL - result.ci = ci + if ci_from_engine !== nothing + result.ci = ci_from_engine end frame = InferenceState(result, cache_mode, interp) # always use the cache for edge targets if 
frame === nothing add_remark!(interp, caller, "[typeinf_edge] Failed to retrieve source") # can't get the source for this, so we know nothing - if cache_mode == CACHE_MODE_GLOBAL - engine_reject(interp, ci) + if ci_from_engine !== nothing + engine_reject(interp, ci_from_engine) end - return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, EdgeCallResult(Any, Any, nothing, Effects()), edgecycle, edgelimited)) + return Future(MethodCallResult(interp, caller, method, Any, Any, Effects(), nothing, edgecycle, edgelimited)) end assign_parentchild!(frame, caller) # the actual inference task for this edge is going to be scheduled within `typeinf_local` via the callstack queue @@ -799,15 +818,19 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize push!(caller.tasks, function get_infer_result(interp, caller) update_valid_age!(caller, frame.valid_worlds) local isinferred = is_inferred(frame) - local edge = isinferred ? mi : nothing + local edge = isinferred ? edge_ci : nothing local effects = isinferred ? frame.result.ipo_effects : # effects are adjusted already within `finish` for ipo_effects adjust_effects(effects_for_cycle(frame.ipo_effects), method) + local bestguess = frame.bestguess local exc_bestguess = refine_exception_type(frame.exc_bestguess, effects) # propagate newly inferred source to the inliner, allowing efficient inlining w/o deserialization: # note that this result is cached globally exclusively, so we can use this local result destructively - local volatile_inf_result = isinferred ? VolatileInferenceResult(result) : nothing - local edgeresult = EdgeCallResult(frame.bestguess, exc_bestguess, edge, effects, volatile_inf_result) - mresult[] = EdgeCall_to_MethodCall_Result(interp, caller, method, edgeresult, edgecycle, edgelimited) + local volatile_inf_result = if isinferred && edge_ci isa CodeInstance + result.ci_as_edge = edge_ci # set the edge for the inliner usage + VolatileInferenceResult(result) + end + mresult[] = MethodCallResult(interp, caller, method, bestguess, exc_bestguess, effects, + edge, edgecycle, edgelimited, volatile_inf_result) return true end) return mresult @@ -815,15 +838,15 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize elseif frame === true # unresolvable cycle add_remark!(interp, caller, "[typeinf_edge] Unresolvable cycle") - return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, EdgeCallResult(Any, Any, nothing, Effects()), edgecycle, edgelimited)) + return Future(MethodCallResult(interp, caller, method, Any, Any, Effects(), nothing, edgecycle, edgelimited)) end # return the current knowledge about this cycle frame = frame::InferenceState update_valid_age!(caller, frame.valid_worlds) effects = adjust_effects(effects_for_cycle(frame.ipo_effects), method) + bestguess = frame.bestguess exc_bestguess = refine_exception_type(frame.exc_bestguess, effects) - edgeresult = EdgeCallResult(frame.bestguess, exc_bestguess, nothing, effects) - return Future(EdgeCall_to_MethodCall_Result(interp, caller, method, edgeresult, edgecycle, edgelimited)) + return Future(MethodCallResult(interp, caller, method, bestguess, exc_bestguess, effects, nothing, edgecycle, edgelimited)) end # The `:terminates` effect bit must be conservatively tainted unless recursion cycle has @@ -871,6 +894,7 @@ function codeinfo_for_const(interp::AbstractInterpreter, mi::MethodInstance, @no tree.debuginfo = DebugInfo(mi) tree.ssaflags = UInt32[0] tree.rettype = Core.Typeof(val) + tree.edges = Core.svec() 
set_inlineable!(tree, true) tree.parent = mi return tree @@ -888,7 +912,7 @@ function codeinstance_for_const_with_code(interp::AbstractInterpreter, code::Cod return CodeInstance(code.def, cache_owner(interp), code.rettype, code.exctype, code.rettype_const, src, Int32(0x3), code.min_world, code.max_world, code.ipo_purity_bits, code.analysis_results, - code.relocatability, src.debuginfo) + code.relocatability, src.debuginfo, src.edges) end result_is_constabi(interp::AbstractInterpreter, result::InferenceResult) = @@ -951,13 +975,13 @@ function typeinf_frame(interp::AbstractInterpreter, mi::MethodInstance, run_opti if run_optimizer if result_is_constabi(interp, frame.result) rt = frame.result.result::Const - opt = codeinfo_for_const(interp, frame.linfo, rt.val) + src = codeinfo_for_const(interp, frame.linfo, rt.val) else opt = OptimizationState(frame, interp) optimize(interp, opt, frame.result) - opt = ir_to_codeinf!(opt) + src = ir_to_codeinf!(opt) end - result.src = frame.src = opt + result.src = frame.src = src end return frame end @@ -1050,7 +1074,7 @@ function typeinf_ext(interp::AbstractInterpreter, mi::MethodInstance, source_mod src isa CodeInfo || return nothing return CodeInstance(mi, cache_owner(interp), Any, Any, nothing, src, Int32(0), get_inference_world(interp), get_inference_world(interp), - UInt32(0), nothing, UInt8(0), src.debuginfo) + UInt32(0), nothing, UInt8(0), src.debuginfo, src.edges) end end ci = engine_reserve(interp, mi) diff --git a/base/compiler/types.jl b/base/compiler/types.jl index b6c976da48f67..8899e7673d753 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -91,21 +91,31 @@ There are two constructor available: for constant inference, with extended lattice information included in `result.argtypes`. """ mutable struct InferenceResult + #=== constant fields ===# const linfo::MethodInstance const argtypes::Vector{Any} const overridden_by_const::Union{Nothing,BitVector} - result # extended lattice element if inferred, nothing otherwise - exc_result # like `result`, but for the thrown value - src # ::Union{CodeInfo, IRCode, OptimizationState} if inferred copy is available, nothing otherwise - valid_worlds::WorldRange # if inference and optimization is finished - ipo_effects::Effects # if inference is finished - effects::Effects # if optimization is finished + + #=== mutable fields ===# + result # extended lattice element if inferred, nothing otherwise + exc_result # like `result`, but for the thrown value + src # ::Union{CodeInfo, IRCode, OptimizationState} if inferred copy is available, nothing otherwise + valid_worlds::WorldRange # if inference and optimization is finished + ipo_effects::Effects # if inference is finished + effects::Effects # if optimization is finished analysis_results::AnalysisResults # AnalysisResults with e.g. 
result::ArgEscapeCache if optimized, otherwise NULL_ANALYSIS_RESULTS
-    is_src_volatile::Bool # `src` has been cached globally as the compressed format already, allowing `src` to be used destructively
-    ci::CodeInstance # CodeInstance if this result may be added to the cache
+    is_src_volatile::Bool # `src` has been cached globally as the compressed format already, allowing `src` to be used destructively
+
+    #=== uninitialized fields ===#
+    ci::CodeInstance # CodeInstance if this result may be added to the cache
+    ci_as_edge::CodeInstance # CodeInstance as the edge representing locally cached result
     function InferenceResult(mi::MethodInstance, argtypes::Vector{Any}, overridden_by_const::Union{Nothing,BitVector})
-        return new(mi, argtypes, overridden_by_const, nothing, nothing, nothing,
-            WorldRange(), Effects(), Effects(), NULL_ANALYSIS_RESULTS, false)
+        result = exc_result = src = nothing
+        valid_worlds = WorldRange()
+        ipo_effects = effects = Effects()
+        analysis_results = NULL_ANALYSIS_RESULTS
+        return new(mi, argtypes, overridden_by_const, result, exc_result, src,
+            valid_worlds, ipo_effects, effects, analysis_results, #=is_src_volatile=#false)
     end
 end
 function InferenceResult(mi::MethodInstance, 𝕃::AbstractLattice=fallback_lattice)
@@ -399,7 +409,6 @@ engine_reserve(mi::MethodInstance, @nospecialize owner) = ccall(:jl_engine_reser
 
 # engine_fulfill(::AbstractInterpreter, ci::CodeInstance, src::CodeInfo) = ccall(:jl_engine_fulfill, Cvoid, (Any, Any), ci, src) # currently the same as engine_reject, so just use that one
 engine_reject(::AbstractInterpreter, ci::CodeInstance) = ccall(:jl_engine_fulfill, Cvoid, (Any, Ptr{Cvoid}), ci, C_NULL)
-
 function already_inferred_quick_test end
 function lock_mi_inference end
 function unlock_mi_inference end
@@ -416,7 +425,6 @@ function add_remark! end
 may_optimize(::AbstractInterpreter) = true
 may_compress(::AbstractInterpreter) = true
 may_discard_trees(::AbstractInterpreter) = true
-verbose_stmt_info(::AbstractInterpreter) = false
 
 """
     method_table(interp::AbstractInterpreter) -> MethodTableView
@@ -463,18 +471,23 @@ abstract type CallInfo end
 
 @nospecialize
 
+function add_edges!(edges::Vector{Any}, info::CallInfo)
+    if info === NoCallInfo()
+        return nothing # just a minor optimization to avoid dynamic dispatch
+    end
+    add_edges_impl(edges, info)
+    nothing
+end
 nsplit(info::CallInfo) = nsplit_impl(info)::Union{Nothing,Int}
 getsplit(info::CallInfo, idx::Int) = getsplit_impl(info, idx)::MethodLookupResult
-add_uncovered_edges!(edges::Vector{Any}, info::CallInfo, @nospecialize(atype)) = add_uncovered_edges_impl(edges, info, atype)
-
-getresult(info::CallInfo, idx::Int) = getresult_impl(info, idx)
+getresult(info::CallInfo, idx::Int) = getresult_impl(info, idx)#=::Union{Nothing,ConstResult}=#
 
-# must implement `nsplit`, `getsplit`, and `add_uncovered_edges!` to opt in to inlining
+add_edges_impl(::Vector{Any}, ::CallInfo) = error("""
+    All `CallInfo` is required to implement `add_edges_impl(::Vector{Any}, ::CallInfo)`""")
 nsplit_impl(::CallInfo) = nothing
-getsplit_impl(::CallInfo, ::Int) = error("unexpected call into `getsplit`")
-add_uncovered_edges_impl(::Vector{Any}, ::CallInfo, _) = error("unexpected call into `add_uncovered_edges!`")
-
-# must implement `getresult` to opt in to extended lattice return information
+getsplit_impl(::CallInfo, ::Int) = error("""
+    A `info::CallInfo` that implements `nsplit_impl(info::CallInfo) -> Int` must implement `getsplit_impl(info::CallInfo, idx::Int) -> MethodLookupResult`
+    in order to correctly opt in to inlining""")
 getresult_impl(::CallInfo, ::Int) = nothing
 
 @specialize
diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl
index f9202788b6360..5361ff26f997c 100644
--- a/base/compiler/utilities.jl
+++ b/base/compiler/utilities.jl
@@ -252,8 +252,6 @@ struct BackedgeIterator
     backedges::Vector{Any}
 end
 
-const empty_backedge_iter = BackedgeIterator(Any[])
-
 struct BackedgePair
     sig # ::Union{Nothing,Type}
     caller::Union{MethodInstance,MethodTable}
@@ -262,11 +260,36 @@ end
 
 function iterate(iter::BackedgeIterator, i::Int=1)
     backedges = iter.backedges
-    i > length(backedges) && return nothing
-    item = backedges[i]
-    isa(item, MethodInstance) && return BackedgePair(nothing, item), i+1 # regular dispatch
-    isa(item, MethodTable) && return BackedgePair(backedges[i+1], item), i+2 # abstract dispatch
-    return BackedgePair(item, backedges[i+1]::MethodInstance), i+2 # `invoke` calls
+    while true
+        i > length(backedges) && return nothing
+        item = backedges[i]
+        if item isa Int
+            i += 2
+            continue # ignore the query information if present
+        elseif isa(item, Method)
+            # ignore `Method`-edges (from e.g. failed `abstract_call_method`)
+            i += 1
+            continue
+        end
+        if isa(item, CodeInstance)
+            item = item.def
+        end
+        if isa(item, MethodInstance) # regular dispatch
+            return BackedgePair(nothing, item), i+1
+        elseif isa(item, MethodTable) # abstract dispatch (legacy style edges)
+            return BackedgePair(backedges[i+1], item), i+2
+        else # `invoke` call
+            callee = backedges[i+1]
+            if isa(callee, Method)
+                i += 2
+                continue
+            end
+            if isa(callee, CodeInstance)
+                callee = callee.def
+            end
+            return BackedgePair(item, callee::MethodInstance), i+2
+        end
+    end
 end
 
 #########
diff --git a/base/expr.jl b/base/expr.jl
index e281d9b677297..f57331ef02e74 100644
--- a/base/expr.jl
+++ b/base/expr.jl
@@ -79,7 +79,7 @@ function copy(c::CodeInfo)
         cnew.slottypes = copy(cnew.slottypes::Vector{Any})
     end
     cnew.ssaflags = copy(cnew.ssaflags)
-    cnew.edges = cnew.edges === nothing ? nothing : copy(cnew.edges::Vector)
+    cnew.edges = cnew.edges === nothing || cnew.edges isa Core.SimpleVector ? cnew.edges : copy(cnew.edges::Vector)
     ssavaluetypes = cnew.ssavaluetypes
     ssavaluetypes isa Vector{Any} && (cnew.ssavaluetypes = copy(ssavaluetypes))
     return cnew
diff --git a/base/show.jl b/base/show.jl
index dbdd85f0608da..627982b2bcb1a 100644
--- a/base/show.jl
+++ b/base/show.jl
@@ -1350,7 +1350,11 @@ function sourceinfo_slotnames(slotnames::Vector{Symbol})
     return printnames
 end
 
-show(io::IO, l::Core.MethodInstance) = show_mi(io, l)
+show(io::IO, mi::Core.MethodInstance) = show_mi(io, mi)
+function show(io::IO, codeinst::Core.CodeInstance)
+    print(io, "CodeInstance for ")
+    show_mi(io, codeinst.def)
+end
 
 function show_mi(io::IO, mi::Core.MethodInstance, from_stackframe::Bool=false)
     def = mi.def
diff --git a/src/common_symbols1.inc b/src/common_symbols1.inc
index f54be52729a4f..3dfcf17a07b5c 100644
--- a/src/common_symbols1.inc
+++ b/src/common_symbols1.inc
@@ -88,5 +88,3 @@ jl_symbol("ifelse"),
 jl_symbol("Array"),
 jl_symbol("eq_int"),
 jl_symbol("throw_inexacterror"),
-jl_symbol("|"),
-jl_symbol("setproperty!"),
diff --git a/src/common_symbols2.inc b/src/common_symbols2.inc
index ee2a0e2edd9fe..2a6990bac52ff 100644
--- a/src/common_symbols2.inc
+++ b/src/common_symbols2.inc
@@ -1,3 +1,5 @@
+jl_symbol("|"),
+jl_symbol("setproperty!"),
 jl_symbol("sext_int"),
 jl_symbol("String"),
 jl_symbol("Int"),
@@ -244,5 +246,3 @@ jl_symbol("invokelatest"),
 jl_symbol("jl_array_del_end"),
 jl_symbol("_mod64"),
 jl_symbol("parameters"),
-jl_symbol("monotonic"),
-jl_symbol("regex.jl"),
diff --git a/src/gf.c b/src/gf.c
index 285942cd157c5..18141410fa625 100644
--- a/src/gf.c
+++ b/src/gf.c
@@ -322,7 +322,7 @@ jl_datatype_t *jl_mk_builtin_func(jl_datatype_t *dt, const char *name, jl_fptr_a
     jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing,
         (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, jl_nothing, jl_nothing,
-        0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL);
+        0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL, NULL);
     jl_mi_cache_insert(mi, codeinst);
     jl_atomic_store_relaxed(&codeinst->specptr.fptr1, fptr);
     jl_atomic_store_relaxed(&codeinst->invoke, jl_fptr_args);
@@ -480,7 +480,7 @@ JL_DLLEXPORT jl_value_t *jl_call_in_typeinf_world(jl_value_t **args, int nargs)
 
 JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred(
         jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_value_t *rettype,
-        size_t min_world, size_t max_world, jl_debuginfo_t *edges)
+        size_t min_world, size_t max_world, jl_debuginfo_t *di, jl_svec_t *edges)
 {
     jl_value_t *owner = jl_nothing; // TODO: owner should be arg
     jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache);
@@ -489,27
+489,30 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( jl_atomic_load_relaxed(&codeinst->max_world) == max_world && jl_egal(codeinst->owner, owner) && jl_egal(codeinst->rettype, rettype)) { - if (edges == NULL) + if (di == NULL) return codeinst; jl_debuginfo_t *debuginfo = jl_atomic_load_relaxed(&codeinst->debuginfo); - if (edges == debuginfo) - return codeinst; - if (debuginfo == NULL && jl_atomic_cmpswap_relaxed(&codeinst->debuginfo, &debuginfo, edges)) - return codeinst; - if (debuginfo && jl_egal((jl_value_t*)debuginfo, (jl_value_t*)edges)) + if (di != debuginfo) { + if (!(debuginfo == NULL && jl_atomic_cmpswap_relaxed(&codeinst->debuginfo, &debuginfo, di))) + if (!(debuginfo && jl_egal((jl_value_t*)debuginfo, (jl_value_t*)di))) + continue; + } + // TODO: this is implied by the matching worlds, since it is intrinsic, so do we really need to verify it? + jl_svec_t *e = jl_atomic_load_relaxed(&codeinst->edges); + if (e && jl_egal((jl_value_t*)e, (jl_value_t*)edges)) return codeinst; } codeinst = jl_atomic_load_relaxed(&codeinst->next); } codeinst = jl_new_codeinst( mi, owner, rettype, (jl_value_t*)jl_any_type, NULL, NULL, - 0, min_world, max_world, 0, jl_nothing, 0, edges); + 0, min_world, max_world, 0, jl_nothing, 0, di, edges); jl_mi_cache_insert(mi, codeinst); return codeinst; } JL_DLLEXPORT int jl_mi_cache_has_ci(jl_method_instance_t *mi, - jl_code_instance_t *ci) + jl_code_instance_t *ci) { jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&mi->cache); while (codeinst) { @@ -527,14 +530,16 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( int32_t const_flags, size_t min_world, size_t max_world, uint32_t effects, jl_value_t *analysis_results, uint8_t relocatability, - jl_debuginfo_t *edges /* , int absolute_max*/) + jl_debuginfo_t *di, jl_svec_t *edges /*, int absolute_max*/) { - jl_task_t *ct = jl_current_task; assert(min_world <= max_world && "attempting to set invalid world constraints"); + //assert((!jl_is_method(mi->def.value) || max_world != ~(size_t)0 || min_world <= 1 || edges == NULL || jl_svec_len(edges) != 0) && "missing edges"); + jl_task_t *ct = jl_current_task; jl_code_instance_t *codeinst = (jl_code_instance_t*)jl_gc_alloc(ct->ptls, sizeof(jl_code_instance_t), jl_code_instance_type); codeinst->def = mi; codeinst->owner = owner; + jl_atomic_store_relaxed(&codeinst->edges, edges); jl_atomic_store_relaxed(&codeinst->min_world, min_world); jl_atomic_store_relaxed(&codeinst->max_world, max_world); codeinst->rettype = rettype; @@ -543,7 +548,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( if ((const_flags & 2) == 0) inferred_const = NULL; codeinst->rettype_const = inferred_const; - jl_atomic_store_relaxed(&codeinst->debuginfo, (jl_value_t*)edges == jl_nothing ? NULL : edges); + jl_atomic_store_relaxed(&codeinst->debuginfo, (jl_value_t*)di == jl_nothing ? 
NULL : di); jl_atomic_store_relaxed(&codeinst->specptr.fptr, NULL); jl_atomic_store_relaxed(&codeinst->invoke, NULL); if ((const_flags & 1) != 0) { @@ -563,13 +568,17 @@ JL_DLLEXPORT void jl_update_codeinst( jl_code_instance_t *codeinst, jl_value_t *inferred, int32_t const_flags, size_t min_world, size_t max_world, uint32_t effects, jl_value_t *analysis_results, - uint8_t relocatability, jl_debuginfo_t *edges /* , int absolute_max*/) + uint8_t relocatability, jl_debuginfo_t *di, jl_svec_t *edges /* , int absolute_max*/) { + assert(min_world <= max_world && "attempting to set invalid world constraints"); + //assert((!jl_is_method(codeinst->def->def.value) || max_world != ~(size_t)0 || min_world <= 1 || jl_svec_len(edges) != 0) && "missing edges"); codeinst->relocatability = relocatability; codeinst->analysis_results = analysis_results; jl_gc_wb(codeinst, analysis_results); jl_atomic_store_relaxed(&codeinst->ipo_purity_bits, effects); - jl_atomic_store_relaxed(&codeinst->debuginfo, edges); + jl_atomic_store_relaxed(&codeinst->debuginfo, di); + jl_gc_wb(codeinst, di); + jl_atomic_store_relaxed(&codeinst->edges, edges); jl_gc_wb(codeinst, edges); if ((const_flags & 1) != 0) { assert(codeinst->rettype_const); @@ -587,9 +596,10 @@ JL_DLLEXPORT void jl_fill_codeinst( jl_value_t *inferred_const, int32_t const_flags, size_t min_world, size_t max_world, uint32_t effects, jl_value_t *analysis_results, - jl_debuginfo_t *edges /* , int absolute_max*/) + jl_debuginfo_t *di, jl_svec_t *edges /* , int absolute_max*/) { assert(min_world <= max_world && "attempting to set invalid world constraints"); + //assert((!jl_is_method(codeinst->def->def.value) || max_world != ~(size_t)0 || min_world <= 1 || jl_svec_len(edges) != 0) && "missing edges"); codeinst->rettype = rettype; jl_gc_wb(codeinst, rettype); codeinst->exctype = exctype; @@ -598,8 +608,12 @@ JL_DLLEXPORT void jl_fill_codeinst( codeinst->rettype_const = inferred_const; jl_gc_wb(codeinst, inferred_const); } - jl_atomic_store_relaxed(&codeinst->debuginfo, (jl_value_t*)edges == jl_nothing ? 
NULL : edges); + jl_atomic_store_relaxed(&codeinst->edges, edges); jl_gc_wb(codeinst, edges); + if ((jl_value_t*)di != jl_nothing) { + jl_atomic_store_relaxed(&codeinst->debuginfo, di); + jl_gc_wb(codeinst, di); + } if ((const_flags & 1) != 0) { // TODO: may want to follow ordering restrictions here (see jitlayers.cpp) assert(const_flags & 2); @@ -616,7 +630,7 @@ JL_DLLEXPORT void jl_fill_codeinst( JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_uninit(jl_method_instance_t *mi, jl_value_t *owner) { - jl_code_instance_t *codeinst = jl_new_codeinst(mi, owner, NULL, NULL, NULL, NULL, 0, 0, 0, 0, NULL, 0, NULL); + jl_code_instance_t *codeinst = jl_new_codeinst(mi, owner, NULL, NULL, NULL, NULL, 0, 0, 0, 0, NULL, 0, NULL, NULL); jl_atomic_store_relaxed(&codeinst->min_world, 1); // make temporarily invalid before returning, so that jl_fill_codeinst is valid later return codeinst; } @@ -1745,35 +1759,29 @@ JL_DLLEXPORT jl_value_t *jl_debug_method_invalidation(int state) static void _invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_world, int depth); // recursively invalidate cached methods that had an edge to a replaced method -static void invalidate_method_instance(jl_method_instance_t *replaced, size_t max_world, int depth) +static void invalidate_code_instance(jl_code_instance_t *replaced, size_t max_world, int depth) { jl_timing_counter_inc(JL_TIMING_COUNTER_Invalidations, 1); if (_jl_debug_method_invalidation) { jl_value_t *boxeddepth = NULL; JL_GC_PUSH1(&boxeddepth); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)replaced); + jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)replaced->def); boxeddepth = jl_box_int32(depth); jl_array_ptr_1d_push(_jl_debug_method_invalidation, boxeddepth); JL_GC_POP(); } - //jl_static_show(JL_STDERR, (jl_value_t*)replaced); - if (!jl_is_method(replaced->def.method)) + //jl_static_show(JL_STDERR, (jl_value_t*)replaced->def); + if (!jl_is_method(replaced->def->def.method)) return; // shouldn't happen, but better to be safe - JL_LOCK(&replaced->def.method->writelock); - jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&replaced->cache); - while (codeinst) { - if (jl_atomic_load_relaxed(&codeinst->max_world) == ~(size_t)0) { - assert(jl_atomic_load_relaxed(&codeinst->min_world) - 1 <= max_world && "attempting to set illogical world constraints (probable race condition)"); - jl_atomic_store_release(&codeinst->max_world, max_world); - } - assert(jl_atomic_load_relaxed(&codeinst->max_world) <= max_world); - codeinst = jl_atomic_load_relaxed(&codeinst->next); + JL_LOCK(&replaced->def->def.method->writelock); + if (jl_atomic_load_relaxed(&replaced->max_world) == ~(size_t)0) { + assert(jl_atomic_load_relaxed(&replaced->min_world) - 1 <= max_world && "attempting to set illogical world constraints (probable race condition)"); + jl_atomic_store_release(&replaced->max_world, max_world); } - JL_GC_PUSH1(&replaced); + assert(jl_atomic_load_relaxed(&replaced->max_world) <= max_world); // recurse to all backedges to update their valid range also - _invalidate_backedges(replaced, max_world, depth + 1); - JL_GC_POP(); - JL_UNLOCK(&replaced->def.method->writelock); + _invalidate_backedges(replaced->def, max_world, depth + 1); + JL_UNLOCK(&replaced->def->def.method->writelock); } static void _invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_world, int depth) { @@ -1783,10 +1791,11 @@ static void _invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_ replaced_mi->backedges = NULL; 
JL_GC_PUSH1(&backedges); size_t i = 0, l = jl_array_nrows(backedges); - jl_method_instance_t *replaced; + jl_code_instance_t *replaced; while (i < l) { i = get_next_edge(backedges, i, NULL, &replaced); - invalidate_method_instance(replaced, max_world, depth); + JL_GC_PROMISE_ROOTED(replaced); // propagated by get_next_edge from backedges + invalidate_code_instance(replaced, max_world, depth); } JL_GC_POP(); } @@ -1808,11 +1817,14 @@ static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_w } // add a backedge from callee to caller -JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_value_t *invokesig, jl_method_instance_t *caller) +JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_value_t *invokesig, jl_code_instance_t *caller) { - JL_LOCK(&callee->def.method->writelock); if (invokesig == jl_nothing) invokesig = NULL; // julia uses `nothing` but C uses NULL (#undef) + assert(jl_is_method_instance(callee)); + assert(jl_is_code_instance(caller)); + assert(invokesig == NULL || jl_is_type(invokesig)); + JL_LOCK(&callee->def.method->writelock); int found = 0; // TODO: use jl_cache_type_(invokesig) like cache_method does to save memory if (!callee->backedges) { @@ -1843,8 +1855,9 @@ JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, } // add a backedge from a non-existent signature to caller -JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_value_t *caller) +JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_code_instance_t *caller) { + assert(jl_is_code_instance(caller)); JL_LOCK(&mt->writelock); if (!mt->backedges) { // lazy-init the backedges array @@ -1857,7 +1870,7 @@ JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *t // check if the edge is already present and avoid adding a duplicate size_t i, l = jl_array_nrows(mt->backedges); for (i = 1; i < l; i += 2) { - if (jl_array_ptr_ref(mt->backedges, i) == caller) { + if (jl_array_ptr_ref(mt->backedges, i) == (jl_value_t*)caller) { if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) { JL_UNLOCK(&mt->writelock); return; @@ -1867,7 +1880,7 @@ JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *t // reuse an already cached instance of this type, if possible // TODO: use jl_cache_type_(tt) like cache_method does, instead of this linear scan? 
for (i = 1; i < l; i += 2) { - if (jl_array_ptr_ref(mt->backedges, i) != caller) { + if (jl_array_ptr_ref(mt->backedges, i) != (jl_value_t*)caller) { if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) { typ = jl_array_ptr_ref(mt->backedges, i - 1); break; @@ -1875,7 +1888,7 @@ JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *t } } jl_array_ptr_1d_push(mt->backedges, typ); - jl_array_ptr_1d_push(mt->backedges, caller); + jl_array_ptr_1d_push(mt->backedges, (jl_value_t*)caller); } JL_UNLOCK(&mt->writelock); } @@ -2161,6 +2174,7 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry) size_t ins = 0; for (i = 1; i < na; i += 2) { jl_value_t *backedgetyp = backedges[i - 1]; + JL_GC_PROMISE_ROOTED(backedgetyp); int missing = 0; if (jl_type_intersection2(backedgetyp, (jl_value_t*)type, &isect, &isect2)) { // See if the intersection was actually already fully @@ -2189,8 +2203,9 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry) } } if (missing) { - jl_method_instance_t *backedge = (jl_method_instance_t*)backedges[i]; - invalidate_method_instance(backedge, max_world, 0); + jl_code_instance_t *backedge = (jl_code_instance_t*)backedges[i]; + JL_GC_PROMISE_ROOTED(backedge); + invalidate_code_instance(backedge, max_world, 0); invalidated = 1; if (_jl_debug_method_invalidation) jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)backedgetyp); @@ -2253,13 +2268,14 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry) if (backedges) { size_t ib = 0, insb = 0, nb = jl_array_nrows(backedges); jl_value_t *invokeTypes; - jl_method_instance_t *caller; + jl_code_instance_t *caller; while (ib < nb) { ib = get_next_edge(backedges, ib, &invokeTypes, &caller); + JL_GC_PROMISE_ROOTED(caller); // propagated by get_next_edge from backedges int replaced_edge; if (invokeTypes) { // n.b. 
normally we must have mi.specTypes <: invokeTypes <: m.sig (though it might not strictly hold), so we only need to check the other subtypes - if (jl_egal(invokeTypes, caller->def.method->sig)) + if (jl_egal(invokeTypes, caller->def->def.method->sig)) replaced_edge = 0; // if invokeTypes == m.sig, then the only way to change this invoke is to replace the method itself else replaced_edge = jl_subtype(invokeTypes, type) && is_replacing(ambig, type, m, d, n, invokeTypes, NULL, morespec); @@ -2268,7 +2284,7 @@ void jl_method_table_activate(jl_methtable_t *mt, jl_typemap_entry_t *newentry) replaced_edge = replaced_dispatch; } if (replaced_edge) { - invalidate_method_instance(caller, max_world, 1); + invalidate_code_instance(caller, max_world, 1); invalidated = 1; } else { @@ -2685,8 +2701,10 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t jl_code_instance_t *codeinst2 = jl_compile_method_internal(mi2, world); jl_code_instance_t *codeinst = jl_get_method_inferred( mi, codeinst2->rettype, - jl_atomic_load_relaxed(&codeinst2->min_world), jl_atomic_load_relaxed(&codeinst2->max_world), - jl_atomic_load_relaxed(&codeinst2->debuginfo)); + jl_atomic_load_relaxed(&codeinst2->min_world), + jl_atomic_load_relaxed(&codeinst2->max_world), + jl_atomic_load_relaxed(&codeinst2->debuginfo), + jl_atomic_load_relaxed(&codeinst2->edges)); if (jl_atomic_load_relaxed(&codeinst->invoke) == NULL) { codeinst->rettype_const = codeinst2->rettype_const; jl_gc_wb(codeinst, codeinst->rettype_const); @@ -2743,7 +2761,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t if (unspec && (unspec_invoke = jl_atomic_load_acquire(&unspec->invoke))) { jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, - 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL); + 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL, NULL); codeinst->rettype_const = unspec->rettype_const; uint8_t specsigflags; jl_callptr_t invoke; @@ -2768,7 +2786,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t if (!jl_code_requires_compiler(src, 0)) { jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, - 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL); + 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL, NULL); jl_atomic_store_release(&codeinst->invoke, jl_fptr_interpret_call); jl_mi_cache_insert(mi, codeinst); record_precompile_statement(mi, 0, 0); @@ -2828,7 +2846,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t jl_method_instance_t *unspec = jl_get_unspecialized(def); if (unspec == NULL) unspec = mi; - jl_code_instance_t *ucache = jl_get_method_inferred(unspec, (jl_value_t*)jl_any_type, 1, ~(size_t)0, NULL); + jl_code_instance_t *ucache = jl_get_method_inferred(unspec, (jl_value_t*)jl_any_type, 1, ~(size_t)0, NULL, NULL); // ask codegen to make the fptr for unspec jl_callptr_t ucache_invoke = jl_atomic_load_acquire(&ucache->invoke); if (ucache_invoke == NULL) { @@ -2848,7 +2866,7 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t } codeinst = jl_new_codeinst(mi, jl_nothing, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, - 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL); + 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL, NULL); codeinst->rettype_const = ucache->rettype_const; uint8_t specsigflags; jl_callptr_t invoke; diff --git a/src/ircode.c b/src/ircode.c index 
873d33d2d7523..bec8d46513eef 100644 --- a/src/ircode.c +++ b/src/ircode.c @@ -25,6 +25,7 @@ typedef struct { ios_t *s; // method we're compressing for jl_method_t *method; + jl_svec_t *edges; jl_ptls_t ptls; uint8_t relocatability; } jl_ircode_state; @@ -72,7 +73,7 @@ static void literal_val_id(rle_reference *rr, jl_ircode_state *s, jl_value_t *v) { jl_array_t *rs = s->method->roots; int i, l = jl_array_nrows(rs); - if (jl_is_symbol(v) || jl_is_concrete_type(v)) { + if (jl_is_symbol(v) || jl_is_concrete_type(v)) { // TODO: or more generally, any ptr-egal value for (i = 0; i < l; i++) { if (jl_array_ptr_ref(rs, i) == v) return tagged_root(rr, s, i); @@ -84,6 +85,12 @@ static void literal_val_id(rle_reference *rr, jl_ircode_state *s, jl_value_t *v) return tagged_root(rr, s, i); } } + for (size_t i = 0; i < jl_svec_len(s->edges); i++) { + if (jl_svecref(s->edges, i) == v) { + rr->index = i; + return; + } + } jl_add_method_root(s->method, jl_precompile_toplevel_module, v); return tagged_root(rr, s, jl_array_nrows(rs) - 1); } @@ -102,13 +109,24 @@ static void jl_encode_int32(jl_ircode_state *s, int32_t x) static void jl_encode_as_indexed_root(jl_ircode_state *s, jl_value_t *v) { - rle_reference rr; + rle_reference rr = {.key = -1, .index = -1}; if (jl_is_string(v)) v = jl_as_global_root(v, 1); literal_val_id(&rr, s, v); int id = rr.index; assert(id >= 0); + if (rr.key == -1) { + if (id <= UINT8_MAX) { + write_uint8(s->s, TAG_EDGE); + write_uint8(s->s, id); + } + else { + write_uint8(s->s, TAG_LONG_EDGE); + write_uint32(s->s, id); + } + return; + } if (rr.key) { write_uint8(s->s, TAG_RELOC_METHODROOT); write_uint64(s->s, rr.key); @@ -689,6 +707,10 @@ static jl_value_t *jl_decode_value(jl_ircode_state *s) JL_GC_DISABLED return lookup_root(s->method, 0, read_uint8(s->s)); case TAG_LONG_METHODROOT: return lookup_root(s->method, 0, read_uint32(s->s)); + case TAG_EDGE: + return jl_svecref(s->edges, read_uint8(s->s)); + case TAG_LONG_EDGE: + return jl_svecref(s->edges, read_uint32(s->s)); case TAG_SVEC: JL_FALLTHROUGH; case TAG_LONG_SVEC: return jl_decode_value_svec(s, tag); case TAG_COMMONSYM: @@ -865,9 +887,11 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) m->roots = jl_alloc_vec_any(0); jl_gc_wb(m, m->roots); } + jl_value_t *edges = code->edges; jl_ircode_state s = { &dest, m, + (!isdef && jl_is_svec(edges)) ? (jl_svec_t*)edges : jl_emptysvec, jl_current_task->ptls, 1 }; @@ -950,6 +974,7 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t jl_ircode_state s = { &src, m, + metadata == NULL ? NULL : jl_atomic_load_relaxed(&metadata->edges), jl_current_task->ptls, 1 }; @@ -1015,6 +1040,8 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t jl_gc_wb(code, code->rettype); code->min_world = jl_atomic_load_relaxed(&metadata->min_world); code->max_world = jl_atomic_load_relaxed(&metadata->max_world); + code->edges = (jl_value_t*)s.edges; + jl_gc_wb(code, s.edges); } return code; diff --git a/src/jltypes.c b/src/jltypes.c index 11f1d11a14edc..71eaa003d7d4a 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3638,7 +3638,7 @@ void jl_init_types(void) JL_GC_DISABLED jl_code_instance_type = jl_new_datatype(jl_symbol("CodeInstance"), core, jl_any_type, jl_emptysvec, - jl_perm_symsvec(17, + jl_perm_symsvec(18, "def", "owner", "next", @@ -3648,13 +3648,14 @@ void jl_init_types(void) JL_GC_DISABLED "exctype", "rettype_const", "inferred", - "debuginfo", // TODO: rename to edges? 
+ "debuginfo", + "edges", //"absolute_max", "ipo_purity_bits", "analysis_results", "specsigflags", "precompile", "relocatability", "invoke", "specptr"), // function object decls - jl_svec(17, + jl_svec(18, jl_method_instance_type, jl_any_type, jl_any_type, @@ -3665,6 +3666,7 @@ void jl_init_types(void) JL_GC_DISABLED jl_any_type, jl_any_type, jl_debuginfo_type, + jl_simplevector_type, //jl_bool_type, jl_uint32_type, jl_any_type, @@ -3675,8 +3677,8 @@ void jl_init_types(void) JL_GC_DISABLED jl_emptysvec, 0, 1, 1); jl_svecset(jl_code_instance_type->types, 2, jl_code_instance_type); - const static uint32_t code_instance_constfields[1] = { 0b00000100011100011 }; // Set fields 1, 2, 6-8, 12 as const - const static uint32_t code_instance_atomicfields[1] = { 0b11011011100011100 }; // Set fields 3-5, 9, 10, 11, 13-14, 16-17 as atomic + const static uint32_t code_instance_constfields[1] = { 0b000001000011100011 }; // Set fields 1, 2, 6-8, 13 as const + const static uint32_t code_instance_atomicfields[1] = { 0b110110111100011100 }; // Set fields 3-5, 9-12, 14-15, 17-18 as atomic // Fields 4-5 are only operated on by construction and deserialization, so are effectively const at runtime // Fields ipo_purity_bits and analysis_results are not currently threadsafe or reliable, as they get mutated after optimization, but are not declared atomic // and there is no way to tell (during inference) if their value is finalized yet (to wait for them to be narrowed if applicable) @@ -3826,8 +3828,8 @@ void jl_init_types(void) JL_GC_DISABLED jl_svecset(jl_method_type->types, 13, jl_method_instance_type); //jl_svecset(jl_debuginfo_type->types, 0, jl_method_instance_type); // union(jl_method_instance_type, jl_method_type, jl_symbol_type) jl_svecset(jl_method_instance_type->types, 4, jl_code_instance_type); - jl_svecset(jl_code_instance_type->types, 15, jl_voidpointer_type); jl_svecset(jl_code_instance_type->types, 16, jl_voidpointer_type); + jl_svecset(jl_code_instance_type->types, 17, jl_voidpointer_type); jl_svecset(jl_binding_type->types, 0, jl_globalref_type); jl_svecset(jl_binding_partition_type->types, 3, jl_binding_partition_type); diff --git a/src/julia.h b/src/julia.h index a710192d5756c..bfb641d38374b 100644 --- a/src/julia.h +++ b/src/julia.h @@ -408,7 +408,7 @@ struct _jl_method_instance_t { } def; // pointer back to the context for this code jl_value_t *specTypes; // argument types this was specialized for jl_svec_t *sparam_vals; // static parameter values, indexed by def.method->sig - jl_array_t *backedges; // list of method-instances which call this method-instance; `invoke` records (invokesig, caller) pairs + jl_array_t *backedges; // list of code-instances which call this method-instance; `invoke` records (invokesig, caller) pairs _Atomic(struct _jl_code_instance_t*) cache; uint8_t cache_with_orig; // !cache_with_specTypes @@ -453,6 +453,7 @@ typedef struct _jl_code_instance_t { // - null, indicating that inference was not yet completed or did not succeed _Atomic(jl_value_t *) inferred; _Atomic(jl_debuginfo_t *) debuginfo; // stored information about edges from this object (set once, with a happens-before both source and invoke) + _Atomic(jl_svec_t *) edges; // forward edge info //TODO: uint8_t absolute_max; // whether true max world is unknown // purity results @@ -789,7 +790,7 @@ typedef struct _jl_methtable_t { _Atomic(jl_typemap_t*) cache; _Atomic(intptr_t) max_args; // max # of non-vararg arguments in a signature jl_module_t *module; // sometimes used for debug printing - jl_array_t *backedges; 
// (sig, caller::MethodInstance) pairs + jl_array_t *backedges; // (sig, caller::CodeInstance) pairs jl_mutex_t writelock; uint8_t offs; // 0, or 1 to skip splitting typemap on first (function) argument uint8_t frozen; // whether this accepts adding new methods diff --git a/src/julia_internal.h b/src/julia_internal.h index ade5940f30687..9a8750bbc2500 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -675,7 +675,7 @@ JL_DLLEXPORT jl_code_info_t *jl_gdbcodetyped1(jl_method_instance_t *mi, size_t w JL_DLLEXPORT jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *meth JL_PROPAGATES_ROOT, size_t world); JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_value_t *rettype, - size_t min_world, size_t max_world, jl_debuginfo_t *edges); + size_t min_world, size_t max_world, jl_debuginfo_t *di, jl_svec_t *edges); JL_DLLEXPORT jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT); JL_DLLEXPORT void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_callptr_t *invoke, void **specptr, int waitcompile) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *match, size_t world, size_t min_valid, size_t max_valid, int mt_cache); @@ -687,7 +687,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst( jl_value_t *inferred_const, jl_value_t *inferred, int32_t const_flags, size_t min_world, size_t max_world, uint32_t effects, jl_value_t *analysis_results, - uint8_t relocatability, jl_debuginfo_t *edges /* , int absolute_max*/); + uint8_t relocatability, jl_debuginfo_t *di, jl_svec_t *edges /* , int absolute_max*/); JL_DLLEXPORT const char *jl_debuginfo_file(jl_debuginfo_t *debuginfo) JL_NOTSAFEPOINT; JL_DLLEXPORT const char *jl_debuginfo_file1(jl_debuginfo_t *debuginfo) JL_NOTSAFEPOINT; @@ -705,9 +705,9 @@ JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void); JL_DLLEXPORT void jl_resolve_globals_in_ir(jl_array_t *stmts, jl_module_t *m, jl_svec_t *sparam_vals, int binding_effects); -int get_next_edge(jl_array_t *list, int i, jl_value_t** invokesig, jl_method_instance_t **caller) JL_NOTSAFEPOINT; -int set_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_method_instance_t *caller); -void push_edge(jl_array_t *list, jl_value_t *invokesig, jl_method_instance_t *caller); +int get_next_edge(jl_array_t *list, int i, jl_value_t** invokesig, jl_code_instance_t **caller) JL_NOTSAFEPOINT; +int set_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_code_instance_t *caller); +void push_edge(jl_array_t *list, jl_value_t *invokesig, jl_code_instance_t *caller); JL_DLLEXPORT void jl_add_method_root(jl_method_t *m, jl_module_t *mod, jl_value_t* root); void jl_append_method_roots(jl_method_t *m, uint64_t modid, jl_array_t* roots); @@ -727,6 +727,7 @@ JL_DLLEXPORT void jl_typeassert(jl_value_t *x, jl_value_t *t); #define JL_CALLABLE(name) \ JL_DLLEXPORT jl_value_t *name(jl_value_t *F, jl_value_t **args, uint32_t nargs) +JL_CALLABLE(jl_f_svec); JL_CALLABLE(jl_f_tuple); JL_CALLABLE(jl_f_intrinsic_call); JL_CALLABLE(jl_f_opaque_closure_call); @@ -1185,8 +1186,8 @@ JL_DLLEXPORT jl_value_t *jl_methtable_lookup(jl_methtable_t *mt JL_PROPAGATES_RO JL_DLLEXPORT jl_method_instance_t *jl_specializations_get_linfo( jl_method_t *m JL_PROPAGATES_ROOT, jl_value_t *type, jl_svec_t *sparams); jl_method_instance_t *jl_specializations_get_or_insert(jl_method_instance_t *mi_ins); -JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t 
*callee, jl_value_t *invokesig, jl_method_instance_t *caller); -JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_value_t *caller); +JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_value_t *invokesig, jl_code_instance_t *caller); +JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_code_instance_t *caller); JL_DLLEXPORT void jl_mi_cache_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMENT, jl_code_instance_t *ci JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED); JL_DLLEXPORT int jl_mi_try_insert(jl_method_instance_t *mi JL_ROOTING_ARGUMENT, diff --git a/src/method.c b/src/method.c index 629816319b334..8e3bb7d0060b7 100644 --- a/src/method.c +++ b/src/method.c @@ -650,10 +650,10 @@ JL_DLLEXPORT jl_code_info_t *jl_new_code_info_uninit(void) src->slotnames = NULL; src->slottypes = jl_nothing; src->rettype = (jl_value_t*)jl_any_type; + src->edges = (jl_value_t*)jl_emptysvec; src->parent = (jl_method_instance_t*)jl_nothing; src->min_world = 1; src->max_world = ~(size_t)0; - src->edges = jl_nothing; src->propagate_inbounds = 0; src->has_fcall = 0; src->nospecializeinfer = 0; @@ -754,9 +754,10 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *mi, size_t assert(jl_is_method(def)); jl_code_info_t *func = NULL; jl_value_t *ex = NULL; + jl_value_t *kind = NULL; jl_code_info_t *uninferred = NULL; jl_code_instance_t *ci = NULL; - JL_GC_PUSH4(&ex, &func, &uninferred, &ci); + JL_GC_PUSH5(&ex, &func, &uninferred, &ci, &kind); jl_task_t *ct = jl_current_task; int last_lineno = jl_lineno; int last_in = ct->ptls->in_pure_callback; @@ -792,6 +793,7 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *mi, size_t // but currently our isva determination is non-syntactic func->isva = def->isva; } + ex = NULL; // If this generated function has an opaque closure, cache it for // correctness of method identity. 
In particular, other methods that call @@ -815,7 +817,7 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *mi, size_t } } - if (func->edges == jl_nothing && func->max_world == ~(size_t)0) { + if ((func->edges == jl_nothing || func->edges == (jl_value_t*)jl_emptysvec) && func->max_world == ~(size_t)0) { if (func->min_world != 1) { jl_error("Generated function result with `edges == nothing` and `max_world == typemax(UInt)` must have `min_world == 1`"); } @@ -824,27 +826,41 @@ JL_DLLEXPORT jl_code_info_t *jl_code_for_staged(jl_method_instance_t *mi, size_t if (cache || needs_cache_for_correctness) { uninferred = (jl_code_info_t*)jl_copy_ast((jl_value_t*)func); ci = jl_new_codeinst_for_uninferred(mi, uninferred); - - if (uninferred->edges != jl_nothing) { - // N.B.: This needs to match `store_backedges` on the julia side - jl_array_t *edges = (jl_array_t*)uninferred->edges; - for (size_t i = 0; i < jl_array_len(edges); ++i) { - jl_value_t *kind = jl_array_ptr_ref(edges, i); - if (jl_is_method_instance(kind)) { - jl_method_instance_add_backedge((jl_method_instance_t*)kind, jl_nothing, mi); - } else if (jl_is_mtable(kind)) { - jl_method_table_add_backedge((jl_methtable_t*)kind, jl_array_ptr_ref(edges, ++i), (jl_value_t*)mi); - } else { - jl_method_instance_add_backedge((jl_method_instance_t*)jl_array_ptr_ref(edges, ++i), kind, mi); - } - } - } - jl_code_instance_t *cached_ci = jl_cache_uninferred(mi, cache_ci, world, ci); if (cached_ci != ci) { func = (jl_code_info_t*)jl_copy_ast(jl_atomic_load_relaxed(&cached_ci->inferred)); assert(jl_is_code_info(func)); } + else if (uninferred->edges != jl_nothing) { + // N.B.: This needs to match `store_backedges` on the julia side + jl_value_t *edges = uninferred->edges; + size_t l; + jl_value_t **data; + if (jl_is_svec(edges)) { + l = jl_svec_len(edges); + data = jl_svec_data(edges); + } + else { + l = jl_array_dim0(edges); + data = jl_array_data(edges, jl_value_t*); + } + for (size_t i = 0; i < l; ) { + kind = data[i++]; + if (jl_is_method_instance(kind)) { + jl_method_instance_add_backedge((jl_method_instance_t*)kind, jl_nothing, ci); + } + else if (jl_is_mtable(kind)) { + assert(i < l); + ex = data[i++]; + jl_method_table_add_backedge((jl_methtable_t*)kind, ex, ci); + } + else { + assert(i < l); + ex = data[i++]; + jl_method_instance_add_backedge((jl_method_instance_t*)ex, kind, ci); + } + } + } if (cache) *cache = cached_ci; } @@ -1056,27 +1072,27 @@ JL_DLLEXPORT jl_method_t *jl_new_method_uninit(jl_module_t *module) // it will be the signature supplied in an `invoke` call. // If you don't need `invokesig`, you can set it to NULL on input. // Initialize iteration with `i = 0`. Returns `i` for the next backedge to be extracted. 
-int get_next_edge(jl_array_t *list, int i, jl_value_t** invokesig, jl_method_instance_t **caller) JL_NOTSAFEPOINT +int get_next_edge(jl_array_t *list, int i, jl_value_t** invokesig, jl_code_instance_t **caller) JL_NOTSAFEPOINT { jl_value_t *item = jl_array_ptr_ref(list, i); - if (jl_is_method_instance(item)) { - // Not an `invoke` call, it's just the MethodInstance + if (jl_is_code_instance(item)) { + // Not an `invoke` call, it's just the CodeInstance if (invokesig != NULL) *invokesig = NULL; - *caller = (jl_method_instance_t*)item; + *caller = (jl_code_instance_t*)item; return i + 1; } assert(jl_is_type(item)); // An `invoke` call, it's a (sig, MethodInstance) pair if (invokesig != NULL) *invokesig = item; - *caller = (jl_method_instance_t*)jl_array_ptr_ref(list, i + 1); + *caller = (jl_code_instance_t*)jl_array_ptr_ref(list, i + 1); if (*caller) - assert(jl_is_method_instance(*caller)); + assert(jl_is_code_instance(*caller)); return i + 2; } -int set_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_method_instance_t *caller) +int set_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_code_instance_t *caller) { if (invokesig) jl_array_ptr_set(list, i++, invokesig); @@ -1084,7 +1100,7 @@ int set_next_edge(jl_array_t *list, int i, jl_value_t *invokesig, jl_method_inst return i; } -void push_edge(jl_array_t *list, jl_value_t *invokesig, jl_method_instance_t *caller) +void push_edge(jl_array_t *list, jl_value_t *invokesig, jl_code_instance_t *caller) { if (invokesig) jl_array_ptr_1d_push(list, invokesig); diff --git a/src/opaque_closure.c b/src/opaque_closure.c index 9fe36f32d2030..65773f88a3951 100644 --- a/src/opaque_closure.c +++ b/src/opaque_closure.c @@ -113,8 +113,8 @@ static jl_opaque_closure_t *new_opaque_closure(jl_tupletype_t *argt, jl_value_t if (specptr == NULL) { jl_method_instance_t *mi_generic = jl_specializations_get_linfo(jl_opaque_closure_method, sigtype, jl_emptysvec); - // OC wrapper methods are not world dependent - ci = jl_get_method_inferred(mi_generic, selected_rt, 1, ~(size_t)0, NULL); + // OC wrapper methods are not world dependent and have no edges or other info + ci = jl_get_method_inferred(mi_generic, selected_rt, 1, ~(size_t)0, NULL, NULL); if (!jl_atomic_load_acquire(&ci->invoke)) jl_compile_codeinst(ci); specptr = jl_atomic_load_relaxed(&ci->specptr.fptr); @@ -145,7 +145,8 @@ JL_DLLEXPORT jl_opaque_closure_t *jl_new_opaque_closure_from_code_info(jl_tuplet { jl_value_t *root = NULL, *sigtype = NULL; jl_code_instance_t *inst = NULL; - JL_GC_PUSH3(&root, &sigtype, &inst); + jl_svec_t *edges = NULL; + JL_GC_PUSH4(&root, &sigtype, &inst, &edges); root = jl_box_long(lineno); root = jl_new_struct(jl_linenumbernode_type, root, file); jl_method_t *meth = jl_make_opaque_closure_method(mod, jl_nothing, nargs, root, ci, isva, isinferred); @@ -159,8 +160,11 @@ JL_DLLEXPORT jl_opaque_closure_t *jl_new_opaque_closure_from_code_info(jl_tuplet jl_value_t *argslotty = jl_array_ptr_ref(ci->slottypes, 0); sigtype = jl_argtype_with_function_type(argslotty, (jl_value_t*)argt); jl_method_instance_t *mi = jl_specializations_get_linfo((jl_method_t*)root, sigtype, jl_emptysvec); + edges = (jl_svec_t*)ci->edges; + if (!jl_is_svec(edges)) + edges = jl_emptysvec; // OC doesn't really have edges, so just drop them for now inst = jl_new_codeinst(mi, jl_nothing, rt_ub, (jl_value_t*)jl_any_type, NULL, (jl_value_t*)ci, - 0, world, world, 0, jl_nothing, 0, ci->debuginfo); + 0, world, world, 0, jl_nothing, 0, ci->debuginfo, edges); jl_mi_cache_insert(mi, inst); } diff 
--git a/src/serialize.h b/src/serialize.h index 3d3eb4df5e862..3aa82a1d09a9b 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -23,50 +23,52 @@ extern "C" { #define TAG_LONG_PHINODE 15 #define TAG_LONG_PHICNODE 16 #define TAG_METHODROOT 17 -#define TAG_STRING 18 -#define TAG_SHORT_INT64 19 -#define TAG_SHORT_GENERAL 20 -#define TAG_CNULL 21 -#define TAG_ARRAY1D 22 -#define TAG_SINGLETON 23 -#define TAG_MODULE 24 -#define TAG_TVAR 25 -#define TAG_METHOD_INSTANCE 26 -#define TAG_METHOD 27 -#define TAG_CODE_INSTANCE 28 -#define TAG_COMMONSYM 29 -#define TAG_NEARBYGLOBAL 30 -#define TAG_GLOBALREF 31 -#define TAG_CORE 32 -#define TAG_BASE 33 -#define TAG_BITYPENAME 34 -#define TAG_NEARBYMODULE 35 -#define TAG_INT32 36 -#define TAG_INT64 37 -#define TAG_UINT8 38 -#define TAG_VECTORTY 39 -#define TAG_PTRTY 40 -#define TAG_LONG_SSAVALUE 41 -#define TAG_LONG_METHODROOT 42 -#define TAG_SHORTER_INT64 43 -#define TAG_SHORT_INT32 44 -#define TAG_CALL1 45 -#define TAG_CALL2 46 -#define TAG_SHORT_BACKREF 47 -#define TAG_BACKREF 48 -#define TAG_UNIONALL 49 -#define TAG_GOTONODE 50 -#define TAG_QUOTENODE 51 -#define TAG_GENERAL 52 -#define TAG_GOTOIFNOT 53 -#define TAG_RETURNNODE 54 -#define TAG_ARGUMENT 55 -#define TAG_RELOC_METHODROOT 56 -#define TAG_BINDING 57 -#define TAG_MEMORYT 58 -#define TAG_ENTERNODE 59 - -#define LAST_TAG 59 +#define TAG_EDGE 18 +#define TAG_STRING 19 +#define TAG_SHORT_INT64 20 +#define TAG_SHORT_GENERAL 21 +#define TAG_CNULL 22 +#define TAG_ARRAY1D 23 +#define TAG_SINGLETON 24 +#define TAG_MODULE 25 +#define TAG_TVAR 26 +#define TAG_METHOD_INSTANCE 27 +#define TAG_METHOD 28 +#define TAG_CODE_INSTANCE 29 +#define TAG_COMMONSYM 30 +#define TAG_NEARBYGLOBAL 31 +#define TAG_GLOBALREF 32 +#define TAG_CORE 33 +#define TAG_BASE 34 +#define TAG_BITYPENAME 35 +#define TAG_NEARBYMODULE 36 +#define TAG_INT32 37 +#define TAG_INT64 38 +#define TAG_UINT8 39 +#define TAG_VECTORTY 40 +#define TAG_PTRTY 41 +#define TAG_LONG_SSAVALUE 42 +#define TAG_LONG_METHODROOT 43 +#define TAG_LONG_EDGE 44 +#define TAG_SHORTER_INT64 45 +#define TAG_SHORT_INT32 46 +#define TAG_CALL1 47 +#define TAG_CALL2 48 +#define TAG_SHORT_BACKREF 49 +#define TAG_BACKREF 50 +#define TAG_UNIONALL 51 +#define TAG_GOTONODE 52 +#define TAG_QUOTENODE 53 +#define TAG_GENERAL 54 +#define TAG_GOTOIFNOT 55 +#define TAG_RETURNNODE 56 +#define TAG_ARGUMENT 57 +#define TAG_RELOC_METHODROOT 58 +#define TAG_BINDING 59 +#define TAG_MEMORYT 60 +#define TAG_ENTERNODE 61 + +#define LAST_TAG 61 #define write_uint8(s, n) ios_putc((n), (s)) #define read_uint8(s) ((uint8_t)ios_getc((s))) diff --git a/src/staticdata.c b/src/staticdata.c index af3477a25128e..0d609db03aebc 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -548,9 +548,6 @@ typedef struct { jl_array_t *link_ids_gvars; jl_array_t *link_ids_external_fnvars; jl_ptls_t ptls; - // Set (implemented has a hasmap of MethodInstances to themselves) of which MethodInstances have (forward) edges - // to other MethodInstances. 
- htable_t callers_with_edges; jl_image_t *image; int8_t incremental; } jl_serializer_state; @@ -1767,6 +1764,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED if (s->incremental) { if (jl_atomic_load_relaxed(&ci->max_world) == ~(size_t)0) { if (jl_atomic_load_relaxed(&newci->min_world) > 1) { + //assert(jl_atomic_load_relaxed(&ci->edges) != jl_emptysvec); // some code (such as !==) might add a method lookup restriction but not keep the edges jl_atomic_store_release(&newci->min_world, ~(size_t)0); jl_atomic_store_release(&newci->max_world, WORLD_AGE_REVALIDATION_SENTINEL); arraylist_push(&s->fixup_objs, (void*)reloc_offset); @@ -2801,25 +2799,21 @@ JL_DLLEXPORT jl_value_t *jl_as_global_root(jl_value_t *val, int insert) } static void jl_prepare_serialization_data(jl_array_t *mod_array, jl_array_t *newly_inferred, uint64_t worklist_key, - /* outputs */ jl_array_t **extext_methods, jl_array_t **new_ext_cis, - jl_array_t **method_roots_list, jl_array_t **ext_targets, jl_array_t **edges) + /* outputs */ jl_array_t **extext_methods JL_REQUIRE_ROOTED_SLOT, + jl_array_t **new_ext_cis JL_REQUIRE_ROOTED_SLOT, + jl_array_t **method_roots_list JL_REQUIRE_ROOTED_SLOT, + jl_array_t **edges JL_REQUIRE_ROOTED_SLOT) { // extext_methods: [method1, ...], worklist-owned "extending external" methods added to functions owned by modules outside the worklist - // ext_targets: [invokesig1, callee1, matches1, ...] non-worklist callees of worklist-owned methods - // ordinary dispatch: invokesig=NULL, callee is MethodInstance - // `invoke` dispatch: invokesig is signature, callee is MethodInstance - // abstract call: callee is signature - // edges: [caller1, ext_targets_indexes1, ...] for worklist-owned methods calling external methods - assert(edges_map == NULL); + // edges: [caller1, ext_targets, ...] for worklist-owned methods calling external methods // Save the inferred code from newly inferred, external methods *new_ext_cis = queue_external_cis(newly_inferred); // Collect method extensions and edges data - JL_GC_PUSH1(&edges_map); - if (edges) - edges_map = jl_alloc_memory_any(0); *extext_methods = jl_alloc_vec_any(0); + internal_methods = jl_alloc_vec_any(0); + JL_GC_PUSH1(&internal_methods); jl_collect_methtable_from_mod(jl_type_type_mt, *extext_methods); jl_collect_methtable_from_mod(jl_nonfunction_mt, *extext_methods); size_t i, len = jl_array_len(mod_array); @@ -2832,18 +2826,14 @@ static void jl_prepare_serialization_data(jl_array_t *mod_array, jl_array_t *new if (edges) { size_t world = jl_atomic_load_acquire(&jl_world_counter); - jl_collect_missing_backedges(jl_type_type_mt); - jl_collect_missing_backedges(jl_nonfunction_mt); - // jl_collect_extext_methods_from_mod and jl_collect_missing_backedges also accumulate data in callers_with_edges. - // Process this to extract `edges` and `ext_targets`. 
- *ext_targets = jl_alloc_vec_any(0); - *edges = jl_alloc_vec_any(0); + // Extract `new_ext_cis` and `edges` now (from info prepared by jl_collect_methcache_from_mod) *method_roots_list = jl_alloc_vec_any(0); // Collect the new method roots for external specializations jl_collect_new_roots(&relocatable_ext_cis, *method_roots_list, *new_ext_cis, worklist_key); - jl_collect_edges(*edges, *ext_targets, *new_ext_cis, world); + *edges = jl_alloc_vec_any(0); + jl_collect_internal_cis(*edges, world); } - assert(edges_map == NULL); // jl_collect_edges clears this when done + internal_methods = NULL; JL_GC_POP(); } @@ -2852,7 +2842,7 @@ static void jl_prepare_serialization_data(jl_array_t *mod_array, jl_array_t *new static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, jl_array_t *worklist, jl_array_t *extext_methods, jl_array_t *new_ext_cis, jl_array_t *method_roots_list, - jl_array_t *ext_targets, jl_array_t *edges) JL_GC_DISABLED + jl_array_t *edges) JL_GC_DISABLED { htable_new(&field_replace, 0); htable_new(&bits_replace, 0); @@ -2972,7 +2962,6 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, s.link_ids_gctags = jl_alloc_array_1d(jl_array_int32_type, 0); s.link_ids_gvars = jl_alloc_array_1d(jl_array_int32_type, 0); s.link_ids_external_fnvars = jl_alloc_array_1d(jl_array_int32_type, 0); - htable_new(&s.callers_with_edges, 0); jl_value_t **const*const tags = get_tags(); // worklist == NULL ? get_tags() : NULL; @@ -3012,12 +3001,9 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, // Queue the worklist itself as the first item we serialize jl_queue_for_serialization(&s, worklist); jl_queue_for_serialization(&s, jl_module_init_order); - // Classify the CodeInstances with respect to their need for validation - classify_callers(&s.callers_with_edges, edges); } // step 1.1: as needed, serialize the data needed for insertion into the running system if (extext_methods) { - assert(ext_targets); assert(edges); // Queue method extensions jl_queue_for_serialization(&s, extext_methods); @@ -3026,7 +3012,6 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, // Queue the new roots jl_queue_for_serialization(&s, method_roots_list); // Queue the edges - jl_queue_for_serialization(&s, ext_targets); jl_queue_for_serialization(&s, edges); } jl_serialize_reachable(&s); @@ -3117,7 +3102,6 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, ); jl_exit(1); } - htable_free(&s.callers_with_edges); // step 3: combine all of the sections into one file assert(ios_pos(f) % JL_CACHE_BYTE_ALIGNMENT == 0); @@ -3206,7 +3190,6 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, jl_write_value(&s, extext_methods); jl_write_value(&s, new_ext_cis); jl_write_value(&s, method_roots_list); - jl_write_value(&s, ext_targets); jl_write_value(&s, edges); } write_uint32(f, jl_array_len(s.link_ids_gctags)); @@ -3297,18 +3280,18 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli } jl_array_t *mod_array = NULL, *extext_methods = NULL, *new_ext_cis = NULL; - jl_array_t *method_roots_list = NULL, *ext_targets = NULL, *edges = NULL; + jl_array_t *method_roots_list = NULL, *edges = NULL; int64_t checksumpos = 0; int64_t checksumpos_ff = 0; int64_t datastartpos = 0; - JL_GC_PUSH6(&mod_array, &extext_methods, &new_ext_cis, &method_roots_list, &ext_targets, &edges); + JL_GC_PUSH5(&mod_array, &extext_methods, &new_ext_cis, &method_roots_list, &edges); if 
(worklist) { mod_array = jl_get_loaded_modules(); // __toplevel__ modules loaded in this session (from Base.loaded_modules_array) // Generate _native_data` if (_native_data != NULL) { jl_prepare_serialization_data(mod_array, newly_inferred, jl_worklist_key(worklist), - &extext_methods, &new_ext_cis, NULL, NULL, NULL); + &extext_methods, &new_ext_cis, NULL, NULL); jl_precompile_toplevel_module = (jl_module_t*)jl_array_ptr_ref(worklist, jl_array_len(worklist)-1); *_native_data = jl_precompile_worklist(worklist, extext_methods, new_ext_cis); jl_precompile_toplevel_module = NULL; @@ -3341,7 +3324,7 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli if (worklist) { htable_new(&relocatable_ext_cis, 0); jl_prepare_serialization_data(mod_array, newly_inferred, jl_worklist_key(worklist), - &extext_methods, &new_ext_cis, &method_roots_list, &ext_targets, &edges); + &extext_methods, &new_ext_cis, &method_roots_list, &edges); if (!emit_split) { write_int32(f, 0); // No clone_targets write_padding(f, LLT_ALIGN(ios_pos(f), JL_CACHE_BYTE_ALIGNMENT) - ios_pos(f)); @@ -3353,7 +3336,7 @@ JL_DLLEXPORT void jl_create_system_image(void **_native_data, jl_array_t *workli } if (_native_data != NULL) native_functions = *_native_data; - jl_save_system_image_to_stream(ff, mod_array, worklist, extext_methods, new_ext_cis, method_roots_list, ext_targets, edges); + jl_save_system_image_to_stream(ff, mod_array, worklist, extext_methods, new_ext_cis, method_roots_list, edges); if (_native_data != NULL) native_functions = NULL; if (worklist) @@ -3443,7 +3426,7 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl /* outputs */ jl_array_t **restored, jl_array_t **init_order, jl_array_t **extext_methods, jl_array_t **internal_methods, jl_array_t **new_ext_cis, jl_array_t **method_roots_list, - jl_array_t **ext_targets, jl_array_t **edges, + jl_array_t **edges, char **base, arraylist_t *ccallable_list, pkgcachesizes *cachesizes) JL_GC_DISABLED { jl_task_t *ct = jl_current_task; @@ -3514,7 +3497,7 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl assert(!ios_eof(f)); s.s = f; uintptr_t offset_restored = 0, offset_init_order = 0, offset_extext_methods = 0, offset_new_ext_cis = 0, offset_method_roots_list = 0; - uintptr_t offset_ext_targets = 0, offset_edges = 0; + uintptr_t offset_edges = 0; if (!s.incremental) { size_t i; for (i = 0; tags[i] != NULL; i++) { @@ -3547,7 +3530,6 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl offset_extext_methods = jl_read_offset(&s); offset_new_ext_cis = jl_read_offset(&s); offset_method_roots_list = jl_read_offset(&s); - offset_ext_targets = jl_read_offset(&s); offset_edges = jl_read_offset(&s); } s.buildid_depmods_idxs = depmod_to_imageidx(depmods); @@ -3574,13 +3556,12 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl uint32_t external_fns_begin = read_uint32(f); jl_read_arraylist(s.s, ccallable_list ? 
ccallable_list : &s.ccallable_list); if (s.incremental) { - assert(restored && init_order && extext_methods && internal_methods && new_ext_cis && method_roots_list && ext_targets && edges); + assert(restored && init_order && extext_methods && internal_methods && new_ext_cis && method_roots_list && edges); *restored = (jl_array_t*)jl_delayed_reloc(&s, offset_restored); *init_order = (jl_array_t*)jl_delayed_reloc(&s, offset_init_order); *extext_methods = (jl_array_t*)jl_delayed_reloc(&s, offset_extext_methods); *new_ext_cis = (jl_array_t*)jl_delayed_reloc(&s, offset_new_ext_cis); *method_roots_list = (jl_array_t*)jl_delayed_reloc(&s, offset_method_roots_list); - *ext_targets = (jl_array_t*)jl_delayed_reloc(&s, offset_ext_targets); *edges = (jl_array_t*)jl_delayed_reloc(&s, offset_edges); *internal_methods = jl_alloc_vec_any(0); } @@ -4021,9 +4002,9 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i arraylist_t ccallable_list; jl_value_t *restored = NULL; - jl_array_t *init_order = NULL, *extext_methods = NULL, *internal_methods = NULL, *new_ext_cis = NULL, *method_roots_list = NULL, *ext_targets = NULL, *edges = NULL; + jl_array_t *init_order = NULL, *extext_methods = NULL, *internal_methods = NULL, *new_ext_cis = NULL, *method_roots_list = NULL, *edges = NULL; jl_svec_t *cachesizes_sv = NULL; - JL_GC_PUSH9(&restored, &init_order, &extext_methods, &internal_methods, &new_ext_cis, &method_roots_list, &ext_targets, &edges, &cachesizes_sv); + JL_GC_PUSH8(&restored, &init_order, &extext_methods, &internal_methods, &new_ext_cis, &method_roots_list, &edges, &cachesizes_sv); { // make a permanent in-memory copy of f (excluding the header) ios_bufmode(f, bm_none); @@ -4048,7 +4029,7 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i ios_static_buffer(f, sysimg, len); pkgcachesizes cachesizes; jl_restore_system_image_from_stream_(f, image, depmods, checksum, (jl_array_t**)&restored, &init_order, &extext_methods, &internal_methods, &new_ext_cis, &method_roots_list, - &ext_targets, &edges, &base, &ccallable_list, &cachesizes); + &edges, &base, &ccallable_list, &cachesizes); JL_SIGATOMIC_END(); // No special processing of `new_ext_cis` is required because recaching handled it @@ -4062,7 +4043,7 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i // allow users to start running in this updated world jl_atomic_store_release(&jl_world_counter, world); // but one of those immediate users is going to be our cache updates - jl_insert_backedges((jl_array_t*)edges, (jl_array_t*)ext_targets, (jl_array_t*)new_ext_cis, world); // restore external backedges (needs to be last) + jl_insert_backedges((jl_array_t*)edges, (jl_array_t*)new_ext_cis, world); // restore external backedges (needs to be last) // now permit more methods to be added again JL_UNLOCK(&world_counter_lock); // reinit ccallables @@ -4078,9 +4059,9 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i jl_svecset(cachesizes_sv, 4, jl_box_long(cachesizes.reloclist)); jl_svecset(cachesizes_sv, 5, jl_box_long(cachesizes.gvarlist)); jl_svecset(cachesizes_sv, 6, jl_box_long(cachesizes.fptrlist)); - restored = (jl_value_t*)jl_svec(8, restored, init_order, extext_methods, + restored = (jl_value_t*)jl_svec(7, restored, init_order, extext_methods, new_ext_cis ? 
(jl_value_t*)new_ext_cis : jl_nothing, - method_roots_list, ext_targets, edges, cachesizes_sv); + method_roots_list, edges, cachesizes_sv); } else { restored = (jl_value_t*)jl_svec(2, restored, init_order); @@ -4095,7 +4076,7 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i static void jl_restore_system_image_from_stream(ios_t *f, jl_image_t *image, uint32_t checksum) { JL_TIMING(LOAD_IMAGE, LOAD_Sysimg); - jl_restore_system_image_from_stream_(f, image, NULL, checksum | ((uint64_t)0xfdfcfbfa << 32), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + jl_restore_system_image_from_stream_(f, image, NULL, checksum | ((uint64_t)0xfdfcfbfa << 32), NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } JL_DLLEXPORT jl_value_t *jl_restore_incremental_from_buf(void* pkgimage_handle, const char *buf, jl_image_t *image, size_t sz, jl_array_t *depmods, int completeinfo, const char *pkgname, int needs_permalloc) diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index 9a7653972ea7c..b69c1edb5429b 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -1,5 +1,5 @@ // inverse of backedges graph (caller=>callees hash) -jl_genericmemory_t *edges_map JL_GLOBALLY_ROOTED = NULL; // rooted for the duration of our uses of this +jl_array_t *internal_methods JL_GLOBALLY_ROOTED = NULL; // rooted for the duration of our uses of this static void write_float64(ios_t *s, double x) JL_NOTSAFEPOINT { @@ -185,9 +185,10 @@ static int has_backedge_to_worklist(jl_method_instance_t *mi, htable_t *visited, size_t i = 0, n = jl_array_nrows(mi->backedges); int cycle = depth; while (i < n) { - jl_method_instance_t *be; + jl_code_instance_t *be; i = get_next_edge(mi->backedges, i, NULL, &be); - int child_found = has_backedge_to_worklist(be, visited, stack); + JL_GC_PROMISE_ROOTED(be); // get_next_edge propagates the edge for us here + int child_found = has_backedge_to_worklist(be->def, visited, stack); if (child_found == 1 || child_found == 2) { // found what we were looking for, so terminate early found = 1; @@ -250,7 +251,7 @@ static jl_array_t *queue_external_cis(jl_array_t *list) continue; jl_method_instance_t *mi = ci->def; jl_method_t *m = mi->def.method; - if (jl_atomic_load_relaxed(&ci->inferred) && jl_is_method(m) && jl_object_in_image((jl_value_t*)m->module)) { + if (ci->owner == jl_nothing && jl_atomic_load_relaxed(&ci->inferred) && jl_is_method(m) && jl_object_in_image((jl_value_t*)m->module)) { int found = has_backedge_to_worklist(mi, &visited, &stack); assert(found == 0 || found == 1 || found == 2); assert(stack.len == 0); @@ -318,82 +319,19 @@ static void jl_collect_new_roots(htable_t *relocatable_ext_cis, jl_array_t *root htable_free(&mset); } -// Create the forward-edge map (caller => callees) -// the intent of these functions is to invert the backedges tree -// for anything that points to a method not part of the worklist -// -// from MethodTables -static void jl_collect_missing_backedges(jl_methtable_t *mt) -{ - jl_array_t *backedges = mt->backedges; - if (backedges) { - size_t i, l = jl_array_nrows(backedges); - for (i = 1; i < l; i += 2) { - jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(backedges, i); - jl_value_t *missing_callee = jl_array_ptr_ref(backedges, i - 1); // signature of abstract callee - jl_array_t *edges = (jl_array_t*)jl_eqtable_get(edges_map, (jl_value_t*)caller, NULL); - if (edges == NULL) { - edges = jl_alloc_vec_any(0); - JL_GC_PUSH1(&edges); - edges_map = jl_eqtable_put(edges_map, 
(jl_value_t*)caller, (jl_value_t*)edges, NULL); - JL_GC_POP(); - } - jl_array_ptr_1d_push(edges, NULL); - jl_array_ptr_1d_push(edges, missing_callee); - } - } -} - - -// from MethodInstances -static void collect_backedges(jl_method_instance_t *callee, int internal) -{ - jl_array_t *backedges = callee->backedges; - if (backedges) { - size_t i = 0, l = jl_array_nrows(backedges); - while (i < l) { - jl_value_t *invokeTypes; - jl_method_instance_t *caller; - i = get_next_edge(backedges, i, &invokeTypes, &caller); - jl_array_t *edges = (jl_array_t*)jl_eqtable_get(edges_map, (jl_value_t*)caller, NULL); - if (edges == NULL) { - edges = jl_alloc_vec_any(0); - JL_GC_PUSH1(&edges); - edges_map = jl_eqtable_put(edges_map, (jl_value_t*)caller, (jl_value_t*)edges, NULL); - JL_GC_POP(); - } - jl_array_ptr_1d_push(edges, invokeTypes); - jl_array_ptr_1d_push(edges, (jl_value_t*)callee); - } - } -} - -// For functions owned by modules not on the worklist, call this on each method. +// For every method: // - if the method is owned by a worklist module, add it to the list of things to be -// fully serialized -// - Collect all backedges (may be needed later when we invert this list). +// verified on reloading +// - if the method is extext, record that it needs to be reinserted later in the method table static int jl_collect_methcache_from_mod(jl_typemap_entry_t *ml, void *closure) { jl_array_t *s = (jl_array_t*)closure; jl_method_t *m = ml->func.method; - if (s && !jl_object_in_image((jl_value_t*)m->module)) { - jl_array_ptr_1d_push(s, (jl_value_t*)m); - } - if (edges_map == NULL) - return 1; - jl_value_t *specializations = jl_atomic_load_relaxed(&m->specializations); - if (!jl_is_svec(specializations)) { - jl_method_instance_t *callee = (jl_method_instance_t*)specializations; - collect_backedges(callee, !s); - } - else { - size_t i, l = jl_svec_len(specializations); - for (i = 0; i < l; i++) { - jl_method_instance_t *callee = (jl_method_instance_t*)jl_svecref(specializations, i); - if ((jl_value_t*)callee != jl_nothing) - collect_backedges(callee, !s); - } + if (!jl_object_in_image((jl_value_t*)m->module)) { + jl_array_ptr_1d_push(internal_methods, (jl_value_t*)m); + if (s) + jl_array_ptr_1d_push(s, (jl_value_t*)m); // extext } return 1; } @@ -401,10 +339,8 @@ static int jl_collect_methcache_from_mod(jl_typemap_entry_t *ml, void *closure) static int jl_collect_methtable_from_mod(jl_methtable_t *mt, void *env) { if (!jl_object_in_image((jl_value_t*)mt)) - env = NULL; // do not collect any methods from here + env = NULL; // mark internal, not extext jl_typemap_visitor(jl_atomic_load_relaxed(&mt->defs), jl_collect_methcache_from_mod, env); - if (env && edges_map) - jl_collect_missing_backedges(mt); return 1; } @@ -416,169 +352,38 @@ static void jl_collect_extext_methods_from_mod(jl_array_t *s, jl_module_t *m) foreach_mtable_in_module(m, jl_collect_methtable_from_mod, s); } -static void jl_record_edges(jl_method_instance_t *caller, arraylist_t *wq, jl_array_t *edges) +static void jl_record_edges(jl_method_instance_t *caller, jl_array_t *edges) { - jl_array_t *callees = NULL; - JL_GC_PUSH2(&caller, &callees); - callees = (jl_array_t*)jl_eqtable_pop(edges_map, (jl_value_t*)caller, NULL, NULL); - if (callees != NULL) { - jl_array_ptr_1d_push(edges, (jl_value_t*)caller); - jl_array_ptr_1d_push(edges, (jl_value_t*)callees); - size_t i, l = jl_array_nrows(callees); - for (i = 1; i < l; i += 2) { - jl_method_instance_t *c = (jl_method_instance_t*)jl_array_ptr_ref(callees, i); - if (c && jl_is_method_instance(c)) { - 
arraylist_push(wq, c); - } - } + jl_code_instance_t *ci = jl_atomic_load_relaxed(&caller->cache); + while (ci != NULL) { + if (jl_atomic_load_relaxed(&ci->edges) && + jl_atomic_load_relaxed(&ci->edges) != jl_emptysvec && + jl_atomic_load_relaxed(&ci->max_world) == ~(size_t)0) + jl_array_ptr_1d_push(edges, (jl_value_t*)ci); + ci = jl_atomic_load_relaxed(&ci->next); } - JL_GC_POP(); } - // Extract `edges` and `ext_targets` from `edges_map` -// `edges` = [caller1, targets_indexes1, ...], the list of methods and their edges -// `ext_targets` is [invokesig1, callee1, matches1, ...], the edges for each target -static void jl_collect_edges(jl_array_t *edges, jl_array_t *ext_targets, jl_array_t *external_cis, size_t world) +// `edges` = [caller1, ...], the list of codeinstances internal to methods +static void jl_collect_internal_cis(jl_array_t *edges, size_t world) { - htable_t external_mis; - htable_new(&external_mis, 0); - if (external_cis) { - for (size_t i = 0; i < jl_array_nrows(external_cis); i++) { - jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(external_cis, i); - jl_method_instance_t *mi = ci->def; - ptrhash_put(&external_mis, (void*)mi, (void*)mi); - } - } - arraylist_t wq; - arraylist_new(&wq, 0); - void **table = (void**) edges_map->ptr; // edges_map is caller => callees - size_t table_size = edges_map->length; - for (size_t i = 0; i < table_size; i += 2) { - assert(table == edges_map->ptr && table_size == edges_map->length && - "edges_map changed during iteration"); - jl_method_instance_t *caller = (jl_method_instance_t*)table[i]; - jl_array_t *callees = (jl_array_t*)table[i + 1]; - if (callees == NULL) - continue; - assert(jl_is_method_instance(caller) && jl_is_method(caller->def.method)); - if (!jl_object_in_image((jl_value_t*)caller->def.method->module) || - ptrhash_get(&external_mis, caller) != HT_NOTFOUND) { - jl_record_edges(caller, &wq, edges); + for (size_t i = 0; i < jl_array_nrows(internal_methods); i++) { + jl_method_t *m = (jl_method_t*)jl_array_ptr_ref(internal_methods, i); + jl_value_t *specializations = jl_atomic_load_relaxed(&m->specializations); + if (!jl_is_svec(specializations)) { + jl_method_instance_t *mi = (jl_method_instance_t*)specializations; + jl_record_edges(mi, edges); } - } - htable_free(&external_mis); - while (wq.len) { - jl_method_instance_t *caller = (jl_method_instance_t*)arraylist_pop(&wq); - jl_record_edges(caller, &wq, edges); - } - arraylist_free(&wq); - edges_map = NULL; - htable_t edges_map2; - htable_new(&edges_map2, 0); - htable_t edges_ids; - size_t l = edges ? 
jl_array_nrows(edges) : 0; - htable_new(&edges_ids, l); - for (size_t i = 0; i < l / 2; i++) { - jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, i * 2); - void *target = (void*)((char*)HT_NOTFOUND + i + 1); - ptrhash_put(&edges_ids, (void*)caller, target); - } - // process target list to turn it into a memoized validity table - // and compute the old methods list, ready for serialization - jl_value_t *matches = NULL; - jl_array_t *callee_ids = NULL; - jl_value_t *sig = NULL; - JL_GC_PUSH3(&matches, &callee_ids, &sig); - for (size_t i = 0; i < l; i += 2) { - jl_array_t *callees = (jl_array_t*)jl_array_ptr_ref(edges, i + 1); - size_t l = jl_array_nrows(callees); - callee_ids = jl_alloc_array_1d(jl_array_int32_type, l + 1); - int32_t *idxs = jl_array_data(callee_ids, int32_t); - idxs[0] = 0; - size_t nt = 0; - for (size_t j = 0; j < l; j += 2) { - jl_value_t *invokeTypes = jl_array_ptr_ref(callees, j); - jl_value_t *callee = jl_array_ptr_ref(callees, j + 1); - assert(callee && "unsupported edge"); - - if (jl_is_method_instance(callee)) { - jl_methtable_t *mt = jl_method_get_table(((jl_method_instance_t*)callee)->def.method); - if (!jl_object_in_image((jl_value_t*)mt)) - continue; - } - - // (nullptr, c) => call - // (invokeTypes, c) => invoke - // (nullptr, invokeTypes) => missing call - // (invokeTypes, nullptr) => missing invoke (unused--inferred as Any) - void *target = ptrhash_get(&edges_map2, invokeTypes ? (void*)invokeTypes : (void*)callee); - if (target == HT_NOTFOUND) { - size_t min_valid = 0; - size_t max_valid = ~(size_t)0; - if (invokeTypes) { - assert(jl_is_method_instance(callee)); - jl_method_t *m = ((jl_method_instance_t*)callee)->def.method; - matches = (jl_value_t*)m; // valid because there is no method replacement permitted -#ifndef NDEBUG - jl_methtable_t *mt = jl_method_get_table(m); - if ((jl_value_t*)mt != jl_nothing) { - jl_value_t *matches = jl_gf_invoke_lookup_worlds(invokeTypes, (jl_value_t*)mt, world, &min_valid, &max_valid); - if (matches != jl_nothing) { - assert(m == ((jl_method_match_t*)matches)->method); - } - } -#endif - } - else { - if (jl_is_method_instance(callee)) { - jl_method_instance_t *mi = (jl_method_instance_t*)callee; - sig = jl_type_intersection(mi->def.method->sig, (jl_value_t*)mi->specTypes); - } - else { - sig = callee; - } - int ambig = 0; - matches = jl_matching_methods((jl_tupletype_t*)sig, jl_nothing, - INT32_MAX, 0, world, &min_valid, &max_valid, &ambig); - sig = NULL; - if (matches == jl_nothing) { - callee_ids = NULL; // invalid - break; - } - size_t k; - for (k = 0; k < jl_array_nrows(matches); k++) { - jl_method_match_t *match = (jl_method_match_t *)jl_array_ptr_ref(matches, k); - jl_array_ptr_set(matches, k, match->method); - } - } - jl_array_ptr_1d_push(ext_targets, invokeTypes); - jl_array_ptr_1d_push(ext_targets, callee); - jl_array_ptr_1d_push(ext_targets, matches); - target = (void*)((char*)HT_NOTFOUND + jl_array_nrows(ext_targets) / 3); - ptrhash_put(&edges_map2, (void*)callee, target); - } - idxs[++nt] = (char*)target - (char*)HT_NOTFOUND - 1; - } - jl_array_ptr_set(edges, i + 1, callee_ids); // swap callees for ids - if (!callee_ids) - continue; - idxs[0] = nt; - // record place of every method in edges - // add method edges to the callee_ids list - for (size_t j = 0; j < l; j += 2) { - jl_value_t *callee = jl_array_ptr_ref(callees, j + 1); - if (callee && jl_is_method_instance(callee)) { - void *target = ptrhash_get(&edges_ids, (void*)callee); - if (target != HT_NOTFOUND) { - idxs[++nt] = 
(char*)target - (char*)HT_NOTFOUND - 1; - } + else { + size_t j, l = jl_svec_len(specializations); + for (j = 0; j < l; j++) { + jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, j); + if ((jl_value_t*)mi != jl_nothing) + jl_record_edges(mi, edges); } } - jl_array_del_end(callee_ids, l - nt); } - JL_GC_POP(); - htable_free(&edges_map2); } // Headers @@ -946,374 +751,324 @@ static void jl_copy_roots(jl_array_t *method_roots_list, uint64_t key) } } - -// verify that these edges intersect with the same methods as before -static jl_array_t *jl_verify_edges(jl_array_t *targets, size_t minworld) +static size_t verify_invokesig(jl_value_t *invokesig, jl_method_t *expected, size_t minworld) { - JL_TIMING(VERIFY_IMAGE, VERIFY_Edges); - size_t i, l = jl_array_nrows(targets) / 3; - static jl_value_t *ulong_array JL_ALWAYS_LEAFTYPE = NULL; - if (ulong_array == NULL) - ulong_array = jl_apply_array_type((jl_value_t*)jl_ulong_type, 1); - jl_array_t *maxvalids = jl_alloc_array_1d(ulong_array, l); - memset(jl_array_data(maxvalids, size_t), 0, l * sizeof(size_t)); - jl_value_t *loctag = NULL; - jl_value_t *matches = NULL; - jl_value_t *sig = NULL; - JL_GC_PUSH4(&maxvalids, &matches, &sig, &loctag); - for (i = 0; i < l; i++) { - jl_value_t *invokesig = jl_array_ptr_ref(targets, i * 3); - jl_value_t *callee = jl_array_ptr_ref(targets, i * 3 + 1); - jl_value_t *expected = jl_array_ptr_ref(targets, i * 3 + 2); - size_t min_valid = 0; - size_t max_valid = ~(size_t)0; - if (invokesig) { - assert(callee && "unsupported edge"); - jl_method_t *m = ((jl_method_instance_t*)callee)->def.method; - if (jl_egal(invokesig, m->sig)) { - // the invoke match is `m` for `m->sig`, unless `m` is invalid - if (jl_atomic_load_relaxed(&m->deleted_world) < max_valid) - max_valid = 0; - } - else { - jl_methtable_t *mt = jl_method_get_table(m); - if ((jl_value_t*)mt == jl_nothing) { - max_valid = 0; - } - else { - matches = jl_gf_invoke_lookup_worlds(invokesig, (jl_value_t*)mt, minworld, &min_valid, &max_valid); - if (matches == jl_nothing) { - max_valid = 0; - } - else { - matches = (jl_value_t*)((jl_method_match_t*)matches)->method; - if (matches != expected) { - max_valid = 0; - } - } - } - } + assert(jl_is_type(invokesig)); + assert(jl_is_method(expected)); + size_t min_valid = 0; + size_t max_valid = ~(size_t)0; + if (jl_egal(invokesig, expected->sig)) { + // the invoke match is `expected` for `expected->sig`, unless `expected` is invalid + if (jl_atomic_load_relaxed(&expected->deleted_world) < max_valid) + max_valid = 0; + } + else { + jl_methtable_t *mt = jl_method_get_table(expected); + if ((jl_value_t*)mt == jl_nothing) { + max_valid = 0; } else { - if (jl_is_method_instance(callee)) { - jl_method_instance_t *mi = (jl_method_instance_t*)callee; - sig = jl_type_intersection(mi->def.method->sig, (jl_value_t*)mi->specTypes); - } - else { - sig = callee; - } - assert(jl_is_array(expected)); - int ambig = 0; - // TODO: possibly need to included ambiguities too (for the optimizer correctness)? - // len + 1 is to allow us to log causes of invalidation (SnoopCompile's @snoopr) - matches = jl_matching_methods((jl_tupletype_t*)sig, jl_nothing, - _jl_debug_method_invalidation ? 
INT32_MAX : jl_array_nrows(expected), - 0, minworld, &min_valid, &max_valid, &ambig); - sig = NULL; + jl_value_t *matches = jl_gf_invoke_lookup_worlds(invokesig, (jl_value_t*)mt, minworld, &min_valid, &max_valid); if (matches == jl_nothing) { max_valid = 0; } else { - // setdiff!(matches, expected) - size_t j, k, ins = 0; - if (jl_array_nrows(matches) != jl_array_nrows(expected)) { + if (((jl_method_match_t*)matches)->method != expected) { max_valid = 0; } - for (k = 0; k < jl_array_nrows(matches); k++) { - jl_method_t *match = ((jl_method_match_t*)jl_array_ptr_ref(matches, k))->method; - size_t l = jl_array_nrows(expected); - for (j = 0; j < l; j++) - if (match == (jl_method_t*)jl_array_ptr_ref(expected, j)) - break; - if (j == l) { - // intersection has a new method or a method was - // deleted--this is now probably no good, just invalidate - // everything about it now - max_valid = 0; - if (!_jl_debug_method_invalidation) - break; - jl_array_ptr_set(matches, ins++, match); - } - } - if (max_valid != ~(size_t)0 && _jl_debug_method_invalidation) - jl_array_del_end((jl_array_t*)matches, jl_array_nrows(matches) - ins); } } - jl_array_data(maxvalids, size_t)[i] = max_valid; - if (max_valid != ~(size_t)0 && _jl_debug_method_invalidation) { - jl_array_ptr_1d_push(_jl_debug_method_invalidation, invokesig ? (jl_value_t*)invokesig : callee); - loctag = jl_cstr_to_string("insert_backedges_callee"); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag); - loctag = jl_box_int32((int32_t)i); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, matches); - } - //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)invokesig); - //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)callee); - //ios_puts(max_valid == ~(size_t)0 ? "valid\n" : "INVALID\n", ios_stderr); } - JL_GC_POP(); - return maxvalids; + return max_valid; } -// Combine all edges relevant to a method to initialize the maxvalids list -static jl_array_t *jl_verify_methods(jl_array_t *edges, jl_array_t *maxvalids) +static size_t verify_call(jl_value_t *sig, jl_svec_t *expecteds, size_t i, size_t n, size_t minworld, jl_value_t **matches JL_REQUIRE_ROOTED_SLOT) { - JL_TIMING(VERIFY_IMAGE, VERIFY_Methods); - jl_value_t *loctag = NULL; - jl_array_t *maxvalids2 = NULL; - JL_GC_PUSH2(&loctag, &maxvalids2); - size_t i, l = jl_array_nrows(edges) / 2; - maxvalids2 = jl_alloc_array_1d(jl_typeof(maxvalids), l); - size_t *maxvalids2_data = jl_array_data(maxvalids2, size_t); - memset(maxvalids2_data, 0, l * sizeof(size_t)); - for (i = 0; i < l; i++) { - jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, 2 * i); - assert(jl_is_method_instance(caller) && jl_is_method(caller->def.method)); - jl_array_t *callee_ids = (jl_array_t*)jl_array_ptr_ref(edges, 2 * i + 1); - assert(jl_typetagis((jl_value_t*)callee_ids, jl_array_int32_type)); - if (callee_ids == NULL) { - // serializing the edges had failed - maxvalids2_data[i] = 0; + // verify that these edges intersect with the same methods as before + size_t min_valid = 0; + size_t max_valid = ~(size_t)0; + int ambig = 0; + // TODO: possibly need to included ambiguities too (for the optimizer correctness)? + jl_value_t *result = jl_matching_methods((jl_tupletype_t*)sig, jl_nothing, + _jl_debug_method_invalidation ? 
INT32_MAX : n, + 0, minworld, &min_valid, &max_valid, &ambig); + *matches = result; + if (result == jl_nothing) { + max_valid = 0; + } + else { + // setdiff!(result, expected) + size_t j, k, ins = 0; + if (jl_array_nrows(result) != n) { + max_valid = 0; } - else { - int32_t *idxs = jl_array_data(callee_ids, int32_t); - size_t j; - maxvalids2_data[i] = ~(size_t)0; - for (j = 0; j < idxs[0]; j++) { - int32_t idx = idxs[j + 1]; - size_t max_valid = jl_array_data(maxvalids, size_t)[idx]; - if (max_valid != ~(size_t)0 && _jl_debug_method_invalidation) { - jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)caller); - loctag = jl_cstr_to_string("verify_methods"); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag); - loctag = jl_box_int32((int32_t)idx); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag); + for (k = 0; k < jl_array_nrows(result); k++) { + jl_method_t *match = ((jl_method_match_t*)jl_array_ptr_ref(result, k))->method; + for (j = 0; j < n; j++) { + jl_value_t *t = jl_svecref(expecteds, j + i); + if (jl_is_code_instance(t)) + t = (jl_value_t*)((jl_code_instance_t*)t)->def; + jl_method_t *meth; + if (jl_is_method(t)) + meth = (jl_method_t*)t; + else { + assert(jl_is_method_instance(t)); + meth = ((jl_method_instance_t*)t)->def.method; } - if (max_valid < maxvalids2_data[i]) - maxvalids2_data[i] = max_valid; - if (max_valid == 0) + if (match == meth) + break; + } + if (j == n) { + // intersection has a new method or a method was + // deleted--this is now probably no good, just invalidate + // everything about it now + max_valid = 0; + if (!_jl_debug_method_invalidation) break; + jl_array_ptr_set(result, ins++, match); } } - //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)caller); - //ios_puts(maxvalids2_data[i] == ~(size_t)0 ? 
"valid\n" : "INVALID\n", ios_stderr); + if (max_valid != ~(size_t)0 && _jl_debug_method_invalidation) + jl_array_del_end((jl_array_t*)result, jl_array_nrows(result) - ins); } - JL_GC_POP(); - return maxvalids2; + return max_valid; } - -// Visit the entire call graph, starting from edges[idx] to determine if that method is valid -// Implements Tarjan's SCC (strongly connected components) algorithm, simplified to remove the count variable -// and slightly modified with an early termination option once the computation reaches its minimum -static int jl_verify_graph_edge(size_t *maxvalids2_data, jl_array_t *edges, size_t idx, arraylist_t *visited, arraylist_t *stack) +// Test all edges relevant to a method: +//// Visit the entire call graph, starting from edges[idx] to determine if that method is valid +//// Implements Tarjan's SCC (strongly connected components) algorithm, simplified to remove the count variable +//// and slightly modified with an early termination option once the computation reaches its minimum +static int jl_verify_method(jl_code_instance_t *codeinst, size_t minworld, size_t *maxworld, arraylist_t *stack, htable_t *visiting) { - if (maxvalids2_data[idx] == 0) { - visited->items[idx] = (void*)1; + size_t max_valid2 = jl_atomic_load_relaxed(&codeinst->max_world); + if (max_valid2 != WORLD_AGE_REVALIDATION_SENTINEL) { + *maxworld = max_valid2; return 0; } - size_t cycle = (size_t)visited->items[idx]; - if (cycle != 0) - return cycle - 1; // depth remaining - jl_value_t *cause = NULL; - arraylist_push(stack, (void*)idx); + assert(jl_is_method_instance(codeinst->def) && jl_is_method(codeinst->def->def.method)); + void **bp = ptrhash_bp(visiting, codeinst); + if (*bp != HT_NOTFOUND) + return (char*)*bp - (char*)HT_NOTFOUND; // cycle idx + arraylist_push(stack, (void*)codeinst); size_t depth = stack->len; - visited->items[idx] = (void*)(1 + depth); - jl_array_t *callee_ids = (jl_array_t*)jl_array_ptr_ref(edges, idx * 2 + 1); - assert(jl_typetagis((jl_value_t*)callee_ids, jl_array_int32_type)); - int32_t *idxs = jl_array_data(callee_ids, int32_t); - size_t i, n = jl_array_nrows(callee_ids); - cycle = depth; - for (i = idxs[0] + 1; i < n; i++) { - int32_t childidx = idxs[i]; - int child_cycle = jl_verify_graph_edge(maxvalids2_data, edges, childidx, visited, stack); - size_t child_max_valid = maxvalids2_data[childidx]; - if (child_max_valid < maxvalids2_data[idx]) { - maxvalids2_data[idx] = child_max_valid; - cause = jl_array_ptr_ref(edges, childidx * 2); + *bp = (char*)HT_NOTFOUND + depth; + JL_TIMING(VERIFY_IMAGE, VERIFY_Methods); + jl_value_t *loctag = NULL; + jl_value_t *sig = NULL; + jl_value_t *matches = NULL; + JL_GC_PUSH3(&loctag, &matches, &sig); + jl_svec_t *callees = jl_atomic_load_relaxed(&codeinst->edges); + assert(jl_is_svec((jl_value_t*)callees)); + // verify current edges + for (size_t j = 0; j < jl_svec_len(callees); ) { + jl_value_t *edge = jl_svecref(callees, j); + size_t max_valid2; + assert(!jl_is_method(edge)); // `Method`-edge isn't allowed for the optimized one-edge format + if (jl_is_code_instance(edge)) + edge = (jl_value_t*)((jl_code_instance_t*)edge)->def; + if (jl_is_method_instance(edge)) { + jl_method_instance_t *mi = (jl_method_instance_t*)edge; + sig = jl_type_intersection(mi->def.method->sig, (jl_value_t*)mi->specTypes); // TODO: ?? 
+ max_valid2 = verify_call(sig, callees, j, 1, minworld, &matches); + sig = NULL; + j += 1; } - if (child_max_valid == 0) { - // found what we were looking for, so terminate early - break; + else if (jl_is_long(edge)) { + jl_value_t *sig = jl_svecref(callees, j + 1); + size_t nedges = jl_unbox_long(edge); + max_valid2 = verify_call(sig, callees, j + 2, nedges, minworld, &matches); + j += 2 + nedges; + edge = sig; } - else if (child_cycle && child_cycle < cycle) { - // record the cycle will resolve at depth "cycle" - cycle = child_cycle; + else if (jl_is_mtable(edge)) { + // skip the legacy edge (missing backedge) + j += 2; + continue; + } + else { + jl_method_instance_t *callee = (jl_method_instance_t*)jl_svecref(callees, j + 1); + jl_method_t *meth; + if (jl_is_code_instance(callee)) + callee = ((jl_code_instance_t*)callee)->def; + if (jl_is_method_instance(callee)) + meth = callee->def.method; + else { + assert(jl_is_method(callee)); + meth = (jl_method_t*)callee; + } + max_valid2 = verify_invokesig(edge, meth, minworld); + j += 2; + } + if (*maxworld > max_valid2) + *maxworld = max_valid2; + if (max_valid2 != ~(size_t)0 && _jl_debug_method_invalidation) { + jl_array_ptr_1d_push(_jl_debug_method_invalidation, edge); + loctag = jl_cstr_to_string("insert_backedges_callee"); + jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag); + jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)codeinst); + jl_array_ptr_1d_push(_jl_debug_method_invalidation, matches); + } + //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)edge); + //ios_puts(max_valid2 == ~(size_t)0 ? "valid\n" : "INVALID\n", ios_stderr); + if (max_valid2 == 0 && !_jl_debug_method_invalidation) + break; + } + JL_GC_POP(); + // verify recursive edges (if valid, or debugging) + size_t cycle = depth; + jl_code_instance_t *cause = codeinst; + if (*maxworld == ~(size_t)0 || _jl_debug_method_invalidation) { + for (size_t j = 0; j < jl_svec_len(callees); j++) { + jl_value_t *edge = jl_svecref(callees, j); + if (!jl_is_code_instance(edge)) + continue; + jl_code_instance_t *callee = (jl_code_instance_t*)edge; + size_t max_valid2 = ~(size_t)0; + size_t child_cycle = jl_verify_method(callee, minworld, &max_valid2, stack, visiting); + if (*maxworld > max_valid2) { + cause = callee; + *maxworld = max_valid2; + } + if (max_valid2 == 0) { + // found what we were looking for, so terminate early + break; + } + else if (child_cycle && child_cycle < cycle) { + // record the cycle will resolve at depth "cycle" + cycle = child_cycle; + } } } - size_t max_valid = maxvalids2_data[idx]; - if (max_valid != 0 && cycle != depth) + if (*maxworld != 0 && cycle != depth) return cycle; // If we are the top of the current cycle, now mark all other parts of // our cycle with what we found. // Or if we found a failed edge, also mark all of the other parts of the - // cycle as also having an failed edge. + // cycle as also having a failed edge. 
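Stated as a toy helper (not the real implementation; the name and the array-based interface are invented), the rule the comments above describe is that a caller's `max_world` is the intersection, i.e. the minimum, of the validity of all of its edges, and every member of a call cycle ends up sharing that value.

```c
#include <stddef.h>

/* Toy model only: combine per-edge validity into a single max_world.
 * A value of 0 means "already invalid", so the scan can stop early. */
static size_t combine_edge_worlds_example(const size_t *edge_maxworlds, size_t n)
{
    size_t maxworld = ~(size_t)0;          // start out fully valid
    for (size_t i = 0; i < n; i++) {
        if (edge_maxworlds[i] < maxworld)
            maxworld = edge_maxworlds[i];  // intersect the world ranges
        if (maxworld == 0)
            break;                         // nothing can revalidate it
    }
    return maxworld;
}
```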
while (stack->len >= depth) { - size_t childidx = (size_t)arraylist_pop(stack); - assert(visited->items[childidx] == (void*)(2 + stack->len)); - if (idx != childidx) { - if (max_valid < maxvalids2_data[childidx]) - maxvalids2_data[childidx] = max_valid; - } - visited->items[childidx] = (void*)1; - if (_jl_debug_method_invalidation && max_valid != ~(size_t)0) { - jl_method_instance_t *mi = (jl_method_instance_t*)jl_array_ptr_ref(edges, childidx * 2); - jl_value_t *loctag = NULL; - JL_GC_PUSH1(&loctag); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)mi); + jl_code_instance_t *child = (jl_code_instance_t*)arraylist_pop(stack); + if (*maxworld != jl_atomic_load_relaxed(&child->max_world)) + jl_atomic_store_relaxed(&child->max_world, *maxworld); + void **bp = ptrhash_bp(visiting, codeinst); + assert(*bp == (char*)HT_NOTFOUND + stack->len + 1); + *bp = HT_NOTFOUND; + if (_jl_debug_method_invalidation && *maxworld != ~(size_t)0) { + jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)child); loctag = jl_cstr_to_string("verify_methods"); + JL_GC_PUSH1(&loctag); jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag); jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)cause); JL_GC_POP(); } } + //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)codeinst->def); + //ios_puts(max_valid == ~(size_t)0 ? "valid\n\n" : "INVALID\n\n", ios_stderr); return 0; } -// Visit all entries in edges, verify if they are valid -static void jl_verify_graph(jl_array_t *edges, jl_array_t *maxvalids2) +static size_t jl_verify_method_graph(jl_code_instance_t *codeinst, size_t minworld, arraylist_t *stack, htable_t *visiting) { - JL_TIMING(VERIFY_IMAGE, VERIFY_Graph); - arraylist_t stack, visited; - arraylist_new(&stack, 0); - size_t i, n = jl_array_nrows(edges) / 2; - arraylist_new(&visited, n); - memset(visited.items, 0, n * sizeof(size_t)); - size_t *maxvalids2_data = jl_array_data(maxvalids2, size_t); - for (i = 0; i < n; i++) { - assert(visited.items[i] == (void*)0 || visited.items[i] == (void*)1); - int child_cycle = jl_verify_graph_edge(maxvalids2_data, edges, i, &visited, &stack); - assert(child_cycle == 0); (void)child_cycle; - assert(stack.len == 0); - assert(visited.items[i] == (void*)1); + assert(stack->len == 0); + for (size_t i = 0, hsz = visiting->size; i < hsz; i++) + assert(visiting->table[i] == HT_NOTFOUND); + size_t maxworld = ~(size_t)0; + int child_cycle = jl_verify_method(codeinst, minworld, &maxworld, stack, visiting); + assert(child_cycle == 0); (void)child_cycle; + assert(stack->len == 0); + for (size_t i = 0, hsz = visiting->size / 2; i < hsz; i++) { + assert(visiting->table[2 * i + 1] == HT_NOTFOUND); + visiting->table[2 * i] = HT_NOTFOUND; } - arraylist_free(&stack); - arraylist_free(&visited); + return maxworld; } // Restore backedges to external targets -// `edges` = [caller1, targets_indexes1, ...], the list of worklist-owned methods calling external methods. -// `ext_targets` is [invokesig1, callee1, matches1, ...], the global set of non-worklist callees of worklist-owned methods. 
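The removed comments above describe the old `[caller, targets_indexes]` plus `ext_targets` encoding; both the verification pass above and the rewritten restoration loop below instead walk each CodeInstance's `edges` svec directly. A rough sketch of just that entry classification, with an invented helper name and the layout assumed from the loops in this patch:

```c
/* Illustrative sketch of the assumed `edges` svec layout:
 *   CodeInstance or MethodInstance        -> direct call edge       (1 slot)
 *   boxed Int n, then sig, then n methods -> dispatch-target group  (2+n slots)
 *   MethodTable, then sig                 -> legacy "missing" edge  (2 slots)
 *   invoke signature, then callee         -> `invoke` edge          (2 slots) */
static size_t count_edge_groups_example(jl_svec_t *edges)
{
    size_t groups = 0, j = 0, n = jl_svec_len(edges);
    while (j < n) {
        jl_value_t *item = jl_svecref(edges, j);
        if (jl_is_code_instance(item) || jl_is_method_instance(item))
            j += 1;                          // direct call edge
        else if (jl_is_long(item))
            j += 2 + jl_unbox_long(item);    // skip the sig and the expected methods
        else
            j += 2;                          // mtable+sig or invoke-sig+callee
        groups++;
    }
    return groups;
}
```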
-static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_targets, jl_array_t *ext_ci_list, size_t minworld) +// `edges` = [caller1, ...], the list of worklist-owned code instances internally +// `ext_ci_list` = [caller1, ...], the list of worklist-owned code instances externally +static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_ci_list, size_t minworld) { // determine which CodeInstance objects are still valid in our image - jl_array_t *valids = jl_verify_edges(ext_targets, minworld); - JL_GC_PUSH1(&valids); - valids = jl_verify_methods(edges, valids); // consumes edges valids, initializes methods valids - jl_verify_graph(edges, valids); // propagates methods valids for each edge - - size_t n_ext_cis = ext_ci_list ? jl_array_nrows(ext_ci_list) : 0; - htable_t cis_pending_validation; - htable_new(&cis_pending_validation, n_ext_cis); - - // next build a map from external MethodInstances to their CodeInstance for insertion - for (size_t i = 0; i < n_ext_cis; i++) { - jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(ext_ci_list, i); - if (jl_atomic_load_relaxed(&ci->max_world) == WORLD_AGE_REVALIDATION_SENTINEL) { - assert(jl_atomic_load_relaxed(&ci->min_world) == minworld); - void **bp = ptrhash_bp(&cis_pending_validation, (void*)ci->def); - assert(!jl_atomic_load_relaxed(&ci->next)); - if (*bp == HT_NOTFOUND) - *bp = (void*)ci; - else { - // Do ci->owner bifurcates the cache, we temporarily - // form a linked list of all the CI that need to be connected later - jl_code_instance_t *prev_ci = (jl_code_instance_t *)*bp; - jl_atomic_store_relaxed(&ci->next, prev_ci); - *bp = (void*)ci; - } - } - else { - assert(jl_atomic_load_relaxed(&ci->min_world) == 1); - assert(jl_atomic_load_relaxed(&ci->max_world) == ~(size_t)0); - jl_method_instance_t *caller = ci->def; - if (jl_atomic_load_relaxed(&ci->inferred) && jl_rettype_inferred(ci->owner, caller, minworld, ~(size_t)0) == jl_nothing) { - jl_mi_cache_insert(caller, ci); - } - //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)caller); - //ios_puts("free\n", ios_stderr); - } - } - - // next enable any applicable new codes - size_t nedges = jl_array_nrows(edges) / 2; - for (size_t i = 0; i < nedges; i++) { - jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, 2 * i); - size_t maxvalid = jl_array_data(valids, size_t)[i]; - if (maxvalid == ~(size_t)0) { - // if this callee is still valid, add all the backedges - jl_array_t *callee_ids = (jl_array_t*)jl_array_ptr_ref(edges, 2 * i + 1); - int32_t *idxs = jl_array_data(callee_ids, int32_t); - for (size_t j = 0; j < idxs[0]; j++) { - int32_t idx = idxs[j + 1]; - jl_value_t *invokesig = jl_array_ptr_ref(ext_targets, idx * 3); - jl_value_t *callee = jl_array_ptr_ref(ext_targets, idx * 3 + 1); - if (callee && jl_is_method_instance(callee)) { - jl_method_instance_add_backedge((jl_method_instance_t*)callee, invokesig, caller); + // to enable any applicable new codes + arraylist_t stack; + arraylist_new(&stack, 0); + htable_t visiting; + htable_new(&visiting, 0); + for (size_t external = 0; external < (ext_ci_list ? 
2 : 1); external++) { + if (external) + edges = ext_ci_list; + size_t nedges = jl_array_nrows(edges); + for (size_t i = 0; i < nedges; i++) { + jl_code_instance_t *codeinst = (jl_code_instance_t*)jl_array_ptr_ref(edges, i); + jl_svec_t *callees = jl_atomic_load_relaxed(&codeinst->edges); + jl_method_instance_t *caller = codeinst->def; + if (jl_atomic_load_relaxed(&codeinst->min_world) != minworld) { + if (external && jl_atomic_load_relaxed(&codeinst->max_world) != WORLD_AGE_REVALIDATION_SENTINEL) { + assert(jl_atomic_load_relaxed(&codeinst->min_world) == 1); + assert(jl_atomic_load_relaxed(&codeinst->max_world) == ~(size_t)0); } else { - jl_value_t *sig = callee == NULL ? invokesig : callee; - jl_methtable_t *mt = jl_method_table_for(sig); - // FIXME: rarely, `callee` has an unexpected `Union` signature, - // see https://github.com/JuliaLang/julia/pull/43990#issuecomment-1030329344 - // Fix the issue and turn this back into an `assert((jl_value_t*)mt != jl_nothing)` - // This workaround exposes us to (rare) 265-violations. - if ((jl_value_t*)mt != jl_nothing) - jl_method_table_add_backedge(mt, sig, (jl_value_t*)caller); + continue; } } - } - // then enable any methods associated with it - void *ci = ptrhash_get(&cis_pending_validation, (void*)caller); - //assert(ci != HT_NOTFOUND); - if (ci != HT_NOTFOUND) { - // Update any external CIs and add them to the cache. - assert(jl_is_code_instance(ci)); - jl_code_instance_t *codeinst = (jl_code_instance_t*)ci; - while (codeinst) { - jl_code_instance_t *next_ci = jl_atomic_load_relaxed(&codeinst->next); - jl_atomic_store_relaxed(&codeinst->next, NULL); - - jl_value_t *owner = codeinst->owner; - JL_GC_PROMISE_ROOTED(owner); - - assert(jl_atomic_load_relaxed(&codeinst->min_world) == minworld); - // See #53586, #53109 - // assert(jl_atomic_load_relaxed(&codeinst->max_world) == WORLD_AGE_REVALIDATION_SENTINEL); - assert(jl_atomic_load_relaxed(&codeinst->inferred)); - jl_atomic_store_relaxed(&codeinst->max_world, maxvalid); - - if (jl_rettype_inferred(owner, caller, minworld, maxvalid) != jl_nothing) { - // We already got a code instance for this world age range from somewhere else - we don't need - // this one. - } else { - jl_mi_cache_insert(caller, codeinst); + size_t maxvalid = jl_verify_method_graph(codeinst, minworld, &stack, &visiting); + assert(jl_atomic_load_relaxed(&codeinst->max_world) == maxvalid); + if (maxvalid == ~(size_t)0) { + // if this callee is still valid, add all the backedges + for (size_t j = 0; j < jl_svec_len(callees); ) { + jl_value_t *edge = jl_svecref(callees, j); + if (jl_is_long(edge)) { + j += 2; // skip over signature and count but not methods + continue; + } + else if (jl_is_method(edge)) { + j += 1; + continue; + } + if (jl_is_code_instance(edge)) + edge = (jl_value_t*)((jl_code_instance_t*)edge)->def; + if (jl_is_method_instance(edge)) { + jl_method_instance_add_backedge((jl_method_instance_t*)edge, NULL, codeinst); + j += 1; + } + else if (jl_is_mtable(edge)) { + jl_methtable_t *mt = (jl_methtable_t*)edge; + jl_value_t *sig = jl_svecref(callees, j + 1); + jl_method_table_add_backedge(mt, sig, codeinst); + j += 2; + } + else { + jl_value_t *callee = jl_svecref(callees, j + 1); + if (jl_is_code_instance(callee)) + callee = (jl_value_t*)((jl_code_instance_t*)callee)->def; + else if (jl_is_method(callee)) { + j += 2; + continue; + } + jl_method_instance_add_backedge((jl_method_instance_t*)callee, edge, codeinst); + j += 2; + } } - codeinst = next_ci; - } - } - else { - // Likely internal. 
Find the CI already in the cache hierarchy. - for (jl_code_instance_t *codeinst = jl_atomic_load_relaxed(&caller->cache); codeinst; codeinst = jl_atomic_load_relaxed(&codeinst->next)) { - if (jl_atomic_load_relaxed(&codeinst->min_world) == minworld && jl_atomic_load_relaxed(&codeinst->max_world) == WORLD_AGE_REVALIDATION_SENTINEL) { - jl_atomic_store_relaxed(&codeinst->max_world, maxvalid); + if (external) { + jl_value_t *owner = codeinst->owner; + JL_GC_PROMISE_ROOTED(owner); + + // See #53586, #53109 + assert(jl_atomic_load_relaxed(&codeinst->inferred)); + + if (jl_rettype_inferred(owner, caller, minworld, maxvalid) != jl_nothing) { + // We already got a code instance for this world age range from somewhere else - we don't need + // this one. + } + else { + jl_mi_cache_insert(caller, codeinst); + } } } } } - htable_free(&cis_pending_validation); - - JL_GC_POP(); -} -static void classify_callers(htable_t *callers_with_edges, jl_array_t *edges) -{ - size_t l = edges ? jl_array_nrows(edges) / 2 : 0; - for (size_t i = 0; i < l; i++) { - jl_method_instance_t *caller = (jl_method_instance_t*)jl_array_ptr_ref(edges, 2 * i); - ptrhash_put(callers_with_edges, (void*)caller, (void*)caller); - } + htable_free(&visiting); + arraylist_free(&stack); } static jl_value_t *read_verify_mod_list(ios_t *s, jl_array_t *depmods) diff --git a/src/toplevel.c b/src/toplevel.c index 6dcab3095e320..017d61bbc8ceb 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -640,7 +640,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_for_uninferred(jl_method_instan jl_code_instance_t *ci = jl_new_codeinst(mi, (jl_value_t*)jl_uninferred_sym, (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, jl_nothing, (jl_value_t*)src, 0, src->min_world, src->max_world, - 0, NULL, 1, NULL); + 0, NULL, 1, NULL, NULL); return ci; } diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index d59a18e6d4f16..67191a024da73 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -588,7 +588,7 @@ CC.cache_owner(::REPLInterpreter) = REPLCacheToken() CC.may_optimize(::REPLInterpreter) = false # REPLInterpreter doesn't need any sources to be cached, so discard them aggressively -CC.transform_result_for_cache(::REPLInterpreter, ::Core.MethodInstance, ::CC.WorldRange, ::CC.InferenceResult) = nothing +CC.transform_result_for_cache(::REPLInterpreter, ::CC.InferenceResult) = nothing # REPLInterpreter analyzes a top-level frame, so better to not bail out from it CC.bail_out_toplevel_call(::REPLInterpreter, ::CC.InferenceLoopState, ::CC.InferenceState) = false @@ -673,8 +673,8 @@ function CC.concrete_eval_eligible(interp::REPLInterpreter, @nospecialize(f), sv::CC.InferenceState) if (interp.limit_aggressive_inference ? 
is_repl_frame(sv) : is_call_graph_uncached(sv)) neweffects = CC.Effects(result.effects; consistent=CC.ALWAYS_TRUE) - result = CC.MethodCallResult(result.rt, result.exct, result.edgecycle, result.edgelimited, - result.edge, neweffects) + result = CC.MethodCallResult(result.rt, result.exct, neweffects, result.edge, + result.edgecycle, result.edgelimited, result.volatile_inf_result) end ret = @invoke CC.concrete_eval_eligible(interp::CC.AbstractInterpreter, f::Any, result::CC.MethodCallResult, arginfo::CC.ArgInfo, diff --git a/test/compiler/AbstractInterpreter.jl b/test/compiler/AbstractInterpreter.jl index 009128b289ade..a49647ad4ea43 100644 --- a/test/compiler/AbstractInterpreter.jl +++ b/test/compiler/AbstractInterpreter.jl @@ -15,7 +15,7 @@ CC.may_optimize(::AbsIntOnlyInterp1) = false # it should work even if the interpreter discards inferred source entirely @newinterp AbsIntOnlyInterp2 CC.may_optimize(::AbsIntOnlyInterp2) = false -CC.transform_result_for_cache(::AbsIntOnlyInterp2, ::Core.MethodInstance, ::CC.WorldRange, ::CC.InferenceResult) = nothing +CC.transform_result_for_cache(::AbsIntOnlyInterp2, ::CC.InferenceResult) = nothing @test Base.infer_return_type(Base.init_stdio, (Ptr{Cvoid},); interp=AbsIntOnlyInterp2()) >: IO # OverlayMethodTable @@ -406,10 +406,10 @@ import .CC: CallInfo struct NoinlineCallInfo <: CallInfo info::CallInfo # wrapped call end +CC.add_edges_impl(edges::Vector{Any}, info::NoinlineCallInfo) = CC.add_edges!(edges, info.info) CC.nsplit_impl(info::NoinlineCallInfo) = CC.nsplit(info.info) CC.getsplit_impl(info::NoinlineCallInfo, idx::Int) = CC.getsplit(info.info, idx) CC.getresult_impl(info::NoinlineCallInfo, idx::Int) = CC.getresult(info.info, idx) -CC.add_uncovered_edges_impl(edges::Vector{Any}, info::NoinlineCallInfo, @nospecialize(atype)) = CC.add_uncovered_edges!(edges, info.info, atype) function CC.abstract_call(interp::NoinlineInterpreter, arginfo::CC.ArgInfo, si::CC.StmtInfo, sv::CC.InferenceState, max_methods::Int) @@ -497,10 +497,9 @@ struct CustomData inferred CustomData(@nospecialize inferred) = new(inferred) end -function CC.transform_result_for_cache(interp::CustomDataInterp, - mi::Core.MethodInstance, valid_worlds::CC.WorldRange, result::CC.InferenceResult) - inferred_result = @invoke CC.transform_result_for_cache(interp::CC.AbstractInterpreter, - mi::Core.MethodInstance, valid_worlds::CC.WorldRange, result::CC.InferenceResult) +function CC.transform_result_for_cache(interp::CustomDataInterp, result::CC.InferenceResult) + inferred_result = @invoke CC.transform_result_for_cache( + interp::CC.AbstractInterpreter, result::CC.InferenceResult) return CustomData(inferred_result) end function CC.src_inlining_policy(interp::CustomDataInterp, @nospecialize(src), diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/test/compiler/EscapeAnalysis/EAUtils.jl index 1f0a84f1a8365..c71b821fd25f3 100644 --- a/test/compiler/EscapeAnalysis/EAUtils.jl +++ b/test/compiler/EscapeAnalysis/EAUtils.jl @@ -11,9 +11,8 @@ const EA = EscapeAnalysis # imports import .CC: - AbstractInterpreter, NativeInterpreter, WorldView, WorldRange, - InferenceParams, OptimizationParams, get_world_counter, get_inference_cache, - ipo_dataflow_analysis!, cache_result! + AbstractInterpreter, NativeInterpreter, WorldView, WorldRange, InferenceParams, + OptimizationParams, get_world_counter, get_inference_cache, ipo_dataflow_analysis! 
# usings using Core: CodeInstance, MethodInstance, CodeInfo diff --git a/test/compiler/contextual.jl b/test/compiler/contextual.jl index fc91a37c5bd9e..8d526fdefdc5b 100644 --- a/test/compiler/contextual.jl +++ b/test/compiler/contextual.jl @@ -88,8 +88,8 @@ module MiniCassette src = retrieve_code_info(mi, world) @assert isa(src, CodeInfo) src = copy(src) - @assert src.edges === nothing - src.edges = MethodInstance[mi] + @assert src.edges === Core.svec() + src.edges = Any[mi] transform!(mi, src, length(args), match.sparams) # TODO: this is mandatory: code_info.min_world = max(code_info.min_world, min_world[]) # TODO: this is mandatory: code_info.max_world = min(code_info.max_world, max_world[]) diff --git a/test/compiler/invalidation.jl b/test/compiler/invalidation.jl index 76cf3cbdc0796..55faa4287da24 100644 --- a/test/compiler/invalidation.jl +++ b/test/compiler/invalidation.jl @@ -95,7 +95,8 @@ end const GLOBAL_BUFFER = IOBuffer() # test backedge optimization when the callee's type and effects information are maximized -begin take!(GLOBAL_BUFFER) +begin + take!(GLOBAL_BUFFER) pr48932_callee(x) = (print(GLOBAL_BUFFER, x); Base.inferencebarrier(x)) pr48932_caller(x) = pr48932_callee(Base.inferencebarrier(x)) @@ -150,11 +151,11 @@ begin take!(GLOBAL_BUFFER) ci = mi.cache @test isdefined(ci, :next) @test ci.owner === nothing - @test ci.max_world == typemax(UInt) + @test_broken ci.max_world == typemax(UInt) ci = ci.next @test !isdefined(ci, :next) @test ci.owner === InvalidationTesterToken() - @test ci.max_world == typemax(UInt) + @test_broken ci.max_world == typemax(UInt) end @test isnothing(pr48932_caller(42)) @@ -213,11 +214,11 @@ begin take!(GLOBAL_BUFFER) ci = mi.cache @test isdefined(ci, :next) @test ci.owner === nothing - @test ci.max_world == typemax(UInt) + @test_broken ci.max_world == typemax(UInt) ci = ci.next @test !isdefined(ci, :next) @test ci.owner === InvalidationTesterToken() - @test ci.max_world == typemax(UInt) + @test_broken ci.max_world == typemax(UInt) end @test isnothing(pr48932_caller_unuse(42)) @test "foo" == String(take!(GLOBAL_BUFFER)) diff --git a/test/core.jl b/test/core.jl index 5ba0e99e730d4..4b5a674ba44b3 100644 --- a/test/core.jl +++ b/test/core.jl @@ -32,7 +32,7 @@ end # sanity tests that our built-in types are marked correctly for atomic fields for (T, c) in ( (Core.CodeInfo, []), - (Core.CodeInstance, [:next, :min_world, :max_world, :inferred, :debuginfo, :ipo_purity_bits, :invoke, :specptr, :specsigflags, :precompile]), + (Core.CodeInstance, [:next, :min_world, :max_world, :inferred, :edges, :debuginfo, :ipo_purity_bits, :invoke, :specptr, :specsigflags, :precompile]), (Core.Method, [:primary_world, :deleted_world]), (Core.MethodInstance, [:cache, :flags]), (Core.MethodTable, [:defs, :leafcache, :cache, :max_args]), diff --git a/test/precompile.jl b/test/precompile.jl index adf10363298ba..1607d4c6b502b 100644 --- a/test/precompile.jl +++ b/test/precompile.jl @@ -823,6 +823,7 @@ precompile_test_harness("code caching") do dir mispecs = minternal.specializations::Core.SimpleVector @test mispecs[1] === mi mi = mispecs[2]::Core.MethodInstance + mi.specTypes == Tuple{typeof(M.getelsize),Vector{M.X2}} ci = mi.cache @test ci.relocatability == 0 # PkgA loads PkgB, and both add roots to the same `push!` method (both before and after loading B) @@ -914,7 +915,7 @@ precompile_test_harness("code caching") do dir # external callers mods = Module[] for be in mi.backedges - push!(mods, be.def.module) + push!(mods, (be.def.def::Method).module) # XXX end @test MA ∈ mods 
@test MB ∈ mods @@ -923,7 +924,7 @@ precompile_test_harness("code caching") do dir # internal callers meths = Method[] for be in mi.backedges - push!(meths, be.def) + push!(meths, (be.def::Method).def) # XXX end @test which(M.g1, ()) ∈ meths @test which(M.g2, ()) ∈ meths @@ -1056,11 +1057,11 @@ precompile_test_harness("code caching") do dir idxs = findall(==("verify_methods"), invalidations) idxsbits = filter(idxs) do i mi = invalidations[i-1] - mi.def == m + mi.def.def === m end idx = only(idxsbits) tagbad = invalidations[idx+1] - @test isa(tagbad, Int32) + @test isa(tagbad, Core.CodeInstance) j = findfirst(==(tagbad), invalidations) @test invalidations[j-1] == "insert_backedges_callee" @test isa(invalidations[j-2], Type) @@ -1068,7 +1069,7 @@ precompile_test_harness("code caching") do dir m = only(methods(MB.useA2)) mi = only(Base.specializations(m)) @test !hasvalid(mi, world) - @test mi ∈ invalidations + @test any(x -> x isa Core.CodeInstance && x.def === mi, invalidations) m = only(methods(MB.map_nbits)) @test !hasvalid(m.specializations::Core.MethodInstance, world+1) # insert_backedges invalidations also trigger their backedges @@ -1178,12 +1179,7 @@ precompile_test_harness("invoke") do dir @eval using $CallerModule M = getfield(@__MODULE__, CallerModule) - function get_method_for_type(func, @nospecialize(T)) # return the method func(::T) - for m in methods(func) - m.sig.parameters[end] === T && return m - end - error("no ::Real method found for $func") - end + get_method_for_type(func, @nospecialize(T)) = which(func, (T,)) # return the method func(::T) function nvalid(mi::Core.MethodInstance) isdefined(mi, :cache) || return 0 ci = mi.cache @@ -1200,7 +1196,7 @@ precompile_test_harness("invoke") do dir mi = m.specializations::Core.MethodInstance @test length(mi.backedges) == 2 @test mi.backedges[1] === Tuple{typeof(func), Real} - @test isa(mi.backedges[2], Core.MethodInstance) + @test isa(mi.backedges[2], Core.CodeInstance) @test mi.cache.max_world == typemax(mi.cache.max_world) end for func in (M.q, M.qnc) @@ -1208,18 +1204,18 @@ precompile_test_harness("invoke") do dir mi = m.specializations::Core.MethodInstance @test length(mi.backedges) == 2 @test mi.backedges[1] === Tuple{typeof(func), Integer} - @test isa(mi.backedges[2], Core.MethodInstance) + @test isa(mi.backedges[2], Core.CodeInstance) @test mi.cache.max_world == typemax(mi.cache.max_world) end m = get_method_for_type(M.h, Real) - @test isempty(Base.specializations(m)) + @test nvalid(m.specializations::Core.MethodInstance) == 0 m = get_method_for_type(M.hnc, Real) - @test isempty(Base.specializations(m)) + @test nvalid(m.specializations::Core.MethodInstance) == 0 m = only(methods(M.callq)) - @test isempty(Base.specializations(m)) || nvalid(m.specializations::Core.MethodInstance) == 0 + @test nvalid(m.specializations::Core.MethodInstance) == 0 m = only(methods(M.callqnc)) - @test isempty(Base.specializations(m)) || nvalid(m.specializations::Core.MethodInstance) == 0 + @test nvalid(m.specializations::Core.MethodInstance) == 0 m = only(methods(M.callqi)) @test (m.specializations::Core.MethodInstance).specTypes == Tuple{typeof(M.callqi), Int} m = only(methods(M.callqnci)) @@ -1733,7 +1729,7 @@ precompile_test_harness("issue #46296") do load_path mi = first(Base.specializations(first(methods(identity)))) ci = Core.CodeInstance(mi, nothing, Any, Any, nothing, nothing, zero(Int32), typemin(UInt), typemax(UInt), zero(UInt32), nothing, 0x00, - Core.DebugInfo(mi)) + Core.DebugInfo(mi), Core.svec()) __init__() = @assert ci isa 
Core.CodeInstance diff --git a/test/precompile_absint1.jl b/test/precompile_absint1.jl index 7bc0382ffda85..ab36af163dc50 100644 --- a/test/precompile_absint1.jl +++ b/test/precompile_absint1.jl @@ -41,29 +41,33 @@ precompile_test_harness() do load_path let m = only(methods(TestAbsIntPrecompile1.basic_callee)) mi = only(Base.specializations(m)) ci = mi.cache - @test isdefined(ci, :next) + @test_broken isdefined(ci, :next) @test ci.owner === nothing @test ci.max_world == typemax(UInt) @test Base.module_build_id(TestAbsIntPrecompile1) == Base.object_build_id(ci) + @test_skip begin ci = ci.next @test !isdefined(ci, :next) @test ci.owner === cache_owner @test ci.max_world == typemax(UInt) @test Base.module_build_id(TestAbsIntPrecompile1) == Base.object_build_id(ci) + end end let m = only(methods(sum, (Vector{Float64},))) found = false for mi in Base.specializations(m) if mi isa Core.MethodInstance && mi.specTypes == Tuple{typeof(sum),Vector{Float64}} ci = mi.cache - @test isdefined(ci, :next) - @test ci.owner === cache_owner + @test_broken isdefined(ci, :next) + @test_broken ci.owner === cache_owner + @test_skip begin @test ci.max_world == typemax(UInt) @test Base.module_build_id(TestAbsIntPrecompile1) == Base.object_build_id(ci) ci = ci.next + end @test !isdefined(ci, :next) @test ci.owner === nothing @test ci.max_world == typemax(UInt) diff --git a/test/precompile_absint2.jl b/test/precompile_absint2.jl index 066dcbaece4c4..75b84e26e06c6 100644 --- a/test/precompile_absint2.jl +++ b/test/precompile_absint2.jl @@ -22,10 +22,9 @@ precompile_test_harness() do load_path inferred CustomData(@nospecialize inferred) = new(inferred) end - function CC.transform_result_for_cache(interp::PrecompileInterpreter, - mi::Core.MethodInstance, valid_worlds::CC.WorldRange, result::CC.InferenceResult) - inferred_result = @invoke CC.transform_result_for_cache(interp::CC.AbstractInterpreter, - mi::Core.MethodInstance, valid_worlds::CC.WorldRange, result::CC.InferenceResult) + function CC.transform_result_for_cache(interp::PrecompileInterpreter, result::CC.InferenceResult) + inferred_result = @invoke CC.transform_result_for_cache( + interp::CC.AbstractInterpreter, result::CC.InferenceResult) return CustomData(inferred_result) end function CC.src_inlining_policy(interp::PrecompileInterpreter, @nospecialize(src), @@ -64,29 +63,33 @@ precompile_test_harness() do load_path let m = only(methods(TestAbsIntPrecompile2.basic_callee)) mi = only(Base.specializations(m)) ci = mi.cache - @test isdefined(ci, :next) + @test_broken isdefined(ci, :next) @test ci.owner === nothing @test ci.max_world == typemax(UInt) @test Base.module_build_id(TestAbsIntPrecompile2) == Base.object_build_id(ci) + @test_skip begin ci = ci.next @test !isdefined(ci, :next) @test ci.owner === cache_owner @test ci.max_world == typemax(UInt) @test Base.module_build_id(TestAbsIntPrecompile2) == Base.object_build_id(ci) + end end let m = only(methods(sum, (Vector{Float64},))) found = false for mi = Base.specializations(m) if mi isa Core.MethodInstance && mi.specTypes == Tuple{typeof(sum),Vector{Float64}} ci = mi.cache - @test isdefined(ci, :next) - @test ci.owner === cache_owner + @test_broken isdefined(ci, :next) + @test_broken ci.owner === cache_owner + @test_skip begin @test ci.max_world == typemax(UInt) @test Base.module_build_id(TestAbsIntPrecompile2) == Base.object_build_id(ci) ci = ci.next + end @test !isdefined(ci, :next) @test ci.owner === nothing @test ci.max_world == typemax(UInt) diff --git a/test/stacktraces.jl b/test/stacktraces.jl index 
bc86479dbab4b..12da6d571013e 100644 --- a/test/stacktraces.jl +++ b/test/stacktraces.jl @@ -103,10 +103,7 @@ end end let src = Meta.lower(Main, quote let x = 1 end end).args[1]::Core.CodeInfo - li = ccall(:jl_new_method_instance_uninit, Ref{Core.MethodInstance}, ()) - @atomic li.cache = ccall(:jl_new_codeinst_for_uninferred, Ref{Core.CodeInstance}, (Any, Any), li, src) - li.specTypes = Tuple{} - li.def = @__MODULE__ + li = ccall(:jl_method_instance_for_thunk, Ref{Core.MethodInstance}, (Any, Any), src, @__MODULE__) sf = StackFrame(:a, :b, 3, li, false, false, 0) repr = string(sf) @test repr == "Toplevel MethodInstance thunk at b:3" From 85dc2c72110e44f68f483c2b7b2e4dc68c73a143 Mon Sep 17 00:00:00 2001 From: CY Han Date: Fri, 1 Nov 2024 22:20:10 +0800 Subject: [PATCH 348/537] docs: remove `dirname.c` from THIRDPARTY file (#56413) - `dirname.c` was removed by https://github.com/JuliaLang/julia/commit/c2cec7ad57102e4fbb733b8fb79d617a9524f0ae --- THIRDPARTY.md | 1 - 1 file changed, 1 deletion(-) diff --git a/THIRDPARTY.md b/THIRDPARTY.md index 30f53727c50ab..3a74afec4a283 100644 --- a/THIRDPARTY.md +++ b/THIRDPARTY.md @@ -6,7 +6,6 @@ for exceptions. - [crc32c.c](https://stackoverflow.com/questions/17645167/implementing-sse-4-2s-crc32c-in-software) (CRC-32c checksum code by Mark Adler) [[ZLib](https://opensource.org/licenses/Zlib)]. - [LDC](https://github.com/ldc-developers/ldc/blob/master/LICENSE) (for ccall/cfunction ABI definitions) [BSD-3]. The portion of code that Julia uses from LDC is [BSD-3] licensed. - [LLVM](https://releases.llvm.org/3.9.0/LICENSE.TXT) (for parts of src/disasm.cpp) [UIUC] -- [MINGW](https://sourceforge.net/p/mingw/mingw-org-wsl/ci/legacy/tree/mingwrt/mingwex/dirname.c) (for dirname implementation on Windows) [MIT] - [NetBSD](https://www.netbsd.org/about/redistribution.html) (for setjmp, longjmp, and strptime implementations on Windows) [BSD-3] - [Python](https://docs.python.org/3/license.html) (for strtod implementation on Windows) [PSF] - [FEMTOLISP](https://github.com/JeffBezanson/femtolisp) [BSD-3] From 7d81897aef774ce953ed67a9951ae1a051d38544 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Fri, 1 Nov 2024 10:57:23 -0400 Subject: [PATCH 349/537] =?UTF-8?q?Allow=20ext=20=E2=86=92=20ext=20depende?= =?UTF-8?q?ncy=20if=20triggers=20are=20a=20strict=20superset=20(#56368)=20?= =?UTF-8?q?(#56402)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Forward port of #56368 - this was a pretty clean port, so it should be good to go once tests pass. 
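To make the rule concrete, here is a minimal standalone sketch of the trigger-superset check (illustrative only: the names and the `Dict`/`Set` bookkeeping are invented for this example, and `"Parent"` stands in for the extension's parent package, which the real code always includes among the triggers; the actual implementation works on vectors of `Base.PkgId`):

```julia
# Sketch only: each extension is keyed by the set of packages that trigger it.
# An extension may depend on another extension only when its trigger set is a
# *strict* superset of the other's, so the dependency is guaranteed to be
# loadable whenever the dependent extension is.
triggers = Dict(
    "ExtA"  => Set(["Parent", "SomePackage"]),
    "ExtAB" => Set(["Parent", "SomePackage", "SomeOtherPackage"]),
)

may_depend_on(ext, dep) = triggers[ext] ⊋ triggers[dep]  # strict superset check

@assert may_depend_on("ExtAB", "ExtA")   # ExtAB may load ExtA
@assert !may_depend_on("ExtA", "ExtAB")  # but not the reverse
@assert !may_depend_on("ExtA", "ExtA")   # and never itself (not strict)
```

The hunks below apply the same comparison: `⊊` in `base/loading.jl` when selecting the extensions a precompiling extension is allowed to load, and `⊋` in `base/precompilation.jl` when wiring up the precompile dependency graph.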
--- base/loading.jl | 75 ++++++++++--------- base/precompilation.jl | 31 +++++--- test/loading.jl | 68 +++++++++++++++++ .../Manifest.toml | 32 ++++++++ .../Project.toml | 12 +++ .../ext/ExtAB.jl | 12 +++ .../src/CrossPackageExtToExtDependency.jl | 7 ++ .../Extensions/CyclicExtensions/Manifest.toml | 7 +- .../Extensions/EnvWithDeps/Manifest.toml | 7 +- .../EnvWithHasExtensions/Manifest.toml | 7 +- .../EnvWithHasExtensionsv2/Manifest.toml | 7 +- .../project/Extensions/ExtDep.jl/Project.toml | 1 + .../Extensions/ExtDep.jl/src/ExtDep.jl | 1 + .../ExtToExtDependency/Manifest.toml | 21 ++++++ .../ExtToExtDependency/Project.toml | 14 ++++ .../Extensions/ExtToExtDependency/ext/ExtA.jl | 6 ++ .../ExtToExtDependency/ext/ExtAB.jl | 12 +++ .../src/ExtToExtDependency.jl | 7 ++ .../HasDepWithExtensions.jl/Manifest.toml | 7 +- .../Extensions/SomeOtherPackage/Project.toml | 4 + .../SomeOtherPackage/src/SomeOtherPackage.jl | 5 ++ 21 files changed, 293 insertions(+), 50 deletions(-) create mode 100644 test/project/Extensions/CrossPackageExtToExtDependency/Manifest.toml create mode 100644 test/project/Extensions/CrossPackageExtToExtDependency/Project.toml create mode 100644 test/project/Extensions/CrossPackageExtToExtDependency/ext/ExtAB.jl create mode 100644 test/project/Extensions/CrossPackageExtToExtDependency/src/CrossPackageExtToExtDependency.jl create mode 100644 test/project/Extensions/ExtToExtDependency/Manifest.toml create mode 100644 test/project/Extensions/ExtToExtDependency/Project.toml create mode 100644 test/project/Extensions/ExtToExtDependency/ext/ExtA.jl create mode 100644 test/project/Extensions/ExtToExtDependency/ext/ExtAB.jl create mode 100644 test/project/Extensions/ExtToExtDependency/src/ExtToExtDependency.jl create mode 100644 test/project/Extensions/SomeOtherPackage/Project.toml create mode 100644 test/project/Extensions/SomeOtherPackage/src/SomeOtherPackage.jl diff --git a/base/loading.jl b/base/loading.jl index 28875b8713b35..7b45348f47009 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -756,8 +756,9 @@ function manifest_uuid_path(env::String, pkg::PkgId)::Union{Nothing,String,Missi proj = implicit_manifest_uuid_path(env, pkg) proj === nothing || return proj # if not found - parentid = get(EXT_PRIMED, pkg, nothing) - if parentid !== nothing + triggers = get(EXT_PRIMED, pkg, nothing) + if triggers !== nothing + parentid = triggers[1] _, parent_project_file = entry_point_and_project_file(env, parentid.name) if parent_project_file !== nothing parentproj = project_file_name_uuid(parent_project_file, parentid.name) @@ -1432,9 +1433,7 @@ function run_module_init(mod::Module, i::Int=1) end function run_package_callbacks(modkey::PkgId) - if !precompiling_extension - run_extension_callbacks(modkey) - end + run_extension_callbacks(modkey) assert_havelock(require_lock) unlock(require_lock) try @@ -1463,7 +1462,7 @@ mutable struct ExtensionId ntriggers::Int # how many more packages must be defined until this is loaded end -const EXT_PRIMED = Dict{PkgId, PkgId}() # Extension -> Parent +const EXT_PRIMED = Dict{PkgId,Vector{PkgId}}() # Extension -> Parent + Triggers (parent is always first) const EXT_DORMITORY = Dict{PkgId,Vector{ExtensionId}}() # Trigger -> Extensions that can be triggered by it const EXT_DORMITORY_FAILED = ExtensionId[] @@ -1554,7 +1553,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} if haskey(EXT_PRIMED, id) || haskey(Base.loaded_modules, id) continue # extension is already primed or loaded, don't add it again end - EXT_PRIMED[id] = 
parent + EXT_PRIMED[id] = trigger_ids = PkgId[parent] gid = ExtensionId(id, parent, 1 + length(triggers), 1 + length(triggers)) trigger1 = get!(Vector{ExtensionId}, EXT_DORMITORY, parent) push!(trigger1, gid) @@ -1562,6 +1561,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} # TODO: Better error message if this lookup fails? uuid_trigger = UUID(totaldeps[trigger]::String) trigger_id = PkgId(uuid_trigger, trigger) + push!(trigger_ids, trigger_id) if !haskey(Base.loaded_modules, trigger_id) || haskey(package_locks, trigger_id) trigger1 = get!(Vector{ExtensionId}, EXT_DORMITORY, trigger_id) push!(trigger1, gid) @@ -1573,6 +1573,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} end loading_extension::Bool = false +loadable_extensions::Union{Nothing,Vector{PkgId}} = nothing precompiling_extension::Bool = false function run_extension_callbacks(extid::ExtensionId) assert_havelock(require_lock) @@ -1603,7 +1604,7 @@ function run_extension_callbacks(pkgid::PkgId) for extid in extids @assert extid.ntriggers > 0 extid.ntriggers -= 1 - if extid.ntriggers == 0 + if extid.ntriggers == 0 && (loadable_extensions === nothing || extid.id in loadable_extensions) push!(extids_to_load, extid) end end @@ -2645,7 +2646,17 @@ function __require_prelocked(pkg::PkgId, env) # double-check the search now that we have lock m = _require_search_from_serialized(pkg, path, UInt128(0), true) m isa Module && return m - return compilecache(pkg, path; reasons) + triggers = get(EXT_PRIMED, pkg, nothing) + loadable_exts = nothing + if triggers !== nothing # extension + loadable_exts = PkgId[] + for (ext′, triggers′) in EXT_PRIMED + if triggers′ ⊊ triggers + push!(loadable_exts, ext′) + end + end + end + return compilecache(pkg, path; reasons, loadable_exts) end loaded isa Module && return loaded if isnothing(loaded) # maybe_cachefile_lock returns nothing if it had to wait for another process @@ -2996,10 +3007,16 @@ function check_package_module_loaded(pkg::PkgId) return nothing end +# protects against PkgId and UUID being imported and losing Base prefix +_pkg_str(_pkg::PkgId) = (_pkg.uuid === nothing) ? 
"Base.PkgId($(repr(_pkg.name)))" : "Base.PkgId(Base.UUID(\"$(_pkg.uuid)\"), $(repr(_pkg.name)))" +_pkg_str(_pkg::Vector) = sprint(show, eltype(_pkg); context = :module=>nothing) * "[" * join(map(_pkg_str, _pkg), ",") * "]" +_pkg_str(_pkg::Pair{PkgId}) = _pkg_str(_pkg.first) * " => " * repr(_pkg.second) +_pkg_str(_pkg::Nothing) = "nothing" + const PRECOMPILE_TRACE_COMPILE = Ref{String}() function create_expr_cache(pkg::PkgId, input::String, output::String, output_o::Union{Nothing, String}, concrete_deps::typeof(_concrete_dependencies), flags::Cmd=``, cacheflags::CacheFlags=CacheFlags(), - internal_stderr::IO = stderr, internal_stdout::IO = stdout, isext::Bool=false) + internal_stderr::IO = stderr, internal_stdout::IO = stdout, loadable_exts::Union{Vector{PkgId},Nothing}=nothing) @nospecialize internal_stderr internal_stdout rm(output, force=true) # Remove file if it exists output_o === nothing || rm(output_o, force=true) @@ -3007,8 +3024,9 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o:: dl_load_path = String[abspath(x) for x in DL_LOAD_PATH] load_path = String[abspath(x) for x in Base.load_path()] # if pkg is a stdlib, append its parent Project.toml to the load path - parentid = get(EXT_PRIMED, pkg, nothing) - if parentid !== nothing + triggers = get(EXT_PRIMED, pkg, nothing) + if triggers !== nothing + parentid = triggers[1] for env in load_path project_file = env_project_file(env) if project_file === true @@ -3026,22 +3044,6 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o:: any(path -> path_sep in path, load_path) && error("LOAD_PATH entries cannot contain $(repr(path_sep))") - deps_strs = String[] - # protects against PkgId and UUID being imported and losing Base prefix - function pkg_str(_pkg::PkgId) - if _pkg.uuid === nothing - "Base.PkgId($(repr(_pkg.name)))" - else - "Base.PkgId(Base.UUID(\"$(_pkg.uuid)\"), $(repr(_pkg.name)))" - end - end - for (pkg, build_id) in concrete_deps - push!(deps_strs, "$(pkg_str(pkg)) => $(repr(build_id))") - end - deps_eltype = sprint(show, eltype(concrete_deps); context = :module=>nothing) - deps = deps_eltype * "[" * join(deps_strs, ",") * "]" - precomp_stack = "Base.PkgId[$(join(map(pkg_str, vcat(Base.precompilation_stack, pkg)), ", "))]" - if output_o === nothing # remove options that make no difference given the other cache options cacheflags = CacheFlags(cacheflags, opt_level=0) @@ -3072,10 +3074,11 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o:: # write data over stdin to avoid the (unlikely) case of exceeding max command line size write(io.in, """ empty!(Base.EXT_DORMITORY) # If we have a custom sysimage with `EXT_DORMITORY` prepopulated - Base.track_nested_precomp($precomp_stack) - Base.precompiling_extension = $(loading_extension | isext) - Base.include_package_for_output($(pkg_str(pkg)), $(repr(abspath(input))), $(repr(depot_path)), $(repr(dl_load_path)), - $(repr(load_path)), $deps, $(repr(source_path(nothing)))) + Base.track_nested_precomp($(_pkg_str(vcat(Base.precompilation_stack, pkg)))) + Base.loadable_extensions = $(_pkg_str(loadable_exts)) + Base.precompiling_extension = $(loading_extension) + Base.include_package_for_output($(_pkg_str(pkg)), $(repr(abspath(input))), $(repr(depot_path)), $(repr(dl_load_path)), + $(repr(load_path)), $(_pkg_str(concrete_deps)), $(repr(source_path(nothing)))) """) close(io.in) return io @@ -3130,18 +3133,18 @@ This can be used to reduce package load times. 
Cache files are stored in `DEPOT_PATH[1]/compiled`. See [Module initialization and precompilation](@ref) for important notes. """ -function compilecache(pkg::PkgId, internal_stderr::IO = stderr, internal_stdout::IO = stdout; flags::Cmd=``, reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}(), isext::Bool=false) +function compilecache(pkg::PkgId, internal_stderr::IO = stderr, internal_stdout::IO = stdout; flags::Cmd=``, reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}(), loadable_exts::Union{Vector{PkgId},Nothing}=nothing) @nospecialize internal_stderr internal_stdout path = locate_package(pkg) path === nothing && throw(ArgumentError("$(repr("text/plain", pkg)) not found during precompilation")) - return compilecache(pkg, path, internal_stderr, internal_stdout; flags, reasons, isext) + return compilecache(pkg, path, internal_stderr, internal_stdout; flags, reasons, loadable_exts) end const MAX_NUM_PRECOMPILE_FILES = Ref(10) function compilecache(pkg::PkgId, path::String, internal_stderr::IO = stderr, internal_stdout::IO = stdout, keep_loaded_modules::Bool = true; flags::Cmd=``, cacheflags::CacheFlags=CacheFlags(), - reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}(), isext::Bool=false) + reasons::Union{Dict{String,Int},Nothing}=Dict{String,Int}(), loadable_exts::Union{Vector{PkgId},Nothing}=nothing) @nospecialize internal_stderr internal_stdout # decide where to put the resulting cache file @@ -3181,7 +3184,7 @@ function compilecache(pkg::PkgId, path::String, internal_stderr::IO = stderr, in close(tmpio_o) close(tmpio_so) end - p = create_expr_cache(pkg, path, tmppath, tmppath_o, concrete_deps, flags, cacheflags, internal_stderr, internal_stdout, isext) + p = create_expr_cache(pkg, path, tmppath, tmppath_o, concrete_deps, flags, cacheflags, internal_stderr, internal_stdout, loadable_exts) if success(p) if cache_objects diff --git a/base/precompilation.jl b/base/precompilation.jl index 54f13d298a462..edd8824ff8d68 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -436,6 +436,7 @@ function _precompilepkgs(pkgs::Vector{String}, return name end + triggers = Dict{Base.PkgId,Vector{Base.PkgId}}() for (dep, deps) in env.deps pkg = Base.PkgId(dep, env.names[dep]) Base.in_sysimage(pkg) && continue @@ -444,25 +445,22 @@ function _precompilepkgs(pkgs::Vector{String}, # add any extensions pkg_exts = Dict{Base.PkgId, Vector{Base.PkgId}}() for (ext_name, extdep_uuids) in env.extensions[dep] - ext_deps = Base.PkgId[] - push!(ext_deps, pkg) # depends on parent package + ext_uuid = Base.uuid5(pkg.uuid, ext_name) + ext = Base.PkgId(ext_uuid, ext_name) + triggers[ext] = Base.PkgId[pkg] # depends on parent package all_extdeps_available = true for extdep_uuid in extdep_uuids extdep_name = env.names[extdep_uuid] if extdep_uuid in keys(env.deps) - push!(ext_deps, Base.PkgId(extdep_uuid, extdep_name)) + push!(triggers[ext], Base.PkgId(extdep_uuid, extdep_name)) else all_extdeps_available = false break end end all_extdeps_available || continue - ext_uuid = Base.uuid5(pkg.uuid, ext_name) - ext = Base.PkgId(ext_uuid, ext_name) - filter!(!Base.in_sysimage, ext_deps) - depsmap[ext] = ext_deps exts[ext] = pkg.name - pkg_exts[ext] = ext_deps + pkg_exts[ext] = depsmap[ext] = filter(!Base.in_sysimage, triggers[ext]) end if !isempty(pkg_exts) pkg_exts_map[pkg] = collect(keys(pkg_exts)) @@ -478,6 +476,16 @@ function _precompilepkgs(pkgs::Vector{String}, append!(direct_deps, keys(filter(d->last(d) in keys(env.project_deps), exts))) @debug "precompile: deps collected" + + # An 
extension effectively depends on another extension if it has a strict superset of its triggers + for ext_a in keys(exts) + for ext_b in keys(exts) + if triggers[ext_a] ⊋ triggers[ext_b] + push!(depsmap[ext_a], ext_b) + end + end + end + # this loop must be run after the full depsmap has been populated for (pkg, pkg_exts) in pkg_exts_map # find any packages that depend on the extension(s)'s deps and replace those deps in their deps list with the extension(s), @@ -839,7 +847,12 @@ function _precompilepkgs(pkgs::Vector{String}, t = @elapsed ret = precompile_pkgs_maybe_cachefile_lock(io, print_lock, fancyprint, pkg_config, pkgspidlocked, hascolor) do Base.with_logger(Base.NullLogger()) do # The false here means we ignore loaded modules, so precompile for a fresh session - Base.compilecache(pkg, sourcepath, std_pipe, std_pipe, false; flags, cacheflags, isext = haskey(exts, pkg)) + keep_loaded_modules = false + # for extensions, any extension in our direct dependencies is one we have a right to load + # for packages, we may load any extension (all possible triggers are accounted for above) + loadable_exts = haskey(exts, pkg) ? filter((dep)->haskey(exts, dep), depsmap[pkg]) : nothing + Base.compilecache(pkg, sourcepath, std_pipe, std_pipe, keep_loaded_modules; + flags, cacheflags, loadable_exts) end end if ret isa Base.PrecompilableError diff --git a/test/loading.jl b/test/loading.jl index ecba64ca45a73..1cc20548d9bc8 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -1152,6 +1152,74 @@ end cmd = addenv(cmd, "JULIA_LOAD_PATH" => proj) @test occursin("Hello Cycles!", String(read(cmd))) + # Extension-to-extension dependencies + + mktempdir() do depot # Parallel pre-compilation + code = """ + Base.disable_parallel_precompile = false + using ExtToExtDependency + Base.get_extension(ExtToExtDependency, :ExtA) isa Module || error("expected extension to load") + Base.get_extension(ExtToExtDependency, :ExtAB) isa Module || error("expected extension to load") + ExtToExtDependency.greet() + """ + proj = joinpath(@__DIR__, "project", "Extensions", "ExtToExtDependency") + cmd = `$(Base.julia_cmd()) --startup-file=no -e $code` + cmd = addenv(cmd, + "JULIA_LOAD_PATH" => proj, + "JULIA_DEPOT_PATH" => depot * Base.Filesystem.pathsep(), + ) + @test occursin("Hello ext-to-ext!", String(read(cmd))) + end + mktempdir() do depot # Serial pre-compilation + code = """ + Base.disable_parallel_precompile = true + using ExtToExtDependency + Base.get_extension(ExtToExtDependency, :ExtA) isa Module || error("expected extension to load") + Base.get_extension(ExtToExtDependency, :ExtAB) isa Module || error("expected extension to load") + ExtToExtDependency.greet() + """ + proj = joinpath(@__DIR__, "project", "Extensions", "ExtToExtDependency") + cmd = `$(Base.julia_cmd()) --startup-file=no -e $code` + cmd = addenv(cmd, + "JULIA_LOAD_PATH" => proj, + "JULIA_DEPOT_PATH" => depot * Base.Filesystem.pathsep(), + ) + @test occursin("Hello ext-to-ext!", String(read(cmd))) + end + + mktempdir() do depot # Parallel pre-compilation + code = """ + Base.disable_parallel_precompile = false + using CrossPackageExtToExtDependency + Base.get_extension(CrossPackageExtToExtDependency.CyclicExtensions, :ExtA) isa Module || error("expected extension to load") + Base.get_extension(CrossPackageExtToExtDependency, :ExtAB) isa Module || error("expected extension to load") + CrossPackageExtToExtDependency.greet() + """ + proj = joinpath(@__DIR__, "project", "Extensions", "CrossPackageExtToExtDependency") + cmd = `$(Base.julia_cmd()) 
--startup-file=no -e $code` + cmd = addenv(cmd, + "JULIA_LOAD_PATH" => proj, + "JULIA_DEPOT_PATH" => depot * Base.Filesystem.pathsep(), + ) + @test occursin("Hello x-package ext-to-ext!", String(read(cmd))) + end + mktempdir() do depot # Serial pre-compilation + code = """ + Base.disable_parallel_precompile = true + using CrossPackageExtToExtDependency + Base.get_extension(CrossPackageExtToExtDependency.CyclicExtensions, :ExtA) isa Module || error("expected extension to load") + Base.get_extension(CrossPackageExtToExtDependency, :ExtAB) isa Module || error("expected extension to load") + CrossPackageExtToExtDependency.greet() + """ + proj = joinpath(@__DIR__, "project", "Extensions", "CrossPackageExtToExtDependency") + cmd = `$(Base.julia_cmd()) --startup-file=no -e $code` + cmd = addenv(cmd, + "JULIA_LOAD_PATH" => proj, + "JULIA_DEPOT_PATH" => depot * Base.Filesystem.pathsep(), + ) + @test occursin("Hello x-package ext-to-ext!", String(read(cmd))) + end + finally try rm(depot_path, force=true, recursive=true) diff --git a/test/project/Extensions/CrossPackageExtToExtDependency/Manifest.toml b/test/project/Extensions/CrossPackageExtToExtDependency/Manifest.toml new file mode 100644 index 0000000000000..5497fdb7091bb --- /dev/null +++ b/test/project/Extensions/CrossPackageExtToExtDependency/Manifest.toml @@ -0,0 +1,32 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.11.1" +manifest_format = "2.0" +project_hash = "dc35c2cf8c6b82fb5b9624c9713c2df34ca30499" + +[[deps.CyclicExtensions]] +deps = ["ExtDep"] +path = "../CyclicExtensions" +uuid = "17d4f0df-b55c-4714-ac4b-55fa23f7355c" +version = "0.1.0" +weakdeps = ["SomePackage"] + + [deps.CyclicExtensions.extensions] + ExtA = ["SomePackage"] + ExtB = ["SomePackage"] + +[[deps.ExtDep]] +deps = ["SomeOtherPackage", "SomePackage"] +path = "../ExtDep.jl" +uuid = "fa069be4-f60b-4d4c-8b95-f8008775090c" +version = "0.1.0" + +[[deps.SomeOtherPackage]] +path = "../SomeOtherPackage" +uuid = "178f68a2-4498-45ee-a775-452b36359b63" +version = "0.1.0" + +[[deps.SomePackage]] +path = "../SomePackage" +uuid = "678608ae-7bb3-42c7-98b1-82102067a3d8" +version = "0.1.0" diff --git a/test/project/Extensions/CrossPackageExtToExtDependency/Project.toml b/test/project/Extensions/CrossPackageExtToExtDependency/Project.toml new file mode 100644 index 0000000000000..76ffb7bd1c882 --- /dev/null +++ b/test/project/Extensions/CrossPackageExtToExtDependency/Project.toml @@ -0,0 +1,12 @@ +name = "CrossPackageExtToExtDependency" +uuid = "30f07f2e-c47e-40db-93a2-cbc4d1b301cc" +version = "0.1.0" + +[deps] +CyclicExtensions = "17d4f0df-b55c-4714-ac4b-55fa23f7355c" + +[weakdeps] +SomePackage = "678608ae-7bb3-42c7-98b1-82102067a3d8" + +[extensions] +ExtAB = ["CyclicExtensions", "SomePackage"] diff --git a/test/project/Extensions/CrossPackageExtToExtDependency/ext/ExtAB.jl b/test/project/Extensions/CrossPackageExtToExtDependency/ext/ExtAB.jl new file mode 100644 index 0000000000000..1ded9f2df5097 --- /dev/null +++ b/test/project/Extensions/CrossPackageExtToExtDependency/ext/ExtAB.jl @@ -0,0 +1,12 @@ +module ExtAB + +using CrossPackageExtToExtDependency +using SomePackage +using CyclicExtensions + +const ExtA = Base.get_extension(CyclicExtensions, :ExtA) +if !(ExtA isa Module) + error("expected extension to load") +end + +end diff --git a/test/project/Extensions/CrossPackageExtToExtDependency/src/CrossPackageExtToExtDependency.jl b/test/project/Extensions/CrossPackageExtToExtDependency/src/CrossPackageExtToExtDependency.jl new file mode 
100644 index 0000000000000..28b229e2d61bf --- /dev/null +++ b/test/project/Extensions/CrossPackageExtToExtDependency/src/CrossPackageExtToExtDependency.jl @@ -0,0 +1,7 @@ +module CrossPackageExtToExtDependency + +using CyclicExtensions + +greet() = print("Hello x-package ext-to-ext!") + +end # module CrossPackageExtToTextDependency diff --git a/test/project/Extensions/CyclicExtensions/Manifest.toml b/test/project/Extensions/CyclicExtensions/Manifest.toml index a506825cf7995..0f280293c07b6 100644 --- a/test/project/Extensions/CyclicExtensions/Manifest.toml +++ b/test/project/Extensions/CyclicExtensions/Manifest.toml @@ -5,7 +5,7 @@ manifest_format = "2.0" project_hash = "ec25ff8df3a5e2212a173c3de2c7d716cc47cd36" [[deps.ExtDep]] -deps = ["SomePackage"] +deps = ["SomePackage", "SomeOtherPackage"] path = "../ExtDep.jl" uuid = "fa069be4-f60b-4d4c-8b95-f8008775090c" version = "0.1.0" @@ -15,6 +15,11 @@ path = "../ExtDep2" uuid = "55982ee5-2ad5-4c40-8cfe-5e9e1b01500d" version = "0.1.0" +[[deps.SomeOtherPackage]] +path = "../SomeOtherPackage" +uuid = "178f68a2-4498-45ee-a775-452b36359b63" +version = "0.1.0" + [[deps.SomePackage]] path = "../SomePackage" uuid = "678608ae-7bb3-42c7-98b1-82102067a3d8" diff --git a/test/project/Extensions/EnvWithDeps/Manifest.toml b/test/project/Extensions/EnvWithDeps/Manifest.toml index 85ff259f0a4d5..554a317b370eb 100644 --- a/test/project/Extensions/EnvWithDeps/Manifest.toml +++ b/test/project/Extensions/EnvWithDeps/Manifest.toml @@ -5,7 +5,7 @@ manifest_format = "2.0" project_hash = "ec25ff8df3a5e2212a173c3de2c7d716cc47cd36" [[deps.ExtDep]] -deps = ["SomePackage"] +deps = ["SomePackage", "SomeOtherPackage"] path = "../ExtDep.jl" uuid = "fa069be4-f60b-4d4c-8b95-f8008775090c" version = "0.1.0" @@ -15,6 +15,11 @@ path = "../ExtDep2" uuid = "55982ee5-2ad5-4c40-8cfe-5e9e1b01500d" version = "0.1.0" +[[deps.SomeOtherPackage]] +path = "../SomeOtherPackage" +uuid = "178f68a2-4498-45ee-a775-452b36359b63" +version = "0.1.0" + [[deps.SomePackage]] path = "../SomePackage" uuid = "678608ae-7bb3-42c7-98b1-82102067a3d8" diff --git a/test/project/Extensions/EnvWithHasExtensions/Manifest.toml b/test/project/Extensions/EnvWithHasExtensions/Manifest.toml index 004ef7892c173..ca2be57c61596 100644 --- a/test/project/Extensions/EnvWithHasExtensions/Manifest.toml +++ b/test/project/Extensions/EnvWithHasExtensions/Manifest.toml @@ -5,7 +5,7 @@ manifest_format = "2.0" project_hash = "a4c480cfa7da9610333d5c42623bf746bd286c5f" [[deps.ExtDep]] -deps = ["SomePackage"] +deps = ["SomePackage", "SomeOtherPackage"] path = "../ExtDep.jl" uuid = "fa069be4-f60b-4d4c-8b95-f8008775090c" version = "0.1.0" @@ -25,6 +25,11 @@ version = "0.1.0" ExtDep2 = "55982ee5-2ad5-4c40-8cfe-5e9e1b01500d" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" +[[deps.SomeOtherPackage]] +path = "../SomeOtherPackage" +uuid = "178f68a2-4498-45ee-a775-452b36359b63" +version = "0.1.0" + [[deps.SomePackage]] path = "../SomePackage" uuid = "678608ae-7bb3-42c7-98b1-82102067a3d8" diff --git a/test/project/Extensions/EnvWithHasExtensionsv2/Manifest.toml b/test/project/Extensions/EnvWithHasExtensionsv2/Manifest.toml index 66781a5701363..9f8c717041b6e 100644 --- a/test/project/Extensions/EnvWithHasExtensionsv2/Manifest.toml +++ b/test/project/Extensions/EnvWithHasExtensionsv2/Manifest.toml @@ -5,7 +5,7 @@ manifest_format = "2.0" project_hash = "caa716752e6dff3d77c3de929ebbb5d2024d04ef" [[deps.ExtDep]] -deps = ["SomePackage"] +deps = ["SomePackage", "SomeOtherPackage"] path = "../ExtDep.jl" uuid = 
"fa069be4-f60b-4d4c-8b95-f8008775090c" version = "0.1.0" @@ -19,6 +19,11 @@ weakdeps = ["ExtDep"] [deps.HasExtensions.extensions] Extension2 = "ExtDep" +[[deps.SomeOtherPackage]] +path = "../SomeOtherPackage" +uuid = "178f68a2-4498-45ee-a775-452b36359b63" +version = "0.1.0" + [[deps.SomePackage]] path = "../SomePackage" uuid = "678608ae-7bb3-42c7-98b1-82102067a3d8" diff --git a/test/project/Extensions/ExtDep.jl/Project.toml b/test/project/Extensions/ExtDep.jl/Project.toml index d246934b7f958..1ece7bf11f95a 100644 --- a/test/project/Extensions/ExtDep.jl/Project.toml +++ b/test/project/Extensions/ExtDep.jl/Project.toml @@ -4,3 +4,4 @@ version = "0.1.0" [deps] SomePackage = "678608ae-7bb3-42c7-98b1-82102067a3d8" +SomeOtherPackage = "178f68a2-4498-45ee-a775-452b36359b63" diff --git a/test/project/Extensions/ExtDep.jl/src/ExtDep.jl b/test/project/Extensions/ExtDep.jl/src/ExtDep.jl index 1c0022d879f51..2d3c6b7f28827 100644 --- a/test/project/Extensions/ExtDep.jl/src/ExtDep.jl +++ b/test/project/Extensions/ExtDep.jl/src/ExtDep.jl @@ -2,6 +2,7 @@ module ExtDep # loading this package makes the check for loading extensions trigger # which tests #47921 +using SomeOtherPackage using SomePackage struct ExtDepStruct end diff --git a/test/project/Extensions/ExtToExtDependency/Manifest.toml b/test/project/Extensions/ExtToExtDependency/Manifest.toml new file mode 100644 index 0000000000000..41546213cdd41 --- /dev/null +++ b/test/project/Extensions/ExtToExtDependency/Manifest.toml @@ -0,0 +1,21 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.11.1" +manifest_format = "2.0" +project_hash = "90b427e837c654fabb1434527ea698dabad46d29" + +[[deps.ExtDep]] +deps = ["SomeOtherPackage", "SomePackage"] +path = "../ExtDep.jl" +uuid = "fa069be4-f60b-4d4c-8b95-f8008775090c" +version = "0.1.0" + +[[deps.SomeOtherPackage]] +path = "../SomeOtherPackage" +uuid = "178f68a2-4498-45ee-a775-452b36359b63" +version = "0.1.0" + +[[deps.SomePackage]] +path = "../SomePackage" +uuid = "678608ae-7bb3-42c7-98b1-82102067a3d8" +version = "0.1.0" diff --git a/test/project/Extensions/ExtToExtDependency/Project.toml b/test/project/Extensions/ExtToExtDependency/Project.toml new file mode 100644 index 0000000000000..980db74c04dc4 --- /dev/null +++ b/test/project/Extensions/ExtToExtDependency/Project.toml @@ -0,0 +1,14 @@ +name = "ExtToExtDependency" +uuid = "594ddb71-72fb-4cfe-9471-775d48a5b70b" +version = "0.1.0" + +[deps] +ExtDep = "fa069be4-f60b-4d4c-8b95-f8008775090c" + +[weakdeps] +SomeOtherPackage = "178f68a2-4498-45ee-a775-452b36359b63" +SomePackage = "678608ae-7bb3-42c7-98b1-82102067a3d8" + +[extensions] +ExtA = ["SomePackage"] +ExtAB = ["SomePackage", "SomeOtherPackage"] diff --git a/test/project/Extensions/ExtToExtDependency/ext/ExtA.jl b/test/project/Extensions/ExtToExtDependency/ext/ExtA.jl new file mode 100644 index 0000000000000..71ed09795157c --- /dev/null +++ b/test/project/Extensions/ExtToExtDependency/ext/ExtA.jl @@ -0,0 +1,6 @@ +module ExtA + +using ExtToExtDependency +using SomePackage + +end diff --git a/test/project/Extensions/ExtToExtDependency/ext/ExtAB.jl b/test/project/Extensions/ExtToExtDependency/ext/ExtAB.jl new file mode 100644 index 0000000000000..a5b2c43cafd58 --- /dev/null +++ b/test/project/Extensions/ExtToExtDependency/ext/ExtAB.jl @@ -0,0 +1,12 @@ +module ExtAB + +using ExtToExtDependency +using SomePackage +using SomeOtherPackage + +const ExtA = Base.get_extension(ExtToExtDependency, :ExtA) +if !(ExtA isa Module) + error("expected extension to load") +end + 
+end diff --git a/test/project/Extensions/ExtToExtDependency/src/ExtToExtDependency.jl b/test/project/Extensions/ExtToExtDependency/src/ExtToExtDependency.jl new file mode 100644 index 0000000000000..ec2bf58f18641 --- /dev/null +++ b/test/project/Extensions/ExtToExtDependency/src/ExtToExtDependency.jl @@ -0,0 +1,7 @@ +module ExtToExtDependency + +using ExtDep + +greet() = print("Hello ext-to-ext!") + +end # module ExtToExtDependency diff --git a/test/project/Extensions/HasDepWithExtensions.jl/Manifest.toml b/test/project/Extensions/HasDepWithExtensions.jl/Manifest.toml index f659a59e0910b..98510dcb27733 100644 --- a/test/project/Extensions/HasDepWithExtensions.jl/Manifest.toml +++ b/test/project/Extensions/HasDepWithExtensions.jl/Manifest.toml @@ -5,7 +5,7 @@ manifest_format = "2.0" project_hash = "4e196b07f2ee7adc48ac9d528d42b3cf3737c7a0" [[deps.ExtDep]] -deps = ["SomePackage"] +deps = ["SomePackage", "SomeOtherPackage"] path = "../ExtDep.jl" uuid = "fa069be4-f60b-4d4c-8b95-f8008775090c" version = "0.1.0" @@ -32,6 +32,11 @@ weakdeps = ["ExtDep", "ExtDep2"] ExtensionDep = "ExtDep3" ExtensionFolder = ["ExtDep", "ExtDep2"] +[[deps.SomeOtherPackage]] +path = "../SomeOtherPackage" +uuid = "178f68a2-4498-45ee-a775-452b36359b63" +version = "0.1.0" + [[deps.SomePackage]] path = "../SomePackage" uuid = "678608ae-7bb3-42c7-98b1-82102067a3d8" diff --git a/test/project/Extensions/SomeOtherPackage/Project.toml b/test/project/Extensions/SomeOtherPackage/Project.toml new file mode 100644 index 0000000000000..6e7eee40c7be2 --- /dev/null +++ b/test/project/Extensions/SomeOtherPackage/Project.toml @@ -0,0 +1,4 @@ +name = "SomeOtherPackage" +uuid = "178f68a2-4498-45ee-a775-452b36359b63" +authors = ["Cody Tapscott "] +version = "0.1.0" diff --git a/test/project/Extensions/SomeOtherPackage/src/SomeOtherPackage.jl b/test/project/Extensions/SomeOtherPackage/src/SomeOtherPackage.jl new file mode 100644 index 0000000000000..ba23eb3914561 --- /dev/null +++ b/test/project/Extensions/SomeOtherPackage/src/SomeOtherPackage.jl @@ -0,0 +1,5 @@ +module SomeOtherPackage + +greet() = print("Hello World!") + +end # module SomeOtherPackage From 3de1b1dc04dd6a0f4e2a0d32db89beb6b009164a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Fri, 1 Nov 2024 16:47:42 +0000 Subject: [PATCH 350/537] [docs] Fix rendering of warning admonition in llvm passes page (#56412) Follow up to #56392: also the warning in https://docs.julialang.org/en/v1.11.1/devdocs/llvm-passes/#Multiversioning is rendered incorrectly because of a missing space. --- doc/src/devdocs/llvm-passes.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/devdocs/llvm-passes.md b/doc/src/devdocs/llvm-passes.md index da600f73fd696..7b847abaa2149 100644 --- a/doc/src/devdocs/llvm-passes.md +++ b/doc/src/devdocs/llvm-passes.md @@ -98,7 +98,7 @@ This pass performs modifications to a module to create functions that are optimi !!! warning - Use of `llvmcall` with multiversioning is dangerous. `llvmcall` enables access to features not typically exposed by the Julia APIs, and are therefore usually not available on all architectures. If multiversioning is enabled and code generation is requested for a target architecture that does not support the feature required by an `llvmcall` expression, LLVM will probably error out, likely with an abort and the message `LLVM ERROR: Do not know how to split the result of this operator!`. + Use of `llvmcall` with multiversioning is dangerous. 
`llvmcall` enables access to features not typically exposed by the Julia APIs, and are therefore usually not available on all architectures. If multiversioning is enabled and code generation is requested for a target architecture that does not support the feature required by an `llvmcall` expression, LLVM will probably error out, likely with an abort and the message `LLVM ERROR: Do not know how to split the result of this operator!`. ### GCInvariantVerifier From 4393f8ccf911617df158227a7fec06a9f8b000b8 Mon Sep 17 00:00:00 2001 From: Daniel Karrasch Date: Fri, 1 Nov 2024 21:23:52 +0100 Subject: [PATCH 351/537] Fix dispatch for `rdiv!` with `LU` (#55764) --- stdlib/LinearAlgebra/src/lu.jl | 2 +- stdlib/LinearAlgebra/src/triangular.jl | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl index d2e82af5d6409..0837ac08e74ea 100644 --- a/stdlib/LinearAlgebra/src/lu.jl +++ b/stdlib/LinearAlgebra/src/lu.jl @@ -786,7 +786,7 @@ function ldiv!(adjA::AdjointFactorization{<:Any,<:LU{T,Tridiagonal{T,V}}}, B::Ab return B end -rdiv!(B::AbstractMatrix, A::LU) = transpose(ldiv!(transpose(A), transpose(B))) +rdiv!(B::AbstractMatrix, A::LU{T,Tridiagonal{T,V}}) where {T,V} = transpose(ldiv!(transpose(A), transpose(B))) # Conversions AbstractMatrix(F::LU) = (F.L * F.U)[invperm(F.p),:] diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index a032041a4116c..31447f1aff5ae 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -1048,16 +1048,16 @@ _trimul!(C::AbstractMatrix, A::AbstractTriangular, B::UpperOrLowerTriangular) = function lmul!(A::AbstractTriangular, B::AbstractVecOrMat) if istriu(A) - _trimul!(B, UpperTriangular(A), B) + _trimul!(B, uppertriangular(A), B) else - _trimul!(B, LowerTriangular(A), B) + _trimul!(B, lowertriangular(A), B) end end function rmul!(A::AbstractMatrix, B::AbstractTriangular) if istriu(B) - _trimul!(A, A, UpperTriangular(B)) + _trimul!(A, A, uppertriangular(B)) else - _trimul!(A, A, LowerTriangular(B)) + _trimul!(A, A, lowertriangular(B)) end end @@ -1097,16 +1097,16 @@ _rdiv!(C::AbstractMatrix, A::AbstractMatrix, B::UpperOrLowerTriangular) = function ldiv!(A::AbstractTriangular, B::AbstractVecOrMat) if istriu(A) - _ldiv!(B, UpperTriangular(A), B) + _ldiv!(B, uppertriangular(A), B) else - _ldiv!(B, LowerTriangular(A), B) + _ldiv!(B, lowertriangular(A), B) end end function rdiv!(A::AbstractMatrix, B::AbstractTriangular) if istriu(B) - _rdiv!(A, A, UpperTriangular(B)) + _rdiv!(A, A, uppertriangular(B)) else - _rdiv!(A, A, LowerTriangular(B)) + _rdiv!(A, A, lowertriangular(B)) end end From 770b1448be9ff532a56b0d3a58589b38fffa1b7b Mon Sep 17 00:00:00 2001 From: James Wrigley Date: Sat, 2 Nov 2024 00:57:14 +0100 Subject: [PATCH 352/537] Remove overwritten method of OffsetArray (#56414) This is overwritten three definitions later in `Base.reshape(A::OffsetArray, inds::Colon)`. Should remove warnings I saw when testing a package that uses it. 
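For context, a tiny self-contained sketch of the kind of duplication being removed here (the function `h` is hypothetical and unrelated to OffsetArrays):

```julia
# Illustrative only: the second definition has the same signature as the
# first, so it overwrites it and Julia reports a "method definition
# overwritten" warning; only one method remains in the method table.
h(A::AbstractArray, ::Colon) = reshape(A, (Colon(),))
h(A::AbstractArray, ::Colon) = reshape(A, (:,))

@assert length(methods(h)) == 1
```

Deleting the earlier, shadowed definition silences the warning without changing behavior, since the later definition was the one in effect anyway.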
--- test/testhelpers/OffsetArrays.jl | 1 - 1 file changed, 1 deletion(-) diff --git a/test/testhelpers/OffsetArrays.jl b/test/testhelpers/OffsetArrays.jl index f8da243da6b63..3463d5a94393d 100644 --- a/test/testhelpers/OffsetArrays.jl +++ b/test/testhelpers/OffsetArrays.jl @@ -560,7 +560,6 @@ Base.reshape(A::OffsetArray, inds::Tuple{Union{Integer,Base.OneTo},Vararg{Union{ Base.reshape(A::OffsetArray, inds::Dims) = _reshape_nov(A, inds) Base.reshape(A::OffsetVector, ::Colon) = A Base.reshape(A::OffsetVector, ::Tuple{Colon}) = A -Base.reshape(A::OffsetArray, ::Colon) = reshape(A, (Colon(),)) Base.reshape(A::OffsetArray, inds::Union{Int,Colon}...) = reshape(A, inds) Base.reshape(A::OffsetArray, inds::Tuple{Vararg{Union{Int,Colon}}}) = _reshape_nov(A, inds) # The following two additional methods for Colon are added to resolve method ambiguities to From c3c3cd1507208e2f9ea1bf6df2835bded1753497 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Fri, 1 Nov 2024 20:36:41 -0400 Subject: [PATCH 353/537] Add a missing GC root in constant declaration (#56408) As pointed out in https://github.com/JuliaLang/julia/pull/56224#discussion_r1816974147. --- src/toplevel.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/toplevel.c b/src/toplevel.c index 017d61bbc8ceb..45143f99a178c 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -742,13 +742,16 @@ static void jl_eval_errorf(jl_module_t *m, const char *filename, int lineno, con JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(jl_binding_t *b, jl_module_t *mod, jl_sym_t *var, jl_value_t *val, enum jl_partition_kind constant_kind) { + JL_GC_PUSH1(&val); jl_binding_partition_t *bpart = jl_get_binding_partition(b, jl_current_task->world_age); jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); int did_warn = 0; while (1) { if (jl_bkind_is_some_constant(decode_restriction_kind(pku))) { - if (!val) + if (!val) { + JL_GC_POP(); return bpart; + } jl_value_t *old = decode_restriction_value(pku); JL_GC_PROMISE_ROOTED(old); if (jl_egal(val, old)) @@ -778,6 +781,7 @@ JL_DLLEXPORT jl_binding_partition_t *jl_declare_constant_val2(jl_binding_t *b, j break; } } + JL_GC_POP(); return bpart; } From 10a1d6f3f54f8eef0b65622d1a0cb867a98c1785 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Fri, 1 Nov 2024 20:37:18 -0400 Subject: [PATCH 354/537] Teach compiler about partitioned bindings (#56299) This commit teaches the compiler to update its world bounds whenever it looks at a binding partition, making the compiler sound in the presence of a partitioned binding. The key adjustment is that the compiler is no longer allowed to directly query the binding table without recording the world bounds, so all the various abstract evaluations that look at bindings need to be adjusted and are no longer pure tfuncs. We used to look at bindings a lot more, but thanks to earlier prep work to remove unnecessary binding-dependent code (#55288, #55289 and #55271), these changes become relatively straightforward. Note that as before, we do not create any binding partitions by default, so this commit is mostly preparatory.
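As a rough illustration of the bookkeeping described above (a standalone sketch with a simplified, hypothetical `WorldRange` type, not the actual `Core.Compiler` API): whenever inference reads a binding partition, the validity window of its result has to be intersected with the window in which that partition applies, so a cached result cannot outlive the binding's current meaning.

```julia
# Standalone sketch: a result that consulted a binding partition is only
# valid in the worlds where both the result and the partition are valid.
struct WorldRange
    min_world::UInt
    max_world::UInt
end

intersect_worlds(a::WorldRange, b::WorldRange) =
    WorldRange(max(a.min_world, b.min_world), min(a.max_world, b.max_world))

result_worlds    = WorldRange(UInt(1), typemax(UInt))  # previously: "valid forever"
partition_worlds = WorldRange(UInt(40), UInt(100))     # partition's validity window

narrowed = intersect_worlds(result_worlds, partition_worlds)
@assert (narrowed.min_world, narrowed.max_world) == (UInt(40), UInt(100))
```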
--------- Co-authored-by: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> --- base/compiler/abstractinterpretation.jl | 365 +++++++++++++++++++++--- base/compiler/cicache.jl | 2 + base/compiler/optimize.jl | 10 +- base/compiler/ssair/inlining.jl | 5 - base/compiler/ssair/ir.jl | 9 +- base/compiler/ssair/legacy.jl | 2 +- base/compiler/ssair/passes.jl | 6 +- base/compiler/ssair/slot2ssa.jl | 2 +- base/compiler/tfuncs.jl | 180 ++---------- base/runtime_internals.jl | 9 +- src/julia_internal.h | 1 - src/module.c | 12 - src/rtutils.c | 1 + stdlib/REPL/src/REPLCompletions.jl | 6 +- test/compiler/inference.jl | 3 - test/rebinding.jl | 12 + 16 files changed, 404 insertions(+), 221 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index e20b74454bb22..f7f7e80a0ebe1 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -2290,6 +2290,191 @@ function abstract_throw_methoderror(interp::AbstractInterpreter, argtypes::Vecto return Future(CallMeta(Union{}, exct, EFFECTS_THROWS, NoCallInfo())) end +const generic_getglobal_effects = Effects(EFFECTS_THROWS, consistent=ALWAYS_FALSE, inaccessiblememonly=ALWAYS_FALSE) +function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, @nospecialize(M), @nospecialize(s)) + ⊑ = partialorder(typeinf_lattice(interp)) + if M isa Const && s isa Const + M, s = M.val, s.val + if M isa Module && s isa Symbol + return CallMeta(abstract_eval_globalref(interp, GlobalRef(M, s), sv), NoCallInfo()) + end + return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) + elseif !hasintersect(widenconst(M), Module) || !hasintersect(widenconst(s), Symbol) + return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) + elseif M ⊑ Module && s ⊑ Symbol + return CallMeta(Any, UndefVarError, generic_getglobal_effects, NoCallInfo()) + end + return CallMeta(Any, Union{UndefVarError, TypeError}, generic_getglobal_effects, NoCallInfo()) +end + +function merge_exct(cm::CallMeta, @nospecialize(exct)) + if exct !== Bottom + cm = CallMeta(cm.rt, Union{cm.exct, exct}, Effects(cm.effects; nothrow=false), cm.info) + end + return cm +end + +function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, @nospecialize(M), @nospecialize(s), @nospecialize(order)) + goe = global_order_exct(order, #=loading=#true, #=storing=#false) + cm = abstract_eval_getglobal(interp, sv, M, s) + return merge_exct(cm, goe) +end + +function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) + if length(argtypes) == 3 + return abstract_eval_getglobal(interp, sv, argtypes[2], argtypes[3]) + elseif length(argtypes) == 4 + return abstract_eval_getglobal(interp, sv, argtypes[2], argtypes[3], argtypes[4]) + elseif !isvarargtype(argtypes[end]) || length(argtypes) > 5 + return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) + else + return CallMeta(Any, Union{ArgumentError, UndefVarError, TypeError, ConcurrencyViolationError}, + generic_getglobal_effects, NoCallInfo()) + end +end + +@nospecs function abstract_eval_get_binding_type(interp::AbstractInterpreter, sv::AbsIntState, M, s) + ⊑ = partialorder(typeinf_lattice(interp)) + if isa(M, Const) && isa(s, Const) + (M, s) = (M.val, s.val) + if !isa(M, Module) || !isa(s, Symbol) + return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) + end + partition = abstract_eval_binding_partition!(interp, GlobalRef(M, s), sv) + + if 
is_some_guard(binding_kind(partition)) + # We do not currently assume an invalidation for guard -> defined transitions + # rt = Const(nothing) + rt = Type + elseif is_some_const_binding(binding_kind(partition)) + rt = Const(Any) + else + rt = Const(partition_restriction(partition)) + end + return CallMeta(rt, Union{}, EFFECTS_TOTAL, NoCallInfo()) + elseif !hasintersect(widenconst(M), Module) || !hasintersect(widenconst(s), Symbol) + return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) + elseif M ⊑ Module && s ⊑ Symbol + return CallMeta(Type, Union{}, EFFECTS_TOTAL, NoCallInfo()) + end + return CallMeta(Type, TypeError, EFFECTS_THROWS, NoCallInfo()) +end + +function abstract_eval_get_binding_type(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) + if length(argtypes) == 3 + return abstract_eval_get_binding_type(interp, sv, argtypes[2], argtypes[3]) + elseif !isvarargtype(argtypes[end]) || length(argtypes) > 4 + return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) + end + return CallMeta(Type, Union{TypeError, ArgumentError}, EFFECTS_THROWS, NoCallInfo()) +end + +const setglobal!_effects = Effects(EFFECTS_TOTAL; effect_free=ALWAYS_FALSE, nothrow=false, inaccessiblememonly=ALWAYS_FALSE) + +function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, @nospecialize(M), @nospecialize(s), @nospecialize(v)) + if isa(M, Const) && isa(s, Const) + M, s = M.val, s.val + if M isa Module && s isa Symbol + exct = global_assignment_exct(interp, sv, GlobalRef(M, s), v) + return CallMeta(v, exct, Effects(setglobal!_effects, nothrow=exct===Bottom), NoCallInfo()) + end + return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) + end + ⊑ = partialorder(typeinf_lattice(interp)) + if !(hasintersect(widenconst(M), Module) && hasintersect(widenconst(s), Symbol)) + return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) + elseif M ⊑ Module && s ⊑ Symbol + return CallMeta(v, ErrorException, setglobal!_effects, NoCallInfo()) + end + return CallMeta(v, Union{TypeError, ErrorException}, setglobal!_effects, NoCallInfo()) +end + +function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, @nospecialize(M), @nospecialize(s), @nospecialize(v), @nospecialize(order)) + goe = global_order_exct(order, #=loading=#false, #=storing=#true) + cm = abstract_eval_setglobal!(interp, sv, M, s, v) + return merge_exct(cm, goe) +end + +function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) + if length(argtypes) == 4 + return abstract_eval_setglobal!(interp, sv, argtypes[2], argtypes[3], argtypes[4]) + elseif length(argtypes) == 5 + return abstract_eval_setglobal!(interp, sv, argtypes[2], argtypes[3], argtypes[4], argtypes[5]) + elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6 + return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) + else + return CallMeta(Any, Union{ArgumentError, TypeError, ErrorException, ConcurrencyViolationError}, setglobal!_effects, NoCallInfo()) + end +end + +function abstract_eval_setglobalonce!(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) + if length(argtypes) in (4, 5, 6) + cm = abstract_eval_setglobal!(interp, sv, argtypes[2], argtypes[3], argtypes[4]) + if length(argtypes) >= 5 + goe = global_order_exct(argtypes[5], #=loading=#true, #=storing=#true) + cm = merge_exct(cm, goe) + end + if length(argtypes) == 6 + goe = global_order_exct(argtypes[6], #=loading=#true, #=storing=#false) + cm = merge_exct(cm, goe) + 
end + return CallMeta(Bool, cm.exct, cm.effects, cm.info) + elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6 + return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) + else + return CallMeta(Bool, Union{ArgumentError, TypeError, ErrorException, ConcurrencyViolationError}, setglobal!_effects, NoCallInfo()) + end +end + +function abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) + if length(argtypes) in (5, 6, 7) + (M, s, x, v) = argtypes[2], argtypes[3], argtypes[4], argtypes[5] + + T = nothing + if isa(M, Const) && isa(s, Const) + M, s = M.val, s.val + if !(M isa Module && s isa Symbol) + return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) + end + partition = abstract_eval_binding_partition!(interp, GlobalRef(M, s), sv) + rte = abstract_eval_partition_load(interp, partition) + if binding_kind(partition) == BINDING_KIND_GLOBAL + T = partition_restriction(partition) + end + exct = Union{rte.exct, global_assignment_binding_exct(partition, v)} + effects = merge_effects(rte.effects, Effects(setglobal!_effects, nothrow=exct===Bottom)) + sg = CallMeta(Any, exct, effects, NoCallInfo()) + else + sg = abstract_eval_setglobal!(interp, sv, M, s, v) + end + if length(argtypes) >= 6 + goe = global_order_exct(argtypes[6], #=loading=#true, #=storing=#true) + sg = merge_exct(sg, goe) + end + if length(argtypes) == 7 + goe = global_order_exct(argtypes[7], #=loading=#true, #=storing=#false) + sg = merge_exct(sg, goe) + end + rt = T === nothing ? + ccall(:jl_apply_cmpswap_type, Any, (Any,), S) where S : + ccall(:jl_apply_cmpswap_type, Any, (Any,), T) + return CallMeta(rt, sg.exct, sg.effects, sg.info) + elseif !isvarargtype(argtypes[end]) || length(argtypes) > 8 + return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) + else + return CallMeta(Any, Union{ArgumentError, TypeError, ErrorException, ConcurrencyViolationError}, setglobal!_effects, NoCallInfo()) + end +end + +function args_are_actually_getglobal(argtypes) + length(argtypes) in (3, 4) || return false + M = argtypes[2] + s = argtypes[3] + isa(M, Const) || return false + isa(s, Const) || return false + return isa(M.val, Module) && isa(s.val, Symbol) +end + # call where the function is known exactly function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), arginfo::ArgInfo, si::StmtInfo, sv::AbsIntState, @@ -2313,6 +2498,33 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), return abstract_throw(interp, argtypes, sv) elseif f === Core.throw_methoderror return abstract_throw_methoderror(interp, argtypes, sv) + elseif f === Core.getglobal + return Future(abstract_eval_getglobal(interp, sv, argtypes)) + elseif f === Core.setglobal! + return Future(abstract_eval_setglobal!(interp, sv, argtypes)) + elseif f === Core.setglobalonce! + return Future(abstract_eval_setglobalonce!(interp, sv, argtypes)) + elseif f === Core.replaceglobal! 
+ return Future(abstract_eval_replaceglobal!(interp, sv, argtypes)) + elseif f === Core.getfield && args_are_actually_getglobal(argtypes) + return Future(abstract_eval_getglobal(interp, sv, argtypes)) + elseif f === Core.isdefined && args_are_actually_getglobal(argtypes) + exct = Bottom + if length(argtypes) == 4 + order = argtypes[4] + exct = global_order_exct(order, true, false) + if !(isa(order, Const) && get_atomic_order(order.val, true, false).x >= MEMORY_ORDER_UNORDERED.x) + exct = Union{exct, ConcurrencyViolationError} + end + end + return Future(merge_exct(CallMeta(abstract_eval_isdefined( + interp, + GlobalRef((argtypes[2]::Const).val, + (argtypes[3]::Const).val), + sv), + NoCallInfo()), exct)) + elseif f === Core.get_binding_type + return Future(abstract_eval_get_binding_type(interp, sv, argtypes)) end rt = abstract_call_builtin(interp, f, arginfo, sv) ft = popfirst!(argtypes) @@ -2652,6 +2864,9 @@ struct RTEffects end end +CallMeta(rte::RTEffects, info::CallInfo) = + CallMeta(rte.rt, rte.exct, rte.effects, info, rte.refinements) + function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, sv::InferenceState) unused = call_result_unused(sv, sv.currpc) if unused @@ -2832,13 +3047,9 @@ function abstract_eval_copyast(interp::AbstractInterpreter, e::Expr, vtypes::Uni return RTEffects(rt, Any, effects) end -function abstract_eval_isdefined(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, +function abstract_eval_isdefined_expr(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, sv::AbsIntState) sym = e.args[1] - rt = Bool - effects = EFFECTS_TOTAL - exct = Union{} - isa(sym, Symbol) && (sym = GlobalRef(frame_module(sv), sym)) if isa(sym, SlotNumber) && vtypes !== nothing vtyp = vtypes[slot_id(sym)] if vtyp.typ === Bottom @@ -2848,11 +3059,22 @@ function abstract_eval_isdefined(interp::AbstractInterpreter, e::Expr, vtypes::U else # form `Conditional` to refine `vtyp.undef` in the then branch rt = Conditional(sym, vtyp.typ, vtyp.typ; isdefined=true) end - elseif isa(sym, GlobalRef) - if InferenceParams(interp).assume_bindings_static - rt = Const(isdefined_globalref(sym)) - elseif isdefinedconst_globalref(sym) + return RTEffects(rt, Union{}, EFFECTS_TOTAL) + end + return abstract_eval_isdefined(interp, sym, sv) +end + +function abstract_eval_isdefined(interp::AbstractInterpreter, @nospecialize(sym), sv::AbsIntState) + rt = Bool + effects = EFFECTS_TOTAL + exct = Union{} + isa(sym, Symbol) && (sym = GlobalRef(frame_module(sv), sym)) + if isa(sym, GlobalRef) + rte = abstract_eval_globalref(interp, sym, sv) + if rte.exct == Union{} rt = Const(true) + elseif rte.rt === Union{} && rte.exct === UndefVarError + rt = Const(false) else effects = Effects(EFFECTS_TOTAL; consistent=ALWAYS_FALSE) end @@ -2936,7 +3158,7 @@ function abstract_eval_statement_expr(interp::AbstractInterpreter, e::Expr, vtyp elseif ehead === :invoke || ehead === :invoke_modify error("type inference data-flow error: tried to double infer a function") elseif ehead === :isdefined - return abstract_eval_isdefined(interp, e, vtypes, sv) + return abstract_eval_isdefined_expr(interp, e, vtypes, sv) elseif ehead === :throw_undef_if_not return abstract_eval_throw_undef_if_not(interp, e, vtypes, sv) elseif ehead === :boundscheck @@ -3041,45 +3263,116 @@ function override_effects(effects::Effects, override::EffectsOverride) nortcall = override.nortcall ? 
true : effects.nortcall) end -isdefined_globalref(g::GlobalRef) = !iszero(ccall(:jl_globalref_boundp, Cint, (Any,), g)) -isdefinedconst_globalref(g::GlobalRef) = isconst(g) && isdefined_globalref(g) +world_range(ir::IRCode) = ir.valid_worlds +world_range(ci::CodeInfo) = WorldRange(ci.min_world, ci.max_world) +world_range(compact::IncrementalCompact) = world_range(compact.ir) -function abstract_eval_globalref_type(g::GlobalRef) - if isdefinedconst_globalref(g) - return Const(ccall(:jl_get_globalref_value, Any, (Any,), g)) +function force_binding_resolution!(g::GlobalRef) + # Force resolution of the binding + # TODO: This will go away once we switch over to fully partitioned semantics + ccall(:jl_globalref_boundp, Cint, (Any,), g) + return nothing +end + +function abstract_eval_globalref_type(g::GlobalRef, src::Union{CodeInfo, IRCode, IncrementalCompact}, retry_after_resolve::Bool=true) + worlds = world_range(src) + partition = lookup_binding_partition(min_world(worlds), g) + partition.max_world < max_world(worlds) && return Any + while is_some_imported(binding_kind(partition)) + imported_binding = partition_restriction(partition)::Core.Binding + partition = lookup_binding_partition(min_world(worlds), imported_binding) + partition.max_world < max_world(worlds) && return Any + end + if is_some_guard(binding_kind(partition)) + if retry_after_resolve + # This method is surprisingly hot. For performance, don't ask the runtime to resolve + # the binding unless necessary - doing so triggers an additional lookup, which though + # not super expensive is hot enough to show up in benchmarks. + force_binding_resolution!(g) + return abstract_eval_globalref_type(g, src, false) + end + # return Union{} + return Any end - ty = ccall(:jl_get_binding_type, Any, (Any, Any), g.mod, g.name) - ty === nothing && return Any - return ty + if is_some_const_binding(binding_kind(partition)) + return Const(partition_restriction(partition)) + end + return partition_restriction(partition) end -abstract_eval_global(M::Module, s::Symbol) = abstract_eval_globalref_type(GlobalRef(M, s)) -function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, sv::AbsIntState) - rt = abstract_eval_globalref_type(g) +function abstract_eval_binding_partition!(interp::AbstractInterpreter, g::GlobalRef, sv::AbsIntState) + force_binding_resolution!(g) + partition = lookup_binding_partition(get_inference_world(interp), g) + update_valid_age!(sv, WorldRange(partition.min_world, partition.max_world)) + + while is_some_imported(binding_kind(partition)) + imported_binding = partition_restriction(partition)::Core.Binding + partition = lookup_binding_partition(get_inference_world(interp), imported_binding) + update_valid_age!(sv, WorldRange(partition.min_world, partition.max_world)) + end + + return partition +end + +function abstract_eval_partition_load(interp::AbstractInterpreter, partition::Core.BindingPartition) consistent = inaccessiblememonly = ALWAYS_FALSE nothrow = false - if isa(rt, Const) - consistent = ALWAYS_TRUE - nothrow = true - if is_mutation_free_argtype(rt) - inaccessiblememonly = ALWAYS_TRUE - end - elseif InferenceParams(interp).assume_bindings_static - consistent = inaccessiblememonly = ALWAYS_TRUE - if isdefined_globalref(g) - nothrow = true + generic_effects = Effects(EFFECTS_TOTAL; consistent, nothrow, inaccessiblememonly) + if is_some_guard(binding_kind(partition)) + if InferenceParams(interp).assume_bindings_static + return RTEffects(Union{}, UndefVarError, EFFECTS_THROWS) else - rt = Union{} + # We do not 
currently assume an invalidation for guard -> defined transitions + # return RTEffects(Union{}, UndefVarError, EFFECTS_THROWS) + return RTEffects(Any, UndefVarError, generic_effects) + end + end + + if is_some_const_binding(binding_kind(partition)) + rt = Const(partition_restriction(partition)) + return RTEffects(rt, Union{}, Effects(EFFECTS_TOTAL, inaccessiblememonly=is_mutation_free_argtype(rt) ? ALWAYS_TRUE : ALWAYS_FALSE)) + end + + rt = partition_restriction(partition) + + if InferenceParams(interp).assume_bindings_static + if isdefined(g, :binding) && isdefined(g.binding, :value) + return RTEffects(rt, Union{}, Effecst(generic_effects, nothrow=true)) end - elseif isdefinedconst_globalref(g) - nothrow = true + # We do not assume in general that assigned global bindings remain assigned. + # The existence of pkgimages allows them to revert in practice. end - return RTEffects(rt, nothrow ? Union{} : UndefVarError, Effects(EFFECTS_TOTAL; consistent, nothrow, inaccessiblememonly)) + + return RTEffects(rt, UndefVarError, generic_effects) +end + +function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, sv::AbsIntState) + partition = abstract_eval_binding_partition!(interp, g, sv) + return abstract_eval_partition_load(interp, partition) +end + +function global_assignment_exct(interp::AbstractInterpreter, sv::AbsIntState, g::GlobalRef, @nospecialize(newty)) + partition = abstract_eval_binding_partition!(interp, g, sv) + return global_assignment_binding_exct(partition, newty) +end + +function global_assignment_binding_exct(partition::Core.BindingPartition, @nospecialize(newty)) + kind = binding_kind(partition) + if is_some_guard(kind) || is_some_const_binding(kind) + return ErrorException + end + + ty = partition_restriction(partition) + if !(widenconst(newty) <: ty) + return TypeError + end + + return Union{} end function handle_global_assignment!(interp::AbstractInterpreter, frame::InferenceState, lhs::GlobalRef, @nospecialize(newty)) effect_free = ALWAYS_FALSE - nothrow = global_assignment_nothrow(lhs.mod, lhs.name, ignorelimited(newty)) + nothrow = global_assignment_exct(interp, frame, lhs, ignorelimited(newty)) === Union{} inaccessiblememonly = ALWAYS_FALSE if !nothrow sub_curr_ssaflag!(frame, IR_FLAG_NOTHROW) diff --git a/base/compiler/cicache.jl b/base/compiler/cicache.jl index bf32e8f12f085..a66d7f9f09650 100644 --- a/base/compiler/cicache.jl +++ b/base/compiler/cicache.jl @@ -31,6 +31,8 @@ WorldRange(r::UnitRange) = WorldRange(first(r), last(r)) first(wr::WorldRange) = wr.min_world last(wr::WorldRange) = wr.max_world in(world::UInt, wr::WorldRange) = wr.min_world <= world <= wr.max_world +min_world(wr::WorldRange) = first(wr) +max_world(wr::WorldRange) = last(wr) function intersect(a::WorldRange, b::WorldRange) ret = WorldRange(max(a.min_world, b.min_world), min(a.max_world, b.max_world)) diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index e8508ade88b6c..aeb3e6849773b 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -307,8 +307,10 @@ function stmt_effect_flags(𝕃ₒ::AbstractLattice, @nospecialize(stmt), @nospe isa(stmt, GotoNode) && return (true, false, true) isa(stmt, GotoIfNot) && return (true, false, ⊑(𝕃ₒ, argextype(stmt.cond, src), Bool)) if isa(stmt, GlobalRef) - nothrow = consistent = isdefinedconst_globalref(stmt) - return (consistent, nothrow, nothrow) + # Modeled more precisely in abstract_eval_globalref. 
In general, if a + # GlobalRef was moved to statement position, it is probably not `const`, + # so we can't say much about it anyway. + return (false, false, false) elseif isa(stmt, Expr) (; head, args) = stmt if head === :static_parameter @@ -444,7 +446,7 @@ function argextype( elseif isa(x, QuoteNode) return Const(x.value) elseif isa(x, GlobalRef) - return abstract_eval_globalref_type(x) + return abstract_eval_globalref_type(x, src) elseif isa(x, PhiNode) || isa(x, PhiCNode) || isa(x, UpsilonNode) return Any elseif isa(x, PiNode) @@ -1277,7 +1279,7 @@ function convert_to_ircode(ci::CodeInfo, sv::OptimizationState) # types of call arguments only once `slot2reg` converts this `IRCode` to the SSA form # and eliminates slots (see below) argtypes = sv.slottypes - return IRCode(stmts, sv.cfg, di, argtypes, meta, sv.sptypes) + return IRCode(stmts, sv.cfg, di, argtypes, meta, sv.sptypes, WorldRange(ci.min_world, ci.max_world)) end function process_meta!(meta::Vector{Expr}, @nospecialize stmt) diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index ae4c04241fa13..8d5ba8353b2c0 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -1694,11 +1694,6 @@ function early_inline_special_case(ir::IRCode, stmt::Expr, flag::UInt32, if has_flag(flag, IR_FLAG_NOTHROW) return SomeCase(quoted(val)) end - elseif f === Core.get_binding_type - length(argtypes) == 3 || return nothing - if get_binding_type_effect_free(argtypes[2], argtypes[3]) - return SomeCase(quoted(val)) - end end end if f === compilerbarrier diff --git a/base/compiler/ssair/ir.jl b/base/compiler/ssair/ir.jl index 90eab43a3f25b..41423a03cc276 100644 --- a/base/compiler/ssair/ir.jl +++ b/base/compiler/ssair/ir.jl @@ -430,14 +430,17 @@ struct IRCode cfg::CFG new_nodes::NewNodeStream meta::Vector{Expr} + valid_worlds::WorldRange - function IRCode(stmts::InstructionStream, cfg::CFG, debuginfo::DebugInfoStream, argtypes::Vector{Any}, meta::Vector{Expr}, sptypes::Vector{VarState}) + function IRCode(stmts::InstructionStream, cfg::CFG, debuginfo::DebugInfoStream, + argtypes::Vector{Any}, meta::Vector{Expr}, sptypes::Vector{VarState}, + valid_worlds=WorldRange(typemin(UInt), typemax(UInt))) return new(stmts, argtypes, sptypes, debuginfo, cfg, NewNodeStream(), meta) end function IRCode(ir::IRCode, stmts::InstructionStream, cfg::CFG, new_nodes::NewNodeStream) di = ir.debuginfo @assert di.codelocs === stmts.line - return new(stmts, ir.argtypes, ir.sptypes, di, cfg, new_nodes, ir.meta) + return new(stmts, ir.argtypes, ir.sptypes, di, cfg, new_nodes, ir.meta, ir.valid_worlds) end global function copy(ir::IRCode) di = ir.debuginfo @@ -445,7 +448,7 @@ struct IRCode di = copy(di) di.edges = copy(di.edges) di.codelocs = stmts.line - return new(stmts, copy(ir.argtypes), copy(ir.sptypes), di, copy(ir.cfg), copy(ir.new_nodes), copy(ir.meta)) + return new(stmts, copy(ir.argtypes), copy(ir.sptypes), di, copy(ir.cfg), copy(ir.new_nodes), copy(ir.meta), ir.valid_worlds) end end diff --git a/base/compiler/ssair/legacy.jl b/base/compiler/ssair/legacy.jl index 2b0721b8d2408..675ca2dea9b32 100644 --- a/base/compiler/ssair/legacy.jl +++ b/base/compiler/ssair/legacy.jl @@ -44,7 +44,7 @@ function inflate_ir!(ci::CodeInfo, sptypes::Vector{VarState}, argtypes::Vector{A di = DebugInfoStream(nothing, ci.debuginfo, nstmts) stmts = InstructionStream(code, ssavaluetypes, info, di.codelocs, ci.ssaflags) meta = Expr[] - return IRCode(stmts, cfg, di, argtypes, meta, sptypes) + return IRCode(stmts, cfg, di, argtypes, meta, 
sptypes, WorldRange(ci.min_world, ci.max_world)) end """ diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl index bfe33c23871fe..b483c307a2f5e 100644 --- a/base/compiler/ssair/passes.jl +++ b/base/compiler/ssair/passes.jl @@ -474,9 +474,9 @@ function lift_leaves(compact::IncrementalCompact, field::Int, elseif isa(leaf, QuoteNode) leaf = leaf.value elseif isa(leaf, GlobalRef) - mod, name = leaf.mod, leaf.name - if isdefined(mod, name) && isconst(mod, name) - leaf = getglobal(mod, name) + typ = argextype(leaf, compact) + if isa(typ, Const) + leaf = typ.val else return nothing end diff --git a/base/compiler/ssair/slot2ssa.jl b/base/compiler/ssair/slot2ssa.jl index 2eacdf0f56cfe..6fc87934d3bc5 100644 --- a/base/compiler/ssair/slot2ssa.jl +++ b/base/compiler/ssair/slot2ssa.jl @@ -176,7 +176,7 @@ function typ_for_val(@nospecialize(x), ci::CodeInfo, ir::IRCode, idx::Int, slott end return (ci.ssavaluetypes::Vector{Any})[idx] end - isa(x, GlobalRef) && return abstract_eval_globalref_type(x) + isa(x, GlobalRef) && return abstract_eval_globalref_type(x, ci) isa(x, SSAValue) && return (ci.ssavaluetypes::Vector{Any})[x.id] isa(x, Argument) && return slottypes[x.n] isa(x, NewSSAValue) && return types(ir)[new_to_regular(x, length(ir.stmts))] diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index 80e252dde3a02..aaa1354fd5e54 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -407,10 +407,7 @@ end if isa(a1, DataType) && !isabstracttype(a1) if a1 === Module hasintersect(widenconst(sym), Symbol) || return Bottom - if isa(sym, Const) && isa(sym.val, Symbol) && isa(arg1, Const) && - isdefinedconst_globalref(GlobalRef(arg1.val::Module, sym.val::Symbol)) - return Const(true) - end + # isa(sym, Const) case intercepted in abstract interpretation elseif isa(sym, Const) val = sym.val if isa(val, Symbol) @@ -1160,7 +1157,9 @@ end if isa(sv, Module) setfield && return Bottom if isa(nv, Symbol) - return abstract_eval_global(sv, nv) + # In ordinary inference, this case is intercepted early and + # re-routed to `getglobal`. + return Any end return Bottom end @@ -1407,8 +1406,9 @@ end elseif ff === Core.modifyglobal! o = unwrapva(argtypes[2]) f = unwrapva(argtypes[3]) - RT = modifyglobal!_tfunc(𝕃ᵢ, o, f, Any, Any, Symbol) - TF = getglobal_tfunc(𝕃ᵢ, o, f, Symbol) + GT = abstract_eval_get_binding_type(interp, sv, o, f).rt + RT = isa(GT, Const) ? Pair{GT.val, GT.val} : Pair + TF = isa(GT, Const) ? GT.val : Any elseif ff === Core.memoryrefmodify! o = unwrapva(argtypes[2]) RT = memoryrefmodify!_tfunc(𝕃ᵢ, o, Any, Any, Symbol, Bool) @@ -2277,20 +2277,6 @@ function _builtin_nothrow(𝕃::AbstractLattice, @nospecialize(f::Builtin), argt elseif f === typeassert na == 2 || return false return typeassert_nothrow(𝕃, argtypes[1], argtypes[2]) - elseif f === getglobal - if na == 2 - return getglobal_nothrow(argtypes[1], argtypes[2]) - elseif na == 3 - return getglobal_nothrow(argtypes[1], argtypes[2], argtypes[3]) - end - return false - elseif f === setglobal! 
- if na == 3 - return setglobal!_nothrow(argtypes[1], argtypes[2], argtypes[3]) - elseif na == 4 - return setglobal!_nothrow(argtypes[1], argtypes[2], argtypes[3], argtypes[4]) - end - return false elseif f === Core.get_binding_type na == 2 || return false return get_binding_type_nothrow(𝕃, argtypes[1], argtypes[2]) @@ -2473,7 +2459,8 @@ function getfield_effects(𝕃::AbstractLattice, argtypes::Vector{Any}, @nospeci end end if hasintersect(widenconst(obj), Module) - inaccessiblememonly = getglobal_effects(argtypes, rt).inaccessiblememonly + # Modeled more precisely in abstract_eval_getglobal + inaccessiblememonly = ALWAYS_FALSE elseif is_mutation_free_argtype(obj) inaccessiblememonly = ALWAYS_TRUE else @@ -2482,24 +2469,7 @@ function getfield_effects(𝕃::AbstractLattice, argtypes::Vector{Any}, @nospeci return Effects(EFFECTS_TOTAL; consistent, nothrow, inaccessiblememonly, noub) end -function getglobal_effects(argtypes::Vector{Any}, @nospecialize(rt)) - 2 ≤ length(argtypes) ≤ 3 || return EFFECTS_THROWS - consistent = inaccessiblememonly = ALWAYS_FALSE - nothrow = false - M, s = argtypes[1], argtypes[2] - if (length(argtypes) == 3 ? getglobal_nothrow(M, s, argtypes[3]) : getglobal_nothrow(M, s)) - nothrow = true - # typeasserts below are already checked in `getglobal_nothrow` - Mval, sval = (M::Const).val::Module, (s::Const).val::Symbol - if isconst(Mval, sval) - consistent = ALWAYS_TRUE - if is_mutation_free_argtype(rt) - inaccessiblememonly = ALWAYS_TRUE - end - end - end - return Effects(EFFECTS_TOTAL; consistent, nothrow, inaccessiblememonly) -end + """ builtin_effects(𝕃::AbstractLattice, f::Builtin, argtypes::Vector{Any}, rt) -> Effects @@ -2525,11 +2495,13 @@ function builtin_effects(𝕃::AbstractLattice, @nospecialize(f::Builtin), argty if f === isdefined return isdefined_effects(𝕃, argtypes) elseif f === getglobal - return getglobal_effects(argtypes, rt) + 2 ≤ length(argtypes) ≤ 3 || return EFFECTS_THROWS + # Modeled more precisely in abstract_eval_getglobal + return Effects(EFFECTS_TOTAL; consistent=ALWAYS_FALSE, nothrow=false, inaccessiblememonly=ALWAYS_FALSE) elseif f === Core.get_binding_type length(argtypes) == 2 || return EFFECTS_THROWS - effect_free = get_binding_type_effect_free(argtypes[1], argtypes[2]) ? ALWAYS_TRUE : ALWAYS_FALSE - return Effects(EFFECTS_TOTAL; effect_free) + # Modeled more precisely in abstract_eval_get_binding_type + return Effects(EFFECTS_TOTAL; effect_free=ALWAYS_FALSE) elseif f === compilerbarrier length(argtypes) == 2 || return Effects(EFFECTS_THROWS; consistent=ALWAYS_FALSE) setting = argtypes[1] @@ -3065,118 +3037,28 @@ function typename_static(@nospecialize(t)) return isType(t) ? 
_typename(t.parameters[1]) : Core.TypeName end -function global_order_nothrow(@nospecialize(o), loading::Bool, storing::Bool) - o isa Const || return false +function global_order_exct(@nospecialize(o), loading::Bool, storing::Bool) + if !(o isa Const) + if o === Symbol + return ConcurrencyViolationError + elseif !hasintersect(o, Symbol) + return TypeError + else + return Union{ConcurrencyViolationError, TypeError} + end + end sym = o.val if sym isa Symbol order = get_atomic_order(sym, loading, storing) - return order !== MEMORY_ORDER_INVALID && order !== MEMORY_ORDER_NOTATOMIC - end - return false -end -@nospecs function getglobal_nothrow(M, s, o) - global_order_nothrow(o, #=loading=#true, #=storing=#false) || return false - return getglobal_nothrow(M, s) -end -@nospecs function getglobal_nothrow(M, s) - if M isa Const && s isa Const - M, s = M.val, s.val - if M isa Module && s isa Symbol - return isdefinedconst_globalref(GlobalRef(M, s)) - end - end - return false -end -@nospecs function getglobal_tfunc(𝕃::AbstractLattice, M, s, order=Symbol) - if M isa Const && s isa Const - M, s = M.val, s.val - if M isa Module && s isa Symbol - return abstract_eval_global(M, s) - end - return Bottom - elseif !(hasintersect(widenconst(M), Module) && hasintersect(widenconst(s), Symbol)) - return Bottom - end - T = get_binding_type_tfunc(𝕃, M, s) - T isa Const && return T.val - return Any -end -@nospecs function setglobal!_tfunc(𝕃::AbstractLattice, M, s, v, order=Symbol) - if !(hasintersect(widenconst(M), Module) && hasintersect(widenconst(s), Symbol)) - return Bottom - end - return v -end -@nospecs function swapglobal!_tfunc(𝕃::AbstractLattice, M, s, v, order=Symbol) - setglobal!_tfunc(𝕃, M, s, v) === Bottom && return Bottom - return getglobal_tfunc(𝕃, M, s) -end -@nospecs function modifyglobal!_tfunc(𝕃::AbstractLattice, M, s, op, v, order=Symbol) - T = get_binding_type_tfunc(𝕃, M, s) - T === Bottom && return Bottom - T isa Const || return Pair - T = T.val - return Pair{T, T} -end -@nospecs function replaceglobal!_tfunc(𝕃::AbstractLattice, M, s, x, v, success_order=Symbol, failure_order=Symbol) - v = setglobal!_tfunc(𝕃, M, s, v) - v === Bottom && return Bottom - T = get_binding_type_tfunc(𝕃, M, s) - T === Bottom && return Bottom - T isa Const || return ccall(:jl_apply_cmpswap_type, Any, (Any,), T) where T - T = T.val - return ccall(:jl_apply_cmpswap_type, Any, (Any,), T) -end -@nospecs function setglobalonce!_tfunc(𝕃::AbstractLattice, M, s, v, success_order=Symbol, failure_order=Symbol) - setglobal!_tfunc(𝕃, M, s, v) === Bottom && return Bottom - return Bool -end - -add_tfunc(Core.getglobal, 2, 3, getglobal_tfunc, 1) -add_tfunc(Core.setglobal!, 3, 4, setglobal!_tfunc, 3) -add_tfunc(Core.swapglobal!, 3, 4, swapglobal!_tfunc, 3) -add_tfunc(Core.modifyglobal!, 4, 5, modifyglobal!_tfunc, 3) -add_tfunc(Core.replaceglobal!, 4, 6, replaceglobal!_tfunc, 3) -add_tfunc(Core.setglobalonce!, 3, 5, setglobalonce!_tfunc, 3) - -@nospecs function setglobal!_nothrow(M, s, newty, o) - global_order_nothrow(o, #=loading=#false, #=storing=#true) || return false - return setglobal!_nothrow(M, s, newty) -end -@nospecs function setglobal!_nothrow(M, s, newty) - if M isa Const && s isa Const - M, s = M.val, s.val - if isa(M, Module) && isa(s, Symbol) - return global_assignment_nothrow(M, s, newty) - end - end - return false -end - -function global_assignment_nothrow(M::Module, s::Symbol, @nospecialize(newty)) - if !isconst(M, s) - ty = ccall(:jl_get_binding_type, Any, (Any, Any), M, s) - return ty isa Type && widenconst(newty) 
<: ty - end - return false -end - -@nospecs function get_binding_type_effect_free(M, s) - if M isa Const && s isa Const - M, s = M.val, s.val - if M isa Module && s isa Symbol - return ccall(:jl_get_binding_type, Any, (Any, Any), M, s) !== nothing + if order !== MEMORY_ORDER_INVALID && order !== MEMORY_ORDER_NOTATOMIC + return Union{} + else + return ConcurrencyViolationError end + else + return TypeError end - return false -end -@nospecs function get_binding_type_tfunc(𝕃::AbstractLattice, M, s) - if get_binding_type_effect_free(M, s) - return Const(Core.get_binding_type((M::Const).val::Module, (s::Const).val::Symbol)) - end - return Type end -add_tfunc(Core.get_binding_type, 2, 2, get_binding_type_tfunc, 0) @nospecs function get_binding_type_nothrow(𝕃::AbstractLattice, M, s) ⊑ = partialorder(𝕃) diff --git a/base/runtime_internals.jl b/base/runtime_internals.jl index dd526a24d6494..1da58af38d545 100644 --- a/base/runtime_internals.jl +++ b/base/runtime_internals.jl @@ -230,13 +230,20 @@ const BINDING_KIND_DECLARED = 0x7 const BINDING_KIND_GUARD = 0x8 is_some_const_binding(kind::UInt8) = (kind == BINDING_KIND_CONST || kind == BINDING_KIND_CONST_IMPORT) +is_some_imported(kind::UInt8) = (kind == BINDING_KIND_IMPLICIT || kind == BINDING_KIND_EXPLICIT || kind == BINDING_KIND_IMPORTED) +is_some_guard(kind::UInt8) = (kind == BINDING_KIND_GUARD || kind == BINDING_KIND_DECLARED || kind == BINDING_KIND_FAILED) function lookup_binding_partition(world::UInt, b::Core.Binding) ccall(:jl_get_binding_partition, Ref{Core.BindingPartition}, (Any, UInt), b, world) end function lookup_binding_partition(world::UInt, gr::Core.GlobalRef) - ccall(:jl_get_globalref_partition, Ref{Core.BindingPartition}, (Any, UInt), gr, world) + if isdefined(gr, :binding) + b = gr.binding + else + b = ccall(:jl_get_module_binding, Ref{Core.Binding}, (Any, Any, Cint), gr.mod, gr.name, true) + end + return lookup_binding_partition(world, b) end partition_restriction(bpart::Core.BindingPartition) = ccall(:jl_bpart_get_restriction_value, Any, (Any,), bpart) diff --git a/src/julia_internal.h b/src/julia_internal.h index 9a8750bbc2500..f3959490855c8 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -956,7 +956,6 @@ STATIC_INLINE int jl_bkind_is_some_guard(enum jl_partition_kind kind) JL_NOTSAFE } JL_DLLEXPORT jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b JL_PROPAGATES_ROOT, size_t world); -JL_DLLEXPORT jl_binding_partition_t *jl_get_globalref_partition(jl_globalref_t *gr JL_PROPAGATES_ROOT, size_t world); EXTERN_INLINE_DECLARE uint8_t jl_bpart_get_kind(jl_binding_partition_t *bpart) JL_NOTSAFEPOINT { return decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)); diff --git a/src/module.c b/src/module.c index bdacd487e978d..1655c781111b0 100644 --- a/src/module.c +++ b/src/module.c @@ -60,18 +60,6 @@ jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) } } -JL_DLLEXPORT jl_binding_partition_t *jl_get_globalref_partition(jl_globalref_t *gr, size_t world) -{ - if (!gr) - return NULL; - jl_binding_t *b = NULL; - if (gr) - b = gr->binding; - if (!b) - b = jl_get_module_binding(gr->mod, gr->name, 0); - return jl_get_binding_partition(b, world); -} - JL_DLLEXPORT jl_module_t *jl_new_module_(jl_sym_t *name, jl_module_t *parent, uint8_t default_names) { jl_task_t *ct = jl_current_task; diff --git a/src/rtutils.c b/src/rtutils.c index faa087dcb077d..7b04fbca5d032 100644 --- a/src/rtutils.c +++ b/src/rtutils.c @@ -288,6 +288,7 @@ JL_DLLEXPORT void 
jl_eh_restore_state(jl_task_t *ct, jl_handler_t *eh) if (!old_gc_state || !eh->gc_state) // it was or is unsafe now jl_gc_safepoint_(ptls); jl_value_t *exception = ptls->sig_exception; + JL_GC_PROMISE_ROOTED(exception); if (exception) { int8_t oldstate = jl_gc_unsafe_enter(ptls); /* The temporary ptls->bt_data is rooted by special purpose code in the diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index 67191a024da73..23f3337ab5e8e 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -624,11 +624,13 @@ function is_call_graph_uncached(sv::CC.InferenceState) return is_call_graph_uncached(parent::CC.InferenceState) end +isdefined_globalref(g::GlobalRef) = !iszero(ccall(:jl_globalref_boundp, Cint, (Any,), g)) + # aggressive global binding resolution within `repl_frame` function CC.abstract_eval_globalref(interp::REPLInterpreter, g::GlobalRef, sv::CC.InferenceState) if (interp.limit_aggressive_inference ? is_repl_frame(sv) : is_call_graph_uncached(sv)) - if CC.isdefined_globalref(g) + if isdefined_globalref(g) return CC.RTEffects(Const(ccall(:jl_get_globalref_value, Any, (Any,), g)), Union{}, CC.EFFECTS_TOTAL) end return CC.RTEffects(Union{}, UndefVarError, CC.EFFECTS_THROWS) @@ -655,7 +657,7 @@ function CC.builtin_tfunction(interp::REPLInterpreter, @nospecialize(f), a1val, a2val = a1.val, a2.val if isa(a1val, Module) && isa(a2val, Symbol) g = GlobalRef(a1val, a2val) - if CC.isdefined_globalref(g) + if isdefined_globalref(g) return Const(ccall(:jl_get_globalref_value, Any, (Any,), g)) end return Union{} diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index 2fc7e917186f4..9fafc9bdca6ad 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -1184,9 +1184,6 @@ let isdefined_tfunc(@nospecialize xs...) = @test isdefined_tfunc(ComplexF32, Const(0)) === Const(false) @test isdefined_tfunc(SometimesDefined, Const(:x)) == Bool @test isdefined_tfunc(SometimesDefined, Const(:y)) === Const(false) - @test isdefined_tfunc(Const(Base), Const(:length)) === Const(true) - @test isdefined_tfunc(Const(Base), Symbol) == Bool - @test isdefined_tfunc(Const(Base), Const(:NotCurrentlyDefinedButWhoKnows)) == Bool @test isdefined_tfunc(Core.SimpleVector, Const(1)) === Const(false) @test Const(false) ⊑ isdefined_tfunc(Const(:x), Symbol) @test Const(false) ⊑ isdefined_tfunc(Const(:x), Const(:y)) diff --git a/test/rebinding.jl b/test/rebinding.jl index 564be70e44913..c93c34be7a75c 100644 --- a/test/rebinding.jl +++ b/test/rebinding.jl @@ -3,6 +3,8 @@ module Rebinding using Test + make_foo() = Foo(1) + @test Base.binding_kind(@__MODULE__, :Foo) == Base.BINDING_KIND_GUARD struct Foo x::Int @@ -17,6 +19,16 @@ module Rebinding @test Base.binding_kind(@__MODULE__, :Foo) == Base.BINDING_KIND_GUARD @test contains(repr(x), "@world") + struct Foo + x::Int + end + @test Foo != typeof(x) + + # This tests that the compiler uses the correct world, but does not test + # invalidation. 
+ @test typeof(Base.invoke_in_world(defined_world_age, make_foo)) == typeof(x) + @test typeof(make_foo()) == Foo + # Tests for @world syntax @test Base.@world(Foo, defined_world_age) == typeof(x) @test Base.@world(Rebinding.Foo, defined_world_age) == typeof(x) From 9d1cedb2379111a01eab53b811fbf76a1af53bd2 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Fri, 1 Nov 2024 22:49:37 -0400 Subject: [PATCH 355/537] Restore JL_NOTSAFEPOINT in jl_stderr_obj (#56407) This is not a function we're really using, but it's used in the embedding examples, so I'm sure somebody would complain if I deleted it or made it a safepoint, so let's just give the same best-effort result as before. --- src/julia.h | 1 + src/module.c | 22 ++++++++++++++++++++++ src/rtutils.c | 2 +- 3 files changed, 24 insertions(+), 1 deletion(-) diff --git a/src/julia.h b/src/julia.h index bfb641d38374b..5b9986a5e68ee 100644 --- a/src/julia.h +++ b/src/julia.h @@ -1854,6 +1854,7 @@ JL_DLLEXPORT jl_sym_t *jl_tagged_gensym(const char *str, size_t len); JL_DLLEXPORT jl_sym_t *jl_get_root_symbol(void); JL_DLLEXPORT jl_value_t *jl_get_binding_value(jl_binding_t *b JL_PROPAGATES_ROOT); JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_const(jl_binding_t *b JL_PROPAGATES_ROOT); +JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved_and_const(jl_binding_t *b JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_value_t *jl_declare_const_gf(jl_binding_t *b, jl_module_t *mod, jl_sym_t *name); JL_DLLEXPORT jl_method_t *jl_method_def(jl_svec_t *argdata, jl_methtable_t *mt, jl_code_info_t *f, jl_module_t *module); diff --git a/src/module.c b/src/module.c index 1655c781111b0..08ad0d64dbf55 100644 --- a/src/module.c +++ b/src/module.c @@ -360,6 +360,28 @@ JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved_and_const(jl_binding_t return decode_restriction_value(pku); } +JL_DLLEXPORT jl_value_t *jl_get_binding_value_if_resolved(jl_binding_t *b) +{ + // Unlike jl_get_binding_value this doesn't try to allocate new binding partitions if they + // don't already exist, making this JL_NOTSAFEPOINT. + if (!b) + return NULL; + jl_binding_partition_t *bpart = jl_atomic_load_relaxed(&b->partitions); + if (!bpart) + return NULL; + size_t max_world = jl_atomic_load_relaxed(&bpart->max_world); + if (bpart->min_world > jl_current_task->world_age || jl_current_task->world_age > max_world) + return NULL; + jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); + if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) + return NULL; + if (jl_bkind_is_some_import(decode_restriction_kind(pku))) + return NULL; + if (jl_bkind_is_some_constant(decode_restriction_kind(pku))) + return decode_restriction_value(pku); + return jl_atomic_load_relaxed(&b->value); +} + JL_DLLEXPORT jl_value_t *jl_bpart_get_restriction_value(jl_binding_partition_t *bpart) { jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); diff --git a/src/rtutils.c b/src/rtutils.c index 7b04fbca5d032..7e1fb576008f6 100644 --- a/src/rtutils.c +++ b/src/rtutils.c @@ -567,7 +567,7 @@ JL_DLLEXPORT jl_value_t *jl_stderr_obj(void) JL_NOTSAFEPOINT if (jl_base_module == NULL) return NULL; jl_binding_t *stderr_obj = jl_get_module_binding(jl_base_module, jl_symbol("stderr"), 0); - return stderr_obj ? jl_get_binding_value(stderr_obj) : NULL; + return stderr_obj ? 
jl_get_binding_value_if_resolved(stderr_obj) : NULL; } // toys for debugging --------------------------------------------------------- From 715eb1dd51ee60cec30425affe5d14941a60d0ac Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Sat, 2 Nov 2024 15:27:10 +0900 Subject: [PATCH 356/537] reland "Inlining: Remove outdated code path for GlobalRef movement (#46880)" (#56382) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit From the description of the original PR: > We used to not allow `GlobalRef` in `PhiNode` at all (because they > could have side effects). However, we then change the IR to make > side-effecting `GlobalRef`s illegal in statement position in general, > so now `PhiNode`s values are just regular value position, so there's > no reason any more to try to move `GlobalRef`s out to statement > position in inlining. Moreover, doing so introduces a bunch of > unnecessary `GlobalRef`s that weren't being moved back. We could fix > that separately by setting appropriate flags, but it's simpler to just > get rid of this special case entirely. This change itself does not sound to have any issues, and in fact, it is very useful for keeping the IR slim, especially in code generated by Cassette-like systems, so I would like to reland it. However, the original PR was reverted in JuliaLang/julia#46951 due to bugs like JuliaLang/julia#46940 and JuliaLang/julia#46943. I could not reproduce these bugs on my end (maybe they have been fixed on some GC-side fixes?), so I believe relanding the original PR’s changes would not cause any issues, but it is necessary to confirm that similar problems do not arise before merging this PR. --- base/compiler/ssair/inlining.jl | 12 ------------ test/compiler/EscapeAnalysis/EscapeAnalysis.jl | 4 ++-- test/compiler/inline.jl | 7 +++++++ 3 files changed, 9 insertions(+), 14 deletions(-) diff --git a/base/compiler/ssair/inlining.jl b/base/compiler/ssair/inlining.jl index 8d5ba8353b2c0..98be475520f01 100644 --- a/base/compiler/ssair/inlining.jl +++ b/base/compiler/ssair/inlining.jl @@ -665,18 +665,6 @@ function batch_inline!(ir::IRCode, todo::Vector{Pair{Int,Any}}, propagate_inboun compact.active_result_bb -= 1 refinish = true end - # It is possible for GlobalRefs and Exprs to be in argument position - # at this point in the IR, though in that case they are required - # to be effect-free. However, we must still move them out of argument - # position, since `Argument` is allowed in PhiNodes, but `GlobalRef` - # and `Expr` are not, so a substitution could anger the verifier. 
- for aidx in 1:length(argexprs) - aexpr = argexprs[aidx] - if isa(aexpr, Expr) || isa(aexpr, GlobalRef) - ninst = removable_if_unused(NewInstruction(aexpr, argextype(aexpr, compact), compact.result[idx][:line])) - argexprs[aidx] = insert_node_here!(compact, ninst) - end - end if isa(item, InliningTodo) compact.ssa_rename[old_idx] = ir_inline_item!(compact, idx, argexprs, item, boundscheck, state.todo_bbs) elseif isa(item, UnionSplit) diff --git a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl b/test/compiler/EscapeAnalysis/EscapeAnalysis.jl index 9afe49c01562d..4799fe4cee5ca 100644 --- a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl +++ b/test/compiler/EscapeAnalysis/EscapeAnalysis.jl @@ -671,8 +671,8 @@ end @test has_all_escape(result.state[Argument(3)]) # b end let result = @eval EATModule() begin - const Rx = SafeRef{String}("Rx") - $code_escapes((String,)) do s + const Rx = SafeRef(Ref("")) + $code_escapes((Base.RefValue{String},)) do s Rx[] = s Core.sizeof(Rx[]) end diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl index 53f7adc2a2a77..416f3873c5422 100644 --- a/test/compiler/inline.jl +++ b/test/compiler/inline.jl @@ -2282,3 +2282,10 @@ let;Base.Experimental.@force_compile f_EA_finalizer(42000) @test foreign_buffer_checker.finalized end + +# Test that inlining doesn't unnecessarily move things to statement position +@noinline f_noinline_invoke(x::Union{Symbol,Nothing}=nothing) = Core.donotdelete(x) +g_noinline_invoke(x) = f_noinline_invoke(x) +let src = code_typed1(g_noinline_invoke, (Union{Symbol,Nothing},)) + @test !any(@nospecialize(x)->isa(x,GlobalRef), src.code) +end From 671fc4244a800561e870bf3aa4933063287d801b Mon Sep 17 00:00:00 2001 From: Mason Protter Date: Sat, 2 Nov 2024 04:58:08 -0700 Subject: [PATCH 357/537] copy effects key to `Base.infer_effects` (#56363) Copied from the docstring of `Core.Compiler.Effects`, this makes it easier to figure out what the output of `Base.infer_effects` is actually telling you. --- base/compiler/effects.jl | 75 +++++++++++++++++++++------------------- base/reflection.jl | 2 ++ 2 files changed, 42 insertions(+), 35 deletions(-) diff --git a/base/compiler/effects.jl b/base/compiler/effects.jl index 3d9b69360b317..a2e7e3dde603d 100644 --- a/base/compiler/effects.jl +++ b/base/compiler/effects.jl @@ -1,3 +1,42 @@ +const effects_key_string = """ +## Key for `show` output of Effects: + +The output represents the state of different effect properties in the following order: + +1. `consistent` (`c`): + - `+c` (green): `ALWAYS_TRUE` + - `-c` (red): `ALWAYS_FALSE` + - `?c` (yellow): `CONSISTENT_IF_NOTRETURNED` and/or `CONSISTENT_IF_INACCESSIBLEMEMONLY` +2. `effect_free` (`e`): + - `+e` (green): `ALWAYS_TRUE` + - `-e` (red): `ALWAYS_FALSE` + - `?e` (yellow): `EFFECT_FREE_IF_INACCESSIBLEMEMONLY` +3. `nothrow` (`n`): + - `+n` (green): `true` + - `-n` (red): `false` +4. `terminates` (`t`): + - `+t` (green): `true` + - `-t` (red): `false` +5. `notaskstate` (`s`): + - `+s` (green): `true` + - `-s` (red): `false` +6. `inaccessiblememonly` (`m`): + - `+m` (green): `ALWAYS_TRUE` + - `-m` (red): `ALWAYS_FALSE` + - `?m` (yellow): `INACCESSIBLEMEM_OR_ARGMEMONLY` +7. `noub` (`u`): + - `+u` (green): `true` + - `-u` (red): `false` + - `?u` (yellow): `NOUB_IF_NOINBOUNDS` +8. `:nonoverlayed` (`o`): + - `+o` (green): `ALWAYS_TRUE` + - `-o` (red): `ALWAYS_FALSE` + - `?o` (yellow): `CONSISTENT_OVERLAY` +9. 
`:nortcall` (`r`): + - `+r` (green): `true` + - `-r` (red): `false` +""" + """ effects::Effects @@ -74,42 +113,8 @@ initialized with `ALWAYS_TRUE`/`true` and then transitioned towards `ALWAYS_FALS Note that within the current flow-insensitive analysis design, effects detected by local analysis on each statement usually taint the global conclusion conservatively. -## Key for `show` output of Effects: -The output represents the state of different effect properties in the following order: - -1. `consistent` (`c`): - - `+c` (green): `ALWAYS_TRUE` - - `-c` (red): `ALWAYS_FALSE` - - `?c` (yellow): `CONSISTENT_IF_NOTRETURNED` and/or `CONSISTENT_IF_INACCESSIBLEMEMONLY` -2. `effect_free` (`e`): - - `+e` (green): `ALWAYS_TRUE` - - `-e` (red): `ALWAYS_FALSE` - - `?e` (yellow): `EFFECT_FREE_IF_INACCESSIBLEMEMONLY` -3. `nothrow` (`n`): - - `+n` (green): `true` - - `-n` (red): `false` -4. `terminates` (`t`): - - `+t` (green): `true` - - `-t` (red): `false` -5. `notaskstate` (`s`): - - `+s` (green): `true` - - `-s` (red): `false` -6. `inaccessiblememonly` (`m`): - - `+m` (green): `ALWAYS_TRUE` - - `-m` (red): `ALWAYS_FALSE` - - `?m` (yellow): `INACCESSIBLEMEM_OR_ARGMEMONLY` -7. `noub` (`u`): - - `+u` (green): `true` - - `-u` (red): `false` - - `?u` (yellow): `NOUB_IF_NOINBOUNDS` -8. `:nonoverlayed` (`o`): - - `+o` (green): `ALWAYS_TRUE` - - `-o` (red): `ALWAYS_FALSE` - - `?o` (yellow): `CONSISTENT_OVERLAY` -9. `:nortcall` (`r`): - - `+r` (green): `true` - - `-r` (red): `false` +$(effects_key_string) """ struct Effects consistent::UInt8 diff --git a/base/reflection.jl b/base/reflection.jl index 8fe8d324eb792..f2a554e0f27c5 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -877,6 +877,8 @@ signature, the `:nothrow` bit gets tainted. The `Base.infer_effects` function should not be used from generated functions; doing so will result in an error. +$(Core.Compiler.effects_key_string) + # See Also - [`Core.Compiler.Effects`](@ref): A type representing the computational effects of a method call. - [`Base.@assume_effects`](@ref): A macro for making assumptions about the effects of a method. From fe67097b0d277bce02186443b80c53f852b3e878 Mon Sep 17 00:00:00 2001 From: Zentrik Date: Sat, 2 Nov 2024 14:48:02 +0000 Subject: [PATCH 358/537] Fix `make install` for asan build (#56347) Now the makescript finds libclang_rt.asan-x86_64.so for example. The change from `-0` to `-1` is as with `-1`, `libclang_rt.asan-*` is searched for in `usr/lib/julia` instead of `usr/lib`. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index d1e5b31f85b1c..2cac2f8818324 100644 --- a/Makefile +++ b/Makefile @@ -255,9 +255,9 @@ JL_PRIVATE_LIBS-$(USE_SYSTEM_CSL) += libpthread endif ifeq ($(SANITIZE),1) ifeq ($(USECLANG),1) -JL_PRIVATE_LIBS-1 += libclang_rt.asan +JL_PRIVATE_LIBS-0 += libclang_rt.asan-* else -JL_PRIVATE_LIBS-1 += libasan +JL_PRIVATE_LIBS-0 += libasan endif endif From 9a77240f6f81b9d5999d40fdf5e2b6bb84e36783 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sun, 3 Nov 2024 15:40:05 +0530 Subject: [PATCH 359/537] Add dims check to triangular mul (#56393) This adds a dimension check to triangular matrix multiplication methods. While such checks already exist in the individual branches (occasionally within `BLAS` methods), having these earlier would permit certain optimizations, as we are assured that the axes are compatible. This potentially duplicates the checks, but this is unlikely to be a concern given how cheap the checks are. 
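For concreteness, a minimal illustration of the behaviour described above (not taken from this patch or its tests): with the size check hoisted to the entry of the method, an incompatible `mul!` involving a triangular argument is rejected up front with a `DimensionMismatch`.

```julia
# Minimal sketch using only the public LinearAlgebra API; the concrete
# sizes are arbitrary and chosen to be incompatible.
using LinearAlgebra

U = UpperTriangular(rand(3, 3))
B = rand(4, 2)                   # 3×3 * 4×2 cannot be formed
C = zeros(3, 2)

try
    mul!(C, U, B)
catch err
    @assert err isa DimensionMismatch   # now expected to fail at the up-front check
end
```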
I've also reused the `check_A_mul_B!_sizes` function that is defined in `bidiag.jl`, instead of hard-coding the checks. Further, I've replaced some hard-coded loop ranges by the corresponding `axes` and `first/lastindex` calls. These are identical under the 1-based indexing assumption, but the `axes` variants are easier to read and reason about. --- stdlib/LinearAlgebra/src/triangular.jl | 523 ++++++++++++------------- 1 file changed, 244 insertions(+), 279 deletions(-) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 31447f1aff5ae..4fed45b009fff 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -160,7 +160,7 @@ function imag(A::UnitLowerTriangular) L = LowerTriangular(A.data) Lim = similar(L) # must be mutable to set diagonals to zero Lim .= imag.(L) - for i in 1:size(Lim,1) + for i in axes(Lim,1) Lim[i,i] = zero(Lim[i,i]) end return Lim @@ -169,7 +169,7 @@ function imag(A::UnitUpperTriangular) U = UpperTriangular(A.data) Uim = similar(U) # must be mutable to set diagonals to zero Uim .= imag.(U) - for i in 1:size(Uim,1) + for i in axes(Uim,1) Uim[i,i] = zero(Uim[i,i]) end return Uim @@ -200,7 +200,7 @@ end function full!(A::UnitLowerTriangular) B = A.data tril!(B) - for i = 1:size(A,1) + for i in axes(A,1) B[i,i] = oneunit(eltype(B)) end B @@ -213,7 +213,7 @@ end function full!(A::UnitUpperTriangular) B = A.data triu!(B) - for i = 1:size(A,1) + for i in axes(A,1) B[i,i] = oneunit(eltype(B)) end B @@ -350,9 +350,8 @@ Base.@constprop :aggressive function istril(A::LowerTriangular, k::Integer=0) end @inline function _istril(A::LowerTriangular, k) P = parent(A) - m = size(A, 1) - for j in max(1, k + 2):m - all(iszero, view(P, j:min(j - k - 1, m), j)) || return false + for j in max(firstindex(P,2), k + 2):lastindex(P,2) + all(iszero, @view(P[j:min(j - k - 1, end), j])) || return false end return true end @@ -363,8 +362,8 @@ end @inline function _istriu(A::UpperTriangular, k) P = parent(A) m = size(A, 1) - for j in 1:min(m, m + k - 1) - all(iszero, view(P, max(1, j - k + 1):j, j)) || return false + for j in firstindex(P,2):min(m + k - 1, lastindex(P,2)) + all(iszero, @view(P[max(begin, j - k + 1):j, j])) || return false end return true end @@ -374,12 +373,11 @@ istriu(A::Adjoint, k::Integer=0) = istril(A.parent, -k) istriu(A::Transpose, k::Integer=0) = istril(A.parent, -k) function tril!(A::UpperTriangular{T}, k::Integer=0) where {T} - n = size(A,1) if k < 0 fill!(A.data, zero(T)) return A elseif k == 0 - for j in 1:n, i in 1:j-1 + for j in axes(A.data,2), i in intersect(axes(A.data,1), 1:j-1) A.data[i,j] = zero(T) end return A @@ -388,9 +386,8 @@ function tril!(A::UpperTriangular{T}, k::Integer=0) where {T} end end function triu!(A::UpperTriangular, k::Integer=0) - n = size(A,1) if k > 0 - for j in 1:n, i in max(1,j-k+1):j + for j in axes(A.data,2), i in intersect(axes(A.data,1), range(stop=j, length=k)) A.data[i,j] = zero(eltype(A)) end end @@ -398,7 +395,6 @@ function triu!(A::UpperTriangular, k::Integer=0) end function tril!(A::UnitUpperTriangular{T}, k::Integer=0) where {T} - n = size(A,1) if k < 0 fill!(A.data, zero(T)) return UpperTriangular(A.data) @@ -417,19 +413,18 @@ function tril!(A::UnitUpperTriangular{T}, k::Integer=0) where {T} end function triu!(A::UnitUpperTriangular, k::Integer=0) - for i in diagind(A) + for i in diagind(A.data) A.data[i] = oneunit(eltype(A)) end return triu!(UpperTriangular(A.data), k) end function triu!(A::LowerTriangular{T}, k::Integer=0) where {T} - n = 
size(A,1) if k > 0 fill!(A.data, zero(T)) return A elseif k == 0 - for j in 1:n, i in j+1:n + for j in axes(A.data,2), i in j+1:lastindex(A.data,1) A.data[i,j] = zero(T) end return A @@ -439,9 +434,8 @@ function triu!(A::LowerTriangular{T}, k::Integer=0) where {T} end function tril!(A::LowerTriangular, k::Integer=0) - n = size(A,1) if k < 0 - for j in 1:n, i in j:min(j-k-1,n) + for j in axes(A.data,2), i in intersect(range(j, length=-k), axes(A.data,1)) A.data[i, j] = zero(eltype(A)) end end @@ -449,7 +443,6 @@ function tril!(A::LowerTriangular, k::Integer=0) end function triu!(A::UnitLowerTriangular{T}, k::Integer=0) where T - n = size(A,1) if k > 0 fill!(A.data, zero(T)) return LowerTriangular(A.data) @@ -468,7 +461,7 @@ function triu!(A::UnitLowerTriangular{T}, k::Integer=0) where T end function tril!(A::UnitLowerTriangular, k::Integer=0) - for i in diagind(A) + for i in diagind(A.data) A.data[i] = oneunit(eltype(A)) end return tril!(LowerTriangular(A.data), k) @@ -502,7 +495,7 @@ function -(A::UnitLowerTriangular) Adata = A.data Anew = similar(Adata) # must be mutable, even if Adata is not @. Anew = -Adata - for i = 1:size(A, 1) + for i in axes(A, 1) Anew[i, i] = -A[i, i] end LowerTriangular(Anew) @@ -511,7 +504,7 @@ function -(A::UnitUpperTriangular) Adata = A.data Anew = similar(Adata) # must be mutable, even if Adata is not @. Anew = -Adata - for i = 1:size(A, 1) + for i in axes(A, 1) Anew[i, i] = -A[i, i] end UpperTriangular(Anew) @@ -555,28 +548,30 @@ for (T, UT) in ((:UpperTriangular, :UnitUpperTriangular), (:LowerTriangular, :Un end @inline function _copyto!(A::UpperOrUnitUpperTriangular, B::UnitUpperTriangular) @boundscheck checkbounds(A, axes(B)...) - n = size(B,1) B2 = Base.unalias(A, B) - for j = 1:n - for i = 1:j-1 - @inbounds parent(A)[i,j] = parent(B2)[i,j] + Ap = parent(A) + B2p = parent(B2) + for j in axes(B2,2) + for i in firstindex(Ap,1):j-1 + @inbounds Ap[i,j] = B2p[i,j] end if A isa UpperTriangular # copy diagonal - @inbounds parent(A)[j,j] = B2[j,j] + @inbounds Ap[j,j] = B2[j,j] end end return A end @inline function _copyto!(A::LowerOrUnitLowerTriangular, B::UnitLowerTriangular) @boundscheck checkbounds(A, axes(B)...) - n = size(B,1) B2 = Base.unalias(A, B) - for j = 1:n + Ap = parent(A) + B2p = parent(B2) + for j in axes(B2,2) if A isa LowerTriangular # copy diagonal - @inbounds parent(A)[j,j] = B2[j,j] + @inbounds Ap[j,j] = B2[j,j] end - for i = j+1:n - @inbounds parent(A)[i,j] = parent(B2)[i,j] + for i in j+1:lastindex(Ap,1) + @inbounds Ap[i,j] = B2p[i,j] end end return A @@ -611,10 +606,10 @@ end @boundscheck checkbounds(dest, axes(U)...) isunit = U isa UnitUpperTriangular for col in axes(dest,2) - for row in 1:col-isunit + for row in firstindex(dest,1):col-isunit @inbounds dest[row,col] = U.data[row,col] end - for row in col+!isunit:size(U,1) + for row in col+!isunit:lastindex(dest,1) @inbounds dest[row,col] = U[row,col] end end @@ -624,10 +619,10 @@ end @boundscheck checkbounds(dest, axes(L)...) 
isunit = L isa UnitLowerTriangular for col in axes(dest,2) - for row in 1:col-!isunit + for row in firstindex(dest,1):col-!isunit @inbounds dest[row,col] = L[row,col] end - for row in col+isunit:size(L,1) + for row in col+isunit:lastindex(dest,1) @inbounds dest[row,col] = L.data[row,col] end end @@ -646,84 +641,84 @@ function checksize1(A, B) end function _triscale!(A::UpperTriangular, B::UpperTriangular, c::Number, _add) - n = checksize1(A, B) + checksize1(A, B) iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j = 1:n - for i = 1:j + for j in axes(B.data,2) + for i in firstindex(B.data,1):j @inbounds _modify!(_add, B.data[i,j] * c, A.data, (i,j)) end end return A end function _triscale!(A::UpperTriangular, c::Number, B::UpperTriangular, _add) - n = checksize1(A, B) + checksize1(A, B) iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j = 1:n - for i = 1:j + for j in axes(B.data,2) + for i in firstindex(B.data,1):j @inbounds _modify!(_add, c * B.data[i,j], A.data, (i,j)) end end return A end function _triscale!(A::UpperOrUnitUpperTriangular, B::UnitUpperTriangular, c::Number, _add) - n = checksize1(A, B) + checksize1(A, B) iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j = 1:n + for j in axes(B.data,2) @inbounds _modify!(_add, c, A, (j,j)) - for i = 1:(j - 1) + for i in firstindex(B.data,1):(j - 1) @inbounds _modify!(_add, B.data[i,j] * c, A.data, (i,j)) end end return A end function _triscale!(A::UpperOrUnitUpperTriangular, c::Number, B::UnitUpperTriangular, _add) - n = checksize1(A, B) + checksize1(A, B) iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j = 1:n + for j in axes(B.data,2) @inbounds _modify!(_add, c, A, (j,j)) - for i = 1:(j - 1) + for i in firstindex(B.data,1):(j - 1) @inbounds _modify!(_add, c * B.data[i,j], A.data, (i,j)) end end return A end function _triscale!(A::LowerTriangular, B::LowerTriangular, c::Number, _add) - n = checksize1(A, B) + checksize1(A, B) iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j = 1:n - for i = j:n + for j in axes(B.data,2) + for i in j:lastindex(B.data,1) @inbounds _modify!(_add, B.data[i,j] * c, A.data, (i,j)) end end return A end function _triscale!(A::LowerTriangular, c::Number, B::LowerTriangular, _add) - n = checksize1(A, B) + checksize1(A, B) iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j = 1:n - for i = j:n + for j in axes(B.data,2) + for i in j:lastindex(B.data,1) @inbounds _modify!(_add, c * B.data[i,j], A.data, (i,j)) end end return A end function _triscale!(A::LowerOrUnitLowerTriangular, B::UnitLowerTriangular, c::Number, _add) - n = checksize1(A, B) + checksize1(A, B) iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j = 1:n + for j in axes(B.data,2) @inbounds _modify!(_add, c, A, (j,j)) - for i = (j + 1):n + for i in (j + 1):lastindex(B.data,1) @inbounds _modify!(_add, B.data[i,j] * c, A.data, (i,j)) end end return A end function _triscale!(A::LowerOrUnitLowerTriangular, c::Number, B::UnitLowerTriangular, _add) - n = checksize1(A, B) + checksize1(A, B) iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j = 1:n + for j in axes(B.data,2) @inbounds _modify!(_add, c, A, (j,j)) - for i = (j + 1):n + for i in (j + 1):lastindex(B.data,1) @inbounds _modify!(_add, c * B.data[i,j], A.data, (i,j)) end end @@ -731,36 +726,36 @@ function _triscale!(A::LowerOrUnitLowerTriangular, c::Number, B::UnitLowerTriang end function _trirdiv!(A::UpperTriangular, B::UpperOrUnitUpperTriangular, c::Number) - n = checksize1(A, B) - for j 
in 1:n - for i in 1:j + checksize1(A, B) + for j in axes(B,2) + for i in firstindex(B,1):j @inbounds A[i, j] = B[i, j] / c end end return A end function _trirdiv!(A::LowerTriangular, B::LowerOrUnitLowerTriangular, c::Number) - n = checksize1(A, B) - for j in 1:n - for i in j:n + checksize1(A, B) + for j in axes(B,2) + for i in j:lastindex(B,1) @inbounds A[i, j] = B[i, j] / c end end return A end function _trildiv!(A::UpperTriangular, c::Number, B::UpperOrUnitUpperTriangular) - n = checksize1(A, B) - for j in 1:n - for i in 1:j + checksize1(A, B) + for j in axes(B,2) + for i in firstindex(B,1):j @inbounds A[i, j] = c \ B[i, j] end end return A end function _trildiv!(A::LowerTriangular, c::Number, B::LowerOrUnitLowerTriangular) - n = checksize1(A, B) - for j in 1:n - for i in j:n + checksize1(A, B) + for j in axes(B,2) + for i in j:lastindex(B,1) @inbounds A[i, j] = c \ B[i, j] end end @@ -779,7 +774,7 @@ function dot(x::AbstractVector, A::UpperTriangular, y::AbstractVector) end x₁ = x[1] r = dot(x₁, A[1,1], y[1]) - @inbounds for j in 2:m + @inbounds for j in axes(A, 2)[2:end] yj = y[j] if !iszero(yj) temp = adjoint(A[1,j]) * x₁ @@ -800,7 +795,7 @@ function dot(x::AbstractVector, A::UnitUpperTriangular, y::AbstractVector) end x₁ = first(x) r = dot(x₁, y[1]) - @inbounds for j in 2:m + @inbounds for j in axes(A, 2)[2:end] yj = y[j] if !iszero(yj) temp = adjoint(A[1,j]) * x₁ @@ -821,11 +816,11 @@ function dot(x::AbstractVector, A::LowerTriangular, y::AbstractVector) return dot(zero(eltype(x)), zero(eltype(A)), zero(eltype(y))) end r = zero(typeof(dot(first(x), first(A), first(y)))) - @inbounds for j in 1:m + @inbounds for j in axes(A, 2) yj = y[j] if !iszero(yj) temp = adjoint(A[j,j]) * x[j] - @simd for i in j+1:m + @simd for i in j+1:lastindex(A,1) temp += adjoint(A[i,j]) * x[i] end r += dot(temp, yj) @@ -841,11 +836,11 @@ function dot(x::AbstractVector, A::UnitLowerTriangular, y::AbstractVector) return dot(zero(eltype(x)), zero(eltype(A)), zero(eltype(y))) end r = zero(typeof(dot(first(x), first(y)))) - @inbounds for j in 1:m + @inbounds for j in axes(A, 2) yj = y[j] if !iszero(yj) temp = x[j] - @simd for i in j+1:m + @simd for i in j+1:lastindex(A,1) temp += adjoint(A[i,j]) * x[i] end r += dot(temp, yj) @@ -952,25 +947,24 @@ function kron!(C::LowerTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, A::LowerTr end function _triukron!(C, A, B) - n_A = size(A, 1) n_B = size(B, 1) - @inbounds for j = 1:n_A + @inbounds for j in axes(A,2) jnB = (j - 1) * n_B - for i = 1:(j-1) + for i in firstindex(A,1):(j-1) Aij = A[i, j] inB = (i - 1) * n_B - for l = 1:n_B - for k = 1:l + for l in axes(B,2) + for k in firstindex(B,1):l C[inB+k, jnB+l] = Aij * B[k, l] end - for k = 1:(l-1) + for k in firstindex(B,1):(l-1) C[inB+l, jnB+k] = zero(C[inB+k, jnB+l]) end end end Ajj = A[j, j] - for l = 1:n_B - for k = 1:l + for l in axes(B,2) + for k in firstindex(B,1):l C[jnB+k, jnB+l] = Ajj * B[k, l] end end @@ -980,22 +974,22 @@ end function _trilkron!(C, A, B) n_A = size(A, 1) n_B = size(B, 1) - @inbounds for j = 1:n_A + @inbounds for j in axes(A,2) jnB = (j - 1) * n_B Ajj = A[j, j] - for l = 1:n_B - for k = l:n_B + for l in axes(B,2) + for k in l:lastindex(B,1) C[jnB+k, jnB+l] = Ajj * B[k, l] end end - for i = (j+1):n_A + for i in (j+1):n_A Aij = A[i, j] inB = (i - 1) * n_B - for l = 1:n_B - for k = l:n_B + for l in axes(B,2) + for k in l:lastindex(B,1) C[inB+k, jnB+l] = Aij * B[k, l] end - for k = (l+1):n_B + for k in (l+1):lastindex(B,1) C[inB+l, jnB+k] = zero(C[inB+k, jnB+l]) end end @@ -1063,6 +1057,7 @@ end for TC 
in (:AbstractVector, :AbstractMatrix) @eval @inline function _mul!(C::$TC, A::AbstractTriangular, B::AbstractVector, alpha::Number, beta::Number) + check_A_mul_B!_sizes(size(C), size(A), size(B)) if isone(alpha) && iszero(beta) return _trimul!(C, A, B) else @@ -1075,6 +1070,7 @@ for (TA, TB) in ((:AbstractTriangular, :AbstractMatrix), (:AbstractTriangular, :AbstractTriangular) ) @eval @inline function _mul!(C::AbstractMatrix, A::$TA, B::$TB, alpha::Number, beta::Number) + check_A_mul_B!_sizes(size(C), size(A), size(B)) if isone(alpha) && iszero(beta) return _trimul!(C, A, B) else @@ -1198,7 +1194,7 @@ function eigvecs(A::UpperTriangular{<:BlasFloat,<:StridedMatrix}) LAPACK.trevc!('R', 'A', BlasInt[], triu!(A.data)) end function eigvecs(A::UnitUpperTriangular{<:BlasFloat,<:StridedMatrix}) - for i = 1:size(A, 1) + for i in axes(A, 1) A.data[i,i] = 1 end LAPACK.trevc!('R', 'A', BlasInt[], triu!(A.data)) @@ -1207,7 +1203,7 @@ function eigvecs(A::LowerTriangular{<:BlasFloat,<:StridedMatrix}) LAPACK.trevc!('L', 'A', BlasInt[], copy(tril!(A.data)')) end function eigvecs(A::UnitLowerTriangular{<:BlasFloat,<:StridedMatrix}) - for i = 1:size(A, 1) + for i in axes(A, 1) A.data[i,i] = 1 end LAPACK.trevc!('L', 'A', BlasInt[], copy(tril!(A.data)')) @@ -1230,7 +1226,7 @@ for (t, unitt) in ((UpperTriangular, UnitUpperTriangular), function (*)(A::$unitt, x::Number) B = $t(A.data)*x - for i = 1:size(A, 1) + for i in axes(A, 1) B.data[i,i] = x end return B @@ -1245,7 +1241,7 @@ for (t, unitt) in ((UpperTriangular, UnitUpperTriangular), function (*)(x::Number, A::$unitt) B = x*$t(A.data) - for i = 1:size(A, 1) + for i in axes(A, 1) B.data[i,i] = x end return B @@ -1261,7 +1257,7 @@ for (t, unitt) in ((UpperTriangular, UnitUpperTriangular), function (/)(A::$unitt, x::Number) B = $t(A.data)/x invx = inv(x) - for i = 1:size(A, 1) + for i in axes(A, 1) B.data[i,i] = invx end return B @@ -1277,7 +1273,7 @@ for (t, unitt) in ((UpperTriangular, UnitUpperTriangular), function (\)(x::Number, A::$unitt) B = x\$t(A.data) invx = inv(x) - for i = 1:size(A, 1) + for i in axes(A, 1) B.data[i,i] = invx end return B @@ -1288,33 +1284,25 @@ end ## Generic triangular multiplication function generic_trimatmul!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractVecOrMat) require_one_based_indexing(C, A, B) - m, n = size(B, 1), size(B, 2) - N = size(A, 1) - if m != N - throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $(size(A,1)), has size $m")) - end - mc, nc = size(C, 1), size(C, 2) - if mc != N || nc != n - throw(DimensionMismatch(lazy"output has dimensions ($mc,$nc), should have ($N,$n)")) - end + check_A_mul_B!_sizes(size(C), size(A), size(B)) oA = oneunit(eltype(A)) unit = isunitc == 'U' @inbounds if uploc == 'U' if tfun === identity - for j in 1:n - for i in 1:m + for j in axes(B,2) + for i in axes(B,1) Cij = (unit ? oA : A[i,i]) * B[i,j] - for k in i + 1:m + for k in i + 1:lastindex(B,1) Cij += A[i,k] * B[k,j] end C[i,j] = Cij end end else # tfun in (transpose, adjoint) - for j in 1:n - for i in m:-1:1 + for j in axes(B,2) + for i in reverse(axes(B,1)) Cij = (unit ? oA : tfun(A[i,i])) * B[i,j] - for k in 1:i - 1 + for k in firstindex(B,1):i - 1 Cij += tfun(A[k,i]) * B[k,j] end C[i,j] = Cij @@ -1323,20 +1311,20 @@ function generic_trimatmul!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, end else # uploc == 'L' if tfun === identity - for j in 1:n - for i in m:-1:1 + for j in axes(B,2) + for i in reverse(axes(B,1)) Cij = (unit ? 
oA : A[i,i]) * B[i,j] - for k in 1:i - 1 + for k in firstindex(B,1):i - 1 Cij += A[i,k] * B[k,j] end C[i,j] = Cij end end else # tfun in (transpose, adjoint) - for j in 1:n - for i in 1:m + for j in axes(B,2) + for i in axes(B,1) Cij = (unit ? oA : tfun(A[i,i])) * B[i,j] - for k in i + 1:m + for k in i + 1:lastindex(B,1) Cij += tfun(A[k,i]) * B[k,j] end C[i,j] = Cij @@ -1348,34 +1336,26 @@ function generic_trimatmul!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, end # conjugate cases function generic_trimatmul!(C::AbstractVecOrMat, uploc, isunitc, ::Function, xA::AdjOrTrans, B::AbstractVecOrMat) + require_one_based_indexing(C, xA, B) + check_A_mul_B!_sizes(size(C), size(xA), size(B)) A = parent(xA) - require_one_based_indexing(C, A, B) - m, n = size(B, 1), size(B, 2) - N = size(A, 1) - if m != N - throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $(size(A,1)), has size $m")) - end - mc, nc = size(C, 1), size(C, 2) - if mc != N || nc != n - throw(DimensionMismatch(lazy"output has dimensions ($mc,$nc), should have ($N,$n)")) - end oA = oneunit(eltype(A)) unit = isunitc == 'U' @inbounds if uploc == 'U' - for j in 1:n - for i in 1:m + for j in axes(B,2) + for i in axes(B,1) Cij = (unit ? oA : conj(A[i,i])) * B[i,j] - for k in i + 1:m + for k in i + 1:lastindex(B,1) Cij += conj(A[i,k]) * B[k,j] end C[i,j] = Cij end end else # uploc == 'L' - for j in 1:n - for i in m:-1:1 + for j in axes(B,2) + for i in reverse(axes(B,1)) Cij = (unit ? oA : conj(A[i,i])) * B[i,j] - for k in 1:i - 1 + for k in firstindex(B,1):i - 1 Cij += conj(A[i,k]) * B[k,j] end C[i,j] = Cij @@ -1387,33 +1367,25 @@ end function generic_mattrimul!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractMatrix) require_one_based_indexing(C, A, B) - m, n = size(A, 1), size(A, 2) - N = size(B, 1) - if n != N - throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $n, has size $N")) - end - mc, nc = size(C, 1), size(C, 2) - if mc != m || nc != N - throw(DimensionMismatch(lazy"output has dimensions ($mc,$nc), should have ($m,$N)")) - end + check_A_mul_B!_sizes(size(C), size(A), size(B)) oB = oneunit(eltype(B)) unit = isunitc == 'U' @inbounds if uploc == 'U' if tfun === identity - for i in 1:m - for j in n:-1:1 + for i in axes(A,1) + for j in reverse(axes(A,2)) Cij = A[i,j] * (unit ? oB : B[j,j]) - for k in 1:j - 1 + for k in firstindex(A,2):j - 1 Cij += A[i,k] * B[k,j] end C[i,j] = Cij end end else # tfun in (transpose, adjoint) - for i in 1:m - for j in 1:n + for i in axes(A,1) + for j in axes(A,2) Cij = A[i,j] * (unit ? oB : tfun(B[j,j])) - for k in j + 1:n + for k in j + 1:lastindex(A,2) Cij += A[i,k] * tfun(B[j,k]) end C[i,j] = Cij @@ -1422,20 +1394,20 @@ function generic_mattrimul!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A end else # uploc == 'L' if tfun === identity - for i in 1:m - for j in 1:n + for i in axes(A,1) + for j in axes(A,2) Cij = A[i,j] * (unit ? oB : B[j,j]) - for k in j + 1:n + for k in j + 1:lastindex(A,2) Cij += A[i,k] * B[k,j] end C[i,j] = Cij end end else # tfun in (transpose, adjoint) - for i in 1:m - for j in n:-1:1 + for i in axes(A,1) + for j in reverse(axes(A,2)) Cij = A[i,j] * (unit ? 
oB : tfun(B[j,j])) - for k in 1:j - 1 + for k in firstindex(A,2):j - 1 Cij += A[i,k] * tfun(B[j,k]) end C[i,j] = Cij @@ -1447,34 +1419,26 @@ function generic_mattrimul!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A end # conjugate cases function generic_mattrimul!(C::AbstractMatrix, uploc, isunitc, ::Function, A::AbstractMatrix, xB::AdjOrTrans) + require_one_based_indexing(C, A, xB) + check_A_mul_B!_sizes(size(C), size(A), size(xB)) B = parent(xB) - require_one_based_indexing(C, A, B) - m, n = size(A, 1), size(A, 2) - N = size(B, 1) - if n != N - throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $n, has size $N")) - end - mc, nc = size(C, 1), size(C, 2) - if mc != m || nc != N - throw(DimensionMismatch(lazy"output has dimensions ($mc,$nc), should have ($m,$N)")) - end oB = oneunit(eltype(B)) unit = isunitc == 'U' @inbounds if uploc == 'U' - for i in 1:m - for j in n:-1:1 + for i in axes(A,1) + for j in reverse(axes(A,2)) Cij = A[i,j] * (unit ? oB : conj(B[j,j])) - for k in 1:j - 1 + for k in firstindex(A,2):j - 1 Cij += A[i,k] * conj(B[k,j]) end C[i,j] = Cij end end else # uploc == 'L' - for i in 1:m - for j in 1:n + for i in axes(A,1) + for j in axes(A,2) Cij = A[i,j] * (unit ? oB : conj(B[j,j])) - for k in j + 1:n + for k in j + 1:lastindex(A,2) Cij += A[i,k] * conj(B[k,j]) end C[i,j] = Cij @@ -1498,7 +1462,7 @@ end function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractVecOrMat) require_one_based_indexing(C, A, B) mA, nA = size(A) - m, n = size(B, 1), size(B,2) + m = size(B, 1) if nA != m throw(DimensionMismatch(lazy"second dimension of left hand side A, $nA, and first dimension of right hand side B, $m, must be equal")) end @@ -1510,30 +1474,30 @@ function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, @inbounds if uploc == 'U' if isunitc == 'N' if tfun === identity - for k in 1:n + for k in axes(B,2) amm = A[m,m] iszero(amm) && throw(SingularException(m)) Cm = C[m,k] = amm \ B[m,k] # fill C-column - for i in m-1:-1:1 + for i in reverse(axes(B,1))[2:end] C[i,k] = oA \ B[i,k] - _ustrip(A[i,m]) * Cm end - for j in m-1:-1:1 + for j in reverse(axes(B,1))[2:end] ajj = A[j,j] iszero(ajj) && throw(SingularException(j)) Cj = C[j,k] = _ustrip(ajj) \ C[j,k] - for i in j-1:-1:1 + for i in j-1:-1:firstindex(B,1) C[i,k] -= _ustrip(A[i,j]) * Cj end end end else # tfun in (adjoint, transpose) - for k in 1:n - for j in 1:m + for k in axes(B,2) + for j in axes(B,1) ajj = A[j,j] iszero(ajj) && throw(SingularException(j)) Bj = B[j,k] - for i in 1:j-1 + for i in firstindex(A,1):j-1 Bj -= tfun(A[i,j]) * C[i,k] end C[j,k] = tfun(ajj) \ Bj @@ -1542,24 +1506,24 @@ function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, end else # isunitc == 'U' if tfun === identity - for k in 1:n + for k in axes(B,2) Cm = C[m,k] = oA \ B[m,k] # fill C-column - for i in m-1:-1:1 + for i in reverse(axes(B,1))[2:end] C[i,k] = oA \ B[i,k] - _ustrip(A[i,m]) * Cm end - for j in m-1:-1:1 + for j in reverse(axes(B,1))[2:end] Cj = C[j,k] - for i in 1:j-1 + for i in firstindex(A,1):j-1 C[i,k] -= _ustrip(A[i,j]) * Cj end end end else # tfun in (adjoint, transpose) - for k in 1:n - for j in 1:m + for k in axes(B,2) + for j in axes(B,1) Bj = B[j,k] - for i in 1:j-1 + for i in firstindex(A,1):j-1 Bj -= tfun(A[i,j]) * C[i,k] end C[j,k] = oA \ Bj @@ -1570,30 +1534,30 @@ function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, else # uploc == 'L' if isunitc == 'N' if tfun === 
identity - for k in 1:n + for k in axes(B,2) a11 = A[1,1] iszero(a11) && throw(SingularException(1)) C1 = C[1,k] = a11 \ B[1,k] # fill C-column - for i in 2:m + for i in axes(B,1)[2:end] C[i,k] = oA \ B[i,k] - _ustrip(A[i,1]) * C1 end - for j in 2:m + for j in axes(B,1)[2:end] ajj = A[j,j] iszero(ajj) && throw(SingularException(j)) Cj = C[j,k] = _ustrip(ajj) \ C[j,k] - for i in j+1:m + for i in j+1:lastindex(A,1) C[i,k] -= _ustrip(A[i,j]) * Cj end end end else # tfun in (adjoint, transpose) - for k in 1:n - for j in m:-1:1 + for k in axes(B,2) + for j in reverse(axes(B,1)) ajj = A[j,j] iszero(ajj) && throw(SingularException(j)) Bj = B[j,k] - for i in j+1:m + for i in j+1:lastindex(A,1) Bj -= tfun(A[i,j]) * C[i,k] end C[j,k] = tfun(ajj) \ Bj @@ -1602,24 +1566,24 @@ function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, end else # isunitc == 'U' if tfun === identity - for k in 1:n + for k in axes(B,2) C1 = C[1,k] = oA \ B[1,k] # fill C-column - for i in 2:m + for i in axes(B,1)[2:end] C[i,k] = oA \ B[i,k] - _ustrip(A[i,1]) * C1 end - for j in 2:m + for j in axes(B,1)[2:end] Cj = C[j,k] - for i in j+1:m + for i in j+1:lastindex(A,1) C[i,k] -= _ustrip(A[i,j]) * Cj end end end else # tfun in (adjoint, transpose) - for k in 1:n - for j in m:-1:1 + for k in axes(B,2) + for j in reverse(axes(B,1)) Bj = B[j,k] - for i in j+1:m + for i in j+1:lastindex(A,1) Bj -= tfun(A[i,j]) * C[i,k] end C[j,k] = oA \ Bj @@ -1635,7 +1599,7 @@ function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, ::Function, xA: A = parent(xA) require_one_based_indexing(C, A, B) mA, nA = size(A) - m, n = size(B, 1), size(B,2) + m = size(B, 1) if nA != m throw(DimensionMismatch(lazy"second dimension of left hand side A, $nA, and first dimension of right hand side B, $m, must be equal")) end @@ -1646,33 +1610,33 @@ function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, ::Function, xA: oA = oneunit(eltype(A)) @inbounds if uploc == 'U' if isunitc == 'N' - for k in 1:n + for k in axes(B,2) amm = conj(A[m,m]) iszero(amm) && throw(SingularException(m)) Cm = C[m,k] = amm \ B[m,k] # fill C-column - for i in m-1:-1:1 + for i in reverse(axes(B,1))[2:end] C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,m])) * Cm end - for j in m-1:-1:1 + for j in reverse(axes(B,1))[2:end] ajj = conj(A[j,j]) iszero(ajj) && throw(SingularException(j)) Cj = C[j,k] = _ustrip(ajj) \ C[j,k] - for i in j-1:-1:1 + for i in j-1:-1:firstindex(A,1) C[i,k] -= _ustrip(conj(A[i,j])) * Cj end end end else # isunitc == 'U' - for k in 1:n + for k in axes(B,2) Cm = C[m,k] = oA \ B[m,k] # fill C-column - for i in m-1:-1:1 + for i in reverse(axes(B,1))[2:end] C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,m])) * Cm end - for j in m-1:-1:1 + for j in reverse(axes(B,1))[2:end] Cj = C[j,k] - for i in 1:j-1 + for i in firstindex(A,1):j-1 C[i,k] -= _ustrip(conj(A[i,j])) * Cj end end @@ -1680,33 +1644,33 @@ function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, ::Function, xA: end else # uploc == 'L' if isunitc == 'N' - for k in 1:n + for k in axes(B,2) a11 = conj(A[1,1]) iszero(a11) && throw(SingularException(1)) C1 = C[1,k] = a11 \ B[1,k] # fill C-column - for i in 2:m + for i in axes(B,1)[2:end] C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,1])) * C1 end - for j in 2:m + for j in axes(A,2)[2:end] ajj = conj(A[j,j]) iszero(ajj) && throw(SingularException(j)) Cj = C[j,k] = _ustrip(ajj) \ C[j,k] - for i in j+1:m + for i in j+1:lastindex(A,1) C[i,k] -= _ustrip(conj(A[i,j])) * Cj end end end else # isunitc == 'U' - for k in 1:n + for k in axes(B,2) C1 = 
C[1,k] = oA \ B[1,k] # fill C-column - for i in 2:m + for i in axes(B,1)[2:end] C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,1])) * C1 end - for j in 1:m + for j in axes(A,2) Cj = C[j,k] - for i in j+1:m + for i in j+1:lastindex(A,1) C[i,k] -= _ustrip(conj(A[i,j])) * Cj end end @@ -1718,7 +1682,7 @@ end function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractMatrix) require_one_based_indexing(C, A, B) - m, n = size(A) + n = size(A,2) if size(B, 1) != n throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $n, has size $(size(B,1))")) end @@ -1729,10 +1693,10 @@ function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A unit = isunitc == 'U' @inbounds if uploc == 'U' if tfun === identity - for i in 1:m - for j in 1:n + for i in axes(A,1) + for j in axes(A,2) Aij = A[i,j] - for k in 1:j - 1 + for k in firstindex(B,1):j - 1 Aij -= C[i,k]*B[k,j] end unit || (iszero(B[j,j]) && throw(SingularException(j))) @@ -1740,10 +1704,10 @@ function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A end end else # tfun in (adjoint, transpose) - for i in 1:m - for j in n:-1:1 + for i in axes(A,1) + for j in reverse(axes(A,2)) Aij = A[i,j] - for k in j + 1:n + for k in j + 1:lastindex(B,2) Aij -= C[i,k]*tfun(B[j,k]) end unit || (iszero(B[j,j]) && throw(SingularException(j))) @@ -1753,10 +1717,10 @@ function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A end else # uploc == 'L' if tfun === identity - for i in 1:m - for j in n:-1:1 + for i in axes(A,1) + for j in reverse(axes(A,2)) Aij = A[i,j] - for k in j + 1:n + for k in j + 1:lastindex(B,1) Aij -= C[i,k]*B[k,j] end unit || (iszero(B[j,j]) && throw(SingularException(j))) @@ -1764,10 +1728,10 @@ function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A end end else # tfun in (adjoint, transpose) - for i in 1:m - for j in 1:n + for i in axes(A,1) + for j in axes(A,2) Aij = A[i,j] - for k in 1:j - 1 + for k in firstindex(B,2):j - 1 Aij -= C[i,k]*tfun(B[j,k]) end unit || (iszero(B[j,j]) && throw(SingularException(j))) @@ -1781,7 +1745,7 @@ end function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, ::Function, A::AbstractMatrix, xB::AdjOrTrans) B = parent(xB) require_one_based_indexing(C, A, B) - m, n = size(A) + n = size(A,2) if size(B, 1) != n throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $n, has size $(size(B,1))")) end @@ -1791,10 +1755,10 @@ function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, ::Function, A::Ab oB = oneunit(eltype(B)) unit = isunitc == 'U' if uploc == 'U' - @inbounds for i in 1:m - for j in 1:n + @inbounds for i in axes(A,1) + for j in axes(A,2) Aij = A[i,j] - for k in 1:j - 1 + for k in firstindex(B,1):j - 1 Aij -= C[i,k]*conj(B[k,j]) end unit || (iszero(B[j,j]) && throw(SingularException(j))) @@ -1802,10 +1766,10 @@ function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, ::Function, A::Ab end end else # uploc == 'L' - @inbounds for i in 1:m - for j in n:-1:1 + @inbounds for i in axes(A,1) + for j in reverse(axes(A,2)) Aij = A[i,j] - for k in j + 1:n + for k in j + 1:lastindex(B,1) Aij -= C[i,k]*conj(B[k,j]) end unit || (iszero(B[j,j]) && throw(SingularException(j))) @@ -1915,14 +1879,14 @@ function powm!(A0::UpperTriangular, p::Real) rmul!(A0, 1/normA0) theta = [1.53e-5, 2.25e-3, 1.92e-2, 6.08e-2, 1.25e-1, 2.03e-1, 2.84e-1] - n = checksquare(A0) + checksquare(A0) A, m, s = invsquaring(A0, theta) A = I - A # Compute accurate 
diagonal of I - T sqrt_diag!(A0, A, s) - for i = 1:n + for i in axes(A,1) A[i, i] = -A[i, i] end # Compute the Padé approximant @@ -1930,10 +1894,10 @@ function powm!(A0::UpperTriangular, p::Real) triu!(A) S = c * A Stmp = similar(S) - for j = m-1:-1:1 + for j in m-1:-1:1 j4 = 4 * j c = (-p - j) / (j4 + 2) - for i = 1:n + for i in axes(S,1) @inbounds S[i, i] = S[i, i] + 1 end copyto!(Stmp, S) @@ -1941,20 +1905,20 @@ function powm!(A0::UpperTriangular, p::Real) ldiv!(Stmp, S) c = (p - j) / (j4 - 2) - for i = 1:n + for i in axes(S,1) @inbounds S[i, i] = S[i, i] + 1 end copyto!(Stmp, S) mul!(S, A, c) ldiv!(Stmp, S) end - for i = 1:n + for i in axes(S,1) S[i, i] = S[i, i] + 1 end copyto!(Stmp, S) mul!(S, A, -p) ldiv!(Stmp, S) - for i = 1:n + for i in axes(S,1) @inbounds S[i, i] = S[i, i] + 1 end @@ -1986,7 +1950,7 @@ log(A::UnitLowerTriangular) = copy(transpose(log(copy(transpose(A))))) function log_quasitriu(A0::AbstractMatrix{T}) where T<:BlasFloat # allocate real A if log(A) will be real and complex A otherwise - n = checksquare(A0) + checksquare(A0) if isreal(A0) && (!istriu(A0) || !any(x -> real(x) < zero(real(T)), diag(A0))) A = T <: Complex ? real(A0) : copy(A0) else @@ -1994,7 +1958,7 @@ function log_quasitriu(A0::AbstractMatrix{T}) where T<:BlasFloat end if A0 isa UnitUpperTriangular A = UpperTriangular(parent(A)) - @inbounds for i in 1:n + @inbounds for i in axes(A,1) A[i,i] = 1 end end @@ -2023,13 +1987,13 @@ function _log_quasitriu!(A0, A) # Get the Gauss-Legendre quadrature points and weights R = zeros(Float64, m, m) - for i = 1:m - 1 + for i in 1:m - 1 R[i,i+1] = i / sqrt((2 * i)^2 - 1) R[i+1,i] = R[i,i+1] end x,V = eigen(R) w = Vector{Float64}(undef, m) - for i = 1:m + for i in 1:m x[i] = (x[i] + 1) / 2 w[i] = V[1,i]^2 end @@ -2039,9 +2003,9 @@ function _log_quasitriu!(A0, A) n = size(A, 1) Y = zeros(t, n, n) B = similar(A) - for k = 1:m + for k in 1:m B .= t(x[k]) .* A - @inbounds for i in 1:n + @inbounds for i in axes(B,1) B[i,i] += 1 end Y .+= t(w[k]) .* rdiv_quasitriu!(A, B) @@ -2072,7 +2036,6 @@ function _find_params_log_quasitriu!(A) 2.060962623452836e-001, 2.879093714241194e-001] tmax = size(theta, 1) - n = size(A, 1) p = 0 m = 0 @@ -2089,7 +2052,7 @@ function _find_params_log_quasitriu!(A) s0 = s # Compute repeated roots - for k = 1:min(s, maxsqrt) + for k in 1:min(s, maxsqrt) _sqrt_quasitriu!(A isa UpperTriangular ? parent(A) : A, A) end @@ -2150,7 +2113,7 @@ function _find_params_log_quasitriu!(A) end _sqrt_quasitriu!(A isa UpperTriangular ? 
parent(A) : A, A) copyto!(AmI, A) - for i in 1:n + for i in axes(AmI,1) @inbounds AmI[i,i] -= 1 end mul!(AmI2, AmI, AmI) @@ -2163,9 +2126,8 @@ end # Compute accurate diagonal of A = A0^s - I function sqrt_diag!(A0::UpperTriangular, A::UpperTriangular, s) - n = checksquare(A0) - T = eltype(A) - @inbounds for i = 1:n + checksquare(A0) + @inbounds for i in axes(A0,1) a = complex(A0[i,i]) A[i,i] = _sqrt_pow(a, s) end @@ -2206,7 +2168,7 @@ function _sqrt_pow(a::Number, s) z0 = a - 1 a = sqrt(a) r = 1 + a - for j = 1:s0-1 + for j in 1:s0-1 a = sqrt(a) r = r * (1 + a) end @@ -2375,7 +2337,7 @@ function invsquaring(A0::UpperTriangular, theta) # assumes theta is in ascending order maxsqrt = 100 tmax = size(theta, 1) - n = checksquare(A0) + checksquare(A0) A = complex(copy(A0)) p = 0 m = 0 @@ -2390,7 +2352,7 @@ function invsquaring(A0::UpperTriangular, theta) s = s + 1 end s0 = s - for k = 1:min(s, maxsqrt) + for k in 1:min(s, maxsqrt) A = sqrt(A) end @@ -2464,8 +2426,8 @@ end # Compute accurate diagonal and superdiagonal of A = A0^p function blockpower!(A::UpperTriangular, A0::UpperTriangular, p) - n = checksquare(A0) - @inbounds for k = 1:n-1 + checksquare(A0) + @inbounds for k in axes(A0,1)[1:end-1] Ak = complex(A0[k,k]) Akp1 = complex(A0[k+1,k+1]) @@ -2500,10 +2462,10 @@ unw(x::Number) = ceil((imag(x) - pi) / (2 * pi)) # compute A / B for upper quasi-triangular B, possibly overwriting B function rdiv_quasitriu!(A, B) - n = checksquare(A) + checksquare(A) AG = copy(A) # use Givens rotations to annihilate 2x2 blocks - @inbounds for k in 1:(n-1) + @inbounds for k in axes(B,2)[1:end-1] s = B[k+1,k] iszero(s) && continue # 1x1 block G = first(givens(B[k+1,k+1], s, k, k+1)) @@ -2518,15 +2480,14 @@ end sqrt(A::UpperTriangular) = sqrt_quasitriu(A) function sqrt(A::UnitUpperTriangular{T}) where T B = A.data - n = checksquare(B) t = typeof(sqrt(zero(T))) - R = Matrix{t}(I, n, n) + R = Matrix{t}(I, size(A)) tt = typeof(oneunit(t)*oneunit(t)) half = inv(R[1,1]+R[1,1]) # for general, algebraic cases. PR#20214 - @inbounds for j = 1:n - for i = j-1:-1:1 + @inbounds for j in axes(B,2) + for i in j-1:-1:firstindex(B) r::tt = B[i,j] - @simd for k = i+1:j-1 + @simd for k in i+1:j-1 r -= R[i,k]*R[k,j] end iszero(r) || (R[i,j] = half*r) @@ -2548,7 +2509,7 @@ function sqrt_quasitriu(A0; blockwidth = eltype(A0) <: Complex ? 512 : 256) if isreal(A0) is_sqrt_real = true if istriu(A0) - for i in 1:n + for i in axes(A0,1) Aii = real(A0[i,i]) if Aii < zero(Aii) is_sqrt_real = false @@ -2557,15 +2518,15 @@ function sqrt_quasitriu(A0; blockwidth = eltype(A0) <: Complex ? 512 : 256) end end if is_sqrt_real - R = zeros(Tr, n, n) + R = zeros(Tr, size(A0)) A = real(A0) else - R = zeros(Tc, n, n) + R = zeros(Tc, size(A0)) A = A0 end else A = A0 - R = zeros(Tc, n, n) + R = zeros(Tc, size(A0)) end _sqrt_quasitriu!(R, A; blockwidth=blockwidth, n=n) Rc = eltype(A0) <: Real ? 
R : complex(R) @@ -2865,7 +2826,7 @@ det(A::LowerTriangular) = prod(diag(A.data)) function logabsdet(A::Union{UpperTriangular{T},LowerTriangular{T}}) where T sgn = one(T) abs_det = zero(real(T)) - @inbounds for i in 1:size(A,1) + @inbounds for i in axes(A.data,1) diag_i = A.data[i,i] sgn *= sign(diag_i) abs_det += log(abs(diag_i)) @@ -2955,8 +2916,8 @@ end M_L₁ = zeros(T,4,4) M_Bᵢⱼ⁽⁰⁾ = zeros(T,2,2) M_Bᵢⱼ⁽¹⁾ = zeros(T,2,2) - for k = 1:n-1 - for i = 1:n-k + for k in axes(A,2)[1:end-1] + for i in axes(A,2)[1:end-k] if sizes[i] == 0 || sizes[i+k] == 0 continue end k₁, k₂ = i+1+(sizes[i+1]==0), i+k-1 i₁, i₂, j₁, j₂, s₁, s₂ = i, i+sizes[i]-1, i+k, i+k+sizes[i+k]-1, sizes[i], sizes[i+k] @@ -2982,7 +2943,11 @@ end end end # Make quasi triangular - for j=1:n for i=j+1+(sizes[j]==2):n A[i,j] = 0 end end + for j in axes(A,2) + for i=j+1+(sizes[j]==2):lastindex(A,1) + A[i,j] = 0 + end + end return A end From 0249febc9a0badbccf144b253a1ac8a221ba3abb Mon Sep 17 00:00:00 2001 From: "Steven G. Johnson" Date: Sun, 3 Nov 2024 16:40:02 -0500 Subject: [PATCH 360/537] clarify short-circuit && and || docs (#56420) This clarifies the docs to explain that `a && b` is equivalent to `a ? b : false` and that `a || b` is equivalent to `a ? true : b`. In particular, this explains why the second argument does not need to be a boolean value, which is a common point of confusion. (See e.g. [this discourse thread](https://discourse.julialang.org/t/internals-of-assignment-when-doing-short-circuit-evaluation/122178/2?u=stevengj).) --- base/docs/basedocs.jl | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index 7441f5b993bf4..5afc61e8507b6 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -1305,6 +1305,12 @@ kw";" Short-circuiting boolean AND. +This is equivalent to `x ? y : false`: it returns `false` if `x` is `false` and the result of evaluating `y` if `x` is `true`. +Note that if `y` is an expression, it is only evaluated when `x` is `true`, which is called "short-circuiting" behavior. + +Also, `y` does not need to have a boolean value. This means that `(condition) && (statement)` can be used as shorthand for +`if condition; statement; end` for an arbitrary `statement`. + See also [`&`](@ref), the ternary operator `? :`, and the manual section on [control flow](@ref man-conditional-evaluation). # Examples @@ -1316,6 +1322,9 @@ true julia> x < 0 && error("expected positive x") false + +julia> x > 0 && "not a boolean" +"not a boolean" ``` """ kw"&&" @@ -1325,6 +1334,12 @@ kw"&&" Short-circuiting boolean OR. +This is equivalent to `x ? true : y`: it returns `true` if `x` is `true` and the result of evaluating `y` if `x` is `false`. +Note that if `y` is an expression, it is only evaluated when `x` is `false`, which is called "short-circuiting" behavior. + +Also, `y` does not need to have a boolean value. This means that `(condition) || (statement)` can be used as shorthand for +`if !(condition); statement; end` for an arbitrary `statement`. + See also: [`|`](@ref), [`xor`](@ref), [`&&`](@ref). 
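A minimal Julia sketch (not part of the patch itself) of the two equivalences that the commit message for PATCH 360 states; `x` is just an arbitrary example value:

```julia
x = 5

# `a && b` behaves like `a ? b : false` ...
(x > 0 && "not a boolean") == (x > 0 ? "not a boolean" : false)  # true
# ... and `a || b` behaves like `a ? true : b`:
(x < 0 || "not a boolean") == (x < 0 ? true : "not a boolean")   # true

# which is why `condition && statement` can stand in for `if condition; statement; end`:
x > 0 && println("x is positive")
```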
# Examples @@ -1334,6 +1349,9 @@ true julia> false || true || println("neither is true!") true + +julia> pi < 3 || "not a boolean" +"not a boolean" ``` """ kw"||" From 74640a1a75733769a414bbabd2d09f952dcf6438 Mon Sep 17 00:00:00 2001 From: Florian Date: Mon, 4 Nov 2024 03:16:56 +0100 Subject: [PATCH 361/537] docs: replace 'leaf types' with 'concrete types' (#56418) Fixes #55044 --------- Co-authored-by: inkydragon --- base/reflection.jl | 4 +-- base/runtime_internals.jl | 4 +-- doc/src/manual/calling-c-and-fortran-code.md | 26 ++++++++++---------- doc/src/manual/performance-tips.md | 4 +-- stdlib/InteractiveUtils/src/codeview.jl | 4 +-- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/base/reflection.jl b/base/reflection.jl index f2a554e0f27c5..0b7612e44f744 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -15,7 +15,7 @@ yielded by expanding the generators. The keyword `debuginfo` controls the amount of code metadata present in the output. -Note that an error will be thrown if `types` are not leaf types when `generated` is +Note that an error will be thrown if `types` are not concrete types when `generated` is `true` and any of the corresponding methods are an `@generated` method. """ function code_lowered(@nospecialize(f), @nospecialize(t=Tuple); generated::Bool=true, debuginfo::Symbol=:default) @@ -37,7 +37,7 @@ function code_lowered(@nospecialize(f), @nospecialize(t=Tuple); generated::Bool= else error("Could not expand generator for `@generated` method ", m, ". ", "This can happen if the provided argument types (", t, ") are ", - "not leaf types, but the `generated` argument is `true`.") + "not concrete types, but the `generated` argument is `true`.") end else code = uncompressed_ir(m.def::Method) diff --git a/base/runtime_internals.jl b/base/runtime_internals.jl index 1da58af38d545..4a04d406550b7 100644 --- a/base/runtime_internals.jl +++ b/base/runtime_internals.jl @@ -839,7 +839,7 @@ end """ isdispatchtuple(T) -Determine whether type `T` is a tuple "leaf type", +Determine whether type `T` is a tuple of concrete types, meaning it could appear as a type signature in dispatch and has no subtypes (or supertypes) which could appear in a call. If `T` is not a type, then return `false`. @@ -894,7 +894,7 @@ isconcretedispatch(@nospecialize t) = isconcretetype(t) && !iskindtype(t) using Core: has_free_typevars # equivalent to isa(v, Type) && isdispatchtuple(Tuple{v}) || v === Union{} -# and is thus perhaps most similar to the old (pre-1.0) `isleaftype` query +# and is thus perhaps most similar to the old (pre-1.0) `isconcretetype` query function isdispatchelem(@nospecialize v) return (v === Bottom) || (v === typeof(Bottom)) || isconcretedispatch(v) || (isType(v) && !has_free_typevars(v)) diff --git a/doc/src/manual/calling-c-and-fortran-code.md b/doc/src/manual/calling-c-and-fortran-code.md index b8d064f698208..f675ab5eb16e8 100644 --- a/doc/src/manual/calling-c-and-fortran-code.md +++ b/doc/src/manual/calling-c-and-fortran-code.md @@ -276,17 +276,17 @@ it to be freed prematurely. 
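A small sketch (not taken from the patch) of the distinction behind the terminology change in PATCH 361; `isconcretetype` and `Base.isdispatchtuple` are the queries the updated docs point to:

```julia
# Concrete ("leaf") types can be instantiated and have no subtypes:
isconcretetype(Vector{Int})     # true
isconcretetype(AbstractArray)   # false -- an abstract "super type"
isconcretetype(Vector)          # false -- type parameters not fully specified

# A tuple type made only of concrete types can appear as a dispatch signature:
Base.isdispatchtuple(Tuple{Int, String})  # true
Base.isdispatchtuple(Tuple{Integer})      # false
```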
First, let's review some relevant Julia type terminology: -| Syntax / Keyword | Example | Description | -|:----------------------------- |:------------------------------------------- |:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -| `mutable struct` | `BitSet` | "Leaf Type" :: A group of related data that includes a type-tag, is managed by the Julia GC, and is defined by object-identity. The type parameters of a leaf type must be fully defined (no `TypeVars` are allowed) in order for the instance to be constructed. | -| `abstract type` | `Any`, `AbstractArray{T, N}`, `Complex{T}` | "Super Type" :: A super-type (not a leaf-type) that cannot be instantiated, but can be used to describe a group of types. | -| `T{A}` | `Vector{Int}` | "Type Parameter" :: A specialization of a type (typically used for dispatch or storage optimization). | -| | | "TypeVar" :: The `T` in the type parameter declaration is referred to as a TypeVar (short for type variable). | -| `primitive type` | `Int`, `Float64` | "Primitive Type" :: A type with no fields, but a size. It is stored and defined by-value. | -| `struct` | `Pair{Int, Int}` | "Struct" :: A type with all fields defined to be constant. It is defined by-value, and may be stored with a type-tag. | -| | `ComplexF64` (`isbits`) | "Is-Bits" :: A `primitive type`, or a `struct` type where all fields are other `isbits` types. It is defined by-value, and is stored without a type-tag. | -| `struct ...; end` | `nothing` | "Singleton" :: a Leaf Type or Struct with no fields. | -| `(...)` or `tuple(...)` | `(1, 2, 3)` | "Tuple" :: an immutable data-structure similar to an anonymous struct type, or a constant array. Represented as either an array or a struct. | +| Syntax / Keyword | Example | Description | +|:----------------------------- |:------------------------------------------- |:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `mutable struct` | `BitSet` | "Concrete Type" :: A group of related data that includes a type-tag, is managed by the Julia GC, and is defined by object-identity. The type parameters of a concrete type must be fully defined (no `TypeVars` are allowed) in order for the instance to be constructed. Also see [`isconcretetype`](@ref). | +| `abstract type` | `Any`, `AbstractArray{T, N}`, `Complex{T}` | "Super Type" :: A super-type (not a concrete type) that cannot be instantiated, but can be used to describe a group of types. Also see [`isabstracttype`](@ref). | +| `T{A}` | `Vector{Int}` | "Type Parameter" :: A specialization of a type (typically used for dispatch or storage optimization). | +| | | "TypeVar" :: The `T` in the type parameter declaration is referred to as a TypeVar (short for type variable). | +| `primitive type` | `Int`, `Float64` | "Primitive Type" :: A type with no fields, but a size. It is stored and defined by-value. | +| `struct` | `Pair{Int, Int}` | "Struct" :: A type with all fields defined to be constant. It is defined by-value, and may be stored with a type-tag. 
| +| | `ComplexF64` (`isbits`) | "Is-Bits" :: A `primitive type`, or a `struct` type where all fields are other `isbits` types. It is defined by-value, and is stored without a type-tag. | +| `struct ...; end` | `nothing` | "Singleton" :: a concrete Type or Struct with no fields. | +| `(...)` or `tuple(...)` | `(1, 2, 3)` | "Tuple" :: an immutable data-structure similar to an anonymous struct type, or a constant array. Represented as either an array or a struct. | ### [Bits Types](@id man-bits-types) @@ -626,7 +626,7 @@ For translating a C argument list to Julia: * argument value will be copied (passed by value) * `struct T` (including typedef to a struct) - * `T`, where `T` is a Julia leaf type + * `T`, where `T` is a concrete Julia type * argument value will be copied (passed by value) * `void*` @@ -679,7 +679,7 @@ For translating a C return type to Julia: * argument value will be copied (returned by-value) * `struct T` (including typedef to a struct) - * `T`, where `T` is a Julia Leaf Type + * `T`, where `T` is a concrete Julia Type * argument value will be copied (returned by-value) * `void*` diff --git a/doc/src/manual/performance-tips.md b/doc/src/manual/performance-tips.md index 3033720b5df8c..038000f55e761 100644 --- a/doc/src/manual/performance-tips.md +++ b/doc/src/manual/performance-tips.md @@ -805,7 +805,7 @@ or `nothing` if it is not found, a clear type instability. In order to make it e type instabilities that are likely to be important, `Union`s containing either `missing` or `nothing` are color highlighted in yellow, instead of red. -The following examples may help you interpret expressions marked as containing non-leaf types: +The following examples may help you interpret expressions marked as containing non-concrete types: * Function body starting with `Body::Union{T1,T2})` * Interpretation: function with unstable return type @@ -821,7 +821,7 @@ The following examples may help you interpret expressions marked as containing n element accesses * `Base.getfield(%%x, :(:data))::Array{Float64,N} where N` - * Interpretation: getting a field that is of non-leaf type. In this case, the type of `x`, say `ArrayContainer`, had a + * Interpretation: getting a field that is of non-concrete type. In this case, the type of `x`, say `ArrayContainer`, had a field `data::Array{T}`. But `Array` needs the dimension `N`, too, to be a concrete type. * Suggestion: use concrete types like `Array{T,3}` or `Array{T,N}`, where `N` is now a parameter of `ArrayContainer` diff --git a/stdlib/InteractiveUtils/src/codeview.jl b/stdlib/InteractiveUtils/src/codeview.jl index e3ef0a14a6608..92354d2fb9a75 100644 --- a/stdlib/InteractiveUtils/src/codeview.jl +++ b/stdlib/InteractiveUtils/src/codeview.jl @@ -128,10 +128,10 @@ end Prints lowered and type-inferred ASTs for the methods matching the given generic function and type signature to `io` which defaults to `stdout`. The ASTs are annotated in such a way -as to cause "non-leaf" types which may be problematic for performance to be emphasized +as to cause non-concrete types which may be problematic for performance to be emphasized (if color is available, displayed in red). This serves as a warning of potential type instability. -Not all non-leaf types are particularly problematic for performance, and the performance +Not all non-concrete types are particularly problematic for performance, and the performance characteristics of a particular type is an implementation detail of the compiler. 
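As a hedged illustration (not from the patch) of the kind of non-concrete inferred type that `code_warntype` highlights:

```julia
using InteractiveUtils

# The return type depends on a runtime value, so inference widens it to Union{Float64, Int64}:
unstable(x) = x > 0 ? 1 : 1.0

# Prints the annotated typed IR; the `Body::Union{Float64, Int64}` line is what gets highlighted:
code_warntype(unstable, (Int,))
```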
`code_warntype` will err on the side of coloring types red if they might be a performance
concern, so some types may be colored red even if they do not impact performance.

From 50713ee4a82eb1b5613647cd74b027315f665080 Mon Sep 17 00:00:00 2001
From: Jishnu Bhattacharya
Date: Mon, 4 Nov 2024 09:58:25 +0530
Subject: [PATCH 362/537] Remove aggressive constprop annotation on generic_matmatmul_wrapper! (#56400)

This annotation seems unnecessary, as the method gets inlined and there's no
computation being carried out using the value of the constant.

---
 stdlib/LinearAlgebra/src/matmul.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl
index 8e90b21a4b7ce..2f1a3fe2ba861 100644
--- a/stdlib/LinearAlgebra/src/matmul.jl
+++ b/stdlib/LinearAlgebra/src/matmul.jl
@@ -337,7 +337,7 @@ end
 # this indirection allows is to specialize on the types of the wrappers of A and B to some extent,
 # even though the wrappers are stripped off in mul!
 # By default, we ignore the wrapper info and forward the arguments to generic_matmatmul!
-Base.@constprop :aggressive function generic_matmatmul_wrapper!(C, tA, tB, A, B, α, β, @nospecialize(val))
+function generic_matmatmul_wrapper!(C, tA, tB, A, B, α, β, @nospecialize(val))
     generic_matmatmul!(C, tA, tB, A, B, α, β)
 end

From 349f142bb0a066a6eb5f195b0037df51f11d06e4 Mon Sep 17 00:00:00 2001
From: Lilith Orion Hafner
Date: Mon, 4 Nov 2024 13:39:53 -0600
Subject: [PATCH 363/537] Clarify the FieldError docstring (#55222)

---
 base/docs/basedocs.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl
index 5afc61e8507b6..d618330e79874 100644
--- a/base/docs/basedocs.jl
+++ b/base/docs/basedocs.jl
@@ -1707,7 +1707,7 @@ ErrorException
 """
     FieldError(type::DataType, field::Symbol)

-An operation tried to access invalid `field` of `type`.
+An operation tried to access invalid `field` on an object of `type`.

 !!!
compat "Julia 1.12" Prior to Julia 1.12, invalid field access threw an [`ErrorException`](@ref) From 66c50ac63c3152e90f578b1bcf865cf74a69e780 Mon Sep 17 00:00:00 2001 From: Brooks Rady Date: Mon, 4 Nov 2024 20:09:49 +0000 Subject: [PATCH 364/537] Allow `Time`s to be rounded to `Period`s (#52629) Co-authored-by: CyHan Co-authored-by: Curtis Vogt --- stdlib/Dates/src/rounding.jl | 6 ++++++ stdlib/Dates/test/rounding.jl | 22 +++++++++++++++++++++- 2 files changed, 27 insertions(+), 1 deletion(-) diff --git a/stdlib/Dates/src/rounding.jl b/stdlib/Dates/src/rounding.jl index b5b6e52decba8..08a8218365d2c 100644 --- a/stdlib/Dates/src/rounding.jl +++ b/stdlib/Dates/src/rounding.jl @@ -84,6 +84,12 @@ function Base.floor(dt::DateTime, p::TimePeriod) return epochms2datetime(milliseconds - mod(milliseconds, value(Millisecond(p)))) end +function Base.floor(t::Time, p::TimePeriod) + value(p) < 1 && throw(DomainError(p)) + nanoseconds = value(t) + return Time(Nanosecond(nanoseconds - mod(nanoseconds, value(Nanosecond(p))))) +end + """ floor(x::Period, precision::T) where T <: Union{TimePeriod, Week, Day} -> T diff --git a/stdlib/Dates/test/rounding.jl b/stdlib/Dates/test/rounding.jl index 85c90981423d3..03c57c7a5bce3 100644 --- a/stdlib/Dates/test/rounding.jl +++ b/stdlib/Dates/test/rounding.jl @@ -188,7 +188,27 @@ end @test round(x, Dates.Microsecond) == Dates.Microsecond(2001000) @test round(x, Dates.Nanosecond) == x end - +@testset "Rounding Time" begin + x = Time(9, 25, 45, 25, 650, 500) + @test floor(x, Dates.Hour) == Time(9) + @test floor(x, Dates.Minute) == Time(9, 25) + @test floor(x, Dates.Second) == Time(9, 25, 45) + @test floor(x, Dates.Millisecond) == Time(9, 25, 45, 25) + @test floor(x, Dates.Microsecond) == Time(9, 25, 45, 25, 650) + @test floor(x, Dates.Nanosecond) == x + @test ceil(x, Dates.Hour) == Time(10) + @test ceil(x, Dates.Minute) == Time(9, 26) + @test ceil(x, Dates.Second) == Time(9, 25, 46) + @test ceil(x, Dates.Millisecond) == Time(9, 25, 45, 26) + @test ceil(x, Dates.Microsecond) == Time(9, 25, 45, 25, 651) + @test ceil(x, Dates.Nanosecond) == x + @test round(x, Dates.Hour) == Time(9) + @test round(x, Dates.Minute) == Time(9, 26) + @test round(x, Dates.Second) == Time(9, 25, 45) + @test round(x, Dates.Millisecond) == Time(9, 25, 45, 26) + @test round(x, Dates.Microsecond) == Time(9, 25, 45, 25, 651) + @test round(x, Dates.Nanosecond) == x +end @testset "Rounding DateTime to Date" begin now_ = DateTime(2020, 9, 1, 13) for p in (Year, Month, Day) From 36f06b88997f22461e4df410a9ae5c9218104355 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Mon, 4 Nov 2024 20:40:05 -0300 Subject: [PATCH 365/537] Replace unconditional store with cmpswap to avoid deadlocking in jl_fptr_wait_for_compiled_addr (#56444) That unconditional store could overwrite the actual compiled code in that pointer, so make it a cmpswap --- src/codegen.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index b0d5038024900..e2cccafd42e5f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -10166,7 +10166,8 @@ jl_llvm_functions_t jl_emit_codeinst( !(params.imaging_mode || jl_options.incremental)) { // don't delete code when generating a precompile file // Never end up in a situation where the codeinst has no invoke, but also no source, so we never fall // through the cracks of SOURCE_MODE_ABI. 
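A hypothetical Julia analogue of the reasoning in PATCH 365 (not the runtime's actual C code; `Slot` and the symbols used are invented for illustration): the fallback pointer should only be installed when nothing has been compiled into the slot yet, which an unconditional store cannot guarantee but a compare-and-swap can.

```julia
mutable struct Slot
    @atomic invoke::Any
end

slot = Slot(nothing)
@atomic slot.invoke = :compiled_code   # another thread already installed real code

# An unconditional store of the fallback would clobber it;
# a compare-and-swap only succeeds while the slot is still empty:
@atomicreplace slot.invoke nothing => :wait_for_compiled   # success == false
@atomic slot.invoke                                        # still :compiled_code
```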
- jl_atomic_store_release(&codeinst->invoke, jl_fptr_wait_for_compiled_addr); + jl_callptr_t expected = NULL; + jl_atomic_cmpswap_relaxed(&codeinst->invoke, &expected, jl_fptr_wait_for_compiled_addr); jl_atomic_store_release(&codeinst->inferred, jl_nothing); } } From 589203ee2985816e56661e2b288f28e2b277a236 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 4 Nov 2024 21:39:52 -0500 Subject: [PATCH 366/537] Correct nothrow modeling of `get_binding_type` (#56430) As pointed out in https://github.com/JuliaLang/julia/pull/56299#discussion_r1826509185, although the bug predates that PR. --- base/compiler/tfuncs.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index aaa1354fd5e54..64ec7054221e1 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -2501,7 +2501,7 @@ function builtin_effects(𝕃::AbstractLattice, @nospecialize(f::Builtin), argty elseif f === Core.get_binding_type length(argtypes) == 2 || return EFFECTS_THROWS # Modeled more precisely in abstract_eval_get_binding_type - return Effects(EFFECTS_TOTAL; effect_free=ALWAYS_FALSE) + return Effects(EFFECTS_TOTAL; effect_free=ALWAYS_FALSE, nothrow=get_binding_type_nothrow(𝕃, argtypes[1], argtypes[2])) elseif f === compilerbarrier length(argtypes) == 2 || return Effects(EFFECTS_THROWS; consistent=ALWAYS_FALSE) setting = argtypes[1] From 8e0a17176593b72cedb26b7e32a55699f7d299a8 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Mon, 4 Nov 2024 22:32:06 -0500 Subject: [PATCH 367/537] add tip for module docstrings before load (#56445) --- stdlib/REPL/src/docview.jl | 7 ++++++- stdlib/REPL/test/docview.jl | 5 +++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/stdlib/REPL/src/docview.jl b/stdlib/REPL/src/docview.jl index 3c5e102bb657e..566046f325499 100644 --- a/stdlib/REPL/src/docview.jl +++ b/stdlib/REPL/src/docview.jl @@ -473,7 +473,12 @@ function repl_corrections(io::IO, s, mod::Module) quot = any(isspace, s) ? "'" : "" print(io, quot) printstyled(io, s, color=:cyan) - print(io, quot, '\n') + print(io, quot) + if Base.identify_package(s) === nothing + print(io, '\n') + else + print(io, ", but a loadable package with that name exists. 
If you are looking for the package docs load the package first.\n") + end print_correction(io, s, mod) end repl_corrections(s) = repl_corrections(stdout, s) diff --git a/stdlib/REPL/test/docview.jl b/stdlib/REPL/test/docview.jl index 6b374ed7f0149..02f1dc8238f04 100644 --- a/stdlib/REPL/test/docview.jl +++ b/stdlib/REPL/test/docview.jl @@ -28,6 +28,11 @@ end @test occursin("Couldn't find 'mutable s'", str) end +@testset "non-loaded packages in doc search" begin + str = get_help_io("Profile") + @test occursin("Couldn't find Profile, but a loadable package with that name exists.", str) +end + @testset "Check @var_str also completes to var\"\" in REPL.doc_completions()" begin checks = ["var", "raw", "r"] symbols = "@" .* checks .* "_str" From 567a7ca7ead7db9dd8cad7dd43f46c6f8bd1029c Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 5 Nov 2024 04:30:03 -0500 Subject: [PATCH 368/537] compiler: Strengthen some assertions and fix a couple small bugs (#56449) --- base/compiler/abstractinterpretation.jl | 25 +++++++--------- base/compiler/inferencestate.jl | 40 ++++++++++++++++--------- base/compiler/optimize.jl | 2 +- base/compiler/typeinfer.jl | 20 ++++++------- 4 files changed, 48 insertions(+), 39 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index f7f7e80a0ebe1..8b557b2105f1c 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -3315,16 +3315,13 @@ function abstract_eval_binding_partition!(interp::AbstractInterpreter, g::Global end function abstract_eval_partition_load(interp::AbstractInterpreter, partition::Core.BindingPartition) - consistent = inaccessiblememonly = ALWAYS_FALSE - nothrow = false - generic_effects = Effects(EFFECTS_TOTAL; consistent, nothrow, inaccessiblememonly) if is_some_guard(binding_kind(partition)) if InferenceParams(interp).assume_bindings_static return RTEffects(Union{}, UndefVarError, EFFECTS_THROWS) else # We do not currently assume an invalidation for guard -> defined transitions # return RTEffects(Union{}, UndefVarError, EFFECTS_THROWS) - return RTEffects(Any, UndefVarError, generic_effects) + return RTEffects(Any, UndefVarError, generic_getglobal_effects) end end @@ -3335,20 +3332,20 @@ function abstract_eval_partition_load(interp::AbstractInterpreter, partition::Co rt = partition_restriction(partition) - if InferenceParams(interp).assume_bindings_static + return RTEffects(rt, UndefVarError, generic_getglobal_effects) +end + +function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, sv::AbsIntState) + partition = abstract_eval_binding_partition!(interp, g, sv) + ret = abstract_eval_partition_load(interp, partition) + if ret.rt !== Union{} && ret.exct === UndefVarError && InferenceParams(interp).assume_bindings_static if isdefined(g, :binding) && isdefined(g.binding, :value) - return RTEffects(rt, Union{}, Effecst(generic_effects, nothrow=true)) + return RTEffects(ret.rt, Union{}, Effects(generic_getglobal_effects, nothrow=true)) end # We do not assume in general that assigned global bindings remain assigned. # The existence of pkgimages allows them to revert in practice. 
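For reference, a small sketch (not part of the patch) of the check the REPL help tip in PATCH 367 relies on; results depend on the active environment, but `Profile` is a standard library and is normally resolvable even before it is loaded:

```julia
Base.identify_package("Profile")            # Base.PkgId  -> help adds the "loadable package" tip
Base.identify_package("NoSuchPackageName")  # nothing     -> plain "Couldn't find ..." output
```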
end - - return RTEffects(rt, UndefVarError, generic_effects) -end - -function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, sv::AbsIntState) - partition = abstract_eval_binding_partition!(interp, g, sv) - return abstract_eval_partition_load(interp, partition) + return ret end function global_assignment_exct(interp::AbstractInterpreter, sv::AbsIntState, g::GlobalRef, @nospecialize(newty)) @@ -4045,7 +4042,6 @@ function typeinf(interp::AbstractInterpreter, frame::InferenceState) takeprev = 0 while takenext >= frame.frameid callee = takenext == 0 ? frame : callstack[takenext]::InferenceState - interp = callee.interp if !isempty(callstack) if length(callstack) - frame.frameid >= minwarn topmethod = callstack[1].linfo @@ -4059,6 +4055,7 @@ function typeinf(interp::AbstractInterpreter, frame::InferenceState) takenext = length(callstack) end end + interp = callee.interp nextstateid = takenext + 1 - frame.frameid while length(nextstates) < nextstateid push!(nextstates, CurrentState()) diff --git a/base/compiler/inferencestate.jl b/base/compiler/inferencestate.jl index 43ada89f23133..fd421af733943 100644 --- a/base/compiler/inferencestate.jl +++ b/base/compiler/inferencestate.jl @@ -227,10 +227,24 @@ struct HandlerInfo handler_at::Vector{Tuple{Int,Int}} # tuple of current (handler, exception stack) value at the pc end +struct WorldWithRange + this::UInt + valid_worlds::WorldRange + function WorldWithRange(world::UInt, valid_worlds::WorldRange) + if !(world in valid_worlds) + error("invalid age range update") + end + return new(world, valid_worlds) + end +end + +intersect(world::WorldWithRange, valid_worlds::WorldRange) = + WorldWithRange(world.this, intersect(world.valid_worlds, valid_worlds)) + mutable struct InferenceState #= information about this method instance =# linfo::MethodInstance - world::UInt + world::WorldWithRange mod::Module sptypes::Vector{VarState} slottypes::Vector{Any} @@ -265,7 +279,6 @@ mutable struct InferenceState #= results =# result::InferenceResult # remember where to put the result unreachable::BitSet # statements that were found to be statically unreachable - valid_worlds::WorldRange bestguess #::Type exc_bestguess ipo_effects::Effects @@ -353,10 +366,10 @@ mutable struct InferenceState parentid = frameid = cycleid = 0 this = new( - mi, world, mod, sptypes, slottypes, src, cfg, spec_info, + mi, WorldWithRange(world, valid_worlds), mod, sptypes, slottypes, src, cfg, spec_info, currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, ssavaluetypes, edges, stmt_info, tasks, pclimitations, limitations, cycle_backedges, callstack, parentid, frameid, cycleid, - result, unreachable, valid_worlds, bestguess, exc_bestguess, ipo_effects, + result, unreachable, bestguess, exc_bestguess, ipo_effects, restrict_abstract_call_sites, cache_mode, insert_coverage, interp) @@ -372,7 +385,7 @@ mutable struct InferenceState # Apply generated function restrictions if src.min_world != 1 || src.max_world != typemax(UInt) # From generated functions - this.valid_worlds = WorldRange(src.min_world, src.max_world) + update_valid_age!(this, WorldRange(src.min_world, src.max_world)) end return this @@ -772,14 +785,13 @@ mutable struct IRInterpretationState const spec_info::SpecInfo const ir::IRCode const mi::MethodInstance - const world::UInt + world::WorldWithRange curridx::Int const argtypes_refined::Vector{Bool} const sptypes::Vector{VarState} const tpdum::TwoPhaseDefUseMap const ssa_refined::BitSet const lazyreachability::LazyCFGReachability - valid_worlds::WorldRange 
const tasks::Vector{WorkThunk} const edges::Vector{Any} callstack #::Vector{AbsIntState} @@ -809,8 +821,8 @@ mutable struct IRInterpretationState tasks = WorkThunk[] edges = Any[] callstack = AbsIntState[] - return new(spec_info, ir, mi, world, curridx, argtypes_refined, ir.sptypes, tpdum, - ssa_refined, lazyreachability, valid_worlds, tasks, edges, callstack, 0, 0) + return new(spec_info, ir, mi, WorldWithRange(world, valid_worlds), curridx, argtypes_refined, ir.sptypes, tpdum, + ssa_refined, lazyreachability, tasks, edges, callstack, 0, 0) end end @@ -910,8 +922,8 @@ spec_info(sv::IRInterpretationState) = sv.spec_info propagate_inbounds(sv::AbsIntState) = spec_info(sv).propagate_inbounds method_for_inference_limit_heuristics(sv::AbsIntState) = spec_info(sv).method_for_inference_limit_heuristics -frame_world(sv::InferenceState) = sv.world -frame_world(sv::IRInterpretationState) = sv.world +frame_world(sv::InferenceState) = sv.world.this +frame_world(sv::IRInterpretationState) = sv.world.this function is_effect_overridden(sv::AbsIntState, effect::Symbol) if is_effect_overridden(frame_instance(sv), effect) @@ -933,9 +945,8 @@ has_conditional(::AbstractLattice, ::IRInterpretationState) = false # work towards converging the valid age range for sv function update_valid_age!(sv::AbsIntState, valid_worlds::WorldRange) - valid_worlds = sv.valid_worlds = intersect(valid_worlds, sv.valid_worlds) - @assert sv.world in valid_worlds "invalid age range update" - return valid_worlds + sv.world = intersect(sv.world, valid_worlds) + return sv.world.valid_worlds end """ @@ -1131,6 +1142,7 @@ function Future{T}(f, prev::Future{S}, interp::AbstractInterpreter, sv::AbsIntSt else @assert Core._hasmethod(Tuple{Core.Typeof(f), S, typeof(interp), typeof(sv)}) result = Future{T}() + @assert !isa(sv, InferenceState) || interp === sv.interp push!(sv.tasks, function (interp, sv) result[] = f(later[], interp, sv) # capture just later, instead of all of prev return true diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index aeb3e6849773b..73947ad3fc280 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -141,7 +141,7 @@ struct InliningState{Interp<:AbstractInterpreter} interp::Interp end function InliningState(sv::InferenceState, interp::AbstractInterpreter) - return InliningState(sv.edges, sv.world, interp) + return InliningState(sv.edges, frame_world(sv), interp) end function InliningState(interp::AbstractInterpreter) return InliningState(Any[], get_inference_world(interp), interp) diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index 11337d5a4d047..1b3ff144639e4 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -25,7 +25,7 @@ end function _typeinf_identifier(frame::Core.Compiler.InferenceState) mi_info = InferenceFrameInfo( frame.linfo, - frame.world, + frame_world(sv), copy(frame.sptypes), copy(frame.slottypes), length(frame.result.argtypes), @@ -173,7 +173,7 @@ function finish_cycle(::AbstractInterpreter, frames::Vector{AbsIntState}, cyclei # all frames in the cycle should have the same bits of `valid_worlds` and `effects` # that are simply the intersection of each partial computation, without having # dependencies on each other (unlike rt and exct) - cycle_valid_worlds = intersect(cycle_valid_worlds, caller.valid_worlds) + cycle_valid_worlds = intersect(cycle_valid_worlds, caller.world.valid_worlds) cycle_valid_effects = merge_effects(cycle_valid_effects, caller.ipo_effects) end for frameid = cycleid:length(frames) @@ -197,7 
+197,7 @@ function finish_cycle(::AbstractInterpreter, frames::Vector{AbsIntState}, cyclei end function adjust_cycle_frame!(sv::InferenceState, cycle_valid_worlds::WorldRange, cycle_valid_effects::Effects) - sv.valid_worlds = cycle_valid_worlds + update_valid_age!(sv, cycle_valid_worlds) sv.ipo_effects = cycle_valid_effects # traverse the callees of this cycle that are tracked within `sv.cycle_backedges` # and adjust their statements so that they are consistent with the new `cycle_valid_effects` @@ -403,13 +403,13 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter) end end result = me.result - result.valid_worlds = me.valid_worlds + result.valid_worlds = me.world.valid_worlds result.result = bestguess ipo_effects = result.ipo_effects = me.ipo_effects = adjust_effects(me) result.exc_result = me.exc_bestguess = refine_exception_type(me.exc_bestguess, ipo_effects) me.src.rettype = widenconst(ignorelimited(bestguess)) - me.src.min_world = first(me.valid_worlds) - me.src.max_world = last(me.valid_worlds) + me.src.min_world = first(me.world.valid_worlds) + me.src.max_world = last(me.world.valid_worlds) istoplevel = !(me.linfo.def isa Method) istoplevel || compute_edges!(me) # don't add backedges to toplevel method instance @@ -637,7 +637,7 @@ function merge_call_chain!(::AbstractInterpreter, parent::InferenceState, child: end function add_cycle_backedge!(caller::InferenceState, frame::InferenceState) - update_valid_age!(caller, frame.valid_worlds) + update_valid_age!(caller, frame.world.valid_worlds) backedge = (caller, caller.currpc) contains_is(frame.cycle_backedges, backedge) || push!(frame.cycle_backedges, backedge) return frame @@ -730,7 +730,7 @@ end function codeinst_as_edge(interp::AbstractInterpreter, sv::InferenceState) mi = sv.linfo owner = cache_owner(interp) - min_world, max_world = first(sv.valid_worlds), last(sv.valid_worlds) + min_world, max_world = first(sv.world.valid_worlds), last(sv.world.valid_worlds) if max_world >= get_world_counter() max_world = typemax(UInt) end @@ -816,7 +816,7 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize # while splitting off the rest of the work for this caller into a separate workq thunk let mresult = Future{MethodCallResult}() push!(caller.tasks, function get_infer_result(interp, caller) - update_valid_age!(caller, frame.valid_worlds) + update_valid_age!(caller, frame.world.valid_worlds) local isinferred = is_inferred(frame) local edge = isinferred ? edge_ci : nothing local effects = isinferred ? 
frame.result.ipo_effects : # effects are adjusted already within `finish` for ipo_effects @@ -842,7 +842,7 @@ function typeinf_edge(interp::AbstractInterpreter, method::Method, @nospecialize end # return the current knowledge about this cycle frame = frame::InferenceState - update_valid_age!(caller, frame.valid_worlds) + update_valid_age!(caller, frame.world.valid_worlds) effects = adjust_effects(effects_for_cycle(frame.ipo_effects), method) bestguess = frame.bestguess exc_bestguess = refine_exception_type(frame.exc_bestguess, effects) From 11cd8098714e7944fc899905114bdee404044f42 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Tue, 5 Nov 2024 18:30:35 +0900 Subject: [PATCH 369/537] inference: minor follow-ups to JuliaLang/julia#56299 (#56450) --- base/compiler/abstractinterpretation.jl | 16 ++++++++-------- base/compiler/tfuncs.jl | 4 ++-- test/compiler/effects.jl | 13 ++++++++----- 3 files changed, 18 insertions(+), 15 deletions(-) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index 8b557b2105f1c..dbfe3bb9ccac4 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -2466,7 +2466,7 @@ function abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntSta end end -function args_are_actually_getglobal(argtypes) +function argtypes_are_actually_getglobal(argtypes::Vector{Any}) length(argtypes) in (3, 4) || return false M = argtypes[2] s = argtypes[3] @@ -2506,21 +2506,21 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), return Future(abstract_eval_setglobalonce!(interp, sv, argtypes)) elseif f === Core.replaceglobal! return Future(abstract_eval_replaceglobal!(interp, sv, argtypes)) - elseif f === Core.getfield && args_are_actually_getglobal(argtypes) + elseif f === Core.getfield && argtypes_are_actually_getglobal(argtypes) return Future(abstract_eval_getglobal(interp, sv, argtypes)) - elseif f === Core.isdefined && args_are_actually_getglobal(argtypes) + elseif f === Core.isdefined && argtypes_are_actually_getglobal(argtypes) exct = Bottom if length(argtypes) == 4 order = argtypes[4] - exct = global_order_exct(order, true, false) - if !(isa(order, Const) && get_atomic_order(order.val, true, false).x >= MEMORY_ORDER_UNORDERED.x) + exct = global_order_exct(order, #=loading=#true, #=storing=#false) + if !(isa(order, Const) && get_atomic_order(order.val, #=loading=#true, #=storing=#false).x >= MEMORY_ORDER_UNORDERED.x) exct = Union{exct, ConcurrencyViolationError} end end return Future(merge_exct(CallMeta(abstract_eval_isdefined( interp, - GlobalRef((argtypes[2]::Const).val, - (argtypes[3]::Const).val), + GlobalRef((argtypes[2]::Const).val::Module, + (argtypes[3]::Const).val::Symbol), sv), NoCallInfo()), exct)) elseif f === Core.get_binding_type @@ -3048,7 +3048,7 @@ function abstract_eval_copyast(interp::AbstractInterpreter, e::Expr, vtypes::Uni end function abstract_eval_isdefined_expr(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, - sv::AbsIntState) + sv::AbsIntState) sym = e.args[1] if isa(sym, SlotNumber) && vtypes !== nothing vtyp = vtypes[slot_id(sym)] diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl index 64ec7054221e1..f0212f1082331 100644 --- a/base/compiler/tfuncs.jl +++ b/base/compiler/tfuncs.jl @@ -2497,11 +2497,11 @@ function builtin_effects(𝕃::AbstractLattice, @nospecialize(f::Builtin), argty elseif f === getglobal 2 ≤ length(argtypes) ≤ 3 || return 
EFFECTS_THROWS # Modeled more precisely in abstract_eval_getglobal - return Effects(EFFECTS_TOTAL; consistent=ALWAYS_FALSE, nothrow=false, inaccessiblememonly=ALWAYS_FALSE) + return generic_getglobal_effects elseif f === Core.get_binding_type length(argtypes) == 2 || return EFFECTS_THROWS # Modeled more precisely in abstract_eval_get_binding_type - return Effects(EFFECTS_TOTAL; effect_free=ALWAYS_FALSE, nothrow=get_binding_type_nothrow(𝕃, argtypes[1], argtypes[2])) + return Effects(EFFECTS_TOTAL; nothrow=get_binding_type_nothrow(𝕃, argtypes[1], argtypes[2])) elseif f === compilerbarrier length(argtypes) == 2 || return Effects(EFFECTS_THROWS; consistent=ALWAYS_FALSE) setting = argtypes[1] diff --git a/test/compiler/effects.jl b/test/compiler/effects.jl index cdc26cddc440d..4174aa3d01030 100644 --- a/test/compiler/effects.jl +++ b/test/compiler/effects.jl @@ -357,18 +357,21 @@ end @test !Core.Compiler.builtin_nothrow(Core.Compiler.fallback_lattice, Core.get_binding_type, Any[Rational{Int}, Core.Const(:foo)], Any) -# Nothrow for assignment to globals +# effects modeling for assignment to globals global glob_assign_int::Int = 0 -f_glob_assign_int() = global glob_assign_int += 1 -let effects = Base.infer_effects(f_glob_assign_int, ()) +f_glob_assign_int() = global glob_assign_int = 1 +let effects = Base.infer_effects(f_glob_assign_int, (); optimize=false) + @test Core.Compiler.is_consistent(effects) @test !Core.Compiler.is_effect_free(effects) @test Core.Compiler.is_nothrow(effects) end -# Nothrow for setglobal! +# effects modeling for for setglobal! global SETGLOBAL!_NOTHROW::Int = 0 -let effects = Base.infer_effects() do +let effects = Base.infer_effects(; optimize=false) do setglobal!(@__MODULE__, :SETGLOBAL!_NOTHROW, 42) end + @test Core.Compiler.is_consistent(effects) + @test !Core.Compiler.is_effect_free(effects) @test Core.Compiler.is_nothrow(effects) end From 50ad4d96847c4a8153bc9435056d8d5e70e99716 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 5 Nov 2024 07:53:20 -0500 Subject: [PATCH 370/537] Ensure that String(::Memory) returns only a String, not any owner (#56438) Fixes #56435 --- src/genericmemory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/genericmemory.c b/src/genericmemory.c index 5c48e3202493e..c310eb829e198 100644 --- a/src/genericmemory.c +++ b/src/genericmemory.c @@ -197,7 +197,7 @@ JL_DLLEXPORT jl_value_t *jl_genericmemory_to_string(jl_genericmemory_t *m, size_ if (how != 0) { jl_value_t *o = jl_genericmemory_data_owner_field(m); jl_genericmemory_data_owner_field(m) = NULL; - if (how == 3 && + if (how == 3 && jl_is_string(o) && ((mlength + sizeof(void*) + 1 <= GC_MAX_SZCLASS) == (len + sizeof(void*) + 1 <= GC_MAX_SZCLASS))) { if (jl_string_data(o)[len] != '\0') jl_string_data(o)[len] = '\0'; From 9e99ed52cf9568b2a6c0c99d9174369c95d0de20 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Tue, 5 Nov 2024 11:10:38 -0300 Subject: [PATCH 371/537] Take safepoint lock before going to sleep in the scheduler. (#56443) This avoids a deadlock during exit. Between a thread going to sleep and the thread exiting. --- src/julia_internal.h | 2 +- src/safepoint.c | 19 +++++++++++++++++++ src/scheduler.c | 3 +-- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/src/julia_internal.h b/src/julia_internal.h index f3959490855c8..c5bac37890042 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -1129,7 +1129,7 @@ void jl_safepoint_end_gc(void); // The caller should set it **BEFORE** calling this function. 
void jl_safepoint_wait_gc(void) JL_NOTSAFEPOINT; void jl_safepoint_wait_thread_resume(void) JL_NOTSAFEPOINT; - +int8_t jl_safepoint_take_sleep_lock(jl_ptls_t ptls) JL_NOTSAFEPOINT_ENTER; // Set pending sigint and enable the mechanisms to deliver the sigint. void jl_safepoint_enable_sigint(void); // If the safepoint is enabled to deliver sigint, disable it diff --git a/src/safepoint.c b/src/safepoint.c index 2e324078897a6..8e24543c6769d 100644 --- a/src/safepoint.c +++ b/src/safepoint.c @@ -276,6 +276,25 @@ void jl_safepoint_wait_thread_resume(void) jl_atomic_store_release(&ct->ptls->gc_state, state); uv_mutex_unlock(&ct->ptls->sleep_lock); } +// This takes the sleep lock and puts the thread in GC_SAFE +int8_t jl_safepoint_take_sleep_lock(jl_ptls_t ptls) +{ + int8_t gc_state = jl_gc_safe_enter(ptls); + uv_mutex_lock(&ptls->sleep_lock); + if (jl_atomic_load_relaxed(&ptls->suspend_count)) { + // This dance with the locks is because we are not allowed to hold both these locks at the same time + // This avoids a situation where jl_safepoint_suspend_thread loads our GC state and sees GC_UNSAFE + // But we are in the process of becoming GC_SAFE, and also trigger the old safepoint, this causes us + // to go sleep in scheduler and the suspender thread to go to sleep in safepoint_cond_begin meaning we hang + // To avoid this we do the broadcast below to force it to observe the new gc_state + uv_mutex_unlock(&ptls->sleep_lock); + uv_mutex_lock(&safepoint_lock); + uv_cond_broadcast(&safepoint_cond_begin); + uv_mutex_unlock(&safepoint_lock); + uv_mutex_lock(&ptls->sleep_lock); + } + return gc_state; +} // n.b. suspended threads may still run in the GC or GC safe regions // but shouldn't be observable, depending on which enum the user picks (only 1 and 2 are typically recommended here) diff --git a/src/scheduler.c b/src/scheduler.c index 7e23f654c2566..fff891d91a813 100644 --- a/src/scheduler.c +++ b/src/scheduler.c @@ -499,8 +499,7 @@ JL_DLLEXPORT jl_task_t *jl_task_get_next(jl_value_t *trypoptask, jl_value_t *q, // the other threads will just wait for an individual wake signal to resume JULIA_DEBUG_SLEEPWAKE( ptls->sleep_enter = cycleclock() ); - int8_t gc_state = jl_gc_safe_enter(ptls); - uv_mutex_lock(&ptls->sleep_lock); + int8_t gc_state = jl_safepoint_take_sleep_lock(ptls); // This puts the thread in GC_SAFE and takes the sleep lock while (may_sleep(ptls)) { if (ptls->tid == 0) { task = wait_empty; From 9af0dea9d2c7c957bec7a14acacf0b234447be31 Mon Sep 17 00:00:00 2001 From: Jerry Ling Date: Tue, 5 Nov 2024 09:52:14 -0500 Subject: [PATCH 372/537] Profile: mention `kill -s SIGUSR1 julia_pid` for Linux (#56441) Currently this route is mentioned in the docs https://docs.julialang.org/en/v1/stdlib/Profile/#Triggered-During-Execution but is missing from the module docstring; this should help users who have little idea how to "send a kernel signal to a process" get started --------- Co-authored-by: Ian Butterworth --- stdlib/Profile/src/Profile.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stdlib/Profile/src/Profile.jl b/stdlib/Profile/src/Profile.jl index bea8f288937d0..694d1292b02ab 100644 --- a/stdlib/Profile/src/Profile.jl +++ b/stdlib/Profile/src/Profile.jl @@ -9,7 +9,7 @@ Profiling support. - `@profile foo()` to profile a specific call. - `Profile.print()` to print the report. Paths are clickable links in supported terminals and specialized for JULIA_EDITOR etc. - `Profile.clear()` to clear the buffer. -- Send a $(Sys.isbsd() ?
"SIGINFO (ctrl-t)" : "SIGUSR1") signal to the process to automatically trigger a profile and print. +- Send a SIGUSR1 (on linux) or SIGINFO (on macOS/BSD) signal to the process to automatically trigger a profile and print. i.e. `kill -s SIGUSR1/SIGINFO 1234`, where 1234 is the pid of the julia process. On macOS & BSD platforms `ctrl-t` can be used directly. ## Memory profiling - `Profile.Allocs.@profile [sample_rate=0.1] foo()` to sample allocations within a specific call. A sample rate of 1.0 will record everything; 0.0 will record nothing. From cbcad6f721e88fff829907b51f809ef963fa0a2a Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Tue, 5 Nov 2024 17:02:29 -0600 Subject: [PATCH 373/537] Fix and test an overflow issue in `searchsorted` (#56464) And remove `searchsorted` special cases for offset arrays in tests that had the impact of bypassing actually testing `searchsorted` behavior on offset arrays To be clear, after this bugfix the function is still broken, just a little bit less so. --- base/sort.jl | 4 ++-- test/sorting.jl | 4 ++++ test/testhelpers/OffsetArrays.jl | 11 ----------- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/base/sort.jl b/base/sort.jl index ef0f208209fc8..2251d0b965228 100644 --- a/base/sort.jl +++ b/base/sort.jl @@ -231,8 +231,8 @@ function searchsorted(v::AbstractVector, x, ilo::T, ihi::T, o::Ordering)::UnitRa elseif lt(o, x, v[m]) hi = m else - a = searchsortedfirst(v, x, max(lo,ilo), m, o) - b = searchsortedlast(v, x, m, min(hi,ihi), o) + a = searchsortedfirst(v, x, lo+u, m, o) + b = searchsortedlast(v, x, m, hi-u, o) return a : b end end diff --git a/test/sorting.jl b/test/sorting.jl index 2714197f58823..8cbdb94f02b16 100644 --- a/test/sorting.jl +++ b/test/sorting.jl @@ -581,6 +581,10 @@ end @test searchsortedfirst(o, 1.5) == 0 @test searchsortedlast(o, 0) == firstindex(o) - 1 @test searchsortedlast(o, 1.5) == -1 + + # Issue #56457 + o2 = OffsetArray([2,2,3], typemax(Int)-3); + @test searchsorted(o2, 2) == firstindex(o2):firstindex(o2)+1 end function adaptive_sort_test(v; trusted=InsertionSort, kw...) diff --git a/test/testhelpers/OffsetArrays.jl b/test/testhelpers/OffsetArrays.jl index 3463d5a94393d..06e65f8928036 100644 --- a/test/testhelpers/OffsetArrays.jl +++ b/test/testhelpers/OffsetArrays.jl @@ -821,17 +821,6 @@ centered(A::AbstractArray, cp::Dims=center(A)) = OffsetArray(A, .-cp) centered(A::AbstractArray, i::CartesianIndex) = centered(A, Tuple(i)) -# we may pass the searchsorted* functions to the parent, and wrap the offset -for f in [:searchsortedfirst, :searchsortedlast, :searchsorted] - _safe_f = Symbol("_safe_" * String(f)) - @eval function $_safe_f(v::OffsetArray, x, ilo, ihi, o::Base.Ordering) - offset = firstindex(v) - firstindex(parent(v)) - $f(parent(v), x, ilo - offset, ihi - offset, o) .+ offset - end - @eval Base.$f(v::OffsetVector, x, ilo::T, ihi::T, o::Base.Ordering) where T<:Integer = - $_safe_f(v, x, ilo, ihi, o) -end - ## # Deprecations ## From d3130aebc287661a6420bd097cb280a584e4dcc1 Mon Sep 17 00:00:00 2001 From: Florian Date: Wed, 6 Nov 2024 04:33:56 +0100 Subject: [PATCH 374/537] Update docs of calling convention arg in `:foreigncall` AST node (#56417) --- doc/src/devdocs/ast.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/devdocs/ast.md b/doc/src/devdocs/ast.md index 50a64ec5813f7..d8db3ca677082 100644 --- a/doc/src/devdocs/ast.md +++ b/doc/src/devdocs/ast.md @@ -501,9 +501,9 @@ These symbols appear in the `head` field of [`Expr`](@ref)s in lowered form. 
The number of required arguments for a varargs function definition. - * `args[5]::QuoteNode{Symbol}` : calling convention + * `args[5]::QuoteNode{<:Union{Symbol,Tuple{Symbol,UInt16}}`: calling convention - The calling convention for the call. + The calling convention for the call, optionally with effects. * `args[6:5+length(args[3])]` : arguments From 80a279152bd090cca0a3bed598f0059cffa74b26 Mon Sep 17 00:00:00 2001 From: Tianyi Pu <44583944+putianyi889@users.noreply.github.com> Date: Wed, 6 Nov 2024 06:40:00 +0000 Subject: [PATCH 375/537] `step(::AbstractUnitRange{Bool})` should return `Bool` (#56405) The issue was introduced by #27302 , as ```julia julia> true-false 1 ``` By definitions below, `AbstractUnitRange{Bool} <: OrdinalRange{Bool, Bool}` whose step type is `Bool`. https://github.com/JuliaLang/julia/blob/da74ef1933b12410b217748e0f7fbcbe52e10d29/base/range.jl#L280-L299 --------- Co-authored-by: Matt Bauman Co-authored-by: Matt Bauman --- base/range.jl | 1 + test/ranges.jl | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/base/range.jl b/base/range.jl index c4435f2ff3e97..b05dddb025a7c 100644 --- a/base/range.jl +++ b/base/range.jl @@ -711,6 +711,7 @@ julia> step(range(2.5, stop=10.9, length=85)) """ step(r::StepRange) = r.step step(r::AbstractUnitRange{T}) where {T} = oneunit(T) - zero(T) +step(r::AbstractUnitRange{Bool}) = true step(r::StepRangeLen) = r.step step(r::StepRangeLen{T}) where {T<:AbstractFloat} = T(r.step) step(r::LinRange) = (last(r)-first(r))/r.lendiv diff --git a/test/ranges.jl b/test/ranges.jl index 629c2966b2fa6..73595e3056081 100644 --- a/test/ranges.jl +++ b/test/ranges.jl @@ -65,6 +65,10 @@ using .Main.OffsetArrays unitrangeerrstr = "promotion of types Char and Char failed to change any arguments" @test_throws unitrangeerrstr UnitRange('a', 'b') + + @test step(false:true) === true # PR 56405 + @test eltype((false:true) + (Int8(0):Int8(1))) === Int8 + @test eltype((false:true:true) + (Int8(0):Int8(1))) === Int8 end using Dates, Random From d63dd4b5b2e454828d64adf545807e5356b274fa Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki Date: Tue, 5 Nov 2024 15:47:15 +0900 Subject: [PATCH 376/537] fixup! JuliaLang/julia#56028, fix up the type-level escapability check In JuliaLang/julia#56028, the type-level escapability check was changed to use `is_mutation_free_argtype`, but this was a mistake because EA no longer runs for structs like `mutable struct ForeignBuffer{T}; const ptr::Ptr{T}; end`. This commit changes it to use `is_identity_free_argtype` instead, which can be used to detect whether a type may contain any mutable allocations or not. 
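To make the distinction concrete, here is a minimal sketch of the kind of type the message above refers to. It is an editorial illustration: the struct mirrors the one quoted in the commit message, while the comments are an interpretation of its reasoning and do not reproduce the real `Core.Compiler` predicates.

```julia
# Sketch only: `ForeignBuffer` is the struct quoted above; the commentary is an
# assumption about why the two predicates classify it differently.
mutable struct ForeignBuffer{T}
    const ptr::Ptr{T}   # every field is `const`, so no `setfield!` is possible
end

buf = ForeignBuffer{Float64}(C_NULL)
Base.ismutabletype(ForeignBuffer{Float64})  # true: instances still carry object identity
# A check based on field mutability can treat such a type as inert and skip
# escape analysis for it, whereas an identity-based check ("may this value hold
# a mutable allocation?") keeps EA running, which the finalizer handling needs.
```
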
--- base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl | 11 +++++++---- test/compiler/inline.jl | 3 +++ 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl index 1f98758cd6055..887a21ef7e0f6 100644 --- a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl +++ b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl @@ -26,7 +26,7 @@ using ._TOP_MOD: # Base definitions using Core.Compiler: # Core.Compiler specific definitions AbstractLattice, Bottom, IRCode, IR_FLAG_NOTHROW, InferenceResult, SimpleInferenceLattice, argextype, fieldcount_noerror, hasintersect, has_flag, intrinsic_nothrow, - is_meta_expr_head, is_mutation_free_argtype, isexpr, println, setfield!_nothrow, + is_meta_expr_head, is_identity_free_argtype, isexpr, println, setfield!_nothrow, singleton_type, try_compute_field, try_compute_fieldidx, widenconst, ⊑ include(x) = _TOP_MOD.include(@__MODULE__, x) @@ -861,7 +861,7 @@ function add_escape_change!(astate::AnalysisState, @nospecialize(x), xinfo::Esca xinfo === ⊥ && return nothing # performance optimization xidx = iridx(x, astate.estate) if xidx !== nothing - if force || !is_mutation_free_argtype(argextype(x, astate.ir)) + if force || !is_identity_free_argtype(argextype(x, astate.ir)) push!(astate.changes, EscapeChange(xidx, xinfo)) end end @@ -871,7 +871,7 @@ end function add_liveness_change!(astate::AnalysisState, @nospecialize(x), livepc::Int) xidx = iridx(x, astate.estate) if xidx !== nothing - if !is_mutation_free_argtype(argextype(x, astate.ir)) + if !is_identity_free_argtype(argextype(x, astate.ir)) push!(astate.changes, LivenessChange(xidx, livepc)) end end @@ -1077,7 +1077,10 @@ function escape_invoke!(astate::AnalysisState, pc::Int, args::Vector{Any}) # to consider the possibility of aliasing between them and the return value. 
for argidx = first_idx:last_idx arg = args[argidx] - if !is_mutation_free_argtype(argextype(arg, astate.ir)) + if arg isa GlobalRef + continue # :effect_free guarantees that nothings escapes to the global scope + end + if !is_identity_free_argtype(argextype(arg, astate.ir)) add_alias_change!(astate, ret, arg) end end diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl index 416f3873c5422..3ddb0e968d020 100644 --- a/test/compiler/inline.jl +++ b/test/compiler/inline.jl @@ -2275,6 +2275,9 @@ function f_EA_finalizer(N::Int) Base.@assume_effects :nothrow @noinline println(devnull, "ptr = ", ptr) end end +let src = code_typed1(foreign_alloc, (Type{Float64},Int,)) + @test count(iscall((src, Core.finalizer)), src.code) == 1 +end let src = code_typed1(f_EA_finalizer, (Int,)) @test count(iscall((src, Core.finalizer)), src.code) == 0 end From f49f2928fee994ef679dcabc8b1db739a57e4606 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki Date: Wed, 6 Nov 2024 15:03:33 +0900 Subject: [PATCH 377/537] add `show(::IO, ::ArgEscapeInfo)` --- test/compiler/EscapeAnalysis/EAUtils.jl | 38 ++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/test/compiler/EscapeAnalysis/EAUtils.jl index c71b821fd25f3..65fa9f75fe03f 100644 --- a/test/compiler/EscapeAnalysis/EAUtils.jl +++ b/test/compiler/EscapeAnalysis/EAUtils.jl @@ -18,7 +18,7 @@ using Core: CodeInstance, MethodInstance, CodeInfo using .CC: InferenceResult, InferenceState, OptimizationState, IRCode -using .EA: analyze_escapes, ArgEscapeCache, EscapeInfo, EscapeState +using .EA: analyze_escapes, ArgEscapeCache, ArgEscapeInfo, EscapeInfo, EscapeState struct EAToken end @@ -167,6 +167,42 @@ function Base.show(io::IO, x::EscapeInfo) end end +function get_sym_color(x::ArgEscapeInfo) + escape_bits = x.escape_bits + if escape_bits == EA.ARG_ALL_ESCAPE + color, sym = :red, "X" + elseif escape_bits == 0x00 + color, sym = :green, "✓" + else + color, sym = :bold, "*" + if !iszero(escape_bits & EA.ARG_RETURN_ESCAPE) + color, sym = :blue, "↑" + end + if !iszero(escape_bits & EA.ARG_THROWN_ESCAPE) + color = :yellow + end + end + return sym, color +end + +function Base.show(io::IO, x::ArgEscapeInfo) + escape_bits = x.escape_bits + if escape_bits == EA.ARG_ALL_ESCAPE + color, sym = :red, "X" + elseif escape_bits == 0x00 + color, sym = :green, "✓" + else + color, sym = :bold, "*" + if !iszero(escape_bits & EA.ARG_RETURN_ESCAPE) + color, sym = :blue, "↑" + end + if !iszero(escape_bits & EA.ARG_THROWN_ESCAPE) + color = :yellow + end + end + printstyled(io, "ArgEscapeInfo(", sym, ")"; color) +end + struct EscapeResult ir::IRCode state::EscapeState From 1782c6b2b1dd468e2f358de929b745f5353a499d Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Wed, 6 Nov 2024 19:36:06 +0900 Subject: [PATCH 378/537] EA: disable finalizer inlining for allocations that are edges of `PhiNode`s (#56455) The current EA-based finalizer inlining implementation can create invalid IR when the target object is later aliased as a `PhiNode`, which was causing #56422. In such cases, finalizer inlining for the allocations that are edges of each `PhiNode` should be avoided, and instead, finalizer inlining should ideally be applied to the `PhiNode` itself, but implementing that is somewhat complex. As a temporary fix, this commit disables inlining in those cases. 
- fixes #56422 --- base/compiler/ssair/passes.jl | 5 ++++- test/compiler/inline.jl | 17 +++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl index b483c307a2f5e..dad4a09a3e710 100644 --- a/base/compiler/ssair/passes.jl +++ b/base/compiler/ssair/passes.jl @@ -1731,8 +1731,11 @@ function sroa_mutables!(ir::IRCode, defuses::IdDict{Int,Tuple{SPCSet,SSADefUse}} if finalizer_useidx isa Int nargs = length(ir.argtypes) # COMBAK this might need to be `Int(opt.src.nargs)` estate = EscapeAnalysis.analyze_escapes(ir, nargs, 𝕃ₒ, get_escape_cache(inlining.interp)) + # disable finalizer inlining when this allocation is aliased to somewhere, + # mostly likely to edges of `PhiNode` + hasaliases = EscapeAnalysis.getaliases(SSAValue(defidx), estate) !== nothing einfo = estate[SSAValue(defidx)] - if EscapeAnalysis.has_no_escape(einfo) + if !hasaliases && EscapeAnalysis.has_no_escape(einfo) already = BitSet(use.idx for use in defuse.uses) for idx = einfo.Liveness if idx ∉ already diff --git a/test/compiler/inline.jl b/test/compiler/inline.jl index 3ddb0e968d020..9895471ab1b27 100644 --- a/test/compiler/inline.jl +++ b/test/compiler/inline.jl @@ -2286,6 +2286,23 @@ let;Base.Experimental.@force_compile @test foreign_buffer_checker.finalized end +# JuliaLang/julia#56422: +# EA-based finalizer inlining should not result in an invalid IR in the existence of `PhiNode`s +function issue56422(cnd::Bool, N::Int) + if cnd + workspace = foreign_alloc(Float64, N) + else + workspace = foreign_alloc(Float64, N+1) + end + GC.@preserve workspace begin + (;ptr) = workspace + Base.@assume_effects :nothrow @noinline println(devnull, "ptr = ", ptr) + end +end +let src = code_typed1(issue56422, (Bool,Int,)) + @test_broken count(iscall((src, Core.finalizer)), src.code) == 0 +end + # Test that inlining doesn't unnecessarily move things to statement position @noinline f_noinline_invoke(x::Union{Symbol,Nothing}=nothing) = Core.donotdelete(x) g_noinline_invoke(x) = f_noinline_invoke(x) From e3d26c27d3beb0965c353d9b69686a96012d793e Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Wed, 6 Nov 2024 19:39:46 +0900 Subject: [PATCH 379/537] make `verify_ir` error messages more informative (#56452) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, when `verify_ir` finds an error, the `IRCode` is printed, but it's not easy to determine which method instance generated that `IRCode`. This commit adds method instance and code location information to the error message, making it easier to identify the problematic code. E.g.: ```julia [...] 610 │ %95 = builtin Core.tuple(%48, %94)::Tuple{GMT.Gdal.IGeometry, GMT.Gdal.IGeometry} └─── return %95 ERROR: IR verification failed. Code location: ~/julia/packages/GMT/src/gdal_extensions.jl:606 Method instance: MethodInstance for GMT.Gdal.helper_2geoms(::Matrix{Float64}, ::Matrix{Float64}) Stacktrace: [1] error(::String, ::String, ::String, ::Symbol, ::String, ::Int32, ::String, ::String, ::Core.MethodInstance) @ Core.Compiler ./error.jl:53 [...] 
``` --- base/compiler/optimize.jl | 2 +- base/compiler/ssair/verify.jl | 119 ++++++++++++++++++++-------------- test/compiler/ssair.jl | 2 +- 3 files changed, 73 insertions(+), 50 deletions(-) diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index 73947ad3fc280..edc374f675c5f 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -1033,7 +1033,7 @@ function run_passes_ipo_safe( end if is_asserts() @timeit "verify 3" begin - verify_ir(ir, true, false, optimizer_lattice(sv.inlining.interp)) + verify_ir(ir, true, false, optimizer_lattice(sv.inlining.interp), sv.linfo) verify_linetable(ir.debuginfo, length(ir.stmts)) end end diff --git a/base/compiler/ssair/verify.jl b/base/compiler/ssair/verify.jl index 268991282c483..14ca6ef2dbe9a 100644 --- a/base/compiler/ssair/verify.jl +++ b/base/compiler/ssair/verify.jl @@ -7,6 +7,7 @@ function maybe_show_ir(ir::IRCode) else Core.show(ir) end + Core.println(Core.stdout) end if !isdefined(@__MODULE__, Symbol("@verify_error")) @@ -25,7 +26,8 @@ end is_toplevel_expr_head(head::Symbol) = head === :global || head === :method || head === :thunk is_value_pos_expr_head(head::Symbol) = head === :static_parameter -function check_op(ir::IRCode, domtree::DomTree, @nospecialize(op), use_bb::Int, use_idx::Int, printed_use_idx::Int, print::Bool, isforeigncall::Bool, arg_idx::Int, allow_frontend_forms::Bool) +function check_op(ir::IRCode, domtree::DomTree, @nospecialize(op), use_bb::Int, use_idx::Int, printed_use_idx::Int, print::Bool, isforeigncall::Bool, arg_idx::Int, + allow_frontend_forms::Bool, @nospecialize(raise_error)) if isa(op, SSAValue) op.id > 0 || @verify_error "Def ($(op.id)) is invalid in final IR" if op.id > length(ir.stmts) @@ -39,14 +41,14 @@ function check_op(ir::IRCode, domtree::DomTree, @nospecialize(op), use_bb::Int, else if op.id >= use_idx @verify_error "Def ($(op.id)) does not dominate use ($(use_idx)) in same BB" - error("") + raise_error() end end else if !dominates(domtree, def_bb, use_bb) && !(bb_unreachable(domtree, def_bb) && bb_unreachable(domtree, use_bb)) # At the moment, we allow GC preserve tokens outside the standard domination notion @verify_error "Basic Block $def_bb does not dominate block $use_bb (tried to use value %$(op.id) at %$(printed_use_idx))" - error("") + raise_error() end end @@ -56,12 +58,12 @@ function check_op(ir::IRCode, domtree::DomTree, @nospecialize(op), use_bb::Int, # an earlier block got deleted, but for some reason we didn't figure # out yet that this entire block is dead also. 
@verify_error "At statement %$use_idx: Invalid use of value statement or terminator %$(op.id)" - error("") + raise_error() end elseif isa(op, GlobalRef) if !isdefined(op.mod, op.name) || !isconst(op.mod, op.name) @verify_error "Unbound GlobalRef not allowed in value position" - error("") + raise_error() end elseif isa(op, Expr) # Only Expr(:boundscheck) is allowed in value position @@ -72,15 +74,15 @@ function check_op(ir::IRCode, domtree::DomTree, @nospecialize(op), use_bb::Int, elseif !is_value_pos_expr_head(op.head) if !allow_frontend_forms || op.head !== :opaque_closure_method @verify_error "Expr not allowed in value position" - error("") + raise_error() end end elseif isa(op, Union{OldSSAValue, NewSSAValue}) @verify_error "At statement %$use_idx: Left over SSA marker ($op)" - error("") + raise_error() elseif isa(op, SlotNumber) @verify_error "Left over slot detected in converted IR" - error("") + raise_error() end end @@ -96,31 +98,49 @@ end function verify_ir(ir::IRCode, print::Bool=true, allow_frontend_forms::Bool=false, - 𝕃ₒ::AbstractLattice = SimpleInferenceLattice.instance) + 𝕃ₒ::AbstractLattice = SimpleInferenceLattice.instance, + mi::Union{Nothing,MethodInstance}=nothing) + function raise_error() + error_args = Any["IR verification failed."] + if isdefined(Core, :Main) && isdefined(Core.Main, :Base) + # ensure we use I/O that does not yield, as this gets called during compilation + firstline = invokelatest(Core.Main.Base.IRShow.debuginfo_firstline, ir.debuginfo) + else + firstline = nothing + end + if firstline !== nothing + file, line = firstline + push!(error_args, "\n", " Code location: ", file, ":", line) + end + if mi !== nothing + push!(error_args, "\n", " Method instance: ", mi) + end + error(error_args...) + end # Verify CFG graph. 
Must be well formed to construct domtree if !(length(ir.cfg.blocks) - 1 <= length(ir.cfg.index) <= length(ir.cfg.blocks)) @verify_error "CFG index length ($(length(ir.cfg.index))) does not correspond to # of blocks $(length(ir.cfg.blocks))" - error("") + raise_error() end if length(ir.stmts.stmt) != length(ir.stmts) @verify_error "IR stmt length is invalid $(length(ir.stmts.stmt)) / $(length(ir.stmts))" - error("") + raise_error() end if length(ir.stmts.type) != length(ir.stmts) @verify_error "IR type length is invalid $(length(ir.stmts.type)) / $(length(ir.stmts))" - error("") + raise_error() end if length(ir.stmts.info) != length(ir.stmts) @verify_error "IR info length is invalid $(length(ir.stmts.info)) / $(length(ir.stmts))" - error("") + raise_error() end if length(ir.stmts.line) != length(ir.stmts) * 3 @verify_error "IR line length is invalid $(length(ir.stmts.line)) / $(length(ir.stmts) * 3)" - error("") + raise_error() end if length(ir.stmts.flag) != length(ir.stmts) @verify_error "IR flag length is invalid $(length(ir.stmts.flag)) / $(length(ir.stmts))" - error("") + raise_error() end # For now require compact IR # @assert isempty(ir.new_nodes) @@ -132,43 +152,43 @@ function verify_ir(ir::IRCode, print::Bool=true, p == 0 && continue if !(1 <= p <= length(ir.cfg.blocks)) @verify_error "Predecessor $p of block $idx out of bounds for IR" - error("") + raise_error() end c = count_int(idx, ir.cfg.blocks[p].succs) if c == 0 @verify_error "Predecessor $p of block $idx not in successor list" - error("") + raise_error() elseif c == 2 if count_int(p, block.preds) != 2 @verify_error "Double edge from $p to $idx not correctly accounted" - error("") + raise_error() end end end for s in block.succs if !(1 <= s <= length(ir.cfg.blocks)) @verify_error "Successor $s of block $idx out of bounds for IR" - error("") + raise_error() end if !(idx in ir.cfg.blocks[s].preds) #Base.@show ir.cfg #Base.@show ir #Base.@show ir.argtypes @verify_error "Successor $s of block $idx not in predecessor list" - error("") + raise_error() end end if !(1 <= first(block.stmts) <= length(ir.stmts)) @verify_error "First statement of BB $idx ($(first(block.stmts))) out of bounds for IR (length=$(length(ir.stmts)))" - error("") + raise_error() end if !(1 <= last(block.stmts) <= length(ir.stmts)) @verify_error "Last statement of BB $idx ($(last(block.stmts))) out of bounds for IR (length=$(length(ir.stmts)))" - error("") + raise_error() end if idx <= length(ir.cfg.index) && last(block.stmts) + 1 != ir.cfg.index[idx] @verify_error "End of BB $idx ($(last(block.stmts))) is not one less than CFG index ($(ir.cfg.index[idx]))" - error("") + raise_error() end end # Verify statements @@ -177,7 +197,7 @@ function verify_ir(ir::IRCode, print::Bool=true, if first(block.stmts) != last_end + 1 #ranges = [(idx,first(bb.stmts),last(bb.stmts)) for (idx, bb) in pairs(ir.cfg.blocks)] @verify_error "First statement of BB $idx ($(first(block.stmts))) does not match end of previous ($last_end)" - error("") + raise_error() end last_end = last(block.stmts) terminator = ir[SSAValue(last_end)][:stmt] @@ -186,32 +206,32 @@ function verify_ir(ir::IRCode, print::Bool=true, if isa(terminator, ReturnNode) if !isempty(block.succs) @verify_error "Block $idx ends in return or unreachable, but has successors" - error("") + raise_error() end elseif isa(terminator, GotoNode) if length(block.succs) != 1 || block.succs[1] != terminator.label @verify_error "Block $idx successors ($(block.succs)), does not match GotoNode terminator ($(terminator.label))" - 
error("") + raise_error() end elseif isa(terminator, GotoIfNot) if terminator.dest == idx + 1 @verify_error "Block $idx terminator forms a double edge to block $(idx+1)" - error("") + raise_error() end if length(block.succs) != 2 || (block.succs != [terminator.dest, idx+1] && block.succs != [idx+1, terminator.dest]) @verify_error "Block $idx successors ($(block.succs)), does not match GotoIfNot terminator" - error("") + raise_error() end elseif isa(terminator, EnterNode) @label enter_check if length(block.succs) == 1 if terminator.catch_dest != 0 @verify_error "Block $idx successors ($(block.succs)), does not match :enter terminator" - error("") + raise_error() end elseif (block.succs != Int[terminator.catch_dest, idx+1] && block.succs != Int[idx+1, terminator.catch_dest]) @verify_error "Block $idx successors ($(block.succs)), does not match :enter terminator" - error("") + raise_error() end else if length(block.succs) != 1 || block.succs[1] != idx + 1 @@ -233,14 +253,14 @@ function verify_ir(ir::IRCode, print::Bool=true, # here, but that isn't always possible. else @verify_error "Block $idx successors ($(block.succs)), does not match fall-through terminator %$termidx ($terminator)::$stmttyp" - error("") + raise_error() end end end end if length(ir.stmts) != last(ir.cfg.blocks[end].stmts) @verify_error "End of last BB $(last(ir.cfg.blocks[end].stmts)) does not match last IR statement $(length(ir.stmts))" - error("") + raise_error() end lastbb = 0 is_phinode_block = false @@ -260,7 +280,7 @@ function verify_ir(ir::IRCode, print::Bool=true, if isa(stmt, PhiNode) if !is_phinode_block @verify_error "φ node $idx is not at the beginning of the basic block $bb" - error("") + raise_error() end lastphi = idx @assert length(stmt.edges) == length(stmt.values) @@ -271,20 +291,20 @@ function verify_ir(ir::IRCode, print::Bool=true, if edge == edge′ # TODO: Move `unique` to Core.Compiler. For now we assume the predecessor list is always unique. @verify_error "Edge list φ node $idx in bb $bb not unique (double edge?)" - error("") + raise_error() end end if !(edge == 0 && bb == 1) && !(edge in ir.cfg.blocks[bb].preds) #Base.@show ir.argtypes #Base.@show ir @verify_error "Edge $edge of φ node $idx not in predecessor list" - error("") + raise_error() end edge == 0 && continue if bb_unreachable(domtree, Int(edge)) # TODO: Disallow? 
#@verify_error "Unreachable edge from #$edge should have been cleaned up at idx $idx" - #error("") + #raise_error() continue end isassigned(stmt.values, i) || continue @@ -297,10 +317,11 @@ function verify_ir(ir::IRCode, print::Bool=true, # PhiNode type was $phiT # Value type was $(ir.stmts[val.id][:type]) #""" - #error("") + #raise_error() end end - check_op(ir, domtree, val, Int(edge), last(ir.cfg.blocks[stmt.edges[i]].stmts)+1, idx, print, false, i, allow_frontend_forms) + check_op(ir, domtree, val, Int(edge), last(ir.cfg.blocks[stmt.edges[i]].stmts)+1, idx, print, false, i, + allow_frontend_forms, raise_error) end continue end @@ -311,7 +332,8 @@ function verify_ir(ir::IRCode, print::Bool=true, for validate_idx in firstidx:(lastphi-1) validate_stmt = ir[SSAValue(validate_idx)][:stmt] isa(validate_stmt, PhiNode) && continue - check_op(ir, domtree, validate_stmt, bb, idx, idx, print, false, 0, allow_frontend_forms) + check_op(ir, domtree, validate_stmt, bb, idx, idx, print, false, 0, + allow_frontend_forms, raise_error) end is_phinode_block = false end @@ -321,21 +343,21 @@ function verify_ir(ir::IRCode, print::Bool=true, val = stmt.values[i] if !isa(val, SSAValue) @verify_error "Operand $i of PhiC node $idx must be an SSA Value." - error("") + raise_error() end if !isa(ir[val][:stmt], UpsilonNode) @verify_error "Operand $i of PhiC node $idx must reference an Upsilon node." - error("") + raise_error() end end elseif isterminator(stmt) if idx != last(ir.cfg.blocks[bb].stmts) @verify_error "Terminator $idx in bb $bb is not the last statement in the block" - error("") + raise_error() end if !isa(stmt, ReturnNode) && ir[SSAValue(idx)][:type] !== Any @verify_error "Explicit terminators (other than ReturnNode) must have `Any` type" - error("") + raise_error() end else isforeigncall = false @@ -343,7 +365,7 @@ function verify_ir(ir::IRCode, print::Bool=true, if stmt.head === :(=) if stmt.args[1] isa SSAValue @verify_error "SSAValue as assignment LHS" - error("") + raise_error() end if stmt.args[2] isa GlobalRef # undefined GlobalRef as assignment RHS is OK @@ -352,7 +374,7 @@ function verify_ir(ir::IRCode, print::Bool=true, elseif stmt.head === :isdefined if length(stmt.args) > 2 || (length(stmt.args) == 2 && !isa(stmt.args[2], Bool)) @verify_error "malformed isdefined" - error("") + raise_error() end if stmt.args[1] isa GlobalRef # undefined GlobalRef is OK in isdefined @@ -380,12 +402,12 @@ function verify_ir(ir::IRCode, print::Bool=true, arg = stmt.args[i] if !isa(arg, Union{Nothing, SSAValue}) @verify_error "Malformed :leave - Expected `Nothing` or SSAValue" - error() + raise_error() elseif isa(arg, SSAValue) enter_stmt = ir[arg::SSAValue][:stmt] if !isa(enter_stmt, Nothing) && !isa(enter_stmt, EnterNode) @verify_error "Malformed :leave - argument ssavalue should point to `nothing` or :enter" - error() + raise_error() end end end @@ -394,7 +416,8 @@ function verify_ir(ir::IRCode, print::Bool=true, n = 1 for op in userefs(stmt) op = op[] - check_op(ir, domtree, op, bb, idx, idx, print, isforeigncall, n, allow_frontend_forms) + check_op(ir, domtree, op, bb, idx, idx, print, isforeigncall, n, + allow_frontend_forms, raise_error) n += 1 end end diff --git a/test/compiler/ssair.jl b/test/compiler/ssair.jl index b7d75d0be5567..39ec60a429677 100644 --- a/test/compiler/ssair.jl +++ b/test/compiler/ssair.jl @@ -172,7 +172,7 @@ let code = Any[ ] ir = make_ircode(code; verify=false) ir = Core.Compiler.compact!(ir, true) - @test_throws ErrorException Core.Compiler.verify_ir(ir, false) + @test_throws 
["IR verification failed.", "Code location: "] Core.Compiler.verify_ir(ir, false) end # Issue #29107 From ded3e3dcd7d0cbe2dd27d28d0ed886ac28234aec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Wed, 6 Nov 2024 14:15:41 +0000 Subject: [PATCH 380/537] [GHA] Explicitly install Julia for whitespace workflow (#56468) So far we relied on the fact that Julia comes in the default Ubuntu images on GitHub Actions runners, but this may change in the future (although there's apparently no plan in this direction for the time being). To make the workflow more future-proof, we now explicitly install Julia using a dedicated workflow. --- .github/workflows/Whitespace.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/Whitespace.yml b/.github/workflows/Whitespace.yml index 5706f6148dc33..37c9dbfd39a3c 100644 --- a/.github/workflows/Whitespace.yml +++ b/.github/workflows/Whitespace.yml @@ -18,6 +18,9 @@ jobs: uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: persist-credentials: false + - uses: julia-actions/setup-julia@9b79636afcfb07ab02c256cede01fe2db6ba808c # v2.6.0 + with: + version: '1' - name: Check whitespace run: | contrib/check-whitespace.jl From bce3d4d1a1cc2da737eb18dc1efe24256d6fe4cb Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Wed, 6 Nov 2024 10:29:01 -0500 Subject: [PATCH 381/537] Allow taking Matrix slices without an extra allocation (#56236) Since changing Array to use Memory as the backing, we had the option of making non-Vector arrays more flexible, but had instead preserved the restriction that they must be zero offset and equal in length to the Memory. This results in extra complexity, restrictions, and allocations however, but doesn't gain many known benefits. Nanosoldier shows a decrease in performance on linear eachindex loops, which we theorize is due to a minor failure to CSE before SCEV or a lack of NUW/NSW on the length multiplication calculation. --- base/Base.jl | 8 ++++---- base/abstractarray.jl | 2 +- base/array.jl | 4 ---- base/essentials.jl | 3 ++- base/reshapedarray.jl | 32 +++++++++++++++++--------------- src/codegen.cpp | 15 +-------------- src/genericmemory.c | 35 +---------------------------------- src/julia.h | 6 +----- src/staticdata.c | 24 +++++++++--------------- 9 files changed, 36 insertions(+), 93 deletions(-) diff --git a/base/Base.jl b/base/Base.jl index 3b56dca166cee..874cec56329d1 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -354,14 +354,14 @@ include("set.jl") include("char.jl") function array_new_memory(mem::Memory{UInt8}, newlen::Int) # add an optimization to array_new_memory for StringVector - if (@assume_effects :total @ccall jl_genericmemory_owner(mem::Any,)::Any) isa String + if (@assume_effects :total @ccall jl_genericmemory_owner(mem::Any,)::Any) === mem + # TODO: when implemented, this should use a memory growing call + return typeof(mem)(undef, newlen) + else # If data is in a String, keep it that way. 
# When implemented, this could use jl_gc_expand_string(oldstr, newlen) as an optimization str = _string_n(newlen) return (@assume_effects :total !:consistent @ccall jl_string_to_genericmemory(str::Any,)::Memory{UInt8}) - else - # TODO: when implemented, this should use a memory growing call - return typeof(mem)(undef, newlen) end end include("strings/basic.jl") diff --git a/base/abstractarray.jl b/base/abstractarray.jl index cbbae8e852b2e..5413f4e177518 100644 --- a/base/abstractarray.jl +++ b/base/abstractarray.jl @@ -1584,7 +1584,7 @@ their component parts. A typical definition for an array that wraps a parent is `Base.dataids(C::CustomArray) = dataids(C.parent)`. """ dataids(A::AbstractArray) = (UInt(objectid(A)),) -dataids(A::Memory) = (B = ccall(:jl_genericmemory_owner, Any, (Any,), A); (UInt(pointer(B isa typeof(A) ? B : A)),)) +dataids(A::Memory) = (UInt(A.ptr),) dataids(A::Array) = dataids(A.ref.mem) dataids(::AbstractRange) = () dataids(x) = () diff --git a/base/array.jl b/base/array.jl index 40907b2b00317..7a9649f20dded 100644 --- a/base/array.jl +++ b/base/array.jl @@ -3141,10 +3141,6 @@ function _wrap(ref::MemoryRef{T}, dims::NTuple{N, Int}) where {T, N} mem_len = length(mem) + 1 - memoryrefoffset(ref) len = Core.checked_dims(dims...) @boundscheck mem_len >= len || invalid_wrap_err(mem_len, dims, len) - if N != 1 && !(ref === GenericMemoryRef(mem) && len === mem_len) - mem = ccall(:jl_genericmemory_slice, Memory{T}, (Any, Ptr{Cvoid}, Int), mem, ref.ptr_or_offset, len) - ref = memoryref(mem) - end return ref end diff --git a/base/essentials.jl b/base/essentials.jl index 750ee0f9c434c..64fbaea95d4e7 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -9,7 +9,8 @@ const Bottom = Union{} # Define minimal array interface here to help code used in macros: length(a::Array{T, 0}) where {T} = 1 length(a::Array{T, 1}) where {T} = getfield(a, :size)[1] -length(a::Array) = getfield(getfield(getfield(a, :ref), :mem), :length) +length(a::Array{T, 2}) where {T} = (sz = getfield(a, :size); sz[1] * sz[2]) +# other sizes are handled by generic prod definition for AbstractArray length(a::GenericMemory) = getfield(a, :length) throw_boundserror(A, I) = (@noinline; throw(BoundsError(A, I))) diff --git a/base/reshapedarray.jl b/base/reshapedarray.jl index 019f1d30a25c2..07f608588837b 100644 --- a/base/reshapedarray.jl +++ b/base/reshapedarray.jl @@ -35,31 +35,33 @@ end length(R::ReshapedArrayIterator) = length(R.iter) eltype(::Type{<:ReshapedArrayIterator{I}}) where {I} = @isdefined(I) ? ReshapedIndex{eltype(I)} : Any -## reshape(::Array, ::Dims) returns an Array, except for isbitsunion eltypes (issue #28611) +@noinline throw_dmrsa(dims, len) = + throw(DimensionMismatch("new dimensions $(dims) must be consistent with array length $len")) + +## reshape(::Array, ::Dims) returns a new Array (to avoid conditionally aliasing the structure, only the data) # reshaping to same # of dimensions @eval function reshape(a::Array{T,M}, dims::NTuple{N,Int}) where {T,N,M} - throw_dmrsa(dims, len) = - throw(DimensionMismatch("new dimensions $(dims) must be consistent with array length $len")) len = Core.checked_dims(dims...) 
# make sure prod(dims) doesn't overflow (and because of the comparison to length(a)) if len != length(a) throw_dmrsa(dims, length(a)) end - isbitsunion(T) && return ReshapedArray(a, dims, ()) - if N == M && dims == size(a) - return a - end ref = a.ref - if M == 1 && N !== 1 - mem = ref.mem::Memory{T} - if !(ref === memoryref(mem) && len === mem.length) - mem = ccall(:jl_genericmemory_slice, Memory{T}, (Any, Ptr{Cvoid}, Int), mem, ref.ptr_or_offset, len) - ref = memoryref(mem)::typeof(ref) - end - end - # or we could use `a = Array{T,N}(undef, ntuple(0, Val(N))); a.ref = ref; a.size = dims; return a` here + # or we could use `a = Array{T,N}(undef, ntuple(i->0, Val(N))); a.ref = ref; a.size = dims; return a` here to avoid the eval return $(Expr(:new, :(Array{T,N}), :ref, :dims)) end +## reshape!(::Array, ::Dims) returns the original array, but must have the same dimensions and length as the original +# see also resize! for a similar operation that can change the length +function reshape!(a::Array{T,N}, dims::NTuple{N,Int}) where {T,N} + len = Core.checked_dims(dims...) # make sure prod(dims) doesn't overflow (and because of the comparison to length(a)) + if len != length(a) + throw_dmrsa(dims, length(a)) + end + setfield!(a, :dims, dims) + return a +end + + """ reshape(A, dims...) -> AbstractArray diff --git a/src/codegen.cpp b/src/codegen.cpp index e2cccafd42e5f..e2bc8fe6e43d1 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1435,18 +1435,6 @@ static const auto jl_allocgenericmemory = new JuliaFunction{ - XSTR(jl_array_data_owner), - [](LLVMContext &C) { - auto T_prjlvalue = JuliaType::get_prjlvalue_ty(C); - return FunctionType::get(T_prjlvalue, - {T_prjlvalue}, false); - }, - [](LLVMContext &C) { return AttributeList::get(C, - Attributes(C, {Attribute::ReadOnly, Attribute::NoUnwind}), - Attributes(C, {Attribute::NonNull}), - None); }, -}; #define BOX_FUNC(ct,at,attrs,nbytes) \ static const auto box_##ct##_func = new JuliaFunction<>{ \ XSTR(jl_box_##ct), \ @@ -4341,8 +4329,7 @@ static bool emit_f_opmemory(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, } Value *data_owner = NULL; // owner object against which the write barrier must check if (isboxed || layout->first_ptr >= 0) { // if elements are just bits, don't need a write barrier - Value *V = emit_memoryref_FCA(ctx, ref, layout); - data_owner = emit_genericmemoryowner(ctx, CreateSimplifiedExtractValue(ctx, V, 1)); + data_owner = emit_memoryref_mem(ctx, ref, layout); } *ret = typed_store(ctx, ptr, diff --git a/src/genericmemory.c b/src/genericmemory.c index c310eb829e198..0bf2cf46edaae 100644 --- a/src/genericmemory.c +++ b/src/genericmemory.c @@ -197,7 +197,7 @@ JL_DLLEXPORT jl_value_t *jl_genericmemory_to_string(jl_genericmemory_t *m, size_ if (how != 0) { jl_value_t *o = jl_genericmemory_data_owner_field(m); jl_genericmemory_data_owner_field(m) = NULL; - if (how == 3 && jl_is_string(o) && + if (how == 3 && // implies jl_is_string(o) ((mlength + sizeof(void*) + 1 <= GC_MAX_SZCLASS) == (len + sizeof(void*) + 1 <= GC_MAX_SZCLASS))) { if (jl_string_data(o)[len] != '\0') jl_string_data(o)[len] = '\0'; @@ -221,39 +221,6 @@ JL_DLLEXPORT jl_genericmemory_t *jl_alloc_memory_any(size_t n) return jl_alloc_genericmemory(jl_memory_any_type, n); } -JL_DLLEXPORT jl_genericmemory_t *jl_genericmemory_slice(jl_genericmemory_t *mem, void *data, size_t len) -{ - // Given a GenericMemoryRef represented as `jl_genericmemory_ref ref = {data, mem}`, - // return a new GenericMemory that only accesses the slice from the given GenericMemoryRef to - // 
the given length if this is possible to return. This allows us to make - // `length(Array)==length(Array.ref.mem)`, for simplification of this. - jl_datatype_t *dt = (jl_datatype_t*)jl_typetagof(mem); - const jl_datatype_layout_t *layout = dt->layout; - // repeated checks here ensure the values cannot overflow, since we know mem->length is a reasonable value - if (len > mem->length) - jl_exceptionf(jl_argumenterror_type, "invalid GenericMemory slice"); // TODO: make a BoundsError - if (layout->flags.arrayelem_isunion) { - if (!((size_t)data == 0 && mem->length == len)) - jl_exceptionf(jl_argumenterror_type, "invalid GenericMemory slice"); // only exact slices are supported - data = mem->ptr; - } - else if (layout->size == 0) { - if ((size_t)data > mem->length || (size_t)data + len > mem->length) - jl_exceptionf(jl_argumenterror_type, "invalid GenericMemory slice"); // TODO: make a BoundsError - data = mem->ptr; - } - else { - if (data < mem->ptr || (char*)data > (char*)mem->ptr + mem->length * layout->size || (char*)data + len * layout->size > (char*)mem->ptr + mem->length * layout->size) - jl_exceptionf(jl_argumenterror_type, "invalid GenericMemory slice"); // TODO: make a BoundsError - } - jl_task_t *ct = jl_current_task; - jl_genericmemory_t *newmem = (jl_genericmemory_t*)jl_gc_alloc(ct->ptls, sizeof(jl_genericmemory_t) + sizeof(void*), dt); - newmem->length = len; - newmem->ptr = data; - jl_genericmemory_data_owner_field(newmem) = jl_genericmemory_owner(mem); - return newmem; -} - JL_DLLEXPORT void jl_genericmemory_copyto(jl_genericmemory_t *dest, char* destdata, jl_genericmemory_t *src, char* srcdata, size_t n) JL_NOTSAFEPOINT diff --git a/src/julia.h b/src/julia.h index 5b9986a5e68ee..301650540a15c 100644 --- a/src/julia.h +++ b/src/julia.h @@ -1233,7 +1233,7 @@ STATIC_INLINE jl_value_t *jl_svecset( 0 = data is inlined 1 = owns the gc-managed data, exclusively (will free it) 2 = malloc-allocated pointer (does not own it) - 3 = has a pointer to the object that owns the data pointer + 3 = has a pointer to the String object that owns the data pointer (m must be isbits) */ STATIC_INLINE int jl_genericmemory_how(jl_genericmemory_t *m) JL_NOTSAFEPOINT { @@ -1249,8 +1249,6 @@ STATIC_INLINE int jl_genericmemory_how(jl_genericmemory_t *m) JL_NOTSAFEPOINT STATIC_INLINE jl_value_t *jl_genericmemory_owner(jl_genericmemory_t *m JL_PROPAGATES_ROOT) JL_NOTSAFEPOINT { - if (jl_genericmemory_how(m) == 3) - return jl_genericmemory_data_owner_field(m); return (jl_value_t*)m; } @@ -1280,8 +1278,6 @@ STATIC_INLINE jl_value_t *jl_genericmemory_ptr_set( assert(i < m_->length); jl_atomic_store_release(((_Atomic(jl_value_t*)*)(m_->ptr)) + i, (jl_value_t*)x); if (x) { - if (jl_genericmemory_how(m_) == 3) - m = (void*)jl_genericmemory_data_owner_field(m_); jl_gc_wb(m, x); } return (jl_value_t*)x; diff --git a/src/staticdata.c b/src/staticdata.c index 0d609db03aebc..decc0ce6570aa 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -932,7 +932,7 @@ static void jl_insert_into_serialization_queue(jl_serializer_state *s, jl_value_ jl_genericmemory_t *m = (jl_genericmemory_t*)v; const char *data = (const char*)m->ptr; if (jl_genericmemory_how(m) == 3) { - jl_queue_for_serialization_(s, jl_genericmemory_data_owner_field(v), 1, immediate); + assert(jl_is_string(jl_genericmemory_data_owner_field(m))); } else if (layout->flags.arrayelem_isboxed) { size_t i, l = m->length; @@ -1472,17 +1472,7 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED jl_genericmemory_t *m = (jl_genericmemory_t*)v; const 
jl_datatype_layout_t *layout = t->layout; size_t len = m->length; - if (jl_genericmemory_how(m) == 3 && jl_is_genericmemory(jl_genericmemory_data_owner_field(m))) { - jl_genericmemory_t *owner = (jl_genericmemory_t*)jl_genericmemory_data_owner_field(m); - size_t data = ((char*)m->ptr - (char*)owner->ptr); // relocation offset (bytes) - write_uint(f, len); - write_uint(f, data); - write_pointerfield(s, (jl_value_t*)owner); - // similar to record_memoryref, but the field is always an (offset) pointer - arraylist_push(&s->memowner_list, (void*)(reloc_offset + offsetof(jl_genericmemory_t, ptr))); // relocation location - arraylist_push(&s->memowner_list, NULL); // relocation target (ignored) - } - // else if (jl_genericmemory_how(m) == 3) { + // if (jl_genericmemory_how(m) == 3) { // jl_value_t *owner = jl_genericmemory_data_owner_field(m); // write_uint(f, len); // write_pointerfield(s, owner); @@ -1491,7 +1481,8 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED // assert(new_mem->ptr == NULL); // new_mem->ptr = (void*)((char*)m->ptr - (char*)owner); // relocation offset // } - else { + // else + { size_t datasize = len * layout->size; size_t tot = datasize; int isbitsunion = layout->flags.arrayelem_isunion; @@ -1538,10 +1529,13 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED ios_write(s->const_data, (char*)m->ptr, tot); } } - if (len == 0) // TODO: should we have a zero-page, instead of writing each type's fragment separately? + if (len == 0) { // TODO: should we have a zero-page, instead of writing each type's fragment separately? write_padding(s->const_data, layout->size ? layout->size : isbitsunion); - else if (jl_genericmemory_how(m) == 3 && jl_is_string(jl_genericmemory_data_owner_field(m))) + } + else if (jl_genericmemory_how(m) == 3) { + assert(jl_is_string(jl_genericmemory_data_owner_field(m))); write_padding(s->const_data, 1); + } } else { // Pointer eltypes are encoded in the mutable data section From efc43cbd49c519ce5731c64905849e15b2570d87 Mon Sep 17 00:00:00 2001 From: Diogo Netto <61364108+d-netto@users.noreply.github.com> Date: Wed, 6 Nov 2024 12:53:27 -0300 Subject: [PATCH 382/537] [late-gc-lowering] null-out GC frame slots for dead objects (#52935) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Should fix https://github.com/JuliaLang/julia/issues/51818. MWE: ```julia function testme() X = @noinline rand(1_000_000_00) Y = @noinline sum(X) X = nothing GC.gc() return Y end ``` Note that it now stores a `NULL` in the GC frame before calling `jl_gc_collect`. 
Before: ```llvm ; Function Signature: testme() ; @ /Users/dnetto/Personal/test.jl:3 within `testme` define double @julia_testme_535() #0 { top: %gcframe1 = alloca [3 x ptr], align 16 call void @llvm.memset.p0.i64(ptr align 16 %gcframe1, i8 0, i64 24, i1 true) %pgcstack = call ptr inttoptr (i64 6595051180 to ptr)(i64 262) #10 store i64 4, ptr %gcframe1, align 16 %task.gcstack = load ptr, ptr %pgcstack, align 8 %frame.prev = getelementptr inbounds ptr, ptr %gcframe1, i64 1 store ptr %task.gcstack, ptr %frame.prev, align 8 store ptr %gcframe1, ptr %pgcstack, align 8 ; @ /Users/dnetto/Personal/test.jl:4 within `testme` %0 = call nonnull ptr @j_rand_539(i64 signext 100000000) %gc_slot_addr_0 = getelementptr inbounds ptr, ptr %gcframe1, i64 2 store ptr %0, ptr %gc_slot_addr_0, align 16 ; @ /Users/dnetto/Personal/test.jl:5 within `testme` %1 = call double @j_sum_541(ptr nonnull %0) ; @ /Users/dnetto/Personal/test.jl:7 within `testme` ; ┌ @ gcutils.jl:132 within `gc` @ gcutils.jl:132 call void @jlplt_ijl_gc_collect_543_got.jit(i32 1) %frame.prev4 = load ptr, ptr %frame.prev, align 8 store ptr %frame.prev4, ptr %pgcstack, align 8 ; └ ; @ /Users/dnetto/Personal/test.jl:8 within `testme` ret double %1 } ``` After: ```llvm ; Function Signature: testme() ; @ /Users/dnetto/Personal/test.jl:3 within `testme` define double @julia_testme_752() #0 { top: %gcframe1 = alloca [3 x ptr], align 16 call void @llvm.memset.p0.i64(ptr align 16 %gcframe1, i8 0, i64 24, i1 true) %pgcstack = call ptr inttoptr (i64 6595051180 to ptr)(i64 262) #10 store i64 4, ptr %gcframe1, align 16 %task.gcstack = load ptr, ptr %pgcstack, align 8 %frame.prev = getelementptr inbounds ptr, ptr %gcframe1, i64 1 store ptr %task.gcstack, ptr %frame.prev, align 8 store ptr %gcframe1, ptr %pgcstack, align 8 ; @ /Users/dnetto/Personal/test.jl:4 within `testme` %0 = call nonnull ptr @j_rand_756(i64 signext 100000000) %gc_slot_addr_0 = getelementptr inbounds ptr, ptr %gcframe1, i64 2 store ptr %0, ptr %gc_slot_addr_0, align 16 ; @ /Users/dnetto/Personal/test.jl:5 within `testme` %1 = call double @j_sum_758(ptr nonnull %0) store ptr null, ptr %gc_slot_addr_0, align 16 ; @ /Users/dnetto/Personal/test.jl:7 within `testme` ; ┌ @ gcutils.jl:132 within `gc` @ gcutils.jl:132 call void @jlplt_ijl_gc_collect_760_got.jit(i32 1) %frame.prev6 = load ptr, ptr %frame.prev, align 8 store ptr %frame.prev6, ptr %pgcstack, align 8 ; └ ; @ /Users/dnetto/Personal/test.jl:8 within `testme` ret double %1 } ``` --- src/llvm-gc-interface-passes.h | 7 +++--- src/llvm-late-gc-lowering.cpp | 36 +++++++++++++++++++++++++------ test/compiler/codegen.jl | 22 +++++++++++++++++++ test/llvmpasses/returnstwicegc.ll | 9 ++++++-- 4 files changed, 62 insertions(+), 12 deletions(-) diff --git a/src/llvm-gc-interface-passes.h b/src/llvm-gc-interface-passes.h index d33567e887118..278987858eab7 100644 --- a/src/llvm-gc-interface-passes.h +++ b/src/llvm-gc-interface-passes.h @@ -353,10 +353,11 @@ struct LateLowerGCFrame: private JuliaPassContext { State LocalScan(Function &F); void ComputeLiveness(State &S); void ComputeLiveSets(State &S); - SmallVector ColorRoots(const State &S); + std::pair, int> ColorRoots(const State &S); void PlaceGCFrameStore(State &S, unsigned R, unsigned MinColorRoot, ArrayRef Colors, Value *GCFrame, Instruction *InsertBefore); - void PlaceGCFrameStores(State &S, unsigned MinColorRoot, ArrayRef Colors, Value *GCFrame); - void PlaceRootsAndUpdateCalls(SmallVectorImpl &Colors, State &S, std::map>); + void PlaceGCFrameStores(State &S, unsigned MinColorRoot, 
ArrayRef Colors, int PreAssignedColors, Value *GCFrame); + void PlaceGCFrameReset(State &S, unsigned R, unsigned MinColorRoot, ArrayRef Colors, Value *GCFrame, Instruction *InsertBefore); + void PlaceRootsAndUpdateCalls(ArrayRef Colors, int PreAssignedColors, State &S, std::map>); void CleanupWriteBarriers(Function &F, State *S, const SmallVector &WriteBarriers, bool *CFGModified); bool CleanupIR(Function &F, State *S, bool *CFGModified); void NoteUseChain(State &S, BBState &BBS, User *TheUser); diff --git a/src/llvm-late-gc-lowering.cpp b/src/llvm-late-gc-lowering.cpp index 1d390a5115207..3e372ec9884e7 100644 --- a/src/llvm-late-gc-lowering.cpp +++ b/src/llvm-late-gc-lowering.cpp @@ -1820,7 +1820,7 @@ JL_USED_FUNC static void dumpColorAssignments(const State &S, const ArrayRef LateLowerGCFrame::ColorRoots(const State &S) { +std::pair, int> LateLowerGCFrame::ColorRoots(const State &S) { SmallVector Colors; Colors.resize(S.MaxPtrNumber + 1, -1); PEOIterator Ordering(S.Neighbors); @@ -1862,7 +1862,7 @@ SmallVector LateLowerGCFrame::ColorRoots(const State &S) { NewColor += PreAssignedColors; Colors[ActiveElement] = NewColor; } - return Colors; + return {Colors, PreAssignedColors}; } // Size of T is assumed to be `sizeof(void*)` @@ -2292,8 +2292,21 @@ void LateLowerGCFrame::PlaceGCFrameStore(State &S, unsigned R, unsigned MinColor new StoreInst(Val, slotAddress, InsertBefore); } +void LateLowerGCFrame::PlaceGCFrameReset(State &S, unsigned R, unsigned MinColorRoot, + ArrayRef Colors, Value *GCFrame, + Instruction *InsertBefore) { + // Get the slot address. + auto slotAddress = CallInst::Create( + getOrDeclare(jl_intrinsics::getGCFrameSlot), + {GCFrame, ConstantInt::get(Type::getInt32Ty(InsertBefore->getContext()), Colors[R] + MinColorRoot)}, + "gc_slot_addr_" + StringRef(std::to_string(Colors[R] + MinColorRoot)), InsertBefore); + // Reset the slot to NULL. 
+ Value *Val = ConstantPointerNull::get(T_prjlvalue); + new StoreInst(Val, slotAddress, InsertBefore); +} + void LateLowerGCFrame::PlaceGCFrameStores(State &S, unsigned MinColorRoot, - ArrayRef Colors, Value *GCFrame) + ArrayRef Colors, int PreAssignedColors, Value *GCFrame) { for (auto &BB : *S.F) { const BBState &BBS = S.BBStates[&BB]; @@ -2306,6 +2319,15 @@ void LateLowerGCFrame::PlaceGCFrameStores(State &S, unsigned MinColorRoot, for(auto rit = BBS.Safepoints.rbegin(); rit != BBS.Safepoints.rend(); ++rit ) { const LargeSparseBitVector &NowLive = S.LiveSets[*rit]; + // reset slots which are no longer alive + for (int Idx : *LastLive) { + if (Idx >= PreAssignedColors && !HasBitSet(NowLive, Idx)) { + PlaceGCFrameReset(S, Idx, MinColorRoot, Colors, GCFrame, + S.ReverseSafepointNumbering[*rit]); + } + } + // store values which are alive in this safepoint but + // haven't been stored in the GC frame before for (int Idx : NowLive) { if (!HasBitSet(*LastLive, Idx)) { PlaceGCFrameStore(S, Idx, MinColorRoot, Colors, GCFrame, @@ -2317,7 +2339,7 @@ void LateLowerGCFrame::PlaceGCFrameStores(State &S, unsigned MinColorRoot, } } -void LateLowerGCFrame::PlaceRootsAndUpdateCalls(SmallVectorImpl &Colors, State &S, +void LateLowerGCFrame::PlaceRootsAndUpdateCalls(ArrayRef Colors, int PreAssignedColors, State &S, std::map>) { auto F = S.F; auto T_int32 = Type::getInt32Ty(F->getContext()); @@ -2439,7 +2461,7 @@ void LateLowerGCFrame::PlaceRootsAndUpdateCalls(SmallVectorImpl &Colors, St pushGcframe->setArgOperand(1, NRoots); // Insert GC frame stores - PlaceGCFrameStores(S, AllocaSlot - 2, Colors, gcframe); + PlaceGCFrameStores(S, AllocaSlot - 2, Colors, PreAssignedColors, gcframe); // Insert GCFrame pops for (auto &BB : *F) { if (isa(BB.getTerminator())) { @@ -2464,9 +2486,9 @@ bool LateLowerGCFrame::runOnFunction(Function &F, bool *CFGModified) { State S = LocalScan(F); ComputeLiveness(S); - SmallVector Colors = ColorRoots(S); + auto Colors = ColorRoots(S); std::map> CallFrames; // = OptimizeCallFrames(S, Ordering); - PlaceRootsAndUpdateCalls(Colors, S, CallFrames); + PlaceRootsAndUpdateCalls(Colors.first, Colors.second, S, CallFrames); CleanupIR(F, &S, CFGModified); return true; } diff --git a/test/compiler/codegen.jl b/test/compiler/codegen.jl index ae04250964554..fcb3beb87b5a5 100644 --- a/test/compiler/codegen.jl +++ b/test/compiler/codegen.jl @@ -1003,3 +1003,25 @@ end @test f55768(Vector) @test f55768(Vector{T} where T) @test !f55768(Vector{S} where S) + +# test that values get rooted correctly over throw +for a in ((@noinline Ref{Int}(2)), + (@noinline Ref{Int}(3)), + 5, + (@noinline Ref{Int}(4)), + 6) + @test a[] != 0 + try + b = (@noinline Ref{Int}(5), + @noinline Ref{Int}(6), + @noinline Ref{Int}(7), + @noinline Ref{Int}(8), + @noinline Ref{Int}(9), + @noinline Ref{Int}(10), + @noinline Ref{Int}(11)) + GC.gc(true) + GC.@preserve b throw(a) + catch ex + @test ex === a + end +end diff --git a/test/llvmpasses/returnstwicegc.ll b/test/llvmpasses/returnstwicegc.ll index 511cbb505519b..eb1c6444129c3 100644 --- a/test/llvmpasses/returnstwicegc.ll +++ b/test/llvmpasses/returnstwicegc.ll @@ -1,6 +1,6 @@ ; This file is a part of Julia. 
License is MIT: https://julialang.org/license -; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='function(LateLowerGCFrame,FinalLowerGC)' -S %s | FileCheck %s --check-prefixes=OPAQUE +; RUN: opt --load-pass-plugin=libjulia-codegen%shlibext -passes='function(LateLowerGCFrame,FinalLowerGC)' -S %s | FileCheck %s declare void @boxed_simple({} addrspace(10)*, {} addrspace(10)*) @@ -13,7 +13,12 @@ declare void @one_arg_boxed({} addrspace(10)*) define void @try_catch(i64 %a, i64 %b) { ; Because of the returns_twice function, we need to keep aboxed live everywhere -; OPAQUE: %gcframe = alloca ptr addrspace(10), i32 4 +; CHECK: %gcframe = alloca ptr addrspace(10), i32 4 +; CHECK: store ptr addrspace(10) %aboxed, ptr [[slot_0:%.*]], +; CHECK-NOT: store {{.*}} ptr [[slot_0]] +; CHECK: store ptr addrspace(10) %bboxed, ptr {{%.*}} +; CHECK-NOT: store {{.*}} ptr [[slot_0]] + top: %sigframe = alloca [208 x i8], align 16 %sigframe.sub = getelementptr inbounds [208 x i8], [208 x i8]* %sigframe, i64 0, i64 0 From d90c2e2426f7c02ad7abb696e378a33be32bcc91 Mon Sep 17 00:00:00 2001 From: Priynsh <119518987+Priynsh@users.noreply.github.com> Date: Wed, 6 Nov 2024 21:53:45 +0530 Subject: [PATCH 383/537] Added test for resolving array references in exprresolve (#56471) added test to take care of non-real-index handling while resolving array references in exprresolve to test julia/base/cartesian.jl - line 427 to 432 --- test/cartesian.jl | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/test/cartesian.jl b/test/cartesian.jl index 9643da72642ec..7064b54ebbb8d 100644 --- a/test/cartesian.jl +++ b/test/cartesian.jl @@ -1,12 +1,20 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -@test Base.Cartesian.exprresolve(:(1 + 3)) == 4 + ex = Base.Cartesian.exprresolve(:(if 5 > 4; :x; else :y; end)) @test ex.args[2] == QuoteNode(:x) @test Base.Cartesian.lreplace!("val_col", Base.Cartesian.LReplace{String}(:col, "col", 1)) == "val_1" @test Base.setindex(CartesianIndex(1,5,4),3,2) == CartesianIndex(1, 3, 4) - +@testset "Expression Resolve" begin + @test Base.Cartesian.exprresolve(:(1 + 3)) == 4 + ex1 = Expr(:ref, [1, 2, 3], 2) + result1 = Base.Cartesian.exprresolve(ex1) + @test result1 == 2 + ex2 = Expr(:ref, [1, 2, 3], "non-real-index") + result2 = Base.Cartesian.exprresolve(ex2) + @test result2 == ex2 +end @testset "CartesianIndices constructions" begin @testset "AbstractUnitRange" begin for oinds in [ From 37f0220e21a579bc3725157260e15c637c700bc9 Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Wed, 6 Nov 2024 15:00:59 -0600 Subject: [PATCH 384/537] Fix and test searchsorted for arrays whose first index is `typemin(Int)` (#56474) This fixes the issue reported in https://github.com/JuliaLang/julia/issues/56457#issuecomment-2457223264 which, combined with #56464 which fixed the issue in the OP, fixes #56457. `searchsortedfirst` was fine all along, but I added it to tests regardless. 
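For context, here is a minimal sketch (not part of the patch) of why the old strict `<` sentinel comparison misbehaves when the first index is `typemin(Int)`: the exclusive lower sentinel `lo = ilo - 1` wraps around to `typemax(Int)`, so `lo < hi - u` is false before the search even starts, whereas the `!=` comparison in the diff below keeps the loop running until the sentinels meet.

```julia
# Hypothetical illustration, not from the patch: with ilo = typemin(Int),
# the exclusive lower sentinel wraps around and defeats a `<` comparison.
ilo, ihi = typemin(Int), typemin(Int) + 5
lo, hi = ilo - 1, ihi + 1      # lo wraps to typemax(Int)
@assert lo == typemax(Int)
@assert !(lo < hi - 1)         # old loop condition: exits immediately, wrong result
@assert lo != hi - 1           # new loop condition: the binary search proceeds
```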
---
 base/sort.jl    |  4 ++--
 test/sorting.jl | 16 ++++++++++++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/base/sort.jl b/base/sort.jl
index 2251d0b965228..6991f12551ab4 100644
--- a/base/sort.jl
+++ b/base/sort.jl
@@ -206,7 +206,7 @@ function searchsortedlast(v::AbstractVector, x, lo::T, hi::T, o::Ordering)::keyt
     u = T(1)
     lo = lo - u
     hi = hi + u
-    @inbounds while lo < hi - u
+    @inbounds while lo != hi - u
         m = midpoint(lo, hi)
         if lt(o, x, v[m])
             hi = m
@@ -224,7 +224,7 @@ function searchsorted(v::AbstractVector, x, ilo::T, ihi::T, o::Ordering)::UnitRa
     u = T(1)
     lo = ilo - u
     hi = ihi + u
-    @inbounds while lo < hi - u
+    @inbounds while lo != hi - u
         m = midpoint(lo, hi)
         if lt(o, v[m], x)
             lo = m
diff --git a/test/sorting.jl b/test/sorting.jl
index 8cbdb94f02b16..93e0cdd7de5ba 100644
--- a/test/sorting.jl
+++ b/test/sorting.jl
@@ -585,6 +585,22 @@ end
     # Issue #56457
     o2 = OffsetArray([2,2,3], typemax(Int)-3);
     @test searchsorted(o2, 2) == firstindex(o2):firstindex(o2)+1
+
+    struct IdentityVector <: AbstractVector{Int}
+        lo::Int
+        hi::Int
+    end
+    function Base.getindex(s::IdentityVector, i::Int)
+        s.lo <= i <= s.hi || throw(BoundsError(s, i))
+        i
+    end
+    Base.axes(s::IdentityVector) = (s.lo:s.hi,)
+    Base.size(s::IdentityVector) = length.(axes(s))
+
+    o3 = IdentityVector(typemin(Int), typemin(Int)+5)
+    @test searchsortedfirst(o3, typemin(Int)+2) === typemin(Int)+2
+    @test searchsortedlast(o3, typemin(Int)+2) === typemin(Int)+2
+    @test searchsorted(o3, typemin(Int)+2) === typemin(Int)+2:typemin(Int)+2
 end

 function adaptive_sort_test(v; trusted=InsertionSort, kw...)

From dbf2c4b93dd273544c932665a602d4bf7fcf5c6a Mon Sep 17 00:00:00 2001
From: Keno Fischer
Date: Sat, 12 Oct 2024 05:37:55 +0000
Subject: [PATCH 385/537] Move Core.Compiler into Base

This is the first step in what I am hoping will eventually result in making the compiler itself an upgradable stdlib. Over time, we've gained several non-Base consumers of `Core.Compiler`, and we've reached a bit of a breaking point where maintaining those downstream dependencies is getting more difficult than the close coupling of Core.Compiler to the runtime is worth.

In this first step, I am moving Core.Compiler into Base, ending the duplication of common data structures and generic functions between Core.Compiler and Base. This split goes back quite far (although not all the way) to the early days of Julia and predates the world-age mechanism.

The extant Base and Core.Compiler environments have some differences (other than the duplication). I think the primary ones are (but I will add more here if somebody points one out):

- `Core.Compiler` does not use `getproperty`
- `Core.Compiler` does not have extensible `==` equality

In this PR, I decided to retain the former by setting `getproperty = getfield` for Core.Compiler itself (though of course not for the data structures shared with Base). I don't think it's strictly necessary, but might as well. For equality, I decided the easiest thing to do would be to try to merge the equalities and see what happens. In general, Core.Compiler is relatively restricted in the kinds of equality comparisons it can make, so I think it'll work out fine, but we can revisit this.

This seems to be fully working and most of this is just moving code around. I think most of that refactoring is independently useful, so I'll pull some of it out into separate PRs to make this PR more manageable.
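As a rough sketch of the compatibility story (the binding below is taken from the `base/Base.jl` hunk in this patch; the assertions are an assumption about the post-PR state, shown for illustration only), the old `Core.Compiler` name stays usable because it is rebound to the relocated module during bootstrap:

```julia
# The alias installed near the end of base/Base.jl in this patch:
#     @eval Core const Compiler = Main.Base.Compiler
# After bootstrap, both names refer to the same module, so downstream
# consumers written against the old location keep resolving, e.g.:
@assert Core.Compiler === Base.Compiler
@assert Core.Compiler.return_type(+, Tuple{Int,Int}) === Int  # internal API, illustration only
```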
--- base/Base.jl | 230 +-------------- base/Base_compiler.jl | 266 ++++++++++++++++++ base/array.jl | 29 +- base/bool.jl | 2 + base/client.jl | 2 +- base/compiler/abstractinterpretation.jl | 2 +- base/compiler/bootstrap.jl | 4 +- base/compiler/compiler.jl | 144 +++------- base/compiler/effects.jl | 8 +- .../ssair/EscapeAnalysis/EscapeAnalysis.jl | 9 +- base/compiler/ssair/ir.jl | 1 + base/compiler/ssair/show.jl | 16 +- base/compiler/ssair/tarjan.jl | 2 +- base/compiler/typeinfer.jl | 24 +- base/compilerimg.jl | 4 + base/docs/Docs.jl | 7 +- base/docs/basedocs.jl | 2 +- base/docs/core.jl | 18 +- base/error.jl | 1 + base/errorshow.jl | 2 +- base/essentials.jl | 6 +- base/float.jl | 2 - base/iterators.jl | 16 +- base/meta.jl | 2 +- base/opaque_closure.jl | 4 +- base/promotion.jl | 10 +- base/reduce.jl | 2 + base/reflection.jl | 4 +- base/show.jl | 44 ++- base/stacktraces.jl | 4 +- base/sysimg.jl | 8 +- base/tuple.jl | 4 +- src/jltypes.c | 18 +- stdlib/InteractiveUtils/src/codeview.jl | 18 +- stdlib/REPL/src/REPLCompletions.jl | 2 +- sysimage.mk | 10 +- test/ambiguous.jl | 7 +- test/backtrace.jl | 2 +- test/compiler/EscapeAnalysis/EAUtils.jl | 4 - test/compiler/effects.jl | 2 +- test/docs.jl | 2 +- test/misc.jl | 2 +- 42 files changed, 445 insertions(+), 501 deletions(-) create mode 100644 base/Base_compiler.jl create mode 100644 base/compilerimg.jl diff --git a/base/Base.jl b/base/Base.jl index 874cec56329d1..57b5142604d21 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -1,186 +1,13 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -baremodule Base - -using Core.Intrinsics, Core.IR - -# to start, we're going to use a very simple definition of `include` -# that doesn't require any function (except what we can get from the `Core` top-module) -# start this big so that we don't have to resize before we have defined how to grow an array -const _included_files = Array{Tuple{Module,String},1}(Core.undef, 400) -setfield!(_included_files, :size, (1,)) -function include(mod::Module, path::String) - len = getfield(_included_files.size, 1) - memlen = _included_files.ref.mem.length - lenp1 = Core.add_int(len, 1) - if len === memlen # by the time this is true we hopefully will have defined _growend! - _growend!(_included_files, UInt(1)) - else - setfield!(_included_files, :size, (lenp1,)) - end - Core.memoryrefset!(Core.memoryref(_included_files.ref, lenp1), (mod, ccall(:jl_prepend_cwd, Any, (Any,), path)), :not_atomic, true) - Core.println(path) - ccall(:jl_uv_flush, Nothing, (Ptr{Nothing},), Core.io_pointer(Core.stdout)) - Core.include(mod, path) -end -include(path::String) = include(Base, path) - -struct IncludeInto <: Function - m::Module -end -(this::IncludeInto)(fname::AbstractString) = include(this.m, fname) - -# from now on, this is now a top-module for resolving syntax -const is_primary_base_module = ccall(:jl_module_parent, Ref{Module}, (Any,), Base) === Core.Main -ccall(:jl_set_istopmod, Cvoid, (Any, Bool), Base, is_primary_base_module) - -# The @inline/@noinline macros that can be applied to a function declaration are not available -# until after array.jl, and so we will mark them within a function body instead. 
-macro inline() Expr(:meta, :inline) end -macro noinline() Expr(:meta, :noinline) end - -macro _boundscheck() Expr(:boundscheck) end - -# Try to help prevent users from shooting them-selves in the foot -# with ambiguities by defining a few common and critical operations -# (and these don't need the extra convert code) -getproperty(x::Module, f::Symbol) = (@inline; getglobal(x, f)) -getproperty(x::Type, f::Symbol) = (@inline; getfield(x, f)) -setproperty!(x::Type, f::Symbol, v) = error("setfield! fields of Types should not be changed") -setproperty!(x::Array, f::Symbol, v) = error("setfield! fields of Array should not be changed") -getproperty(x::Tuple, f::Int) = (@inline; getfield(x, f)) -setproperty!(x::Tuple, f::Int, v) = setfield!(x, f, v) # to get a decent error - -getproperty(x, f::Symbol) = (@inline; getfield(x, f)) -function setproperty!(x, f::Symbol, v) - ty = fieldtype(typeof(x), f) - val = v isa ty ? v : convert(ty, v) - return setfield!(x, f, val) -end - -typeof(function getproperty end).name.constprop_heuristic = Core.FORCE_CONST_PROP -typeof(function setproperty! end).name.constprop_heuristic = Core.FORCE_CONST_PROP - -dotgetproperty(x, f) = getproperty(x, f) - -getproperty(x::Module, f::Symbol, order::Symbol) = (@inline; getglobal(x, f, order)) -function setproperty!(x::Module, f::Symbol, v, order::Symbol=:monotonic) - @inline - ty = Core.get_binding_type(x, f) - val = v isa ty ? v : convert(ty, v) - return setglobal!(x, f, val, order) -end -getproperty(x::Type, f::Symbol, order::Symbol) = (@inline; getfield(x, f, order)) -setproperty!(x::Type, f::Symbol, v, order::Symbol) = error("setfield! fields of Types should not be changed") -getproperty(x::Tuple, f::Int, order::Symbol) = (@inline; getfield(x, f, order)) -setproperty!(x::Tuple, f::Int, v, order::Symbol) = setfield!(x, f, v, order) # to get a decent error - -getproperty(x, f::Symbol, order::Symbol) = (@inline; getfield(x, f, order)) -function setproperty!(x, f::Symbol, v, order::Symbol) - @inline - ty = fieldtype(typeof(x), f) - val = v isa ty ? v : convert(ty, v) - return setfield!(x, f, val, order) -end - -function swapproperty!(x, f::Symbol, v, order::Symbol=:not_atomic) - @inline - ty = fieldtype(typeof(x), f) - val = v isa ty ? v : convert(ty, v) - return Core.swapfield!(x, f, val, order) -end -function modifyproperty!(x, f::Symbol, op, v, order::Symbol=:not_atomic) - @inline - return Core.modifyfield!(x, f, op, v, order) -end -function replaceproperty!(x, f::Symbol, expected, desired, success_order::Symbol=:not_atomic, fail_order::Symbol=success_order) - @inline - ty = fieldtype(typeof(x), f) - val = desired isa ty ? desired : convert(ty, desired) - return Core.replacefield!(x, f, expected, val, success_order, fail_order) -end -function setpropertyonce!(x, f::Symbol, desired, success_order::Symbol=:not_atomic, fail_order::Symbol=success_order) - @inline - ty = fieldtype(typeof(x), f) - val = desired isa ty ? desired : convert(ty, desired) - return Core.setfieldonce!(x, f, val, success_order, fail_order) -end - -function swapproperty!(x::Module, f::Symbol, v, order::Symbol=:not_atomic) - @inline - ty = Core.get_binding_type(x, f) - val = v isa ty ? 
v : convert(ty, v) - return Core.swapglobal!(x, f, val, order) -end -function modifyproperty!(x::Module, f::Symbol, op, v, order::Symbol=:not_atomic) - @inline - return Core.modifyglobal!(x, f, op, v, order) -end -function replaceproperty!(x::Module, f::Symbol, expected, desired, success_order::Symbol=:not_atomic, fail_order::Symbol=success_order) - @inline - ty = Core.get_binding_type(x, f) - val = desired isa ty ? desired : convert(ty, desired) - return Core.replaceglobal!(x, f, expected, val, success_order, fail_order) +had_compiler = isdefined(Base, :Compiler) +if had_compiler; else +include("Base_compiler.jl") end -function setpropertyonce!(x::Module, f::Symbol, desired, success_order::Symbol=:not_atomic, fail_order::Symbol=success_order) - @inline - ty = Core.get_binding_type(x, f) - val = desired isa ty ? desired : convert(ty, desired) - return Core.setglobalonce!(x, f, val, success_order, fail_order) -end - -convert(::Type{Any}, Core.@nospecialize x) = x -convert(::Type{T}, x::T) where {T} = x -include("coreio.jl") - -eval(x) = Core.eval(Base, x) -eval(m::Module, x) = Core.eval(m, x) - -# init core docsystem -import Core: @doc, @__doc__, WrappedException, @int128_str, @uint128_str, @big_str, @cmd -if isdefined(Core, :Compiler) - import Core.Compiler.CoreDocs - Core.atdoc!(CoreDocs.docm) -end +const start_base_include = time_ns() -include("exports.jl") -include("public.jl") - -if false - # simple print definitions for debugging. enable these if something - # goes wrong during bootstrap before printing code is available. - # otherwise, they just just eventually get (noisily) overwritten later - global show, print, println - show(io::IO, x) = Core.show(io, x) - print(io::IO, a...) = Core.print(io, a...) - println(io::IO, x...) = Core.println(io, x...) -end - -""" - time_ns() -> UInt64 - -Get the time in nanoseconds relative to some arbitrary time in the past. The primary use is for measuring the elapsed time -between two moments in time. -""" -time_ns() = ccall(:jl_hrtime, UInt64, ()) - -start_base_include = time_ns() - -# A warning to be interpolated in the docstring of every dangerous mutating function in Base, see PR #50824 -const _DOCS_ALIASING_WARNING = """ -!!! warning - Behavior can be unexpected when any mutated argument shares memory with any other argument. 
-""" - -## Load essential files and libraries -include("essentials.jl") -include("ctypes.jl") -include("gcutils.jl") -include("generator.jl") -include("runtime_internals.jl") include("reflection.jl") -include("options.jl") # define invoke(f, T, args...; kwargs...), without kwargs wrapping # to forward to invoke @@ -234,33 +61,6 @@ end # The REPL stdlib hooks into Base using this Ref const REPL_MODULE_REF = Ref{Module}(Base) -include("checked.jl") -using .Checked -function cld end -function fld end - -# Lazy strings -include("strings/lazy.jl") - -# array structures -include("indices.jl") -include("genericmemory.jl") -include("array.jl") -include("abstractarray.jl") -include("subarray.jl") -include("views.jl") -include("baseext.jl") - -include("c.jl") -include("ntuple.jl") -include("abstractdict.jl") -include("iddict.jl") -include("idset.jl") -include("iterators.jl") -using .Iterators: zip, enumerate, only -using .Iterators: Flatten, Filter, product # for generators -using .Iterators: Stateful # compat (was formerly used in reinterpretarray.jl) -include("namedtuple.jl") # For OS specific stuff # We need to strcat things here, before strings are really defined @@ -332,13 +132,6 @@ include("reduce.jl") ## core structures include("reshapedarray.jl") include("reinterpretarray.jl") -include("bitarray.jl") -include("bitset.jl") - -if !isdefined(Core, :Compiler) - include("docs/core.jl") - Core.atdoc!(CoreDocs.docm) -end include("multimedia.jl") using .Multimedia @@ -347,7 +140,6 @@ using .Multimedia include("some.jl") include("dict.jl") -include("abstractset.jl") include("set.jl") # Strings @@ -485,10 +277,6 @@ include("accumulate.jl") include("permuteddimsarray.jl") using .PermutedDimsArrays -# basic data structures -include("ordering.jl") -using .Order - # Combinatorics include("sort.jl") using .Sort @@ -566,9 +354,8 @@ include("docs/basedocs.jl") # Documentation -- should always be included last in sysimg. include("docs/Docs.jl") using .Docs -if isdefined(Core, :Compiler) && is_primary_base_module - Docs.loaddocs(Core.Compiler.CoreDocs.DOCS) -end +Docs.loaddocs(CoreDocs.DOCS) +@eval CoreDocs DOCS = DocLinkedList() include("precompilation.jl") @@ -591,6 +378,9 @@ a_method_to_overwrite_in_test() = inferencebarrier(1) (this::IncludeInto)(fname::AbstractString) = include(identity, this.m, fname) (this::IncludeInto)(mapexpr::Function, fname::AbstractString) = include(mapexpr, this.m, fname) +# Compatibility with when Compiler was in Core +@eval Core const Compiler = Main.Base.Compiler + # External libraries vendored into Base Core.println("JuliaSyntax/src/JuliaSyntax.jl") include(@__MODULE__, string(BUILDROOT, "JuliaSyntax/src/JuliaSyntax.jl")) # include($BUILDROOT/base/JuliaSyntax/JuliaSyntax.jl) @@ -687,4 +477,4 @@ end @assert !isassigned(_included_files, 1) _included_files[1] = (parentmodule(Base), abspath(@__FILE__)) -end # baremodule Base +had_compiler && ccall(:jl_init_restored_module, Cvoid, (Any,), Base) diff --git a/base/Base_compiler.jl b/base/Base_compiler.jl new file mode 100644 index 0000000000000..3578b8f070db3 --- /dev/null +++ b/base/Base_compiler.jl @@ -0,0 +1,266 @@ +# This file is a part of Julia. 
License is MIT: https://julialang.org/license + +using Core.Intrinsics, Core.IR + +# to start, we're going to use a very simple definition of `include` +# that doesn't require any function (except what we can get from the `Core` top-module) +# start this big so that we don't have to resize before we have defined how to grow an array +const _included_files = Array{Tuple{Module,String},1}(Core.undef, 400) +setfield!(_included_files, :size, (1,)) +function include(mod::Module, path::String) + len = getfield(_included_files.size, 1) + memlen = _included_files.ref.mem.length + lenp1 = Core.add_int(len, 1) + if len === memlen # by the time this is true we hopefully will have defined _growend! + _growend!(_included_files, UInt(1)) + else + setfield!(_included_files, :size, (lenp1,)) + end + Core.memoryrefset!(Core.memoryref(_included_files.ref, lenp1), (mod, ccall(:jl_prepend_cwd, Any, (Any,), path)), :not_atomic, true) + Core.println(path) + ccall(:jl_uv_flush, Nothing, (Ptr{Nothing},), Core.io_pointer(Core.stdout)) + Core.include(mod, path) +end +include(path::String) = include(Base, path) + +struct IncludeInto <: Function + m::Module +end +(this::IncludeInto)(fname::AbstractString) = include(this.m, fname) + +# from now on, this is now a top-module for resolving syntax +const is_primary_base_module = ccall(:jl_module_parent, Ref{Module}, (Any,), Base) === Core.Main +ccall(:jl_set_istopmod, Cvoid, (Any, Bool), Base, is_primary_base_module) + +# The @inline/@noinline macros that can be applied to a function declaration are not available +# until after array.jl, and so we will mark them within a function body instead. +macro inline() Expr(:meta, :inline) end +macro noinline() Expr(:meta, :noinline) end + +macro _boundscheck() Expr(:boundscheck) end + +# Try to help prevent users from shooting them-selves in the foot +# with ambiguities by defining a few common and critical operations +# (and these don't need the extra convert code) +getproperty(x::Module, f::Symbol) = (@inline; getglobal(x, f)) +getproperty(x::Type, f::Symbol) = (@inline; getfield(x, f)) +setproperty!(x::Type, f::Symbol, v) = error("setfield! fields of Types should not be changed") +setproperty!(x::Array, f::Symbol, v) = error("setfield! fields of Array should not be changed") +getproperty(x::Tuple, f::Int) = (@inline; getfield(x, f)) +setproperty!(x::Tuple, f::Int, v) = setfield!(x, f, v) # to get a decent error + +getproperty(x, f::Symbol) = (@inline; getfield(x, f)) +function setproperty!(x, f::Symbol, v) + ty = fieldtype(typeof(x), f) + val = v isa ty ? v : convert(ty, v) + return setfield!(x, f, val) +end + +typeof(function getproperty end).name.constprop_heuristic = Core.FORCE_CONST_PROP +typeof(function setproperty! end).name.constprop_heuristic = Core.FORCE_CONST_PROP + +dotgetproperty(x, f) = getproperty(x, f) + +getproperty(x::Module, f::Symbol, order::Symbol) = (@inline; getglobal(x, f, order)) +function setproperty!(x::Module, f::Symbol, v, order::Symbol=:monotonic) + @inline + ty = Core.get_binding_type(x, f) + val = v isa ty ? v : convert(ty, v) + return setglobal!(x, f, val, order) +end +getproperty(x::Type, f::Symbol, order::Symbol) = (@inline; getfield(x, f, order)) +setproperty!(x::Type, f::Symbol, v, order::Symbol) = error("setfield! 
fields of Types should not be changed") +getproperty(x::Tuple, f::Int, order::Symbol) = (@inline; getfield(x, f, order)) +setproperty!(x::Tuple, f::Int, v, order::Symbol) = setfield!(x, f, v, order) # to get a decent error + +getproperty(x, f::Symbol, order::Symbol) = (@inline; getfield(x, f, order)) +function setproperty!(x, f::Symbol, v, order::Symbol) + @inline + ty = fieldtype(typeof(x), f) + val = v isa ty ? v : convert(ty, v) + return setfield!(x, f, val, order) +end + +function swapproperty!(x, f::Symbol, v, order::Symbol=:not_atomic) + @inline + ty = fieldtype(typeof(x), f) + val = v isa ty ? v : convert(ty, v) + return Core.swapfield!(x, f, val, order) +end +function modifyproperty!(x, f::Symbol, op, v, order::Symbol=:not_atomic) + @inline + return Core.modifyfield!(x, f, op, v, order) +end +function replaceproperty!(x, f::Symbol, expected, desired, success_order::Symbol=:not_atomic, fail_order::Symbol=success_order) + @inline + ty = fieldtype(typeof(x), f) + val = desired isa ty ? desired : convert(ty, desired) + return Core.replacefield!(x, f, expected, val, success_order, fail_order) +end +function setpropertyonce!(x, f::Symbol, desired, success_order::Symbol=:not_atomic, fail_order::Symbol=success_order) + @inline + ty = fieldtype(typeof(x), f) + val = desired isa ty ? desired : convert(ty, desired) + return Core.setfieldonce!(x, f, val, success_order, fail_order) +end + +function swapproperty!(x::Module, f::Symbol, v, order::Symbol=:not_atomic) + @inline + ty = Core.get_binding_type(x, f) + val = v isa ty ? v : convert(ty, v) + return Core.swapglobal!(x, f, val, order) +end +function modifyproperty!(x::Module, f::Symbol, op, v, order::Symbol=:not_atomic) + @inline + return Core.modifyglobal!(x, f, op, v, order) +end +function replaceproperty!(x::Module, f::Symbol, expected, desired, success_order::Symbol=:not_atomic, fail_order::Symbol=success_order) + @inline + ty = Core.get_binding_type(x, f) + val = desired isa ty ? desired : convert(ty, desired) + return Core.replaceglobal!(x, f, expected, val, success_order, fail_order) +end +function setpropertyonce!(x::Module, f::Symbol, desired, success_order::Symbol=:not_atomic, fail_order::Symbol=success_order) + @inline + ty = Core.get_binding_type(x, f) + val = desired isa ty ? desired : convert(ty, desired) + return Core.setglobalonce!(x, f, val, success_order, fail_order) +end + + +convert(::Type{Any}, Core.@nospecialize x) = x +convert(::Type{T}, x::T) where {T} = x +include("coreio.jl") + +import Core: @doc, @__doc__, WrappedException, @int128_str, @uint128_str, @big_str, @cmd + +# core docsystem +include("docs/core.jl") +Core.atdoc!(CoreDocs.docm) + +eval(x) = Core.eval(Base, x) +eval(m::Module, x) = Core.eval(m, x) + +include("exports.jl") +include("public.jl") + +if false + # simple print definitions for debugging. enable these if something + # goes wrong during bootstrap before printing code is available. + # otherwise, they just just eventually get (noisily) overwritten later + global show, print, println + show(io::IO, x) = Core.show(io, x) + print(io::IO, a...) = Core.print(io, a...) + println(io::IO, x...) = Core.println(io, x...) +end + +""" + time_ns() -> UInt64 + +Get the time in nanoseconds relative to some arbitrary time in the past. The primary use is for measuring the elapsed time +between two moments in time. +""" +time_ns() = ccall(:jl_hrtime, UInt64, ()) + +# A warning to be interpolated in the docstring of every dangerous mutating function in Base, see PR #50824 +const _DOCS_ALIASING_WARNING = """ +!!! 
warning + Behavior can be unexpected when any mutated argument shares memory with any other argument. +""" + +## Load essential files and libraries +include("essentials.jl") +include("ctypes.jl") +include("gcutils.jl") +include("generator.jl") +include("runtime_internals.jl") +include("options.jl") + +# define invoke(f, T, args...; kwargs...), without kwargs wrapping +# to forward to invoke +function Core.kwcall(kwargs::NamedTuple, ::typeof(invoke), f, T, args...) + @inline + # prepend kwargs and f to the invoked from the user + T = rewrap_unionall(Tuple{Core.Typeof(kwargs), Core.Typeof(f), (unwrap_unionall(T)::DataType).parameters...}, T) + return invoke(Core.kwcall, T, kwargs, f, args...) +end +# invoke does not have its own call cache, but kwcall for invoke does +setfield!(typeof(invoke).name.mt, :max_args, 3, :monotonic) # invoke, f, T, args... + +# define applicable(f, T, args...; kwargs...), without kwargs wrapping +# to forward to applicable +function Core.kwcall(kwargs::NamedTuple, ::typeof(applicable), @nospecialize(args...)) + @inline + return applicable(Core.kwcall, kwargs, args...) +end +function Core._hasmethod(@nospecialize(f), @nospecialize(t)) # this function has a special tfunc (TODO: make this a Builtin instead like applicable) + tt = rewrap_unionall(Tuple{Core.Typeof(f), (unwrap_unionall(t)::DataType).parameters...}, t) + return Core._hasmethod(tt) +end + + +# core operations & types +include("promotion.jl") +include("tuple.jl") +include("expr.jl") +include("pair.jl") +include("traits.jl") +include("range.jl") +include("error.jl") + +# core numeric operations & types +==(x, y) = x === y +include("bool.jl") +include("number.jl") +include("int.jl") +include("operators.jl") +include("pointer.jl") +include("refvalue.jl") +include("cmem.jl") + +include("checked.jl") +using .Checked +function cld end +function fld end + +# Lazy strings +include("strings/lazy.jl") + +# array structures +include("indices.jl") +include("genericmemory.jl") +include("array.jl") +include("abstractarray.jl") +include("subarray.jl") +include("views.jl") +include("baseext.jl") + +include("c.jl") +include("ntuple.jl") +include("abstractset.jl") +include("bitarray.jl") +include("bitset.jl") +include("abstractdict.jl") +include("iddict.jl") +include("idset.jl") +include("iterators.jl") +using .Iterators: zip, enumerate, only +using .Iterators: Flatten, Filter, product # for generators +using .Iterators: Stateful # compat (was formerly used in reinterpretarray.jl) +include("namedtuple.jl") + +include("ordering.jl") +using .Order + +include("compiler/compiler.jl") + +const _return_type = Compiler.return_type + +# Enable compiler +Core.eval(Compiler, quote +include("compiler/bootstrap.jl") +ccall(:jl_set_typeinf_func, Cvoid, (Any,), typeinf_ext_toplevel) + +include("compiler/parsing.jl") +Core._setparser!(fl_parse) +end) diff --git a/base/array.jl b/base/array.jl index 7a9649f20dded..68d0f13d3893a 100644 --- a/base/array.jl +++ b/base/array.jl @@ -769,28 +769,15 @@ end # gets inlined into the caller before recursion detection # gets a chance to see it, so that recursive calls to the caller # don't trigger the inference limiter -if isdefined(Core, :Compiler) - macro default_eltype(itr) - I = esc(itr) - return quote - if $I isa Generator && ($I).f isa Type - T = ($I).f - else - T = Core.Compiler.return_type(_iterator_upper_bound, Tuple{typeof($I)}) - end - promote_typejoin_union(T) - end - end -else - macro default_eltype(itr) - I = esc(itr) - return quote - if $I isa Generator && ($I).f isa Type - 
promote_typejoin_union($I.f) - else - Any - end +macro default_eltype(itr) + I = esc(itr) + return quote + if $I isa Generator && ($I).f isa Type + T = ($I).f + else + T = Base._return_type(_iterator_upper_bound, Tuple{typeof($I)}) end + promote_typejoin_union(T) end end diff --git a/base/bool.jl b/base/bool.jl index d7dcf76caa91b..3a5c36b09ae2c 100644 --- a/base/bool.jl +++ b/base/bool.jl @@ -184,3 +184,5 @@ end div(x::Bool, y::Bool) = y ? x : throw(DivideError()) rem(x::Bool, y::Bool) = y ? false : throw(DivideError()) mod(x::Bool, y::Bool) = rem(x,y) + +Bool(x::Real) = x==0 ? false : x==1 ? true : throw(InexactError(:Bool, Bool, x)) diff --git a/base/client.jl b/base/client.jl index a04556507d5dc..e95d518d3e501 100644 --- a/base/client.jl +++ b/base/client.jl @@ -557,7 +557,7 @@ function _start() try repl_was_requested = exec_options(JLOptions()) if should_use_main_entrypoint() && !is_interactive - if Core.Compiler.generating_output() + if Base.generating_output() precompile(Main.main, (typeof(ARGS),)) else ret = invokelatest(Main.main, ARGS) diff --git a/base/compiler/abstractinterpretation.jl b/base/compiler/abstractinterpretation.jl index dbfe3bb9ccac4..f3ffd6495ce50 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/base/compiler/abstractinterpretation.jl @@ -2047,7 +2047,7 @@ function abstract_call_builtin(interp::AbstractInterpreter, f::Builtin, (; fargs elsetype = rt === Const(true) ? Bottom : widenslotwrapper(aty) return Conditional(a, thentype, elsetype) end - elseif f === Core.Compiler.not_int + elseif f === Core.Intrinsics.not_int aty = argtypes[2] if isa(aty, Conditional) thentype = rt === Const(false) ? Bottom : aty.elsetype diff --git a/base/compiler/bootstrap.jl b/base/compiler/bootstrap.jl index 12c83df74fe50..3162bccbdb4b9 100644 --- a/base/compiler/bootstrap.jl +++ b/base/compiler/bootstrap.jl @@ -6,7 +6,7 @@ # since we won't be able to specialize & infer them at runtime let time() = ccall(:jl_clock_now, Float64, ()) - + println("Compiling the compiler. This may take several minutes ...") interp = NativeInterpreter() # analyze_escapes_tt = Tuple{typeof(analyze_escapes), IRCode, Int, TODO} @@ -48,5 +48,5 @@ let time() = ccall(:jl_clock_now, Float64, ()) end end endtime = time() - println("Core.Compiler ──── ", sub_float(endtime,starttime), " seconds") + println("Base.Compiler ──── ", sub_float(endtime,starttime), " seconds") end diff --git a/base/compiler/compiler.jl b/base/compiler/compiler.jl index 7d1dba88c9011..f4b7b73f1bf76 100644 --- a/base/compiler/compiler.jl +++ b/base/compiler/compiler.jl @@ -1,13 +1,38 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -getfield(Core, :eval)(Core, :(baremodule Compiler + +baremodule Compiler using Core.Intrinsics, Core.IR -import Core: print, println, show, write, unsafe_write, stdout, stderr, +import Core: print, println, show, write, unsafe_write, _apply_iterate, svec, apply_type, Builtin, IntrinsicFunction, MethodInstance, CodeInstance, MethodTable, MethodMatch, PartialOpaque, - TypeofVararg + TypeofVararg, Core, SimpleVector, donotdelete, compilerbarrier, + memoryref_isassigned, memoryrefnew, memoryrefoffset, memoryrefget, + memoryrefset!, typename + +using ..Base +using ..Base: Ordering, vect, EffectsOverride, BitVector, @_gc_preserve_begin, @_gc_preserve_end, RefValue, + @nospecializeinfer, @_foldable_meta, fieldindex, is_function_def, indexed_iterate, isexpr, methods, + get_world_counter, JLOptions, _methods_by_ftype, unwrap_unionall, cconvert, unsafe_convert, + issingletontype, isType, rewrap_unionall, has_free_typevars, isvarargtype, hasgenerator, + IteratorSize, SizeUnknown, _array_for, Bottom, generating_output, diff_names, + ismutationfree, NUM_EFFECTS_OVERRIDES, _NAMEDTUPLE_NAME, datatype_fieldtypes, + argument_datatype, isfieldatomic, unwrapva, iskindtype, _bits_findnext, copy_exprargs, + Generator, Filter, ismutabletypename, isvatuple, datatype_fieldcount, + isconcretedispatch, isdispatchelem, min_world, max_world, datatype_layoutsize, + datatype_arrayelem, unionlen, isidentityfree, _uniontypes, uniontypes, OneTo, Callable, + DataTypeFieldDesc, datatype_nfields, datatype_pointerfree, midpoint, is_valid_intrinsic_elptr, + allocatedinline, isbitsunion, widen_diagonal, unconstrain_vararg_length, + rename_unionall, may_invoke_generator, is_meta_expr_head, is_meta_expr, quoted, + specialize_method, hasintersect, is_nospecializeinfer, is_nospecialized, + get_nospecializeinfer_sig, tls_world_age, uniontype_layout, kwerr, + moduleroot, is_file_tracked, decode_effects_override +using ..Base.Order +import ..Base: getindex, setindex!, length, iterate, push!, isempty, first, convert, ==, + copy, popfirst!, in, haskey, resize!, copy!, append!, last, get!, size, + get, iterate, findall const getproperty = Core.getfield const setproperty! = Core.setfield! @@ -21,117 +46,19 @@ ccall(:jl_set_istopmod, Cvoid, (Any, Bool), Compiler, false) eval(x) = Core.eval(Compiler, x) eval(m, x) = Core.eval(m, x) -include(x) = Core.include(Compiler, x) -include(mod, x) = Core.include(mod, x) - -# The @inline/@noinline macros that can be applied to a function declaration are not available -# until after array.jl, and so we will mark them within a function body instead. -macro inline() Expr(:meta, :inline) end -macro noinline() Expr(:meta, :noinline) end +include(x) = Base.include(Compiler, x) +include(mod, x) = Base.include(mod, x) macro _boundscheck() Expr(:boundscheck) end -convert(::Type{Any}, Core.@nospecialize x) = x -convert(::Type{T}, x::T) where {T} = x - # These types are used by reflection.jl and expr.jl too, so declare them here. # Note that `@assume_effects` is available only after loading namedtuple.jl. 
abstract type MethodTableView end abstract type AbstractInterpreter end -# essential files and libraries -include("essentials.jl") -include("ctypes.jl") -include("generator.jl") -include("runtime_internals.jl") -include("options.jl") - -ntuple(f, ::Val{0}) = () -ntuple(f, ::Val{1}) = (@inline; (f(1),)) -ntuple(f, ::Val{2}) = (@inline; (f(1), f(2))) -ntuple(f, ::Val{3}) = (@inline; (f(1), f(2), f(3))) -ntuple(f, ::Val{n}) where {n} = ntuple(f, n::Int) -ntuple(f, n) = (Any[f(i) for i = 1:n]...,) - -# core operations & types function return_type end # promotion.jl expects this to exist is_return_type(Core.@nospecialize(f)) = f === return_type -include("promotion.jl") -include("tuple.jl") -include("pair.jl") -include("traits.jl") -include("range.jl") -include("expr.jl") -include("error.jl") - -# core numeric operations & types -==(x::T, y::T) where {T} = x === y -include("bool.jl") -include("number.jl") -include("int.jl") -include("operators.jl") -include("pointer.jl") -include("refvalue.jl") - -# the same constructor as defined in float.jl, but with a different name to avoid redefinition -_Bool(x::Real) = x==0 ? false : x==1 ? true : throw(InexactError(:Bool, Bool, x)) -# fld(x,y) == div(x,y) - ((x>=0) != (y>=0) && rem(x,y) != 0 ? 1 : 0) -fld(x::T, y::T) where {T<:Unsigned} = div(x, y) -function fld(x::T, y::T) where T<:Integer - d = div(x, y) - return d - (signbit(x ⊻ y) & (d * y != x)) -end -# cld(x,y) = div(x,y) + ((x>0) == (y>0) && rem(x,y) != 0 ? 1 : 0) -function cld(x::T, y::T) where T<:Unsigned - d = div(x, y) - return d + (d * y != x) -end -function cld(x::T, y::T) where T<:Integer - d = div(x, y) - return d + (((x > 0) == (y > 0)) & (d * y != x)) -end -# checked arithmetic -const checked_add = + -const checked_sub = - -const SignedInt = Union{Int8,Int16,Int32,Int64,Int128} -const UnsignedInt = Union{UInt8,UInt16,UInt32,UInt64,UInt128} -sub_with_overflow(x::T, y::T) where {T<:SignedInt} = checked_ssub_int(x, y) -sub_with_overflow(x::T, y::T) where {T<:UnsignedInt} = checked_usub_int(x, y) -sub_with_overflow(x::Bool, y::Bool) = (x-y, false) -add_with_overflow(x::T, y::T) where {T<:SignedInt} = checked_sadd_int(x, y) -add_with_overflow(x::T, y::T) where {T<:UnsignedInt} = checked_uadd_int(x, y) -add_with_overflow(x::Bool, y::Bool) = (x+y, false) - -include("cmem.jl") -include("strings/lazy.jl") - -# core array operations -include("indices.jl") -include("genericmemory.jl") -include("array.jl") -include("abstractarray.jl") - -# core structures -include("bitarray.jl") -include("bitset.jl") -include("abstractdict.jl") -include("iddict.jl") -include("idset.jl") -include("abstractset.jl") -include("iterators.jl") -using .Iterators: zip, enumerate -using .Iterators: Flatten, Filter, product # for generators -include("namedtuple.jl") - -# core docsystem -include("docs/core.jl") -import Core.Compiler.CoreDocs -Core.atdoc!(CoreDocs.docm) - -# sorting -include("ordering.jl") -using .Order include("compiler/sort.jl") # We don't include some.jl, but this definition is still useful. @@ -144,7 +71,7 @@ something(x::Any, y...) 
= x baremodule BuildSettings using Core: ARGS, include -using Core.Compiler: >, getindex, length +using ..Compiler: >, getindex, length global MAX_METHODS::Int = 3 @@ -193,11 +120,4 @@ include("compiler/abstractinterpretation.jl") include("compiler/typeinfer.jl") include("compiler/optimize.jl") -include("compiler/bootstrap.jl") -ccall(:jl_set_typeinf_func, Cvoid, (Any,), typeinf_ext_toplevel) - -include("compiler/parsing.jl") -Core._setparser!(fl_parse) - -end # baremodule Compiler -)) +end diff --git a/base/compiler/effects.jl b/base/compiler/effects.jl index a2e7e3dde603d..e521166fd61fa 100644 --- a/base/compiler/effects.jl +++ b/base/compiler/effects.jl @@ -351,13 +351,13 @@ function decode_effects(e::UInt32) return Effects( UInt8((e >> 0) & 0x07), UInt8((e >> 3) & 0x03), - _Bool((e >> 5) & 0x01), - _Bool((e >> 6) & 0x01), - _Bool((e >> 7) & 0x01), + Bool((e >> 5) & 0x01), + Bool((e >> 6) & 0x01), + Bool((e >> 7) & 0x01), UInt8((e >> 8) & 0x03), UInt8((e >> 10) & 0x03), UInt8((e >> 12) & 0x03), - _Bool((e >> 14) & 0x01)) + Bool((e >> 14) & 0x01)) end decode_statement_effects_override(ssaflag::UInt32) = diff --git a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl index 887a21ef7e0f6..e8de2e40c4880 100644 --- a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl +++ b/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl @@ -22,15 +22,16 @@ using ._TOP_MOD: # Base definitions @nospecialize, @specialize, BitSet, Callable, Csize_t, IdDict, IdSet, UnitRange, Vector, copy, delete!, empty!, enumerate, error, first, get, get!, haskey, in, isassigned, isempty, ismutabletype, keys, last, length, max, min, missing, pop!, push!, pushfirst!, - unwrap_unionall, !, !=, !==, &, *, +, -, :, <, <<, =>, >, |, ∈, ∉, ∩, ∪, ≠, ≤, ≥, ⊆ -using Core.Compiler: # Core.Compiler specific definitions + unwrap_unionall, !, !=, !==, &, *, +, -, :, <, <<, =>, >, |, ∈, ∉, ∩, ∪, ≠, ≤, ≥, ⊆, + hasintersect +using ..Compiler: # Core.Compiler specific definitions AbstractLattice, Bottom, IRCode, IR_FLAG_NOTHROW, InferenceResult, SimpleInferenceLattice, argextype, fieldcount_noerror, hasintersect, has_flag, intrinsic_nothrow, is_meta_expr_head, is_identity_free_argtype, isexpr, println, setfield!_nothrow, - singleton_type, try_compute_field, try_compute_fieldidx, widenconst, ⊑ + singleton_type, try_compute_field, try_compute_fieldidx, widenconst, ⊑, Compiler include(x) = _TOP_MOD.include(@__MODULE__, x) -if _TOP_MOD === Core.Compiler +if _TOP_MOD === Compiler include("compiler/ssair/EscapeAnalysis/disjoint_set.jl") else include("disjoint_set.jl") diff --git a/base/compiler/ssair/ir.jl b/base/compiler/ssair/ir.jl index 41423a03cc276..1efa10f2437ad 100644 --- a/base/compiler/ssair/ir.jl +++ b/base/compiler/ssair/ir.jl @@ -272,6 +272,7 @@ function InstructionStream(len::Int) end InstructionStream() = InstructionStream(0) length(is::InstructionStream) = length(is.stmt) +iterate(is::Compiler.InstructionStream, st::Int=1) = (st <= Compiler.length(is)) ? 
(is[st], st + 1) : nothing isempty(is::InstructionStream) = isempty(is.stmt) function add_new_idx!(is::InstructionStream) ninst = length(is) + 1 diff --git a/base/compiler/ssair/show.jl b/base/compiler/ssair/show.jl index f3e11445d6c6c..2ad14c5c5b565 100644 --- a/base/compiler/ssair/show.jl +++ b/base/compiler/ssair/show.jl @@ -14,8 +14,6 @@ end import Base: show_unquoted using Base: printstyled, with_output_color, prec_decl, @invoke -using Core.Compiler: VarState, InvalidIRError, argextype, widenconst, singleton_type, - sptypes_from_meth_instance, EMPTY_SPTYPES function Base.show(io::IO, cfg::CFG) print(io, "CFG with $(length(cfg.blocks)) blocks:") @@ -982,7 +980,7 @@ function show_ir(io::IO, ir::IRCode, config::IRShowConfig=default_config(ir); pop_new_node! = new_nodes_iter(ir)) used = stmts_used(io, ir) cfg = ir.cfg - maxssaid = length(ir.stmts) + Core.Compiler.length(ir.new_nodes) + maxssaid = length(ir.stmts) + Compiler.length(ir.new_nodes) let io = IOContext(io, :maxssaid=>maxssaid) show_ir_stmts(io, ir, 1:length(ir.stmts), config, ir.sptypes, used, cfg, 1; pop_new_node!) end @@ -1039,13 +1037,13 @@ function show_ir(io::IO, compact::IncrementalCompact, config::IRShowConfig=defau still_to_be_inserted = (last(input_bb.stmts) - compact.idx) + count result_bb = result_bbs[compact.active_result_bb] - result_bbs[compact.active_result_bb] = Core.Compiler.BasicBlock(result_bb, - Core.Compiler.StmtRange(first(result_bb.stmts), compact.result_idx+still_to_be_inserted)) + result_bbs[compact.active_result_bb] = Compiler.BasicBlock(result_bb, + Compiler.StmtRange(first(result_bb.stmts), compact.result_idx+still_to_be_inserted)) end compact_cfg = CFG(result_bbs, Int[first(result_bbs[i].stmts) for i in 2:length(result_bbs)]) pop_new_node! = new_nodes_iter(compact) - maxssaid = length(compact.result) + Core.Compiler.length(compact.new_new_nodes) + maxssaid = length(compact.result) + Compiler.length(compact.new_new_nodes) bb_idx = let io = IOContext(io, :maxssaid=>maxssaid) show_ir_stmts(io, compact, 1:compact.result_idx-1, config, compact.ir.sptypes, used_compacted, compact_cfg, 1; pop_new_node!) @@ -1066,8 +1064,8 @@ function show_ir(io::IO, compact::IncrementalCompact, config::IRShowConfig=defau inputs_bbs = copy(cfg.blocks) for (i, bb) in enumerate(inputs_bbs) if bb.stmts.stop < bb.stmts.start - inputs_bbs[i] = Core.Compiler.BasicBlock(bb, - Core.Compiler.StmtRange(last(bb.stmts), last(bb.stmts))) + inputs_bbs[i] = Compiler.BasicBlock(bb, + Compiler.StmtRange(last(bb.stmts), last(bb.stmts))) # this is not entirely correct, and will result in the bb starting again, # but is the best we can do without changing how `finish_current_bb!` works. end @@ -1075,7 +1073,7 @@ function show_ir(io::IO, compact::IncrementalCompact, config::IRShowConfig=defau uncompacted_cfg = CFG(inputs_bbs, Int[first(inputs_bbs[i].stmts) for i in 2:length(inputs_bbs)]) pop_new_node! = new_nodes_iter(compact.ir, compact.new_nodes_idx) - maxssaid = length(compact.ir.stmts) + Core.Compiler.length(compact.ir.new_nodes) + maxssaid = length(compact.ir.stmts) + Compiler.length(compact.ir.new_nodes) let io = IOContext(io, :maxssaid=>maxssaid) # first show any new nodes to be attached after the last compacted statement if compact.idx > 1 diff --git a/base/compiler/ssair/tarjan.jl b/base/compiler/ssair/tarjan.jl index 3727fe218dc1d..e73039868c367 100644 --- a/base/compiler/ssair/tarjan.jl +++ b/base/compiler/ssair/tarjan.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -using Core.Compiler: DomTree, CFG, BasicBlock, StmtRange, dominates +using .Compiler: DomTree, CFG, BasicBlock, StmtRange, dominates struct SCCStackItem v::Int32 diff --git a/base/compiler/typeinfer.jl b/base/compiler/typeinfer.jl index 1b3ff144639e4..c1b7db82bff3f 100644 --- a/base/compiler/typeinfer.jl +++ b/base/compiler/typeinfer.jl @@ -10,19 +10,19 @@ being used for this purpose alone. """ module Timings -using Core.Compiler: -, +, :, Vector, length, first, empty!, push!, pop!, @inline, +using ..Compiler: -, +, :, Vector, length, first, empty!, push!, pop!, @inline, @inbounds, copy, backtrace # What we record for any given frame we infer during type inference. struct InferenceFrameInfo mi::Core.MethodInstance world::UInt64 - sptypes::Vector{Core.Compiler.VarState} + sptypes::Vector{Compiler.VarState} slottypes::Vector{Any} nargs::Int end -function _typeinf_identifier(frame::Core.Compiler.InferenceState) +function _typeinf_identifier(frame::Compiler.InferenceState) mi_info = InferenceFrameInfo( frame.linfo, frame_world(sv), @@ -36,7 +36,7 @@ end _typeinf_identifier(frame::InferenceFrameInfo) = frame """ - Core.Compiler.Timing(mi_info, start_time, ...) + Compiler.Timing(mi_info, start_time, ...) Internal type containing the timing result for running type inference on a single MethodInstance. @@ -65,18 +65,18 @@ const _timings = Timing[] # ROOT() is an empty function used as the top-level Timing node to measure all time spent # *not* in type inference during a given recording trace. It is used as a "dummy" node. function ROOT() end -const ROOTmi = Core.Compiler.specialize_method( - first(Core.Compiler.methods(ROOT)), Tuple{typeof(ROOT)}, Core.svec()) +const ROOTmi = Compiler.specialize_method( + first(Compiler.methods(ROOT)), Tuple{typeof(ROOT)}, Core.svec()) """ - Core.Compiler.reset_timings() + Compiler.reset_timings() -Empty out the previously recorded type inference timings (`Core.Compiler._timings`), and +Empty out the previously recorded type inference timings (`Compiler._timings`), and start the ROOT() timer again. `ROOT()` measures all time spent _outside_ inference. """ function reset_timings() end push!(_timings, Timing( # The MethodInstance for ROOT(), and default empty values for other fields. - InferenceFrameInfo(ROOTmi, 0x0, Core.Compiler.VarState[], Any[Core.Const(ROOT)], 1), + InferenceFrameInfo(ROOTmi, 0x0, Compiler.VarState[], Any[Core.Const(ROOT)], 1), _time_ns())) function close_current_timer() end function enter_new_timer(frame) end @@ -85,7 +85,7 @@ function exit_current_timer(_expected_frame_) end end # module Timings """ - Core.Compiler.__set_measure_typeinf(onoff::Bool) + Compiler.__set_measure_typeinf(onoff::Bool) If set to `true`, record per-method-instance timings within type inference in the Compiler. 
""" @@ -1179,7 +1179,7 @@ end function return_type(@nospecialize(f), t::DataType) # this method has a special tfunc world = tls_world_age() args = Any[_return_type, NativeInterpreter(world), Tuple{Core.Typeof(f), t.parameters...}] - return ccall(:jl_call_in_typeinf_world, Any, (Ptr{Ptr{Cvoid}}, Cint), args, length(args)) + return ccall(:jl_call_in_typeinf_world, Any, (Ptr{Any}, Cint), args, length(args)) end function return_type(@nospecialize(f), t::DataType, world::UInt) @@ -1193,7 +1193,7 @@ end function return_type(t::DataType, world::UInt) args = Any[_return_type, NativeInterpreter(world), t] - return ccall(:jl_call_in_typeinf_world, Any, (Ptr{Ptr{Cvoid}}, Cint), args, length(args)) + return ccall(:jl_call_in_typeinf_world, Any, (Ptr{Any}, Cint), args, length(args)) end function _return_type(interp::AbstractInterpreter, t::DataType) diff --git a/base/compilerimg.jl b/base/compilerimg.jl new file mode 100644 index 0000000000000..c353ee614924b --- /dev/null +++ b/base/compilerimg.jl @@ -0,0 +1,4 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +baremodule Base; end +Core.include(Base, "Base_compiler.jl") diff --git a/base/docs/Docs.jl b/base/docs/Docs.jl index 1a2403bbb8644..61c0cf71e70c2 100644 --- a/base/docs/Docs.jl +++ b/base/docs/Docs.jl @@ -750,15 +750,16 @@ include("utils.jl") # Swap out the bootstrap macro with the real one. Core.atdoc!(docm) -function loaddocs(docs::Vector{Core.SimpleVector}) - for (mod, ex, str, file, line) in docs +function loaddocs(docs::Base.CoreDocs.DocLinkedList) + while isdefined(docs, :doc) + (mod, ex, str, file, line) = docs.doc data = Dict{Symbol,Any}(:path => string(file), :linenumber => line) doc = docstr(str, data) lno = LineNumberNode(line, file) docstring = docm(lno, mod, doc, ex, false) # expand the real @doc macro now Core.eval(mod, Expr(:var"hygienic-scope", docstring, Docs, lno)) + docs = docs.next end - empty!(docs) nothing end diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index d618330e79874..c872244964160 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -2614,7 +2614,7 @@ ERROR: UndefVarError: `a` not defined in `M` Suggestion: add an appropriate import or assignment. This global was declared but not assigned. Stacktrace: [1] getproperty(x::Module, f::Symbol) - @ Base ./Base.jl:42 + @ Base ./Base_compiler.jl:40 [2] top-level scope @ none:1 diff --git a/base/docs/core.jl b/base/docs/core.jl index 718e49917632f..93265416099f9 100644 --- a/base/docs/core.jl +++ b/base/docs/core.jl @@ -2,15 +2,21 @@ module CoreDocs -import ..esc, ..push!, ..getindex, ..unsafe_load, ..Csize_t, ..@nospecialize +import Core: @nospecialize, SimpleVector -@nospecialize # don't specialize on any arguments of the methods declared herein +struct DocLinkedList + doc::SimpleVector + next::DocLinkedList + DocLinkedList() = new() + DocLinkedList(doc::SimpleVector, next::DocLinkedList) = new(doc, next) +end +global DOCS = DocLinkedList() function doc!(source::LineNumberNode, mod::Module, str, ex) - push!(DOCS, Core.svec(mod, ex, str, source.file, source.line)) + global DOCS + DOCS = DocLinkedList(Core.svec(mod, ex, str, source.file, source.line), DOCS) nothing end -const DOCS = Array{Core.SimpleVector,1}() isexpr(x, h::Symbol) = isa(x, Expr) && x.head === h @@ -25,9 +31,9 @@ function docm(source::LineNumberNode, mod::Module, str, x) else out = Expr(:block, x, out) end - return esc(out) + return Expr(:escape, out) end docm(source::LineNumberNode, mod::Module, x) = - isexpr(x, :->) ? 
docm(source, mod, x.args[1], x.args[2].args[2]) : error("invalid '@doc'.") + (isa(x, Expr) && x.head === :->) ? docm(source, mod, x.args[1], x.args[2].args[2]) : error("invalid '@doc'.") end diff --git a/base/error.jl b/base/error.jl index c49ede624607d..276555033443a 100644 --- a/base/error.jl +++ b/base/error.jl @@ -42,6 +42,7 @@ typeof(error).name.max_methods = UInt8(2) Raise an `ErrorException` with the given message. """ error(s::AbstractString) = throw(ErrorException(s)) +error() = throw(ErrorException("")) """ error(msg...) diff --git a/base/errorshow.jl b/base/errorshow.jl index 7225a024f529e..70ac8105feb21 100644 --- a/base/errorshow.jl +++ b/base/errorshow.jl @@ -448,7 +448,7 @@ function show_method_candidates(io::IO, ex::MethodError, kwargs=[]) # pool MethodErrors for these two functions. if f === convert && !isempty(arg_types_param) at1 = arg_types_param[1] - if isType(at1) && !Core.Compiler.has_free_typevars(at1) + if isType(at1) && !has_free_typevars(at1) push!(funcs, (at1.parameters[1], arg_types_param[2:end])) end end diff --git a/base/essentials.jl b/base/essentials.jl index 64fbaea95d4e7..89b891e216d5a 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -184,11 +184,7 @@ end _nameof(m::Module) = ccall(:jl_module_name, Ref{Symbol}, (Any,), m) function _is_internal(__module__) - if ccall(:jl_base_relative_to, Any, (Any,), __module__)::Module === Core.Compiler || - _nameof(__module__) === :Base - return true - end - return false + return true end # can be used in place of `@assume_effects :total` (supposed to be used for bootstrapping) diff --git a/base/float.jl b/base/float.jl index ff628f0ac7126..90a5d8b1c66f4 100644 --- a/base/float.jl +++ b/base/float.jl @@ -248,8 +248,6 @@ for t1 in (Float16, Float32, Float64) end end -Bool(x::Real) = x==0 ? false : x==1 ? true : throw(InexactError(:Bool, Bool, x)) - promote_rule(::Type{Float64}, ::Type{UInt128}) = Float64 promote_rule(::Type{Float64}, ::Type{Int128}) = Float64 promote_rule(::Type{Float32}, ::Type{UInt128}) = Float32 diff --git a/base/iterators.jl b/base/iterators.jl index 1a0d42ed7447f..6b8d9fe75e302 100644 --- a/base/iterators.jl +++ b/base/iterators.jl @@ -18,14 +18,9 @@ using .Base: tail, fieldtypes, min, max, minimum, zero, oneunit, promote, promote_shape, LazyString using Core: @doc -if Base !== Core.Compiler using .Base: cld, fld, SubArray, view, resize!, IndexCartesian using .Base.Checked: checked_mul -else - # Checked.checked_mul is not available during bootstrapping: - const checked_mul = * -end import .Base: first, last, @@ -35,13 +30,9 @@ import .Base: getindex, setindex!, get, iterate, popfirst!, isdone, peek, intersect -export enumerate, zip, rest, countfrom, take, drop, takewhile, dropwhile, cycle, repeated, product, flatten, flatmap +export enumerate, zip, rest, countfrom, take, drop, takewhile, dropwhile, cycle, repeated, product, flatten, flatmap, partition public accumulate, filter, map, peel, reverse, Stateful -if Base !== Core.Compiler -export partition -end - """ Iterators.map(f, iterators...) 
@@ -279,10 +270,8 @@ pairs(v::Core.SimpleVector) = Pairs(v, LinearIndices(v)) pairs(A::AbstractVector) = pairs(IndexLinear(), A) # pairs(v::Pairs) = v # listed for reference, but already defined from being an AbstractDict -if Base !== Core.Compiler pairs(::IndexCartesian, A::AbstractArray) = Pairs(A, Base.CartesianIndices(axes(A))) pairs(A::AbstractArray) = pairs(IndexCartesian(), A) -end length(v::Pairs) = length(getfield(v, :itr)) axes(v::Pairs) = axes(getfield(v, :itr)) @@ -1302,7 +1291,6 @@ true """ flatmap(f, c...) = flatten(map(f, c...)) -if Base !== Core.Compiler # views are not defined @doc """ partition(collection, n) @@ -1509,8 +1497,6 @@ IteratorSize(::Type{<:Stateful{T}}) where {T} = IteratorSize(T) isa IsInfinite ? eltype(::Type{<:Stateful{T}}) where {T} = eltype(T) IteratorEltype(::Type{<:Stateful{T}}) where {T} = IteratorEltype(T) -end # if statement several hundred lines above - """ only(x) diff --git a/base/meta.jl b/base/meta.jl index e648df29c12f9..bcf4fbf632ab2 100644 --- a/base/meta.jl +++ b/base/meta.jl @@ -449,7 +449,7 @@ function _partially_inline!(@nospecialize(x), slot_replacements::Vector{Any}, @assert isa(arg, Union{GlobalRef, Symbol}) return x end - elseif !Core.Compiler.is_meta_expr_head(head) + elseif !Base.is_meta_expr_head(head) partially_inline!(x.args, slot_replacements, type_signature, static_param_values, slot_offset, statement_offset, boundscheck) end diff --git a/base/opaque_closure.jl b/base/opaque_closure.jl index 0f1fdf47afed8..26b39879ca852 100644 --- a/base/opaque_closure.jl +++ b/base/opaque_closure.jl @@ -38,8 +38,8 @@ macro opaque(ty, ex) end # OpaqueClosure construction from pre-inferred CodeInfo/IRCode -using Core.Compiler: IRCode, SSAValue -using Core: CodeInfo +using Core: CodeInfo, SSAValue +using Base.Compiler: IRCode function compute_ir_rettype(ir::IRCode) rt = Union{} diff --git a/base/promotion.jl b/base/promotion.jl index 1004c64433ec1..72257f8ba5a3d 100644 --- a/base/promotion.jl +++ b/base/promotion.jl @@ -207,7 +207,7 @@ function typejoin_union_tuple(T::DataType) c = Vector{Any}(undef, lr) for i = 1:lr pi = p[i] - U = Core.Compiler.unwrapva(pi) + U = unwrapva(pi) if U === Union{} ci = Union{} elseif U isa Union @@ -217,7 +217,7 @@ function typejoin_union_tuple(T::DataType) else ci = promote_typejoin_union(U) end - if i == lr && Core.Compiler.isvarargtype(pi) + if i == lr && isvarargtype(pi) c[i] = isdefined(pi, :N) ? Vararg{ci, pi.N} : Vararg{ci} else c[i] = ci @@ -493,12 +493,6 @@ max(x::Real, y::Real) = max(promote(x,y)...) min(x::Real, y::Real) = min(promote(x,y)...) minmax(x::Real, y::Real) = minmax(promote(x, y)...) -if isdefined(Core, :Compiler) - const _return_type = Core.Compiler.return_type -else - _return_type(@nospecialize(f), @nospecialize(t)) = Any -end - function TupleOrBottom(tt...) any(p -> p === Union{}, tt) && return Union{} return Tuple{tt...} diff --git a/base/reduce.jl b/base/reduce.jl index 0c37256b64fb5..952d71bb2a849 100644 --- a/base/reduce.jl +++ b/base/reduce.jl @@ -1333,6 +1333,8 @@ end end @inline _all_tuple(f, anymissing) = anymissing ? missing : true +all(::Tuple{Missing}) = missing + ## count _bool(f) = x->f(x)::Bool diff --git a/base/reflection.jl b/base/reflection.jl index 0b7612e44f744..834325dd41583 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -1,7 +1,5 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -const Compiler = Core.Compiler - """ code_lowered(f, types; generated=true, debuginfo=:default) @@ -151,7 +149,7 @@ function method_instances(@nospecialize(f), @nospecialize(t), world::UInt) # this make a better error message than the typeassert that follows world == typemax(UInt) && error("code reflection cannot be used from generated functions") for match in _methods_by_ftype(tt, -1, world)::Vector - instance = Core.Compiler.specialize_method(match::Core.MethodMatch) + instance = specialize_method(match::Core.MethodMatch) push!(results, instance) end return results diff --git a/base/show.jl b/base/show.jl index 627982b2bcb1a..26efd0a93f716 100644 --- a/base/show.jl +++ b/base/show.jl @@ -1,6 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Core.Compiler: has_typevar +using .Compiler: has_typevar function show(io::IO, ::MIME"text/plain", u::UndefInitializer) show(io, u) @@ -1386,10 +1386,7 @@ function show_mi(io::IO, mi::Core.MethodInstance, from_stackframe::Bool=false) end # These sometimes show up as Const-values in InferenceFrameInfo signatures -show(io::IO, r::Core.Compiler.UnitRange) = show(io, r.start : r.stop) -show(io::IO, mime::MIME{Symbol("text/plain")}, r::Core.Compiler.UnitRange) = show(io, mime, r.start : r.stop) - -function show(io::IO, mi_info::Core.Compiler.Timings.InferenceFrameInfo) +function show(io::IO, mi_info::Compiler.Timings.InferenceFrameInfo) mi = mi_info.mi def = mi.def if isa(def, Method) @@ -1410,8 +1407,8 @@ function show(io::IO, mi_info::Core.Compiler.Timings.InferenceFrameInfo) end end -function show(io::IO, tinf::Core.Compiler.Timings.Timing) - print(io, "Core.Compiler.Timings.Timing(", tinf.mi_info, ") with ", length(tinf.children), " children") +function show(io::IO, tinf::Compiler.Timings.Timing) + print(io, "Compiler.Timings.Timing(", tinf.mi_info, ") with ", length(tinf.children), " children") end function show_delim_array(io::IO, itr::Union{AbstractArray,SimpleVector}, op, delim, cl, @@ -2851,21 +2848,14 @@ function show(io::IO, vm::Core.TypeofVararg) end module IRShow - const Compiler = Core.Compiler + import ..Compiler using Core.IR import ..Base import .Compiler: IRCode, CFG, scan_ssa_use!, isexpr, compute_basic_blocks, block_for_inst, IncrementalCompact, - Effects, ALWAYS_TRUE, ALWAYS_FALSE, DebugInfoStream, getdebugidx - Base.getindex(r::Compiler.StmtRange, ind::Integer) = Compiler.getindex(r, ind) - Base.size(r::Compiler.StmtRange) = Compiler.size(r) - Base.first(r::Compiler.StmtRange) = Compiler.first(r) - Base.last(r::Compiler.StmtRange) = Compiler.last(r) - Base.length(is::Compiler.InstructionStream) = Compiler.length(is) - Base.iterate(is::Compiler.InstructionStream, st::Int=1) = (st <= Compiler.length(is)) ? 
(is[st], st + 1) : nothing - Base.getindex(is::Compiler.InstructionStream, idx::Int) = Compiler.getindex(is, idx) - Base.getindex(node::Compiler.Instruction, fld::Symbol) = Compiler.getindex(node, fld) - Base.getindex(ir::IRCode, ssa::SSAValue) = Compiler.getindex(ir, ssa) + Effects, ALWAYS_TRUE, ALWAYS_FALSE, DebugInfoStream, getdebugidx, + VarState, InvalidIRError, argextype, widenconst, singleton_type, + sptypes_from_meth_instance, EMPTY_SPTYPES include("compiler/ssair/show.jl") const __debuginfo = Dict{Symbol, Any}( @@ -2893,12 +2883,12 @@ function show(io::IO, src::CodeInfo; debuginfo::Symbol=:source) print(io, ")") end -function show(io::IO, inferred::Core.Compiler.InferenceResult) +function show(io::IO, inferred::Compiler.InferenceResult) mi = inferred.linfo tt = mi.specTypes.parameters[2:end] tts = join(["::$(t)" for t in tt], ", ") rettype = inferred.result - if isa(rettype, Core.Compiler.InferenceState) + if isa(rettype, Compiler.InferenceState) rettype = rettype.bestguess end if isa(mi.def, Method) @@ -2908,19 +2898,19 @@ function show(io::IO, inferred::Core.Compiler.InferenceResult) end end -show(io::IO, sv::Core.Compiler.InferenceState) = +show(io::IO, sv::Compiler.InferenceState) = (print(io, "InferenceState for "); show(io, sv.linfo)) -show(io::IO, ::Core.Compiler.NativeInterpreter) = +show(io::IO, ::Compiler.NativeInterpreter) = print(io, "Core.Compiler.NativeInterpreter(...)") -show(io::IO, cache::Core.Compiler.CachedMethodTable) = - print(io, typeof(cache), "(", Core.Compiler.length(cache.cache), " entries)") +show(io::IO, cache::Compiler.CachedMethodTable) = + print(io, typeof(cache), "(", Compiler.length(cache.cache), " entries)") -function show(io::IO, limited::Core.Compiler.LimitedAccuracy) - print(io, "Core.Compiler.LimitedAccuracy(") +function show(io::IO, limited::Compiler.LimitedAccuracy) + print(io, "Compiler.LimitedAccuracy(") show(io, limited.typ) - print(io, ", #= ", Core.Compiler.length(limited.causes), " cause(s) =#)") + print(io, ", #= ", Compiler.length(limited.causes), " cause(s) =#)") end function dump(io::IOContext, x::SimpleVector, n::Int, indent) diff --git a/base/stacktraces.jl b/base/stacktraces.jl index 102e415a22de2..c3d86fc8f5151 100644 --- a/base/stacktraces.jl +++ b/base/stacktraces.jl @@ -124,7 +124,7 @@ end const top_level_scope_sym = Symbol("top-level scope") -function lookup(ip::Union{Base.InterpreterIP,Core.Compiler.InterpreterIP}) +function lookup(ip::Union{Base.InterpreterIP}) code = ip.code if code === nothing # interpreted top-level expression with no CodeInfo @@ -175,7 +175,7 @@ Return a stack trace in the form of a vector of `StackFrame`s. (By default stack doesn't return C functions, but this can be enabled.) When called without specifying a trace, `stacktrace` first calls `backtrace`. """ -Base.@constprop :none function stacktrace(trace::Vector{<:Union{Base.InterpreterIP,Core.Compiler.InterpreterIP,Ptr{Cvoid}}}, c_funcs::Bool=false) +Base.@constprop :none function stacktrace(trace::Vector{<:Union{Base.InterpreterIP,Ptr{Cvoid}}}, c_funcs::Bool=false) stack = StackTrace() for ip in trace for frame in lookup(ip) diff --git a/base/sysimg.jl b/base/sysimg.jl index ccc8ef38e81bc..8347d63d5b740 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -1,6 +1,12 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -Core.include(Main, "Base.jl") +# Can be built either a monolith or with a minimal Base image that just has the +# compiler. 
+if isdefined(Main, :Base); else +Core.eval(Main, :(baremodule Base; end)) +end + +Core.include(Base, "Base.jl") using .Base diff --git a/base/tuple.jl b/base/tuple.jl index fc213410cfd7c..8690f89bdc263 100644 --- a/base/tuple.jl +++ b/base/tuple.jl @@ -664,7 +664,9 @@ all(x::Tuple{}) = true all(x::Tuple{Bool}) = x[1] all(x::Tuple{Bool, Bool}) = x[1]&x[2] all(x::Tuple{Bool, Bool, Bool}) = x[1]&x[2]&x[3] -# use generic reductions for the rest +all(x::Tuple{Any}) = x[1] || return false +all(f, x::Tuple{}) = true +all(f, x::Tuple{Any}) = all((f(x[1]),)) any(x::Tuple{}) = false any(x::Tuple{Bool}) = x[1] diff --git a/src/jltypes.c b/src/jltypes.c index 71eaa003d7d4a..6c6325d84a5ff 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -3956,14 +3956,16 @@ void post_image_load_hooks(void) { // Ensure that `Base` has been loaded. assert(jl_base_module != NULL); - jl_libdl_module = (jl_module_t *)jl_get_global( - ((jl_module_t *)jl_get_global(jl_base_module, jl_symbol("Libc"))), - jl_symbol("Libdl") - ); - jl_libdl_dlopen_func = jl_get_global( - jl_libdl_module, - jl_symbol("dlopen") - ); + jl_module_t *libc_module = (jl_module_t *)jl_get_global(jl_base_module, jl_symbol("Libc")); + if (libc_module) { + jl_libdl_module = (jl_module_t *)jl_get_global(libc_module, jl_symbol("Libdl")); + } + if (jl_libdl_module) { + jl_libdl_dlopen_func = jl_get_global( + jl_libdl_module, + jl_symbol("dlopen") + ); + } } #undef XX diff --git a/stdlib/InteractiveUtils/src/codeview.jl b/stdlib/InteractiveUtils/src/codeview.jl index 92354d2fb9a75..030955b8e36d8 100644 --- a/stdlib/InteractiveUtils/src/codeview.jl +++ b/stdlib/InteractiveUtils/src/codeview.jl @@ -145,7 +145,7 @@ See also: [`@code_warntype`](@ref), [`code_typed`](@ref), [`code_lowered`](@ref) """ function code_warntype(io::IO, @nospecialize(f), @nospecialize(tt=Base.default_tt(f)); world=Base.get_world_counter(), - interp::Core.Compiler.AbstractInterpreter=Core.Compiler.NativeInterpreter(world), + interp::Base.Compiler.AbstractInterpreter=Base.Compiler.NativeInterpreter(world), debuginfo::Symbol=:default, optimize::Bool=false, kwargs...) 
(ccall(:jl_is_in_pure_context, Bool, ()) || world == typemax(UInt)) && error("code reflection cannot be used from generated functions") @@ -159,12 +159,12 @@ function code_warntype(io::IO, @nospecialize(f), @nospecialize(tt=Base.default_t return nothing end tt = Base.signature_type(f, tt) - matches = Core.Compiler.findall(tt, Core.Compiler.method_table(interp)) + matches = findall(tt, Base.Compiler.method_table(interp)) matches === nothing && Base.raise_match_failure(:code_warntype, tt) for match in matches.matches match = match::Core.MethodMatch - src = Core.Compiler.typeinf_code(interp, match, optimize) - mi = Core.Compiler.specialize_method(match) + src = Base.Compiler.typeinf_code(interp, match, optimize) + mi = Base.Compiler.specialize_method(match) mi.def isa Method && (nargs = (mi.def::Method).nargs) print_warntype_mi(io, mi) if src isa Core.CodeInfo @@ -202,7 +202,7 @@ function _dump_function(@nospecialize(f), @nospecialize(t), native::Bool, wrappe if !isa(f, Core.OpaqueClosure) world = Base.get_world_counter() match = Base._which(signature_type(f, t); world) - mi = Core.Compiler.specialize_method(match) + mi = Base.specialize_method(match) # TODO: use jl_is_cacheable_sig instead of isdispatchtuple isdispatchtuple(mi.specTypes) || (warning = GENERIC_SIG_WARNING) else @@ -213,9 +213,9 @@ function _dump_function(@nospecialize(f), @nospecialize(t), native::Bool, wrappe # specialization and we can't infer anything more precise either. world = f.source.primary_world mi = f.source.specializations::Core.MethodInstance - Core.Compiler.hasintersect(typeof(f).parameters[1], tt) || (warning = OC_MISMATCH_WARNING) + Base.hasintersect(typeof(f).parameters[1], tt) || (warning = OC_MISMATCH_WARNING) else - mi = Core.Compiler.specialize_method(f.source, Tuple{typeof(f.captures), tt.parameters...}, Core.svec()) + mi = Base.specialize_method(f.source, Tuple{typeof(f.captures), tt.parameters...}, Core.svec()) isdispatchtuple(mi.specTypes) || (warning = GENERIC_SIG_WARNING) end end @@ -237,7 +237,7 @@ function _dump_function(@nospecialize(f), @nospecialize(t), native::Bool, wrappe if isempty(str) # if that failed (or we want metadata), use LLVM to generate more accurate assembly output if !isa(f, Core.OpaqueClosure) - src = Core.Compiler.typeinf_code(Core.Compiler.NativeInterpreter(world), mi, true) + src = Base.Compiler.typeinf_code(Base.Compiler.NativeInterpreter(world), mi, true) else src, rt = Base.get_oc_code_rt(f, tt, true) end @@ -246,7 +246,7 @@ function _dump_function(@nospecialize(f), @nospecialize(t), native::Bool, wrappe end else if !isa(f, Core.OpaqueClosure) - src = Core.Compiler.typeinf_code(Core.Compiler.NativeInterpreter(world), mi, true) + src = Base.Compiler.typeinf_code(Base.Compiler.NativeInterpreter(world), mi, true) else src, rt = Base.get_oc_code_rt(f, tt, true) end diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index 23f3337ab5e8e..5142dd5e7f680 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -5,7 +5,7 @@ module REPLCompletions export completions, shell_completions, bslash_completions, completion_text using Core: Const -const CC = Core.Compiler +const CC = Base.Compiler using Base.Meta using Base: propertynames, something, IdSet using Base.Filesystem: _readdirx diff --git a/sysimage.mk b/sysimage.mk index d0e106d4ce3da..d3dee6906ccfa 100644 --- a/sysimage.mk +++ b/sysimage.mk @@ -22,6 +22,8 @@ $(build_private_libdir)/%.$(SHLIB_EXT): $(build_private_libdir)/%-o.a @$(DSYMUTIL) $@ COMPILER_SRCS := 
$(addprefix $(JULIAHOME)/, \ + base/Base_compiler.jl \ + base/compilerimg.jl \ base/boot.jl \ base/docs/core.jl \ base/abstractarray.jl \ @@ -49,7 +51,7 @@ COMPILER_SRCS := $(addprefix $(JULIAHOME)/, \ base/pointer.jl \ base/promotion.jl \ base/range.jl \ - base/reflection.jl \ + base/runtime_internals.jl \ base/traits.jl \ base/refvalue.jl \ base/tuple.jl) @@ -60,13 +62,13 @@ BASE_SRCS := $(sort $(shell find $(JULIAHOME)/base -name \*.jl -and -not -name s STDLIB_SRCS := $(JULIAHOME)/base/sysimg.jl $(SYSIMG_STDLIBS_SRCS) RELBUILDROOT := $(call rel_path,$(JULIAHOME)/base,$(BUILDROOT)/base)/ # <-- make sure this always has a trailing slash -$(build_private_libdir)/corecompiler.ji: $(COMPILER_SRCS) +$(build_private_libdir)/basecompiler.ji: $(COMPILER_SRCS) @$(call PRINT_JULIA, cd $(JULIAHOME)/base && \ $(call spawn,$(JULIA_EXECUTABLE)) -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp \ - --startup-file=no --warn-overwrite=yes -g$(BOOTSTRAP_DEBUG_LEVEL) -O1 compiler/compiler.jl) + --startup-file=no --warn-overwrite=yes -g$(BOOTSTRAP_DEBUG_LEVEL) -O1 compilerimg.jl) @mv $@.tmp $@ -$(build_private_libdir)/sys.ji: $(build_private_libdir)/corecompiler.ji $(JULIAHOME)/VERSION $(BASE_SRCS) $(STDLIB_SRCS) +$(build_private_libdir)/sys.ji: $(build_private_libdir)/basecompiler.ji $(JULIAHOME)/VERSION $(BASE_SRCS) $(STDLIB_SRCS) @$(call PRINT_JULIA, cd $(JULIAHOME)/base && \ if ! JULIA_BINDIR=$(call cygpath_w,$(build_bindir)) WINEPATH="$(call cygpath_w,$(build_bindir));$$WINEPATH" \ $(call spawn, $(JULIA_EXECUTABLE)) -g1 -O1 -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp $(JULIA_SYSIMG_BUILD_FLAGS) \ diff --git a/test/ambiguous.jl b/test/ambiguous.jl index 2f8a4193cf592..43ec1aab0557d 100644 --- a/test/ambiguous.jl +++ b/test/ambiguous.jl @@ -192,8 +192,7 @@ end # some ambiguities involving Union{} type parameters may be expected, but not required let ambig = Set(detect_ambiguities(Core; recursive=true, ambiguous_bottom=true)) - @test !isempty(ambig) - @test length(ambig) < 30 + @test isempty(ambig) end STDLIB_DIR = Sys.STDLIB @@ -349,10 +348,6 @@ end # TODO: review this list and remove everything between test_broken and test let need_to_handle_undef_sparam = Set{Method}(detect_unbound_args(Core; recursive=true)) - pop!(need_to_handle_undef_sparam, which(Core.Compiler.eltype, Tuple{Type{Tuple{Any}}})) - @test_broken isempty(need_to_handle_undef_sparam) - pop!(need_to_handle_undef_sparam, which(Core.Compiler._cat, Tuple{Any, AbstractArray})) - pop!(need_to_handle_undef_sparam, first(methods(Core.Compiler.same_names))) @test isempty(need_to_handle_undef_sparam) end let need_to_handle_undef_sparam = diff --git a/test/backtrace.jl b/test/backtrace.jl index 68873678df57b..ee04a46b17304 100644 --- a/test/backtrace.jl +++ b/test/backtrace.jl @@ -237,7 +237,7 @@ let trace = try end @test trace[1].func === Symbol("top-level scope") @test trace[1].file === :a_filename - @test trace[1].line == 3 + @test trace[1].line in (2, 3) end # issue #45171 diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/test/compiler/EscapeAnalysis/EAUtils.jl index 65fa9f75fe03f..4f1d1c0bba898 100644 --- a/test/compiler/EscapeAnalysis/EAUtils.jl +++ b/test/compiler/EscapeAnalysis/EAUtils.jl @@ -127,10 +127,6 @@ end using Core: Argument, SSAValue using .CC: widenconst, singleton_type -if EA._TOP_MOD === CC - Base.getindex(estate::EscapeState, @nospecialize(x)) = CC.getindex(estate, x) -end - function get_name_color(x::EscapeInfo, symbol::Bool = false) getname(x) = string(nameof(x)) if x 
=== EA.⊥ diff --git a/test/compiler/effects.jl b/test/compiler/effects.jl index 4174aa3d01030..bc9bc7e2295fe 100644 --- a/test/compiler/effects.jl +++ b/test/compiler/effects.jl @@ -927,7 +927,7 @@ unknown_sparam_nothrow2(x::Ref{Ref{T}}) where T = (T; nothing) # purely abstract recursion should not taint :terminates # https://github.com/JuliaLang/julia/issues/48983 abstractly_recursive1() = abstractly_recursive2() -abstractly_recursive2() = (Core.Compiler._return_type(abstractly_recursive1, Tuple{}); 1) +abstractly_recursive2() = (Base._return_type(abstractly_recursive1, Tuple{}); 1) abstractly_recursive3() = abstractly_recursive2() @test_broken Core.Compiler.is_terminates(Base.infer_effects(abstractly_recursive3, ())) actually_recursive1(x) = actually_recursive2(x) diff --git a/test/docs.jl b/test/docs.jl index 8db9db30b8463..8cfdbba3f2d97 100644 --- a/test/docs.jl +++ b/test/docs.jl @@ -1518,7 +1518,7 @@ struct B_20087 end # issue #27832 _last_atdoc = Core.atdoc -Core.atdoc!(Core.Compiler.CoreDocs.docm) # test bootstrap doc system +Core.atdoc!(Base.CoreDocs.docm) # test bootstrap doc system """ """ diff --git a/test/misc.jl b/test/misc.jl index e089395ce4557..7f9992e22a3d7 100644 --- a/test/misc.jl +++ b/test/misc.jl @@ -560,7 +560,7 @@ struct ambigconvert; end # inject a problematic `convert` method to ensure it st Base.convert(::Any, v::ambigconvert) = v import Base.summarysize -@test summarysize(Core) > (summarysize(Core.Compiler) + Base.summarysize(Core.Intrinsics)) > Core.sizeof(Core) +@test summarysize(Core) > Base.summarysize(Core.Intrinsics) > Core.sizeof(Core) @test summarysize(Base) > 100_000 * sizeof(Ptr) let R = Ref{Any}(nothing), depth = 10^6 From 22c5bdc586d43af128dc7b7d8274308196e0d235 Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Wed, 6 Nov 2024 20:19:07 -0600 Subject: [PATCH 386/537] Delete buggy `stat(::Integer)` method (#54855) "Where did someone get a RawFD as an integer anyway?" -@stefankarpinski See also #51711 Fixes #51710 --- base/deprecated.jl | 2 -- test/file.jl | 5 +++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/base/deprecated.jl b/base/deprecated.jl index 953de358a68ee..84ef89e44b473 100644 --- a/base/deprecated.jl +++ b/base/deprecated.jl @@ -531,6 +531,4 @@ end # BEGIN 1.12 deprecations -@deprecate stat(fd::Integer) stat(RawFD(fd)) - # END 1.12 deprecations diff --git a/test/file.jl b/test/file.jl index 498761d6a624b..6425155c82965 100644 --- a/test/file.jl +++ b/test/file.jl @@ -459,6 +459,11 @@ end end end +# Issue #51710 and PR #54855 +@test_throws MethodError stat(7) +@test_throws MethodError ispath(false) +@test_throws MethodError ispath(1) + # On windows the filesize of a folder is the accumulation of all the contained # files and is thus zero in this case. 
if Sys.iswindows() From 671cd5e1db70322d043680336d96259553e7f023 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Wed, 6 Nov 2024 21:50:04 -0500 Subject: [PATCH 387/537] missing gc-root store in subtype (#56472) Fixes #56141 Introduced by #52228 (a624d445c02c) --- src/subtype.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/subtype.c b/src/subtype.c index f5c13b77ea0cf..8de5b3514ef2f 100644 --- a/src/subtype.c +++ b/src/subtype.c @@ -4700,12 +4700,12 @@ static jl_value_t *insert_nondiagonal(jl_value_t *type, jl_varbinding_t *troot, jl_value_t *n = jl_unwrap_vararg_num(type); if (widen2ub == 0) widen2ub = !(n && jl_is_long(n)) || jl_unbox_long(n) > 1; - jl_value_t *newt; - JL_GC_PUSH2(&newt, &n); - newt = insert_nondiagonal(t, troot, widen2ub); - if (t != newt) + jl_value_t *newt = insert_nondiagonal(t, troot, widen2ub); + if (t != newt) { + JL_GC_PUSH1(&newt); type = (jl_value_t *)jl_wrap_vararg(newt, n, 0, 0); - JL_GC_POP(); + JL_GC_POP(); + } } else if (jl_is_datatype(type)) { if (jl_is_tuple_type(type)) { @@ -4742,7 +4742,7 @@ static jl_value_t *_widen_diagonal(jl_value_t *t, jl_varbinding_t *troot) { static jl_value_t *widen_diagonal(jl_value_t *t, jl_unionall_t *u, jl_varbinding_t *troot) { jl_varbinding_t vb = { u->var, NULL, NULL, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, troot }; - jl_value_t *nt; + jl_value_t *nt = NULL; JL_GC_PUSH2(&vb.innervars, &nt); if (jl_is_unionall(u->body)) nt = widen_diagonal(t, (jl_unionall_t *)u->body, &vb); From 4278ded29044bfaf90f792bf02547ec3937fe8af Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 7 Nov 2024 03:36:42 -0500 Subject: [PATCH 388/537] further defer jl_insert_backedges after loading (#56447) Finish fully breaking the dependency between method insertions and inferring whether the cache is valid. The cache should be inferable in parallel and in aggregate after all loading is finished. This prepares us for moving this code into Julia (Core.Compiler) next. 
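For anyone who wants to see what this deferral touches in practice: the re-verification pass changed below still records every cache entry it rejects in `_jl_debug_method_invalidation` (tagged `"verify_methods"`), and that log is reachable from the Julia side. A minimal sketch of capturing it around a package load — this mirrors what SnoopCompileCore does, and `Example` is just a placeholder for whatever package you want to load:

```julia
# Start the raw invalidation/verification log, load something, then stop logging.
# Entries tagged "verify_methods" come from the load-time edge re-verification below.
invalidations = ccall(:jl_debug_method_invalidation, Any, (Cint,), 1)  # returns the log array
@eval using Example   # placeholder: any package load exercises jl_insert_backedges
ccall(:jl_debug_method_invalidation, Any, (Cint,), 0)                  # stop logging
@show length(invalidations)
```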
--- src/staticdata.c | 5 +- src/staticdata_utils.c | 178 +++++++++++++++++++++++------------------ 2 files changed, 103 insertions(+), 80 deletions(-) diff --git a/src/staticdata.c b/src/staticdata.c index decc0ce6570aa..6b225d3808c8b 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -4034,12 +4034,13 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i // allocate a world for the new methods, and insert them there, invalidating content as needed size_t world = jl_atomic_load_relaxed(&jl_world_counter) + 1; jl_activate_methods(extext_methods, internal_methods, world); + // TODO: inject new_ext_cis into caches here, so the system can see them immediately as potential candidates (before validation) // allow users to start running in this updated world jl_atomic_store_release(&jl_world_counter, world); - // but one of those immediate users is going to be our cache updates - jl_insert_backedges((jl_array_t*)edges, (jl_array_t*)new_ext_cis, world); // restore external backedges (needs to be last) // now permit more methods to be added again JL_UNLOCK(&world_counter_lock); + // but one of those immediate users is going to be our cache insertions + jl_insert_backedges((jl_array_t*)edges, (jl_array_t*)new_ext_cis); // restore existing caches (needs to be last) // reinit ccallables jl_reinit_ccallable(&ccallable_list, base, pkgimage_handle); arraylist_free(&ccallable_list); diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index b69c1edb5429b..ba6f95269838b 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -751,56 +751,58 @@ static void jl_copy_roots(jl_array_t *method_roots_list, uint64_t key) } } -static size_t verify_invokesig(jl_value_t *invokesig, jl_method_t *expected, size_t minworld) +static void verify_invokesig(jl_value_t *invokesig, jl_method_t *expected, size_t world, size_t *minworld, size_t *maxworld) { assert(jl_is_type(invokesig)); assert(jl_is_method(expected)); - size_t min_valid = 0; - size_t max_valid = ~(size_t)0; if (jl_egal(invokesig, expected->sig)) { // the invoke match is `expected` for `expected->sig`, unless `expected` is invalid - if (jl_atomic_load_relaxed(&expected->deleted_world) < max_valid) - max_valid = 0; + *minworld = jl_atomic_load_relaxed(&expected->primary_world); + *maxworld = jl_atomic_load_relaxed(&expected->deleted_world); + assert(*minworld <= world); + if (*maxworld < world) + *maxworld = 0; } else { + *minworld = 1; + *maxworld = ~(size_t)0; jl_methtable_t *mt = jl_method_get_table(expected); if ((jl_value_t*)mt == jl_nothing) { - max_valid = 0; + *maxworld = 0; } else { - jl_value_t *matches = jl_gf_invoke_lookup_worlds(invokesig, (jl_value_t*)mt, minworld, &min_valid, &max_valid); + jl_value_t *matches = jl_gf_invoke_lookup_worlds(invokesig, (jl_value_t*)mt, world, minworld, maxworld); if (matches == jl_nothing) { - max_valid = 0; + *maxworld = 0; } else { if (((jl_method_match_t*)matches)->method != expected) { - max_valid = 0; + *maxworld = 0; } } } } - return max_valid; } -static size_t verify_call(jl_value_t *sig, jl_svec_t *expecteds, size_t i, size_t n, size_t minworld, jl_value_t **matches JL_REQUIRE_ROOTED_SLOT) +static void verify_call(jl_value_t *sig, jl_svec_t *expecteds, size_t i, size_t n, size_t world, size_t *minworld, size_t *maxworld, jl_value_t **matches JL_REQUIRE_ROOTED_SLOT) { // verify that these edges intersect with the same methods as before - size_t min_valid = 0; - size_t max_valid = ~(size_t)0; + *minworld = 1; + *maxworld = ~(size_t)0; int ambig = 0; // 
TODO: possibly need to included ambiguities too (for the optimizer correctness)? jl_value_t *result = jl_matching_methods((jl_tupletype_t*)sig, jl_nothing, _jl_debug_method_invalidation ? INT32_MAX : n, - 0, minworld, &min_valid, &max_valid, &ambig); + 0, world, minworld, maxworld, &ambig); *matches = result; if (result == jl_nothing) { - max_valid = 0; + *maxworld = 0; } else { // setdiff!(result, expected) size_t j, k, ins = 0; if (jl_array_nrows(result) != n) { - max_valid = 0; + *maxworld = 0; } for (k = 0; k < jl_array_nrows(result); k++) { jl_method_t *match = ((jl_method_match_t*)jl_array_ptr_ref(result, k))->method; @@ -822,29 +824,33 @@ static size_t verify_call(jl_value_t *sig, jl_svec_t *expecteds, size_t i, size_ // intersection has a new method or a method was // deleted--this is now probably no good, just invalidate // everything about it now - max_valid = 0; + *maxworld = 0; if (!_jl_debug_method_invalidation) break; jl_array_ptr_set(result, ins++, match); } } - if (max_valid != ~(size_t)0 && _jl_debug_method_invalidation) + if (*maxworld != ~(size_t)0 && _jl_debug_method_invalidation) jl_array_del_end((jl_array_t*)result, jl_array_nrows(result) - ins); } - return max_valid; } // Test all edges relevant to a method: //// Visit the entire call graph, starting from edges[idx] to determine if that method is valid //// Implements Tarjan's SCC (strongly connected components) algorithm, simplified to remove the count variable //// and slightly modified with an early termination option once the computation reaches its minimum -static int jl_verify_method(jl_code_instance_t *codeinst, size_t minworld, size_t *maxworld, arraylist_t *stack, htable_t *visiting) +static int jl_verify_method(jl_code_instance_t *codeinst, size_t *minworld, size_t *maxworld, arraylist_t *stack, htable_t *visiting) { + size_t world = jl_atomic_load_relaxed(&codeinst->min_world); size_t max_valid2 = jl_atomic_load_relaxed(&codeinst->max_world); if (max_valid2 != WORLD_AGE_REVALIDATION_SENTINEL) { + *minworld = world; *maxworld = max_valid2; return 0; } + *minworld = 1; + size_t current_world = jl_atomic_load_relaxed(&jl_world_counter); + *maxworld = current_world; assert(jl_is_method_instance(codeinst->def) && jl_is_method(codeinst->def->def.method)); void **bp = ptrhash_bp(visiting, codeinst); if (*bp != HT_NOTFOUND) @@ -862,6 +868,7 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t minworld, size_ // verify current edges for (size_t j = 0; j < jl_svec_len(callees); ) { jl_value_t *edge = jl_svecref(callees, j); + size_t min_valid2; size_t max_valid2; assert(!jl_is_method(edge)); // `Method`-edge isn't allowed for the optimized one-edge format if (jl_is_code_instance(edge)) @@ -869,14 +876,14 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t minworld, size_ if (jl_is_method_instance(edge)) { jl_method_instance_t *mi = (jl_method_instance_t*)edge; sig = jl_type_intersection(mi->def.method->sig, (jl_value_t*)mi->specTypes); // TODO: ?? 
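/* the intersection above reconstructs the tuple type this single-MethodInstance
   edge was recorded under; verify_call below re-runs method lookup for that
   signature in `world` and reports, via min_valid2/max_valid2, the world range
   over which the set of matches is unchanged (max_valid2 == 0 if it changed) */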
- max_valid2 = verify_call(sig, callees, j, 1, minworld, &matches); + verify_call(sig, callees, j, 1, world, &min_valid2, &max_valid2, &matches); sig = NULL; j += 1; } else if (jl_is_long(edge)) { jl_value_t *sig = jl_svecref(callees, j + 1); size_t nedges = jl_unbox_long(edge); - max_valid2 = verify_call(sig, callees, j + 2, nedges, minworld, &matches); + verify_call(sig, callees, j + 2, nedges, world, &min_valid2, &max_valid2, &matches); j += 2 + nedges; edge = sig; } @@ -896,9 +903,11 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t minworld, size_ assert(jl_is_method(callee)); meth = (jl_method_t*)callee; } - max_valid2 = verify_invokesig(edge, meth, minworld); + verify_invokesig(edge, meth, world, &min_valid2, &max_valid2); j += 2; } + if (*minworld < min_valid2) + *minworld = min_valid2; if (*maxworld > max_valid2) *maxworld = max_valid2; if (max_valid2 != ~(size_t)0 && _jl_debug_method_invalidation) { @@ -917,14 +926,19 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t minworld, size_ // verify recursive edges (if valid, or debugging) size_t cycle = depth; jl_code_instance_t *cause = codeinst; - if (*maxworld == ~(size_t)0 || _jl_debug_method_invalidation) { + if (*maxworld != 0 || _jl_debug_method_invalidation) { for (size_t j = 0; j < jl_svec_len(callees); j++) { jl_value_t *edge = jl_svecref(callees, j); if (!jl_is_code_instance(edge)) continue; jl_code_instance_t *callee = (jl_code_instance_t*)edge; - size_t max_valid2 = ~(size_t)0; - size_t child_cycle = jl_verify_method(callee, minworld, &max_valid2, stack, visiting); + size_t min_valid2; + size_t max_valid2; + size_t child_cycle = jl_verify_method(callee, &min_valid2, &max_valid2, stack, visiting); + if (*minworld < min_valid2) + *minworld = min_valid2; + if (*minworld > max_valid2) + max_valid2 = 0; if (*maxworld > max_valid2) { cause = callee; *maxworld = max_valid2; @@ -947,12 +961,18 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t minworld, size_ // cycle as also having a failed edge. 
while (stack->len >= depth) { jl_code_instance_t *child = (jl_code_instance_t*)arraylist_pop(stack); - if (*maxworld != jl_atomic_load_relaxed(&child->max_world)) - jl_atomic_store_relaxed(&child->max_world, *maxworld); + if (jl_atomic_load_relaxed(&jl_n_threads) == 1) { + // a different thread might simultaneously come to a different, but equally valid, alternative result + assert(jl_atomic_load_relaxed(&child->max_world) == WORLD_AGE_REVALIDATION_SENTINEL); + assert(*minworld <= jl_atomic_load_relaxed(&child->min_world)); + } + if (*maxworld != 0) + jl_atomic_store_relaxed(&child->min_world, *minworld); + jl_atomic_store_relaxed(&child->max_world, *maxworld); void **bp = ptrhash_bp(visiting, codeinst); assert(*bp == (char*)HT_NOTFOUND + stack->len + 1); *bp = HT_NOTFOUND; - if (_jl_debug_method_invalidation && *maxworld != ~(size_t)0) { + if (_jl_debug_method_invalidation && *maxworld < current_world) { jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)child); loctag = jl_cstr_to_string("verify_methods"); JL_GC_PUSH1(&loctag); @@ -966,26 +986,30 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t minworld, size_ return 0; } -static size_t jl_verify_method_graph(jl_code_instance_t *codeinst, size_t minworld, arraylist_t *stack, htable_t *visiting) +static void jl_verify_method_graph(jl_code_instance_t *codeinst, arraylist_t *stack, htable_t *visiting) { + size_t minworld; + size_t maxworld; assert(stack->len == 0); for (size_t i = 0, hsz = visiting->size; i < hsz; i++) assert(visiting->table[i] == HT_NOTFOUND); - size_t maxworld = ~(size_t)0; - int child_cycle = jl_verify_method(codeinst, minworld, &maxworld, stack, visiting); + int child_cycle = jl_verify_method(codeinst, &minworld, &maxworld, stack, visiting); assert(child_cycle == 0); (void)child_cycle; assert(stack->len == 0); for (size_t i = 0, hsz = visiting->size / 2; i < hsz; i++) { assert(visiting->table[2 * i + 1] == HT_NOTFOUND); visiting->table[2 * i] = HT_NOTFOUND; } - return maxworld; + if (jl_atomic_load_relaxed(&jl_n_threads) == 1) { // a different thread might simultaneously come to a different, but equally valid, alternative result + assert(maxworld == 0 || jl_atomic_load_relaxed(&codeinst->min_world) == minworld); + assert(jl_atomic_load_relaxed(&codeinst->max_world) == maxworld); + } } // Restore backedges to external targets // `edges` = [caller1, ...], the list of worklist-owned code instances internally // `ext_ci_list` = [caller1, ...], the list of worklist-owned code instances externally -static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_ci_list, size_t minworld) +static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_ci_list) { // determine which CodeInstance objects are still valid in our image // to enable any applicable new codes @@ -1001,53 +1025,51 @@ static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_ci_list, size jl_code_instance_t *codeinst = (jl_code_instance_t*)jl_array_ptr_ref(edges, i); jl_svec_t *callees = jl_atomic_load_relaxed(&codeinst->edges); jl_method_instance_t *caller = codeinst->def; - if (jl_atomic_load_relaxed(&codeinst->min_world) != minworld) { - if (external && jl_atomic_load_relaxed(&codeinst->max_world) != WORLD_AGE_REVALIDATION_SENTINEL) { - assert(jl_atomic_load_relaxed(&codeinst->min_world) == 1); - assert(jl_atomic_load_relaxed(&codeinst->max_world) == ~(size_t)0); - } - else { - continue; - } - } - size_t maxvalid = jl_verify_method_graph(codeinst, minworld, &stack, &visiting); - 
assert(jl_atomic_load_relaxed(&codeinst->max_world) == maxvalid); - if (maxvalid == ~(size_t)0) { - // if this callee is still valid, add all the backedges - for (size_t j = 0; j < jl_svec_len(callees); ) { - jl_value_t *edge = jl_svecref(callees, j); - if (jl_is_long(edge)) { - j += 2; // skip over signature and count but not methods - continue; - } - else if (jl_is_method(edge)) { - j += 1; - continue; - } - if (jl_is_code_instance(edge)) - edge = (jl_value_t*)((jl_code_instance_t*)edge)->def; - if (jl_is_method_instance(edge)) { - jl_method_instance_add_backedge((jl_method_instance_t*)edge, NULL, codeinst); - j += 1; - } - else if (jl_is_mtable(edge)) { - jl_methtable_t *mt = (jl_methtable_t*)edge; - jl_value_t *sig = jl_svecref(callees, j + 1); - jl_method_table_add_backedge(mt, sig, codeinst); - j += 2; - } - else { - jl_value_t *callee = jl_svecref(callees, j + 1); - if (jl_is_code_instance(callee)) - callee = (jl_value_t*)((jl_code_instance_t*)callee)->def; - else if (jl_is_method(callee)) { - j += 2; + jl_verify_method_graph(codeinst, &stack, &visiting); + size_t minvalid = jl_atomic_load_relaxed(&codeinst->min_world); + size_t maxvalid = jl_atomic_load_relaxed(&codeinst->max_world); + if (maxvalid >= minvalid) { + if (jl_atomic_load_relaxed(&jl_world_counter) == maxvalid) { + // if this callee is still valid, add all the backedges + for (size_t j = 0; j < jl_svec_len(callees); ) { + jl_value_t *edge = jl_svecref(callees, j); + if (jl_is_long(edge)) { + j += 2; // skip over signature and count but not methods continue; } - jl_method_instance_add_backedge((jl_method_instance_t*)callee, edge, codeinst); - j += 2; + else if (jl_is_method(edge)) { + j += 1; + continue; + } + if (jl_is_code_instance(edge)) + edge = (jl_value_t*)((jl_code_instance_t*)edge)->def; + if (jl_is_method_instance(edge)) { + jl_method_instance_add_backedge((jl_method_instance_t*)edge, NULL, codeinst); + j += 1; + } + else if (jl_is_mtable(edge)) { + jl_methtable_t *mt = (jl_methtable_t*)edge; + jl_value_t *sig = jl_svecref(callees, j + 1); + jl_method_table_add_backedge(mt, sig, codeinst); + j += 2; + } + else { + jl_value_t *callee = jl_svecref(callees, j + 1); + if (jl_is_code_instance(callee)) + callee = (jl_value_t*)((jl_code_instance_t*)callee)->def; + else if (jl_is_method(callee)) { + j += 2; + continue; + } + jl_method_instance_add_backedge((jl_method_instance_t*)callee, edge, codeinst); + j += 2; + } } } + if (jl_atomic_load_relaxed(&jl_world_counter) == maxvalid) { + maxvalid = ~(size_t)0; + jl_atomic_store_relaxed(&codeinst->max_world, maxvalid); + } if (external) { jl_value_t *owner = codeinst->owner; JL_GC_PROMISE_ROOTED(owner); @@ -1055,7 +1077,7 @@ static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_ci_list, size // See #53586, #53109 assert(jl_atomic_load_relaxed(&codeinst->inferred)); - if (jl_rettype_inferred(owner, caller, minworld, maxvalid) != jl_nothing) { + if (jl_rettype_inferred(owner, caller, minvalid, maxvalid) != jl_nothing) { // We already got a code instance for this world age range from somewhere else - we don't need // this one. } From 9e14bf8fbffd1570d719dee1f23df9a1ac114ff7 Mon Sep 17 00:00:00 2001 From: Diogo Netto <61364108+d-netto@users.noreply.github.com> Date: Thu, 7 Nov 2024 06:07:26 -0300 Subject: [PATCH 389/537] count bytes allocated through malloc more precisely (#55223) Should make the accounting for memory allocated through malloc a bit more accurate. 
Should also simplify the accounting code by eliminating the use of `jl_gc_count_freed` in `jl_genericmemory_to_string`. --- src/gc-common.c | 91 ++++++++++------------------- src/gc-common.h | 8 +++ src/gc-stock.c | 122 ++++++++++++++++++++++++--------------- src/genericmemory.c | 5 +- src/julia_internal.h | 2 +- test/compiler/codegen.jl | 2 +- 6 files changed, 117 insertions(+), 113 deletions(-) diff --git a/src/gc-common.c b/src/gc-common.c index b552afb8228f0..c751b54f059f5 100644 --- a/src/gc-common.c +++ b/src/gc-common.c @@ -6,9 +6,6 @@ #include "julia_gcext.h" #include "julia_assert.h" #include "threading.h" -#ifdef __GLIBC__ -#include // for malloc_trim -#endif #ifdef __cplusplus extern "C" { @@ -120,6 +117,37 @@ JL_DLLEXPORT void jl_gc_set_cb_notify_gc_pressure(jl_gc_cb_notify_gc_pressure_t jl_gc_deregister_callback(&gc_cblist_notify_gc_pressure, (jl_gc_cb_func_t)cb); } +// =========================================================================== // +// malloc wrappers, aligned allocation +// =========================================================================== // + +#if defined(_OS_WINDOWS_) +// helper function based partly on wine msvcrt80+ heap.c +// but with several fixes to improve the correctness of the computation and remove unnecessary parameters +#define SAVED_PTR(x) ((void *)((DWORD_PTR)((char *)x - sizeof(void *)) & \ + ~(sizeof(void *) - 1))) +static size_t _aligned_msize(void *p) +{ + void *alloc_ptr = *(void**)SAVED_PTR(p); + return _msize(alloc_ptr) - ((char*)p - (char*)alloc_ptr); +} +#undef SAVED_PTR +#endif + +size_t memory_block_usable_size(void *p, int isaligned) JL_NOTSAFEPOINT +{ +#if defined(_OS_WINDOWS_) + if (isaligned) + return _aligned_msize(p); + else + return _msize(p); +#elif defined(_OS_DARWIN_) + return malloc_size(p); +#else + return malloc_usable_size(p); +#endif +} + // =========================================================================== // // Finalization // =========================================================================== // @@ -505,63 +533,6 @@ JL_DLLEXPORT jl_value_t *jl_gc_allocobj(size_t sz) return jl_gc_alloc(ptls, sz, NULL); } -// allocation wrappers that save the size of allocations, to allow using -// jl_gc_counted_* functions with a libc-compatible API. 
- -JL_DLLEXPORT void *jl_malloc(size_t sz) -{ - int64_t *p = (int64_t *)jl_gc_counted_malloc(sz + JL_SMALL_BYTE_ALIGNMENT); - if (p == NULL) - return NULL; - p[0] = sz; - return (void *)(p + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 -} - -//_unchecked_calloc does not check for potential overflow of nm*sz -STATIC_INLINE void *_unchecked_calloc(size_t nm, size_t sz) { - size_t nmsz = nm*sz; - int64_t *p = (int64_t *)jl_gc_counted_calloc(nmsz + JL_SMALL_BYTE_ALIGNMENT, 1); - if (p == NULL) - return NULL; - p[0] = nmsz; - return (void *)(p + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 -} - -JL_DLLEXPORT void *jl_calloc(size_t nm, size_t sz) -{ - if (nm > SSIZE_MAX/sz - JL_SMALL_BYTE_ALIGNMENT) - return NULL; - return _unchecked_calloc(nm, sz); -} - -JL_DLLEXPORT void jl_free(void *p) -{ - if (p != NULL) { - int64_t *pp = (int64_t *)p - 2; - size_t sz = pp[0]; - jl_gc_counted_free_with_size(pp, sz + JL_SMALL_BYTE_ALIGNMENT); - } -} - -JL_DLLEXPORT void *jl_realloc(void *p, size_t sz) -{ - int64_t *pp; - size_t szold; - if (p == NULL) { - pp = NULL; - szold = 0; - } - else { - pp = (int64_t *)p - 2; - szold = pp[0] + JL_SMALL_BYTE_ALIGNMENT; - } - int64_t *pnew = (int64_t *)jl_gc_counted_realloc_with_old_size(pp, szold, sz + JL_SMALL_BYTE_ALIGNMENT); - if (pnew == NULL) - return NULL; - pnew[0] = sz; - return (void *)(pnew + 2); // assumes JL_SMALL_BYTE_ALIGNMENT == 16 -} - // allocator entry points JL_DLLEXPORT jl_value_t *(jl_gc_alloc)(jl_ptls_t ptls, size_t sz, void *ty) diff --git a/src/gc-common.h b/src/gc-common.h index 32b7470b13a58..3007151009f7d 100644 --- a/src/gc-common.h +++ b/src/gc-common.h @@ -12,6 +12,14 @@ #endif #endif +#include + +#if defined(_OS_DARWIN_) +#include +#else +#include // for malloc_trim +#endif + #ifdef __cplusplus extern "C" { #endif diff --git a/src/gc-stock.c b/src/gc-stock.c index 3a2027f9190a7..86dbea3b9a17a 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -9,9 +9,6 @@ #include "julia_atomics.h" #include "julia_gcext.h" #include "julia_assert.h" -#ifdef __GLIBC__ -#include // for malloc_trim -#endif #ifdef __cplusplus extern "C" { @@ -569,11 +566,6 @@ void jl_gc_count_allocd(size_t sz) JL_NOTSAFEPOINT jl_batch_accum_heap_size(ptls, sz); } -void jl_gc_count_freed(size_t sz) JL_NOTSAFEPOINT -{ - jl_batch_accum_free_size(jl_current_task->ptls, sz); -} - // Only safe to update the heap inside the GC static void combine_thread_gc_counts(jl_gc_num_t *dest, int update_heap) JL_NOTSAFEPOINT { @@ -643,13 +635,15 @@ static void jl_gc_free_memory(jl_value_t *v, int isaligned) JL_NOTSAFEPOINT jl_genericmemory_t *m = (jl_genericmemory_t*)v; assert(jl_genericmemory_how(m) == 1 || jl_genericmemory_how(m) == 2); char *d = (char*)m->ptr; + size_t freed_bytes = memory_block_usable_size(d, isaligned); + assert(freed_bytes != 0); if (isaligned) jl_free_aligned(d); else free(d); jl_atomic_store_relaxed(&gc_heap_stats.heap_size, - jl_atomic_load_relaxed(&gc_heap_stats.heap_size) - jl_genericmemory_nbytes(m)); - gc_num.freed += jl_genericmemory_nbytes(m); + jl_atomic_load_relaxed(&gc_heap_stats.heap_size) - freed_bytes); + gc_num.freed += freed_bytes; gc_num.freecall++; } @@ -3652,14 +3646,69 @@ JL_DLLEXPORT uint64_t jl_gc_get_max_memory(void) return max_total_memory; } -// allocation wrappers that track allocation and let collection run +// allocation wrappers that add to gc pressure + +JL_DLLEXPORT void *jl_malloc(size_t sz) +{ + return jl_gc_counted_malloc(sz); +} + +//_unchecked_calloc does not check for potential overflow of nm*sz +STATIC_INLINE void 
*_unchecked_calloc(size_t nm, size_t sz) { + size_t nmsz = nm*sz; + return jl_gc_counted_calloc(nmsz, 1); +} + +JL_DLLEXPORT void *jl_calloc(size_t nm, size_t sz) +{ + if (nm > SSIZE_MAX/sz) + return NULL; + return _unchecked_calloc(nm, sz); +} + +JL_DLLEXPORT void jl_free(void *p) +{ + if (p != NULL) { + size_t sz = memory_block_usable_size(p, 0); + free(p); + jl_task_t *ct = jl_get_current_task(); + if (ct != NULL) + jl_batch_accum_free_size(ct->ptls, sz); + } +} + +JL_DLLEXPORT void *jl_realloc(void *p, size_t sz) +{ + size_t old = p ? memory_block_usable_size(p, 0) : 0; + void *data = realloc(p, sz); + jl_task_t *ct = jl_get_current_task(); + if (data != NULL && ct != NULL) { + sz = memory_block_usable_size(data, 0); + jl_ptls_t ptls = ct->ptls; + maybe_collect(ptls); + if (!(sz < old)) + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + (sz - old)); + jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.realloc, + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.realloc) + 1); + + int64_t diff = sz - old; + if (diff < 0) { + jl_batch_accum_free_size(ptls, -diff); + } + else { + jl_batch_accum_heap_size(ptls, diff); + } + } + return data; +} JL_DLLEXPORT void *jl_gc_counted_malloc(size_t sz) { - jl_gcframe_t **pgcstack = jl_get_pgcstack(); - jl_task_t *ct = jl_current_task; void *data = malloc(sz); - if (data != NULL && pgcstack != NULL && ct->world_age) { + jl_task_t *ct = jl_get_current_task(); + if (data != NULL && ct != NULL) { + sz = memory_block_usable_size(data, 0); jl_ptls_t ptls = ct->ptls; maybe_collect(ptls); jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, @@ -3673,54 +3722,29 @@ JL_DLLEXPORT void *jl_gc_counted_malloc(size_t sz) JL_DLLEXPORT void *jl_gc_counted_calloc(size_t nm, size_t sz) { - jl_gcframe_t **pgcstack = jl_get_pgcstack(); - jl_task_t *ct = jl_current_task; void *data = calloc(nm, sz); - if (data != NULL && pgcstack != NULL && ct->world_age) { + jl_task_t *ct = jl_get_current_task(); + if (data != NULL && ct != NULL) { + sz = memory_block_usable_size(data, 0); jl_ptls_t ptls = ct->ptls; maybe_collect(ptls); jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + nm*sz); + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + sz); jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.malloc, jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.malloc) + 1); - jl_batch_accum_heap_size(ptls, sz * nm); + jl_batch_accum_heap_size(ptls, sz); } return data; } JL_DLLEXPORT void jl_gc_counted_free_with_size(void *p, size_t sz) { - jl_gcframe_t **pgcstack = jl_get_pgcstack(); - jl_task_t *ct = jl_current_task; - free(p); - if (pgcstack != NULL && ct->world_age) { - jl_batch_accum_free_size(ct->ptls, sz); - } + return jl_free(p); } JL_DLLEXPORT void *jl_gc_counted_realloc_with_old_size(void *p, size_t old, size_t sz) { - jl_gcframe_t **pgcstack = jl_get_pgcstack(); - jl_task_t *ct = jl_current_task; - void *data = realloc(p, sz); - if (data != NULL && pgcstack != NULL && ct->world_age) { - jl_ptls_t ptls = ct->ptls; - maybe_collect(ptls); - if (!(sz < old)) - jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + (sz - old)); - jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.realloc, - jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.realloc) + 1); - - int64_t diff = sz - old; - if (diff < 0) { - jl_batch_accum_free_size(ptls, -diff); - } - 
else { - jl_batch_accum_heap_size(ptls, diff); - } - } - return data; + return jl_realloc(p, sz); } // allocating blocks for Arrays and Strings @@ -3741,11 +3765,13 @@ JL_DLLEXPORT void *jl_gc_managed_malloc(size_t sz) if (b == NULL) jl_throw(jl_memory_exception); + size_t allocated_bytes = memory_block_usable_size(b, 1); + assert(allocated_bytes >= allocsz); jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.allocd, - jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + allocsz); + jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.allocd) + allocated_bytes); jl_atomic_store_relaxed(&ptls->gc_tls_common.gc_num.malloc, jl_atomic_load_relaxed(&ptls->gc_tls_common.gc_num.malloc) + 1); - jl_batch_accum_heap_size(ptls, allocsz); + jl_batch_accum_heap_size(ptls, allocated_bytes); #ifdef _OS_WINDOWS_ SetLastError(last_error); #endif diff --git a/src/genericmemory.c b/src/genericmemory.c index 0bf2cf46edaae..2b02f021ccdd0 100644 --- a/src/genericmemory.c +++ b/src/genericmemory.c @@ -165,7 +165,8 @@ JL_DLLEXPORT jl_genericmemory_t *jl_ptr_to_genericmemory(jl_value_t *mtype, void if (own_buffer) { int isaligned = 0; // TODO: allow passing memalign'd buffers jl_gc_track_malloced_genericmemory(ct->ptls, m, isaligned); - jl_gc_count_allocd(nel*elsz); + size_t allocated_bytes = memory_block_usable_size(data, isaligned); + jl_gc_count_allocd(allocated_bytes); } return m; } @@ -208,8 +209,6 @@ JL_DLLEXPORT jl_value_t *jl_genericmemory_to_string(jl_genericmemory_t *m, size_ JL_GC_PUSH1(&o); jl_value_t *str = jl_pchar_to_string((const char*)m->ptr, len); JL_GC_POP(); - if (how == 1) // TODO: we might like to early-call jl_gc_free_memory here instead actually, but hopefully `m` will die soon - jl_gc_count_freed(mlength); return str; } // n.b. how == 0 is always pool-allocated, so the freed bytes are computed from the pool not the object diff --git a/src/julia_internal.h b/src/julia_internal.h index c5bac37890042..5eb99be9e333f 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -615,8 +615,8 @@ jl_svec_t *jl_perm_symsvec(size_t n, ...); void jl_gc_track_malloced_genericmemory(jl_ptls_t ptls, jl_genericmemory_t *m, int isaligned) JL_NOTSAFEPOINT; size_t jl_genericmemory_nbytes(jl_genericmemory_t *a) JL_NOTSAFEPOINT; +size_t memory_block_usable_size(void *mem, int isaligned) JL_NOTSAFEPOINT; void jl_gc_count_allocd(size_t sz) JL_NOTSAFEPOINT; -void jl_gc_count_freed(size_t sz) JL_NOTSAFEPOINT; void jl_gc_run_all_finalizers(jl_task_t *ct); void jl_release_task_stack(jl_ptls_t ptls, jl_task_t *task); void jl_gc_add_finalizer_(jl_ptls_t ptls, void *v, void *f) JL_NOTSAFEPOINT; diff --git a/test/compiler/codegen.jl b/test/compiler/codegen.jl index fcb3beb87b5a5..83f4001e616e7 100644 --- a/test/compiler/codegen.jl +++ b/test/compiler/codegen.jl @@ -407,7 +407,7 @@ function g_dict_hash_alloc() end # Warm up f_dict_hash_alloc(); g_dict_hash_alloc(); -@test (@allocated f_dict_hash_alloc()) == (@allocated g_dict_hash_alloc()) +@test abs((@allocated f_dict_hash_alloc()) / (@allocated g_dict_hash_alloc()) - 1) < 0.1 # less that 10% difference # returning an argument shouldn't alloc a new box @noinline f33829(x) = (global called33829 = true; x) From 5848445ea6ab572253768812d999e132d4f85638 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Thu, 7 Nov 2024 11:16:43 -0300 Subject: [PATCH 390/537] Fix external IO loop thead interaction and add function to Base.Experimental to facilitate it's use. Also add a test. 
(#55529) While looking at https://github.com/JuliaLang/julia/issues/55525 I found that the implementation wasn't working correctly. I added it to Base.Experimental so people don't need to handroll their own and am also testing a version of what the issue was hitting. --- base/experimental.jl | 23 ++++++++++++++++++++++ base/task.jl | 5 +++++ src/scheduler.c | 3 ++- src/threading.c | 24 +++++++++++++++++++++++ test/threads.jl | 46 ++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 100 insertions(+), 1 deletion(-) diff --git a/base/experimental.jl b/base/experimental.jl index 982ed5e78aa8c..31238d4015b3b 100644 --- a/base/experimental.jl +++ b/base/experimental.jl @@ -457,6 +457,29 @@ without adding them to the global method table. """ :@MethodTable +""" + Base.Experimental.make_io_thread() + +Create a new thread that will run the Julia IO loop. This can potentially reduce the latency of some +IO operations as they no longer depend on the main thread to run it. This does mean that code that uses +this as implicit synchronization needs to be checked for correctness. +""" +function make_io_thread() + tid = UInt[0] + threadwork = @cfunction function(arg::Ptr{Cvoid}) + current_task().donenotify = Base.ThreadSynchronizer() #TODO: Should this happen by default in adopt thread? + Base.errormonitor(current_task()) # this may not go particularly well if the IO loop is dead, but try anyways + @ccall jl_set_io_loop_tid((Threads.threadid() - 1)::Int16)::Cvoid + wait() # spin uv_run as long as needed + nothing + end Cvoid (Ptr{Cvoid},) + err = @ccall uv_thread_create(tid::Ptr{UInt}, threadwork::Ptr{Cvoid}, C_NULL::Ptr{Cvoid})::Cint + err == 0 || Base.uv_error("uv_thread_create", err) + @ccall uv_thread_detach(tid::Ptr{UInt})::Cint + err == 0 || Base.uv_error("uv_thread_detach", err) + # n.b. this does not wait for the thread to start or to take ownership of the event loop +end + """ Base.Experimental.entrypoint(f, argtypes::Tuple) diff --git a/base/task.jl b/base/task.jl index f3a134f374421..2a922c4b85f24 100644 --- a/base/task.jl +++ b/base/task.jl @@ -849,6 +849,11 @@ function task_done_hook(t::Task) end end +function init_task_lock(t::Task) # Function only called from jl_adopt_thread so foreign tasks have a lock. + if t.donenotify === nothing + t.donenotify = ThreadSynchronizer() + end +end ## scheduler and work queue diff --git a/src/scheduler.c b/src/scheduler.c index fff891d91a813..731a0c5146605 100644 --- a/src/scheduler.c +++ b/src/scheduler.c @@ -437,7 +437,8 @@ JL_DLLEXPORT jl_task_t *jl_task_get_next(jl_value_t *trypoptask, jl_value_t *q, // responsibility, so need to make sure thread 0 will take care // of us. 
if (jl_atomic_load_relaxed(&jl_uv_mutex.owner) == NULL) // aka trylock - wakeup_thread(ct, 0); + jl_wakeup_thread(jl_atomic_load_relaxed(&io_loop_tid)); + } if (uvlock) { int enter_eventloop = may_sleep(ptls); diff --git a/src/threading.c b/src/threading.c index 50944a24eb29b..42174830d9b43 100644 --- a/src/threading.c +++ b/src/threading.c @@ -401,6 +401,28 @@ jl_ptls_t jl_init_threadtls(int16_t tid) return ptls; } +static _Atomic(jl_function_t*) init_task_lock_func JL_GLOBALLY_ROOTED = NULL; + +static void jl_init_task_lock(jl_task_t *ct) +{ + jl_function_t *done = jl_atomic_load_relaxed(&init_task_lock_func); + if (done == NULL) { + done = (jl_function_t*)jl_get_global(jl_base_module, jl_symbol("init_task_lock")); + if (done != NULL) + jl_atomic_store_release(&init_task_lock_func, done); + } + if (done != NULL) { + jl_value_t *args[2] = {done, (jl_value_t*)ct}; + JL_TRY { + jl_apply(args, 2); + } + JL_CATCH { + jl_no_exc_handler(jl_current_exception(ct), ct); + } + } +} + + JL_DLLEXPORT jl_gcframe_t **jl_adopt_thread(void) { // `jl_init_threadtls` puts us in a GC unsafe region, so ensure GC isn't running. @@ -423,6 +445,8 @@ JL_DLLEXPORT jl_gcframe_t **jl_adopt_thread(void) JL_GC_PROMISE_ROOTED(ct); uv_random(NULL, NULL, &ct->rngState, sizeof(ct->rngState), 0, NULL); jl_atomic_fetch_add(&jl_gc_disable_counter, -1); + ct->world_age = jl_get_world_counter(); // root_task sets world_age to 1 + jl_init_task_lock(ct); return &ct->gcstack; } diff --git a/test/threads.jl b/test/threads.jl index d5a801c1a6a1c..4d928ca05da16 100644 --- a/test/threads.jl +++ b/test/threads.jl @@ -360,6 +360,52 @@ end end end +@testset "io_thread" begin + function io_thread_test() + # This test creates a thread that does IO and then blocks the main julia thread + # This test hangs if you don't spawn an IO thread. + # It hanging or not is technically a race but I haven't seen julia win that race yet. + cmd = """ + Base.Experimental.make_io_thread() + function callback()::Cvoid + println("Running a command") + run(`echo 42`) + return + end + function call_on_thread(callback::Ptr{Nothing}) + tid = UInt[0] + threadwork = @cfunction function(arg::Ptr{Cvoid}) + current_task().donenotify = Base.ThreadSynchronizer() + Base.errormonitor(current_task()) + println("Calling Julia from thread") + ccall(arg, Cvoid, ()) + nothing + end Cvoid (Ptr{Cvoid},) + err = @ccall uv_thread_create(tid::Ptr{UInt}, threadwork::Ptr{Cvoid}, callback::Ptr{Cvoid})::Cint + err == 0 || Base.uv_error("uv_thread_create", err) + gc_state = @ccall jl_gc_safe_enter()::Int8 + err = @ccall uv_thread_join(tid::Ptr{UInt})::Cint + @ccall jl_gc_safe_leave(gc_state::Int8)::Cvoid + err == 0 || Base.uv_error("uv_thread_join", err) + return + end + function main() + callback_ptr = @cfunction(callback, Cvoid, ()) + call_on_thread(callback_ptr) + println("Done") + end + main() + + """ + proc = run(pipeline(`$(Base.julia_cmd()) -e $cmd`), wait=false) + t = Timer(60) do t; kill(proc); end; + @test success(proc) + close(t) + return true + end + @test io_thread_test() +end + # Make sure default number of BLAS threads respects CPU affinity: issue #55572. 
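(Editorial sketch, not part of the diff: the simplest way to exercise the new API introduced by this patch. `Base.Experimental.make_io_thread` comes from the hunk above; the surrounding calls are illustrative and assume a build that includes this commit.)

```julia
# Spawn a dedicated thread to drive the libuv event loop, then perform IO
# that no longer depends on the main thread being responsive.
Base.Experimental.make_io_thread()
t = Threads.@spawn run(`echo 42`)  # IO can make progress even while thread 1 is busy
wait(t)
```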
@testset "LinearAlgebra number of default threads" begin if AFFINITY_SUPPORTED From 910bf7e26bd38e1d4f4c83a02d4d551ec7e07299 Mon Sep 17 00:00:00 2001 From: Eric Hanson <5846501+ericphanson@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:38:11 +0100 Subject: [PATCH 391/537] [REPL] raise default implicit `show` limit to 1MiB (#56297) https://github.com/JuliaLang/julia/pull/53959#issuecomment-2426946640 I would like to understand more where these issues are coming from; it would be easy to exempt some types from Base or Core with ```julia REPL.show_limited(io::IO, mime::MIME, x::SomeType) = show(io, mime, x) ``` but I'm not sure which are causing problems in practice. But meanwhile I think raising the limit makes sense. --- stdlib/REPL/src/REPL.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl index ac791327e2d75..e3a58ec362d89 100644 --- a/stdlib/REPL/src/REPL.jl +++ b/stdlib/REPL/src/REPL.jl @@ -484,7 +484,7 @@ function repl_backend_loop(backend::REPLBackend, get_module::Function) return nothing end -SHOW_MAXIMUM_BYTES::Int = 20480 +SHOW_MAXIMUM_BYTES::Int = 1_048_576 # Limit printing during REPL display mutable struct LimitIO{IO_t <: IO} <: IO From 664479622d597606534db520a7ee0142c8c526c8 Mon Sep 17 00:00:00 2001 From: Tianyi Pu <44583944+putianyi889@users.noreply.github.com> Date: Thu, 7 Nov 2024 17:29:06 +0000 Subject: [PATCH 392/537] Add a docstring for `Base.divgcd` (#53769) Co-authored-by: Sukera <11753998+Seelengrab@users.noreply.github.com> --- base/rational.jl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/base/rational.jl b/base/rational.jl index b4e450fd73abc..69d39770b2095 100644 --- a/base/rational.jl +++ b/base/rational.jl @@ -49,6 +49,13 @@ Rational(n::T, d::T) where {T<:Integer} = Rational{T}(n, d) Rational(n::Integer, d::Integer) = Rational(promote(n, d)...) Rational(n::Integer) = unsafe_rational(n, one(n)) +""" + divgcd(x::Integer, y::Integer) + +Returns `(x÷gcd(x,y), y÷gcd(x,y))`. + +See also [`div`](@ref), [`gcd`](@ref). +""" function divgcd(x::TX, y::TY)::Tuple{TX, TY} where {TX<:Integer, TY<:Integer} g = gcd(uabs(x), uabs(y)) div(x,g), div(y,g) From 8593792f8f5212d5513fe0829253664c3ceedd2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Thu, 7 Nov 2024 19:11:48 +0000 Subject: [PATCH 393/537] Fix compilation warning on aarch64-linux (#56480) This fixes the warning: ``` /cache/build/default-aws-aarch64-ci-1-3/julialang/julia-master/src/stackwalk.c: In function 'jl_simulate_longjmp': /cache/build/default-aws-aarch64-ci-1-3/julialang/julia-master/src/stackwalk.c:995:22: warning: initialization of 'mcontext_t *' {aka 'struct sigcontext *'} from incompatible pointer type 'struct unw_sigcontext *' [-Wincompatible-pointer-types] 995 | mcontext_t *mc = &c->uc_mcontext; | ^ ``` This is the last remaining warning during compilation on aarch64-linux. --- src/stackwalk.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/stackwalk.c b/src/stackwalk.c index 251e408c7fd2d..d6fca2c909f23 100644 --- a/src/stackwalk.c +++ b/src/stackwalk.c @@ -992,7 +992,13 @@ int jl_simulate_longjmp(jl_jmp_buf mctx, bt_context_t *c) JL_NOTSAFEPOINT #endif #elif defined(_OS_LINUX_) && defined(__GLIBC__) __jmp_buf *_ctx = &mctx->__jmpbuf; + #if defined(_CPU_AARCH64_) + // Only on aarch64-linux libunwind uses a different struct than system's one: + // . 
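(Editorial sketch, not part of any diff hunk: a quick illustration of the behaviour described by the new `Base.divgcd` docstring in patch 392 above. `divgcd` is an internal helper, so it is called with its `Base.` prefix.)

```julia
# divgcd returns both arguments divided by their common (unsigned) gcd.
Base.divgcd(6, 9)    # (2, 3), since gcd(6, 9) == 3
Base.divgcd(-4, 6)   # (-2, 3); signs are preserved, the gcd uses absolute values
```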
+ struct unw_sigcontext *mc = &c->uc_mcontext; + #else mcontext_t *mc = &c->uc_mcontext; + #endif #if defined(_CPU_X86_) // https://github.com/bminor/glibc/blame/master/sysdeps/i386/__longjmp.S // https://github.com/bminor/glibc/blame/master/sysdeps/i386/jmpbuf-offsets.h From cd7250da83837d3e72f8b82aff73418b01bd425a Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 31 Oct 2024 23:56:04 +0000 Subject: [PATCH 394/537] Make Compiler an independent package This is a further extension to #56128 to make the compiler into a proper independent, useable outside of `Base` as `using Compiler` in the same way that `JuliaSyntax` works already. InteractiveUtils gains a new `@activate` macro that can be used to activate an outside Compiler package, either for reflection only or for codegen also. --- Compiler/Project.toml | 3 + Compiler/src/Compiler.jl | 194 ++++++++++++++++++ .../src}/abstractinterpretation.jl | 8 +- .../src}/abstractlattice.jl | 0 Compiler/src/bootstrap.jl | 66 ++++++ {base/compiler => Compiler/src}/cicache.jl | 0 {base/compiler => Compiler/src}/effects.jl | 0 .../src}/inferenceresult.jl | 0 .../src}/inferencestate.jl | 0 .../compiler => Compiler/src}/methodtable.jl | 0 {base/compiler => Compiler/src}/optimize.jl | 16 +- Compiler/src/reflection_interface.jl | 58 ++++++ {base/compiler => Compiler/src}/sort.jl | 0 .../ssair/EscapeAnalysis/EscapeAnalysis.jl | 13 +- .../src}/ssair/EscapeAnalysis/disjoint_set.jl | 0 .../src}/ssair/basicblock.jl | 0 .../src}/ssair/domtree.jl | 0 {base/compiler => Compiler/src}/ssair/heap.jl | 0 .../src}/ssair/inlining.jl | 0 {base/compiler => Compiler/src}/ssair/ir.jl | 2 - .../src}/ssair/irinterp.jl | 0 .../compiler => Compiler/src}/ssair/legacy.jl | 0 .../compiler => Compiler/src}/ssair/passes.jl | 0 {base/compiler => Compiler/src}/ssair/show.jl | 128 ++++++------ .../src}/ssair/slot2ssa.jl | 0 .../compiler => Compiler/src}/ssair/tarjan.jl | 0 .../compiler => Compiler/src}/ssair/verify.jl | 0 {base/compiler => Compiler/src}/stmtinfo.jl | 0 {base/compiler => Compiler/src}/tfuncs.jl | 4 +- {base/compiler => Compiler/src}/typeinfer.jl | 2 +- .../compiler => Compiler/src}/typelattice.jl | 80 ++------ {base/compiler => Compiler/src}/typelimits.jl | 2 +- {base/compiler => Compiler/src}/types.jl | 0 {base/compiler => Compiler/src}/typeutils.jl | 0 {base/compiler => Compiler/src}/utilities.jl | 6 - {base/compiler => Compiler/src}/validation.jl | 0 Makefile | 11 +- base/Base.jl | 94 ++------- base/Base_compiler.jl | 50 ++++- base/boot.jl | 1 - base/compiler/bootstrap.jl | 52 ----- base/compiler/compiler.jl | 123 ----------- base/compilerimg.jl | 4 - base/coreir.jl | 54 +++++ base/essentials.jl | 3 +- base/{compiler/parsing.jl => flparse.jl} | 0 base/loading.jl | 129 +++++++----- base/opaque_closure.jl | 11 +- base/reflection.jl | 193 +++++++++-------- base/runtime_internals.jl | 8 +- base/show.jl | 105 +++++----- base/stacktraces.jl | 2 +- base/sysimg.jl | 12 +- contrib/generate_precompile.jl | 1 + doc/src/devdocs/EscapeAnalysis.md | 12 +- src/module.c | 12 +- src/precompile_utils.c | 58 +++--- src/toplevel.c | 15 ++ .../InteractiveUtils/src/InteractiveUtils.jl | 3 +- stdlib/InteractiveUtils/src/codeview.jl | 4 +- stdlib/InteractiveUtils/src/macros.jl | 61 ++++++ stdlib/Makefile | 2 - stdlib/REPL/src/REPLCompletions.jl | 3 + sysimage.mk | 5 +- test/compiler/inference.jl | 63 +++--- 65 files changed, 966 insertions(+), 707 deletions(-) create mode 100644 Compiler/Project.toml create mode 100644 Compiler/src/Compiler.jl rename {base/compiler => 
Compiler/src}/abstractinterpretation.jl (99%) rename {base/compiler => Compiler/src}/abstractlattice.jl (100%) create mode 100644 Compiler/src/bootstrap.jl rename {base/compiler => Compiler/src}/cicache.jl (100%) rename {base/compiler => Compiler/src}/effects.jl (100%) rename {base/compiler => Compiler/src}/inferenceresult.jl (100%) rename {base/compiler => Compiler/src}/inferencestate.jl (100%) rename {base/compiler => Compiler/src}/methodtable.jl (100%) rename {base/compiler => Compiler/src}/optimize.jl (99%) create mode 100644 Compiler/src/reflection_interface.jl rename {base/compiler => Compiler/src}/sort.jl (100%) rename {base/compiler => Compiler/src}/ssair/EscapeAnalysis/EscapeAnalysis.jl (99%) rename {base/compiler => Compiler/src}/ssair/EscapeAnalysis/disjoint_set.jl (100%) rename {base/compiler => Compiler/src}/ssair/basicblock.jl (100%) rename {base/compiler => Compiler/src}/ssair/domtree.jl (100%) rename {base/compiler => Compiler/src}/ssair/heap.jl (100%) rename {base/compiler => Compiler/src}/ssair/inlining.jl (100%) rename {base/compiler => Compiler/src}/ssair/ir.jl (99%) rename {base/compiler => Compiler/src}/ssair/irinterp.jl (100%) rename {base/compiler => Compiler/src}/ssair/legacy.jl (100%) rename {base/compiler => Compiler/src}/ssair/passes.jl (100%) rename {base/compiler => Compiler/src}/ssair/show.jl (95%) rename {base/compiler => Compiler/src}/ssair/slot2ssa.jl (100%) rename {base/compiler => Compiler/src}/ssair/tarjan.jl (100%) rename {base/compiler => Compiler/src}/ssair/verify.jl (100%) rename {base/compiler => Compiler/src}/stmtinfo.jl (100%) rename {base/compiler => Compiler/src}/tfuncs.jl (99%) rename {base/compiler => Compiler/src}/typeinfer.jl (99%) rename {base/compiler => Compiler/src}/typelattice.jl (92%) rename {base/compiler => Compiler/src}/typelimits.jl (99%) rename {base/compiler => Compiler/src}/types.jl (100%) rename {base/compiler => Compiler/src}/typeutils.jl (100%) rename {base/compiler => Compiler/src}/utilities.jl (99%) rename {base/compiler => Compiler/src}/validation.jl (100%) delete mode 100644 base/compiler/bootstrap.jl delete mode 100644 base/compiler/compiler.jl delete mode 100644 base/compilerimg.jl create mode 100644 base/coreir.jl rename base/{compiler/parsing.jl => flparse.jl} (100%) diff --git a/Compiler/Project.toml b/Compiler/Project.toml new file mode 100644 index 0000000000000..b933d08db5205 --- /dev/null +++ b/Compiler/Project.toml @@ -0,0 +1,3 @@ +name = "Compiler" +uuid = "807dbc54-b67e-4c79-8afb-eafe4df6f2e1" +version = "0.0.1" diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl new file mode 100644 index 0000000000000..c2c074dc92bbc --- /dev/null +++ b/Compiler/src/Compiler.jl @@ -0,0 +1,194 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +# When generating an incremental precompile file, we first check whether we +# already have a copy of this *exact* code in the system image. If so, we +# simply generates a pkgimage that has the dependency edges we recorded in +# the system image and simply returns that copy of the compiler. If not, +# we proceed to load/precompile this as an ordinary package. 
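(Editorial sketch, not part of the diff: the intended user-facing workflow for the now-independent Compiler package, per the commit message above. `Compiler.activate!` is defined later in this patch in Compiler/src/bootstrap.jl, and `InteractiveUtils.@activate` is the macro the message mentions; treat the exact calls as illustrative.)

```julia
using Compiler                        # load the package copy (or reuse the sysimage copy via the stub below)
Compiler.activate!(reflection=true)   # route Base reflection through this Compiler
# Compiler.activate!(codegen=true)    # optionally also take over code generation
```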
+if isdefined(Base, :generating_output) && Base.generating_output(true) && + Base.samefile(Base._compiler_require_dependencies[1][2], @eval @__FILE__) && + !Base.any_includes_stale( + map(Base.CacheHeaderIncludes, Base._compiler_require_dependencies), + "sysimg", nothing) + + Base.prepare_compiler_stub_image!() + append!(Base._require_dependencies, Base._compiler_require_dependencies) + # There isn't much point in precompiling native code - downstream users will + # specialize their own versions of the compiler code and we don't activate + # the compiler by default anyway, so let's save ourselves some disk space. + ccall(:jl_suppress_precompile, Cvoid, (Cint,), 1) + +else + +@eval baremodule Compiler + +# Needs to match UUID defined in Project.toml +ccall(:jl_set_module_uuid, Cvoid, (Any, NTuple{2, UInt64}), Compiler, + (0x807dbc54_b67e_4c79, 0x8afb_eafe4df6f2e1)) + +using Core.Intrinsics, Core.IR + +import Core: print, println, show, write, unsafe_write, + _apply_iterate, svec, apply_type, Builtin, IntrinsicFunction, + MethodInstance, CodeInstance, MethodTable, MethodMatch, PartialOpaque, + TypeofVararg, Core, SimpleVector, donotdelete, compilerbarrier, + memoryref_isassigned, memoryrefnew, memoryrefoffset, memoryrefget, + memoryrefset!, typename + +using Base +using Base: Ordering, vect, EffectsOverride, BitVector, @_gc_preserve_begin, @_gc_preserve_end, RefValue, + @nospecializeinfer, @_foldable_meta, fieldindex, is_function_def, indexed_iterate, isexpr, methods, + get_world_counter, JLOptions, _methods_by_ftype, unwrap_unionall, cconvert, unsafe_convert, + issingletontype, isType, rewrap_unionall, has_free_typevars, isvarargtype, hasgenerator, + IteratorSize, SizeUnknown, _array_for, Bottom, generating_output, diff_names, + ismutationfree, NUM_EFFECTS_OVERRIDES, _NAMEDTUPLE_NAME, datatype_fieldtypes, + argument_datatype, isfieldatomic, unwrapva, iskindtype, _bits_findnext, copy_exprargs, + Generator, Filter, ismutabletypename, isvatuple, datatype_fieldcount, + isconcretedispatch, isdispatchelem, datatype_layoutsize, + datatype_arrayelem, unionlen, isidentityfree, _uniontypes, uniontypes, OneTo, Callable, + DataTypeFieldDesc, datatype_nfields, datatype_pointerfree, midpoint, is_valid_intrinsic_elptr, + allocatedinline, isbitsunion, widen_diagonal, unconstrain_vararg_length, + rename_unionall, may_invoke_generator, is_meta_expr_head, is_meta_expr, quoted, + specialize_method, hasintersect, is_nospecializeinfer, is_nospecialized, + get_nospecializeinfer_sig, tls_world_age, uniontype_layout, kwerr, + moduleroot, is_file_tracked, decode_effects_override, lookup_binding_partition, + is_some_imported, binding_kind, is_some_guard, is_some_const_binding, partition_restriction, + BINDING_KIND_GLOBAL, structdiff +using Base.Order +import Base: getindex, setindex!, length, iterate, push!, isempty, first, convert, ==, + copy, popfirst!, in, haskey, resize!, copy!, append!, last, get!, size, + get, iterate, findall, min_world, max_world, _topmod + +const getproperty = Core.getfield +const setproperty! = Core.setfield! +const swapproperty! = Core.swapfield! +const modifyproperty! = Core.modifyfield! +const replaceproperty! = Core.replacefield! 
+const _DOCS_ALIASING_WARNING = "" + +ccall(:jl_set_istopmod, Cvoid, (Any, Bool), Compiler, false) + +eval(x) = Core.eval(Compiler, x) +eval(m, x) = Core.eval(m, x) + +function include(x::String) + if !isdefined(Base, :end_base_include) + # During bootstrap, all includes are relative to `base/` + x = Base.strcat(Base.strcat(Base.BUILDROOT, "../usr/share/julia/Compiler/src/"), x) + end + Base.include(Compiler, x) +end + +function include(mod::Module, x::String) + if !isdefined(Base, :end_base_include) + x = Base.strcat(Base.strcat(Base.BUILDROOT, "../usr/share/julia/Compiler/src/"), x) + end + Base.include(mod, x) +end + + +macro _boundscheck() Expr(:boundscheck) end + +# These types are used by reflection.jl and expr.jl too, so declare them here. +# Note that `@assume_effects` is available only after loading namedtuple.jl. +abstract type MethodTableView end +abstract type AbstractInterpreter end + +function return_type end +function is_return_type(Core.@nospecialize(f)) + f === return_type && return true + if isdefined(Base, :Compiler) && Compiler !== Base.Compiler + # Also model the return_type function of the builtin Compiler the same. + # This isn't completely sound. We don't actually have any idea what the + # base compiler will do at runtime. In the fullness of time, we should + # re-work the semantics to make the cache primary and thus avoid having + # to reason about what the compiler may do at runtime, but we're not + # fully there yet. + return f === Base.Compiler.return_type + end + return false +end + +include("sort.jl") + +# We don't include some.jl, but this definition is still useful. +something(x::Nothing, y...) = something(y...) +something(x::Any, y...) = x + +############ +# compiler # +############ + +baremodule BuildSettings +using Core: ARGS, include +using ..Compiler: >, getindex, length + +global MAX_METHODS::Int = 3 + +if length(ARGS) > 2 && ARGS[2] === "--buildsettings" + include(BuildSettings, ARGS[3]) +end +end + +if false + import Base: Base, @show +else + macro show(ex...) 
+ blk = Expr(:block) + for s in ex + push!(blk.args, :(println(stdout, $(QuoteNode(s)), " = ", + begin local value = $(esc(s)) end))) + end + isempty(ex) || push!(blk.args, :value) + blk + end +end + +include("cicache.jl") +include("methodtable.jl") +include("effects.jl") +include("types.jl") +include("utilities.jl") +include("validation.jl") + +include("ssair/basicblock.jl") +include("ssair/domtree.jl") +include("ssair/ir.jl") +include("ssair/tarjan.jl") + +include("abstractlattice.jl") +include("stmtinfo.jl") +include("inferenceresult.jl") +include("inferencestate.jl") + +include("typeutils.jl") +include("typelimits.jl") +include("typelattice.jl") +include("tfuncs.jl") + +include("abstractinterpretation.jl") +include("typeinfer.jl") +include("optimize.jl") + +include("bootstrap.jl") +include("reflection_interface.jl") + +if isdefined(Base, :IRShow) + @eval module IRShow + import ..Compiler + using Core.IR + using ..Base + import .Compiler: IRCode, CFG, scan_ssa_use!, + isexpr, compute_basic_blocks, block_for_inst, IncrementalCompact, + Effects, ALWAYS_TRUE, ALWAYS_FALSE, DebugInfoStream, getdebugidx, + VarState, InvalidIRError, argextype, widenconst, singleton_type, + sptypes_from_meth_instance, EMPTY_SPTYPES, InferenceState, + NativeInterpreter, CachedMethodTable, LimitedAccuracy, Timings + # During bootstrap, Base will later include this into its own "IRShow module" + Compiler.include(IRShow, "ssair/show.jl") + end +end + +end + +end diff --git a/base/compiler/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl similarity index 99% rename from base/compiler/abstractinterpretation.jl rename to Compiler/src/abstractinterpretation.jl index f3ffd6495ce50..d9319c02b110a 100644 --- a/base/compiler/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -2119,7 +2119,7 @@ function form_partially_defined_struct(@nospecialize(obj), @nospecialize(name)) else fldidx > nminfld || return nothing end - return PartialStruct(objt0, Any[obj isa PartialStruct && i≤length(obj.fields) ? + return PartialStruct(fallback_lattice, objt0, Any[obj isa PartialStruct && i≤length(obj.fields) ? obj.fields[i] : fieldtype(objt0,i) for i = 1:fldidx]) end @@ -2955,7 +2955,7 @@ function abstract_eval_new(interp::AbstractInterpreter, e::Expr, vtypes::Union{V # - any refinement information is available (`anyrefine`), or when # - `nargs` is greater than `n_initialized` derived from the struct type # information alone - rt = PartialStruct(rt, ats) + rt = PartialStruct(𝕃ᵢ, rt, ats) end else rt = refine_partial_type(rt) @@ -2990,7 +2990,7 @@ function abstract_eval_splatnew(interp::AbstractInterpreter, e::Expr, vtypes::Un all(i::Int -> ⊑(𝕃ᵢ, (at.fields::Vector{Any})[i], fieldtype(t, i)), 1:n) end)) nothrow = isexact - rt = PartialStruct(rt, at.fields::Vector{Any}) + rt = PartialStruct(𝕃ᵢ, rt, at.fields::Vector{Any}) end else rt = refine_partial_type(rt) @@ -3524,7 +3524,7 @@ end end fields[i] = a end - anyrefine && return PartialStruct(rt.typ, fields) + anyrefine && return PartialStruct(𝕃ᵢ, rt.typ, fields) end if isa(rt, PartialOpaque) return rt # XXX: this case was missed in #39512 diff --git a/base/compiler/abstractlattice.jl b/Compiler/src/abstractlattice.jl similarity index 100% rename from base/compiler/abstractlattice.jl rename to Compiler/src/abstractlattice.jl diff --git a/Compiler/src/bootstrap.jl b/Compiler/src/bootstrap.jl new file mode 100644 index 0000000000000..4205b072d232f --- /dev/null +++ b/Compiler/src/bootstrap.jl @@ -0,0 +1,66 @@ +# This file is a part of Julia. 
License is MIT: https://julialang.org/license + +# make sure that typeinf is executed before turning on typeinf_ext +# this ensures that typeinf_ext doesn't recurse before it can add the item to the workq +# especially try to make sure any recursive and leaf functions have concrete signatures, +# since we won't be able to specialize & infer them at runtime + +activate_codegen!() = ccall(:jl_set_typeinf_func, Cvoid, (Any,), typeinf_ext_toplevel) + +function bootstrap!() + let time() = ccall(:jl_clock_now, Float64, ()) + println("Compiling the compiler. This may take several minutes ...") + interp = NativeInterpreter() + + # analyze_escapes_tt = Tuple{typeof(analyze_escapes), IRCode, Int, TODO} + optimize_tt = Tuple{typeof(optimize), NativeInterpreter, OptimizationState{NativeInterpreter}, InferenceResult} + fs = Any[ + # we first create caches for the optimizer, because they contain many loop constructions + # and they're better to not run in interpreter even during bootstrapping + #=analyze_escapes_tt,=# optimize_tt, + # then we create caches for inference entries + typeinf_ext, typeinf, typeinf_edge, + ] + # tfuncs can't be inferred from the inference entries above, so here we infer them manually + for x in T_FFUNC_VAL + push!(fs, x[3]) + end + for i = 1:length(T_IFUNC) + if isassigned(T_IFUNC, i) + x = T_IFUNC[i] + push!(fs, x[3]) + else + println(stderr, "WARNING: tfunc missing for ", reinterpret(IntrinsicFunction, Int32(i))) + end + end + starttime = time() + for f in fs + if isa(f, DataType) && f.name === typename(Tuple) + tt = f + else + tt = Tuple{typeof(f), Vararg{Any}} + end + for m in _methods_by_ftype(tt, 10, get_world_counter())::Vector + # remove any TypeVars from the intersection + m = m::MethodMatch + typ = Any[m.spec_types.parameters...] 
+ for i = 1:length(typ) + typ[i] = unwraptv(typ[i]) + end + typeinf_type(interp, m.method, Tuple{typ...}, m.sparams) + end + end + endtime = time() + println("Base.Compiler ──── ", sub_float(endtime,starttime), " seconds") + end + activate_codegen!() +end + +function activate!(; reflection=true, codegen=false) + if reflection + Base.REFLECTION_COMPILER[] = Compiler + end + if codegen + activate_codegen!() + end +end diff --git a/base/compiler/cicache.jl b/Compiler/src/cicache.jl similarity index 100% rename from base/compiler/cicache.jl rename to Compiler/src/cicache.jl diff --git a/base/compiler/effects.jl b/Compiler/src/effects.jl similarity index 100% rename from base/compiler/effects.jl rename to Compiler/src/effects.jl diff --git a/base/compiler/inferenceresult.jl b/Compiler/src/inferenceresult.jl similarity index 100% rename from base/compiler/inferenceresult.jl rename to Compiler/src/inferenceresult.jl diff --git a/base/compiler/inferencestate.jl b/Compiler/src/inferencestate.jl similarity index 100% rename from base/compiler/inferencestate.jl rename to Compiler/src/inferencestate.jl diff --git a/base/compiler/methodtable.jl b/Compiler/src/methodtable.jl similarity index 100% rename from base/compiler/methodtable.jl rename to Compiler/src/methodtable.jl diff --git a/base/compiler/optimize.jl b/Compiler/src/optimize.jl similarity index 99% rename from base/compiler/optimize.jl rename to Compiler/src/optimize.jl index edc374f675c5f..8cdd56f4c1a76 100644 --- a/base/compiler/optimize.jl +++ b/Compiler/src/optimize.jl @@ -212,14 +212,14 @@ end function argextype end # imported by EscapeAnalysis function try_compute_field end # imported by EscapeAnalysis -include("compiler/ssair/heap.jl") -include("compiler/ssair/slot2ssa.jl") -include("compiler/ssair/inlining.jl") -include("compiler/ssair/verify.jl") -include("compiler/ssair/legacy.jl") -include("compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl") -include("compiler/ssair/passes.jl") -include("compiler/ssair/irinterp.jl") +include("ssair/heap.jl") +include("ssair/slot2ssa.jl") +include("ssair/inlining.jl") +include("ssair/verify.jl") +include("ssair/legacy.jl") +include("ssair/EscapeAnalysis/EscapeAnalysis.jl") +include("ssair/passes.jl") +include("ssair/irinterp.jl") function ir_to_codeinf!(opt::OptimizationState) (; linfo, src) = opt diff --git a/Compiler/src/reflection_interface.jl b/Compiler/src/reflection_interface.jl new file mode 100644 index 0000000000000..3fc182685e598 --- /dev/null +++ b/Compiler/src/reflection_interface.jl @@ -0,0 +1,58 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +_findall_matches(interp::AbstractInterpreter, @nospecialize(tt)) = findall(tt, method_table(interp)) +_default_interp(world::UInt) = NativeInterpreter(world) + +_may_throw_methoderror(matches::MethodLookupResult) = + matches.ambig || !any(match::Core.MethodMatch->match.fully_covers, matches.matches) + +function _infer_exception_type(interp::AbstractInterpreter, @nospecialize(tt), optimize::Bool) + matches = _findall_matches(interp, tt) + matches === nothing && return nothing + exct = Union{} + if _may_throw_methoderror(matches) + # account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. 
+ exct = MethodError + end + for match in matches.matches + match = match::Core.MethodMatch + frame = typeinf_frame(interp, match, #=run_optimizer=#optimize) + frame === nothing && return Any + exct = tmerge(exct, widenconst(frame.result.exc_result)) + end + return exct +end + +function _infer_effects(interp::AbstractInterpreter, @nospecialize(tt), optimize::Bool) + matches = _findall_matches(interp, tt) + matches === nothing && return nothing + effects = EFFECTS_TOTAL + if _may_throw_methoderror(matches) + # account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. + effects = Effects(effects; nothrow=false) + end + for match in matches.matches + match = match::Core.MethodMatch + frame = typeinf_frame(interp, match, #=run_optimizer=#optimize) + frame === nothing && return Effects() + effects = merge_effects(effects, frame.result.ipo_effects) + end + return effects +end + +function statement_costs!(interp::AbstractInterpreter, cost::Vector{Int}, body::Vector{Any}, src::Union{CodeInfo, IRCode}, match::Core.MethodMatch) + params = OptimizationParams(interp) + sptypes = VarState[VarState(sp, false) for sp in match.sparams] + return statement_costs!(cost, body, src, sptypes, params) +end + +function findsup_mt(@nospecialize(tt), world, method_table) + if method_table === nothing + table = InternalMethodTable(world) + elseif method_table isa Core.MethodTable + table = OverlayMethodTable(world, method_table) + else + table = method_table + end + return findsup(tt, table) +end diff --git a/base/compiler/sort.jl b/Compiler/src/sort.jl similarity index 100% rename from base/compiler/sort.jl rename to Compiler/src/sort.jl diff --git a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl b/Compiler/src/ssair/EscapeAnalysis/EscapeAnalysis.jl similarity index 99% rename from base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl rename to Compiler/src/ssair/EscapeAnalysis/EscapeAnalysis.jl index e8de2e40c4880..c3b1a8b641af4 100644 --- a/base/compiler/ssair/EscapeAnalysis/EscapeAnalysis.jl +++ b/Compiler/src/ssair/EscapeAnalysis/EscapeAnalysis.jl @@ -30,13 +30,16 @@ using ..Compiler: # Core.Compiler specific definitions is_meta_expr_head, is_identity_free_argtype, isexpr, println, setfield!_nothrow, singleton_type, try_compute_field, try_compute_fieldidx, widenconst, ⊑, Compiler -include(x) = _TOP_MOD.include(@__MODULE__, x) -if _TOP_MOD === Compiler - include("compiler/ssair/EscapeAnalysis/disjoint_set.jl") -else - include("disjoint_set.jl") +function include(x) + if !isdefined(_TOP_MOD.Base, :end_base_include) + # During bootstrap, all includes are relative to `base/` + x = ccall(:jl_prepend_string, Ref{String}, (Any, Any), "ssair/EscapeAnalysis/", x) + end + _TOP_MOD.include(@__MODULE__, x) end +include("disjoint_set.jl") + const AInfo = IdSet{Any} """ diff --git a/base/compiler/ssair/EscapeAnalysis/disjoint_set.jl b/Compiler/src/ssair/EscapeAnalysis/disjoint_set.jl similarity index 100% rename from base/compiler/ssair/EscapeAnalysis/disjoint_set.jl rename to Compiler/src/ssair/EscapeAnalysis/disjoint_set.jl diff --git a/base/compiler/ssair/basicblock.jl b/Compiler/src/ssair/basicblock.jl similarity index 100% rename from base/compiler/ssair/basicblock.jl rename to Compiler/src/ssair/basicblock.jl diff --git a/base/compiler/ssair/domtree.jl b/Compiler/src/ssair/domtree.jl similarity index 100% rename from base/compiler/ssair/domtree.jl rename to Compiler/src/ssair/domtree.jl diff --git a/base/compiler/ssair/heap.jl b/Compiler/src/ssair/heap.jl similarity index 
100% rename from base/compiler/ssair/heap.jl rename to Compiler/src/ssair/heap.jl diff --git a/base/compiler/ssair/inlining.jl b/Compiler/src/ssair/inlining.jl similarity index 100% rename from base/compiler/ssair/inlining.jl rename to Compiler/src/ssair/inlining.jl diff --git a/base/compiler/ssair/ir.jl b/Compiler/src/ssair/ir.jl similarity index 99% rename from base/compiler/ssair/ir.jl rename to Compiler/src/ssair/ir.jl index 1efa10f2437ad..9a76c7370c68d 100644 --- a/base/compiler/ssair/ir.jl +++ b/Compiler/src/ssair/ir.jl @@ -1,7 +1,5 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -Core.PhiNode() = Core.PhiNode(Int32[], Any[]) - isterminator(@nospecialize(stmt)) = isa(stmt, GotoNode) || isa(stmt, GotoIfNot) || isa(stmt, ReturnNode) || isa(stmt, EnterNode) || isexpr(stmt, :leave) diff --git a/base/compiler/ssair/irinterp.jl b/Compiler/src/ssair/irinterp.jl similarity index 100% rename from base/compiler/ssair/irinterp.jl rename to Compiler/src/ssair/irinterp.jl diff --git a/base/compiler/ssair/legacy.jl b/Compiler/src/ssair/legacy.jl similarity index 100% rename from base/compiler/ssair/legacy.jl rename to Compiler/src/ssair/legacy.jl diff --git a/base/compiler/ssair/passes.jl b/Compiler/src/ssair/passes.jl similarity index 100% rename from base/compiler/ssair/passes.jl rename to Compiler/src/ssair/passes.jl diff --git a/base/compiler/ssair/show.jl b/Compiler/src/ssair/show.jl similarity index 95% rename from base/compiler/ssair/show.jl rename to Compiler/src/ssair/show.jl index 2ad14c5c5b565..a2212272ce3fc 100644 --- a/base/compiler/ssair/show.jl +++ b/Compiler/src/ssair/show.jl @@ -5,13 +5,6 @@ @nospecialize -if Pair != Base.Pair -import Base: Base, IOContext, string, join, sprint -IOContext(io::IO, KV::Pair) = IOContext(io, Base.Pair(KV[1], KV[2])) -length(s::String) = Base.length(s) -^(s::String, i::Int) = Base.:^(s, i) -end - import Base: show_unquoted using Base: printstyled, with_output_color, prec_decl, @invoke @@ -141,9 +134,9 @@ function print_stmt(io::IO, idx::Int, @nospecialize(stmt), code::Union{IRCode,Co elseif stmt isa GotoNode print(io, "goto #", stmt.label) elseif stmt isa PhiNode - show_unquoted_phinode(io, stmt, indent, "#") + Base.show_unquoted_phinode(io, stmt, indent, "#") elseif stmt isa GotoIfNot - show_unquoted_gotoifnot(io, stmt, indent, "#") + Base.show_unquoted_gotoifnot(io, stmt, indent, "#") # everything else in the IR, defer to the generic AST printer else show_unquoted(io, stmt, indent, show_type ? prec_decl : 0) @@ -151,65 +144,6 @@ function print_stmt(io::IO, idx::Int, @nospecialize(stmt), code::Union{IRCode,Co nothing end -show_unquoted(io::IO, val::Argument, indent::Int, prec::Int) = show_unquoted(io, Core.SlotNumber(val.n), indent, prec) - -show_unquoted(io::IO, stmt::PhiNode, indent::Int, ::Int) = show_unquoted_phinode(io, stmt, indent, "%") -function show_unquoted_phinode(io::IO, stmt::PhiNode, indent::Int, prefix::String) - args = String[let - e = stmt.edges[i] - v = !isassigned(stmt.values, i) ? "#undef" : - sprint(; context=io) do io′ - show_unquoted(io′, stmt.values[i], indent) - end - "$prefix$e => $v" - end for i in 1:length(stmt.edges) - ] - print(io, "φ ", '(') - join(io, args, ", ") - print(io, ')') -end - -function show_unquoted(io::IO, stmt::PhiCNode, indent::Int, ::Int) - print(io, "φᶜ (") - first = true - for v in stmt.values - first ? 
(first = false) : print(io, ", ") - show_unquoted(io, v, indent) - end - print(io, ")") -end - -function show_unquoted(io::IO, stmt::PiNode, indent::Int, ::Int) - print(io, "π (") - show_unquoted(io, stmt.val, indent) - print(io, ", ") - printstyled(io, stmt.typ, color=:cyan) - print(io, ")") -end - -function show_unquoted(io::IO, stmt::UpsilonNode, indent::Int, ::Int) - print(io, "ϒ (") - isdefined(stmt, :val) ? - show_unquoted(io, stmt.val, indent) : - print(io, "#undef") - print(io, ")") -end - -function show_unquoted(io::IO, stmt::ReturnNode, indent::Int, ::Int) - if !isdefined(stmt, :val) - print(io, "unreachable") - else - print(io, "return ") - show_unquoted(io, stmt.val, indent) - end -end - -show_unquoted(io::IO, stmt::GotoIfNot, indent::Int, ::Int) = show_unquoted_gotoifnot(io, stmt, indent, "%") -function show_unquoted_gotoifnot(io::IO, stmt::GotoIfNot, indent::Int, prefix::String) - print(io, "goto ", prefix, stmt.dest, " if not ") - show_unquoted(io, stmt.cond, indent) -end - function should_print_ssa_type(@nospecialize node) if isa(node, Expr) return !(node.head in (:gc_preserve_begin, :gc_preserve_end, :meta, :leave)) @@ -1137,4 +1071,62 @@ function Base.show(io::IO, e::Effects) print(io, ')') end + +function show(io::IO, inferred::Compiler.InferenceResult) + mi = inferred.linfo + tt = mi.specTypes.parameters[2:end] + tts = join(["::$(t)" for t in tt], ", ") + rettype = inferred.result + if isa(rettype, Compiler.InferenceState) + rettype = rettype.bestguess + end + if isa(mi.def, Method) + print(io, mi.def.name, "(", tts, " => ", rettype, ")") + else + print(io, "Toplevel MethodInstance thunk from ", mi.def, " => ", rettype) + end +end + +Base.show(io::IO, sv::InferenceState) = + (print(io, "InferenceState for "); show(io, sv.linfo)) + +Base.show(io::IO, ::NativeInterpreter) = + print(io, "Compiler.NativeInterpreter(...)") + +Base.show(io::IO, cache::CachedMethodTable) = + print(io, typeof(cache), "(", length(cache.cache), " entries)") + +function Base.show(io::IO, limited::LimitedAccuracy) + print(io, "LimitedAccuracy(") + show(io, limited.typ) + print(io, ", #= ", length(limited.causes), " cause(s) =#)") +end + + +# These sometimes show up as Const-values in InferenceFrameInfo signatures +function show(io::IO, mi_info::Timings.InferenceFrameInfo) + mi = mi_info.mi + def = mi.def + if isa(def, Method) + if isdefined(def, :generator) && mi === def.generator + print(io, "InferenceFrameInfo generator for ") + show(io, def) + else + print(io, "InferenceFrameInfo for ") + argnames = [isa(a, Core.Const) ? (isa(a.val, Type) ? "" : a.val) : "" for a in mi_info.slottypes[1:mi_info.nargs]] + show_tuple_as_call(io, def.name, mi.specTypes; argnames, qualified=true) + end + else + di = mi.cache.inferred.debuginfo + file, line = debuginfo_firstline(di) + file = string(file) + line = isempty(file) || line < 0 ? 
"" : "$file:$line" + print(io, "Toplevel InferenceFrameInfo thunk from ", def, " starting at ", line) + end +end + +function show(io::IO, tinf::Timings.Timing) + print(io, "Compiler.Timings.Timing(", tinf.mi_info, ") with ", length(tinf.children), " children") +end + @specialize diff --git a/base/compiler/ssair/slot2ssa.jl b/Compiler/src/ssair/slot2ssa.jl similarity index 100% rename from base/compiler/ssair/slot2ssa.jl rename to Compiler/src/ssair/slot2ssa.jl diff --git a/base/compiler/ssair/tarjan.jl b/Compiler/src/ssair/tarjan.jl similarity index 100% rename from base/compiler/ssair/tarjan.jl rename to Compiler/src/ssair/tarjan.jl diff --git a/base/compiler/ssair/verify.jl b/Compiler/src/ssair/verify.jl similarity index 100% rename from base/compiler/ssair/verify.jl rename to Compiler/src/ssair/verify.jl diff --git a/base/compiler/stmtinfo.jl b/Compiler/src/stmtinfo.jl similarity index 100% rename from base/compiler/stmtinfo.jl rename to Compiler/src/stmtinfo.jl diff --git a/base/compiler/tfuncs.jl b/Compiler/src/tfuncs.jl similarity index 99% rename from base/compiler/tfuncs.jl rename to Compiler/src/tfuncs.jl index f0212f1082331..3b524742b1609 100644 --- a/base/compiler/tfuncs.jl +++ b/Compiler/src/tfuncs.jl @@ -1436,7 +1436,7 @@ end if TF2 === Bottom RT = Bottom elseif isconcretetype(RT) && has_nontrivial_extended_info(𝕃ᵢ, TF2) # isconcrete condition required to form a PartialStruct - RT = PartialStruct(RT, Any[TF, TF2]) + RT = PartialStruct(fallback_lattice, RT, Any[TF, TF2]) end info = ModifyOpInfo(callinfo.info) return CallMeta(RT, Any, Effects(), info) @@ -1996,7 +1996,7 @@ function tuple_tfunc(𝕃::AbstractLattice, argtypes::Vector{Any}) typ = Tuple{params...} # replace a singleton type with its equivalent Const object issingletontype(typ) && return Const(typ.instance) - return anyinfo ? PartialStruct(typ, argtypes) : typ + return anyinfo ? PartialStruct(𝕃, typ, argtypes) : typ end @nospecs function memoryrefget_tfunc(𝕃::AbstractLattice, mem, order, boundscheck) diff --git a/base/compiler/typeinfer.jl b/Compiler/src/typeinfer.jl similarity index 99% rename from base/compiler/typeinfer.jl rename to Compiler/src/typeinfer.jl index c1b7db82bff3f..94c65684e672c 100644 --- a/base/compiler/typeinfer.jl +++ b/Compiler/src/typeinfer.jl @@ -861,7 +861,7 @@ function cached_return_type(code::CodeInstance) # the second subtyping/egal conditions are necessary to distinguish usual cases # from rare cases when `Const` wrapped those extended lattice type objects if isa(rettype_const, Vector{Any}) && !(Vector{Any} <: rettype) - return PartialStruct(rettype, rettype_const) + return PartialStruct(fallback_lattice, rettype, rettype_const) elseif isa(rettype_const, PartialOpaque) && rettype <: Core.OpaqueClosure return rettype_const elseif isa(rettype_const, InterConditional) && rettype !== InterConditional diff --git a/base/compiler/typelattice.jl b/Compiler/src/typelattice.jl similarity index 92% rename from base/compiler/typelattice.jl rename to Compiler/src/typelattice.jl index 86fa8af21615f..2832edc9219ff 100644 --- a/base/compiler/typelattice.jl +++ b/Compiler/src/typelattice.jl @@ -6,48 +6,7 @@ # N.B.: Const/PartialStruct/InterConditional are defined in Core, to allow them to be used # inside the global code cache. - -import Core: Const, PartialStruct - -""" - struct Const - val - end - -The type representing a constant value. 
-""" -:(Const) - -""" - struct PartialStruct - typ - fields::Vector{Any} # elements are other type lattice members - end - -This extended lattice element is introduced when we have information about an object's -fields beyond what can be obtained from the object type. E.g. it represents a tuple where -some elements are known to be constants or a struct whose `Any`-typed field is initialized -with `Int` values. - -- `typ` indicates the type of the object -- `fields` holds the lattice elements corresponding to each field of the object - -If `typ` is a struct, `fields` represents the fields of the struct that are guaranteed to be -initialized. For instance, if the length of `fields` of `PartialStruct` representing a -struct with 4 fields is 3, the 4th field may not be initialized. If the length is 4, all -fields are guaranteed to be initialized. - -If `typ` is a tuple, the last element of `fields` may be `Vararg`. In this case, it is -guaranteed that the number of elements in the tuple is at least `length(fields)-1`, but the -exact number of elements is unknown. -""" -:(PartialStruct) -function PartialStruct(@nospecialize(typ), fields::Vector{Any}) - for i = 1:length(fields) - assert_nested_slotwrapper(fields[i]) - end - return Core._PartialStruct(typ, fields) -end +import Core: Const, InterConditional, PartialStruct """ cnd::Conditional @@ -87,23 +46,6 @@ end Conditional(var::SlotNumber, @nospecialize(thentype), @nospecialize(elsetype); isdefined::Bool=false) = Conditional(slot_id(var), thentype, elsetype; isdefined) -import Core: InterConditional -""" - struct InterConditional - slot::Int - thentype - elsetype - end - -Similar to `Conditional`, but conveys inter-procedural constraints imposed on call arguments. -This is separate from `Conditional` to catch logic errors: the lattice element name is `InterConditional` -while processing a call, then `Conditional` everywhere else. Thus `InterConditional` does not appear in -`CompilerTypes`—these type's usages are disjoint—though we define the lattice for `InterConditional`. -""" -:(InterConditional) -InterConditional(var::SlotNumber, @nospecialize(thentype), @nospecialize(elsetype)) = - InterConditional(slot_id(var), thentype, elsetype) - const AnyConditional = Union{Conditional,InterConditional} Conditional(cnd::InterConditional) = Conditional(cnd.slot, cnd.thentype, cnd.elsetype) InterConditional(cnd::Conditional) = InterConditional(cnd.slot, cnd.thentype, cnd.elsetype) @@ -388,8 +330,8 @@ end end end return Conditional(slot, - thenfields === nothing ? Bottom : PartialStruct(vartyp.typ, thenfields), - elsefields === nothing ? Bottom : PartialStruct(vartyp.typ, elsefields)) + thenfields === nothing ? Bottom : PartialStruct(fallback_lattice, vartyp.typ, thenfields), + elsefields === nothing ? Bottom : PartialStruct(fallback_lattice, vartyp.typ, elsefields)) else vartyp_widened = widenconst(vartyp) thenfields = thentype === Bottom ? nothing : Any[] @@ -405,8 +347,8 @@ end end end return Conditional(slot, - thenfields === nothing ? Bottom : PartialStruct(vartyp_widened, thenfields), - elsefields === nothing ? Bottom : PartialStruct(vartyp_widened, elsefields)) + thenfields === nothing ? Bottom : PartialStruct(fallback_lattice, vartyp_widened, thenfields), + elsefields === nothing ? Bottom : PartialStruct(fallback_lattice, vartyp_widened, elsefields)) end end @@ -734,7 +676,7 @@ widenconst(::AnyConditional) = Bool widenconst(a::AnyMustAlias) = widenconst(widenmustalias(a)) widenconst(c::Const) = (v = c.val; isa(v, Type) ? 
Type{v} : typeof(v)) widenconst(::PartialTypeVar) = TypeVar -widenconst(t::PartialStruct) = t.typ +widenconst(t::Core.PartialStruct) = t.typ widenconst(t::PartialOpaque) = t.typ @nospecializeinfer widenconst(@nospecialize t::Type) = t widenconst(::TypeVar) = error("unhandled TypeVar") @@ -799,3 +741,13 @@ function stoverwrite1!(state::VarTable, change::StateUpdate) state[changeid] = newtype return state end + +# The ::AbstractLattice argument is unused and simply serves to disambiguate +# different instances of the compiler that may share the `Core.PartialStruct` +# type. +function Core.PartialStruct(::AbstractLattice, @nospecialize(typ), fields::Vector{Any}) + for i = 1:length(fields) + assert_nested_slotwrapper(fields[i]) + end + return Core._PartialStruct(typ, fields) +end diff --git a/base/compiler/typelimits.jl b/Compiler/src/typelimits.jl similarity index 99% rename from base/compiler/typelimits.jl rename to Compiler/src/typelimits.jl index 3d0e5f3d0877d..e420db030715b 100644 --- a/base/compiler/typelimits.jl +++ b/Compiler/src/typelimits.jl @@ -641,7 +641,7 @@ end ⋤(𝕃, tyi, ft) # just a type-level information, but more precise than the declared type end end - anyrefine && return PartialStruct(aty, fields) + anyrefine && return PartialStruct(𝕃, aty, fields) end return nothing end diff --git a/base/compiler/types.jl b/Compiler/src/types.jl similarity index 100% rename from base/compiler/types.jl rename to Compiler/src/types.jl diff --git a/base/compiler/typeutils.jl b/Compiler/src/typeutils.jl similarity index 100% rename from base/compiler/typeutils.jl rename to Compiler/src/typeutils.jl diff --git a/base/compiler/utilities.jl b/Compiler/src/utilities.jl similarity index 99% rename from base/compiler/utilities.jl rename to Compiler/src/utilities.jl index 5361ff26f997c..0f1e2988bd669 100644 --- a/base/compiler/utilities.jl +++ b/Compiler/src/utilities.jl @@ -42,12 +42,6 @@ end anymap(f::Function, a::Array{Any,1}) = Any[ f(a[i]) for i in 1:length(a) ] -########### -# scoping # -########### - -_topmod(m::Module) = ccall(:jl_base_relative_to, Any, (Any,), m)::Module - ############ # inlining # ############ diff --git a/base/compiler/validation.jl b/Compiler/src/validation.jl similarity index 100% rename from base/compiler/validation.jl rename to Compiler/src/validation.jl diff --git a/Makefile b/Makefile index 2cac2f8818324..f0b1dfaa708e4 100644 --- a/Makefile +++ b/Makefile @@ -75,6 +75,13 @@ ifndef JULIA_VAGRANT_BUILD endif endif +TOP_LEVEL_PKGS := Compiler + +TOP_LEVEL_PKG_LINK_TARGETS := $(addprefix $(build_datarootdir)/julia/,$(TOP_LEVEL_PKGS)) + +# Generate symlinks for top level pkgs in usr/share/julia/ +$(foreach module, $(TOP_LEVEL_PKGS), $(eval $(call symlink_target,$$(JULIAHOME)/$(module),$$(build_datarootdir)/julia,$(module)))) + julia-deps: | $(DIRS) $(build_datarootdir)/julia/base $(build_datarootdir)/julia/test @$(MAKE) $(QUIET_MAKE) -C $(BUILDROOT)/deps @@ -103,10 +110,10 @@ julia-src-release julia-src-debug : julia-src-% : julia-deps julia_flisp.boot.in julia-cli-release julia-cli-debug: julia-cli-% : julia-deps @$(MAKE) $(QUIET_MAKE) -C $(BUILDROOT)/cli $* -julia-sysimg-ji : julia-stdlib julia-base julia-cli-$(JULIA_BUILD_MODE) julia-src-$(JULIA_BUILD_MODE) | $(build_private_libdir) +julia-sysimg-ji : $(TOP_LEVEL_PKG_LINK_TARGETS) julia-stdlib julia-base julia-cli-$(JULIA_BUILD_MODE) julia-src-$(JULIA_BUILD_MODE) | $(build_private_libdir) @$(MAKE) $(QUIET_MAKE) -C $(BUILDROOT) -f sysimage.mk sysimg-ji JULIA_EXECUTABLE='$(JULIA_EXECUTABLE)' -julia-sysimg-bc : 
julia-stdlib julia-base julia-cli-$(JULIA_BUILD_MODE) julia-src-$(JULIA_BUILD_MODE) | $(build_private_libdir) +julia-sysimg-bc : $(TOP_LEVEL_PKG_LINK_TARGETS) julia-stdlib julia-base julia-cli-$(JULIA_BUILD_MODE) julia-src-$(JULIA_BUILD_MODE) | $(build_private_libdir) @$(MAKE) $(QUIET_MAKE) -C $(BUILDROOT) -f sysimage.mk sysimg-bc JULIA_EXECUTABLE='$(JULIA_EXECUTABLE)' julia-sysimg-release julia-sysimg-debug : julia-sysimg-% : julia-sysimg-ji julia-src-% diff --git a/base/Base.jl b/base/Base.jl index 57b5142604d21..39507b625660d 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -1,54 +1,8 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -had_compiler = isdefined(Base, :Compiler) -if had_compiler; else -include("Base_compiler.jl") -end - const start_base_include = time_ns() include("reflection.jl") - -# define invoke(f, T, args...; kwargs...), without kwargs wrapping -# to forward to invoke -function Core.kwcall(kwargs::NamedTuple, ::typeof(invoke), f, T, args...) - @inline - # prepend kwargs and f to the invoked from the user - T = rewrap_unionall(Tuple{Core.Typeof(kwargs), Core.Typeof(f), (unwrap_unionall(T)::DataType).parameters...}, T) - return invoke(Core.kwcall, T, kwargs, f, args...) -end -# invoke does not have its own call cache, but kwcall for invoke does -setfield!(typeof(invoke).name.mt, :max_args, 3, :monotonic) # invoke, f, T, args... - -# define applicable(f, T, args...; kwargs...), without kwargs wrapping -# to forward to applicable -function Core.kwcall(kwargs::NamedTuple, ::typeof(applicable), @nospecialize(args...)) - @inline - return applicable(Core.kwcall, kwargs, args...) -end -function Core._hasmethod(@nospecialize(f), @nospecialize(t)) # this function has a special tfunc (TODO: make this a Builtin instead like applicable) - tt = rewrap_unionall(Tuple{Core.Typeof(f), (unwrap_unionall(t)::DataType).parameters...}, t) - return Core._hasmethod(tt) -end - -# core operations & types -include("promotion.jl") -include("tuple.jl") -include("expr.jl") -include("pair.jl") -include("traits.jl") -include("range.jl") -include("error.jl") - -# core numeric operations & types -==(x, y) = x === y -include("bool.jl") -include("number.jl") -include("int.jl") -include("operators.jl") -include("pointer.jl") -include("refvalue.jl") -include("cmem.jl") include("refpointer.jl") # now replace the Pair constructor (relevant for NamedTuples) with one that calls our Base.convert @@ -60,37 +14,7 @@ end # The REPL stdlib hooks into Base using this Ref const REPL_MODULE_REF = Ref{Module}(Base) - - -# For OS specific stuff -# We need to strcat things here, before strings are really defined -function strcat(x::String, y::String) - out = ccall(:jl_alloc_string, Ref{String}, (Csize_t,), Core.sizeof(x) + Core.sizeof(y)) - GC.@preserve x y out begin - out_ptr = unsafe_convert(Ptr{UInt8}, out) - unsafe_copyto!(out_ptr, unsafe_convert(Ptr{UInt8}, x), Core.sizeof(x)) - unsafe_copyto!(out_ptr + Core.sizeof(x), unsafe_convert(Ptr{UInt8}, y), Core.sizeof(y)) - end - return out -end - -BUILDROOT::String = "" - -baremodule BuildSettings -end - -let i = 1 - global BUILDROOT - while i <= length(Core.ARGS) - if Core.ARGS[i] == "--buildsettings" - include(BuildSettings, ARGS[i+1]) - i += 1 - else - BUILDROOT = Core.ARGS[i] - end - i += 1 - end -end +process_sysimg_args!() include(strcat(BUILDROOT, "build_h.jl")) # include($BUILDROOT/base/build_h.jl) include(strcat(BUILDROOT, "version_git.jl")) # include($BUILDROOT/base/version_git.jl) @@ -380,6 +304,7 @@ a_method_to_overwrite_in_test() 
= inferencebarrier(1) # Compatibility with when Compiler was in Core @eval Core const Compiler = Main.Base.Compiler +@eval Compiler const fl_parse = Core.Main.Base.fl_parse # External libraries vendored into Base Core.println("JuliaSyntax/src/JuliaSyntax.jl") @@ -471,10 +396,21 @@ end # enable threads support @eval PCRE PCRE_COMPILE_LOCK = Threads.SpinLock() +# Record dependency information for files belonging to the Compiler, so that +# we know whether the .ji can just give the Base copy or not. +# TODO: We may want to do this earlier to avoid TOCTOU issues. +const _compiler_require_dependencies = Any[] +for i = 1:length(_included_files) + isassigned(_included_files, i) || continue + (mod, file) = _included_files[i] + if mod === Compiler || parentmodule(mod) === Compiler || endswith(file, "/Compiler.jl") + _include_dependency!(_compiler_require_dependencies, true, mod, file, true, false) + end +end +@assert length(_compiler_require_dependencies) >= 15 + end # Ensure this file is also tracked @assert !isassigned(_included_files, 1) _included_files[1] = (parentmodule(Base), abspath(@__FILE__)) - -had_compiler && ccall(:jl_init_restored_module, Cvoid, (Any,), Base) diff --git a/base/Base_compiler.jl b/base/Base_compiler.jl index 3578b8f070db3..691e2c574acd6 100644 --- a/base/Base_compiler.jl +++ b/base/Base_compiler.jl @@ -1,5 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +baremodule Base using Core.Intrinsics, Core.IR # to start, we're going to use a very simple definition of `include` @@ -198,7 +199,6 @@ function Core._hasmethod(@nospecialize(f), @nospecialize(t)) # this function has return Core._hasmethod(tt) end - # core operations & types include("promotion.jl") include("tuple.jl") @@ -252,15 +252,51 @@ include("namedtuple.jl") include("ordering.jl") using .Order -include("compiler/compiler.jl") +include("coreir.jl") + + +# For OS specific stuff +# We need to strcat things here, before strings are really defined +function strcat(x::String, y::String) + out = ccall(:jl_alloc_string, Ref{String}, (Csize_t,), Core.sizeof(x) + Core.sizeof(y)) + GC.@preserve x y out begin + out_ptr = unsafe_convert(Ptr{UInt8}, out) + unsafe_copyto!(out_ptr, unsafe_convert(Ptr{UInt8}, x), Core.sizeof(x)) + unsafe_copyto!(out_ptr + Core.sizeof(x), unsafe_convert(Ptr{UInt8}, y), Core.sizeof(y)) + end + return out +end + +BUILDROOT::String = "" + +baremodule BuildSettings +end + +function process_sysimg_args!() + let i = 1 + global BUILDROOT + while i <= length(Core.ARGS) + if Core.ARGS[i] == "--buildsettings" + include(BuildSettings, ARGS[i+1]) + i += 1 + else + BUILDROOT = Core.ARGS[i] + end + i += 1 + end + end +end +process_sysimg_args!() + +include(strcat(BUILDROOT, "../usr/share/julia/Compiler/src/Compiler.jl")) const _return_type = Compiler.return_type # Enable compiler -Core.eval(Compiler, quote -include("compiler/bootstrap.jl") -ccall(:jl_set_typeinf_func, Cvoid, (Any,), typeinf_ext_toplevel) +Compiler.bootstrap!() -include("compiler/parsing.jl") +include("flparse.jl") Core._setparser!(fl_parse) -end) + +# Further definition of Base will happen in Base.jl if loaded. 
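(Editorial sketch, not part of the diff: after this restructuring the old entry points keep working through the compatibility alias added in base/Base.jl above; the results shown are what one would expect on a patched build.)

```julia
Core.Compiler === Base.Compiler                  # true, alias kept for backwards compatibility
Base.Compiler.return_type(sin, Tuple{Float64})   # Float64, same reflection entry point as before
```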
+end diff --git a/base/boot.jl b/base/boot.jl index 5d40191ecab21..612efc0b50c8a 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -512,7 +512,6 @@ eval(Core, quote UpsilonNode(@nospecialize(val)) = $(Expr(:new, :UpsilonNode, :val)) UpsilonNode() = $(Expr(:new, :UpsilonNode)) Const(@nospecialize(v)) = $(Expr(:new, :Const, :v)) - # NOTE the main constructor is defined within `Core.Compiler` _PartialStruct(@nospecialize(typ), fields::Array{Any, 1}) = $(Expr(:new, :PartialStruct, :typ, :fields)) PartialOpaque(@nospecialize(typ), @nospecialize(env), parent::MethodInstance, source) = $(Expr(:new, :PartialOpaque, :typ, :env, :parent, :source)) InterConditional(slot::Int, @nospecialize(thentype), @nospecialize(elsetype)) = $(Expr(:new, :InterConditional, :slot, :thentype, :elsetype)) diff --git a/base/compiler/bootstrap.jl b/base/compiler/bootstrap.jl deleted file mode 100644 index 3162bccbdb4b9..0000000000000 --- a/base/compiler/bootstrap.jl +++ /dev/null @@ -1,52 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# make sure that typeinf is executed before turning on typeinf_ext -# this ensures that typeinf_ext doesn't recurse before it can add the item to the workq -# especially try to make sure any recursive and leaf functions have concrete signatures, -# since we won't be able to specialize & infer them at runtime - -let time() = ccall(:jl_clock_now, Float64, ()) - println("Compiling the compiler. This may take several minutes ...") - interp = NativeInterpreter() - - # analyze_escapes_tt = Tuple{typeof(analyze_escapes), IRCode, Int, TODO} - optimize_tt = Tuple{typeof(optimize), NativeInterpreter, OptimizationState{NativeInterpreter}, InferenceResult} - fs = Any[ - # we first create caches for the optimizer, because they contain many loop constructions - # and they're better to not run in interpreter even during bootstrapping - #=analyze_escapes_tt,=# optimize_tt, - # then we create caches for inference entries - typeinf_ext, typeinf, typeinf_edge, - ] - # tfuncs can't be inferred from the inference entries above, so here we infer them manually - for x in T_FFUNC_VAL - push!(fs, x[3]) - end - for i = 1:length(T_IFUNC) - if isassigned(T_IFUNC, i) - x = T_IFUNC[i] - push!(fs, x[3]) - else - println(stderr, "WARNING: tfunc missing for ", reinterpret(IntrinsicFunction, Int32(i))) - end - end - starttime = time() - for f in fs - if isa(f, DataType) && f.name === typename(Tuple) - tt = f - else - tt = Tuple{typeof(f), Vararg{Any}} - end - for m in _methods_by_ftype(tt, 10, get_world_counter())::Vector - # remove any TypeVars from the intersection - m = m::MethodMatch - typ = Any[m.spec_types.parameters...] - for i = 1:length(typ) - typ[i] = unwraptv(typ[i]) - end - typeinf_type(interp, m.method, Tuple{typ...}, m.sparams) - end - end - endtime = time() - println("Base.Compiler ──── ", sub_float(endtime,starttime), " seconds") -end diff --git a/base/compiler/compiler.jl b/base/compiler/compiler.jl deleted file mode 100644 index f4b7b73f1bf76..0000000000000 --- a/base/compiler/compiler.jl +++ /dev/null @@ -1,123 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - - -baremodule Compiler - -using Core.Intrinsics, Core.IR - -import Core: print, println, show, write, unsafe_write, - _apply_iterate, svec, apply_type, Builtin, IntrinsicFunction, - MethodInstance, CodeInstance, MethodTable, MethodMatch, PartialOpaque, - TypeofVararg, Core, SimpleVector, donotdelete, compilerbarrier, - memoryref_isassigned, memoryrefnew, memoryrefoffset, memoryrefget, - memoryrefset!, typename - -using ..Base -using ..Base: Ordering, vect, EffectsOverride, BitVector, @_gc_preserve_begin, @_gc_preserve_end, RefValue, - @nospecializeinfer, @_foldable_meta, fieldindex, is_function_def, indexed_iterate, isexpr, methods, - get_world_counter, JLOptions, _methods_by_ftype, unwrap_unionall, cconvert, unsafe_convert, - issingletontype, isType, rewrap_unionall, has_free_typevars, isvarargtype, hasgenerator, - IteratorSize, SizeUnknown, _array_for, Bottom, generating_output, diff_names, - ismutationfree, NUM_EFFECTS_OVERRIDES, _NAMEDTUPLE_NAME, datatype_fieldtypes, - argument_datatype, isfieldatomic, unwrapva, iskindtype, _bits_findnext, copy_exprargs, - Generator, Filter, ismutabletypename, isvatuple, datatype_fieldcount, - isconcretedispatch, isdispatchelem, min_world, max_world, datatype_layoutsize, - datatype_arrayelem, unionlen, isidentityfree, _uniontypes, uniontypes, OneTo, Callable, - DataTypeFieldDesc, datatype_nfields, datatype_pointerfree, midpoint, is_valid_intrinsic_elptr, - allocatedinline, isbitsunion, widen_diagonal, unconstrain_vararg_length, - rename_unionall, may_invoke_generator, is_meta_expr_head, is_meta_expr, quoted, - specialize_method, hasintersect, is_nospecializeinfer, is_nospecialized, - get_nospecializeinfer_sig, tls_world_age, uniontype_layout, kwerr, - moduleroot, is_file_tracked, decode_effects_override -using ..Base.Order -import ..Base: getindex, setindex!, length, iterate, push!, isempty, first, convert, ==, - copy, popfirst!, in, haskey, resize!, copy!, append!, last, get!, size, - get, iterate, findall - -const getproperty = Core.getfield -const setproperty! = Core.setfield! -const swapproperty! = Core.swapfield! -const modifyproperty! = Core.modifyfield! -const replaceproperty! = Core.replacefield! -const _DOCS_ALIASING_WARNING = "" - -ccall(:jl_set_istopmod, Cvoid, (Any, Bool), Compiler, false) - -eval(x) = Core.eval(Compiler, x) -eval(m, x) = Core.eval(m, x) - -include(x) = Base.include(Compiler, x) -include(mod, x) = Base.include(mod, x) - -macro _boundscheck() Expr(:boundscheck) end - -# These types are used by reflection.jl and expr.jl too, so declare them here. -# Note that `@assume_effects` is available only after loading namedtuple.jl. -abstract type MethodTableView end -abstract type AbstractInterpreter end - -function return_type end # promotion.jl expects this to exist -is_return_type(Core.@nospecialize(f)) = f === return_type - -include("compiler/sort.jl") - -# We don't include some.jl, but this definition is still useful. -something(x::Nothing, y...) = something(y...) -something(x::Any, y...) = x - -############ -# compiler # -############ - -baremodule BuildSettings -using Core: ARGS, include -using ..Compiler: >, getindex, length - -global MAX_METHODS::Int = 3 - -if length(ARGS) > 2 && ARGS[2] === "--buildsettings" - include(BuildSettings, ARGS[3]) -end -end - -if false - import Base: Base, @show -else - macro show(ex...) 
- blk = Expr(:block) - for s in ex - push!(blk.args, :(println(stdout, $(QuoteNode(s)), " = ", - begin local value = $(esc(s)) end))) - end - isempty(ex) || push!(blk.args, :value) - blk - end -end - -include("compiler/cicache.jl") -include("compiler/methodtable.jl") -include("compiler/effects.jl") -include("compiler/types.jl") -include("compiler/utilities.jl") -include("compiler/validation.jl") - -include("compiler/ssair/basicblock.jl") -include("compiler/ssair/domtree.jl") -include("compiler/ssair/ir.jl") -include("compiler/ssair/tarjan.jl") - -include("compiler/abstractlattice.jl") -include("compiler/stmtinfo.jl") -include("compiler/inferenceresult.jl") -include("compiler/inferencestate.jl") - -include("compiler/typeutils.jl") -include("compiler/typelimits.jl") -include("compiler/typelattice.jl") -include("compiler/tfuncs.jl") - -include("compiler/abstractinterpretation.jl") -include("compiler/typeinfer.jl") -include("compiler/optimize.jl") - -end diff --git a/base/compilerimg.jl b/base/compilerimg.jl deleted file mode 100644 index c353ee614924b..0000000000000 --- a/base/compilerimg.jl +++ /dev/null @@ -1,4 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -baremodule Base; end -Core.include(Base, "Base_compiler.jl") diff --git a/base/coreir.jl b/base/coreir.jl new file mode 100644 index 0000000000000..a21eeceffe4c5 --- /dev/null +++ b/base/coreir.jl @@ -0,0 +1,54 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +Core.PhiNode() = Core.PhiNode(Int32[], Any[]) + +""" + struct Const + val + end + +The type representing a constant value. +""" +Core.Const + +""" + struct PartialStruct + typ + fields::Vector{Any} # elements are other type lattice members + end + +This extended lattice element is introduced when we have information about an object's +fields beyond what can be obtained from the object type. E.g. it represents a tuple where +some elements are known to be constants or a struct whose `Any`-typed field is initialized +with `Int` values. + +- `typ` indicates the type of the object +- `fields` holds the lattice elements corresponding to each field of the object + +If `typ` is a struct, `fields` represents the fields of the struct that are guaranteed to be +initialized. For instance, if the length of `fields` of `PartialStruct` representing a +struct with 4 fields is 3, the 4th field may not be initialized. If the length is 4, all +fields are guaranteed to be initialized. + +If `typ` is a tuple, the last element of `fields` may be `Vararg`. In this case, it is +guaranteed that the number of elements in the tuple is at least `length(fields)-1`, but the +exact number of elements is unknown. +""" +Core.PartialStruct + +""" + struct InterConditional + slot::Int + thentype + elsetype + end + +Similar to `Conditional`, but conveys inter-procedural constraints imposed on call arguments. +This is separate from `Conditional` to catch logic errors: the lattice element name is `InterConditional` +while processing a call, then `Conditional` everywhere else. Thus `InterConditional` does not appear in +`CompilerTypes`—these type's usages are disjoint—though we define the lattice for `InterConditional`. 
+""" +Core.InterConditional + +InterConditional(var::SlotNumber, @nospecialize(thentype), @nospecialize(elsetype)) = + InterConditional(slot_id(var), thentype, elsetype) diff --git a/base/essentials.jl b/base/essentials.jl index 89b891e216d5a..efae59b82b5f9 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -184,7 +184,8 @@ end _nameof(m::Module) = ccall(:jl_module_name, Ref{Symbol}, (Any,), m) function _is_internal(__module__) - return true + return _nameof(__module__) === :Base || + _nameof(ccall(:jl_base_relative_to, Any, (Any,), __module__)::Module) === :Compiler end # can be used in place of `@assume_effects :total` (supposed to be used for bootstrapping) diff --git a/base/compiler/parsing.jl b/base/flparse.jl similarity index 100% rename from base/compiler/parsing.jl rename to base/flparse.jl diff --git a/base/loading.jl b/base/loading.jl index 7b45348f47009..2765c6ea3ed1f 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1296,7 +1296,7 @@ function _include_from_serialized(pkg::PkgId, path::String, ocachepath::Union{No for M in restored M = M::Module - if parentmodule(M) === M && PkgId(M) == pkg + if is_root_module(M) && PkgId(M) == pkg register && register_root_module(M) if timing_imports elapsed_time = time_ns() - t_before @@ -2239,15 +2239,22 @@ const include_callbacks = Any[] const _concrete_dependencies = Pair{PkgId,UInt128}[] # these dependency versions are "set in stone", because they are explicitly loaded, and the process should try to avoid invalidating them const _require_dependencies = Any[] # a list of (mod, abspath, fsize, hash, mtime) tuples that are the file dependencies of the module currently being precompiled const _track_dependencies = Ref(false) # set this to true to track the list of file dependencies -function _include_dependency(mod::Module, _path::AbstractString; track_content=true, - path_may_be_dir=false) + +function _include_dependency(mod::Module, _path::AbstractString; track_content::Bool=true, + path_may_be_dir::Bool=false) + _include_dependency!(_require_dependencies, _track_dependencies[], mod, _path, track_content, path_may_be_dir) +end + +function _include_dependency!(dep_list::Vector{Any}, track_dependencies::Bool, + mod::Module, _path::AbstractString, + track_content::Bool, path_may_be_dir::Bool) prev = source_path(nothing) if prev === nothing path = abspath(_path) else path = normpath(joinpath(dirname(prev), _path)) end - if !_track_dependencies[] + if !track_dependencies[] if !path_may_be_dir && !isfile(path) throw(SystemError("opening file $(repr(path))", Libc.ENOENT)) elseif path_may_be_dir && !Filesystem.isreadable(path) @@ -2258,9 +2265,9 @@ function _include_dependency(mod::Module, _path::AbstractString; track_content=t if track_content hash = isdir(path) ? 
_crc32c(join(readdir(path))) : open(_crc32c, path, "r") # use mtime=-1.0 here so that fsize==0 && mtime==0.0 corresponds to a missing include_dependency - push!(_require_dependencies, (mod, path, filesize(path), hash, -1.0)) + push!(dep_list, (mod, path, filesize(path), hash, -1.0)) else - push!(_require_dependencies, (mod, path, UInt64(0), UInt32(0), mtime(path))) + push!(dep_list, (mod, path, UInt64(0), UInt32(0), mtime(path))) end end end @@ -2797,9 +2804,6 @@ function require_stdlib(package_uuidkey::PkgId, ext::Union{Nothing, String}=noth end end - - - # relative-path load """ @@ -3326,6 +3330,10 @@ mutable struct CacheHeaderIncludes const modpath::Vector{String} # seemingly not needed in Base, but used by Revise end +function CacheHeaderIncludes(dep_tuple::Tuple{Module, String, Int64, UInt32, Float64}) + return CacheHeaderIncludes(PkgId(dep_tuple[1]), dep_tuple[2:end]..., String[]) +end + function replace_depot_path(path::AbstractString, depots::Vector{String}=normalize_depots_for_relocation()) for depot in depots if startswith(path, string(depot, Filesystem.pathsep())) || path == depot @@ -3865,6 +3873,56 @@ function list_reasons(reasons::Dict{String,Int}) end list_reasons(::Nothing) = "" +function any_includes_stale(includes::Vector{CacheHeaderIncludes}, cachefile::String, reasons::Union{Dict{String,Int},Nothing}=nothing) + for chi in includes + f, fsize_req, hash_req, ftime_req = chi.filename, chi.fsize, chi.hash, chi.mtime + if startswith(f, string("@depot", Filesystem.pathsep())) + @debug("Rejecting stale cache file $cachefile because its depot could not be resolved") + record_reason(reasons, "nonresolveable depot") + return true + end + if !ispath(f) + _f = fixup_stdlib_path(f) + if _f != f && isfile(_f) && startswith(_f, Sys.STDLIB) + continue + end + @debug "Rejecting stale cache file $cachefile because file $f does not exist" + record_reason(reasons, "missing sourcefile") + return true + end + if ftime_req >= 0.0 + # this is an include_dependency for which we only recorded the mtime + ftime = mtime(f) + is_stale = ( ftime != ftime_req ) && + ( ftime != floor(ftime_req) ) && # Issue #13606, PR #13613: compensate for Docker images rounding mtimes + ( ftime != ceil(ftime_req) ) && # PR: #47433 Compensate for CirceCI's truncating of timestamps in its caching + ( ftime != trunc(ftime_req, digits=6) ) && # Issue #20837, PR #20840: compensate for GlusterFS truncating mtimes to microseconds + ( ftime != 1.0 ) && # PR #43090: provide compatibility with Nix mtime. + !( 0 < (ftime_req - ftime) < 1e-6 ) # PR #45552: Compensate for Windows tar giving mtimes that may be incorrect by up to one microsecond + if is_stale + @debug "Rejecting stale cache file $cachefile because mtime of include_dependency $f has changed (mtime $ftime, before $ftime_req)" + record_reason(reasons, "include_dependency mtime change") + return true + end + else + fstat = stat(f) + fsize = filesize(fstat) + if fsize != fsize_req + @debug "Rejecting stale cache file $cachefile because file size of $f has changed (file size $fsize, before $fsize_req)" + record_reason(reasons, "include_dependency fsize change") + return true + end + hash = isdir(fstat) ? 
_crc32c(join(readdir(f))) : open(_crc32c, f, "r") + if hash != hash_req + @debug "Rejecting stale cache file $cachefile because hash of $f has changed (hash $hash, before $hash_req)" + record_reason(reasons, "include_dependency fhash change") + return true + end + end + end + return false +end + # returns true if it "cachefile.ji" is stale relative to "modpath.jl" and build_id for modkey # otherwise returns the list of dependencies to also check @constprop :none function stale_cachefile(modpath::String, cachefile::String; ignore_loaded::Bool = false, requested_flags::CacheFlags=CacheFlags(), reasons=nothing) @@ -4024,51 +4082,8 @@ end return true end end - for chi in includes - f, fsize_req, hash_req, ftime_req = chi.filename, chi.fsize, chi.hash, chi.mtime - if startswith(f, string("@depot", Filesystem.pathsep())) - @debug("Rejecting stale cache file $cachefile because its depot could not be resolved") - record_reason(reasons, "nonresolveable depot") - return true - end - if !ispath(f) - _f = fixup_stdlib_path(f) - if _f != f && isfile(_f) && startswith(_f, Sys.STDLIB) - continue - end - @debug "Rejecting stale cache file $cachefile because file $f does not exist" - record_reason(reasons, "missing sourcefile") - return true - end - if ftime_req >= 0.0 - # this is an include_dependency for which we only recorded the mtime - ftime = mtime(f) - is_stale = ( ftime != ftime_req ) && - ( ftime != floor(ftime_req) ) && # Issue #13606, PR #13613: compensate for Docker images rounding mtimes - ( ftime != ceil(ftime_req) ) && # PR: #47433 Compensate for CirceCI's truncating of timestamps in its caching - ( ftime != trunc(ftime_req, digits=6) ) && # Issue #20837, PR #20840: compensate for GlusterFS truncating mtimes to microseconds - ( ftime != 1.0 ) && # PR #43090: provide compatibility with Nix mtime. - !( 0 < (ftime_req - ftime) < 1e-6 ) # PR #45552: Compensate for Windows tar giving mtimes that may be incorrect by up to one microsecond - if is_stale - @debug "Rejecting stale cache file $cachefile because mtime of include_dependency $f has changed (mtime $ftime, before $ftime_req)" - record_reason(reasons, "include_dependency mtime change") - return true - end - else - fstat = stat(f) - fsize = filesize(fstat) - if fsize != fsize_req - @debug "Rejecting stale cache file $cachefile because file size of $f has changed (file size $fsize, before $fsize_req)" - record_reason(reasons, "include_dependency fsize change") - return true - end - hash = isdir(fstat) ? _crc32c(join(readdir(f))) : open(_crc32c, f, "r") - if hash != hash_req - @debug "Rejecting stale cache file $cachefile because hash of $f has changed (hash $hash, before $hash_req)" - record_reason(reasons, "include_dependency fhash change") - return true - end - end + if any_includes_stale(includes, cachefile, reasons) + return true end end @@ -4148,6 +4163,12 @@ macro __DIR__() return isempty(_dirname) ? 
pwd() : abspath(_dirname) end +function prepare_compiler_stub_image!() + ccall(:jl_add_to_module_init_list, Cvoid, (Any,), Compiler) + register_root_module(Compiler) + filter!(mod->mod !== Compiler, loaded_modules_order) +end + """ precompile(f, argtypes::Tuple{Vararg{Any}}) diff --git a/base/opaque_closure.jl b/base/opaque_closure.jl index 26b39879ca852..d7a91cff7d602 100644 --- a/base/opaque_closure.jl +++ b/base/opaque_closure.jl @@ -39,23 +39,24 @@ end # OpaqueClosure construction from pre-inferred CodeInfo/IRCode using Core: CodeInfo, SSAValue -using Base.Compiler: IRCode +using Base: Compiler +using .Compiler: IRCode function compute_ir_rettype(ir::IRCode) rt = Union{} for i = 1:length(ir.stmts) stmt = ir[SSAValue(i)][:stmt] - if isa(stmt, Core.Compiler.ReturnNode) && isdefined(stmt, :val) - rt = Core.Compiler.tmerge(Core.Compiler.argextype(stmt.val, ir), rt) + if isa(stmt, Core.ReturnNode) && isdefined(stmt, :val) + rt = Compiler.tmerge(Compiler.argextype(stmt.val, ir), rt) end end - return Core.Compiler.widenconst(rt) + return Compiler.widenconst(rt) end function compute_oc_signature(ir::IRCode, nargs::Int, isva::Bool) argtypes = Vector{Any}(undef, nargs) for i = 1:nargs - argtypes[i] = Core.Compiler.widenconst(ir.argtypes[i+1]) + argtypes[i] = Compiler.widenconst(ir.argtypes[i+1]) end if isva lastarg = pop!(argtypes) diff --git a/base/reflection.jl b/base/reflection.jl index 834325dd41583..1b8ed9413a35b 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -346,6 +346,36 @@ function raise_match_failure(name::Symbol, @nospecialize(tt)) error("$name: unanalyzable call given $sig_str") end +const REFLECTION_COMPILER = RefValue{Union{Nothing, Module}}(nothing) + +function invoke_in_typeinf_world(args...) + vargs = Any[args...] + return ccall(:jl_call_in_typeinf_world, Any, (Ptr{Any}, Cint), vargs, length(vargs)) +end + +function invoke_default_compiler(fname::Symbol, args...) + if REFLECTION_COMPILER[] === nothing + return invoke_in_typeinf_world(getglobal(Compiler, fname), args...) + else + return getglobal(REFLECTION_COMPILER[], fname)(args...) + end +end + +function invoke_interp_compiler(interp, fname::Symbol, args...) + if interp === nothing + return invoke_default_compiler(fname, args...) + else + T = typeof(interp) + while true + Tname = typename(T).name + Tname === :Any && error("Expected Interpreter") + Tname === :AbstractInterpreter && break + T = supertype(T) + end + return getglobal(typename(T).module, fname)(args...) + end +end + """ code_typed_by_type(types::Type{<:Tuple}; ...) @@ -356,7 +386,9 @@ function code_typed_by_type(@nospecialize(tt::Type); optimize::Bool=true, debuginfo::Symbol=:default, world::UInt=get_world_counter(), - interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) + interp=nothing) + passed_interp = interp + interp = passed_interp === nothing ? 
invoke_default_compiler(:_default_interp, world) : interp (ccall(:jl_is_in_pure_context, Bool, ()) || world == typemax(UInt)) && error("code reflection cannot be used from generated functions") if @isdefined(IRShow) @@ -368,12 +400,12 @@ function code_typed_by_type(@nospecialize(tt::Type); throw(ArgumentError("'debuginfo' must be either :source or :none")) end tt = to_tuple_type(tt) - matches = Compiler.findall(tt, Compiler.method_table(interp)) + matches = invoke_interp_compiler(passed_interp, :_findall_matches, interp, tt) matches === nothing && raise_match_failure(:code_typed, tt) asts = [] for match in matches.matches match = match::Core.MethodMatch - code = Compiler.typeinf_code(interp, match, optimize) + code = invoke_interp_compiler(passed_interp, :typeinf_code, interp, match, optimize) if code === nothing push!(asts, match.method => Any) else @@ -384,7 +416,7 @@ function code_typed_by_type(@nospecialize(tt::Type); return asts end -function get_oc_code_rt(oc::Core.OpaqueClosure, types, optimize::Bool) +function get_oc_code_rt(passed_interp, oc::Core.OpaqueClosure, types, optimize::Bool) @nospecialize oc types ccall(:jl_is_in_pure_context, Bool, ()) && error("code reflection cannot be used from generated functions") @@ -393,9 +425,9 @@ function get_oc_code_rt(oc::Core.OpaqueClosure, types, optimize::Bool) if isdefined(m, :source) if optimize tt = Tuple{typeof(oc.captures), to_tuple_type(types).parameters...} - mi = Compiler.specialize_method(m, tt, Core.svec()) - interp = Compiler.NativeInterpreter(m.primary_world) - code = Compiler.typeinf_code(interp, mi, optimize) + mi = specialize_method(m, tt, Core.svec()) + interp = invoke_interp_compiler(passed_interp, :_default_interp, m.primary_world) + code = invoke_interp_compiler(passed_interp, :typeinf_code, interp, mi, optimize) if code isa CodeInfo return Pair{CodeInfo, Any}(code, code.rettype) end @@ -418,9 +450,10 @@ end function code_typed_opaque_closure(oc::Core.OpaqueClosure, types; debuginfo::Symbol=:default, optimize::Bool=true, + interp=nothing, _...) @nospecialize oc types - (code, rt) = get_oc_code_rt(oc, types, optimize) + (code, rt) = get_oc_code_rt(interp, oc, types, optimize) debuginfo === :none && remove_linenums!(code) return Any[Pair{CodeInfo,Any}(code, rt)] end @@ -485,18 +518,20 @@ a full signature to query. function code_ircode_by_type( @nospecialize(tt::Type); world::UInt=get_world_counter(), - interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world), + interp=nothing, optimize_until::Union{Integer,AbstractString,Nothing}=nothing, ) + passed_interp = interp + interp = passed_interp === nothing ? 
invoke_default_compiler(:_default_interp, world) : interp (ccall(:jl_is_in_pure_context, Bool, ()) || world == typemax(UInt)) && error("code reflection cannot be used from generated functions") tt = to_tuple_type(tt) - matches = Compiler.findall(tt, Compiler.method_table(interp)) + matches = invoke_interp_compiler(passed_interp, :_findall_matches, interp, tt) matches === nothing && raise_match_failure(:code_ircode, tt) asts = [] for match in matches.matches match = match::Core.MethodMatch - (code, ty) = Compiler.typeinf_ircode(interp, match, optimize_until) + (code, ty) = invoke_interp_compiler(passed_interp, :typeinf_ircode, interp, match, optimize_until) if code === nothing push!(asts, match.method => Any) else @@ -506,24 +541,26 @@ function code_ircode_by_type( return asts end -function _builtin_return_type(interp::Compiler.AbstractInterpreter, +function _builtin_return_type(passed_interp, interp, @nospecialize(f::Core.Builtin), @nospecialize(types)) argtypes = Any[to_tuple_type(types).parameters...] - rt = Compiler.builtin_tfunction(interp, f, argtypes, nothing) - return Compiler.widenconst(rt) + rt = invoke_interp_compiler(passed_interp, :builtin_tfunction, interp, f, argtypes, nothing) + return invoke_interp_compiler(passed_interp, :widenconst, rt) end -function _builtin_effects(interp::Compiler.AbstractInterpreter, +function _builtin_effects(passed_interp, interp, @nospecialize(f::Core.Builtin), @nospecialize(types)) argtypes = Any[to_tuple_type(types).parameters...] - rt = Compiler.builtin_tfunction(interp, f, argtypes, nothing) - return Compiler.builtin_effects(Compiler.typeinf_lattice(interp), f, argtypes, rt) + rt = invoke_interp_compiler(passed_interp, :builtin_tfunction, interp, f, argtypes, nothing) + return invoke_interp_compiler(passed_interp, :builtin_effects, + invoke_interp_compiler(passed_interp, :typeinf_lattice, interp), + f, argtypes, rt) end -function _builtin_exception_type(interp::Compiler.AbstractInterpreter, +function _builtin_exception_type(passed_interp, interp, @nospecialize(f::Core.Builtin), @nospecialize(types)) - effects = _builtin_effects(interp, f, types) - return Compiler.is_nothrow(effects) ? Union{} : Any + effects = _builtin_effects(passed_interp, interp, f, types) + return invoke_interp_compiler(passed_interp, :is_nothrow, effects) ? Union{} : Any end check_generated_context(world::UInt) = @@ -579,20 +616,22 @@ julia> Base.return_types(sum, (Union{Vector{Int},UnitRange{Int}},)) """ function return_types(@nospecialize(f), @nospecialize(types=default_tt(f)); world::UInt=get_world_counter(), - interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) + interp=nothing) + passed_interp = interp + interp = passed_interp === nothing ? 
invoke_default_compiler(:_default_interp, world) : interp check_generated_context(world) if isa(f, Core.OpaqueClosure) - _, rt = only(code_typed_opaque_closure(f, types)) + _, rt = only(code_typed_opaque_closure(f, types; Compiler)) return Any[rt] elseif isa(f, Core.Builtin) - return Any[_builtin_return_type(interp, f, types)] + return Any[_builtin_return_type(passed_interp, interp, f, types)] end tt = signature_type(f, types) - matches = Compiler.findall(tt, Compiler.method_table(interp)) + matches = invoke_interp_compiler(passed_interp, :_findall_matches, interp, tt) matches === nothing && raise_match_failure(:return_types, tt) rts = Any[] for match in matches.matches - ty = Compiler.typeinf_type(interp, match::Core.MethodMatch) + ty = invoke_interp_compiler(passed_interp, :typeinf_type, interp, match::Core.MethodMatch) push!(rts, something(ty, Any)) end return rts @@ -647,20 +686,22 @@ On the other hand `Base.infer_return_type` returns one collective result that su """ function infer_return_type(@nospecialize(f), @nospecialize(types=default_tt(f)); world::UInt=get_world_counter(), - interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) + interp=nothing) + passed_interp = interp + interp = passed_interp === nothing ? invoke_default_compiler(:_default_interp, world) : interp check_generated_context(world) if isa(f, Core.OpaqueClosure) - return last(only(code_typed_opaque_closure(f, types))) + return last(only(code_typed_opaque_closure(f, types; interp=passed_interp))) elseif isa(f, Core.Builtin) - return _builtin_return_type(interp, f, types) + return _builtin_return_type(passed_interp, interp, f, types) end tt = signature_type(f, types) - matches = Compiler.findall(tt, Compiler.method_table(interp)) + matches = invoke_interp_compiler(passed_interp, :_findall_matches, interp, tt) matches === nothing && raise_match_failure(:infer_return_type, tt) rt = Union{} for match in matches.matches - ty = Compiler.typeinf_type(interp, match::Core.MethodMatch) - rt = Compiler.tmerge(rt, something(ty, Any)) + ty = invoke_interp_compiler(passed_interp, :typeinf_type, interp, match::Core.MethodMatch) + rt = invoke_interp_compiler(passed_interp, :tmerge, rt, something(ty, Any)) end return rt end @@ -717,32 +758,31 @@ julia> Base.infer_exception_types(throw_if_number, (Any,)) """ function infer_exception_types(@nospecialize(f), @nospecialize(types=default_tt(f)); world::UInt=get_world_counter(), - interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) + interp=nothing) + passed_interp = interp + interp = passed_interp === nothing ? 
invoke_default_compiler(:_default_interp, world) : interp check_generated_context(world) if isa(f, Core.OpaqueClosure) return Any[Any] # TODO elseif isa(f, Core.Builtin) - return Any[_builtin_exception_type(interp, f, types)] + return Any[_builtin_exception_type(passed_interp, interp, f, types)] end tt = signature_type(f, types) - matches = Compiler.findall(tt, Compiler.method_table(interp)) + matches = invoke_interp_compiler(passed_interp, :_findall_matches, interp, tt) matches === nothing && raise_match_failure(:infer_exception_types, tt) excts = Any[] for match in matches.matches - frame = Compiler.typeinf_frame(interp, match::Core.MethodMatch, #=run_optimizer=#false) + frame = invoke_interp_compiler(passed_interp, :typeinf_frame, interp, match::Core.MethodMatch, #=run_optimizer=#false) if frame === nothing exct = Any else - exct = Compiler.widenconst(frame.result.exc_result) + exct = invoke_interp_compiler(passed_interp, :widenconst, frame.result.exc_result) end push!(excts, exct) end return excts end -_may_throw_methoderror(matches#=::Core.Compiler.MethodLookupResult=#) = - matches.ambig || !any(match::Core.MethodMatch->match.fully_covers, matches.matches) - """ Base.infer_exception_type( f, types=default_tt(f); @@ -796,27 +836,18 @@ signature, the exception type is widened to `MethodError`. """ function infer_exception_type(@nospecialize(f), @nospecialize(types=default_tt(f)); world::UInt=get_world_counter(), - interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) + interp=nothing) + passed_interp = interp + interp = passed_interp === nothing ? invoke_default_compiler(:_default_interp, world) : interp check_generated_context(world) if isa(f, Core.OpaqueClosure) return Any # TODO elseif isa(f, Core.Builtin) - return _builtin_exception_type(interp, f, types) + return _builtin_exception_type(passed_interp, interp, f, types) end tt = signature_type(f, types) - matches = Compiler.findall(tt, Compiler.method_table(interp)) - matches === nothing && raise_match_failure(:infer_exception_type, tt) - exct = Union{} - if _may_throw_methoderror(matches) - # account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. - exct = Compiler.tmerge(exct, MethodError) - end - for match in matches.matches - match = match::Core.MethodMatch - frame = Compiler.typeinf_frame(interp, match, #=run_optimizer=#false) - frame === nothing && return Any - exct = Compiler.tmerge(exct, Compiler.widenconst(frame.result.exc_result)) - end + exct = invoke_interp_compiler(passed_interp, :_infer_exception_type, interp, tt, false) + exct === nothing && raise_match_failure(:infer_exception_type, tt) return exct end @@ -875,34 +906,25 @@ signature, the `:nothrow` bit gets tainted. The `Base.infer_effects` function should not be used from generated functions; doing so will result in an error. -$(Core.Compiler.effects_key_string) +$(Compiler.effects_key_string) # See Also -- [`Core.Compiler.Effects`](@ref): A type representing the computational effects of a method call. +- [`Compiler.Effects`](@ref): A type representing the computational effects of a method call. - [`Base.@assume_effects`](@ref): A macro for making assumptions about the effects of a method. """ function infer_effects(@nospecialize(f), @nospecialize(types=default_tt(f)); optimize::Bool=true, world::UInt=get_world_counter(), - interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) + interp=nothing) + passed_interp = interp + interp = passed_interp === nothing ? 
invoke_default_compiler(:_default_interp, world) : interp check_generated_context(world) if isa(f, Core.Builtin) - return _builtin_effects(interp, f, types) + return _builtin_effects(passed_interp, interp, f, types) end tt = signature_type(f, types) - matches = Compiler.findall(tt, Compiler.method_table(interp)) - matches === nothing && raise_match_failure(:infer_effects, tt) - effects = Compiler.EFFECTS_TOTAL - if _may_throw_methoderror(matches) - # account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. - effects = Compiler.Effects(effects; nothrow=false) - end - for match in matches.matches - match = match::Core.MethodMatch - frame = Compiler.typeinf_frame(interp, match, #=run_optimizer=#optimize) - frame === nothing && return Compiler.Effects() - effects = Compiler.merge_effects(effects, frame.result.ipo_effects) - end + effects = invoke_interp_compiler(passed_interp, :_infer_effects, interp, tt, optimize) + effects === nothing && raise_match_failure(:infer_effects, tt) return effects end @@ -919,24 +941,24 @@ end function print_statement_costs(io::IO, @nospecialize(tt::Type); world::UInt=get_world_counter(), - interp::Compiler.AbstractInterpreter=Compiler.NativeInterpreter(world)) + interp=nothing) + passed_interp = interp + interp = passed_interp === nothing ? invoke_default_compiler(:_default_interp, world) : interp tt = to_tuple_type(tt) world == typemax(UInt) && error("code reflection cannot be used from generated functions") - matches = Compiler.findall(tt, Compiler.method_table(interp)) + matches = invoke_interp_compiler(passed_interp, :_findall_matches, interp, tt) matches === nothing && raise_match_failure(:print_statement_costs, tt) - params = Compiler.OptimizationParams(interp) cst = Int[] for match in matches.matches match = match::Core.MethodMatch println(io, match.method) - code = Compiler.typeinf_code(interp, match, true) + code = invoke_interp_compiler(passed_interp, :typeinf_code, interp, match, true) if code === nothing println(io, " inference not successful") else empty!(cst) resize!(cst, length(code.code)) - sptypes = Compiler.VarState[Compiler.VarState(sp, false) for sp in match.sparams] - maxcost = Compiler.statement_costs!(cst, code.code, code, sptypes, params) + maxcost = invoke_interp_compiler(passed_interp, :statement_costs!, interp, cst, code.code, code, match) nd = ndigits(maxcost) irshow_config = IRShow.IRShowConfig() do io, linestart, idx print(io, idx > 0 ? lpad(cst[idx], nd+1) : " "^(nd+1), " ") @@ -951,18 +973,11 @@ end print_statement_costs(args...; kwargs...) = print_statement_costs(stdout, args...; kwargs...) 
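A minimal usage sketch of the reworked reflection entry points above (illustrative only): `Base.return_types` and the `interp` keyword come from the patch, while `MyInterp` is an assumed custom `AbstractInterpreter` supplied by an externally loaded compiler copy.

    # Default path: the caller no longer constructs a NativeInterpreter; the
    # active compiler implementation is resolved internally at call time.
    rts = Base.return_types(sin, (Float64,))          # Any[Float64]

    # Hypothetical custom interpreter: reflection is routed to the module that
    # defines the AbstractInterpreter supertype of `MyInterp`.
    # rts = Base.return_types(sin, (Float64,); interp = MyInterp())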
function _which(@nospecialize(tt::Type); - method_table::Union{Nothing,Core.MethodTable,Compiler.MethodTableView}=nothing, + method_table #=::Union{Nothing,Core.MethodTable,Compiler.MethodTableView}=# =nothing, world::UInt=get_world_counter(), raise::Bool=true) world == typemax(UInt) && error("code reflection cannot be used from generated functions") - if method_table === nothing - table = Compiler.InternalMethodTable(world) - elseif method_table isa Core.MethodTable - table = Compiler.OverlayMethodTable(world, method_table) - else - table = method_table - end - match, = Compiler.findsup(tt, table) + match, = invoke_default_compiler(:findsup_mt, tt, world, method_table) if match === nothing raise && error("no unique matching method found for the specified argument types") return nothing @@ -982,7 +997,7 @@ See also: [`parentmodule`](@ref), [`@which`](@ref Main.InteractiveUtils.@which), function which(@nospecialize(f), @nospecialize(t)) tt = signature_type(f, t) world = get_world_counter() - match, _ = Compiler._findsup(tt, nothing, world) + match, _ = invoke_default_compiler(:_findsup, tt, nothing, world) if match === nothing me = MethodError(f, t, world) ee = ErrorException(sprint(io -> begin @@ -1348,7 +1363,7 @@ julia> @macroexpand @invoke (xs::Xs)[i::I] = v::V The additional syntax is supported as of Julia 1.10. """ macro invoke(ex) - topmod = Compiler._topmod(__module__) # well, except, do not get it via CC but define it locally + topmod = _topmod(__module__) f, args, kwargs = destructure_callex(topmod, ex) types = Expr(:curly, :Tuple) out = Expr(:call, GlobalRef(Core, :invoke)) @@ -1407,7 +1422,7 @@ julia> @macroexpand @invokelatest xs[i] = v The additional `x.f` and `xs[i]` syntax requires Julia 1.10. """ macro invokelatest(ex) - topmod = Compiler._topmod(__module__) # well, except, do not get it via CC but define it locally + topmod = _topmod(__module__) f, args, kwargs = destructure_callex(topmod, ex) out = Expr(:call, GlobalRef(Base, :invokelatest)) isempty(kwargs) || push!(out.args, Expr(:parameters, kwargs...)) diff --git a/base/runtime_internals.jl b/base/runtime_internals.jl index 4a04d406550b7..cbbde7d9535a2 100644 --- a/base/runtime_internals.jl +++ b/base/runtime_internals.jl @@ -20,7 +20,7 @@ Base """ parentmodule(m::Module) = (@_total_meta; ccall(:jl_module_parent, Ref{Module}, (Any,), m)) -is_root_module(m::Module) = parentmodule(m) === m || (isdefined(Main, :Base) && m === Main.Base) +is_root_module(m::Module) = parentmodule(m) === m || m === Compiler || (isdefined(Main, :Base) && m === Main.Base) """ moduleroot(m::Module) -> Module @@ -1556,3 +1556,9 @@ function specialize_method(match::Core.MethodMatch; kwargs...) end hasintersect(@nospecialize(a), @nospecialize(b)) = typeintersect(a, b) !== Bottom + +########### +# scoping # +########### + +_topmod(m::Module) = ccall(:jl_base_relative_to, Any, (Any,), m)::Module diff --git a/base/show.jl b/base/show.jl index 26efd0a93f716..8f305107d10f5 100644 --- a/base/show.jl +++ b/base/show.jl @@ -1385,32 +1385,6 @@ function show_mi(io::IO, mi::Core.MethodInstance, from_stackframe::Bool=false) end end -# These sometimes show up as Const-values in InferenceFrameInfo signatures -function show(io::IO, mi_info::Compiler.Timings.InferenceFrameInfo) - mi = mi_info.mi - def = mi.def - if isa(def, Method) - if isdefined(def, :generator) && mi === def.generator - print(io, "InferenceFrameInfo generator for ") - show(io, def) - else - print(io, "InferenceFrameInfo for ") - argnames = [isa(a, Core.Const) ? (isa(a.val, Type) ? 
"" : a.val) : "" for a in mi_info.slottypes[1:mi_info.nargs]] - show_tuple_as_call(io, def.name, mi.specTypes; argnames, qualified=true) - end - else - di = mi.cache.inferred.debuginfo - file, line = IRShow.debuginfo_firstline(di) - file = string(file) - line = isempty(file) || line < 0 ? "" : "$file:$line" - print(io, "Toplevel InferenceFrameInfo thunk from ", def, " starting at ", line) - end -end - -function show(io::IO, tinf::Compiler.Timings.Timing) - print(io, "Compiler.Timings.Timing(", tinf.mi_info, ") with ", length(tinf.children), " children") -end - function show_delim_array(io::IO, itr::Union{AbstractArray,SimpleVector}, op, delim, cl, delim_one, i1=first(LinearIndices(itr)), l=last(LinearIndices(itr))) print(io, op) @@ -2855,8 +2829,10 @@ module IRShow isexpr, compute_basic_blocks, block_for_inst, IncrementalCompact, Effects, ALWAYS_TRUE, ALWAYS_FALSE, DebugInfoStream, getdebugidx, VarState, InvalidIRError, argextype, widenconst, singleton_type, - sptypes_from_meth_instance, EMPTY_SPTYPES - include("compiler/ssair/show.jl") + sptypes_from_meth_instance, EMPTY_SPTYPES, InferenceState, + NativeInterpreter, CachedMethodTable, LimitedAccuracy, Timings + + Base.include(IRShow, Base.strcat(Base.BUILDROOT, "../usr/share/julia/Compiler/src/ssair/show.jl")) const __debuginfo = Dict{Symbol, Any}( # :full => src -> statementidx_lineinfo_printer(src), # and add variable slot information @@ -2883,34 +2859,63 @@ function show(io::IO, src::CodeInfo; debuginfo::Symbol=:source) print(io, ")") end -function show(io::IO, inferred::Compiler.InferenceResult) - mi = inferred.linfo - tt = mi.specTypes.parameters[2:end] - tts = join(["::$(t)" for t in tt], ", ") - rettype = inferred.result - if isa(rettype, Compiler.InferenceState) - rettype = rettype.bestguess - end - if isa(mi.def, Method) - print(io, mi.def.name, "(", tts, " => ", rettype, ")") - else - print(io, "Toplevel MethodInstance thunk from ", mi.def, " => ", rettype) +show_unquoted(io::IO, val::Argument, indent::Int, prec::Int) = show_unquoted(io, Core.SlotNumber(val.n), indent, prec) + +show_unquoted(io::IO, stmt::PhiNode, indent::Int, ::Int) = show_unquoted_phinode(io, stmt, indent, "%") +function show_unquoted_phinode(io::IO, stmt::PhiNode, indent::Int, prefix::String) + args = String[let + e = stmt.edges[i] + v = !isassigned(stmt.values, i) ? "#undef" : + sprint(; context=io) do io′ + show_unquoted(io′, stmt.values[i], indent) + end + "$prefix$e => $v" + end for i in 1:length(stmt.edges) + ] + print(io, "φ ", '(') + join(io, args, ", ") + print(io, ')') +end + +function show_unquoted(io::IO, stmt::PhiCNode, indent::Int, ::Int) + print(io, "φᶜ (") + first = true + for v in stmt.values + first ? (first = false) : print(io, ", ") + show_unquoted(io, v, indent) end + print(io, ")") end -show(io::IO, sv::Compiler.InferenceState) = - (print(io, "InferenceState for "); show(io, sv.linfo)) +function show_unquoted(io::IO, stmt::PiNode, indent::Int, ::Int) + print(io, "π (") + show_unquoted(io, stmt.val, indent) + print(io, ", ") + printstyled(io, stmt.typ, color=:cyan) + print(io, ")") +end -show(io::IO, ::Compiler.NativeInterpreter) = - print(io, "Core.Compiler.NativeInterpreter(...)") +function show_unquoted(io::IO, stmt::UpsilonNode, indent::Int, ::Int) + print(io, "ϒ (") + isdefined(stmt, :val) ? 
+ show_unquoted(io, stmt.val, indent) : + print(io, "#undef") + print(io, ")") +end -show(io::IO, cache::Compiler.CachedMethodTable) = - print(io, typeof(cache), "(", Compiler.length(cache.cache), " entries)") +function show_unquoted(io::IO, stmt::ReturnNode, indent::Int, ::Int) + if !isdefined(stmt, :val) + print(io, "unreachable") + else + print(io, "return ") + show_unquoted(io, stmt.val, indent) + end +end -function show(io::IO, limited::Compiler.LimitedAccuracy) - print(io, "Compiler.LimitedAccuracy(") - show(io, limited.typ) - print(io, ", #= ", Compiler.length(limited.causes), " cause(s) =#)") +show_unquoted(io::IO, stmt::GotoIfNot, indent::Int, ::Int) = show_unquoted_gotoifnot(io, stmt, indent, "%") +function show_unquoted_gotoifnot(io::IO, stmt::GotoIfNot, indent::Int, prefix::String) + print(io, "goto ", prefix, stmt.dest, " if not ") + show_unquoted(io, stmt.cond, indent) end function dump(io::IOContext, x::SimpleVector, n::Int, indent) diff --git a/base/stacktraces.jl b/base/stacktraces.jl index c3d86fc8f5151..e16757541cfc9 100644 --- a/base/stacktraces.jl +++ b/base/stacktraces.jl @@ -124,7 +124,7 @@ end const top_level_scope_sym = Symbol("top-level scope") -function lookup(ip::Union{Base.InterpreterIP}) +function lookup(ip::Base.InterpreterIP) code = ip.code if code === nothing # interpreted top-level expression with no CodeInfo diff --git a/base/sysimg.jl b/base/sysimg.jl index 8347d63d5b740..476b9715b7e11 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -1,13 +1,17 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -# Can be built either a monolith or with a minimal Base image that just has the -# compiler. -if isdefined(Main, :Base); else -Core.eval(Main, :(baremodule Base; end)) +# Can be be loaded on top of either an existing system image built from +# `Base_compiler.jl` or standalone, in which case we will build it now. +let had_compiler = isdefined(Main, :Base) +if had_compiler; else +include("Base_compiler.jl") end Core.include(Base, "Base.jl") +had_compiler && ccall(:jl_init_restored_module, Cvoid, (Any,), Base) +end + using .Base # Set up Main module diff --git a/contrib/generate_precompile.jl b/contrib/generate_precompile.jl index 55de3492e9447..ffbbfd620997f 100644 --- a/contrib/generate_precompile.jl +++ b/contrib/generate_precompile.jl @@ -359,6 +359,7 @@ generate_precompile_statements() = try # Make sure `ansi_enablecursor` is printe eval(PrecompileStagingArea, :(const $(Symbol(_mod)) = $_mod)) end end + eval(PrecompileStagingArea, :(const Compiler = Base.Compiler)) n_succeeded = 0 # Make statements unique diff --git a/doc/src/devdocs/EscapeAnalysis.md b/doc/src/devdocs/EscapeAnalysis.md index 1bd7868790f7f..ea874bf7371b0 100644 --- a/doc/src/devdocs/EscapeAnalysis.md +++ b/doc/src/devdocs/EscapeAnalysis.md @@ -1,6 +1,6 @@ # `EscapeAnalysis` -`Core.Compiler.EscapeAnalysis` is a compiler utility module that aims to analyze +`Compiler.EscapeAnalysis` is a compiler utility module that aims to analyze escape information of [Julia's SSA-form IR](@ref Julia-SSA-form-IR) a.k.a. `IRCode`. 
This escape analysis aims to: @@ -59,7 +59,7 @@ The symbols on the side of each call argument and SSA statements represent the f - `✓` (green or cyan): this value never escapes (`has_no_escape(result.state[x])` holds), colored blue if it has arg escape also (`has_arg_escape(result.state[x])` holds) - `↑` (blue or yellow): this value can escape to the caller via return (`has_return_escape(result.state[x])` holds), colored yellow if it has unhandled thrown escape also (`has_thrown_escape(result.state[x])` holds) - `X` (red): this value can escape to somewhere the escape analysis can't reason about like escapes to a global memory (`has_all_escape(result.state[x])` holds) -- `*` (bold): this value's escape state is between the `ReturnEscape` and `AllEscape` in the partial order of [`EscapeInfo`](@ref Core.Compiler.EscapeAnalysis.EscapeInfo), colored yellow if it has unhandled thrown escape also (`has_thrown_escape(result.state[x])` holds) +- `*` (bold): this value's escape state is between the `ReturnEscape` and `AllEscape` in the partial order of [`EscapeInfo`](@ref Base.Compiler.EscapeAnalysis.EscapeInfo), colored yellow if it has unhandled thrown escape also (`has_thrown_escape(result.state[x])` holds) - `′`: this value has additional object field / array element information in its `AliasInfo` property Escape information of each call argument and SSA value can be inspected programmatically as like: @@ -74,7 +74,7 @@ result.state[Core.SSAValue(3)] # get EscapeInfo of `r3` ### Lattice Design `EscapeAnalysis` is implemented as a [data-flow analysis](https://en.wikipedia.org/wiki/Data-flow_analysis) -that works on a lattice of [`x::EscapeInfo`](@ref Core.Compiler.EscapeAnalysis.EscapeInfo), +that works on a lattice of [`x::EscapeInfo`](@ref Base.Compiler.EscapeAnalysis.EscapeInfo), which is composed of the following properties: - `x.Analyzed::Bool`: not formally part of the lattice, only indicates `x` has not been analyzed or not - `x.ReturnEscape::BitSet`: records SSA statements where `x` can escape to the caller via return @@ -366,9 +366,9 @@ More interestingly, it is also valid to use `IPO EA` escape information for type e.g., inference accuracy can be improved by forming `Const`/`PartialStruct`/`MustAlias` of mutable object. ```@docs -Core.Compiler.EscapeAnalysis.analyze_escapes -Core.Compiler.EscapeAnalysis.EscapeState -Core.Compiler.EscapeAnalysis.EscapeInfo +Base.Compiler.EscapeAnalysis.analyze_escapes +Base.Compiler.EscapeAnalysis.EscapeState +Base.Compiler.EscapeAnalysis.EscapeInfo ``` -------------------------------------------------------------------------------------------- diff --git a/src/module.c b/src/module.c index 08ad0d64dbf55..85813af6adc6f 100644 --- a/src/module.c +++ b/src/module.c @@ -1276,6 +1276,7 @@ JL_DLLEXPORT jl_uuid_t jl_module_uuid(jl_module_t* m) { return m->uuid; } // TODO: make this part of the module constructor and read-only? 
JL_DLLEXPORT void jl_set_module_uuid(jl_module_t *m, jl_uuid_t uuid) { m->uuid = uuid; } +JL_DLLEXPORT void jl_set_module_parent(jl_module_t *m, jl_module_t *parent) { m->parent = parent; } int jl_is_submodule(jl_module_t *child, jl_module_t *parent) JL_NOTSAFEPOINT { @@ -1308,15 +1309,20 @@ JL_DLLEXPORT void jl_clear_implicit_imports(jl_module_t *m) JL_UNLOCK(&m->lock); } +JL_DLLEXPORT void jl_add_to_module_init_list(jl_value_t *mod) +{ + if (jl_module_init_order == NULL) + jl_module_init_order = jl_alloc_vec_any(0); + jl_array_ptr_1d_push(jl_module_init_order, mod); +} + JL_DLLEXPORT void jl_init_restored_module(jl_value_t *mod) { if (!jl_generating_output() || jl_options.incremental) { jl_module_run_initializer((jl_module_t*)mod); } else { - if (jl_module_init_order == NULL) - jl_module_init_order = jl_alloc_vec_any(0); - jl_array_ptr_1d_push(jl_module_init_order, mod); + jl_add_to_module_init_list(mod); } } diff --git a/src/precompile_utils.c b/src/precompile_utils.c index fc361d8b88e6f..01e8a2040a751 100644 --- a/src/precompile_utils.c +++ b/src/precompile_utils.c @@ -281,6 +281,12 @@ static void *jl_precompile(int all) return native_code; } +static int suppress_precompile = 0; +JL_DLLEXPORT void jl_suppress_precompile(int suppress) +{ + suppress_precompile = suppress; +} + static void *jl_precompile_worklist(jl_array_t *worklist, jl_array_t *extext_methods, jl_array_t *new_ext_cis) { if (!worklist) @@ -289,34 +295,36 @@ static void *jl_precompile_worklist(jl_array_t *worklist, jl_array_t *extext_met // type signatures that were inferred but haven't been compiled jl_array_t *m = jl_alloc_vec_any(0); JL_GC_PUSH1(&m); - size_t i, n = jl_array_nrows(worklist); - for (i = 0; i < n; i++) { - jl_module_t *mod = (jl_module_t*)jl_array_ptr_ref(worklist, i); - assert(jl_is_module(mod)); - foreach_mtable_in_module(mod, precompile_enq_all_specializations_, m); - } - n = jl_array_nrows(extext_methods); - for (i = 0; i < n; i++) { - jl_method_t *method = (jl_method_t*)jl_array_ptr_ref(extext_methods, i); - assert(jl_is_method(method)); - jl_value_t *specializations = jl_atomic_load_relaxed(&method->specializations); - if (!jl_is_svec(specializations)) { - precompile_enq_specialization_((jl_method_instance_t*)specializations, m); + if (!suppress_precompile) { + size_t i, n = jl_array_nrows(worklist); + for (i = 0; i < n; i++) { + jl_module_t *mod = (jl_module_t*)jl_array_ptr_ref(worklist, i); + assert(jl_is_module(mod)); + foreach_mtable_in_module(mod, precompile_enq_all_specializations_, m); } - else { - size_t j, l = jl_svec_len(specializations); - for (j = 0; j < l; j++) { - jl_value_t *mi = jl_svecref(specializations, j); - if (mi != jl_nothing) - precompile_enq_specialization_((jl_method_instance_t*)mi, m); + n = jl_array_nrows(extext_methods); + for (i = 0; i < n; i++) { + jl_method_t *method = (jl_method_t*)jl_array_ptr_ref(extext_methods, i); + assert(jl_is_method(method)); + jl_value_t *specializations = jl_atomic_load_relaxed(&method->specializations); + if (!jl_is_svec(specializations)) { + precompile_enq_specialization_((jl_method_instance_t*)specializations, m); + } + else { + size_t j, l = jl_svec_len(specializations); + for (j = 0; j < l; j++) { + jl_value_t *mi = jl_svecref(specializations, j); + if (mi != jl_nothing) + precompile_enq_specialization_((jl_method_instance_t*)mi, m); + } } } - } - if (new_ext_cis) { - n = jl_array_nrows(new_ext_cis); - for (i = 0; i < n; i++) { - jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(new_ext_cis, i); - 
precompile_enq_specialization_(ci->def, m); + if (new_ext_cis) { + n = jl_array_nrows(new_ext_cis); + for (i = 0; i < n; i++) { + jl_code_instance_t *ci = (jl_code_instance_t*)jl_array_ptr_ref(new_ext_cis, i); + precompile_enq_specialization_(ci->def, m); + } } } void *native_code = jl_precompile_(m, 1); diff --git a/src/toplevel.c b/src/toplevel.c index 45143f99a178c..b0163683cf87c 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -1289,6 +1289,21 @@ JL_DLLEXPORT jl_value_t *jl_prepend_cwd(jl_value_t *str) return jl_cstr_to_string(path); } +JL_DLLEXPORT jl_value_t *jl_prepend_string(jl_value_t *prefix, jl_value_t *str) +{ + char path[1024]; + const char *pstr = (const char*)jl_string_data(prefix); + size_t sz = strlen(pstr); + const char *fstr = (const char*)jl_string_data(str); + if (strlen(fstr) + sz >= sizeof(path)) { + jl_errorf("use a bigger buffer for jl_fullpath"); + } + strcpy(path, pstr); + strcpy(path + sz, fstr); + return jl_cstr_to_string(path); +} + + #ifdef __cplusplus } #endif diff --git a/stdlib/InteractiveUtils/src/InteractiveUtils.jl b/stdlib/InteractiveUtils/src/InteractiveUtils.jl index f14e2f7de2f49..aa13fa3cdd31d 100644 --- a/stdlib/InteractiveUtils/src/InteractiveUtils.jl +++ b/stdlib/InteractiveUtils/src/InteractiveUtils.jl @@ -11,7 +11,8 @@ Base.Experimental.@optlevel 1 export apropos, edit, less, code_warntype, code_llvm, code_native, methodswith, varinfo, versioninfo, subtypes, supertypes, @which, @edit, @less, @functionloc, @code_warntype, - @code_typed, @code_lowered, @code_llvm, @code_native, @time_imports, clipboard, @trace_compile, @trace_dispatch + @code_typed, @code_lowered, @code_llvm, @code_native, @time_imports, clipboard, @trace_compile, @trace_dispatch, + @activate import Base.Docs.apropos diff --git a/stdlib/InteractiveUtils/src/codeview.jl b/stdlib/InteractiveUtils/src/codeview.jl index 030955b8e36d8..1aa83a19285ff 100644 --- a/stdlib/InteractiveUtils/src/codeview.jl +++ b/stdlib/InteractiveUtils/src/codeview.jl @@ -239,7 +239,7 @@ function _dump_function(@nospecialize(f), @nospecialize(t), native::Bool, wrappe if !isa(f, Core.OpaqueClosure) src = Base.Compiler.typeinf_code(Base.Compiler.NativeInterpreter(world), mi, true) else - src, rt = Base.get_oc_code_rt(f, tt, true) + src, rt = Base.get_oc_code_rt(nothing, f, tt, true) end src isa Core.CodeInfo || error("failed to infer source for $mi") str = _dump_function_native_assembly(mi, src, wrapper, syntax, debuginfo, binary, raw, params) @@ -248,7 +248,7 @@ function _dump_function(@nospecialize(f), @nospecialize(t), native::Bool, wrappe if !isa(f, Core.OpaqueClosure) src = Base.Compiler.typeinf_code(Base.Compiler.NativeInterpreter(world), mi, true) else - src, rt = Base.get_oc_code_rt(f, tt, true) + src, rt = Base.get_oc_code_rt(nothing, f, tt, true) end src isa Core.CodeInfo || error("failed to infer source for $mi") str = _dump_function_llvm(mi, src, wrapper, !raw, dump_module, optimize, debuginfo, params) diff --git a/stdlib/InteractiveUtils/src/macros.jl b/stdlib/InteractiveUtils/src/macros.jl index a21bf30dbcd6c..68afc40976275 100644 --- a/stdlib/InteractiveUtils/src/macros.jl +++ b/stdlib/InteractiveUtils/src/macros.jl @@ -429,3 +429,64 @@ like the julia arg `--trace-dispatch=stderr` but specifically for a call. """ :@trace_dispatch + +""" + @activate Component + +Activate a newly loaded copy of an otherwise builtin component. The `Component` +to be activated will be resolved using the ordinary rules of module resolution +in the current environment. 
+ +When using `@activate`, additional options for a component may be specified in +square brackets `@activate Compiler[:option1, :option2]` + +Currently `@activate Compiler` is the only available component that may be +activated. + +For `@activate Compiler`, the following options are available: +1. `:reflection` - Activate the compiler for reflection purposes only. + The ordinary reflection functionality in `Base` and `InteractiveUtils` + will use the newly loaded compiler. Note, however, that these reflection + functions will still interact with the ordinary native cache (both loading + and storing). An incorrect compiler implementation may thus corrupt runtime + state if reflection is used. Use external packages like `Cthulhu.jl` + for introspecting compiler behavior with a separate cache partition. + +2. `:codegen` - Activate the compiler for internal codegen purposes. The new compiler + will be invoked whenever the runtime requests compilation. + +`@activate Compiler` without options is equivalent to `@activate Compiler[:reflection]`. + +""" +macro activate(what) + options = Symbol[] + if Meta.isexpr(what, :ref) + Component = what.args[1] + for i = 2:length(what.args) + arg = what.args[i] + if !isa(arg, QuoteNode) || !isa(arg.value, Symbol) + error("Usage Error: Option $arg is not a symbol") + end + push!(options, arg.value) + end + else + Component = what + end + if !isa(Component, Symbol) + error("Usage Error: Component $Component is not a symbol") + end + allowed_components = (:Compiler,) + if !(Component in allowed_components) + error("Usage Error: Component $Component is not recognized. Expected one of $allowed_components") + end + s = gensym() + if Component === :Compiler && isempty(options) + push!(options, :reflection) + end + options = map(options) do opt + Expr(:kw, opt, true) + end + Expr(:toplevel, + esc(:(import $Component as $s)), + esc(:($s.activate!(;$(options...))))) +end diff --git a/stdlib/Makefile b/stdlib/Makefile index ebc40c9db2b12..aacf7ca30e146 100644 --- a/stdlib/Makefile +++ b/stdlib/Makefile @@ -39,7 +39,6 @@ install-$$($(1)_JLL_NAME)_jll: get-$$($(1)_JLL_NAME)_jll endef $(foreach jll,$(JLLS),$(eval $(call download-artifacts-toml,$(jll)))) - STDLIBS = Artifacts Base64 CRC32c Dates FileWatching \ Future InteractiveUtils Libdl LibGit2 LinearAlgebra Logging \ Markdown Mmap Printf Profile Random REPL Serialization \ @@ -56,7 +55,6 @@ ifneq ($(filter $(STDLIBS),$(STDLIBS_EXT)),) $(error ERROR duplicated STDLIBS in list) endif - # Generate symlinks to all stdlibs at usr/share/julia/stdlib/vX.Y/ $(foreach module, $(STDLIBS), $(eval $(call symlink_target,$$(JULIAHOME)/stdlib/$(module),$$(build_datarootdir)/julia/stdlib/$$(VERSDIR),$(module)))) diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index 5142dd5e7f680..df3a0cad76878 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -5,6 +5,9 @@ module REPLCompletions export completions, shell_completions, bslash_completions, completion_text using Core: Const +# We want to insulate the REPLCompletion module from any changes the user may +# make to the compiler, since it runs by default and the system becomes unusable +# if it breaks.
const CC = Base.Compiler using Base.Meta using Base: propertynames, something, IdSet diff --git a/sysimage.mk b/sysimage.mk index d3dee6906ccfa..9e3e52157aa45 100644 --- a/sysimage.mk +++ b/sysimage.mk @@ -23,7 +23,6 @@ $(build_private_libdir)/%.$(SHLIB_EXT): $(build_private_libdir)/%-o.a COMPILER_SRCS := $(addprefix $(JULIAHOME)/, \ base/Base_compiler.jl \ - base/compilerimg.jl \ base/boot.jl \ base/docs/core.jl \ base/abstractarray.jl \ @@ -55,7 +54,7 @@ COMPILER_SRCS := $(addprefix $(JULIAHOME)/, \ base/traits.jl \ base/refvalue.jl \ base/tuple.jl) -COMPILER_SRCS += $(shell find $(JULIAHOME)/base/compiler -name \*.jl) +COMPILER_SRCS += $(shell find $(JULIAHOME)/Compiler/src -name \*.jl) # sort these to remove duplicates BASE_SRCS := $(sort $(shell find $(JULIAHOME)/base -name \*.jl -and -not -name sysimg.jl) \ $(shell find $(BUILDROOT)/base -name \*.jl -and -not -name sysimg.jl)) @@ -65,7 +64,7 @@ RELBUILDROOT := $(call rel_path,$(JULIAHOME)/base,$(BUILDROOT)/base)/ # <-- make $(build_private_libdir)/basecompiler.ji: $(COMPILER_SRCS) @$(call PRINT_JULIA, cd $(JULIAHOME)/base && \ $(call spawn,$(JULIA_EXECUTABLE)) -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp \ - --startup-file=no --warn-overwrite=yes -g$(BOOTSTRAP_DEBUG_LEVEL) -O1 compilerimg.jl) + --startup-file=no --warn-overwrite=yes -g$(BOOTSTRAP_DEBUG_LEVEL) -O1 Base_compiler.jl $(RELBUILDROOT)) @mv $@.tmp $@ $(build_private_libdir)/sys.ji: $(build_private_libdir)/basecompiler.ji $(JULIAHOME)/VERSION $(BASE_SRCS) $(STDLIB_SRCS) diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index 9fafc9bdca6ad..8a14774e2404f 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -1535,7 +1535,7 @@ let nfields_tfunc(@nospecialize xs...) = @test sizeof_nothrow(String) @test !sizeof_nothrow(Type{String}) @test sizeof_tfunc(Type{Union{Int64, Int32}}) == Const(Core.sizeof(Union{Int64, Int32})) - let PT = Core.PartialStruct(Tuple{Int64,UInt64}, Any[Const(10), UInt64]) + let PT = Core.PartialStruct(Base.Compiler.fallback_lattice, Tuple{Int64,UInt64}, Any[Const(10), UInt64]) @test sizeof_tfunc(PT) === Const(16) @test nfields_tfunc(PT) === Const(2) @test sizeof_nothrow(PT) @@ -3381,9 +3381,9 @@ struct FooPartial b::Int c::Int end -let PT1 = PartialStruct(FooPartial, Any[Const(1), Const(2), Int]), - PT2 = PartialStruct(FooPartial, Any[Const(1), Int, Int]), - PT3 = PartialStruct(FooPartial, Any[Const(1), Int, Const(3)]) +let PT1 = PartialStruct(Base.Compiler.fallback_lattice, FooPartial, Any[Const(1), Const(2), Int]), + PT2 = PartialStruct(Base.Compiler.fallback_lattice, FooPartial, Any[Const(1), Int, Int]), + PT3 = PartialStruct(Base.Compiler.fallback_lattice, FooPartial, Any[Const(1), Int, Const(3)]) @test PT1 ⊑ PT2 @test !(PT1 ⊑ PT3) && !(PT2 ⊑ PT1) @@ -4635,18 +4635,18 @@ end @testset "issue #43784" begin ⊑ = Core.Compiler.partialorder(Core.Compiler.fallback_lattice) ⊔ = Core.Compiler.join(Core.Compiler.fallback_lattice) + 𝕃 = Core.Compiler.fallback_lattice Const, PartialStruct = Core.Const, Core.PartialStruct - let init = Base.ImmutableDict{Any,Any}() a = Const(init) - b = PartialStruct(typeof(init), Any[Const(init), Any, Any]) + b = PartialStruct(𝕃, typeof(init), Any[Const(init), Any, Any]) c = a ⊔ b @test a ⊑ c && b ⊑ c @test c === typeof(init) end let init = Base.ImmutableDict{Any,Any}(1,2) a = Const(init) - b = PartialStruct(typeof(init), Any[Const(getfield(init,1)), Any, Any]) + b = PartialStruct(𝕃, typeof(init), Any[Const(getfield(init,1)), Any, Any]) c = a ⊔ b @test a ⊑ c && b ⊑ 
c @test c isa PartialStruct @@ -4654,14 +4654,14 @@ end end let init = Base.ImmutableDict{Number,Number}() a = Const(init) - b = PartialStruct(typeof(init), Any[Const(init), Number, ComplexF64]) + b = PartialStruct(𝕃, typeof(init), Any[Const(init), Number, ComplexF64]) c = a ⊔ b @test a ⊑ c && b ⊑ c @test c === typeof(init) end let init = Base.ImmutableDict{Number,Number}() - a = PartialStruct(typeof(init), Any[Const(init), ComplexF64, ComplexF64]) - b = PartialStruct(typeof(init), Any[Const(init), Number, ComplexF64]) + a = PartialStruct(𝕃, typeof(init), Any[Const(init), ComplexF64, ComplexF64]) + b = PartialStruct(𝕃, typeof(init), Any[Const(init), Number, ComplexF64]) c = a ⊔ b @test a ⊑ c && b ⊑ c @test c isa PartialStruct @@ -4669,8 +4669,8 @@ end @test c.fields[3] === ComplexF64 end let init = Base.ImmutableDict{Number,Number}() - a = PartialStruct(typeof(init), Any[Const(init), ComplexF64, ComplexF64]) - b = PartialStruct(typeof(init), Any[Const(init), ComplexF32, Union{ComplexF32,ComplexF64}]) + a = PartialStruct(𝕃, typeof(init), Any[Const(init), ComplexF64, ComplexF64]) + b = PartialStruct(𝕃, typeof(init), Any[Const(init), ComplexF32, Union{ComplexF32,ComplexF64}]) c = a ⊔ b @test a ⊑ c && b ⊑ c @test c isa PartialStruct @@ -4678,8 +4678,8 @@ end @test c.fields[3] === Complex end let T = Base.ImmutableDict{Number,Number} - a = PartialStruct(T, Any[T]) - b = PartialStruct(T, Any[T, Number, Number]) + a = PartialStruct(𝕃, T, Any[T]) + b = PartialStruct(𝕃, T, Any[T, Number, Number]) @test b ⊑ a c = a ⊔ b @test a ⊑ c && b ⊑ c @@ -4687,7 +4687,7 @@ end @test length(c.fields) == 1 end let T = Base.ImmutableDict{Number,Number} - a = PartialStruct(T, Any[T]) + a = PartialStruct(𝕃, T, Any[T]) b = Const(T()) c = a ⊔ b @test a ⊑ c && b ⊑ c @@ -4695,7 +4695,7 @@ end end let T = Base.ImmutableDict{Number,Number} a = Const(T()) - b = PartialStruct(T, Any[T]) + b = PartialStruct(𝕃, T, Any[T]) c = a ⊔ b @test a ⊑ c && b ⊑ c @test c === T @@ -4742,22 +4742,23 @@ end let ⊑ = Core.Compiler.partialorder(Core.Compiler.fallback_lattice) ⊔ = Core.Compiler.join(Core.Compiler.fallback_lattice) + 𝕃 = Core.Compiler.fallback_lattice Const, PartialStruct = Core.Const, Core.PartialStruct - @test (Const((1,2)) ⊑ PartialStruct(Tuple{Int,Int}, Any[Const(1),Int])) - @test !(Const((1,2)) ⊑ PartialStruct(Tuple{Int,Int,Int}, Any[Const(1),Int,Int])) - @test !(Const((1,2,3)) ⊑ PartialStruct(Tuple{Int,Int}, Any[Const(1),Int])) - @test (Const((1,2,3)) ⊑ PartialStruct(Tuple{Int,Int,Int}, Any[Const(1),Int,Int])) - @test (Const((1,2)) ⊑ PartialStruct(Tuple{Int,Vararg{Int}}, Any[Const(1),Vararg{Int}])) - @test (Const((1,2)) ⊑ PartialStruct(Tuple{Int,Int,Vararg{Int}}, Any[Const(1),Int,Vararg{Int}])) broken=true - @test (Const((1,2,3)) ⊑ PartialStruct(Tuple{Int,Int,Vararg{Int}}, Any[Const(1),Int,Vararg{Int}])) - @test !(PartialStruct(Tuple{Int,Int}, Any[Const(1),Int]) ⊑ Const((1,2))) - @test !(PartialStruct(Tuple{Int,Int,Int}, Any[Const(1),Int,Int]) ⊑ Const((1,2))) - @test !(PartialStruct(Tuple{Int,Int}, Any[Const(1),Int]) ⊑ Const((1,2,3))) - @test !(PartialStruct(Tuple{Int,Int,Int}, Any[Const(1),Int,Int]) ⊑ Const((1,2,3))) - @test !(PartialStruct(Tuple{Int,Vararg{Int}}, Any[Const(1),Vararg{Int}]) ⊑ Const((1,2))) - @test !(PartialStruct(Tuple{Int,Int,Vararg{Int}}, Any[Const(1),Int,Vararg{Int}]) ⊑ Const((1,2))) - @test !(PartialStruct(Tuple{Int,Int,Vararg{Int}}, Any[Const(1),Int,Vararg{Int}]) ⊑ Const((1,2,3))) + @test (Const((1,2)) ⊑ PartialStruct(𝕃, Tuple{Int,Int}, Any[Const(1),Int])) + @test !(Const((1,2)) ⊑ PartialStruct(𝕃, 
Tuple{Int,Int,Int}, Any[Const(1),Int,Int])) + @test !(Const((1,2,3)) ⊑ PartialStruct(𝕃, Tuple{Int,Int}, Any[Const(1),Int])) + @test (Const((1,2,3)) ⊑ PartialStruct(𝕃, Tuple{Int,Int,Int}, Any[Const(1),Int,Int])) + @test (Const((1,2)) ⊑ PartialStruct(𝕃, Tuple{Int,Vararg{Int}}, Any[Const(1),Vararg{Int}])) + @test (Const((1,2)) ⊑ PartialStruct(𝕃, Tuple{Int,Int,Vararg{Int}}, Any[Const(1),Int,Vararg{Int}])) broken=true + @test (Const((1,2,3)) ⊑ PartialStruct(𝕃, Tuple{Int,Int,Vararg{Int}}, Any[Const(1),Int,Vararg{Int}])) + @test !(PartialStruct(𝕃, Tuple{Int,Int}, Any[Const(1),Int]) ⊑ Const((1,2))) + @test !(PartialStruct(𝕃, Tuple{Int,Int,Int}, Any[Const(1),Int,Int]) ⊑ Const((1,2))) + @test !(PartialStruct(𝕃, Tuple{Int,Int}, Any[Const(1),Int]) ⊑ Const((1,2,3))) + @test !(PartialStruct(𝕃, Tuple{Int,Int,Int}, Any[Const(1),Int,Int]) ⊑ Const((1,2,3))) + @test !(PartialStruct(𝕃, Tuple{Int,Vararg{Int}}, Any[Const(1),Vararg{Int}]) ⊑ Const((1,2))) + @test !(PartialStruct(𝕃, Tuple{Int,Int,Vararg{Int}}, Any[Const(1),Int,Vararg{Int}]) ⊑ Const((1,2))) + @test !(PartialStruct(𝕃, Tuple{Int,Int,Vararg{Int}}, Any[Const(1),Int,Vararg{Int}]) ⊑ Const((1,2,3))) t = Const((false, false)) ⊔ Const((false, true)) @test t isa PartialStruct && length(t.fields) == 2 && t.fields[1] === Const(false) @@ -4899,7 +4900,7 @@ let src = code_typed1() do end # Test that Const ⊑ PartialStruct respects vararg -@test Const((1,2)) ⊑ PartialStruct(Tuple{Vararg{Int}}, [Const(1), Vararg{Int}]) +@test Const((1,2)) ⊑ PartialStruct(Core.Compiler.fallback_lattice, Tuple{Vararg{Int}}, [Const(1), Vararg{Int}]) # Test that semi-concrete interpretation doesn't break on functions with while loops in them. Base.@assume_effects :consistent :effect_free :terminates_globally function pure_annotated_loop(x::Int, y::Int) From f336314762bbd982daa8ef65636b470103582074 Mon Sep 17 00:00:00 2001 From: Miles Cranmer Date: Thu, 7 Nov 2024 18:05:34 -0500 Subject: [PATCH 395/537] Make heap size hint available as an env variable (#55631) This makes `JULIA_HEAP_SIZE_HINT` the environment variable version of the `--heap-size-hint` command-line flag. Seems like there was interest in https://github.com/JuliaLang/julia/pull/45369#issuecomment-1544204022. The same syntax is used as for the command-line version with, for example, `2G` => 2 GB and `200M` => 200 MB. @oscardssmith want to take a look? --- doc/src/manual/environment-variables.md | 17 ++++ src/gc-stock.c | 7 ++ src/jloptions.c | 103 +++++++++++++----------- src/julia.h | 2 + src/options.h | 3 + 5 files changed, 84 insertions(+), 48 deletions(-) diff --git a/doc/src/manual/environment-variables.md b/doc/src/manual/environment-variables.md index 5aa0701c9aafe..b3bfa5204e603 100644 --- a/doc/src/manual/environment-variables.md +++ b/doc/src/manual/environment-variables.md @@ -400,6 +400,23 @@ If set to anything besides `0`, then Julia's thread policy is consistent with running on a dedicated machine: the master thread is on proc 0, and threads are affinitized. Otherwise, Julia lets the operating system handle thread policy. +## Garbage Collection + +### [`JULIA_HEAP_SIZE_HINT`](@id JULIA_HEAP_SIZE_HINT) + +Environment variable equivalent to the `--heap-size-hint=[]` command line option. + +Forces garbage collection if memory usage is higher than the given value. 
The value may be specified as a number of bytes, optionally in units of: + + - B (bytes) + - K (kibibytes) + - M (mebibytes) + - G (gibibytes) + - T (tebibytes) + - % (percentage of physical memory) + +For example, `JULIA_HEAP_SIZE_HINT=1G` would provide a 1 GB heap size hint to the garbage collector. + ## REPL formatting Environment variables that determine how REPL output should be formatted at the diff --git a/src/gc-stock.c b/src/gc-stock.c index 86dbea3b9a17a..1a8d85e249c29 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -3618,6 +3618,13 @@ void jl_gc_init(void) uint64_t mem_reserve = 250*1024*1024; // LLVM + other libraries need some amount of memory uint64_t min_heap_size_hint = mem_reserve + 1*1024*1024; uint64_t hint = jl_options.heap_size_hint; + + // check if heap size specified on command line + if (jl_options.heap_size_hint == 0) { + char *cp = getenv(HEAP_SIZE_HINT); + if (cp) + hint = parse_heap_size_hint(cp, "JULIA_HEAP_SIZE_HINT=\"[]\""); + } #ifdef _P64 total_mem = uv_get_total_memory(); if (hint == 0) { diff --git a/src/jloptions.c b/src/jloptions.c index 35f0a76e3f6e7..907f47d9030e4 100644 --- a/src/jloptions.c +++ b/src/jloptions.c @@ -34,6 +34,54 @@ JL_DLLEXPORT const char *jl_get_default_sysimg_path(void) return &system_image_path[1]; } +/* This function is also used by gc-stock.c to parse the + * JULIA_HEAP_SIZE_HINT environment variable. */ +uint64_t parse_heap_size_hint(const char *optarg, const char *option_name) +{ + long double value = 0.0; + char unit[4] = {0}; + int nparsed = sscanf(optarg, "%Lf%3s", &value, unit); + if (nparsed == 0 || strlen(unit) > 2 || (strlen(unit) == 2 && ascii_tolower(unit[1]) != 'b')) { + jl_errorf("julia: invalid argument to %s (%s)", option_name, optarg); + } + uint64_t multiplier = 1ull; + switch (ascii_tolower(unit[0])) { + case '\0': + case 'b': + break; + case 'k': + multiplier <<= 10; + break; + case 'm': + multiplier <<= 20; + break; + case 'g': + multiplier <<= 30; + break; + case 't': + multiplier <<= 40; + break; + case '%': + if (value > 100) + jl_errorf("julia: invalid percentage specified in %s", option_name); + uint64_t mem = uv_get_total_memory(); + uint64_t cmem = uv_get_constrained_memory(); + if (cmem > 0 && cmem < mem) + mem = cmem; + multiplier = mem/100; + break; + default: + jl_errorf("julia: invalid argument to %s (%s)", option_name, optarg); + break; + } + long double sz = value * multiplier; + if (isnan(sz) || sz < 0) { + jl_errorf("julia: invalid argument to %s (%s)", option_name, optarg); + } + const long double limit = ldexpl(1.0, 64); // UINT64_MAX + 1 + return sz < limit ? (uint64_t)sz : UINT64_MAX; +} + static int jl_options_initialized = 0; JL_DLLEXPORT void jl_init_options(void) @@ -231,10 +279,11 @@ static const char opts[] = " current environment and fallbacks to the latest\n" " compatible BugReporting.jl if not. For more\n" " information, see --bug-report=help.\n\n" - " --heap-size-hint= Forces garbage collection if memory usage is higher\n" + " --heap-size-hint=[] Forces garbage collection if memory usage is higher\n" " than the given value. 
The value may be specified as a\n" - " number of bytes, optionally in units of KB, MB, GB,\n" - " or TB, or as a percentage of physical memory with %.\n\n" + " number of bytes, optionally in units of: B, K (kibibytes),\n" + " M (mebibytes), G (gibibytes), T (tebibytes), or % (percentage\n" + " of physical memory).\n\n" ; static const char opts_hidden[] = @@ -880,52 +929,10 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) jl_options.strip_ir = 1; break; case opt_heap_size_hint: - if (optarg != NULL) { - long double value = 0.0; - char unit[4] = {0}; - int nparsed = sscanf(optarg, "%Lf%3s", &value, unit); - if (nparsed == 0 || strlen(unit) > 2 || (strlen(unit) == 2 && ascii_tolower(unit[1]) != 'b')) { - jl_errorf("julia: invalid argument to --heap-size-hint (%s)", optarg); - } - uint64_t multiplier = 1ull; - switch (ascii_tolower(unit[0])) { - case '\0': - case 'b': - break; - case 'k': - multiplier <<= 10; - break; - case 'm': - multiplier <<= 20; - break; - case 'g': - multiplier <<= 30; - break; - case 't': - multiplier <<= 40; - break; - case '%': - if (value > 100) - jl_errorf("julia: invalid percentage specified in --heap-size-hint"); - uint64_t mem = uv_get_total_memory(); - uint64_t cmem = uv_get_constrained_memory(); - if (cmem > 0 && cmem < mem) - mem = cmem; - multiplier = mem/100; - break; - default: - jl_errorf("julia: invalid argument to --heap-size-hint (%s)", optarg); - break; - } - long double sz = value * multiplier; - if (isnan(sz) || sz < 0) { - jl_errorf("julia: invalid argument to --heap-size-hint (%s)", optarg); - } - const long double limit = ldexpl(1.0, 64); // UINT64_MAX + 1 - jl_options.heap_size_hint = sz < limit ? (uint64_t)sz : UINT64_MAX; - } + if (optarg != NULL) + jl_options.heap_size_hint = parse_heap_size_hint(optarg, "--heap-size-hint=[]"); if (jl_options.heap_size_hint == 0) - jl_errorf("julia: invalid memory size specified in --heap-size-hint"); + jl_errorf("julia: invalid memory size specified in --heap-size-hint=[]"); break; case opt_gc_threads: diff --git a/src/julia.h b/src/julia.h index 301650540a15c..81e6cf42da567 100644 --- a/src/julia.h +++ b/src/julia.h @@ -2536,6 +2536,8 @@ JL_DLLEXPORT ssize_t jl_sizeof_jl_options(void); JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp); JL_DLLEXPORT char *jl_format_filename(const char *output_pattern); +uint64_t parse_heap_size_hint(const char *optarg, const char *option_name); + // Set julia-level ARGS array according to the arguments provided in // argc/argv JL_DLLEXPORT void jl_set_ARGS(int argc, char **argv); diff --git a/src/options.h b/src/options.h index 800be866183b0..0715069faab32 100644 --- a/src/options.h +++ b/src/options.h @@ -137,6 +137,9 @@ // GC threads #define NUM_GC_THREADS_NAME "JULIA_NUM_GC_THREADS" +// heap size hint +#define HEAP_SIZE_HINT "JULIA_HEAP_SIZE_HINT" + // affinitization behavior #define MACHINE_EXCLUSIVE_NAME "JULIA_EXCLUSIVE" #define DEFAULT_MACHINE_EXCLUSIVE 0 From 0e811e44445155e598ac395e1e886071d6638d59 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 8 Nov 2024 16:23:26 +0530 Subject: [PATCH 396/537] Allow indexing `UniformScaling` with `CartesianIndex{2}` (#56461) Since indexing with two `Integer`s is defined, we might as well define indexing with a `CartesianIndex`. This makes certain loops convenient where the index is obtained using `eachindex`. 
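A minimal usage sketch of the `eachindex` loop pattern mentioned above (illustrative only; the array `A` and scaling `J` are hypothetical and this snippet is not part of the diff below):

```julia
using LinearAlgebra

A = zeros(3, 3)
J = 2.5I                                    # UniformScaling{Float64}
for idx in eachindex(IndexCartesian(), A)   # yields CartesianIndex{2} values
    A[idx] += J[idx]                        # J[CartesianIndex(i, j)] via the new getindex method
end
A == 2.5 .* Matrix(I, 3, 3)                 # true: 2.5 on the diagonal, 0.0 elsewhere
```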
--- stdlib/LinearAlgebra/src/uniformscaling.jl | 1 + stdlib/LinearAlgebra/test/uniformscaling.jl | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/stdlib/LinearAlgebra/src/uniformscaling.jl b/stdlib/LinearAlgebra/src/uniformscaling.jl index 472ea53078f87..cb3c5b6a4c3e1 100644 --- a/stdlib/LinearAlgebra/src/uniformscaling.jl +++ b/stdlib/LinearAlgebra/src/uniformscaling.jl @@ -86,6 +86,7 @@ julia> (0.7*I)(3) eltype(::Type{UniformScaling{T}}) where {T} = T ndims(J::UniformScaling) = 2 Base.has_offset_axes(::UniformScaling) = false +getindex(J::UniformScaling, ind::CartesianIndex{2}) = J[Tuple(ind)...] getindex(J::UniformScaling, i::Integer,j::Integer) = ifelse(i==j,J.λ,zero(J.λ)) getindex(J::UniformScaling, n::Integer, m::AbstractVector{<:Integer}) = getindex(J, m, n) diff --git a/stdlib/LinearAlgebra/test/uniformscaling.jl b/stdlib/LinearAlgebra/test/uniformscaling.jl index d335cd6f63521..10d427d1dc6c4 100644 --- a/stdlib/LinearAlgebra/test/uniformscaling.jl +++ b/stdlib/LinearAlgebra/test/uniformscaling.jl @@ -28,8 +28,8 @@ Random.seed!(1234543) end @testset "getindex" begin - @test I[1,1] == 1 - @test I[1,2] == 0 + @test I[1,1] == I[CartesianIndex(1,1)] == 1 + @test I[1,2] == I[CartesianIndex(1,2)] == 0 J = I(15) for (a, b) in [ From a005c07914040995784914cf0b2ccaf8af52ccc2 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 8 Nov 2024 18:29:45 +0530 Subject: [PATCH 397/537] Simplify first index in `FastContiguousSubArray` definition (#56491) Since `Slice <: AbstractUnitRange` and `Union{Slice, AbstractUnitRange} == AbstractUnitRange`, we may simplify the first index. --- base/subarray.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/subarray.jl b/base/subarray.jl index 47b4fa0584dba..d6ddf7786f7ec 100644 --- a/base/subarray.jl +++ b/base/subarray.jl @@ -342,7 +342,7 @@ end # We can avoid a multiplication if the first parent index is a Colon or AbstractUnitRange, # or if all the indices are scalars, i.e. 
the view is for a single value only -FastContiguousSubArray{T,N,P,I<:Union{Tuple{Union{Slice, AbstractUnitRange}, Vararg{Any}}, +FastContiguousSubArray{T,N,P,I<:Union{Tuple{AbstractUnitRange, Vararg{Any}}, Tuple{Vararg{ScalarIndex}}}} = SubArray{T,N,P,I,true} @inline _reindexlinear(V::FastContiguousSubArray, i::Int) = V.offset1 + i From 6541d1bfe004925675bc47b7c54239ed77cce86d Mon Sep 17 00:00:00 2001 From: Tianyi Pu <44583944+putianyi889@users.noreply.github.com> Date: Fri, 8 Nov 2024 13:54:04 +0000 Subject: [PATCH 398/537] Make `popat!` support `@inbounds` (#56323) Co-authored-by: Jishnu Bhattacharya --- base/array.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/base/array.jl b/base/array.jl index 68d0f13d3893a..0f86bbeda7523 100644 --- a/base/array.jl +++ b/base/array.jl @@ -1647,6 +1647,7 @@ ERROR: BoundsError: attempt to access 3-element Vector{Int64} at index [4] ``` """ function popat!(a::Vector, i::Integer) + @_propagate_inbounds_meta x = a[i] _deleteat!(a, i, 1) x From bb234e2f71448e5d163f5d5e4c7680d32313448b Mon Sep 17 00:00:00 2001 From: Lasse Peters Date: Fri, 8 Nov 2024 14:58:37 +0100 Subject: [PATCH 399/537] NEWS.md: clarify `--trim` (#56460) Co-authored-by: Matt Bauman --- NEWS.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/NEWS.md b/NEWS.md index ba9ca1c521c55..74cda05e9d0e1 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,8 +4,8 @@ Julia v1.12 Release Notes New language features --------------------- -- New option `--trim` for building "trimmed" binaries, where code not provably reachable from entry points - is removed. Entry points can be marked using `Base.Experimental.entrypoint` ([#55047]). +- New option `--trim` creates smaller binaries by removing code that was not proven to be reachable from + the entry points. Entry points can be marked using `Base.Experimental.entrypoint` ([#55047]). - A new keyword argument `usings::Bool` has been added to `names`. By using this, we can now find all the names available in module `A` by `names(A; all=true, imported=true, usings=true)`. ([#54609]) - the `@atomic(...)` macro family supports now the reference assignment syntax, e.g. From 2f58a4b7ac34a4cacb218d8d55b3fc17b015cdac Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 8 Nov 2024 19:35:23 +0530 Subject: [PATCH 400/537] Remove aggressive constprop annotation from 2x2 and 3x3 matmul (#56453) Removing these annotations reduces ttfx slightly. 
```julia julia> using LinearAlgebra julia> A = rand(2,2); julia> @time mul!(similar(A), A, A, 1, 2); 0.296096 seconds (903.49 k allocations: 44.313 MiB, 4.25% gc time, 99.98% compilation time) # nightly 0.286009 seconds (835.88 k allocations: 40.732 MiB, 3.29% gc time, 99.98% compilation time) # this PR ``` --- stdlib/LinearAlgebra/src/matmul.jl | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl index 2f1a3fe2ba861..e22b6dce4bb03 100644 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ b/stdlib/LinearAlgebra/src/matmul.jl @@ -1049,7 +1049,7 @@ function _generic_matmatmul_generic!(C, A, B, alpha, beta) end # multiply 2x2 matrices -Base.@constprop :aggressive function matmul2x2(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} +function matmul2x2(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} matmul2x2!(similar(B, promote_op(matprod, T, S), 2, 2), tA, tB, A, B) end @@ -1065,11 +1065,11 @@ function __matmul_checks(C, A, B, sz) end # separate function with the core of matmul2x2! that doesn't depend on a MulAddMul -Base.@constprop :aggressive function _matmul2x2_elements(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix) +function _matmul2x2_elements(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix) __matmul_checks(C, A, B, (2,2)) __matmul2x2_elements(tA, tB, A, B) end -Base.@constprop :aggressive function __matmul2x2_elements(tA, A::AbstractMatrix) +function __matmul2x2_elements(tA, A::AbstractMatrix) @inbounds begin tA_uc = uppercase(tA) # possibly unwrap a WrapperChar if tA_uc == 'N' @@ -1102,7 +1102,7 @@ Base.@constprop :aggressive function __matmul2x2_elements(tA, A::AbstractMatrix) end # inbounds A11, A12, A21, A22 end -Base.@constprop :aggressive __matmul2x2_elements(tA, tB, A, B) = __matmul2x2_elements(tA, A), __matmul2x2_elements(tB, B) +__matmul2x2_elements(tA, tB, A, B) = __matmul2x2_elements(tA, A), __matmul2x2_elements(tB, B) function _modify2x2!(Aelements, Belements, C, _add) (A11, A12, A21, A22), (B11, B12, B21, B22) = Aelements, Belements @@ -1114,7 +1114,7 @@ function _modify2x2!(Aelements, Belements, C, _add) end # inbounds C end -Base.@constprop :aggressive function matmul2x2!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, +function matmul2x2!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, α = true, β = false) Aelements, Belements = _matmul2x2_elements(C, tA, tB, A, B) @stable_muladdmul _modify2x2!(Aelements, Belements, C, MulAddMul(α, β)) @@ -1122,16 +1122,16 @@ Base.@constprop :aggressive function matmul2x2!(C::AbstractMatrix, tA, tB, A::Ab end # Multiply 3x3 matrices -Base.@constprop :aggressive function matmul3x3(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} +function matmul3x3(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} matmul3x3!(similar(B, promote_op(matprod, T, S), 3, 3), tA, tB, A, B) end # separate function with the core of matmul3x3! 
that doesn't depend on a MulAddMul -Base.@constprop :aggressive function _matmul3x3_elements(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix) +function _matmul3x3_elements(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix) __matmul_checks(C, A, B, (3,3)) __matmul3x3_elements(tA, tB, A, B) end -Base.@constprop :aggressive function __matmul3x3_elements(tA, A::AbstractMatrix) +function __matmul3x3_elements(tA, A::AbstractMatrix) @inbounds begin tA_uc = uppercase(tA) # possibly unwrap a WrapperChar if tA_uc == 'N' @@ -1172,7 +1172,7 @@ Base.@constprop :aggressive function __matmul3x3_elements(tA, A::AbstractMatrix) end # inbounds A11, A12, A13, A21, A22, A23, A31, A32, A33 end -Base.@constprop :aggressive __matmul3x3_elements(tA, tB, A, B) = __matmul3x3_elements(tA, A), __matmul3x3_elements(tB, B) +__matmul3x3_elements(tA, tB, A, B) = __matmul3x3_elements(tA, A), __matmul3x3_elements(tB, B) function _modify3x3!(Aelements, Belements, C, _add) (A11, A12, A13, A21, A22, A23, A31, A32, A33), @@ -1192,7 +1192,7 @@ function _modify3x3!(Aelements, Belements, C, _add) end # inbounds C end -Base.@constprop :aggressive function matmul3x3!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, +function matmul3x3!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, α = true, β = false) Aelements, Belements = _matmul3x3_elements(C, tA, tB, A, B) From bfcd3e9548c6893f0b30bcf21bcbaf2c97a0f371 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 8 Nov 2024 19:43:31 +0530 Subject: [PATCH 401/537] `sincos` for non-float symmetric matrices (#56484) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Ensures that the `eltype` of the array to which the result of `sincos` is a floating-point one, even if the argument doesn't have a floating-point `eltype`. 
After this, the following works: ```julia julia> A = diagm(0=>1:3) 3×3 Matrix{Int64}: 1 0 0 0 2 0 0 0 3 julia> sincos(A) ([0.8414709848078965 0.0 0.0; 0.0 0.9092974268256817 0.0; 0.0 0.0 0.1411200080598672], [0.5403023058681398 0.0 0.0; 0.0 -0.4161468365471424 0.0; 0.0 0.0 -0.9899924966004454]) ``` --- stdlib/LinearAlgebra/src/symmetric.jl | 6 ++++-- stdlib/LinearAlgebra/test/symmetric.jl | 11 +++++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index 265995d9e7806..f8cbac2490794 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -969,7 +969,8 @@ for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetr function sincos(A::$hermtype{<:Real}) n = checksquare(A) F = eigen(A) - S, C = Diagonal(similar(A, (n,))), Diagonal(similar(A, (n,))) + T = float(eltype(F.values)) + S, C = Diagonal(similar(A, T, (n,))), Diagonal(similar(A, T, (n,))) for i in 1:n S.diag[i], C.diag[i] = sincos(F.values[i]) end @@ -980,7 +981,8 @@ end function sincos(A::Hermitian{<:Complex}) n = checksquare(A) F = eigen(A) - S, C = Diagonal(similar(A, (n,))), Diagonal(similar(A, (n,))) + T = float(eltype(F.values)) + S, C = Diagonal(similar(A, T, (n,))), Diagonal(similar(A, T, (n,))) for i in 1:n S.diag[i], C.diag[i] = sincos(F.values[i]) end diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 3aef23617b942..edd3af483b5f6 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -1167,4 +1167,15 @@ end @test a*H == H end +@testset "trigonometric functions for Integer matrices" begin + A = diagm(0=>1:4, 1=>1:3, -1=>1:3) + for B in (Symmetric(A), Symmetric(complex.(A))) + SC = @inferred(sincos(B)) + @test SC[1] ≈ sin(B) + @test SC[2] ≈ cos(B) + @test cos(A) ≈ real(exp(im*A)) + @test sin(A) ≈ imag(exp(im*A)) + end +end + end # module TestSymmetric From 9c4541bcaceac19487652ba67eae171f8df15c1f Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 8 Nov 2024 19:58:42 +0530 Subject: [PATCH 402/537] Specialize 2-arg `show` for `LinearIndices` (#56482) After this, ```julia julia> l = LinearIndices((1:3, 1:4)); julia> show(l) LinearIndices((1:3, 1:4)) ``` The printed form is a valid constructor. 
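As a round-trip sketch of the "valid constructor" claim (illustrative only, not part of the diff below):

```julia
l = LinearIndices((1:3, 1:4))
s = repr(l)                  # two-argument show: "LinearIndices((1:3, 1:4))"
eval(Meta.parse(s)) == l     # true: the printed form reconstructs an equal object
```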
--- base/indices.jl | 4 ++++ test/abstractarray.jl | 9 +++++++++ 2 files changed, 13 insertions(+) diff --git a/base/indices.jl b/base/indices.jl index 455bb0f7656a1..45f3495e51191 100644 --- a/base/indices.jl +++ b/base/indices.jl @@ -576,3 +576,7 @@ first(iter::LinearIndices) = 1 first(iter::LinearIndices{1}) = (@inline; first(axes1(iter.indices[1]))) last(iter::LinearIndices) = (@inline; length(iter)) last(iter::LinearIndices{1}) = (@inline; last(axes1(iter.indices[1]))) + +function show(io::IO, iter::LinearIndices) + print(io, "LinearIndices(", iter.indices, ")") +end diff --git a/test/abstractarray.jl b/test/abstractarray.jl index b40956b433630..c2c646ce8bee0 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -335,6 +335,15 @@ end R = LinearIndices((Base.IdentityUnitRange(0:1), 0:1)) @test axes(R) == (Base.IdentityUnitRange(0:1), Base.OneTo(2)) end + + @testset "show" begin + A = zeros(2,3) + for B in (A, view(A, Base.IdentityUnitRange(2:4))) + l = LinearIndices(B) + s = sprint(show, l) + @test s == "LinearIndices($(axes(B)))" + end + end end @testset "copy for LinearIndices/CartesianIndices" begin From d0af9c528e5c551444bb31c866ff08f9e0481d78 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Fri, 8 Nov 2024 20:00:56 +0530 Subject: [PATCH 403/537] Avoid constprop in `syevd!` and `syev!` (#56442) This improves compilation times slightly: ```julia julia> using LinearAlgebra julia> A = rand(2,2); julia> @time eigen!(Hermitian(A)); 0.163380 seconds (180.51 k allocations: 8.760 MiB, 99.88% compilation time) # master 0.155285 seconds (163.77 k allocations: 7.971 MiB, 99.87% compilation time) # This PR ``` The idea is that the constant propagation is only required to infer the return type, and isn't necessary in the body of the method. We may therefore annotate the body with a `@constprop :none`. --- stdlib/LinearAlgebra/src/lapack.jl | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/stdlib/LinearAlgebra/src/lapack.jl b/stdlib/LinearAlgebra/src/lapack.jl index 97dff0031329b..5c2b66881585c 100644 --- a/stdlib/LinearAlgebra/src/lapack.jl +++ b/stdlib/LinearAlgebra/src/lapack.jl @@ -5318,6 +5318,14 @@ solution `X`. """ hetrs!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) +for f in (:syevd!, :syev!) + _f = Symbol(:_, f) + @eval function $f(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix) + W, A = $_f(jobz, uplo, A) + jobz == 'V' ? (W, A) : W + end +end + # Symmetric (real) eigensolvers for (syev, syevr, syevd, sygvd, elty) in ((:dsyev_,:dsyevr_,:dsyevd_,:dsygvd_,:Float64), @@ -5329,7 +5337,7 @@ for (syev, syevr, syevd, sygvd, elty) in # INTEGER INFO, LDA, LWORK, N # * .. Array Arguments .. # DOUBLE PRECISION A( LDA, * ), W( * ), WORK( * ) - Base.@constprop :aggressive function syev!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) + Base.@constprop :none function _syev!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) require_one_based_indexing(A) @chkvalidparam 1 jobz ('N', 'V') chkuplo(uplo) @@ -5350,7 +5358,7 @@ for (syev, syevr, syevd, sygvd, elty) in resize!(work, lwork) end end - jobz == 'V' ? (W, A) : W + W, A end # SUBROUTINE DSYEVR( JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, @@ -5429,7 +5437,7 @@ for (syev, syevr, syevd, sygvd, elty) in # * .. Array Arguments .. 
# INTEGER IWORK( * ) # DOUBLE PRECISION A( LDA, * ), W( * ), WORK( * ) - Base.@constprop :aggressive function syevd!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) + Base.@constprop :none function _syevd!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) require_one_based_indexing(A) @chkvalidparam 1 jobz ('N', 'V') chkstride1(A) @@ -5459,7 +5467,7 @@ for (syev, syevr, syevd, sygvd, elty) in resize!(iwork, liwork) end end - jobz == 'V' ? (W, A) : W + W, A end # Generalized eigenproblem @@ -5526,7 +5534,7 @@ for (syev, syevr, syevd, sygvd, elty, relty) in # * .. Array Arguments .. # DOUBLE PRECISION RWORK( * ), W( * ) # COMPLEX*16 A( LDA, * ), WORK( * ) - Base.@constprop :aggressive function syev!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) + Base.@constprop :none function _syev!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) require_one_based_indexing(A) @chkvalidparam 1 jobz ('N', 'V') chkstride1(A) @@ -5550,7 +5558,7 @@ for (syev, syevr, syevd, sygvd, elty, relty) in resize!(work, lwork) end end - jobz == 'V' ? (W, A) : W + W, A end # SUBROUTINE ZHEEVR( JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, @@ -5639,7 +5647,7 @@ for (syev, syevr, syevd, sygvd, elty, relty) in # INTEGER IWORK( * ) # DOUBLE PRECISION RWORK( * ) # COMPLEX*16 A( LDA, * ), WORK( * ) - Base.@constprop :aggressive function syevd!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) + Base.@constprop :none function _syevd!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) require_one_based_indexing(A) @chkvalidparam 1 jobz ('N', 'V') chkstride1(A) @@ -5673,7 +5681,7 @@ for (syev, syevr, syevd, sygvd, elty, relty) in resize!(iwork, liwork) end end - jobz == 'V' ? (W, A) : W + W, A end # SUBROUTINE ZHEGVD( ITYPE, JOBZ, UPLO, N, A, LDA, B, LDB, W, WORK, From 62f8cffbb5f9adbd585a97c221b6692eee5ed0fc Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Sat, 9 Nov 2024 03:39:52 +0900 Subject: [PATCH 404/537] make: define `basecompiler.ji` target (#56498) For easier experimentation with just the bootstrap process. Additionally, as a follow-up to JuliaLang/julia#56409, this commit also includes some minor cosmetic changes. --- Compiler/src/Compiler.jl | 10 ++-------- Compiler/src/cicache.jl | 6 ------ Compiler/src/methodtable.jl | 2 ++ Compiler/src/types.jl | 8 +++++++- base/Base_compiler.jl | 11 +++++------ sysimage.mk | 1 + test/compiler/contextual.jl | 2 +- 7 files changed, 18 insertions(+), 22 deletions(-) diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index c2c074dc92bbc..edaf0c9332584 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -86,14 +86,8 @@ function include(mod::Module, x::String) Base.include(mod, x) end - macro _boundscheck() Expr(:boundscheck) end -# These types are used by reflection.jl and expr.jl too, so declare them here. -# Note that `@assume_effects` is available only after loading namedtuple.jl. -abstract type MethodTableView end -abstract type AbstractInterpreter end - function return_type end function is_return_type(Core.@nospecialize(f)) f === return_type && return true @@ -189,6 +183,6 @@ if isdefined(Base, :IRShow) end end -end +end # baremodule Compiler -end +end # if isdefined(Base, :generating_output) && ... 
diff --git a/Compiler/src/cicache.jl b/Compiler/src/cicache.jl index a66d7f9f09650..2893be2787b29 100644 --- a/Compiler/src/cicache.jl +++ b/Compiler/src/cicache.jl @@ -77,9 +77,3 @@ function setindex!(wvc::WorldView{InternalCodeCache}, ci::CodeInstance, mi::Meth setindex!(wvc.cache, ci, mi) return wvc end - -function code_cache(interp::AbstractInterpreter) - cache = InternalCodeCache(cache_owner(interp)) - worlds = WorldRange(get_inference_world(interp)) - return WorldView(cache, worlds) -end diff --git a/Compiler/src/methodtable.jl b/Compiler/src/methodtable.jl index ce04ff48d805e..24a8b1ecf8242 100644 --- a/Compiler/src/methodtable.jl +++ b/Compiler/src/methodtable.jl @@ -16,6 +16,8 @@ function iterate(result::MethodLookupResult, args...) end getindex(result::MethodLookupResult, idx::Int) = getindex(result.matches, idx)::MethodMatch +abstract type MethodTableView end + """ struct InternalMethodTable <: MethodTableView diff --git a/Compiler/src/types.jl b/Compiler/src/types.jl index 8899e7673d753..35c7880da2281 100644 --- a/Compiler/src/types.jl +++ b/Compiler/src/types.jl @@ -24,7 +24,7 @@ the following methods to satisfy the `AbstractInterpreter` API requirement: - `get_inference_cache(interp::NewInterpreter)` - return the local inference cache - `cache_owner(interp::NewInterpreter)` - return the owner of any new cache entries """ -:(AbstractInterpreter) +abstract type AbstractInterpreter end abstract type AbstractLattice end @@ -465,6 +465,12 @@ typeinf_lattice(::AbstractInterpreter) = InferenceLattice(BaseInferenceLattice.i ipo_lattice(::AbstractInterpreter) = InferenceLattice(IPOResultLattice.instance) optimizer_lattice(::AbstractInterpreter) = SimpleInferenceLattice.instance +function code_cache(interp::AbstractInterpreter) + cache = InternalCodeCache(cache_owner(interp)) + worlds = WorldRange(get_inference_world(interp)) + return WorldView(cache, worlds) +end + get_escape_cache(interp::AbstractInterpreter) = GetNativeEscapeCache(interp) abstract type CallInfo end diff --git a/base/Base_compiler.jl b/base/Base_compiler.jl index 691e2c574acd6..a860414454634 100644 --- a/base/Base_compiler.jl +++ b/base/Base_compiler.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license baremodule Base + using Core.Intrinsics, Core.IR # to start, we're going to use a very simple definition of `include` @@ -128,7 +129,6 @@ function setpropertyonce!(x::Module, f::Symbol, desired, success_order::Symbol=: return Core.setglobalonce!(x, f, val, success_order, fail_order) end - convert(::Type{Any}, Core.@nospecialize x) = x convert(::Type{T}, x::T) where {T} = x include("coreio.jl") @@ -254,7 +254,6 @@ using .Order include("coreir.jl") - # For OS specific stuff # We need to strcat things here, before strings are really defined function strcat(x::String, y::String) @@ -267,10 +266,9 @@ function strcat(x::String, y::String) return out end -BUILDROOT::String = "" +global BUILDROOT::String = "" -baremodule BuildSettings -end +baremodule BuildSettings end function process_sysimg_args!() let i = 1 @@ -299,4 +297,5 @@ include("flparse.jl") Core._setparser!(fl_parse) # Further definition of Base will happen in Base.jl if loaded. 
-end + +end # baremodule Base diff --git a/sysimage.mk b/sysimage.mk index 9e3e52157aa45..ceed9657dc807 100644 --- a/sysimage.mk +++ b/sysimage.mk @@ -6,6 +6,7 @@ include $(JULIAHOME)/stdlib/stdlib.mk default: sysimg-$(JULIA_BUILD_MODE) # contains either "debug" or "release" all: sysimg-release sysimg-debug +basecompiler-ji: $(build_private_libdir)/basecompiler.ji sysimg-ji: $(build_private_libdir)/sys.ji sysimg-bc: $(build_private_libdir)/sys-bc.a sysimg-release: $(build_private_libdir)/sys.$(SHLIB_EXT) diff --git a/test/compiler/contextual.jl b/test/compiler/contextual.jl index 8d526fdefdc5b..c6081634d5a3b 100644 --- a/test/compiler/contextual.jl +++ b/test/compiler/contextual.jl @@ -11,7 +11,7 @@ module MiniCassette # fancy features, but sufficient to exercise this code path in the compiler. using Core.IR - using Core.Compiler: retrieve_code_info, quoted, signature_type, anymap + using Core.Compiler: retrieve_code_info, quoted, anymap using Base.Meta: isexpr export Ctx, overdub From ca2d6aaedd32b482b1b0f351a32443dfc7fd33c7 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Sat, 9 Nov 2024 06:38:00 +0900 Subject: [PATCH 405/537] speed up bootstrapping by compiling few optimizer subroutines earlier (#56501) Speeds up the bootstrapping process by about 30 seconds. --- Compiler/src/bootstrap.jl | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/Compiler/src/bootstrap.jl b/Compiler/src/bootstrap.jl index 4205b072d232f..7ee439cc7ac67 100644 --- a/Compiler/src/bootstrap.jl +++ b/Compiler/src/bootstrap.jl @@ -12,14 +12,17 @@ function bootstrap!() println("Compiling the compiler. This may take several minutes ...") interp = NativeInterpreter() - # analyze_escapes_tt = Tuple{typeof(analyze_escapes), IRCode, Int, TODO} + ssa_inlining_pass!_tt = Tuple{typeof(ssa_inlining_pass!), IRCode, InliningState{NativeInterpreter}, Bool} optimize_tt = Tuple{typeof(optimize), NativeInterpreter, OptimizationState{NativeInterpreter}, InferenceResult} + typeinf_ext_tt = Tuple{typeof(typeinf_ext), NativeInterpreter, MethodInstance, UInt8} + typeinf_tt = Tuple{typeof(typeinf), NativeInterpreter, InferenceState} + typeinf_edge_tt = Tuple{typeof(typeinf_edge), NativeInterpreter, Method, Any, SimpleVector, InferenceState, Bool, Bool} fs = Any[ # we first create caches for the optimizer, because they contain many loop constructions # and they're better to not run in interpreter even during bootstrapping - #=analyze_escapes_tt,=# optimize_tt, + compact!, ssa_inlining_pass!_tt, optimize_tt, # then we create caches for inference entries - typeinf_ext, typeinf, typeinf_edge, + typeinf_ext_tt, typeinf_tt, typeinf_edge_tt, ] # tfuncs can't be inferred from the inference entries above, so here we infer them manually for x in T_FFUNC_VAL @@ -40,14 +43,19 @@ function bootstrap!() else tt = Tuple{typeof(f), Vararg{Any}} end - for m in _methods_by_ftype(tt, 10, get_world_counter())::Vector - # remove any TypeVars from the intersection - m = m::MethodMatch - typ = Any[m.spec_types.parameters...] - for i = 1:length(typ) - typ[i] = unwraptv(typ[i]) + matches = _methods_by_ftype(tt, 10, get_world_counter())::Vector + if isempty(matches) + println(stderr, "WARNING: no matching method found for `", tt, "`") + else + for m in matches + # remove any TypeVars from the intersection + m = m::MethodMatch + params = Any[m.spec_types.parameters...] 
+ for i = 1:length(params) + params[i] = unwraptv(params[i]) + end + typeinf_type(interp, m.method, Tuple{params...}, m.sparams) end - typeinf_type(interp, m.method, Tuple{typ...}, m.sparams) end end endtime = time() From 4cbeea559e903a353a5b1d08adbd54f47c5548fc Mon Sep 17 00:00:00 2001 From: Jeff Bezanson Date: Fri, 8 Nov 2024 23:47:13 -0500 Subject: [PATCH 406/537] remove top-level branches checking for Base (#56507) These are no longer needed, now that the files are no longer included twice. --- base/array.jl | 2 -- base/bitarray.jl | 2 -- base/genericmemory.jl | 2 -- base/int.jl | 57 ++++++++++++++++++++----------------------- base/namedtuple.jl | 6 ----- base/tuple.jl | 6 ----- 6 files changed, 26 insertions(+), 49 deletions(-) diff --git a/base/array.jl b/base/array.jl index 0f86bbeda7523..65cc29f38c911 100644 --- a/base/array.jl +++ b/base/array.jl @@ -616,11 +616,9 @@ promote_rule(a::Type{Array{T,n}}, b::Type{Array{S,n}}) where {T,n,S} = el_same(p ## Constructors ## -if nameof(@__MODULE__) === :Base # avoid method overwrite # constructors should make copies Array{T,N}(x::AbstractArray{S,N}) where {T,N,S} = copyto_axcheck!(Array{T,N}(undef, size(x)), x) AbstractArray{T,N}(A::AbstractArray{S,N}) where {T,N,S} = copyto_axcheck!(similar(A,T), A) -end ## copying iterators to containers diff --git a/base/bitarray.jl b/base/bitarray.jl index f7eeafbb62231..93fa48c56e379 100644 --- a/base/bitarray.jl +++ b/base/bitarray.jl @@ -543,10 +543,8 @@ end reinterpret(::Type{Bool}, B::BitArray, dims::NTuple{N,Int}) where {N} = reinterpret(B, dims) reinterpret(B::BitArray, dims::NTuple{N,Int}) where {N} = reshape(B, dims) -if nameof(@__MODULE__) === :Base # avoid method overwrite (::Type{T})(x::T) where {T<:BitArray} = copy(x)::T BitArray(x::BitArray) = copy(x) -end """ BitArray(itr) diff --git a/base/genericmemory.jl b/base/genericmemory.jl index f814aa4d84bdd..fbf60255935a3 100644 --- a/base/genericmemory.jl +++ b/base/genericmemory.jl @@ -211,10 +211,8 @@ promote_rule(a::Type{Memory{T}}, b::Type{Memory{S}}) where {T,S} = el_same(promo ## Constructors ## -if nameof(@__MODULE__) === :Base # avoid method overwrite # constructors should make copies Memory{T}(x::AbstractArray{S,1}) where {T,S} = copyto_axcheck!(Memory{T}(undef, size(x)), x) -end ## copying iterators to containers diff --git a/base/int.jl b/base/int.jl index a25b17e2cc958..8a80f90f7e2c1 100644 --- a/base/int.jl +++ b/base/int.jl @@ -587,37 +587,32 @@ julia> bitstring(bitrotate(0b01110010, 8)) bitrotate(x::T, k::Integer) where {T <: BitInteger} = (x << ((sizeof(T) << 3 - 1) & k)) | (x >>> ((sizeof(T) << 3 - 1) & -k)) -# @doc isn't available when running in Core at this point. -# Tuple syntax for documentation two function signatures at the same time -# doesn't work either at this point. -if nameof(@__MODULE__) === :Base - for fname in (:mod, :rem) - @eval @doc """ - rem(x::Integer, T::Type{<:Integer}) -> T - mod(x::Integer, T::Type{<:Integer}) -> T - %(x::Integer, T::Type{<:Integer}) -> T - - Find `y::T` such that `x` ≡ `y` (mod n), where n is the number of integers representable - in `T`, and `y` is an integer in `[typemin(T),typemax(T)]`. - If `T` can represent any integer (e.g. `T == BigInt`), then this operation corresponds to - a conversion to `T`. 
- - # Examples - ```jldoctest - julia> x = 129 % Int8 - -127 - - julia> typeof(x) - Int8 - - julia> x = 129 % BigInt - 129 - - julia> typeof(x) - BigInt - ``` - """ $fname(x::Integer, T::Type{<:Integer}) - end +for fname in (:mod, :rem) + @eval @doc """ + rem(x::Integer, T::Type{<:Integer}) -> T + mod(x::Integer, T::Type{<:Integer}) -> T + %(x::Integer, T::Type{<:Integer}) -> T + + Find `y::T` such that `x` ≡ `y` (mod n), where n is the number of integers representable + in `T`, and `y` is an integer in `[typemin(T),typemax(T)]`. + If `T` can represent any integer (e.g. `T == BigInt`), then this operation corresponds to + a conversion to `T`. + + # Examples + ```jldoctest + julia> x = 129 % Int8 + -127 + + julia> typeof(x) + Int8 + + julia> x = 129 % BigInt + 129 + + julia> typeof(x) + BigInt + ``` + """ $fname(x::Integer, T::Type{<:Integer}) end rem(x::T, ::Type{T}) where {T<:Integer} = x diff --git a/base/namedtuple.jl b/base/namedtuple.jl index a7379121b2ce2..991c4d35da52f 100644 --- a/base/namedtuple.jl +++ b/base/namedtuple.jl @@ -110,8 +110,6 @@ julia> (; t.x) """ Core.NamedTuple -if nameof(@__MODULE__) === :Base - @eval function (NT::Type{NamedTuple{names,T}})(args::Tuple) where {names, T <: Tuple} if length(args) != length(names::Tuple) throw(ArgumentError("Wrong number of arguments to named tuple constructor.")) @@ -150,8 +148,6 @@ end NamedTuple(itr) = (; itr...) -end # if Base - # Like NamedTuple{names, T} as a constructor, but omits the additional # `convert` call, when the types are known to match the fields @eval function _new_NamedTuple(T::Type{NamedTuple{NTN, NTT}} where {NTN, NTT}, args::Tuple) @@ -194,7 +190,6 @@ function convert(::Type{NT}, nt::NamedTuple{names}) where {names, NT<:NamedTuple return NT1(T1(nt))::NT1::NT end -if nameof(@__MODULE__) === :Base Tuple(nt::NamedTuple) = (nt...,) (::Type{T})(nt::NamedTuple) where {T <: Tuple} = (t = Tuple(nt); t isa T ? t : convert(T, t)::T) @@ -230,7 +225,6 @@ function show(io::IO, t::NamedTuple) print(io, ")") end end -end eltype(::Type{T}) where T<:NamedTuple = nteltype(T) nteltype(::Type) = Any diff --git a/base/tuple.jl b/base/tuple.jl index 8690f89bdc263..3791d74bfc698 100644 --- a/base/tuple.jl +++ b/base/tuple.jl @@ -427,10 +427,6 @@ fill_to_length(t::Tuple{}, val, ::Val{2}) = (val, val) # constructing from an iterator -# only define these in Base, to avoid overwriting the constructors -# NOTE: this means this constructor must be avoided in Core.Compiler! -if nameof(@__MODULE__) === :Base - function tuple_type_tail(T::Type) @_foldable_meta # TODO: this method is wrong (and not :foldable) if isa(T, UnionAll) @@ -496,8 +492,6 @@ _totuple(::Type{Tuple}, itr::NamedTuple) = (itr...,) _totuple(::Type{Tuple}, p::Pair) = (p.first, p.second) _totuple(::Type{Tuple}, x::Number) = (x,) # to make Tuple(x) inferable -end - ## find ## _findfirst_rec(f, i::Int, ::Tuple{}) = nothing From 435516da3a4fa1719f077cb5c0729e083f998ad4 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Sat, 9 Nov 2024 03:23:10 -0500 Subject: [PATCH 407/537] Undo the decision to publish incomplete types to the binding table (#56497) This effectively reverts #36121 and replaces it with #36111, which was the originally proposed alternative to fix #36104. To recap, the question is what should happen for ``` module Foo struct F v::Foo.F end end ``` i.e. where the type reference tries to refer to the newly defined type via its global path. 
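For contrast, a sketch of the plain self-reference (hypothetical `Node` type), which does not go through the module's global binding and behaves the same under either approach:

```julia
# `Node` refers to itself directly, not as `SomeModule.Node`.
mutable struct Node
    next::Node
    Node() = (n = new(); n.next = n; n)   # classic incomplete-initialization pattern
end
```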
In #36121 we adjusted things so that we first assign the type to its global binding and then evaluate the field type (leaving the type in an incomplete state in the meantime). The primary reason that this choice was that we would have to deal with incomplete types assigned to global bindings anyway if we ever did #32658. However, I think this was the wrong choice. There is a difference between allowing incomplete types and semantically forcing incomplete types to be globally observable every time a new type is defined. The situation was a little different four years ago, but with more extensive threading (which can observe the incompletely constructed type) and the upcoming completion of bindings partition, the situation is different. For bindings partition in particular, this would require two invalidations on re-definition, one to the new incomplete type and then back to the complete type. I don't think this is worth it, for the (somewhat niche and possibly-should-be- deprecated-future) case of refering to incompletely defined types by their global names. So let's instead try the hack in #36111, which does a frontend rewrite of the global path. This should be sufficient to at least address the obvious cases. --- base/boot.jl | 8 ++++++++ src/julia-syntax.scm | 21 ++++++++++++++++----- src/utils.scm | 7 +++++++ test/core.jl | 2 +- 4 files changed, 32 insertions(+), 6 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index 612efc0b50c8a..88a4e7438671e 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -984,6 +984,14 @@ Unsigned(x::Union{Float16, Float32, Float64, Bool}) = UInt(x) Integer(x::Integer) = x Integer(x::Union{Float16, Float32, Float64}) = Int(x) +# During definition of struct type `B`, if an `A.B` expression refers to +# the eventual global name of the struct, then return the partially-initialized +# type object. +# TODO: remove. This is a shim for backwards compatibility. +function struct_name_shim(@nospecialize(x), name::Symbol, mod::Module, @nospecialize(t)) + return x === mod ? t : getfield(x, name) +end + # Binding for the julia parser, called as # # Core._parse(text, filename, lineno, offset, options) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index b48cb48bf0b79..7acc8a1954bc5 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -963,6 +963,19 @@ (ctors-min-initialized (car expr)) (ctors-min-initialized (cdr expr))))) +(define (insert-struct-shim field-types name) + (map (lambda (x) + (expr-replace (lambda (y) + (and (length= y 3) (eq? (car y) '|.|) + (or (equal? (caddr y) `(quote ,name)) + (equal? (caddr y) `(inert ,name))))) + x + (lambda (y) + `(call (core struct_name_shim) + ,(cadr y) ,(caddr y) + (thismodule) ,name)))) + field-types)) + (define (struct-def-expr- name params bounds super fields0 mut) (receive (fields defs) (separate eventually-decl? fields0) @@ -1022,11 +1035,9 @@ prev params) (quote parameters)))) - '())) - ;; otherwise do an assignment to trigger an error - (const (globalref (thismodule) ,name) ,name))) - (const (globalref (thismodule) ,name) ,name)) - (call (core _typebody!) ,name (call (core svec) ,@field-types)) + '()))))) + (call (core _typebody!) 
,name (call (core svec) ,@(insert-struct-shim field-types name))) + (const (globalref (thismodule) ,name) ,name) (null))) ;; "inner" constructors (scope-block diff --git a/src/utils.scm b/src/utils.scm index 97464b9a14e5a..79e3a280b9886 100644 --- a/src/utils.scm +++ b/src/utils.scm @@ -48,6 +48,13 @@ (any (lambda (y) (expr-contains-p p y filt)) (cdr expr)))))) +(define (expr-replace p expr repl) + (cond ((p expr) (repl expr)) + ((and (pair? expr) (not (quoted? expr))) + (cons (car expr) + (map (lambda (x) (expr-replace p x repl)) (cdr expr)))) + (else expr))) + ;; find all subexprs satisfying `p`, applying `key` to each one (define (expr-find-all p expr key (filt (lambda (x) #t))) (if (filt expr) diff --git a/test/core.jl b/test/core.jl index 4b5a674ba44b3..1b36db466ce19 100644 --- a/test/core.jl +++ b/test/core.jl @@ -7669,7 +7669,7 @@ end end @test fieldtypes(M36104.T36104) == (Vector{M36104.T36104},) @test_throws ErrorException("expected") @eval(struct X36104; x::error("expected"); end) -@test @isdefined(X36104) +@test !@isdefined(X36104) struct X36104; x::Int; end @test fieldtypes(X36104) == (Int,) primitive type P36104 8 end From fb5e96acd533fc0619e91c474ce7b74baf04ede0 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sat, 9 Nov 2024 16:22:55 +0530 Subject: [PATCH 408/537] Merge identical methods for Symmetric/Hermitian and SymTridiagonal (#56434) Since the methods do identical things, we may define each method once for a union of types instead of defining methods for each type. --- stdlib/LinearAlgebra/src/symmetric.jl | 151 +++++++++++--------------- 1 file changed, 65 insertions(+), 86 deletions(-) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl index f8cbac2490794..b059f31737b55 100644 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ b/stdlib/LinearAlgebra/src/symmetric.jl @@ -219,10 +219,16 @@ convert(::Type{T}, m::Union{Symmetric,Hermitian}) where {T<:Hermitian} = m isa T const HermOrSym{T, S} = Union{Hermitian{T,S}, Symmetric{T,S}} const RealHermSym{T<:Real,S} = Union{Hermitian{T,S}, Symmetric{T,S}} +const SymSymTri{T} = Union{Symmetric{T}, SymTridiagonal{T}} +const RealHermSymSymTri{T<:Real} = Union{RealHermSym{T}, SymTridiagonal{T}} const RealHermSymComplexHerm{T<:Real,S} = Union{Hermitian{T,S}, Symmetric{T,S}, Hermitian{Complex{T},S}} const RealHermSymComplexSym{T<:Real,S} = Union{Hermitian{T,S}, Symmetric{T,S}, Symmetric{Complex{T},S}} +const RealHermSymSymTriComplexHerm{T<:Real} = Union{RealHermSymComplexSym{T}, SymTridiagonal{T}} const SelfAdjoint = Union{Symmetric{<:Real}, Hermitian{<:Number}} +wrappertype(::Union{Symmetric, SymTridiagonal}) = Symmetric +wrappertype(::Hermitian) = Hermitian + size(A::HermOrSym) = size(A.data) axes(A::HermOrSym) = axes(A.data) @inline function Base.isassigned(A::HermOrSym, i::Int, j::Int) @@ -814,15 +820,15 @@ end ^(A::Symmetric{<:Complex}, p::Integer) = sympow(A, p) ^(A::SymTridiagonal{<:Real}, p::Integer) = sympow(A, p) ^(A::SymTridiagonal{<:Complex}, p::Integer) = sympow(A, p) +function sympow(A::SymSymTri, p::Integer) + if p < 0 + return Symmetric(Base.power_by_squaring(inv(A), -p)) + else + return Symmetric(Base.power_by_squaring(A, p)) + end +end for hermtype in (:Symmetric, :SymTridiagonal) @eval begin - function sympow(A::$hermtype, p::Integer) - if p < 0 - return Symmetric(Base.power_by_squaring(inv(A), -p)) - else - return Symmetric(Base.power_by_squaring(A, p)) - end - end function ^(A::$hermtype{<:Real}, p::Real) isinteger(p) && return integerpow(A, p) F = eigen(A) @@ -844,8 
+850,8 @@ function ^(A::Hermitian, p::Integer) else retmat = Base.power_by_squaring(A, p) end - for i = 1:size(A,1) - retmat[i,i] = real(retmat[i,i]) + for i in diagind(retmat, IndexStyle(retmat)) + retmat[i] = real(retmat[i]) end return Hermitian(retmat) end @@ -857,8 +863,8 @@ function ^(A::Hermitian{T}, p::Real) where T if T <: Real return Hermitian(retmat) else - for i = 1:size(A,1) - retmat[i,i] = real(retmat[i,i]) + for i in diagind(retmat, IndexStyle(retmat)) + retmat[i] = real(retmat[i]) end return Hermitian(retmat) end @@ -873,34 +879,25 @@ function ^(A::Hermitian{T}, p::Real) where T end for func in (:exp, :cos, :sin, :tan, :cosh, :sinh, :tanh, :atan, :asinh, :atanh, :cbrt) - for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] - @eval begin - function ($func)(A::$hermtype{<:Real}) - F = eigen(A) - return $wrapper((F.vectors * Diagonal(($func).(F.values))) * F.vectors') - end - end - end @eval begin + function ($func)(A::RealHermSymSymTri) + F = eigen(A) + return wrappertype(A)((F.vectors * Diagonal(($func).(F.values))) * F.vectors') + end function ($func)(A::Hermitian{<:Complex}) - n = checksquare(A) F = eigen(A) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' - for i = 1:n - retmat[i,i] = real(retmat[i,i]) + for i in diagind(retmat, IndexStyle(retmat)) + retmat[i] = real(retmat[i]) end return Hermitian(retmat) end end end -for wrapper in (:Symmetric, :Hermitian, :SymTridiagonal) - @eval begin - function cis(A::$wrapper{<:Real}) - F = eigen(A) - return Symmetric(F.vectors .* cis.(F.values') * F.vectors') - end - end +function cis(A::RealHermSymSymTri) + F = eigen(A) + return Symmetric(F.vectors .* cis.(F.values') * F.vectors') end function cis(A::Hermitian{<:Complex}) F = eigen(A) @@ -909,26 +906,21 @@ end for func in (:acos, :asin) - for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] - @eval begin - function ($func)(A::$hermtype{<:Real}) - F = eigen(A) - if all(λ -> -1 ≤ λ ≤ 1, F.values) - return $wrapper((F.vectors * Diagonal(($func).(F.values))) * F.vectors') - else - return Symmetric((F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors') - end + @eval begin + function ($func)(A::RealHermSymSymTri) + F = eigen(A) + if all(λ -> -1 ≤ λ ≤ 1, F.values) + return wrappertype(A)((F.vectors * Diagonal(($func).(F.values))) * F.vectors') + else + return Symmetric((F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors') end end - end - @eval begin function ($func)(A::Hermitian{<:Complex}) - n = checksquare(A) F = eigen(A) if all(λ -> -1 ≤ λ ≤ 1, F.values) retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' - for i = 1:n - retmat[i,i] = real(retmat[i,i]) + for i in diagind(retmat, IndexStyle(retmat)) + retmat[i] = real(retmat[i]) end return Hermitian(retmat) else @@ -938,25 +930,20 @@ for func in (:acos, :asin) end end -for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] - @eval begin - function acosh(A::$hermtype{<:Real}) - F = eigen(A) - if all(λ -> λ ≥ 1, F.values) - return $wrapper((F.vectors * Diagonal(acosh.(F.values))) * F.vectors') - else - return Symmetric((F.vectors * Diagonal(acosh.(complex.(F.values)))) * F.vectors') - end - end +function acosh(A::RealHermSymSymTri) + F = eigen(A) + if all(λ -> λ ≥ 1, F.values) + return wrappertype(A)((F.vectors * Diagonal(acosh.(F.values))) * F.vectors') + else + return Symmetric((F.vectors * 
Diagonal(acosh.(complex.(F.values)))) * F.vectors') end end function acosh(A::Hermitian{<:Complex}) - n = checksquare(A) F = eigen(A) if all(λ -> λ ≥ 1, F.values) retmat = (F.vectors * Diagonal(acosh.(F.values))) * F.vectors' - for i = 1:n - retmat[i,i] = real(retmat[i,i]) + for i in diagind(retmat, IndexStyle(retmat)) + retmat[i] = real(retmat[i]) end return Hermitian(retmat) else @@ -964,32 +951,28 @@ function acosh(A::Hermitian{<:Complex}) end end -for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] - @eval begin - function sincos(A::$hermtype{<:Real}) - n = checksquare(A) - F = eigen(A) - T = float(eltype(F.values)) - S, C = Diagonal(similar(A, T, (n,))), Diagonal(similar(A, T, (n,))) - for i in 1:n - S.diag[i], C.diag[i] = sincos(F.values[i]) - end - return $wrapper((F.vectors * S) * F.vectors'), $wrapper((F.vectors * C) * F.vectors') - end +function sincos(A::RealHermSymSymTri) + n = checksquare(A) + F = eigen(A) + T = float(eltype(F.values)) + S, C = Diagonal(similar(A, T, (n,))), Diagonal(similar(A, T, (n,))) + for i in eachindex(S.diag, C.diag, F.values) + S.diag[i], C.diag[i] = sincos(F.values[i]) end + return wrappertype(A)((F.vectors * S) * F.vectors'), wrappertype(A)((F.vectors * C) * F.vectors') end function sincos(A::Hermitian{<:Complex}) n = checksquare(A) F = eigen(A) T = float(eltype(F.values)) S, C = Diagonal(similar(A, T, (n,))), Diagonal(similar(A, T, (n,))) - for i in 1:n + for i in eachindex(S.diag, C.diag, F.values) S.diag[i], C.diag[i] = sincos(F.values[i]) end retmatS, retmatC = (F.vectors * S) * F.vectors', (F.vectors * C) * F.vectors' - for i = 1:n - retmatS[i,i] = real(retmatS[i,i]) - retmatC[i,i] = real(retmatC[i,i]) + for i in diagind(retmatS, IndexStyle(retmatS)) + retmatS[i] = real(retmatS[i]) + retmatC[i] = real(retmatC[i]) end return Hermitian(retmatS), Hermitian(retmatC) end @@ -999,28 +982,24 @@ for func in (:log, :sqrt) # sqrt has rtol arg to handle matrices that are semidefinite up to roundoff errors rtolarg = func === :sqrt ? Any[Expr(:kw, :(rtol::Real), :(eps(real(float(one(T))))*size(A,1)))] : Any[] rtolval = func === :sqrt ? 
:(-maximum(abs, F.values) * rtol) : 0 - for (hermtype, wrapper) in [(:Symmetric, :Symmetric), (:SymTridiagonal, :Symmetric), (:Hermitian, :Hermitian)] - @eval begin - function ($func)(A::$hermtype{T}; $(rtolarg...)) where {T<:Real} - F = eigen(A) - λ₀ = $rtolval # treat λ ≥ λ₀ as "zero" eigenvalues up to roundoff - if all(λ -> λ ≥ λ₀, F.values) - return $wrapper((F.vectors * Diagonal(($func).(max.(0, F.values)))) * F.vectors') - else - return Symmetric((F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors') - end + @eval begin + function ($func)(A::RealHermSymSymTri{T}; $(rtolarg...)) where {T<:Real} + F = eigen(A) + λ₀ = $rtolval # treat λ ≥ λ₀ as "zero" eigenvalues up to roundoff + if all(λ -> λ ≥ λ₀, F.values) + return wrappertype(A)((F.vectors * Diagonal(($func).(max.(0, F.values)))) * F.vectors') + else + return Symmetric((F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors') end end - end - @eval begin function ($func)(A::Hermitian{T}; $(rtolarg...)) where {T<:Complex} n = checksquare(A) F = eigen(A) λ₀ = $rtolval # treat λ ≥ λ₀ as "zero" eigenvalues up to roundoff if all(λ -> λ ≥ λ₀, F.values) retmat = (F.vectors * Diagonal(($func).(max.(0, F.values)))) * F.vectors' - for i = 1:n - retmat[i,i] = real(retmat[i,i]) + for i in diagind(retmat, IndexStyle(retmat)) + retmat[i] = real(retmat[i]) end return Hermitian(retmat) else From 473d0db201c01ecb4be77fdf80e12b019644560c Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sat, 9 Nov 2024 18:15:06 +0530 Subject: [PATCH 409/537] Specialize findlast for integer AbstractUnitRanges and StepRanges (#54902) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For monotonic ranges, `findfirst` and `findlast` with `==(val)` as the predicate should be identical, as each value appears only once in the range. Since `findfirst` is specialized for some ranges, we may define `findlast` as well analogously. On v"1.12.0-DEV.770" ```julia julia> @btime findlast(==(1), $(Ref(1:1_000))[]) 1.186 μs (0 allocations: 0 bytes) 1 ``` This PR ```julia julia> @btime findlast(==(1), $(Ref(1:1_000))[]) 3.171 ns (0 allocations: 0 bytes) 1 ``` I've also specialized `findfirst(iszero, r::AbstractRange)` to make this be equivalent to `findfirst(==(0), ::AbstractRange)` for numerical ranges. Similarly, for `isone`. These now take the fast path as well. Thirdly, I've added some `convert` calls to address issues like ```julia julia> r = Int128(1):Int128(1):Int128(4); julia> findfirst(==(Int128(2)), r) |> typeof Int128 julia> keytype(r) Int64 ``` This PR ensures that the return type always corresponds to `keytype`, which is what the docstring promises. This PR also fixes ```julia julia> findfirst(==(0), UnitRange(-0.5, 0.5)) ERROR: InexactError: Int64(0.5) Stacktrace: [1] Int64 @ ./float.jl:994 [inlined] [2] findfirst(p::Base.Fix2{typeof(==), Int64}, r::UnitRange{Float64}) @ Base ./array.jl:2397 [3] top-level scope @ REPL[1]:1 ``` which now returns `nothing`, as expected. --- base/array.jl | 32 ++++++++++++++++++++++++++------ test/ranges.jl | 40 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+), 6 deletions(-) diff --git a/base/array.jl b/base/array.jl index 65cc29f38c911..4c3dde73d52ba 100644 --- a/base/array.jl +++ b/base/array.jl @@ -2439,20 +2439,29 @@ end findfirst(testf::Function, A::Union{AbstractArray, AbstractString}) = findnext(testf, A, first(keys(A))) -findfirst(p::Union{Fix2{typeof(isequal),Int},Fix2{typeof(==),Int}}, r::OneTo{Int}) = - 1 <= p.x <= r.stop ? 
p.x : nothing +findfirst(p::Union{Fix2{typeof(isequal),T},Fix2{typeof(==),T}}, r::OneTo) where {T<:Integer} = + 1 <= p.x <= r.stop ? convert(keytype(r), p.x) : nothing -findfirst(p::Union{Fix2{typeof(isequal),T},Fix2{typeof(==),T}}, r::AbstractUnitRange) where {T<:Integer} = - first(r) <= p.x <= last(r) ? firstindex(r) + Int(p.x - first(r)) : nothing +findfirst(::typeof(iszero), ::OneTo) = nothing +findfirst(::typeof(isone), r::OneTo) = isempty(r) ? nothing : oneunit(keytype(r)) + +function findfirst(p::Union{Fix2{typeof(isequal),T},Fix2{typeof(==),T}}, r::AbstractUnitRange{<:Integer}) where {T<:Integer} + first(r) <= p.x <= last(r) || return nothing + i1 = first(keys(r)) + return i1 + oftype(i1, p.x - first(r)) +end function findfirst(p::Union{Fix2{typeof(isequal),T},Fix2{typeof(==),T}}, r::StepRange{T,S}) where {T,S} isempty(r) && return nothing minimum(r) <= p.x <= maximum(r) || return nothing - d = convert(S, p.x - first(r))::S + d = p.x - first(r) iszero(d % step(r)) || return nothing - return d ÷ step(r) + 1 + return convert(keytype(r), d ÷ step(r) + 1) end +findfirst(::typeof(iszero), r::AbstractRange) = findfirst(==(zero(first(r))), r) +findfirst(::typeof(isone), r::AbstractRange) = findfirst(==(one(first(r))), r) + """ findprev(A, i) @@ -2623,6 +2632,17 @@ end findlast(testf::Function, A::Union{AbstractArray, AbstractString}) = findprev(testf, A, last(keys(A))) +# for monotonic ranges, there is a unique index corresponding to a value, so findfirst and findlast are identical +function findlast(p::Union{Fix2{typeof(isequal),<:Integer},Fix2{typeof(==),<:Integer},typeof(iszero),typeof(isone)}, + r::AbstractUnitRange{<:Integer}) + findfirst(p, r) +end + +function findlast(p::Union{Fix2{typeof(isequal),T},Fix2{typeof(==),T},typeof(iszero),typeof(isone)}, + r::StepRange{T,S}) where {T,S} + findfirst(p, r) +end + """ findall(f::Function, A) diff --git a/test/ranges.jl b/test/ranges.jl index 73595e3056081..d79851d7056e0 100644 --- a/test/ranges.jl +++ b/test/ranges.jl @@ -438,15 +438,55 @@ end @test findfirst(isequal(3), Base.OneTo(10)) == 3 @test findfirst(==(0), Base.OneTo(10)) === nothing @test findfirst(==(11), Base.OneTo(10)) === nothing + @test @inferred((r -> Val(findfirst(iszero, r)))(Base.OneTo(10))) == Val(nothing) + @test findfirst(isone, Base.OneTo(10)) === 1 + @test findfirst(isone, Base.OneTo(0)) === nothing @test findfirst(==(4), Int16(3):Int16(7)) === Int(2) @test findfirst(==(2), Int16(3):Int16(7)) === nothing @test findfirst(isequal(8), 3:7) === nothing + @test findfirst(==(0), UnitRange(-0.5, 0.5)) === nothing + @test findfirst(==(2), big(1):big(2)) === 2 @test findfirst(isequal(7), 1:2:10) == 4 + @test findfirst(iszero, -5:5) == 6 + @test findfirst(iszero, 2:5) === nothing + @test findfirst(iszero, 6:5) === nothing + @test findfirst(isone, -5:5) == 7 + @test findfirst(isone, 2:5) === nothing + @test findfirst(isone, 6:5) === nothing @test findfirst(==(7), 1:2:10) == 4 @test findfirst(==(10), 1:2:10) === nothing @test findfirst(==(11), 1:2:10) === nothing @test findfirst(==(-7), 1:-1:-10) == 9 @test findfirst(==(2),1:-1:2) === nothing + @test findfirst(iszero, 5:-2:-5) === nothing + @test findfirst(iszero, 6:-2:-6) == 4 + @test findfirst(==(Int128(2)), Int128(1):Int128(1):Int128(4)) === 2 + end + @testset "findlast" begin + @test findlast(==(1), Base.IdentityUnitRange(-1:1)) == 1 + @test findlast(isequal(3), Base.OneTo(10)) == 3 + @test findlast(==(0), Base.OneTo(10)) === nothing + @test findlast(==(11), Base.OneTo(10)) === nothing + @test @inferred((() -> 
Val(findlast(iszero, Base.OneTo(10))))()) == Val(nothing) + @test findlast(isone, Base.OneTo(10)) == 1 + @test findlast(isone, Base.OneTo(0)) === nothing + @test findlast(==(4), Int16(3):Int16(7)) === Int(2) + @test findlast(==(2), Int16(3):Int16(7)) === nothing + @test findlast(isequal(8), 3:7) === nothing + @test findlast(==(0), UnitRange(-0.5, 0.5)) === nothing + @test findlast(==(2), big(1):big(2)) === 2 + @test findlast(isequal(7), 1:2:10) == 4 + @test findlast(iszero, -5:5) == 6 + @test findlast(iszero, 2:5) === nothing + @test findlast(iszero, 6:5) === nothing + @test findlast(==(7), 1:2:10) == 4 + @test findlast(==(10), 1:2:10) === nothing + @test findlast(==(11), 1:2:10) === nothing + @test findlast(==(-7), 1:-1:-10) == 9 + @test findlast(==(2),1:-1:2) === nothing + @test findlast(iszero, 5:-2:-5) === nothing + @test findlast(iszero, 6:-2:-6) == 4 + @test findlast(==(Int128(2)), Int128(1):Int128(1):Int128(4)) === 2 end @testset "reverse" begin @test reverse(reverse(1:10)) == 1:10 From 024d42a6116681dae3528c51184ac41d6d2550c5 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sat, 9 Nov 2024 18:41:07 +0530 Subject: [PATCH 410/537] Loop over `Iterators.rest` in `_foldl_impl` (#56492) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For reasons that I don't understand, this improves performance in `mapreduce` in the following example: ```julia julia> function g(A) for col in axes(A,2) mapreduce(iszero, &, view(A, UnitRange(axes(A,1)), col), init=true) || return false end return true end g (generic function with 2 methods) julia> A = zeros(2, 10000); julia> @btime g($A); 28.021 μs (0 allocations: 0 bytes) # nightly v"1.12.0-DEV.1571" 12.462 μs (0 allocations: 0 bytes) # this PR julia> A = zeros(1000,1000); julia> @btime g($A); 372.080 μs (0 allocations: 0 bytes) # nightly 321.753 μs (0 allocations: 0 bytes) # this PR ``` It would be good to understand what the underlying issue is, as the two seem equivalent to me. Perhaps this form makes it clear that it's not, in fact, an infinite loop? --- base/reduce.jl | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/base/reduce.jl b/base/reduce.jl index 952d71bb2a849..6ceb76089d59c 100644 --- a/base/reduce.jl +++ b/base/reduce.jl @@ -43,15 +43,15 @@ function foldl_impl(op::OP, nt, itr) where {OP} end function _foldl_impl(op::OP, init, itr) where {OP} - # Unroll the while loop once; if init is known, the call to op may - # be evaluated at compile time + # Unroll the loop once to check if the iterator is empty. + # If init is known, the call to op may be evaluated at compile time y = iterate(itr) y === nothing && return init v = op(init, y[1]) - while true - y = iterate(itr, y[2]) - y === nothing && break - v = op(v, y[1]) + # Using a for loop is more performant than a while loop (see #56492) + # This unrolls the loop a second time before entering the body + for x in Iterators.rest(itr, y[2]) + v = op(v, x) end return v end From ecfd1a042fd064e23ca602ac97e008cbafc4d4f9 Mon Sep 17 00:00:00 2001 From: "Steven G. Johnson" Date: Sat, 9 Nov 2024 09:38:13 -0500 Subject: [PATCH 411/537] better error message for rpad/lpad with zero-width padding (#56488) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Closes #45339 — throw a more informative `ArgumentError` message from `rpad` and `lpad` if a zero-`textwidth` padding is passed (not a `DivideError`). 
If the padding character has `ncodeunits == 1`, suggests that maybe they want `str * pad^max(0, npad - ncodeunits(str))` instead. --- base/strings/util.jl | 14 ++++++++++++-- test/strings/util.jl | 7 +++++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/base/strings/util.jl b/base/strings/util.jl index fcccb9babadfd..1b73fbbbab5cf 100644 --- a/base/strings/util.jl +++ b/base/strings/util.jl @@ -476,7 +476,12 @@ function lpad( n = Int(n)::Int m = signed(n) - Int(textwidth(s))::Int m ≤ 0 && return stringfn(s) - l = textwidth(p) + l = Int(textwidth(p))::Int + if l == 0 + throw(ArgumentError("$(repr(p)) has zero textwidth" * (ncodeunits(p) != 1 ? "" : + "; maybe you want pad^max(0, npad - ncodeunits(str)) * str to pad by codeunits" * + (s isa AbstractString && codeunit(s) != UInt8 ? "?" : " (bytes)?")))) + end q, r = divrem(m, l) r == 0 ? stringfn(p^q, s) : stringfn(p^q, first(p, r), s) end @@ -508,7 +513,12 @@ function rpad( n = Int(n)::Int m = signed(n) - Int(textwidth(s))::Int m ≤ 0 && return stringfn(s) - l = textwidth(p) + l = Int(textwidth(p))::Int + if l == 0 + throw(ArgumentError("$(repr(p)) has zero textwidth" * (ncodeunits(p) != 1 ? "" : + "; maybe you want str * pad^max(0, npad - ncodeunits(str)) to pad by codeunits" * + (s isa AbstractString && codeunit(s) != UInt8 ? "?" : " (bytes)?")))) + end q, r = divrem(m, l) r == 0 ? stringfn(s, p^q) : stringfn(s, p^q, first(p, r)) end diff --git a/test/strings/util.jl b/test/strings/util.jl index ae16e24f4ea8b..bb87881bbaa1d 100644 --- a/test/strings/util.jl +++ b/test/strings/util.jl @@ -65,6 +65,13 @@ end @test rpad("⟨k|H₁|k̃⟩", 12) |> textwidth == 12 @test lpad("⟨k|H₁|k⟩", 12) |> textwidth == 12 @test rpad("⟨k|H₁|k⟩", 12) |> textwidth == 12 + for pad in (rpad, lpad), p in ('\0', "\0", "\0\0", "\u302") + if ncodeunits(p) == 1 + @test_throws r".*has zero textwidth.*maybe you want.*bytes.*" pad("foo", 10, p) + else + @test_throws r".*has zero textwidth$" pad("foo", 10, p) + end + end end @testset "string truncation (ltruncate, rtruncate, ctruncate)" begin From 803316a0c3dbf6cb5c7d18d2345bf4bc2b449caa Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sat, 9 Nov 2024 23:48:48 +0530 Subject: [PATCH 412/537] Safer indexing in dense linalg methods (#56451) Ensure that `eachindex` is used consistently alongside `@inbounds`, and use `diagind` to obtain indices along a diagonal. 
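For illustration, a minimal sketch of the two patterns this commit standardizes on (not part of the diff; the variable names are hypothetical, and the diff itself uses the `diagind(A, IndexStyle(A))` form to pick the array's native index style):

```julia
julia> using LinearAlgebra

julia> A = rand(ComplexF64, 4, 4);

julia> for i in diagind(A)  # linear indices of the main diagonal
           A[i] = real(A[i])
       end

julia> isreal(diag(A))
true

julia> X = rand(3, 3); Y = rand(3, 3); Z = similar(X);

julia> @inbounds for i in eachindex(Z, X, Y)  # eachindex over every indexed array keeps @inbounds valid
           Z[i] = X[i] - Y[i]
       end

julia> Z == X .- Y
true
```

The first loop avoids constructing `(i, i)` Cartesian pairs; the second ties the `@inbounds` annotation to every array that is actually indexed, which is what the hunks below switch to.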
--- stdlib/LinearAlgebra/src/dense.jl | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index b8d5c84c3db53..0a5f97889196c 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -559,14 +559,13 @@ function schurpow(A::AbstractMatrix, p) end end function (^)(A::AbstractMatrix{T}, p::Real) where T - n = checksquare(A) - + checksquare(A) # Quicker return if A is diagonal if isdiag(A) TT = promote_op(^, T, typeof(p)) retmat = copymutable_oftype(A, TT) - for i in axes(retmat,1) - retmat[i, i] = retmat[i, i] ^ p + for i in diagind(retmat, IndexStyle(retmat)) + retmat[i] = retmat[i] ^ p end return retmat end @@ -1080,7 +1079,7 @@ function sin(A::AbstractMatrix{<:Complex}) T = complex(float(eltype(A))) X = exp!(T.(im .* A)) Y = exp!(T.(.-im .* A)) - @inbounds for i in eachindex(X) + @inbounds for i in eachindex(X, Y) x, y = X[i]/2, Y[i]/2 X[i] = Complex(imag(x)-imag(y), real(y)-real(x)) end @@ -1128,7 +1127,7 @@ function sincos(A::AbstractMatrix{<:Complex}) T = complex(float(eltype(A))) X = exp!(T.(im .* A)) Y = exp!(T.(.-im .* A)) - @inbounds for i in eachindex(X) + @inbounds for i in eachindex(X, Y) x, y = X[i]/2, Y[i]/2 X[i] = Complex(imag(x)-imag(y), real(y)-real(x)) Y[i] = x+y @@ -1200,7 +1199,7 @@ function tanh(A::AbstractMatrix) end X = exp(A) Y = exp!(float.(.-A)) - @inbounds for i in eachindex(X) + @inbounds for i in eachindex(X, Y) x, y = X[i], Y[i] X[i] = x - y Y[i] = x + y From cd748a5c5b8575e00ce8e7f14ac4c79113d11171 Mon Sep 17 00:00:00 2001 From: "Viral B. Shah" Date: Sat, 9 Nov 2024 16:30:27 -0500 Subject: [PATCH 413/537] The `info` in LAPACK calls should be a Ref instead of a Ptr (#56511) Co-authored-by: Viral B. 
Shah --- stdlib/LinearAlgebra/src/lapack.jl | 164 ++++++++++++++--------------- 1 file changed, 82 insertions(+), 82 deletions(-) diff --git a/stdlib/LinearAlgebra/src/lapack.jl b/stdlib/LinearAlgebra/src/lapack.jl index 5c2b66881585c..f53e8bd98454d 100644 --- a/stdlib/LinearAlgebra/src/lapack.jl +++ b/stdlib/LinearAlgebra/src/lapack.jl @@ -160,7 +160,7 @@ for (gbtrf, gbtrs, elty) in info = Ref{BlasInt}() ccall((@blasfunc($gbtrf), libblastrampoline), Cvoid, (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}), + Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}), m, n, kl, ku, AB, max(1,stride(AB,2)), ipiv, info) chklapackerror(info[]) AB, ipiv @@ -187,7 +187,7 @@ for (gbtrf, gbtrs, elty) in ccall((@blasfunc($gbtrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Clong), + Ref{BlasInt}, Clong), trans, n, kl, ku, size(B,2), AB, max(1,stride(AB,2)), ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) @@ -242,7 +242,7 @@ for (gebal, gebak, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($gebal), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$relty}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$relty}, Ref{BlasInt}, Clong), job, n, A, max(1,stride(A,2)), ilo, ihi, scale, info, 1) chklapackerror(info[]) ilo[], ihi[], scale @@ -586,7 +586,7 @@ for (gebrd, gelqf, geqlf, geqrf, geqp3, geqrt, geqrt3, gerqf, getrf, elty, relty info = Ref{BlasInt}() ccall((@blasfunc($getrf), libblastrampoline), Cvoid, (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}), + Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}), m, n, A, lda, ipiv, info) chkargsok(info[]) A, ipiv, info[] #Error code is stored in LU factorization type @@ -923,7 +923,7 @@ for (tzrzf, ormrz, elty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), side, trans, m, n, k, l, A, lda, tau, C, ldc, work, @@ -987,7 +987,7 @@ for (gels, gesv, getrs, getri, elty) in ccall((@blasfunc($gels), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), (btrn ? 
'T' : 'N'), m, n, size(B,2), A, max(1,stride(A,2)), B, max(1,stride(B,2)), work, lwork, info, 1) chklapackerror(info[]) @@ -1055,7 +1055,7 @@ for (gels, gesv, getrs, getri, elty) in info = Ref{BlasInt}() ccall((@blasfunc($getrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), trans, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -1181,7 +1181,7 @@ for (gesvx, elty) in Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{UInt8}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Clong, Clong, Clong), + Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong, Clong, Clong), fact, trans, n, nrhs, A, lda, AF, ldaf, ipiv, equed, R, C, B, ldb, X, n, rcond, ferr, berr, work, iwork, info, 1, 1, 1) chklapackerror(info[]) @@ -1253,7 +1253,7 @@ for (gesvx, elty, relty) in Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{UInt8}, Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{$relty}, Ptr{$relty}, - Ptr{$elty}, Ptr{$relty}, Ptr{BlasInt}, Clong, Clong, Clong), + Ptr{$elty}, Ptr{$relty}, Ref{BlasInt}, Clong, Clong, Clong), fact, trans, n, nrhs, A, lda, AF, ldaf, ipiv, equed, R, C, B, ldb, X, n, rcond, ferr, berr, work, rwork, info, 1, 1, 1) chklapackerror(info[]) @@ -1634,7 +1634,7 @@ for (geev, gesvd, gesdd, ggsvd, elty, relty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{BlasInt}, Clong, Clong), + Ptr{$relty}, Ref{BlasInt}, Clong, Clong), jobvl, jobvr, n, A, max(1,stride(A,2)), W, VL, n, VR, n, work, lwork, rwork, info, 1, 1) else @@ -1642,7 +1642,7 @@ for (geev, gesvd, gesdd, ggsvd, elty, relty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), jobvl, jobvr, n, A, max(1,stride(A,2)), WR, WI, VL, n, VR, n, work, lwork, info, 1, 1) end @@ -1699,7 +1699,7 @@ for (geev, gesvd, gesdd, ggsvd, elty, relty) in (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$relty}, Ptr{BlasInt}, Ref{BlasInt}, Clong), job, m, n, A, max(1,stride(A,2)), S, U, max(1,stride(U,2)), VT, max(1,stride(VT,2)), work, lwork, rwork, iwork, info, 1) else @@ -1707,7 +1707,7 @@ for (geev, gesvd, gesdd, ggsvd, elty, relty) in (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ref{BlasInt}, Clong), job, m, n, A, max(1,stride(A,2)), S, U, max(1,stride(U,2)), VT, max(1,stride(VT,2)), work, lwork, iwork, info, 1) end @@ -1771,7 +1771,7 @@ for (geev, gesvd, gesdd, ggsvd, elty, relty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$relty}, Ptr{BlasInt}, Clong, Clong), + 
Ref{BlasInt}, Ptr{$relty}, Ref{BlasInt}, Clong, Clong), jobu, jobvt, m, n, A, max(1,stride(A,2)), S, U, max(1,stride(U,2)), VT, max(1,stride(VT,2)), work, lwork, rwork, info, 1, 1) else @@ -1779,7 +1779,7 @@ for (geev, gesvd, gesdd, ggsvd, elty, relty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), jobu, jobvt, m, n, A, max(1,stride(A,2)), S, U, max(1,stride(U,2)), VT, max(1,stride(VT,2)), work, lwork, info, 1, 1) end @@ -2047,7 +2047,7 @@ for (f, elty, relty) in ((:zggsvd3_, :ComplexF64, :Float64), Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{BlasInt}, - Ptr{BlasInt}, Clong, Clong, Clong), + Ref{BlasInt}, Clong, Clong, Clong), jobu, jobv, jobq, m, n, p, k, l, A, lda, B, ldb, @@ -2691,7 +2691,7 @@ for (gtsv, gttrf, gttrs, elty) in info = Ref{BlasInt}() ccall((@blasfunc($gttrf), libblastrampoline), Cvoid, (Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ptr{BlasInt}, Ptr{BlasInt}), + Ptr{BlasInt}, Ref{BlasInt}), n, dl, d, du, du2, ipiv, info) chklapackerror(info[]) dl, d, du, du2, ipiv @@ -2724,7 +2724,7 @@ for (gtsv, gttrf, gttrs, elty) in ccall((@blasfunc($gttrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), trans, n, size(B,2), dl, d, du, du2, ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -2946,7 +2946,7 @@ for (orglq, orgqr, orgql, orgrq, ormlq, ormqr, ormql, ormrq, gemqrt, elty) in ccall((@blasfunc($ormlq), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), side, trans, m, n, k, A, max(1,stride(A,2)), tau, C, max(1,stride(C,2)), work, lwork, info, 1, 1) chklapackerror(info[]) @@ -2994,7 +2994,7 @@ for (orglq, orgqr, orgql, orgrq, ormlq, ormqr, ormql, ormrq, gemqrt, elty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Clong, Clong), side, trans, m, n, k, A, max(1,stride(A,2)), tau, C, max(1, stride(C,2)), work, lwork, @@ -3044,7 +3044,7 @@ for (orglq, orgqr, orgql, orgrq, ormlq, ormqr, ormql, ormrq, gemqrt, elty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Clong, Clong), side, trans, m, n, k, A, max(1,stride(A,2)), tau, C, max(1, stride(C,2)), work, lwork, @@ -3093,7 +3093,7 @@ for (orglq, orgqr, orgql, orgrq, ormlq, ormqr, ormql, ormrq, gemqrt, elty) in ccall((@blasfunc($ormrq), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), side, trans, m, n, k, A, max(1,stride(A,2)), tau, C, max(1,stride(C,2)), work, 
lwork, info, 1, 1) chklapackerror(info[]) @@ -3150,7 +3150,7 @@ for (orglq, orgqr, orgql, orgrq, ormlq, ormqr, ormql, ormrq, gemqrt, elty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{BlasInt}, Clong, Clong), + Ptr{$elty}, Ref{BlasInt}, Clong, Clong), side, trans, m, n, k, nb, V, ldv, T, max(1,stride(T,2)), C, max(1,ldc), @@ -3267,7 +3267,7 @@ for (posv, potrf, potri, potrs, pstrf, elty, rtyp) in info = Ref{BlasInt}() ccall((@blasfunc($posv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), B, max(1,stride(B,2)), info, 1) chkargsok(info[]) chkposdef(info[]) @@ -3291,7 +3291,7 @@ for (posv, potrf, potri, potrs, pstrf, elty, rtyp) in end info = Ref{BlasInt}() ccall((@blasfunc($potrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, size(A,1), A, lda, info, 1) chkargsok(info[]) #info[] > 0 means the leading minor of order info[] is not positive definite @@ -3312,7 +3312,7 @@ for (posv, potrf, potri, potrs, pstrf, elty, rtyp) in chkuplo(uplo) info = Ref{BlasInt}() ccall((@blasfunc($potri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, size(A,1), A, max(1,stride(A,2)), info, 1) chkargsok(info[]) chknonsingular(info[]) @@ -3342,7 +3342,7 @@ for (posv, potrf, potri, potrs, pstrf, elty, rtyp) in info = Ref{BlasInt}() ccall((@blasfunc($potrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, nrhs, A, lda, B, ldb, info, 1) chklapackerror(info[]) @@ -3368,7 +3368,7 @@ for (posv, potrf, potri, potrs, pstrf, elty, rtyp) in info = Ref{BlasInt}() ccall((@blasfunc($pstrf), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{BlasInt}, Ref{$rtyp}, Ptr{$rtyp}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ref{$rtyp}, Ptr{$rtyp}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), piv, rank, tol, work, info, 1) chkargsok(info[]) A, piv, rank[1], info[] #Stored in CholeskyPivoted @@ -3559,7 +3559,7 @@ for (pttrs, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($pttrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), D, E, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -3599,7 +3599,7 @@ for (trtri, trtrs, elty) in info = Ref{BlasInt}() ccall((@blasfunc($trtri), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Clong, Clong), uplo, diag, n, A, lda, info, 1, 1) chklapackerror(info[]) A @@ -3729,7 +3729,7 @@ for (trcon, trevc, trrfs, elty) in (Ref{UInt8}, Ref{UInt8}, Ptr{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ptr{BlasInt}, Clong, Clong), + 
Ptr{$elty}, Ref{BlasInt}, Clong, Clong), side, howmny, select, n, T, ldt, VL, ldvl, VR, ldvr, mm, m, @@ -3785,7 +3785,7 @@ for (trcon, trevc, trrfs, elty) in ccall((@blasfunc($trrfs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Clong, Clong, Clong), + Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong, Clong, Clong), uplo, trans, diag, n, nrhs, A, max(1,stride(A,2)), B, max(1,stride(B,2)), X, max(1,stride(X,2)), Ferr, Berr, work, iwork, info, 1, 1, 1) @@ -3866,7 +3866,7 @@ for (trcon, trevc, trrfs, elty, relty) in (Ref{UInt8}, Ref{UInt8}, Ptr{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ptr{$relty}, Ptr{BlasInt}, Clong, Clong), + Ptr{$elty}, Ptr{$relty}, Ref{BlasInt}, Clong, Clong), side, howmny, select, n, T, ldt, VL, ldvl, VR, ldvr, mm, m, @@ -3922,7 +3922,7 @@ for (trcon, trevc, trrfs, elty, relty) in ccall((@blasfunc($trrfs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ptr{$relty}, Ptr{BlasInt}, Clong, Clong, Clong), + Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ptr{$relty}, Ref{BlasInt}, Clong, Clong, Clong), uplo, trans, diag, n, nrhs, A, max(1,stride(A,2)), B, max(1,stride(B,2)), X, max(1,stride(X,2)), Ferr, Berr, work, rwork, info, 1, 1, 1) @@ -3993,7 +3993,7 @@ for (stev, stebz, stegr, stein, elty) in info = Ref{BlasInt}() ccall((@blasfunc($stev), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong), + Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), job, n, dv, ev, Zmat, n, work, info, 1) chklapackerror(info[]) dv, Zmat @@ -4026,7 +4026,7 @@ for (stev, stebz, stegr, stein, elty) in Ref{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, - Ptr{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ptr{BlasInt}, Ref{BlasInt}, Clong, Clong), range, order, n, vl, vu, il, iu, abstol, dv, ev, m, nsplit, @@ -4220,7 +4220,7 @@ for (syconv, sysv, sytrf, sytri, sytrs, elty) in info = Ref{BlasInt}() ccall((@blasfunc($syconv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong, Clong), uplo, 'C', n, A, max(1,stride(A,2)), ipiv, work, info, 1, 1) chklapackerror(info[]) A, work @@ -4249,7 +4249,7 @@ for (syconv, sysv, sytrf, sytri, sytrs, elty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($sysv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), work, lwork, info, 1) chkargsok(info[]) @@ -4283,7 +4283,7 @@ for (syconv, sysv, sytrf, sytri, sytrs, elty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($sytrf), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - 
Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, A, stride(A,2), ipiv, work, lwork, info, 1) chkargsok(info[]) if i == 1 @@ -4319,7 +4319,7 @@ for (syconv, sysv, sytrf, sytri, sytrs, elty) in # for i in 1:2 # ccall((@blasfunc($sytri), libblastrampoline), Cvoid, # (Ptr{UInt8}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, -# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Clong), +# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong), # &uplo, &n, A, &max(1,stride(A,2)), ipiv, work, &lwork, info, 1) # @assertargsok # chknonsingular(info[]) @@ -4347,7 +4347,7 @@ for (syconv, sysv, sytrf, sytri, sytrs, elty) in info = Ref{BlasInt}() ccall((@blasfunc($sytri), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) chkargsok(info[]) chknonsingular(info[]) @@ -4374,7 +4374,7 @@ for (syconv, sysv, sytrf, sytri, sytrs, elty) in info = Ref{BlasInt}() ccall((@blasfunc($sytrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -4410,7 +4410,7 @@ for (sysv, sytrf, sytri, sytrs, syconvf, elty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($sysv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), work, lwork, info, 1) chkargsok(info[]) @@ -4445,7 +4445,7 @@ for (sysv, sytrf, sytri, sytrs, syconvf, elty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($sytrf), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, A, stride(A,2), ipiv, work, lwork, info, 1) chkargsok(info[]) if i == 1 @@ -4472,7 +4472,7 @@ for (sysv, sytrf, sytri, sytrs, syconvf, elty) in info = Ref{BlasInt}() ccall((@blasfunc($sytri), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) chkargsok(info[]) chknonsingular(info[]) @@ -4499,7 +4499,7 @@ for (sysv, sytrf, sytri, sytrs, syconvf, elty) in info = Ref{BlasInt}() ccall((@blasfunc($sytrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -4575,7 +4575,7 @@ for (syconv, hesv, hetrf, hetri, hetrs, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($syconv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong, Clong), 
+ Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong, Clong), uplo, 'C', n, A, max(1,stride(A,2)), ipiv, work, info, 1, 1) chklapackerror(info[]) A, work @@ -4604,7 +4604,7 @@ for (syconv, hesv, hetrf, hetri, hetrs, elty, relty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($hesv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), work, lwork, info, 1) chklapackerror(info[]) @@ -4635,7 +4635,7 @@ for (syconv, hesv, hetrf, hetri, hetrs, elty, relty) in for i in 1:2 # first call returns lwork as work[1] ccall((@blasfunc($hetrf), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, lwork, info, 1) chkargsok(info[]) if i == 1 @@ -4672,7 +4672,7 @@ for (syconv, hesv, hetrf, hetri, hetrs, elty, relty) in # for i in 1:2 # ccall((@blasfunc($hetri), libblastrampoline), Cvoid, # (Ptr{UInt8}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, -# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Clong), +# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong), # &uplo, &n, A, &max(1,stride(A,2)), ipiv, work, &lwork, info, 1) # chklapackerror(info[]) # if lwork < 0 @@ -4701,7 +4701,7 @@ for (syconv, hesv, hetrf, hetri, hetrs, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($hetri), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) chklapackerror(info[]) A @@ -4727,7 +4727,7 @@ for (syconv, hesv, hetrf, hetri, hetrs, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($hetrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -4762,7 +4762,7 @@ for (hesv, hetrf, hetri, hetrs, elty, relty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($hesv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), work, lwork, info, 1) chklapackerror(info[]) @@ -4794,7 +4794,7 @@ for (hesv, hetrf, hetri, hetrs, elty, relty) in for i in 1:2 # first call returns lwork as work[1] ccall((@blasfunc($hetrf), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, lwork, info, 1) chkargsok(info[]) if i == 1 @@ -4822,7 +4822,7 @@ for (hesv, hetrf, hetri, hetrs, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($hetri), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, 
Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) chklapackerror(info[]) A @@ -4848,7 +4848,7 @@ for (hesv, hetrf, hetri, hetrs, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($hetrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -4884,7 +4884,7 @@ for (sysv, sytrf, sytri, sytrs, elty, relty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($sysv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), work, lwork, info, 1) chkargsok(info[]) @@ -4919,7 +4919,7 @@ for (sysv, sytrf, sytri, sytrs, elty, relty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($sytrf), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, lwork, info, 1) chkargsok(info[]) if i == 1 @@ -4956,7 +4956,7 @@ for (sysv, sytrf, sytri, sytrs, elty, relty) in # for i in 1:2 # ccall((@blasfunc($sytri), libblastrampoline), Cvoid, # (Ptr{UInt8}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, -# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Clong), +# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong), # &uplo, &n, A, &max(1,stride(A,2)), ipiv, work, &lwork, info, 1) # chklapackerror(info[]) # if lwork < 0 @@ -4984,7 +4984,7 @@ for (sysv, sytrf, sytri, sytrs, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($sytri), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) chklapackerror(info[]) A @@ -5010,7 +5010,7 @@ for (sysv, sytrf, sytri, sytrs, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($sytrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -5046,7 +5046,7 @@ for (sysv, sytrf, sytri, sytrs, syconvf, elty, relty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($sysv), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), work, lwork, info, 1) chkargsok(info[]) @@ -5082,7 +5082,7 @@ for (sysv, sytrf, sytri, sytrs, syconvf, elty, relty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($sytrf), 
libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, lwork, info, 1) chkargsok(info[]) if i == 1 @@ -5110,7 +5110,7 @@ for (sysv, sytrf, sytri, sytrs, syconvf, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($sytri), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) chklapackerror(info[]) A @@ -5136,7 +5136,7 @@ for (sysv, sytrf, sytri, sytrs, syconvf, elty, relty) in info = Ref{BlasInt}() ccall((@blasfunc($sytrs), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) chklapackerror(info[]) B @@ -5350,7 +5350,7 @@ for (syev, syevr, syevd, sygvd, elty) in for i = 1:2 # first call returns lwork as work[1] ccall((@blasfunc($syev), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), jobz, uplo, n, A, max(1,stride(A,2)), W, work, lwork, info, 1, 1) chklapackerror(info[]) if i == 1 @@ -5408,7 +5408,7 @@ for (syev, syevr, syevd, sygvd, elty) in Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{BlasInt}, Clong, Clong, Clong), + Ref{BlasInt}, Clong, Clong, Clong), jobz, range, uplo, n, A, max(1,lda), vl, vu, il, iu, abstol, m, @@ -5455,7 +5455,7 @@ for (syev, syevr, syevd, sygvd, elty) in ccall((@blasfunc($syevd), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Clong, Clong), jobz, uplo, n, A, max(1,lda), W, work, lwork, iwork, liwork, info, 1, 1) @@ -5503,7 +5503,7 @@ for (syev, syevr, syevd, sygvd, elty) in (Ref{BlasInt}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), itype, jobz, uplo, n, A, lda, B, ldb, w, work, lwork, iwork, @@ -5667,7 +5667,7 @@ for (syev, syevr, syevd, sygvd, elty, relty) in ccall((@blasfunc($syevd), liblapack), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ref{BlasInt}, - Ptr{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ptr{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), jobz, uplo, n, A, stride(A,2), W, work, lwork, rwork, lrwork, iwork, liwork, info, 1, 1) @@ -5830,7 +5830,7 @@ for (bdsqr, relty, elty) in (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$relty}, Ptr{BlasInt}, Clong), + Ref{BlasInt}, Ptr{$relty}, Ref{BlasInt}, Clong), uplo, n, ncvt, nru, ncc, d, e_, Vt, ldvt, U, ldu, C, @@ -5954,7 +5954,7 
@@ for (gecon, elty) in ccall((@blasfunc($gecon), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ref{$elty}, Ptr{$elty}, Ptr{BlasInt}, - Ptr{BlasInt}, Clong), + Ref{BlasInt}, Clong), normtype, n, A, lda, anorm, rcond, work, iwork, info, 1) chklapackerror(info[]) @@ -5990,7 +5990,7 @@ for (gecon, elty, relty) in ccall((@blasfunc($gecon), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{$relty}, Ref{$relty}, Ptr{$elty}, Ptr{$relty}, - Ptr{BlasInt}, Clong), + Ref{BlasInt}, Clong), normtype, n, A, lda, anorm, rcond, work, rwork, info, 1) chklapackerror(info[]) @@ -6145,7 +6145,7 @@ for (ormhr, elty) in (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), side, trans, mC, nC, ilo, ihi, A, max(1, stride(A, 2)), tau, C, max(1, stride(C, 2)), work, @@ -6292,7 +6292,7 @@ for (hetrd, elty) in ccall((@blasfunc($hetrd), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{$relty}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), uplo, n, A, max(1, stride(A, 2)), d, e, tau, work, lwork, info, 1) chklapackerror(info[]) if i == 1 @@ -6342,7 +6342,7 @@ for (orgtr, elty) in ccall((@blasfunc($orgtr), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Clong), + Ref{BlasInt}, Clong), uplo, n, A, max(1, stride(A, 2)), tau, work, lwork, info, 1) @@ -6403,7 +6403,7 @@ for (ormtr, elty) in (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Clong, Clong, Clong), + Ref{BlasInt}, Ref{BlasInt}, Clong, Clong, Clong), side, uplo, trans, mC, nC, A, max(1, stride(A, 2)), tau, C, max(1, stride(C, 2)), work, @@ -6781,7 +6781,7 @@ for (trexc, trsen, tgsen, elty) in (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ptr{BlasInt}, Clong), + Ptr{$elty}, Ref{BlasInt}, Clong), compq, n, T, ldt, Q, ldq, ifst, ilst, @@ -6827,7 +6827,7 @@ for (trexc, trsen, tgsen, elty) in Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Clong, Clong), job, compq, select, n, T, ldt, Q, ldq, wr, wi, m, s, sep, @@ -6938,7 +6938,7 @@ for (trexc, trsen, tgsen, elty, relty) in (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{BlasInt}, Clong), + Ref{BlasInt}, Clong), compq, n, T, ldt, Q, ldq, ifst, ilst, @@ -6980,7 +6980,7 @@ for (trexc, trsen, tgsen, elty, relty) in Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{$relty}, Ref{$relty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Clong, Clong), + Ref{BlasInt}, Clong, Clong), job, compq, select, n, T, ldt, Q, ldq, w, m, s, sep, @@ -7129,7 +7129,7 @@ for (fn, elty, relty) in ((:dtrsyl_, :Float64, :Float64), ccall((@blasfunc($fn), libblastrampoline), Cvoid, (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, 
Ref{BlasInt}, - Ptr{$relty}, Ptr{BlasInt}, Clong, Clong), + Ptr{$relty}, Ref{BlasInt}, Clong, Clong), transa, transb, isgn, m, n, A, lda, B, ldb, C, ldc, scale, info, 1, 1) From 0cc551870573a45d24fd36b908d64de8741deb65 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sun, 10 Nov 2024 08:15:23 +0530 Subject: [PATCH 414/537] Scaling loop instead of broadcasting in strided matrix exp (#56463) Firstly, this is easier to read. Secondly, this merges the two loops into one. Thirdly, this avoids the broadcasting latency. ```julia julia> using LinearAlgebra julia> A = rand(2,2); julia> @time LinearAlgebra.exp!(A); 0.952597 seconds (2.35 M allocations: 116.574 MiB, 2.67% gc time, 99.01% compilation time) # master 0.877404 seconds (2.17 M allocations: 106.293 MiB, 2.65% gc time, 99.99% compilation time) # this PR ``` The performance also improves as there are fewer allocations in the first branch (`opnorm(A, 1) <= 2.1`): ```julia julia> B = diagm(0=>im.*(float.(1:200))./200, 1=>(1:199)./400, -1=>(1:199)./400); julia> opnorm(B,1) 1.9875 julia> @btime exp($B); 5.066 ms (30 allocations: 4.89 MiB) # nightly v"1.12.0-DEV.1581" 4.926 ms (27 allocations: 4.28 MiB) # this PR ``` --- stdlib/LinearAlgebra/src/dense.jl | 33 ++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 0a5f97889196c..2711bba5cd3ac 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -707,25 +707,32 @@ function exp!(A::StridedMatrix{T}) where T<:BlasFloat # Compute U and V: Even/odd terms in Padé numerator & denom # Expansion of k=1 in for loop P = A2 - U = mul!(C[4]*P, true, C[2]*I, true, true) #U = C[2]*I + C[4]*P - V = mul!(C[3]*P, true, C[1]*I, true, true) #V = C[1]*I + C[3]*P + U = similar(P) + V = similar(P) + for ind in CartesianIndices(P) + U[ind] = C[4]*P[ind] + C[2]*I[ind] + V[ind] = C[3]*P[ind] + C[1]*I[ind] + end for k in 2:(div(length(C), 2) - 1) P *= A2 - for ind in eachindex(P) + for ind in eachindex(P, U, V) U[ind] += C[2k + 2] * P[ind] V[ind] += C[2k + 1] * P[ind] end end - U = A * U + # U = A * U, but we overwrite P to avoid an allocation + mul!(P, A, U) + # P may be seen as an alias for U in the following code # Padé approximant: (V-U)\(V+U) - tmp1, tmp2 = A, A2 # Reuse already allocated arrays - for ind in eachindex(tmp1) - tmp1[ind] = V[ind] - U[ind] - tmp2[ind] = V[ind] + U[ind] + VminU, VplusU = V, U # Reuse already allocated arrays + for ind in eachindex(V, U) + vi, ui = V[ind], P[ind] + VminU[ind] = vi - ui + VplusU[ind] = vi + ui end - X = LAPACK.gesv!(tmp1, tmp2)[1] + X = LAPACK.gesv!(VminU, VplusU)[1] else s = log2(nA/5.4) # power of 2 later reversed by squaring if s > 0 @@ -793,10 +800,14 @@ function exp!(A::StridedMatrix{T}) where T<:BlasFloat end if ilo > 1 # apply lower permutations in reverse order - for j in (ilo-1):-1:1; rcswap!(j, Int(scale[j]), X) end + for j in (ilo-1):-1:1 + rcswap!(j, Int(scale[j]), X) + end end if ihi < n # apply upper permutations in forward order - for j in (ihi+1):n; rcswap!(j, Int(scale[j]), X) end + for j in (ihi+1):n + rcswap!(j, Int(scale[j]), X) + end end X end From 4feca1fba117cfcaf38a772a8f840fc6136b7e22 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Sat, 9 Nov 2024 23:38:22 -0500 Subject: [PATCH 415/537] codegen: Respect binding partition (#56494) Minor changes to make codegen correct in the face of partitioned constant bindings. 
Does not yet handle the envisioned semantics for globals that change restriction type, which will require a fair bit of additional work. --- src/codegen.cpp | 42 +++++++++++++++++++++++++++--------------- src/julia_internal.h | 15 +++++++++++++++ src/module.c | 11 +++++++++++ 3 files changed, 53 insertions(+), 15 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index e2bc8fe6e43d1..8662016fd069f 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3435,26 +3435,33 @@ static jl_value_t *jl_ensure_rooted(jl_codectx_t &ctx, jl_value_t *val) // --- generating function calls --- +static jl_cgval_t emit_globalref_runtime(jl_codectx_t &ctx, jl_binding_t *bnd, jl_module_t *mod, jl_sym_t *name) +{ + Value *bp = julia_binding_gv(ctx, bnd); + Value *v = ctx.builder.CreateCall(prepare_call(jlgetbindingvalue_func), { bp }); + undef_var_error_ifnot(ctx, ctx.builder.CreateIsNotNull(v), name, (jl_value_t*)mod); + return mark_julia_type(ctx, v, true, jl_any_type); +} + static jl_cgval_t emit_globalref(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t *name, AtomicOrdering order) { jl_binding_t *bnd = jl_get_module_binding(mod, name, 1); - jl_binding_partition_t *bpart = jl_get_binding_partition(bnd, ctx.max_world); + assert(bnd); + jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world); + if (!bpart) { + return emit_globalref_runtime(ctx, bnd, mod, name); + } jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) { // try to look this up now. // TODO: This is bad and we'd like to delete it. jl_get_binding(mod, name); } - assert(bnd); - Value *bp = NULL; // bpart was updated in place - this will change with full partition pku = jl_atomic_load_acquire(&bpart->restriction); if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) { // Redo the lookup at runtime - bp = julia_binding_gv(ctx, bnd); - Value *v = ctx.builder.CreateCall(prepare_call(jlgetbindingvalue_func), { bp }); - undef_var_error_ifnot(ctx, ctx.builder.CreateIsNotNull(v), name, (jl_value_t*)mod); - return mark_julia_type(ctx, v, true, jl_any_type); + return emit_globalref_runtime(ctx, bnd, mod, name); } else { while (true) { if (!bpart) @@ -3465,7 +3472,9 @@ static jl_cgval_t emit_globalref(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t * cg_bdw(ctx, name, bnd); } bnd = (jl_binding_t*)decode_restriction_value(pku); - bpart = jl_get_binding_partition(bnd, ctx.max_world); + bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world); + if (!bpart) + break; pku = jl_atomic_load_acquire(&bpart->restriction); } if (bpart && jl_bkind_is_some_constant(decode_restriction_kind(pku))) { @@ -3477,7 +3486,10 @@ static jl_cgval_t emit_globalref(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t * return mark_julia_const(ctx, constval); } } - bp = julia_binding_gv(ctx, bnd); + if (!bpart) { + return emit_globalref_runtime(ctx, bnd, mod, name); + } + Value *bp = julia_binding_gv(ctx, bnd); if (bnd->deprecated) { cg_bdw(ctx, name, bnd); } @@ -3496,7 +3508,7 @@ static jl_cgval_t emit_globalop(jl_codectx_t &ctx, jl_module_t *mod, jl_sym_t *s { jl_binding_t *bnd = NULL; Value *bp = global_binding_pointer(ctx, mod, sym, &bnd, true, alloc); - jl_binding_partition_t *bpart = jl_get_binding_partition(bnd, ctx.max_world); + jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world); if (bp == NULL) return jl_cgval_t(); if (bpart) { @@ -5854,7 +5866,7 @@ static Value *global_binding_pointer(jl_codectx_t 
&ctx, jl_module_t *m, jl_sym_t jl_binding_t **pbnd, bool assign, bool alloc) { jl_binding_t *b = jl_get_module_binding(m, s, 1); - jl_binding_partition_t *bpart = jl_get_binding_partition(b, ctx.max_world); + jl_binding_partition_t *bpart = jl_get_binding_partition_all(b, ctx.min_world, ctx.max_world); jl_ptr_kind_union_t pku = jl_atomic_load_relaxed(&bpart->restriction); if (assign) { if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) @@ -5865,11 +5877,11 @@ static Value *global_binding_pointer(jl_codectx_t &ctx, jl_module_t *m, jl_sym_t if (jl_bkind_is_some_guard(decode_restriction_kind(pku))) { // try to look this up now b = jl_get_binding(m, s); - bpart = jl_get_binding_partition(b, ctx.max_world); + bpart = jl_get_binding_partition_all(b, ctx.min_world, ctx.max_world); } - pku = jl_walk_binding_inplace(&b, &bpart, ctx.max_world); + pku = jl_walk_binding_inplace_all(&b, &bpart, ctx.min_world, ctx.max_world); } - if (b == NULL) { + if (!b || !bpart) { // var not found. switch to delayed lookup. Constant *initnul = Constant::getNullValue(ctx.types().T_pjlvalue); GlobalVariable *bindinggv = new GlobalVariable(*ctx.f->getParent(), ctx.types().T_pjlvalue, @@ -6021,7 +6033,7 @@ static jl_cgval_t emit_isdefined(jl_codectx_t &ctx, jl_value_t *sym, int allow_i name = (jl_sym_t*)sym; } jl_binding_t *bnd = allow_import ? jl_get_binding(modu, name) : jl_get_module_binding(modu, name, 0); - jl_binding_partition_t *bpart = jl_get_binding_partition(bnd, ctx.min_world); + jl_binding_partition_t *bpart = jl_get_binding_partition_all(bnd, ctx.min_world, ctx.max_world); jl_ptr_kind_union_t pku = bpart ? jl_atomic_load_relaxed(&bpart->restriction) : encode_restriction(NULL, BINDING_KIND_GUARD); if (decode_restriction_kind(pku) == BINDING_KIND_GLOBAL || jl_bkind_is_some_constant(decode_restriction_kind(pku))) { if (jl_get_binding_value_if_const(bnd)) diff --git a/src/julia_internal.h b/src/julia_internal.h index 5eb99be9e333f..776fea3b1dbf1 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -956,12 +956,14 @@ STATIC_INLINE int jl_bkind_is_some_guard(enum jl_partition_kind kind) JL_NOTSAFE } JL_DLLEXPORT jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b JL_PROPAGATES_ROOT, size_t world); +JL_DLLEXPORT jl_binding_partition_t *jl_get_binding_partition_all(jl_binding_t *b JL_PROPAGATES_ROOT, size_t min_world, size_t max_world); EXTERN_INLINE_DECLARE uint8_t jl_bpart_get_kind(jl_binding_partition_t *bpart) JL_NOTSAFEPOINT { return decode_restriction_kind(jl_atomic_load_relaxed(&bpart->restriction)); } STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t world) JL_NOTSAFEPOINT; +STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace_all(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t min_world, size_t max_world) JL_NOTSAFEPOINT; #ifndef __clang_analyzer__ STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t world) JL_NOTSAFEPOINT @@ -976,6 +978,19 @@ STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace(jl_binding_t **bnd, jl *bpart = jl_get_binding_partition(*bnd, world); } } + +STATIC_INLINE jl_ptr_kind_union_t jl_walk_binding_inplace_all(jl_binding_t **bnd, jl_binding_partition_t **bpart, size_t min_world, size_t max_world) JL_NOTSAFEPOINT +{ + while (1) { + if (!*bpart) + return encode_restriction(NULL, BINDING_KIND_GUARD); + jl_ptr_kind_union_t pku = jl_atomic_load_acquire(&(*bpart)->restriction); + if 
(!jl_bkind_is_some_import(decode_restriction_kind(pku))) + return pku; + *bnd = (jl_binding_t*)decode_restriction_value(pku); + *bpart = jl_get_binding_partition_all(*bnd, min_world, max_world); + } +} #endif STATIC_INLINE int is10digit(char c) JL_NOTSAFEPOINT diff --git a/src/module.c b/src/module.c index 85813af6adc6f..38f4b980a72fd 100644 --- a/src/module.c +++ b/src/module.c @@ -60,6 +60,17 @@ jl_binding_partition_t *jl_get_binding_partition(jl_binding_t *b, size_t world) } } +jl_binding_partition_t *jl_get_binding_partition_all(jl_binding_t *b, size_t min_world, size_t max_world) { + if (!b) + return NULL; + jl_binding_partition_t *bpart = jl_get_binding_partition(b, min_world); + if (!bpart) + return NULL; + if (jl_atomic_load_relaxed(&bpart->max_world) < max_world) + return NULL; + return bpart; +} + JL_DLLEXPORT jl_module_t *jl_new_module_(jl_sym_t *name, jl_module_t *parent, uint8_t default_names) { jl_task_t *ct = jl_current_task; From 88201cf8025de81106510044237ab5870e86926c Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Sun, 10 Nov 2024 00:31:40 -0500 Subject: [PATCH 416/537] Profile: fix Compiler short path (#56515) --- stdlib/Profile/src/Profile.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stdlib/Profile/src/Profile.jl b/stdlib/Profile/src/Profile.jl index 694d1292b02ab..895c67557984a 100644 --- a/stdlib/Profile/src/Profile.jl +++ b/stdlib/Profile/src/Profile.jl @@ -529,9 +529,11 @@ function flatten(data::Vector, lidict::LineInfoDict) end const SRC_DIR = normpath(joinpath(Sys.BUILD_ROOT_PATH, "src")) +const COMPILER_DIR = "././../usr/share/julia/Compiler/" # Take a file-system path and try to form a concise representation of it # based on the package ecosystem +# filenamecache is a dict of spath -> (fullpath or "" if !isfile, modulename, shortpath) function short_path(spath::Symbol, filenamecache::Dict{Symbol, Tuple{String,String,String}}) return get!(filenamecache, spath) do path = Base.fixup_stdlib_path(string(spath)) @@ -544,6 +546,10 @@ function short_path(spath::Symbol, filenamecache::Dict{Symbol, Tuple{String,Stri elseif startswith(path_norm, lib_dir) remainder = only(split(path_norm, lib_dir, keepempty=false)) return (isfile(path_norm) ? path_norm : ""), "@julialib", remainder + elseif startswith(path, COMPILER_DIR) + remainder = only(split(path, COMPILER_DIR, keepempty=false)) + possible_compiler_path = normpath(joinpath(Sys.BINDIR, Base.DATAROOTDIR, "julia", "Compiler", remainder)) + return (isfile(possible_compiler_path) ? possible_compiler_path : ""), "@Compiler", remainder elseif isabspath(path) if ispath(path) # try to replace the file-system prefix with a short "@Module" one, From b6a2cc1a8e759f3fc8a7222bdd28351416a4805d Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Sun, 10 Nov 2024 17:09:56 +0530 Subject: [PATCH 417/537] Check `isdiag` in dense trig functions (#56483) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This improves performance for dense diagonal matrices, as we may apply the function only to the diagonal elements. 
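A minimal sketch of the idea, using a hypothetical standalone helper rather than the exact code in this patch: once `isdiag(A)` holds, a matrix function reduces to mapping the scalar function over the diagonal entries.

```julia
using LinearAlgebra

# Hypothetical reference implementation of the fast path: for a diagonal
# matrix, f(A) is again diagonal, with f applied entrywise to the diagonal.
function diag_fastpath(f, A::AbstractMatrix)
    isdiag(A) || throw(ArgumentError("expected a diagonal matrix"))
    B = zeros(float(eltype(A)), size(A))
    for i in axes(A, 1)
        B[i, i] = f(A[i, i])
    end
    return B
end

A = diagm(0 => [0.1, 0.2, 0.3])
diag_fastpath(cos, A) ≈ cos(A)   # true: agrees with the dense algorithm
```

The timings below show the resulting speedup: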
```julia julia> A = diagm(0=>rand(100)); julia> @btime cos($A); 349.211 μs (22 allocations: 401.58 KiB) # nightly v"1.12.0-DEV.1571" 16.215 μs (7 allocations: 80.02 KiB) # this PR ``` --------- Co-authored-by: Daniel Karrasch --- stdlib/LinearAlgebra/src/dense.jl | 72 +++++++++++++++++++++++------- stdlib/LinearAlgebra/test/dense.jl | 37 +++++++-------- 2 files changed, 75 insertions(+), 34 deletions(-) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index 2711bba5cd3ac..d975df1cc0fb7 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -683,7 +683,12 @@ Base.:^(::Irrational{:ℯ}, A::AbstractMatrix) = exp(A) ## "Functions of Matrices: Theory and Computation", SIAM function exp!(A::StridedMatrix{T}) where T<:BlasFloat n = checksquare(A) - if ishermitian(A) + if isdiag(A) + for i in diagind(A, IndexStyle(A)) + A[i] = exp(A[i]) + end + return A + elseif ishermitian(A) return copytri!(parent(exp(Hermitian(A))), 'U', true) end ilo, ihi, scale = LAPACK.gebal!('B', A) # modifies A @@ -1014,9 +1019,16 @@ end cbrt(A::AdjointAbsMat) = adjoint(cbrt(parent(A))) cbrt(A::TransposeAbsMat) = transpose(cbrt(parent(A))) +function applydiagonal(f, A) + dinv = f(Diagonal(A)) + copyto!(similar(A, eltype(dinv)), dinv) +end + function inv(A::StridedMatrix{T}) where T checksquare(A) - if istriu(A) + if isdiag(A) + Ai = applydiagonal(inv, A) + elseif istriu(A) Ai = triu!(parent(inv(UpperTriangular(A)))) elseif istril(A) Ai = tril!(parent(inv(LowerTriangular(A)))) @@ -1044,14 +1056,18 @@ julia> cos(fill(1.0, (2,2))) ``` """ function cos(A::AbstractMatrix{<:Real}) - if issymmetric(A) + if isdiag(A) + return applydiagonal(cos, A) + elseif issymmetric(A) return copytri!(parent(cos(Symmetric(A))), 'U') end T = complex(float(eltype(A))) return real(exp!(T.(im .* A))) end function cos(A::AbstractMatrix{<:Complex}) - if ishermitian(A) + if isdiag(A) + return applydiagonal(cos, A) + elseif ishermitian(A) return copytri!(parent(cos(Hermitian(A))), 'U', true) end T = complex(float(eltype(A))) @@ -1077,14 +1093,18 @@ julia> sin(fill(1.0, (2,2))) ``` """ function sin(A::AbstractMatrix{<:Real}) - if issymmetric(A) + if isdiag(A) + return applydiagonal(sin, A) + elseif issymmetric(A) return copytri!(parent(sin(Symmetric(A))), 'U') end T = complex(float(eltype(A))) return imag(exp!(T.(im .* A))) end function sin(A::AbstractMatrix{<:Complex}) - if ishermitian(A) + if isdiag(A) + return applydiagonal(sin, A) + elseif ishermitian(A) return copytri!(parent(sin(Hermitian(A))), 'U', true) end T = complex(float(eltype(A))) @@ -1163,7 +1183,9 @@ julia> tan(fill(1.0, (2,2))) ``` """ function tan(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(tan, A) + elseif ishermitian(A) return copytri!(parent(tan(Hermitian(A))), 'U', true) end S, C = sincos(A) @@ -1177,7 +1199,9 @@ end Compute the matrix hyperbolic cosine of a square matrix `A`. """ function cosh(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(cosh, A) + elseif ishermitian(A) return copytri!(parent(cosh(Hermitian(A))), 'U', true) end X = exp(A) @@ -1191,7 +1215,9 @@ end Compute the matrix hyperbolic sine of a square matrix `A`. """ function sinh(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(sinh, A) + elseif ishermitian(A) return copytri!(parent(sinh(Hermitian(A))), 'U', true) end X = exp(A) @@ -1205,7 +1231,9 @@ end Compute the matrix hyperbolic tangent of a square matrix `A`. 
""" function tanh(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(tanh, A) + elseif ishermitian(A) return copytri!(parent(tanh(Hermitian(A))), 'U', true) end X = exp(A) @@ -1240,7 +1268,9 @@ julia> acos(cos([0.5 0.1; -0.2 0.3])) ``` """ function acos(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(acos, A) + elseif ishermitian(A) acosHermA = acos(Hermitian(A)) return isa(acosHermA, Hermitian) ? copytri!(parent(acosHermA), 'U', true) : parent(acosHermA) end @@ -1271,7 +1301,9 @@ julia> asin(sin([0.5 0.1; -0.2 0.3])) ``` """ function asin(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(asin, A) + elseif ishermitian(A) asinHermA = asin(Hermitian(A)) return isa(asinHermA, Hermitian) ? copytri!(parent(asinHermA), 'U', true) : parent(asinHermA) end @@ -1302,7 +1334,9 @@ julia> atan(tan([0.5 0.1; -0.2 0.3])) ``` """ function atan(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(atan, A) + elseif ishermitian(A) return copytri!(parent(atan(Hermitian(A))), 'U', true) end SchurF = Schur{Complex}(schur(A)) @@ -1320,7 +1354,9 @@ logarithmic formulas used to compute this function, see [^AH16_4]. [^AH16_4]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) """ function acosh(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(acosh, A) + elseif ishermitian(A) acoshHermA = acosh(Hermitian(A)) return isa(acoshHermA, Hermitian) ? copytri!(parent(acoshHermA), 'U', true) : parent(acoshHermA) end @@ -1339,7 +1375,9 @@ logarithmic formulas used to compute this function, see [^AH16_5]. [^AH16_5]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) """ function asinh(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(asinh, A) + elseif ishermitian(A) return copytri!(parent(asinh(Hermitian(A))), 'U', true) end SchurF = Schur{Complex}(schur(A)) @@ -1357,7 +1395,9 @@ logarithmic formulas used to compute this function, see [^AH16_6]. [^AH16_6]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. 
[https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) """ function atanh(A::AbstractMatrix) - if ishermitian(A) + if isdiag(A) + return applydiagonal(atanh, A) + elseif ishermitian(A) return copytri!(parent(atanh(Hermitian(A))), 'U', true) end SchurF = Schur{Complex}(schur(A)) diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index 1d43d76899392..10f50a80ab7fd 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -607,6 +607,7 @@ end -0.4579038628067864 1.7361475641080275 6.478801851038108]) A3 = convert(Matrix{elty}, [0.25 0.25; 0 0]) A4 = convert(Matrix{elty}, [0 0.02; 0 0]) + A5 = convert(Matrix{elty}, [2.0 0; 0 3.0]) cosA1 = convert(Matrix{elty},[-0.18287716254368605 -0.29517205254584633 0.761711400552759; 0.23326967400345625 0.19797853773269333 -0.14758602627292305; @@ -614,8 +615,8 @@ end sinA1 = convert(Matrix{elty}, [0.2865568596627417 -1.107751980582015 -0.13772915374386513; -0.6227405671629401 0.2176922827908092 -0.5538759902910078; -0.6227405671629398 -0.6916051440348725 0.3554214365346742]) - @test cos(A1) ≈ cosA1 - @test sin(A1) ≈ sinA1 + @test @inferred(cos(A1)) ≈ cosA1 + @test @inferred(sin(A1)) ≈ sinA1 cosA2 = convert(Matrix{elty}, [-0.6331745163802187 0.12878366262380136 -0.17304181968301532; 0.12878366262380136 -0.5596234510748788 0.5210483146041339; @@ -637,36 +638,36 @@ end @test sin(A4) ≈ sinA4 # Identities - for (i, A) in enumerate((A1, A2, A3, A4)) - @test sincos(A) == (sin(A), cos(A)) + for (i, A) in enumerate((A1, A2, A3, A4, A5)) + @test @inferred(sincos(A)) == (sin(A), cos(A)) @test cos(A)^2 + sin(A)^2 ≈ Matrix(I, size(A)) @test cos(A) ≈ cos(-A) @test sin(A) ≈ -sin(-A) - @test tan(A) ≈ sin(A) / cos(A) + @test @inferred(tan(A)) ≈ sin(A) / cos(A) @test cos(A) ≈ real(exp(im*A)) @test sin(A) ≈ imag(exp(im*A)) @test cos(A) ≈ real(cis(A)) @test sin(A) ≈ imag(cis(A)) - @test cis(A) ≈ cos(A) + im * sin(A) + @test @inferred(cis(A)) ≈ cos(A) + im * sin(A) - @test cosh(A) ≈ 0.5 * (exp(A) + exp(-A)) - @test sinh(A) ≈ 0.5 * (exp(A) - exp(-A)) - @test cosh(A) ≈ cosh(-A) - @test sinh(A) ≈ -sinh(-A) + @test @inferred(cosh(A)) ≈ 0.5 * (exp(A) + exp(-A)) + @test @inferred(sinh(A)) ≈ 0.5 * (exp(A) - exp(-A)) + @test @inferred(cosh(A)) ≈ cosh(-A) + @test @inferred(sinh(A)) ≈ -sinh(-A) # Some of the following identities fail for A3, A4 because the matrices are singular - if i in (1, 2) - @test sec(A) ≈ inv(cos(A)) - @test csc(A) ≈ inv(sin(A)) - @test cot(A) ≈ inv(tan(A)) - @test sech(A) ≈ inv(cosh(A)) - @test csch(A) ≈ inv(sinh(A)) - @test coth(A) ≈ inv(tanh(A)) + if i in (1, 2, 5) + @test @inferred(sec(A)) ≈ inv(cos(A)) + @test @inferred(csc(A)) ≈ inv(sin(A)) + @test @inferred(cot(A)) ≈ inv(tan(A)) + @test @inferred(sech(A)) ≈ inv(cosh(A)) + @test @inferred(csch(A)) ≈ inv(sinh(A)) + @test @inferred(coth(A)) ≈ inv(@inferred tanh(A)) end # The following identities fail for A1, A2 due to rounding errors; # probably needs better algorithm for the general case - if i in (3, 4) + if i in (3, 4, 5) @test cosh(A)^2 - sinh(A)^2 ≈ Matrix(I, size(A)) @test tanh(A) ≈ sinh(A) / cosh(A) end From afdba951b36e65cd8b728e460160d8765aae321f Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Sun, 10 Nov 2024 08:19:17 -0500 Subject: [PATCH 418/537] Profile: add helper method for printing profile report to file (#56505) The IOContext part is isn't obvious, because otherwise the IO is assumed to be 80 chars wide, which makes for bad reports. 
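A rough usage sketch, following the signature added here (the workload, file name, and column count are arbitrary):

```julia
using Profile

@profile map(sin, 1:10^6)                  # collect some samples

# New convenience method: write the report to a file, wide enough
# that lines are not truncated at the default 80 columns.
Profile.print("profile_report.txt", 500)

# Roughly what the helper does internally:
open("profile_report.txt", "w") do io
    Profile.print(IOContext(io, :displaysize => (1000, 500)))
end
```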
--- stdlib/Profile/src/Profile.jl | 8 ++++++++ stdlib/Profile/test/runtests.jl | 3 +++ 2 files changed, 11 insertions(+) diff --git a/stdlib/Profile/src/Profile.jl b/stdlib/Profile/src/Profile.jl index 895c67557984a..409696c8c9354 100644 --- a/stdlib/Profile/src/Profile.jl +++ b/stdlib/Profile/src/Profile.jl @@ -217,6 +217,7 @@ const META_OFFSET_THREADID = 5 """ print([io::IO = stdout,] [data::Vector = fetch()], [lidict::Union{LineInfoDict, LineInfoFlatDict} = getdict(data)]; kwargs...) + print(path::String, [cols::Int = 1000], [data::Vector = fetch()], [lidict::Union{LineInfoDict, LineInfoFlatDict} = getdict(data)]; kwargs...) Prints profiling results to `io` (by default, `stdout`). If you do not supply a `data` vector, the internal buffer of accumulated backtraces @@ -357,6 +358,13 @@ function print(io::IO, return end +function print(path::String, cols::Int = 1000, args...; kwargs...) + open(path, "w") do io + ioc = IOContext(io, :displaysize=>(1000,cols)) + print(ioc, args...; kwargs...) + end +end + """ print([io::IO = stdout,] data::Vector, lidict::LineInfoDict; kwargs...) diff --git a/stdlib/Profile/test/runtests.jl b/stdlib/Profile/test/runtests.jl index 352d07086f25b..c1cb86d84975a 100644 --- a/stdlib/Profile/test/runtests.jl +++ b/stdlib/Profile/test/runtests.jl @@ -95,6 +95,9 @@ for options in ((format=:tree, C=true), Profile.print(iobuf; options...) str = String(take!(iobuf)) @test !isempty(str) + file, _ = mktemp() + Profile.print(file; options...) + @test filesize(file) > 0 end @testset "Profile.print() groupby options" begin From 3318941e585db632423366b8b703ea55a6ba8421 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 11 Nov 2024 07:31:41 +0530 Subject: [PATCH 419/537] Change in-place exp to out-of-place in matrix trig functions (#56242) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes the functions work for arbitrary matrix types that support `exp`, but not necessarily the in-place `exp!`. For example, the following works after this: ```julia julia> m = SMatrix{2,2}(1:4); julia> cos(m) 2×2 SMatrix{2, 2, Float64, 4} with indices SOneTo(2)×SOneTo(2): 0.855423 -0.166315 -0.110876 0.689109 ``` There's a slight performance improvement as well because we don't compute `im*A` and `-im*A` separately, but we negate the first to obtain the second. ```julia julia> A = rand(ComplexF64,100,100); julia> @btime sin($A); 2.796 ms (48 allocations: 1.84 MiB) # nightly v"1.12.0-DEV.1571" 2.304 ms (48 allocations: 1.84 MiB) # this PR ``` --- stdlib/LinearAlgebra/src/dense.jl | 80 +++++++++++++++++++----------- stdlib/LinearAlgebra/test/dense.jl | 17 +++++++ 2 files changed, 69 insertions(+), 28 deletions(-) diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index d975df1cc0fb7..d6d97be86f1bf 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -1039,6 +1039,11 @@ function inv(A::StridedMatrix{T}) where T return Ai end +# helper function to perform a broadcast in-place if the destination is strided +# otherwise, this performs an out-of-place broadcast +@inline _broadcast!!(f, dest::StridedArray, args...) = broadcast!(f, dest, args...) +@inline _broadcast!!(f, dest, args...) = broadcast(f, args...) 
+ """ cos(A::AbstractMatrix) @@ -1061,8 +1066,8 @@ function cos(A::AbstractMatrix{<:Real}) elseif issymmetric(A) return copytri!(parent(cos(Symmetric(A))), 'U') end - T = complex(float(eltype(A))) - return real(exp!(T.(im .* A))) + M = im .* float.(A) + return real(exp_maybe_inplace(M)) end function cos(A::AbstractMatrix{<:Complex}) if isdiag(A) @@ -1070,10 +1075,13 @@ function cos(A::AbstractMatrix{<:Complex}) elseif ishermitian(A) return copytri!(parent(cos(Hermitian(A))), 'U', true) end - T = complex(float(eltype(A))) - X = exp!(T.(im .* A)) - @. X = (X + $exp!(T(-im*A))) / 2 - return X + M = im .* float.(A) + N = -M + X = exp_maybe_inplace(M) + Y = exp_maybe_inplace(N) + # Compute (X + Y)/2 and return the result. + # Compute the result in-place if X is strided + _broadcast!!((x,y) -> (x + y)/2, X, X, Y) end """ @@ -1098,8 +1106,8 @@ function sin(A::AbstractMatrix{<:Real}) elseif issymmetric(A) return copytri!(parent(sin(Symmetric(A))), 'U') end - T = complex(float(eltype(A))) - return imag(exp!(T.(im .* A))) + M = im .* float.(A) + return imag(exp_maybe_inplace(M)) end function sin(A::AbstractMatrix{<:Complex}) if isdiag(A) @@ -1107,14 +1115,13 @@ function sin(A::AbstractMatrix{<:Complex}) elseif ishermitian(A) return copytri!(parent(sin(Hermitian(A))), 'U', true) end - T = complex(float(eltype(A))) - X = exp!(T.(im .* A)) - Y = exp!(T.(.-im .* A)) - @inbounds for i in eachindex(X, Y) - x, y = X[i]/2, Y[i]/2 - X[i] = Complex(imag(x)-imag(y), real(y)-real(x)) - end - return X + M = im .* float.(A) + Mneg = -M + X = exp_maybe_inplace(M) + Y = exp_maybe_inplace(Mneg) + # Compute (X - Y)/2im and return the result. + # Compute the result in-place if X is strided + _broadcast!!((x,y) -> (x - y)/2im, X, X, Y) end """ @@ -1144,8 +1151,8 @@ function sincos(A::AbstractMatrix{<:Real}) cosA = copytri!(parent(symcosA), 'U') return sinA, cosA end - T = complex(float(eltype(A))) - c, s = reim(exp!(T.(im .* A))) + M = im .* float.(A) + c, s = reim(exp_maybe_inplace(M)) return s, c end function sincos(A::AbstractMatrix{<:Complex}) @@ -1155,9 +1162,13 @@ function sincos(A::AbstractMatrix{<:Complex}) cosA = copytri!(parent(hermcosA), 'U', true) return sinA, cosA end - T = complex(float(eltype(A))) - X = exp!(T.(im .* A)) - Y = exp!(T.(.-im .* A)) + M = im .* float.(A) + Mneg = -M + X = exp_maybe_inplace(M) + Y = exp_maybe_inplace(Mneg) + _sincos(X, Y) +end +function _sincos(X::StridedMatrix, Y::StridedMatrix) @inbounds for i in eachindex(X, Y) x, y = X[i]/2, Y[i]/2 X[i] = Complex(imag(x)-imag(y), real(y)-real(x)) @@ -1165,6 +1176,12 @@ function sincos(A::AbstractMatrix{<:Complex}) end return X, Y end +function _sincos(X, Y) + T = eltype(X) + S = T(0.5)*im .* (Y .- X) + C = T(0.5) .* (X .+ Y) + S, C +end """ tan(A::AbstractMatrix) @@ -1205,8 +1222,9 @@ function cosh(A::AbstractMatrix) return copytri!(parent(cosh(Hermitian(A))), 'U', true) end X = exp(A) - @. X = (X + $exp!(float(-A))) / 2 - return X + negA = @. float(-A) + Y = exp_maybe_inplace(negA) + _broadcast!!((x,y) -> (x + y)/2, X, X, Y) end """ @@ -1221,8 +1239,9 @@ function sinh(A::AbstractMatrix) return copytri!(parent(sinh(Hermitian(A))), 'U', true) end X = exp(A) - @. X = (X - $exp!(float(-A))) / 2 - return X + negA = @. float(-A) + Y = exp_maybe_inplace(negA) + _broadcast!!((x,y) -> (x - y)/2, X, X, Y) end """ @@ -1237,15 +1256,20 @@ function tanh(A::AbstractMatrix) return copytri!(parent(tanh(Hermitian(A))), 'U', true) end X = exp(A) - Y = exp!(float.(.-A)) + negA = @. 
float(-A) + Y = exp_maybe_inplace(negA) + X′, Y′ = _subadd!!(X, Y) + return X′ / Y′ +end +function _subadd!!(X::StridedMatrix, Y::StridedMatrix) @inbounds for i in eachindex(X, Y) x, y = X[i], Y[i] X[i] = x - y Y[i] = x + y end - X /= Y - return X + return X, Y end +_subadd!!(X, Y) = X - Y, X + Y """ acos(A::AbstractMatrix) diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index 10f50a80ab7fd..b80412f98e8a4 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -5,6 +5,10 @@ module TestDense using Test, LinearAlgebra, Random using LinearAlgebra: BlasComplex, BlasFloat, BlasReal +const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") +isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl")) +import Main.FillArrays + @testset "Check that non-floats are correctly promoted" begin @test [1 0 0; 0 1 0]\[1,1] ≈ [1;1;0] end @@ -1302,4 +1306,17 @@ end end end +@testset "trig functions for non-strided" begin + @testset for T in (Float32,ComplexF32) + A = FillArrays.Fill(T(0.1), 4, 4) # all.(<(1), eigvals(A)) for atanh + M = Matrix(A) + @testset for f in (sin,cos,tan,sincos,sinh,cosh,tanh) + @test f(A) == f(M) + end + @testset for f in (asin,acos,atan,asinh,acosh,atanh) + @test f(A) == f(M) + end + end +end + end # module TestDense From 14df0384fd6e65007be41af0a166d365154c9505 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 11 Nov 2024 02:04:49 -0500 Subject: [PATCH 420/537] Test: Don't change scope kind in `test_{warn,nowarn}` (#56524) This was part of #56509, but is an independent bugfix. The basic issue is that these macro were using `do` block internally. This is undesirable for test macros, because we would like them not to affect the behavior of what they're testing. E.g. right now: ``` julia> using Test julia> const x = 1 1 julia> @test_nowarn const x = 1 ERROR: syntax: `global const` declaration not allowed inside function around /home/keno/julia/usr/share/julia/stdlib/v1.12/Test/src/Test.jl:927 Stacktrace: [1] top-level scope @ REPL[3]:1 ``` This PR just writes out the try/finally manually, so the above works fine after this PR. --- stdlib/Test/src/Test.jl | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/stdlib/Test/src/Test.jl b/stdlib/Test/src/Test.jl index cf906591b9962..e8c7d49d076aa 100644 --- a/stdlib/Test/src/Test.jl +++ b/stdlib/Test/src/Test.jl @@ -890,10 +890,17 @@ macro test_warn(msg, expr) quote let fname = tempname() try - ret = open(fname, "w") do f - redirect_stderr(f) do - $(esc(expr)) - end + f = open(fname, "w") + stdold = stderr + redirect_stderr(f) + ret = try + # We deliberately don't use the thunk versions of open/redirect + # to ensure that adding the macro does not change the toplevel-ness + # of the resulting expression. + $(esc(expr)) + finally + redirect_stderr(stdold) + close(f) end @test contains_warn(read(fname, String), $(esc(msg))) ret @@ -922,10 +929,14 @@ macro test_nowarn(expr) # here. 
let fname = tempname() try - ret = open(fname, "w") do f - redirect_stderr(f) do - $(esc(expr)) - end + f = open(fname, "w") + stdold = stderr + redirect_stderr(f) + ret = try + $(esc(expr)) + finally + redirect_stderr(stdold) + close(f) end stderr_content = read(fname, String) print(stderr, stderr_content) # this is helpful for debugging From 97e41d719ab7448e274d3de206f62e8d3fadb87c Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 11 Nov 2024 15:15:50 +0530 Subject: [PATCH 421/537] For loop instead of while in generic `copyto!` (#56517) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This appears to improve performance. ```julia julia> A = zeros(100_000); julia> x = (i for i in axes(A,1)); julia> @btime copyto!($A, 1, $x, 1, length($A)); 64.162 μs (0 allocations: 0 bytes) # nightly v"1.12.0-DEV.1593" 52.532 μs (0 allocations: 0 bytes) # this PR ``` --- base/abstractarray.jl | 15 ++++++++++----- test/abstractarray.jl | 14 ++++++++++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/base/abstractarray.jl b/base/abstractarray.jl index 5413f4e177518..6102a0a8a00fa 100644 --- a/base/abstractarray.jl +++ b/base/abstractarray.jl @@ -1012,14 +1012,19 @@ function copyto!(dest::AbstractArray, dstart::Integer, src, sstart::Integer, n:: end y = iterate(src, y[2]) end + if y === nothing + throw(ArgumentError(LazyString( + "source has fewer elements than required, ", + "expected at least ",sstart," got ", sstart-1))) + end + val, st = y i = Int(dstart) - while i <= dmax && y !== nothing - val, st = y - @inbounds dest[i] = val - y = iterate(src, st) + @inbounds dest[i] = val + for val in Iterators.take(Iterators.rest(src, st), n-1) i += 1 + @inbounds dest[i] = val end - i <= dmax && throw(BoundsError(dest, i)) + i < dmax && throw(BoundsError(dest, i)) return dest end diff --git a/test/abstractarray.jl b/test/abstractarray.jl index c2c646ce8bee0..16b973544801a 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -2172,3 +2172,17 @@ end @test one(Mat([1 2; 3 4])) == Mat([1 0; 0 1]) @test one(Mat([1 2; 3 4])) isa Mat end + +@testset "copyto! with non-AbstractArray src" begin + A = zeros(4) + x = (i for i in axes(A,1)) + copyto!(A, 1, x, 1, length(A)) + @test A == axes(A,1) + A .= 0 + copyto!(A, 1, x, 1, 2) + @test A[1:2] == first(x,2) + @test iszero(A[3:end]) + A .= 0 + copyto!(A, 1, x, 1) + @test A == axes(A,1) +end From 38e3d1433a2401a05342ee55c3f1c7373ae2168a Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 11 Nov 2024 17:17:36 +0530 Subject: [PATCH 422/537] Add `diagview` to obtain a view along a diagonal (#56175) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A function to obtain a view of a diagonal of a matrix is useful, and this is clearly being used widely within `LinearAlgebra`. 
The implementation here iterates according to the `IndexStyle` of the array: ```julia julia> using LinearAlgebra julia> A = reshape(1:9, 3, 3) 3×3 reshape(::UnitRange{Int64}, 3, 3) with eltype Int64: 1 4 7 2 5 8 3 6 9 julia> diagview(A,1) 2-element view(::UnitRange{Int64}, 4:4:8) with eltype Int64: 4 8 julia> T = Tridiagonal(1:3, 3:6, 4:6) 4×4 Tridiagonal{Int64, UnitRange{Int64}}: 3 4 ⋅ ⋅ 1 4 5 ⋅ ⋅ 2 5 6 ⋅ ⋅ 3 6 julia> diagview(T,1) 3-element view(::Tridiagonal{Int64, UnitRange{Int64}}, StepRangeLen(CartesianIndex(1, 2), CartesianIndex(1, 1), 3)) with eltype Int64: 4 5 6 ``` Closes https://github.com/JuliaLang/julia/issues/30250 --- NEWS.md | 3 +- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 1 + stdlib/LinearAlgebra/src/abstractq.jl | 4 +-- stdlib/LinearAlgebra/src/bidiag.jl | 6 ++-- stdlib/LinearAlgebra/src/dense.jl | 35 +++++++++++++++++++--- stdlib/LinearAlgebra/src/diagonal.jl | 4 +-- stdlib/LinearAlgebra/src/special.jl | 14 ++++----- stdlib/LinearAlgebra/src/triangular.jl | 16 +++++----- stdlib/LinearAlgebra/src/tridiag.jl | 8 ++--- stdlib/LinearAlgebra/src/uniformscaling.jl | 4 +-- stdlib/LinearAlgebra/test/dense.jl | 9 ++++++ stdlib/LinearAlgebra/test/tridiag.jl | 10 +++++++ 12 files changed, 80 insertions(+), 34 deletions(-) diff --git a/NEWS.md b/NEWS.md index 74cda05e9d0e1..535d14208f0b8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -151,7 +151,8 @@ Standard library changes * The matrix multiplication `A * B` calls `matprod_dest(A, B, T::Type)` to generate the destination. This function is now public ([#55537]). * The function `haszero(T::Type)` is used to check if a type `T` has a unique zero element defined as `zero(T)`. - This is now public. + This is now public ([#56223]). +* A new function `diagview` is added that returns a view into a specific band of an `AbstractMatrix` ([#56175]). #### Logging diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl index 6e560428a7011..fc1081e007da2 100644 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ b/stdlib/LinearAlgebra/src/LinearAlgebra.jl @@ -87,6 +87,7 @@ export diag, diagind, diagm, + diagview, dot, eigen!, eigen, diff --git a/stdlib/LinearAlgebra/src/abstractq.jl b/stdlib/LinearAlgebra/src/abstractq.jl index 101fb2eb75735..0fa2233b89593 100644 --- a/stdlib/LinearAlgebra/src/abstractq.jl +++ b/stdlib/LinearAlgebra/src/abstractq.jl @@ -456,11 +456,9 @@ end det(Q::QRPackedQ) = _det_tau(Q.τ) det(Q::QRCompactWYQ) = - prod(i -> _det_tau(_diagview(Q.T[:, i:min(i + size(Q.T, 1), size(Q.T, 2))])), + prod(i -> _det_tau(diagview(Q.T[:, i:min(i + size(Q.T, 1), size(Q.T, 2))])), 1:size(Q.T, 1):size(Q.T, 2)) -_diagview(A) = @view A[diagind(A)] - # Compute `det` from the number of Householder reflections. Handle # the case `Q.τ` contains zeros. _det_tau(τs::AbstractVector{<:Real}) = diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index b38a983296065..aefaf16337d83 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -191,8 +191,8 @@ function Matrix{T}(A::Bidiagonal) where T B = Matrix{T}(undef, size(A)) if haszero(T) # optimized path for types with zero(T) defined size(B,1) > 1 && fill!(B, zero(T)) - copyto!(view(B, diagind(B)), A.dv) - copyto!(view(B, diagind(B, _offdiagind(A.uplo))), A.ev) + copyto!(diagview(B), A.dv) + copyto!(diagview(B, _offdiagind(A.uplo)), A.ev) else copyto!(B, A) end @@ -570,7 +570,7 @@ end # to avoid allocations in _mul! below (#24324, #24578) _diag(A::Tridiagonal, k) = k == -1 ? A.dl : k == 0 ? 
A.d : A.du _diag(A::SymTridiagonal{<:Number}, k) = k == 0 ? A.dv : A.ev -_diag(A::SymTridiagonal, k) = k == 0 ? view(A, diagind(A, IndexStyle(A))) : view(A, diagind(A, 1, IndexStyle(A))) +_diag(A::SymTridiagonal, k) = diagview(A,k) function _diag(A::Bidiagonal, k) if k == 0 return A.dv diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl index d6d97be86f1bf..5e47984120196 100644 --- a/stdlib/LinearAlgebra/src/dense.jl +++ b/stdlib/LinearAlgebra/src/dense.jl @@ -290,6 +290,35 @@ julia> diag(A,1) """ diag(A::AbstractMatrix, k::Integer=0) = A[diagind(A, k, IndexStyle(A))] +""" + diagview(M, k::Integer=0) + +Return a view into the `k`th diagonal of the matrix `M`. + +See also [`diag`](@ref), [`diagind`](@ref). + +# Examples +```jldoctest +julia> A = [1 2 3; 4 5 6; 7 8 9] +3×3 Matrix{Int64}: + 1 2 3 + 4 5 6 + 7 8 9 + +julia> diagview(A) +3-element view(::Vector{Int64}, 1:4:9) with eltype Int64: + 1 + 5 + 9 + +julia> diagview(A, 1) +2-element view(::Vector{Int64}, 4:4:8) with eltype Int64: + 2 + 6 +``` +""" +diagview(A::AbstractMatrix, k::Integer=0) = @view A[diagind(A, k, IndexStyle(A))] + """ diagm(kv::Pair{<:Integer,<:AbstractVector}...) diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:AbstractVector}...) @@ -1636,13 +1665,11 @@ function pinv(A::AbstractMatrix{T}; atol::Real = 0.0, rtol::Real = (eps(real(flo return similar(A, Tout, (n, m)) end if isdiag(A) - indA = diagind(A) - dA = view(A, indA) + dA = diagview(A) maxabsA = maximum(abs, dA) tol = max(rtol * maxabsA, atol) B = fill!(similar(A, Tout, (n, m)), 0) - indB = diagind(B) - B[indB] .= (x -> abs(x) > tol ? pinv(x) : zero(x)).(dA) + diagview(B) .= (x -> abs(x) > tol ? pinv(x) : zero(x)).(dA) return B end SVD = svd(A) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 1ed599fbb120e..7594e8bca4f56 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -120,7 +120,7 @@ function Matrix{T}(D::Diagonal) where {T} B = Matrix{T}(undef, size(D)) if haszero(T) # optimized path for types with zero(T) defined size(B,1) > 1 && fill!(B, zero(T)) - copyto!(view(B, diagind(B)), D.diag) + copyto!(diagview(B), D.diag) else copyto!(B, D) end @@ -1041,7 +1041,7 @@ dot(x::AbstractVector, D::Diagonal, y::AbstractVector) = _mapreduce_prod(dot, x, dot(A::Diagonal, B::Diagonal) = dot(A.diag, B.diag) function dot(D::Diagonal, B::AbstractMatrix) size(D) == size(B) || throw(DimensionMismatch(lazy"Matrix sizes $(size(D)) and $(size(B)) differ")) - return dot(D.diag, view(B, diagind(B, IndexStyle(B)))) + return dot(D.diag, diagview(B)) end dot(A::AbstractMatrix, B::Diagonal) = conj(dot(B, A)) diff --git a/stdlib/LinearAlgebra/src/special.jl b/stdlib/LinearAlgebra/src/special.jl index 32a5476842933..6d25540ee3f07 100644 --- a/stdlib/LinearAlgebra/src/special.jl +++ b/stdlib/LinearAlgebra/src/special.jl @@ -22,7 +22,7 @@ function Tridiagonal(A::Bidiagonal) end _diagview(S::SymTridiagonal{<:Number}) = S.dv -_diagview(S::SymTridiagonal) = view(S, diagind(S, IndexStyle(S))) +_diagview(S::SymTridiagonal) = diagview(S) # conversions from SymTridiagonal to other special matrix types Diagonal(A::SymTridiagonal) = Diagonal(_diagview(A)) @@ -370,20 +370,20 @@ function copyto!(dest::BandedMatrix, src::BandedMatrix) end function _copyto_banded!(T::Tridiagonal, D::Diagonal) T.d .= D.diag - T.dl .= view(D, diagind(D, -1, IndexStyle(D))) - T.du .= view(D, diagind(D, 1, IndexStyle(D))) + T.dl .= diagview(D, -1) + T.du .= diagview(D, 1) return T end function 
_copyto_banded!(SymT::SymTridiagonal, D::Diagonal) issymmetric(D) || throw(ArgumentError("cannot copy a non-symmetric Diagonal matrix to a SymTridiagonal")) SymT.dv .= D.diag _ev = _evview(SymT) - _ev .= view(D, diagind(D, 1, IndexStyle(D))) + _ev .= diagview(D, 1) return SymT end function _copyto_banded!(B::Bidiagonal, D::Diagonal) B.dv .= D.diag - B.ev .= view(D, diagind(D, B.uplo == 'U' ? 1 : -1, IndexStyle(D))) + B.ev .= diagview(D, _offdiagind(B.uplo)) return B end function _copyto_banded!(D::Diagonal, B::Bidiagonal) @@ -411,10 +411,10 @@ function _copyto_banded!(T::Tridiagonal, B::Bidiagonal) T.d .= B.dv if B.uplo == 'U' T.du .= B.ev - T.dl .= view(B, diagind(B, -1, IndexStyle(B))) + T.dl .= diagview(B,-1) else T.dl .= B.ev - T.du .= view(B, diagind(B, 1, IndexStyle(B))) + T.du .= diagview(B, 1) end return T end diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 4fed45b009fff..49ff5d7f9c3ec 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -188,7 +188,7 @@ end function full(A::UnitUpperOrUnitLowerTriangular) isupper = A isa UnitUpperTriangular Ap = _triangularize(A)(parent(A), isupper ? 1 : -1) - Ap[diagind(Ap, IndexStyle(Ap))] = @view A[diagind(A, IndexStyle(A))] + diagview(Ap) .= diagview(A) return Ap end @@ -400,12 +400,12 @@ function tril!(A::UnitUpperTriangular{T}, k::Integer=0) where {T} return UpperTriangular(A.data) elseif k == 0 fill!(A.data, zero(T)) - for i in diagind(A) + for i in diagind(A.data, IndexStyle(A.data)) A.data[i] = oneunit(T) end return UpperTriangular(A.data) else - for i in diagind(A) + for i in diagind(A.data, IndexStyle(A.data)) A.data[i] = oneunit(T) end return UpperTriangular(tril!(A.data,k)) @@ -413,7 +413,7 @@ function tril!(A::UnitUpperTriangular{T}, k::Integer=0) where {T} end function triu!(A::UnitUpperTriangular, k::Integer=0) - for i in diagind(A.data) + for i in diagind(A.data, IndexStyle(A.data)) A.data[i] = oneunit(eltype(A)) end return triu!(UpperTriangular(A.data), k) @@ -448,12 +448,12 @@ function triu!(A::UnitLowerTriangular{T}, k::Integer=0) where T return LowerTriangular(A.data) elseif k == 0 fill!(A.data, zero(T)) - for i in diagind(A) + for i in diagind(A.data, IndexStyle(A.data)) A.data[i] = oneunit(T) end return LowerTriangular(A.data) else - for i in diagind(A) + for i in diagind(A.data, IndexStyle(A.data)) A.data[i] = oneunit(T) end return LowerTriangular(triu!(A.data, k)) @@ -461,7 +461,7 @@ function triu!(A::UnitLowerTriangular{T}, k::Integer=0) where T end function tril!(A::UnitLowerTriangular, k::Integer=0) - for i in diagind(A.data) + for i in diagind(A.data, IndexStyle(A.data)) A.data[i] = oneunit(eltype(A)) end return tril!(LowerTriangular(A.data), k) @@ -2041,7 +2041,7 @@ function _find_params_log_quasitriu!(A) # Find s0, the smallest s such that the ρ(triu(A)^(1/2^s) - I) ≤ theta[tmax], where ρ(X) # is the spectral radius of X - d = complex.(@view(A[diagind(A)])) + d = complex.(diagview(A)) dm1 = d .- 1 s = 0 while norm(dm1, Inf) > theta[tmax] && s < maxsqrt diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl index d6382d2e16a43..0d73e6dd46fdb 100644 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ b/stdlib/LinearAlgebra/src/tridiag.jl @@ -612,9 +612,9 @@ function Matrix{T}(M::Tridiagonal) where {T} A = Matrix{T}(undef, size(M)) if haszero(T) # optimized path for types with zero(T) defined size(A,1) > 2 && fill!(A, zero(T)) - copyto!(view(A, diagind(A)), M.d) - copyto!(view(A, diagind(A,1)), 
M.du) - copyto!(view(A, diagind(A,-1)), M.dl) + copyto!(diagview(A), M.d) + copyto!(diagview(A,1), M.du) + copyto!(diagview(A,-1), M.dl) else copyto!(A, M) end @@ -1092,7 +1092,7 @@ function show(io::IO, T::Tridiagonal) end function show(io::IO, S::SymTridiagonal) print(io, "SymTridiagonal(") - show(io, eltype(S) <: Number ? S.dv : view(S, diagind(S, IndexStyle(S)))) + show(io, _diagview(S)) print(io, ", ") show(io, S.ev) print(io, ")") diff --git a/stdlib/LinearAlgebra/src/uniformscaling.jl b/stdlib/LinearAlgebra/src/uniformscaling.jl index cb3c5b6a4c3e1..4422799fada85 100644 --- a/stdlib/LinearAlgebra/src/uniformscaling.jl +++ b/stdlib/LinearAlgebra/src/uniformscaling.jl @@ -201,7 +201,7 @@ end function (+)(A::Hermitian, J::UniformScaling{<:Complex}) TS = Base.promote_op(+, eltype(A), typeof(J)) B = copytri!(copymutable_oftype(parent(A), TS), A.uplo, true) - for i in diagind(B) + for i in diagind(B, IndexStyle(B)) B[i] = A[i] + J end return B @@ -211,7 +211,7 @@ function (-)(J::UniformScaling{<:Complex}, A::Hermitian) TS = Base.promote_op(+, eltype(A), typeof(J)) B = copytri!(copymutable_oftype(parent(A), TS), A.uplo, true) B .= .-B - for i in diagind(B) + for i in diagind(B, IndexStyle(B)) B[i] = J - A[i] end return B diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl index b80412f98e8a4..a7616e2fc294a 100644 --- a/stdlib/LinearAlgebra/test/dense.jl +++ b/stdlib/LinearAlgebra/test/dense.jl @@ -1029,6 +1029,15 @@ end @test diag(zeros(0,1),2) == [] end +@testset "diagview" begin + for sz in ((3,3), (3,5), (5,3)) + A = rand(sz...) + for k in -5:5 + @test diagview(A,k) == diag(A,k) + end + end +end + @testset "issue #39857" begin @test lyap(1.0+2.0im, 3.0+4.0im) == -1.5 - 2.0im end diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index aa3baec8f6be8..dc14ddb1d1b27 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -1065,4 +1065,14 @@ end end end +@testset "diagview" begin + A = Tridiagonal(rand(3), rand(4), rand(3)) + for k in -5:5 + @test diagview(A,k) == diag(A,k) + end + v = diagview(A,1) + v .= 0 + @test all(iszero, diag(A,1)) +end + end # module TestTridiagonal From f93138ed0791799bf4bd33649cb3269054474a24 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 11 Nov 2024 20:48:37 +0530 Subject: [PATCH 423/537] Specialize `isbanded` for `StridedMatrix` (#56487) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This improves performance, as the loops in `istriu` and `istril` may be fused to improve cache-locality. This also changes the quick-return behavior, and only returns after the check over all the upper or lower bands for a column is complete. 
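As a reference point, a naive definition of the same predicate (a hypothetical helper, not the code in this patch) that the specialized method must agree with: `A` is banded within `(kl, ku)` if every entry outside those diagonals is zero.

```julia
using LinearAlgebra

# Naive reference (hypothetical): entry (i, j) lies on diagonal j - i,
# so everything outside kl:ku must be zero.
function isbanded_ref(A::AbstractMatrix, kl::Integer, ku::Integer)
    for j in axes(A, 2), i in axes(A, 1)
        if !(kl <= j - i <= ku) && !iszero(A[i, j])
            return false
        end
    end
    return true
end

A = diagm(0 => 1:4, 1 => 1:3)          # upper-bidiagonal test matrix
isbanded_ref(A, 0, 1) == LinearAlgebra.isbanded(A, 0, 1)   # true
```

The benchmarks below show the effect of the merged, cache-friendly loop: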
```julia julia> using LinearAlgebra julia> A = zeros(2, 10_000); julia> @btime isdiag($A); 32.682 μs (0 allocations: 0 bytes) # nightly v"1.12.0-DEV.1593" 9.481 μs (0 allocations: 0 bytes) # this PR julia> A = zeros(10_000, 2); julia> @btime isdiag($A); 10.288 μs (0 allocations: 0 bytes) # nightly 2.579 μs (0 allocations: 0 bytes) # this PR julia> A = zeros(100, 100); julia> @btime isdiag($A); 6.616 μs (0 allocations: 0 bytes) # nightly 3.075 μs (0 allocations: 0 bytes) # this PR julia> A = diagm(0=>1:100); A[3,4] = 1; julia> @btime isdiag($A); 2.759 μs (0 allocations: 0 bytes) # nightly 85.371 ns (0 allocations: 0 bytes) # this PR ``` A similar change is added to `istriu`/`istril` as well, so that ```julia julia> A = zeros(2, 10_000); julia> @btime istriu($A); # trivial 7.358 ns (0 allocations: 0 bytes) # nightly 13.779 ns (0 allocations: 0 bytes) # this PR julia> @btime istril($A); 33.464 μs (0 allocations: 0 bytes) # nightly 9.476 μs (0 allocations: 0 bytes) # this PR julia> A = zeros(10_000, 2); julia> @btime istriu($A); 10.020 μs (0 allocations: 0 bytes) # nightly 2.620 μs (0 allocations: 0 bytes) # this PR julia> @btime istril($A); # trivial 6.793 ns (0 allocations: 0 bytes) # nightly 14.473 ns (0 allocations: 0 bytes) # this PR julia> A = zeros(100, 100); julia> @btime istriu($A); 3.435 μs (0 allocations: 0 bytes) # nightly 1.637 μs (0 allocations: 0 bytes) # this PR julia> @btime istril($A); 3.353 μs (0 allocations: 0 bytes) # nightly 1.661 μs (0 allocations: 0 bytes) # this PR ``` --------- Co-authored-by: Daniel Karrasch --- stdlib/LinearAlgebra/src/generic.jl | 95 ++++++++++++++++++------- stdlib/LinearAlgebra/src/hessenberg.jl | 10 +++ stdlib/LinearAlgebra/src/special.jl | 1 + stdlib/LinearAlgebra/src/triangular.jl | 8 ++- stdlib/LinearAlgebra/test/generic.jl | 58 ++++++++++++++- stdlib/LinearAlgebra/test/hessenberg.jl | 26 +++++++ 6 files changed, 170 insertions(+), 28 deletions(-) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 21719c0c50127..666ad631f919a 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -1353,6 +1353,14 @@ end ishermitian(x::Number) = (x == conj(x)) +# helper function equivalent to `iszero(v)`, but potentially without the fast exit feature +# of `all` if this improves performance +_iszero(V) = iszero(V) +# A Base.FastContiguousSubArray view of a StridedArray +FastContiguousSubArrayStrided{T,N,P<:StridedArray,I<:Tuple{AbstractUnitRange, Vararg{Any}}} = Base.SubArray{T,N,P,I,true} +# using mapreduce instead of all permits vectorization +_iszero(V::FastContiguousSubArrayStrided) = mapreduce(iszero, &, V, init=true) + """ istriu(A::AbstractMatrix, k::Integer = 0) -> Bool @@ -1384,20 +1392,9 @@ julia> istriu(c, -1) true ``` """ -function istriu(A::AbstractMatrix, k::Integer = 0) - require_one_based_indexing(A) - return _istriu(A, k) -end +istriu(A::AbstractMatrix, k::Integer = 0) = _isbanded_impl(A, k, size(A,2)-1) istriu(x::Number) = true -@inline function _istriu(A::AbstractMatrix, k) - m, n = size(A) - for j in 1:min(n, m + k - 1) - all(iszero, view(A, max(1, j - k + 1):m, j)) || return false - end - return true -end - """ istril(A::AbstractMatrix, k::Integer = 0) -> Bool @@ -1429,20 +1426,9 @@ julia> istril(c, 1) true ``` """ -function istril(A::AbstractMatrix, k::Integer = 0) - require_one_based_indexing(A) - return _istril(A, k) -end +istril(A::AbstractMatrix, k::Integer = 0) = _isbanded_impl(A, -size(A,1)+1, k) istril(x::Number) = true -@inline function 
_istril(A::AbstractMatrix, k) - m, n = size(A) - for j in max(1, k + 2):n - all(iszero, view(A, 1:min(j - k - 1, m), j)) || return false - end - return true -end - """ isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) -> Bool @@ -1474,7 +1460,66 @@ julia> LinearAlgebra.isbanded(b, -1, 0) true ``` """ -isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) = istriu(A, kl) && istril(A, ku) +isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) = _isbanded(A, kl, ku) +_isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) = istriu(A, kl) && istril(A, ku) +# Performance optimization for StridedMatrix by better utilizing cache locality +# The istriu and istril loops are merged +# the additional indirection allows us to reuse the isbanded loop within istriu/istril +# without encountering cycles +_isbanded(A::StridedMatrix, kl::Integer, ku::Integer) = _isbanded_impl(A, kl, ku) +function _isbanded_impl(A, kl, ku) + Base.require_one_based_indexing(A) + + #= + We split the column range into four possible groups, depending on the values of kl and ku. + + The first is the bottom left triangle, where bands below kl must be zero, + but there are no bands above ku in that column. + + The second is where there are both bands below kl and above ku in the column. + These are the middle columns typically. + + The third is the top right, where there are bands above ku but no bands below kl + in the column. + + The fourth is mainly relevant for wide matrices, where there is a block to the right + beyond ku, where the elements should all be zero. The reason we separate this from the + third group is that we may loop over all the rows using A[:, col] instead of A[rowrange, col], + which is usually faster. + =# + + last_col_nonzeroblocks = size(A,1) + ku # fully zero rectangular block beyond this column + last_col_emptytoprows = ku + 1 # empty top rows before this column + last_col_nonemptybottomrows = size(A,1) + kl - 1 # empty bottom rows after this column + + colrange_onlybottomrows = firstindex(A,2):min(last_col_nonemptybottomrows, last_col_emptytoprows) + colrange_topbottomrows = max(last_col_emptytoprows, last(colrange_onlybottomrows))+1:last_col_nonzeroblocks + colrange_onlytoprows_nonzero = last(colrange_topbottomrows)+1:last_col_nonzeroblocks + colrange_zero_block = last_col_nonzeroblocks+1:lastindex(A,2) + + for col in intersect(axes(A,2), colrange_onlybottomrows) # only loop over the bottom rows + botrowinds = max(firstindex(A,1), col-kl+1):lastindex(A,1) + bottomrows = @view A[botrowinds, col] + _iszero(bottomrows) || return false + end + for col in intersect(axes(A,2), colrange_topbottomrows) + toprowinds = firstindex(A,1):min(col-ku-1, lastindex(A,1)) + toprows = @view A[toprowinds, col] + _iszero(toprows) || return false + botrowinds = max(firstindex(A,1), col-kl+1):lastindex(A,1) + bottomrows = @view A[botrowinds, col] + _iszero(bottomrows) || return false + end + for col in intersect(axes(A,2), colrange_onlytoprows_nonzero) + toprowinds = firstindex(A,1):min(col-ku-1, lastindex(A,1)) + toprows = @view A[toprowinds, col] + _iszero(toprows) || return false + end + for col in intersect(axes(A,2), colrange_zero_block) + _iszero(@view A[:, col]) || return false + end + return true +end """ isdiag(A) -> Bool diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl index bfe2fdd41aace..ed654c33aba55 100644 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ b/stdlib/LinearAlgebra/src/hessenberg.jl @@ -77,6 +77,16 @@ Base.@constprop :aggressive function 
istriu(A::UpperHessenberg, k::Integer=0) k <= -1 && return true return _istriu(A, k) end +# additional indirection to dispatch to optimized method for banded parents (defined in special.jl) +@inline function _istriu(A::UpperHessenberg, k) + P = parent(A) + m = size(A, 1) + for j in firstindex(P,2):min(m + k - 1, lastindex(P,2)) + Prows = @view P[max(begin, j - k + 1):min(j+1,end), j] + _iszero(Prows) || return false + end + return true +end function Matrix{T}(H::UpperHessenberg) where T m,n = size(H) diff --git a/stdlib/LinearAlgebra/src/special.jl b/stdlib/LinearAlgebra/src/special.jl index 6d25540ee3f07..c61586a810140 100644 --- a/stdlib/LinearAlgebra/src/special.jl +++ b/stdlib/LinearAlgebra/src/special.jl @@ -592,3 +592,4 @@ end # istriu/istril for triangular wrappers of structured matrices _istril(A::LowerTriangular{<:Any, <:BandedMatrix}, k) = istril(parent(A), k) _istriu(A::UpperTriangular{<:Any, <:BandedMatrix}, k) = istriu(parent(A), k) +_istriu(A::UpperHessenberg{<:Any, <:BandedMatrix}, k) = istriu(parent(A), k) diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 49ff5d7f9c3ec..76d97133de796 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -348,25 +348,29 @@ Base.@constprop :aggressive function istril(A::LowerTriangular, k::Integer=0) k >= 0 && return true return _istril(A, k) end +# additional indirection to dispatch to optimized method for banded parents (defined in special.jl) @inline function _istril(A::LowerTriangular, k) P = parent(A) for j in max(firstindex(P,2), k + 2):lastindex(P,2) - all(iszero, @view(P[j:min(j - k - 1, end), j])) || return false + _iszero(@view P[max(j, begin):min(j - k - 1, end), j]) || return false end return true end + Base.@constprop :aggressive function istriu(A::UpperTriangular, k::Integer=0) k <= 0 && return true return _istriu(A, k) end +# additional indirection to dispatch to optimized method for banded parents (defined in special.jl) @inline function _istriu(A::UpperTriangular, k) P = parent(A) m = size(A, 1) for j in firstindex(P,2):min(m + k - 1, lastindex(P,2)) - all(iszero, @view(P[max(begin, j - k + 1):j, j])) || return false + _iszero(@view P[max(begin, j - k + 1):min(j, end), j]) || return false end return true end + istril(A::Adjoint, k::Integer=0) = istriu(A.parent, -k) istril(A::Transpose, k::Integer=0) = istriu(A.parent, -k) istriu(A::Adjoint, k::Integer=0) = istril(A.parent, -k) diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl index 725f9b3497db8..6d11ec824e538 100644 --- a/stdlib/LinearAlgebra/test/generic.jl +++ b/stdlib/LinearAlgebra/test/generic.jl @@ -3,6 +3,8 @@ module TestGeneric using Test, LinearAlgebra, Random +using Test: GenericArray +using LinearAlgebra: isbanded const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") @@ -511,56 +513,110 @@ end end @testset "generic functions for checking whether matrices have banded structure" begin - using LinearAlgebra: isbanded pentadiag = [1 2 3; 4 5 6; 7 8 9] tridiag = [1 2 0; 4 5 6; 0 8 9] + tridiagG = GenericArray([1 2 0; 4 5 6; 0 8 9]) + Tridiag = Tridiagonal(tridiag) ubidiag = [1 2 0; 0 5 6; 0 0 9] + ubidiagG = GenericArray([1 2 0; 0 5 6; 0 0 9]) + uBidiag = Bidiagonal(ubidiag, :U) lbidiag = [1 0 0; 4 5 0; 0 8 9] + lbidiagG = GenericArray([1 0 0; 4 5 0; 0 8 9]) + lBidiag = Bidiagonal(lbidiag, :L) adiag = [1 0 0; 0 5 0; 0 0 9] + adiagG = GenericArray([1 0 0; 0 5 0; 0 0 9]) + aDiag = Diagonal(adiag) @testset "istriu" 
begin @test !istriu(pentadiag) @test istriu(pentadiag, -2) @test !istriu(tridiag) + @test istriu(tridiag) == istriu(tridiagG) == istriu(Tridiag) @test istriu(tridiag, -1) + @test istriu(tridiag, -1) == istriu(tridiagG, -1) == istriu(Tridiag, -1) @test istriu(ubidiag) + @test istriu(ubidiag) == istriu(ubidiagG) == istriu(uBidiag) @test !istriu(ubidiag, 1) + @test istriu(ubidiag, 1) == istriu(ubidiagG, 1) == istriu(uBidiag, 1) @test !istriu(lbidiag) + @test istriu(lbidiag) == istriu(lbidiagG) == istriu(lBidiag) @test istriu(lbidiag, -1) + @test istriu(lbidiag, -1) == istriu(lbidiagG, -1) == istriu(lBidiag, -1) @test istriu(adiag) + @test istriu(adiag) == istriu(adiagG) == istriu(aDiag) end @testset "istril" begin @test !istril(pentadiag) @test istril(pentadiag, 2) @test !istril(tridiag) + @test istril(tridiag) == istril(tridiagG) == istril(Tridiag) @test istril(tridiag, 1) + @test istril(tridiag, 1) == istril(tridiagG, 1) == istril(Tridiag, 1) @test !istril(ubidiag) + @test istril(ubidiag) == istril(ubidiagG) == istril(ubidiagG) @test istril(ubidiag, 1) + @test istril(ubidiag, 1) == istril(ubidiagG, 1) == istril(uBidiag, 1) @test istril(lbidiag) + @test istril(lbidiag) == istril(lbidiagG) == istril(lBidiag) @test !istril(lbidiag, -1) + @test istril(lbidiag, -1) == istril(lbidiagG, -1) == istril(lBidiag, -1) @test istril(adiag) + @test istril(adiag) == istril(adiagG) == istril(aDiag) end @testset "isbanded" begin @test isbanded(pentadiag, -2, 2) @test !isbanded(pentadiag, -1, 2) @test !isbanded(pentadiag, -2, 1) @test isbanded(tridiag, -1, 1) + @test isbanded(tridiag, -1, 1) == isbanded(tridiagG, -1, 1) == isbanded(Tridiag, -1, 1) @test !isbanded(tridiag, 0, 1) + @test isbanded(tridiag, 0, 1) == isbanded(tridiagG, 0, 1) == isbanded(Tridiag, 0, 1) @test !isbanded(tridiag, -1, 0) + @test isbanded(tridiag, -1, 0) == isbanded(tridiagG, -1, 0) == isbanded(Tridiag, -1, 0) @test isbanded(ubidiag, 0, 1) + @test isbanded(ubidiag, 0, 1) == isbanded(ubidiagG, 0, 1) == isbanded(uBidiag, 0, 1) @test !isbanded(ubidiag, 1, 1) + @test isbanded(ubidiag, 1, 1) == isbanded(ubidiagG, 1, 1) == isbanded(uBidiag, 1, 1) @test !isbanded(ubidiag, 0, 0) + @test isbanded(ubidiag, 0, 0) == isbanded(ubidiagG, 0, 0) == isbanded(uBidiag, 0, 0) @test isbanded(lbidiag, -1, 0) + @test isbanded(lbidiag, -1, 0) == isbanded(lbidiagG, -1, 0) == isbanded(lBidiag, -1, 0) @test !isbanded(lbidiag, 0, 0) + @test isbanded(lbidiag, 0, 0) == isbanded(lbidiagG, 0, 0) == isbanded(lBidiag, 0, 0) @test !isbanded(lbidiag, -1, -1) + @test isbanded(lbidiag, -1, -1) == isbanded(lbidiagG, -1, -1) == isbanded(lBidiag, -1, -1) @test isbanded(adiag, 0, 0) + @test isbanded(adiag, 0, 0) == isbanded(adiagG, 0, 0) == isbanded(aDiag, 0, 0) @test !isbanded(adiag, -1, -1) + @test isbanded(adiag, -1, -1) == isbanded(adiagG, -1, -1) == isbanded(aDiag, -1, -1) @test !isbanded(adiag, 1, 1) + @test isbanded(adiag, 1, 1) == isbanded(adiagG, 1, 1) == isbanded(aDiag, 1, 1) end @testset "isdiag" begin @test !isdiag(tridiag) + @test isdiag(tridiag) == isdiag(tridiagG) == isdiag(Tridiag) @test !isdiag(ubidiag) + @test isdiag(ubidiag) == isdiag(ubidiagG) == isdiag(uBidiag) @test !isdiag(lbidiag) + @test isdiag(lbidiag) == isdiag(lbidiagG) == isdiag(lBidiag) @test isdiag(adiag) + @test isdiag(adiag) ==isdiag(adiagG) == isdiag(aDiag) + end +end + +@testset "isbanded/istril/istriu with rectangular matrices" begin + @testset "$(size(A))" for A in [zeros(0,4), zeros(2,5), zeros(5,2), zeros(4,0)] + @testset for m in -(size(A,1)-1):(size(A,2)-1) + A .= 0 + A[diagind(A, m)] .= 
1 + G = GenericArray(A) + @testset for (kl,ku) in Iterators.product(-6:6, -6:6) + @test isbanded(A, kl, ku) == isbanded(G, kl, ku) == isempty(A) || (m in (kl:ku)) + end + @testset for k in -6:6 + @test istriu(A,k) == istriu(G,k) == isempty(A) || (k <= m) + @test istril(A,k) == istril(G,k) == isempty(A) || (k >= m) + end + end end end diff --git a/stdlib/LinearAlgebra/test/hessenberg.jl b/stdlib/LinearAlgebra/test/hessenberg.jl index 54dbb70aa2065..de58fea9fb27e 100644 --- a/stdlib/LinearAlgebra/test/hessenberg.jl +++ b/stdlib/LinearAlgebra/test/hessenberg.jl @@ -279,4 +279,30 @@ end @test H.H == D end +@testset "istriu/istril forwards to parent" begin + n = 10 + @testset "$(nameof(typeof(M)))" for M in [Tridiagonal(rand(n-1), rand(n), rand(n-1)), + Tridiagonal(zeros(n-1), zeros(n), zeros(n-1)), + Diagonal(randn(n)), + Diagonal(zeros(n)), + ] + U = UpperHessenberg(M) + A = Array(U) + for k in -n:n + @test istriu(U, k) == istriu(A, k) + @test istril(U, k) == istril(A, k) + end + end + z = zeros(n,n) + P = Matrix{BigFloat}(undef, n, n) + copytrito!(P, z, 'U') + P[diagind(P,-1)] .= 0 + U = UpperHessenberg(P) + A = Array(U) + @testset for k in -n:n + @test istriu(U, k) == istriu(A, k) + @test istril(U, k) == istril(A, k) + end +end + end # module TestHessenberg From ad243681bddad93d4b700d1417e0d45d00a31fc1 Mon Sep 17 00:00:00 2001 From: Michael Abbott <32575566+mcabbott@users.noreply.github.com> Date: Mon, 11 Nov 2024 11:21:59 -0500 Subject: [PATCH 424/537] Tighten typechecking in `repeat` to match what's documented (#55444) --- base/abstractarraymath.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/base/abstractarraymath.jl b/base/abstractarraymath.jl index 0f028a0f66729..54b6d75cee2dc 100644 --- a/base/abstractarraymath.jl +++ b/base/abstractarraymath.jl @@ -518,6 +518,9 @@ function check(arr, inner, outer) # TODO: Currently one based indexing is demanded for inner !== nothing, # but not for outer !== nothing. Decide for something consistent. Base.require_one_based_indexing(arr) + if !all(n -> n isa Integer, inner) + throw(ArgumentError("repeat requires integer counts, got inner = $inner")) + end if any(<(0), inner) throw(ArgumentError("no inner repetition count may be negative; got $inner")) end @@ -526,6 +529,9 @@ function check(arr, inner, outer) end end if outer !== nothing + if !all(n -> n isa Integer, outer) + throw(ArgumentError("repeat requires integer counts, got outer = $outer")) + end if any(<(0), outer) throw(ArgumentError("no outer repetition count may be negative; got $outer")) end From 1e0cee5c8758b0f3165bce4085d450ef44b19494 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Mon, 11 Nov 2024 22:28:04 +0530 Subject: [PATCH 425/537] Replace `MulAddMul` by `alpha,beta` in `__muldiag` (#56360) This PR replaces `MulAddMul` arguments by `alpha, beta` pairs in the multiplication methods involving `Diagonal` matrices, and constructs the objects exactly where they are required. Such an approach improves latency. 
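For context, the operation exercised below is the five-argument `mul!`, which overwrites `C` with `A*D*alpha + C*beta`; a small correctness check with arbitrary sizes:

```julia
using LinearAlgebra

D = Diagonal(1:4); A = rand(4, 4); C = rand(4, 4)
expected = A * D * 1 + C * 2       # what mul!(C, A, D, 1, 2) should produce
mul!(C, A, D, 1, 2) ≈ expected     # true
```

The latency measurements below use this same call pattern: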
```julia julia> D = Diagonal(1:2000); A = rand(size(D)...); C = similar(A); julia> @time mul!(C, A, D, 1, 2); # first-run latency is reduced 0.129741 seconds (180.18 k allocations: 9.607 MiB, 88.87% compilation time) # nightly v"1.12.0-DEV.1505" 0.083005 seconds (146.68 k allocations: 7.442 MiB, 82.94% compilation time) # this PR julia> @btime mul!($C, $A, $D, 1, 2); # runtime performance is unaffected 4.983 ms (0 allocations: 0 bytes) # nightly 4.938 ms (0 allocations: 0 bytes) # this PR ``` This PR sets the stage for a similar change for `Bidiagonal`/`Tridiaognal` matrices, which would lead to a bigger reduction in latencies. --- stdlib/LinearAlgebra/src/bidiag.jl | 16 +++-- stdlib/LinearAlgebra/src/diagonal.jl | 100 ++++++++++++++------------- 2 files changed, 63 insertions(+), 53 deletions(-) diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl index aefaf16337d83..5b7264558f9ae 100644 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ b/stdlib/LinearAlgebra/src/bidiag.jl @@ -472,10 +472,14 @@ const BiTri = Union{Bidiagonal,Tridiagonal} @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) @inline _mul!(C::AbstractMatrix, A::BandedMatrix, B::AbstractVector, alpha::Number, beta::Number) = @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) -@inline _mul!(C::AbstractMatrix, A::BandedMatrix, B::AbstractMatrix, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) -@inline _mul!(C::AbstractMatrix, A::AbstractMatrix, B::BandedMatrix, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) +for T in (:AbstractMatrix, :Diagonal) + @eval begin + @inline _mul!(C::AbstractMatrix, A::BandedMatrix, B::$T, alpha::Number, beta::Number) = + @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) + @inline _mul!(C::AbstractMatrix, A::$T, B::BandedMatrix, alpha::Number, beta::Number) = + @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) + end +end @inline _mul!(C::AbstractMatrix, A::BandedMatrix, B::BandedMatrix, alpha::Number, beta::Number) = @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) @@ -831,6 +835,8 @@ function __bibimul!(C, A::Bidiagonal, B::Bidiagonal, _add) C end +_mul!(C::AbstractMatrix, A::BiTriSym, B::Diagonal, alpha::Number, beta::Number) = + @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) function _mul!(C::AbstractMatrix, A::BiTriSym, B::Diagonal, _add::MulAddMul) require_one_based_indexing(C) check_A_mul_B!_sizes(size(C), size(A), size(B)) @@ -1067,6 +1073,8 @@ function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::Bidiagonal, _add::MulAdd C end +_mul!(C::AbstractMatrix, A::Diagonal, B::BiTriSym, alpha::Number, beta::Number) = + @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) _mul!(C::AbstractMatrix, A::Diagonal, B::Bidiagonal, _add::MulAddMul) = _dibimul!(C, A, B, _add) _mul!(C::AbstractMatrix, A::Diagonal, B::TriSym, _add::MulAddMul) = diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl index 7594e8bca4f56..243df4d82eec2 100644 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ b/stdlib/LinearAlgebra/src/diagonal.jl @@ -397,13 +397,13 @@ function lmul!(D::Diagonal, T::Tridiagonal) return T end -@inline function __muldiag_nonzeroalpha!(out, D::Diagonal, B, _add::MulAddMul) +@inline function __muldiag_nonzeroalpha!(out, D::Diagonal, B, alpha::Number, beta::Number) @inbounds for j in axes(B, 2) @simd for i in axes(B, 1) - _modify!(_add, D.diag[i] * B[i,j], out, (i,j)) + @stable_muladdmul 
_modify!(MulAddMul(alpha,beta), D.diag[i] * B[i,j], out, (i,j)) end end - out + return out end _has_matching_zeros(out::UpperOrUnitUpperTriangular, A::UpperOrUnitUpperTriangular) = true _has_matching_zeros(out::LowerOrUnitLowerTriangular, A::LowerOrUnitLowerTriangular) = true @@ -418,116 +418,118 @@ function _rowrange_tri_stored(B::LowerOrUnitLowerTriangular, col) end _rowrange_tri_zeros(B::UpperOrUnitUpperTriangular, col) = col+1:size(B,1) _rowrange_tri_zeros(B::LowerOrUnitLowerTriangular, col) = 1:col-1 -function __muldiag_nonzeroalpha!(out, D::Diagonal, B::UpperOrLowerTriangular, _add::MulAddMul) +function __muldiag_nonzeroalpha!(out, D::Diagonal, B::UpperOrLowerTriangular, alpha::Number, beta::Number) isunit = B isa UnitUpperOrUnitLowerTriangular out_maybeparent, B_maybeparent = _has_matching_zeros(out, B) ? (parent(out), parent(B)) : (out, B) for j in axes(B, 2) # store the diagonal separately for unit triangular matrices if isunit - @inbounds _modify!(_add, D.diag[j] * B[j,j], out, (j,j)) + @inbounds @stable_muladdmul _modify!(MulAddMul(alpha,beta), D.diag[j] * B[j,j], out, (j,j)) end # The indices of out corresponding to the stored indices of B rowrange = _rowrange_tri_stored(B, j) @inbounds @simd for i in rowrange - _modify!(_add, D.diag[i] * B_maybeparent[i,j], out_maybeparent, (i,j)) + @stable_muladdmul _modify!(MulAddMul(alpha,beta), D.diag[i] * B_maybeparent[i,j], out_maybeparent, (i,j)) end # Fill the indices of out corresponding to the zeros of B # we only fill these if out and B don't have matching zeros if !_has_matching_zeros(out, B) rowrange = _rowrange_tri_zeros(B, j) @inbounds @simd for i in rowrange - _modify!(_add, D.diag[i] * B[i,j], out, (i,j)) + @stable_muladdmul _modify!(MulAddMul(alpha,beta), D.diag[i] * B[i,j], out, (i,j)) end end end return out end -@inline function __muldiag_nonzeroalpha!(out, A, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} - beta = _add.beta - _add_aisone = MulAddMul{true,bis0,Bool,typeof(beta)}(true, beta) +@inline function __muldiag_nonzeroalpha_right!(out, A, D::Diagonal, alpha::Number, beta::Number) @inbounds for j in axes(A, 2) - dja = _add(D.diag[j]) + dja = @stable_muladdmul MulAddMul(alpha,false)(D.diag[j]) @simd for i in axes(A, 1) - _modify!(_add_aisone, A[i,j] * dja, out, (i,j)) + @stable_muladdmul _modify!(MulAddMul(true,beta), A[i,j] * dja, out, (i,j)) end end - out + return out +end + +function __muldiag_nonzeroalpha!(out, A, D::Diagonal, alpha::Number, beta::Number) + __muldiag_nonzeroalpha_right!(out, A, D, alpha, beta) end -function __muldiag_nonzeroalpha!(out, A::UpperOrLowerTriangular, D::Diagonal, _add::MulAddMul{ais1,bis0}) where {ais1,bis0} +function __muldiag_nonzeroalpha!(out, A::UpperOrLowerTriangular, D::Diagonal, alpha::Number, beta::Number) isunit = A isa UnitUpperOrUnitLowerTriangular - beta = _add.beta - # since alpha is multiplied to the diagonal element of D, - # we may skip alpha in the second multiplication by setting ais1 to true - _add_aisone = MulAddMul{true,bis0,Bool,typeof(beta)}(true, beta) # if both A and out have the same upper/lower triangular structure, # we may directly read and write from the parents - out_maybeparent, A_maybeparent = _has_matching_zeros(out, A) ? (parent(out), parent(A)) : (out, A) + out_maybeparent, A_maybeparent = _has_matching_zeros(out, A) ? 
(parent(out), parent(A)) : (out, A) for j in axes(A, 2) - dja = _add(@inbounds D.diag[j]) + dja = @stable_muladdmul MulAddMul(alpha,false)(@inbounds D.diag[j]) # store the diagonal separately for unit triangular matrices if isunit - @inbounds _modify!(_add_aisone, A[j,j] * dja, out, (j,j)) + # since alpha is multiplied to the diagonal element of D, + # we may skip alpha in the second multiplication by setting ais1 to true + @inbounds @stable_muladdmul _modify!(MulAddMul(true,beta), A[j,j] * dja, out, (j,j)) end # indices of out corresponding to the stored indices of A rowrange = _rowrange_tri_stored(A, j) @inbounds @simd for i in rowrange - _modify!(_add_aisone, A_maybeparent[i,j] * dja, out_maybeparent, (i,j)) + # since alpha is multiplied to the diagonal element of D, + # we may skip alpha in the second multiplication by setting ais1 to true + @stable_muladdmul _modify!(MulAddMul(true,beta), A_maybeparent[i,j] * dja, out_maybeparent, (i,j)) end # Fill the indices of out corresponding to the zeros of A # we only fill these if out and A don't have matching zeros if !_has_matching_zeros(out, A) rowrange = _rowrange_tri_zeros(A, j) @inbounds @simd for i in rowrange - _modify!(_add_aisone, A[i,j] * dja, out, (i,j)) + @stable_muladdmul _modify!(MulAddMul(true,beta), A[i,j] * dja, out, (i,j)) end end end - out + return out +end + +# ambiguity resolution +function __muldiag_nonzeroalpha!(out, D1::Diagonal, D2::Diagonal, alpha::Number, beta::Number) + __muldiag_nonzeroalpha_right!(out, D1, D2, alpha, beta) end -@inline function __muldiag_nonzeroalpha!(out::Diagonal, D1::Diagonal, D2::Diagonal, _add::MulAddMul) +@inline function __muldiag_nonzeroalpha!(out::Diagonal, D1::Diagonal, D2::Diagonal, alpha::Number, beta::Number) d1 = D1.diag d2 = D2.diag outd = out.diag @inbounds @simd for i in eachindex(d1, d2, outd) - _modify!(_add, d1[i] * d2[i], outd, i) + @stable_muladdmul _modify!(MulAddMul(alpha,beta), d1[i] * d2[i], outd, i) end - out -end - -# ambiguity resolution -@inline function __muldiag_nonzeroalpha!(out, D1::Diagonal, D2::Diagonal, _add::MulAddMul) - @inbounds for j in axes(D2, 2), i in axes(D2, 1) - _modify!(_add, D1.diag[i] * D2[i,j], out, (i,j)) - end - out + return out end -# muldiag mainly handles the zero-alpha case, so that we need only +# muldiag handles the zero-alpha case, so that we need only # specialize the non-trivial case -function _mul_diag!(out, A, B, _add) +function _mul_diag!(out, A, B, alpha, beta) require_one_based_indexing(out, A, B) _muldiag_size_check(size(out), size(A), size(B)) - alpha, beta = _add.alpha, _add.beta if iszero(alpha) _rmul_or_fill!(out, beta) else - __muldiag_nonzeroalpha!(out, A, B, _add) + __muldiag_nonzeroalpha!(out, A, B, alpha, beta) end return out end -_mul!(out::AbstractVecOrMat, D::Diagonal, V::AbstractVector, _add) = - _mul_diag!(out, D, V, _add) -_mul!(out::AbstractMatrix, D::Diagonal, B::AbstractMatrix, _add) = - _mul_diag!(out, D, B, _add) -_mul!(out::AbstractMatrix, A::AbstractMatrix, D::Diagonal, _add) = - _mul_diag!(out, A, D, _add) -_mul!(C::Diagonal, Da::Diagonal, Db::Diagonal, _add) = - _mul_diag!(C, Da, Db, _add) -_mul!(C::AbstractMatrix, Da::Diagonal, Db::Diagonal, _add) = - _mul_diag!(C, Da, Db, _add) +_mul!(out::AbstractVector, D::Diagonal, V::AbstractVector, alpha::Number, beta::Number) = + _mul_diag!(out, D, V, alpha, beta) +_mul!(out::AbstractMatrix, D::Diagonal, V::AbstractVector, alpha::Number, beta::Number) = + _mul_diag!(out, D, V, alpha, beta) +for MT in (:AbstractMatrix, :AbstractTriangular) + @eval begin + 
_mul!(out::AbstractMatrix, D::Diagonal, B::$MT, alpha::Number, beta::Number) = + _mul_diag!(out, D, B, alpha, beta) + _mul!(out::AbstractMatrix, A::$MT, D::Diagonal, alpha::Number, beta::Number) = + _mul_diag!(out, A, D, alpha, beta) + end +end +_mul!(C::AbstractMatrix, Da::Diagonal, Db::Diagonal, alpha::Number, beta::Number) = + _mul_diag!(C, Da, Db, alpha, beta) function (*)(Da::Diagonal, A::AbstractMatrix, Db::Diagonal) _muldiag_size_check(size(Da), size(A)) From c4802e14406d42f1c1d6c0c6ea424ec4b7049e07 Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Mon, 11 Nov 2024 20:20:31 -0500 Subject: [PATCH 426/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?SparseArrays=20stdlib=20from=200dd8d45=20to=2014333ea=20(#56531?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stdlib: SparseArrays URL: https://github.com/JuliaSparse/SparseArrays.jl.git Stdlib branch: main Julia branch: master Old commit: 0dd8d45 New commit: 14333ea Julia version: 1.12.0-DEV SparseArrays version: 1.12.0 Bump invoked by: @ViralBShah Powered by: [BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl) Diff: https://github.com/JuliaSparse/SparseArrays.jl/compare/0dd8d45d55b305458d0d3d3451057589b684f72f...14333eae647464121150ae77d9f2dbe673aa244b ``` $ git log --oneline 0dd8d45..14333ea 14333ea Break recursion (#579) 07cf4a6 Update ci.yml (#578) 33491e0 added diagonal-sparse multiplication (#564) 8f02b7f doc: move solvers doc to `src\solvers.md` (#576) 485fd4b Inline sparse-times-dense in-place multiplication (#567) f10d4da added specialized method for 3-argument dot with diagonal matrix (#565) 70c06b1 Diagonal-sandwiched triple product for SparseMatrixCSC (#562) 313a04f Change default QR tolerance to match SPQR (#557) 81d49e9 Update ci.yml (#558) ``` Co-authored-by: Dilum Aluthge --- .../md5 | 1 - .../sha512 | 1 - .../md5 | 1 + .../sha512 | 1 + stdlib/SparseArrays.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/SparseArrays-0dd8d45d55b305458d0d3d3451057589b684f72f.tar.gz/md5 delete mode 100644 deps/checksums/SparseArrays-0dd8d45d55b305458d0d3d3451057589b684f72f.tar.gz/sha512 create mode 100644 deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/md5 create mode 100644 deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/sha512 diff --git a/deps/checksums/SparseArrays-0dd8d45d55b305458d0d3d3451057589b684f72f.tar.gz/md5 b/deps/checksums/SparseArrays-0dd8d45d55b305458d0d3d3451057589b684f72f.tar.gz/md5 deleted file mode 100644 index 7182cc71f7b35..0000000000000 --- a/deps/checksums/SparseArrays-0dd8d45d55b305458d0d3d3451057589b684f72f.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -2db86c7030acc973d5b46a87f32f7e99 diff --git a/deps/checksums/SparseArrays-0dd8d45d55b305458d0d3d3451057589b684f72f.tar.gz/sha512 b/deps/checksums/SparseArrays-0dd8d45d55b305458d0d3d3451057589b684f72f.tar.gz/sha512 deleted file mode 100644 index a9e18eac9bfaa..0000000000000 --- a/deps/checksums/SparseArrays-0dd8d45d55b305458d0d3d3451057589b684f72f.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -0d3f54e7e75b48966e1816608d6ddf62175b92a0c778813a562df20750c6ecef9e4ccc24f9f3fffe4051d4b6765332add8c289fcdc598c320f400cec57a223a3 diff --git a/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/md5 b/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/md5 new file mode 100644 index 
0000000000000..70a9d57cb6e13 --- /dev/null +++ b/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/md5 @@ -0,0 +1 @@ +28f61ce3c94e2b5a795f077779ba80d3 diff --git a/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/sha512 b/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/sha512 new file mode 100644 index 0000000000000..f432dbedd64e6 --- /dev/null +++ b/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/sha512 @@ -0,0 +1 @@ +27d8de35f1e821bd6512ad46d8804719b2f1822d80e3b9ee19aae21efc0bd562d3814cf41b08dfd71df0fd7daabb11959a6d25045cde09c7385aaf52e0befdfe diff --git a/stdlib/SparseArrays.version b/stdlib/SparseArrays.version index 019306a3e9f65..9a738d89215b5 100644 --- a/stdlib/SparseArrays.version +++ b/stdlib/SparseArrays.version @@ -1,4 +1,4 @@ SPARSEARRAYS_BRANCH = main -SPARSEARRAYS_SHA1 = 0dd8d45d55b305458d0d3d3451057589b684f72f +SPARSEARRAYS_SHA1 = 14333eae647464121150ae77d9f2dbe673aa244b SPARSEARRAYS_GIT_URL := https://github.com/JuliaSparse/SparseArrays.jl.git SPARSEARRAYS_TAR_URL = https://api.github.com/repos/JuliaSparse/SparseArrays.jl/tarball/$1 From 366a38e6ed694e807e80b67676acf7cd50395891 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Tue, 12 Nov 2024 10:41:17 +0900 Subject: [PATCH 427/537] simplifies the definitions of `@test_[no]warn` (#56525) Since the expressions generated by those macros are almost identical, the implementation could be changed to use a common helper function to create expressions for each case. --- stdlib/Test/src/Test.jl | 72 ++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 45 deletions(-) diff --git a/stdlib/Test/src/Test.jl b/stdlib/Test/src/Test.jl index e8c7d49d076aa..1b9505c59e327 100644 --- a/stdlib/Test/src/Test.jl +++ b/stdlib/Test/src/Test.jl @@ -887,28 +887,7 @@ Note: Warnings generated by `@warn` cannot be tested with this macro. Use [`@test_logs`](@ref) instead. """ macro test_warn(msg, expr) - quote - let fname = tempname() - try - f = open(fname, "w") - stdold = stderr - redirect_stderr(f) - ret = try - # We deliberately don't use the thunk versions of open/redirect - # to ensure that adding the macro does not change the toplevel-ness - # of the resulting expression. - $(esc(expr)) - finally - redirect_stderr(stdold) - close(f) - end - @test contains_warn(read(fname, String), $(esc(msg))) - ret - finally - rm(fname, force=true) - end - end - end + test_warn_expr(expr, msg) end """ @@ -921,32 +900,35 @@ Note: The absence of warnings generated by `@warn` cannot be tested with this macro. Use [`@test_logs`](@ref) instead. """ macro test_nowarn(expr) - quote - # Duplicate some code from `@test_warn` to allow printing the content of - # `stderr` again to `stderr` here while suppressing it for `@test_warn`. - # If that shouldn't be used, it would be possible to just use - # @test_warn isempty $(esc(expr)) - # here. - let fname = tempname() - try - f = open(fname, "w") - stdold = stderr - redirect_stderr(f) - ret = try - $(esc(expr)) - finally - redirect_stderr(stdold) - close(f) - end - stderr_content = read(fname, String) - print(stderr, stderr_content) # this is helpful for debugging - @test isempty(stderr_content) - ret + # allow printing the content of `stderr` again to `stderr` here while suppressing it + # for `@test_warn`. 
If that shouldn't be used, this could just be `test_warn_expr(expr, #=msg=#isempty)` + test_warn_expr(expr, function (s) + print(stderr, s) # this is helpful for debugging + isempty(s) + end) +end + +function test_warn_expr(@nospecialize(expr), @nospecialize(msg)) + return :(let fname = tempname() + try + f = open(fname, "w") + stdold = stderr + redirect_stderr(f) + ret = try + # We deliberately don't use the thunk versions of open/redirect + # to ensure that adding the macro does not change the toplevel-ness + # of the resulting expression. + $(esc(expr)) finally - rm(fname, force=true) + redirect_stderr(stdold) + close(f) end + @test contains_warn(read(fname, String), $(esc(msg))) + ret + finally + rm(fname, force=true) end - end + end) end #----------------------------------------------------------------------- From 001c666086de77101b6937c3d31f0888a35325db Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 11 Nov 2024 20:58:30 -0500 Subject: [PATCH 428/537] infer_compilation_signatures for more cases (#56495) This seems to have negligible impact on size or performance, but makes code quality much better. --- Compiler/src/abstractinterpretation.jl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index d9319c02b110a..edeb5d805b3d5 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -207,10 +207,9 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), rettype = from_interprocedural!(interp, rettype, sv, arginfo, conditionals) # Also considering inferring the compilation signature for this method, so - # it is available to the compiler in case it ends up needing it. + # it is available to the compiler in case it ends up needing it for the invoke. if (isa(sv, InferenceState) && infer_compilation_signature(interp) && - (seenall && 1 == napplicable) && rettype !== Any && rettype !== Bottom && - !is_removable_if_unused(all_effects)) + (seenall && 1 == napplicable) && !is_removable_if_unused(all_effects)) (; match) = applicable[1] method = match.method sig = match.spec_types From 45c5c9b492c9b99c52a386153fa2c0dd5ce6c4ab Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 12 Nov 2024 15:56:57 -0500 Subject: [PATCH 429/537] Move `compiler` tests to `Compiler` package (#56522) This does not yet make the compiler tests independently runnable using `] test Compiler`; it only moves the files and wires them up to continue running as part of the Base test runner. 
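As a quick, hedged pointer for readers (not part of the patch itself): with the test-runner wiring added later in this series (the `Makefile` and `choosetests.jl` hunks below), the moved tests should remain selectable through the usual Base mechanism, for example:

```julia
# Assumes the "Compiler" test group wiring from this patch series; note that
# `] test Compiler` is explicitly not supported yet, per the message above.
Base.runtests(["Compiler"])            # run all Compiler test groups
Base.runtests(["Compiler/inference"])  # or a single group such as inference
```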
--------- Co-authored-by: Shuhei Kadowaki --- Compiler/src/optimize.jl | 2 +- .../{EscapeAnalysis => }/EscapeAnalysis.jl | 2 +- .../{EscapeAnalysis => }/disjoint_set.jl | 0 Compiler/src/ssair/heap.jl | 3 -- .../test}/AbstractInterpreter.jl | 0 .../test}/EAUtils.jl | 0 .../test}/EscapeAnalysis.jl | 6 ++-- {test/compiler => Compiler/test}/codegen.jl | 0 {test/compiler => Compiler/test}/compact.jl | 0 .../compiler => Compiler/test}/contextual.jl | 0 .../test}/datastructures.jl | 0 {test/compiler => Compiler/test}/effects.jl | 0 {test/compiler => Compiler/test}/inference.jl | 0 {test/compiler => Compiler/test}/inline.jl | 2 +- .../test}/interpreter_exec.jl | 0 .../test}/invalidation.jl | 0 {test/compiler => Compiler/test}/irpasses.jl | 2 +- {test/compiler => Compiler/test}/irutils.jl | 0 {test/compiler => Compiler/test}/newinterp.jl | 0 Compiler/test/runtests.jl | 6 ++++ {test/compiler => Compiler/test}/ssair.jl | 0 {test/compiler => Compiler/test}/tarjan.jl | 0 Compiler/test/testgroups | 16 ++++++++++ .../compiler => Compiler/test}/validation.jl | 0 doc/src/devdocs/EscapeAnalysis.md | 4 +-- test/Makefile | 2 +- test/abstractarray.jl | 4 +-- test/choosetests.jl | 30 ++++++++++++------- test/interpreter.jl | 4 ++- test/operators.jl | 2 +- test/precompile_absint1.jl | 4 +-- test/precompile_absint2.jl | 4 +-- test/reflection.jl | 2 +- test/runtests.jl | 13 -------- test/scopedvalues.jl | 2 +- 35 files changed, 64 insertions(+), 46 deletions(-) rename Compiler/src/ssair/{EscapeAnalysis => }/EscapeAnalysis.jl (99%) rename Compiler/src/ssair/{EscapeAnalysis => }/disjoint_set.jl (100%) rename {test/compiler => Compiler/test}/AbstractInterpreter.jl (100%) rename {test/compiler/EscapeAnalysis => Compiler/test}/EAUtils.jl (100%) rename {test/compiler/EscapeAnalysis => Compiler/test}/EscapeAnalysis.jl (99%) rename {test/compiler => Compiler/test}/codegen.jl (100%) rename {test/compiler => Compiler/test}/compact.jl (100%) rename {test/compiler => Compiler/test}/contextual.jl (100%) rename {test/compiler => Compiler/test}/datastructures.jl (100%) rename {test/compiler => Compiler/test}/effects.jl (100%) rename {test/compiler => Compiler/test}/inference.jl (100%) rename {test/compiler => Compiler/test}/inline.jl (99%) rename {test/compiler => Compiler/test}/interpreter_exec.jl (100%) rename {test/compiler => Compiler/test}/invalidation.jl (100%) rename {test/compiler => Compiler/test}/irpasses.jl (99%) rename {test/compiler => Compiler/test}/irutils.jl (100%) rename {test/compiler => Compiler/test}/newinterp.jl (100%) create mode 100644 Compiler/test/runtests.jl rename {test/compiler => Compiler/test}/ssair.jl (100%) rename {test/compiler => Compiler/test}/tarjan.jl (100%) create mode 100644 Compiler/test/testgroups rename {test/compiler => Compiler/test}/validation.jl (100%) diff --git a/Compiler/src/optimize.jl b/Compiler/src/optimize.jl index 8cdd56f4c1a76..6de8973778c94 100644 --- a/Compiler/src/optimize.jl +++ b/Compiler/src/optimize.jl @@ -217,7 +217,7 @@ include("ssair/slot2ssa.jl") include("ssair/inlining.jl") include("ssair/verify.jl") include("ssair/legacy.jl") -include("ssair/EscapeAnalysis/EscapeAnalysis.jl") +include("ssair/EscapeAnalysis.jl") include("ssair/passes.jl") include("ssair/irinterp.jl") diff --git a/Compiler/src/ssair/EscapeAnalysis/EscapeAnalysis.jl b/Compiler/src/ssair/EscapeAnalysis.jl similarity index 99% rename from Compiler/src/ssair/EscapeAnalysis/EscapeAnalysis.jl rename to Compiler/src/ssair/EscapeAnalysis.jl index c3b1a8b641af4..648d9f4621578 100644 --- 
a/Compiler/src/ssair/EscapeAnalysis/EscapeAnalysis.jl +++ b/Compiler/src/ssair/EscapeAnalysis.jl @@ -33,7 +33,7 @@ using ..Compiler: # Core.Compiler specific definitions function include(x) if !isdefined(_TOP_MOD.Base, :end_base_include) # During bootstrap, all includes are relative to `base/` - x = ccall(:jl_prepend_string, Ref{String}, (Any, Any), "ssair/EscapeAnalysis/", x) + x = ccall(:jl_prepend_string, Ref{String}, (Any, Any), "ssair/", x) end _TOP_MOD.include(@__MODULE__, x) end diff --git a/Compiler/src/ssair/EscapeAnalysis/disjoint_set.jl b/Compiler/src/ssair/disjoint_set.jl similarity index 100% rename from Compiler/src/ssair/EscapeAnalysis/disjoint_set.jl rename to Compiler/src/ssair/disjoint_set.jl diff --git a/Compiler/src/ssair/heap.jl b/Compiler/src/ssair/heap.jl index 6e9883bc4ec60..1afb4eb5b2ffc 100644 --- a/Compiler/src/ssair/heap.jl +++ b/Compiler/src/ssair/heap.jl @@ -3,13 +3,11 @@ # Heap operations on flat vectors # ------------------------------- - # Binary heap indexing heapleft(i::Integer) = 2i heapright(i::Integer) = 2i + 1 heapparent(i::Integer) = div(i, 2) - # Binary min-heap percolate down. function percolate_down!(xs::Vector, i::Integer, x, o::Ordering, len::Integer=length(xs)) @inbounds while (l = heapleft(i)) <= len @@ -60,7 +58,6 @@ function heappush!(xs::Vector, x, o::Ordering) return xs end - """ heapify!(v, ord::Ordering) diff --git a/test/compiler/AbstractInterpreter.jl b/Compiler/test/AbstractInterpreter.jl similarity index 100% rename from test/compiler/AbstractInterpreter.jl rename to Compiler/test/AbstractInterpreter.jl diff --git a/test/compiler/EscapeAnalysis/EAUtils.jl b/Compiler/test/EAUtils.jl similarity index 100% rename from test/compiler/EscapeAnalysis/EAUtils.jl rename to Compiler/test/EAUtils.jl diff --git a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl b/Compiler/test/EscapeAnalysis.jl similarity index 99% rename from test/compiler/EscapeAnalysis/EscapeAnalysis.jl rename to Compiler/test/EscapeAnalysis.jl index 4799fe4cee5ca..2d9090263fafa 100644 --- a/test/compiler/EscapeAnalysis/EscapeAnalysis.jl +++ b/Compiler/test/EscapeAnalysis.jl @@ -1,15 +1,15 @@ module test_EA -const use_core_compiler = true +global use_core_compiler::Bool = true if use_core_compiler const EscapeAnalysis = Core.Compiler.EscapeAnalysis else - include(normpath(Sys.BINDIR, "..", "..", "base", "compiler", "ssair", "EscapeAnalysis", "EscapeAnalysis.jl")) + include(normpath(Sys.BINDIR, "..", "..", "Compiler", "src", "ssair", "EscapeAnalysis.jl")) end include("EAUtils.jl") -include("../irutils.jl") +include("irutils.jl") using Test, .EscapeAnalysis, .EAUtils using .EscapeAnalysis: ignore_argescape diff --git a/test/compiler/codegen.jl b/Compiler/test/codegen.jl similarity index 100% rename from test/compiler/codegen.jl rename to Compiler/test/codegen.jl diff --git a/test/compiler/compact.jl b/Compiler/test/compact.jl similarity index 100% rename from test/compiler/compact.jl rename to Compiler/test/compact.jl diff --git a/test/compiler/contextual.jl b/Compiler/test/contextual.jl similarity index 100% rename from test/compiler/contextual.jl rename to Compiler/test/contextual.jl diff --git a/test/compiler/datastructures.jl b/Compiler/test/datastructures.jl similarity index 100% rename from test/compiler/datastructures.jl rename to Compiler/test/datastructures.jl diff --git a/test/compiler/effects.jl b/Compiler/test/effects.jl similarity index 100% rename from test/compiler/effects.jl rename to Compiler/test/effects.jl diff --git a/test/compiler/inference.jl 
b/Compiler/test/inference.jl similarity index 100% rename from test/compiler/inference.jl rename to Compiler/test/inference.jl diff --git a/test/compiler/inline.jl b/Compiler/test/inline.jl similarity index 99% rename from test/compiler/inline.jl rename to Compiler/test/inline.jl index 9895471ab1b27..9d828fb7a4cfd 100644 --- a/test/compiler/inline.jl +++ b/Compiler/test/inline.jl @@ -254,7 +254,7 @@ let code = code_typed(f_pointerref, Tuple{Type{Int}})[1][1].code @test !any_ptrref end -# Test that inlining can inline _applys of builtins/_applys on SimpleVectors +# Test that inlining can inline _apply_iterate of builtins/_apply_iterate on SimpleVectors function foo_apply_apply_type_svec() A = (Tuple, Float32) B = Tuple{Float32, Float32} diff --git a/test/compiler/interpreter_exec.jl b/Compiler/test/interpreter_exec.jl similarity index 100% rename from test/compiler/interpreter_exec.jl rename to Compiler/test/interpreter_exec.jl diff --git a/test/compiler/invalidation.jl b/Compiler/test/invalidation.jl similarity index 100% rename from test/compiler/invalidation.jl rename to Compiler/test/invalidation.jl diff --git a/test/compiler/irpasses.jl b/Compiler/test/irpasses.jl similarity index 99% rename from test/compiler/irpasses.jl rename to Compiler/test/irpasses.jl index 13ef05db2f23a..b770b7373b5bc 100644 --- a/test/compiler/irpasses.jl +++ b/Compiler/test/irpasses.jl @@ -436,7 +436,7 @@ let src = code_typed1() do @test count(isnew, src.code) == 1 end -# should eliminate allocation whose address isn't taked even if it has uninitialized field(s) +# should eliminate allocation whose address isn't taken even if it has uninitialized field(s) mutable struct BadRef x::String y::String diff --git a/test/compiler/irutils.jl b/Compiler/test/irutils.jl similarity index 100% rename from test/compiler/irutils.jl rename to Compiler/test/irutils.jl diff --git a/test/compiler/newinterp.jl b/Compiler/test/newinterp.jl similarity index 100% rename from test/compiler/newinterp.jl rename to Compiler/test/newinterp.jl diff --git a/Compiler/test/runtests.jl b/Compiler/test/runtests.jl new file mode 100644 index 0000000000000..10e613c8f52af --- /dev/null +++ b/Compiler/test/runtests.jl @@ -0,0 +1,6 @@ +# This file is a part of Julia. 
License is MIT: https://julialang.org/license +using Test, Compiler + +for file in readlines(joinpath(@__DIR__, "testgroups")) + include(file * ".jl") +end diff --git a/test/compiler/ssair.jl b/Compiler/test/ssair.jl similarity index 100% rename from test/compiler/ssair.jl rename to Compiler/test/ssair.jl diff --git a/test/compiler/tarjan.jl b/Compiler/test/tarjan.jl similarity index 100% rename from test/compiler/tarjan.jl rename to Compiler/test/tarjan.jl diff --git a/Compiler/test/testgroups b/Compiler/test/testgroups new file mode 100644 index 0000000000000..44e9b388f4821 --- /dev/null +++ b/Compiler/test/testgroups @@ -0,0 +1,16 @@ +AbstractInterpreter +EscapeAnalysis +codegen +compact +contextual +datastructures +effects +inference +inline +interpreter_exec +invalidation +irpasses +newinterp +ssair +tarjan +validation diff --git a/test/compiler/validation.jl b/Compiler/test/validation.jl similarity index 100% rename from test/compiler/validation.jl rename to Compiler/test/validation.jl diff --git a/doc/src/devdocs/EscapeAnalysis.md b/doc/src/devdocs/EscapeAnalysis.md index ea874bf7371b0..815b9857f1674 100644 --- a/doc/src/devdocs/EscapeAnalysis.md +++ b/doc/src/devdocs/EscapeAnalysis.md @@ -22,10 +22,10 @@ defines the convenience entries `code_escapes` and `@code_escapes` for testing a ```@repl EAUtils let JULIA_DIR = normpath(Sys.BINDIR, "..", "share", "julia") # load `EscapeAnalysis` module to define the core analysis code - include(normpath(JULIA_DIR, "base", "compiler", "ssair", "EscapeAnalysis", "EscapeAnalysis.jl")) + include(normpath(JULIA_DIR, "Compiler", "src", "ssair", "EscapeAnalysis.jl")) using .EscapeAnalysis # load `EAUtils` module to define the utilities - include(normpath(JULIA_DIR, "test", "compiler", "EscapeAnalysis", "EAUtils.jl")) + include(normpath(JULIA_DIR, "Compiler", "test", "EAUtils.jl")) using .EAUtils end diff --git a/test/Makefile b/test/Makefile index 6ebdd3c764fd5..9b151cd213274 100644 --- a/test/Makefile +++ b/test/Makefile @@ -11,7 +11,7 @@ export JULIA_LOAD_PATH := @$(PATHSEP)@stdlib unexport JULIA_PROJECT := unexport JULIA_BINDIR := -TESTGROUPS = unicode strings compiler +TESTGROUPS = unicode strings compiler Compiler TESTS = all default stdlib $(TESTGROUPS) \ $(patsubst $(STDLIBDIR)/%/,%,$(dir $(wildcard $(STDLIBDIR)/*/.))) \ $(filter-out runtests testdefs relocatedepot, \ diff --git a/test/abstractarray.jl b/test/abstractarray.jl index 16b973544801a..2a2ec8e8e432c 100644 --- a/test/abstractarray.jl +++ b/test/abstractarray.jl @@ -2,7 +2,7 @@ using Random, LinearAlgebra -include("compiler/irutils.jl") +include(joinpath(@__DIR__,"../Compiler/test/irutils.jl")) isdefined(Main, :InfiniteArrays) || @eval Main include("testhelpers/InfiniteArrays.jl") using .Main.InfiniteArrays @@ -1885,7 +1885,7 @@ end end module IRUtils - include("compiler/irutils.jl") + include(joinpath(@__DIR__,"../Compiler/test/irutils.jl")) end function check_pointer_strides(A::AbstractArray) diff --git a/test/choosetests.jl b/test/choosetests.jl index affdee412bd86..ec757f42b42c1 100644 --- a/test/choosetests.jl +++ b/test/choosetests.jl @@ -45,6 +45,22 @@ const INTERNET_REQUIRED_LIST = [ const NETWORK_REQUIRED_LIST = vcat(INTERNET_REQUIRED_LIST, ["Sockets"]) +function test_path(test) + t = split(test, '/') + if t[1] in STDLIBS + if length(t) == 2 + return joinpath(STDLIB_DIR, t[1], "test", t[2]) + else + return joinpath(STDLIB_DIR, t[1], "test", "runtests") + end + elseif t[1] == "Compiler" + testpath = length(t) >= 2 ? 
t[2:end] : ("runtests",) + return joinpath(@__DIR__, "..", t[1], "test", testpath...) + else + return joinpath(@__DIR__, test) + end +end + """ `(; tests, net_on, exit_on_error, seed) = choosetests(choices)` selects a set of tests to be run. `choices` should be a vector of test names; if empty or set to @@ -154,13 +170,7 @@ function choosetests(choices = []) "strings/io", "strings/types", "strings/annotated"]) # do subarray before sparse but after linalg filtertests!(tests, "subarray") - filtertests!(tests, "compiler", [ - "compiler/datastructures", "compiler/inference", "compiler/effects", "compiler/compact", - "compiler/validation", "compiler/ssair", "compiler/irpasses", "compiler/tarjan", - "compiler/codegen", "compiler/inline", "compiler/contextual", "compiler/invalidation", - "compiler/AbstractInterpreter", "compiler/EscapeAnalysis/EscapeAnalysis"]) - filtertests!(tests, "compiler/EscapeAnalysis", [ - "compiler/EscapeAnalysis/EscapeAnalysis"]) + filtertests!(tests, "compiler", ["Compiler"]) filtertests!(tests, "stdlib", STDLIBS) filtertests!(tests, "internet_required", INTERNET_REQUIRED_LIST) # do ambiguous first to avoid failing if ambiguities are introduced by other tests @@ -207,8 +217,8 @@ function choosetests(choices = []) new_tests = String[] for test in tests - if test in STDLIBS - testfile = joinpath(STDLIB_DIR, test, "test", "testgroups") + if test in STDLIBS || test == "Compiler" + testfile = test_path("$test/testgroups") if isfile(testfile) testgroups = readlines(testfile) length(testgroups) == 0 && error("no testgroups defined for $test") @@ -218,7 +228,7 @@ function choosetests(choices = []) end end end - filter!(x -> (x != "stdlib" && !(x in STDLIBS)) , tests) + filter!(x -> (x != "stdlib" && !(x in STDLIBS) && x != "Compiler") , tests) append!(tests, new_tests) requested_all || explicit_pkg || filter!(x -> x != "Pkg", tests) diff --git a/test/interpreter.jl b/test/interpreter.jl index e25b5f0c8511a..012a0f7fe7859 100644 --- a/test/interpreter.jl +++ b/test/interpreter.jl @@ -35,4 +35,6 @@ end @test success(pipeline(`$(Base.julia_cmd()) --compile=min -E 'include("staged.jl")'`; stderr)) # Test contextual execution mechanism in interpreter (#54360) -@test success(pipeline(`$(Base.julia_cmd()) --compile=min -E 'include("compiler/contextual.jl")'`; stderr)) +let compiler_contextual_test = escape_string(joinpath(@__DIR__,"../Compiler/test/contextual.jl")) + @test success(pipeline(`$(Base.julia_cmd()) --compile=min -E "include(\"$compiler_contextual_test\")"`; stderr)) +end diff --git a/test/operators.jl b/test/operators.jl index d97db15def80f..2e22238c3e9d9 100644 --- a/test/operators.jl +++ b/test/operators.jl @@ -2,7 +2,7 @@ using Random: randstring -include("compiler/irutils.jl") +include(joinpath(@__DIR__,"../Compiler/test/irutils.jl")) @testset "ifelse" begin @test ifelse(true, 1, 2) == 1 diff --git a/test/precompile_absint1.jl b/test/precompile_absint1.jl index ab36af163dc50..4202bf72b793f 100644 --- a/test/precompile_absint1.jl +++ b/test/precompile_absint1.jl @@ -10,12 +10,12 @@ precompile_test_harness() do load_path basic_caller(x) = basic_callee(x) end) |> string) - newinterp_path = abspath("compiler/newinterp.jl") + newinterp_path = abspath(joinpath(@__DIR__,"../Compiler/test/newinterp.jl")) write(joinpath(load_path, "TestAbsIntPrecompile1.jl"), :(module TestAbsIntPrecompile1 import SimpleModule: basic_caller, basic_callee module Custom - include("$($newinterp_path)") + include($newinterp_path) @newinterp PrecompileInterpreter end diff --git 
a/test/precompile_absint2.jl b/test/precompile_absint2.jl index 75b84e26e06c6..19317bf7b0683 100644 --- a/test/precompile_absint2.jl +++ b/test/precompile_absint2.jl @@ -10,13 +10,13 @@ precompile_test_harness() do load_path basic_caller(x) = basic_callee(x) end) |> string) - newinterp_path = abspath("compiler/newinterp.jl") + newinterp_path = abspath(joinpath(@__DIR__,"../Compiler/test/newinterp.jl")) write(joinpath(load_path, "TestAbsIntPrecompile2.jl"), :(module TestAbsIntPrecompile2 import SimpleModule: basic_caller, basic_callee module Custom const CC = Core.Compiler - include("$($newinterp_path)") + include($newinterp_path) @newinterp PrecompileInterpreter struct CustomData inferred diff --git a/test/reflection.jl b/test/reflection.jl index 8c701acb9c09d..9aa8fe512cd7c 100644 --- a/test/reflection.jl +++ b/test/reflection.jl @@ -2,7 +2,7 @@ using Test -include("compiler/irutils.jl") +include(joinpath(@__DIR__,"../Compiler/test/irutils.jl")) # code_native / code_llvm (issue #8239) # It's hard to really test these, but just running them should be diff --git a/test/runtests.jl b/test/runtests.jl index e48e896f4069e..67a15c0a03a1f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -44,19 +44,6 @@ else end limited_worker_rss = max_worker_rss != typemax(Csize_t) -function test_path(test) - t = split(test, '/') - if t[1] in STDLIBS - if length(t) == 2 - return joinpath(STDLIB_DIR, t[1], "test", t[2]) - else - return joinpath(STDLIB_DIR, t[1], "test", "runtests") - end - else - return joinpath(@__DIR__, test) - end -end - # Check all test files exist isfiles = isfile.(test_path.(tests) .* ".jl") if !all(isfiles) diff --git a/test/scopedvalues.jl b/test/scopedvalues.jl index 2c2f4a510c1c9..2c38a0642ce24 100644 --- a/test/scopedvalues.jl +++ b/test/scopedvalues.jl @@ -2,7 +2,7 @@ using Base.ScopedValues -include("compiler/irutils.jl") +include(joinpath(@__DIR__,"../Compiler/test/irutils.jl")) @testset "errors" begin @test ScopedValue{Float64}(1)[] == 1.0 From 505907bd11618e97e9f8d565487cf245df772362 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 12 Nov 2024 17:35:51 -0500 Subject: [PATCH 430/537] Add lowering and interpreter support for `:latestworld` (#56523) Split out from #56509 to facilitate adjusting downstream packages. --- base/boot.jl | 2 ++ src/ast.c | 2 ++ src/interpreter.c | 3 +++ src/julia-syntax.scm | 8 +++++++- src/julia_internal.h | 1 + src/toplevel.c | 3 ++- 6 files changed, 17 insertions(+), 2 deletions(-) diff --git a/base/boot.jl b/base/boot.jl index 88a4e7438671e..0df0cde64f8c0 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -271,6 +271,8 @@ macro nospecialize(x) end Expr(@nospecialize args...) = _expr(args...) 
+macro latestworld() Expr(:latestworld) end + _is_internal(__module__) = __module__ === Core # can be used in place of `@assume_effects :total` (supposed to be used for bootstrapping) macro _total_meta() diff --git a/src/ast.c b/src/ast.c index ea1de429a946c..474c0661f5230 100644 --- a/src/ast.c +++ b/src/ast.c @@ -119,6 +119,7 @@ JL_DLLEXPORT jl_sym_t *jl_release_sym; JL_DLLEXPORT jl_sym_t *jl_acquire_release_sym; JL_DLLEXPORT jl_sym_t *jl_sequentially_consistent_sym; JL_DLLEXPORT jl_sym_t *jl_uninferred_sym; +JL_DLLEXPORT jl_sym_t *jl_latestworld_sym; static const uint8_t flisp_system_image[] = { #include @@ -461,6 +462,7 @@ void jl_init_common_symbols(void) jl_acquire_release_sym = jl_symbol("acquire_release"); jl_sequentially_consistent_sym = jl_symbol("sequentially_consistent"); jl_uninferred_sym = jl_symbol("uninferred"); + jl_latestworld_sym = jl_symbol("latestworld"); } JL_DLLEXPORT void jl_lisp_prompt(void) diff --git a/src/interpreter.c b/src/interpreter.c index f9d981687c631..13dc45cf2ae6e 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -643,6 +643,9 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, size_t ip, jl_eval_const_decl(s->module, jl_exprarg(stmt, 0), val); s->locals[jl_source_nslots(s->src) + s->ip] = jl_nothing; } + else if (head == jl_latestworld_sym) { + ct->world_age = jl_atomic_load_acquire(&jl_world_counter); + } else if (jl_is_toplevel_only_expr(stmt)) { jl_toplevel_eval(s->module, stmt); } diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index 7acc8a1954bc5..e82c436e5a730 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -4512,6 +4512,7 @@ f(x) = yt(x) ((struct_type) "\"struct\" expression") ((method) "method definition") ((set_binding_type!) (string "type declaration for global \"" (deparse (cadr e)) "\"")) + ((latestworld) "World age increment") (else (string "\"" h "\" expression")))) (if (not (null? (cadr lam))) (error (string (head-to-text (car e)) " not at top level")))) @@ -4979,8 +4980,13 @@ f(x) = yt(x) (if tail (emit-return tail val)) val)) + ((latestworld-if-toplevel) + (if (null? (cadr lam)) + (emit `(latestworld))) + '(null)) + ;; other top level expressions - ((import using export public) + ((import using export public latestworld) (check-top-level e) (emit e) (let ((have-ret? (and (pair? code) (pair? (car code)) (eq? 
(caar code) 'return)))) diff --git a/src/julia_internal.h b/src/julia_internal.h index 776fea3b1dbf1..db09477de287b 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -1856,6 +1856,7 @@ extern JL_DLLEXPORT jl_sym_t *jl_release_sym; extern JL_DLLEXPORT jl_sym_t *jl_acquire_release_sym; extern JL_DLLEXPORT jl_sym_t *jl_sequentially_consistent_sym; extern JL_DLLEXPORT jl_sym_t *jl_uninferred_sym; +extern JL_DLLEXPORT jl_sym_t *jl_latestworld_sym; JL_DLLEXPORT enum jl_memory_order jl_get_atomic_order(jl_sym_t *order, char loading, char storing); JL_DLLEXPORT enum jl_memory_order jl_get_atomic_order_checked(jl_sym_t *order, char loading, char storing); diff --git a/src/toplevel.c b/src/toplevel.c index b0163683cf87c..cedc008af5cd0 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -607,7 +607,8 @@ int jl_is_toplevel_only_expr(jl_value_t *e) JL_NOTSAFEPOINT ((jl_expr_t*)e)->head == jl_const_sym || ((jl_expr_t*)e)->head == jl_toplevel_sym || ((jl_expr_t*)e)->head == jl_error_sym || - ((jl_expr_t*)e)->head == jl_incomplete_sym); + ((jl_expr_t*)e)->head == jl_incomplete_sym || + ((jl_expr_t*)e)->head == jl_latestworld_sym); } int jl_needs_lowering(jl_value_t *e) JL_NOTSAFEPOINT From fa9f0de253ea102f94751505efbb7eb0f91eeb6b Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 7 Nov 2024 16:18:43 +0000 Subject: [PATCH 431/537] do union split and concrete compilation search --- src/gf.c | 8 ++- src/julia_internal.h | 1 + src/precompile_utils.c | 130 +++++++++++++++++++++++------------------ 3 files changed, 80 insertions(+), 59 deletions(-) diff --git a/src/gf.c b/src/gf.c index 18141410fa625..3ff87772a2320 100644 --- a/src/gf.c +++ b/src/gf.c @@ -3188,6 +3188,12 @@ JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tuplet } } +JL_DLLEXPORT void jl_compile_method_sig(jl_method_t *m, jl_value_t *types, jl_svec_t *env, size_t world) +{ + jl_method_instance_t *mi = jl_specializations_get_linfo(m, types, env); + jl_compile_method_instance(mi, NULL, world); +} + JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types) { size_t world = jl_atomic_load_acquire(&jl_world_counter); @@ -3197,7 +3203,7 @@ JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types) if (mi == NULL) return 0; JL_GC_PROMISE_ROOTED(mi); - jl_compile_method_instance(mi, types, world); + jl_compile_method_instance(mi, NULL, world); return 1; } diff --git a/src/julia_internal.h b/src/julia_internal.h index db09477de287b..a093cc5d21b14 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -695,6 +695,7 @@ JL_DLLEXPORT jl_module_t *jl_debuginfo_module1(jl_value_t *debuginfo_def) JL_NOT JL_DLLEXPORT const char *jl_debuginfo_name(jl_value_t *func) JL_NOTSAFEPOINT; JL_DLLEXPORT void jl_compile_method_instance(jl_method_instance_t *mi, jl_tupletype_t *types, size_t world); +JL_DLLEXPORT void jl_compile_method_sig(jl_method_t *m, jl_value_t *types, jl_svec_t *sparams, size_t world); JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types); JL_DLLEXPORT int jl_add_entrypoint(jl_tupletype_t *types); jl_code_info_t *jl_code_for_interpreter(jl_method_instance_t *lam JL_PROPAGATES_ROOT, size_t world); diff --git a/src/precompile_utils.c b/src/precompile_utils.c index 01e8a2040a751..d008cd26a28e9 100644 --- a/src/precompile_utils.c +++ b/src/precompile_utils.c @@ -1,6 +1,8 @@ -// f{<:Union{...}}(...) is a common pattern -// and expanding the Union may give a leaf function -static void _compile_all_tvar_union(jl_value_t *methsig) +// This file is a part of Julia. 
License is MIT: https://julialang.org/license + +// f(...) where {T<:Union{...}} is a common pattern +// and expanding the Union may give some leaf functions +static int _compile_all_tvar_union(jl_value_t *methsig) { int tvarslen = jl_subtype_env_size(methsig); jl_value_t *sigbody = methsig; @@ -13,79 +15,86 @@ static void _compile_all_tvar_union(jl_value_t *methsig) assert(jl_is_unionall(sigbody)); idx[i] = 0; env[2 * i] = (jl_value_t*)((jl_unionall_t*)sigbody)->var; - env[2 * i + 1] = jl_bottom_type; // initialize the list with Union{}, since T<:Union{} is always a valid option + jl_value_t *tv = env[2 * i]; + while (jl_is_typevar(tv)) + tv = ((jl_tvar_t*)tv)->ub; + if (jl_is_abstracttype(tv) && !jl_is_type_type(tv)) { + JL_GC_POP(); + return 0; // Any as TypeVar is common and not useful here to try to analyze further + } + env[2 * i + 1] = tv; sigbody = ((jl_unionall_t*)sigbody)->body; } - for (i = 0; i < tvarslen; /* incremented by inner loop */) { - jl_value_t **sig = &roots[0]; + int all = 1; + int incr = 0; + while (!incr) { + for (i = 0, incr = 1; i < tvarslen; i++) { + jl_value_t *tv = env[2 * i]; + while (jl_is_typevar(tv)) + tv = ((jl_tvar_t*)tv)->ub; + if (jl_is_uniontype(tv)) { + size_t l = jl_count_union_components(tv); + size_t j = idx[i]; + env[2 * i + 1] = jl_nth_union_component(tv, j); + ++j; + if (incr) { + if (j == l) { + idx[i] = 0; + } + else { + idx[i] = j; + incr = 0; + } + } + } + } + jl_value_t *sig = NULL; JL_TRY { // TODO: wrap in UnionAll for each tvar in env[2*i + 1] ? // currently doesn't matter much, since jl_compile_hint doesn't work on abstract types - *sig = (jl_value_t*)jl_instantiate_type_with(sigbody, env, tvarslen); + sig = (jl_value_t*)jl_instantiate_type_with(sigbody, env, tvarslen); } JL_CATCH { - goto getnext; // sigh, we found an invalid type signature. should we warn the user? - } - if (!jl_has_concrete_subtype(*sig)) - goto getnext; // signature wouldn't be callable / is invalid -- skip it - if (jl_is_concrete_type(*sig)) { - if (jl_compile_hint((jl_tupletype_t *)*sig)) - goto getnext; // success + sig = NULL; } - - getnext: - for (i = 0; i < tvarslen; i++) { - jl_tvar_t *tv = (jl_tvar_t*)env[2 * i]; - if (jl_is_uniontype(tv->ub)) { - size_t l = jl_count_union_components(tv->ub); - size_t j = idx[i]; - if (j == l) { - env[2 * i + 1] = jl_bottom_type; - idx[i] = 0; - } - else { - jl_value_t *ty = jl_nth_union_component(tv->ub, j); - if (!jl_is_concrete_type(ty)) - ty = (jl_value_t*)jl_new_typevar(tv->name, tv->lb, ty); - env[2 * i + 1] = ty; - idx[i] = j + 1; - break; - } - } - else { - env[2 * i + 1] = (jl_value_t*)tv; - } + if (sig) { + roots[0] = sig; + if (jl_is_datatype(sig) && jl_has_concrete_subtype(sig)) + all = all && jl_compile_hint((jl_tupletype_t*)sig); + else + all = 0; } } JL_GC_POP(); + return all; } // f(::Union{...}, ...) is a common pattern // and expanding the Union may give a leaf function -static void _compile_all_union(jl_value_t *sig) +static int _compile_all_union(jl_value_t *sig) { jl_tupletype_t *sigbody = (jl_tupletype_t*)jl_unwrap_unionall(sig); size_t count_unions = 0; + size_t union_size = 1; size_t i, l = jl_svec_len(sigbody->parameters); jl_svec_t *p = NULL; jl_value_t *methsig = NULL; for (i = 0; i < l; i++) { jl_value_t *ty = jl_svecref(sigbody->parameters, i); - if (jl_is_uniontype(ty)) - ++count_unions; - else if (ty == jl_bottom_type) - return; // why does this method exist? 
- else if (jl_is_datatype(ty) && !jl_has_free_typevars(ty) && - ((!jl_is_kind(ty) && ((jl_datatype_t*)ty)->isconcretetype) || - ((jl_datatype_t*)ty)->name == jl_type_typename)) - return; // no amount of union splitting will make this a leaftype signature + if (jl_is_uniontype(ty)) { + count_unions += 1; + union_size *= jl_count_union_components(ty); + } + else if (jl_is_datatype(ty) && + ((!((jl_datatype_t*)ty)->isconcretetype || jl_is_kind(ty)) && + ((jl_datatype_t*)ty)->name != jl_type_typename)) + return 0; // no amount of union splitting will make this a dispatch signature } - if (count_unions == 0 || count_unions >= 6) { - _compile_all_tvar_union(sig); - return; + if (union_size <= 1 || union_size > 8) { + return _compile_all_tvar_union(sig); } int *idx = (int*)alloca(sizeof(int) * count_unions); @@ -93,6 +102,7 @@ static void _compile_all_union(jl_value_t *sig) idx[i] = 0; } + int all = 1; JL_GC_PUSH2(&p, &methsig); int idx_ctr = 0, incr = 0; while (!incr) { @@ -122,10 +132,12 @@ static void _compile_all_union(jl_value_t *sig) } methsig = jl_apply_tuple_type(p, 1); methsig = jl_rewrap_unionall(methsig, sig); - _compile_all_tvar_union(methsig); + if (!_compile_all_tvar_union(methsig)) + all = 0; } JL_GC_POP(); + return all; } static int compile_all_collect__(jl_typemap_entry_t *ml, void *env) @@ -147,29 +159,32 @@ static int compile_all_collect_(jl_methtable_t *mt, void *env) return 1; } -static void jl_compile_all_defs(jl_array_t *mis) +static void jl_compile_all_defs(jl_array_t *mis, int all) { jl_array_t *allmeths = jl_alloc_vec_any(0); JL_GC_PUSH1(&allmeths); jl_foreach_reachable_mtable(compile_all_collect_, allmeths); + size_t world = jl_atomic_load_acquire(&jl_world_counter); size_t i, l = jl_array_nrows(allmeths); for (i = 0; i < l; i++) { jl_method_t *m = (jl_method_t*)jl_array_ptr_ref(allmeths, i); if (jl_is_datatype(m->sig) && jl_isa_compileable_sig((jl_tupletype_t*)m->sig, jl_emptysvec, m)) { // method has a single compilable specialization, e.g. its definition // signature is concrete. in this case we can just hint it. 
- jl_compile_hint((jl_tupletype_t*)m->sig); + jl_compile_method_sig(m, m->sig, jl_emptysvec, world); } else { // first try to create leaf signatures from the signature declaration and compile those _compile_all_union(m->sig); - // finally, compile a fully generic fallback that can work for all arguments - jl_method_instance_t *unspec = jl_get_unspecialized(m); - if (unspec) - jl_array_ptr_1d_push(mis, (jl_value_t*)unspec); + if (all) { + // finally, compile a fully generic fallback that can work for all arguments (even invoke) + jl_method_instance_t *unspec = jl_get_unspecialized(m); + if (unspec) + jl_array_ptr_1d_push(mis, (jl_value_t*)unspec); + } } } @@ -273,8 +288,7 @@ static void *jl_precompile(int all) // array of MethodInstances and ccallable aliases to include in the output jl_array_t *m = jl_alloc_vec_any(0); JL_GC_PUSH1(&m); - if (all) - jl_compile_all_defs(m); + jl_compile_all_defs(m, all); jl_foreach_reachable_mtable(precompile_enq_all_specializations_, m); void *native_code = jl_precompile_(m, 0); JL_GC_POP(); From 8f24144d99bf2ba3d803d9db34c5d9ef8969b699 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 12 Nov 2024 16:43:21 +0000 Subject: [PATCH 432/537] aotcompile: add missing codegen support for OC --- src/aotcompile.cpp | 37 +++++++++++++++++++++++-------------- src/staticdata.c | 13 ++++++++++++- 2 files changed, 35 insertions(+), 15 deletions(-) diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index a3ffdf1d051a9..06bc9269a02cc 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -524,7 +524,8 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm // Const returns do not do codegen, but juliac inspects codegen results so make a dummy fvar entry to represent it if (jl_options.trim != JL_TRIM_NO && jl_atomic_load_relaxed(&codeinst->invoke) == jl_fptr_const_return_addr) { data->jl_fvar_map[codeinst] = std::make_tuple((uint32_t)-3, (uint32_t)-3); - } else { + } + else { JL_GC_PROMISE_ROOTED(codeinst->rettype); orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(codeinst->def), params.tsctx, clone.getModuleUnlocked()->getDataLayout(), @@ -609,6 +610,9 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm else if (func == "jl_fptr_sparam") { func_id = -2; } + else if (decls.functionObject == "jl_f_opaque_closure_call") { + func_id = -4; + } else { //Safe b/c context is locked by params data->jl_sysimg_fvars.push_back(cast(clone.getModuleUnlocked()->getNamedValue(func))); @@ -904,11 +908,13 @@ static bool canPartition(const GlobalValue &G) { return true; } -static inline bool verify_partitioning(const SmallVectorImpl &partitions, const Module &M, size_t fvars_size, size_t gvars_size) { +static inline bool verify_partitioning(const SmallVectorImpl &partitions, const Module &M, DenseMap &fvars, DenseMap &gvars) { bool bad = false; #ifndef JL_NDEBUG - SmallVector fvars(fvars_size); - SmallVector gvars(gvars_size); + size_t fvars_size = fvars.size(); + size_t gvars_size = gvars.size(); + SmallVector fvars_partition(fvars_size); + SmallVector gvars_partition(gvars_size); StringMap GVNames; for (uint32_t i = 0; i < partitions.size(); i++) { for (auto &name : partitions[i].globals) { @@ -919,18 +925,18 @@ static inline bool verify_partitioning(const SmallVectorImpl &partiti GVNames[name.getKey()] = i; } for (auto &fvar : partitions[i].fvars) { - if (fvars[fvar.second] != 0) { + if (fvars_partition[fvar.second] != 0) { bad = true; - dbgs() << "Duplicate fvar " << fvar.first() << " in 
partitions " << i << " and " << fvars[fvar.second] - 1 << "\n"; + dbgs() << "Duplicate fvar " << fvar.first() << " in partitions " << i << " and " << fvars_partition[fvar.second] - 1 << "\n"; } - fvars[fvar.second] = i+1; + fvars_partition[fvar.second] = i+1; } for (auto &gvar : partitions[i].gvars) { - if (gvars[gvar.second] != 0) { + if (gvars_partition[gvar.second] != 0) { bad = true; - dbgs() << "Duplicate gvar " << gvar.first() << " in partitions " << i << " and " << gvars[gvar.second] - 1 << "\n"; + dbgs() << "Duplicate gvar " << gvar.first() << " in partitions " << i << " and " << gvars_partition[gvar.second] - 1 << "\n"; } - gvars[gvar.second] = i+1; + gvars_partition[gvar.second] = i+1; } } for (auto &GV : M.global_values()) { @@ -967,13 +973,14 @@ static inline bool verify_partitioning(const SmallVectorImpl &partiti } } for (uint32_t i = 0; i < fvars_size; i++) { - if (fvars[i] == 0) { + if (fvars_partition[i] == 0) { + auto gv = find_if(fvars.begin(), fvars.end(), [i](auto var) { return var.second == i; }); bad = true; - dbgs() << "fvar " << i << " not in any partition\n"; + dbgs() << "fvar " << gv->first->getName() << " at " << i << " not in any partition\n"; } } for (uint32_t i = 0; i < gvars_size; i++) { - if (gvars[i] == 0) { + if (gvars_partition[i] == 0) { bad = true; dbgs() << "gvar " << i << " not in any partition\n"; } @@ -1117,7 +1124,9 @@ static SmallVector partitionModule(Module &M, unsigned threads) { } } - bool verified = verify_partitioning(partitions, M, fvars.size(), gvars.size()); + bool verified = verify_partitioning(partitions, M, fvars, gvars); + if (!verified) + M.dump(); assert(verified && "Partitioning failed to partition globals correctly"); (void) verified; diff --git a/src/staticdata.c b/src/staticdata.c index 6b225d3808c8b..542a468acd1c6 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -587,6 +587,7 @@ typedef enum { JL_API_BOXED, JL_API_CONST, JL_API_WITH_PARAMETERS, + JL_API_OC_CALL, JL_API_INTERPRETED, JL_API_BUILTIN, JL_API_MAX @@ -1797,6 +1798,12 @@ static void jl_write_values(jl_serializer_state *s) JL_GC_DISABLED else if (invokeptr_id == -2) { fptr_id = JL_API_WITH_PARAMETERS; } + else if (invokeptr_id == -3) { + abort(); + } + else if (invokeptr_id == -4) { + fptr_id = JL_API_OC_CALL; + } else { assert(invokeptr_id > 0); ios_ensureroom(s->fptr_record, invokeptr_id * sizeof(void*)); @@ -2033,11 +2040,15 @@ static inline uintptr_t get_item_for_reloc(jl_serializer_state *s, uintptr_t bas case JL_API_BOXED: if (s->image->fptrs.nptrs) return (uintptr_t)jl_fptr_args; - JL_FALLTHROUGH; + return (uintptr_t)NULL; case JL_API_WITH_PARAMETERS: if (s->image->fptrs.nptrs) return (uintptr_t)jl_fptr_sparam; return (uintptr_t)NULL; + case JL_API_OC_CALL: + if (s->image->fptrs.nptrs) + return (uintptr_t)jl_f_opaque_closure_call; + return (uintptr_t)NULL; case JL_API_CONST: return (uintptr_t)jl_fptr_const_return; case JL_API_INTERPRETED: From 882f9401ec4a43d3ac1d65124d695e6a4a926e77 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 12 Nov 2024 19:43:34 +0000 Subject: [PATCH 433/537] aotcompile: reimplement canPartition correctly This could previously cause any use of llvmcall to crash during ji generate or generate bad code. Now it uses the llvm attribute to specify this correctly. 
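To make the failure mode a bit more concrete, the following is a hedged, hypothetical sketch of the kind of user code the message refers to; the commit gives no reproducer, and the link between `llvmcall` and the LLVM `alwaysinline` attribute is inferred from the new `canPartition` check rather than stated.

```julia
# Assumption: functions generated from Base.llvmcall carry the LLVM alwaysinline
# attribute, so the multithreaded image partitioner must keep them with their
# callers instead of splitting them into a separate partition.
add42(x::Int32) = Base.llvmcall("""
    %r = add i32 %0, 42
    ret i32 %r""", Int32, Tuple{Int32}, x)

add42(Int32(1))                # ordinary use
precompile(add42, (Int32,))    # the crash reportedly surfaced when emitting images containing such code
```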
--- src/aotcompile.cpp | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index 06bc9269a02cc..ff14901c2e47f 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -900,12 +900,9 @@ struct Partition { size_t weight; }; -static bool canPartition(const GlobalValue &G) { - if (auto F = dyn_cast(&G)) { - if (F->hasFnAttribute(Attribute::AlwaysInline)) - return false; - } - return true; +static bool canPartition(const Function &F) +{ + return !F.hasFnAttribute(Attribute::AlwaysInline); } static inline bool verify_partitioning(const SmallVectorImpl &partitions, const Module &M, DenseMap &fvars, DenseMap &gvars) { @@ -947,13 +944,6 @@ static inline bool verify_partitioning(const SmallVectorImpl &partiti } } else { // Local global values are not partitioned - if (!canPartition(GV)) { - if (GVNames.count(GV.getName())) { - bad = true; - dbgs() << "Shouldn't have partitioned " << GV.getName() << ", but is in partition " << GVNames[GV.getName()] << "\n"; - } - continue; - } if (!GVNames.count(GV.getName())) { bad = true; dbgs() << "Global " << GV << " not in any partition\n"; @@ -1042,8 +1032,6 @@ static SmallVector partitionModule(Module &M, unsigned threads) { for (auto &G : M.global_values()) { if (G.isDeclaration()) continue; - if (!canPartition(G)) - continue; // Currently ccallable global aliases have extern linkage, we only want to make the // internally linked functions/global variables extern+hidden if (G.hasLocalLinkage()) { @@ -1052,7 +1040,8 @@ static SmallVector partitionModule(Module &M, unsigned threads) { } if (auto F = dyn_cast(&G)) { partitioner.make(&G, getFunctionWeight(*F).weight); - } else { + } + else { partitioner.make(&G, 1); } } @@ -1380,6 +1369,12 @@ static void materializePreserved(Module &M, Partition &partition) { continue; if (Preserve.contains(&F)) continue; + if (!canPartition(F)) { + F.setLinkage(GlobalValue::AvailableExternallyLinkage); + F.setVisibility(GlobalValue::HiddenVisibility); + F.setDSOLocal(true); + continue; + } F.deleteBody(); F.setLinkage(GlobalValue::ExternalLinkage); F.setVisibility(GlobalValue::HiddenVisibility); From 1edc6f1b7752ed67059020ba7ce174dffa225954 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 12 Nov 2024 21:00:27 -0500 Subject: [PATCH 434/537] Merge Compiler.isready and Base.isready (#56536) These didn't get merged when the Compiler moved out, because the Base function doesn't get defined until very late in the build process. However, they are semantically the same, so merge their method tables. --- Compiler/src/Compiler.jl | 2 +- base/Base_compiler.jl | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index edaf0c9332584..41ae149dce372 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -57,7 +57,7 @@ using Base: Ordering, vect, EffectsOverride, BitVector, @_gc_preserve_begin, @_g using Base.Order import Base: getindex, setindex!, length, iterate, push!, isempty, first, convert, ==, copy, popfirst!, in, haskey, resize!, copy!, append!, last, get!, size, - get, iterate, findall, min_world, max_world, _topmod + get, iterate, findall, min_world, max_world, _topmod, isready const getproperty = Core.getfield const setproperty! = Core.setfield! 
diff --git a/base/Base_compiler.jl b/base/Base_compiler.jl index a860414454634..b2633c25eef3f 100644 --- a/base/Base_compiler.jl +++ b/base/Base_compiler.jl @@ -286,6 +286,8 @@ function process_sysimg_args!() end process_sysimg_args!() +function isready end + include(strcat(BUILDROOT, "../usr/share/julia/Compiler/src/Compiler.jl")) const _return_type = Compiler.return_type From c5ee5db2e8305d36b045fa28e213311553c5ebe0 Mon Sep 17 00:00:00 2001 From: Morten Piibeleht Date: Thu, 14 Nov 2024 00:56:56 +1300 Subject: [PATCH 435/537] Update Documenter 1.4.0 => 1.8.0 (#56538) Includes a bunch small change, but importantly the fix for the search not opening. I think we should also backport this to 1.10 and 1.11, but those should probably be done manually, to avoid manifest conflicts. Should I just PR the relevant `release-*` branches? --- doc/Manifest.toml | 63 ++++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/doc/Manifest.toml b/doc/Manifest.toml index c0f8b693bd1ac..490754c4c3068 100644 --- a/doc/Manifest.toml +++ b/doc/Manifest.toml @@ -28,9 +28,9 @@ version = "1.11.0" [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] -git-tree-sha1 = "59939d8a997469ee05c4b4944560a820f9ba0d73" +git-tree-sha1 = "bce6804e5e6044c6daab27bb533d1295e4a2e759" uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.7.4" +version = "0.7.6" [[deps.Dates]] deps = ["Printf"] @@ -45,9 +45,9 @@ version = "0.9.3" [[deps.Documenter]] deps = ["ANSIColoredPrinters", "AbstractTrees", "Base64", "CodecZlib", "Dates", "DocStringExtensions", "Downloads", "Git", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "MarkdownAST", "Pkg", "PrecompileTools", "REPL", "RegistryInstances", "SHA", "TOML", "Test", "Unicode"] -git-tree-sha1 = "f15a91e6e3919055efa4f206f942a73fedf5dfe6" +git-tree-sha1 = "d0ea2c044963ed6f37703cead7e29f70cba13d7e" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "1.4.0" +version = "1.8.0" [[deps.Downloads]] deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] @@ -56,9 +56,9 @@ version = "1.6.0" [[deps.Expat_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "4558ab818dcceaab612d1bb8c19cee87eda2b83c" +git-tree-sha1 = "1c6317308b9dc757616f0b5cb379db10494443a7" uuid = "2e619515-83b5-522b-bb60-26c02a35a201" -version = "2.5.0+0" +version = "2.6.2+0" [[deps.FileWatching]] uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" @@ -72,15 +72,15 @@ version = "1.3.1" [[deps.Git_jll]] deps = ["Artifacts", "Expat_jll", "JLLWrappers", "LibCURL_jll", "Libdl", "Libiconv_jll", "OpenSSL_jll", "PCRE2_jll", "Zlib_jll"] -git-tree-sha1 = "d18fb8a1f3609361ebda9bf029b60fd0f120c809" +git-tree-sha1 = "ea372033d09e4552a04fd38361cd019f9003f4f4" uuid = "f8c6e375-362e-5223-8a59-34ff63f689eb" -version = "2.44.0+2" +version = "2.46.2+0" [[deps.IOCapture]] deps = ["Logging", "Random"] -git-tree-sha1 = "8b72179abc660bfab5e28472e019392b97d0985c" +git-tree-sha1 = "b6d6bfdd7ce25b0f9b2f6b3dd56b2673a66c8770" uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" -version = "0.2.4" +version = "0.2.5" [[deps.InteractiveUtils]] deps = ["Markdown"] @@ -89,9 +89,9 @@ version = "1.11.0" [[deps.JLLWrappers]] deps = ["Artifacts", "Preferences"] -git-tree-sha1 = "7e5d6779a1e09a36db2a7b6cff50942a0a7d0fca" +git-tree-sha1 = "be3dc50a92e5a386872a493a10050136d4703f9b" uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" -version = "1.5.0" +version = "1.6.1" [[deps.JSON]] deps = ["Dates", "Mmap", "Parsers", "Unicode"] @@ -99,10 +99,15 @@ git-tree-sha1 = 
"31e996f0a15c7b280ba9f76636b3ff9e2ae58c9a" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" version = "0.21.4" +[[deps.JuliaSyntaxHighlighting]] +deps = ["StyledStrings"] +uuid = "dc6e5ff7-fb65-4e79-a425-ec3bc9c03011" +version = "1.12.0" + [[deps.LazilyInitializedFields]] -git-tree-sha1 = "8f7f3cabab0fd1800699663533b6d5cb3fc0e612" +git-tree-sha1 = "0f2da712350b020bc3957f269c9caad516383ee0" uuid = "0e77f7df-68c5-4e49-93ce-4cd80f5598bf" -version = "1.2.2" +version = "1.3.0" [[deps.LibCURL]] deps = ["LibCURL_jll", "MozillaCACerts_jll"] @@ -114,10 +119,6 @@ deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2 uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" version = "8.6.0+0" -[[deps.JuliaSyntaxHighlighting]] -deps = ["StyledStrings"] -uuid = "dc6e5ff7-fb65-4e79-a425-ec3bc9c03011" - [[deps.LibGit2]] deps = ["LibGit2_jll", "NetworkOptions", "Printf", "SHA"] uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" @@ -139,9 +140,9 @@ version = "1.11.0" [[deps.Libiconv_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "f9557a255370125b405568f9767d6d195822a175" +git-tree-sha1 = "61dfdba58e585066d8bce214c5a51eaa0539f269" uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" -version = "1.17.0+0" +version = "1.17.0+1" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" @@ -161,7 +162,7 @@ version = "0.1.2" [[deps.MbedTLS_jll]] deps = ["Artifacts", "Libdl"] uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.6+0" +version = "2.28.6+1" [[deps.Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" @@ -177,14 +178,14 @@ version = "1.2.0" [[deps.OpenSSL_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl"] -git-tree-sha1 = "3da7367955dcc5c54c1ba4d402ccdc09a1a3e046" +git-tree-sha1 = "7493f61f55a6cce7325f197443aa80d32554ba10" uuid = "458c3c95-2e84-50aa-8efc-19380b2a3a95" -version = "3.0.13+1" +version = "3.0.15+1" [[deps.PCRE2_jll]] deps = ["Artifacts", "Libdl"] uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" -version = "10.43.0+0" +version = "10.43.0+1" [[deps.Parsers]] deps = ["Dates", "PrecompileTools", "UUIDs"] @@ -266,13 +267,9 @@ uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" version = "1.11.0" [[deps.TranscodingStreams]] -git-tree-sha1 = "71509f04d045ec714c4748c785a59045c3736349" +git-tree-sha1 = "0c45878dcfdcfa8480052b6ab162cdd138781742" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.10.7" -weakdeps = ["Random", "Test"] - - [deps.TranscodingStreams.extensions] - TestExt = ["Test", "Random"] +version = "0.11.3" [[deps.UUIDs]] deps = ["Random", "SHA"] @@ -286,14 +283,14 @@ version = "1.11.0" [[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.3.1+0" +version = "1.3.1+1" [[deps.nghttp2_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.60.0+0" +version = "1.63.0+1" [[deps.p7zip_jll]] deps = ["Artifacts", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.5.0+0" +version = "17.5.0+1" From 16ece81e12c83616c3b305e5c2e057650433449b Mon Sep 17 00:00:00 2001 From: Alexis Montoison <35051714+amontoison@users.noreply.github.com> Date: Wed, 13 Nov 2024 06:35:58 -0600 Subject: [PATCH 436/537] [libblastrampoline_jll] Upgrade to v5.11.2 (#56534) --- deps/blastrampoline.version | 6 +- deps/checksums/blastrampoline | 72 +++++++++++------------ stdlib/libblastrampoline_jll/Project.toml | 2 +- 3 files changed, 40 insertions(+), 40 deletions(-) diff --git a/deps/blastrampoline.version b/deps/blastrampoline.version index 95771acad9ffa..7e6dc61d1cbe7 100644 --- 
a/deps/blastrampoline.version +++ b/deps/blastrampoline.version @@ -2,6 +2,6 @@ BLASTRAMPOLINE_JLL_NAME := libblastrampoline ## source build -BLASTRAMPOLINE_VER := 5.11.1 -BLASTRAMPOLINE_BRANCH=v5.11.1 -BLASTRAMPOLINE_SHA1=b09277feafd342520b8476ce443d35327b5e55b4 +BLASTRAMPOLINE_VER := 5.11.2 +BLASTRAMPOLINE_BRANCH=v5.11.2 +BLASTRAMPOLINE_SHA1=c48da8a1225c2537ff311c28ef395152fb879eae diff --git a/deps/checksums/blastrampoline b/deps/checksums/blastrampoline index cbde7fa45b1e2..987d4662e6cc7 100644 --- a/deps/checksums/blastrampoline +++ b/deps/checksums/blastrampoline @@ -1,36 +1,36 @@ -blastrampoline-b09277feafd342520b8476ce443d35327b5e55b4.tar.gz/md5/7516eaaa5777a93cf387da1bf4b14c8a -blastrampoline-b09277feafd342520b8476ce443d35327b5e55b4.tar.gz/sha512/00fea70f713be77be10bb014e7dad957616ea59d882e2bfa75d7b8b7237dd59d735cfb944b9cac3fa34fbe7b0a78c89c25b605bdea33e2c17278f29874e20363 -libblastrampoline.v5.11.1+0.aarch64-apple-darwin.tar.gz/md5/93ee5c360913b8ed7c558a2edeb7014b -libblastrampoline.v5.11.1+0.aarch64-apple-darwin.tar.gz/sha512/3f6e78d8c966fce6eecf82931186907cc10b95ceb71d5cfc3ee958b20a11d0e24d1a399fb7fba4cf7180fa61f3d0965db6e6ca9d99dd8c4ab56d36713fd9a327 -libblastrampoline.v5.11.1+0.aarch64-linux-gnu.tar.gz/md5/aad5e3585f585d54d9ebcf822bbe32cb -libblastrampoline.v5.11.1+0.aarch64-linux-gnu.tar.gz/sha512/11ff9227e16898895ad6cbd36853093941b243a49962785a5ab8b7dc2426831a2750ab5882ee814e3a662e8b9f8aecb273d750b88a4ea5a213e20c93cb121ce1 -libblastrampoline.v5.11.1+0.aarch64-linux-musl.tar.gz/md5/462639b4b21f5b7626febfdd1ae1f824 -libblastrampoline.v5.11.1+0.aarch64-linux-musl.tar.gz/sha512/866004e3fcdb5ab7418c8a2cae8f820c5739a511b9d0b32d0013ef72ff99f87396f5912d8fbd6bf4d01d7432715c6971ad1a5419c34fa7b048d0fbbe0f8520d2 -libblastrampoline.v5.11.1+0.aarch64-unknown-freebsd.tar.gz/md5/b6ce7d6d46d2ae772d4c3f629e754486 -libblastrampoline.v5.11.1+0.aarch64-unknown-freebsd.tar.gz/sha512/b2e7990cd0f7bb1bc376118955e397599c44aa3d09b0e87524ed8fed4bbb1d6a2b9c1bc02806bbeb86812ab0083c8016fe3c38894e0eb339025cf30f0cd64ffc -libblastrampoline.v5.11.1+0.armv6l-linux-gnueabihf.tar.gz/md5/8a48cc8243257362dbc920dcadc42a22 -libblastrampoline.v5.11.1+0.armv6l-linux-gnueabihf.tar.gz/sha512/bb4048c0e1ebbb89fc82b7cdabb0a4d9263b5344390c934b66c3a227631661ae956287870e4b156935f0a3c322049ceed3138fc033c92561fccf3675317af5b8 -libblastrampoline.v5.11.1+0.armv6l-linux-musleabihf.tar.gz/md5/53c12d04337b63d18f4a5469a36132b6 -libblastrampoline.v5.11.1+0.armv6l-linux-musleabihf.tar.gz/sha512/fbb9e1cd3c80cf6eada43c7b3d3e6990a2b54c3f7de492ba5407d64841e705a68a5c7aa8bf4873f3204a7f8a9631a0135e2e08b57d4291b32d0f928e887c1e14 -libblastrampoline.v5.11.1+0.armv7l-linux-gnueabihf.tar.gz/md5/08963ae41481cbd4d7d9c9790b8e161e -libblastrampoline.v5.11.1+0.armv7l-linux-gnueabihf.tar.gz/sha512/428e952b3ec6904c9aa233fab1a860a30b043aa8e7508978406a0aafffee03b4e73b51dcd1eaa8550032edf51bd84e1c8356cdbd180d48791c5c0486c3a925a1 -libblastrampoline.v5.11.1+0.armv7l-linux-musleabihf.tar.gz/md5/fae4f9b44ddca8f74f8999fe3a9f0a91 -libblastrampoline.v5.11.1+0.armv7l-linux-musleabihf.tar.gz/sha512/afd37260ee0ecc0a1fe34f0e78cb1fd563e8d0cad025bc8ad733186a56c1c1faa4ffb4de593aead0b21513c9108847e08734ec14443ab8c0c36468f990bdf38e -libblastrampoline.v5.11.1+0.i686-linux-gnu.tar.gz/md5/3d664f435a559022a8309f271a8376e5 -libblastrampoline.v5.11.1+0.i686-linux-gnu.tar.gz/sha512/60a2863237f0b668237c6b68c0671ecf17d62272b047f2ad5e6b466aeb7e0e92fa1207e9c107de7c96a2b8974925f2af69324104c22fa1c51a9cc207b84e2d22 
-libblastrampoline.v5.11.1+0.i686-linux-musl.tar.gz/md5/3d63e967ae8301329e9a79a0882c14f6 -libblastrampoline.v5.11.1+0.i686-linux-musl.tar.gz/sha512/9c3950bccf578b3b3b609398ab7a05c13cb86ded686c585f916c521adb533589166530c825af8095bb6d88b9ae0d14dae992a53b578af502f19811be1aecc185 -libblastrampoline.v5.11.1+0.i686-w64-mingw32.tar.gz/md5/99890890c7e600d0817775026baca09b -libblastrampoline.v5.11.1+0.i686-w64-mingw32.tar.gz/sha512/87904de1637967e1ba6a17b788c7ae3d049934553d14302c715db829f1a2aaa55c35f3c04d3ef0fce7a589e66d41fba939906a5dd5b19daf3ede343d298bc018 -libblastrampoline.v5.11.1+0.powerpc64le-linux-gnu.tar.gz/md5/bda2bbfb9af8eb655fead11a6ce142cb -libblastrampoline.v5.11.1+0.powerpc64le-linux-gnu.tar.gz/sha512/ca318ff7b362ee5f15654c669f4acf45d4530499daa2b8e64da179c2b0ba2bddb0d0b30dc08b3427a55dd2f0ee239b7c00fb93bd27572d14a863677bf22a0173 -libblastrampoline.v5.11.1+0.x86_64-apple-darwin.tar.gz/md5/dec773fbfbf218b35e942325cf9305dc -libblastrampoline.v5.11.1+0.x86_64-apple-darwin.tar.gz/sha512/c7d4828689361c9a8708b7cf1b0b1fa4f237e2a50b45f71457782b84fcc88c757e00bc91f19e9c7bc94d1c69420ec2c4ebe39c62f9fd140e72ff8a408879474c -libblastrampoline.v5.11.1+0.x86_64-linux-gnu.tar.gz/md5/88545391ae715b0f83b786f6eb7a6ee5 -libblastrampoline.v5.11.1+0.x86_64-linux-gnu.tar.gz/sha512/f041dac97783108b6b4e90a74315c3c4074c82ab926b1d3c1b90dac03dd1b7ea60dbb96b0c36b34b9e386732c8f546c7c54ea8111c650d0454cfb6015535ddf2 -libblastrampoline.v5.11.1+0.x86_64-linux-musl.tar.gz/md5/7c8353b779cfae36984a0a806f985a7b -libblastrampoline.v5.11.1+0.x86_64-linux-musl.tar.gz/sha512/5288123a4cb81befac2b2504c503303e0cf7d6eee3e9ba3195378900b0204745ed0e818f31a1d344bd552ff06a9904075b1fb742eea5f1f5de907c0df141b8ca -libblastrampoline.v5.11.1+0.x86_64-unknown-freebsd.tar.gz/md5/7bc51751c09a1772d2f8638e5d3e4655 -libblastrampoline.v5.11.1+0.x86_64-unknown-freebsd.tar.gz/sha512/5fde7423915964e4491f9fc46da9fb046fc85a434408dd4cb61521efe70d090e7b5dd2a995345318b287f03c9f21c15de2f627244332038b5dc99e28c88a29b3 -libblastrampoline.v5.11.1+0.x86_64-w64-mingw32.tar.gz/md5/6e7f602ab0bf5a5c28bf4e959a1bbf77 -libblastrampoline.v5.11.1+0.x86_64-w64-mingw32.tar.gz/sha512/556e7ca1a2576c1d7825ac1bc2449ffe2cd40391cf316b10f60681a5c736939c97eb5221c2837640928b5544f89f44cb14ca44ccf54092376390ea1a6012c9e5 +blastrampoline-c48da8a1225c2537ff311c28ef395152fb879eae.tar.gz/md5/0747a7c65427a5e6ff4820ea1079f095 +blastrampoline-c48da8a1225c2537ff311c28ef395152fb879eae.tar.gz/sha512/8d5c60ce84ae42e529506821b051e043c0d8861cd7e39780ebc858c2b8638d6628b2f9ceffd67c9ee18983c9c7e5a454f65cf14fb414907c28c90eb67e7de8fe +libblastrampoline.v5.11.2+0.aarch64-apple-darwin.tar.gz/md5/c0f71f80654d6025e29e763f7bf2de92 +libblastrampoline.v5.11.2+0.aarch64-apple-darwin.tar.gz/sha512/49a7f8f2aac286763d7ce2c086b60b84e9ed7eb9dbbd8ba00c5956840ea6c642f4b1d80cb69888045dfdce55dcde1ee2843df9fa63947d3ce8615faf1523a902 +libblastrampoline.v5.11.2+0.aarch64-linux-gnu.tar.gz/md5/7e9b45c623aa527d65f85edff7d056dd +libblastrampoline.v5.11.2+0.aarch64-linux-gnu.tar.gz/sha512/f41378f63a6513ca9b25febb8c01257711cd34e86303a081865696adadc41db5e39c1fd1fdf50ff1ea5d3224fe22ea7f8e571dc7001ee8708be2a27d41410eb5 +libblastrampoline.v5.11.2+0.aarch64-linux-musl.tar.gz/md5/1a2b0eafdaedc1870508948f4a8fd6d8 +libblastrampoline.v5.11.2+0.aarch64-linux-musl.tar.gz/sha512/5d9c8cce5a0abfa10b2907d9b44ad62e62cd9cd7c4c94c14b0ae93f83adff7c1c9f386c1b82dbc2f8f1f959c86c724663ae5dfdbcdd081cebcbf8a91be87da7b +libblastrampoline.v5.11.2+0.aarch64-unknown-freebsd.tar.gz/md5/3c518305add0202d56798c30cbd04345 
+libblastrampoline.v5.11.2+0.aarch64-unknown-freebsd.tar.gz/sha512/ac292d999cd258052a95dd641bd06d22db3e6c0574077e9aecb63dca70c1810395921d9bc939a629cf38ece16de42d541dd03aef84d53cc6bd7b7d65bb743b66 +libblastrampoline.v5.11.2+0.armv6l-linux-gnueabihf.tar.gz/md5/fd47f376283002dc6821c4dac0127198 +libblastrampoline.v5.11.2+0.armv6l-linux-gnueabihf.tar.gz/sha512/e56b3e5b5f0bf2b3138484a49a922cb82608152de7dd972c52294eb8611cb76b95b06f33a1dc38f00dd02702ca1ef9b6f69572349b185695a55b269b91cf231f +libblastrampoline.v5.11.2+0.armv6l-linux-musleabihf.tar.gz/md5/70222a8dd72f03888401a2d0cf5a206c +libblastrampoline.v5.11.2+0.armv6l-linux-musleabihf.tar.gz/sha512/609894123a512831c9159312ea5f496de9361c60a838f9428ea5dc6aa9aa6bbb2b33856bf08868765e9af2548d8d386389747254d87d7ed403e492259d61ce32 +libblastrampoline.v5.11.2+0.armv7l-linux-gnueabihf.tar.gz/md5/966dfbf17d7eac1ff046b935e8202e7a +libblastrampoline.v5.11.2+0.armv7l-linux-gnueabihf.tar.gz/sha512/de173d9c17970bff612e1759dbcd9188f0bca0dffd21e0a98d2ed5b72a5ba60cc0097cec1e42cb2bc42f14c1c0bed3987b5bd4a04c7991c9e8d908f2aed231cd +libblastrampoline.v5.11.2+0.armv7l-linux-musleabihf.tar.gz/md5/90b43518c75e0071e4b2efe3aef344ec +libblastrampoline.v5.11.2+0.armv7l-linux-musleabihf.tar.gz/sha512/2bbb2676b381e588e6315576ed9a1d4cad4612aa6c1b5ec95fdd8434f0f0fcb07cc0b61162c0a1dac72217a008f01702f5bf63566a007622d7a3ab35461b6645 +libblastrampoline.v5.11.2+0.i686-linux-gnu.tar.gz/md5/ecf7b2fcdf8feb2114525290d09b99c7 +libblastrampoline.v5.11.2+0.i686-linux-gnu.tar.gz/sha512/10922aa2e567f1534340ec9422516ccf0ea625ae73a433ed864dc72926235fe1dc6c52c2ca716aca5eeac80544a99e76892a0f19fccd2c2b9103332fd2289980 +libblastrampoline.v5.11.2+0.i686-linux-musl.tar.gz/md5/6cf17a410bf50b3a87b9f2af0c6955e9 +libblastrampoline.v5.11.2+0.i686-linux-musl.tar.gz/sha512/30f78dd4948b26b14d04cf5e1821a381e9d8aa67c6b3547cf45a0d53a469829a98d7d47722c542699e65e1ae3411a86da094d8b354821ece1562288fa523b1f1 +libblastrampoline.v5.11.2+0.i686-w64-mingw32.tar.gz/md5/9c4c1fa7410f9e53687dbde1479deb3a +libblastrampoline.v5.11.2+0.i686-w64-mingw32.tar.gz/sha512/511eed07956b16555ab236905fe91c584d870e45d1a6b736b3b564f84ec66e8c12d9561efabad259ddc65b8965eb4cdc29b079d0a9a6a465b424b503399eae7b +libblastrampoline.v5.11.2+0.powerpc64le-linux-gnu.tar.gz/md5/816ee59bf7cc937399c273709882369b +libblastrampoline.v5.11.2+0.powerpc64le-linux-gnu.tar.gz/sha512/4e1095288ff02a9e0714f982be16782271643c1844100b38d5fcf02c2e2f62d0635457d52dd120792a59a62b905c60aa7e253a89c2f759d98c33d617c89e897f +libblastrampoline.v5.11.2+0.x86_64-apple-darwin.tar.gz/md5/9a4a86a441aa232e12e85bbf6e62f589 +libblastrampoline.v5.11.2+0.x86_64-apple-darwin.tar.gz/sha512/2d80b4c9149b8d62ae89fa3be32ccb297e815c9cd56b3481482c5f6ee253fc845d410807e099f4c1814a77e397c04511ebabc9d82352fc43ebe81a3306819ccc +libblastrampoline.v5.11.2+0.x86_64-linux-gnu.tar.gz/md5/45fbfd0422131044fff9ed44d12f13e1 +libblastrampoline.v5.11.2+0.x86_64-linux-gnu.tar.gz/sha512/c7e4f87aa0ab403be46b81967d40ebd4bd4b32af93a325cb16f64593c2261a365be3f338195cdfeada0cb6ecab8e33e4be1b380596ff0bb1c4a7b5e6aac3dccc +libblastrampoline.v5.11.2+0.x86_64-linux-musl.tar.gz/md5/161816fa857775d78bc671c444846844 +libblastrampoline.v5.11.2+0.x86_64-linux-musl.tar.gz/sha512/ba0ab54a9ccfb451b7b8fe46b2bd8e8a8135d2e1f2a896bfdf4bcc6e82812f56d93ef1e7b85671e58d388afe2876d79affdf59bfe7b1db5b76412008303a121e +libblastrampoline.v5.11.2+0.x86_64-unknown-freebsd.tar.gz/md5/aeaab847455f5a43c434155b09107cde 
+libblastrampoline.v5.11.2+0.x86_64-unknown-freebsd.tar.gz/sha512/24425c8bdc861404156bb5a8e950654904fb22ff6a5ebe52c873629e4dd1cfaccafaae74b779c2cb02370f012cf18c2142a105dd614938b2685db2cd7527c73d +libblastrampoline.v5.11.2+0.x86_64-w64-mingw32.tar.gz/md5/450afb701cc2899c7c083bd3f3e580a0 +libblastrampoline.v5.11.2+0.x86_64-w64-mingw32.tar.gz/sha512/e4d1785a06b051a4f16edd7343021eed61ac45cf45d26b4e3ef1e54cfaadb44da2e74b7d854e31b05a733dbb3004f3e85644967316c4f41d1ad64400fed126f2 diff --git a/stdlib/libblastrampoline_jll/Project.toml b/stdlib/libblastrampoline_jll/Project.toml index eb71a4a9d532c..8d5a3b7b20bcc 100644 --- a/stdlib/libblastrampoline_jll/Project.toml +++ b/stdlib/libblastrampoline_jll/Project.toml @@ -1,6 +1,6 @@ name = "libblastrampoline_jll" uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.11.1+0" +version = "5.11.2+0" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" From 5cc5878cf63b6f832ce9c2d54712ba7a405b28e1 Mon Sep 17 00:00:00 2001 From: Dennis Hoelgaard Bal <61620837+KronosTheLate@users.noreply.github.com> Date: Wed, 13 Nov 2024 13:54:31 +0100 Subject: [PATCH 437/537] Improve docstrings for `filesize` and `stat` (#56244) Co-authored-by: Lilith Orion Hafner Co-authored-by: Neven Sajko --- base/stat.jl | 153 +++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 117 insertions(+), 36 deletions(-) diff --git a/base/stat.jl b/base/stat.jl index c6fb239a96404..fc2ac9a04b0bf 100644 --- a/base/stat.jl +++ b/base/stat.jl @@ -28,8 +28,15 @@ export """ StatStruct -A struct which stores the information from `stat`. -The following fields of this struct is considered public API: +A struct which stores information about a file. Usually +constructed by calling [`stat`](@ref) on a path. + +This struct is used internally as the foundation of a number of utility +functions. Some return specific parts of the information stored in it +directly, such as [`filesize`](@ref), [`mtime`](@ref) and [`ctime`](@ref). Others add +some logic on top using bit-manipulation, such as [`isfifo`](@ref), [`ischardev`](@ref), and [`issetuid`](@ref). + +The following fields of this struct are considered public API: | Name | Type | Description | |:--------|:--------------------------------|:-------------------------------------------------------------------| @@ -200,10 +207,12 @@ if RawFD !== OS_HANDLE end """ - stat(file) - stat(joinpath...) + stat(path) + stat(path_elements...) Return a structure whose fields contain information about the file. +If multiple arguments are given, they are joined by [`joinpath`](@ref). + The fields of the structure are: | Name | Type | Description | @@ -226,13 +235,14 @@ stat(path) = (path2 = joinpath(path); path2 isa typeof(path) ? error("stat not i stat(path...) = stat(joinpath(path...)) """ - lstat(file) - lstat(joinpath...) + lstat(path) + lstat(path_elements...) -Like [`stat`](@ref), but for symbolic links gets the info for the link -itself rather than the file it refers to. -This function must be called on a file path rather than a file object or a file -descriptor. +Like [`stat`](@ref), but for symbolic links gets the info +for the link itself rather than the file it refers to. + +This function must be called on a file path rather +than a file object or a file descriptor. """ lstat(path) = (path2 = joinpath(path); path2 isa typeof(path) ? error("lstat not implemented for $(typeof(path))") : lstat(path2)) lstat(path...) 
= lstat(joinpath(path...)) @@ -285,9 +295,14 @@ const filemode_table = ( ) """ - filemode(file) + filemode(path) + filemode(path_elements...) + filemode(stat_struct) + +Return the mode of the file located at `path`, +or the mode indicated by the file descriptor `stat_struct`. -Equivalent to `stat(file).mode`. +Equivalent to `stat(path).mode` or `stat_struct.mode`. """ filemode(st::StatStruct) = st.mode filemode_string(st::StatStruct) = filemode_string(st.mode) @@ -308,23 +323,38 @@ function filemode_string(mode) end """ - filesize(path...) + filesize(path) + filesize(path_elements...) + filesize(stat_struct) + +Return the size of the file located at `path`, +or the size indicated by file descriptor `stat_struct`. -Equivalent to `stat(file).size`. +Equivalent to `stat(path).size` or `stat_struct.size`. """ filesize(st::StatStruct) = st.size """ - mtime(file) + mtime(path) + mtime(path_elements...) + mtime(stat_struct) -Equivalent to `stat(file).mtime`. +Return the unix timestamp of when the file at `path` was last modified, +or the last modified timestamp indicated by the file descriptor `stat_struct`. + +Equivalent to `stat(path).mtime` or `stat_struct.mtime`. """ mtime(st::StatStruct) = st.mtime """ - ctime(file) + ctime(path) + ctime(path_elements...) + ctime(stat_struct) + +Return the unix timestamp of when the metadata of the file at `path` was last modified, +or the last modified metadata timestamp indicated by the file descriptor `stat_struct`. -Equivalent to `stat(file).ctime`. +Equivalent to `stat(path).ctime` or `stat_struct.ctime`. """ ctime(st::StatStruct) = st.ctime @@ -332,9 +362,11 @@ ctime(st::StatStruct) = st.ctime """ ispath(path) -> Bool + ispath(path_elements...) -> Bool Return `true` if a valid filesystem entity exists at `path`, otherwise returns `false`. + This is the generalization of [`isfile`](@ref), [`isdir`](@ref) etc. """ ispath(st::StatStruct) = st.ioerrno == 0 @@ -351,22 +383,27 @@ ispath(path::AbstractString) = ispath(String(path)) """ isfifo(path) -> Bool + isfifo(path_elements...) -> Bool + isfifo(stat_struct) -> Bool -Return `true` if `path` is a FIFO, `false` otherwise. +Return `true` if the file at `path` or file descriptor `stat_struct` is FIFO, `false` otherwise. """ isfifo(st::StatStruct) = filemode(st) & 0xf000 == 0x1000 """ ischardev(path) -> Bool + ischardev(path_elements...) -> Bool + ischardev(stat_struct) -> Bool -Return `true` if `path` is a character device, `false` otherwise. +Return `true` if the path `path` or file descriptor `stat_struct` refer to a character device, `false` otherwise. """ ischardev(st::StatStruct) = filemode(st) & 0xf000 == 0x2000 """ isdir(path) -> Bool + isdir(path_elements...) -> Bool -Return `true` if `path` is a directory, `false` otherwise. +Return `true` if `path` points to a directory, `false` otherwise. # Examples ```jldoctest @@ -383,15 +420,18 @@ isdir(st::StatStruct) = filemode(st) & 0xf000 == 0x4000 """ isblockdev(path) -> Bool + isblockdev(path_elements...) -> Bool + isblockdev(stat_struct) -> Bool -Return `true` if `path` is a block device, `false` otherwise. +Return `true` if the path `path` or file descriptor `stat_struct` refer to a block device, `false` otherwise. """ isblockdev(st::StatStruct) = filemode(st) & 0xf000 == 0x6000 """ isfile(path) -> Bool + isfile(path_elements...) -> Bool -Return `true` if `path` is a regular file, `false` otherwise. +Return `true` if `path` points to a regular file, `false` otherwise. 
# Examples ```jldoctest @@ -417,15 +457,17 @@ isfile(st::StatStruct) = filemode(st) & 0xf000 == 0x8000 """ islink(path) -> Bool + islink(path_elements...) -> Bool -Return `true` if `path` is a symbolic link, `false` otherwise. +Return `true` if `path` points to a symbolic link, `false` otherwise. """ islink(st::StatStruct) = filemode(st) & 0xf000 == 0xa000 """ issocket(path) -> Bool + issocket(path_elements...) -> Bool -Return `true` if `path` is a socket, `false` otherwise. +Return `true` if `path` points to a socket, `false` otherwise. """ issocket(st::StatStruct) = filemode(st) & 0xf000 == 0xc000 @@ -433,29 +475,37 @@ issocket(st::StatStruct) = filemode(st) & 0xf000 == 0xc000 """ issetuid(path) -> Bool + issetuid(path_elements...) -> Bool + issetuid(stat_struct) -> Bool -Return `true` if `path` has the setuid flag set, `false` otherwise. +Return `true` if the file at `path` or file descriptor `stat_struct` have the setuid flag set, `false` otherwise. """ issetuid(st::StatStruct) = (filemode(st) & 0o4000) > 0 """ issetgid(path) -> Bool + issetgid(path_elements...) -> Bool + issetgid(stat_struct) -> Bool -Return `true` if `path` has the setgid flag set, `false` otherwise. +Return `true` if the file at `path` or file descriptor `stat_struct` have the setgid flag set, `false` otherwise. """ issetgid(st::StatStruct) = (filemode(st) & 0o2000) > 0 """ issticky(path) -> Bool + issticky(path_elements...) -> Bool + issticky(stat_struct) -> Bool -Return `true` if `path` has the sticky bit set, `false` otherwise. +Return `true` if the file at `path` or file descriptor `stat_struct` have the sticky bit set, `false` otherwise. """ issticky(st::StatStruct) = (filemode(st) & 0o1000) > 0 """ - uperm(file) + uperm(path) + uperm(path_elements...) + uperm(stat_struct) -Get the permissions of the owner of the file as a bitfield of +Return a bitfield of the owner permissions for the file at `path` or file descriptor `stat_struct`. | Value | Description | |:------|:-------------------| @@ -463,22 +513,52 @@ Get the permissions of the owner of the file as a bitfield of | 02 | Write Permission | | 04 | Read Permission | -For allowed arguments, see [`stat`](@ref). +The fact that a bitfield is returned means that if the permission +is read+write, the bitfield is "110", which maps to the decimal +value of 0+2+4=6. This is reflected in the printing of the +returned `UInt8` value. + +See also [`gperm`](@ref) and [`operm`](@ref). + +```jldoctest +julia> touch("dummy_file"); # Create test-file without contents + +julia> uperm("dummy_file") +0x06 + +julia> bitstring(ans) +"00000110" + +julia> has_read_permission(path) = uperm(path) & 0b00000100 != 0; # Use bit mask to check specific bit + +julia> has_read_permission("dummy_file") +true + +julia> rm("dummy_file") # Clean up test-file +``` """ uperm(st::StatStruct) = UInt8((filemode(st) >> 6) & 0x7) """ - gperm(file) + gperm(path) + gperm(path_elements...) + gperm(stat_struct) Like [`uperm`](@ref) but gets the permissions of the group owning the file. + +See also [`operm`](@ref). """ gperm(st::StatStruct) = UInt8((filemode(st) >> 3) & 0x7) """ - operm(file) + operm(path) + operm(path_elements...) + operm(stat_struct) + +Like [`uperm`](@ref) but gets the permissions for people who neither own the +file nor are a member of the group owning the file. -Like [`uperm`](@ref) but gets the permissions for people who neither own the file nor are a member of -the group owning the file +See also [`gperm`](@ref). 
""" operm(st::StatStruct) = UInt8((filemode(st) ) & 0x7) @@ -514,7 +594,7 @@ function samefile(a::StatStruct, b::StatStruct) end """ - samefile(path_a::AbstractString, path_b::AbstractString) + samefile(path_a, path_b) Check if the paths `path_a` and `path_b` refer to the same existing file or directory. """ @@ -522,6 +602,7 @@ samefile(a::AbstractString, b::AbstractString) = samefile(stat(a), stat(b)) """ ismount(path) -> Bool + ismount(path_elements...) -> Bool Return `true` if `path` is a mount point, `false` otherwise. """ From a23e313b2cff5583377fd36413df1dca4cf11b26 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Wed, 13 Nov 2024 10:32:12 -0500 Subject: [PATCH 438/537] Fix and test special loading support for Compiler (#56535) The new `Compiler` package has a special stub that bypasses compilig a separate copy if you have `dev`'ed the version that's already compiled into the sysimg. It wasn't quite working properly in the final version of that PR if a compiler so loaded was a dependency of another precompiled package. Fix that and add a test to make sure it doesn't regress. --- Compiler/src/Compiler.jl | 6 +-- .../test/CompilerLoadingTest/Manifest.toml | 16 ++++++++ .../test/CompilerLoadingTest/Project.toml | 5 +++ .../compiler_loading_test.jl | 12 ++++++ .../src/CompilerLoadingTest.jl | 5 +++ Compiler/test/irutils.jl | 2 + Compiler/test/runtests.jl | 1 + Compiler/test/special_loading.jl | 9 +++++ Compiler/test/testgroups | 1 + base/Base.jl | 7 ++++ base/loading.jl | 38 ++++++++++--------- src/staticdata.c | 3 ++ src/staticdata_utils.c | 13 +++++-- 13 files changed, 93 insertions(+), 25 deletions(-) create mode 100644 Compiler/test/CompilerLoadingTest/Manifest.toml create mode 100644 Compiler/test/CompilerLoadingTest/Project.toml create mode 100644 Compiler/test/CompilerLoadingTest/compiler_loading_test.jl create mode 100644 Compiler/test/CompilerLoadingTest/src/CompilerLoadingTest.jl create mode 100644 Compiler/test/special_loading.jl diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index 41ae149dce372..dd96d8e449564 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -6,13 +6,13 @@ # the system image and simply returns that copy of the compiler. If not, # we proceed to load/precompile this as an ordinary package. if isdefined(Base, :generating_output) && Base.generating_output(true) && - Base.samefile(Base._compiler_require_dependencies[1][2], @eval @__FILE__) && + Base.samefile(joinpath(Sys.BINDIR, Base.DATAROOTDIR, Base._compiler_require_dependencies[1][2]), @eval @__FILE__) && !Base.any_includes_stale( - map(Base.CacheHeaderIncludes, Base._compiler_require_dependencies), + map(Base.compiler_chi, Base._compiler_require_dependencies), "sysimg", nothing) Base.prepare_compiler_stub_image!() - append!(Base._require_dependencies, Base._compiler_require_dependencies) + append!(Base._require_dependencies, map(Base.expand_compiler_path, Base._compiler_require_dependencies)) # There isn't much point in precompiling native code - downstream users will # specialize their own versions of the compiler code and we don't activate # the compiler by default anyway, so let's save ourselves some disk space. 
diff --git a/Compiler/test/CompilerLoadingTest/Manifest.toml b/Compiler/test/CompilerLoadingTest/Manifest.toml new file mode 100644 index 0000000000000..7fb3452a61017 --- /dev/null +++ b/Compiler/test/CompilerLoadingTest/Manifest.toml @@ -0,0 +1,16 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.12.0-DEV" +manifest_format = "2.0" +project_hash = "10c2816629fed766649b89eb6670e7001df6ea18" + +[[deps.Compiler]] +path = "../.." +uuid = "807dbc54-b67e-4c79-8afb-eafe4df6f2e1" +version = "0.0.1" + +[[deps.CompilerLoadingTest]] +deps = ["Compiler"] +path = "." +uuid = "95defb8a-f82d-44d7-b2c9-37d658f648c1" +version = "0.0.0" diff --git a/Compiler/test/CompilerLoadingTest/Project.toml b/Compiler/test/CompilerLoadingTest/Project.toml new file mode 100644 index 0000000000000..5dca932dc7997 --- /dev/null +++ b/Compiler/test/CompilerLoadingTest/Project.toml @@ -0,0 +1,5 @@ +name = "CompilerLoadingTest" +uuid = "95defb8a-f82d-44d7-b2c9-37d658f648c1" + +[deps] +Compiler = "807dbc54-b67e-4c79-8afb-eafe4df6f2e1" diff --git a/Compiler/test/CompilerLoadingTest/compiler_loading_test.jl b/Compiler/test/CompilerLoadingTest/compiler_loading_test.jl new file mode 100644 index 0000000000000..a09f7751912b8 --- /dev/null +++ b/Compiler/test/CompilerLoadingTest/compiler_loading_test.jl @@ -0,0 +1,12 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +using Test, UUIDs + +# This file is loaded as part of special_loading.jl +Base.compilecache(Base.PkgId(UUID(0x95defb8a_f82d_44d7_b2c9_37d658f648c1), "CompilerLoadingTest")) + +using CompilerLoadingTest +@test Base.maybe_loaded_precompile(Base.PkgId(UUID(0x807dbc54_b67e_4c79_8afb_eafe4df6f2e1), "Compiler"), Base.module_build_id(Base.Compiler)) !== nothing + +using Compiler +@test CompilerLoadingTest.Compiler === Compiler === Base.Compiler diff --git a/Compiler/test/CompilerLoadingTest/src/CompilerLoadingTest.jl b/Compiler/test/CompilerLoadingTest/src/CompilerLoadingTest.jl new file mode 100644 index 0000000000000..61f8417a23251 --- /dev/null +++ b/Compiler/test/CompilerLoadingTest/src/CompilerLoadingTest.jl @@ -0,0 +1,5 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +module CompilerLoadingTest + using Compiler +end diff --git a/Compiler/test/irutils.jl b/Compiler/test/irutils.jl index c11444d8daabc..95525d2f2fe5a 100644 --- a/Compiler/test/irutils.jl +++ b/Compiler/test/irutils.jl @@ -1,3 +1,5 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + using Core.IR using Core.Compiler: IRCode, IncrementalCompact, singleton_type, VarState using Base.Meta: isexpr diff --git a/Compiler/test/runtests.jl b/Compiler/test/runtests.jl index 10e613c8f52af..e4b312c6a65b7 100644 --- a/Compiler/test/runtests.jl +++ b/Compiler/test/runtests.jl @@ -2,5 +2,6 @@ using Test, Compiler for file in readlines(joinpath(@__DIR__, "testgroups")) + file == "special_loading" && continue # Only applicable to Base.Compiler include(file * ".jl") end diff --git a/Compiler/test/special_loading.jl b/Compiler/test/special_loading.jl new file mode 100644 index 0000000000000..ba012446dc61f --- /dev/null +++ b/Compiler/test/special_loading.jl @@ -0,0 +1,9 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +mktempdir() do dir + withenv("JULIA_DEPOT_PATH" => dir * (Sys.iswindows() ? 
";" : ":"), "JULIA_LOAD_PATH" => nothing) do + cd(joinpath(@__DIR__, "CompilerLoadingTest")) do + @test success(pipeline(`$(Base.julia_cmd()[1]) --startup-file=no --project=. compiler_loading_test.jl`; stdout, stderr)) + end + end +end diff --git a/Compiler/test/testgroups b/Compiler/test/testgroups index 44e9b388f4821..5075caa8b34cf 100644 --- a/Compiler/test/testgroups +++ b/Compiler/test/testgroups @@ -14,3 +14,4 @@ newinterp ssair tarjan validation +special_loading diff --git a/base/Base.jl b/base/Base.jl index 39507b625660d..1f737452fa17a 100644 --- a/base/Base.jl +++ b/base/Base.jl @@ -407,6 +407,13 @@ for i = 1:length(_included_files) _include_dependency!(_compiler_require_dependencies, true, mod, file, true, false) end end +# Make relative to DATAROOTDIR to allow relocation +let basedir = joinpath(Sys.BINDIR, DATAROOTDIR) +for i = 1:length(_compiler_require_dependencies) + tup = _compiler_require_dependencies[i] + _compiler_require_dependencies[i] = (tup[1], relpath(tup[2], basedir), tup[3:end]...) +end +end @assert length(_compiler_require_dependencies) >= 15 end diff --git a/base/loading.jl b/base/loading.jl index 2765c6ea3ed1f..79b4fb8cb9fcc 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1389,7 +1389,7 @@ function register_restored_modules(sv::SimpleVector, pkg::PkgId, path::String) if isdefined(M, Base.Docs.META) && getfield(M, Base.Docs.META) !== nothing push!(Base.Docs.modules, M) end - if parentmodule(M) === M + if is_root_module(M) push!(loaded_modules_order, M) push!(get!(Vector{Module}, loaded_precompiles, pkg), M) end @@ -3369,18 +3369,23 @@ function resolve_depot(inc::AbstractString) return :no_depot_found end - -function _parse_cache_header(f::IO, cachefile::AbstractString) - flags = read(f, UInt8) - modules = Vector{Pair{PkgId, UInt64}}() +function read_module_list(f::IO, has_buildid_hi::Bool) + modules = Vector{Pair{PkgId, UInt128}}() while true n = read(f, Int32) n == 0 && break sym = String(read(f, n)) # module name uuid = UUID((read(f, UInt64), read(f, UInt64))) # pkg UUID - build_id = read(f, UInt64) # build UUID (mostly just a timestamp) + build_id_hi = UInt128(has_buildid_hi ? 
read(f, UInt64) : UInt64(0)) << 64 + build_id = (build_id_hi | read(f, UInt64)) # build id (checksum + time - not a UUID) push!(modules, PkgId(uuid, sym) => build_id) end + return modules +end + +function _parse_cache_header(f::IO, cachefile::AbstractString) + flags = read(f, UInt8) + modules = read_module_list(f, false) totbytes = Int64(read(f, UInt64)) # total bytes for file dependencies + preferences # read the list of requirements # and split the list into include and requires statements @@ -3439,16 +3444,7 @@ function _parse_cache_header(f::IO, cachefile::AbstractString) totbytes -= 8 @assert totbytes == 0 "header of cache file appears to be corrupt (totbytes == $(totbytes))" # read the list of modules that are required to be present during loading - required_modules = Vector{Pair{PkgId, UInt128}}() - while true - n = read(f, Int32) - n == 0 && break - sym = String(read(f, n)) # module name - uuid = UUID((read(f, UInt64), read(f, UInt64))) # pkg UUID - build_id = UInt128(read(f, UInt64)) << 64 - build_id |= read(f, UInt64) - push!(required_modules, PkgId(uuid, sym) => build_id) - end + required_modules = read_module_list(f, true) l = read(f, Int32) clone_targets = read(f, l) @@ -3991,10 +3987,11 @@ end record_reason(reasons, "for different pkgid") return true end - id_build = (UInt128(checksum) << 64) | id.second + id_build = id.second + id_build = (UInt128(checksum) << 64) | (id_build % UInt64) if build_id != UInt128(0) if id_build != build_id - @debug "Ignoring cache file $cachefile for $modkey ($((UUID(id_build)))) since it does not provide desired build_id ($((UUID(build_id))))" + @debug "Ignoring cache file $cachefile for $modkey ($(UUID(id_build))) since it does not provide desired build_id ($((UUID(build_id))))" record_reason(reasons, "for different buildid") return true end @@ -4169,6 +4166,11 @@ function prepare_compiler_stub_image!() filter!(mod->mod !== Compiler, loaded_modules_order) end +function expand_compiler_path(tup) + (tup[1], joinpath(Sys.BINDIR, DATAROOTDIR, tup[2]), tup[3:end]...) +end +compiler_chi(tup::Tuple) = CacheHeaderIncludes(expand_compiler_path(tup)) + """ precompile(f, argtypes::Tuple{Vararg{Any}}) diff --git a/src/staticdata.c b/src/staticdata.c index 6b225d3808c8b..c5ba8ba396281 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -3938,6 +3938,9 @@ static void jl_restore_system_image_from_stream_(ios_t *f, jl_image_t *image, jl size_t len = jl_array_nrows(*restored); assert(len > 0); jl_module_t *topmod = (jl_module_t*)jl_array_ptr_ref(*restored, len-1); + // Ordinarily set during deserialization, but our compiler stub image, + // just returns a reference to the sysimage version, so we set it here. 
+ topmod->build_id.hi = checksum; assert(jl_is_module(topmod)); arraylist_push(&jl_top_mods, (void*)topmod); } diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index ba6f95269838b..77e66c7459086 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -505,13 +505,18 @@ static int64_t write_header(ios_t *s, uint8_t pkgimage) return checksumpos; } +static int is_serialization_root_module(jl_module_t *mod) JL_NOTSAFEPOINT +{ + return mod->parent == jl_main_module || mod->parent == jl_base_module || mod->parent == mod; +} + // serialize information about the result of deserializing this file static void write_worklist_for_header(ios_t *s, jl_array_t *worklist) { int i, l = jl_array_nrows(worklist); for (i = 0; i < l; i++) { jl_module_t *workmod = (jl_module_t*)jl_array_ptr_ref(worklist, i); - if (workmod->parent == jl_main_module || workmod->parent == workmod) { + if (is_serialization_root_module(workmod)) { size_t l = strlen(jl_symbol_name(workmod->name)); write_int32(s, l); ios_write(s, jl_symbol_name(workmod->name), l); @@ -525,7 +530,7 @@ static void write_worklist_for_header(ios_t *s, jl_array_t *worklist) static void write_module_path(ios_t *s, jl_module_t *depmod) JL_NOTSAFEPOINT { - if (depmod->parent == jl_main_module || depmod->parent == depmod) + if (is_serialization_root_module(depmod)) return; const char *mname = jl_symbol_name(depmod->name); size_t slen = strlen(mname); @@ -603,13 +608,13 @@ static int64_t write_dependency_list(ios_t *s, jl_array_t* worklist, jl_array_t write_float64(s, jl_unbox_float64(jl_fieldref(deptuple, 4))); // mtime jl_module_t *depmod = (jl_module_t*)jl_fieldref(deptuple, 0); // evaluating module jl_module_t *depmod_top = depmod; - while (depmod_top->parent != jl_main_module && depmod_top->parent != depmod_top) + while (!is_serialization_root_module(depmod_top)) depmod_top = depmod_top->parent; unsigned provides = 0; size_t j, lj = jl_array_nrows(worklist); for (j = 0; j < lj; j++) { jl_module_t *workmod = (jl_module_t*)jl_array_ptr_ref(worklist, j); - if (workmod->parent == jl_main_module || workmod->parent == workmod) { + if (is_serialization_root_module(workmod)) { ++provides; if (workmod == depmod_top) { write_int32(s, provides); From 9945df9a663886566fdefe630c080373b9e0ec9a Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Wed, 13 Nov 2024 10:32:32 -0500 Subject: [PATCH 439/537] Some misc compiler-related code cleanup (#56540) I'm cleaning up some downstream packages following the compiler split and these were commonly found in compat code. One is the `Base.copy` method for Phi(C)Node, which we had an implementation for, just not wired up to `copy`. The other is `block_for_inst` with an SSAValue, which exists for IncremetalCompact, but not IRCode, so add the latter for consistency. 
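A small usage sketch of the two conveniences this adds (illustrative only, not taken from the diff):

```julia
# `copy` now works directly on Phi(C)Nodes rather than only through copy_exprs:
phi  = Core.PhiNode(Int32[1, 2], Any[Core.SSAValue(1), Core.SSAValue(2)])
phi2 = copy(phi)        # fresh edges/values vectors, same entries

# and `block_for_inst` accepts an SSAValue for an IRCode as well, e.g. given some
# `ir::Compiler.IRCode`:
# bb = Compiler.block_for_inst(ir, Core.SSAValue(3))   # same block as block_for_inst(ir, 3)
```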
--- Compiler/src/ssair/ir.jl | 1 + base/expr.jl | 38 ++++++++++++++++++++++---------------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/Compiler/src/ssair/ir.jl b/Compiler/src/ssair/ir.jl index 9a76c7370c68d..9103dba04fa54 100644 --- a/Compiler/src/ssair/ir.jl +++ b/Compiler/src/ssair/ir.jl @@ -482,6 +482,7 @@ function block_for_inst(ir::IRCode, inst::Int) end block_for_inst(ir.cfg, inst) end +block_for_inst(ir::IRCode, ssa::SSAValue) = block_for_inst(ir, ssa.id) function getindex(ir::IRCode, s::SSAValue) id = s.id diff --git a/base/expr.jl b/base/expr.jl index f57331ef02e74..354fae3f0a592 100644 --- a/base/expr.jl +++ b/base/expr.jl @@ -39,29 +39,35 @@ isexpr(@nospecialize(ex), head::Symbol) = isa(ex, Expr) && ex.head === head isexpr(@nospecialize(ex), head::Symbol, n::Int) = isa(ex, Expr) && ex.head === head && length(ex.args) == n copy(e::Expr) = exprarray(e.head, copy_exprargs(e.args)) +function copy(x::PhiNode) + values = x.values + nvalues = length(values) + new_values = Vector{Any}(undef, nvalues) + @inbounds for i = 1:nvalues + isassigned(values, i) || continue + new_values[i] = copy_exprs(values[i]) + end + return PhiNode(copy(x.edges), new_values) +end +function copy(x::PhiCNode) + values = x.values + nvalues = length(values) + new_values = Vector{Any}(undef, nvalues) + @inbounds for i = 1:nvalues + isassigned(values, i) || continue + new_values[i] = copy_exprs(values[i]) + end + return PhiCNode(new_values) +end # copy parts of an AST that the compiler mutates function copy_exprs(@nospecialize(x)) if isa(x, Expr) return copy(x) elseif isa(x, PhiNode) - values = x.values - nvalues = length(values) - new_values = Vector{Any}(undef, nvalues) - @inbounds for i = 1:nvalues - isassigned(values, i) || continue - new_values[i] = copy_exprs(values[i]) - end - return PhiNode(copy(x.edges), new_values) + return copy(x) elseif isa(x, PhiCNode) - values = x.values - nvalues = length(values) - new_values = Vector{Any}(undef, nvalues) - @inbounds for i = 1:nvalues - isassigned(values, i) || continue - new_values[i] = copy_exprs(values[i]) - end - return PhiCNode(new_values) + return copy(x) end return x end From 48e785478ff1796a39d9817be703ba7bc740a5f9 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Thu, 14 Nov 2024 01:13:17 +0900 Subject: [PATCH 440/537] fix `IRShow.show` of the standard library `Compiler` (#56542) Previously, definitions of overloaded `Base.show` and `IRShow.show` were mixed, causing `show` to not function properly for `Compiler` as a standard library. This commit fixes that issue and also includes some minor cleanups. 
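The distinction at play is the usual one between extending `Base.show` and accidentally creating a new local `show`; a generic sketch, not specific to the compiler types:

```julia
# Extending Base.show attaches the method to the function that print/display use:
struct Point; x::Int; y::Int; end
Base.show(io::IO, p::Point) = print(io, "Point(", p.x, ", ", p.y, ")")

# By contrast, a bare `show(io, p) = ...` in a module that never did `import Base: show`
# defines an unrelated local function that the printing machinery will never call.
```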
--- Compiler/src/Compiler.jl | 10 +--------- Compiler/src/ssair/show.jl | 41 +++++++++++++++++++++----------------- base/show.jl | 13 ++---------- 3 files changed, 26 insertions(+), 38 deletions(-) diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index dd96d8e449564..d454a4853a228 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -169,15 +169,7 @@ include("reflection_interface.jl") if isdefined(Base, :IRShow) @eval module IRShow - import ..Compiler - using Core.IR - using ..Base - import .Compiler: IRCode, CFG, scan_ssa_use!, - isexpr, compute_basic_blocks, block_for_inst, IncrementalCompact, - Effects, ALWAYS_TRUE, ALWAYS_FALSE, DebugInfoStream, getdebugidx, - VarState, InvalidIRError, argextype, widenconst, singleton_type, - sptypes_from_meth_instance, EMPTY_SPTYPES, InferenceState, - NativeInterpreter, CachedMethodTable, LimitedAccuracy, Timings + using ..Compiler: Compiler # During bootstrap, Base will later include this into its own "IRShow module" Compiler.include(IRShow, "ssair/show.jl") end diff --git a/Compiler/src/ssair/show.jl b/Compiler/src/ssair/show.jl index a2212272ce3fc..6e4f2004e1a84 100644 --- a/Compiler/src/ssair/show.jl +++ b/Compiler/src/ssair/show.jl @@ -3,10 +3,17 @@ # This file is not loaded into `Core.Compiler` but rather loaded into the context of # `Base.IRShow` and thus does not participate in bootstrapping. -@nospecialize +using Base, Core.IR + +import Base: show +using Base: isexpr, prec_decl, show_unquoted, with_output_color +using .Compiler: ALWAYS_FALSE, ALWAYS_TRUE, argextype, BasicBlock, block_for_inst, + CachedMethodTable, CFG, compute_basic_blocks, DebugInfoStream, Effects, + EMPTY_SPTYPES, getdebugidx, IncrementalCompact, InferenceResult, InferenceState, + InvalidIRError, IRCode, LimitedAccuracy, NativeInterpreter, scan_ssa_use!, + singleton_type, sptypes_from_meth_instance, StmtRange, Timings, VarState, widenconst -import Base: show_unquoted -using Base: printstyled, with_output_color, prec_decl, @invoke +@nospecialize function Base.show(io::IO, cfg::CFG) print(io, "CFG with $(length(cfg.blocks)) blocks:") @@ -497,7 +504,7 @@ function DILineInfoPrinter(debuginfo, def, showtypes::Bool=false) started::Bool = false if !update_line_only && showtypes && !isa(frame.method, Symbol) && nctx != 1 print(io, linestart) - Base.with_output_color(linecolor, io) do io + with_output_color(linecolor, io) do io print(io, indent("│")) print(io, "┌ invoke ", frame.method) println(io) @@ -505,7 +512,7 @@ function DILineInfoPrinter(debuginfo, def, showtypes::Bool=false) started = true end print(io, linestart) - Base.with_output_color(linecolor, io) do io + with_output_color(linecolor, io) do io print(io, indent("│")) push!(context, frame) if update_line_only @@ -914,7 +921,7 @@ function show_ir(io::IO, ir::IRCode, config::IRShowConfig=default_config(ir); pop_new_node! = new_nodes_iter(ir)) used = stmts_used(io, ir) cfg = ir.cfg - maxssaid = length(ir.stmts) + Compiler.length(ir.new_nodes) + maxssaid = length(ir.stmts) + length(ir.new_nodes) let io = IOContext(io, :maxssaid=>maxssaid) show_ir_stmts(io, ir, 1:length(ir.stmts), config, ir.sptypes, used, cfg, 1; pop_new_node!) 
end @@ -971,13 +978,13 @@ function show_ir(io::IO, compact::IncrementalCompact, config::IRShowConfig=defau still_to_be_inserted = (last(input_bb.stmts) - compact.idx) + count result_bb = result_bbs[compact.active_result_bb] - result_bbs[compact.active_result_bb] = Compiler.BasicBlock(result_bb, - Compiler.StmtRange(first(result_bb.stmts), compact.result_idx+still_to_be_inserted)) + result_bbs[compact.active_result_bb] = BasicBlock(result_bb, + StmtRange(first(result_bb.stmts), compact.result_idx+still_to_be_inserted)) end compact_cfg = CFG(result_bbs, Int[first(result_bbs[i].stmts) for i in 2:length(result_bbs)]) pop_new_node! = new_nodes_iter(compact) - maxssaid = length(compact.result) + Compiler.length(compact.new_new_nodes) + maxssaid = length(compact.result) + length(compact.new_new_nodes) bb_idx = let io = IOContext(io, :maxssaid=>maxssaid) show_ir_stmts(io, compact, 1:compact.result_idx-1, config, compact.ir.sptypes, used_compacted, compact_cfg, 1; pop_new_node!) @@ -998,8 +1005,8 @@ function show_ir(io::IO, compact::IncrementalCompact, config::IRShowConfig=defau inputs_bbs = copy(cfg.blocks) for (i, bb) in enumerate(inputs_bbs) if bb.stmts.stop < bb.stmts.start - inputs_bbs[i] = Compiler.BasicBlock(bb, - Compiler.StmtRange(last(bb.stmts), last(bb.stmts))) + inputs_bbs[i] = BasicBlock(bb, + StmtRange(last(bb.stmts), last(bb.stmts))) # this is not entirely correct, and will result in the bb starting again, # but is the best we can do without changing how `finish_current_bb!` works. end @@ -1007,7 +1014,7 @@ function show_ir(io::IO, compact::IncrementalCompact, config::IRShowConfig=defau uncompacted_cfg = CFG(inputs_bbs, Int[first(inputs_bbs[i].stmts) for i in 2:length(inputs_bbs)]) pop_new_node! = new_nodes_iter(compact.ir, compact.new_nodes_idx) - maxssaid = length(compact.ir.stmts) + Compiler.length(compact.ir.new_nodes) + maxssaid = length(compact.ir.stmts) + length(compact.ir.new_nodes) let io = IOContext(io, :maxssaid=>maxssaid) # first show any new nodes to be attached after the last compacted statement if compact.idx > 1 @@ -1071,13 +1078,12 @@ function Base.show(io::IO, e::Effects) print(io, ')') end - -function show(io::IO, inferred::Compiler.InferenceResult) +function Base.show(io::IO, inferred::InferenceResult) mi = inferred.linfo tt = mi.specTypes.parameters[2:end] tts = join(["::$(t)" for t in tt], ", ") rettype = inferred.result - if isa(rettype, Compiler.InferenceState) + if isa(rettype, InferenceState) rettype = rettype.bestguess end if isa(mi.def, Method) @@ -1102,9 +1108,8 @@ function Base.show(io::IO, limited::LimitedAccuracy) print(io, ", #= ", length(limited.causes), " cause(s) =#)") end - # These sometimes show up as Const-values in InferenceFrameInfo signatures -function show(io::IO, mi_info::Timings.InferenceFrameInfo) +function Base.show(io::IO, mi_info::Timings.InferenceFrameInfo) mi = mi_info.mi def = mi.def if isa(def, Method) @@ -1125,7 +1130,7 @@ function show(io::IO, mi_info::Timings.InferenceFrameInfo) end end -function show(io::IO, tinf::Timings.Timing) +function Base.show(io::IO, tinf::Timings.Timing) print(io, "Compiler.Timings.Timing(", tinf.mi_info, ") with ", length(tinf.children), " children") end diff --git a/base/show.jl b/base/show.jl index 8f305107d10f5..e332cf521addb 100644 --- a/base/show.jl +++ b/base/show.jl @@ -2822,23 +2822,14 @@ function show(io::IO, vm::Core.TypeofVararg) end module IRShow - import ..Compiler - using Core.IR - import ..Base - import .Compiler: IRCode, CFG, scan_ssa_use!, - isexpr, compute_basic_blocks, 
block_for_inst, IncrementalCompact, - Effects, ALWAYS_TRUE, ALWAYS_FALSE, DebugInfoStream, getdebugidx, - VarState, InvalidIRError, argextype, widenconst, singleton_type, - sptypes_from_meth_instance, EMPTY_SPTYPES, InferenceState, - NativeInterpreter, CachedMethodTable, LimitedAccuracy, Timings - + using ..Compiler: Compiler Base.include(IRShow, Base.strcat(Base.BUILDROOT, "../usr/share/julia/Compiler/src/ssair/show.jl")) const __debuginfo = Dict{Symbol, Any}( # :full => src -> statementidx_lineinfo_printer(src), # and add variable slot information :source => src -> statementidx_lineinfo_printer(src), # :oneliner => src -> statementidx_lineinfo_printer(PartialLineInfoPrinter, src), - :none => src -> Base.IRShow.lineinfo_disabled, + :none => src -> lineinfo_disabled, ) const default_debuginfo = Ref{Symbol}(:none) debuginfo(sym) = sym === :default ? default_debuginfo[] : sym From 072d9d1a4786a782f1abc1fc5ab00209ccb34471 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Thu, 14 Nov 2024 01:14:40 +0900 Subject: [PATCH 441/537] remove type piracy from `==` defined in `Compiler` (#56541) `Compiler.:(==)` is now identical to `Base.:(==)`, so the following `==` methods defined in typelattice.jl are considered type piracy: https://github.com/JuliaLang/julia/blob/1edc6f1b7752ed67059020ba7ce174dffa225954/Compiler/src/typelattice.jl#L194-L197 In fact, loading `Compiler` as a standard library with this code can sometimes result in errors like the following: ```julia julia> using Compiler julia> Int == Core.Const(1) ERROR: MethodError: ==(::Type{Int64}, ::Core.Const) is ambiguous. ... ``` Since these `==` definitions no longer seem necessary, this commit simply removes them to resolve the issue. @nanosoldier `runbenchmarks("inference", vs=":master")` --- Compiler/src/typelattice.jl | 5 ----- base/coreir.jl | 3 +-- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/Compiler/src/typelattice.jl b/Compiler/src/typelattice.jl index 2832edc9219ff..bd0d24167b75a 100644 --- a/Compiler/src/typelattice.jl +++ b/Compiler/src/typelattice.jl @@ -191,11 +191,6 @@ struct NotFound end const NOT_FOUND = NotFound() -const CompilerTypes = Union{Const, Conditional, MustAlias, NotFound, PartialStruct} -==(x::CompilerTypes, y::CompilerTypes) = x === y -==(x::Type, y::CompilerTypes) = false -==(x::CompilerTypes, y::Type) = false - ################# # lattice logic # ################# diff --git a/base/coreir.jl b/base/coreir.jl index a21eeceffe4c5..5199dfd35f028 100644 --- a/base/coreir.jl +++ b/base/coreir.jl @@ -45,8 +45,7 @@ Core.PartialStruct Similar to `Conditional`, but conveys inter-procedural constraints imposed on call arguments. This is separate from `Conditional` to catch logic errors: the lattice element name is `InterConditional` -while processing a call, then `Conditional` everywhere else. Thus `InterConditional` does not appear in -`CompilerTypes`—these type's usages are disjoint—though we define the lattice for `InterConditional`. +while processing a call, then `Conditional` everywhere else. 
""" Core.InterConditional From 286ece2e03a46c5d38a5de4ee8a8af36216a3894 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 14 Nov 2024 10:39:36 -0500 Subject: [PATCH 442/537] inference: infer_compilation_signatures for even more cases (#56552) Refs #56495 --- Compiler/src/abstractinterpretation.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index edeb5d805b3d5..53be649105636 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -207,14 +207,14 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), rettype = from_interprocedural!(interp, rettype, sv, arginfo, conditionals) # Also considering inferring the compilation signature for this method, so - # it is available to the compiler in case it ends up needing it for the invoke. + # it is available to the compiler, unless it should not end up needing it (for an invoke). if (isa(sv, InferenceState) && infer_compilation_signature(interp) && - (seenall && 1 == napplicable) && !is_removable_if_unused(all_effects)) + (seenall && 1 == napplicable) && (!is_removable_if_unused(all_effects) || !call_result_unused(si))) (; match) = applicable[1] method = match.method sig = match.spec_types mi = specialize_method(match; preexisting=true) - if mi !== nothing && !const_prop_methodinstance_heuristic(interp, mi, arginfo, sv) + if mi === nothing || !const_prop_methodinstance_heuristic(interp, mi, arginfo, sv) csig = get_compileable_sig(method, sig, match.sparams) if csig !== nothing && csig !== sig abstract_call_method(interp, method, csig, match.sparams, multiple_matches, StmtInfo(false), sv)::Future From 1e063d8e75b708e61afa6a011b0719172f505c34 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 15 Nov 2024 00:47:31 +0900 Subject: [PATCH 443/537] allow the Compiler.jl stdlib to be installed on older version of Julia (#56553) Since JuliaLang/julia#56409, Compiler.jl as a standard library has become available. However, for Julia versions prior to this change, even though the stdlib can be installed via Pkg.jl, the precompilation fails due to code compatibility issues. Consequently, when an external package that uses the Compiler stdlib adds Compiler.jl to its Project.toml, the package would stop working on older Julia versions. To address this, this commit adopts the same approach as JET.jl. Specifically, on older Julia versions, a dummy `Compiler` module is defined, allowing dependent packages to switch between using the Compiler.jl stdlib or the previous `Core.Compiler`. While this is a somewhat hacky solution, it should resolve the issue for now. Also includes a change to include `ssair/show.jl` in the context of `Compiler` to ensure that stale precompilation caches are not used. And as a result this commit bumps the version of the Compiler.jl standard library. 
--- Compiler/Project.toml | 2 +- Compiler/src/Compiler.jl | 29 +++++++++++++++++++++-------- Compiler/src/ssair/show.jl | 14 ++++++++++++-- base/show.jl | 15 ++------------- 4 files changed, 36 insertions(+), 24 deletions(-) diff --git a/Compiler/Project.toml b/Compiler/Project.toml index b933d08db5205..19ba8f7529c1a 100644 --- a/Compiler/Project.toml +++ b/Compiler/Project.toml @@ -1,3 +1,3 @@ name = "Compiler" uuid = "807dbc54-b67e-4c79-8afb-eafe4df6f2e1" -version = "0.0.1" +version = "0.0.2" diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index d454a4853a228..376721da46783 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -1,15 +1,28 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +if isdefined(Base, :end_base_include) && !isdefined(Base, :Compiler) + +# Define a dummy `Compiler` module to make it installable even on Julia versions where +# Compiler.jl is not available as a standard library. +@eval module Compiler + function __init__() + println(""" + The `Compiler` standard library is not available for this version of Julia. + Use Julia version `v"1.12.0-DEV.1581"` or later. + """) + end +end + # When generating an incremental precompile file, we first check whether we # already have a copy of this *exact* code in the system image. If so, we # simply generates a pkgimage that has the dependency edges we recorded in # the system image and simply returns that copy of the compiler. If not, # we proceed to load/precompile this as an ordinary package. -if isdefined(Base, :generating_output) && Base.generating_output(true) && +elseif (isdefined(Base, :generating_output) && Base.generating_output(true) && Base.samefile(joinpath(Sys.BINDIR, Base.DATAROOTDIR, Base._compiler_require_dependencies[1][2]), @eval @__FILE__) && !Base.any_includes_stale( map(Base.compiler_chi, Base._compiler_require_dependencies), - "sysimg", nothing) + "sysimg", nothing)) Base.prepare_compiler_stub_image!() append!(Base._require_dependencies, map(Base.expand_compiler_path, Base._compiler_require_dependencies)) @@ -167,12 +180,12 @@ include("optimize.jl") include("bootstrap.jl") include("reflection_interface.jl") -if isdefined(Base, :IRShow) - @eval module IRShow - using ..Compiler: Compiler - # During bootstrap, Base will later include this into its own "IRShow module" - Compiler.include(IRShow, "ssair/show.jl") - end +module IRShow end +if !isdefined(Base, :end_base_include) + # During bootstrap, skip including this file and defer it to base/show.jl to include later +else + # When this module is loaded as the standard library, include this file as usual + include(IRShow, "ssair/show.jl") end end # baremodule Compiler diff --git a/Compiler/src/ssair/show.jl b/Compiler/src/ssair/show.jl index 6e4f2004e1a84..b9ed220d59453 100644 --- a/Compiler/src/ssair/show.jl +++ b/Compiler/src/ssair/show.jl @@ -1,7 +1,8 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -# This file is not loaded into `Core.Compiler` but rather loaded into the context of -# `Base.IRShow` and thus does not participate in bootstrapping. +# This file does not participate in bootstrapping, but is included in the system image by +# being loaded from `base/show.jl`. Compiler.jl as the standard library will simply include +# this file in the context of `Compiler.IRShow`. 
using Base, Core.IR @@ -1135,3 +1136,12 @@ function Base.show(io::IO, tinf::Timings.Timing) end @specialize + +const __debuginfo = Dict{Symbol, Any}( + # :full => src -> statementidx_lineinfo_printer(src), # and add variable slot information + :source => src -> statementidx_lineinfo_printer(src), + # :oneliner => src -> statementidx_lineinfo_printer(PartialLineInfoPrinter, src), + :none => src -> lineinfo_disabled, + ) +const default_debuginfo = Ref{Symbol}(:none) +debuginfo(sym) = sym === :default ? default_debuginfo[] : sym diff --git a/base/show.jl b/base/show.jl index e332cf521addb..e6c2367e438b3 100644 --- a/base/show.jl +++ b/base/show.jl @@ -2821,19 +2821,8 @@ function show(io::IO, vm::Core.TypeofVararg) end end -module IRShow - using ..Compiler: Compiler - Base.include(IRShow, Base.strcat(Base.BUILDROOT, "../usr/share/julia/Compiler/src/ssair/show.jl")) - - const __debuginfo = Dict{Symbol, Any}( - # :full => src -> statementidx_lineinfo_printer(src), # and add variable slot information - :source => src -> statementidx_lineinfo_printer(src), - # :oneliner => src -> statementidx_lineinfo_printer(PartialLineInfoPrinter, src), - :none => src -> lineinfo_disabled, - ) - const default_debuginfo = Ref{Symbol}(:none) - debuginfo(sym) = sym === :default ? default_debuginfo[] : sym -end +Compiler.include(Compiler.IRShow, "ssair/show.jl") # define `show` for the compiler types +const IRShow = Compiler.IRShow # an alias for compatibility function show(io::IO, src::CodeInfo; debuginfo::Symbol=:source) # Fix slot names and types in function body From a32dba5492420d7484820b36ecece017b5a17b99 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 14 Nov 2024 10:53:48 -0500 Subject: [PATCH 444/537] compiler: fix several more specialization mistake introduced by #40985 (#56547) Refs #56404 --- Compiler/src/abstractinterpretation.jl | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index 53be649105636..b8390720df4e5 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -2199,9 +2199,13 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt env = tienv[2]::SimpleVector mresult = abstract_call_method(interp, method, ti, env, false, si, sv)::Future match = MethodMatch(ti, env, method, argtype <: method.sig) + ft_box = Core.Box(ft) + ft′_box = Core.Box(ft′) return Future{CallMeta}(mresult, interp, sv) do result, interp, sv (; rt, exct, effects, edge, volatile_inf_result) = result - res = nothing + local argtypes = arginfo.argtypes + local ft = ft_box.contents + local ft′ = ft′_box.contents sig = match.spec_types argtypes′ = invoke_rewrite(argtypes) fargs = arginfo.fargs From cf3649701195ccf980404ac9b4e6aa0d776fea3b Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 14 Nov 2024 10:59:15 -0500 Subject: [PATCH 445/537] inference: complete the inference even for recursive cycles (#56551) We care more and more now that our inference graph exactly matches the callgraph, even in cases like this where we can easily prove that the inference graph is simpler than the full callgraph. However, given when the optimizer runs, it expects this information to be available and valid as soon as the cycles are completed. 
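To make the affected pattern concrete: the early return deleted here fired for self-recursive calls whose result is unused. A minimal sketch of such a call follows (illustrative only and not taken from the patch; the function name is made up).

```julia
# The recursive call's value is discarded, so the call graph has a self-cycle even
# though inference previously skipped recording the corresponding edge
# (the RECURSION_UNUSED_MSG path removed below).
function countdown(n::Int)
    if n > 0
        countdown(n - 1)  # result unused at this call site
    end
    return nothing
end
```
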
--- Compiler/src/abstractinterpretation.jl | 8 -------- 1 file changed, 8 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index b8390720df4e5..67e7ecfbce66a 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -583,14 +583,6 @@ function abstract_call_method(interp::AbstractInterpreter, if infmi.specTypes::Type == sig::Type # avoid widening when detecting self-recursion # TODO: merge call cycle and return right away - if call_result_unused(si) - add_remark!(interp, sv, RECURSION_UNUSED_MSG) - # since we don't use the result (typically), - # we have a self-cycle in the call-graph, but not in the inference graph (typically): - # break this edge now (before we record it) by returning early - # (non-typically, this means that we lose the ability to detect a guaranteed StackOverflow in some cases) - return Future(MethodCallResult(Any, Any, Effects(), nothing, true, true)) - end topmost = nothing edgecycle = true break From 2e7e3edae7459243e38c2b23941b5177c5c9ccfc Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 15 Nov 2024 03:16:13 +0900 Subject: [PATCH 446/537] add LICENSE.md and `[compat]` entry for the Compiler.jl stdlib (#56559) As requested [by JuliaRegistrator](https://github.com/JuliaRegistries/General/pull/119404#issuecomment-2476778171). --- Compiler/LICENSE.md | 26 ++++++++++++++++++++++++++ Compiler/Project.toml | 3 +++ 2 files changed, 29 insertions(+) create mode 100644 Compiler/LICENSE.md diff --git a/Compiler/LICENSE.md b/Compiler/LICENSE.md new file mode 100644 index 0000000000000..028a39923ef04 --- /dev/null +++ b/Compiler/LICENSE.md @@ -0,0 +1,26 @@ +MIT License + +Copyright (c) 2009-2024: Jeff Bezanson, Stefan Karpinski, Viral B. Shah, and other contributors: https://github.com/JuliaLang/julia/contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +end of terms and conditions + +Please see [THIRDPARTY.md](../THIRDPARTY.md) for license information for other software used in this project. 
diff --git a/Compiler/Project.toml b/Compiler/Project.toml index 19ba8f7529c1a..9cb85fe7d05de 100644 --- a/Compiler/Project.toml +++ b/Compiler/Project.toml @@ -1,3 +1,6 @@ name = "Compiler" uuid = "807dbc54-b67e-4c79-8afb-eafe4df6f2e1" version = "0.0.2" + +[compat] +julia = "1.10" From d99d569f18245f4095de75c9b852094ba8f24a69 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 14 Nov 2024 14:19:28 -0500 Subject: [PATCH 447/537] inference: Don't try to infer optimized opaque_closure (#56557) We don't have frontend syntax for it, but there is a use case for having `:new_opaque_closure` take an OC constructed from an optimized OpaqueClosure (and just replacing the capture environment). In this case, there is nothing inference can do to introspect into the opaque closure, so it just needs to bail out early. --- Compiler/src/abstractinterpretation.jl | 8 ++++++++ Compiler/src/stmtinfo.jl | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index 67e7ecfbce66a..faaba0c2dc44f 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -2633,6 +2633,14 @@ function abstract_call_opaque_closure(interp::AbstractInterpreter, ocsig = rewrap_unionall(Tuple{Tuple, ocargsig′.parameters...}, ocargsig) hasintersect(sig, ocsig) || return Future(CallMeta(Union{}, Union{MethodError,TypeError}, EFFECTS_THROWS, NoCallInfo())) ocmethod = closure.source::Method + if !isdefined(ocmethod, :source) + # This opaque closure was created from optimized source. We cannot infer it further. + ocrt = rewrap_unionall((unwrap_unionall(tt)::DataType).parameters[2], tt) + if isa(ocrt, DataType) + return Future(CallMeta(ocrt, Any, Effects(), NoCallInfo())) + end + return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) + end match = MethodMatch(sig, Core.svec(), ocmethod, sig <: ocsig) mresult = abstract_call_method(interp, ocmethod, sig, Core.svec(), false, si, sv) ocsig_box = Core.Box(ocsig) diff --git a/Compiler/src/stmtinfo.jl b/Compiler/src/stmtinfo.jl index 4cbd2ab39fd46..83d0b66e4d564 100644 --- a/Compiler/src/stmtinfo.jl +++ b/Compiler/src/stmtinfo.jl @@ -352,7 +352,7 @@ allow the optimizer to rewrite the return type parameter of the `OpaqueClosure` struct OpaqueClosureCreateInfo <: CallInfo unspec::CallMeta function OpaqueClosureCreateInfo(unspec::CallMeta) - @assert isa(unspec.info, OpaqueClosureCallInfo) + @assert isa(unspec.info, Union{OpaqueClosureCallInfo, NoCallInfo}) return new(unspec) end end From 100e305b9fd0fa6f05795bdad7f46838eff36f97 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 14 Nov 2024 15:57:42 -0500 Subject: [PATCH 448/537] prevent deadlock when releasing the jl_unique_gcsafe_lock causes gc (#56563) Caught this by running threads test repeatedly locally: the sweep needs to acquire engine_lock, so we need to make sure to release that first (the other jl_unique_gcsafe_lock users shouldn't care about this ordering since they don't acquire their locks during sweeping) --- src/julia_locks.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/julia_locks.h b/src/julia_locks.h index 4d1345177f965..35bcf7dd97322 100644 --- a/src/julia_locks.h +++ b/src/julia_locks.h @@ -115,7 +115,7 @@ class jl_unique_gcsafe_lock { explicit jl_unique_gcsafe_lock(std::mutex &native) JL_NOTSAFEPOINT_ENTER { jl_task_t *ct = jl_current_task; - gc_state = jl_gc_safe_enter(ct->ptls); + gc_state = jl_gc_safe_enter(ct->ptls); // contains jl_gc_safepoint after enter 
this->native = std::unique_lock(native); ct->ptls->engine_nqueued++; // disables finalizers until inference is finished on this method graph } @@ -123,7 +123,8 @@ class jl_unique_gcsafe_lock { jl_unique_gcsafe_lock(jl_unique_gcsafe_lock &native) = delete; ~jl_unique_gcsafe_lock() JL_NOTSAFEPOINT_LEAVE { jl_task_t *ct = jl_current_task; - jl_gc_safe_leave(ct->ptls, gc_state); + native.unlock(); + jl_gc_safe_leave(ct->ptls, gc_state); // contains jl_gc_safepoint after leave ct->ptls->engine_nqueued--; // enable finalizers (but don't run them until the next gc) } void wait(std::condition_variable& cond) JL_NOTSAFEPOINT { From 2d9a2ee3c71a60d68386e3172f8f86adb5676fa0 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 15 Nov 2024 15:08:47 +0900 Subject: [PATCH 449/537] optimize `abstract_invoke` (#56560) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - removed unnecessary `Core.Box` allocation - made the type of the closure that is passed to `Future` concrete That said, it doesn’t seem ideal to require this sort of manual optimizations.. The value of using closures cannot be denied in this code base, and I feel that it would be better to work towards optimizing closures more (as we do with JuliaLang/julia#56532)? --- Compiler/src/abstractinterpretation.jl | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index faaba0c2dc44f..093c5889f809e 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -2182,27 +2182,26 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt lookupsig = rewrap_unionall(Tuple{ft, unwrapped.parameters...}, types)::Type nargtype = Tuple{ft, nargtype.parameters...} argtype = Tuple{ft, argtype.parameters...} - match, valid_worlds = findsup(lookupsig, method_table(interp)) - match === nothing && return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) + matched, valid_worlds = findsup(lookupsig, method_table(interp)) + matched === nothing && return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) update_valid_age!(sv, valid_worlds) - method = match.method + method = matched.method tienv = ccall(:jl_type_intersection_with_env, Any, (Any, Any), nargtype, method.sig)::SimpleVector ti = tienv[1] env = tienv[2]::SimpleVector mresult = abstract_call_method(interp, method, ti, env, false, si, sv)::Future match = MethodMatch(ti, env, method, argtype <: method.sig) - ft_box = Core.Box(ft) ft′_box = Core.Box(ft′) + lookupsig_box = Core.Box(lookupsig) + invokecall = InvokeCall(types, lookupsig) return Future{CallMeta}(mresult, interp, sv) do result, interp, sv (; rt, exct, effects, edge, volatile_inf_result) = result - local argtypes = arginfo.argtypes - local ft = ft_box.contents local ft′ = ft′_box.contents sig = match.spec_types - argtypes′ = invoke_rewrite(argtypes) + argtypes′ = invoke_rewrite(arginfo.argtypes) fargs = arginfo.fargs fargs′ = fargs === nothing ? 
nothing : invoke_rewrite(fargs) - arginfo = ArgInfo(fargs′, argtypes′) + arginfo′ = ArgInfo(fargs′, argtypes′) # # typeintersect might have narrowed signature, but the accuracy gain doesn't seem worth the cost involved with the lattice comparisons # for i in 1:length(argtypes′) # t, a = ti.parameters[i], argtypes′[i] @@ -2211,9 +2210,8 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt 𝕃ₚ = ipo_lattice(interp) ⊑, ⋤, ⊔ = partialorder(𝕃ₚ), strictneqpartialorder(𝕃ₚ), join(𝕃ₚ) f = singleton_type(ft′) - invokecall = InvokeCall(types, lookupsig) const_call_result = abstract_call_method_with_const_args(interp, - result, f, arginfo, si, match, sv, invokecall) + result, f, arginfo′, si, match, sv, invokecall) const_result = volatile_inf_result if const_call_result !== nothing const_edge = nothing @@ -2227,8 +2225,8 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt edge = const_edge end end - rt = from_interprocedural!(interp, rt, sv, arginfo, sig) - info = InvokeCallInfo(edge, match, const_result, lookupsig) + rt = from_interprocedural!(interp, rt, sv, arginfo′, sig) + info = InvokeCallInfo(edge, match, const_result, lookupsig_box.contents) if !match.fully_covers effects = Effects(effects; nothrow=false) exct = exct ⊔ TypeError From e5f30108a30f8af87263f6f81ae0cc3d34ed4340 Mon Sep 17 00:00:00 2001 From: Denis Barucic Date: Fri, 15 Nov 2024 08:13:06 +0100 Subject: [PATCH 450/537] Sockets: fix `getipaddr()` (#56528) --- stdlib/Sockets/src/addrinfo.jl | 10 ++++------ stdlib/Sockets/test/runtests.jl | 25 ++++++++++++++++++++----- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/stdlib/Sockets/src/addrinfo.jl b/stdlib/Sockets/src/addrinfo.jl index 866a1684c85a1..93194b85d4e8c 100644 --- a/stdlib/Sockets/src/addrinfo.jl +++ b/stdlib/Sockets/src/addrinfo.jl @@ -282,16 +282,14 @@ See also [`getipaddrs`](@ref). """ function getipaddr(addr_type::Type{T}) where T<:IPAddr addrs = getipaddrs(addr_type) + isempty(addrs) && error("No networking interface available") - if length(addrs) == 0 - error("No networking interface available") - end - - # Prefer the first IPv4 address + # When `addr_type` is `IPAddr`, `addrs` contain IP addresses of all types + # In that case, we prefer to return the first IPv4 i = something(findfirst(ip -> ip isa IPv4, addrs), 1) return addrs[i] end -getipaddr() = getipaddr(IPv4) +getipaddr() = getipaddr(IPAddr) """ diff --git a/stdlib/Sockets/test/runtests.jl b/stdlib/Sockets/test/runtests.jl index 669237acccb0a..26f95d4ce1819 100644 --- a/stdlib/Sockets/test/runtests.jl +++ b/stdlib/Sockets/test/runtests.jl @@ -639,11 +639,26 @@ end @testset "getipaddrs" begin @test getipaddr() in getipaddrs() - try - getipaddr(IPv6) in getipaddrs(IPv6) - catch - if !isempty(getipaddrs(IPv6)) - @test "getipaddr(IPv6) errored when it shouldn't have!" 
+ + has_ipv4 = !isempty(getipaddrs(IPv4)) + if has_ipv4 + @test getipaddr(IPv4) in getipaddrs(IPv4) + else + @test_throws "No networking interface available" getipaddr(IPv4) + end + + has_ipv6 = !isempty(getipaddrs(IPv6)) + if has_ipv6 + @test getipaddr(IPv6) in getipaddrs(IPv6) + else + @test_throws "No networking interface available" getipaddr(IPv6) + end + + @testset "getipaddr() prefers IPv4 over IPv6" begin + if has_ipv4 + @test getipaddr() isa IPv4 + else + @test getipaddr() isa IPv6 end end From 55bdb54dd2e1b0ea6df017f74f9af1de7abb8038 Mon Sep 17 00:00:00 2001 From: James Wrigley Date: Fri, 15 Nov 2024 12:14:14 +0100 Subject: [PATCH 451/537] Delete unnecessary methods in OffsetArrays.jl (#56564) These are all defined elsewhere in Base: ```julia WARNING: Method definition fill(Any, Tuple{Vararg{Union{Integer, Base.AbstractUnitRange{T} where T}, N}}) where {N} in module Base at array.jl:542 overwritten in module OffsetArrays at /opt/hostedtoolcache/julia/nightly/x64/share/julia/test/testhelpers/OffsetArrays.jl:574. WARNING: Method definition zeros(Type{T}, Tuple{Vararg{Union{Integer, Base.AbstractUnitRange{T} where T}, N}}) where {T, N} in module Base at array.jl:603 overwritten in module OffsetArrays at /opt/hostedtoolcache/julia/nightly/x64/share/julia/test/testhelpers/OffsetArrays.jl:576. WARNING: Method definition ones(Type{T}, Tuple{Vararg{Union{Integer, Base.AbstractUnitRange{T} where T}, N}}) where {T, N} in module Base at array.jl:603 overwritten in module OffsetArrays at /opt/hostedtoolcache/julia/nightly/x64/share/julia/test/testhelpers/OffsetArrays.jl:578. WARNING: Method definition trues(Tuple{Vararg{Union{Integer, Base.AbstractUnitRange{T} where T}, N}}) where {N} in module Base at bitarray.jl:426 overwritten in module OffsetArrays at /opt/hostedtoolcache/julia/nightly/x64/share/julia/test/testhelpers/OffsetArrays.jl:580. WARNING: Method definition falses(Tuple{Vararg{Union{Integer, Base.AbstractUnitRange{T} where T}, N}}) where {N} in module Base at bitarray.jl:407 overwritten in module OffsetArrays at /opt/hostedtoolcache/julia/nightly/x64/share/julia/test/testhelpers/OffsetArrays.jl:582. 
``` (similar to #56414) --- test/testhelpers/OffsetArrays.jl | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/test/testhelpers/OffsetArrays.jl b/test/testhelpers/OffsetArrays.jl index 06e65f8928036..17b2d8c28680a 100644 --- a/test/testhelpers/OffsetArrays.jl +++ b/test/testhelpers/OffsetArrays.jl @@ -571,17 +571,6 @@ Base.reshape(A::OffsetArray, inds::Tuple{Colon}) = _reshape_nov(A, inds) # This is a stopgap solution Base.permutedims(v::OffsetVector) = reshape(v, (1, axes(v, 1))) -Base.fill(v, inds::NTuple{N, Union{Integer, AbstractUnitRange}}) where {N} = - fill!(similar(Array{typeof(v)}, inds), v) -Base.zeros(::Type{T}, inds::NTuple{N, Union{Integer, AbstractUnitRange}}) where {T, N} = - fill!(similar(Array{T}, inds), zero(T)) -Base.ones(::Type{T}, inds::NTuple{N, Union{Integer, AbstractUnitRange}}) where {T, N} = - fill!(similar(Array{T}, inds), one(T)) -Base.trues(inds::NTuple{N, Union{Integer, AbstractUnitRange}}) where {N} = - fill!(similar(BitArray, inds), true) -Base.falses(inds::NTuple{N, Union{Integer, AbstractUnitRange}}) where {N} = - fill!(similar(BitArray, inds), false) - Base.zero(A::OffsetArray) = parent_call(zero, A) Base.fill!(A::OffsetArray, x) = parent_call(Ap -> fill!(Ap, x), A) From 5ec321513c1788cafe5375c413e8f8cad57a3957 Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Fri, 15 Nov 2024 14:10:32 +0100 Subject: [PATCH 452/537] OpenBLAS: Source build fixes for RISC-V (#56556) The OpenBLAS build system supports both setting a dynamic architecture, and selecting a fallback Also ensure we build a 64-bit library for RISCV64. --- Make.inc | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/Make.inc b/Make.inc index 9f6535ae05885..29512bbbe7f45 100644 --- a/Make.inc +++ b/Make.inc @@ -28,13 +28,7 @@ BOOTSTRAP_DEBUG_LEVEL ?= 0 OPENBLAS_TARGET_ARCH:= OPENBLAS_SYMBOLSUFFIX:= OPENBLAS_LIBNAMESUFFIX:= - -# If OPENBLAS_TARGET_ARCH is set, we default to disabling OPENBLAS_DYNAMIC_ARCH -ifneq ($(OPENBLAS_TARGET_ARCH),) OPENBLAS_DYNAMIC_ARCH:=0 -else -OPENBLAS_DYNAMIC_ARCH:=1 -endif OPENBLAS_USE_THREAD:=1 # Flags for using libraries available on the system instead of building them. @@ -995,9 +989,15 @@ MTUNE=native endif endif +# If we are running on x86 or x86_64, set certain options automatically +ifeq (1,$(ISX86)) +OPENBLAS_DYNAMIC_ARCH:=1 +endif + # If we are running on powerpc64le or ppc64le, set certain options automatically ifneq (,$(filter $(ARCH), powerpc64le ppc64le)) JCFLAGS += -fsigned-char +OPENBLAS_DYNAMIC_ARCH:=1 OPENBLAS_TARGET_ARCH:=POWER8 BINARY:=64 # GCC doesn't do -march= on ppc64le @@ -1054,17 +1054,23 @@ endif # If we are running on ARM, set certain options automatically ifneq (,$(findstring arm,$(ARCH))) JCFLAGS += -fsigned-char -OPENBLAS_DYNAMIC_ARCH:=0 OPENBLAS_TARGET_ARCH:=ARMV7 BINARY:=32 endif # If we are running on aarch64 (e.g. 
ARMv8 or ARM64), set certain options automatically ifneq (,$(findstring aarch64,$(ARCH))) +OPENBLAS_DYNAMIC_ARCH:=1 OPENBLAS_TARGET_ARCH:=ARMV8 BINARY:=64 endif +# If we are running on riscv64, set certain options automatically +ifneq (,$(findstring riscv64,$(ARCH))) +OPENBLAS_DYNAMIC_ARCH:=1 +BINARY:=64 +endif + # Set MARCH-specific flags ifneq ($(MARCH),) CC += -march=$(MARCH) From caa2f7d52b430f50c8038a7f6766edba28a3fb65 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 15 Nov 2024 11:44:35 -0500 Subject: [PATCH 453/537] infer more completely everything that the optimizer/codegen requires (#56565) Inlining wants to know information about every isa_compileable_sig method as well as everything it might consider inlining (which is almost the same thing). So even if inference could bail on computing the type since it already reached the maximum fixed point, it should keep going to get that information. This now uses two loops here now: one to compute the inference types information, then a second loop go back and get coverage of all of the compileable targets (unless that particular target is predicted to be inlined or dropped later). (system image size contribution seems to be fairly negligible) --- Compiler/src/abstractinterpretation.jl | 148 ++++++++++++++----------- Compiler/src/inferencestate.jl | 10 +- Compiler/test/AbstractInterpreter.jl | 13 +-- Compiler/test/inference.jl | 2 +- 4 files changed, 94 insertions(+), 79 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index 093c5889f809e..f98b9336d97c0 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -51,14 +51,24 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), end (; valid_worlds, applicable, info) = matches - update_valid_age!(sv, valid_worlds) + update_valid_age!(sv, valid_worlds) # need to record the negative world now, since even if we don't generate any useful information, inlining might want to add an invoke edge and it won't have this information anymore + if bail_out_toplevel_call(interp, sv) + napplicable = length(applicable) + for i = 1:napplicable + sig = applicable[i].match.spec_types + if !isdispatchtuple(sig) + # only infer fully concrete call sites in top-level expressions (ignoring even isa_compileable_sig matches) + add_remark!(interp, sv, "Refusing to infer non-concrete call site in top-level expression") + return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) + end + end + end # final result gfresult = Future{CallMeta}() # intermediate work for computing gfresult rettype = exctype = Bottom conditionals = nothing # keeps refinement information of call argument types when the return type is boolean - seenall = true const_results = nothing # or const_results::Vector{Union{Nothing,ConstResult}} if any const results are available fargs = arginfo.fargs all_effects = EFFECTS_TOTAL @@ -69,16 +79,14 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), f = Core.Box(f) atype = Core.Box(atype) function infercalls(interp, sv) - napplicable = length(applicable) - multiple_matches = napplicable > 1 + local napplicable = length(applicable) + local multiple_matches = napplicable > 1 while i <= napplicable (; match, edges, edge_idx) = applicable[i] method = match.method sig = match.spec_types - if bail_out_toplevel_call(interp, InferenceLoopState(sig, rettype, all_effects), sv) - # only infer concrete call sites in top-level expressions - 
add_remark!(interp, sv, "Refusing to infer non-concrete call site in top-level expression") - seenall = false + if bail_out_call(interp, InferenceLoopState(rettype, all_effects), sv) + add_remark!(interp, sv, "Call inference reached maximally imprecise information: bailing on doing more abstract inference.") break end # TODO: this is unmaintained now as it didn't seem to improve things, though it does avoid hard-coding the union split at the higher level, @@ -162,17 +170,13 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), Any[Bottom for _ in 1:length(argtypes)] end for i = 1:length(argtypes) - cnd = conditional_argtype(𝕃ᵢ, this_conditional, sig, argtypes, i) + cnd = conditional_argtype(𝕃ᵢ, this_conditional, match.spec_types, argtypes, i) conditionals[1][i] = conditionals[1][i] ⊔ᵢ cnd.thentype conditionals[2][i] = conditionals[2][i] ⊔ᵢ cnd.elsetype end end edges[edge_idx] = edge - if i < napplicable && bail_out_call(interp, InferenceLoopState(sig, rettype, all_effects), sv) - add_remark!(interp, sv, "Call inference reached maximally imprecise information. Bailing on.") - seenall = false - i = napplicable # break in outer function - end + i += 1 return true end # function handle1 @@ -184,12 +188,12 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), end end # while - if const_results !== nothing - @assert napplicable == nmatches(info) == length(const_results) - info = ConstCallInfo(info, const_results) - end - - if seenall + seenall = i > napplicable + if seenall # small optimization to skip some work that is already implied + if const_results !== nothing + @assert napplicable == nmatches(info) == length(const_results) + info = ConstCallInfo(info, const_results) + end if !fully_covering(matches) || any_ambig(matches) # Account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. all_effects = Effects(all_effects; nothrow=false) @@ -198,51 +202,67 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), if sv isa InferenceState && fargs !== nothing slotrefinements = collect_slot_refinements(𝕃ᵢ, applicable, argtypes, fargs, sv) end + rettype = from_interprocedural!(interp, rettype, sv, arginfo, conditionals) + if call_result_unused(si) && !(rettype === Bottom) + add_remark!(interp, sv, "Call result type was widened because the return value is unused") + # We're mainly only here because the optimizer might want this code, + # but we ourselves locally don't typically care about it locally + # (beyond checking if it always throws). + # So avoid adding an edge, since we don't want to bother attempting + # to improve our result even if it does change (to always throw), + # and avoid keeping track of a more complex result type. + rettype = Any + end + # if from_interprocedural added any pclimitations to the set inherited from the arguments, + # some of those may be part of our cycles, so those can be deleted now + # TODO: and those might need to be deleted later too if the cycle grows to include them? + if isa(sv, InferenceState) + # TODO (#48913) implement a proper recursion handling for irinterp: + # This works just because currently the `:terminate` condition guarantees that + # irinterp doesn't fail into unresolved cycles, but it's not a good solution. + # We should revisit this once we have a better story for handling cycles in irinterp. 
+ if !isempty(sv.pclimitations) # remove self, if present + delete!(sv.pclimitations, sv) + for caller in callers_in_cycle(sv) + delete!(sv.pclimitations, caller) + end + end + end else # there is unanalyzed candidate, widen type and effects to the top rettype = exctype = Any all_effects = Effects() + const_results = nothing end - rettype = from_interprocedural!(interp, rettype, sv, arginfo, conditionals) - # Also considering inferring the compilation signature for this method, so - # it is available to the compiler, unless it should not end up needing it (for an invoke). - if (isa(sv, InferenceState) && infer_compilation_signature(interp) && - (seenall && 1 == napplicable) && (!is_removable_if_unused(all_effects) || !call_result_unused(si))) - (; match) = applicable[1] - method = match.method - sig = match.spec_types - mi = specialize_method(match; preexisting=true) - if mi === nothing || !const_prop_methodinstance_heuristic(interp, mi, arginfo, sv) - csig = get_compileable_sig(method, sig, match.sparams) - if csig !== nothing && csig !== sig - abstract_call_method(interp, method, csig, match.sparams, multiple_matches, StmtInfo(false), sv)::Future - end - end - end - - if call_result_unused(si) && !(rettype === Bottom) - add_remark!(interp, sv, "Call result type was widened because the return value is unused") - # We're mainly only here because the optimizer might want this code, - # but we ourselves locally don't typically care about it locally - # (beyond checking if it always throws). - # So avoid adding an edge, since we don't want to bother attempting - # to improve our result even if it does change (to always throw), - # and avoid keeping track of a more complex result type. - rettype = Any - end - if isa(sv, InferenceState) - # TODO (#48913) implement a proper recursion handling for irinterp: - # This works just because currently the `:terminate` condition guarantees that - # irinterp doesn't fail into unresolved cycles, but it's not a good solution. - # We should revisit this once we have a better story for handling cycles in irinterp. - if !isempty(sv.pclimitations) # remove self, if present - delete!(sv.pclimitations, sv) - for caller in callers_in_cycle(sv) - delete!(sv.pclimitations, caller) + # it is available to the compiler in case it ends up needing it for the invoke. 
+ if isa(sv, InferenceState) && infer_compilation_signature(interp) && (!is_removable_if_unused(all_effects) || !call_result_unused(si)) + i = 1 + function infercalls2(interp, sv) + local napplicable = length(applicable) + local multiple_matches = napplicable > 1 + while i <= napplicable + (; match, edges, edge_idx) = applicable[i] + i += 1 + method = match.method + sig = match.spec_types + mi = specialize_method(match; preexisting=true) + if mi === nothing || !const_prop_methodinstance_heuristic(interp, mi, arginfo, sv) + csig = get_compileable_sig(method, sig, match.sparams) + if csig !== nothing && (!seenall || csig !== sig) # corresponds to whether the first look already looked at this, so repeating abstract_call_method is not useful + sp_ = ccall(:jl_type_intersection_with_env, Any, (Any, Any), csig, method.sig)::SimpleVector + if match.sparams === sp_[2] + mresult = abstract_call_method(interp, method, csig, match.sparams, multiple_matches, StmtInfo(false), sv)::Future + isready(mresult) || return false # wait for mresult Future to resolve off the callstack before continuing + end + end + end end + return true end + # start making progress on the first call + infercalls2(interp, sv) || push!(sv.tasks, infercalls2) end gfresult[] = CallMeta(rettype, exctype, all_effects, info, slotrefinements) @@ -1787,6 +1807,14 @@ function abstract_apply(interp::AbstractInterpreter, argtypes::Vector{Any}, si:: i = 1 while i <= length(ctypes) ct = ctypes[i] + if bail_out_apply(interp, InferenceLoopState(res, all_effects), sv) + add_remark!(interp, sv, "_apply_iterate inference reached maximally imprecise information: bailing on analysis of more methods.") + # there is unanalyzed candidate, widen type and effects to the top + let retinfo = NoCallInfo() # NOTE this is necessary to prevent the inlining processing + applyresult[] = CallMeta(Any, Any, Effects(), retinfo) + return true + end + end lct = length(ct) # truncate argument list at the first Vararg for k = 1:lct-1 @@ -1808,14 +1836,6 @@ function abstract_apply(interp::AbstractInterpreter, argtypes::Vector{Any}, si:: res = tmerge(typeinf_lattice(interp), res, rt) exctype = tmerge(typeinf_lattice(interp), exctype, exct) all_effects = merge_effects(all_effects, effects) - if i < length(ctypes) && bail_out_apply(interp, InferenceLoopState(ctypes[i], res, all_effects), sv) - add_remark!(interp, sv, "_apply_iterate inference reached maximally imprecise information. 
Bailing on.") - # there is unanalyzed candidate, widen type and effects to the top - let retinfo = NoCallInfo() # NOTE this is necessary to prevent the inlining processing - applyresult[] = CallMeta(Any, Any, Effects(), retinfo) - return true - end - end end i += 1 end diff --git a/Compiler/src/inferencestate.jl b/Compiler/src/inferencestate.jl index fd421af733943..0ba37888b34d5 100644 --- a/Compiler/src/inferencestate.jl +++ b/Compiler/src/inferencestate.jl @@ -1032,17 +1032,15 @@ decode_statement_effects_override(sv::AbsIntState) = decode_statement_effects_override(get_curr_ssaflag(sv)) struct InferenceLoopState - sig rt effects::Effects - function InferenceLoopState(@nospecialize(sig), @nospecialize(rt), effects::Effects) - new(sig, rt, effects) + function InferenceLoopState(@nospecialize(rt), effects::Effects) + new(rt, effects) end end -bail_out_toplevel_call(::AbstractInterpreter, state::InferenceLoopState, sv::InferenceState) = - sv.restrict_abstract_call_sites && !isdispatchtuple(state.sig) -bail_out_toplevel_call(::AbstractInterpreter, ::InferenceLoopState, ::IRInterpretationState) = false +bail_out_toplevel_call(::AbstractInterpreter, sv::InferenceState) = sv.restrict_abstract_call_sites +bail_out_toplevel_call(::AbstractInterpreter, ::IRInterpretationState) = false bail_out_call(::AbstractInterpreter, state::InferenceLoopState, ::InferenceState) = state.rt === Any && !is_foldable(state.effects) diff --git a/Compiler/test/AbstractInterpreter.jl b/Compiler/test/AbstractInterpreter.jl index a49647ad4ea43..1939f4a19c05f 100644 --- a/Compiler/test/AbstractInterpreter.jl +++ b/Compiler/test/AbstractInterpreter.jl @@ -70,18 +70,15 @@ end |> !Core.Compiler.is_nonoverlayed # account for overlay possibility in unanalyzed matching method callstrange(::Float64) = strangesin(x) -callstrange(::Nothing) = Core.compilerbarrier(:type, nothing) # trigger inference bail out +callstrange(::Number) = Core.compilerbarrier(:type, nothing) # trigger inference bail out +callstrange(::Any) = 1.0 callstrange_entry(x) = callstrange(x) # needs to be defined here because of world age let interp = MTOverlayInterp(Set{Any}()) matches = Core.Compiler.findall(Tuple{typeof(callstrange),Any}, Core.Compiler.method_table(interp)) @test matches !== nothing - @test Core.Compiler.length(matches) == 2 - if Core.Compiler.getindex(matches, 1).method == which(callstrange, (Nothing,)) - @test Base.infer_effects(callstrange_entry, (Any,); interp) |> !Core.Compiler.is_nonoverlayed - @test "Call inference reached maximally imprecise information. Bailing on." in interp.meta - else - @warn "`nonoverlayed` test for inference bailing out is skipped since the method match sort order is changed." - end + @test Core.Compiler.length(matches) == 3 + @test Base.infer_effects(callstrange_entry, (Any,); interp) |> !Core.Compiler.is_nonoverlayed + @test "Call inference reached maximally imprecise information: bailing on doing more abstract inference." 
in interp.meta end # but it should never apply for the native compilation diff --git a/Compiler/test/inference.jl b/Compiler/test/inference.jl index 8a14774e2404f..e6bbf05caeabe 100644 --- a/Compiler/test/inference.jl +++ b/Compiler/test/inference.jl @@ -4114,7 +4114,7 @@ callsig_backprop_any(::Any) = nothing callsig_backprop_lhs(::Int) = nothing callsig_backprop_bailout(::Val{0}) = 0 callsig_backprop_bailout(::Val{1}) = undefvar # undefvar::Any triggers `bail_out_call` -callsig_backprop_bailout(::Val{2}) = 2 +callsig_backprop_bailout(::Val) = 2 callsig_backprop_addinteger(a::Integer, b::Integer) = a + b # results in too many matching methods and triggers `bail_out_call`) @test Base.infer_return_type(callsig_backprop_addinteger) == Any let effects = Base.infer_effects(callsig_backprop_addinteger) From 7fa26f011ec4fea616ad192eeaa8919f2cc17f97 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Sat, 16 Nov 2024 14:20:39 -0500 Subject: [PATCH 454/537] Move Compiler <-> OpaqueClosure interface code to Compiler (#56576) After the excision, it is no longer permissable for Base to have `Compiler` data structures in arguments of methods it defines. To comply with this restriction, move the functions for creating OpaqueClosures from IRCode to `Compiler`. --- Compiler/src/Compiler.jl | 1 + Compiler/src/opaque_closure.jl | 56 +++++++++++++++++++++++++++++++++ base/opaque_closure.jl | 57 ---------------------------------- 3 files changed, 57 insertions(+), 57 deletions(-) create mode 100644 Compiler/src/opaque_closure.jl diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index 376721da46783..4104b71093f4d 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -179,6 +179,7 @@ include("optimize.jl") include("bootstrap.jl") include("reflection_interface.jl") +include("opaque_closure.jl") module IRShow end if !isdefined(Base, :end_base_include) diff --git a/Compiler/src/opaque_closure.jl b/Compiler/src/opaque_closure.jl new file mode 100644 index 0000000000000..d0a375c2a54b5 --- /dev/null +++ b/Compiler/src/opaque_closure.jl @@ -0,0 +1,56 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +function compute_ir_rettype(ir::IRCode) + rt = Union{} + for i = 1:length(ir.stmts) + stmt = ir[SSAValue(i)][:stmt] + if isa(stmt, Core.ReturnNode) && isdefined(stmt, :val) + rt = Compiler.tmerge(Compiler.argextype(stmt.val, ir), rt) + end + end + return Compiler.widenconst(rt) +end + +function compute_oc_signature(ir::IRCode, nargs::Int, isva::Bool) + argtypes = Vector{Any}(undef, nargs) + for i = 1:nargs + argtypes[i] = Compiler.widenconst(ir.argtypes[i+1]) + end + if isva + lastarg = pop!(argtypes) + if lastarg <: Tuple + append!(argtypes, lastarg.parameters) + else + push!(argtypes, Vararg{Any}) + end + end + return Tuple{argtypes...} +end + +function Core.OpaqueClosure(ir::IRCode, @nospecialize env...; + isva::Bool = false, + slotnames::Union{Nothing,Vector{Symbol}}=nothing, + kwargs...) 
+ # NOTE: we need ir.argtypes[1] == typeof(env) + ir = Core.Compiler.copy(ir) + # if the user didn't specify a definition MethodInstance or filename Symbol to use for the debuginfo, set a filename now + ir.debuginfo.def === nothing && (ir.debuginfo.def = :var"generated IR for OpaqueClosure") + nargtypes = length(ir.argtypes) + nargs = nargtypes-1 + sig = compute_oc_signature(ir, nargs, isva) + rt = compute_ir_rettype(ir) + src = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ()) + if slotnames === nothing + src.slotnames = fill(:none, nargtypes) + else + length(slotnames) == nargtypes || error("mismatched `argtypes` and `slotnames`") + src.slotnames = slotnames + end + src.slotflags = fill(zero(UInt8), nargtypes) + src.slottypes = copy(ir.argtypes) + src.isva = isva + src.nargs = UInt(nargtypes) + src = ir_to_codeinf!(src, ir) + src.rettype = rt + return Base.Experimental.generate_opaque_closure(sig, Union{}, rt, src, nargs, isva, env...; kwargs...) +end diff --git a/base/opaque_closure.jl b/base/opaque_closure.jl index d7a91cff7d602..5e38c8523f4a8 100644 --- a/base/opaque_closure.jl +++ b/base/opaque_closure.jl @@ -39,63 +39,6 @@ end # OpaqueClosure construction from pre-inferred CodeInfo/IRCode using Core: CodeInfo, SSAValue -using Base: Compiler -using .Compiler: IRCode - -function compute_ir_rettype(ir::IRCode) - rt = Union{} - for i = 1:length(ir.stmts) - stmt = ir[SSAValue(i)][:stmt] - if isa(stmt, Core.ReturnNode) && isdefined(stmt, :val) - rt = Compiler.tmerge(Compiler.argextype(stmt.val, ir), rt) - end - end - return Compiler.widenconst(rt) -end - -function compute_oc_signature(ir::IRCode, nargs::Int, isva::Bool) - argtypes = Vector{Any}(undef, nargs) - for i = 1:nargs - argtypes[i] = Compiler.widenconst(ir.argtypes[i+1]) - end - if isva - lastarg = pop!(argtypes) - if lastarg <: Tuple - append!(argtypes, lastarg.parameters) - else - push!(argtypes, Vararg{Any}) - end - end - return Tuple{argtypes...} -end - -function Core.OpaqueClosure(ir::IRCode, @nospecialize env...; - isva::Bool = false, - slotnames::Union{Nothing,Vector{Symbol}}=nothing, - kwargs...) - # NOTE: we need ir.argtypes[1] == typeof(env) - ir = Core.Compiler.copy(ir) - # if the user didn't specify a definition MethodInstance or filename Symbol to use for the debuginfo, set a filename now - ir.debuginfo.def === nothing && (ir.debuginfo.def = :var"generated IR for OpaqueClosure") - nargtypes = length(ir.argtypes) - nargs = nargtypes-1 - sig = compute_oc_signature(ir, nargs, isva) - rt = compute_ir_rettype(ir) - src = ccall(:jl_new_code_info_uninit, Ref{CodeInfo}, ()) - if slotnames === nothing - src.slotnames = fill(:none, nargtypes) - else - length(slotnames) == nargtypes || error("mismatched `argtypes` and `slotnames`") - src.slotnames = slotnames - end - src.slotflags = fill(zero(UInt8), nargtypes) - src.slottypes = copy(ir.argtypes) - src.isva = isva - src.nargs = nargtypes - src = Core.Compiler.ir_to_codeinf!(src, ir) - src.rettype = rt - return generate_opaque_closure(sig, Union{}, rt, src, nargs, isva, env...; kwargs...) -end function Core.OpaqueClosure(src::CodeInfo, @nospecialize env...; rettype, sig, nargs, isva=false, kwargs...) return generate_opaque_closure(sig, Union{}, rettype, src, nargs, isva, env...; kwargs...) From e1cfa73e2ca068de61cf4f22e7ec24f6d7c8b40f Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 18 Nov 2024 03:56:36 -0500 Subject: [PATCH 455/537] Fix debug build against non-debug LLVM (#56590) The ::dump functions are conditionally defined in LLVM. 
We have a helper to work around this, so use it. --- src/aotcompile.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index ff14901c2e47f..583a8201587f7 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -1115,7 +1115,7 @@ static SmallVector partitionModule(Module &M, unsigned threads) { bool verified = verify_partitioning(partitions, M, fvars, gvars); if (!verified) - M.dump(); + llvm_dump(&M); assert(verified && "Partitioning failed to partition globals correctly"); (void) verified; From 37700519241c23555900ab9d1ec5c5f572974ec0 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Mon, 18 Nov 2024 19:46:01 +0900 Subject: [PATCH 456/537] EA: remove `_TOP_MOD` and just use `Base` (#56581) The reason we originally used `_TOP_MOD` was to make it possible to load `EscapeAnalysis.jl` from the `Main` context while developing EA. However, now that the Compiler stdlib allows the same thing to be done for the entire `Compiler` module including `EscapeAnalysis`, the trick on the EA side is no longer necessary. --- Compiler/src/ssair/EscapeAnalysis.jl | 16 ++++++++-------- Compiler/src/ssair/disjoint_set.jl | 12 ++++-------- doc/src/devdocs/EscapeAnalysis.md | 23 +++++++++-------------- 3 files changed, 21 insertions(+), 30 deletions(-) diff --git a/Compiler/src/ssair/EscapeAnalysis.jl b/Compiler/src/ssair/EscapeAnalysis.jl index 648d9f4621578..a8c450f5bb9e0 100644 --- a/Compiler/src/ssair/EscapeAnalysis.jl +++ b/Compiler/src/ssair/EscapeAnalysis.jl @@ -10,32 +10,32 @@ export has_thrown_escape, has_all_escape -const _TOP_MOD = ccall(:jl_base_relative_to, Any, (Any,), EscapeAnalysis)::Module +using Base: Base # imports -import ._TOP_MOD: ==, getindex, setindex! +import Base: ==, getindex, setindex! 
# usings using Core: MethodMatch, SimpleVector, ifelse, sizeof using Core.IR -using ._TOP_MOD: # Base definitions - @__MODULE__, @assert, @eval, @goto, @inbounds, @inline, @label, @noinline, @show, +using Base: # Base definitions + @__MODULE__, @assert, @eval, @goto, @inbounds, @inline, @label, @noinline, @nospecialize, @specialize, BitSet, Callable, Csize_t, IdDict, IdSet, UnitRange, Vector, copy, delete!, empty!, enumerate, error, first, get, get!, haskey, in, isassigned, isempty, ismutabletype, keys, last, length, max, min, missing, pop!, push!, pushfirst!, unwrap_unionall, !, !=, !==, &, *, +, -, :, <, <<, =>, >, |, ∈, ∉, ∩, ∪, ≠, ≤, ≥, ⊆, hasintersect -using ..Compiler: # Core.Compiler specific definitions +using ..Compiler: # Compiler specific definitions AbstractLattice, Bottom, IRCode, IR_FLAG_NOTHROW, InferenceResult, SimpleInferenceLattice, argextype, fieldcount_noerror, hasintersect, has_flag, intrinsic_nothrow, is_meta_expr_head, is_identity_free_argtype, isexpr, println, setfield!_nothrow, singleton_type, try_compute_field, try_compute_fieldidx, widenconst, ⊑, Compiler -function include(x) - if !isdefined(_TOP_MOD.Base, :end_base_include) +function include(x::String) + if !isdefined(Base, :end_base_include) # During bootstrap, all includes are relative to `base/` x = ccall(:jl_prepend_string, Ref{String}, (Any, Any), "ssair/", x) end - _TOP_MOD.include(@__MODULE__, x) + Compiler.include(@__MODULE__, x) end include("disjoint_set.jl") diff --git a/Compiler/src/ssair/disjoint_set.jl b/Compiler/src/ssair/disjoint_set.jl index 915bc214d7c3c..3f64fe643bd17 100644 --- a/Compiler/src/ssair/disjoint_set.jl +++ b/Compiler/src/ssair/disjoint_set.jl @@ -3,14 +3,9 @@ # under the MIT license: https://github.com/JuliaCollections/DataStructures.jl/blob/master/License.md # imports -import ._TOP_MOD: - length, - eltype, - union!, - push! +import Base: length, eltype, union!, push! # usings -import ._TOP_MOD: - OneTo, collect, zero, zeros, one, typemax +import Base: OneTo, collect, zero, zeros, one, typemax # Disjoint-Set @@ -27,7 +22,8 @@ import ._TOP_MOD: # ############################################################ -_intdisjointset_bounds_err_msg(T) = "the maximum number of elements in IntDisjointSet{$T} is $(typemax(T))" +_intdisjointset_bounds_err_msg(@nospecialize T) = + "the maximum number of elements in IntDisjointSet{$T} is $(typemax(T))" """ IntDisjointSet{T<:Integer}(n::Integer) diff --git a/doc/src/devdocs/EscapeAnalysis.md b/doc/src/devdocs/EscapeAnalysis.md index 815b9857f1674..484af9c2780f2 100644 --- a/doc/src/devdocs/EscapeAnalysis.md +++ b/doc/src/devdocs/EscapeAnalysis.md @@ -20,11 +20,8 @@ This escape analysis aims to: You can give a try to the escape analysis by loading the `EAUtils.jl` utility script that defines the convenience entries `code_escapes` and `@code_escapes` for testing and debugging purposes: ```@repl EAUtils +using Base.Compiler: EscapeAnalysis # or `using Compiler: EscapeAnalysis` to use the stdlib version let JULIA_DIR = normpath(Sys.BINDIR, "..", "share", "julia") - # load `EscapeAnalysis` module to define the core analysis code - include(normpath(JULIA_DIR, "Compiler", "src", "ssair", "EscapeAnalysis.jl")) - using .EscapeAnalysis - # load `EAUtils` module to define the utilities include(normpath(JULIA_DIR, "Compiler", "test", "EAUtils.jl")) using .EAUtils end @@ -37,19 +34,17 @@ Base.setindex!(x::SafeRef, v) = x.x = v; Base.isassigned(x::SafeRef) = true; get′(x) = isassigned(x) ? 
x[] : throw(x); -result = code_escapes((String,String,String,String)) do s1, s2, s3, s4 - r1 = Ref(s1) +result = code_escapes((Base.RefValue{String},String,String,)) do r1, s2, s3 r2 = Ref(s2) r3 = SafeRef(s3) try s1 = get′(r1) ret = sizeof(s1) catch err - global GV = err # will definitely escape `r1` + global GV = err # `r1` may escape end - s2 = get′(r2) # still `r2` doesn't escape fully - s3 = get′(r3) # still `r3` doesn't escape fully - s4 = sizeof(s4) # the argument `s4` doesn't escape here + s2 = get′(r2) # `r2` doesn't escape + s3 = get′(r3) # `r3` doesn't escape return s2, s3, s4 end ``` @@ -105,10 +100,10 @@ One distinctive design of this escape analysis is that it is fully _backward_, i.e. escape information flows _from usages to definitions_. For example, in the code snippet below, EA first analyzes the statement `return %1` and imposes `ReturnEscape` on `%1` (corresponding to `obj`), and then it analyzes -`%1 = %new(Base.RefValue{String, _2}))` and propagates the `ReturnEscape` imposed on `%1` -to the call argument `_2` (corresponding to `s`): +`%1 = %new(Base.RefValue{Base.RefValue{String}, _2}))` and propagates the `ReturnEscape` +imposed on `%1` to the call argument `_2` (corresponding to `s`): ```@repl EAUtils -code_escapes((String,)) do s +code_escapes((Base.RefValue{String},)) do s obj = Ref(s) return obj end @@ -120,7 +115,7 @@ As a result this scheme enables a simple implementation of escape analysis, e.g. `PhiNode` for example can be handled simply by propagating escape information imposed on a `PhiNode` to its predecessor values: ```@repl EAUtils -code_escapes((Bool, String, String)) do cnd, s, t +code_escapes((Bool, Base.RefValue{String}, Base.RefValue{String})) do cnd, s, t if cnd obj = Ref(s) else From 09906659e0baad10ee9566e6ac963a2857f89810 Mon Sep 17 00:00:00 2001 From: Paul Annesley Date: Mon, 18 Nov 2024 23:44:51 +1030 Subject: [PATCH 457/537] Buildkite Test Engine: fix JSON comma separate bug (#56588) --- test/buildkitetestjson.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/buildkitetestjson.jl b/test/buildkitetestjson.jl index 49c47e0d8f151..0d51cd3b18f8f 100644 --- a/test/buildkitetestjson.jl +++ b/test/buildkitetestjson.jl @@ -31,10 +31,10 @@ json_repr(io::IO, val::Integer; indent::Int=0) = print(io, val) json_repr(io::IO, val::Float64; indent::Int=0) = print(io, val) function json_repr(io::IO, val::AbstractVector; indent::Int=0) print(io, '[') - for elt in val + for i in eachindex(val) print(io, '\n', ' '^(indent + 2)) - json_repr(io, elt; indent=indent+2) - elt === last(val) || print(io, ',') + json_repr(io, val[i]; indent=indent+2) + i == lastindex(val) || print(io, ',') end print(io, '\n', ' '^indent, ']') end From 83ef112f63a127c76cbf797125a9ceb1d0f4d292 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 18 Nov 2024 10:39:38 -0500 Subject: [PATCH 458/537] staticdata edges: fix missing visiting clearing during cycle handling (#56574) --- src/staticdata_utils.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index 77e66c7459086..81c2e5cb18e32 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -974,7 +974,7 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t *minworld, size if (*maxworld != 0) jl_atomic_store_relaxed(&child->min_world, *minworld); jl_atomic_store_relaxed(&child->max_world, *maxworld); - void **bp = ptrhash_bp(visiting, codeinst); + void **bp = ptrhash_bp(visiting, child); assert(*bp == (char*)HT_NOTFOUND 
+ stack->len + 1); *bp = HT_NOTFOUND; if (_jl_debug_method_invalidation && *maxworld < current_world) { From 38908e900cd536fb227c9e394dcebb7777f052ee Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 18 Nov 2024 10:40:10 -0500 Subject: [PATCH 459/537] [GCChecker] add a few more know functions to lock list (#56573) I ran into issues with glibc locally without these being specified, and seems like trylock just works too, though perhaps that is just a lack of accuracy from the checker since we have very few of those. --- src/clangsa/GCChecker.cpp | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/src/clangsa/GCChecker.cpp b/src/clangsa/GCChecker.cpp index 830fe322a0a38..cac89a6761d01 100644 --- a/src/clangsa/GCChecker.cpp +++ b/src/clangsa/GCChecker.cpp @@ -779,21 +779,27 @@ bool GCChecker::isFDAnnotatedNotSafepoint(const clang::FunctionDecl *FD, const S static bool isMutexLock(StringRef name) { return name == "uv_mutex_lock" || - //name == "uv_mutex_trylock" || + name == "uv_mutex_trylock" || name == "pthread_mutex_lock" || - //name == "pthread_mutex_trylock" || + name == "pthread_mutex_trylock" || + name == "__gthread_mutex_lock" || + name == "__gthread_mutex_trylock" || + name == "__gthread_recursive_mutex_lock" || + name == "__gthread_recursive_mutex_trylock" || name == "pthread_spin_lock" || - //name == "pthread_spin_trylock" || + name == "pthread_spin_trylock" || name == "uv_rwlock_rdlock" || - //name == "uv_rwlock_tryrdlock" || + name == "uv_rwlock_tryrdlock" || name == "uv_rwlock_wrlock" || - //name == "uv_rwlock_trywrlock" || + name == "uv_rwlock_trywrlock" || false; } static bool isMutexUnlock(StringRef name) { return name == "uv_mutex_unlock" || name == "pthread_mutex_unlock" || + name == "__gthread_mutex_unlock" || + name == "__gthread_recursive_mutex_unlock" || name == "pthread_spin_unlock" || name == "uv_rwlock_rdunlock" || name == "uv_rwlock_wrunlock" || From fa880a730e347a3b33181ec73a9f484240b5e123 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 18 Nov 2024 10:40:58 -0500 Subject: [PATCH 460/537] threading deadlock: change jl_fptr_wait_for_compiled to actually compile code (#56571) The jl_fptr_wait_for_compiled state merely means it could compile that code, but in many circumstances, it will not actually compile that code until a long delay: when either all edges are satisfied or it is demanded to run immediately. The previous logic did not handle that possibility leading to deadlocks (possible even on one thread). 
A high rate of failure was shown on running the following CI test: $ ./julia -t 20 -q <specsigflags); // happens-before for subsequent read of fptr while (1) { jl_callptr_t initial_invoke = jl_atomic_load_acquire(&ci->invoke); // happens-before for subsequent read of fptr - while (initial_invoke == jl_fptr_wait_for_compiled_addr) { + if (initial_invoke == jl_fptr_wait_for_compiled_addr) { if (!waitcompile) { *invoke = NULL; *specptr = NULL; *specsigflags = 0b00; return; } - jl_cpu_pause(); - initial_invoke = jl_atomic_load_acquire(&ci->invoke); + jl_compile_codeinst(ci); + initial_invoke = jl_atomic_load_acquire(&ci->invoke); // happens-before for subsequent read of fptr } void *fptr = jl_atomic_load_relaxed(&ci->specptr.fptr); if (initial_invoke == NULL || fptr == NULL) { @@ -2759,14 +2759,14 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t jl_code_instance_t *unspec = jl_atomic_load_relaxed(&unspecmi->cache); jl_callptr_t unspec_invoke = NULL; if (unspec && (unspec_invoke = jl_atomic_load_acquire(&unspec->invoke))) { - jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, - (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, - 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL, NULL); - codeinst->rettype_const = unspec->rettype_const; uint8_t specsigflags; jl_callptr_t invoke; void *fptr; jl_read_codeinst_invoke(unspec, &specsigflags, &invoke, &fptr, 1); + jl_code_instance_t *codeinst = jl_new_codeinst(mi, jl_nothing, + (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, + 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL, NULL); + codeinst->rettype_const = unspec->rettype_const; jl_atomic_store_relaxed(&codeinst->specptr.fptr, fptr); jl_atomic_store_relaxed(&codeinst->invoke, invoke); // unspec is probably not specsig, but might be using specptr @@ -2864,14 +2864,14 @@ jl_code_instance_t *jl_compile_method_internal(jl_method_instance_t *mi, size_t jl_typeinf_timing_end(start, is_recompile); return ucache; } - codeinst = jl_new_codeinst(mi, jl_nothing, - (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, - 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL, NULL); - codeinst->rettype_const = ucache->rettype_const; uint8_t specsigflags; jl_callptr_t invoke; void *fptr; jl_read_codeinst_invoke(ucache, &specsigflags, &invoke, &fptr, 1); + codeinst = jl_new_codeinst(mi, jl_nothing, + (jl_value_t*)jl_any_type, (jl_value_t*)jl_any_type, NULL, NULL, + 0, 1, ~(size_t)0, 0, jl_nothing, 0, NULL, NULL); + codeinst->rettype_const = ucache->rettype_const; // unspec is always not specsig, but might use specptr jl_atomic_store_relaxed(&codeinst->specptr.fptr, fptr); jl_atomic_store_relaxed(&codeinst->invoke, invoke); @@ -2906,16 +2906,9 @@ jl_value_t *jl_fptr_sparam(jl_value_t *f, jl_value_t **args, uint32_t nargs, jl_ jl_value_t *jl_fptr_wait_for_compiled(jl_value_t *f, jl_value_t **args, uint32_t nargs, jl_code_instance_t *m) { - // This relies on the invariant that the JIT will set the invoke ptr immediately upon adding `m` to itself. - size_t nthreads = jl_atomic_load_relaxed(&jl_n_threads); - // This should only be possible if there's more than one thread. If not, either there's a bug somewhere - // that resulted in this not getting cleared, or we're about to deadlock. Either way, that's bad. 
- if (nthreads == 1) { - jl_error("Internal error: Reached jl_fptr_wait_for_compiled in single-threaded execution."); - } jl_callptr_t invoke = jl_atomic_load_acquire(&m->invoke); - while (invoke == &jl_fptr_wait_for_compiled) { - jl_cpu_pause(); + if (invoke == &jl_fptr_wait_for_compiled) { + jl_compile_codeinst(m); invoke = jl_atomic_load_acquire(&m->invoke); } return invoke(f, args, nargs, m); diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index c8d8356687dcf..03c919f57da3f 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -278,6 +278,7 @@ static int jl_analyze_workqueue(jl_code_instance_t *callee, jl_codegen_params_t // But it must be consistent with the following invokenames lookup, which is protected by the engine_lock uint8_t specsigflags; void *fptr; + void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_callptr_t *invoke, void **specptr, int waitcompile) JL_NOTSAFEPOINT; // not a safepoint (or deadlock) in this file due to 0 parameter jl_read_codeinst_invoke(codeinst, &specsigflags, &invoke, &fptr, 0); //if (specsig ? specsigflags & 0b1 : invoke == jl_fptr_args_addr) if (invoke == jl_fptr_args_addr) { @@ -697,13 +698,13 @@ static void recursive_compile_graph( // and adds the result to the jitlayers // (and the shadow module), // and generates code for it -static jl_callptr_t _jl_compile_codeinst( +static void _jl_compile_codeinst( jl_code_instance_t *codeinst, jl_code_info_t *src) { recursive_compile_graph(codeinst, src); jl_compile_codeinst_now(codeinst); - return jl_atomic_load_acquire(&codeinst->invoke); + assert(jl_is_compiled_codeinst(codeinst)); } @@ -819,7 +820,7 @@ extern "C" JL_DLLEXPORT_CODEGEN int jl_compile_codeinst_impl(jl_code_instance_t *ci) { int newly_compiled = 0; - if (jl_atomic_load_relaxed(&ci->invoke) == NULL) { + if (!jl_is_compiled_codeinst(ci)) { ++SpecFPtrCount; uint64_t start = jl_typeinf_timing_begin(); _jl_compile_codeinst(ci, NULL); @@ -862,7 +863,7 @@ void jl_generate_fptr_for_unspecialized_impl(jl_code_instance_t *unspec) if (src) { // TODO: first prepare recursive_compile_graph(unspec, src) before taking this lock to avoid recursion? 
JL_LOCK(&jitlock); // TODO: use a better lock - if (jl_atomic_load_relaxed(&unspec->invoke) == NULL) { + if (!jl_is_compiled_codeinst(unspec)) { assert(jl_is_code_info(src)); ++UnspecFPtrCount; jl_debuginfo_t *debuginfo = src->debuginfo; diff --git a/src/julia_internal.h b/src/julia_internal.h index a093cc5d21b14..aadcbfdc94038 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -677,7 +677,7 @@ JL_DLLEXPORT jl_code_instance_t *jl_get_method_inferred( jl_method_instance_t *mi JL_PROPAGATES_ROOT, jl_value_t *rettype, size_t min_world, size_t max_world, jl_debuginfo_t *di, jl_svec_t *edges); JL_DLLEXPORT jl_method_instance_t *jl_get_unspecialized(jl_method_t *def JL_PROPAGATES_ROOT); -JL_DLLEXPORT void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_callptr_t *invoke, void **specptr, int waitcompile) JL_NOTSAFEPOINT; +JL_DLLEXPORT void jl_read_codeinst_invoke(jl_code_instance_t *ci, uint8_t *specsigflags, jl_callptr_t *invoke, void **specptr, int waitcompile); JL_DLLEXPORT jl_method_instance_t *jl_method_match_to_mi(jl_method_match_t *match, size_t world, size_t min_valid, size_t max_valid, int mt_cache); JL_DLLEXPORT jl_code_instance_t *jl_new_codeinst_uninit(jl_method_instance_t *mi, jl_value_t *owner); From b6eeef20f33c02425e4e93e51e939e0b4c1397f4 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 18 Nov 2024 15:17:41 -0500 Subject: [PATCH 461/537] fix precompile(::MethodInstance) ccall signature (#56595) Prevents calling this method from triggering undefined behavior in C --- base/loading.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/loading.jl b/base/loading.jl index 79b4fb8cb9fcc..91be310cd5d17 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -4191,7 +4191,7 @@ end # Variants that work for `invoke`d calls for which the signature may not be sufficient precompile(mi::Core.MethodInstance, world::UInt=get_world_counter()) = - (ccall(:jl_compile_method_instance, Cvoid, (Any, Any, UInt), mi, C_NULL, world); return true) + (ccall(:jl_compile_method_instance, Cvoid, (Any, Ptr{Cvoid}, UInt), mi, C_NULL, world); return true) """ precompile(f, argtypes::Tuple{Vararg{Any}}, m::Method) From 0de916487973922a2de412e50e9de02703a0864f Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 18 Nov 2024 15:34:26 -0500 Subject: [PATCH 462/537] Don't throw an error in raw! if the stream is closed (#56589) This was added in #12568 to protect against a segfault after `close(stdin)`. However, the API is not great, because the stdin closing is an asynchronous event, so there isn't really any way to use this API without inccurring an error. Further, it already returns an error code of whether or not the action suceeded, and it's bad practice to have two ways for an operation to fail. Remove the error check and handle a closed stream gracefully returning an EOF error. In all users in Base, this EOF error is ignored, but we will gracefully check for EOF later and shut down the REPL, which is the desired behavior. 
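As a hypothetical illustration (this snippet is not part of the patch or its tests): with the input stream closed, `raw!` previously threw from `check_open`, whereas now it simply reports failure through its `Bool` return value, and the REPL shuts down gracefully once it subsequently reads EOF.

```julia
using REPL
# `term` stands in for the REPL's terminal object.
term = REPL.Terminals.TTYTerminal("dumb", stdin, stdout, stderr)
close(stdin)
REPL.Terminals.raw!(term, true)  # no longer throws; returns a Bool status
```
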
Fixes https://github.com/timholy/Revise.jl/issues/859 --- src/jl_uv.c | 2 ++ stdlib/REPL/src/Terminals.jl | 6 ++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/jl_uv.c b/src/jl_uv.c index 4da23a5937770..3498952622dce 100644 --- a/src/jl_uv.c +++ b/src/jl_uv.c @@ -1160,6 +1160,8 @@ JL_DLLEXPORT uv_handle_type jl_uv_handle_type(uv_handle_t *handle) JL_DLLEXPORT int jl_tty_set_mode(uv_tty_t *handle, int mode) { + if (!handle) + return UV__EOF; if (handle->type != UV_TTY) return 0; uv_tty_mode_t mode_enum = UV_TTY_MODE_NORMAL; if (mode) diff --git a/stdlib/REPL/src/Terminals.jl b/stdlib/REPL/src/Terminals.jl index 0cf6888d248e8..aba6bff73a607 100644 --- a/stdlib/REPL/src/Terminals.jl +++ b/stdlib/REPL/src/Terminals.jl @@ -122,19 +122,17 @@ cmove_col(t::UnixTerminal, n) = (write(t.out_stream, '\r'); n > 1 && cmove_right if Sys.iswindows() function raw!(t::TTYTerminal,raw::Bool) - check_open(t.in_stream) if Base.ispty(t.in_stream) run((raw ? `stty raw -echo onlcr -ocrnl opost` : `stty sane`), t.in_stream, t.out_stream, t.err_stream) true else - ccall(:jl_tty_set_mode, Int32, (Ptr{Cvoid},Int32), t.in_stream.handle::Ptr{Cvoid}, raw) != -1 + ccall(:jl_tty_set_mode, Int32, (Ptr{Cvoid},Int32), t.in_stream.handle::Ptr{Cvoid}, raw) == 0 end end else function raw!(t::TTYTerminal, raw::Bool) - check_open(t.in_stream) - ccall(:jl_tty_set_mode, Int32, (Ptr{Cvoid},Int32), t.in_stream.handle::Ptr{Cvoid}, raw) != -1 + ccall(:jl_tty_set_mode, Int32, (Ptr{Cvoid},Int32), t.in_stream.handle::Ptr{Cvoid}, raw) == 0 end end From aff651200ccea28142aedac9a9104914687dd9e2 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:37:43 -0500 Subject: [PATCH 463/537] codegen: Restore `ct->scope` in `jl_eh_restore_state` (#55907) This eliminates the need to associate a `catch` with every `with(...) do ... end` block, which was really just acting as a landing pad to restore `jl_current_task->scope` in the majority of cases. This change does not actually update lowering to remove the unnecessary `catch` block - that's left as a follow-up. --- src/codegen.cpp | 72 +++++++++++++++++++++++------------------------ src/interpreter.c | 8 ++---- src/julia.h | 1 + src/rtutils.c | 3 ++ 4 files changed, 42 insertions(+), 42 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 8662016fd069f..761c3e7eb8758 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1955,7 +1955,7 @@ class jl_codectx_t { // local var info. globals are not in here. SmallVector slots; std::map phic_slots; - std::map > scope_restore; + std::map scope_tokens; SmallVector SAvalues; SmallVector, jl_value_t *>, 0> PhiNodes; SmallVector ssavalue_assigned; @@ -6573,8 +6573,6 @@ static void emit_stmtpos(jl_codectx_t &ctx, jl_value_t *expr, int ssaval_result) } else if (head == jl_leave_sym) { int hand_n_leave = 0; - Value *scope_to_restore = nullptr; - Value *scope_ptr = nullptr; for (size_t i = 0; i < jl_expr_nargs(ex); ++i) { jl_value_t *arg = args[i]; if (arg == jl_nothing) @@ -6584,8 +6582,20 @@ static void emit_stmtpos(jl_codectx_t &ctx, jl_value_t *expr, int ssaval_result) jl_value_t *enter_stmt = jl_array_ptr_ref(ctx.code, enter_idx); if (enter_stmt == jl_nothing) continue; - if (ctx.scope_restore.count(enter_idx)) - std::tie(scope_to_restore, scope_ptr) = ctx.scope_restore[enter_idx]; + if (ctx.scope_tokens.count(enter_idx)) { + // TODO: The semantics of `gc_preserve` are not perfect here. 
An `Expr(:enter, ...)` block may + // have multiple exits, but effects of `preserve_end` are only extended to the end of the + // dominance of each `Expr(:leave, ...)`. + // + // That means that a scope object can suddenly end up preserved again outside of an + // `Expr(:enter, ...)` region where it ought to be dead. It'd be preferable if the effects + // of gc_preserve_end propagated through a control-flow joins as long as all incoming + // agree about the preserve state. + // + // This is correct as-is anyway - it just means the scope lives longer than it needs to + // if the `Expr(:enter, ...)` has multiple exits. + ctx.builder.CreateCall(prepare_call(gc_preserve_end_func), {ctx.scope_tokens[enter_idx]}); + } if (jl_enternode_catch_dest(enter_stmt)) { // We're not actually setting up the exception frames for these, so // we don't need to exit them. @@ -6593,11 +6603,6 @@ static void emit_stmtpos(jl_codectx_t &ctx, jl_value_t *expr, int ssaval_result) } } ctx.builder.CreateCall(prepare_call(jlleave_noexcept_func), {get_current_task(ctx), ConstantInt::get(getInt32Ty(ctx.builder.getContext()), hand_n_leave)}); - if (scope_to_restore) { - jl_aliasinfo_t scope_ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); - scope_ai.decorateInst( - ctx.builder.CreateAlignedStore(scope_to_restore, scope_ptr, ctx.types().alignof_ptr)); - } } else if (head == jl_pop_exception_sym) { jl_cgval_t excstack_state = emit_expr(ctx, jl_exprarg(expr, 0)); @@ -7180,7 +7185,7 @@ static Value *get_tls_world_age_field(jl_codectx_t &ctx) static Value *get_scope_field(jl_codectx_t &ctx) { Value *ct = get_current_task(ctx); - return emit_ptrgep(ctx, ct, offsetof(jl_task_t, scope), "current_scope"); + return emit_ptrgep(ctx, ct, offsetof(jl_task_t, scope), "scope"); } Function *get_or_emit_fptr1(StringRef preal_decl, Module *M) @@ -9604,28 +9609,6 @@ static jl_llvm_functions_t continue; } else if (jl_is_enternode(stmt)) { - // For the two-arg version of :enter, twiddle the scope - Value *scope_ptr = NULL; - Value *old_scope = NULL; - jl_aliasinfo_t scope_ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); - if (jl_enternode_scope(stmt)) { - jl_cgval_t new_scope = emit_expr(ctx, jl_enternode_scope(stmt)); - if (new_scope.typ == jl_bottom_type) { - // Probably dead code, but let's be loud about it in case it isn't, so we fail - // at the point of the miscompile, rather than later when something attempts to - // read the scope. 
- emit_error(ctx, "(INTERNAL ERROR): Attempted to execute EnterNode with bad scope"); - find_next_stmt(-1); - continue; - } - Value *new_scope_boxed = boxed(ctx, new_scope); - scope_ptr = get_scope_field(ctx); - old_scope = scope_ai.decorateInst( - ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, scope_ptr, ctx.types().alignof_ptr)); - scope_ai.decorateInst( - ctx.builder.CreateAlignedStore(new_scope_boxed, scope_ptr, ctx.types().alignof_ptr)); - ctx.scope_restore[cursor] = std::make_pair(old_scope, scope_ptr); - } int lname = jl_enternode_catch_dest(stmt); if (lname) { // Save exception stack depth at enter for use in pop_exception @@ -9651,16 +9634,31 @@ static jl_llvm_functions_t ctx.builder.SetInsertPoint(catchpop); { ctx.builder.CreateCall(prepare_call(jlleave_func), {get_current_task(ctx), ConstantInt::get(getInt32Ty(ctx.builder.getContext()), 1)}); - if (old_scope) { - scope_ai.decorateInst( - ctx.builder.CreateAlignedStore(old_scope, scope_ptr, ctx.types().alignof_ptr)); - } ctx.builder.CreateBr(handlr); } ctx.builder.SetInsertPoint(tryblk); auto ehptr = emit_ptrgep(ctx, ct, offsetof(jl_task_t, eh)); ctx.builder.CreateAlignedStore(ehbuf, ehptr, ctx.types().alignof_ptr); } + // For the two-arg version of :enter, twiddle the scope + if (jl_enternode_scope(stmt)) { + jl_cgval_t scope = emit_expr(ctx, jl_enternode_scope(stmt)); + if (scope.typ == jl_bottom_type) { + // Probably dead code, but let's be loud about it in case it isn't, so we fail + // at the point of the miscompile, rather than later when something attempts to + // read the scope. + emit_error(ctx, "(INTERNAL ERROR): Attempted to execute EnterNode with bad scope"); + find_next_stmt(-1); + continue; + } + Value *scope_boxed = boxed(ctx, scope); + StoreInst *scope_store = ctx.builder.CreateAlignedStore(scope_boxed, get_scope_field(ctx), ctx.types().alignof_ptr); + jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe).decorateInst(scope_store); + // GC preserve the scope, since it is not rooted in the `jl_handler_t *` + // and may be removed from jl_current_task by any nested block and then + // replaced later + ctx.scope_tokens[cursor] = ctx.builder.CreateCall(prepare_call(gc_preserve_begin_func), {scope_boxed}); + } } else { emit_stmtpos(ctx, stmt, cursor); diff --git a/src/interpreter.c b/src/interpreter.c index 13dc45cf2ae6e..252049ad2db6d 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -527,16 +527,14 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, size_t ip, } s->locals[jl_source_nslots(s->src) + ip] = jl_box_ulong(jl_excstack_state(ct)); if (jl_enternode_scope(stmt)) { - jl_value_t *old_scope = ct->scope; - JL_GC_PUSH1(&old_scope); - jl_value_t *new_scope = eval_value(jl_enternode_scope(stmt), s); - ct->scope = new_scope; + jl_value_t *scope = eval_value(jl_enternode_scope(stmt), s); + JL_GC_PUSH1(&scope); + ct->scope = scope; if (!jl_setjmp(__eh.eh_ctx, 1)) { ct->eh = &__eh; eval_body(stmts, s, next_ip, toplevel); jl_unreachable(); } - ct->scope = old_scope; JL_GC_POP(); } else { diff --git a/src/julia.h b/src/julia.h index 81e6cf42da567..de88b48e59912 100644 --- a/src/julia.h +++ b/src/julia.h @@ -2263,6 +2263,7 @@ typedef struct _jl_excstack_t jl_excstack_t; typedef struct _jl_handler_t { jl_jmp_buf eh_ctx; jl_gcframe_t *gcstack; + jl_value_t *scope; struct _jl_handler_t *prev; int8_t gc_state; size_t locks_len; diff --git a/src/rtutils.c b/src/rtutils.c index 7e1fb576008f6..fcc4a489d3f38 100644 --- a/src/rtutils.c +++ b/src/rtutils.c @@ -244,6 +244,7 @@ JL_DLLEXPORT void 
jl_enter_handler(jl_task_t *ct, jl_handler_t *eh) // Must have no safepoint eh->prev = ct->eh; eh->gcstack = ct->gcstack; + eh->scope = ct->scope; eh->gc_state = jl_atomic_load_relaxed(&ct->ptls->gc_state); eh->locks_len = ct->ptls->locks.len; eh->defer_signal = ct->ptls->defer_signal; @@ -273,6 +274,7 @@ JL_DLLEXPORT void jl_eh_restore_state(jl_task_t *ct, jl_handler_t *eh) sig_atomic_t old_defer_signal = ptls->defer_signal; ct->eh = eh->prev; ct->gcstack = eh->gcstack; + ct->scope = eh->scope; small_arraylist_t *locks = &ptls->locks; int unlocks = locks->len > eh->locks_len; if (unlocks) { @@ -311,6 +313,7 @@ JL_DLLEXPORT void jl_eh_restore_state(jl_task_t *ct, jl_handler_t *eh) JL_DLLEXPORT void jl_eh_restore_state_noexcept(jl_task_t *ct, jl_handler_t *eh) { assert(ct->gcstack == eh->gcstack && "Incorrect GC usage under try catch"); + ct->scope = eh->scope; ct->eh = eh->prev; ct->ptls->defer_signal = eh->defer_signal; // optional, but certain try-finally (in stream.jl) may be slightly harder to write without this } From deac82ad915db6c1fd6e9246fd4bb3310b7cd71f Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:40:02 -0500 Subject: [PATCH 464/537] lowering: don't reverse handler order in `(pop-handler-list ...)` (#55871) We were accidentally emitting a different pop order for `Expr(:leave, ...)` if you uncomment the `nothing` below: ```julia let src = Meta.@lower let try try return 1 catch end finally # nothing # <- uncomment me end end println.(filter(stmt->Base.isexpr(stmt, :leave), src.args[1].code)) nothing end ``` --- src/julia-syntax.scm | 19 ++++++++++--------- test/syntax.jl | 23 +++++++++++++++++++++++ 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index e82c436e5a730..6a9558bb06ba5 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -4447,15 +4447,16 @@ f(x) = yt(x) (define (pop-handler-list src-tokens dest-tokens lab) (if (eq? src-tokens dest-tokens) #f - (let loop ((s src-tokens) - (l '())) - (if (not (pair? s)) - (if (null? lab) - (error "Attempt to jump into catch block") - (error (string "cannot goto label \"" lab "\" inside try/catch block")))) - (if (eq? (cdr s) dest-tokens) - (cons (car s) l) - (loop (cdr s) (cons (car s) l)))))) + (reverse + (let loop ((s src-tokens) + (l '())) + (if (not (pair? s)) + (if (null? lab) + (error "Attempt to jump into catch block") + (error (string "cannot goto label \"" lab "\" inside try/catch block")))) + (if (eq? (cdr s) dest-tokens) + (cons (car s) l) + (loop (cdr s) (cons (car s) l))))))) (define (emit-return tail x) (define (emit- x) (let* ((tmp (if ((if (null? catch-token-stack) valid-ir-return? simple-atom?) x) diff --git a/test/syntax.jl b/test/syntax.jl index c19721b5c54b3..1f9d1d592931b 100644 --- a/test/syntax.jl +++ b/test/syntax.jl @@ -3865,6 +3865,29 @@ end end end +let src = Meta.@lower let + try + try + return 1 + catch + end + finally + nothing + end +end + code = src.args[1].code + for stmt in code + if Meta.isexpr(stmt, :leave) && length(stmt.args) > 1 + # Expr(:leave, ...) 
should list the arguments to pop from + # inner-most scope to outer-most + @test issorted(Int[ + (arg::Core.SSAValue).id + for arg in stmt.args + ]; rev=true) + end + end +end + # Test that globals can be `using`'d even if they are not yet defined module UndefGlobal54954 global theglobal54954::Int From c5899c2d54f88c66b3596639b6befb7400a68016 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Mon, 18 Nov 2024 16:57:54 -0500 Subject: [PATCH 465/537] cli: Add required `--experimental` flag for experimental features (e.g. `--trim`) (#56045) The intention here is to clearly signal when a feature is "not yet fully implemented" vs. "feature-complete and in pre-release" vs. "fully released and ready for production use" The only feature gated behind this right now is `--trim`. Trim has its core functionality implemented (and folks seem to be enjoying it!) but the deployment / linking story in particular is still in its very early stages, esp. because our existing techniques for, e.g., pre-loading `libunwind`, `libstdc++`, etc. no longer work in a shared library context. Once `--trim` is ready for a broader chunk of the ecosystem / language, we can peel off the `--experimental` flag --- base/options.jl | 1 + src/jloptions.c | 13 ++++++++++++- src/jloptions.h | 1 + src/julia.h | 3 +++ 4 files changed, 17 insertions(+), 1 deletion(-) diff --git a/base/options.jl b/base/options.jl index f535c27d99122..07baa3b51f65b 100644 --- a/base/options.jl +++ b/base/options.jl @@ -39,6 +39,7 @@ struct JLOptions worker::Int8 cookie::Ptr{UInt8} handle_signals::Int8 + use_experimental_features::Int8 use_sysimage_native_code::Int8 use_compiled_modules::Int8 use_pkgimages::Int8 diff --git a/src/jloptions.c b/src/jloptions.c index 907f47d9030e4..f81cf0453db21 100644 --- a/src/jloptions.c +++ b/src/jloptions.c @@ -130,6 +130,7 @@ JL_DLLEXPORT void jl_init_options(void) 0, // worker NULL, // cookie JL_OPTIONS_HANDLE_SIGNALS_ON, + JL_OPTIONS_USE_EXPERIMENTAL_FEATURES_NO, JL_OPTIONS_USE_SYSIMAGE_NATIVE_CODE_YES, JL_OPTIONS_USE_COMPILED_MODULES_YES, JL_OPTIONS_USE_PKGIMAGES_YES, @@ -150,7 +151,7 @@ JL_DLLEXPORT void jl_init_options(void) 0, // permalloc_pkgimg 0, // heap-size-hint 0, // trace_compile_timing - 0, // trim + JL_TRIM_NO, // trim }; jl_options_initialized = 1; } @@ -303,6 +304,7 @@ static const char opts_hidden[] = " functions\n\n" // compiler debugging and experimental (see the devdocs for tips on using these options) + " --experimental Enable the use of experimental (alpha) features\n" " --output-unopt-bc Generate unoptimized LLVM bitcode (.bc)\n" " --output-bc Generate LLVM bitcode (.bc)\n" " --output-asm Generate an assembly file (.s)\n" @@ -372,6 +374,7 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) opt_gc_threads, opt_permalloc_pkgimg, opt_trim, + opt_experimental_features, }; static const char* const shortopts = "+vhqH:e:E:L:J:C:it:p:O:g:m:"; static const struct option longopts[] = { @@ -427,6 +430,7 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) { "math-mode", required_argument, 0, opt_math_mode }, { "handle-signals", required_argument, 0, opt_handle_signals }, // hidden command line options + { "experimental", no_argument, 0, opt_experimental_features }, { "worker", optional_argument, 0, opt_worker }, { "bind-to", required_argument, 0, opt_bind_to }, { "lisp", no_argument, 0, 1 }, @@ -570,6 +574,9 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) else jl_errorf("julia: invalid argument to 
--banner={yes|no|auto|short} (%s)", optarg); break; + case opt_experimental_features: + jl_options.use_experimental_features = JL_OPTIONS_USE_EXPERIMENTAL_FEATURES_YES; + break; case opt_sysimage_native_code: if (!strcmp(optarg,"yes")) jl_options.use_sysimage_native_code = JL_OPTIONS_USE_SYSIMAGE_NATIVE_CODE_YES; @@ -977,6 +984,10 @@ JL_DLLEXPORT void jl_parse_opts(int *argcp, char ***argvp) } } parsing_args_done: + if (!jl_options.use_experimental_features) { + if (jl_options.trim != JL_TRIM_NO) + jl_errorf("julia: --trim is an experimental feature, you must enable it with --experimental"); + } jl_options.code_coverage = codecov; jl_options.malloc_log = malloclog; int proc_args = *argcp < optind ? *argcp : optind; diff --git a/src/jloptions.h b/src/jloptions.h index e58797caace3c..b9910702f3f9b 100644 --- a/src/jloptions.h +++ b/src/jloptions.h @@ -43,6 +43,7 @@ typedef struct { int8_t worker; const char *cookie; int8_t handle_signals; + int8_t use_experimental_features; int8_t use_sysimage_native_code; int8_t use_compiled_modules; int8_t use_pkgimages; diff --git a/src/julia.h b/src/julia.h index de88b48e59912..87979f75e8d80 100644 --- a/src/julia.h +++ b/src/julia.h @@ -2599,6 +2599,9 @@ JL_DLLEXPORT int jl_generating_output(void) JL_NOTSAFEPOINT; #define JL_OPTIONS_HANDLE_SIGNALS_ON 1 #define JL_OPTIONS_HANDLE_SIGNALS_OFF 0 +#define JL_OPTIONS_USE_EXPERIMENTAL_FEATURES_YES 1 +#define JL_OPTIONS_USE_EXPERIMENTAL_FEATURES_NO 0 + #define JL_OPTIONS_USE_SYSIMAGE_NATIVE_CODE_YES 1 #define JL_OPTIONS_USE_SYSIMAGE_NATIVE_CODE_NO 0 From af9e6e3167f7e444140c81326a2c3cf058ddba1a Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Tue, 19 Nov 2024 07:04:32 +0900 Subject: [PATCH 466/537] optimize `abstract_call_gf_by_type` (#56572) Combines many allocations into one and types them for better inference --- Compiler/src/abstractinterpretation.jl | 213 ++++++++++++++----------- 1 file changed, 120 insertions(+), 93 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index f98b9336d97c0..68668b0ac2c91 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -38,7 +38,76 @@ function propagate_conditional(rt::InterConditional, cond::Conditional) return Conditional(cond.slot, new_thentype, new_elsetype) end -function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), +mutable struct SafeBox{T} + x::T + SafeBox{T}(x::T) where T = new{T}(x) + SafeBox(@nospecialize x) = new{Any}(x) +end +getindex(box::SafeBox) = box.x +setindex!(box::SafeBox{T}, x::T) where T = setfield!(box, :x, x) + +struct FailedMethodMatch + reason::String +end + +struct MethodMatchTarget + match::MethodMatch + edges::Vector{Union{Nothing,CodeInstance}} + edge_idx::Int +end + +struct MethodMatches + applicable::Vector{MethodMatchTarget} + info::MethodMatchInfo + valid_worlds::WorldRange +end +any_ambig(result::MethodLookupResult) = result.ambig +any_ambig(info::MethodMatchInfo) = any_ambig(info.results) +any_ambig(m::MethodMatches) = any_ambig(m.info) +fully_covering(info::MethodMatchInfo) = info.fullmatch +fully_covering(m::MethodMatches) = fully_covering(m.info) + +struct UnionSplitMethodMatches + applicable::Vector{MethodMatchTarget} + applicable_argtypes::Vector{Vector{Any}} + info::UnionSplitInfo + valid_worlds::WorldRange +end +any_ambig(info::UnionSplitInfo) = any(any_ambig, info.split) +any_ambig(m::UnionSplitMethodMatches) = any_ambig(m.info) 
+fully_covering(info::UnionSplitInfo) = all(fully_covering, info.split) +fully_covering(m::UnionSplitMethodMatches) = fully_covering(m.info) + +nmatches(info::MethodMatchInfo) = length(info.results) +function nmatches(info::UnionSplitInfo) + n = 0 + for mminfo in info.split + n += nmatches(mminfo) + end + return n +end + +# intermediate state for computing gfresult +mutable struct CallInferenceState + inferidx::Int + rettype + exctype + all_effects::Effects + const_results::Union{Nothing,Vector{Union{Nothing,ConstResult}}} # keeps the results of inference with the extended lattice elements (if happened) + conditionals::Union{Nothing,Tuple{Vector{Any},Vector{Any}}} # keeps refinement information of call argument types when the return type is boolean + slotrefinements::Union{Nothing,Vector{Any}} # keeps refinement information on slot types obtained from call signature + + # some additional fields for untyped objects (just to avoid capturing) + func + matches::Union{MethodMatches,UnionSplitMethodMatches} + function CallInferenceState(@nospecialize(func), matches::Union{MethodMatches,UnionSplitMethodMatches}) + return new(#=inferidx=#1, #=rettype=#Bottom, #=exctype=#Bottom, #=all_effects=#EFFECTS_TOTAL, + #=const_results=#nothing, #=conditionals=#nothing, #=slotrefinements=#nothing, + func, matches) + end +end + +function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(func), arginfo::ArgInfo, si::StmtInfo, @nospecialize(atype), sv::AbsIntState, max_methods::Int) 𝕃ₚ, 𝕃ᵢ = ipo_lattice(interp), typeinf_lattice(interp) @@ -50,12 +119,12 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) end - (; valid_worlds, applicable, info) = matches + (; valid_worlds, applicable) = matches update_valid_age!(sv, valid_worlds) # need to record the negative world now, since even if we don't generate any useful information, inlining might want to add an invoke edge and it won't have this information anymore if bail_out_toplevel_call(interp, sv) - napplicable = length(applicable) + local napplicable = length(applicable) for i = 1:napplicable - sig = applicable[i].match.spec_types + local sig = applicable[i].match.spec_types if !isdispatchtuple(sig) # only infer fully concrete call sites in top-level expressions (ignoring even isa_compileable_sig matches) add_remark!(interp, sv, "Refusing to infer non-concrete call site in top-level expression") @@ -66,26 +135,17 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), # final result gfresult = Future{CallMeta}() - # intermediate work for computing gfresult - rettype = exctype = Bottom - conditionals = nothing # keeps refinement information of call argument types when the return type is boolean - const_results = nothing # or const_results::Vector{Union{Nothing,ConstResult}} if any const results are available - fargs = arginfo.fargs - all_effects = EFFECTS_TOTAL - slotrefinements = nothing # keeps refinement information on slot types obtained from call signature + state = CallInferenceState(func, matches) # split the for loop off into a function, so that we can pause and restart it at will - i::Int = 1 - f = Core.Box(f) - atype = Core.Box(atype) function infercalls(interp, sv) local napplicable = length(applicable) local multiple_matches = napplicable > 1 - while i <= napplicable - (; match, edges, edge_idx) = applicable[i] - method = match.method - sig = match.spec_types - if bail_out_call(interp, InferenceLoopState(rettype, 
all_effects), sv) + while state.inferidx <= napplicable + (; match, edges, edge_idx) = applicable[state.inferidx] + local method = match.method + local sig = match.spec_types + if bail_out_call(interp, InferenceLoopState(state.rettype, state.all_effects), sv) add_remark!(interp, sv, "Call inference reached maximally imprecise information: bailing on doing more abstract inference.") break end @@ -108,10 +168,11 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), this_exct = exct # try constant propagation with argtypes for this match # this is in preparation for inlining, or improving the return result - this_argtypes = isa(matches, MethodMatches) ? argtypes : matches.applicable_argtypes[i] - this_arginfo = ArgInfo(fargs, this_argtypes) + local matches = state.matches + this_argtypes = isa(matches, MethodMatches) ? argtypes : matches.applicable_argtypes[state.inferidx] + this_arginfo = ArgInfo(arginfo.fargs, this_argtypes) const_call_result = abstract_call_method_with_const_args(interp, - mresult[], f.contents, this_arginfo, si, match, sv) + mresult[], state.func, this_arginfo, si, match, sv) const_result = volatile_inf_result if const_call_result !== nothing this_const_conditional = ignorelimited(const_call_result.rt) @@ -146,12 +207,13 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), end end - all_effects = merge_effects(all_effects, effects) + state.all_effects = merge_effects(state.all_effects, effects) if const_result !== nothing + local const_results = state.const_results if const_results === nothing - const_results = fill!(Vector{Union{Nothing,ConstResult}}(undef, napplicable), nothing) + const_results = state.const_results = fill!(Vector{Union{Nothing,ConstResult}}(undef, napplicable), nothing) end - const_results[i] = const_result + const_results[state.inferidx] = const_result end @assert !(this_conditional isa Conditional || this_rt isa MustAlias) "invalid lattice element returned from inter-procedural context" if can_propagate_conditional(this_conditional, argtypes) @@ -162,12 +224,14 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), this_rt = this_conditional end - rettype = rettype ⊔ₚ this_rt - exctype = exctype ⊔ₚ this_exct - if has_conditional(𝕃ₚ, sv) && this_conditional !== Bottom && is_lattice_bool(𝕃ₚ, rettype) && fargs !== nothing + state.rettype = state.rettype ⊔ₚ this_rt + state.exctype = state.exctype ⊔ₚ this_exct + if has_conditional(𝕃ₚ, sv) && this_conditional !== Bottom && is_lattice_bool(𝕃ₚ, state.rettype) && arginfo.fargs !== nothing + local conditionals = state.conditionals if conditionals === nothing - conditionals = Any[Bottom for _ in 1:length(argtypes)], - Any[Bottom for _ in 1:length(argtypes)] + conditionals = state.conditionals = ( + Any[Bottom for _ in 1:length(argtypes)], + Any[Bottom for _ in 1:length(argtypes)]) end for i = 1:length(argtypes) cnd = conditional_argtype(𝕃ᵢ, this_conditional, match.spec_types, argtypes, i) @@ -177,7 +241,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), end edges[edge_idx] = edge - i += 1 + state.inferidx += 1 return true end # function handle1 if isready(mresult) && handle1(interp, sv) @@ -188,22 +252,25 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), end end # while - seenall = i > napplicable + seenall = state.inferidx > napplicable + retinfo = state.matches.info if seenall # small optimization to skip some work that is already implied + local 
const_results = state.const_results if const_results !== nothing - @assert napplicable == nmatches(info) == length(const_results) - info = ConstCallInfo(info, const_results) + @assert napplicable == nmatches(retinfo) == length(const_results) + retinfo = ConstCallInfo(retinfo, const_results) end - if !fully_covering(matches) || any_ambig(matches) + if !fully_covering(state.matches) || any_ambig(state.matches) # Account for the fact that we may encounter a MethodError with a non-covered or ambiguous signature. - all_effects = Effects(all_effects; nothrow=false) - exctype = exctype ⊔ₚ MethodError + state.all_effects = Effects(state.all_effects; nothrow=false) + state.exctype = state.exctype ⊔ₚ MethodError end + local fargs = arginfo.fargs if sv isa InferenceState && fargs !== nothing - slotrefinements = collect_slot_refinements(𝕃ᵢ, applicable, argtypes, fargs, sv) + state.slotrefinements = collect_slot_refinements(𝕃ᵢ, applicable, argtypes, fargs, sv) end - rettype = from_interprocedural!(interp, rettype, sv, arginfo, conditionals) - if call_result_unused(si) && !(rettype === Bottom) + state.rettype = from_interprocedural!(interp, state.rettype, sv, arginfo, state.conditionals) + if call_result_unused(si) && !(state.rettype === Bottom) add_remark!(interp, sv, "Call result type was widened because the return value is unused") # We're mainly only here because the optimizer might want this code, # but we ourselves locally don't typically care about it locally @@ -211,7 +278,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), # So avoid adding an edge, since we don't want to bother attempting # to improve our result even if it does change (to always throw), # and avoid keeping track of a more complex result type. - rettype = Any + state.rettype = Any end # if from_interprocedural added any pclimitations to the set inherited from the arguments, # some of those may be part of our cycles, so those can be deleted now @@ -230,23 +297,24 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), end else # there is unanalyzed candidate, widen type and effects to the top - rettype = exctype = Any - all_effects = Effects() - const_results = nothing + state.rettype = state.exctype = Any + state.all_effects = Effects() + state.const_results = nothing end # Also considering inferring the compilation signature for this method, so # it is available to the compiler in case it ends up needing it for the invoke. 
- if isa(sv, InferenceState) && infer_compilation_signature(interp) && (!is_removable_if_unused(all_effects) || !call_result_unused(si)) - i = 1 + if (isa(sv, InferenceState) && infer_compilation_signature(interp) && + (!is_removable_if_unused(state.all_effects) || !call_result_unused(si))) + inferidx = SafeBox{Int}(1) function infercalls2(interp, sv) local napplicable = length(applicable) local multiple_matches = napplicable > 1 - while i <= napplicable - (; match, edges, edge_idx) = applicable[i] - i += 1 - method = match.method - sig = match.spec_types + while inferidx[] <= napplicable + (; match, edges, edge_idx) = applicable[inferidx[]] + inferidx[] += 1 + local method = match.method + local sig = match.spec_types mi = specialize_method(match; preexisting=true) if mi === nothing || !const_prop_methodinstance_heuristic(interp, mi, arginfo, sv) csig = get_compileable_sig(method, sig, match.sparams) @@ -265,7 +333,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), infercalls2(interp, sv) || push!(sv.tasks, infercalls2) end - gfresult[] = CallMeta(rettype, exctype, all_effects, info, slotrefinements) + gfresult[] = CallMeta(state.rettype, state.exctype, state.all_effects, retinfo, state.slotrefinements) return true end # function infercalls # start making progress on the first call @@ -273,47 +341,6 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(f), return gfresult end -struct FailedMethodMatch - reason::String -end - -struct MethodMatchTarget - match::MethodMatch - edges::Vector{Union{Nothing,CodeInstance}} - edge_idx::Int -end - -struct MethodMatches - applicable::Vector{MethodMatchTarget} - info::MethodMatchInfo - valid_worlds::WorldRange -end -any_ambig(result::MethodLookupResult) = result.ambig -any_ambig(info::MethodMatchInfo) = any_ambig(info.results) -any_ambig(m::MethodMatches) = any_ambig(m.info) -fully_covering(info::MethodMatchInfo) = info.fullmatch -fully_covering(m::MethodMatches) = fully_covering(m.info) - -struct UnionSplitMethodMatches - applicable::Vector{MethodMatchTarget} - applicable_argtypes::Vector{Vector{Any}} - info::UnionSplitInfo - valid_worlds::WorldRange -end -any_ambig(info::UnionSplitInfo) = any(any_ambig, info.split) -any_ambig(m::UnionSplitMethodMatches) = any_ambig(m.info) -fully_covering(info::UnionSplitInfo) = all(fully_covering, info.split) -fully_covering(m::UnionSplitMethodMatches) = fully_covering(m.info) - -nmatches(info::MethodMatchInfo) = length(info.results) -function nmatches(info::UnionSplitInfo) - n = 0 - for mminfo in info.split - n += nmatches(mminfo) - end - return n -end - function find_method_matches(interp::AbstractInterpreter, argtypes::Vector{Any}, @nospecialize(atype); max_union_splitting::Int = InferenceParams(interp).max_union_splitting, max_methods::Int = InferenceParams(interp).max_methods) From 0bd8292ec35c2a10dab9f8e3b4f8ca1895c829dc Mon Sep 17 00:00:00 2001 From: Tim Holy Date: Tue, 19 Nov 2024 10:09:39 -0600 Subject: [PATCH 467/537] Add missing `convert` methods for `Cholesky` (#56562) Co-authored-by: Daniel Karrasch --- stdlib/LinearAlgebra/src/cholesky.jl | 7 ++++++- stdlib/LinearAlgebra/test/cholesky.jl | 18 ++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/stdlib/LinearAlgebra/src/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl index 545d92ec1704d..03f7c273ccbef 100644 --- a/stdlib/LinearAlgebra/src/cholesky.jl +++ b/stdlib/LinearAlgebra/src/cholesky.jl @@ -631,11 +631,16 @@ function Cholesky{T}(C::Cholesky) where T Cnew 
= convert(AbstractMatrix{T}, C.factors) Cholesky{T, typeof(Cnew)}(Cnew, C.uplo, C.info) end +Cholesky{T,S}(C::Cholesky) where {T,S<:AbstractMatrix} = Cholesky{T,S}(C.factors, C.uplo, C.info) Factorization{T}(C::Cholesky{T}) where {T} = C Factorization{T}(C::Cholesky) where {T} = Cholesky{T}(C) CholeskyPivoted{T}(C::CholeskyPivoted{T}) where {T} = C CholeskyPivoted{T}(C::CholeskyPivoted) where {T} = - CholeskyPivoted(AbstractMatrix{T}(C.factors),C.uplo,C.piv,C.rank,C.tol,C.info) + CholeskyPivoted(AbstractMatrix{T}(C.factors), C.uplo, C.piv, C.rank, C.tol, C.info) +CholeskyPivoted{T,S}(C::CholeskyPivoted) where {T,S<:AbstractMatrix} = + CholeskyPivoted{T,S,typeof(C.piv)}(C.factors, C.uplo, C.piv, C.rank, C.tol, C.info) +CholeskyPivoted{T,S,P}(C::CholeskyPivoted) where {T,S<:AbstractMatrix,P<:AbstractVector{<:Integer}} = + CholeskyPivoted{T,S,P}(C.factors, C.uplo, C.piv, C.rank, C.tol, C.info) Factorization{T}(C::CholeskyPivoted{T}) where {T} = C Factorization{T}(C::CholeskyPivoted) where {T} = CholeskyPivoted{T}(C) diff --git a/stdlib/LinearAlgebra/test/cholesky.jl b/stdlib/LinearAlgebra/test/cholesky.jl index 00bfc18a21638..6ba72432048a9 100644 --- a/stdlib/LinearAlgebra/test/cholesky.jl +++ b/stdlib/LinearAlgebra/test/cholesky.jl @@ -281,6 +281,24 @@ end end end end + + @testset "eltype/matrixtype conversions" begin + apd = Matrix(Hermitian(areal'*areal)) + capd = cholesky(apd) + @test convert(Cholesky{Float64}, capd) === capd + @test convert(Cholesky{Float64,Matrix{Float64}}, capd) === convert(typeof(capd), capd) === capd + @test eltype(convert(Cholesky{Float32}, capd)) === Float32 + @test eltype(convert(Cholesky{Float32,Matrix{Float32}}, capd)) === Float32 + + capd = cholesky(apd, RowMaximum()) + @test convert(CholeskyPivoted{Float64}, capd) === capd + @test convert(CholeskyPivoted{Float64,Matrix{Float64}}, capd) === capd + @test convert(CholeskyPivoted{Float64,Matrix{Float64},Vector{Int}}, capd) === convert(typeof(capd), capd) === capd + @test eltype(convert(CholeskyPivoted{Float32}, capd)) === Float32 + @test eltype(convert(CholeskyPivoted{Float32,Matrix{Float32}}, capd)) === Float32 + @test eltype(convert(CholeskyPivoted{Float32,Matrix{Float32},Vector{Int}}, capd)) === Float32 + @test eltype(convert(CholeskyPivoted{Float32,Matrix{Float32},Vector{Int16}}, capd).piv) === Int16 + end end @testset "behavior for non-positive definite matrices" for T in (Float64, ComplexF64, BigFloat) From 83ef55d80fa77de671ddfcca559a78a24913b006 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Tue, 19 Nov 2024 18:20:31 -0500 Subject: [PATCH 468/537] codegen: manually restore `ct->scope` when no exception handler is emitted (#56612) This fixes a bug introduced by #55907, which was neglecting that it's possible for `EnterNode` to have no `catch` destination and still have a scope. This can especially happen if the compiler has decided that the body is `nothrow` and chooses to optimize away the `catch` destination, but also #55907 intended to make the scope-only form of `:enter` legal (and not need an exception handler) even if the body is _not_ `nothrow`. This fixes all that up to restore the scope correctly on the happy path. 
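For a rough illustration of the failure mode, here is a sketch (not taken from this patch; it assumes a `ScopedValue`, while the regression test added below uses a bare `:tryfinally` to hit the same path):

```julia
using Base.ScopedValues
const sv = ScopedValue(1)
with(sv => 2) do
    sv[]   # body is provably nothrow, so no catch destination is emitted
end
# Prior to this fix, nothing on the happy path restored `ct->scope`,
# so the inner scope could leak past the block.
@assert Core.current_scope() === nothing  # holds with the fix applied
```
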
~~Needs tests - will add those soon~~ --- src/codegen.cpp | 22 +++++++++++++++++----- test/scopedvalues.jl | 7 +++++++ 2 files changed, 24 insertions(+), 5 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 761c3e7eb8758..968dab0f00430 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1955,7 +1955,7 @@ class jl_codectx_t { // local var info. globals are not in here. SmallVector slots; std::map phic_slots; - std::map scope_tokens; + std::map > scope_restore; SmallVector SAvalues; SmallVector, jl_value_t *>, 0> PhiNodes; SmallVector ssavalue_assigned; @@ -6573,6 +6573,7 @@ static void emit_stmtpos(jl_codectx_t &ctx, jl_value_t *expr, int ssaval_result) } else if (head == jl_leave_sym) { int hand_n_leave = 0; + Value *scope_to_restore = nullptr, *token = nullptr; for (size_t i = 0; i < jl_expr_nargs(ex); ++i) { jl_value_t *arg = args[i]; if (arg == jl_nothing) @@ -6582,7 +6583,7 @@ static void emit_stmtpos(jl_codectx_t &ctx, jl_value_t *expr, int ssaval_result) jl_value_t *enter_stmt = jl_array_ptr_ref(ctx.code, enter_idx); if (enter_stmt == jl_nothing) continue; - if (ctx.scope_tokens.count(enter_idx)) { + if (ctx.scope_restore.count(enter_idx)) { // TODO: The semantics of `gc_preserve` are not perfect here. An `Expr(:enter, ...)` block may // have multiple exits, but effects of `preserve_end` are only extended to the end of the // dominance of each `Expr(:leave, ...)`. @@ -6594,15 +6595,22 @@ static void emit_stmtpos(jl_codectx_t &ctx, jl_value_t *expr, int ssaval_result) // // This is correct as-is anyway - it just means the scope lives longer than it needs to // if the `Expr(:enter, ...)` has multiple exits. - ctx.builder.CreateCall(prepare_call(gc_preserve_end_func), {ctx.scope_tokens[enter_idx]}); + std::tie(token, scope_to_restore) = ctx.scope_restore[enter_idx]; + ctx.builder.CreateCall(prepare_call(gc_preserve_end_func), {token}); } if (jl_enternode_catch_dest(enter_stmt)) { // We're not actually setting up the exception frames for these, so // we don't need to exit them. 
hand_n_leave += 1; + scope_to_restore = nullptr; // restored by exception handler } } ctx.builder.CreateCall(prepare_call(jlleave_noexcept_func), {get_current_task(ctx), ConstantInt::get(getInt32Ty(ctx.builder.getContext()), hand_n_leave)}); + if (scope_to_restore) { + Value *scope_ptr = get_scope_field(ctx); + jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe).decorateInst( + ctx.builder.CreateAlignedStore(scope_to_restore, scope_ptr, ctx.types().alignof_ptr)); + } } else if (head == jl_pop_exception_sym) { jl_cgval_t excstack_state = emit_expr(ctx, jl_exprarg(expr, 0)); @@ -9652,12 +9660,16 @@ static jl_llvm_functions_t continue; } Value *scope_boxed = boxed(ctx, scope); - StoreInst *scope_store = ctx.builder.CreateAlignedStore(scope_boxed, get_scope_field(ctx), ctx.types().alignof_ptr); + Value *scope_ptr = get_scope_field(ctx); + LoadInst *current_scope = ctx.builder.CreateAlignedLoad(ctx.types().T_prjlvalue, scope_ptr, ctx.types().alignof_ptr); + StoreInst *scope_store = ctx.builder.CreateAlignedStore(scope_boxed, scope_ptr, ctx.types().alignof_ptr); + jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe).decorateInst(current_scope); jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe).decorateInst(scope_store); // GC preserve the scope, since it is not rooted in the `jl_handler_t *` // and may be removed from jl_current_task by any nested block and then // replaced later - ctx.scope_tokens[cursor] = ctx.builder.CreateCall(prepare_call(gc_preserve_begin_func), {scope_boxed}); + Value *scope_token = ctx.builder.CreateCall(prepare_call(gc_preserve_begin_func), {scope_boxed}); + ctx.scope_restore[cursor] = std::make_pair(scope_token, current_scope); } } else { diff --git a/test/scopedvalues.jl b/test/scopedvalues.jl index 2c38a0642ce24..174bc690ac0a2 100644 --- a/test/scopedvalues.jl +++ b/test/scopedvalues.jl @@ -175,3 +175,10 @@ const inlineable_const_sv = ScopedValue(1) @test fully_eliminated(; retval=(inlineable_const_sv => 1)) do inlineable_const_sv => 1 end + +# Handle nothrow scope bodies correctly (#56609) +@eval function nothrow_scope() + $(Expr(:tryfinally, :(), nothing, 1)) + @test Core.current_scope() === nothing +end +nothrow_scope() From b6606493caa4ca029690bfc3d73a520c6151aa91 Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Wed, 20 Nov 2024 04:05:12 -0500 Subject: [PATCH 469/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?SparseArrays=20stdlib=20from=2014333ea=20to=201b4933c=20(#56614?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stdlib: SparseArrays URL: https://github.com/JuliaSparse/SparseArrays.jl.git Stdlib branch: main Julia branch: master Old commit: 14333ea New commit: 1b4933c Julia version: 1.12.0-DEV SparseArrays version: 1.12.0 Bump invoked by: @Keno Powered by: [BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl) Diff: https://github.com/JuliaSparse/SparseArrays.jl/compare/14333eae647464121150ae77d9f2dbe673aa244b...1b4933ccc7b1f97427ff88bd7ba58950021f2c60 ``` $ git log --oneline 14333ea..1b4933c 1b4933c Make `allowscalar` a macro with auto-world-age-increment (#583) ``` Co-authored-by: Dilum Aluthge --- .../md5 | 1 - .../sha512 | 1 - .../md5 | 1 + .../sha512 | 1 + stdlib/SparseArrays.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/md5 delete mode 100644 
deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/sha512 create mode 100644 deps/checksums/SparseArrays-1b4933ccc7b1f97427ff88bd7ba58950021f2c60.tar.gz/md5 create mode 100644 deps/checksums/SparseArrays-1b4933ccc7b1f97427ff88bd7ba58950021f2c60.tar.gz/sha512 diff --git a/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/md5 b/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/md5 deleted file mode 100644 index 70a9d57cb6e13..0000000000000 --- a/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -28f61ce3c94e2b5a795f077779ba80d3 diff --git a/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/sha512 b/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/sha512 deleted file mode 100644 index f432dbedd64e6..0000000000000 --- a/deps/checksums/SparseArrays-14333eae647464121150ae77d9f2dbe673aa244b.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -27d8de35f1e821bd6512ad46d8804719b2f1822d80e3b9ee19aae21efc0bd562d3814cf41b08dfd71df0fd7daabb11959a6d25045cde09c7385aaf52e0befdfe diff --git a/deps/checksums/SparseArrays-1b4933ccc7b1f97427ff88bd7ba58950021f2c60.tar.gz/md5 b/deps/checksums/SparseArrays-1b4933ccc7b1f97427ff88bd7ba58950021f2c60.tar.gz/md5 new file mode 100644 index 0000000000000..41d78a15f2ddb --- /dev/null +++ b/deps/checksums/SparseArrays-1b4933ccc7b1f97427ff88bd7ba58950021f2c60.tar.gz/md5 @@ -0,0 +1 @@ +a643f01ee101a274d86d6469dd6a9d48 diff --git a/deps/checksums/SparseArrays-1b4933ccc7b1f97427ff88bd7ba58950021f2c60.tar.gz/sha512 b/deps/checksums/SparseArrays-1b4933ccc7b1f97427ff88bd7ba58950021f2c60.tar.gz/sha512 new file mode 100644 index 0000000000000..1868f9a865af5 --- /dev/null +++ b/deps/checksums/SparseArrays-1b4933ccc7b1f97427ff88bd7ba58950021f2c60.tar.gz/sha512 @@ -0,0 +1 @@ +09a86606b28b17f1066d608374f4f8b2fcdcd17d08a8fa37b08edea7b27a9e6becadc8e8e93b1dcc1477dc247255d6a8ded4f8e678f46d80c9fd0ad72a7f3973 diff --git a/stdlib/SparseArrays.version b/stdlib/SparseArrays.version index 9a738d89215b5..af6fac41ddf84 100644 --- a/stdlib/SparseArrays.version +++ b/stdlib/SparseArrays.version @@ -1,4 +1,4 @@ SPARSEARRAYS_BRANCH = main -SPARSEARRAYS_SHA1 = 14333eae647464121150ae77d9f2dbe673aa244b +SPARSEARRAYS_SHA1 = 1b4933ccc7b1f97427ff88bd7ba58950021f2c60 SPARSEARRAYS_GIT_URL := https://github.com/JuliaSparse/SparseArrays.jl.git SPARSEARRAYS_TAR_URL = https://api.github.com/repos/JuliaSparse/SparseArrays.jl/tarball/$1 From 4ed88145fb5f69a5c01a7f72c41fa8a89de1147a Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Wed, 20 Nov 2024 11:01:35 +0100 Subject: [PATCH 470/537] gracefully fall back to non pid locked precompilation if FileWatching is not loaded (#56570) Fixes #56569 --- base/loading.jl | 6 +++--- base/precompilation.jl | 29 ++++++++++++++++------------- 2 files changed, 19 insertions(+), 16 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 91be310cd5d17..ae54ba19038e9 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -3839,9 +3839,9 @@ const compilecache_pidlock_stale_age = 10 function maybe_cachefile_lock(f, pkg::PkgId, srcpath::String; stale_age=compilecache_pidlock_stale_age) if @isdefined(mkpidlock_hook) && @isdefined(trymkpidlock_hook) && @isdefined(parse_pidfile_hook) pidfile = compilecache_pidfile_path(pkg) - cachefile = invokelatest(trymkpidlock_hook, f, pidfile; stale_age) + cachefile = @invokelatest trymkpidlock_hook(f, pidfile; stale_age) if cachefile === false - pid, hostname, 
age = invokelatest(parse_pidfile_hook, pidfile) + pid, hostname, age = @invokelatest parse_pidfile_hook(pidfile) verbosity = isinteractive() ? CoreLogging.Info : CoreLogging.Debug if isempty(hostname) || hostname == gethostname() @logmsg verbosity "Waiting for another process (pid: $pid) to finish precompiling $(repr("text/plain", pkg)). Pidfile: $pidfile" @@ -3850,7 +3850,7 @@ function maybe_cachefile_lock(f, pkg::PkgId, srcpath::String; stale_age=compilec end # wait until the lock is available, but don't actually acquire it # returning nothing indicates a process waited for another - return invokelatest(mkpidlock_hook, Returns(nothing), pidfile; stale_age) + return @invokelatest mkpidlock_hook(Returns(nothing), pidfile; stale_age) end return cachefile else diff --git a/base/precompilation.jl b/base/precompilation.jl index edd8824ff8d68..2fe560be9a805 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -2,7 +2,7 @@ module Precompilation using Base: PkgId, UUID, SHA1, parsed_toml, project_file_name_uuid, project_names, project_file_manifest_path, get_deps, preferences_names, isaccessibledir, isfile_casesensitive, - base_project + base_project, isdefined # This is currently only used for pkgprecompile but the plan is to use this in code loading in the future # see the `kc/codeloading2.0` branch @@ -1031,14 +1031,16 @@ end # Can be merged with `maybe_cachefile_lock` in loading? function precompile_pkgs_maybe_cachefile_lock(f, io::IO, print_lock::ReentrantLock, fancyprint::Bool, pkg_config, pkgspidlocked, hascolor) + if !(isdefined(Base, :mkpidlock_hook) && isdefined(Base, :trymkpidlock_hook) && Base.isdefined(Base, :parse_pidfile_hook)) + return f() + end pkg, config = pkg_config flags, cacheflags = config - FileWatching = Base.loaded_modules[Base.PkgId(Base.UUID("7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee"), "FileWatching")] stale_age = Base.compilecache_pidlock_stale_age pidfile = Base.compilecache_pidfile_path(pkg, flags=cacheflags) - cachefile = FileWatching.trymkpidlock(f, pidfile; stale_age) + cachefile = @invokelatest Base.trymkpidlock_hook(f, pidfile; stale_age) if cachefile === false - pid, hostname, age = FileWatching.Pidfile.parse_pidfile(pidfile) + pid, hostname, age = @invokelatest Base.parse_pidfile_hook(pidfile) pkgspidlocked[pkg_config] = if isempty(hostname) || hostname == gethostname() if pid == getpid() "an async task in this process (pidfile: $pidfile)" @@ -1052,15 +1054,16 @@ function precompile_pkgs_maybe_cachefile_lock(f, io::IO, print_lock::ReentrantLo println(io, " ", pkg.name, _color_string(" Being precompiled by $(pkgspidlocked[pkg_config])", Base.info_color(), hascolor)) end # wait until the lock is available - FileWatching.mkpidlock(pidfile; stale_age) do - # double-check in case the other process crashed or the lock expired - if Base.isprecompiled(pkg; ignore_loaded=true, flags=cacheflags) # don't use caches for this as the env state will have changed - return nothing # returning nothing indicates a process waited for another - else - delete!(pkgspidlocked, pkg_config) - return f() # precompile - end - end + @invokelatest Base.mkpidlock_hook(() -> begin + # double-check in case the other process crashed or the lock expired + if Base.isprecompiled(pkg; ignore_loaded=true, flags=cacheflags) # don't use caches for this as the env state will have changed + return nothing # returning nothing indicates a process waited for another + else + delete!(pkgspidlocked, pkg_config) + return f() # precompile + end + end, + pidfile; stale_age) end return cachefile end 
From bdd4e0568756142be22162c8a7920047499f1c62 Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Wed, 20 Nov 2024 16:21:57 +0100 Subject: [PATCH 471/537] make `choosetests` find the test files for devved stdlibs (#56558) I want to move out LinearAlgebra into its own repository but I still want to be able to run its tests in parallel. The easiest would be to be able to use `Base.runtests` but right now it hard codes the path to the stdlib folder. Instead, use the loading mechanism to look up where the stdlib of the active project actively resides. --- base/util.jl | 7 +++++-- test/choosetests.jl | 5 +++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/base/util.jl b/base/util.jl index 3ce64e50f7e29..c01ff697e64e3 100644 --- a/base/util.jl +++ b/base/util.jl @@ -678,7 +678,7 @@ end """ Base.runtests(tests=["all"]; ncores=ceil(Int, Sys.CPU_THREADS / 2), - exit_on_error=false, revise=false, [seed]) + exit_on_error=false, revise=false, propagate_project=true, [seed]) Run the Julia unit tests listed in `tests`, which can be either a string or an array of strings, using `ncores` processors. If `exit_on_error` is `false`, when one test @@ -686,12 +686,14 @@ fails, all remaining tests in other files will still be run; they are otherwise when `exit_on_error == true`. If `revise` is `true`, the `Revise` package is used to load any modifications to `Base` or to the standard libraries before running the tests. +If `propagate_project` is true the current project is propagated to the test environment. If a seed is provided via the keyword argument, it is used to seed the global RNG in the context where the tests are run; otherwise the seed is chosen randomly. """ function runtests(tests = ["all"]; ncores::Int = ceil(Int, Sys.CPU_THREADS / 2), exit_on_error::Bool=false, revise::Bool=false, + propagate_project::Bool=false, seed::Union{BitInteger,Nothing}=nothing) if isa(tests,AbstractString) tests = split(tests) @@ -706,8 +708,9 @@ function runtests(tests = ["all"]; ncores::Int = ceil(Int, Sys.CPU_THREADS / 2), ENV2["JULIA_LOAD_PATH"] = string("@", pathsep, "@stdlib") ENV2["JULIA_TESTS"] = "true" delete!(ENV2, "JULIA_PROJECT") + project_flag = propagate_project ? `--project` : `` try - run(setenv(`$(julia_cmd()) $(joinpath(Sys.BINDIR, + run(setenv(`$(julia_cmd()) $project_flag $(joinpath(Sys.BINDIR, Base.DATAROOTDIR, "julia", "test", "runtests.jl")) $tests`, ENV2)) nothing catch diff --git a/test/choosetests.jl b/test/choosetests.jl index ec757f42b42c1..ed441131f061f 100644 --- a/test/choosetests.jl +++ b/test/choosetests.jl @@ -48,10 +48,11 @@ const NETWORK_REQUIRED_LIST = vcat(INTERNET_REQUIRED_LIST, ["Sockets"]) function test_path(test) t = split(test, '/') if t[1] in STDLIBS + pkgdir = abspath(Base.find_package(String(t[1])), "..", "..") if length(t) == 2 - return joinpath(STDLIB_DIR, t[1], "test", t[2]) + return joinpath(pkgdir, "test", t[2]) else - return joinpath(STDLIB_DIR, t[1], "test", "runtests") + return joinpath(pkgdir, "test", "runtests") end elseif t[1] == "Compiler" testpath = length(t) >= 2 ? 
t[2:end] : ("runtests",) From 0592b5452a83bf02ed8b68040293da6d3e5c40d2 Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Wed, 20 Nov 2024 17:47:06 +0100 Subject: [PATCH 472/537] use the correct path to include Compiler.jl in release builds (#56601) https://github.com/JuliaLang/julia/pull/56409 broke PackageCompiler (or other use cases where you want to compile a new core compiler from a release build) since it hardcoded the relative path `../usr/` from Base to the `shared` directory but this is not true in releases where it is at `..`. --- Compiler/src/Compiler.jl | 4 ++-- base/Base_compiler.jl | 19 ++++++++++++------- sysimage.mk | 5 +++-- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index 4104b71093f4d..b648fd3f295eb 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -87,14 +87,14 @@ eval(m, x) = Core.eval(m, x) function include(x::String) if !isdefined(Base, :end_base_include) # During bootstrap, all includes are relative to `base/` - x = Base.strcat(Base.strcat(Base.BUILDROOT, "../usr/share/julia/Compiler/src/"), x) + x = Base.strcat(Base.strcat(Base.DATAROOT, "julia/Compiler/src/"), x) end Base.include(Compiler, x) end function include(mod::Module, x::String) if !isdefined(Base, :end_base_include) - x = Base.strcat(Base.strcat(Base.BUILDROOT, "../usr/share/julia/Compiler/src/"), x) + x = Base.strcat(Base.strcat(Base.DATAROOT, "julia/Compiler/src/"), x) end Base.include(mod, x) end diff --git a/base/Base_compiler.jl b/base/Base_compiler.jl index b2633c25eef3f..14edf3e93aad6 100644 --- a/base/Base_compiler.jl +++ b/base/Base_compiler.jl @@ -266,21 +266,25 @@ function strcat(x::String, y::String) return out end -global BUILDROOT::String = "" +BUILDROOT::String = "" +DATAROOT::String = "" baremodule BuildSettings end function process_sysimg_args!() - let i = 1 - global BUILDROOT + let i = 2 # skip file name while i <= length(Core.ARGS) + Core.println(Core.ARGS[i]) if Core.ARGS[i] == "--buildsettings" include(BuildSettings, ARGS[i+1]) - i += 1 + elseif Core.ARGS[i] == "--buildroot" + global BUILDROOT = Core.ARGS[i+1] + elseif Core.ARGS[i] == "--dataroot" + global DATAROOT = Core.ARGS[i+1] else - BUILDROOT = Core.ARGS[i] + error(strcat("invalid sysimage argument: ", Core.ARGS[i])) end - i += 1 + i += 2 end end end @@ -288,7 +292,8 @@ process_sysimg_args!() function isready end -include(strcat(BUILDROOT, "../usr/share/julia/Compiler/src/Compiler.jl")) +include(strcat(DATAROOT, "julia/Compiler/src/Compiler.jl")) + const _return_type = Compiler.return_type diff --git a/sysimage.mk b/sysimage.mk index ceed9657dc807..5371fbd975025 100644 --- a/sysimage.mk +++ b/sysimage.mk @@ -61,18 +61,19 @@ BASE_SRCS := $(sort $(shell find $(JULIAHOME)/base -name \*.jl -and -not -name s $(shell find $(BUILDROOT)/base -name \*.jl -and -not -name sysimg.jl)) STDLIB_SRCS := $(JULIAHOME)/base/sysimg.jl $(SYSIMG_STDLIBS_SRCS) RELBUILDROOT := $(call rel_path,$(JULIAHOME)/base,$(BUILDROOT)/base)/ # <-- make sure this always has a trailing slash +RELDATADIR := $(call rel_path,$(JULIAHOME)/base,$(build_datarootdir))/ # <-- make sure this always has a trailing slash $(build_private_libdir)/basecompiler.ji: $(COMPILER_SRCS) @$(call PRINT_JULIA, cd $(JULIAHOME)/base && \ $(call spawn,$(JULIA_EXECUTABLE)) -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp \ - --startup-file=no --warn-overwrite=yes -g$(BOOTSTRAP_DEBUG_LEVEL) -O1 Base_compiler.jl $(RELBUILDROOT)) + --startup-file=no --warn-overwrite=yes 
-g$(BOOTSTRAP_DEBUG_LEVEL) -O1 Base_compiler.jl --buildroot $(RELBUILDROOT) --dataroot $(RELDATADIR)) @mv $@.tmp $@ $(build_private_libdir)/sys.ji: $(build_private_libdir)/basecompiler.ji $(JULIAHOME)/VERSION $(BASE_SRCS) $(STDLIB_SRCS) @$(call PRINT_JULIA, cd $(JULIAHOME)/base && \ if ! JULIA_BINDIR=$(call cygpath_w,$(build_bindir)) WINEPATH="$(call cygpath_w,$(build_bindir));$$WINEPATH" \ $(call spawn, $(JULIA_EXECUTABLE)) -g1 -O1 -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp $(JULIA_SYSIMG_BUILD_FLAGS) \ - --startup-file=no --warn-overwrite=yes --sysimage $(call cygpath_w,$<) sysimg.jl $(RELBUILDROOT); then \ + --startup-file=no --warn-overwrite=yes --sysimage $(call cygpath_w,$<) sysimg.jl --buildroot $(RELBUILDROOT) --dataroot $(RELDATADIR); then \ echo '*** This error might be fixed by running `make clean`. If the error persists$(COMMA) try `make cleanall`. ***'; \ false; \ fi ) From d9d1fc5be8f40ae9b1276a556b62745de71a8ee0 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Wed, 20 Nov 2024 15:17:40 -0500 Subject: [PATCH 473/537] fix some new-edges issues (#56598) - incorrect edge types were being added from inlining: there is minimal dispatch info available, so best not to add that (which was already added earlier) as it results in failures to validate later - MethodTable/sig order in edges could confuse the iterator: always put the type before the edge now as that is more consistent - edges wasn't converted to a SimpleVector, so they might get ignored later from being in the wrong format - edges were not populated for optimize=false, which made debugging them more inconvenient Fixes #56577 --- Compiler/src/optimize.jl | 2 +- Compiler/src/ssair/inlining.jl | 41 ++++++----------- Compiler/src/stmtinfo.jl | 77 ++++++++++++++++++++++++++++---- Compiler/src/typeinfer.jl | 57 +++++++++++++++++++----- Compiler/src/utilities.jl | 81 ---------------------------------- src/staticdata_utils.c | 28 ++++++------ 6 files changed, 143 insertions(+), 143 deletions(-) diff --git a/Compiler/src/optimize.jl b/Compiler/src/optimize.jl index 6de8973778c94..d2dfd26bfa00d 100644 --- a/Compiler/src/optimize.jl +++ b/Compiler/src/optimize.jl @@ -224,7 +224,7 @@ include("ssair/irinterp.jl") function ir_to_codeinf!(opt::OptimizationState) (; linfo, src) = opt src = ir_to_codeinf!(src, opt.ir::IRCode) - src.edges = opt.inlining.edges + src.edges = Core.svec(opt.inlining.edges...) 
opt.ir = nothing maybe_validate_code(linfo, src, "optimized") return src diff --git a/Compiler/src/ssair/inlining.jl b/Compiler/src/ssair/inlining.jl index 98be475520f01..02b58b518a72a 100644 --- a/Compiler/src/ssair/inlining.jl +++ b/Compiler/src/ssair/inlining.jl @@ -64,20 +64,11 @@ end struct InliningEdgeTracker edges::Vector{Any} - invokesig::Union{Nothing,Vector{Any}} - InliningEdgeTracker(state::InliningState, invokesig::Union{Nothing,Vector{Any}}=nothing) = - new(state.edges, invokesig) + InliningEdgeTracker(state::InliningState) = new(state.edges) end -function add_inlining_edge!(et::InliningEdgeTracker, edge::Union{CodeInstance,MethodInstance}) - (; edges, invokesig) = et - if invokesig === nothing - add_one_edge!(edges, edge) - else # invoke backedge - add_invoke_edge!(edges, invoke_signature(invokesig), edge) - end - return nothing -end +add_inlining_edge!(et::InliningEdgeTracker, edge::CodeInstance) = add_inlining_edge!(et.edges, edge) +add_inlining_edge!(et::InliningEdgeTracker, edge::MethodInstance) = add_inlining_edge!(et.edges, edge) function ssa_inlining_pass!(ir::IRCode, state::InliningState, propagate_inbounds::Bool) # Go through the function, performing simple inlining (e.g. replacing call by constants @@ -795,10 +786,7 @@ function compileable_specialization(mi::MethodInstance, effects::Effects, return nothing end end - add_inlining_edge!(et, mi) # to the dispatch lookup - if mi_invoke !== mi - add_invoke_edge!(et.edges, method.sig, mi_invoke) # add_inlining_edge to the invoke call, if that is different - end + add_inlining_edge!(et, mi_invoke) # to the dispatch lookup return InvokeCase(mi_invoke, effects, info) end @@ -834,9 +822,8 @@ end # the general resolver for usual and const-prop'ed calls function resolve_todo(mi::MethodInstance, result::Union{Nothing,InferenceResult,VolatileInferenceResult}, - @nospecialize(info::CallInfo), flag::UInt32, state::InliningState; - invokesig::Union{Nothing,Vector{Any}}=nothing) - et = InliningEdgeTracker(state, invokesig) + @nospecialize(info::CallInfo), flag::UInt32, state::InliningState) + et = InliningEdgeTracker(state) preserve_local_sources = true if isa(result, InferenceResult) @@ -922,7 +909,7 @@ end function analyze_method!(match::MethodMatch, argtypes::Vector{Any}, @nospecialize(info::CallInfo), flag::UInt32, state::InliningState; - allow_typevars::Bool, invokesig::Union{Nothing,Vector{Any}}=nothing, + allow_typevars::Bool, volatile_inf_result::Union{Nothing,VolatileInferenceResult}=nothing) method = match.method @@ -953,7 +940,7 @@ function analyze_method!(match::MethodMatch, argtypes::Vector{Any}, # Get the specialization for this method signature # (later we will decide what to do with it) mi = specialize_method(match) - return resolve_todo(mi, volatile_inf_result, info, flag, state; invokesig) + return resolve_todo(mi, volatile_inf_result, info, flag, state) end function retrieve_ir_for_inlining(cached_result::CodeInstance, src::String) @@ -1164,9 +1151,8 @@ function handle_invoke_call!(todo::Vector{Pair{Int,Any}}, return nothing end result = info.result - invokesig = sig.argtypes if isa(result, ConcreteResult) - item = concrete_result_item(result, info, state; invokesig) + item = concrete_result_item(result, info, state) elseif isa(result, SemiConcreteResult) item = semiconcrete_result_item(result, info, flag, state) else @@ -1175,13 +1161,13 @@ function handle_invoke_call!(todo::Vector{Pair{Int,Any}}, mi = result.result.linfo validate_sparams(mi.sparam_vals) || return nothing if Union{} !== argtypes_to_type(argtypes) 
<: mi.def.sig - item = resolve_todo(mi, result.result, info, flag, state; invokesig) + item = resolve_todo(mi, result.result, info, flag, state) handle_single_case!(todo, ir, idx, stmt, item, true) return nothing end end volatile_inf_result = result isa VolatileInferenceResult ? result : nothing - item = analyze_method!(match, argtypes, info, flag, state; allow_typevars=false, invokesig, volatile_inf_result) + item = analyze_method!(match, argtypes, info, flag, state; allow_typevars=false, volatile_inf_result) end handle_single_case!(todo, ir, idx, stmt, item, true) return nothing @@ -1477,10 +1463,9 @@ end may_inline_concrete_result(result::ConcreteResult) = isdefined(result, :result) && is_inlineable_constant(result.result) -function concrete_result_item(result::ConcreteResult, @nospecialize(info::CallInfo), state::InliningState; - invokesig::Union{Nothing,Vector{Any}}=nothing) +function concrete_result_item(result::ConcreteResult, @nospecialize(info::CallInfo), state::InliningState) if !may_inline_concrete_result(result) - et = InliningEdgeTracker(state, invokesig) + et = InliningEdgeTracker(state) return compileable_specialization(result.edge.def, result.effects, et, info, state) end @assert result.effects === EFFECTS_TOTAL diff --git a/Compiler/src/stmtinfo.jl b/Compiler/src/stmtinfo.jl index 83d0b66e4d564..830bfa02d2d99 100644 --- a/Compiler/src/stmtinfo.jl +++ b/Compiler/src/stmtinfo.jl @@ -49,14 +49,15 @@ function _add_edges_impl(edges::Vector{Any}, info::MethodMatchInfo, mi_edge::Boo if !fully_covering(info) # add legacy-style missing backedge info also exists = false - for i in 1:length(edges) - if edges[i] === info.mt && edges[i+1] == info.atype + for i in 2:length(edges) + if edges[i] === info.mt && edges[i-1] == info.atype exists = true break end end if !exists - push!(edges, info.mt, info.atype) + push!(edges, info.atype) + push!(edges, info.mt) end end nmatches = length(info.results) @@ -98,22 +99,27 @@ function _add_edges_impl(edges::Vector{Any}, info::MethodMatchInfo, mi_edge::Boo nothing end function add_one_edge!(edges::Vector{Any}, edge::MethodInstance) - for i in 1:length(edges) + i = 1 + while i <= length(edges) edgeᵢ = edges[i] + edgeᵢ isa Int && (i += 2 + edgeᵢ; continue) edgeᵢ isa CodeInstance && (edgeᵢ = edgeᵢ.def) - edgeᵢ isa MethodInstance || continue + edgeᵢ isa MethodInstance || (i += 1; continue) if edgeᵢ === edge && !(i > 1 && edges[i-1] isa Type) return # found existing covered edge end + i += 1 end push!(edges, edge) nothing end function add_one_edge!(edges::Vector{Any}, edge::CodeInstance) - for i in 1:length(edges) + i = 1 + while i <= length(edges) edgeᵢ_orig = edgeᵢ = edges[i] + edgeᵢ isa Int && (i += 2 + edgeᵢ; continue) edgeᵢ isa CodeInstance && (edgeᵢ = edgeᵢ.def) - edgeᵢ isa MethodInstance || continue + edgeᵢ isa MethodInstance || (i += 1; continue) if edgeᵢ === edge.def && !(i > 1 && edges[i-1] isa Type) if edgeᵢ_orig isa MethodInstance # found edge we can upgrade @@ -123,6 +129,7 @@ function add_one_edge!(edges::Vector{Any}, edge::CodeInstance) return end end + i += 1 end push!(edges, edge) nothing @@ -296,7 +303,8 @@ function add_invoke_edge!(edges::Vector{Any}, @nospecialize(atype), edge::Union{ end end end - push!(edges, atype, edge) + push!(edges, atype) + push!(edges, edge) nothing end function add_invoke_edge!(edges::Vector{Any}, @nospecialize(atype), edge::CodeInstance) @@ -317,10 +325,61 @@ function add_invoke_edge!(edges::Vector{Any}, @nospecialize(atype), edge::CodeIn end end end - push!(edges, atype, edge) + push!(edges, atype) + 
push!(edges, edge) nothing end +function add_inlining_edge!(edges::Vector{Any}, edge::MethodInstance) + # check if we already have an edge to this code + i = 1 + while i <= length(edges) + edgeᵢ = edges[i] + if edgeᵢ isa Method && edgeᵢ === edge.def + # found edge we can upgrade + edges[i] = edge + return + end + edgeᵢ isa CodeInstance && (edgeᵢ = edgeᵢ.def) + if edgeᵢ isa MethodInstance && edgeᵢ === edge + return # found existing covered edge + end + i += 1 + end + # add_invoke_edge alone + push!(edges, (edge.def::Method).sig) + push!(edges, edge) + nothing +end +function add_inlining_edge!(edges::Vector{Any}, edge::CodeInstance) + # check if we already have an edge to this code + i = 1 + while i <= length(edges) + edgeᵢ = edges[i] + if edgeᵢ isa Method && edgeᵢ === edge.def.def + # found edge we can upgrade + edges[i] = edge + return + end + if edgeᵢ isa MethodInstance && edgeᵢ === edge.def + # found edge we can upgrade + edges[i] = edge + return + end + if edgeᵢ isa CodeInstance && edgeᵢ.def === edge.def + # found existing edge + # XXX compare `CodeInstance` identify? + return + end + i += 1 + end + # add_invoke_edge alone + push!(edges, (edge.def.def::Method).sig) + push!(edges, edge) + nothing +end + + """ info::OpaqueClosureCallInfo diff --git a/Compiler/src/typeinfer.jl b/Compiler/src/typeinfer.jl index 94c65684e672c..544c5d5739795 100644 --- a/Compiler/src/typeinfer.jl +++ b/Compiler/src/typeinfer.jl @@ -97,7 +97,12 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState; result = caller.result opt = result.src if opt isa OptimizationState - result.src = ir_to_codeinf!(opt) + src = ir_to_codeinf!(opt) + edges = src.edges::SimpleVector + caller.src = result.src = src + else + edges = Core.svec(caller.edges...) + caller.src.edges = edges end #@assert last(result.valid_worlds) <= get_world_counter() || isempty(caller.edges) if isdefined(result, :ci) @@ -112,7 +117,7 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState; if last(result.valid_worlds) == typemax(UInt) # if we can record all of the backedges in the global reverse-cache, # we can now widen our applicability in the global cache too - store_backedges(ci, caller.edges) + store_backedges(ci, edges) end inferred_result = nothing relocatability = 0x1 @@ -142,7 +147,7 @@ function finish!(interp::AbstractInterpreter, caller::InferenceState; end ccall(:jl_update_codeinst, Cvoid, (Any, Any, Int32, UInt, UInt, UInt32, Any, UInt8, Any, Any), ci, inferred_result, const_flag, first(result.valid_worlds), last(result.valid_worlds), encode_effects(result.ipo_effects), - result.analysis_results, relocatability, di, Core.svec(caller.edges...)) + result.analysis_results, relocatability, di, edges) engine_reject(interp, ci) end return nothing @@ -488,14 +493,43 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter) end # record the backedges -function store_backedges(caller::CodeInstance, edges::Vector{Any}) +function store_backedges(caller::CodeInstance, edges::SimpleVector) isa(caller.def.def, Method) || return # don't add backedges to toplevel method instance - for itr in BackedgeIterator(edges) - callee = itr.caller - if isa(callee, MethodInstance) - ccall(:jl_method_instance_add_backedge, Cvoid, (Any, Any, Any), callee, itr.sig, caller) + i = 1 + while true + i > length(edges) && return nothing + item = edges[i] + if item isa Int + i += 2 + continue # ignore the query information if present but process the contents + elseif isa(item, Method) + # ignore `Method`-edges (from e.g. 
failed `abstract_call_method`) + i += 1 + continue + end + if isa(item, CodeInstance) + item = item.def + end + if isa(item, MethodInstance) # regular dispatch + ccall(:jl_method_instance_add_backedge, Cvoid, (Any, Any, Any), item, nothing, caller) + i += 1 else - ccall(:jl_method_table_add_backedge, Cvoid, (Any, Any, Any), callee, itr.sig, caller) + callee = edges[i+1] + if isa(callee, MethodTable) # abstract dispatch (legacy style edges) + ccall(:jl_method_table_add_backedge, Cvoid, (Any, Any, Any), callee, item, caller) + i += 2 + continue + end + # `invoke` edge + if isa(callee, Method) + # ignore `Method`-edges (from e.g. failed `abstract_call_method`) + i += 2 + continue + elseif isa(callee, CodeInstance) + callee = callee.def + end + ccall(:jl_method_instance_add_backedge, Cvoid, (Any, Any, Any), callee, item, caller) + i += 2 end end nothing @@ -734,13 +768,14 @@ function codeinst_as_edge(interp::AbstractInterpreter, sv::InferenceState) if max_world >= get_world_counter() max_world = typemax(UInt) end + edges = Core.svec(sv.edges...) ci = CodeInstance(mi, owner, Any, Any, nothing, nothing, zero(Int32), - min_world, max_world, zero(UInt32), nothing, zero(UInt8), nothing, Core.svec(sv.edges...)) + min_world, max_world, zero(UInt32), nothing, zero(UInt8), nothing, edges) if max_world == typemax(UInt) # if we can record all of the backedges in the global reverse-cache, # we can now widen our applicability in the global cache too # TODO: this should probably come after we decide this edge is even useful - store_backedges(ci, sv.edges) + store_backedges(ci, edges) end return ci end diff --git a/Compiler/src/utilities.jl b/Compiler/src/utilities.jl index 0f1e2988bd669..11d926f0c9d4e 100644 --- a/Compiler/src/utilities.jl +++ b/Compiler/src/utilities.jl @@ -205,87 +205,6 @@ Check if `method` is declared as `Base.@constprop :none`. """ is_no_constprop(method::Union{Method,CodeInfo}) = method.constprop == 0x02 -############# -# backedges # -############# - -""" - BackedgeIterator(backedges::Vector{Any}) - -Return an iterator over a list of backedges. Iteration returns `(sig, caller)` elements, -which will be one of the following: - -- `BackedgePair(nothing, caller::MethodInstance)`: a call made by ordinary inferable dispatch -- `BackedgePair(invokesig::Type, caller::MethodInstance)`: a call made by `invoke(f, invokesig, args...)` -- `BackedgePair(specsig::Type, mt::MethodTable)`: an abstract call - -# Examples - -```julia -julia> callme(x) = x+1 -callme (generic function with 1 method) - -julia> callyou(x) = callme(x) -callyou (generic function with 1 method) - -julia> callyou(2.0) -3.0 - -julia> mi = which(callme, (Any,)).specializations -MethodInstance for callme(::Float64) - -julia> @eval Core.Compiler for (; sig, caller) in BackedgeIterator(Main.mi.backedges) - println(sig) - println(caller) - end -nothing -callyou(Float64) from callyou(Any) -``` -""" -struct BackedgeIterator - backedges::Vector{Any} -end - -struct BackedgePair - sig # ::Union{Nothing,Type} - caller::Union{MethodInstance,MethodTable} - BackedgePair(@nospecialize(sig), caller::Union{MethodInstance,MethodTable}) = new(sig, caller) -end - -function iterate(iter::BackedgeIterator, i::Int=1) - backedges = iter.backedges - while true - i > length(backedges) && return nothing - item = backedges[i] - if item isa Int - i += 2 - continue # ignore the query information if present - elseif isa(item, Method) - # ignore `Method`-edges (from e.g. 
failed `abstract_call_method`) - i += 1 - continue - end - if isa(item, CodeInstance) - item = item.def - end - if isa(item, MethodInstance) # regular dispatch - return BackedgePair(nothing, item), i+1 - elseif isa(item, MethodTable) # abstract dispatch (legacy style edges) - return BackedgePair(backedges[i+1], item), i+2 - else # `invoke` call - callee = backedges[i+1] - if isa(callee, Method) - i += 2 - continue - end - if isa(callee, CodeInstance) - callee = callee.def - end - return BackedgePair(item, callee::MethodInstance), i+2 - end - end -end - ######### # types # ######### diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index 81c2e5cb18e32..32e59d7d7c641 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -892,18 +892,19 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t *minworld, size j += 2 + nedges; edge = sig; } - else if (jl_is_mtable(edge)) { - // skip the legacy edge (missing backedge) - j += 2; - continue; - } else { jl_method_instance_t *callee = (jl_method_instance_t*)jl_svecref(callees, j + 1); jl_method_t *meth; + if (jl_is_mtable(callee)) { + // skip the legacy edge (missing backedge) + j += 2; + continue; + } if (jl_is_code_instance(callee)) callee = ((jl_code_instance_t*)callee)->def; - if (jl_is_method_instance(callee)) + if (jl_is_method_instance(callee)) { meth = callee->def.method; + } else { assert(jl_is_method(callee)); meth = (jl_method_t*)callee; @@ -1052,16 +1053,17 @@ static void jl_insert_backedges(jl_array_t *edges, jl_array_t *ext_ci_list) jl_method_instance_add_backedge((jl_method_instance_t*)edge, NULL, codeinst); j += 1; } - else if (jl_is_mtable(edge)) { - jl_methtable_t *mt = (jl_methtable_t*)edge; - jl_value_t *sig = jl_svecref(callees, j + 1); - jl_method_table_add_backedge(mt, sig, codeinst); - j += 2; - } else { jl_value_t *callee = jl_svecref(callees, j + 1); - if (jl_is_code_instance(callee)) + if (jl_is_mtable(callee)) { + jl_methtable_t *mt = (jl_methtable_t*)callee; + jl_method_table_add_backedge(mt, edge, codeinst); + j += 2; + continue; + } + else if (jl_is_code_instance(callee)) { callee = (jl_value_t*)((jl_code_instance_t*)callee)->def; + } else if (jl_is_method(callee)) { j += 2; continue; From 6c5f221c1d1da0e174b15ab52af702e736493d48 Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Wed, 20 Nov 2024 19:18:50 -0500 Subject: [PATCH 474/537] Prevent extensions from blocking parallel pre-compilation (#55910) Previously our precompilation code was causing anything with package A as a dependency to wait on all of A's extensions and weakdeps to finish before starting to pre-compile, even if it can't actually load those weakdeps (or the extensions themselves) This would lead to a pre-compile ordering like: ``` A B \ / \ Ext AB \ | / C / \ / D ``` Here `C` cannot pre-compile in parallel with `Ext {A,B}` and `B`, because it has to wait for `Ext {A,B}` to finish pre-compiling. That happens even though `C` has no way to load either of these. 
This change updates the pre-compile ordering to be more parallel, reflecting the true place where `Ext {A,B}` can be loaded: ``` A B / \ / \ C Ext AB | \ | / \-- D --/ ``` which allows `C` to compile in parallel with `B` and `Ext{A,B}` --- base/precompilation.jl | 157 +++++++++++++++++++++++++---------------- 1 file changed, 97 insertions(+), 60 deletions(-) diff --git a/base/precompilation.jl b/base/precompilation.jl index 2fe560be9a805..34dd4c4df9cb9 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -417,13 +417,16 @@ function _precompilepkgs(pkgs::Vector{String}, stale_cache = Dict{StaleCacheKey, Bool}() cachepath_cache = Dict{PkgId, Vector{String}}() - exts = Dict{PkgId, String}() # ext -> parent - # make a flat map of each dep and its direct deps - depsmap = Dict{PkgId, Vector{PkgId}}() - pkg_exts_map = Dict{PkgId, Vector{PkgId}}() + # a map from packages/extensions to their direct deps + direct_deps = Dict{Base.PkgId, Vector{Base.PkgId}}() + # a map from parent → extension, including all extensions that are loadable + # in the current environment (i.e. their triggers are present) + parent_to_exts = Dict{Base.PkgId, Vector{Base.PkgId}}() + # inverse map of `parent_to_ext` above (ext → parent) + ext_to_parent = Dict{Base.PkgId, Base.PkgId}() function describe_pkg(pkg::PkgId, is_direct_dep::Bool, flags::Cmd, cacheflags::Base.CacheFlags) - name = haskey(exts, pkg) ? string(exts[pkg], " → ", pkg.name) : pkg.name + name = haskey(ext_to_parent, pkg) ? string(ext_to_parent[pkg].name, " → ", pkg.name) : pkg.name name = is_direct_dep ? name : color_string(name, :light_black) if nconfigs > 1 && !isempty(flags) config_str = join(flags, " ") @@ -441,66 +444,101 @@ function _precompilepkgs(pkgs::Vector{String}, pkg = Base.PkgId(dep, env.names[dep]) Base.in_sysimage(pkg) && continue deps = [Base.PkgId(x, env.names[x]) for x in deps] - depsmap[pkg] = filter!(!Base.in_sysimage, deps) - # add any extensions - pkg_exts = Dict{Base.PkgId, Vector{Base.PkgId}}() - for (ext_name, extdep_uuids) in env.extensions[dep] + direct_deps[pkg] = filter!(!Base.in_sysimage, deps) + for (ext_name, trigger_uuids) in env.extensions[dep] ext_uuid = Base.uuid5(pkg.uuid, ext_name) ext = Base.PkgId(ext_uuid, ext_name) triggers[ext] = Base.PkgId[pkg] # depends on parent package - all_extdeps_available = true - for extdep_uuid in extdep_uuids - extdep_name = env.names[extdep_uuid] - if extdep_uuid in keys(env.deps) - push!(triggers[ext], Base.PkgId(extdep_uuid, extdep_name)) + all_triggers_available = true + for trigger_uuid in trigger_uuids + trigger_name = env.names[trigger_uuid] + if trigger_uuid in keys(env.deps) + push!(triggers[ext], Base.PkgId(trigger_uuid, trigger_name)) else - all_extdeps_available = false + all_triggers_available = false break end end - all_extdeps_available || continue - exts[ext] = pkg.name - pkg_exts[ext] = depsmap[ext] = filter(!Base.in_sysimage, triggers[ext]) - end - if !isempty(pkg_exts) - pkg_exts_map[pkg] = collect(keys(pkg_exts)) + all_triggers_available || continue + ext_to_parent[ext] = pkg + direct_deps[ext] = filter(!Base.in_sysimage, triggers[ext]) + + if !haskey(parent_to_exts, pkg) + parent_to_exts[pkg] = Base.PkgId[ext] + else + push!(parent_to_exts[pkg], ext) + end end end - direct_deps = [ + project_deps = [ Base.PkgId(uuid, name) for (name, uuid) in env.project_deps if !Base.in_sysimage(Base.PkgId(uuid, name)) ] - # consider exts of direct deps to be direct deps so that errors are reported - append!(direct_deps, keys(filter(d->last(d) in keys(env.project_deps), 
exts))) + # consider exts of project deps to be project deps so that errors are reported + append!(project_deps, keys(filter(d->last(d).name in keys(env.project_deps), ext_to_parent))) @debug "precompile: deps collected" # An extension effectively depends on another extension if it has a strict superset of its triggers - for ext_a in keys(exts) - for ext_b in keys(exts) + for ext_a in keys(ext_to_parent) + for ext_b in keys(ext_to_parent) if triggers[ext_a] ⊋ triggers[ext_b] - push!(depsmap[ext_a], ext_b) + push!(direct_deps[ext_a], ext_b) end end end - # this loop must be run after the full depsmap has been populated - for (pkg, pkg_exts) in pkg_exts_map - # find any packages that depend on the extension(s)'s deps and replace those deps in their deps list with the extension(s), - # basically injecting the extension into the precompile order in the graph, to avoid race to precompile extensions - for (_pkg, deps) in depsmap # for each manifest dep - if !in(_pkg, keys(exts)) && pkg in deps # if not an extension and depends on pkg - append!(deps, pkg_exts) # add the package extensions to deps - filter!(!isequal(pkg), deps) # remove the pkg from deps + # A package depends on an extension if it (indirectly) depends on all extension triggers + function expand_indirect_dependencies(direct_deps) + function visit!(visited, node, all_deps) + if node in visited + return + end + push!(visited, node) + for dep in get(Set{Base.PkgId}, direct_deps, node) + if !(dep in all_deps) + push!(all_deps, dep) + visit!(visited, dep, all_deps) + end + end + end + + indirect_deps = Dict{Base.PkgId, Set{Base.PkgId}}() + for package in keys(direct_deps) + # Initialize a set to keep track of all dependencies for 'package' + all_deps = Set{Base.PkgId}() + visited = Set{Base.PkgId}() + visit!(visited, package, all_deps) + # Update direct_deps with the complete set of dependencies for 'package' + indirect_deps[package] = all_deps + end + return indirect_deps + end + + # this loop must be run after the full direct_deps map has been populated + indirect_deps = expand_indirect_dependencies(direct_deps) + for ext in keys(ext_to_parent) + ext_loadable_in_pkg = Dict{Base.PkgId,Bool}() + for pkg in keys(direct_deps) + is_trigger = in(pkg, direct_deps[ext]) + is_extension = in(pkg, keys(ext_to_parent)) + has_triggers = issubset(direct_deps[ext], indirect_deps[pkg]) + ext_loadable_in_pkg[pkg] = !is_extension && has_triggers && !is_trigger + end + for (pkg, ext_loadable) in ext_loadable_in_pkg + if ext_loadable && !any((dep)->ext_loadable_in_pkg[dep], direct_deps[pkg]) + # add an edge if the extension is loadable by pkg, and was not loadable in any + # of the pkg's dependencies + push!(direct_deps[pkg], ext) end end end @debug "precompile: extensions collected" # return early if no deps - if isempty(depsmap) + if isempty(direct_deps) if isempty(pkgs) return elseif _from_loading @@ -518,7 +556,7 @@ function _precompilepkgs(pkgs::Vector{String}, was_processed = Dict{PkgConfig,Base.Event}() was_recompiled = Dict{PkgConfig,Bool}() for config in configs - for pkgid in keys(depsmap) + for pkgid in keys(direct_deps) pkg_config = (pkgid, config) started[pkg_config] = false was_processed[pkg_config] = Base.Event() @@ -527,7 +565,6 @@ function _precompilepkgs(pkgs::Vector{String}, end @debug "precompile: signalling initialized" - # find and guard against circular deps circular_deps = Base.PkgId[] # Three states @@ -554,8 +591,8 @@ function _precompilepkgs(pkgs::Vector{String}, could_be_cycle[pkg] = false return false end - for pkg in 
keys(depsmap) - if scan_pkg!(pkg, depsmap) + for pkg in keys(direct_deps) + if scan_pkg!(pkg, direct_deps) push!(circular_deps, pkg) for pkg_config in keys(was_processed) # notify all to allow skipping @@ -570,33 +607,33 @@ function _precompilepkgs(pkgs::Vector{String}, if !manifest if isempty(pkgs) - pkgs = [pkg.name for pkg in direct_deps] + pkgs = [pkg.name for pkg in project_deps] end # restrict to dependencies of given packages - function collect_all_deps(depsmap, dep, alldeps=Set{Base.PkgId}()) - for _dep in depsmap[dep] + function collect_all_deps(direct_deps, dep, alldeps=Set{Base.PkgId}()) + for _dep in direct_deps[dep] if !(_dep in alldeps) push!(alldeps, _dep) - collect_all_deps(depsmap, _dep, alldeps) + collect_all_deps(direct_deps, _dep, alldeps) end end return alldeps end keep = Set{Base.PkgId}() - for dep in depsmap + for dep in direct_deps dep_pkgid = first(dep) if dep_pkgid.name in pkgs push!(keep, dep_pkgid) - collect_all_deps(depsmap, dep_pkgid, keep) + collect_all_deps(direct_deps, dep_pkgid, keep) end end - for ext in keys(exts) - if issubset(collect_all_deps(depsmap, ext), keep) # if all extension deps are kept + for ext in keys(ext_to_parent) + if issubset(collect_all_deps(direct_deps, ext), keep) # if all extension deps are kept push!(keep, ext) end end - filter!(d->in(first(d), keep), depsmap) - if isempty(depsmap) + filter!(d->in(first(d), keep), direct_deps) + if isempty(direct_deps) if _from_loading # if called from loading precompilation it may be a package from another environment stack so # don't error and allow serial precompilation to try @@ -709,7 +746,7 @@ function _precompilepkgs(pkgs::Vector{String}, i = 1 last_length = 0 bar = MiniProgressBar(; indent=0, header = "Precompiling packages ", color = :green, percentage=false, always_reprint=true) - n_total = length(depsmap) * length(configs) + n_total = length(direct_deps) * length(configs) bar.max = n_total - n_already_precomp final_loop = false n_print_rows = 0 @@ -739,7 +776,7 @@ function _precompilepkgs(pkgs::Vector{String}, dep, config = pkg_config loaded = warn_loaded && haskey(Base.loaded_modules, dep) flags, cacheflags = config - name = describe_pkg(dep, dep in direct_deps, flags, cacheflags) + name = describe_pkg(dep, dep in project_deps, flags, cacheflags) line = if pkg_config in precomperr_deps string(color_string(" ? ", Base.warn_color()), name) elseif haskey(failed_deps, pkg_config) @@ -755,7 +792,7 @@ function _precompilepkgs(pkgs::Vector{String}, # Offset each spinner animation using the first character in the package name as the seed. # If not offset, on larger terminal fonts it looks odd that they all sync-up anim_char = anim_chars[(i + Int(dep.name[1])) % length(anim_chars) + 1] - anim_char_colored = dep in direct_deps ? anim_char : color_string(anim_char, :light_black) + anim_char_colored = dep in project_deps ? 
anim_char : color_string(anim_char, :light_black) waiting = if haskey(pkgspidlocked, pkg_config) who_has_lock = pkgspidlocked[pkg_config] color_string(" Being precompiled by $(who_has_lock)", Base.info_color()) @@ -791,10 +828,10 @@ function _precompilepkgs(pkgs::Vector{String}, if !_from_loading Base.LOADING_CACHE[] = Base.LoadingCache() end - @debug "precompile: starting precompilation loop" depsmap direct_deps + @debug "precompile: starting precompilation loop" direct_deps project_deps ## precompilation loop - for (pkg, deps) in depsmap + for (pkg, deps) in direct_deps cachepaths = get!(() -> Base.find_all_in_cache_path(pkg), cachepath_cache, pkg) sourcepath = Base.locate_package(pkg) single_requested_pkg = length(requested_pkgs) == 1 && only(requested_pkgs) == pkg.name @@ -821,13 +858,13 @@ function _precompilepkgs(pkgs::Vector{String}, is_stale = !Base.isprecompiled(pkg; ignore_loaded=true, stale_cache, cachepath_cache, cachepaths, sourcepath, flags=cacheflags) if !circular && is_stale Base.acquire(parallel_limiter) - is_direct_dep = pkg in direct_deps + is_project_dep = pkg in project_deps # std monitoring std_pipe = Base.link_pipe!(Pipe(); reader_supports_async=true, writer_supports_async=true) t_monitor = @async monitor_std(pkg_config, std_pipe; single_requested_pkg) - name = describe_pkg(pkg, is_direct_dep, flags, cacheflags) + name = describe_pkg(pkg, is_project_dep, flags, cacheflags) lock(print_lock) do if !fancyprint && isempty(pkg_queue) printpkgstyle(io, :Precompiling, something(target, "packages...")) @@ -850,7 +887,7 @@ function _precompilepkgs(pkgs::Vector{String}, keep_loaded_modules = false # for extensions, any extension in our direct dependencies is one we have a right to load # for packages, we may load any extension (all possible triggers are accounted for above) - loadable_exts = haskey(exts, pkg) ? filter((dep)->haskey(exts, dep), depsmap[pkg]) : nothing + loadable_exts = haskey(ext_to_parent, pkg) ? filter((dep)->haskey(ext_to_parent, dep), direct_deps[pkg]) : nothing Base.compilecache(pkg, sourcepath, std_pipe, std_pipe, keep_loaded_modules; flags, cacheflags, loadable_exts) end @@ -965,7 +1002,7 @@ function _precompilepkgs(pkgs::Vector{String}, else join(split(err, "\n"), color_string("\n│ ", Base.warn_color())) end - name = haskey(exts, pkg) ? string(exts[pkg], " → ", pkg.name) : pkg.name + name = haskey(ext_to_parent, pkg) ? string(ext_to_parent[pkg].name, " → ", pkg.name) : pkg.name print(iostr, color_string("\n┌ ", Base.warn_color()), name, color_string("\n│ ", Base.warn_color()), err, color_string("\n└ ", Base.warn_color())) end end @@ -981,7 +1018,7 @@ function _precompilepkgs(pkgs::Vector{String}, n_direct_errs = 0 for (pkg_config, err) in failed_deps dep, config = pkg_config - if strict || (dep in direct_deps) + if strict || (dep in project_deps) print(err_str, "\n", dep.name, " ") for cfg in config[1] print(err_str, cfg, " ") From 034e6093c53ce2aae989045cfd5942dade27198b Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Wed, 20 Nov 2024 23:51:38 -0500 Subject: [PATCH 475/537] Make world-age increments explicit (#56509) This PR introduces a new, toplevel-only, syntax form `:worldinc` that semantically represents the effect of raising the current task's world age to the latest world for the remainder of the current toplevel evaluation (that context being an entry to `eval` or a module expression). 
For detailed motivation on why this is desirable, see #55145, which I won't repeat here, but the gist is that we never really defined when world-age increments happen and, worse, are inconsistent about it. This is something we need to figure out now, because the bindings partition work will make world age even more observable via bindings. Having created a mechanism for world age increments, the big question is one of policy, i.e. when should these world age increments be inserted. Several reasonable options exist:
1. After world-age affecting syntax constructs (as proposed in #55145)
2. Option 1 + some reasonable additional cases that people rely on
3. Before any top level `call` expression
4. Before any expression at toplevel whatsoever
As an example case, consider `a == a` at toplevel. Depending on the semantics that could either be the same as in local scope, or each of the four world age dependent lookups (three binding lookups, one method lookup) could (potentially) occur in a different world age. The general tradeoff here is between the risk of exposing the user to confusing world age errors and our ability to optimize top-level code (in general, any `:worldinc` statement will require us to fully pessimize or recompile all following code). This PR basically implements option 2 with the following semantics:
1. The interpreter explicitly raises the world age only at `:worldinc` exprs or after `:module` exprs.
2. The frontend inserts `:worldinc` after all struct definitions, method definitions, `using` and `import`.
3. The `@eval` macro inserts a worldinc following the call to `eval` if at toplevel.
4. A literal (syntactic) call to `include` gains an implicit `worldinc`.
Of these the fourth is probably the most questionable, but is necessary to make this non-breaking for most code patterns. Perhaps it would have been better to make `include` a macro from the beginning (especially because it already has semantics that look a little like reaching into the calling module), but that ship has sailed. Unfortunately, I don't see any good intermediate options between this PR and option #3 above. I think option #3 is closest to what we have right now, but if we were to choose it and actually fix the soundness issues, I expect that we would be destroying all performance of global-scope code. For this reason, I would like to try to make the version in this PR work, even if the semantics are a little ugly. The biggest pattern that this PR does not catch is:
```
eval(:(f() = 1))
f()
```
We could apply the same `include` special case to eval, but given the existence of `@eval` which allows addressing this at the macro level, I decided not to. We can decide which way we want to go on this based on what the package ecosystem looks like.
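To make the distinction concrete, the two patterns look roughly like this (a sketch based on the semantics described above; `g` and `h` are illustrative names, not part of the patch):
```
# Covered: at top level, `@eval` emits an implicit world-age increment after the
# call to `eval`, so the next top-level statement sees the new method.
@eval g() = 1
g()

# Not covered: a plain call to `eval` gets no implicit increment, so within the
# same top-level evaluation the new method has to be reached via `invokelatest`.
eval(:(h() = 1))
Base.invokelatest(h)
```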
--- Compiler/src/abstractinterpretation.jl | 226 ++++++++++---------- Compiler/src/inferencestate.jl | 9 +- Compiler/src/ssair/irinterp.jl | 12 +- Compiler/src/tfuncs.jl | 2 +- Compiler/src/types.jl | 1 + Compiler/src/validation.jl | 1 + base/boot.jl | 3 +- base/essentials.jl | 12 +- base/sysimg.jl | 7 + base/tuple.jl | 2 +- src/codegen.cpp | 31 +-- src/interpreter.c | 2 - src/jlfrontend.scm | 2 +- src/julia-syntax.scm | 31 ++- src/toplevel.c | 20 +- stdlib/Logging/test/runtests.jl | 2 +- stdlib/REPL/src/REPL.jl | 4 +- stdlib/REPL/src/REPLCompletions.jl | 4 +- stdlib/REPL/test/replcompletions.jl | 273 +++++++++++++------------ stdlib/Serialization/test/runtests.jl | 4 +- stdlib/Test/src/Test.jl | 24 +++ test/arrayops.jl | 51 ++--- test/core.jl | 22 +- test/deprecation_exec.jl | 1 + test/error.jl | 2 +- test/math.jl | 78 +++---- test/ranges.jl | 6 +- test/sorting.jl | 7 +- test/syntax.jl | 25 ++- 29 files changed, 508 insertions(+), 356 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index 68668b0ac2c91..9a5b19709e697 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -113,6 +113,10 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(fun 𝕃ₚ, 𝕃ᵢ = ipo_lattice(interp), typeinf_lattice(interp) ⊑ₚ, ⋤ₚ, ⊔ₚ, ⊔ᵢ = partialorder(𝕃ₚ), strictneqpartialorder(𝕃ₚ), join(𝕃ₚ), join(𝕃ᵢ) argtypes = arginfo.argtypes + if si.saw_latestworld + add_remark!(interp, sv, "Cannot infer call, because we previously saw :latestworld") + return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) + end matches = find_method_matches(interp, argtypes, atype; max_methods) if isa(matches, FailedMethodMatch) add_remark!(interp, sv, matches.reason) @@ -321,7 +325,7 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(fun if csig !== nothing && (!seenall || csig !== sig) # corresponds to whether the first look already looked at this, so repeating abstract_call_method is not useful sp_ = ccall(:jl_type_intersection_with_env, Any, (Any, Any), csig, method.sig)::SimpleVector if match.sparams === sp_[2] - mresult = abstract_call_method(interp, method, csig, match.sparams, multiple_matches, StmtInfo(false), sv)::Future + mresult = abstract_call_method(interp, method, csig, match.sparams, multiple_matches, StmtInfo(false, false), sv)::Future isready(mresult) || return false # wait for mresult Future to resolve off the callstack before continuing end end @@ -1585,7 +1589,7 @@ function abstract_iteration(interp::AbstractInterpreter, @nospecialize(itft), @n @assert !isvarargtype(itertype) iterateresult = Future{AbstractIterationResult}() - call1future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[itft, itertype]), StmtInfo(true), sv)::Future + call1future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[itft, itertype]), StmtInfo(true, false), sv)::Future function inferiterate(interp, sv) call1 = call1future[] stateordonet = call1.rt @@ -1641,7 +1645,7 @@ function abstract_iteration(interp::AbstractInterpreter, @nospecialize(itft), @n valtype = getfield_tfunc(𝕃ᵢ, stateordonet, Const(1)) push!(ret, valtype) statetype = nstatetype - call2future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[Const(iteratef), itertype, statetype]), StmtInfo(true), sv)::Future + call2future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[Const(iteratef), itertype, statetype]), StmtInfo(true, false), sv)::Future if !isready(call2future) nextstate 
= 0x1 return false @@ -1683,7 +1687,7 @@ function abstract_iteration(interp::AbstractInterpreter, @nospecialize(itft), @n end valtype = tmerge(valtype, nounion.parameters[1]) statetype = tmerge(statetype, nounion.parameters[2]) - call2future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[Const(iteratef), itertype, statetype]), StmtInfo(true), sv)::Future + call2future = abstract_call_known(interp, iteratef, ArgInfo(nothing, Any[Const(iteratef), itertype, statetype]), StmtInfo(true, false), sv)::Future if !isready(call2future) nextstate = 0x2 return false @@ -2292,7 +2296,7 @@ end function abstract_finalizer(interp::AbstractInterpreter, argtypes::Vector{Any}, sv::AbsIntState) if length(argtypes) == 3 finalizer_argvec = Any[argtypes[2], argtypes[3]] - call = abstract_call(interp, ArgInfo(nothing, finalizer_argvec), StmtInfo(false), sv, #=max_methods=#1)::Future + call = abstract_call(interp, ArgInfo(nothing, finalizer_argvec), StmtInfo(false, false), sv, #=max_methods=#1)::Future return Future{CallMeta}(call, interp, sv) do call, interp, sv return CallMeta(Nothing, Any, Effects(), FinalizerInfo(call.info, call.effects)) end @@ -2331,12 +2335,12 @@ function abstract_throw_methoderror(interp::AbstractInterpreter, argtypes::Vecto end const generic_getglobal_effects = Effects(EFFECTS_THROWS, consistent=ALWAYS_FALSE, inaccessiblememonly=ALWAYS_FALSE) -function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, @nospecialize(M), @nospecialize(s)) +function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, @nospecialize(M), @nospecialize(s)) ⊑ = partialorder(typeinf_lattice(interp)) if M isa Const && s isa Const M, s = M.val, s.val if M isa Module && s isa Symbol - return CallMeta(abstract_eval_globalref(interp, GlobalRef(M, s), sv), NoCallInfo()) + return CallMeta(abstract_eval_globalref(interp, GlobalRef(M, s), saw_latestworld, sv), NoCallInfo()) end return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) elseif !hasintersect(widenconst(M), Module) || !hasintersect(widenconst(s), Symbol) @@ -2354,17 +2358,17 @@ function merge_exct(cm::CallMeta, @nospecialize(exct)) return cm end -function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, @nospecialize(M), @nospecialize(s), @nospecialize(order)) +function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, @nospecialize(M), @nospecialize(s), @nospecialize(order)) goe = global_order_exct(order, #=loading=#true, #=storing=#false) - cm = abstract_eval_getglobal(interp, sv, M, s) + cm = abstract_eval_getglobal(interp, sv, saw_latestworld, M, s) return merge_exct(cm, goe) end -function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) +function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any}) if length(argtypes) == 3 - return abstract_eval_getglobal(interp, sv, argtypes[2], argtypes[3]) + return abstract_eval_getglobal(interp, sv, saw_latestworld, argtypes[2], argtypes[3]) elseif length(argtypes) == 4 - return abstract_eval_getglobal(interp, sv, argtypes[2], argtypes[3], argtypes[4]) + return abstract_eval_getglobal(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4]) elseif !isvarargtype(argtypes[end]) || length(argtypes) > 5 return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) else @@ -2411,11 +2415,11 @@ end const setglobal!_effects = Effects(EFFECTS_TOTAL; 
effect_free=ALWAYS_FALSE, nothrow=false, inaccessiblememonly=ALWAYS_FALSE) -function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, @nospecialize(M), @nospecialize(s), @nospecialize(v)) +function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, @nospecialize(M), @nospecialize(s), @nospecialize(v)) if isa(M, Const) && isa(s, Const) M, s = M.val, s.val if M isa Module && s isa Symbol - exct = global_assignment_exct(interp, sv, GlobalRef(M, s), v) + exct = global_assignment_exct(interp, sv, saw_latestworld, GlobalRef(M, s), v) return CallMeta(v, exct, Effects(setglobal!_effects, nothrow=exct===Bottom), NoCallInfo()) end return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) @@ -2429,17 +2433,17 @@ function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, return CallMeta(v, Union{TypeError, ErrorException}, setglobal!_effects, NoCallInfo()) end -function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, @nospecialize(M), @nospecialize(s), @nospecialize(v), @nospecialize(order)) +function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, @nospecialize(M), @nospecialize(s), @nospecialize(v), @nospecialize(order)) goe = global_order_exct(order, #=loading=#false, #=storing=#true) - cm = abstract_eval_setglobal!(interp, sv, M, s, v) + cm = abstract_eval_setglobal!(interp, sv, saw_latestworld, M, s, v) return merge_exct(cm, goe) end -function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) +function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any}) if length(argtypes) == 4 - return abstract_eval_setglobal!(interp, sv, argtypes[2], argtypes[3], argtypes[4]) + return abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4]) elseif length(argtypes) == 5 - return abstract_eval_setglobal!(interp, sv, argtypes[2], argtypes[3], argtypes[4], argtypes[5]) + return abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4], argtypes[5]) elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6 return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) else @@ -2447,9 +2451,9 @@ function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, end end -function abstract_eval_setglobalonce!(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) +function abstract_eval_setglobalonce!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any}) if length(argtypes) in (4, 5, 6) - cm = abstract_eval_setglobal!(interp, sv, argtypes[2], argtypes[3], argtypes[4]) + cm = abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4]) if length(argtypes) >= 5 goe = global_order_exct(argtypes[5], #=loading=#true, #=storing=#true) cm = merge_exct(cm, goe) @@ -2466,7 +2470,7 @@ function abstract_eval_setglobalonce!(interp::AbstractInterpreter, sv::AbsIntSta end end -function abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntState, argtypes::Vector{Any}) +function abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any}) if length(argtypes) in (5, 6, 7) (M, s, x, v) = argtypes[2], argtypes[3], argtypes[4], argtypes[5] @@ -2485,7 +2489,7 @@ function 
abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntSta effects = merge_effects(rte.effects, Effects(setglobal!_effects, nothrow=exct===Bottom)) sg = CallMeta(Any, exct, effects, NoCallInfo()) else - sg = abstract_eval_setglobal!(interp, sv, M, s, v) + sg = abstract_eval_setglobal!(interp, sv, saw_latestworld, M, s, v) end if length(argtypes) >= 6 goe = global_order_exct(argtypes[6], #=loading=#true, #=storing=#true) @@ -2539,15 +2543,15 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), elseif f === Core.throw_methoderror return abstract_throw_methoderror(interp, argtypes, sv) elseif f === Core.getglobal - return Future(abstract_eval_getglobal(interp, sv, argtypes)) + return Future(abstract_eval_getglobal(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.setglobal! - return Future(abstract_eval_setglobal!(interp, sv, argtypes)) + return Future(abstract_eval_setglobal!(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.setglobalonce! - return Future(abstract_eval_setglobalonce!(interp, sv, argtypes)) + return Future(abstract_eval_setglobalonce!(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.replaceglobal! - return Future(abstract_eval_replaceglobal!(interp, sv, argtypes)) + return Future(abstract_eval_replaceglobal!(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.getfield && argtypes_are_actually_getglobal(argtypes) - return Future(abstract_eval_getglobal(interp, sv, argtypes)) + return Future(abstract_eval_getglobal(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.isdefined && argtypes_are_actually_getglobal(argtypes) exct = Bottom if length(argtypes) == 4 @@ -2561,6 +2565,7 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), interp, GlobalRef((argtypes[2]::Const).val::Module, (argtypes[3]::Const).val::Symbol), + si.saw_latestworld, sv), NoCallInfo()), exct)) elseif f === Core.get_binding_type @@ -2815,8 +2820,8 @@ function sp_type_rewrap(@nospecialize(T), mi::MethodInstance, isreturn::Bool) return unwraptv(T) end -function abstract_eval_cfunction(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, sv::AbsIntState) - f = abstract_eval_value(interp, e.args[2], vtypes, sv) +function abstract_eval_cfunction(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) + f = abstract_eval_value(interp, e.args[2], sstate, sv) # rt = sp_type_rewrap(e.args[3], sv.linfo, true) atv = e.args[4]::SimpleVector at = Vector{Any}(undef, length(atv) + 1) @@ -2828,18 +2833,18 @@ function abstract_eval_cfunction(interp::AbstractInterpreter, e::Expr, vtypes::U # this may be the wrong world for the call, # but some of the result is likely to be valid anyways # and that may help generate better codegen - abstract_call(interp, ArgInfo(nothing, at), StmtInfo(false), sv)::Future + abstract_call(interp, ArgInfo(nothing, at), StmtInfo(false, false), sv)::Future rt = e.args[1] isa(rt, Type) || (rt = Any) return RTEffects(rt, Any, EFFECTS_UNKNOWN) end -function abstract_eval_special_value(interp::AbstractInterpreter, @nospecialize(e), vtypes::Union{VarTable,Nothing}, sv::AbsIntState) +function abstract_eval_special_value(interp::AbstractInterpreter, @nospecialize(e), sstate::StatementState, sv::AbsIntState) if isa(e, SSAValue) return RTEffects(abstract_eval_ssavalue(e, sv), Union{}, EFFECTS_TOTAL) elseif isa(e, SlotNumber) - if vtypes !== nothing - vtyp = vtypes[slot_id(e)] + if sstate.vtypes !== nothing + vtyp = sstate.vtypes[slot_id(e)] if 
!vtyp.undef return RTEffects(vtyp.typ, Union{}, EFFECTS_TOTAL) end @@ -2847,14 +2852,14 @@ function abstract_eval_special_value(interp::AbstractInterpreter, @nospecialize( end return RTEffects(Any, UndefVarError, EFFECTS_THROWS) elseif isa(e, Argument) - if vtypes !== nothing - return RTEffects(vtypes[slot_id(e)].typ, Union{}, EFFECTS_TOTAL) + if sstate.vtypes !== nothing + return RTEffects(sstate.vtypes[slot_id(e)].typ, Union{}, EFFECTS_TOTAL) else @assert isa(sv, IRInterpretationState) return RTEffects(sv.ir.argtypes[e.n], Union{}, EFFECTS_TOTAL) # TODO frame_argtypes(sv)[e.n] and remove the assertion end elseif isa(e, GlobalRef) - return abstract_eval_globalref(interp, e, sv) + return abstract_eval_globalref(interp, e, sstate.saw_latestworld, sv) end if isa(e, QuoteNode) e = e.value @@ -2878,21 +2883,21 @@ function abstract_eval_value_expr(interp::AbstractInterpreter, e::Expr, sv::AbsI return Any end -function abstract_eval_value(interp::AbstractInterpreter, @nospecialize(e), vtypes::Union{VarTable,Nothing}, sv::AbsIntState) +function abstract_eval_value(interp::AbstractInterpreter, @nospecialize(e), sstate::StatementState, sv::AbsIntState) if isa(e, Expr) return abstract_eval_value_expr(interp, e, sv) else - (;rt, effects) = abstract_eval_special_value(interp, e, vtypes, sv) + (;rt, effects) = abstract_eval_special_value(interp, e, sstate, sv) merge_effects!(interp, sv, effects) return collect_limitations!(rt, sv) end end -function collect_argtypes(interp::AbstractInterpreter, ea::Vector{Any}, vtypes::Union{VarTable,Nothing}, sv::AbsIntState) +function collect_argtypes(interp::AbstractInterpreter, ea::Vector{Any}, sstate::StatementState, sv::AbsIntState) n = length(ea) argtypes = Vector{Any}(undef, n) @inbounds for i = 1:n - ai = abstract_eval_value(interp, ea[i], vtypes, sv) + ai = abstract_eval_value(interp, ea[i], sstate, sv) if ai === Bottom return nothing end @@ -2915,12 +2920,12 @@ end CallMeta(rte::RTEffects, info::CallInfo) = CallMeta(rte.rt, rte.exct, rte.effects, info, rte.refinements) -function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, sv::InferenceState) +function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, sstate::StatementState, sv::InferenceState) unused = call_result_unused(sv, sv.currpc) if unused add_curr_ssaflag!(sv, IR_FLAG_UNUSED) end - si = StmtInfo(!unused) + si = StmtInfo(!unused, sstate.saw_latestworld) call = abstract_call(interp, arginfo, si, sv)::Future Future{Any}(call, interp, sv) do call, interp, sv # this only is needed for the side-effect, sequenced before any task tries to consume the return value, @@ -2931,25 +2936,26 @@ function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, sv::Infere return call end -function abstract_eval_call(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, +function abstract_eval_call(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) ea = e.args - argtypes = collect_argtypes(interp, ea, vtypes, sv) + argtypes = collect_argtypes(interp, ea, sstate, sv) if argtypes === nothing return Future(RTEffects(Bottom, Any, Effects())) end arginfo = ArgInfo(ea, argtypes) - call = abstract_call(interp, arginfo, sv)::Future + call = abstract_call(interp, arginfo, sstate, sv)::Future return Future{RTEffects}(call, interp, sv) do call, interp, sv (; rt, exct, effects, refinements) = call return RTEffects(rt, exct, effects, refinements) end end -function abstract_eval_new(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, + 
+function abstract_eval_new(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) 𝕃ᵢ = typeinf_lattice(interp) - rt, isexact = instanceof_tfunc(abstract_eval_value(interp, e.args[1], vtypes, sv), true) + rt, isexact = instanceof_tfunc(abstract_eval_value(interp, e.args[1], sstate, sv), true) ut = unwrap_unionall(rt) exct = Union{ErrorException,TypeError} if isa(ut, DataType) && !isabstracttype(ut) @@ -2976,7 +2982,7 @@ function abstract_eval_new(interp::AbstractInterpreter, e::Expr, vtypes::Union{V local anyrefine = false local allconst = true for i = 1:nargs - at = widenslotwrapper(abstract_eval_value(interp, e.args[i+1], vtypes, sv)) + at = widenslotwrapper(abstract_eval_value(interp, e.args[i+1], sstate, sv)) ft = fieldtype(rt, i) nothrow && (nothrow = ⊑(𝕃ᵢ, at, ft)) at = tmeet(𝕃ᵢ, at, ft) @@ -3018,13 +3024,13 @@ function abstract_eval_new(interp::AbstractInterpreter, e::Expr, vtypes::Union{V return RTEffects(rt, exct, effects) end -function abstract_eval_splatnew(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, +function abstract_eval_splatnew(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) 𝕃ᵢ = typeinf_lattice(interp) - rt, isexact = instanceof_tfunc(abstract_eval_value(interp, e.args[1], vtypes, sv), true) + rt, isexact = instanceof_tfunc(abstract_eval_value(interp, e.args[1], sstate, sv), true) nothrow = false if length(e.args) == 2 && isconcretedispatch(rt) && !ismutabletype(rt) - at = abstract_eval_value(interp, e.args[2], vtypes, sv) + at = abstract_eval_value(interp, e.args[2], sstate, sv) n = fieldcount(rt) if (isa(at, Const) && isa(at.val, Tuple) && n == length(at.val::Tuple) && (let t = rt, at = at @@ -3048,14 +3054,14 @@ function abstract_eval_splatnew(interp::AbstractInterpreter, e::Expr, vtypes::Un return RTEffects(rt, Any, effects) end -function abstract_eval_new_opaque_closure(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, +function abstract_eval_new_opaque_closure(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) 𝕃ᵢ = typeinf_lattice(interp) rt = Union{} effects = Effects() # TODO if length(e.args) >= 5 ea = e.args - argtypes = collect_argtypes(interp, ea, vtypes, sv) + argtypes = collect_argtypes(interp, ea, sstate, sv) if argtypes === nothing rt = Bottom effects = EFFECTS_THROWS @@ -3073,7 +3079,7 @@ function abstract_eval_new_opaque_closure(interp::AbstractInterpreter, e::Expr, argtypes = most_general_argtypes(rt) pushfirst!(argtypes, rt.env) callinfo = abstract_call_opaque_closure(interp, rt, - ArgInfo(nothing, argtypes), StmtInfo(true), sv, #=check=#false)::Future + ArgInfo(nothing, argtypes), StmtInfo(true, false), sv, #=check=#false)::Future Future{Any}(callinfo, interp, sv) do callinfo, interp, sv sv.stmt_info[sv.currpc] = OpaqueClosureCreateInfo(callinfo) nothing @@ -3084,10 +3090,10 @@ function abstract_eval_new_opaque_closure(interp::AbstractInterpreter, e::Expr, return Future(RTEffects(rt, Any, effects)) end -function abstract_eval_copyast(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, +function abstract_eval_copyast(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) effects = EFFECTS_UNKNOWN - rt = abstract_eval_value(interp, e.args[1], vtypes, sv) + rt = abstract_eval_value(interp, e.args[1], sstate, sv) if rt isa Const && rt.val isa Expr # `copyast` makes copies of Exprs rt = Expr @@ -3095,11 +3101,11 @@ function abstract_eval_copyast(interp::AbstractInterpreter, e::Expr, 
vtypes::Uni return RTEffects(rt, Any, effects) end -function abstract_eval_isdefined_expr(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, +function abstract_eval_isdefined_expr(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) sym = e.args[1] - if isa(sym, SlotNumber) && vtypes !== nothing - vtyp = vtypes[slot_id(sym)] + if isa(sym, SlotNumber) && sstate.vtypes !== nothing + vtyp = sstate.vtypes[slot_id(sym)] if vtyp.typ === Bottom rt = Const(false) # never assigned previously elseif !vtyp.undef @@ -3109,16 +3115,16 @@ function abstract_eval_isdefined_expr(interp::AbstractInterpreter, e::Expr, vtyp end return RTEffects(rt, Union{}, EFFECTS_TOTAL) end - return abstract_eval_isdefined(interp, sym, sv) + return abstract_eval_isdefined(interp, sym, sstate.saw_latestworld, sv) end -function abstract_eval_isdefined(interp::AbstractInterpreter, @nospecialize(sym), sv::AbsIntState) +function abstract_eval_isdefined(interp::AbstractInterpreter, @nospecialize(sym), saw_latestworld::Bool, sv::AbsIntState) rt = Bool effects = EFFECTS_TOTAL exct = Union{} isa(sym, Symbol) && (sym = GlobalRef(frame_module(sv), sym)) if isa(sym, GlobalRef) - rte = abstract_eval_globalref(interp, sym, sv) + rte = abstract_eval_globalref(interp, sym, saw_latestworld, sv) if rte.exct == Union{} rt = Const(true) elseif rte.rt === Union{} && rte.exct === UndefVarError @@ -3142,8 +3148,8 @@ function abstract_eval_isdefined(interp::AbstractInterpreter, @nospecialize(sym) return RTEffects(rt, exct, effects) end -function abstract_eval_throw_undef_if_not(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, sv::AbsIntState) - condt = abstract_eval_value(interp, e.args[2], vtypes, sv) +function abstract_eval_throw_undef_if_not(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) + condt = abstract_eval_value(interp, e.args[2], sstate, sv) condval = maybe_extract_const_bool(condt) rt = Nothing exct = UndefVarError @@ -3183,32 +3189,32 @@ function abstract_eval_static_parameter(::AbstractInterpreter, e::Expr, sv::AbsI return RTEffects(rt, exct, effects) end -function abstract_eval_statement_expr(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, +function abstract_eval_statement_expr(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState)::Future{RTEffects} ehead = e.head if ehead === :call - return abstract_eval_call(interp, e, vtypes, sv) + return abstract_eval_call(interp, e, sstate, sv) elseif ehead === :new - return abstract_eval_new(interp, e, vtypes, sv) + return abstract_eval_new(interp, e, sstate, sv) elseif ehead === :splatnew - return abstract_eval_splatnew(interp, e, vtypes, sv) + return abstract_eval_splatnew(interp, e, sstate, sv) elseif ehead === :new_opaque_closure - return abstract_eval_new_opaque_closure(interp, e, vtypes, sv) + return abstract_eval_new_opaque_closure(interp, e, sstate, sv) elseif ehead === :foreigncall - return abstract_eval_foreigncall(interp, e, vtypes, sv) + return abstract_eval_foreigncall(interp, e, sstate, sv) elseif ehead === :cfunction - return abstract_eval_cfunction(interp, e, vtypes, sv) + return abstract_eval_cfunction(interp, e, sstate, sv) elseif ehead === :method rt = (length(e.args) == 1) ? 
Any : Nothing return RTEffects(rt, Any, EFFECTS_UNKNOWN) elseif ehead === :copyast - return abstract_eval_copyast(interp, e, vtypes, sv) + return abstract_eval_copyast(interp, e, sstate, sv) elseif ehead === :invoke || ehead === :invoke_modify error("type inference data-flow error: tried to double infer a function") elseif ehead === :isdefined - return abstract_eval_isdefined_expr(interp, e, vtypes, sv) + return abstract_eval_isdefined_expr(interp, e, sstate, sv) elseif ehead === :throw_undef_if_not - return abstract_eval_throw_undef_if_not(interp, e, vtypes, sv) + return abstract_eval_throw_undef_if_not(interp, e, sstate, sv) elseif ehead === :boundscheck return RTEffects(Bool, Union{}, Effects(EFFECTS_TOTAL; consistent=ALWAYS_FALSE)) elseif ehead === :the_exception @@ -3245,16 +3251,16 @@ function refine_partial_type(@nospecialize t) return t end -function abstract_eval_foreigncall(interp::AbstractInterpreter, e::Expr, vtypes::Union{VarTable,Nothing}, sv::AbsIntState) +function abstract_eval_foreigncall(interp::AbstractInterpreter, e::Expr, sstate::StatementState, sv::AbsIntState) mi = frame_instance(sv) t = sp_type_rewrap(e.args[2], mi, true) for i = 3:length(e.args) - if abstract_eval_value(interp, e.args[i], vtypes, sv) === Bottom + if abstract_eval_value(interp, e.args[i], sstate, sv) === Bottom return RTEffects(Bottom, Any, EFFECTS_THROWS) end end effects = foreigncall_effects(e) do @nospecialize x - abstract_eval_value(interp, x, vtypes, sv) + abstract_eval_value(interp, x, sstate, sv) end cconv = e.args[5] if isa(cconv, QuoteNode) && (v = cconv.value; isa(v, Tuple{Symbol, UInt16})) @@ -3264,14 +3270,14 @@ function abstract_eval_foreigncall(interp::AbstractInterpreter, e::Expr, vtypes: return RTEffects(t, Any, effects) end -function abstract_eval_phi(interp::AbstractInterpreter, phi::PhiNode, vtypes::Union{VarTable,Nothing}, sv::AbsIntState) +function abstract_eval_phi(interp::AbstractInterpreter, phi::PhiNode, sstate::StatementState, sv::AbsIntState) rt = Union{} for i in 1:length(phi.values) isassigned(phi.values, i) || continue val = phi.values[i] # N.B.: Phi arguments are restricted to not have effects, so we can drop # them here safely. - thisval = abstract_eval_special_value(interp, val, vtypes, sv).rt + thisval = abstract_eval_special_value(interp, val, sstate, sv).rt rt = tmerge(typeinf_lattice(interp), rt, thisval) end return rt @@ -3293,10 +3299,6 @@ function merge_override_effects!(interp::AbstractInterpreter, effects::Effects, return effects end -function abstract_eval_statement(interp::AbstractInterpreter, @nospecialize(e), vtypes::VarTable, sv::InferenceState) - @assert !isa(e, Union{Expr, PhiNode, NewvarNode}) -end - function override_effects(effects::Effects, override::EffectsOverride) return Effects(effects; consistent = override.consistent ? 
ALWAYS_TRUE : effects.consistent, @@ -3383,7 +3385,10 @@ function abstract_eval_partition_load(interp::AbstractInterpreter, partition::Co return RTEffects(rt, UndefVarError, generic_getglobal_effects) end -function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, sv::AbsIntState) +function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, saw_latestworld::Bool, sv::AbsIntState) + if saw_latestworld + return RTEffects(Any, Any, generic_getglobal_effects) + end partition = abstract_eval_binding_partition!(interp, g, sv) ret = abstract_eval_partition_load(interp, partition) if ret.rt !== Union{} && ret.exct === UndefVarError && InferenceParams(interp).assume_bindings_static @@ -3396,7 +3401,10 @@ function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, sv:: return ret end -function global_assignment_exct(interp::AbstractInterpreter, sv::AbsIntState, g::GlobalRef, @nospecialize(newty)) +function global_assignment_exct(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, g::GlobalRef, @nospecialize(newty)) + if saw_latestworld + return Union{ErrorException, TypeError} + end partition = abstract_eval_binding_partition!(interp, g, sv) return global_assignment_binding_exct(partition, newty) end @@ -3415,9 +3423,9 @@ function global_assignment_binding_exct(partition::Core.BindingPartition, @nospe return Union{} end -function handle_global_assignment!(interp::AbstractInterpreter, frame::InferenceState, lhs::GlobalRef, @nospecialize(newty)) +function handle_global_assignment!(interp::AbstractInterpreter, frame::InferenceState, saw_latestworld::Bool, lhs::GlobalRef, @nospecialize(newty)) effect_free = ALWAYS_FALSE - nothrow = global_assignment_exct(interp, frame, lhs, ignorelimited(newty)) === Union{} + nothrow = global_assignment_exct(interp, frame, saw_latestworld, lhs, ignorelimited(newty)) === Union{} inaccessiblememonly = ALWAYS_FALSE if !nothrow sub_curr_ssaflag!(frame, IR_FLAG_NOTHROW) @@ -3609,7 +3617,8 @@ function handle_control_backedge!(interp::AbstractInterpreter, frame::InferenceS return nothing end -function update_bbstate!(𝕃ᵢ::AbstractLattice, frame::InferenceState, bb::Int, vartable::VarTable) +function update_bbstate!(𝕃ᵢ::AbstractLattice, frame::InferenceState, bb::Int, vartable::VarTable, saw_latestworld::Bool) + frame.bb_saw_latestworld[bb] |= saw_latestworld bbtable = frame.bb_vartables[bb] if bbtable === nothing # if a basic block hasn't been analyzed yet, @@ -3686,14 +3695,14 @@ function update_exc_bestguess!(interp::AbstractInterpreter, @nospecialize(exct), end end -function propagate_to_error_handler!(currstate::VarTable, frame::InferenceState, 𝕃ᵢ::AbstractLattice) +function propagate_to_error_handler!(currstate::VarTable, currsaw_latestworld::Bool, frame::InferenceState, 𝕃ᵢ::AbstractLattice) # If this statement potentially threw, propagate the currstate to the # exception handler, BEFORE applying any state changes. 
curr_hand = gethandler(frame) if curr_hand !== nothing enter = frame.src.code[curr_hand.enter_idx]::EnterNode exceptbb = block_for_inst(frame.cfg, enter.catch_dest) - if update_bbstate!(𝕃ᵢ, frame, exceptbb, currstate) + if update_bbstate!(𝕃ᵢ, frame, exceptbb, currstate, currsaw_latestworld) push!(frame.ip, exceptbb) end end @@ -3711,9 +3720,10 @@ end struct CurrentState result::Future currstate::VarTable + currsaw_latestworld::Bool bbstart::Int bbend::Int - CurrentState(result::Future, currstate::VarTable, bbstart::Int, bbend::Int) = new(result, currstate, bbstart, bbend) + CurrentState(result::Future, currstate::VarTable, currsaw_latestworld::Bool, bbstart::Int, bbend::Int) = new(result, currstate, currsaw_latestworld, bbstart, bbend) CurrentState() = new() end function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextresult::CurrentState) @@ -3724,6 +3734,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr nbbs = length(bbs) 𝕃ᵢ = typeinf_lattice(interp) states = frame.bb_vartables + saw_latestworld = frame.bb_saw_latestworld currbb = frame.currbb currpc = frame.currpc @@ -3732,6 +3743,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr bbstart = nextresult.bbstart bbend = nextresult.bbend currstate = nextresult.currstate + currsaw_latestworld = nextresult.currsaw_latestworld @goto injectresult end @@ -3739,6 +3751,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr currbb = frame.currbb = _bits_findnext(W.bits, 1)::Int # next basic block end currstate = copy(states[currbb]::VarTable) + currsaw_latestworld = saw_latestworld[currbb] while currbb <= nbbs delete!(W, currbb) bbstart = first(bbs[currbb].stmts) @@ -3763,7 +3776,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr elseif isa(stmt, GotoIfNot) condx = stmt.cond condslot = ssa_def_slot(condx, frame) - condt = abstract_eval_value(interp, condx, currstate, frame) + condt = abstract_eval_value(interp, condx, StatementState(currstate, currsaw_latestworld), frame) if condt === Bottom ssavaluetypes[currpc] = Bottom empty!(frame.pclimitations) @@ -3781,7 +3794,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr add_curr_ssaflag!(frame, IR_FLAG_NOTHROW) else update_exc_bestguess!(interp, TypeError, frame) - propagate_to_error_handler!(currstate, frame, 𝕃ᵢ) + propagate_to_error_handler!(currstate, currsaw_latestworld, frame, 𝕃ᵢ) merge_effects!(interp, frame, EFFECTS_THROWS) end @@ -3831,7 +3844,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr if condslot isa SlotNumber # refine the type of this conditional object itself for this else branch stoverwrite1!(elsestate, condition_object_change(currstate, condt, condslot, #=then_or_else=#false)) end - else_changed = update_bbstate!(𝕃ᵢ, frame, falsebb, elsestate) + else_changed = update_bbstate!(𝕃ᵢ, frame, falsebb, elsestate, currsaw_latestworld) then_change = conditional_change(𝕃ᵢ, currstate, condt, #=then_or_else=#true) thenstate = currstate if then_change !== nothing @@ -3841,7 +3854,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr stoverwrite1!(thenstate, condition_object_change(currstate, condt, condslot, #=then_or_else=#true)) end else - else_changed = update_bbstate!(𝕃ᵢ, frame, falsebb, currstate) + else_changed = update_bbstate!(𝕃ᵢ, frame, falsebb, currstate, currsaw_latestworld) end if else_changed 
handle_control_backedge!(interp, frame, currpc, stmt.dest) @@ -3850,7 +3863,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr @goto fallthrough end elseif isa(stmt, ReturnNode) - rt = abstract_eval_value(interp, stmt.val, currstate, frame) + rt = abstract_eval_value(interp, stmt.val, StatementState(currstate, currsaw_latestworld), frame) if update_bestguess!(interp, frame, currstate, rt) update_cycle_worklists!(frame) do caller::InferenceState, caller_pc::Int # no reason to revisit if that call-site doesn't affect the final result @@ -3863,7 +3876,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr ssavaluetypes[currpc] = Any add_curr_ssaflag!(frame, IR_FLAG_NOTHROW) if isdefined(stmt, :scope) - scopet = abstract_eval_value(interp, stmt.scope, currstate, frame) + scopet = abstract_eval_value(interp, stmt.scope, StatementState(currstate, currsaw_latestworld), frame) handler = gethandler(frame, currpc + 1)::TryCatchFrame @assert handler.scopet !== nothing if !⊑(𝕃ᵢ, scopet, handler.scopet) @@ -3897,7 +3910,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr # the incoming values from all iterations, but `abstract_eval_phi` will only tmerge # over the first and last iterations. By tmerging in the current old_rt, we ensure that # we will not lose an intermediate value. - rt = abstract_eval_phi(interp, stmt, currstate, frame) + rt = abstract_eval_phi(interp, stmt, StatementState(currstate, currsaw_latestworld), frame) old_rt = frame.ssavaluetypes[currpc] rt = old_rt === NOT_FOUND ? rt : tmerge(typeinf_lattice(interp), old_rt, rt) else @@ -3907,7 +3920,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr stmt = stmt.args[2] end if !isa(stmt, Expr) - (; rt, exct, effects, refinements) = abstract_eval_special_value(interp, stmt, currstate, frame) + (; rt, exct, effects, refinements) = abstract_eval_special_value(interp, stmt, StatementState(currstate, currsaw_latestworld), frame) else hd = stmt.head if hd === :method @@ -3919,10 +3932,13 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr hd !== :boundscheck && # :boundscheck can be narrowed to Bool is_meta_expr(stmt))) rt = Nothing + elseif hd === :latestworld + currsaw_latestworld = true + rt = Nothing else - result = abstract_eval_statement_expr(interp, stmt, currstate, frame)::Future + result = abstract_eval_statement_expr(interp, stmt, StatementState(currstate, currsaw_latestworld), frame)::Future if !isready(result) || !isempty(frame.tasks) - return CurrentState(result, currstate, bbstart, bbend) + return CurrentState(result, currstate, currsaw_latestworld, bbstart, bbend) @label injectresult # reload local variables stmt = frame.src.code[currpc] @@ -3962,7 +3978,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr if isa(lhs, SlotNumber) changes = StateUpdate(lhs, VarState(rt, false)) elseif isa(lhs, GlobalRef) - handle_global_assignment!(interp, frame, lhs, rt) + handle_global_assignment!(interp, frame, currsaw_latestworld, lhs, rt) else merge_effects!(interp, frame, EFFECTS_UNKNOWN) end @@ -3974,7 +3990,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr # TODO: assert that these conditions match. For now, we assume the `nothrow` flag # to be correct, but allow the exct to be an over-approximation. 
end - propagate_to_error_handler!(currstate, frame, 𝕃ᵢ) + propagate_to_error_handler!(currstate, currsaw_latestworld, frame, 𝕃ᵢ) end if rt === Bottom ssavaluetypes[currpc] = Bottom @@ -4010,7 +4026,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr # Case 2: Directly branch to a different BB begin @label branch - if update_bbstate!(𝕃ᵢ, frame, nextbb, currstate) + if update_bbstate!(𝕃ᵢ, frame, nextbb, currstate, currsaw_latestworld) push!(W, nextbb) end end diff --git a/Compiler/src/inferencestate.jl b/Compiler/src/inferencestate.jl index 0ba37888b34d5..9eb929b725fbf 100644 --- a/Compiler/src/inferencestate.jl +++ b/Compiler/src/inferencestate.jl @@ -209,6 +209,11 @@ to enable flow-sensitive analysis. """ const VarTable = Vector{VarState} +struct StatementState + vtypes::Union{VarTable,Nothing} + saw_latestworld::Bool +end + const CACHE_MODE_NULL = 0x00 # not cached, optimization optional const CACHE_MODE_GLOBAL = 0x01 << 0 # cached globally, optimization required const CACHE_MODE_LOCAL = 0x01 << 1 # cached locally, optimization required @@ -260,6 +265,7 @@ mutable struct InferenceState ssavalue_uses::Vector{BitSet} # ssavalue sparsity and restart info # TODO: Could keep this sparsely by doing structural liveness analysis ahead of time. bb_vartables::Vector{Union{Nothing,VarTable}} # nothing if not analyzed yet + bb_saw_latestworld::Vector{Bool} ssavaluetypes::Vector{Any} edges::Vector{Any} stmt_info::Vector{CallInfo} @@ -320,6 +326,7 @@ mutable struct InferenceState nslots = length(src.slotflags) slottypes = Vector{Any}(undef, nslots) + bb_saw_latestworld = Bool[false for i = 1:length(cfg.blocks)] bb_vartables = Union{Nothing,VarTable}[ nothing for i = 1:length(cfg.blocks) ] bb_vartable1 = bb_vartables[1] = VarTable(undef, nslots) argtypes = result.argtypes @@ -367,7 +374,7 @@ mutable struct InferenceState this = new( mi, WorldWithRange(world, valid_worlds), mod, sptypes, slottypes, src, cfg, spec_info, - currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, ssavaluetypes, edges, stmt_info, + currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, bb_saw_latestworld, ssavaluetypes, edges, stmt_info, tasks, pclimitations, limitations, cycle_backedges, callstack, parentid, frameid, cycleid, result, unreachable, bestguess, exc_bestguess, ipo_effects, restrict_abstract_call_sites, cache_mode, insert_coverage, diff --git a/Compiler/src/ssair/irinterp.jl b/Compiler/src/ssair/irinterp.jl index 0a8239dc590db..dd5c907d3c25f 100644 --- a/Compiler/src/ssair/irinterp.jl +++ b/Compiler/src/ssair/irinterp.jl @@ -38,7 +38,7 @@ function abstract_eval_invoke_inst(interp::AbstractInterpreter, inst::Instructio mi_cache = WorldView(code_cache(interp), world) code = get(mi_cache, mi, nothing) code === nothing && return Pair{Any,Tuple{Bool,Bool}}(nothing, (false, false)) - argtypes = collect_argtypes(interp, stmt.args[2:end], nothing, irsv) + argtypes = collect_argtypes(interp, stmt.args[2:end], StatementState(nothing, false), irsv) argtypes === nothing && return Pair{Any,Tuple{Bool,Bool}}(Bottom, (false, false)) return concrete_eval_invoke(interp, code, argtypes, irsv) end @@ -46,11 +46,11 @@ end abstract_eval_ssavalue(s::SSAValue, sv::IRInterpretationState) = abstract_eval_ssavalue(s, sv.ir) function abstract_eval_phi_stmt(interp::AbstractInterpreter, phi::PhiNode, ::Int, irsv::IRInterpretationState) - return abstract_eval_phi(interp, phi, nothing, irsv) + return abstract_eval_phi(interp, phi, StatementState(nothing, false), irsv) end -function 
abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, irsv::IRInterpretationState) - si = StmtInfo(true) # TODO better job here? +function abstract_call(interp::AbstractInterpreter, arginfo::ArgInfo, sstate::StatementState, irsv::IRInterpretationState) + si = StmtInfo(true, sstate.saw_latestworld) # TODO better job here? call = abstract_call(interp, arginfo, si, irsv)::Future Future{Any}(call, interp, irsv) do call, interp, irsv irsv.ir.stmts[irsv.curridx][:info] = call.info @@ -147,7 +147,7 @@ function reprocess_instruction!(interp::AbstractInterpreter, inst::Instruction, if (head === :call || head === :foreigncall || head === :new || head === :splatnew || head === :static_parameter || head === :isdefined || head === :boundscheck) @assert isempty(irsv.tasks) # TODO: this whole function needs to be converted to a stackless design to be a valid AbsIntState, but this should work here for now - result = abstract_eval_statement_expr(interp, stmt, nothing, irsv) + result = abstract_eval_statement_expr(interp, stmt, StatementState(nothing, false), irsv) reverse!(irsv.tasks) while true if length(irsv.callstack) > irsv.frameid @@ -302,7 +302,7 @@ populate_def_use_map!(tpdum::TwoPhaseDefUseMap, ir::IRCode) = function is_all_const_call(@nospecialize(stmt), interp::AbstractInterpreter, irsv::IRInterpretationState) isexpr(stmt, :call) || return false @inbounds for i = 2:length(stmt.args) - argtype = abstract_eval_value(interp, stmt.args[i], nothing, irsv) + argtype = abstract_eval_value(interp, stmt.args[i], StatementState(nothing, false), irsv) is_const_argtype(argtype) || return false end return true diff --git a/Compiler/src/tfuncs.jl b/Compiler/src/tfuncs.jl index 3b524742b1609..87dad13c50a30 100644 --- a/Compiler/src/tfuncs.jl +++ b/Compiler/src/tfuncs.jl @@ -1426,7 +1426,7 @@ end # as well as compute the info for the method matches op = unwrapva(argtypes[op_argi]) v = unwrapva(argtypes[v_argi]) - callinfo = abstract_call(interp, ArgInfo(nothing, Any[op, TF, v]), StmtInfo(true), sv, #=max_methods=#1) + callinfo = abstract_call(interp, ArgInfo(nothing, Any[op, TF, v]), StmtInfo(true, si.saw_latestworld), sv, #=max_methods=#1) TF = Core.Box(TF) RT = Core.Box(RT) return Future{CallMeta}(callinfo, interp, sv) do callinfo, interp, sv diff --git a/Compiler/src/types.jl b/Compiler/src/types.jl index 35c7880da2281..5669ec3175c9e 100644 --- a/Compiler/src/types.jl +++ b/Compiler/src/types.jl @@ -41,6 +41,7 @@ struct StmtInfo need thus not be computed. """ used::Bool + saw_latestworld::Bool end struct SpecInfo diff --git a/Compiler/src/validation.jl b/Compiler/src/validation.jl index 78db5ef5e4ed8..6700aa8d4508f 100644 --- a/Compiler/src/validation.jl +++ b/Compiler/src/validation.jl @@ -39,6 +39,7 @@ const VALID_EXPR_HEADS = IdDict{Symbol,UnitRange{Int}}( :using => 1:typemax(Int), :export => 1:typemax(Int), :public => 1:typemax(Int), + :latestworld => 0:0, ) # @enum isn't defined yet, otherwise I'd use it for this diff --git a/base/boot.jl b/base/boot.jl index 0df0cde64f8c0..f66ee69780193 100644 --- a/base/boot.jl +++ b/base/boot.jl @@ -259,13 +259,14 @@ else const UInt = UInt32 end -function iterate end function Typeof end ccall(:jl_toplevel_eval_in, Any, (Any, Any), Core, quote (f::typeof(Typeof))(x) = ($(_expr(:meta,:nospecialize,:x)); isa(x,Type) ? 
Type{x} : typeof(x)) end) +function iterate end + macro nospecialize(x) _expr(:meta, :nospecialize, x) end diff --git a/base/essentials.jl b/base/essentials.jl index efae59b82b5f9..5683120df8d51 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -467,10 +467,18 @@ Evaluate an expression with values interpolated into it using `eval`. If two arguments are provided, the first is the module to evaluate in. """ macro eval(ex) - return Expr(:escape, Expr(:call, GlobalRef(Core, :eval), __module__, Expr(:quote, ex))) + return Expr(:let, Expr(:(=), :eval_local_result, + Expr(:escape, Expr(:call, GlobalRef(Core, :eval), __module__, Expr(:quote, ex)))), + Expr(:block, + Expr(:var"latestworld-if-toplevel"), + :eval_local_result)) end macro eval(mod, ex) - return Expr(:escape, Expr(:call, GlobalRef(Core, :eval), mod, Expr(:quote, ex))) + return Expr(:let, Expr(:(=), :eval_local_result, + Expr(:escape, Expr(:call, GlobalRef(Core, :eval), mod, Expr(:quote, ex)))), + Expr(:block, + Expr(:var"latestworld-if-toplevel"), + :eval_local_result)) end # use `@eval` here to directly form `:new` expressions avoid implicit `convert`s diff --git a/base/sysimg.jl b/base/sysimg.jl index 476b9715b7e11..e57ec1c99bfe6 100644 --- a/base/sysimg.jl +++ b/base/sysimg.jl @@ -39,6 +39,13 @@ actually evaluates `mapexpr(expr)`. If it is omitted, `mapexpr` defaults to [`i Use [`Base.include`](@ref) to evaluate a file into another module. +!!! note + Julia's syntax lowering recognizes an explicit call to a literal `include` + at top-level and inserts an implicit `@Core.latestworld` to make any include'd + definitions visible to subsequent code. Note however that this recognition + is *syntactic*. I.e. assigning `const myinclude = include` may require + and explicit `@Core.latestworld` call after `myinclude`. + !!! compat "Julia 1.5" Julia 1.5 is required for passing the `mapexpr` argument. 
""" diff --git a/base/tuple.jl b/base/tuple.jl index 3791d74bfc698..ee3174d783531 100644 --- a/base/tuple.jl +++ b/base/tuple.jl @@ -60,7 +60,7 @@ end function _setindex(v, i::Integer, args::Vararg{Any,N}) where {N} @inline - return ntuple(j -> ifelse(j == i, v, args[j]), Val{N}()) + return ntuple(j -> ifelse(j == i, v, args[j]), Val{N}())::NTuple{N, Any} end diff --git a/src/codegen.cpp b/src/codegen.cpp index 968dab0f00430..85d791052484c 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -6721,6 +6721,18 @@ static std::pair get_oc_function(jl_codectx_t &ctx, jl_met return std::make_pair(F, specF); } +static void emit_latestworld(jl_codectx_t &ctx) +{ + auto world_age_field = get_tls_world_age_field(ctx); + LoadInst *world = ctx.builder.CreateAlignedLoad(ctx.types().T_size, + prepare_global_in(jl_Module, jlgetworld_global), ctx.types().alignof_ptr, + /*isVolatile*/false); + world->setOrdering(AtomicOrdering::Acquire); + StoreInst *store_world = ctx.builder.CreateAlignedStore(world, world_age_field, + ctx.types().alignof_ptr, /*isVolatile*/false); + (void)store_world; +} + // `expr` is not clobbered in JL_TRY JL_GCC_IGNORE_START("-Wclobbered") static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_0based) @@ -7141,6 +7153,10 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_ ctx.builder.CreateCall(prepare_call(gc_preserve_end_func), {token.V}); return jl_cgval_t((jl_value_t*)jl_nothing_type); } + else if (head == jl_latestworld_sym && !jl_is_method(ctx.linfo->def.method)) { + emit_latestworld(ctx); + return jl_cgval_t((jl_value_t*)jl_nothing_type); + } else { if (jl_is_toplevel_only_expr(expr) && !jl_is_method(ctx.linfo->def.method)) { @@ -9568,7 +9584,9 @@ static jl_llvm_functions_t } mallocVisitStmt(sync_bytes, have_dbg_update); - if (toplevel || ctx.is_opaque_closure) + // N.B.: For toplevel thunks, we expect world age restore to be handled + // by the interpreter which invokes us. + if (ctx.is_opaque_closure) ctx.builder.CreateStore(last_age, world_age_field); assert(type_is_ghost(retty) || returninfo.cc == jl_returninfo_t::SRet || retval->getType() == ctx.f->getReturnType()); @@ -9933,17 +9951,6 @@ static jl_llvm_functions_t I.setDebugLoc(topdebugloc); } } - if (toplevel && !ctx.is_opaque_closure && !in_prologue) { - // we're at toplevel; insert an atomic barrier between every instruction - // TODO: inference is invalid if this has any effect (which it often does) - LoadInst *world = new LoadInst(ctx.types().T_size, - prepare_global_in(jl_Module, jlgetworld_global), Twine(), - /*isVolatile*/false, ctx.types().alignof_ptr, /*insertBefore*/&I); - world->setOrdering(AtomicOrdering::Acquire); - StoreInst *store_world = new StoreInst(world, world_age_field, - /*isVolatile*/false, ctx.types().alignof_ptr, /*insertBefore*/&I); - (void)store_world; - } } if (&I == &prologue_end) in_prologue = false; diff --git a/src/interpreter.c b/src/interpreter.c index 252049ad2db6d..cf2ae1a0d9f44 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -463,8 +463,6 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, size_t ip, s->ip = ip; if (ip >= ns) jl_error("`body` expression must terminate in `return`. 
Use `block` instead."); - if (toplevel) - ct->world_age = jl_atomic_load_acquire(&jl_world_counter); jl_value_t *stmt = jl_array_ptr_ref(stmts, ip); assert(!jl_is_phinode(stmt)); size_t next_ip = ip + 1; diff --git a/src/jlfrontend.scm b/src/jlfrontend.scm index 808af18ebfdbd..3d46940d6fcbb 100644 --- a/src/jlfrontend.scm +++ b/src/jlfrontend.scm @@ -139,7 +139,7 @@ (define (toplevel-only-expr? e) (and (pair? e) - (or (memq (car e) '(toplevel line module import using export public + (or (memq (car e) '(toplevel line module export public error incomplete)) (and (memq (car e) '(global const)) (every symbol? (cdr e)))))) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index 6a9558bb06ba5..72e97da3c2daa 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -1038,6 +1038,7 @@ '()))))) (call (core _typebody!) ,name (call (core svec) ,@(insert-struct-shim field-types name))) (const (globalref (thismodule) ,name) ,name) + (latestworld) (null))) ;; "inner" constructors (scope-block @@ -1087,6 +1088,7 @@ (call (core _equiv_typedef) (globalref (thismodule) ,name) ,name)) (null) (const (globalref (thismodule) ,name) ,name)) + (latestworld) (null)))))) (define (primitive-type-def-expr n name params super) @@ -1107,6 +1109,7 @@ (call (core _equiv_typedef) (globalref (thismodule) ,name) ,name)) (null) (const (globalref (thismodule) ,name) ,name)) + (latestworld) (null)))))) ;; take apart a type signature, e.g. T{X} <: S{Y} @@ -2744,6 +2747,9 @@ ((and (eq? (identifier-name f) '^) (length= e 4) (integer? (cadddr e))) (expand-forms `(call (top literal_pow) ,f ,(caddr e) (call (call (core apply_type) (top Val) ,(cadddr e)))))) + ((eq? f 'include) + (let ((r (make-ssavalue))) + `(block (= ,r ,(map expand-forms e)) (latestworld-if-toplevel) ,r))) (else (map expand-forms e)))) (map expand-forms e))) @@ -4125,7 +4131,8 @@ f(x) = yt(x) `(lambda ,(cadr lam2) (,(clear-capture-bits (car vis)) ,@(cdr vis)) - ,body))))) + ,body))) + (latestworld))) (else (let* ((exprs (lift-toplevel (convert-lambda lam2 '|#anon| #t '() #f parsed-method-stack))) (top-stmts (cdr exprs)) @@ -4133,7 +4140,8 @@ f(x) = yt(x) `(toplevel-butfirst (block ,@sp-inits (method ,(cadr e) ,(cl-convert sig fname lam namemap defined toplevel interp opaq parsed-method-stack globals locals) - ,(julia-bq-macro newlam))) + ,(julia-bq-macro newlam)) + (latestworld)) ,@top-stmts)))) ;; local case - lift to a new type at top level @@ -4272,7 +4280,8 @@ f(x) = yt(x) `(toplevel-butfirst (null) ,@sp-inits - ,@mk-method) + ,@mk-method + (latestworld)) (begin (put! defined name #t) `(toplevel-butfirst @@ -4280,7 +4289,8 @@ f(x) = yt(x) ,@typedef ,@(map (lambda (v) `(moved-local ,v)) moved-vars) ,@sp-inits - ,@mk-method)))))))) + ,@mk-method + (latestworld))))))))) ((lambda) ;; happens inside (thunk ...) and generated function bodies (for-each (lambda (vi) (vinfo:set-asgn! vi #t)) (list-tail (car (lam:vinfo e)) (length (lam:args e)))) @@ -4513,7 +4523,7 @@ f(x) = yt(x) ((struct_type) "\"struct\" expression") ((method) "method definition") ((set_binding_type!) (string "type declaration for global \"" (deparse (cadr e)) "\"")) - ((latestworld) "World age increment") + ((latestworld) "World age increment") (else (string "\"" h "\" expression")))) (if (not (null? 
(cadr lam))) (error (string (head-to-text (car e)) " not at top level")))) @@ -4965,7 +4975,12 @@ f(x) = yt(x) (else (emit temp))))) ;; top level expressions - ((thunk module) + ((thunk) + (check-top-level e) + (emit e) + (if tail (emit-return tail '(null))) + '(null)) + ((module) (check-top-level e) (emit e) (if tail (emit-return tail '(null))) @@ -4989,7 +5004,9 @@ f(x) = yt(x) ;; other top level expressions ((import using export public latestworld) (check-top-level e) - (emit e) + (if (not (eq? (car e) 'latestworld)) + (emit e)) + (emit `(latestworld)) (let ((have-ret? (and (pair? code) (pair? (car code)) (eq? (caar code) 'return)))) (if (and tail (not have-ret?)) (emit-return tail '(null)))) diff --git a/src/toplevel.c b/src/toplevel.c index cedc008af5cd0..56a5f21f43661 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -607,8 +607,7 @@ int jl_is_toplevel_only_expr(jl_value_t *e) JL_NOTSAFEPOINT ((jl_expr_t*)e)->head == jl_const_sym || ((jl_expr_t*)e)->head == jl_toplevel_sym || ((jl_expr_t*)e)->head == jl_error_sym || - ((jl_expr_t*)e)->head == jl_incomplete_sym || - ((jl_expr_t*)e)->head == jl_latestworld_sym); + ((jl_expr_t*)e)->head == jl_incomplete_sym); } int jl_needs_lowering(jl_value_t *e) JL_NOTSAFEPOINT @@ -1002,8 +1001,15 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val jl_value_t *res = jl_nothing; int i; for (i = 0; i < jl_array_nrows(ex->args); i++) { - res = jl_toplevel_eval_flex(m, jl_array_ptr_ref(ex->args, i), fast, 0, toplevel_filename, toplevel_lineno); + root = jl_array_ptr_ref(ex->args, i); + if (jl_needs_lowering(root)) { + ct->world_age = jl_atomic_load_acquire(&jl_world_counter); + root = jl_expand_with_loc_warn(root, m, *toplevel_filename, *toplevel_lineno); + } + ct->world_age = jl_atomic_load_acquire(&jl_world_counter); + res = jl_toplevel_eval_flex(m, root, fast, 1, toplevel_filename, toplevel_lineno); } + ct->world_age = last_age; JL_GC_POP(); return res; } @@ -1112,9 +1118,12 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_in(jl_module_t *m, jl_value_t *ex) jl_value_t *v = NULL; int last_lineno = jl_lineno; const char *last_filename = jl_filename; + jl_task_t *ct = jl_current_task; jl_lineno = 1; jl_filename = "none"; + size_t last_age = ct->world_age; JL_TRY { + ct->world_age = jl_atomic_load_relaxed(&jl_world_counter); v = jl_toplevel_eval(m, ex); } JL_CATCH { @@ -1124,6 +1133,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_in(jl_module_t *m, jl_value_t *ex) } jl_lineno = last_lineno; jl_filename = last_filename; + ct->world_age = last_age; assert(v); return v; } @@ -1178,6 +1188,7 @@ static jl_value_t *jl_parse_eval_all(jl_module_t *module, jl_value_t *text, int err = 0; JL_TRY { + ct->world_age = jl_atomic_load_acquire(&jl_world_counter); for (size_t i = 0; i < jl_expr_nargs(ast); i++) { expression = jl_exprarg(ast, i); if (jl_is_linenode(expression)) { @@ -1186,9 +1197,10 @@ static jl_value_t *jl_parse_eval_all(jl_module_t *module, jl_value_t *text, jl_lineno = lineno; continue; } + ct->world_age = jl_atomic_load_relaxed(&jl_world_counter); expression = jl_expand_with_loc_warn(expression, module, jl_string_data(filename), lineno); - ct->world_age = jl_atomic_load_acquire(&jl_world_counter); + ct->world_age = jl_atomic_load_relaxed(&jl_world_counter); result = jl_toplevel_eval_flex(module, expression, 1, 1, &filename_str, &lineno); } } diff --git a/stdlib/Logging/test/runtests.jl b/stdlib/Logging/test/runtests.jl index 176860fcdec63..2fedbde557078 100644 --- a/stdlib/Logging/test/runtests.jl +++ 
b/stdlib/Logging/test/runtests.jl @@ -285,7 +285,7 @@ end AboveMaxLevel === Logging.AboveMaxLevel end """) - @test m.run() + @test invokelatest(m.run) end @testset "custom log macro" begin diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl index e3a58ec362d89..50f610ff3b3e8 100644 --- a/stdlib/REPL/src/REPL.jl +++ b/stdlib/REPL/src/REPL.jl @@ -343,9 +343,9 @@ __repl_entry_eval_expanded_with_loc(mod::Module, @nospecialize(ast), toplevel_fi function toplevel_eval_with_hooks(mod::Module, @nospecialize(ast), toplevel_file=Ref{Ptr{UInt8}}(Base.unsafe_convert(Ptr{UInt8}, :REPL)), toplevel_line=Ref{Cint}(1)) if !isexpr(ast, :toplevel) - ast = __repl_entry_lower_with_loc(mod, ast, toplevel_file, toplevel_line) + ast = invokelatest(__repl_entry_lower_with_loc, mod, ast, toplevel_file, toplevel_line) check_for_missing_packages_and_run_hooks(ast) - return __repl_entry_eval_expanded_with_loc(mod, ast, toplevel_file, toplevel_line) + return invokelatest(__repl_entry_eval_expanded_with_loc, mod, ast, toplevel_file, toplevel_line) end local value=nothing for i = 1:length(ast.args) diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index df3a0cad76878..1f2c0cabbdb38 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -630,7 +630,7 @@ end isdefined_globalref(g::GlobalRef) = !iszero(ccall(:jl_globalref_boundp, Cint, (Any,), g)) # aggressive global binding resolution within `repl_frame` -function CC.abstract_eval_globalref(interp::REPLInterpreter, g::GlobalRef, +function CC.abstract_eval_globalref(interp::REPLInterpreter, g::GlobalRef, bailed::Bool, sv::CC.InferenceState) if (interp.limit_aggressive_inference ? is_repl_frame(sv) : is_call_graph_uncached(sv)) if isdefined_globalref(g) @@ -638,7 +638,7 @@ function CC.abstract_eval_globalref(interp::REPLInterpreter, g::GlobalRef, end return CC.RTEffects(Union{}, UndefVarError, CC.EFFECTS_THROWS) end - return @invoke CC.abstract_eval_globalref(interp::CC.AbstractInterpreter, g::GlobalRef, + return @invoke CC.abstract_eval_globalref(interp::CC.AbstractInterpreter, g::GlobalRef, bailed::Bool, sv::CC.InferenceState) end diff --git a/stdlib/REPL/test/replcompletions.jl b/stdlib/REPL/test/replcompletions.jl index 1355f74c9bfff..b259567884486 100644 --- a/stdlib/REPL/test/replcompletions.jl +++ b/stdlib/REPL/test/replcompletions.jl @@ -12,150 +12,151 @@ using REPL end end -let ex = quote - module CompletionFoo - using Random - import Test - - mutable struct Test_y - yy - end - mutable struct Test_x - xx :: Test_y - end - type_test = Test_x(Test_y(1)) - (::Test_y)() = "", "" - unicode_αβγ = Test_y(1) +let ex = + quote + module CompletionFoo + using Random + import Test + + mutable struct Test_y + yy + end + mutable struct Test_x + xx :: Test_y + end + type_test = Test_x(Test_y(1)) + (::Test_y)() = "", "" + unicode_αβγ = Test_y(1) - Base.:(+)(x::Test_x, y::Test_y) = Test_x(Test_y(x.xx.yy + y.yy)) - module CompletionFoo2 + Base.:(+)(x::Test_x, y::Test_y) = Test_x(Test_y(x.xx.yy + y.yy)) + module CompletionFoo2 - end - const bar = 1 - foo() = bar - macro foobar() - :() - end - macro barfoo(ex) - ex - end - macro error_expanding() - error("cannot expand @error_expanding") - :() - end - macro error_lowering_conditional(a) - if isa(a, Number) - return a end - throw(AssertionError("Not a Number")) - :() - end - macro error_throwing() - return quote - error("@error_throwing throws an error") + const bar = 1 + foo() = bar + macro foobar() + :() + end + macro barfoo(ex) + ex + end + macro 
error_expanding() + error("cannot expand @error_expanding") + :() + end + macro error_lowering_conditional(a) + if isa(a, Number) + return a + end + throw(AssertionError("Not a Number")) + :() + end + macro error_throwing() + return quote + error("@error_throwing throws an error") + end end - end - primitive type NonStruct 8 end - Base.propertynames(::NonStruct) = (:a, :b, :c) - x = reinterpret(NonStruct, 0x00) + primitive type NonStruct 8 end + Base.propertynames(::NonStruct) = (:a, :b, :c) + x = reinterpret(NonStruct, 0x00) - # Support non-Dict AbstractDicts, #19441 - mutable struct CustomDict{K, V} <: AbstractDict{K, V} - mydict::Dict{K, V} - end + # Support non-Dict AbstractDicts, #19441 + mutable struct CustomDict{K, V} <: AbstractDict{K, V} + mydict::Dict{K, V} + end - Base.keys(d::CustomDict) = collect(keys(d.mydict)) - Base.length(d::CustomDict) = length(d.mydict) + Base.keys(d::CustomDict) = collect(keys(d.mydict)) + Base.length(d::CustomDict) = length(d.mydict) - # Support AbstractDict with unknown length, #55931 - struct NoLengthDict{K,V} <: AbstractDict{K,V} - dict::Dict{K,V} - NoLengthDict{K,V}() where {K,V} = new(Dict{K,V}()) - end - Base.iterate(d::NoLengthDict, s...) = iterate(d.dict, s...) - Base.IteratorSize(::Type{<:NoLengthDict}) = Base.SizeUnknown() - Base.eltype(::Type{NoLengthDict{K,V}}) where {K,V} = Pair{K,V} - Base.setindex!(d::NoLengthDict, v, k) = d.dict[k] = v - - test(x::T, y::T) where {T<:Real} = pass - test(x::Real, y::Real) = pass - test(x::AbstractArray{T}, y) where {T<:Real} = pass - test(args...) = pass - - test1(x::Type{Float64}) = pass - - test2(x::AbstractString) = pass - test2(x::Char) = pass - test2(x::Cmd) = pass - - test3(x::AbstractArray{Int}, y::Int) = pass - test3(x::AbstractArray{Float64}, y::Float64) = pass - - test4(x::AbstractString, y::AbstractString) = pass - test4(x::AbstractString, y::Regex) = pass - - test5(x::Array{Bool,1}) = pass - test5(x::BitArray{1}) = pass - test5(x::Float64) = pass - const a=x->x - test6()=[a, a] - test7() = rand(Bool) ? 1 : 1.0 - test8() = Any[1][1] - test9(x::Char) = pass - test9(x::Char, i::Int) = pass - - test10(a, x::Int...) = pass - test10(a::Integer, b::Integer, c) = pass - test10(a, y::Bool...) = pass - test10(a, d::Integer, z::Signed...) = pass - test10(s::String...) = pass - - test11(a::Integer, b, c) = pass - test11(u, v::Integer, w) = pass - test11(x::Int, y::Int, z) = pass - test11(_, _, s::String) = pass - - test!12() = pass - - kwtest(; x=1, y=2, w...) = pass - kwtest2(a; x=1, y=2, w...) = pass - kwtest3(a::Number; length, len2, foobar, kwargs...) = pass - kwtest3(a::Real; another!kwarg, len2) = pass - kwtest3(a::Integer; namedarg, foobar, slurp...) 
= pass - kwtest4(a::AbstractString; _a1b, x23) = pass - kwtest4(a::String; _a1b, xαβγ) = pass - kwtest4(a::SubString; x23, _something) = pass - kwtest5(a::Int, b, x...; somekwarg, somekotherkwarg) = pass - kwtest5(a::Char, b; xyz) = pass - - const named = (; len2=3) - const fmsoebelkv = (; len2=3) - - array = [1, 1] - varfloat = 0.1 - - const tuple = (1, 2) - - test_y_array=[(@__MODULE__).Test_y(rand()) for i in 1:10] - test_dict = Dict("abc"=>1, "abcd"=>10, :bar=>2, :bar2=>9, Base=>3, - occursin=>4, `ls`=>5, 66=>7, 67=>8, ("q",3)=>11, - "α"=>12, :α=>13) - test_customdict = CustomDict(test_dict) - - macro teststr_str(s) end - macro tϵsτstρ_str(s) end - macro testcmd_cmd(s) end - macro tϵsτcmδ_cmd(s) end - - var"complicated symbol with spaces" = 5 - - struct WeirdNames end - Base.propertynames(::WeirdNames) = (Symbol("oh no!"), Symbol("oh yes!")) - - # https://github.com/JuliaLang/julia/issues/52551#issuecomment-1858543413 - export exported_symbol - exported_symbol(::WeirdNames) = nothing + # Support AbstractDict with unknown length, #55931 + struct NoLengthDict{K,V} <: AbstractDict{K,V} + dict::Dict{K,V} + NoLengthDict{K,V}() where {K,V} = new(Dict{K,V}()) + end + Base.iterate(d::NoLengthDict, s...) = iterate(d.dict, s...) + Base.IteratorSize(::Type{<:NoLengthDict}) = Base.SizeUnknown() + Base.eltype(::Type{NoLengthDict{K,V}}) where {K,V} = Pair{K,V} + Base.setindex!(d::NoLengthDict, v, k) = d.dict[k] = v + + test(x::T, y::T) where {T<:Real} = pass + test(x::Real, y::Real) = pass + test(x::AbstractArray{T}, y) where {T<:Real} = pass + test(args...) = pass + + test1(x::Type{Float64}) = pass + + test2(x::AbstractString) = pass + test2(x::Char) = pass + test2(x::Cmd) = pass + + test3(x::AbstractArray{Int}, y::Int) = pass + test3(x::AbstractArray{Float64}, y::Float64) = pass + + test4(x::AbstractString, y::AbstractString) = pass + test4(x::AbstractString, y::Regex) = pass + + test5(x::Array{Bool,1}) = pass + test5(x::BitArray{1}) = pass + test5(x::Float64) = pass + const a=x->x + test6()=[a, a] + test7() = rand(Bool) ? 1 : 1.0 + test8() = Any[1][1] + test9(x::Char) = pass + test9(x::Char, i::Int) = pass + + test10(a, x::Int...) = pass + test10(a::Integer, b::Integer, c) = pass + test10(a, y::Bool...) = pass + test10(a, d::Integer, z::Signed...) = pass + test10(s::String...) = pass + + test11(a::Integer, b, c) = pass + test11(u, v::Integer, w) = pass + test11(x::Int, y::Int, z) = pass + test11(_, _, s::String) = pass + + test!12() = pass + + kwtest(; x=1, y=2, w...) = pass + kwtest2(a; x=1, y=2, w...) = pass + kwtest3(a::Number; length, len2, foobar, kwargs...) = pass + kwtest3(a::Real; another!kwarg, len2) = pass + kwtest3(a::Integer; namedarg, foobar, slurp...) 
= pass + kwtest4(a::AbstractString; _a1b, x23) = pass + kwtest4(a::String; _a1b, xαβγ) = pass + kwtest4(a::SubString; x23, _something) = pass + kwtest5(a::Int, b, x...; somekwarg, somekotherkwarg) = pass + kwtest5(a::Char, b; xyz) = pass + + const named = (; len2=3) + const fmsoebelkv = (; len2=3) + + array = [1, 1] + varfloat = 0.1 + + const tuple = (1, 2) + + test_y_array=[(@__MODULE__).Test_y(rand()) for i in 1:10] + test_dict = Dict("abc"=>1, "abcd"=>10, :bar=>2, :bar2=>9, Base=>3, + occursin=>4, `ls`=>5, 66=>7, 67=>8, ("q",3)=>11, + "α"=>12, :α=>13) + test_customdict = CustomDict(test_dict) + + macro teststr_str(s) end + macro tϵsτstρ_str(s) end + macro testcmd_cmd(s) end + macro tϵsτcmδ_cmd(s) end + + var"complicated symbol with spaces" = 5 + + struct WeirdNames end + Base.propertynames(::WeirdNames) = (Symbol("oh no!"), Symbol("oh yes!")) + + # https://github.com/JuliaLang/julia/issues/52551#issuecomment-1858543413 + export exported_symbol + exported_symbol(::WeirdNames) = nothing end # module CompletionFoo test_repl_comp_dict = CompletionFoo.test_dict diff --git a/stdlib/Serialization/test/runtests.jl b/stdlib/Serialization/test/runtests.jl index a7d5023e1ec51..4d9b439e639d7 100644 --- a/stdlib/Serialization/test/runtests.jl +++ b/stdlib/Serialization/test/runtests.jl @@ -577,7 +577,7 @@ let io = IOBuffer() serialize(io, f) seekstart(io) f2 = deserialize(io) - @test f2(1) === 1f0 + @test invokelatest(f2, 1) === 1f0 end # using a filename; #30151 @@ -595,7 +595,7 @@ let f_data f_data = "N0pMBwAAAAA0MxMAAAAAAAAAAAEFIyM1IzYiAAAAABBYH04BBE1haW6bRCIAAAAAIgAAAABNTEy+AQIjNRUAI78jAQAAAAAAAAAfTgEETWFpbkQBAiM1AQdSRVBMWzJdvxBTH04BBE1haW6bRAMAAAAzLAAARkYiAAAAAE7BTBsVRsEWA1YkH04BBE1haW5EAQEqwCXAFgNWJB9OAQRNYWluRJ0ovyXBFgFVKMAVAAbBAQAAAAEAAAABAAAATsEVRr80EAEMTGluZUluZm9Ob2RlH04BBE1haW6bRB9OAQRNYWluRAECIzUBB1JFUExbMl2/vhW+FcEAAAAVRsGifX5MTExMTsEp" end f = deserialize(IOBuffer(base64decode(f_data))) - @test f(10,3) == 23 + @test invokelatest(f, 10,3) == 23 end # issue #33466, IdDict diff --git a/stdlib/Test/src/Test.jl b/stdlib/Test/src/Test.jl index 1b9505c59e327..7c985828d47f2 100644 --- a/stdlib/Test/src/Test.jl +++ b/stdlib/Test/src/Test.jl @@ -1563,6 +1563,13 @@ parent test set (with the context object appended to any failing tests.) !!! compat "Julia 1.10" Multiple `let` assignments are supported since Julia 1.10. +# Special implicit world age increment for `@testset begin` + +World age inside `@testset begin` increments implicitly after every statement. +This matches the behavior of ordinary toplevel code, but not that of ordinary +`begin/end` blocks, i.e. with respect to world age, `@testset begin` behaves +as if the body of the `begin/end` block was written at toplevel. + ## Examples ```jldoctest julia> @testset let logi = log(im) @@ -1657,6 +1664,21 @@ function testset_context(args, ex, source) return esc(ex) end +function insert_toplevel_latestworld(@nospecialize(tests)) + isa(tests, Expr) || return tests + (tests.head !== :block) && return tests + ret = Expr(:block) + for arg in tests.args + push!(ret.args, arg) + if isa(arg, LineNumberNode) || + (isa(arg, Expr) && arg.head in (:latestworld, :var"latestworld-if-toplevel")) + continue + end + push!(ret.args, Expr(:var"latestworld-if-toplevel")) + end + return ret +end + """ Generate the code for a `@testset` with a function call or `begin`/`end` argument """ @@ -1675,6 +1697,8 @@ function testset_beginend_call(args, tests, source) testsettype = :(get_testset_depth() == 0 ? 
DefaultTestSet : typeof(get_testset())) end + tests = insert_toplevel_latestworld(tests) + # Generate a block of code that initializes a new testset, adds # it to the task local storage, evaluates the test(s), before # finally removing the testset and giving it a chance to take diff --git a/test/arrayops.jl b/test/arrayops.jl index 49d51176dcf71..ca378c3f3036b 100644 --- a/test/arrayops.jl +++ b/test/arrayops.jl @@ -2667,31 +2667,32 @@ end end @testset "sign, conj[!], ~" begin - local A, B, C, D, E - A = [-10,0,3] - B = [-10.0,0.0,3.0] - C = [1,im,0] - - @test sign.(A) == [-1,0,1] - @test sign.(B) == [-1,0,1] - @test typeof(sign.(A)) == Vector{Int} - @test typeof(sign.(B)) == Vector{Float64} - - @test conj(A) == A - @test conj!(copy(A)) == A - @test conj(B) == A - @test conj(C) == [1,-im,0] - @test typeof(conj(A)) == Vector{Int} - @test typeof(conj(B)) == Vector{Float64} - @test typeof(conj(C)) == Vector{Complex{Int}} - D = [C copy(C); copy(C) copy(C)] - @test conj(D) == conj!(copy(D)) - E = [D, copy(D)] - @test conj(E) == conj!(copy(E)) - @test (@allocations conj!(E)) == 0 - - @test .~A == [9,-1,-4] - @test typeof(.~A) == Vector{Int} + let A, B, C, D, E # Suppress :latestworld to get good inference for the allocations test + A = [-10,0,3] + B = [-10.0,0.0,3.0] + C = [1,im,0] + + @test sign.(A) == [-1,0,1] + @test sign.(B) == [-1,0,1] + @test typeof(sign.(A)) == Vector{Int} + @test typeof(sign.(B)) == Vector{Float64} + + @test conj(A) == A + @test conj!(copy(A)) == A + @test conj(B) == A + @test conj(C) == [1,-im,0] + @test typeof(conj(A)) == Vector{Int} + @test typeof(conj(B)) == Vector{Float64} + @test typeof(conj(C)) == Vector{Complex{Int}} + D = [C copy(C); copy(C) copy(C)] + @test conj(D) == conj!(copy(D)) + E = [D, copy(D)] + @test conj(E) == conj!(copy(E)) + @test (@allocations conj!(E)) == 0 + + @test .~A == [9,-1,-4] + @test typeof(.~A) == Vector{Int} + end end # @inbounds is expression-like, returning its value; #15558 diff --git a/test/core.jl b/test/core.jl index 1b36db466ce19..836532d661638 100644 --- a/test/core.jl +++ b/test/core.jl @@ -2621,7 +2621,7 @@ end # issue #8338 let ex = Expr(:(=), :(f8338(x;y=4)), :(x*y)) eval(ex) - @test f8338(2) == 8 + @test invokelatest(f8338, 2) == 8 end # call overloading (#2403) @@ -8332,3 +8332,23 @@ let s = mktemp() do path, io end @test strip(s) == "xxx = 42" end + +# `module` has an implicit world-age increment +let foo = eval(Expr(:toplevel, :(module BarModuleInc; struct FooModuleInc; end; end), :(BarModuleInc.FooModuleInc()))) + @Core.latestworld + @test foo == BarModuleInc.FooModuleInc() +end + +let + eval(:(module BarModuleInc2; module BazModuleInc; struct FooModuleInc; end; end; const foo = BazModuleInc.FooModuleInc(); end)) + @Core.latestworld + @test BarModuleInc2.foo == BarModuleInc2.BazModuleInc.FooModuleInc() +end + +# `toplevel` has implicit world age increment between expansion and evaluation +macro define_call(sym) + Core.eval(__module__, :($sym() = 1)) + :($sym()) +end +@test eval(Expr(:toplevel, :(@define_call(f_macro_defined1)))) == 1 +@test @define_call(f_macro_defined2) == 1 diff --git a/test/deprecation_exec.jl b/test/deprecation_exec.jl index 61ffcc2a59ac6..8209b0e920a18 100644 --- a/test/deprecation_exec.jl +++ b/test/deprecation_exec.jl @@ -68,6 +68,7 @@ begin # @deprecate ex = :(module M22845; import ..DeprecationTests: bar; bar(x::Number) = x + 3; end) @test_warn "importing deprecated binding" eval(ex) + @Core.latestworld @test @test_nowarn(DeprecationTests.bar(4)) == 7 @test @test_warn "`f1` is deprecated, use 
`f` instead." f1() diff --git a/test/error.jl b/test/error.jl index 8657c70720779..f76a7809b08a9 100644 --- a/test/error.jl +++ b/test/error.jl @@ -93,7 +93,7 @@ end @testset "MethodError for methods without line numbers" begin try eval(Expr(:function, :(f44319()), 0)) - f44319(1) + @invokelatest f44319() catch e s = sprint(showerror, e) @test s == """MethodError: no method matching f44319(::Int$(Sys.WORD_SIZE)) diff --git a/test/math.jl b/test/math.jl index 5a9f3248e59f4..d794facb02d25 100644 --- a/test/math.jl +++ b/test/math.jl @@ -23,44 +23,46 @@ has_fma = Dict( ) @testset "clamp" begin - @test clamp(0, 1, 3) == 1 - @test clamp(1, 1, 3) == 1 - @test clamp(2, 1, 3) == 2 - @test clamp(3, 1, 3) == 3 - @test clamp(4, 1, 3) == 3 - - @test clamp(0.0, 1, 3) == 1.0 - @test clamp(1.0, 1, 3) == 1.0 - @test clamp(2.0, 1, 3) == 2.0 - @test clamp(3.0, 1, 3) == 3.0 - @test clamp(4.0, 1, 3) == 3.0 - - @test clamp.([0, 1, 2, 3, 4], 1.0, 3.0) == [1.0, 1.0, 2.0, 3.0, 3.0] - @test clamp.([0 1; 2 3], 1.0, 3.0) == [1.0 1.0; 2.0 3.0] - - @test clamp(-200, Int8) === typemin(Int8) - @test clamp(100, Int8) === Int8(100) - @test clamp(200, Int8) === typemax(Int8) - - begin - x = [0.0, 1.0, 2.0, 3.0, 4.0] - clamp!(x, 1, 3) - @test x == [1.0, 1.0, 2.0, 3.0, 3.0] - end + let + @test clamp(0, 1, 3) == 1 + @test clamp(1, 1, 3) == 1 + @test clamp(2, 1, 3) == 2 + @test clamp(3, 1, 3) == 3 + @test clamp(4, 1, 3) == 3 + + @test clamp(0.0, 1, 3) == 1.0 + @test clamp(1.0, 1, 3) == 1.0 + @test clamp(2.0, 1, 3) == 2.0 + @test clamp(3.0, 1, 3) == 3.0 + @test clamp(4.0, 1, 3) == 3.0 + + @test clamp.([0, 1, 2, 3, 4], 1.0, 3.0) == [1.0, 1.0, 2.0, 3.0, 3.0] + @test clamp.([0 1; 2 3], 1.0, 3.0) == [1.0 1.0; 2.0 3.0] + + @test clamp(-200, Int8) === typemin(Int8) + @test clamp(100, Int8) === Int8(100) + @test clamp(200, Int8) === typemax(Int8) + + begin + x = [0.0, 1.0, 2.0, 3.0, 4.0] + clamp!(x, 1, 3) + @test x == [1.0, 1.0, 2.0, 3.0, 3.0] + end - @test clamp(typemax(UInt64), Int64) === typemax(Int64) - @test clamp(typemin(Int), UInt64) === typemin(UInt64) - @test clamp(Int16(-1), UInt16) === UInt16(0) - @test clamp(-1, 2, UInt(0)) === UInt(2) - @test clamp(typemax(UInt16), Int16) === Int16(32767) + @test clamp(typemax(UInt64), Int64) === typemax(Int64) + @test clamp(typemin(Int), UInt64) === typemin(UInt64) + @test clamp(Int16(-1), UInt16) === UInt16(0) + @test clamp(-1, 2, UInt(0)) === UInt(2) + @test clamp(typemax(UInt16), Int16) === Int16(32767) - # clamp should not allocate a BigInt for typemax(Int16) - x = big(2) ^ 100 - @test (@allocated clamp(x, Int16)) == 0 + # clamp should not allocate a BigInt for typemax(Int16) + x = big(2) ^ 100 + @test (@allocated clamp(x, Int16)) == 0 - x = clamp(2.0, BigInt) - @test x isa BigInt - @test x == big(2) + x = clamp(2.0, BigInt) + @test x isa BigInt + @test x == big(2) + end end @testset "constants" begin @@ -1608,8 +1610,10 @@ function f44336() @inline hypot(as...) 
end @testset "Issue #44336" begin - f44336() - @test (@allocated f44336()) == 0 + let + f44336() + @test (@allocated f44336()) == 0 + end end @testset "constant-foldability of core math functions" begin diff --git a/test/ranges.jl b/test/ranges.jl index d79851d7056e0..89134be897ddd 100644 --- a/test/ranges.jl +++ b/test/ranges.jl @@ -2058,8 +2058,10 @@ end end @testset "allocation of TwicePrecision call" begin - @test @allocated(0:286.493442:360) == 0 - @test @allocated(0:286:360) == 0 + let + @test @allocated(0:286.493442:360) == 0 + @test @allocated(0:286:360) == 0 + end end @testset "range with start and stop" begin diff --git a/test/sorting.jl b/test/sorting.jl index 93e0cdd7de5ba..f12486b9c9b40 100644 --- a/test/sorting.jl +++ b/test/sorting.jl @@ -968,9 +968,10 @@ end end @testset "ScratchQuickSort allocations on non-concrete eltype" begin - v = Vector{Union{Nothing, Bool}}(rand(Bool, 10000)) - @test 10 > @allocations sort(v) - @test 10 > @allocations sort(v; alg=Base.Sort.ScratchQuickSort()) + let v = Vector{Union{Nothing, Bool}}(rand(Bool, 10000)) + @test 10 > @allocations sort(v) + @test 10 > @allocations sort(v; alg=Base.Sort.ScratchQuickSort()) + end # it would be nice if these numbers were lower (1 or 2), but these # test that we don't have O(n) allocations due to type instability end diff --git a/test/syntax.jl b/test/syntax.jl index 1f9d1d592931b..d9d311ac6615d 100644 --- a/test/syntax.jl +++ b/test/syntax.jl @@ -553,7 +553,14 @@ for (str, tag) in Dict("" => :none, "\"" => :string, "#=" => :comment, "'" => :c end # meta nodes for optional positional arguments -let src = Meta.lower(Main, :(@inline f(p::Int=2) = 3)).args[1].code[end-2].args[3] +let code = Meta.lower(Main, :(@inline f(p::Int=2) = 3)).args[1].code + local src + for i = length(code):-1:1 + if Meta.isexpr(code[i], :method) + src = code[i].args[3] + break + end + end @test Core.Compiler.is_declared_inline(src) end @@ -578,6 +585,7 @@ let thismodule = @__MODULE__, @test isa(ex, Expr) @test !isdefined(M16096, :foo16096) local_foo16096 = Core.eval(@__MODULE__, ex) + Core.@latestworld @test local_foo16096(2.0) == 1 @test !@isdefined foo16096 @test !@isdefined it @@ -3102,6 +3110,7 @@ end ex = Expr(:block) ex.args = fill!(Vector{Any}(undef, 700000), 1) f = eval(Expr(:function, :(), ex)) + @Core.latestworld @test f() == 1 ex = Expr(:vcat) ex.args = fill!(Vector{Any}(undef, 600000), 1) @@ -4010,3 +4019,17 @@ end @test f45494() === (0,) @test_throws "\"esc(...)\" used outside of macro expansion" eval(esc(:(const x=1))) + +# Inner function declaration world age +function create_inner_f_no_methods() + function inner_f end +end +@test isa(create_inner_f_no_methods(), Function) +@test length(methods(create_inner_f_no_methods())) == 0 + +function create_inner_f_one_method() + inner_f() = 1 +end +@test isa(create_inner_f_no_methods(), Function) +@test length(methods(create_inner_f_no_methods())) == 0 +@test Base.invoke_in_world(first(methods(create_inner_f_one_method)).primary_world, create_inner_f_one_method()) == 1 From 4709b6c48e79f6226e6dbee1b49bf7e563058ff7 Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 21 Nov 2024 15:43:24 +0530 Subject: [PATCH 476/537] `copytrito!` for triangular matrices (#56620) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This does two things: 1. Forward `copytrito!` for triangular matrices to the parent in case the specified `uplo` corresponds to the stored part. 
This works because these matrices share their elements with the parents for the stored part. 2. Make `copytrito!` only copy the diagonal if the `uplo` corresponds to the non-stored part. This makes `copytrito!` involving a triangular matrix equivalent to that involving its parent if the filled part is copied, and O(N) otherwise. Examples of improvements in performance: ```julia julia> using LinearAlgebra julia> A1 = UpperTriangular(rand(400,400)); julia> A2 = similar(A1); julia> @btime copytrito!($A2, $A1, 'U'); 70.753 μs (0 allocations: 0 bytes) # nightly v"1.12.0-DEV.1657" 26.143 μs (0 allocations: 0 bytes) # this PR julia> @btime copytrito!(parent($A2), $A1, 'U'); 56.025 μs (0 allocations: 0 bytes) # nightly 26.633 μs (0 allocations: 0 bytes) # this PR ``` --- stdlib/LinearAlgebra/src/generic.jl | 13 ++++++----- stdlib/LinearAlgebra/src/triangular.jl | 30 +++++++++++++++++++++++++ stdlib/LinearAlgebra/test/triangular.jl | 20 +++++++++++++++++ 3 files changed, 58 insertions(+), 5 deletions(-) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl index 666ad631f919a..2b03b24932c80 100644 --- a/stdlib/LinearAlgebra/src/generic.jl +++ b/stdlib/LinearAlgebra/src/generic.jl @@ -2069,17 +2069,20 @@ function copytrito!(B::AbstractMatrix, A::AbstractMatrix, uplo::AbstractChar) require_one_based_indexing(A, B) BLAS.chkuplo(uplo) m,n = size(A) - m1,n1 = size(B) A = Base.unalias(B, A) if uplo == 'U' - LAPACK.lacpy_size_check((m1, n1), (n < m ? n : m, n)) + LAPACK.lacpy_size_check(size(B), (n < m ? n : m, n)) for j in axes(A,2), i in axes(A,1)[begin : min(j,end)] - @inbounds B[i,j] = A[i,j] + # extract the parents for UpperTriangular matrices + Bv, Av = uppertridata(B), uppertridata(A) + @inbounds Bv[i,j] = Av[i,j] end else # uplo == 'L' - LAPACK.lacpy_size_check((m1, n1), (m, m < n ? m : n)) + LAPACK.lacpy_size_check(size(B), (m, m < n ? m : n)) for j in axes(A,2), i in axes(A,1)[j:end] - @inbounds B[i,j] = A[i,j] + # extract the parents for LowerTriangular matrices + Bv, Av = lowertridata(B), lowertridata(A) + @inbounds Bv[i,j] = Av[i,j] end end return B diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl index 76d97133de796..b602e08256afc 100644 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ b/stdlib/LinearAlgebra/src/triangular.jl @@ -633,6 +633,36 @@ end return dest end +Base.@constprop :aggressive function copytrito_triangular!(Bdata, Adata, uplo, uplomatch, sz) + if uplomatch + copytrito!(Bdata, Adata, uplo) + else + BLAS.chkuplo(uplo) + LAPACK.lacpy_size_check(size(Bdata), sz) + # only the diagonal is copied in this case + copyto!(diagview(Bdata), diagview(Adata)) + end + return Bdata +end + +function copytrito!(B::UpperTriangular, A::UpperTriangular, uplo::AbstractChar) + m,n = size(A) + copytrito_triangular!(B.data, A.data, uplo, uplo == 'U', (m, m < n ? m : n)) + return B +end +function copytrito!(B::LowerTriangular, A::LowerTriangular, uplo::AbstractChar) + m,n = size(A) + copytrito_triangular!(B.data, A.data, uplo, uplo == 'L', (n < m ? 
n : m, n)) + return B +end + +uppertridata(A) = A +lowertridata(A) = A +# we restrict these specializations only to strided matrices to avoid cases where an UpperTriangular type +# doesn't share its indexing with the parent +uppertridata(A::UpperTriangular{<:Any, <:StridedMatrix}) = parent(A) +lowertridata(A::LowerTriangular{<:Any, <:StridedMatrix}) = parent(A) + @inline _rscale_add!(A::AbstractTriangular, B::AbstractTriangular, C::Number, alpha::Number, beta::Number) = @stable_muladdmul _triscale!(A, B, C, MulAddMul(alpha, beta)) @inline _lscale_add!(A::AbstractTriangular, B::Number, C::AbstractTriangular, alpha::Number, beta::Number) = diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index 2c8dd4db7fc2b..e69c86cc93663 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -1396,4 +1396,24 @@ end @test exp(log(M)) ≈ M end +@testset "copytrito!" begin + for T in (UpperTriangular, LowerTriangular) + M = Matrix{BigFloat}(undef, 2, 2) + M[1,1] = M[2,2] = 3 + U = T(M) + isupper = U isa UpperTriangular + M[1+!isupper, 1+isupper] = 4 + uplo, loup = U isa UpperTriangular ? ('U', 'L') : ('L', 'U' ) + @test copytrito!(similar(U), U, uplo) == U + @test copytrito!(zero(M), U, uplo) == U + @test copytrito!(similar(U), Array(U), uplo) == U + @test copytrito!(zero(U), U, loup) == Diagonal(U) + @test copytrito!(similar(U), MyTriangular(U), uplo) == U + @test copytrito!(zero(M), MyTriangular(U), uplo) == U + Ubig = T(similar(M, (3,3))) + copytrito!(Ubig, U, uplo) + @test Ubig[axes(U)...] == U + end +end + end # module TestTriangular From 33e69e5a13197b4f5d8e86b54abef0213d9dc721 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Thu, 21 Nov 2024 20:09:04 +0900 Subject: [PATCH 477/537] use `Base.@show` for the Compiler.jl standard library (#56616) Since `Base.@show` is much useful than `Base.Compiler.@show`. --- Compiler/src/Compiler.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index b648fd3f295eb..2cf7e5508196c 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -137,9 +137,7 @@ if length(ARGS) > 2 && ARGS[2] === "--buildsettings" end end -if false - import Base: Base, @show -else +if !isdefined(Base, :end_base_include) macro show(ex...) 
blk = Expr(:block) for s in ex @@ -149,6 +147,8 @@ else isempty(ex) || push!(blk.args, :value) blk end +else + using Base: @show end include("cicache.jl") From 0ded536dcb6e6d6c8c0104c22d4db8d9283377d5 Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Thu, 21 Nov 2024 07:04:15 -0500 Subject: [PATCH 478/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=209f8e11a4c=20to=207b759d7f0=20(#56631)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stdlib: Pkg URL: https://github.com/JuliaLang/Pkg.jl.git Stdlib branch: master Julia branch: master Old commit: 9f8e11a4c New commit: 7b759d7f0 Julia version: 1.12.0-DEV Pkg version: 1.12.0 Bump invoked by: @IanButterworth Powered by: [BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl) Diff: https://github.com/JuliaLang/Pkg.jl/compare/9f8e11a4c0efb3b68a1e25a33f372f398c89cd66...7b759d7f0af56c5ad01f2289bbad71284a556970 ``` $ git log --oneline 9f8e11a4c..7b759d7f0 7b759d7f0 Automatically upgrade empty manifest files to v2 format (#4091) 69c6de019 Remove duplicated word "different" (#4088) 87a4a9172 Actually switch to "Resolving Deltas" (#4080) ef844e32f Update CHANGELOG.md: link to [sources] PR (#4084) e10883ce5 REPLExt: check for compliant repl mode during repl init (#4067) ``` Co-authored-by: Dilum Aluthge --- .../Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 | 1 + .../Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 | 1 + .../Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 | 1 - .../Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 | 1 - stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 create mode 100644 deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 delete mode 100644 deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 diff --git a/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 b/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 new file mode 100644 index 0000000000000..e55e74562d717 --- /dev/null +++ b/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 @@ -0,0 +1 @@ +20d63322fc5b547d4c2464c27e9a6a0e diff --git a/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 b/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 new file mode 100644 index 0000000000000..5094dddb8142a --- /dev/null +++ b/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 @@ -0,0 +1 @@ +93dd178af474c76cce9368416d34570b66cc44c7c311e4dc14569d3f9deed70afcae8a2b1976535ed0732ed305c6d8d1b0ef04cbeeaa3af2891e97650d51467d diff --git a/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 b/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 deleted file mode 100644 index 1a0000a9d806e..0000000000000 --- a/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -f8a63ab3677f5df71a93d6d0a1f6333d diff --git a/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 b/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 deleted file mode 100644 index 99020c2fa7a32..0000000000000 --- 
a/deps/checksums/Pkg-9f8e11a4c0efb3b68a1e25a33f372f398c89cd66.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -3351c068974d2520a8f8fa9030d90c73cce69c87feae95c6ac6f166d3970a8096ed443280bef80b3409238a988aaea98f267bbec8978ad79594cedb0d59a37e5 diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index 32c6a094005f9..8b40c45c4366f 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = 9f8e11a4c0efb3b68a1e25a33f372f398c89cd66 +PKG_SHA1 = 7b759d7f0af56c5ad01f2289bbad71284a556970 PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From 859c25aee08aec13c4b0b52590a2f91d8eb94c3e Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Thu, 21 Nov 2024 22:57:20 +0900 Subject: [PATCH 479/537] inference: refine `setglobal!` rt for invalid assignments (#56622) --- Compiler/src/abstractinterpretation.jl | 34 ++++++++++++++------------ Compiler/test/inference.jl | 9 +++++++ 2 files changed, 28 insertions(+), 15 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index 9a5b19709e697..ef08183ee59dd 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -2419,8 +2419,8 @@ function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, if isa(M, Const) && isa(s, Const) M, s = M.val, s.val if M isa Module && s isa Symbol - exct = global_assignment_exct(interp, sv, saw_latestworld, GlobalRef(M, s), v) - return CallMeta(v, exct, Effects(setglobal!_effects, nothrow=exct===Bottom), NoCallInfo()) + rt, exct = global_assignment_rt_exct(interp, sv, saw_latestworld, GlobalRef(M, s), v) + return CallMeta(rt, exct, Effects(setglobal!_effects, nothrow=exct===Bottom), NoCallInfo()) end return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) end @@ -2485,7 +2485,7 @@ function abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntSta if binding_kind(partition) == BINDING_KIND_GLOBAL T = partition_restriction(partition) end - exct = Union{rte.exct, global_assignment_binding_exct(partition, v)} + exct = Union{rte.exct, global_assignment_binding_rt_exct(interp, partition, v)[2]} effects = merge_effects(rte.effects, Effects(setglobal!_effects, nothrow=exct===Bottom)) sg = CallMeta(Any, exct, effects, NoCallInfo()) else @@ -3401,31 +3401,35 @@ function abstract_eval_globalref(interp::AbstractInterpreter, g::GlobalRef, saw_ return ret end -function global_assignment_exct(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, g::GlobalRef, @nospecialize(newty)) +function global_assignment_rt_exct(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, g::GlobalRef, @nospecialize(newty)) if saw_latestworld - return Union{ErrorException, TypeError} + return Pair{Any,Any}(newty, Union{ErrorException, TypeError}) end partition = abstract_eval_binding_partition!(interp, g, sv) - return global_assignment_binding_exct(partition, newty) + return global_assignment_binding_rt_exct(interp, partition, newty) end -function global_assignment_binding_exct(partition::Core.BindingPartition, @nospecialize(newty)) +function global_assignment_binding_rt_exct(interp::AbstractInterpreter, partition::Core.BindingPartition, @nospecialize(newty)) kind = binding_kind(partition) - if is_some_guard(kind) || is_some_const_binding(kind) - return ErrorException + if is_some_guard(kind) + return Pair{Any,Any}(newty, ErrorException) + elseif 
is_some_const_binding(kind) + return Pair{Any,Any}(Bottom, ErrorException) end - ty = partition_restriction(partition) - if !(widenconst(newty) <: ty) - return TypeError + wnewty = widenconst(newty) + if !hasintersect(wnewty, ty) + return Pair{Any,Any}(Bottom, TypeError) + elseif !(wnewty <: ty) + retty = tmeet(typeinf_lattice(interp), newty, ty) + return Pair{Any,Any}(retty, TypeError) end - - return Union{} + return Pair{Any,Any}(newty, Bottom) end function handle_global_assignment!(interp::AbstractInterpreter, frame::InferenceState, saw_latestworld::Bool, lhs::GlobalRef, @nospecialize(newty)) effect_free = ALWAYS_FALSE - nothrow = global_assignment_exct(interp, frame, saw_latestworld, lhs, ignorelimited(newty)) === Union{} + nothrow = global_assignment_rt_exct(interp, frame, saw_latestworld, lhs, ignorelimited(newty))[2] === Union{} inaccessiblememonly = ALWAYS_FALSE if !nothrow sub_curr_ssaflag!(frame, IR_FLAG_NOTHROW) diff --git a/Compiler/test/inference.jl b/Compiler/test/inference.jl index e6bbf05caeabe..b8c869d737510 100644 --- a/Compiler/test/inference.jl +++ b/Compiler/test/inference.jl @@ -6088,3 +6088,12 @@ function issue56387(nt::NamedTuple, field::Symbol=:a) types[index] end @test Base.infer_return_type(issue56387, (typeof((;a=1)),)) == Type{Int} + +global setglobal!_refine::Int +@test Base.infer_return_type((Integer,)) do x + setglobal!(@__MODULE__, :setglobal!_refine, x) +end === Int +global setglobal!_must_throw::Int = 42 +@test Base.infer_return_type((String,)) do x + setglobal!(@__MODULE__, :setglobal!_must_throw, x) +end === Union{} From c31710a7d93c84b0e1f79c7d9c7ba7bca948ba10 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 21 Nov 2024 09:10:18 -0500 Subject: [PATCH 480/537] Make Expr(:invoke) target be a CodeInstance, not MethodInstance (#54899) This changes our IR representation to use a CodeInstance directly as the invoke function target to specify the ABI in its entirety, instead of just the MethodInstance (specifically for the rettype). That allows removing the lookup call at that point to decide upon the ABI. It is based around the idea that eventually we now keep track of these anyways to form a graph of the inferred edge data, for use later in validation anyways (instead of attempting to invert the backedges graph in staticdata_utils.c), so we might as well use the same target type for the :invoke call representation also now. 
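For downstream tooling that inspects typed IR, the practical consequence is that `stmt.args[1]` of an `Expr(:invoke, ...)` statement may now be a `Core.CodeInstance` rather than a `Core.MethodInstance`. A minimal sketch of how such tooling can stay agnostic to the target kind (the helper name is made up for illustration and is not part of this patch):

```julia
# Illustrative helper (not part of this patch): normalize an :invoke target to a
# MethodInstance, accepting either representation so that IR-inspecting code keeps
# working before and after this change.
function invoke_target_mi(stmt::Expr)
    Meta.isexpr(stmt, :invoke) || throw(ArgumentError("expected an :invoke expression"))
    target = stmt.args[1]
    return target isa Core.CodeInstance ? target.def : target::Core.MethodInstance
end
```

This mirrors the pattern used by the updated test helpers below, e.g. `(x.args[1]::CodeInstance).def`.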
--- Compiler/src/abstractinterpretation.jl | 11 ++--- Compiler/src/ssair/EscapeAnalysis.jl | 5 ++- Compiler/src/ssair/inlining.jl | 57 +++++++++++++++----------- Compiler/src/ssair/irinterp.jl | 16 +++++--- Compiler/src/ssair/passes.jl | 12 +++--- Compiler/src/ssair/show.jl | 8 +++- Compiler/src/typeinfer.jl | 21 +++++++--- Compiler/test/inline.jl | 16 ++++---- Compiler/test/irutils.jl | 2 +- base/essentials.jl | 2 + base/reflection.jl | 17 +------- src/aotcompile.cpp | 13 +++--- src/cgutils.cpp | 3 +- src/codegen-stubs.c | 2 +- src/codegen.cpp | 34 ++++++--------- src/gf.c | 2 +- src/init.c | 16 +++++++- src/interpreter.c | 5 ++- src/julia.h | 4 -- src/julia_internal.h | 3 +- src/opaque_closure.c | 5 ++- src/precompile_utils.c | 5 ++- stdlib/REPL/src/precompile.jl | 36 ++++------------ 23 files changed, 151 insertions(+), 144 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index ef08183ee59dd..64181f685e665 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -323,11 +323,11 @@ function abstract_call_gf_by_type(interp::AbstractInterpreter, @nospecialize(fun if mi === nothing || !const_prop_methodinstance_heuristic(interp, mi, arginfo, sv) csig = get_compileable_sig(method, sig, match.sparams) if csig !== nothing && (!seenall || csig !== sig) # corresponds to whether the first look already looked at this, so repeating abstract_call_method is not useful + #println(sig, " changed to ", csig, " for ", method) sp_ = ccall(:jl_type_intersection_with_env, Any, (Any, Any), csig, method.sig)::SimpleVector - if match.sparams === sp_[2] - mresult = abstract_call_method(interp, method, csig, match.sparams, multiple_matches, StmtInfo(false, false), sv)::Future - isready(mresult) || return false # wait for mresult Future to resolve off the callstack before continuing - end + sparams = sp_[2]::SimpleVector + mresult = abstract_call_method(interp, method, csig, sparams, multiple_matches, StmtInfo(false, false), sv)::Future + isready(mresult) || return false # wait for mresult Future to resolve off the callstack before continuing end end end @@ -1365,7 +1365,8 @@ function const_prop_call(interp::AbstractInterpreter, pop!(callstack) return nothing end - inf_result.ci_as_edge = codeinst_as_edge(interp, frame) + existing_edge = result.edge + inf_result.ci_as_edge = codeinst_as_edge(interp, frame, existing_edge) @assert frame.frameid != 0 && frame.cycleid == frame.frameid @assert frame.parentid == sv.frameid @assert inf_result.result !== nothing diff --git a/Compiler/src/ssair/EscapeAnalysis.jl b/Compiler/src/ssair/EscapeAnalysis.jl index a8c450f5bb9e0..47a7840628bb5 100644 --- a/Compiler/src/ssair/EscapeAnalysis.jl +++ b/Compiler/src/ssair/EscapeAnalysis.jl @@ -1068,7 +1068,10 @@ end # escape statically-resolved call, i.e. `Expr(:invoke, ::MethodInstance, ...)` function escape_invoke!(astate::AnalysisState, pc::Int, args::Vector{Any}) - mi = first(args)::MethodInstance + mi = first(args) + if !(mi isa MethodInstance) + mi = (mi::CodeInstance).def # COMBAK get escape info directly from CI instead? 
+ end first_idx, last_idx = 2, length(args) add_liveness_changes!(astate, pc, args, first_idx, last_idx) # TODO inspect `astate.ir.stmts[pc][:info]` and use const-prop'ed `InferenceResult` if available diff --git a/Compiler/src/ssair/inlining.jl b/Compiler/src/ssair/inlining.jl index 02b58b518a72a..0c0d14bf8f25a 100644 --- a/Compiler/src/ssair/inlining.jl +++ b/Compiler/src/ssair/inlining.jl @@ -38,7 +38,7 @@ struct SomeCase end struct InvokeCase - invoke::MethodInstance + invoke::Union{CodeInstance,MethodInstance} effects::Effects info::CallInfo end @@ -764,8 +764,9 @@ function rewrite_apply_exprargs!(todo::Vector{Pair{Int,Any}}, return new_argtypes end -function compileable_specialization(mi::MethodInstance, effects::Effects, +function compileable_specialization(code::Union{MethodInstance,CodeInstance}, effects::Effects, et::InliningEdgeTracker, @nospecialize(info::CallInfo), state::InliningState) + mi = code isa CodeInstance ? code.def : code mi_invoke = mi method, atype, sparams = mi.def::Method, mi.specTypes, mi.sparam_vals if OptimizationParams(state.interp).compilesig_invokes @@ -773,10 +774,10 @@ function compileable_specialization(mi::MethodInstance, effects::Effects, new_atype === nothing && return nothing if atype !== new_atype sp_ = ccall(:jl_type_intersection_with_env, Any, (Any, Any), new_atype, method.sig)::SimpleVector - if sparams === sp_[2]::SimpleVector - mi_invoke = specialize_method(method, new_atype, sparams) - mi_invoke === nothing && return nothing - end + sparams = sp_[2]::SimpleVector + mi_invoke = specialize_method(method, new_atype, sparams) + mi_invoke === nothing && return nothing + code = mi_invoke end else # If this caller does not want us to optimize calls to use their @@ -786,8 +787,15 @@ function compileable_specialization(mi::MethodInstance, effects::Effects, return nothing end end - add_inlining_edge!(et, mi_invoke) # to the dispatch lookup - return InvokeCase(mi_invoke, effects, info) + # prefer using a CodeInstance gotten from the cache, since that is where the invoke target should get compiled to normally + # TODO: can this code be gotten directly from inference sometimes? 
+ code = get(code_cache(state), mi_invoke, nothing) + if !isa(code, CodeInstance) + #println("missing code for ", mi_invoke, " for ", mi) + code = mi_invoke + end + add_inlining_edge!(et, code) # to the code and edges + return InvokeCase(code, effects, info) end struct InferredResult @@ -844,18 +852,18 @@ function resolve_todo(mi::MethodInstance, result::Union{Nothing,InferenceResult, src = @atomic :monotonic inferred_result.inferred effects = decode_effects(inferred_result.ipo_purity_bits) edge = inferred_result - else # there is no cached source available, bail out + else # there is no cached source available for this, but there might be code for the compilation sig return compileable_specialization(mi, Effects(), et, info, state) end # the duplicated check might have been done already within `analyze_method!`, but still # we need it here too since we may come here directly using a constant-prop' result if !OptimizationParams(state.interp).inlining || is_stmt_noinline(flag) - return compileable_specialization(edge.def, effects, et, info, state) + return compileable_specialization(edge, effects, et, info, state) end src_inlining_policy(state.interp, src, info, flag) || - return compileable_specialization(edge.def, effects, et, info, state) + return compileable_specialization(edge, effects, et, info, state) add_inlining_edge!(et, edge) if inferred_result isa CodeInstance @@ -1423,7 +1431,8 @@ end function semiconcrete_result_item(result::SemiConcreteResult, @nospecialize(info::CallInfo), flag::UInt32, state::InliningState) - mi = result.edge.def + code = result.edge + mi = code.def et = InliningEdgeTracker(state) if (!OptimizationParams(state.interp).inlining || is_stmt_noinline(flag) || @@ -1431,10 +1440,10 @@ function semiconcrete_result_item(result::SemiConcreteResult, # a `@noinline`-declared method when it's marked as `@constprop :aggressive`. # Suppress the inlining here (unless inlining is requested at the callsite). 
(is_declared_noinline(mi.def::Method) && !is_stmt_inline(flag))) - return compileable_specialization(mi, result.effects, et, info, state) + return compileable_specialization(code, result.effects, et, info, state) end src_inlining_policy(state.interp, result.ir, info, flag) || - return compileable_specialization(mi, result.effects, et, info, state) + return compileable_specialization(code, result.effects, et, info, state) add_inlining_edge!(et, result.edge) preserve_local_sources = OptimizationParams(state.interp).preserve_local_sources @@ -1466,7 +1475,7 @@ may_inline_concrete_result(result::ConcreteResult) = function concrete_result_item(result::ConcreteResult, @nospecialize(info::CallInfo), state::InliningState) if !may_inline_concrete_result(result) et = InliningEdgeTracker(state) - return compileable_specialization(result.edge.def, result.effects, et, info, state) + return compileable_specialization(result.edge, result.effects, et, info, state) end @assert result.effects === EFFECTS_TOTAL return ConstantCase(quoted(result.result), result.edge) @@ -1522,11 +1531,7 @@ function handle_modifyop!_call!(ir::IRCode, idx::Int, stmt::Expr, info::ModifyOp match = info.results[1]::MethodMatch match.fully_covers || return nothing edge = info.edges[1] - if edge === nothing - edge = specialize_method(match) - else - edge = edge.def - end + edge === nothing && return nothing case = compileable_specialization(edge, Effects(), InliningEdgeTracker(state), info, state) case === nothing && return nothing stmt.head = :invoke_modify @@ -1564,8 +1569,11 @@ function handle_finalizer_call!(ir::IRCode, idx::Int, stmt::Expr, info::Finalize # `Core.Compiler` data structure into the global cache item1 = cases[1].item if isa(item1, InliningTodo) - push!(stmt.args, true) - push!(stmt.args, item1.mi) + code = get(code_cache(state), item1.mi, nothing) # COMBAK: this seems like a bad design, can we use stmt_info instead to store the correct info? 
+ if code isa CodeInstance + push!(stmt.args, true) + push!(stmt.args, code) + end elseif isa(item1, InvokeCase) push!(stmt.args, false) push!(stmt.args, item1.invoke) @@ -1578,7 +1586,10 @@ end function handle_invoke_expr!(todo::Vector{Pair{Int,Any}}, ir::IRCode, idx::Int, stmt::Expr, @nospecialize(info::CallInfo), flag::UInt32, sig::Signature, state::InliningState) - mi = stmt.args[1]::MethodInstance + mi = stmt.args[1] + if !(mi isa MethodInstance) + mi = (mi::CodeInstance).def + end case = resolve_todo(mi, info, flag, state) handle_single_case!(todo, ir, idx, stmt, case, false) return nothing diff --git a/Compiler/src/ssair/irinterp.jl b/Compiler/src/ssair/irinterp.jl index dd5c907d3c25f..e96d27a85bc37 100644 --- a/Compiler/src/ssair/irinterp.jl +++ b/Compiler/src/ssair/irinterp.jl @@ -33,11 +33,15 @@ end function abstract_eval_invoke_inst(interp::AbstractInterpreter, inst::Instruction, irsv::IRInterpretationState) stmt = inst[:stmt] - mi = stmt.args[1]::MethodInstance - world = frame_world(irsv) - mi_cache = WorldView(code_cache(interp), world) - code = get(mi_cache, mi, nothing) - code === nothing && return Pair{Any,Tuple{Bool,Bool}}(nothing, (false, false)) + ci = stmt.args[1] + if ci isa MethodInstance + world = frame_world(irsv) + mi_cache = WorldView(code_cache(interp), world) + code = get(mi_cache, ci, nothing) + code === nothing && return Pair{Any,Tuple{Bool,Bool}}(nothing, (false, false)) + else + code = ci::CodeInstance + end argtypes = collect_argtypes(interp, stmt.args[2:end], StatementState(nothing, false), irsv) argtypes === nothing && return Pair{Any,Tuple{Bool,Bool}}(Bottom, (false, false)) return concrete_eval_invoke(interp, code, argtypes, irsv) @@ -160,7 +164,7 @@ function reprocess_instruction!(interp::AbstractInterpreter, inst::Instruction, result isa Future && (result = result[]) (; rt, effects) = result add_flag!(inst, flags_for_effects(effects)) - elseif head === :invoke + elseif head === :invoke # COMBAK: || head === :invoke_modifyfield (similar to call, but for args[2:end]) rt, (nothrow, noub) = abstract_eval_invoke_inst(interp, inst, irsv) if nothrow add_flag!(inst, IR_FLAG_NOTHROW) diff --git a/Compiler/src/ssair/passes.jl b/Compiler/src/ssair/passes.jl index dad4a09a3e710..e61f3207fc07a 100644 --- a/Compiler/src/ssair/passes.jl +++ b/Compiler/src/ssair/passes.jl @@ -1302,7 +1302,7 @@ function sroa_pass!(ir::IRCode, inlining::Union{Nothing,InliningState}=nothing) # at the end of the intrinsic. Detect that here. 
if length(stmt.args) == 4 && stmt.args[4] === nothing # constant case - elseif length(stmt.args) == 5 && stmt.args[4] isa Bool && stmt.args[5] isa MethodInstance + elseif length(stmt.args) == 5 && stmt.args[4] isa Bool && stmt.args[5] isa Core.CodeInstance # inlining case else continue @@ -1522,9 +1522,9 @@ end # NOTE we resolve the inlining source here as we don't want to serialize `Core.Compiler` # data structure into the global cache (see the comment in `handle_finalizer_call!`) function try_inline_finalizer!(ir::IRCode, argexprs::Vector{Any}, idx::Int, - mi::MethodInstance, @nospecialize(info::CallInfo), inlining::InliningState, + code::CodeInstance, @nospecialize(info::CallInfo), inlining::InliningState, attach_after::Bool) - code = get(code_cache(inlining), mi, nothing) + mi = code.def et = InliningEdgeTracker(inlining) if code isa CodeInstance if use_const_api(code) @@ -1671,11 +1671,11 @@ function try_resolve_finalizer!(ir::IRCode, alloc_idx::Int, finalizer_idx::Int, if inline === nothing # No code in the function - Nothing to do else - mi = finalizer_stmt.args[5]::MethodInstance - if inline::Bool && try_inline_finalizer!(ir, argexprs, loc, mi, info, inlining, attach_after) + ci = finalizer_stmt.args[5]::CodeInstance + if inline::Bool && try_inline_finalizer!(ir, argexprs, loc, ci, info, inlining, attach_after) # the finalizer body has been inlined else - newinst = add_flag(NewInstruction(Expr(:invoke, mi, argexprs...), Nothing), flag) + newinst = add_flag(NewInstruction(Expr(:invoke, ci, argexprs...), Nothing), flag) insert_node!(ir, loc, newinst, attach_after) end end diff --git a/Compiler/src/ssair/show.jl b/Compiler/src/ssair/show.jl index b9ed220d59453..7d7b182655db7 100644 --- a/Compiler/src/ssair/show.jl +++ b/Compiler/src/ssair/show.jl @@ -92,11 +92,14 @@ function print_stmt(io::IO, idx::Int, @nospecialize(stmt), code::Union{IRCode,Co print(io, ", ") print(io, stmt.typ) print(io, ")") - elseif isexpr(stmt, :invoke) && length(stmt.args) >= 2 && isa(stmt.args[1], MethodInstance) + elseif isexpr(stmt, :invoke) && length(stmt.args) >= 2 && isa(stmt.args[1], Union{MethodInstance,CodeInstance}) stmt = stmt::Expr # TODO: why is this here, and not in Base.show_unquoted printstyled(io, " invoke "; color = :light_black) - mi = stmt.args[1]::Core.MethodInstance + mi = stmt.args[1] + if !(mi isa Core.MethodInstance) + mi = (mi::Core.CodeInstance).def + end show_unquoted(io, stmt.args[2], indent) print(io, "(") # XXX: this is wrong if `sig` is not a concretetype method @@ -110,6 +113,7 @@ function print_stmt(io::IO, idx::Int, @nospecialize(stmt), code::Union{IRCode,Co end join(io, (print_arg(i) for i = 3:length(stmt.args)), ", ") print(io, ")") + # TODO: if we have a CodeInstance, should we print that rettype info here, which may differ (wider or narrower than the ssavaluetypes) elseif isexpr(stmt, :call) && length(stmt.args) >= 1 && label_dynamic_calls ft = maybe_argextype(stmt.args[1], code, sptypes) f = singleton_type(ft) diff --git a/Compiler/src/typeinfer.jl b/Compiler/src/typeinfer.jl index 544c5d5739795..83ec0271ea474 100644 --- a/Compiler/src/typeinfer.jl +++ b/Compiler/src/typeinfer.jl @@ -449,9 +449,10 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter) maybe_validate_code(me.linfo, me.src, "inferred") # finish populating inference results into the CodeInstance if possible, and maybe cache that globally for use elsewhere - if isdefined(result, :ci) && !limited_ret + if isdefined(result, :ci) result_type = result.result - @assert !(result_type === nothing 
|| result_type isa LimitedAccuracy) + result_type isa LimitedAccuracy && (result_type = result_type.typ) + @assert !(result_type === nothing) if isa(result_type, Const) rettype_const = result_type.val const_flags = is_result_constabi_eligible(result) ? 0x3 : 0x2 @@ -760,16 +761,24 @@ function MethodCallResult(::AbstractInterpreter, sv::AbsIntState, method::Method return MethodCallResult(rt, exct, effects, edge, edgecycle, edgelimited, volatile_inf_result) end -# allocate a dummy `edge::CodeInstance` to be added by `add_edges!` -function codeinst_as_edge(interp::AbstractInterpreter, sv::InferenceState) +# allocate a dummy `edge::CodeInstance` to be added by `add_edges!`, reusing an existing_edge if possible +# TODO: fill this in fully correctly (currently IPO info such as effects and return types are lost) +function codeinst_as_edge(interp::AbstractInterpreter, sv::InferenceState, @nospecialize existing_edge) mi = sv.linfo - owner = cache_owner(interp) min_world, max_world = first(sv.world.valid_worlds), last(sv.world.valid_worlds) if max_world >= get_world_counter() max_world = typemax(UInt) end edges = Core.svec(sv.edges...) - ci = CodeInstance(mi, owner, Any, Any, nothing, nothing, zero(Int32), + if existing_edge isa CodeInstance + # return an existing_edge, if the existing edge has more restrictions already (more edges and narrower worlds) + if existing_edge.min_world >= min_world && + existing_edge.max_world <= max_world && + existing_edge.edges == edges + return existing_edge + end + end + ci = CodeInstance(mi, cache_owner(interp), Any, Any, nothing, nothing, zero(Int32), min_world, max_world, zero(UInt32), nothing, zero(UInt8), nothing, edges) if max_world == typemax(UInt) # if we can record all of the backedges in the global reverse-cache, diff --git a/Compiler/test/inline.jl b/Compiler/test/inline.jl index 9d828fb7a4cfd..5dbf0a01db4a8 100644 --- a/Compiler/test/inline.jl +++ b/Compiler/test/inline.jl @@ -121,7 +121,7 @@ f29083(;μ,σ) = μ + σ*randn() g29083() = f29083(μ=2.0,σ=0.1) let c = code_typed(g29083, ())[1][1].code # make sure no call to kwfunc remains - @test !any(e->(isa(e,Expr) && (e.head === :invoke && e.args[1].def.name === :kwfunc)), c) + @test !any(e->(isa(e,Expr) && (e.head === :invoke && e.args[1].def.def.name === :kwfunc)), c) end @testset "issue #19122: [no]inline of short func. def. 
with return type annotation" begin @@ -723,7 +723,7 @@ mktempdir() do dir ci, rt = only(code_typed(issue42246)) if any(ci.code) do stmt Meta.isexpr(stmt, :invoke) && - stmt.args[1].def.name === nameof(IOBuffer) + stmt.args[1].def.def.name === nameof(IOBuffer) end exit(0) else @@ -1797,7 +1797,7 @@ end isinvokemodify(y) = @nospecialize(x) -> isinvokemodify(y, x) isinvokemodify(sym::Symbol, @nospecialize(x)) = isinvokemodify(mi->mi.def.name===sym, x) -isinvokemodify(pred::Function, @nospecialize(x)) = isexpr(x, :invoke_modify) && pred(x.args[1]::MethodInstance) +isinvokemodify(pred::Function, @nospecialize(x)) = isexpr(x, :invoke_modify) && pred((x.args[1]::CodeInstance).def) mutable struct Atomic{T} @atomic x::T @@ -2131,7 +2131,7 @@ let src = code_typed1((Type,)) do x end @test count(src.code) do @nospecialize x isinvoke(:no_compile_sig_invokes, x) && - (x.args[1]::MethodInstance).specTypes == Tuple{typeof(no_compile_sig_invokes),Any} + (x.args[1]::Core.CodeInstance).def.specTypes == Tuple{typeof(no_compile_sig_invokes),Any} end == 1 end let src = code_typed1((Type,); interp=NoCompileSigInvokes()) do x @@ -2139,7 +2139,7 @@ let src = code_typed1((Type,); interp=NoCompileSigInvokes()) do x end @test count(src.code) do @nospecialize x isinvoke(:no_compile_sig_invokes, x) && - (x.args[1]::MethodInstance).specTypes == Tuple{typeof(no_compile_sig_invokes),Type} + (x.args[1]::Core.CodeInstance).def.specTypes == Tuple{typeof(no_compile_sig_invokes),Type} end == 1 end # test the union split case @@ -2148,7 +2148,7 @@ let src = code_typed1((Union{DataType,UnionAll},)) do x end @test count(src.code) do @nospecialize x isinvoke(:no_compile_sig_invokes, x) && - (x.args[1]::MethodInstance).specTypes == Tuple{typeof(no_compile_sig_invokes),Any} + (x.args[1]::Core.CodeInstance).def.specTypes == Tuple{typeof(no_compile_sig_invokes),Any} end == 2 end let src = code_typed1((Union{DataType,UnionAll},); interp=NoCompileSigInvokes()) do x @@ -2156,11 +2156,11 @@ let src = code_typed1((Union{DataType,UnionAll},); interp=NoCompileSigInvokes()) end @test count(src.code) do @nospecialize x isinvoke(:no_compile_sig_invokes, x) && - (x.args[1]::MethodInstance).specTypes == Tuple{typeof(no_compile_sig_invokes),DataType} + (x.args[1]::Core.CodeInstance).def.specTypes == Tuple{typeof(no_compile_sig_invokes),DataType} end == 1 @test count(src.code) do @nospecialize x isinvoke(:no_compile_sig_invokes, x) && - (x.args[1]::MethodInstance).specTypes == Tuple{typeof(no_compile_sig_invokes),UnionAll} + (x.args[1]::Core.CodeInstance).def.specTypes == Tuple{typeof(no_compile_sig_invokes),UnionAll} end == 1 end diff --git a/Compiler/test/irutils.jl b/Compiler/test/irutils.jl index 95525d2f2fe5a..50b3a858d89dc 100644 --- a/Compiler/test/irutils.jl +++ b/Compiler/test/irutils.jl @@ -38,7 +38,7 @@ end # check if `x` is a statically-resolved call of a function whose name is `sym` isinvoke(y) = @nospecialize(x) -> isinvoke(y, x) isinvoke(sym::Symbol, @nospecialize(x)) = isinvoke(mi->mi.def.name===sym, x) -isinvoke(pred::Function, @nospecialize(x)) = isexpr(x, :invoke) && pred(x.args[1]::MethodInstance) +isinvoke(pred::Function, @nospecialize(x)) = isexpr(x, :invoke) && pred((x.args[1]::CodeInstance).def) fully_eliminated(@nospecialize args...; retval=(@__FILE__), kwargs...) 
= fully_eliminated(code_typed1(args...; kwargs...); retval) diff --git a/base/essentials.jl b/base/essentials.jl index 5683120df8d51..3574116261968 100644 --- a/base/essentials.jl +++ b/base/essentials.jl @@ -1050,6 +1050,7 @@ call obsolete versions of a function `f`. Prior to Julia 1.9, this function was not exported, and was called as `Base.invokelatest`. """ function invokelatest(@nospecialize(f), @nospecialize args...; kwargs...) + @inline kwargs = merge(NamedTuple(), kwargs) if isempty(kwargs) return Core._call_latest(f, args...) @@ -1084,6 +1085,7 @@ of [`invokelatest`](@ref). world age refers to system state unrelated to the main Julia session. """ function invoke_in_world(world::UInt, @nospecialize(f), @nospecialize args...; kwargs...) + @inline kwargs = Base.merge(NamedTuple(), kwargs) if isempty(kwargs) return Core._call_in_world(world, f, args...) diff --git a/base/reflection.jl b/base/reflection.jl index 1b8ed9413a35b..9246b4cb0ac71 100644 --- a/base/reflection.jl +++ b/base/reflection.jl @@ -248,30 +248,17 @@ struct CodegenParams """ trim::Cint - """ - A pointer of type - - typedef jl_value_t *(*jl_codeinstance_lookup_t)(jl_method_instance_t *mi JL_PROPAGATES_ROOT, - size_t min_world, size_t max_world); - - that may be used by external compilers as a callback to look up the code instance corresponding - to a particular method instance. - """ - lookup::Ptr{Cvoid} - function CodegenParams(; track_allocations::Bool=true, code_coverage::Bool=true, prefer_specsig::Bool=false, gnu_pubnames::Bool=true, debug_info_kind::Cint = default_debug_info_kind(), debug_info_level::Cint = Cint(JLOptions().debug_level), safepoint_on_entry::Bool=true, - gcstack_arg::Bool=true, use_jlplt::Bool=true, trim::Cint=Cint(0), - lookup::Ptr{Cvoid}=unsafe_load(cglobal(:jl_rettype_inferred_addr, Ptr{Cvoid}))) + gcstack_arg::Bool=true, use_jlplt::Bool=true, trim::Cint=Cint(0)) return new( Cint(track_allocations), Cint(code_coverage), Cint(prefer_specsig), Cint(gnu_pubnames), debug_info_kind, debug_info_level, Cint(safepoint_on_entry), - Cint(gcstack_arg), Cint(use_jlplt), Cint(trim), - lookup) + Cint(gcstack_arg), Cint(use_jlplt), Cint(trim)) end end diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index 583a8201587f7..4b3f1f1171ded 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -289,21 +289,22 @@ static void makeSafeName(GlobalObject &G) G.setName(StringRef(SafeName.data(), SafeName.size())); } -jl_code_instance_t *jl_ci_cache_lookup(const jl_cgparams_t &cgparams, jl_method_instance_t *mi, size_t world) +static jl_code_instance_t *jl_ci_cache_lookup(jl_method_instance_t *mi, size_t world, jl_codeinstance_lookup_t lookup) { ++CICacheLookups; - jl_value_t *ci = cgparams.lookup(mi, world, world); + jl_value_t *ci = lookup(mi, world, world); JL_GC_PROMISE_ROOTED(ci); jl_code_instance_t *codeinst = NULL; if (ci != jl_nothing && jl_atomic_load_relaxed(&((jl_code_instance_t *)ci)->inferred) != jl_nothing) { codeinst = (jl_code_instance_t*)ci; } else { - if (cgparams.lookup != jl_rettype_inferred_addr) { + if (lookup != jl_rettype_inferred_addr) { // XXX: This will corrupt and leak a lot of memory which may be very bad jl_error("Refusing to automatically run type inference with custom cache lookup."); } else { + // XXX: SOURCE_MODE_ABI is wrong here (not sufficient) codeinst = jl_type_infer(mi, world, SOURCE_MODE_ABI); /* Even if this codeinst is ordinarily not cacheable, we need to force * it into the cache here, since it was explicitly requested and is @@ -440,13 +441,15 @@ static void 
compile_workqueue(jl_codegen_params_t ¶ms, CompilationPolicy pol // `_imaging_mode` controls if raw pointers can be embedded (e.g. the code will be loaded into the same session). // `_external_linkage` create linkages between pkgimages. extern "C" JL_DLLEXPORT_CODEGEN -void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvmmod, const jl_cgparams_t *cgparams, int _policy, int _imaging_mode, int _external_linkage, size_t _world) +void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvmmod, const jl_cgparams_t *cgparams, int _policy, int _imaging_mode, int _external_linkage, size_t _world, jl_codeinstance_lookup_t lookup) { JL_TIMING(NATIVE_AOT, NATIVE_Create); ++CreateNativeCalls; CreateNativeMax.updateMax(jl_array_nrows(methods)); if (cgparams == NULL) cgparams = &jl_default_cgparams; + if (lookup == NULL) + lookup = &jl_rettype_inferred_native; jl_native_code_desc_t *data = new jl_native_code_desc_t; CompilationPolicy policy = (CompilationPolicy) _policy; bool imaging = imaging_default() || _imaging_mode == 1; @@ -511,7 +514,7 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm // then we want to compile and emit this if (jl_atomic_load_relaxed(&mi->def.method->primary_world) <= this_world && this_world <= jl_atomic_load_relaxed(&mi->def.method->deleted_world)) { // find and prepare the source code to compile - jl_code_instance_t *codeinst = jl_ci_cache_lookup(*cgparams, mi, this_world); + jl_code_instance_t *codeinst = jl_ci_cache_lookup(mi, this_world, lookup); if (jl_options.trim != JL_TRIM_NO && !codeinst) { // If we're building a small image, we need to compile everything // to ensure that we have all the information we need. diff --git a/src/cgutils.cpp b/src/cgutils.cpp index a166b0a2c4800..157d253ba4f21 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -4485,8 +4485,7 @@ static int compare_cgparams(const jl_cgparams_t *a, const jl_cgparams_t *b) (a->debug_info_kind == b->debug_info_kind) && (a->safepoint_on_entry == b->safepoint_on_entry) && (a->gcstack_arg == b->gcstack_arg) && - (a->use_jlplt == b->use_jlplt) && - (a->lookup == b->lookup); + (a->use_jlplt == b->use_jlplt); } #endif diff --git a/src/codegen-stubs.c b/src/codegen-stubs.c index 98ac063ba36d6..fe50af3f8e84d 100644 --- a/src/codegen-stubs.c +++ b/src/codegen-stubs.c @@ -70,7 +70,7 @@ JL_DLLEXPORT size_t jl_jit_total_bytes_fallback(void) return 0; } -JL_DLLEXPORT void *jl_create_native_fallback(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvmmod, const jl_cgparams_t *cgparams, int _policy, int _imaging_mode, int _external_linkage, size_t _world) UNAVAILABLE +JL_DLLEXPORT void *jl_create_native_fallback(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvmmod, const jl_cgparams_t *cgparams, int _policy, int _imaging_mode, int _external_linkage, size_t _world, jl_codeinstance_lookup_t lookup) UNAVAILABLE JL_DLLEXPORT void jl_dump_compiles_fallback(void *s) { diff --git a/src/codegen.cpp b/src/codegen.cpp index 85d791052484c..e3225a1a7dec2 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5496,10 +5496,19 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR bool handled = false; jl_cgval_t result; if (lival.constant) { - jl_method_instance_t *mi = (jl_method_instance_t*)lival.constant; + jl_method_instance_t *mi; + jl_value_t *ci = nullptr; + if (jl_is_method_instance(lival.constant)) { + mi = (jl_method_instance_t*)lival.constant; + } + else { + ci = lival.constant; + 
assert(jl_is_code_instance(ci)); + mi = ((jl_code_instance_t*)ci)->def; + } assert(jl_is_method_instance(mi)); if (mi == ctx.linfo) { - // handle self-recursion specially + // handle self-recursion specially (TODO: assuming ci is a valid invoke for mi?) jl_returninfo_t::CallingConv cc = jl_returninfo_t::CallingConv::Boxed; FunctionType *ft = ctx.f->getFunctionType(); StringRef protoname = ctx.f->getName(); @@ -5514,8 +5523,7 @@ static jl_cgval_t emit_invoke(jl_codectx_t &ctx, const jl_cgval_t &lival, ArrayR } } else { - jl_value_t *ci = ctx.params->lookup(mi, ctx.min_world, ctx.max_world); - if (ci != jl_nothing) { + if (ci) { jl_code_instance_t *codeinst = (jl_code_instance_t*)ci; auto invoke = jl_atomic_load_acquire(&codeinst->invoke); // check if we know how to handle this specptr @@ -10343,24 +10351,8 @@ int jl_opaque_ptrs_set = 0; extern "C" void jl_init_llvm(void) { - jl_default_cgparams = { - /* track_allocations */ 1, - /* code_coverage */ 1, - /* prefer_specsig */ 0, -#ifdef _OS_WINDOWS_ - /* gnu_pubnames */ 0, -#else - /* gnu_pubnames */ 1, -#endif - /* debug_info_kind */ (int) DICompileUnit::DebugEmissionKind::FullDebug, - /* debug_info_level */ (int) jl_options.debug_level, - /* safepoint_on_entry */ 1, - /* gcstack_arg */ 1, - /* use_jlplt*/ 1, - /* trim */ 0, - /* lookup */ jl_rettype_inferred_addr }; jl_page_size = jl_getpagesize(); - jl_default_debug_info_kind = (int) DICompileUnit::DebugEmissionKind::FullDebug; + jl_default_debug_info_kind = jl_default_cgparams.debug_info_kind = (int) DICompileUnit::DebugEmissionKind::FullDebug; jl_default_cgparams.debug_info_level = (int) jl_options.debug_level; InitializeNativeTarget(); InitializeNativeTargetAsmPrinter(); diff --git a/src/gf.c b/src/gf.c index 97a50cd8339a7..90b874d614b0c 100644 --- a/src/gf.c +++ b/src/gf.c @@ -3196,7 +3196,7 @@ JL_DLLEXPORT int jl_compile_hint(jl_tupletype_t *types) if (mi == NULL) return 0; JL_GC_PROMISE_ROOTED(mi); - jl_compile_method_instance(mi, NULL, world); + jl_compile_method_instance(mi, types, world); return 1; } diff --git a/src/init.c b/src/init.c index b3ca33344d258..1cd14e8556cc6 100644 --- a/src/init.c +++ b/src/init.c @@ -722,7 +722,21 @@ static void restore_fp_env(void) static NOINLINE void _finish_julia_init(JL_IMAGE_SEARCH rel, jl_ptls_t ptls, jl_task_t *ct); JL_DLLEXPORT int jl_default_debug_info_kind; -JL_DLLEXPORT jl_cgparams_t jl_default_cgparams; +JL_DLLEXPORT jl_cgparams_t jl_default_cgparams = { + /* track_allocations */ 1, + /* code_coverage */ 1, + /* prefer_specsig */ 0, +#ifdef _OS_WINDOWS_ + /* gnu_pubnames */ 0, +#else + /* gnu_pubnames */ 1, +#endif + /* debug_info_kind */ 0, // later DICompileUnit::DebugEmissionKind::FullDebug, + /* debug_info_level */ 0, // later jl_options.debug_level, + /* safepoint_on_entry */ 1, + /* gcstack_arg */ 1, + /* use_jlplt*/ 1, + /* trim */ 0 }; static void init_global_mutexes(void) { JL_MUTEX_INIT(&jl_modules_mutex, "jl_modules_mutex"); diff --git a/src/interpreter.c b/src/interpreter.c index cf2ae1a0d9f44..49a3afed14f0c 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -135,8 +135,9 @@ static jl_value_t *do_invoke(jl_value_t **args, size_t nargs, interpreter_state size_t i; for (i = 1; i < nargs; i++) argv[i-1] = eval_value(args[i], s); - jl_method_instance_t *meth = (jl_method_instance_t*)args[0]; - assert(jl_is_method_instance(meth)); + jl_value_t *c = args[0]; + assert(jl_is_code_instance(c) || jl_is_method_instance(c)); + jl_method_instance_t *meth = jl_is_method_instance(c) ? 
(jl_method_instance_t*)c : ((jl_code_instance_t*)c)->def; jl_value_t *result = jl_invoke(argv[0], nargs == 2 ? NULL : &argv[1], nargs - 2, meth); JL_GC_POP(); return result; diff --git a/src/julia.h b/src/julia.h index 87979f75e8d80..944fd3c43a297 100644 --- a/src/julia.h +++ b/src/julia.h @@ -2650,8 +2650,6 @@ JL_DLLEXPORT void jl_set_safe_restore(jl_jmp_buf *) JL_NOTSAFEPOINT; // codegen interface ---------------------------------------------------------- // The root propagation here doesn't have to be literal, but callers should // ensure that the return value outlives the MethodInstance -typedef jl_value_t *(*jl_codeinstance_lookup_t)(jl_method_instance_t *mi JL_PROPAGATES_ROOT, - size_t min_world, size_t max_world); typedef struct { int track_allocations; // can we track allocations? int code_coverage; // can we measure coverage? @@ -2667,8 +2665,6 @@ typedef struct { int use_jlplt; // Whether to use the Julia PLT mechanism or emit symbols directly int trim; // can we emit dynamic dispatches? - // Cache access. Default: jl_rettype_inferred_native. - jl_codeinstance_lookup_t lookup; } jl_cgparams_t; extern JL_DLLEXPORT int jl_default_debug_info_kind; extern JL_DLLEXPORT jl_cgparams_t jl_default_cgparams; diff --git a/src/julia_internal.h b/src/julia_internal.h index aadcbfdc94038..cd101533f1b8d 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -1946,7 +1946,8 @@ JL_DLLIMPORT jl_value_t *jl_dump_fptr_asm(uint64_t fptr, char emit_mc, const cha JL_DLLIMPORT jl_value_t *jl_dump_function_ir(jl_llvmf_dump_t *dump, char strip_ir_metadata, char dump_module, const char *debuginfo); JL_DLLIMPORT jl_value_t *jl_dump_function_asm(jl_llvmf_dump_t *dump, char emit_mc, const char* asm_variant, const char *debuginfo, char binary, char raw); -JL_DLLIMPORT void *jl_create_native(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvmmod, const jl_cgparams_t *cgparams, int policy, int imaging_mode, int cache, size_t world); +typedef jl_value_t *(*jl_codeinstance_lookup_t)(jl_method_instance_t *mi JL_PROPAGATES_ROOT, size_t min_world, size_t max_world); +JL_DLLIMPORT void *jl_create_native(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvmmod, const jl_cgparams_t *cgparams, int policy, int imaging_mode, int cache, size_t world, jl_codeinstance_lookup_t lookup); JL_DLLIMPORT void jl_dump_native(void *native_code, const char *bc_fname, const char *unopt_bc_fname, const char *obj_fname, const char *asm_fname, ios_t *z, ios_t *s, jl_emission_params_t *params); diff --git a/src/opaque_closure.c b/src/opaque_closure.c index 65773f88a3951..e3334c037f5a9 100644 --- a/src/opaque_closure.c +++ b/src/opaque_closure.c @@ -53,7 +53,8 @@ static jl_opaque_closure_t *new_opaque_closure(jl_tupletype_t *argt, jl_value_t jl_method_instance_t *mi = NULL; if (source->source) { mi = jl_specializations_get_linfo(source, sigtype, jl_emptysvec); - } else { + } + else { mi = (jl_method_instance_t *)jl_atomic_load_relaxed(&source->specializations); if (!jl_subtype(sigtype, mi->specTypes)) { jl_error("sigtype mismatch in optimized opaque closure"); @@ -116,7 +117,7 @@ static jl_opaque_closure_t *new_opaque_closure(jl_tupletype_t *argt, jl_value_t // OC wrapper methods are not world dependent and have no edges or other info ci = jl_get_method_inferred(mi_generic, selected_rt, 1, ~(size_t)0, NULL, NULL); if (!jl_atomic_load_acquire(&ci->invoke)) - jl_compile_codeinst(ci); + jl_compile_codeinst(ci); // confusing this actually calls jl_emit_oc_wrapper and never actually compiles ci (which would be impossible) specptr = 
jl_atomic_load_relaxed(&ci->specptr.fptr); } jl_opaque_closure_t *oc = (jl_opaque_closure_t*)jl_gc_alloc(ct->ptls, sizeof(jl_opaque_closure_t), oc_type); diff --git a/src/precompile_utils.c b/src/precompile_utils.c index d008cd26a28e9..81c60ba70d29f 100644 --- a/src/precompile_utils.c +++ b/src/precompile_utils.c @@ -278,7 +278,8 @@ static void *jl_precompile_(jl_array_t *m, int external_linkage) } } void *native_code = jl_create_native(m2, NULL, NULL, 0, 1, external_linkage, - jl_atomic_load_acquire(&jl_world_counter)); + jl_atomic_load_acquire(&jl_world_counter), + NULL); JL_GC_POP(); return native_code; } @@ -389,7 +390,7 @@ static void *jl_precompile_trimmed(size_t world) jl_cgparams_t params = jl_default_cgparams; params.trim = jl_options.trim; void *native_code = jl_create_native(m, NULL, ¶ms, 0, /* imaging */ 1, 0, - world); + world, NULL); JL_GC_POP(); return native_code; } diff --git a/stdlib/REPL/src/precompile.jl b/stdlib/REPL/src/precompile.jl index f7961a205e0b1..daa01f626aeab 100644 --- a/stdlib/REPL/src/precompile.jl +++ b/stdlib/REPL/src/precompile.jl @@ -142,13 +142,13 @@ function repl_workload() # wait for the definitive prompt before start writing to the TTY check_errors(readuntil(output_copy, JULIA_PROMPT)) write(debug_output, "\n#### REPL STARTED ####\n") - sleep(0.1) + sleep(0.01) check_errors(readavailable(output_copy)) # Input our script precompile_lines = split(repl_script::String, '\n'; keepempty=false) curr = 0 for l in precompile_lines - sleep(0.1) + sleep(0.01) # try to let a bit of output accumulate before reading again curr += 1 # consume any other output bytesavailable(output_copy) > 0 && check_errors(readavailable(output_copy)) @@ -168,7 +168,7 @@ function repl_workload() occursin(PKG_PROMPT, strbuf) && break occursin(SHELL_PROMPT, strbuf) && break occursin(HELP_PROMPT, strbuf) && break - sleep(0.1) + sleep(0.01) # try to let a bit of output accumulate before reading again end notify(repl_init_event) check_errors(strbuf) @@ -187,37 +187,15 @@ function repl_workload() nothing end -# Copied from PrecompileTools.jl let - function check_edges(node) - parentmi = node.mi_info.mi - for child in node.children - childmi = child.mi_info.mi - if !(isdefined(childmi, :backedges) && parentmi ∈ childmi.backedges) - precompile(childmi.specTypes) - end - check_edges(child) - end - end - if Base.generating_output() && Base.JLOptions().use_pkgimages != 0 - Core.Compiler.Timings.reset_timings() - Core.Compiler.__set_measure_typeinf(true) - try - repl_workload() - finally - Core.Compiler.__set_measure_typeinf(false) - Core.Compiler.Timings.close_current_timer() - end - roots = Core.Compiler.Timings._timings[1].children - for child in roots - precompile(child.mi_info.mi.specTypes) - check_edges(child) - end + repl_workload() precompile(Tuple{typeof(Base.setindex!), Base.Dict{Any, Any}, Any, Int}) precompile(Tuple{typeof(Base.delete!), Base.Set{Any}, String}) precompile(Tuple{typeof(Base.:(==)), Char, String}) - precompile(Tuple{typeof(Base.reseteof), Base.TTY}) + #for child in copy(Base.newly_inferred) + # precompile((child::Base.CodeInstance).def) + #end end end From 873a1e41a2dd84276b6ef6a3a47c452850808c50 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 22 Nov 2024 01:26:08 +0900 Subject: [PATCH 481/537] inference: add missing modeling for `swapglobal!` (#56623) This was missed from JuliaLang/julia#56299. 
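For context, a condensed sketch of the behavior this models, adapted from the tests added below (`swapglobal!_demo` is a made-up name used only for illustration): for a typed global, inference now derives the return type of `swapglobal!` from the binding's declared type, and infers `Union{}` when the assignment can never succeed.

```julia
# Adapted from the tests added in this patch; the global name is illustrative only.
global swapglobal!_demo::Int = 42

Base.infer_return_type((Int,)) do x
    swapglobal!(@__MODULE__, :swapglobal!_demo, x)  # the returned old value is always an Int
end  # == Int

Base.infer_return_type((String,)) do x
    swapglobal!(@__MODULE__, :swapglobal!_demo, x)  # type mismatch, so this always throws
end  # == Union{}
```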
--- Compiler/src/abstractinterpretation.jl | 51 ++++++++++++++++++++------ Compiler/test/inference.jl | 19 ++++++++++ 2 files changed, 59 insertions(+), 11 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index 64181f685e665..a3abbf814165a 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -2329,13 +2329,13 @@ function abstract_throw_methoderror(interp::AbstractInterpreter, argtypes::Vecto elseif !isvarargtype(argtypes[2]) MethodError else - ⊔ = join(typeinf_lattice(interp)) - MethodError ⊔ ArgumentError + Union{MethodError, ArgumentError} end return Future(CallMeta(Union{}, exct, EFFECTS_THROWS, NoCallInfo())) end const generic_getglobal_effects = Effects(EFFECTS_THROWS, consistent=ALWAYS_FALSE, inaccessiblememonly=ALWAYS_FALSE) +const generic_getglobal_exct = Union{ArgumentError, TypeError, ConcurrencyViolationError, UndefVarError} function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, @nospecialize(M), @nospecialize(s)) ⊑ = partialorder(typeinf_lattice(interp)) if M isa Const && s isa Const @@ -2373,8 +2373,7 @@ function abstract_eval_getglobal(interp::AbstractInterpreter, sv::AbsIntState, s elseif !isvarargtype(argtypes[end]) || length(argtypes) > 5 return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) else - return CallMeta(Any, Union{ArgumentError, UndefVarError, TypeError, ConcurrencyViolationError}, - generic_getglobal_effects, NoCallInfo()) + return CallMeta(Any, generic_getglobal_exct, generic_getglobal_effects, NoCallInfo()) end end @@ -2440,6 +2439,8 @@ function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, return merge_exct(cm, goe) end +const generic_setglobal!_exct = Union{ArgumentError, TypeError, ErrorException, ConcurrencyViolationError} + function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any}) if length(argtypes) == 4 return abstract_eval_setglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4]) @@ -2448,7 +2449,35 @@ function abstract_eval_setglobal!(interp::AbstractInterpreter, sv::AbsIntState, elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6 return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) else - return CallMeta(Any, Union{ArgumentError, TypeError, ErrorException, ConcurrencyViolationError}, setglobal!_effects, NoCallInfo()) + return CallMeta(Any, generic_setglobal!_exct, setglobal!_effects, NoCallInfo()) + end +end + +function abstract_eval_swapglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, + @nospecialize(M), @nospecialize(s), @nospecialize(v)) + scm = abstract_eval_setglobal!(interp, sv, saw_latestworld, M, s, v) + scm.rt === Bottom && return scm + gcm = abstract_eval_getglobal(interp, sv, saw_latestworld, M, s) + return CallMeta(gcm.rt, Union{scm.exct,gcm.exct}, merge_effects(scm.effects, gcm.effects), NoCallInfo()) +end + +function abstract_eval_swapglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, + @nospecialize(M), @nospecialize(s), @nospecialize(v), @nospecialize(order)) + scm = abstract_eval_setglobal!(interp, sv, saw_latestworld, M, s, v, order) + scm.rt === Bottom && return scm + gcm = abstract_eval_getglobal(interp, sv, saw_latestworld, M, s, order) + return CallMeta(gcm.rt, Union{scm.exct,gcm.exct}, merge_effects(scm.effects, gcm.effects), NoCallInfo()) +end + +function 
abstract_eval_swapglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any}) + if length(argtypes) == 4 + return abstract_eval_swapglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4]) + elseif length(argtypes) == 5 + return abstract_eval_swapglobal!(interp, sv, saw_latestworld, argtypes[2], argtypes[3], argtypes[4], argtypes[5]) + elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6 + return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) + else + return CallMeta(Any, Union{generic_getglobal_exct,generic_setglobal!_exct}, setglobal!_effects, NoCallInfo()) end end @@ -2467,20 +2496,18 @@ function abstract_eval_setglobalonce!(interp::AbstractInterpreter, sv::AbsIntSta elseif !isvarargtype(argtypes[end]) || length(argtypes) > 6 return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) else - return CallMeta(Bool, Union{ArgumentError, TypeError, ErrorException, ConcurrencyViolationError}, setglobal!_effects, NoCallInfo()) + return CallMeta(Bool, generic_setglobal!_exct, setglobal!_effects, NoCallInfo()) end end function abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntState, saw_latestworld::Bool, argtypes::Vector{Any}) if length(argtypes) in (5, 6, 7) (M, s, x, v) = argtypes[2], argtypes[3], argtypes[4], argtypes[5] - T = nothing if isa(M, Const) && isa(s, Const) M, s = M.val, s.val - if !(M isa Module && s isa Symbol) - return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) - end + M isa Module || return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) + s isa Symbol || return CallMeta(Union{}, TypeError, EFFECTS_THROWS, NoCallInfo()) partition = abstract_eval_binding_partition!(interp, GlobalRef(M, s), sv) rte = abstract_eval_partition_load(interp, partition) if binding_kind(partition) == BINDING_KIND_GLOBAL @@ -2507,7 +2534,7 @@ function abstract_eval_replaceglobal!(interp::AbstractInterpreter, sv::AbsIntSta elseif !isvarargtype(argtypes[end]) || length(argtypes) > 8 return CallMeta(Union{}, ArgumentError, EFFECTS_THROWS, NoCallInfo()) else - return CallMeta(Any, Union{ArgumentError, TypeError, ErrorException, ConcurrencyViolationError}, setglobal!_effects, NoCallInfo()) + return CallMeta(Any, Union{generic_getglobal_exct,generic_setglobal!_exct}, setglobal!_effects, NoCallInfo()) end end @@ -2547,6 +2574,8 @@ function abstract_call_known(interp::AbstractInterpreter, @nospecialize(f), return Future(abstract_eval_getglobal(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.setglobal! return Future(abstract_eval_setglobal!(interp, sv, si.saw_latestworld, argtypes)) + elseif f === Core.swapglobal! + return Future(abstract_eval_swapglobal!(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.setglobalonce! return Future(abstract_eval_setglobalonce!(interp, sv, si.saw_latestworld, argtypes)) elseif f === Core.replaceglobal! 
diff --git a/Compiler/test/inference.jl b/Compiler/test/inference.jl index b8c869d737510..560b9da02e643 100644 --- a/Compiler/test/inference.jl +++ b/Compiler/test/inference.jl @@ -6097,3 +6097,22 @@ global setglobal!_must_throw::Int = 42 @test Base.infer_return_type((String,)) do x setglobal!(@__MODULE__, :setglobal!_must_throw, x) end === Union{} + +global swapglobal!_xxx::Int = 42 +@test Base.infer_return_type((Int,)) do x + swapglobal!(@__MODULE__, :swapglobal!_xxx, x) +end === Int +@test Base.infer_return_type((String,)) do x + swapglobal!(@__MODULE__, :swapglobal!_xxx, x) +end === Union{} + +global swapglobal!_must_throw +@newinterp SwapGlobalInterp +let CC = Base.Compiler + CC.InferenceParams(::SwapGlobalInterp) = CC.InferenceParams(; assume_bindings_static=true) +end +function func_swapglobal!_must_throw(x) + swapglobal!(@__MODULE__, :swapglobal!_must_throw, x) +end +@test Base.infer_return_type(func_swapglobal!_must_throw, (Int,); interp=SwapGlobalInterp()) === Union{} +@test !Base.Compiler.is_effect_free(Base.infer_effects(func_swapglobal!_must_throw, (Int,); interp=SwapGlobalInterp()) ) From 1fb8df6c3e0cf58ed9b31d3aca524c6e3f136d43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mos=C3=A8=20Giordano?= <765740+giordano@users.noreply.github.com> Date: Thu, 21 Nov 2024 12:21:25 -0500 Subject: [PATCH 482/537] Fix `unsafe_trunc` test for `NaN16` (#56630) The return value of the LLVM instruction `fptosi` (https://llvm.org/docs/LangRef.html#fptosi-to-instruction) does not guarantee that the truncation of `NaN` is 0, so we relax the test to only check that the output has the expected type. Fix #56582. --- test/float16.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/float16.jl b/test/float16.jl index 10fb6b37db16d..4ff7cc663d07b 100644 --- a/test/float16.jl +++ b/test/float16.jl @@ -79,7 +79,8 @@ end @test unsafe_trunc(Int16, Float16(3)) === Int16(3) @test unsafe_trunc(UInt128, Float16(3)) === UInt128(3) @test unsafe_trunc(Int128, Float16(3)) === Int128(3) - @test unsafe_trunc(Int16, NaN16) === Int16(0) #18771 + # `unsafe_trunc` of `NaN` can be any value, see #56582 + @test unsafe_trunc(Int16, NaN16) isa Int16 # #18771 end @testset "fma and muladd" begin @test fma(Float16(0.1),Float16(0.9),Float16(0.5)) ≈ fma(0.1,0.9,0.5) From 712b2e55082af0a0af192e57a287a3cfaf27e25c Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Thu, 21 Nov 2024 16:13:48 -0500 Subject: [PATCH 483/537] precompilepkgs: fix is_direct_dep -> is_project_dep (#56643) Fixes #56642 Missed rename from https://github.com/JuliaLang/julia/pull/55910 --- base/precompilation.jl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/base/precompilation.jl b/base/precompilation.jl index 34dd4c4df9cb9..77e088f455fea 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -425,9 +425,9 @@ function _precompilepkgs(pkgs::Vector{String}, # inverse map of `parent_to_ext` above (ext → parent) ext_to_parent = Dict{Base.PkgId, Base.PkgId}() - function describe_pkg(pkg::PkgId, is_direct_dep::Bool, flags::Cmd, cacheflags::Base.CacheFlags) + function describe_pkg(pkg::PkgId, is_project_dep::Bool, flags::Cmd, cacheflags::Base.CacheFlags) name = haskey(ext_to_parent, pkg) ? string(ext_to_parent[pkg].name, " → ", pkg.name) : pkg.name - name = is_direct_dep ? name : color_string(name, :light_black) + name = is_project_dep ? 
name : color_string(name, :light_black) if nconfigs > 1 && !isempty(flags) config_str = join(flags, " ") name *= color_string(" `$config_str`", :light_black) @@ -911,7 +911,7 @@ function _precompilepkgs(pkgs::Vector{String}, if err isa ErrorException || (err isa ArgumentError && startswith(err.msg, "Invalid header in cache file")) errmsg = String(take!(get(IOBuffer, std_outputs, pkg_config))) delete!(std_outputs, pkg_config) # so it's not shown as warnings, given error report - failed_deps[pkg_config] = (strict || is_direct_dep) ? string(sprint(showerror, err), "\n", strip(errmsg)) : "" + failed_deps[pkg_config] = (strict || is_project_dep) ? string(sprint(showerror, err), "\n", strip(errmsg)) : "" !fancyprint && lock(print_lock) do println(io, " "^9, color_string(" ✗ ", Base.error_color()), name) end From 78fd186b7bb884f2777e5b27a4a8d3dfe63265de Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 21 Nov 2024 17:21:55 -0500 Subject: [PATCH 484/537] Make Compiler tests runnable as package (#56632) Makes `test Compiler` work properly (as in use the Compiler package, not Base.Compiler) and pass tests, but still needs to be made parallel in a follow-on. --------- Co-authored-by: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Co-authored-by: Kristoffer Carlsson Co-authored-by: Shuhei Kadowaki --- Compiler/Project.toml | 9 + Compiler/test/AbstractInterpreter.jl | 165 +++---- Compiler/test/EAUtils.jl | 51 ++- Compiler/test/EscapeAnalysis.jl | 9 +- Compiler/test/codegen.jl | 27 +- Compiler/test/compact.jl | 22 +- Compiler/test/contextual.jl | 15 +- Compiler/test/datastructures.jl | 100 +++-- Compiler/test/effects.jl | 564 +++++++++++------------ Compiler/test/inference.jl | 641 +++++++++++++-------------- Compiler/test/inline.jl | 86 ++-- Compiler/test/interpreter_exec.jl | 14 +- Compiler/test/invalidation.jl | 31 +- Compiler/test/irpasses.jl | 224 +++++----- Compiler/test/irutils.jl | 18 +- Compiler/test/newinterp.jl | 37 +- Compiler/test/runtests.jl | 2 + Compiler/test/ssair.jl | 161 +++---- Compiler/test/tarjan.jl | 12 +- Compiler/test/validation.jl | 66 +-- test/precompile_absint1.jl | 4 +- test/precompile_absint2.jl | 24 +- 22 files changed, 1174 insertions(+), 1108 deletions(-) diff --git a/Compiler/Project.toml b/Compiler/Project.toml index 9cb85fe7d05de..046d672c4877c 100644 --- a/Compiler/Project.toml +++ b/Compiler/Project.toml @@ -4,3 +4,12 @@ version = "0.0.2" [compat] julia = "1.10" + +[extras] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" +Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" +Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[targets] +test = ["Test", "InteractiveUtils", "Random", "Libdl"] diff --git a/Compiler/test/AbstractInterpreter.jl b/Compiler/test/AbstractInterpreter.jl index 1939f4a19c05f..81659443038e4 100644 --- a/Compiler/test/AbstractInterpreter.jl +++ b/Compiler/test/AbstractInterpreter.jl @@ -1,7 +1,14 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license using Test -const CC = Core.Compiler + +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end include("irutils.jl") include("newinterp.jl") @@ -9,13 +16,13 @@ include("newinterp.jl") # interpreter that performs abstract interpretation only # (semi-concrete interpretation should be disabled automatically) @newinterp AbsIntOnlyInterp1 -CC.may_optimize(::AbsIntOnlyInterp1) = false +Compiler.may_optimize(::AbsIntOnlyInterp1) = false @test Base.infer_return_type(Base.init_stdio, (Ptr{Cvoid},); interp=AbsIntOnlyInterp1()) >: IO # it should work even if the interpreter discards inferred source entirely @newinterp AbsIntOnlyInterp2 -CC.may_optimize(::AbsIntOnlyInterp2) = false -CC.transform_result_for_cache(::AbsIntOnlyInterp2, ::CC.InferenceResult) = nothing +Compiler.may_optimize(::AbsIntOnlyInterp2) = false +Compiler.transform_result_for_cache(::AbsIntOnlyInterp2, ::Compiler.InferenceResult) = nothing @test Base.infer_return_type(Base.init_stdio, (Ptr{Cvoid},); interp=AbsIntOnlyInterp2()) >: IO # OverlayMethodTable @@ -32,9 +39,9 @@ end @newinterp MTOverlayInterp @MethodTable OVERLAY_MT -CC.method_table(interp::MTOverlayInterp) = CC.OverlayMethodTable(CC.get_inference_world(interp), OVERLAY_MT) +Compiler.method_table(interp::MTOverlayInterp) = Compiler.OverlayMethodTable(Compiler.get_inference_world(interp), OVERLAY_MT) -function CC.add_remark!(interp::MTOverlayInterp, ::CC.InferenceState, remark) +function Compiler.add_remark!(interp::MTOverlayInterp, ::Compiler.InferenceState, remark) if interp.meta !== nothing # Core.println(remark) push!(interp.meta, remark) @@ -63,10 +70,10 @@ end |> only === Union{Float64,Nothing} # effect analysis should figure out that the overlayed method is used @test Base.infer_effects((Float64,); interp=MTOverlayInterp()) do x strangesin(x) -end |> !Core.Compiler.is_nonoverlayed +end |> !Compiler.is_nonoverlayed @test Base.infer_effects((Any,); interp=MTOverlayInterp()) do x @invoke strangesin(x::Float64) -end |> !Core.Compiler.is_nonoverlayed +end |> !Compiler.is_nonoverlayed # account for overlay possibility in unanalyzed matching method callstrange(::Float64) = strangesin(x) @@ -74,20 +81,20 @@ callstrange(::Number) = Core.compilerbarrier(:type, nothing) # trigger inference callstrange(::Any) = 1.0 callstrange_entry(x) = callstrange(x) # needs to be defined here because of world age let interp = MTOverlayInterp(Set{Any}()) - matches = Core.Compiler.findall(Tuple{typeof(callstrange),Any}, Core.Compiler.method_table(interp)) + matches = Compiler.findall(Tuple{typeof(callstrange),Any}, Compiler.method_table(interp)) @test matches !== nothing - @test Core.Compiler.length(matches) == 3 - @test Base.infer_effects(callstrange_entry, (Any,); interp) |> !Core.Compiler.is_nonoverlayed + @test Compiler.length(matches) == 3 + @test Base.infer_effects(callstrange_entry, (Any,); interp) |> !Compiler.is_nonoverlayed @test "Call inference reached maximally imprecise information: bailing on doing more abstract inference." 
in interp.meta end # but it should never apply for the native compilation @test Base.infer_effects((Float64,)) do x strangesin(x) -end |> Core.Compiler.is_nonoverlayed +end |> Compiler.is_nonoverlayed @test Base.infer_effects((Any,)) do x @invoke strangesin(x::Float64) -end |> Core.Compiler.is_nonoverlayed +end |> Compiler.is_nonoverlayed # fallback to the internal method table @test Base.return_types((Int,); interp=MTOverlayInterp()) do x @@ -152,14 +159,14 @@ gpu_factorial1(x::Int) = myfactorial(x, raise_on_gpu1) gpu_factorial2(x::Int) = myfactorial(x, raise_on_gpu2) gpu_factorial3(x::Int) = myfactorial(x, raise_on_gpu3) -@test Base.infer_effects(cpu_factorial, (Int,); interp=MTOverlayInterp()) |> Core.Compiler.is_nonoverlayed -@test Base.infer_effects(gpu_factorial1, (Int,); interp=MTOverlayInterp()) |> !Core.Compiler.is_nonoverlayed -@test Base.infer_effects(gpu_factorial2, (Int,); interp=MTOverlayInterp()) |> Core.Compiler.is_consistent_overlay +@test Base.infer_effects(cpu_factorial, (Int,); interp=MTOverlayInterp()) |> Compiler.is_nonoverlayed +@test Base.infer_effects(gpu_factorial1, (Int,); interp=MTOverlayInterp()) |> !Compiler.is_nonoverlayed +@test Base.infer_effects(gpu_factorial2, (Int,); interp=MTOverlayInterp()) |> Compiler.is_consistent_overlay let effects = Base.infer_effects(gpu_factorial3, (Int,); interp=MTOverlayInterp()) # check if `@consistent_overlay` together works with `@assume_effects` # N.B. the overlaid `raise_on_gpu3` is not :foldable otherwise since `error_on_gpu` is (intetionally) undefined. - @test Core.Compiler.is_consistent_overlay(effects) - @test Core.Compiler.is_foldable(effects) + @test Compiler.is_consistent_overlay(effects) + @test Compiler.is_foldable(effects) end @test Base.infer_return_type(; interp=MTOverlayInterp()) do Val(gpu_factorial2(3)) @@ -172,11 +179,11 @@ end == Val{6} # https://github.com/JuliaLang/julia/issues/48097 @newinterp Issue48097Interp @MethodTable ISSUE_48097_MT -CC.method_table(interp::Issue48097Interp) = CC.OverlayMethodTable(CC.get_inference_world(interp), ISSUE_48097_MT) -function CC.concrete_eval_eligible(interp::Issue48097Interp, - @nospecialize(f), result::CC.MethodCallResult, arginfo::CC.ArgInfo, sv::CC.AbsIntState) - ret = @invoke CC.concrete_eval_eligible(interp::CC.AbstractInterpreter, - f::Any, result::CC.MethodCallResult, arginfo::CC.ArgInfo, sv::CC.AbsIntState) +Compiler.method_table(interp::Issue48097Interp) = Compiler.OverlayMethodTable(Compiler.get_inference_world(interp), ISSUE_48097_MT) +function Compiler.concrete_eval_eligible(interp::Issue48097Interp, + @nospecialize(f), result::Compiler.MethodCallResult, arginfo::Compiler.ArgInfo, sv::Compiler.AbsIntState) + ret = @invoke Compiler.concrete_eval_eligible(interp::Compiler.AbstractInterpreter, + f::Any, result::Compiler.MethodCallResult, arginfo::Compiler.ArgInfo, sv::Compiler.AbsIntState) if ret === :semi_concrete_eval # disable semi-concrete interpretation return :none @@ -192,7 +199,7 @@ end # https://github.com/JuliaLang/julia/issues/52938 @newinterp Issue52938Interp @MethodTable ISSUE_52938_MT -CC.method_table(interp::Issue52938Interp) = CC.OverlayMethodTable(CC.get_inference_world(interp), ISSUE_52938_MT) +Compiler.method_table(interp::Issue52938Interp) = Compiler.OverlayMethodTable(Compiler.get_inference_world(interp), ISSUE_52938_MT) inner52938(x, types::Type, args...; kwargs...) 
= x outer52938(x) = @inline inner52938(x, Tuple{}; foo=Ref(42), bar=1) @test fully_eliminated(outer52938, (Any,); interp=Issue52938Interp(), retval=Argument(2)) @@ -200,7 +207,7 @@ outer52938(x) = @inline inner52938(x, Tuple{}; foo=Ref(42), bar=1) # https://github.com/JuliaGPU/CUDA.jl/issues/2241 @newinterp Cuda2241Interp @MethodTable CUDA_2241_MT -CC.method_table(interp::Cuda2241Interp) = CC.OverlayMethodTable(CC.get_inference_world(interp), CUDA_2241_MT) +Compiler.method_table(interp::Cuda2241Interp) = Compiler.OverlayMethodTable(Compiler.get_inference_world(interp), CUDA_2241_MT) inner2241(f, types::Type, args...; kwargs...) = nothing function outer2241(f) @inline inner2241(f, Tuple{}; foo=Ref(42), bar=1) @@ -217,7 +224,7 @@ const cuda_kernel_state = Ref{Any}() # Should not concrete-eval overlayed methods in semi-concrete interpretation @newinterp OverlaySinInterp @MethodTable OVERLAY_SIN_MT -CC.method_table(interp::OverlaySinInterp) = CC.OverlayMethodTable(CC.get_inference_world(interp), OVERLAY_SIN_MT) +Compiler.method_table(interp::OverlaySinInterp) = Compiler.OverlayMethodTable(Compiler.get_inference_world(interp), OVERLAY_SIN_MT) overlay_sin1(x) = error("Not supposed to be called.") @overlay OVERLAY_SIN_MT overlay_sin1(x) = cos(x) @overlay OVERLAY_SIN_MT Base.sin(x::Union{Float32,Float64}) = overlay_sin1(x) @@ -252,30 +259,30 @@ end # =============== using Core: SlotNumber, Argument -using Core.Compiler: slot_id, tmerge_fast_path -import .CC: +using .Compiler: slot_id, tmerge_fast_path +import .Compiler: AbstractLattice, BaseInferenceLattice, IPOResultLattice, InferenceLattice, widenlattice, is_valid_lattice_norec, typeinf_lattice, ipo_lattice, optimizer_lattice, widenconst, tmeet, tmerge, ⊑, abstract_eval_special_value, widenreturn @newinterp TaintInterpreter -struct TaintLattice{PL<:AbstractLattice} <: CC.AbstractLattice +struct TaintLattice{PL<:AbstractLattice} <: Compiler.AbstractLattice parent::PL end -CC.widenlattice(𝕃::TaintLattice) = 𝕃.parent -CC.is_valid_lattice_norec(::TaintLattice, @nospecialize(elm)) = isa(elm, Taint) +Compiler.widenlattice(𝕃::TaintLattice) = 𝕃.parent +Compiler.is_valid_lattice_norec(::TaintLattice, @nospecialize(elm)) = isa(elm, Taint) -struct InterTaintLattice{PL<:AbstractLattice} <: CC.AbstractLattice +struct InterTaintLattice{PL<:AbstractLattice} <: Compiler.AbstractLattice parent::PL end -CC.widenlattice(𝕃::InterTaintLattice) = 𝕃.parent -CC.is_valid_lattice_norec(::InterTaintLattice, @nospecialize(elm)) = isa(elm, InterTaint) +Compiler.widenlattice(𝕃::InterTaintLattice) = 𝕃.parent +Compiler.is_valid_lattice_norec(::InterTaintLattice, @nospecialize(elm)) = isa(elm, InterTaint) const AnyTaintLattice{L} = Union{TaintLattice{L},InterTaintLattice{L}} -CC.typeinf_lattice(::TaintInterpreter) = InferenceLattice(TaintLattice(BaseInferenceLattice.instance)) -CC.ipo_lattice(::TaintInterpreter) = InferenceLattice(InterTaintLattice(IPOResultLattice.instance)) -CC.optimizer_lattice(::TaintInterpreter) = InterTaintLattice(SimpleInferenceLattice.instance) +Compiler.typeinf_lattice(::TaintInterpreter) = InferenceLattice(TaintLattice(BaseInferenceLattice.instance)) +Compiler.ipo_lattice(::TaintInterpreter) = InferenceLattice(InterTaintLattice(IPOResultLattice.instance)) +Compiler.optimizer_lattice(::TaintInterpreter) = InterTaintLattice(SimpleInferenceLattice.instance) struct Taint typ @@ -311,14 +318,14 @@ end const AnyTaint = Union{Taint, InterTaint} -function CC.tmeet(𝕃::AnyTaintLattice, @nospecialize(v), @nospecialize(t::Type)) +function 
Compiler.tmeet(𝕃::AnyTaintLattice, @nospecialize(v), @nospecialize(t::Type)) T = isa(𝕃, TaintLattice) ? Taint : InterTaint if isa(v, T) v = v.typ end return tmeet(widenlattice(𝕃), v, t) end -function CC.tmerge(𝕃::AnyTaintLattice, @nospecialize(typea), @nospecialize(typeb)) +function Compiler.tmerge(𝕃::AnyTaintLattice, @nospecialize(typea), @nospecialize(typeb)) r = tmerge_fast_path(𝕃, typea, typeb) r !== nothing && return r # type-lattice for Taint @@ -336,7 +343,7 @@ function CC.tmerge(𝕃::AnyTaintLattice, @nospecialize(typea), @nospecialize(ty end return tmerge(widenlattice(𝕃), typea, typeb) end -function CC.:⊑(𝕃::AnyTaintLattice, @nospecialize(typea), @nospecialize(typeb)) +function Compiler.:⊑(𝕃::AnyTaintLattice, @nospecialize(typea), @nospecialize(typeb)) T = isa(𝕃, TaintLattice) ? Taint : InterTaint if isa(typea, T) if isa(typeb, T) @@ -349,39 +356,39 @@ function CC.:⊑(𝕃::AnyTaintLattice, @nospecialize(typea), @nospecialize(type end return ⊑(widenlattice(𝕃), typea, typeb) end -CC.widenconst(taint::AnyTaint) = widenconst(taint.typ) +Compiler.widenconst(taint::AnyTaint) = widenconst(taint.typ) -function CC.abstract_eval_special_value(interp::TaintInterpreter, - @nospecialize(e), vtypes::CC.VarTable, sv::CC.InferenceState) - ret = @invoke CC.abstract_eval_special_value(interp::CC.AbstractInterpreter, - e::Any, vtypes::CC.VarTable, sv::CC.InferenceState) +function Compiler.abstract_eval_special_value(interp::TaintInterpreter, + @nospecialize(e), sstate::Compiler.StatementState, sv::Compiler.InferenceState) + ret = @invoke Compiler.abstract_eval_special_value(interp::Compiler.AbstractInterpreter, + e::Any, sstate::Compiler.StatementState, sv::Compiler.InferenceState) if isa(e, SlotNumber) || isa(e, Argument) return Taint(ret, slot_id(e)) end return ret end -function CC.widenreturn(𝕃::InferenceLattice{<:InterTaintLattice}, @nospecialize(rt), @nospecialize(bestguess), nargs::Int, slottypes::Vector{Any}, changes::CC.VarTable) +function Compiler.widenreturn(𝕃::InferenceLattice{<:InterTaintLattice}, @nospecialize(rt), @nospecialize(bestguess), nargs::Int, slottypes::Vector{Any}, changes::Compiler.VarTable) if isa(rt, Taint) return InterTaint(rt.typ, BitSet((id for id in rt.slots if id ≤ nargs))) end - return CC.widenreturn(widenlattice(𝕃), rt, bestguess, nargs, slottypes, changes) + return Compiler.widenreturn(widenlattice(𝕃), rt, bestguess, nargs, slottypes, changes) end -@test CC.tmerge(typeinf_lattice(TaintInterpreter()), Taint(Int, 1), Taint(Int, 2)) == Taint(Int, BitSet(1:2)) +@test Compiler.tmerge(typeinf_lattice(TaintInterpreter()), Taint(Int, 1), Taint(Int, 2)) == Taint(Int, BitSet(1:2)) # code_typed(ifelse, (Bool, Int, Int); interp=TaintInterpreter()) # External lattice without `Conditional` -import .CC: +import .Compiler: AbstractLattice, ConstsLattice, PartialsLattice, InferenceLattice, typeinf_lattice, ipo_lattice, optimizer_lattice @newinterp NonconditionalInterpreter -CC.typeinf_lattice(::NonconditionalInterpreter) = InferenceLattice(PartialsLattice(ConstsLattice())) -CC.ipo_lattice(::NonconditionalInterpreter) = InferenceLattice(PartialsLattice(ConstsLattice())) -CC.optimizer_lattice(::NonconditionalInterpreter) = PartialsLattice(ConstsLattice()) +Compiler.typeinf_lattice(::NonconditionalInterpreter) = InferenceLattice(PartialsLattice(ConstsLattice())) +Compiler.ipo_lattice(::NonconditionalInterpreter) = InferenceLattice(PartialsLattice(ConstsLattice())) +Compiler.optimizer_lattice(::NonconditionalInterpreter) = PartialsLattice(ConstsLattice()) @test Base.return_types((Any,); 
interp=NonconditionalInterpreter()) do x c = isa(x, Int) || isa(x, Float64) @@ -398,34 +405,34 @@ end |> only === Any @newinterp NoinlineInterpreter noinline_modules(interp::NoinlineInterpreter) = interp.meta::Set{Module} -import .CC: CallInfo +import .Compiler: CallInfo struct NoinlineCallInfo <: CallInfo info::CallInfo # wrapped call end -CC.add_edges_impl(edges::Vector{Any}, info::NoinlineCallInfo) = CC.add_edges!(edges, info.info) -CC.nsplit_impl(info::NoinlineCallInfo) = CC.nsplit(info.info) -CC.getsplit_impl(info::NoinlineCallInfo, idx::Int) = CC.getsplit(info.info, idx) -CC.getresult_impl(info::NoinlineCallInfo, idx::Int) = CC.getresult(info.info, idx) +Compiler.add_edges_impl(edges::Vector{Any}, info::NoinlineCallInfo) = Compiler.add_edges!(edges, info.info) +Compiler.nsplit_impl(info::NoinlineCallInfo) = Compiler.nsplit(info.info) +Compiler.getsplit_impl(info::NoinlineCallInfo, idx::Int) = Compiler.getsplit(info.info, idx) +Compiler.getresult_impl(info::NoinlineCallInfo, idx::Int) = Compiler.getresult(info.info, idx) -function CC.abstract_call(interp::NoinlineInterpreter, - arginfo::CC.ArgInfo, si::CC.StmtInfo, sv::CC.InferenceState, max_methods::Int) - ret = @invoke CC.abstract_call(interp::CC.AbstractInterpreter, - arginfo::CC.ArgInfo, si::CC.StmtInfo, sv::CC.InferenceState, max_methods::Int) - return CC.Future{CC.CallMeta}(ret, interp, sv) do ret, interp, sv +function Compiler.abstract_call(interp::NoinlineInterpreter, + arginfo::Compiler.ArgInfo, si::Compiler.StmtInfo, sv::Compiler.InferenceState, max_methods::Int) + ret = @invoke Compiler.abstract_call(interp::Compiler.AbstractInterpreter, + arginfo::Compiler.ArgInfo, si::Compiler.StmtInfo, sv::Compiler.InferenceState, max_methods::Int) + return Compiler.Future{Compiler.CallMeta}(ret, interp, sv) do ret, interp, sv if sv.mod in noinline_modules(interp) (;rt, exct, effects, info) = ret - return CC.CallMeta(rt, exct, effects, NoinlineCallInfo(info)) + return Compiler.CallMeta(rt, exct, effects, NoinlineCallInfo(info)) end return ret end end -function CC.src_inlining_policy(interp::NoinlineInterpreter, +function Compiler.src_inlining_policy(interp::NoinlineInterpreter, @nospecialize(src), @nospecialize(info::CallInfo), stmt_flag::UInt32) if isa(info, NoinlineCallInfo) return false end - return @invoke CC.src_inlining_policy(interp::CC.AbstractInterpreter, + return @invoke Compiler.src_inlining_policy(interp::Compiler.AbstractInterpreter, src::Any, info::CallInfo, stmt_flag::UInt32) end @@ -459,8 +466,8 @@ let NoinlineModule = Module() # it should work for cached results method = only(methods(inlined_usually, (Float64,Float64,Float64,))) - mi = CC.specialize_method(method, Tuple{typeof(inlined_usually),Float64,Float64,Float64}, Core.svec()) - @test CC.haskey(CC.code_cache(interp), mi) + mi = Compiler.specialize_method(method, Tuple{typeof(inlined_usually),Float64,Float64,Float64}, Core.svec()) + @test Compiler.haskey(Compiler.code_cache(interp), mi) let src = code_typed1(main_func, (Float64,Float64,Float64); interp) @test count(isinvoke(:inlined_usually), src.code) == 0 @test count(iscall((src, inlined_usually)), src.code) == 0 @@ -489,28 +496,28 @@ end @newinterp CustomDataInterp struct CustomDataInterpToken end -CC.cache_owner(::CustomDataInterp) = CustomDataInterpToken() +Compiler.cache_owner(::CustomDataInterp) = CustomDataInterpToken() struct CustomData inferred CustomData(@nospecialize inferred) = new(inferred) end -function CC.transform_result_for_cache(interp::CustomDataInterp, result::CC.InferenceResult) - 
inferred_result = @invoke CC.transform_result_for_cache( - interp::CC.AbstractInterpreter, result::CC.InferenceResult) +function Compiler.transform_result_for_cache(interp::CustomDataInterp, result::Compiler.InferenceResult) + inferred_result = @invoke Compiler.transform_result_for_cache( + interp::Compiler.AbstractInterpreter, result::Compiler.InferenceResult) return CustomData(inferred_result) end -function CC.src_inlining_policy(interp::CustomDataInterp, @nospecialize(src), - @nospecialize(info::CC.CallInfo), stmt_flag::UInt32) +function Compiler.src_inlining_policy(interp::CustomDataInterp, @nospecialize(src), + @nospecialize(info::Compiler.CallInfo), stmt_flag::UInt32) if src isa CustomData src = src.inferred end - return @invoke CC.src_inlining_policy(interp::CC.AbstractInterpreter, src::Any, - info::CC.CallInfo, stmt_flag::UInt32) + return @invoke Compiler.src_inlining_policy(interp::Compiler.AbstractInterpreter, src::Any, + info::Compiler.CallInfo, stmt_flag::UInt32) end -CC.retrieve_ir_for_inlining(cached_result::CodeInstance, src::CustomData) = - CC.retrieve_ir_for_inlining(cached_result, src.inferred) -CC.retrieve_ir_for_inlining(mi::MethodInstance, src::CustomData, preserve_local_sources::Bool) = - CC.retrieve_ir_for_inlining(mi, src.inferred, preserve_local_sources) +Compiler.retrieve_ir_for_inlining(cached_result::CodeInstance, src::CustomData) = + Compiler.retrieve_ir_for_inlining(cached_result, src.inferred) +Compiler.retrieve_ir_for_inlining(mi::MethodInstance, src::CustomData, preserve_local_sources::Bool) = + Compiler.retrieve_ir_for_inlining(mi, src.inferred, preserve_local_sources) let src = code_typed((Int,); interp=CustomDataInterp()) do x return sin(x) + cos(x) end |> only |> first diff --git a/Compiler/test/EAUtils.jl b/Compiler/test/EAUtils.jl index 4f1d1c0bba898..cec33ca265a80 100644 --- a/Compiler/test/EAUtils.jl +++ b/Compiler/test/EAUtils.jl @@ -2,7 +2,14 @@ module EAUtils export code_escapes, @code_escapes, __clear_cache! -const CC = Core.Compiler +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end + using ..EscapeAnalysis const EA = EscapeAnalysis @@ -10,19 +17,19 @@ const EA = EscapeAnalysis # ------------------- # imports -import .CC: +import .Compiler: AbstractInterpreter, NativeInterpreter, WorldView, WorldRange, InferenceParams, OptimizationParams, get_world_counter, get_inference_cache, ipo_dataflow_analysis! 
# usings using Core: CodeInstance, MethodInstance, CodeInfo -using .CC: +using .Compiler: InferenceResult, InferenceState, OptimizationState, IRCode using .EA: analyze_escapes, ArgEscapeCache, ArgEscapeInfo, EscapeInfo, EscapeState struct EAToken end -# when working outside of Core.Compiler, +# when working outside of CC, # cache entire escape state for later inspection and debugging struct EscapeCacheInfo argescapes::ArgEscapeCache @@ -59,18 +66,18 @@ mutable struct EscapeAnalyzer <: AbstractInterpreter end end -CC.InferenceParams(interp::EscapeAnalyzer) = interp.inf_params -CC.OptimizationParams(interp::EscapeAnalyzer) = interp.opt_params -CC.get_inference_world(interp::EscapeAnalyzer) = interp.world -CC.get_inference_cache(interp::EscapeAnalyzer) = interp.inf_cache -CC.cache_owner(::EscapeAnalyzer) = EAToken() -CC.get_escape_cache(interp::EscapeAnalyzer) = GetEscapeCache(interp) +Compiler.InferenceParams(interp::EscapeAnalyzer) = interp.inf_params +Compiler.OptimizationParams(interp::EscapeAnalyzer) = interp.opt_params +Compiler.get_inference_world(interp::EscapeAnalyzer) = interp.world +Compiler.get_inference_cache(interp::EscapeAnalyzer) = interp.inf_cache +Compiler.cache_owner(::EscapeAnalyzer) = EAToken() +Compiler.get_escape_cache(interp::EscapeAnalyzer) = GetEscapeCache(interp) -function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, opt::OptimizationState, +function Compiler.ipo_dataflow_analysis!(interp::EscapeAnalyzer, opt::OptimizationState, ir::IRCode, caller::InferenceResult) # run EA on all frames that have been optimized nargs = Int(opt.src.nargs) - 𝕃ₒ = CC.optimizer_lattice(interp) + 𝕃ₒ = Compiler.optimizer_lattice(interp) get_escape_cache = GetEscapeCache(interp) estate = try analyze_escapes(ir, nargs, 𝕃ₒ, get_escape_cache) @@ -82,11 +89,11 @@ function CC.ipo_dataflow_analysis!(interp::EscapeAnalyzer, opt::OptimizationStat end if caller.linfo === interp.entry_mi # return back the result - interp.result = EscapeResultForEntry(CC.copy(ir), estate, caller.linfo) + interp.result = EscapeResultForEntry(Compiler.copy(ir), estate, caller.linfo) end record_escapes!(interp, caller, estate, ir) - @invoke CC.ipo_dataflow_analysis!(interp::AbstractInterpreter, opt::OptimizationState, + @invoke Compiler.ipo_dataflow_analysis!(interp::AbstractInterpreter, opt::OptimizationState, ir::IRCode, caller::InferenceResult) end @@ -94,7 +101,7 @@ function record_escapes!(interp::EscapeAnalyzer, caller::InferenceResult, estate::EscapeState, ir::IRCode) argescapes = ArgEscapeCache(estate) ecacheinfo = EscapeCacheInfo(argescapes, estate, ir) - return CC.stack_analysis_result!(caller, ecacheinfo) + return Compiler.stack_analysis_result!(caller, ecacheinfo) end struct GetEscapeCache @@ -113,19 +120,19 @@ struct FailedAnalysis get_escape_cache::GetEscapeCache end -function CC.finish!(interp::EscapeAnalyzer, state::InferenceState; can_discard_trees::Bool=CC.may_discard_trees(interp)) - ecacheinfo = CC.traverse_analysis_results(state.result) do @nospecialize result +function Compiler.finish!(interp::EscapeAnalyzer, state::InferenceState; can_discard_trees::Bool=Compiler.may_discard_trees(interp)) + ecacheinfo = Compiler.traverse_analysis_results(state.result) do @nospecialize result return result isa EscapeCacheInfo ? 
result : nothing end ecacheinfo isa EscapeCacheInfo && (interp.escape_cache.cache[state.linfo] = ecacheinfo) - return @invoke CC.finish!(interp::AbstractInterpreter, state::InferenceState; can_discard_trees) + return @invoke Compiler.finish!(interp::AbstractInterpreter, state::InferenceState; can_discard_trees) end # printing # -------- using Core: Argument, SSAValue -using .CC: widenconst, singleton_type +using .Compiler: widenconst, singleton_type function get_name_color(x::EscapeInfo, symbol::Bool = false) getname(x) = string(nameof(x)) @@ -323,7 +330,7 @@ function code_escapes(@nospecialize(f), @nospecialize(types=Base.default_tt(f)); debuginfo::Symbol = :none) tt = Base.signature_type(f, types) match = Base._which(tt; world, raise=true) - mi = Core.Compiler.specialize_method(match) + mi = Compiler.specialize_method(match) return code_escapes(mi; world, debuginfo) end @@ -331,7 +338,7 @@ function code_escapes(mi::MethodInstance; world::UInt = get_world_counter(), interp::EscapeAnalyzer=EscapeAnalyzer(world, GLOBAL_ESCAPE_CACHE; entry_mi=mi), debuginfo::Symbol = :none) - frame = Core.Compiler.typeinf_frame(interp, mi, #=run_optimizer=#true) + frame = Compiler.typeinf_frame(interp, mi, #=run_optimizer=#true) isdefined(interp, :result) || error("optimization didn't happen: maybe everything has been constant folded?") slotnames = let src = frame.src src isa CodeInfo ? src.slotnames : nothing @@ -357,7 +364,7 @@ Note that this version does not cache the analysis results. function code_escapes(ir::IRCode, nargs::Int; world::UInt = get_world_counter(), interp::AbstractInterpreter=EscapeAnalyzer(world, EscapeCache())) - estate = analyze_escapes(ir, nargs, CC.optimizer_lattice(interp), CC.get_escape_cache(interp)) + estate = analyze_escapes(ir, nargs, Compiler.optimizer_lattice(interp), Compiler.get_escape_cache(interp)) return EscapeResult(ir, estate) # return back the result end diff --git a/Compiler/test/EscapeAnalysis.jl b/Compiler/test/EscapeAnalysis.jl index 2d9090263fafa..1831bd355cd48 100644 --- a/Compiler/test/EscapeAnalysis.jl +++ b/Compiler/test/EscapeAnalysis.jl @@ -1,15 +1,10 @@ module test_EA -global use_core_compiler::Bool = true +include("irutils.jl") -if use_core_compiler - const EscapeAnalysis = Core.Compiler.EscapeAnalysis -else - include(normpath(Sys.BINDIR, "..", "..", "Compiler", "src", "ssair", "EscapeAnalysis.jl")) -end +const EscapeAnalysis = Compiler.EscapeAnalysis include("EAUtils.jl") -include("irutils.jl") using Test, .EscapeAnalysis, .EAUtils using .EscapeAnalysis: ignore_argescape diff --git a/Compiler/test/codegen.jl b/Compiler/test/codegen.jl index 83f4001e616e7..90ec16ca3b7ac 100644 --- a/Compiler/test/codegen.jl +++ b/Compiler/test/codegen.jl @@ -6,6 +6,14 @@ using Random using InteractiveUtils using Libdl +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end + const opt_level = Base.JLOptions().opt_level const coverage = (Base.JLOptions().code_coverage > 0) || (Base.JLOptions().malloc_log > 0) const Iptr = sizeof(Int) == 8 ? 
"i64" : "i32" @@ -181,15 +189,15 @@ end breakpoint_mutable(a::MutableStruct) = ccall(:jl_breakpoint, Cvoid, (Ref{MutableStruct},), a) # Allocation with uninitialized field as gcroot -mutable struct BadRef +mutable struct BadRefMutableStruct x::MutableStruct y::MutableStruct - BadRef(x) = new(x) + BadRefMutableStruct(x) = new(x) end -Base.cconvert(::Type{Ptr{BadRef}}, a::MutableStruct) = BadRef(a) -Base.unsafe_convert(::Type{Ptr{BadRef}}, ar::BadRef) = Ptr{BadRef}(pointer_from_objref(ar.x)) +Base.cconvert(::Type{Ptr{BadRefMutableStruct}}, a::MutableStruct) = BadRefMutableStruct(a) +Base.unsafe_convert(::Type{Ptr{BadRefMutableStruct}}, ar::BadRefMutableStruct) = Ptr{BadRefMutableStruct}(pointer_from_objref(ar.x)) -breakpoint_badref(a::MutableStruct) = ccall(:jl_breakpoint, Cvoid, (Ptr{BadRef},), a) +breakpoint_badref(a::MutableStruct) = ccall(:jl_breakpoint, Cvoid, (Ptr{BadRefMutableStruct},), a) struct PtrStruct a::Ptr{Cvoid} @@ -372,10 +380,9 @@ mktemp() do f_22330, _ end # Alias scope -using Base.Experimental: @aliasscope, Const function foo31018!(a, b) - @aliasscope for i in eachindex(a, b) - a[i] = Const(b)[i] + @Base.Experimental.aliasscope for i in eachindex(a, b) + a[i] = Base.Experimental.Const(b)[i] end end io = IOBuffer() @@ -788,8 +795,8 @@ f47247(a::Ref{Int}, b::Nothing) = setfield!(a, :x, b) @test_throws TypeError f47247(Ref(5), nothing) f48085(@nospecialize x...) = length(x) -@test Core.Compiler.get_compileable_sig(which(f48085, (Vararg{Any},)), Tuple{typeof(f48085), Vararg{Int}}, Core.svec()) === nothing -@test Core.Compiler.get_compileable_sig(which(f48085, (Vararg{Any},)), Tuple{typeof(f48085), Int, Vararg{Int}}, Core.svec()) === Tuple{typeof(f48085), Any, Vararg{Any}} +@test Compiler.get_compileable_sig(which(f48085, (Vararg{Any},)), Tuple{typeof(f48085), Vararg{Int}}, Core.svec()) === nothing +@test Compiler.get_compileable_sig(which(f48085, (Vararg{Any},)), Tuple{typeof(f48085), Int, Vararg{Int}}, Core.svec()) === Tuple{typeof(f48085), Any, Vararg{Any}} # Make sure that the bounds check is elided in tuple iteration @test !occursin("call void @", strip_debug_calls(get_llvm(iterate, Tuple{NTuple{4, Float64}, Int}))) diff --git a/Compiler/test/compact.jl b/Compiler/test/compact.jl index 0ac1bce8e9324..a636ab8172d63 100644 --- a/Compiler/test/compact.jl +++ b/Compiler/test/compact.jl @@ -1,4 +1,12 @@ -using Core.Compiler: IncrementalCompact, insert_node_here!, finish, +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end + +using .Compiler: IncrementalCompact, insert_node_here!, finish, NewInstruction, verify_ir, ReturnNode, SSAValue foo_test_function(i) = i == 1 ? 1 : 2 @@ -8,19 +16,19 @@ foo_test_function(i) = i == 1 ? 
1 : 2 compact = IncrementalCompact(ir) # set up first iterator - x = Core.Compiler.iterate(compact) - x = Core.Compiler.iterate(compact, x[2]) + x = Compiler.iterate(compact) + x = Compiler.iterate(compact, x[2]) # set up second iterator - x = Core.Compiler.iterate(compact) + x = Compiler.iterate(compact) # consume remainder while x !== nothing - x = Core.Compiler.iterate(compact, x[2]) + x = Compiler.iterate(compact, x[2]) end ir = finish(compact) - @test Core.Compiler.verify_ir(ir) === nothing + @test Compiler.verify_ir(ir) === nothing end # Test early finish of IncrementalCompact @@ -40,7 +48,7 @@ end @testset "IncrementalCompact reverse affinity insert" begin ir = only(Base.code_ircode(foo_test_function, (Int,)))[1] compact = IncrementalCompact(ir) - @test !Core.Compiler.did_just_finish_bb(compact) + @test !Compiler.did_just_finish_bb(compact) insert_node_here!(compact, NewInstruction(ReturnNode(1), Union{}, ir[SSAValue(1)][:line]), true) new_ir = finish(compact) diff --git a/Compiler/test/contextual.jl b/Compiler/test/contextual.jl index c6081634d5a3b..08dc68ba42b34 100644 --- a/Compiler/test/contextual.jl +++ b/Compiler/test/contextual.jl @@ -3,6 +3,14 @@ # N.B.: This file is also run from interpreter.jl, so needs to be standalone-executable using Test +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end + # Cassette # ======== @@ -11,7 +19,8 @@ module MiniCassette # fancy features, but sufficient to exercise this code path in the compiler. using Core.IR - using Core.Compiler: retrieve_code_info, quoted, anymap + using ..Compiler + using ..Compiler: retrieve_code_info, quoted, anymap using Base.Meta: isexpr export Ctx, overdub @@ -45,7 +54,7 @@ module MiniCassette function transform!(mi::MethodInstance, ci::CodeInfo, nargs::Int, sparams::Core.SimpleVector) code = ci.code - di = Core.Compiler.DebugInfoStream(mi, ci.debuginfo, length(code)) + di = Compiler.DebugInfoStream(mi, ci.debuginfo, length(code)) ci.slotnames = Symbol[Symbol("#self#"), :ctx, :f, :args, ci.slotnames[nargs+1:end]...] ci.slotflags = UInt8[(0x00 for i = 1:4)..., ci.slotflags[nargs+1:end]...] 
# Insert one SSAValue for every argument statement @@ -82,7 +91,7 @@ module MiniCassette tt = Tuple{f, args...} match = Base._which(tt; world) - mi = Core.Compiler.specialize_method(match) + mi = Base.specialize_method(match) # Unsupported in this mini-cassette @assert !mi.def.isva src = retrieve_code_info(mi, world) diff --git a/Compiler/test/datastructures.jl b/Compiler/test/datastructures.jl index f3f862c49ea77..6b37d7c89e684 100644 --- a/Compiler/test/datastructures.jl +++ b/Compiler/test/datastructures.jl @@ -1,24 +1,32 @@ using Test +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end + @testset "CachedMethodTable" begin # cache result should be separated per `limit` and `sig` # https://github.com/JuliaLang/julia/pull/46799 - interp = Core.Compiler.NativeInterpreter() - table = Core.Compiler.method_table(interp) + interp = Compiler.NativeInterpreter() + table = Compiler.method_table(interp) sig = Tuple{typeof(*), Any, Any} - result1 = Core.Compiler.findall(sig, table; limit=-1) - result2 = Core.Compiler.findall(sig, table; limit=Core.Compiler.InferenceParams().max_methods) - @test result1 !== nothing && !Core.Compiler.isempty(result1) + result1 = Compiler.findall(sig, table; limit=-1) + result2 = Compiler.findall(sig, table; limit=Compiler.InferenceParams().max_methods) + @test result1 !== nothing && !Compiler.isempty(result1) @test result2 === nothing end @testset "BitSetBoundedMinPrioritySet" begin - bsbmp = Core.Compiler.BitSetBoundedMinPrioritySet(5) - Core.Compiler.push!(bsbmp, 2) - Core.Compiler.push!(bsbmp, 2) + bsbmp = Compiler.BitSetBoundedMinPrioritySet(5) + Compiler.push!(bsbmp, 2) + Compiler.push!(bsbmp, 2) iterateok = true cnt = 0 - @eval Core.Compiler for v in $bsbmp + @eval Compiler for v in $bsbmp if cnt == 0 iterateok &= v == 2 elseif cnt == 1 @@ -29,37 +37,37 @@ end cnt += 1 end @test iterateok - @test Core.Compiler.popfirst!(bsbmp) == 2 - Core.Compiler.push!(bsbmp, 1) - @test Core.Compiler.popfirst!(bsbmp) == 1 - @test Core.Compiler.isempty(bsbmp) + @test Compiler.popfirst!(bsbmp) == 2 + Compiler.push!(bsbmp, 1) + @test Compiler.popfirst!(bsbmp) == 1 + @test Compiler.isempty(bsbmp) end @testset "basic heap functionality" begin v = [2,3,1] - @test Core.Compiler.heapify!(v, Core.Compiler.Forward) === v - @test Core.Compiler.heappop!(v, Core.Compiler.Forward) === 1 - @test Core.Compiler.heappush!(v, 4, Core.Compiler.Forward) === v - @test Core.Compiler.heappop!(v, Core.Compiler.Forward) === 2 - @test Core.Compiler.heappop!(v, Core.Compiler.Forward) === 3 - @test Core.Compiler.heappop!(v, Core.Compiler.Forward) === 4 + @test Compiler.heapify!(v, Compiler.Forward) === v + @test Compiler.heappop!(v, Compiler.Forward) === 1 + @test Compiler.heappush!(v, 4, Compiler.Forward) === v + @test Compiler.heappop!(v, Compiler.Forward) === 2 + @test Compiler.heappop!(v, Compiler.Forward) === 3 + @test Compiler.heappop!(v, Compiler.Forward) === 4 end @testset "randomized heap correctness tests" begin - order = Core.Compiler.By(x -> -x[2]) + order = Compiler.By(x -> -x[2]) for i in 1:6 heap = Tuple{Int, Int}[(rand(1:i), rand(1:i)) for _ in 1:2i] mock = copy(heap) - @test Core.Compiler.heapify!(heap, order) === heap + @test Compiler.heapify!(heap, order) === heap sort!(mock, by=last) for _ in 1:6i if rand() < .5 && !isempty(heap) # The first entries may differ because heaps are not stable - @test last(Core.Compiler.heappop!(heap, order)) === last(pop!(mock)) + @test 
last(Compiler.heappop!(heap, order)) === last(pop!(mock)) else new = (rand(1:i), rand(1:i)) - Core.Compiler.heappush!(heap, new, order) + Compiler.heappush!(heap, new, order) push!(mock, new) sort!(mock, by=last) end @@ -68,29 +76,29 @@ end end @testset "searchsorted" begin - @test Core.Compiler.searchsorted([1, 1, 2, 2, 3, 3], 0) === Core.Compiler.UnitRange(1, 0) - @test Core.Compiler.searchsorted([1, 1, 2, 2, 3, 3], 1) === Core.Compiler.UnitRange(1, 2) - @test Core.Compiler.searchsorted([1, 1, 2, 2, 3, 3], 2) === Core.Compiler.UnitRange(3, 4) - @test Core.Compiler.searchsorted([1, 1, 2, 2, 3, 3], 4) === Core.Compiler.UnitRange(7, 6) - @test Core.Compiler.searchsorted([1, 1, 2, 2, 3, 3], 2.5; lt=<) === Core.Compiler.UnitRange(5, 4) + @test Compiler.searchsorted([1, 1, 2, 2, 3, 3], 0) === Compiler.UnitRange(1, 0) + @test Compiler.searchsorted([1, 1, 2, 2, 3, 3], 1) === Compiler.UnitRange(1, 2) + @test Compiler.searchsorted([1, 1, 2, 2, 3, 3], 2) === Compiler.UnitRange(3, 4) + @test Compiler.searchsorted([1, 1, 2, 2, 3, 3], 4) === Compiler.UnitRange(7, 6) + @test Compiler.searchsorted([1, 1, 2, 2, 3, 3], 2.5; lt=<) === Compiler.UnitRange(5, 4) - @test Core.Compiler.searchsorted(Core.Compiler.UnitRange(1, 3), 0) === Core.Compiler.UnitRange(1, 0) - @test Core.Compiler.searchsorted(Core.Compiler.UnitRange(1, 3), 1) === Core.Compiler.UnitRange(1, 1) - @test Core.Compiler.searchsorted(Core.Compiler.UnitRange(1, 3), 2) === Core.Compiler.UnitRange(2, 2) - @test Core.Compiler.searchsorted(Core.Compiler.UnitRange(1, 3), 4) === Core.Compiler.UnitRange(4, 3) + @test Compiler.searchsorted(Compiler.UnitRange(1, 3), 0) === Compiler.UnitRange(1, 0) + @test Compiler.searchsorted(Compiler.UnitRange(1, 3), 1) === Compiler.UnitRange(1, 1) + @test Compiler.searchsorted(Compiler.UnitRange(1, 3), 2) === Compiler.UnitRange(2, 2) + @test Compiler.searchsorted(Compiler.UnitRange(1, 3), 4) === Compiler.UnitRange(4, 3) - @test Core.Compiler.searchsorted([1:10;], 1, by=(x -> x >= 5)) === Core.Compiler.UnitRange(1, 4) - @test Core.Compiler.searchsorted([1:10;], 10, by=(x -> x >= 5)) === Core.Compiler.UnitRange(5, 10) - @test Core.Compiler.searchsorted([1:5; 1:5; 1:5], 1, 6, 10, Core.Compiler.Forward) === Core.Compiler.UnitRange(6, 6) - @test Core.Compiler.searchsorted(fill(1, 15), 1, 6, 10, Core.Compiler.Forward) === Core.Compiler.UnitRange(6, 10) + @test Compiler.searchsorted([1:10;], 1, by=(x -> x >= 5)) === Compiler.UnitRange(1, 4) + @test Compiler.searchsorted([1:10;], 10, by=(x -> x >= 5)) === Compiler.UnitRange(5, 10) + @test Compiler.searchsorted([1:5; 1:5; 1:5], 1, 6, 10, Compiler.Forward) === Compiler.UnitRange(6, 6) + @test Compiler.searchsorted(fill(1, 15), 1, 6, 10, Compiler.Forward) === Compiler.UnitRange(6, 10) - for (rg,I) in Any[(Core.Compiler.UnitRange(49, 57), 47:59), - (Core.Compiler.StepRange(1, 2, 17), -1:19)] - rg_r = Core.Compiler.reverse(rg) - rgv, rgv_r = Core.Compiler.collect(rg), Core.Compiler.collect(rg_r) + for (rg,I) in Any[(Compiler.UnitRange(49, 57), 47:59), + (Compiler.StepRange(1, 2, 17), -1:19)] + rg_r = Compiler.reverse(rg) + rgv, rgv_r = Compiler.collect(rg), Compiler.collect(rg_r) for i = I - @test Core.Compiler.searchsorted(rg,i) === Core.Compiler.searchsorted(rgv,i) - @test Core.Compiler.searchsorted(rg_r,i,rev=true) === Core.Compiler.searchsorted(rgv_r,i,rev=true) + @test Compiler.searchsorted(rg,i) === Compiler.searchsorted(rgv,i) + @test Compiler.searchsorted(rg_r,i,rev=true) === Compiler.searchsorted(rgv_r,i,rev=true) end end end @@ -98,16 +106,16 @@ end @testset "basic sort" 
begin v = [3,1,2] @test v == [3,1,2] - @test Core.Compiler.sort!(v) === v == [1,2,3] - @test Core.Compiler.sort!(v, by = x -> -x) === v == [3,2,1] - @test Core.Compiler.sort!(v, by = x -> -x, < = >) === v == [1,2,3] + @test Compiler.sort!(v) === v == [1,2,3] + @test Compiler.sort!(v, by = x -> -x) === v == [3,2,1] + @test Compiler.sort!(v, by = x -> -x, < = >) === v == [1,2,3] end @testset "randomized sorting tests" begin for n in [0, 1, 3, 10, 30, 100, 300], k in [0, 30, 2n] v = rand(-1:k, n) for by in [identity, x -> -x, x -> x^2 + .1x], lt in [<, >] - @test sort(v; by, lt) == Core.Compiler.sort!(copy(v); by, < = lt) + @test sort(v; by, lt) == Compiler.sort!(copy(v); by, < = lt) end end end diff --git a/Compiler/test/effects.jl b/Compiler/test/effects.jl index bc9bc7e2295fe..e4677daf0c483 100644 --- a/Compiler/test/effects.jl +++ b/Compiler/test/effects.jl @@ -6,7 +6,7 @@ function f_apply_bail(f) f(()...) return nothing end -@test !Core.Compiler.is_removable_if_unused(Base.infer_effects(f_apply_bail)) +@test !Compiler.is_removable_if_unused(Base.infer_effects(f_apply_bail)) @test !fully_eliminated((Function,)) do f f_apply_bail(f) nothing @@ -16,10 +16,10 @@ end # up the effects of the function being analyzed f_throws() = error() @noinline function return_type_unused(x) - Core.Compiler.return_type(f_throws, Tuple{}) + Compiler.return_type(f_throws, Tuple{}) return x+1 end -@test Core.Compiler.is_removable_if_unused(Base.infer_effects(return_type_unused, (Int,))) +@test Compiler.is_removable_if_unused(Base.infer_effects(return_type_unused, (Int,))) @test fully_eliminated((Int,)) do x return_type_unused(x) return nothing @@ -29,7 +29,7 @@ end ambig_effects_test(a::Int, b) = 1 ambig_effects_test(a, b::Int) = 1 ambig_effects_test(a, b) = 1 -@test !Core.Compiler.is_nothrow(Base.infer_effects(ambig_effects_test, (Int, Any))) +@test !Compiler.is_nothrow(Base.infer_effects(ambig_effects_test, (Int, Any))) global ambig_unknown_type_global::Any = 1 @noinline function conditionally_call_ambig(b::Bool, a) if b @@ -46,7 +46,7 @@ end # appropriately struct FCallback; f::Union{Nothing, Function}; end f_invoke_callback(fc) = let f=fc.f; (f !== nothing && f(); nothing); end -@test !Core.Compiler.is_removable_if_unused(Base.infer_effects(f_invoke_callback, (FCallback,))) +@test !Compiler.is_removable_if_unused(Base.infer_effects(f_invoke_callback, (FCallback,))) @test !fully_eliminated((FCallback,)) do fc f_invoke_callback(fc) return nothing @@ -79,7 +79,7 @@ Base.@assume_effects :terminates_globally function issue41694(x) end return res end -@test Core.Compiler.is_foldable(Base.infer_effects(issue41694, (Int,))) +@test Compiler.is_foldable(Base.infer_effects(issue41694, (Int,))) @test fully_eliminated() do issue41694(2) end @@ -89,8 +89,8 @@ Base.@assume_effects :terminates_globally function recur_termination1(x) 0 ≤ x < 20 || error("bad fact") return x * recur_termination1(x-1) end -@test Core.Compiler.is_foldable(Base.infer_effects(recur_termination1, (Int,))) -@test Core.Compiler.is_terminates(Base.infer_effects(recur_termination1, (Int,))) +@test Compiler.is_foldable(Base.infer_effects(recur_termination1, (Int,))) +@test Compiler.is_terminates(Base.infer_effects(recur_termination1, (Int,))) function recur_termination2() Base.@assume_effects :total !:terminates_globally recur_termination1(12) @@ -104,10 +104,10 @@ Base.@assume_effects :terminates_globally function recur_termination21(x) return recur_termination22(x) end recur_termination22(x) = x * recur_termination21(x-1) -@test 
Core.Compiler.is_foldable(Base.infer_effects(recur_termination21, (Int,))) -@test Core.Compiler.is_foldable(Base.infer_effects(recur_termination22, (Int,))) -@test Core.Compiler.is_terminates(Base.infer_effects(recur_termination21, (Int,))) -@test Core.Compiler.is_terminates(Base.infer_effects(recur_termination22, (Int,))) +@test Compiler.is_foldable(Base.infer_effects(recur_termination21, (Int,))) +@test Compiler.is_foldable(Base.infer_effects(recur_termination22, (Int,))) +@test Compiler.is_terminates(Base.infer_effects(recur_termination21, (Int,))) +@test Compiler.is_terminates(Base.infer_effects(recur_termination22, (Int,))) function recur_termination2x() Base.@assume_effects :total !:terminates_globally recur_termination21(12) + recur_termination22(12) @@ -133,61 +133,61 @@ end # control flow backedge should taint `terminates` @test Base.infer_effects((Int,)) do n for i = 1:n; end -end |> !Core.Compiler.is_terminates +end |> !Compiler.is_terminates # interprocedural-recursion should taint `terminates` **appropriately** function sumrecur(a, x) isempty(a) && return x return sumrecur(Base.tail(a), x + first(a)) end -@test Base.infer_effects(sumrecur, (Tuple{Int,Int,Int},Int)) |> Core.Compiler.is_terminates -@test Base.infer_effects(sumrecur, (Tuple{Int,Int,Int,Vararg{Int}},Int)) |> !Core.Compiler.is_terminates +@test Base.infer_effects(sumrecur, (Tuple{Int,Int,Int},Int)) |> Compiler.is_terminates +@test Base.infer_effects(sumrecur, (Tuple{Int,Int,Int,Vararg{Int}},Int)) |> !Compiler.is_terminates # https://github.com/JuliaLang/julia/issues/45781 @test Base.infer_effects((Float32,)) do a out1 = promote_type(Irrational{:π}, Bool) out2 = sin(a) out1, out2 -end |> Core.Compiler.is_terminates +end |> Compiler.is_terminates # refine :consistent-cy effect inference using the return type information @test Base.infer_effects((Any,)) do x taint = Ref{Any}(x) # taints :consistent-cy, but will be adjusted throw(taint) -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent @test Base.infer_effects((Int,)) do x if x < 0 taint = Ref(x) # taints :consistent-cy, but will be adjusted throw(DomainError(x, taint)) end return nothing -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent @test Base.infer_effects((Int,)) do x if x < 0 taint = Ref(x) # taints :consistent-cy, but will be adjusted throw(DomainError(x, taint)) end return x == 0 ? nothing : x # should `Union` of isbitstype objects nicely -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent @test Base.infer_effects((Symbol,Any)) do s, x if s === :throw taint = Ref{Any}(":throw option given") # taints :consistent-cy, but will be adjusted throw(taint) end return s # should handle `Symbol` nicely -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent @test Base.infer_effects((Int,)) do x return Ref(x) -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent @test Base.infer_effects((Int,)) do x return x < 0 ? 
Ref(x) : nothing -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent @test Base.infer_effects((Int,)) do x if x < 0 throw(DomainError(x, lazy"$x is negative")) end return nothing -end |> Core.Compiler.is_foldable +end |> Compiler.is_foldable # :the_exception expression should taint :consistent-cy global inconsistent_var::Int = 42 @@ -201,7 +201,7 @@ function catch_inconsistent() err end end -@test !Core.Compiler.is_consistent(Base.infer_effects(catch_inconsistent)) +@test !Compiler.is_consistent(Base.infer_effects(catch_inconsistent)) cache_inconsistent() = catch_inconsistent() function compare_inconsistent() a = cache_inconsistent() @@ -221,7 +221,7 @@ function catch_inconsistent(x::T) where T end return v end -@test !Core.Compiler.is_consistent(Base.infer_effects(catch_inconsistent, (Int,))) +@test !Compiler.is_consistent(Base.infer_effects(catch_inconsistent, (Int,))) cache_inconsistent(x) = catch_inconsistent(x) function compare_inconsistent(x::T) where T x = one(T) @@ -234,7 +234,7 @@ end @test !compare_inconsistent(3) # Effect modeling for Core.compilerbarrier -@test Base.infer_effects(Base.inferencebarrier, Tuple{Any}) |> Core.Compiler.is_removable_if_unused +@test Base.infer_effects(Base.inferencebarrier, Tuple{Any}) |> Compiler.is_removable_if_unused # effects modeling for allocation/access of uninitialized fields struct Maybe{T} @@ -249,19 +249,19 @@ struct SyntacticallyDefined{T} end @test Base.infer_effects() do Maybe{Int}() -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent @test Base.infer_effects() do Maybe{Int}()[] -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent @test !fully_eliminated() do Maybe{Int}()[] end @test Base.infer_effects() do Maybe{String}() -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent @test Base.infer_effects() do Maybe{String}()[] -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent let f() = Maybe{String}()[] @test Base.return_types() do f() # this call should be concrete evaluated @@ -269,16 +269,16 @@ let f() = Maybe{String}()[] end @test Base.infer_effects() do Ref{Int}() -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent @test Base.infer_effects() do Ref{Int}()[] -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent @test !fully_eliminated() do Ref{Int}()[] end @test Base.infer_effects() do Ref{String}()[] -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent let f() = Ref{String}()[] @test Base.return_types() do f() # this call should be concrete evaluated @@ -286,7 +286,7 @@ let f() = Ref{String}()[] end @test Base.infer_effects((SyntacticallyDefined{Float64}, Symbol)) do w, s getfield(w, s) -end |> Core.Compiler.is_foldable +end |> Compiler.is_foldable # effects propagation for `Core.invoke` calls # https://github.com/JuliaLang/julia/issues/44763 @@ -307,7 +307,7 @@ function A1_inbounds() end return r end -@test !Core.Compiler.is_consistent(Base.infer_effects(A1_inbounds)) +@test !Compiler.is_consistent(Base.infer_effects(A1_inbounds)) # Test that purity doesn't try to accidentally run unreachable code due to # boundscheck elimination @@ -317,7 +317,7 @@ function f_boundscheck_elim(n) # to run the `@inbounds getfield(sin, 1)` that `ntuple` generates. 
ntuple(x->(@inbounds ()[x]), n) end -@test !Core.Compiler.is_noub(Base.infer_effects(f_boundscheck_elim, (Int,))) +@test !Compiler.is_noub(Base.infer_effects(f_boundscheck_elim, (Int,))) @test Tuple{} <: only(Base.return_types(f_boundscheck_elim, (Int,))) # Test that purity modeling doesn't accidentally introduce new world age issues @@ -343,36 +343,36 @@ function entry_to_be_invalidated(c) end @test Base.infer_effects((Char,)) do x entry_to_be_invalidated(x) -end |> Core.Compiler.is_foldable +end |> Compiler.is_foldable @test fully_eliminated(; retval=97) do entry_to_be_invalidated('a') end getcharid(c) = CONST_DICT[c] # now this is not eligible for concrete evaluation @test Base.infer_effects((Char,)) do x entry_to_be_invalidated(x) -end |> !Core.Compiler.is_foldable +end |> !Compiler.is_foldable @test !fully_eliminated() do entry_to_be_invalidated('a') end -@test !Core.Compiler.builtin_nothrow(Core.Compiler.fallback_lattice, Core.get_binding_type, Any[Rational{Int}, Core.Const(:foo)], Any) +@test !Compiler.builtin_nothrow(Compiler.fallback_lattice, Core.get_binding_type, Any[Rational{Int}, Core.Const(:foo)], Any) # effects modeling for assignment to globals global glob_assign_int::Int = 0 f_glob_assign_int() = global glob_assign_int = 1 let effects = Base.infer_effects(f_glob_assign_int, (); optimize=false) - @test Core.Compiler.is_consistent(effects) - @test !Core.Compiler.is_effect_free(effects) - @test Core.Compiler.is_nothrow(effects) + @test Compiler.is_consistent(effects) + @test !Compiler.is_effect_free(effects) + @test Compiler.is_nothrow(effects) end # effects modeling for for setglobal! global SETGLOBAL!_NOTHROW::Int = 0 let effects = Base.infer_effects(; optimize=false) do setglobal!(@__MODULE__, :SETGLOBAL!_NOTHROW, 42) end - @test Core.Compiler.is_consistent(effects) - @test !Core.Compiler.is_effect_free(effects) - @test Core.Compiler.is_nothrow(effects) + @test Compiler.is_consistent(effects) + @test !Compiler.is_effect_free(effects) + @test Compiler.is_nothrow(effects) end # we should taint `nothrow` if the binding doesn't exist and isn't fixed yet, @@ -383,23 +383,23 @@ setglobal!_nothrow_undefinedyet() = setglobal!(@__MODULE__, :UNDEFINEDYET, 42) let effects = Base.infer_effects() do global_assignment_undefinedyet() end - @test !Core.Compiler.is_nothrow(effects) + @test !Compiler.is_nothrow(effects) end let effects = Base.infer_effects() do setglobal!_nothrow_undefinedyet() end - @test !Core.Compiler.is_nothrow(effects) + @test !Compiler.is_nothrow(effects) end global UNDEFINEDYET::String = "0" let effects = Base.infer_effects() do global_assignment_undefinedyet() end - @test !Core.Compiler.is_nothrow(effects) + @test !Compiler.is_nothrow(effects) end let effects = Base.infer_effects() do setglobal!_nothrow_undefinedyet() end - @test !Core.Compiler.is_nothrow(effects) + @test !Compiler.is_nothrow(effects) end @test_throws Union{ErrorException,TypeError} setglobal!_nothrow_undefinedyet() # TODO: what kind of error should this be? 
@@ -409,70 +409,70 @@ mutable struct SetfieldNothrow end f_setfield_nothrow() = SetfieldNothrow(0).x = 1 let effects = Base.infer_effects(f_setfield_nothrow, ()) - @test Core.Compiler.is_nothrow(effects) - @test Core.Compiler.is_effect_free(effects) # see EFFECT_FREE_IF_INACCESSIBLEMEMONLY + @test Compiler.is_nothrow(effects) + @test Compiler.is_effect_free(effects) # see EFFECT_FREE_IF_INACCESSIBLEMEMONLY end # even if 2-arg `getfield` may throw, it should be still `:consistent` -@test Core.Compiler.is_consistent(Base.infer_effects(getfield, (NTuple{5, Float64}, Int))) +@test Compiler.is_consistent(Base.infer_effects(getfield, (NTuple{5, Float64}, Int))) # SimpleVector allocation is consistent -@test Core.Compiler.is_consistent(Base.infer_effects(Core.svec)) +@test Compiler.is_consistent(Base.infer_effects(Core.svec)) @test Base.infer_effects() do Core.svec(nothing, 1, "foo") -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent # fastmath operations are in-`:consistent` -@test !Core.Compiler.is_consistent(Base.infer_effects((a,b)->@fastmath(a+b), (Float64,Float64))) +@test !Compiler.is_consistent(Base.infer_effects((a,b)->@fastmath(a+b), (Float64,Float64))) # issue 46122: @assume_effects for @ccall @test Base.infer_effects((Vector{Int},)) do a Base.@assume_effects :effect_free @ccall this_call_does_not_really_exist(a::Any)::Ptr{Int} -end |> Core.Compiler.is_effect_free +end |> Compiler.is_effect_free # `getfield_effects` handles access to union object nicely -let 𝕃 = Core.Compiler.fallback_lattice - getfield_effects = Core.Compiler.getfield_effects - @test Core.Compiler.is_consistent(getfield_effects(𝕃, Any[Some{String}, Core.Const(:value)], String)) - @test Core.Compiler.is_consistent(getfield_effects(𝕃, Any[Some{Symbol}, Core.Const(:value)], Symbol)) - @test Core.Compiler.is_consistent(getfield_effects(𝕃, Any[Union{Some{Symbol},Some{String}}, Core.Const(:value)], Union{Symbol,String})) +let 𝕃 = Compiler.fallback_lattice + getfield_effects = Compiler.getfield_effects + @test Compiler.is_consistent(getfield_effects(𝕃, Any[Some{String}, Core.Const(:value)], String)) + @test Compiler.is_consistent(getfield_effects(𝕃, Any[Some{Symbol}, Core.Const(:value)], Symbol)) + @test Compiler.is_consistent(getfield_effects(𝕃, Any[Union{Some{Symbol},Some{String}}, Core.Const(:value)], Union{Symbol,String})) end @test Base.infer_effects((Bool,)) do c obj = c ? 
Some{String}("foo") : Some{Symbol}(:bar) return getfield(obj, :value) -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent # getfield is nothrow when bounds checking is turned off @test Base.infer_effects((Tuple{Int,Int},Int)) do t, i getfield(t, i, false) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Tuple{Int,Int},Symbol)) do t, i getfield(t, i, false) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Tuple{Int,Int},String)) do t, i getfield(t, i, false) # invalid name type -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects((Some{Any},)) do some getfield(some, 1, :not_atomic) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Some{Any},)) do some getfield(some, 1, :invalid_atomic_spec) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects((Some{Any},Bool)) do some, boundscheck getfield(some, 1, boundscheck) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Some{Any},Bool)) do some, boundscheck getfield(some, 1, :not_atomic, boundscheck) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Some{Any},Bool)) do some, boundscheck getfield(some, 1, :invalid_atomic_spec, boundscheck) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects((Some{Any},Any)) do some, boundscheck getfield(some, 1, :not_atomic, boundscheck) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow -@test Core.Compiler.is_consistent(Base.infer_effects(setindex!, (Base.RefValue{Int}, Int))) +@test Compiler.is_consistent(Base.infer_effects(setindex!, (Base.RefValue{Int}, Int))) # :inaccessiblememonly effect const global constant_global::Int = 42 @@ -482,68 +482,68 @@ const global constant_mutable_global = Ref(0) const global constant_global_nonisbits = Some(:foo) @test Base.infer_effects() do constant_global -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do ConstantType -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do ConstantType{Any}() -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do constant_global_nonisbits -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do getglobal(@__MODULE__, :constant_global) -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do nonconstant_global -end |> !Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly @test Base.infer_effects() do getglobal(@__MODULE__, :nonconstant_global) -end |> !Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly @test Base.infer_effects((Symbol,)) do name getglobal(@__MODULE__, name) -end |> !Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly @test Base.infer_effects((Int,)) do v global nonconstant_global = v -end |> !Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly @test Base.infer_effects((Int,)) do v setglobal!(@__MODULE__, :nonconstant_global, v) -end |> !Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly @test Base.infer_effects((Int,)) do v constant_mutable_global[] = v -end |> 
!Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly module ConsistentModule const global constant_global::Int = 42 const global ConstantType = Ref end # module @test Base.infer_effects() do ConsistentModule.constant_global -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do ConsistentModule.ConstantType -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do ConsistentModule.ConstantType{Any}() -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do getglobal(@__MODULE__, :ConsistentModule).constant_global -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do getglobal(@__MODULE__, :ConsistentModule).ConstantType -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects() do getglobal(@__MODULE__, :ConsistentModule).ConstantType{Any}() -end |> Core.Compiler.is_inaccessiblememonly +end |> Compiler.is_inaccessiblememonly @test Base.infer_effects((Module,)) do M M.constant_global -end |> !Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly @test Base.infer_effects((Module,)) do M M.ConstantType -end |> !Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly @test Base.infer_effects() do M M.ConstantType{Any}() -end |> !Core.Compiler.is_inaccessiblememonly +end |> !Compiler.is_inaccessiblememonly # the `:inaccessiblememonly` helper effect allows us to prove `:consistent`-cy of frames # including `getfield` / `isdefined` accessing to local mutable object @@ -558,7 +558,7 @@ Base.isassigned(x::SafeRef) = true; function mutable_consistent(s) SafeRef(s)[] end -@test Core.Compiler.is_inaccessiblememonly(Base.infer_effects(mutable_consistent, (Symbol,))) +@test Compiler.is_inaccessiblememonly(Base.infer_effects(mutable_consistent, (Symbol,))) @test fully_eliminated(; retval=:foo) do mutable_consistent(:foo) end @@ -566,7 +566,7 @@ end function nested_mutable_consistent(s) SafeRef(SafeRef(SafeRef(SafeRef(SafeRef(s)))))[][][][][] end -@test Core.Compiler.is_inaccessiblememonly(Base.infer_effects(nested_mutable_consistent, (Symbol,))) +@test Compiler.is_inaccessiblememonly(Base.infer_effects(nested_mutable_consistent, (Symbol,))) @test fully_eliminated(; retval=:foo) do nested_mutable_consistent(:foo) end @@ -574,11 +574,11 @@ end const consistent_global = Some(:foo) @test Base.infer_effects() do consistent_global.value -end |> Core.Compiler.is_consistent +end |> Compiler.is_consistent const inconsistent_global = SafeRef(:foo) @test Base.infer_effects() do inconsistent_global[] -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent const inconsistent_condition_ref = Ref{Bool}(false) @test Base.infer_effects() do if inconsistent_condition_ref[] @@ -586,11 +586,11 @@ const inconsistent_condition_ref = Ref{Bool}(false) else return 1 end -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent # should handle va-method properly callgetfield1(xs...) 
= getfield(getfield(xs, 1), 1) -@test !Core.Compiler.is_inaccessiblememonly(Base.infer_effects(callgetfield1, (Base.RefValue{Symbol},))) +@test !Compiler.is_inaccessiblememonly(Base.infer_effects(callgetfield1, (Base.RefValue{Symbol},))) const GLOBAL_XS = Ref(:julia) global_getfield() = callgetfield1(GLOBAL_XS) @test let @@ -623,9 +623,9 @@ end end for f = Any[removable_if_unused1, removable_if_unused2] effects = Base.infer_effects(f) - @test Core.Compiler.is_inaccessiblememonly(effects) - @test Core.Compiler.is_effect_free(effects) - @test Core.Compiler.is_removable_if_unused(effects) + @test Compiler.is_inaccessiblememonly(effects) + @test Compiler.is_effect_free(effects) + @test Compiler.is_removable_if_unused(effects) @test @eval fully_eliminated() do $f() nothing @@ -637,9 +637,9 @@ end x end let effects = Base.infer_effects(removable_if_unused3, (Int,)) - @test Core.Compiler.is_inaccessiblememonly(effects) - @test Core.Compiler.is_effect_free(effects) - @test Core.Compiler.is_removable_if_unused(effects) + @test Compiler.is_inaccessiblememonly(effects) + @test Compiler.is_effect_free(effects) + @test Compiler.is_removable_if_unused(effects) end @test fully_eliminated((Int,)) do v removable_if_unused3(v) @@ -649,18 +649,18 @@ end @noinline function unremovable_if_unused1!(x) setref!(x, 42) end -@test !Core.Compiler.is_removable_if_unused(Base.infer_effects(unremovable_if_unused1!, (typeof(global_ref),))) -@test !Core.Compiler.is_removable_if_unused(Base.infer_effects(unremovable_if_unused1!, (Any,))) +@test !Compiler.is_removable_if_unused(Base.infer_effects(unremovable_if_unused1!, (typeof(global_ref),))) +@test !Compiler.is_removable_if_unused(Base.infer_effects(unremovable_if_unused1!, (Any,))) @noinline function unremovable_if_unused2!() setref!(global_ref, 42) end -@test !Core.Compiler.is_removable_if_unused(Base.infer_effects(unremovable_if_unused2!)) +@test !Compiler.is_removable_if_unused(Base.infer_effects(unremovable_if_unused2!)) @noinline function unremovable_if_unused3!() getfield(@__MODULE__, :global_ref)[] = nothing end -@test !Core.Compiler.is_removable_if_unused(Base.infer_effects(unremovable_if_unused3!)) +@test !Compiler.is_removable_if_unused(Base.infer_effects(unremovable_if_unused3!)) # array ops # ========= @@ -678,7 +678,7 @@ let good_dims = [1, 2, 3, 4, 10] dims = ntuple(i->dim, N) @test @eval Base.infer_effects() do construct_array(Int, $(dims...)) - end |> Core.Compiler.is_removable_if_unused + end |> Compiler.is_removable_if_unused @test @eval fully_eliminated() do construct_array(Int, $(dims...)) nothing @@ -692,7 +692,7 @@ let bad_dims = [-1, typemax(Int)] dims = ntuple(i->dim, N) @test @eval Base.infer_effects() do construct_array($T, $(dims...)) - end |> !Core.Compiler.is_removable_if_unused + end |> !Compiler.is_removable_if_unused @test @eval !fully_eliminated() do construct_array($T, $(dims...)) nothing @@ -716,8 +716,8 @@ for safesig = Any[ (Type{Any}, Any, Any) ] let effects = Base.infer_effects(getindex, safesig) - @test Core.Compiler.is_consistent_if_notreturned(effects) - @test Core.Compiler.is_removable_if_unused(effects) + @test Compiler.is_consistent_if_notreturned(effects) + @test Compiler.is_removable_if_unused(effects) end end for unsafesig = Any[ @@ -727,7 +727,7 @@ for unsafesig = Any[ (Type{Number}, Any) ] let effects = Base.infer_effects(getindex, unsafesig) - @test !Core.Compiler.is_nothrow(effects) + @test !Compiler.is_nothrow(effects) end end # vect @@ -737,53 +737,53 @@ for safesig = Any[ (Int, Int) ] let effects = 
Base.infer_effects(Base.vect, safesig) - @test Core.Compiler.is_consistent_if_notreturned(effects) - @test Core.Compiler.is_removable_if_unused(effects) + @test Compiler.is_consistent_if_notreturned(effects) + @test Compiler.is_removable_if_unused(effects) end end # array getindex let tt = (MemoryRef{Any},Symbol,Bool) @testset let effects = Base.infer_effects(Core.memoryrefget, tt) - @test Core.Compiler.is_consistent_if_inaccessiblememonly(effects) - @test Core.Compiler.is_effect_free(effects) - @test !Core.Compiler.is_nothrow(effects) - @test Core.Compiler.is_terminates(effects) + @test Compiler.is_consistent_if_inaccessiblememonly(effects) + @test Compiler.is_effect_free(effects) + @test !Compiler.is_nothrow(effects) + @test Compiler.is_terminates(effects) end end # array setindex! let tt = (MemoryRef{Any},Any,Symbol,Bool) @testset let effects = Base.infer_effects(Core.memoryrefset!, tt) - @test Core.Compiler.is_consistent_if_inaccessiblememonly(effects) - @test Core.Compiler.is_effect_free_if_inaccessiblememonly(effects) - @test !Core.Compiler.is_nothrow(effects) - @test Core.Compiler.is_terminates(effects) + @test Compiler.is_consistent_if_inaccessiblememonly(effects) + @test Compiler.is_effect_free_if_inaccessiblememonly(effects) + @test !Compiler.is_nothrow(effects) + @test Compiler.is_terminates(effects) end end # nothrow for arrayset @test Base.infer_effects((MemoryRef{Int},Int)) do a, v Core.memoryrefset!(a, v, :not_atomic, true) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects((MemoryRef{Int},Int)) do a, v a[] = v # may throw -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow # when bounds checking is turned off, it should be safe @test Base.infer_effects((MemoryRef{Int},Int)) do a, v Core.memoryrefset!(a, v, :not_atomic, false) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((MemoryRef{Number},Number)) do a, v Core.memoryrefset!(a, v, :not_atomic, false) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow # arraysize # --------- let effects = Base.infer_effects(size, (Array,Int)) - @test Core.Compiler.is_consistent_if_inaccessiblememonly(effects) - @test Core.Compiler.is_effect_free(effects) - @test !Core.Compiler.is_nothrow(effects) - @test Core.Compiler.is_terminates(effects) + @test Compiler.is_consistent_if_inaccessiblememonly(effects) + @test Compiler.is_effect_free(effects) + @test !Compiler.is_nothrow(effects) + @test Compiler.is_terminates(effects) end # Test that arraysize has proper effect modeling @test fully_eliminated(M->(size(M, 2); nothing), (Matrix{Float64},)) @@ -792,10 +792,10 @@ end # -------- let effects = Base.infer_effects(length, (Vector{Any},)) - @test Core.Compiler.is_consistent_if_inaccessiblememonly(effects) - @test Core.Compiler.is_effect_free(effects) - @test Core.Compiler.is_nothrow(effects) - @test Core.Compiler.is_terminates(effects) + @test Compiler.is_consistent_if_inaccessiblememonly(effects) + @test Compiler.is_effect_free(effects) + @test Compiler.is_nothrow(effects) + @test Compiler.is_terminates(effects) end # resize @@ -808,21 +808,21 @@ end # Base._deleteend!, # ] # let effects = Base.infer_effects(op, (Vector, Int)) -# @test Core.Compiler.is_effect_free_if_inaccessiblememonly(effects) -# @test Core.Compiler.is_terminates(effects) -# @test !Core.Compiler.is_nothrow(effects) +# @test Compiler.is_effect_free_if_inaccessiblememonly(effects) +# @test Compiler.is_terminates(effects) +# @test !Compiler.is_nothrow(effects) # end #end -@test 
Core.Compiler.is_noub(Base.infer_effects(Base._growbeg!, (Vector{Int}, Int))) -@test Core.Compiler.is_noub(Base.infer_effects(Base._growbeg!, (Vector{Any}, Int))) -@test Core.Compiler.is_noub(Base.infer_effects(Base._growend!, (Vector{Int}, Int))) -@test Core.Compiler.is_noub(Base.infer_effects(Base._growend!, (Vector{Any}, Int))) +@test Compiler.is_noub(Base.infer_effects(Base._growbeg!, (Vector{Int}, Int))) +@test Compiler.is_noub(Base.infer_effects(Base._growbeg!, (Vector{Any}, Int))) +@test Compiler.is_noub(Base.infer_effects(Base._growend!, (Vector{Int}, Int))) +@test Compiler.is_noub(Base.infer_effects(Base._growend!, (Vector{Any}, Int))) # tuple indexing # -------------- -@test Core.Compiler.is_foldable(Base.infer_effects(iterate, Tuple{Tuple{Int, Int}, Int})) +@test Compiler.is_foldable(Base.infer_effects(iterate, Tuple{Tuple{Int, Int}, Int})) # end to end # ---------- @@ -835,12 +835,12 @@ end #for T = Any[Int,Any], op! = Any[push!,pushfirst!], op = Any[length,size], # xs = Any[(Int,), (Int,Int,)] # let effects = Base.infer_effects(simple_vec_ops, (Type{T},typeof(op!),typeof(op),xs...)) -# @test Core.Compiler.is_foldable(effects) +# @test Compiler.is_foldable(effects) # end #end # Test that builtin_effects handles vararg correctly -@test !Core.Compiler.is_nothrow(Core.Compiler.builtin_effects(Core.Compiler.fallback_lattice, Core.isdefined, +@test !Compiler.is_nothrow(Compiler.builtin_effects(Compiler.fallback_lattice, Core.isdefined, Any[String, Vararg{Any}], Bool)) # Test that :new can be eliminated even if an sparam is unknown @@ -860,33 +860,33 @@ end # Effects for getfield of type instance @test Base.infer_effects(Tuple{Nothing}) do x WrapperOneField{typeof(x)}.instance -end |> Core.Compiler.is_foldable_nothrow +end |> Compiler.is_foldable_nothrow @test Base.infer_effects(Tuple{WrapperOneField{Float64}, Symbol}) do w, s getfield(w, s) -end |> Core.Compiler.is_foldable +end |> Compiler.is_foldable @test Base.infer_effects(Tuple{WrapperOneField{Symbol}, Symbol}) do w, s getfield(w, s) -end |> Core.Compiler.is_foldable +end |> Compiler.is_foldable # Flow-sensitive consistent for _typevar @test Base.infer_effects() do return WrapperOneField == (WrapperOneField{T} where T) -end |> Core.Compiler.is_foldable_nothrow +end |> Compiler.is_foldable_nothrow # Test that dead `@inbounds` does not taint consistency # https://github.com/JuliaLang/julia/issues/48243 @test Base.infer_effects(Tuple{Int64}) do i false && @inbounds (1,2,3)[i] return 1 -end |> Core.Compiler.is_foldable_nothrow +end |> Compiler.is_foldable_nothrow @test Base.infer_effects(Tuple{Int64}) do i @inbounds (1,2,3)[i] -end |> !Core.Compiler.is_noub +end |> !Compiler.is_noub @test Base.infer_effects(Tuple{Tuple{Int64}}) do x @inbounds x[1] -end |> Core.Compiler.is_foldable_nothrow +end |> Compiler.is_foldable_nothrow # Test that :new of non-concrete, but otherwise known type # does not taint consistency. 
@@ -894,46 +894,46 @@ end |> Core.Compiler.is_foldable_nothrow x::T ImmutRef(x) = $(Expr(:new, :(ImmutRef{typeof(x)}), :x)) end -@test Core.Compiler.is_foldable(Base.infer_effects(ImmutRef, Tuple{Any})) +@test Compiler.is_foldable(Base.infer_effects(ImmutRef, Tuple{Any})) -@test Core.Compiler.is_foldable_nothrow(Base.infer_effects(typejoin, ())) +@test Compiler.is_foldable_nothrow(Base.infer_effects(typejoin, ())) # nothrow-ness of subtyping operations # https://github.com/JuliaLang/julia/pull/48566 -@test !Core.Compiler.is_nothrow(Base.infer_effects((A,B)->A<:B, (Any,Any))) -@test !Core.Compiler.is_nothrow(Base.infer_effects((A,B)->A>:B, (Any,Any))) +@test !Compiler.is_nothrow(Base.infer_effects((A,B)->A<:B, (Any,Any))) +@test !Compiler.is_nothrow(Base.infer_effects((A,B)->A>:B, (Any,Any))) # GotoIfNot should properly mark itself as throwing when given a non-Bool # https://github.com/JuliaLang/julia/pull/48583 gotoifnot_throw_check_48583(x) = x ? x : 0 -@test !Core.Compiler.is_nothrow(Base.infer_effects(gotoifnot_throw_check_48583, (Missing,))) -@test !Core.Compiler.is_nothrow(Base.infer_effects(gotoifnot_throw_check_48583, (Any,))) -@test Core.Compiler.is_nothrow(Base.infer_effects(gotoifnot_throw_check_48583, (Bool,))) +@test !Compiler.is_nothrow(Base.infer_effects(gotoifnot_throw_check_48583, (Missing,))) +@test !Compiler.is_nothrow(Base.infer_effects(gotoifnot_throw_check_48583, (Any,))) +@test Compiler.is_nothrow(Base.infer_effects(gotoifnot_throw_check_48583, (Bool,))) # unknown :static_parameter should taint :nothrow # https://github.com/JuliaLang/julia/issues/46771 unknown_sparam_throw(::Union{Nothing, Type{T}}) where T = (T; nothing) unknown_sparam_nothrow1(x::Ref{T}) where T = (T; nothing) unknown_sparam_nothrow2(x::Ref{Ref{T}}) where T = (T; nothing) -@test Core.Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Type{Int},))) -@test Core.Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Type{<:Integer},))) -@test !Core.Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Type,))) -@test !Core.Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Nothing,))) -@test !Core.Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Union{Type{Int},Nothing},))) -@test !Core.Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Any,))) -@test Core.Compiler.is_nothrow(Base.infer_effects(unknown_sparam_nothrow1, (Ref,))) -@test Core.Compiler.is_nothrow(Base.infer_effects(unknown_sparam_nothrow2, (Ref{Ref{T}} where T,))) +@test Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Type{Int},))) +@test Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Type{<:Integer},))) +@test !Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Type,))) +@test !Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Nothing,))) +@test !Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Union{Type{Int},Nothing},))) +@test !Compiler.is_nothrow(Base.infer_effects(unknown_sparam_throw, (Any,))) +@test Compiler.is_nothrow(Base.infer_effects(unknown_sparam_nothrow1, (Ref,))) +@test Compiler.is_nothrow(Base.infer_effects(unknown_sparam_nothrow2, (Ref{Ref{T}} where T,))) # purely abstract recursion should not taint :terminates # https://github.com/JuliaLang/julia/issues/48983 abstractly_recursive1() = abstractly_recursive2() abstractly_recursive2() = (Base._return_type(abstractly_recursive1, Tuple{}); 1) abstractly_recursive3() = abstractly_recursive2() -@test_broken 
Core.Compiler.is_terminates(Base.infer_effects(abstractly_recursive3, ())) +@test_broken Compiler.is_terminates(Base.infer_effects(abstractly_recursive3, ())) actually_recursive1(x) = actually_recursive2(x) actually_recursive2(x) = (x <= 0) ? 1 : actually_recursive1(x - 1) actually_recursive3(x) = actually_recursive2(x) -@test !Core.Compiler.is_terminates(Base.infer_effects(actually_recursive3, (Int,))) +@test !Compiler.is_terminates(Base.infer_effects(actually_recursive3, (Int,))) # `isdefined` effects struct MaybeSome{T} @@ -949,30 +949,30 @@ const defined_some = MaybeSome{String}("julia") let effects = Base.infer_effects() do isdefined(undefined_ref, :x) end - @test !Core.Compiler.is_consistent(effects) - @test Core.Compiler.is_nothrow(effects) + @test !Compiler.is_consistent(effects) + @test Compiler.is_nothrow(effects) end let effects = Base.infer_effects() do isdefined(defined_ref, :x) end - @test !Core.Compiler.is_consistent(effects) - @test Core.Compiler.is_nothrow(effects) + @test !Compiler.is_consistent(effects) + @test Compiler.is_nothrow(effects) end let effects = Base.infer_effects() do isdefined(undefined_some, :value) end - @test Core.Compiler.is_consistent(effects) - @test Core.Compiler.is_nothrow(effects) + @test Compiler.is_consistent(effects) + @test Compiler.is_nothrow(effects) end let effects = Base.infer_effects() do isdefined(defined_some, :value) end - @test Core.Compiler.is_consistent(effects) - @test Core.Compiler.is_nothrow(effects) + @test Compiler.is_consistent(effects) + @test Compiler.is_nothrow(effects) end # high-level interface test isassigned_effects(s) = isassigned(Ref(s)) -@test Core.Compiler.is_consistent(Base.infer_effects(isassigned_effects, (Symbol,))) +@test Compiler.is_consistent(Base.infer_effects(isassigned_effects, (Symbol,))) @test fully_eliminated(; retval=true) do isassigned_effects(:foo) end @@ -987,16 +987,16 @@ function optimize_throw_block_for_effects(x) return a end let effects = Base.infer_effects(optimize_throw_block_for_effects, (Int,)) - @test Core.Compiler.is_consistent_if_notreturned(effects) - @test Core.Compiler.is_effect_free(effects) - @test !Core.Compiler.is_nothrow(effects) - @test Core.Compiler.is_terminates(effects) + @test Compiler.is_consistent_if_notreturned(effects) + @test Compiler.is_effect_free(effects) + @test !Compiler.is_nothrow(effects) + @test Compiler.is_terminates(effects) end # :isdefined effects @test @eval Base.infer_effects() do @isdefined($(gensym("some_undef_symbol"))) -end |> !Core.Compiler.is_consistent +end |> !Compiler.is_consistent # Effects of Base.hasfield (#50198) hf50198(s) = hasfield(typeof((;x=1, y=2)), s) @@ -1012,13 +1012,13 @@ g50311(x) = Val{f50311((1.0, x), "foo")}() const my_defined_var = 42 @test Base.infer_effects() do getglobal(@__MODULE__, :my_defined_var, :monotonic) -end |> Core.Compiler.is_foldable_nothrow +end |> Compiler.is_foldable_nothrow @test Base.infer_effects() do getglobal(@__MODULE__, :my_defined_var, :foo) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects() do getglobal(@__MODULE__, :my_defined_var, :foo, nothing) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow # irinterp should refine `:nothrow` information only if profitable Base.@assume_effects :nothrow function irinterp_nothrow_override(x, y) @@ -1031,14 +1031,14 @@ end @test Base.infer_effects((Float64,)) do y isinf(y) && return zero(y) irinterp_nothrow_override(true, y) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow # Effects for :compilerbarrier 
f1_compilerbarrier(b) = Base.compilerbarrier(:type, b) f2_compilerbarrier(b) = Base.compilerbarrier(:conditional, b) -@test !Core.Compiler.is_consistent(Base.infer_effects(f1_compilerbarrier, (Bool,))) -@test Core.Compiler.is_consistent(Base.infer_effects(f2_compilerbarrier, (Bool,))) +@test !Compiler.is_consistent(Base.infer_effects(f1_compilerbarrier, (Bool,))) +@test Compiler.is_consistent(Base.infer_effects(f2_compilerbarrier, (Bool,))) # Optimizer-refined effects function f1_optrefine(b) @@ -1047,7 +1047,7 @@ function f1_optrefine(b) end return b end -@test !Core.Compiler.is_consistent(Base.infer_effects(f1_optrefine, (Bool,))) +@test !Compiler.is_consistent(Base.infer_effects(f1_optrefine, (Bool,))) function f2_optrefine() if Ref(false)[] @@ -1055,15 +1055,15 @@ function f2_optrefine() end return true end -@test !Core.Compiler.is_nothrow(Base.infer_effects(f2_optrefine; optimize=false)) -@test Core.Compiler.is_nothrow(Base.infer_effects(f2_optrefine)) +@test !Compiler.is_nothrow(Base.infer_effects(f2_optrefine; optimize=false)) +@test Compiler.is_nothrow(Base.infer_effects(f2_optrefine)) function f3_optrefine(x) @fastmath sqrt(x) return x end -@test !Core.Compiler.is_consistent(Base.infer_effects(f3_optrefine; optimize=false)) -@test Core.Compiler.is_consistent(Base.infer_effects(f3_optrefine, (Float64,))) +@test !Compiler.is_consistent(Base.infer_effects(f3_optrefine; optimize=false)) +@test Compiler.is_consistent(Base.infer_effects(f3_optrefine, (Float64,))) # Check that :consistent is properly modeled for throwing statements const GLOBAL_MUTABLE_SWITCH = Ref{Bool}(false) @@ -1076,7 +1076,7 @@ GLOBAL_MUTABLE_SWITCH[] = true # Check that flipping the switch doesn't accidentally change the return type @test (Base.return_types(check_switch2) |> only) === Nothing -@test !Core.Compiler.is_consistent(Base.infer_effects(check_switch, (Base.RefValue{Bool},))) +@test !Compiler.is_consistent(Base.infer_effects(check_switch, (Base.RefValue{Bool},))) # post-opt IPO analysis refinement of `:effect_free`-ness function post_opt_refine_effect_free(y, c=true) @@ -1089,10 +1089,10 @@ function post_opt_refine_effect_free(y, c=true) end return r end -@test Core.Compiler.is_effect_free(Base.infer_effects(post_opt_refine_effect_free, (Base.RefValue{Any},))) +@test Compiler.is_effect_free(Base.infer_effects(post_opt_refine_effect_free, (Base.RefValue{Any},))) @test Base.infer_effects((Base.RefValue{Any},)) do y post_opt_refine_effect_free(y, true) -end |> Core.Compiler.is_effect_free +end |> Compiler.is_effect_free # Check EA-based refinement of :effect_free Base.@assume_effects :nothrow @noinline _noinline_set!(x) = (x[] = 1; nothing) @@ -1132,22 +1132,22 @@ function set_arg_arr!(x) end # This is inferable by type analysis only since the arguments have no mutable memory -@test Core.Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(_noinline_set!, (Base.RefValue{Int},))) -@test Core.Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(_noinline_set!, (Vector{Int},))) +@test Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(_noinline_set!, (Base.RefValue{Int},))) +@test Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(_noinline_set!, (Vector{Int},))) for func in (set_ref_with_unused_arg_1, set_ref_with_unused_arg_2, set_arr_with_unused_arg_1, set_arr_with_unused_arg_2) effects = Base.infer_effects(func, (Nothing,)) - @test Core.Compiler.is_inaccessiblememonly(effects) - @test Core.Compiler.is_effect_free(effects) + @test 
Compiler.is_inaccessiblememonly(effects) + @test Compiler.is_effect_free(effects) end # These need EA -@test Core.Compiler.is_effect_free(Base.infer_effects(set_ref_with_unused_arg_1, (Base.RefValue{Int},))) -@test Core.Compiler.is_effect_free(Base.infer_effects(set_ref_with_unused_arg_2, (Base.RefValue{Int},))) -@test Core.Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(set_arg_ref!, (Base.RefValue{Int},))) -@test_broken Core.Compiler.is_effect_free(Base.infer_effects(set_arr_with_unused_arg_1, (Vector{Int},))) -@test_broken Core.Compiler.is_effect_free(Base.infer_effects(set_arr_with_unused_arg_2, (Vector{Int},))) -@test_broken Core.Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(set_arg_arr!, (Vector{Int},))) +@test Compiler.is_effect_free(Base.infer_effects(set_ref_with_unused_arg_1, (Base.RefValue{Int},))) +@test Compiler.is_effect_free(Base.infer_effects(set_ref_with_unused_arg_2, (Base.RefValue{Int},))) +@test Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(set_arg_ref!, (Base.RefValue{Int},))) +@test_broken Compiler.is_effect_free(Base.infer_effects(set_arr_with_unused_arg_1, (Vector{Int},))) +@test_broken Compiler.is_effect_free(Base.infer_effects(set_arr_with_unused_arg_2, (Vector{Int},))) +@test_broken Compiler.is_effect_free_if_inaccessiblememonly(Base.infer_effects(set_arg_arr!, (Vector{Int},))) # EA-based refinement of :effect_free function f_EA_refine(ax, b) @@ -1155,7 +1155,7 @@ function f_EA_refine(ax, b) @noinline bx[] = b return ax[] + b end -@test Core.Compiler.is_effect_free(Base.infer_effects(f_EA_refine, (Base.RefValue{Int},Int))) +@test Compiler.is_effect_free(Base.infer_effects(f_EA_refine, (Base.RefValue{Int},Int))) function issue51837(; openquotechar::Char, newlinechar::Char) ncodeunits(openquotechar) == 1 || throw(ArgumentError("`openquotechar` must be a single-byte character")) @@ -1166,99 +1166,99 @@ function issue51837(; openquotechar::Char, newlinechar::Char) end @test Base.infer_effects() do openquotechar::Char, newlinechar::Char issue51837(; openquotechar, newlinechar) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test_throws ArgumentError issue51837(; openquotechar='α', newlinechar='\n') # idempotency of effects derived by post-opt analysis callgetfield(x, f) = getfield(x, f, Base.@_boundscheck) -@test Base.infer_effects(callgetfield, (Some{Any},Symbol)).noub === Core.Compiler.NOUB_IF_NOINBOUNDS +@test Base.infer_effects(callgetfield, (Some{Any},Symbol)).noub === Compiler.NOUB_IF_NOINBOUNDS callgetfield1(x, f) = getfield(x, f, Base.@_boundscheck) callgetfield_simple(x, f) = callgetfield1(x, f) @test Base.infer_effects(callgetfield_simple, (Some{Any},Symbol)).noub === Base.infer_effects(callgetfield_simple, (Some{Any},Symbol)).noub === - Core.Compiler.ALWAYS_TRUE + Compiler.ALWAYS_TRUE callgetfield2(x, f) = getfield(x, f, Base.@_boundscheck) callgetfield_inbounds(x, f) = @inbounds callgetfield2(x, f) @test Base.infer_effects(callgetfield_inbounds, (Some{Any},Symbol)).noub === Base.infer_effects(callgetfield_inbounds, (Some{Any},Symbol)).noub === - Core.Compiler.ALWAYS_FALSE + Compiler.ALWAYS_FALSE # noub modeling for memory ops let (memoryrefnew, memoryrefget, memoryref_isassigned, memoryrefset!) = (Core.memoryrefnew, Core.memoryrefget, Core.memoryref_isassigned, Core.memoryrefset!) function builtin_effects(@nospecialize xs...) 
- interp = Core.Compiler.NativeInterpreter() - 𝕃 = Core.Compiler.typeinf_lattice(interp) - rt = Core.Compiler.builtin_tfunction(interp, xs..., nothing) - return Core.Compiler.builtin_effects(𝕃, xs..., rt) + interp = Compiler.NativeInterpreter() + 𝕃 = Compiler.typeinf_lattice(interp) + rt = Compiler.builtin_tfunction(interp, xs..., nothing) + return Compiler.builtin_effects(𝕃, xs..., rt) end - @test Core.Compiler.is_noub(builtin_effects(memoryrefnew, Any[Memory,])) - @test Core.Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int])) - @test Core.Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Core.Const(true)])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Core.Const(false)])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Bool])) - @test Core.Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Int])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Vararg{Bool}])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Vararg{Any}])) - @test Core.Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Core.Const(true)])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Core.Const(false)])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Bool])) - @test Core.Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Int])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Vararg{Bool}])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Vararg{Any}])) - @test Core.Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Core.Const(true)])) - @test !Core.Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Core.Const(false)])) - @test !Core.Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Bool])) - @test Core.Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Int])) - @test !Core.Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Vararg{Bool}])) - @test !Core.Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Vararg{Any}])) - @test Core.Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Core.Const(true)])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Core.Const(false)])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Bool])) - @test Core.Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Int])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Vararg{Bool}])) - @test !Core.Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Vararg{Any}])) + @test Compiler.is_noub(builtin_effects(memoryrefnew, Any[Memory,])) + @test Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int])) + @test Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Core.Const(true)])) + @test !Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Core.Const(false)])) + @test !Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Bool])) + @test Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Int])) + @test !Compiler.is_noub(builtin_effects(memoryrefnew, Any[MemoryRef,Int,Vararg{Bool}])) + @test !Compiler.is_noub(builtin_effects(memoryrefnew, 
Any[MemoryRef,Vararg{Any}])) + @test Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Core.Const(true)])) + @test !Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Core.Const(false)])) + @test !Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Bool])) + @test Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Int])) + @test !Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Symbol,Vararg{Bool}])) + @test !Compiler.is_noub(builtin_effects(memoryrefget, Any[MemoryRef,Vararg{Any}])) + @test Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Core.Const(true)])) + @test !Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Core.Const(false)])) + @test !Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Bool])) + @test Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Int])) + @test !Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Symbol,Vararg{Bool}])) + @test !Compiler.is_noub(builtin_effects(memoryref_isassigned, Any[MemoryRef,Vararg{Any}])) + @test Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Core.Const(true)])) + @test !Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Core.Const(false)])) + @test !Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Bool])) + @test Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Int])) + @test !Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Any,Symbol,Vararg{Bool}])) + @test !Compiler.is_noub(builtin_effects(memoryrefset!, Any[MemoryRef,Vararg{Any}])) # `:boundscheck` taint should be refined by post-opt analysis @test Base.infer_effects() do xs::Vector{Any}, i::Int memoryrefget(memoryrefnew(getfield(xs, :ref), i, Base.@_boundscheck), :not_atomic, Base.@_boundscheck) - end |> Core.Compiler.is_noub_if_noinbounds + end |> Compiler.is_noub_if_noinbounds end # high level tests -@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(getindex, (Vector{Int},Int))) -@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(getindex, (Vector{Any},Int))) -@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(setindex!, (Vector{Int},Int,Int))) -@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(Base._setindex!, (Vector{Any},Any,Int))) -@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(isassigned, (Vector{Int},Int))) -@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(isassigned, (Vector{Any},Int))) +@test Compiler.is_noub_if_noinbounds(Base.infer_effects(getindex, (Vector{Int},Int))) +@test Compiler.is_noub_if_noinbounds(Base.infer_effects(getindex, (Vector{Any},Int))) +@test Compiler.is_noub_if_noinbounds(Base.infer_effects(setindex!, (Vector{Int},Int,Int))) +@test Compiler.is_noub_if_noinbounds(Base.infer_effects(Base._setindex!, (Vector{Any},Any,Int))) +@test Compiler.is_noub_if_noinbounds(Base.infer_effects(isassigned, (Vector{Int},Int))) +@test Compiler.is_noub_if_noinbounds(Base.infer_effects(isassigned, (Vector{Any},Int))) @test Base.infer_effects((Vector{Int},Int)) do xs, i xs[i] -end |> Core.Compiler.is_noub +end |> Compiler.is_noub @test Base.infer_effects((Vector{Any},Int)) do xs, i xs[i] -end |> Core.Compiler.is_noub +end |> Compiler.is_noub @test Base.infer_effects((Vector{Int},Int,Int)) do xs, x, i xs[i] = x -end |> Core.Compiler.is_noub +end |> Compiler.is_noub @test 
Base.infer_effects((Vector{Any},Any,Int)) do xs, x, i xs[i] = x -end |> Core.Compiler.is_noub +end |> Compiler.is_noub @test Base.infer_effects((Vector{Int},Int)) do xs, i @inbounds xs[i] -end |> !Core.Compiler.is_noub +end |> !Compiler.is_noub @test Base.infer_effects((Vector{Any},Int)) do xs, i @inbounds xs[i] -end |> !Core.Compiler.is_noub +end |> !Compiler.is_noub Base.@propagate_inbounds getindex_propagate(xs, i) = xs[i] getindex_dont_propagate(xs, i) = xs[i] -@test Core.Compiler.is_noub_if_noinbounds(Base.infer_effects(getindex_propagate, (Vector{Any},Int))) -@test Core.Compiler.is_noub(Base.infer_effects(getindex_dont_propagate, (Vector{Any},Int))) +@test Compiler.is_noub_if_noinbounds(Base.infer_effects(getindex_propagate, (Vector{Any},Int))) +@test Compiler.is_noub(Base.infer_effects(getindex_dont_propagate, (Vector{Any},Int))) @test Base.infer_effects((Vector{Any},Int)) do xs, i @inbounds getindex_propagate(xs, i) -end |> !Core.Compiler.is_noub +end |> !Compiler.is_noub @test Base.infer_effects((Vector{Any},Int)) do xs, i @inbounds getindex_dont_propagate(xs, i) -end |> Core.Compiler.is_noub +end |> Compiler.is_noub # refine `:nothrow` when `exct` is known to be `Bottom` @test Base.infer_exception_type(getindex, (Vector{Int},Int)) == BoundsError @@ -1270,14 +1270,14 @@ function getindex_nothrow(xs::Vector{Int}, i::Int) rethrow(err) end end -@test Core.Compiler.is_nothrow(Base.infer_effects(getindex_nothrow, (Vector{Int}, Int))) +@test Compiler.is_nothrow(Base.infer_effects(getindex_nothrow, (Vector{Int}, Int))) # callsite `@assume_effects` annotation let ast = code_lowered((Int,)) do x Base.@assume_effects :total identity(x) end |> only ssaflag = ast.ssaflags[findfirst(!iszero, ast.ssaflags)::Int] - override = Core.Compiler.decode_statement_effects_override(ssaflag) + override = Compiler.decode_statement_effects_override(ssaflag) # if this gets broken, check if this is synced with expr.jl @test override.consistent && override.effect_free && override.nothrow && override.terminates_globally && !override.terminates_locally && @@ -1287,7 +1287,7 @@ end @test Base.infer_effects((Float64,)) do x isinf(x) && return 0.0 return Base.@assume_effects :nothrow sin(x) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow let effects = Base.infer_effects((Vector{Float64},)) do xs isempty(xs) && return 0.0 Base.@assume_effects :nothrow begin @@ -1297,8 +1297,8 @@ let effects = Base.infer_effects((Vector{Float64},)) do xs end end # all nested overrides should be applied - @test Core.Compiler.is_nothrow(effects) - @test Core.Compiler.is_noub(effects) + @test Compiler.is_nothrow(effects) + @test Compiler.is_noub(effects) end @test Base.infer_effects((Int,)) do x res = 1 @@ -1308,20 +1308,20 @@ end x -= 1 end return res -end |> Core.Compiler.is_terminates +end |> Compiler.is_terminates # https://github.com/JuliaLang/julia/issues/52531 const a52531 = Core.Ref(1) @eval getref52531() = $(QuoteNode(a52531)).x -@test !Core.Compiler.is_consistent(Base.infer_effects(getref52531)) +@test !Compiler.is_consistent(Base.infer_effects(getref52531)) let global set_a52531!, get_a52531 _a::Int = -1 set_a52531!(a::Int) = (_a = a; return get_a52531()) get_a52531() = _a end -@test !Core.Compiler.is_consistent(Base.infer_effects(set_a52531!, (Int,))) -@test !Core.Compiler.is_consistent(Base.infer_effects(get_a52531, ())) +@test !Compiler.is_consistent(Base.infer_effects(set_a52531!, (Int,))) +@test !Compiler.is_consistent(Base.infer_effects(get_a52531, ())) @test get_a52531() == -1 @test set_a52531!(1) == 1 
@test get_a52531() == 1 @@ -1333,35 +1333,35 @@ let is_initialized52531() = _is_initialized end top_52531(_) = (set_initialized52531!(true); nothing) -@test !Core.Compiler.is_consistent(Base.infer_effects(is_initialized52531)) -@test !Core.Compiler.is_removable_if_unused(Base.infer_effects(set_initialized52531!, (Bool,))) +@test !Compiler.is_consistent(Base.infer_effects(is_initialized52531)) +@test !Compiler.is_removable_if_unused(Base.infer_effects(set_initialized52531!, (Bool,))) @test !is_initialized52531() top_52531(0) @test is_initialized52531() const ref52843 = Ref{Int}() @eval func52843() = ($ref52843[] = 1; nothing) -@test !Core.Compiler.is_foldable(Base.infer_effects(func52843)) +@test !Compiler.is_foldable(Base.infer_effects(func52843)) let; Base.Experimental.@force_compile; func52843(); end @test ref52843[] == 1 -@test Core.Compiler.is_inaccessiblememonly(Base.infer_effects(identity∘identity, Tuple{Any})) -@test Core.Compiler.is_inaccessiblememonly(Base.infer_effects(()->Vararg, Tuple{})) +@test Compiler.is_inaccessiblememonly(Base.infer_effects(identity∘identity, Tuple{Any})) +@test Compiler.is_inaccessiblememonly(Base.infer_effects(()->Vararg, Tuple{})) # pointerref nothrow for invalid pointer -@test !Core.Compiler.intrinsic_nothrow(Core.Intrinsics.pointerref, Any[Type{Ptr{Vector{Int64}}}, Int, Int]) -@test !Core.Compiler.intrinsic_nothrow(Core.Intrinsics.pointerref, Any[Type{Ptr{T}} where T, Int, Int]) +@test !Compiler.intrinsic_nothrow(Core.Intrinsics.pointerref, Any[Type{Ptr{Vector{Int64}}}, Int, Int]) +@test !Compiler.intrinsic_nothrow(Core.Intrinsics.pointerref, Any[Type{Ptr{T}} where T, Int, Int]) # post-opt :consistent-cy analysis correctness # https://github.com/JuliaLang/julia/issues/53508 -@test !Core.Compiler.is_consistent(Base.infer_effects(getindex, (UnitRange{Int},Int))) -@test !Core.Compiler.is_consistent(Base.infer_effects(getindex, (Base.OneTo{Int},Int))) +@test !Compiler.is_consistent(Base.infer_effects(getindex, (UnitRange{Int},Int))) +@test !Compiler.is_consistent(Base.infer_effects(getindex, (Base.OneTo{Int},Int))) @noinline f53613() = @assert isdefined(@__MODULE__, :v53613) g53613() = f53613() h53613() = g53613() -@test !Core.Compiler.is_consistent(Base.infer_effects(f53613)) -@test !Core.Compiler.is_consistent(Base.infer_effects(g53613)) +@test !Compiler.is_consistent(Base.infer_effects(f53613)) +@test !Compiler.is_consistent(Base.infer_effects(g53613)) @test_throws AssertionError f53613() @test_throws AssertionError g53613() @test_throws AssertionError h53613() @@ -1373,12 +1373,12 @@ global v53613 = nothing # tuple/svec effects @test Base.infer_effects((Vector{Any},)) do xs Core.tuple(xs...) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Vector{Any},)) do xs Core.svec(xs...) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow # effects for unknown `:foreigncall`s @test Base.infer_effects() do @ccall unsafecall()::Cvoid -end == Core.Compiler.EFFECTS_UNKNOWN +end == Compiler.EFFECTS_UNKNOWN diff --git a/Compiler/test/inference.jl b/Compiler/test/inference.jl index 560b9da02e643..26fc80470795f 100644 --- a/Compiler/test/inference.jl +++ b/Compiler/test/inference.jl @@ -1,14 +1,14 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license -# tests for Core.Compiler correctness and precision -import Core.Compiler: Const, Conditional, ⊑, ReturnNode, GotoIfNot -isdispatchelem(@nospecialize x) = !isa(x, Type) || Core.Compiler.isdispatchelem(x) +include("irutils.jl") + +# tests for Compiler correctness and precision +import .Compiler: Const, Conditional, ⊑, ReturnNode, GotoIfNot +isdispatchelem(@nospecialize x) = !isa(x, Type) || Compiler.isdispatchelem(x) using Random, Core.IR using InteractiveUtils -include("irutils.jl") - f39082(x::Vararg{T}) where {T <: Number} = x[1] let ast = only(code_typed(f39082, Tuple{Vararg{Rational}}))[1] @test ast.slottypes == Any[Const(f39082), Tuple{Vararg{Rational}}] @@ -18,92 +18,92 @@ let ast = only(code_typed(f39082, Tuple{Rational, Vararg{Rational}}))[1] end # demonstrate some of the type-size limits -@test Core.Compiler.limit_type_size(Ref{Complex{T} where T}, Ref, Ref, 100, 0) == Ref -@test Core.Compiler.limit_type_size(Ref{Complex{T} where T}, Ref{Complex{T} where T}, Ref, 100, 0) == Ref{Complex{T} where T} +@test Compiler.limit_type_size(Ref{Complex{T} where T}, Ref, Ref, 100, 0) == Ref +@test Compiler.limit_type_size(Ref{Complex{T} where T}, Ref{Complex{T} where T}, Ref, 100, 0) == Ref{Complex{T} where T} let comparison = Tuple{X, X} where X<:Tuple sig = Tuple{X, X} where X<:comparison ref = Tuple{X, X} where X - @test Core.Compiler.limit_type_size(sig, comparison, comparison, 100, 100) == Tuple{Tuple, Tuple} - @test Core.Compiler.limit_type_size(sig, ref, comparison, 100, 100) == Tuple{Any, Any} - @test Core.Compiler.limit_type_size(Tuple{sig}, Tuple{ref}, comparison, 100, 100) == Tuple{Tuple{Any, Any}} - @test Core.Compiler.limit_type_size(ref, sig, Union{}, 100, 100) == ref + @test Compiler.limit_type_size(sig, comparison, comparison, 100, 100) == Tuple{Tuple, Tuple} + @test Compiler.limit_type_size(sig, ref, comparison, 100, 100) == Tuple{Any, Any} + @test Compiler.limit_type_size(Tuple{sig}, Tuple{ref}, comparison, 100, 100) == Tuple{Tuple{Any, Any}} + @test Compiler.limit_type_size(ref, sig, Union{}, 100, 100) == ref end let ref = Tuple{T, Val{T}} where T<:Val sig = Tuple{T, Val{T}} where T<:(Val{T} where T<:Val) - @test Core.Compiler.limit_type_size(sig, ref, Union{}, 100, 100) == Tuple{Val, Val} - @test Core.Compiler.limit_type_size(ref, sig, Union{}, 100, 100) == ref + @test Compiler.limit_type_size(sig, ref, Union{}, 100, 100) == Tuple{Val, Val} + @test Compiler.limit_type_size(ref, sig, Union{}, 100, 100) == ref end let ref = Tuple{T, Val{T}} where T<:(Val{T} where T<:(Val{T} where T<:(Val{T} where T<:Val))) sig = Tuple{T, Val{T}} where T<:(Val{T} where T<:(Val{T} where T<:(Val{T} where T<:(Val{T} where T<:Val)))) - @test Core.Compiler.limit_type_size(sig, ref, Union{}, 100, 100) == Tuple{Val, Val} - @test Core.Compiler.limit_type_size(ref, sig, Union{}, 100, 100) == ref + @test Compiler.limit_type_size(sig, ref, Union{}, 100, 100) == Tuple{Val, Val} + @test Compiler.limit_type_size(ref, sig, Union{}, 100, 100) == ref end let t = Tuple{Ref{T},T,T} where T, c = Tuple{Ref, T, T} where T # #36407 - @test t <: Core.Compiler.limit_type_size(t, c, Union{}, 1, 100) + @test t <: Compiler.limit_type_size(t, c, Union{}, 1, 100) end # obtain Vararg with 2 undefined fields let va = ccall(:jl_type_intersection_with_env, Any, (Any, Any), Tuple{Tuple}, Tuple{Tuple{Vararg{Any, N}}} where N)[2][1] - @test Core.Compiler.__limit_type_size(Tuple, va, Core.svec(va, Union{}), 2, 2) === Tuple + @test Compiler.__limit_type_size(Tuple, va, Core.svec(va, 
Union{}), 2, 2) === Tuple end mutable struct TS14009{T}; end let A = TS14009{TS14009{TS14009{TS14009{TS14009{T}}}}} where {T}, B = Base.rewrap_unionall(TS14009{Base.unwrap_unionall(A)}, A) - @test Core.Compiler.Compiler.limit_type_size(B, A, A, 2, 2) == TS14009 + @test Compiler.Compiler.limit_type_size(B, A, A, 2, 2) == TS14009 end # issue #42835 -@test !Core.Compiler.type_more_complex(Int, Any, Core.svec(), 1, 1, 1) -@test !Core.Compiler.type_more_complex(Int, Type{Int}, Core.svec(), 1, 1, 1) -@test !Core.Compiler.type_more_complex(Type{Int}, Any, Core.svec(), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Int}}, Any, Core.svec(), 1, 1, 1) -@test Core.Compiler.limit_type_size(Type{Int}, Any, Union{}, 0, 0) == Type{Int} -@test Core.Compiler.type_more_complex(Type{Type{Int}}, Type{Int}, Core.svec(Type{Int}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Int}}, Int, Core.svec(Type{Int}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Int}}, Any, Core.svec(), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Type{Int}}}, Type{Type{Int}}, Core.svec(Type{Type{Int}}), 1, 1, 1) - -@test Core.Compiler.type_more_complex(ComplexF32, Any, Core.svec(), 1, 1, 1) -@test !Core.Compiler.type_more_complex(ComplexF32, Any, Core.svec(Type{ComplexF32}), 1, 1, 1) -@test Core.Compiler.type_more_complex(ComplexF32, Type{ComplexF32}, Core.svec(), 1, 1, 1) -@test !Core.Compiler.type_more_complex(Type{ComplexF32}, Any, Core.svec(Type{Type{ComplexF32}}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{ComplexF32}, Type{Type{ComplexF32}}, Core.svec(), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{ComplexF32}, ComplexF32, Core.svec(), 1, 1, 1) -@test Core.Compiler.limit_type_size(Type{ComplexF32}, ComplexF32, Union{}, 1, 1) == Type{<:Complex} -@test Core.Compiler.type_more_complex(Type{ComplexF32}, Any, Core.svec(), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{ComplexF32}}, Type{ComplexF32}, Core.svec(Type{ComplexF32}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{ComplexF32}}, ComplexF32, Core.svec(ComplexF32), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Type{ComplexF32}}}, Type{Type{ComplexF32}}, Core.svec(Type{ComplexF32}), 1, 1, 1) +@test !Compiler.type_more_complex(Int, Any, Core.svec(), 1, 1, 1) +@test !Compiler.type_more_complex(Int, Type{Int}, Core.svec(), 1, 1, 1) +@test !Compiler.type_more_complex(Type{Int}, Any, Core.svec(), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Int}}, Any, Core.svec(), 1, 1, 1) +@test Compiler.limit_type_size(Type{Int}, Any, Union{}, 0, 0) == Type{Int} +@test Compiler.type_more_complex(Type{Type{Int}}, Type{Int}, Core.svec(Type{Int}), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Int}}, Int, Core.svec(Type{Int}), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Int}}, Any, Core.svec(), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Type{Int}}}, Type{Type{Int}}, Core.svec(Type{Type{Int}}), 1, 1, 1) + +@test Compiler.type_more_complex(ComplexF32, Any, Core.svec(), 1, 1, 1) +@test !Compiler.type_more_complex(ComplexF32, Any, Core.svec(Type{ComplexF32}), 1, 1, 1) +@test Compiler.type_more_complex(ComplexF32, Type{ComplexF32}, Core.svec(), 1, 1, 1) +@test !Compiler.type_more_complex(Type{ComplexF32}, Any, Core.svec(Type{Type{ComplexF32}}), 1, 1, 1) +@test Compiler.type_more_complex(Type{ComplexF32}, Type{Type{ComplexF32}}, Core.svec(), 1, 1, 1) +@test Compiler.type_more_complex(Type{ComplexF32}, ComplexF32, Core.svec(), 1, 1, 1) +@test 
Compiler.limit_type_size(Type{ComplexF32}, ComplexF32, Union{}, 1, 1) == Type{<:Complex} +@test Compiler.type_more_complex(Type{ComplexF32}, Any, Core.svec(), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{ComplexF32}}, Type{ComplexF32}, Core.svec(Type{ComplexF32}), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{ComplexF32}}, ComplexF32, Core.svec(ComplexF32), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Type{ComplexF32}}}, Type{Type{ComplexF32}}, Core.svec(Type{ComplexF32}), 1, 1, 1) # n.b. Type{Type{Union{}} === Type{Core.TypeofBottom} -@test !Core.Compiler.type_more_complex(Type{Union{}}, Any, Core.svec(), 1, 1, 1) -@test !Core.Compiler.type_more_complex(Type{Type{Union{}}}, Any, Core.svec(), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Type{Union{}}}}, Any, Core.svec(), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Type{Union{}}}}, Type{Type{Union{}}}, Core.svec(Type{Type{Union{}}}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Type{Type{Union{}}}}}, Type{Type{Type{Union{}}}}, Core.svec(Type{Type{Type{Union{}}}}), 1, 1, 1) - -@test !Core.Compiler.type_more_complex(Type{1}, Type{2}, Core.svec(), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Union{Float32,Float64}}, Union{Float32,Float64}, Core.svec(Union{Float32,Float64}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Union{Float32,Float64}}}, Union{Float32,Float64}, Core.svec(Union{Float32,Float64}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{Type{Union{Float32,Float64}}}, Type{Union{Float32,Float64}}, Core.svec(Type{Union{Float32,Float64}}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{<:Union{Float32,Float64}}, Type{Union{Float32,Float64}}, Core.svec(Union{Float32,Float64}), 1, 1, 1) -@test Core.Compiler.type_more_complex(Type{<:Union{Float32,Float64}}, Any, Core.svec(Union{Float32,Float64}), 1, 1, 1) +@test !Compiler.type_more_complex(Type{Union{}}, Any, Core.svec(), 1, 1, 1) +@test !Compiler.type_more_complex(Type{Type{Union{}}}, Any, Core.svec(), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Type{Union{}}}}, Any, Core.svec(), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Type{Union{}}}}, Type{Type{Union{}}}, Core.svec(Type{Type{Union{}}}), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Type{Type{Union{}}}}}, Type{Type{Type{Union{}}}}, Core.svec(Type{Type{Type{Union{}}}}), 1, 1, 1) + +@test !Compiler.type_more_complex(Type{1}, Type{2}, Core.svec(), 1, 1, 1) +@test Compiler.type_more_complex(Type{Union{Float32,Float64}}, Union{Float32,Float64}, Core.svec(Union{Float32,Float64}), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Union{Float32,Float64}}}, Union{Float32,Float64}, Core.svec(Union{Float32,Float64}), 1, 1, 1) +@test Compiler.type_more_complex(Type{Type{Union{Float32,Float64}}}, Type{Union{Float32,Float64}}, Core.svec(Type{Union{Float32,Float64}}), 1, 1, 1) +@test Compiler.type_more_complex(Type{<:Union{Float32,Float64}}, Type{Union{Float32,Float64}}, Core.svec(Union{Float32,Float64}), 1, 1, 1) +@test Compiler.type_more_complex(Type{<:Union{Float32,Float64}}, Any, Core.svec(Union{Float32,Float64}), 1, 1, 1) # issue #49287 -@test !Core.Compiler.type_more_complex(Tuple{Vararg{Tuple{}}}, Tuple{Vararg{Tuple}}, Core.svec(), 0, 0, 0) -@test Core.Compiler.type_more_complex(Tuple{Vararg{Tuple}}, Tuple{Vararg{Tuple{}}}, Core.svec(), 0, 0, 0) +@test !Compiler.type_more_complex(Tuple{Vararg{Tuple{}}}, Tuple{Vararg{Tuple}}, Core.svec(), 0, 0, 0) +@test Compiler.type_more_complex(Tuple{Vararg{Tuple}}, 
Tuple{Vararg{Tuple{}}}, Core.svec(), 0, 0, 0) # issue #51694 -@test Core.Compiler.type_more_complex( +@test Compiler.type_more_complex( Base.Generator{Base.Iterators.Flatten{Array{Bool, 1}}, typeof(identity)}, Base.Generator{Array{Bool, 1}, typeof(identity)}, Core.svec(), 0, 0, 0) -@test Core.Compiler.type_more_complex( +@test Compiler.type_more_complex( Base.Generator{Base.Iterators.Flatten{Base.Generator{Array{Bool, 1}, typeof(identity)}}, typeof(identity)}, Base.Generator{Array{Bool, 1}, typeof(identity)}, Core.svec(), 0, 0, 0) @@ -111,31 +111,31 @@ end let # 40336 t = Type{Type{Type{Int}}} c = Type{Type{Int}} - r = Core.Compiler.limit_type_size(t, c, c, 100, 100) + r = Compiler.limit_type_size(t, c, c, 100, 100) @test t !== r && t <: r end -@test Core.Compiler.limit_type_size(Type{Type{Type{Int}}}, Type, Union{}, 0, 0) == Type{<:Type} -@test Core.Compiler.limit_type_size(Type{Type{Int}}, Type, Union{}, 0, 0) == Type{<:Type} -@test Core.Compiler.limit_type_size(Type{Int}, Type, Union{}, 0, 0) == Type{Int} -@test Core.Compiler.limit_type_size(Type{<:Int}, Type, Union{}, 0, 0) == Type{<:Int} -@test Core.Compiler.limit_type_size(Type{ComplexF32}, ComplexF32, Union{}, 0, 0) == Type{<:Complex} # added nesting -@test Core.Compiler.limit_type_size(Type{ComplexF32}, Type{ComplexF64}, Union{}, 0, 0) == Type{ComplexF32} # base matches -@test Core.Compiler.limit_type_size(Type{ComplexF32}, Type, Union{}, 0, 0) == Type{<:Complex} -@test_broken Core.Compiler.limit_type_size(Type{<:ComplexF64}, Type, Union{}, 0, 0) == Type{<:Complex} -@test Core.Compiler.limit_type_size(Type{<:ComplexF64}, Type, Union{}, 0, 0) == Type #50692 -@test Core.Compiler.limit_type_size(Type{Union{ComplexF32,ComplexF64}}, Type, Union{}, 0, 0) == Type -@test_broken Core.Compiler.limit_type_size(Type{Union{ComplexF32,ComplexF64}}, Type, Union{}, 0, 0) == Type{<:Complex} #50692 -@test Core.Compiler.limit_type_size(Type{Union{Float32,Float64}}, Type, Union{}, 0, 0) == Type -@test Core.Compiler.limit_type_size(Type{Union{Int,Type{Int}}}, Type{Type{Int}}, Union{}, 0, 0) == Type -@test Core.Compiler.limit_type_size(Type{Union{Int,Type{Int}}}, Union{Type{Int},Type{Type{Int}}}, Union{}, 0, 0) == Type -@test Core.Compiler.limit_type_size(Type{Union{Int,Type{Int}}}, Type{Union{Type{Int},Type{Type{Int}}}}, Union{}, 0, 0) == Type{Union{Int, Type{Int}}} -@test Core.Compiler.limit_type_size(Type{Union{Int,Type{Int}}}, Type{Type{Int}}, Union{}, 0, 0) == Type - - -@test Core.Compiler.limit_type_size(Type{Any}, Union{}, Union{}, 0, 0) == - Core.Compiler.limit_type_size(Type{Any}, Any, Union{}, 0, 0) == - Core.Compiler.limit_type_size(Type{Any}, Type, Union{}, 0, 0) == +@test Compiler.limit_type_size(Type{Type{Type{Int}}}, Type, Union{}, 0, 0) == Type{<:Type} +@test Compiler.limit_type_size(Type{Type{Int}}, Type, Union{}, 0, 0) == Type{<:Type} +@test Compiler.limit_type_size(Type{Int}, Type, Union{}, 0, 0) == Type{Int} +@test Compiler.limit_type_size(Type{<:Int}, Type, Union{}, 0, 0) == Type{<:Int} +@test Compiler.limit_type_size(Type{ComplexF32}, ComplexF32, Union{}, 0, 0) == Type{<:Complex} # added nesting +@test Compiler.limit_type_size(Type{ComplexF32}, Type{ComplexF64}, Union{}, 0, 0) == Type{ComplexF32} # base matches +@test Compiler.limit_type_size(Type{ComplexF32}, Type, Union{}, 0, 0) == Type{<:Complex} +@test_broken Compiler.limit_type_size(Type{<:ComplexF64}, Type, Union{}, 0, 0) == Type{<:Complex} +@test Compiler.limit_type_size(Type{<:ComplexF64}, Type, Union{}, 0, 0) == Type #50692 +@test 
Compiler.limit_type_size(Type{Union{ComplexF32,ComplexF64}}, Type, Union{}, 0, 0) == Type +@test_broken Compiler.limit_type_size(Type{Union{ComplexF32,ComplexF64}}, Type, Union{}, 0, 0) == Type{<:Complex} #50692 +@test Compiler.limit_type_size(Type{Union{Float32,Float64}}, Type, Union{}, 0, 0) == Type +@test Compiler.limit_type_size(Type{Union{Int,Type{Int}}}, Type{Type{Int}}, Union{}, 0, 0) == Type +@test Compiler.limit_type_size(Type{Union{Int,Type{Int}}}, Union{Type{Int},Type{Type{Int}}}, Union{}, 0, 0) == Type +@test Compiler.limit_type_size(Type{Union{Int,Type{Int}}}, Type{Union{Type{Int},Type{Type{Int}}}}, Union{}, 0, 0) == Type{Union{Int, Type{Int}}} +@test Compiler.limit_type_size(Type{Union{Int,Type{Int}}}, Type{Type{Int}}, Union{}, 0, 0) == Type + + +@test Compiler.limit_type_size(Type{Any}, Union{}, Union{}, 0, 0) == + Compiler.limit_type_size(Type{Any}, Any, Union{}, 0, 0) == + Compiler.limit_type_size(Type{Any}, Type, Union{}, 0, 0) == Type{Any} # issue #43296 @@ -159,32 +159,32 @@ Base.ndims(::Type{f}) where {f<:e43296} = ndims(supertype(f)) Base.ndims(g::e43296) = ndims(typeof(g)) @test only(Base.return_types(ndims, (h43296{Any, 0, Any, Int, Any},))) == Int -@test Core.Compiler.unionlen(Union{}) == 1 -@test Core.Compiler.unionlen(Int8) == 1 -@test Core.Compiler.unionlen(Union{Int8, Int16}) == 2 -@test Core.Compiler.unionlen(Union{Int8, Int16, Int32, Int64}) == 4 -@test Core.Compiler.unionlen(Tuple{Union{Int8, Int16, Int32, Int64}}) == 1 -@test Core.Compiler.unionlen(Union{Int8, Int16, Int32, T} where T) == 1 - -@test Core.Compiler.unioncomplexity(Union{}) == 0 -@test Core.Compiler.unioncomplexity(Int8) == 0 -@test Core.Compiler.unioncomplexity(Val{Union{Int8, Int16, Int32, Int64}}) == 0 -@test Core.Compiler.unioncomplexity(Union{Int8, Int16}) == 1 -@test Core.Compiler.unioncomplexity(Union{Int8, Int16, Int32, Int64}) == 3 -@test Core.Compiler.unioncomplexity(Tuple{Union{Int8, Int16, Int32, Int64}}) == 3 -@test Core.Compiler.unioncomplexity(Union{Int8, Int16, Int32, T} where T) == 3 -@test Core.Compiler.unioncomplexity(Tuple{Val{T}, Union{Int8, Int16}, Int8} where T<:Union{Int8, Int16, Int32, Int64}) == 3 -@test Core.Compiler.unioncomplexity(Tuple{Vararg{Tuple{Union{Int8, Int16}}}}) == 2 -@test Core.Compiler.unioncomplexity(Tuple{Vararg{Symbol}}) == 1 -@test Core.Compiler.unioncomplexity(Tuple{Vararg{Union{Symbol, Tuple{Vararg{Symbol}}}}}) == 3 -@test Core.Compiler.unioncomplexity(Tuple{Vararg{Union{Symbol, Tuple{Vararg{Union{Symbol, Tuple{Vararg{Symbol}}}}}}}}) == 5 -@test Core.Compiler.unioncomplexity(Tuple{Vararg{Union{Symbol, Tuple{Vararg{Union{Symbol, Tuple{Vararg{Union{Symbol, Tuple{Vararg{Symbol}}}}}}}}}}}) == 7 +@test Compiler.unionlen(Union{}) == 1 +@test Compiler.unionlen(Int8) == 1 +@test Compiler.unionlen(Union{Int8, Int16}) == 2 +@test Compiler.unionlen(Union{Int8, Int16, Int32, Int64}) == 4 +@test Compiler.unionlen(Tuple{Union{Int8, Int16, Int32, Int64}}) == 1 +@test Compiler.unionlen(Union{Int8, Int16, Int32, T} where T) == 1 + +@test Compiler.unioncomplexity(Union{}) == 0 +@test Compiler.unioncomplexity(Int8) == 0 +@test Compiler.unioncomplexity(Val{Union{Int8, Int16, Int32, Int64}}) == 0 +@test Compiler.unioncomplexity(Union{Int8, Int16}) == 1 +@test Compiler.unioncomplexity(Union{Int8, Int16, Int32, Int64}) == 3 +@test Compiler.unioncomplexity(Tuple{Union{Int8, Int16, Int32, Int64}}) == 3 +@test Compiler.unioncomplexity(Union{Int8, Int16, Int32, T} where T) == 3 +@test Compiler.unioncomplexity(Tuple{Val{T}, Union{Int8, Int16}, Int8} where T<:Union{Int8, 
Int16, Int32, Int64}) == 3 +@test Compiler.unioncomplexity(Tuple{Vararg{Tuple{Union{Int8, Int16}}}}) == 2 +@test Compiler.unioncomplexity(Tuple{Vararg{Symbol}}) == 1 +@test Compiler.unioncomplexity(Tuple{Vararg{Union{Symbol, Tuple{Vararg{Symbol}}}}}) == 3 +@test Compiler.unioncomplexity(Tuple{Vararg{Union{Symbol, Tuple{Vararg{Union{Symbol, Tuple{Vararg{Symbol}}}}}}}}) == 5 +@test Compiler.unioncomplexity(Tuple{Vararg{Union{Symbol, Tuple{Vararg{Union{Symbol, Tuple{Vararg{Union{Symbol, Tuple{Vararg{Symbol}}}}}}}}}}}) == 7 # PR 22120 function tuplemerge_test(a, b, r, commutative=true) - @test r == Core.Compiler.tuplemerge(a, b) - @test r == Core.Compiler.tuplemerge(b, a) broken=!commutative + @test r == Compiler.tuplemerge(a, b) + @test r == Compiler.tuplemerge(b, a) broken=!commutative end tuplemerge_test(Tuple{Int}, Tuple{String}, Tuple{Union{Int, String}}) tuplemerge_test(Tuple{Int}, Tuple{String, String}, Tuple) @@ -213,39 +213,39 @@ tuplemerge_test(Tuple{ComplexF64, ComplexF64, ComplexF32}, Tuple{Vararg{Union{Co Tuple{Vararg{Complex}}, false) tuplemerge_test(Tuple{}, Tuple{Complex, Vararg{Union{ComplexF32, ComplexF64}}}, Tuple{Vararg{Complex}}) -@test Core.Compiler.tmerge(Tuple{}, Union{Nothing, Tuple{ComplexF32, ComplexF32}}) == +@test Compiler.tmerge(Tuple{}, Union{Nothing, Tuple{ComplexF32, ComplexF32}}) == Union{Nothing, Tuple{}, Tuple{ComplexF32, ComplexF32}} -@test Core.Compiler.tmerge(Tuple{}, Union{Nothing, Tuple{ComplexF32}, Tuple{ComplexF32, ComplexF32}}) == +@test Compiler.tmerge(Tuple{}, Union{Nothing, Tuple{ComplexF32}, Tuple{ComplexF32, ComplexF32}}) == Union{Nothing, Tuple{Vararg{ComplexF32}}} -@test Core.Compiler.tmerge(Union{Nothing, Tuple{ComplexF32}}, Union{Nothing, Tuple{ComplexF32, ComplexF32}}) == +@test Compiler.tmerge(Union{Nothing, Tuple{ComplexF32}}, Union{Nothing, Tuple{ComplexF32, ComplexF32}}) == Union{Nothing, Tuple{ComplexF32}, Tuple{ComplexF32, ComplexF32}} -@test Core.Compiler.tmerge(Union{Nothing, Tuple{}, Tuple{ComplexF32}}, Union{Nothing, Tuple{ComplexF32, ComplexF32}}) == +@test Compiler.tmerge(Union{Nothing, Tuple{}, Tuple{ComplexF32}}, Union{Nothing, Tuple{ComplexF32, ComplexF32}}) == Union{Nothing, Tuple{Vararg{ComplexF32}}} -@test Core.Compiler.tmerge(Vector{Int}, Core.Compiler.tmerge(Vector{String}, Vector{Bool})) == +@test Compiler.tmerge(Vector{Int}, Compiler.tmerge(Vector{String}, Vector{Bool})) == Union{Vector{Bool}, Vector{Int}, Vector{String}} -@test Core.Compiler.tmerge(Vector{Int}, Core.Compiler.tmerge(Vector{String}, Union{Vector{Bool}, Vector{Symbol}})) == Vector -@test Core.Compiler.tmerge(Base.BitIntegerType, Union{}) === Base.BitIntegerType -@test Core.Compiler.tmerge(Union{}, Base.BitIntegerType) === Base.BitIntegerType -@test Core.Compiler.tmerge(Core.Compiler.fallback_ipo_lattice, Core.Compiler.InterConditional(1, Int, Union{}), Core.Compiler.InterConditional(2, String, Union{})) === Core.Compiler.Const(true) +@test Compiler.tmerge(Vector{Int}, Compiler.tmerge(Vector{String}, Union{Vector{Bool}, Vector{Symbol}})) == Vector +@test Compiler.tmerge(Base.BitIntegerType, Union{}) === Base.BitIntegerType +@test Compiler.tmerge(Union{}, Base.BitIntegerType) === Base.BitIntegerType +@test Compiler.tmerge(Compiler.fallback_ipo_lattice, Compiler.InterConditional(1, Int, Union{}), Compiler.InterConditional(2, String, Union{})) === Compiler.Const(true) # test issue behind https://github.com/JuliaLang/julia/issues/50458 -@test Core.Compiler.tmerge(Nothing, Tuple{Base.BitInteger, Int}) == Union{Nothing, Tuple{Base.BitInteger, Int}} -@test 
Core.Compiler.tmerge(Union{Nothing, Tuple{Int, Int}}, Tuple{Base.BitInteger, Int}) == Union{Nothing, Tuple{Any, Int}} -@test Core.Compiler.tmerge(Nothing, Tuple{Union{Char, String, SubString{String}, Symbol}, Int}) == Union{Nothing, Tuple{Union{Char, String, SubString{String}, Symbol}, Int}} -@test Core.Compiler.tmerge(Union{Nothing, Tuple{Char, Int}}, Tuple{Union{Char, String, SubString{String}, Symbol}, Int}) == Union{Nothing, Tuple{Union{Char, String, SubString{String}, Symbol}, Int}} -@test Core.Compiler.tmerge(Nothing, Tuple{Integer, Int}) == Union{Nothing, Tuple{Integer, Int}} -@test Core.Compiler.tmerge(Union{Nothing, Tuple{Int, Int}}, Tuple{Integer, Int}) == Union{Nothing, Tuple{Integer, Int}} -@test Core.Compiler.tmerge(Union{Nothing, Int, AbstractVector{Int}}, Vector) == Union{Nothing, Int, AbstractVector} -@test Core.Compiler.tmerge(Union{Nothing, Int, AbstractVector{Int}}, Matrix) == Union{Nothing, Int, AbstractArray} -@test Core.Compiler.tmerge(Union{Nothing, Int, AbstractVector{Int}}, Matrix{Int}) == Union{Nothing, Int, AbstractArray{Int}} -@test Core.Compiler.tmerge(Union{Nothing, Int, AbstractVector{Int}}, Array) == Union{Nothing, Int, AbstractArray} -@test Core.Compiler.tmerge(Union{Nothing, Int, AbstractArray{Int}}, Vector) == Union{Nothing, Int, AbstractArray} -@test Core.Compiler.tmerge(Union{Nothing, Int, AbstractVector}, Matrix{Int}) == Union{Nothing, Int, AbstractArray} -@test Core.Compiler.tmerge(Union{Nothing, AbstractFloat}, Integer) == Union{Nothing, AbstractFloat, Integer} -@test Core.Compiler.tmerge(AbstractVector, AbstractMatrix) == Union{AbstractVector, AbstractMatrix} -@test Core.Compiler.tmerge(Union{AbstractVector, Nothing}, AbstractMatrix) == Union{Nothing, AbstractVector, AbstractMatrix} -@test Core.Compiler.tmerge(Union{AbstractVector, Int}, AbstractMatrix) == Union{Int, AbstractVector, AbstractMatrix} -@test Core.Compiler.tmerge(Union{AbstractVector, Integer}, AbstractMatrix) == Union{Integer, AbstractArray} -@test Core.Compiler.tmerge(Union{AbstractVector, Nothing, Int}, AbstractMatrix) == Union{Nothing, Int, AbstractArray} +@test Compiler.tmerge(Nothing, Tuple{Base.BitInteger, Int}) == Union{Nothing, Tuple{Base.BitInteger, Int}} +@test Compiler.tmerge(Union{Nothing, Tuple{Int, Int}}, Tuple{Base.BitInteger, Int}) == Union{Nothing, Tuple{Any, Int}} +@test Compiler.tmerge(Nothing, Tuple{Union{Char, String, SubString{String}, Symbol}, Int}) == Union{Nothing, Tuple{Union{Char, String, SubString{String}, Symbol}, Int}} +@test Compiler.tmerge(Union{Nothing, Tuple{Char, Int}}, Tuple{Union{Char, String, SubString{String}, Symbol}, Int}) == Union{Nothing, Tuple{Union{Char, String, SubString{String}, Symbol}, Int}} +@test Compiler.tmerge(Nothing, Tuple{Integer, Int}) == Union{Nothing, Tuple{Integer, Int}} +@test Compiler.tmerge(Union{Nothing, Tuple{Int, Int}}, Tuple{Integer, Int}) == Union{Nothing, Tuple{Integer, Int}} +@test Compiler.tmerge(Union{Nothing, Int, AbstractVector{Int}}, Vector) == Union{Nothing, Int, AbstractVector} +@test Compiler.tmerge(Union{Nothing, Int, AbstractVector{Int}}, Matrix) == Union{Nothing, Int, AbstractArray} +@test Compiler.tmerge(Union{Nothing, Int, AbstractVector{Int}}, Matrix{Int}) == Union{Nothing, Int, AbstractArray{Int}} +@test Compiler.tmerge(Union{Nothing, Int, AbstractVector{Int}}, Array) == Union{Nothing, Int, AbstractArray} +@test Compiler.tmerge(Union{Nothing, Int, AbstractArray{Int}}, Vector) == Union{Nothing, Int, AbstractArray} +@test Compiler.tmerge(Union{Nothing, Int, AbstractVector}, Matrix{Int}) == Union{Nothing, 
Int, AbstractArray} +@test Compiler.tmerge(Union{Nothing, AbstractFloat}, Integer) == Union{Nothing, AbstractFloat, Integer} +@test Compiler.tmerge(AbstractVector, AbstractMatrix) == Union{AbstractVector, AbstractMatrix} +@test Compiler.tmerge(Union{AbstractVector, Nothing}, AbstractMatrix) == Union{Nothing, AbstractVector, AbstractMatrix} +@test Compiler.tmerge(Union{AbstractVector, Int}, AbstractMatrix) == Union{Int, AbstractVector, AbstractMatrix} +@test Compiler.tmerge(Union{AbstractVector, Integer}, AbstractMatrix) == Union{Integer, AbstractArray} +@test Compiler.tmerge(Union{AbstractVector, Nothing, Int}, AbstractMatrix) == Union{Nothing, Int, AbstractArray} # test that recursively more complicated types don't widen all the way to Any when there is a useful valid type upper bound # Specifically test with base types of a trivial type, a simple union, a complicated union, and a tuple. @@ -253,7 +253,7 @@ for T in (Nothing, Base.BitInteger, Union{Int, Int32, Int16, Int8}, Tuple{Int, I Ta, Tb = T, T for i in 1:10 Ta = Union{Tuple{Ta}, Nothing} - Tb = Core.Compiler.tmerge(Tuple{Tb}, Nothing) + Tb = Compiler.tmerge(Tuple{Tb}, Nothing) @test Ta <: Tb <: Union{Nothing, Tuple} end end @@ -366,9 +366,9 @@ barTuple2() = fooTuple{tuple(:y)}() @test Base.return_types(barTuple1,Tuple{})[1] == Base.return_types(barTuple2,Tuple{})[1] == fooTuple{(:y,)} # issue #6050 -@test Core.Compiler.getfield_tfunc(Core.Compiler.fallback_lattice, +@test Compiler.getfield_tfunc(Compiler.fallback_lattice, Dict{Int64,Tuple{UnitRange{Int64},UnitRange{Int64}}}, - Core.Compiler.Const(:vals)) == Memory{Tuple{UnitRange{Int64},UnitRange{Int64}}} + Compiler.Const(:vals)) == Memory{Tuple{UnitRange{Int64},UnitRange{Int64}}} # assert robustness of `getfield_tfunc` struct GetfieldRobustness @@ -647,7 +647,7 @@ f18450() = ifelse(true, Tuple{Vararg{Int}}, Tuple{Vararg}) @test f18450() == Tuple{Vararg{Int}} # issue #18569 -@test !Core.Compiler.isconstType(Type{Tuple}) +@test !Compiler.isconstType(Type{Tuple}) # issue #10880 function cat10880(a, b) @@ -778,9 +778,9 @@ end f_infer_abstract_fieldtype() = fieldtype(HasAbstractlyTypedField, :x) @test Base.return_types(f_infer_abstract_fieldtype, ()) == Any[Type{Union{Int,String}}] let fieldtype_tfunc(@nospecialize args...) = - Core.Compiler.fieldtype_tfunc(Core.Compiler.fallback_lattice, args...), - fieldtype_nothrow(@nospecialize(s0), @nospecialize(name)) = Core.Compiler.fieldtype_nothrow( - Core.Compiler.SimpleInferenceLattice.instance, s0, name) + Compiler.fieldtype_tfunc(Compiler.fallback_lattice, args...), + fieldtype_nothrow(@nospecialize(s0), @nospecialize(name)) = Compiler.fieldtype_nothrow( + Compiler.SimpleInferenceLattice.instance, s0, name) @test fieldtype_tfunc(Union{}, :x) == Union{} @test fieldtype_tfunc(Union{Type{Int32}, Int32}, Const(:x)) == Union{} @test fieldtype_tfunc(Union{Type{Base.RefValue{T}}, Type{Int32}} where {T<:Array}, Const(:x)) == Type{<:Array} @@ -823,7 +823,7 @@ end # Issue 19641 foo19641() = let a = 1.0 - Core.Compiler.return_type(x -> x + a, Tuple{Float64}) + Compiler.return_type(x -> x + a, Tuple{Float64}) end @inferred foo19641() @@ -977,15 +977,15 @@ test_no_apply(::Any) = true # issue #20033 # check return_type_tfunc for calls where no method matches -bcast_eltype_20033(f, A) = Core.Compiler.return_type(f, Tuple{eltype(A)}) +bcast_eltype_20033(f, A) = Compiler.return_type(f, Tuple{eltype(A)}) err20033(x::Float64...) 
= prod(x) @test bcast_eltype_20033(err20033, [1]) === Union{} @test Base.return_types(bcast_eltype_20033, (typeof(err20033), Vector{Int},)) == Any[Type{Union{}}] # return_type on builtins -@test Core.Compiler.return_type(tuple, Tuple{Int,Int8,Int}) === Tuple{Int,Int8,Int} +@test Compiler.return_type(tuple, Tuple{Int,Int8,Int}) === Tuple{Int,Int8,Int} # issue #21088 -@test Core.Compiler.return_type(typeof, Tuple{Int}) == Type{Int} +@test Compiler.return_type(typeof, Tuple{Int}) == Type{Int} # Inference of constant svecs @eval fsvecinf() = $(QuoteNode(Core.svec(Tuple{Int,Int}, Int)))[1] @@ -1160,7 +1160,7 @@ end struct UnionIsdefinedA; x; end struct UnionIsdefinedB; x; end let isdefined_tfunc(@nospecialize xs...) = - Core.Compiler.isdefined_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.isdefined_tfunc(Compiler.fallback_lattice, xs...) @test isdefined_tfunc(typeof(NamedTuple()), Const(0)) === Const(false) @test isdefined_tfunc(typeof(NamedTuple()), Const(1)) === Const(false) @test isdefined_tfunc(typeof((a=1,b=2)), Const(:a)) === Const(true) @@ -1257,18 +1257,18 @@ function get_linfo(@nospecialize(f), @nospecialize(t)) # get the MethodInstance for the method match match = Base._which(Base.signature_type(f, t)) precompile(match.spec_types) - return Core.Compiler.specialize_method(match) + return Compiler.specialize_method(match) end function test_const_return(@nospecialize(f), @nospecialize(t), @nospecialize(val)) - interp = Core.Compiler.NativeInterpreter() - linfo = Core.Compiler.getindex(Core.Compiler.code_cache(interp), get_linfo(f, t)) + interp = Compiler.NativeInterpreter() + linfo = Compiler.getindex(Compiler.code_cache(interp), get_linfo(f, t)) # If coverage is not enabled, make the check strict by requiring constant ABI # Otherwise, check the typed AST to make sure we return a constant. if Base.JLOptions().code_coverage == 0 - @test Core.Compiler.invoke_api(linfo) == 2 + @test Compiler.invoke_api(linfo) == 2 end - if Core.Compiler.invoke_api(linfo) == 2 + if Compiler.invoke_api(linfo) == 2 @test linfo.rettype_const == val return end @@ -1288,7 +1288,7 @@ function test_const_return(@nospecialize(f), @nospecialize(t), @nospecialize(val @test ret === val || (isa(ret, QuoteNode) && (ret::QuoteNode).value === val) continue elseif isa(ex, Expr) - if Core.Compiler.is_meta_expr_head(ex.head) + if Compiler.is_meta_expr_head(ex.head) continue end end @@ -1308,7 +1308,7 @@ function find_call(code::Core.CodeInfo, @nospecialize(func), narg) farg = typeof(getfield(farg.mod, farg.name)) end elseif isa(farg, Core.SSAValue) - farg = Core.Compiler.widenconst(code.ssavaluetypes[farg.id]) + farg = Compiler.widenconst(code.ssavaluetypes[farg.id]) else farg = typeof(farg) end @@ -1355,7 +1355,7 @@ isdefined_f3(x) = isdefined(x, 3) @test find_call(only(code_typed(isdefined_f3, Tuple{Tuple{Vararg{Int}}}))[1], isdefined, 3) let isa_tfunc(@nospecialize xs...) = - Core.Compiler.isa_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.isa_tfunc(Compiler.fallback_lattice, xs...) @test isa_tfunc(Array, Const(AbstractArray)) === Const(true) @test isa_tfunc(Array, Type{AbstractArray}) === Const(true) @test isa_tfunc(Array, Type{AbstractArray{Int}}) == Bool @@ -1395,7 +1395,7 @@ let isa_tfunc(@nospecialize xs...) = end let subtype_tfunc(@nospecialize xs...) = - Core.Compiler.subtype_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.subtype_tfunc(Compiler.fallback_lattice, xs...) 
@test subtype_tfunc(Type{<:Array}, Const(AbstractArray)) === Const(true) @test subtype_tfunc(Type{<:Array}, Type{AbstractArray}) === Const(true) @test subtype_tfunc(Type{<:Array}, Type{AbstractArray{Int}}) == Bool @@ -1447,9 +1447,9 @@ end let egal_tfunc function egal_tfunc(a, b) - 𝕃 = Core.Compiler.fallback_lattice - r = Core.Compiler.egal_tfunc(𝕃, a, b) - @test r === Core.Compiler.egal_tfunc(𝕃, b, a) + 𝕃 = Compiler.fallback_lattice + r = Compiler.egal_tfunc(𝕃, a, b) + @test r === Compiler.egal_tfunc(𝕃, b, a) return r end @test egal_tfunc(Const(12345.12345), Const(12344.12345 + 1)) == Const(true) @@ -1518,11 +1518,11 @@ egal_conditional_lattice3(x, y) = x === y + y ? "" : 1 @test Base.return_types(egal_conditional_lattice3, (Int32, Int64)) == Any[Int] let nfields_tfunc(@nospecialize xs...) = - Core.Compiler.nfields_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.nfields_tfunc(Compiler.fallback_lattice, xs...) sizeof_tfunc(@nospecialize xs...) = - Core.Compiler.sizeof_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.sizeof_tfunc(Compiler.fallback_lattice, xs...) sizeof_nothrow(@nospecialize xs...) = - Core.Compiler.sizeof_nothrow(xs...) + Compiler.sizeof_nothrow(xs...) @test sizeof_tfunc(Const(Ptr)) === sizeof_tfunc(Union{Ptr, Int, Type{Ptr{Int8}}, Type{Int}}) === Const(Sys.WORD_SIZE ÷ 8) @test sizeof_tfunc(Type{Ptr}) === Const(sizeof(Ptr)) @test sizeof_nothrow(Union{Ptr, Int, Type{Ptr{Int8}}, Type{Int}}) @@ -1563,7 +1563,7 @@ let nfields_tfunc(@nospecialize xs...) = end let typeof_tfunc(@nospecialize xs...) = - Core.Compiler.typeof_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.typeof_tfunc(Compiler.fallback_lattice, xs...) @test typeof_tfunc(Tuple{Vararg{Int}}) == Type{Tuple{Vararg{Int,N}}} where N @test typeof_tfunc(Tuple{Any}) == Type{<:Tuple{Any}} @test typeof_tfunc(Type{Array}) === DataType @@ -1577,13 +1577,13 @@ f_typeof_tfunc(x) = typeof(x) @test Base.return_types(f_typeof_tfunc, (Union{<:T, Int} where T<:Complex,)) == Any[Union{Type{Int}, Type{Complex{T}} where T<:Real}] # memoryref_tfunc, memoryrefget_tfunc, memoryrefset!_tfunc, memoryref_isassigned, memoryrefoffset_tfunc -let memoryref_tfunc(@nospecialize xs...) = Core.Compiler.memoryref_tfunc(Core.Compiler.fallback_lattice, xs...) - memoryrefget_tfunc(@nospecialize xs...) = Core.Compiler.memoryrefget_tfunc(Core.Compiler.fallback_lattice, xs...) - memoryref_isassigned_tfunc(@nospecialize xs...) = Core.Compiler.memoryref_isassigned_tfunc(Core.Compiler.fallback_lattice, xs...) - memoryrefset!_tfunc(@nospecialize xs...) = Core.Compiler.memoryrefset!_tfunc(Core.Compiler.fallback_lattice, xs...) - memoryrefoffset_tfunc(@nospecialize xs...) = Core.Compiler.memoryrefoffset_tfunc(Core.Compiler.fallback_lattice, xs...) - interp = Core.Compiler.NativeInterpreter() - builtin_tfunction(@nospecialize xs...) = Core.Compiler.builtin_tfunction(interp, xs..., nothing) +let memoryref_tfunc(@nospecialize xs...) = Compiler.memoryref_tfunc(Compiler.fallback_lattice, xs...) + memoryrefget_tfunc(@nospecialize xs...) = Compiler.memoryrefget_tfunc(Compiler.fallback_lattice, xs...) + memoryref_isassigned_tfunc(@nospecialize xs...) = Compiler.memoryref_isassigned_tfunc(Compiler.fallback_lattice, xs...) + memoryrefset!_tfunc(@nospecialize xs...) = Compiler.memoryrefset!_tfunc(Compiler.fallback_lattice, xs...) + memoryrefoffset_tfunc(@nospecialize xs...) = Compiler.memoryrefoffset_tfunc(Compiler.fallback_lattice, xs...) + interp = Compiler.NativeInterpreter() + builtin_tfunction(@nospecialize xs...) 
= Compiler.builtin_tfunction(interp, xs..., nothing) @test memoryref_tfunc(Memory{Int}) == MemoryRef{Int} @test memoryref_tfunc(Memory{Integer}) == MemoryRef{Integer} @test memoryref_tfunc(MemoryRef{Int}, Int) == MemoryRef{Int} @@ -1645,8 +1645,8 @@ let memoryref_tfunc(@nospecialize xs...) = Core.Compiler.memoryref_tfunc(Core.Co end let tuple_tfunc(@nospecialize xs...) = - Core.Compiler.tuple_tfunc(Core.Compiler.fallback_lattice, Any[xs...]) - @test Core.Compiler.widenconst(tuple_tfunc(Type{Int})) === Tuple{DataType} + Compiler.tuple_tfunc(Compiler.fallback_lattice, Any[xs...]) + @test Compiler.widenconst(tuple_tfunc(Type{Int})) === Tuple{DataType} # https://github.com/JuliaLang/julia/issues/44705 @test tuple_tfunc(Union{Type{Int32},Type{Int64}}) === Tuple{Type} @test tuple_tfunc(DataType) === Tuple{DataType} @@ -1662,8 +1662,8 @@ g23024(TT::Tuple{DataType}) = f23024(TT[1], v23024) @test Base.return_types(g23024, (Tuple{DataType},)) == Any[Int] @test g23024((UInt8,)) === 2 -@test !Core.Compiler.isconstType(Type{typeof(Union{})}) # could be Core.TypeofBottom or Type{Union{}} at runtime -@test !isa(Core.Compiler.getfield_tfunc(Core.Compiler.fallback_lattice, Type{Core.TypeofBottom}, Core.Compiler.Const(:name)), Core.Compiler.Const) +@test !Compiler.isconstType(Type{typeof(Union{})}) # could be Core.TypeofBottom or Type{Union{}} at runtime +@test !isa(Compiler.getfield_tfunc(Compiler.fallback_lattice, Type{Core.TypeofBottom}, Compiler.Const(:name)), Compiler.Const) @test Base.return_types(supertype, (Type{typeof(Union{})},)) == Any[Any] # issue #23685 @@ -1689,8 +1689,8 @@ gg13183(x::X...) where {X} = (_false13183 ? gg13183(x, x) : 0) # test the external OptimizationState constructor let linfo = get_linfo(Base.convert, Tuple{Type{Int64}, Int32}), world = UInt(23) # some small-numbered world that should be valid - interp = Core.Compiler.NativeInterpreter() - opt = Core.Compiler.OptimizationState(linfo, interp) + interp = Compiler.NativeInterpreter() + opt = Compiler.OptimizationState(linfo, interp) # make sure the state of the properties look reasonable @test opt.src !== linfo.def.source @test length(opt.src.slotflags) == linfo.def.nargs <= length(opt.src.slotnames) @@ -1726,7 +1726,7 @@ mutable struct ARef{T} @atomic x::T end let getfield_tfunc(@nospecialize xs...) = - Core.Compiler.getfield_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.getfield_tfunc(Compiler.fallback_lattice, xs...) # inference of `T.mutable` @test getfield_tfunc(Const(Int.name), Const(:flags)) == Const(0x4) @@ -1762,7 +1762,7 @@ let getfield_tfunc(@nospecialize xs...) = @test getfield_tfunc(ARef{Int},Const(:x),Bool,Bool) === Union{} end -import Core.Compiler: Const +import .Compiler: Const mutable struct XY{X,Y} x::X y::Y @@ -1774,7 +1774,7 @@ mutable struct ABCDconst const d::Union{Int,Nothing} end let setfield!_tfunc(@nospecialize xs...) = - Core.Compiler.setfield!_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.setfield!_tfunc(Compiler.fallback_lattice, xs...) @test setfield!_tfunc(Base.RefValue{Int}, Const(:x), Int) === Int @test setfield!_tfunc(Base.RefValue{Int}, Const(:x), Int, Symbol) === Int @test setfield!_tfunc(Base.RefValue{Int}, Const(1), Int) === Int @@ -1834,7 +1834,7 @@ let setfield!_tfunc(@nospecialize xs...) = @test setfield!_tfunc(ABCDconst, Const(4), Any) === Union{} end let setfield!_nothrow(@nospecialize xs...) = - Core.Compiler.setfield!_nothrow(Core.Compiler.SimpleInferenceLattice.instance, xs...) + Compiler.setfield!_nothrow(Compiler.SimpleInferenceLattice.instance, xs...) 
@test setfield!_nothrow(Base.RefValue{Int}, Const(:x), Int) @test setfield!_nothrow(Base.RefValue{Int}, Const(1), Int) @test setfield!_nothrow(Base.RefValue{Any}, Const(:x), Int) @@ -2131,12 +2131,12 @@ end # handle edge case @test (@eval Module() begin - edgecase(_) = $(Core.Compiler.InterConditional(2, Int, Any)) + edgecase(_) = $(Compiler.InterConditional(2, Int, Any)) Base.return_types(edgecase, (Any,)) # create cache Base.return_types((Any,)) do x edgecase(x) end - end) == Any[Core.Compiler.InterConditional] + end) == Any[Compiler.InterConditional] # a tricky case: if constant inference derives `Const` while non-constant inference has # derived `InterConditional`, we should not discard that constant information @@ -2235,7 +2235,7 @@ end end |> only == Int # the `fargs = nothing` edge case @test Base.return_types((Any,)) do a - Core.Compiler.return_type(invoke, Tuple{typeof(ispositive), Type{Tuple{Any}}, Any}) + Compiler.return_type(invoke, Tuple{typeof(ispositive), Type{Tuple{Any}}, Any}) end |> only == Type{Bool} # `InterConditional` handling: `abstract_call_opaque_closure` @@ -2264,27 +2264,25 @@ mutable struct AliasableConstField{S,T} f2::T end -import Core.Compiler: +import .Compiler: InferenceLattice, MustAliasesLattice, InterMustAliasesLattice, BaseInferenceLattice, SimpleInferenceLattice, IPOResultLattice, typeinf_lattice, ipo_lattice, optimizer_lattice include("newinterp.jl") @newinterp MustAliasInterpreter -let CC = Core.Compiler - CC.typeinf_lattice(::MustAliasInterpreter) = InferenceLattice(MustAliasesLattice(BaseInferenceLattice.instance)) - CC.ipo_lattice(::MustAliasInterpreter) = InferenceLattice(InterMustAliasesLattice(IPOResultLattice.instance)) - CC.optimizer_lattice(::MustAliasInterpreter) = SimpleInferenceLattice.instance -end +Compiler.typeinf_lattice(::MustAliasInterpreter) = InferenceLattice(MustAliasesLattice(BaseInferenceLattice.instance)) +Compiler.ipo_lattice(::MustAliasInterpreter) = InferenceLattice(InterMustAliasesLattice(IPOResultLattice.instance)) +Compiler.optimizer_lattice(::MustAliasInterpreter) = SimpleInferenceLattice.instance # lattice # ------- -import Core.Compiler: MustAlias, Const, PartialStruct, ⊑, tmerge +import .Compiler: MustAlias, Const, PartialStruct, ⊑, tmerge let 𝕃ᵢ = InferenceLattice(MustAliasesLattice(BaseInferenceLattice.instance)) - ⊑(@nospecialize(a), @nospecialize(b)) = Core.Compiler.:⊑(𝕃ᵢ, a, b) - tmerge(@nospecialize(a), @nospecialize(b)) = Core.Compiler.tmerge(𝕃ᵢ, a, b) - isa_tfunc(@nospecialize xs...) = Core.Compiler.isa_tfunc(𝕃ᵢ, xs...) - ifelse_tfunc(@nospecialize xs...) = Core.Compiler.ifelse_tfunc(𝕃ᵢ, xs...) + ⊑(@nospecialize(a), @nospecialize(b)) = Compiler.:⊑(𝕃ᵢ, a, b) + tmerge(@nospecialize(a), @nospecialize(b)) = Compiler.tmerge(𝕃ᵢ, a, b) + isa_tfunc(@nospecialize xs...) = Compiler.isa_tfunc(𝕃ᵢ, xs...) + ifelse_tfunc(@nospecialize xs...) = Compiler.ifelse_tfunc(𝕃ᵢ, xs...) 
@test (MustAlias(2, AliasableField{Any}, 1, Int) ⊑ Int) @test !(Int ⊑ MustAlias(2, AliasableField{Any}, 1, Int)) @@ -2553,11 +2551,11 @@ end |> only === Int end |> only === Some{Int} # handle the edge case -@eval intermustalias_edgecase(_) = $(Core.Compiler.InterMustAlias(2, Some{Any}, 1, Int)) +@eval intermustalias_edgecase(_) = $(Compiler.InterMustAlias(2, Some{Any}, 1, Int)) Base.return_types(intermustalias_edgecase, (Any,); interp=MustAliasInterpreter()) # create cache @test Base.return_types((Any,); interp=MustAliasInterpreter()) do x intermustalias_edgecase(x) -end |> only === Core.Compiler.InterMustAlias +end |> only === Compiler.InterMustAlias @test Base.infer_return_type((AliasableField,Integer,); interp=MustAliasInterpreter()) do a, x s = (;x) @@ -2768,9 +2766,9 @@ end |> only === Int # `apply_type_tfunc` accuracy for constrained type construction # https://github.com/JuliaLang/julia/issues/47089 import Core: Const -import Core.Compiler: apply_type_tfunc +import .Compiler: apply_type_tfunc struct Issue47089{A<:Number,B<:Number} end -let 𝕃 = Core.Compiler.fallback_lattice +let 𝕃 = Compiler.fallback_lattice A = Type{<:Integer} @test apply_type_tfunc(𝕃, Const(Issue47089), A, A) <: (Type{Issue47089{A,B}} where {A<:Integer, B<:Integer}) @test apply_type_tfunc(𝕃, Const(Issue47089), Const(Int), Const(Int), Const(Int)) === Union{} @@ -2789,7 +2787,7 @@ end @test only(Base.return_types(Base.afoldl, (typeof((m, n) -> () -> Returns(nothing)(m, n)), Function, Function, Vararg{Function}))) === Function let A = Tuple{A,B,C,D,E,F,G,H} where {A,B,C,D,E,F,G,H} - B = Core.Compiler.rename_unionall(A) + B = Compiler.rename_unionall(A) for i in 1:8 @test A.var != B.var && (i == 1 ? A == B : A != B) A, B = A.body, B.body @@ -3056,7 +3054,7 @@ let i end end end -Core.Compiler.renumber_ir_elements!(code28279, ssachangemap, labelchangemap) +Compiler.renumber_ir_elements!(code28279, ssachangemap, labelchangemap) @test length(code28279) === length(oldcode28279) offset = 1 let i @@ -3079,11 +3077,11 @@ end # issue #28356 # unit test to make sure countunionsplit overflows gracefully # we don't care what number is returned as long as it's large -@test Core.Compiler.unionsplitcost(Core.Compiler.JLTypeLattice(), Any[Union{Int32, Int64} for i=1:80]) > 100000 -@test Core.Compiler.unionsplitcost(Core.Compiler.JLTypeLattice(), Any[Union{Int8, Int16, Int32, Int64}]) == 2 -@test Core.Compiler.unionsplitcost(Core.Compiler.JLTypeLattice(), Any[Union{Int8, Int16, Int32, Int64}, Union{Int8, Int16, Int32, Int64}, Int8]) == 8 -@test Core.Compiler.unionsplitcost(Core.Compiler.JLTypeLattice(), Any[Union{Int8, Int16, Int32, Int64}, Union{Int8, Int16, Int32}, Int8]) == 6 -@test Core.Compiler.unionsplitcost(Core.Compiler.JLTypeLattice(), Any[Union{Int8, Int16, Int32}, Union{Int8, Int16, Int32, Int64}, Int8]) == 6 +@test Compiler.unionsplitcost(Compiler.JLTypeLattice(), Any[Union{Int32, Int64} for i=1:80]) > 100000 +@test Compiler.unionsplitcost(Compiler.JLTypeLattice(), Any[Union{Int8, Int16, Int32, Int64}]) == 2 +@test Compiler.unionsplitcost(Compiler.JLTypeLattice(), Any[Union{Int8, Int16, Int32, Int64}, Union{Int8, Int16, Int32, Int64}, Int8]) == 8 +@test Compiler.unionsplitcost(Compiler.JLTypeLattice(), Any[Union{Int8, Int16, Int32, Int64}, Union{Int8, Int16, Int32}, Int8]) == 6 +@test Compiler.unionsplitcost(Compiler.JLTypeLattice(), Any[Union{Int8, Int16, Int32}, Union{Int8, Int16, Int32, Int64}, Int8]) == 6 # make sure compiler doesn't hang in union splitting @@ -3326,8 +3324,8 @@ _rttf_test(::Int16) = 0 
_rttf_test(::Int32) = 0 _rttf_test(::Int64) = 0 _rttf_test(::Int128) = 0 -_call_rttf_test() = Core.Compiler.return_type(_rttf_test, Tuple{Any}) -@test Core.Compiler.return_type(_rttf_test, Tuple{Any}) === Int +_call_rttf_test() = Compiler.return_type(_rttf_test, Tuple{Any}) +@test Compiler.return_type(_rttf_test, Tuple{Any}) === Int @test _call_rttf_test() === Int f_with_Type_arg(::Type{T}) where {T} = T @@ -3375,7 +3373,7 @@ end @test @inferred(foo30783(2)) == Val(1) # PartialStruct tmerge -using Core.Compiler: PartialStruct, tmerge, Const, ⊑ +using .Compiler: PartialStruct, tmerge, Const, ⊑ struct FooPartial a::Int b::Int @@ -3509,14 +3507,14 @@ const DenseIdx = Union{IntRange,Integer} @test @inferred(foo_26724((), 1:4, 1:5, 1:6)) === (4, 5, 6) # Non uniformity in expressions with PartialTypeVar -@test Core.Compiler.:⊑(Core.Compiler.PartialTypeVar(TypeVar(:N), true, true), TypeVar) +@test Compiler.:⊑(Compiler.PartialTypeVar(TypeVar(:N), true, true), TypeVar) let N = TypeVar(:N) - 𝕃 = Core.Compiler.SimpleInferenceLattice.instance - argtypes = Any[Core.Compiler.Const(NTuple), - Core.Compiler.PartialTypeVar(N, true, true), - Core.Compiler.Const(Any)] + 𝕃 = Compiler.SimpleInferenceLattice.instance + argtypes = Any[Compiler.Const(NTuple), + Compiler.PartialTypeVar(N, true, true), + Compiler.Const(Any)] rt = Type{Tuple{Vararg{Any,N}}} - @test Core.Compiler.apply_type_nothrow(𝕃, argtypes, rt) + @test Compiler.apply_type_nothrow(𝕃, argtypes, rt) end # issue #33768 @@ -3629,29 +3627,29 @@ end f() = _foldl_iter(step, (Missing[],), [0.0], 1) end -@test Core.Compiler.typesubtract(Tuple{Union{Int,Char}}, Tuple{Char}, 0) == Tuple{Int} -@test Core.Compiler.typesubtract(Tuple{Union{Int,Char}}, Tuple{Char}, 1) == Tuple{Int} -@test Core.Compiler.typesubtract(Tuple{Union{Int,Char}}, Tuple{Char}, 2) == Tuple{Int} -@test Core.Compiler.typesubtract(NTuple{3, Union{Int, Char}}, Tuple{Char, Any, Any}, 0) == +@test Compiler.typesubtract(Tuple{Union{Int,Char}}, Tuple{Char}, 0) == Tuple{Int} +@test Compiler.typesubtract(Tuple{Union{Int,Char}}, Tuple{Char}, 1) == Tuple{Int} +@test Compiler.typesubtract(Tuple{Union{Int,Char}}, Tuple{Char}, 2) == Tuple{Int} +@test Compiler.typesubtract(NTuple{3, Union{Int, Char}}, Tuple{Char, Any, Any}, 0) == Tuple{Int, Union{Char, Int}, Union{Char, Int}} -@test Core.Compiler.typesubtract(NTuple{3, Union{Int, Char}}, Tuple{Char, Any, Any}, 10) == +@test Compiler.typesubtract(NTuple{3, Union{Int, Char}}, Tuple{Char, Any, Any}, 10) == Union{Tuple{Int, Char, Char}, Tuple{Int, Char, Int}, Tuple{Int, Int, Char}, Tuple{Int, Int, Int}} -@test Core.Compiler.typesubtract(NTuple{3, Union{Int, Char}}, NTuple{3, Char}, 0) == +@test Compiler.typesubtract(NTuple{3, Union{Int, Char}}, NTuple{3, Char}, 0) == NTuple{3, Union{Int, Char}} -@test Core.Compiler.typesubtract(NTuple{3, Union{Int, Char}}, NTuple{3, Char}, 10) == +@test Compiler.typesubtract(NTuple{3, Union{Int, Char}}, NTuple{3, Char}, 10) == Union{Tuple{Char, Char, Int}, Tuple{Char, Int, Char}, Tuple{Char, Int, Int}, Tuple{Int, Char, Char}, Tuple{Int, Char, Int}, Tuple{Int, Int, Char}, Tuple{Int, Int, Int}} # Test that these don't throw -@test Core.Compiler.typesubtract(Tuple{Vararg{Int}}, Tuple{Vararg{Char}}, 0) == Tuple{Vararg{Int}} -@test Core.Compiler.typesubtract(Tuple{Vararg{Int}}, Tuple{Vararg{Int}}, 0) == Union{} -@test Core.Compiler.typesubtract(Tuple{String,Int}, Tuple{String,Vararg{Int}}, 0) == Union{} -@test Core.Compiler.typesubtract(Tuple{String,Vararg{Int}}, Tuple{String,Int}, 0) == Tuple{String,Vararg{Int}} -@test 
Core.Compiler.typesubtract(NTuple{3, Real}, NTuple{3, Char}, 0) == NTuple{3, Real} -@test Core.Compiler.typesubtract(NTuple{3, Union{Real, Char}}, NTuple{2, Char}, 0) == NTuple{3, Union{Real, Char}} +@test Compiler.typesubtract(Tuple{Vararg{Int}}, Tuple{Vararg{Char}}, 0) == Tuple{Vararg{Int}} +@test Compiler.typesubtract(Tuple{Vararg{Int}}, Tuple{Vararg{Int}}, 0) == Union{} +@test Compiler.typesubtract(Tuple{String,Int}, Tuple{String,Vararg{Int}}, 0) == Union{} +@test Compiler.typesubtract(Tuple{String,Vararg{Int}}, Tuple{String,Int}, 0) == Tuple{String,Vararg{Int}} +@test Compiler.typesubtract(NTuple{3, Real}, NTuple{3, Char}, 0) == NTuple{3, Real} +@test Compiler.typesubtract(NTuple{3, Union{Real, Char}}, NTuple{2, Char}, 0) == NTuple{3, Union{Real, Char}} -@test Core.Compiler.compatible_vatuple(Tuple{String,Vararg{Int}}, Tuple{String,Vararg{Int}}) -@test !Core.Compiler.compatible_vatuple(Tuple{String,Int}, Tuple{String,Vararg{Int}}) -@test !Core.Compiler.compatible_vatuple(Tuple{String,Vararg{Int}}, Tuple{String,Int}) +@test Compiler.compatible_vatuple(Tuple{String,Vararg{Int}}, Tuple{String,Vararg{Int}}) +@test !Compiler.compatible_vatuple(Tuple{String,Int}, Tuple{String,Vararg{Int}}) +@test !Compiler.compatible_vatuple(Tuple{String,Vararg{Int}}, Tuple{String,Int}) @test Base.return_types(Issue35566.f) == [Val{:expected}] @@ -3808,8 +3806,8 @@ f_generator_splat(t::Tuple) = tuple((identity(l) for l in t)...) # Issue #36710 - sizeof(::UnionAll) tfunc correctness @test (sizeof(Ptr),) == sizeof.((Ptr,)) == sizeof.((Ptr{Cvoid},)) -@test Core.Compiler.sizeof_tfunc(Core.Compiler.fallback_lattice, UnionAll) === Int -@test !Core.Compiler.sizeof_nothrow(UnionAll) +@test Compiler.sizeof_tfunc(Compiler.fallback_lattice, UnionAll) === Int +@test !Compiler.sizeof_nothrow(UnionAll) @test only(Base.return_types(Core._expr)) === Expr @test only(Base.return_types(Core.svec, (Any,))) === Core.SimpleVector @@ -3878,9 +3876,9 @@ f_apply_cglobal(args...) = cglobal(args...) 
@test only(Base.return_types(f_apply_cglobal, Tuple{Any, Type{Int}, Type{Int}, Vararg{Type{Int}}})) == Union{} # issue #37532 -@test Core.Compiler.intrinsic_nothrow(Core.bitcast, Any[Type{Ptr{Int}}, Int]) -@test Core.Compiler.intrinsic_nothrow(Core.bitcast, Any[Type{Ptr{T}} where T, Ptr]) -@test !Core.Compiler.intrinsic_nothrow(Core.bitcast, Any[Type{Ptr}, Ptr]) +@test Compiler.intrinsic_nothrow(Core.bitcast, Any[Type{Ptr{Int}}, Int]) +@test Compiler.intrinsic_nothrow(Core.bitcast, Any[Type{Ptr{T}} where T, Ptr]) +@test !Compiler.intrinsic_nothrow(Core.bitcast, Any[Type{Ptr}, Ptr]) f37532(T, x) = (Core.bitcast(Ptr{T}, x); x) @test Base.return_types(f37532, Tuple{Any, Int}) == Any[Int] @@ -3924,16 +3922,16 @@ Base.@constprop :aggressive @noinline f_constprop_aggressive_noinline(f, x) = (f Base.@constprop :none f_constprop_none(f, x) = (f(x); Val{x}()) Base.@constprop :none @inline f_constprop_none_inline(f, x) = (f(x); Val{x}()) -@test !Core.Compiler.is_aggressive_constprop(only(methods(f_constprop_simple))) -@test !Core.Compiler.is_no_constprop(only(methods(f_constprop_simple))) -@test Core.Compiler.is_aggressive_constprop(only(methods(f_constprop_aggressive))) -@test !Core.Compiler.is_no_constprop(only(methods(f_constprop_aggressive))) -@test Core.Compiler.is_aggressive_constprop(only(methods(f_constprop_aggressive_noinline))) -@test !Core.Compiler.is_no_constprop(only(methods(f_constprop_aggressive_noinline))) -@test !Core.Compiler.is_aggressive_constprop(only(methods(f_constprop_none))) -@test Core.Compiler.is_no_constprop(only(methods(f_constprop_none))) -@test !Core.Compiler.is_aggressive_constprop(only(methods(f_constprop_none_inline))) -@test Core.Compiler.is_no_constprop(only(methods(f_constprop_none_inline))) +@test !Compiler.is_aggressive_constprop(only(methods(f_constprop_simple))) +@test !Compiler.is_no_constprop(only(methods(f_constprop_simple))) +@test Compiler.is_aggressive_constprop(only(methods(f_constprop_aggressive))) +@test !Compiler.is_no_constprop(only(methods(f_constprop_aggressive))) +@test Compiler.is_aggressive_constprop(only(methods(f_constprop_aggressive_noinline))) +@test !Compiler.is_no_constprop(only(methods(f_constprop_aggressive_noinline))) +@test !Compiler.is_aggressive_constprop(only(methods(f_constprop_none))) +@test Compiler.is_no_constprop(only(methods(f_constprop_none))) +@test !Compiler.is_aggressive_constprop(only(methods(f_constprop_none_inline))) +@test Compiler.is_no_constprop(only(methods(f_constprop_none_inline))) # make sure that improvements to the compiler don't render the annotation effectless. 
@test Base.return_types((Function,)) do f @@ -3989,12 +3987,12 @@ end @testset "switchtupleunion" begin # signature tuple let - tunion = Core.Compiler.switchtupleunion(Tuple{Union{Int32,Int64}, Nothing}) + tunion = Compiler.switchtupleunion(Tuple{Union{Int32,Int64}, Nothing}) @test Tuple{Int32, Nothing} in tunion @test Tuple{Int64, Nothing} in tunion end let - tunion = Core.Compiler.switchtupleunion(Tuple{Union{Int32,Int64}, Union{Float32,Float64}, Nothing}) + tunion = Compiler.switchtupleunion(Tuple{Union{Int32,Int64}, Union{Float32,Float64}, Nothing}) @test Tuple{Int32, Float32, Nothing} in tunion @test Tuple{Int32, Float64, Nothing} in tunion @test Tuple{Int64, Float32, Nothing} in tunion @@ -4003,13 +4001,13 @@ end # argtypes let - tunion = Core.Compiler.switchtupleunion(Core.Compiler.ConstsLattice(), Any[Union{Int32,Int64}, Core.Const(nothing)]) + tunion = Compiler.switchtupleunion(Compiler.ConstsLattice(), Any[Union{Int32,Int64}, Core.Const(nothing)]) @test length(tunion) == 2 @test Any[Int32, Core.Const(nothing)] in tunion @test Any[Int64, Core.Const(nothing)] in tunion end let - tunion = Core.Compiler.switchtupleunion(Core.Compiler.ConstsLattice(), Any[Union{Int32,Int64}, Union{Float32,Float64}, Core.Const(nothing)]) + tunion = Compiler.switchtupleunion(Compiler.ConstsLattice(), Any[Union{Int32,Int64}, Union{Float32,Float64}, Core.Const(nothing)]) @test length(tunion) == 4 @test Any[Int32, Float32, Core.Const(nothing)] in tunion @test Any[Int32, Float64, Core.Const(nothing)] in tunion @@ -4118,10 +4116,10 @@ callsig_backprop_bailout(::Val) = 2 callsig_backprop_addinteger(a::Integer, b::Integer) = a + b # results in too many matching methods and triggers `bail_out_call`) @test Base.infer_return_type(callsig_backprop_addinteger) == Any let effects = Base.infer_effects(callsig_backprop_addinteger) - @test !Core.Compiler.is_consistent(effects) - @test !Core.Compiler.is_effect_free(effects) - @test !Core.Compiler.is_nothrow(effects) - @test !Core.Compiler.is_terminates(effects) + @test !Compiler.is_consistent(effects) + @test !Compiler.is_effect_free(effects) + @test !Compiler.is_nothrow(effects) + @test !Compiler.is_terminates(effects) end callsig_backprop_anti(::Any) = :any callsig_backprop_anti(::Int) = :int @@ -4249,16 +4247,16 @@ end let # Test the presence of PhiNodes in lowered IR by taking the above function, # running it through SSA conversion and then putting it into an opaque # closure. 
- mi = Core.Compiler.specialize_method(first(methods(f_convert_me_to_ir)), + mi = Compiler.specialize_method(first(methods(f_convert_me_to_ir)), Tuple{Bool, Float64}, Core.svec()) ci = Base.uncompressed_ast(mi.def) ci.slottypes = Any[ Any for i = 1:length(ci.slotflags) ] ci.ssavaluetypes = Any[Any for i = 1:ci.ssavaluetypes] - sv = Core.Compiler.OptimizationState(mi, Core.Compiler.NativeInterpreter()) - ir = Core.Compiler.convert_to_ircode(ci, sv) - ir = Core.Compiler.slot2reg(ir, ci, sv) - ir = Core.Compiler.compact!(ir) - Core.Compiler.replace_code_newstyle!(ci, ir) + sv = Compiler.OptimizationState(mi, Compiler.NativeInterpreter()) + ir = Compiler.convert_to_ircode(ci, sv) + ir = Compiler.slot2reg(ir, ci, sv) + ir = Compiler.compact!(ir) + Compiler.replace_code_newstyle!(ci, ir) ci.ssavaluetypes = length(ci.ssavaluetypes) @test any(x->isa(x, Core.PhiNode), ci.code) oc = @eval b->$(Expr(:new_opaque_closure, Tuple{Bool, Float64}, Any, Any, true, @@ -4436,7 +4434,7 @@ let x = Tuple{Int,Any}[ #=19=# (0, Expr(:pop_exception, Core.SSAValue(2))) #=20=# (0, Core.ReturnNode(Core.SlotNumber(3))) ] - (;handler_at, handlers) = Core.Compiler.compute_trycatch(last.(x)) + (;handler_at, handlers) = Compiler.compute_trycatch(last.(x)) @test map(x->x[1] == 0 ? 0 : handlers[x[1]].enter_idx, handler_at) == first.(x) end @@ -4486,7 +4484,7 @@ let # Vararg #=va=# Bound, unbound, # => Tuple{Integer,Integer} (invalid `TypeVar` widened beforehand) } where Bound<:Integer - argtypes = Core.Compiler.most_general_argtypes(method, specTypes) + argtypes = Compiler.most_general_argtypes(method, specTypes) popfirst!(argtypes) # N.B.: `argtypes` do not have va processing applied yet @test length(argtypes) == 12 @@ -4556,7 +4554,7 @@ end |> only == Tuple{Int,Int} end |> only == Int # form PartialStruct for mutables with `const` field -import Core.Compiler: Const, ⊑ +import .Compiler: Const, ⊑ mutable struct PartialMutable{S,T} const s::S t::T @@ -4633,9 +4631,9 @@ end # issue #43784 @testset "issue #43784" begin - ⊑ = Core.Compiler.partialorder(Core.Compiler.fallback_lattice) - ⊔ = Core.Compiler.join(Core.Compiler.fallback_lattice) - 𝕃 = Core.Compiler.fallback_lattice + ⊑ = Compiler.partialorder(Compiler.fallback_lattice) + ⊔ = Compiler.join(Compiler.fallback_lattice) + 𝕃 = Compiler.fallback_lattice Const, PartialStruct = Core.Const, Core.PartialStruct let init = Base.ImmutableDict{Any,Any}() a = Const(init) @@ -4717,32 +4715,32 @@ end end |> only === Union{} a = Val{Union{}} - a = Core.Compiler.tmerge(Union{a, Val{a}}, a) + a = Compiler.tmerge(Union{a, Val{a}}, a) @test a == Union{Val{Union{}}, Val{Val{Union{}}}} - a = Core.Compiler.tmerge(Union{a, Val{a}}, a) + a = Compiler.tmerge(Union{a, Val{a}}, a) @test a == Union{Val{Union{}}, Val{Val{Union{}}}, Val{Union{Val{Union{}}, Val{Val{Union{}}}}}} - a = Core.Compiler.tmerge(Union{a, Val{a}}, a) + a = Compiler.tmerge(Union{a, Val{a}}, a) @test a == Val a = Val{Union{}} - a = Core.Compiler.tmerge(Core.Compiler.JLTypeLattice(), Val{<:a}, a) + a = Compiler.tmerge(Compiler.JLTypeLattice(), Val{<:a}, a) @test_broken a != Val{<:Val{Union{}}} @test_broken a == Val{<:Val} || a == Val a = Tuple{Vararg{Tuple{}}} - a = Core.Compiler.tmerge(Core.Compiler.JLTypeLattice(), Tuple{a}, a) + a = Compiler.tmerge(Compiler.JLTypeLattice(), Tuple{a}, a) @test a == Union{Tuple{Tuple{Vararg{Tuple{}}}}, Tuple{Vararg{Tuple{}}}} - a = Core.Compiler.tmerge(Core.Compiler.JLTypeLattice(), Tuple{a}, a) + a = Compiler.tmerge(Compiler.JLTypeLattice(), Tuple{a}, a) @test a == 
Tuple{Vararg{Union{Tuple{Tuple{Vararg{Tuple{}}}}, Tuple{Vararg{Tuple{}}}}}} - a = Core.Compiler.tmerge(Core.Compiler.JLTypeLattice(), Tuple{a}, a) + a = Compiler.tmerge(Compiler.JLTypeLattice(), Tuple{a}, a) @test a == Tuple - a = Core.Compiler.tmerge(Core.Compiler.JLTypeLattice(), Tuple{a}, a) + a = Compiler.tmerge(Compiler.JLTypeLattice(), Tuple{a}, a) @test a == Tuple end -let ⊑ = Core.Compiler.partialorder(Core.Compiler.fallback_lattice) - ⊔ = Core.Compiler.join(Core.Compiler.fallback_lattice) - 𝕃 = Core.Compiler.fallback_lattice +let ⊑ = Compiler.partialorder(Compiler.fallback_lattice) + ⊔ = Compiler.join(Compiler.fallback_lattice) + 𝕃 = Compiler.fallback_lattice Const, PartialStruct = Core.Const, Core.PartialStruct @test (Const((1,2)) ⊑ PartialStruct(𝕃, Tuple{Int,Int}, Any[Const(1),Int])) @@ -4789,18 +4787,18 @@ end # at top level. @test let Base.Experimental.@force_compile - Core.Compiler.return_type(+, NTuple{2, Rational}) + Compiler.return_type(+, NTuple{2, Rational}) end == Rational # vararg-tuple comparison within `Compiler.PartialStruct` # https://github.com/JuliaLang/julia/issues/44965 -let 𝕃ᵢ = Core.Compiler.fallback_lattice - t = Core.Compiler.tuple_tfunc(𝕃ᵢ, Any[Const(42), Vararg{Any}]) - @test Core.Compiler.issimplertype(𝕃ᵢ, t, t) +let 𝕃ᵢ = Compiler.fallback_lattice + t = Compiler.tuple_tfunc(𝕃ᵢ, Any[Const(42), Vararg{Any}]) + @test Compiler.issimplertype(𝕃ᵢ, t, t) - t = Core.Compiler.tuple_tfunc(𝕃ᵢ, Any[Const(42), Vararg{Union{}}]) + t = Compiler.tuple_tfunc(𝕃ᵢ, Any[Const(42), Vararg{Union{}}]) @test t === Const((42,)) - t = Core.Compiler.tuple_tfunc(𝕃ᵢ, Any[Const(42), Int, Vararg{Union{}}]) + t = Compiler.tuple_tfunc(𝕃ᵢ, Any[Const(42), Int, Vararg{Union{}}]) @test t.typ === Tuple{Int, Int} @test t.fields == Any[Const(42), Int] end @@ -4900,7 +4898,7 @@ let src = code_typed1() do end # Test that Const ⊑ PartialStruct respects vararg -@test Const((1,2)) ⊑ PartialStruct(Core.Compiler.fallback_lattice, Tuple{Vararg{Int}}, [Const(1), Vararg{Int}]) +@test Const((1,2)) ⊑ PartialStruct(Compiler.fallback_lattice, Tuple{Vararg{Int}}, [Const(1), Vararg{Int}]) # Test that semi-concrete interpretation doesn't break on functions with while loops in them. Base.@assume_effects :consistent :effect_free :terminates_globally function pure_annotated_loop(x::Int, y::Int) @@ -4926,7 +4924,7 @@ invoke_concretized1(a::Integer) = a > 0 ? "integer" : nothing # check if `invoke(invoke_concretized1, Tuple{Integer}, ::Int)` is foldable @test Base.infer_effects((Int,)) do a @invoke invoke_concretized1(a::Integer) -end |> Core.Compiler.is_foldable +end |> Compiler.is_foldable @test Base.return_types() do @invoke invoke_concretized1(42::Integer) end |> only === String @@ -4936,7 +4934,7 @@ invoke_concretized2(a::Integer) = a > 0 ? :integer : nothing # check if `invoke(invoke_concretized2, Tuple{Integer}, ::Int)` is foldable @test Base.infer_effects((Int,)) do a @invoke invoke_concretized2(a::Integer) -end |> Core.Compiler.is_foldable +end |> Compiler.is_foldable @test let Base.Experimental.@force_compile @invoke invoke_concretized2(42::Integer) @@ -5015,15 +5013,15 @@ g() = empty_nt_values(Base.inferencebarrier(Tuple{})) # is to test the case where inference limited a recursion, but then a forced constprop nevertheless managed # to terminate the call. 
@newinterp RecurseInterpreter -let CC = Core.Compiler - function CC.const_prop_rettype_heuristic(interp::RecurseInterpreter, result::CC.MethodCallResult, - si::CC.StmtInfo, sv::CC.AbsIntState, force::Bool) - if result.rt isa CC.LimitedAccuracy - return force # allow forced constprop to recurse into unresolved cycles - end - return @invoke CC.const_prop_rettype_heuristic(interp::CC.AbstractInterpreter, result::CC.MethodCallResult, - si::CC.StmtInfo, sv::CC.AbsIntState, force::Bool) +function Compiler.const_prop_rettype_heuristic( + interp::RecurseInterpreter, result::Compiler.MethodCallResult, + si::Compiler.StmtInfo, sv::Compiler.AbsIntState, force::Bool) + if result.rt isa Compiler.LimitedAccuracy + return force # allow forced constprop to recurse into unresolved cycles end + return @invoke Compiler.const_prop_rettype_heuristic( + interp::Compiler.AbstractInterpreter, result::Compiler.MethodCallResult, + si::Compiler.StmtInfo, sv::Compiler.AbsIntState, force::Bool) end Base.@constprop :aggressive type_level_recurse1(x...) = x[1] == 2 ? 1 : (length(x) > 100 ? x : type_level_recurse2(x[1] + 1, x..., x...)) Base.@constprop :aggressive type_level_recurse2(x...) = type_level_recurse1(x...) @@ -5035,24 +5033,11 @@ type_level_recurse_entry() = Val{type_level_recurse1(1)}() f_no_bail_effects_any(x::Any) = x f_no_bail_effects_any(x::NamedTuple{(:x,), Tuple{Any}}) = getfield(x, 1) g_no_bail_effects_any(x::Any) = f_no_bail_effects_any(x) -@test Core.Compiler.is_foldable_nothrow(Base.infer_effects(g_no_bail_effects_any, Tuple{Any})) +@test Compiler.is_foldable_nothrow(Base.infer_effects(g_no_bail_effects_any, Tuple{Any})) # issue #48374 @test (() -> Union{<:Nothing})() == Nothing -# :static_parameter accuracy -unknown_sparam_throw(::Union{Nothing, Type{T}}) where T = @isdefined(T) ? T::Type : nothing -unknown_sparam_nothrow1(x::Ref{T}) where T = @isdefined(T) ? T::Type : nothing -unknown_sparam_nothrow2(x::Ref{Ref{T}}) where T = @isdefined(T) ? 
T::Type : nothing -@test only(Base.return_types(unknown_sparam_throw, (Type{Int},))) == Type{Int} -@test only(Base.return_types(unknown_sparam_throw, (Type{<:Integer},))) == Type{<:Integer} -@test only(Base.return_types(unknown_sparam_throw, (Type,))) == Union{Nothing, Type} -@test_broken only(Base.return_types(unknown_sparam_throw, (Nothing,))) === Nothing -@test_broken only(Base.return_types(unknown_sparam_throw, (Union{Type{Int},Nothing},))) === Union{Nothing,Type{Int}} -@test only(Base.return_types(unknown_sparam_throw, (Any,))) === Union{Nothing,Type} -@test only(Base.return_types(unknown_sparam_nothrow1, (Ref,))) === Type -@test only(Base.return_types(unknown_sparam_nothrow2, (Ref{Ref{T}} where T,))) === Type - struct Issue49027{Ty<:Number} x::Ty end @@ -5200,9 +5185,9 @@ end |> only === Tuple{Int,Symbol} end end) == Type{Nothing} -# Test that Core.Compiler.return_type inference works for the 1-arg version +# Test that Compiler.return_type inference works for the 1-arg version @test Base.return_types() do - Core.Compiler.return_type(Tuple{typeof(+), Int, Int}) + Compiler.return_type(Tuple{typeof(+), Int, Int}) end |> only == Type{Int} # Test that NamedTuple abstract iteration works for PartialStruct/Const @@ -5252,13 +5237,13 @@ let src = code_typed1((Bool,Base.RefValue{String}, Base.RefValue{Any},Int,)) do end struct Issue49785{S, T<:S} end -let 𝕃 = Core.Compiler.SimpleInferenceLattice.instance - argtypes = Any[Core.Compiler.Const(Issue49785), +let 𝕃 = Compiler.SimpleInferenceLattice.instance + argtypes = Any[Compiler.Const(Issue49785), Union{Type{String},Type{Int}}, Union{Type{String},Type{Int}}] rt = Type{Issue49785{<:Any, Int}} # the following should not throw - @test !Core.Compiler.apply_type_nothrow(𝕃, argtypes, rt) + @test !Compiler.apply_type_nothrow(𝕃, argtypes, rt) @test code_typed() do S = Union{Type{String},Type{Int}}[Int][1] map(T -> Issue49785{S,T}, (a = S,)) @@ -5715,7 +5700,7 @@ let x = 1, _Any = Any end # Issue #51927 -let 𝕃 = Core.Compiler.fallback_lattice +let 𝕃 = Compiler.fallback_lattice @test apply_type_tfunc(𝕃, Const(Tuple{Vararg{Any,N}} where N), Int) == Type{NTuple{_A, Any}} where _A end @@ -5738,7 +5723,7 @@ end @eval function has_tuin() $(Expr(:throw_undef_if_not, :x, false)) end -@test Core.Compiler.return_type(has_tuin, Tuple{}) === Union{} +@test Compiler.return_type(has_tuin, Tuple{}) === Union{} @test_throws UndefVarError has_tuin() function gen_tuin_from_arg(world::UInt, source, _, _) @@ -5793,7 +5778,7 @@ end # We want to make sure that both this returns `Tuple` and that # it doesn't infinite loop inside inference. -@test Core.Compiler.return_type(gen_infinite_loop_ssa, Tuple{}) === Tuple +@test Compiler.return_type(gen_infinite_loop_ssa, Tuple{}) === Tuple # inference local cache lookup with extended lattice elements that may be transformed # by `matching_cache_argtypes` @@ -5829,7 +5814,7 @@ function foo54341(a, b, c, d, args...) end bar54341(args...) = foo54341(4, args...) -@test Core.Compiler.return_type(bar54341, Tuple{Vararg{Int}}) === Int +@test Compiler.return_type(bar54341, Tuple{Vararg{Int}}) === Int # `PartialStruct` for partially initialized structs: struct PartiallyInitialized1 @@ -5883,47 +5868,47 @@ end == Val # 2. 
getfield modeling for partial struct @test Base.infer_effects((Any,Any); optimize=false) do a, b getfield(PartiallyInitialized1(a, b), :b) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Any,Any,Symbol,); optimize=false) do a, b, f getfield(PartiallyInitialized1(a, b), f, #=boundscheck=#false) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects((Any,Any,Any); optimize=false) do a, b, c getfield(PartiallyInitialized1(a, b, c), :c) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Any,Any,Any,Symbol); optimize=false) do a, b, c, f getfield(PartiallyInitialized1(a, b, c), f, #=boundscheck=#false) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Any,Any); optimize=false) do a, b getfield(PartiallyInitialized2(a, b), :b) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Any,Any,Symbol,); optimize=false) do a, b, f getfield(PartiallyInitialized2(a, b), f, #=boundscheck=#false) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects((Any,Any,Any); optimize=false) do a, b, c getfield(PartiallyInitialized2(a, b, c), :c) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Any,Any,Any,Symbol); optimize=false) do a, b, c, f getfield(PartiallyInitialized2(a, b, c), f, #=boundscheck=#false) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow # isdefined-Conditionals @test Base.infer_effects((Base.RefValue{Any},)) do x if isdefined(x, :x) return getfield(x, :x) end -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Base.RefValue{Any},)) do x if isassigned(x) return x[] end -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Any,Any); optimize=false) do a, c x = PartiallyInitialized2(a) x.c = c if isdefined(x, :c) return x.b end -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects((PartiallyInitialized2,); optimize=false) do x if isdefined(x, :b) if isdefined(x, :c) @@ -5932,14 +5917,14 @@ end |> !Core.Compiler.is_nothrow return x.b end return nothing -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Bool,Int,); optimize=false) do c, b x = c ? PartiallyInitialized1(true) : PartiallyInitialized1(true, b) if isdefined(x, :b) return Val(x.a), x.b end return nothing -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow # refine `undef` information from `@isdefined` check function isdefined_nothrow(c, x) @@ -5952,7 +5937,7 @@ function isdefined_nothrow(c, x) end return zero(Int) end -@test Core.Compiler.is_nothrow(Base.infer_effects(isdefined_nothrow, (Bool,Int))) +@test Compiler.is_nothrow(Base.infer_effects(isdefined_nothrow, (Bool,Int))) @test !any(first(only(code_typed(isdefined_nothrow, (Bool,Int)))).code) do @nospecialize x Meta.isexpr(x, :throw_undef_if_not) end @@ -5966,7 +5951,7 @@ end # InterConditional rt with Vararg argtypes fcondvarargs(a, b, c, d) = isa(d, Int64) gcondvarargs(a, x...) = return fcondvarargs(a, x...) ? 
isa(a, Int64) : !isa(a, Int64) -@test Core.Compiler.return_type(gcondvarargs, Tuple{Vararg{Any}}) === Bool +@test Compiler.return_type(gcondvarargs, Tuple{Vararg{Any}}) === Bool # JuliaLang/julia#55627: argtypes check in `abstract_call_opaque_closure` issue55627_make_oc() = Base.Experimental.@opaque (x::Int) -> 2x @@ -6002,13 +5987,13 @@ f_invoke_nothrow(::Number) = :number f_invoke_nothrow(::Int) = :int @test Base.infer_effects((Int,)) do x @invoke f_invoke_nothrow(x::Number) -end |> Core.Compiler.is_nothrow +end |> Compiler.is_nothrow @test Base.infer_effects((Char,)) do x @invoke f_invoke_nothrow(x::Number) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow @test Base.infer_effects((Union{Nothing,Int},)) do x @invoke f_invoke_nothrow(x::Number) -end |> !Core.Compiler.is_nothrow +end |> !Compiler.is_nothrow # `exct` modeling for `invoke` calls f_invoke_exct(x::Number) = x < 0 ? throw(x) : x @@ -6042,7 +6027,7 @@ end t155751 = Union{AbstractArray{UInt8, 4}, Array{Float32, 4}, Grid55751{Float32, 3, _A} where _A} t255751 = Array{Float32, 3} -@test Core.Compiler.tmerge_types_slow(t155751,t255751) == AbstractArray # shouldn't hang +@test Compiler.tmerge_types_slow(t155751,t255751) == AbstractArray # shouldn't hang issue55882_nfields(x::Union{T,Nothing}) where T<:Number = nfields(x) @test Base.infer_return_type(issue55882_nfields) <: Int @@ -6108,9 +6093,7 @@ end === Union{} global swapglobal!_must_throw @newinterp SwapGlobalInterp -let CC = Base.Compiler - CC.InferenceParams(::SwapGlobalInterp) = CC.InferenceParams(; assume_bindings_static=true) -end +Compiler.InferenceParams(::SwapGlobalInterp) = Compiler.InferenceParams(; assume_bindings_static=true) function func_swapglobal!_must_throw(x) swapglobal!(@__MODULE__, :swapglobal!_must_throw, x) end diff --git a/Compiler/test/inline.jl b/Compiler/test/inline.jl index 5dbf0a01db4a8..158d9f545220a 100644 --- a/Compiler/test/inline.jl +++ b/Compiler/test/inline.jl @@ -276,7 +276,7 @@ f34900(x, y::Int) = y f34900(x::Int, y::Int) = invoke(f34900, Tuple{Int, Any}, x, y) @test fully_eliminated(f34900, Tuple{Int, Int}; retval=Core.Argument(2)) -using Core.Compiler: is_declared_inline, is_declared_noinline +using .Compiler: is_declared_inline, is_declared_noinline @testset "is_declared_[no]inline" begin @test is_declared_inline(only(methods(@inline x -> x))) @@ -297,7 +297,7 @@ using Core.Compiler: is_declared_inline, is_declared_noinline @test !is_declared_noinline(only(methods() do x x end)) end -using Core.Compiler: is_inlineable, set_inlineable! +using .Compiler: is_inlineable, set_inlineable! @testset "basic set_inlineable! functionality" begin ci = code_typed1() do @@ -345,8 +345,8 @@ struct NonIsBitsDimsUndef dims::NTuple{N, Int} where N NonIsBitsDimsUndef() = new() end -@test Core.Compiler.is_inlineable_constant(NonIsBitsDimsUndef()) -@test !Core.Compiler.is_inlineable_constant((("a"^1000, "b"^1000), nothing)) +@test Compiler.is_inlineable_constant(NonIsBitsDimsUndef()) +@test !Compiler.is_inlineable_constant((("a"^1000, "b"^1000), nothing)) # More nothrow modeling for apply_type f_apply_type_typeof(x) = (Ref{typeof(x)}; nothing) @@ -629,8 +629,8 @@ g41299(f::Tf, args::Vararg{Any,N}) where {Tf,N} = f(args...) 
# https://github.com/JuliaLang/julia/issues/42078 # idempotency of callsite inlining function getcache(mi::Core.MethodInstance) - cache = Core.Compiler.code_cache(Core.Compiler.NativeInterpreter()) - codeinst = Core.Compiler.get(cache, mi, nothing) + cache = Compiler.code_cache(Compiler.NativeInterpreter()) + codeinst = Compiler.get(cache, mi, nothing) return isnothing(codeinst) ? nothing : codeinst end @noinline f42078(a) = sum(sincos(a)) @@ -965,7 +965,7 @@ let # aggressive inlining of single, abstract method match end @inline isGoodType2(cnd, @nospecialize x::Type) = - x !== Any && !(@noinline (cnd ? Core.Compiler.isType : _has_free_typevars)(x)) + x !== Any && !(@noinline (cnd ? Compiler.isType : _has_free_typevars)(x)) let # aggressive inlining of single, abstract method match (with constant-prop'ed) src = code_typed((Type, Any,)) do x, y isGoodType2(true, x), isGoodType2(true, y) @@ -1203,7 +1203,7 @@ end end # Test that inlining doesn't accidentally delete a bad return_type call -f_bad_return_type() = Core.Compiler.return_type(+, 1, 2) +f_bad_return_type() = Compiler.return_type(+, 1, 2) @test_throws MethodError f_bad_return_type() # Test that inlining doesn't leave useless globalrefs around @@ -1218,7 +1218,7 @@ end # Test that we can inline a finalizer for a struct that does not otherwise escape @noinline nothrow_side_effect(x) = Base.@assume_effects :total !:effect_free @ccall jl_(x::Any)::Cvoid -@test Core.Compiler.is_finalizer_inlineable(Base.infer_effects(nothrow_side_effect, (Nothing,))) +@test Compiler.is_finalizer_inlineable(Base.infer_effects(nothrow_side_effect, (Nothing,))) mutable struct DoAllocNoEscape function DoAllocNoEscape() @@ -1403,7 +1403,7 @@ init_finalization_count!() = FINALIZATION_COUNT[] = 0 get_finalization_count() = FINALIZATION_COUNT[] @noinline add_finalization_count!(x) = FINALIZATION_COUNT[] += x @noinline Base.@assume_effects :nothrow safeprint(io::IO, x...) 
= (@nospecialize; print(io, x...)) -@test Core.Compiler.is_finalizer_inlineable(Base.infer_effects(add_finalization_count!, (Int,))) +@test Compiler.is_finalizer_inlineable(Base.infer_effects(add_finalization_count!, (Int,))) mutable struct DoAllocWithField x::Int @@ -1634,7 +1634,7 @@ end let effects = Base.infer_effects((Vector{T}, T)) do xs, x $f(xs, x) end - @test Core.Compiler.Core.Compiler.is_terminates(effects) + @test Compiler.Compiler.is_terminates(effects) end let src = code_typed1((Vector{T}, T, T)) do xs, x, y $f(xs, x, y) @@ -1651,7 +1651,7 @@ end end end -using Core.Compiler: is_declared_inline, is_declared_noinline +using .Compiler: is_declared_inline, is_declared_noinline # https://github.com/JuliaLang/julia/issues/45050 @testset "propagate :meta annotations to keyword sorter methods" begin @@ -1665,12 +1665,12 @@ using Core.Compiler: is_declared_inline, is_declared_noinline @test is_declared_noinline(only(methods(Core.kwcall, (Any, typeof(f), Vararg)))) end let Base.@constprop :aggressive f(::Any; x::Int=1) = 2x - @test Core.Compiler.is_aggressive_constprop(only(methods(f))) - @test Core.Compiler.is_aggressive_constprop(only(methods(Core.kwcall, (Any, typeof(f), Vararg)))) + @test Compiler.is_aggressive_constprop(only(methods(f))) + @test Compiler.is_aggressive_constprop(only(methods(Core.kwcall, (Any, typeof(f), Vararg)))) end let Base.@constprop :none f(::Any; x::Int=1) = 2x - @test Core.Compiler.is_no_constprop(only(methods(f))) - @test Core.Compiler.is_no_constprop(only(methods(Core.kwcall, (Any, typeof(f), Vararg)))) + @test Compiler.is_no_constprop(only(methods(f))) + @test Compiler.is_no_constprop(only(methods(Core.kwcall, (Any, typeof(f), Vararg)))) end # @nospecialize let f(@nospecialize(A::Any); x::Int=1) = 2x @@ -1683,19 +1683,19 @@ using Core.Compiler: is_declared_inline, is_declared_noinline end # Base.@assume_effects let Base.@assume_effects :notaskstate f(::Any; x::Int=1) = 2x - @test Core.Compiler.decode_effects_override(only(methods(f)).purity).notaskstate - @test Core.Compiler.decode_effects_override(only(methods(Core.kwcall, (Any, typeof(f), Vararg))).purity).notaskstate + @test Compiler.decode_effects_override(only(methods(f)).purity).notaskstate + @test Compiler.decode_effects_override(only(methods(Core.kwcall, (Any, typeof(f), Vararg))).purity).notaskstate end # propagate multiple metadata also let @inline Base.@assume_effects :notaskstate Base.@constprop :aggressive f(::Any; x::Int=1) = (@nospecialize; 2x) @test is_declared_inline(only(methods(f))) - @test Core.Compiler.is_aggressive_constprop(only(methods(f))) + @test Compiler.is_aggressive_constprop(only(methods(f))) @test is_declared_inline(only(methods(Core.kwcall, (Any, typeof(f), Vararg)))) - @test Core.Compiler.is_aggressive_constprop(only(methods(Core.kwcall, (Any, typeof(f), Vararg)))) + @test Compiler.is_aggressive_constprop(only(methods(Core.kwcall, (Any, typeof(f), Vararg)))) @test only(methods(f)).nospecialize == -1 @test only(methods(Core.kwcall, (Any, typeof(f), Vararg))).nospecialize == -1 - @test Core.Compiler.decode_effects_override(only(methods(f)).purity).notaskstate - @test Core.Compiler.decode_effects_override(only(methods(Core.kwcall, (Any, typeof(f), Vararg))).purity).notaskstate + @test Compiler.decode_effects_override(only(methods(f)).purity).notaskstate + @test Compiler.decode_effects_override(only(methods(Core.kwcall, (Any, typeof(f), Vararg))).purity).notaskstate end end @@ -1766,7 +1766,7 @@ end # Test getfield modeling of Type{Ref{_A}} where _A let 
getfield_tfunc(@nospecialize xs...) = - Core.Compiler.getfield_tfunc(Core.Compiler.fallback_lattice, xs...) + Compiler.getfield_tfunc(Compiler.fallback_lattice, xs...) @test getfield_tfunc(Type, Core.Const(:parameters)) !== Union{} @test !isa(getfield_tfunc(Type{Tuple{Union{Int, Float64}, Int}}, Core.Const(:name)), Core.Const) end @@ -1846,15 +1846,15 @@ end func_mul_int(a::Int, b::Int) = Core.Intrinsics.mul_int(a, b) multi_inlining1(a::Int, b::Int) = @noinline func_mul_int(a, b) let i::Int, continue_::Bool - interp = Core.Compiler.NativeInterpreter() + interp = Compiler.NativeInterpreter() # check if callsite `@noinline` annotation works ir, = only(Base.code_ircode(multi_inlining1, (Int,Int); optimize_until="inlining", interp)) i = findfirst(isinvoke(:func_mul_int), ir.stmts.stmt) @test i !== nothing # now delete the callsite flag, and see the second inlining pass can inline the call - @eval Core.Compiler $ir.stmts[$i][:flag] &= ~IR_FLAG_NOINLINE - inlining = Core.Compiler.InliningState(interp) - ir = Core.Compiler.ssa_inlining_pass!(ir, inlining, false) + @eval Compiler $ir.stmts[$i][:flag] &= ~IR_FLAG_NOINLINE + inlining = Compiler.InliningState(interp) + ir = Compiler.ssa_inlining_pass!(ir, inlining, false) @test findfirst(isinvoke(:func_mul_int), ir.stmts.stmt) === nothing @test (i = findfirst(iscall((ir, Core.Intrinsics.mul_int)), ir.stmts.stmt)) !== nothing lins = Base.IRShow.buildLineInfoNode(ir.debuginfo, nothing, i) @@ -1870,15 +1870,15 @@ end call_func_mul_int(a::Int, b::Int) = @noinline func_mul_int(a, b) multi_inlining2(a::Int, b::Int) = call_func_mul_int(a, b) let i::Int, continue_::Bool - interp = Core.Compiler.NativeInterpreter() + interp = Compiler.NativeInterpreter() # check if callsite `@noinline` annotation works ir, = only(Base.code_ircode(multi_inlining2, (Int,Int); optimize_until="inlining", interp)) i = findfirst(isinvoke(:func_mul_int), ir.stmts.stmt) @test i !== nothing # now delete the callsite flag, and see the second inlining pass can inline the call - @eval Core.Compiler $ir.stmts[$i][:flag] &= ~IR_FLAG_NOINLINE - inlining = Core.Compiler.InliningState(interp) - ir = Core.Compiler.ssa_inlining_pass!(ir, inlining, false) + @eval Compiler $ir.stmts[$i][:flag] &= ~IR_FLAG_NOINLINE + inlining = Compiler.InliningState(interp) + ir = Compiler.ssa_inlining_pass!(ir, inlining, false) @test findfirst(isinvoke(:func_mul_int), ir.stmts.stmt) === nothing @test (i = findfirst(iscall((ir, Core.Intrinsics.mul_int)), ir.stmts.stmt)) !== nothing lins = Base.IRShow.buildLineInfoNode(ir.debuginfo, nothing, i) @@ -1915,30 +1915,30 @@ end # optimize away `NamedTuple`s used for handling `@nospecialize`d keyword-argument # https://github.com/JuliaLang/julia/pull/47059 -abstract type CallInfo end -struct NewInstruction +abstract type TestCallInfo end +struct TestNewInstruction stmt::Any type::Any - info::CallInfo + info::TestCallInfo line::Int32 flag::UInt8 - function NewInstruction(@nospecialize(stmt), @nospecialize(type), @nospecialize(info::CallInfo), + function TestNewInstruction(@nospecialize(stmt), @nospecialize(type), @nospecialize(info::TestCallInfo), line::Int32, flag::UInt8) return new(stmt, type, info, line, flag) end end @nospecialize -function NewInstruction(newinst::NewInstruction; +function TestNewInstruction(newinst::TestNewInstruction; stmt=newinst.stmt, type=newinst.type, - info::CallInfo=newinst.info, + info::TestCallInfo=newinst.info, line::Int32=newinst.line, flag::UInt8=newinst.flag) - return NewInstruction(stmt, type, info, line, flag) + return 
TestNewInstruction(stmt, type, info, line, flag) end @specialize -let src = code_typed1((NewInstruction,Any,Any,CallInfo)) do newinst, stmt, type, info - NewInstruction(newinst; stmt, type, info) +let src = code_typed1((TestNewInstruction,Any,Any,TestCallInfo)) do newinst, stmt, type, info + TestNewInstruction(newinst; stmt, type, info) end @test count(issplatnew, src.code) == 0 @test count(iscall((src,NamedTuple)), src.code) == 0 @@ -2122,8 +2122,8 @@ end # `compilesig_invokes` inlining option @newinterp NoCompileSigInvokes -Core.Compiler.OptimizationParams(::NoCompileSigInvokes) = - Core.Compiler.OptimizationParams(; compilesig_invokes=false) +Compiler.OptimizationParams(::NoCompileSigInvokes) = + Compiler.OptimizationParams(; compilesig_invokes=false) @noinline no_compile_sig_invokes(@nospecialize x) = (x !== Any && !Base.has_free_typevars(x)) # test the single dispatch candidate case let src = code_typed1((Type,)) do x @@ -2207,7 +2207,7 @@ function issue53062(cond) return -1 end end -@test !Core.Compiler.is_nothrow(Base.infer_effects(issue53062, (Bool,))) +@test !Compiler.is_nothrow(Base.infer_effects(issue53062, (Bool,))) @test issue53062(false) == -1 @test_throws MethodError issue53062(true) diff --git a/Compiler/test/interpreter_exec.jl b/Compiler/test/interpreter_exec.jl index f00bc92c7443d..65f42a0c7b89b 100644 --- a/Compiler/test/interpreter_exec.jl +++ b/Compiler/test/interpreter_exec.jl @@ -4,6 +4,14 @@ using Test using Core.IR +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end + # test that interpreter correctly handles PhiNodes (#29262) let m = Meta.@lower 1 + 1 @assert Meta.isexpr(m, :thunk) @@ -23,7 +31,7 @@ let m = Meta.@lower 1 + 1 src.ssavaluetypes = nstmts src.ssaflags = fill(UInt8(0x00), nstmts) src.debuginfo = Core.DebugInfo(:none) - Core.Compiler.verify_ir(Core.Compiler.inflate_ir(src)) + Compiler.verify_ir(Compiler.inflate_ir(src)) global test29262 = true @test :a === @eval $m global test29262 = false @@ -64,7 +72,7 @@ let m = Meta.@lower 1 + 1 src.ssaflags = fill(UInt8(0x00), nstmts) src.debuginfo = Core.DebugInfo(:none) m.args[1] = copy(src) - Core.Compiler.verify_ir(Core.Compiler.inflate_ir(src)) + Compiler.verify_ir(Compiler.inflate_ir(src)) global test29262 = true @test (:b, :a, :c, :c) === @eval $m m.args[1] = copy(src) @@ -103,7 +111,7 @@ let m = Meta.@lower 1 + 1 src.ssavaluetypes = nstmts src.ssaflags = fill(UInt8(0x00), nstmts) src.debuginfo = Core.DebugInfo(:none) - Core.Compiler.verify_ir(Core.Compiler.inflate_ir(src)) + Compiler.verify_ir(Compiler.inflate_ir(src)) global test29262 = true @test :a === @eval $m global test29262 = false diff --git a/Compiler/test/invalidation.jl b/Compiler/test/invalidation.jl index 55faa4287da24..c986cb298369f 100644 --- a/Compiler/test/invalidation.jl +++ b/Compiler/test/invalidation.jl @@ -6,29 +6,28 @@ include("irutils.jl") using Test -const CC = Core.Compiler struct InvalidationTesterToken end -struct InvalidationTester <: CC.AbstractInterpreter +struct InvalidationTester <: Compiler.AbstractInterpreter world::UInt - inf_params::CC.InferenceParams - opt_params::CC.OptimizationParams - inf_cache::Vector{CC.InferenceResult} + inf_params::Compiler.InferenceParams + opt_params::Compiler.OptimizationParams + inf_cache::Vector{Compiler.InferenceResult} function InvalidationTester(; world::UInt = Base.get_world_counter(), - inf_params::CC.InferenceParams = CC.InferenceParams(), - opt_params::CC.OptimizationParams = 
CC.OptimizationParams(), - inf_cache::Vector{CC.InferenceResult} = CC.InferenceResult[]) + inf_params::Compiler.InferenceParams = Compiler.InferenceParams(), + opt_params::Compiler.OptimizationParams = Compiler.OptimizationParams(), + inf_cache::Vector{Compiler.InferenceResult} = Compiler.InferenceResult[]) return new(world, inf_params, opt_params, inf_cache) end end -CC.InferenceParams(interp::InvalidationTester) = interp.inf_params -CC.OptimizationParams(interp::InvalidationTester) = interp.opt_params -CC.get_inference_world(interp::InvalidationTester) = interp.world -CC.get_inference_cache(interp::InvalidationTester) = interp.inf_cache -CC.cache_owner(::InvalidationTester) = InvalidationTesterToken() +Compiler.InferenceParams(interp::InvalidationTester) = interp.inf_params +Compiler.OptimizationParams(interp::InvalidationTester) = interp.opt_params +Compiler.get_inference_world(interp::InvalidationTester) = interp.world +Compiler.get_inference_cache(interp::InvalidationTester) = interp.inf_cache +Compiler.cache_owner(::InvalidationTester) = InvalidationTesterToken() # basic functionality test # ------------------------ @@ -105,7 +104,7 @@ begin let rt = only(Base.return_types(pr48932_callee, (Any,))) @test rt === Any effects = Base.infer_effects(pr48932_callee, (Any,)) - @test Core.Compiler.Effects(effects) == Core.Compiler.Effects() + @test Compiler.Effects(effects) == Compiler.Effects() end # run inference on both `pr48932_caller` and `pr48932_callee` @@ -172,7 +171,7 @@ begin take!(GLOBAL_BUFFER) let rt = only(Base.return_types(pr48932_callee_inferable, (Any,))) @test rt === Int effects = Base.infer_effects(pr48932_callee_inferable, (Any,)) - @test Core.Compiler.Effects(effects) == Core.Compiler.Effects() + @test Compiler.Effects(effects) == Compiler.Effects() end # run inference on both `pr48932_caller` and `pr48932_callee`: @@ -234,7 +233,7 @@ begin take!(GLOBAL_BUFFER) let rt = only(Base.return_types(pr48932_callee_inlined, (Any,))) @test rt === Any effects = Base.infer_effects(pr48932_callee_inlined, (Any,)) - @test Core.Compiler.Effects(effects) == Core.Compiler.Effects() + @test Compiler.Effects(effects) == Compiler.Effects() end # run inference on `pr48932_caller_inlined` and `pr48932_callee_inlined` diff --git a/Compiler/test/irpasses.jl b/Compiler/test/irpasses.jl index b770b7373b5bc..412ff3b98cb19 100644 --- a/Compiler/test/irpasses.jl +++ b/Compiler/test/irpasses.jl @@ -29,9 +29,9 @@ let code = Any[ ReturnNode(Core.SSAValue(10)), ] ir = make_ircode(code) - domtree = Core.Compiler.construct_domtree(ir) - ir = Core.Compiler.domsort_ssa!(ir, domtree) - Core.Compiler.verify_ir(ir) + domtree = Compiler.construct_domtree(ir) + ir = Compiler.domsort_ssa!(ir, domtree) + Compiler.verify_ir(ir) phi = ir.stmts.stmt[3] @test isa(phi, Core.PhiNode) && length(phi.edges) == 1 end @@ -47,15 +47,15 @@ let code = Any[] push!(code, Expr(:call, :opaque)) push!(code, ReturnNode(nothing)) ir = make_ircode(code) - domtree = Core.Compiler.construct_domtree(ir) - ir = Core.Compiler.domsort_ssa!(ir, domtree) - Core.Compiler.verify_ir(ir) + domtree = Compiler.construct_domtree(ir) + ir = Compiler.domsort_ssa!(ir, domtree) + Compiler.verify_ir(ir) end # SROA # ==== -using Core.Compiler: widenconst +using .Compiler: widenconst is_load_forwarded(src::CodeInfo) = !any(iscall((src, getfield)), src.code) is_scalar_replaced(src::CodeInfo) = @@ -710,8 +710,8 @@ let code = Any[ ] slottypes = Any[Any, Any, Any] ir = make_ircode(code; ssavaluetypes, slottypes) - ir = @test_nowarn Core.Compiler.sroa_pass!(ir) - 
@test Core.Compiler.verify_ir(ir) === nothing + ir = @test_nowarn Compiler.sroa_pass!(ir) + @test Compiler.verify_ir(ir) === nothing end # A lifted Core.ifelse with an eliminated branch (#50276) @@ -754,8 +754,8 @@ let code = Any[ ] slottypes = Any[Any, Any, Any] ir = make_ircode(code; ssavaluetypes, slottypes) - ir = @test_nowarn Core.Compiler.sroa_pass!(ir) - @test Core.Compiler.verify_ir(ir) === nothing + ir = @test_nowarn Compiler.sroa_pass!(ir) + @test Compiler.verify_ir(ir) === nothing end # Issue #31546 - missing widenconst in SROA @@ -770,32 +770,32 @@ end # Tests for cfg simplification let src = code_typed(gcd, Tuple{Int, Int})[1].first # Test that cfg_simplify doesn't mangle IR on code with loops - ir = Core.Compiler.inflate_ir(src) - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.inflate_ir(src) + Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) end let # Test that CFG simplify combines redundant basic blocks code = Any[ - Core.Compiler.GotoNode(2), - Core.Compiler.GotoNode(3), - Core.Compiler.GotoNode(4), - Core.Compiler.GotoNode(5), - Core.Compiler.GotoNode(6), - Core.Compiler.GotoNode(7), + Compiler.GotoNode(2), + Compiler.GotoNode(3), + Compiler.GotoNode(4), + Compiler.GotoNode(5), + Compiler.GotoNode(6), + Compiler.GotoNode(7), ReturnNode(2) ] ir = make_ircode(code) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.compact!(ir) - @test length(ir.cfg.blocks) == 1 && Core.Compiler.length(ir.stmts) == 1 + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) + ir = Compiler.compact!(ir) + @test length(ir.cfg.blocks) == 1 && Compiler.length(ir.stmts) == 1 end # Test cfg_simplify in complicated sequences of dropped and merged bbs -using Core.Compiler: Argument, IRCode, GotoNode, GotoIfNot, ReturnNode, NoCallInfo, BasicBlock, StmtRange, SSAValue -bb_term(ir, bb) = Core.Compiler.getindex(ir, SSAValue(Core.Compiler.last(ir.cfg.blocks[bb].stmts)))[:stmt] +using .Compiler: Argument, IRCode, GotoNode, GotoIfNot, ReturnNode, NoCallInfo, BasicBlock, StmtRange, SSAValue +bb_term(ir, bb) = Compiler.getindex(ir, SSAValue(Compiler.last(ir.cfg.blocks[bb].stmts)))[:stmt] function each_stmt_a_bb(stmts, preds, succs) ir = IRCode() @@ -807,7 +807,7 @@ function each_stmt_a_bb(stmts, preds, succs) empty!(ir.stmts.info); append!(ir.stmts.info, [NoCallInfo() for _ = 1:length(stmts)]) empty!(ir.cfg.blocks); append!(ir.cfg.blocks, [BasicBlock(StmtRange(i, i), preds[i], succs[i]) for i = 1:length(stmts)]) empty!(ir.cfg.index); append!(ir.cfg.index, [i for i = 2:length(stmts)]) - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) return ir end @@ -843,8 +843,8 @@ for gotoifnot in (false, true) preds = Vector{Int}[Int[], [1], [2], [2], [4], [5], [6], [1], [3], [4, 9], [5, 10], gotoifnot ? [6,11] : [6], [7, 11]] succs = Vector{Int}[[2, 8], [3, 4], [9], [5, 10], [6, 11], [7, 12], [13], Int[], [10], [11], gotoifnot ? 
[12, 13] : [13], Int[], Int[]] ir = each_stmt_a_bb(stmts, preds, succs) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) if gotoifnot let term4 = bb_term(ir, 4), term5 = bb_term(ir, 5) @@ -874,8 +874,8 @@ let stmts = [ preds = Vector{Int}[Int[], [1], [2], [1], [2, 3]] succs = Vector{Int}[[2, 4], [3, 5], [5], Int[], Int[]] ir = each_stmt_a_bb(stmts, preds, succs) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) == 4 terms = map(i->bb_term(ir, i), 1:length(ir.cfg.blocks)) @@ -884,11 +884,11 @@ end let # Test that CFG simplify doesn't mess up when chaining past return blocks code = Any[ - Core.Compiler.GotoIfNot(Core.Compiler.Argument(2), 3), - Core.Compiler.GotoNode(4), + Compiler.GotoIfNot(Compiler.Argument(2), 3), + Compiler.GotoNode(4), ReturnNode(1), - Core.Compiler.GotoNode(5), - Core.Compiler.GotoIfNot(Core.Compiler.Argument(2), 7), + Compiler.GotoNode(5), + Compiler.GotoIfNot(Compiler.Argument(2), 7), # This fall through block of the previous GotoIfNot # must be moved up along with it, when we merge it # into the goto 4 block. @@ -896,26 +896,26 @@ let # Test that CFG simplify doesn't mess up when chaining past return blocks ReturnNode(3) ] ir = make_ircode(code) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) == 5 ret_2 = ir.stmts.stmt[ir.cfg.blocks[3].stmts[end]] - @test isa(ret_2, Core.Compiler.ReturnNode) && ret_2.val == 2 + @test isa(ret_2, Compiler.ReturnNode) && ret_2.val == 2 end let # Test that CFG simplify doesn't try to merge every block in a loop into # its predecessor code = Any[ # Block 1 - Core.Compiler.GotoNode(2), + Compiler.GotoNode(2), # Block 2 - Core.Compiler.GotoNode(3), + Compiler.GotoNode(3), # Block 3 - Core.Compiler.GotoNode(1) + Compiler.GotoNode(1) ] ir = make_ircode(code) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) == 1 end @@ -926,10 +926,10 @@ let ir = Base.code_ircode(; optimize_until="slot2ssa") do end v end |> only |> first - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) nb = length(ir.cfg.blocks) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) na = length(ir.cfg.blocks) @test na < nb end @@ -1135,9 +1135,9 @@ let ci = code_typed1(optimize=false) do gcd(64, 128) end end - ir = Core.Compiler.inflate_ir(ci) + ir = Compiler.inflate_ir(ci) @test any(@nospecialize(stmt)->isa(stmt, Core.GotoIfNot), ir.stmts.stmt) - ir = Core.Compiler.compact!(ir, true) + ir = Compiler.compact!(ir, true) @test !any(@nospecialize(stmt)->isa(stmt, Core.GotoIfNot), ir.stmts.stmt) end @@ -1167,23 +1167,23 @@ function foo_cfg_empty(b) return b end let ci = code_typed(foo_cfg_empty, Tuple{Bool}, optimize=true)[1][1] - ir = Core.Compiler.inflate_ir(ci) + ir = Compiler.inflate_ir(ci) @test length(ir.stmts) == 3 @test length(ir.cfg.blocks) == 3 - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) <= 2 @test isa(ir.stmts[length(ir.stmts)][:stmt], ReturnNode) end -@test Core.Compiler.is_effect_free(Base.infer_effects(getfield, (Complex{Int}, 
Symbol))) -@test Core.Compiler.is_effect_free(Base.infer_effects(getglobal, (Module, Symbol))) +@test Compiler.is_effect_free(Base.infer_effects(getfield, (Complex{Int}, Symbol))) +@test Compiler.is_effect_free(Base.infer_effects(getglobal, (Module, Symbol))) # Test that UseRefIterator gets SROA'd inside of new_to_regular (#44557) # expression and new_to_regular offset are arbitrary here, we just want to see the UseRefIterator erased let e = Expr(:call, Core.GlobalRef(Base, :arrayset), false, Core.SSAValue(4), Core.SSAValue(9), Core.SSAValue(8)) - new_to_reg(expr) = Core.Compiler.new_to_regular(expr, 1) + new_to_reg(expr) = Compiler.new_to_regular(expr, 1) @allocated new_to_reg(e) # warmup call @test (@allocated new_to_reg(e)) == 0 end @@ -1381,8 +1381,8 @@ end @test foo(true, 1) == 2 # ifelse folding -@test Core.Compiler.is_removable_if_unused(Base.infer_effects(exp, (Float64,))) -@test !Core.Compiler.is_inlineable(code_typed1(exp, (Float64,))) +@test Compiler.is_removable_if_unused(Base.infer_effects(exp, (Float64,))) +@test !Compiler.is_inlineable(code_typed1(exp, (Float64,))) @test fully_eliminated(; retval=Core.Argument(2)) do x::Float64 return Core.ifelse(true, x, exp(x)) end @@ -1492,19 +1492,19 @@ let code = Any[ mi.def = Module() # Simulate the important results from inference - interp = Core.Compiler.NativeInterpreter() - sv = Core.Compiler.OptimizationState(mi, src, interp) + interp = Compiler.NativeInterpreter() + sv = Compiler.OptimizationState(mi, src, interp) slot_id = 4 for block_id = 3:5 # (_4 !== nothing) conditional narrows the type, triggering PiNodes sv.bb_vartables[block_id][slot_id] = VarState(Bool, #= maybe_undef =# false) end - ir = Core.Compiler.convert_to_ircode(src, sv) - ir = Core.Compiler.slot2reg(ir, src, sv) - ir = Core.Compiler.compact!(ir) + ir = Compiler.convert_to_ircode(src, sv) + ir = Compiler.slot2reg(ir, src, sv) + ir = Compiler.compact!(ir) - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) end function f_with_merge_to_entry_block() @@ -1517,9 +1517,9 @@ function f_with_merge_to_entry_block() end let (ir, _) = only(Base.code_ircode(f_with_merge_to_entry_block)) - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) end # Test that CFG simplify doesn't leave an un-renamed SSA Value @@ -1540,12 +1540,12 @@ let # Test that CFG simplify doesn't try to merge every block in a loop into ReturnNode(1) ] ir = make_ircode(code) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) == 4 end -# JET.test_opt(Core.Compiler.cfg_simplify!, (Core.Compiler.IRCode,)) +# JET.test_opt(Compiler.cfg_simplify!, (Compiler.IRCode,)) # Test support for Core.OptimizedGenerics.KeyValue protocol function persistent_dict_elim() @@ -1607,8 +1607,8 @@ let code = Any[ ReturnNode(1) ] ir = make_ircode(code) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) <= 5 end @@ -1626,10 +1626,10 @@ let code = Any[ ReturnNode(SSAValue(5)) ] ir = make_ircode(code) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) <= 2 - ir = Core.Compiler.compact!(ir) + ir = Compiler.compact!(ir) @test length(ir.stmts) <= 3 @test 
(ir[SSAValue(length(ir.stmts))][:stmt]::ReturnNode).val !== nothing end @@ -1646,12 +1646,12 @@ let code = Any[ argtypes = Any[Bool] ssavaluetypes = Any[Bool, Tuple{Int}, Tuple{Float64}, Tuple{Int}, Int, Any] ir = make_ircode(code; slottypes=argtypes, ssavaluetypes) - Core.Compiler.verify_ir(ir) - Core.Compiler.__set_check_ssa_counts(true) - ir = Core.Compiler.sroa_pass!(ir) - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) + Compiler.__set_check_ssa_counts(true) + ir = Compiler.sroa_pass!(ir) + Compiler.verify_ir(ir) finally - Core.Compiler.__set_check_ssa_counts(false) + Compiler.__set_check_ssa_counts(false) end end @@ -1687,11 +1687,11 @@ let code = Any[ Tuple{Tuple{Int, Int}, Int}, Tuple{Int, Int}, Int, Any] ir = make_ircode(code; slottypes=argtypes, ssavaluetypes) - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.sroa_pass!(ir) - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.compact!(ir) - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) + ir = Compiler.sroa_pass!(ir) + Compiler.verify_ir(ir) + ir = Compiler.compact!(ir) + Compiler.verify_ir(ir) end # Test correctness of current_scope folding @@ -1762,12 +1762,12 @@ let code = Any[ ReturnNode(SSAValue(6)) ] ir = make_ircode(code) - Core.Compiler.insert_node!(ir, SSAValue(5), - Core.Compiler.NewInstruction( + Compiler.insert_node!(ir, SSAValue(5), + Compiler.NewInstruction( Expr(:call, println, 2), Nothing, Int32(1)), #= attach_after = =# true) - ir = Core.Compiler.compact!(ir, true) - @test Core.Compiler.verify_ir(ir) === nothing + ir = Compiler.compact!(ir, true) + @test Compiler.verify_ir(ir) === nothing @test count(x->isa(x, GotoIfNot), ir.stmts.stmt) == 1 end @@ -1779,14 +1779,14 @@ let code = Any[ ReturnNode(1) ] ir = make_ircode(code; ssavaluetypes = Any[ImmutableRef{Any}, Any, Any, Any], slottypes=Any[Bool], verify=true) - ir = Core.Compiler.sroa_pass!(ir) - @test Core.Compiler.verify_ir(ir) === nothing + ir = Compiler.sroa_pass!(ir) + @test Compiler.verify_ir(ir) === nothing @test !any(iscall((ir, getfield)), ir.stmts.stmt) @test length(ir.cfg.blocks[end].stmts) == 1 end # https://github.com/JuliaLang/julia/issues/47065 -# `Core.Compiler.sort!` should be able to handle a big list +# `Compiler.sort!` should be able to handle a big list let n = 1000 ex = :(return 1) for _ in 1:n @@ -1829,9 +1829,9 @@ let code = Any[ ReturnNode(Core.SSAValue(3)) ] ir = make_ircode(code; ssavaluetypes=Any[Any, Nothing, Union{Int64, Float64}, Any]) - (ir, made_changes) = Core.Compiler.adce_pass!(ir) + (ir, made_changes) = Compiler.adce_pass!(ir) @test made_changes - @test (ir[Core.SSAValue(length(ir.stmts))][:flag] & Core.Compiler.IR_FLAG_REFINED) != 0 + @test (ir[Core.SSAValue(length(ir.stmts))][:flag] & Compiler.IR_FLAG_REFINED) != 0 end # JuliaLang/julia#52991: statements that may not :terminate should not be deleted @@ -1850,7 +1850,7 @@ end end return s end -@test !Core.Compiler.is_removable_if_unused(Base.infer_effects(issue52991, (Int,))) +@test !Compiler.is_removable_if_unused(Base.infer_effects(issue52991, (Int,))) let src = code_typed1((Int,)) do x issue52991(x) nothing @@ -1903,9 +1903,9 @@ let code = Any[ append!(ir.cfg.index, Int[2,3,4]) ir.stmts.stmt[1] = GotoIfNot(Core.Argument(2), 4) - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) == 3 # should have removed block 3 end @@ -1929,17 +1929,17 @@ let code = Any[ ] ir = make_ircode(code; ssavaluetypes=Any[Any, 
Any, Any, Any, Any, Any, Union{}, Union{}]) @test length(ir.cfg.blocks) == 8 - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) # Union typed deletion marker in basic block 2 - Core.Compiler.setindex!(ir, nothing, SSAValue(2)) + Compiler.setindex!(ir, nothing, SSAValue(2)) # Test cfg_simplify - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.cfg_simplify!(ir) - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) + ir = Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) @test length(ir.cfg.blocks) == 6 - gotoifnot = Core.Compiler.last(ir.cfg.blocks[3].stmts) + gotoifnot = Compiler.last(ir.cfg.blocks[3].stmts) inst = ir[SSAValue(gotoifnot)] @test isa(inst[:stmt], GotoIfNot) # Make sure we didn't accidentally schedule the unreachable block as @@ -1962,10 +1962,10 @@ let f = (x)->nothing, mi = Base.method_instance(f, (Base.RefValue{Nothing},)), c ReturnNode(SSAValue(6)) ] ir = make_ircode(code; ssavaluetypes=Any[Base.RefValue{Nothing}, Nothing, Any, Nothing, Any, Nothing, Any]) - inlining = Core.Compiler.InliningState(Core.Compiler.NativeInterpreter()) - Core.Compiler.verify_ir(ir) - ir = Core.Compiler.sroa_pass!(ir, inlining) - Core.Compiler.verify_ir(ir) + inlining = Compiler.InliningState(Compiler.NativeInterpreter()) + Compiler.verify_ir(ir) + ir = Compiler.sroa_pass!(ir, inlining) + Compiler.verify_ir(ir) end let code = Any[ @@ -1988,11 +1988,11 @@ let code = Any[ ] ir = make_ircode(code; ssavaluetypes=Any[Any, Any, Union{}, Any, Any, Any, Union{}, Union{}]) @test length(ir.cfg.blocks) == 8 - Core.Compiler.verify_ir(ir) + Compiler.verify_ir(ir) # The IR should remain valid after domsorting # (esp. including the insertion of new BasicBlocks for any fix-ups) - domtree = Core.Compiler.construct_domtree(ir) - ir = Core.Compiler.domsort_ssa!(ir, domtree) - Core.Compiler.verify_ir(ir) + domtree = Compiler.construct_domtree(ir) + ir = Compiler.domsort_ssa!(ir, domtree) + Compiler.verify_ir(ir) end diff --git a/Compiler/test/irutils.jl b/Compiler/test/irutils.jl index 50b3a858d89dc..d1a3a2ea57c35 100644 --- a/Compiler/test/irutils.jl +++ b/Compiler/test/irutils.jl @@ -1,11 +1,19 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end + using Core.IR -using Core.Compiler: IRCode, IncrementalCompact, singleton_type, VarState +using .Compiler: IRCode, IncrementalCompact, singleton_type, VarState using Base.Meta: isexpr using InteractiveUtils: gen_call_with_extracted_types_and_kwargs -argextype(@nospecialize args...) = Core.Compiler.argextype(args..., VarState[]) +argextype(@nospecialize args...) = Compiler.argextype(args..., VarState[]) code_typed1(args...; kwargs...) = first(only(code_typed(args...; kwargs...)))::CodeInfo macro code_typed1(ex0...) return gen_call_with_extracted_types_and_kwargs(__module__, :code_typed1, ex0) @@ -91,11 +99,11 @@ let m = Meta.@lower 1 + 1 kwargs...) src = make_codeinfo(code; slottypes, kwargs...) 
if slottypes !== nothing - ir = Core.Compiler.inflate_ir(src, slottypes) + ir = Compiler.inflate_ir(src, slottypes) else - ir = Core.Compiler.inflate_ir(src) + ir = Compiler.inflate_ir(src) end - verify && Core.Compiler.verify_ir(ir) + verify && Compiler.verify_ir(ir) return ir end end diff --git a/Compiler/test/newinterp.jl b/Compiler/test/newinterp.jl index d86a1831def79..5ebcf332895fa 100644 --- a/Compiler/test/newinterp.jl +++ b/Compiler/test/newinterp.jl @@ -10,7 +10,7 @@ from the native code cache, satisfying the minimum interface requirements. When the `ephemeral_cache=true` option is specified, `NewInterpreter` will hold `CodeInstance` in an ephemeral non-integrated cache, rather than in the integrated -`Core.Compiler.InternalCodeCache`. +`Compiler.InternalCodeCache`. Keep in mind that ephemeral cache lacks support for invalidation and doesn't persist across sessions. However it is an usual Julia object of the type `code_cache::IdDict{MethodInstance,CodeInstance}`, making it easier for debugging and inspecting the compiler behavior. @@ -20,7 +20,6 @@ macro newinterp(InterpName, ephemeral_cache::Bool=false) InterpCacheName = esc(Symbol(string(InterpName, "Cache"))) InterpName = esc(InterpName) C = Core - CC = Core.Compiler quote $(ephemeral_cache && quote struct $InterpCacheName @@ -28,18 +27,18 @@ macro newinterp(InterpName, ephemeral_cache::Bool=false) end $InterpCacheName() = $InterpCacheName(IdDict{$C.MethodInstance,$C.CodeInstance}()) end) - struct $InterpName <: $CC.AbstractInterpreter + struct $InterpName <: $Compiler.AbstractInterpreter meta # additional information world::UInt - inf_params::$CC.InferenceParams - opt_params::$CC.OptimizationParams - inf_cache::Vector{$CC.InferenceResult} + inf_params::$Compiler.InferenceParams + opt_params::$Compiler.OptimizationParams + inf_cache::Vector{$Compiler.InferenceResult} $(ephemeral_cache && :(code_cache::$InterpCacheName)) function $InterpName(meta = nothing; world::UInt = Base.get_world_counter(), - inf_params::$CC.InferenceParams = $CC.InferenceParams(), - opt_params::$CC.OptimizationParams = $CC.OptimizationParams(), - inf_cache::Vector{$CC.InferenceResult} = $CC.InferenceResult[], + inf_params::$Compiler.InferenceParams = $Compiler.InferenceParams(), + opt_params::$Compiler.OptimizationParams = $Compiler.OptimizationParams(), + inf_cache::Vector{$Compiler.InferenceResult} = $Compiler.InferenceResult[], $(ephemeral_cache ? 
Expr(:kw, :(code_cache::$InterpCacheName), :($InterpCacheName())) : Expr(:kw, :_, :nothing))) @@ -48,17 +47,17 @@ macro newinterp(InterpName, ephemeral_cache::Bool=false) :(new(meta, world, inf_params, opt_params, inf_cache))) end end - $CC.InferenceParams(interp::$InterpName) = interp.inf_params - $CC.OptimizationParams(interp::$InterpName) = interp.opt_params - $CC.get_inference_world(interp::$InterpName) = interp.world - $CC.get_inference_cache(interp::$InterpName) = interp.inf_cache - $CC.cache_owner(::$InterpName) = $cache_token + $Compiler.InferenceParams(interp::$InterpName) = interp.inf_params + $Compiler.OptimizationParams(interp::$InterpName) = interp.opt_params + $Compiler.get_inference_world(interp::$InterpName) = interp.world + $Compiler.get_inference_cache(interp::$InterpName) = interp.inf_cache + $Compiler.cache_owner(::$InterpName) = $cache_token $(ephemeral_cache && quote - $CC.code_cache(interp::$InterpName) = $CC.WorldView(interp.code_cache, $CC.WorldRange(interp.world)) - $CC.get(wvc::$CC.WorldView{$InterpCacheName}, mi::$C.MethodInstance, default) = get(wvc.cache.dict, mi, default) - $CC.getindex(wvc::$CC.WorldView{$InterpCacheName}, mi::$C.MethodInstance) = getindex(wvc.cache.dict, mi) - $CC.haskey(wvc::$CC.WorldView{$InterpCacheName}, mi::$C.MethodInstance) = haskey(wvc.cache.dict, mi) - $CC.setindex!(wvc::$CC.WorldView{$InterpCacheName}, ci::$C.CodeInstance, mi::$C.MethodInstance) = setindex!(wvc.cache.dict, ci, mi) + $Compiler.code_cache(interp::$InterpName) = $Compiler.WorldView(interp.code_cache, $Compiler.WorldRange(interp.world)) + $Compiler.get(wvc::$Compiler.WorldView{$InterpCacheName}, mi::$C.MethodInstance, default) = get(wvc.cache.dict, mi, default) + $Compiler.getindex(wvc::$Compiler.WorldView{$InterpCacheName}, mi::$C.MethodInstance) = getindex(wvc.cache.dict, mi) + $Compiler.haskey(wvc::$Compiler.WorldView{$InterpCacheName}, mi::$C.MethodInstance) = haskey(wvc.cache.dict, mi) + $Compiler.setindex!(wvc::$Compiler.WorldView{$InterpCacheName}, ci::$C.CodeInstance, mi::$C.MethodInstance) = setindex!(wvc.cache.dict, ci, mi) end) end end diff --git a/Compiler/test/runtests.jl b/Compiler/test/runtests.jl index e4b312c6a65b7..ea3df3aa2855d 100644 --- a/Compiler/test/runtests.jl +++ b/Compiler/test/runtests.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test, Compiler +using InteractiveUtils: @activate +@activate Compiler for file in readlines(joinpath(@__DIR__, "testgroups")) file == "special_loading" && continue # Only applicable to Base.Compiler diff --git a/Compiler/test/ssair.jl b/Compiler/test/ssair.jl index 39ec60a429677..d6707e4dec9c2 100644 --- a/Compiler/test/ssair.jl +++ b/Compiler/test/ssair.jl @@ -1,12 +1,11 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license +include("irutils.jl") + using Base.Meta using Core.IR -const Compiler = Core.Compiler using .Compiler: CFG, BasicBlock, NewSSAValue -include("irutils.jl") - make_bb(preds, succs) = BasicBlock(Compiler.StmtRange(0, 0), preds, succs) # TODO: this test is broken @@ -102,7 +101,7 @@ let cfg = CFG(BasicBlock[ make_bb([0, 1, 2] , [5] ), # 0 predecessor should be preserved make_bb([2, 3] , [] ), ], Int[]) - insts = Compiler.InstructionStream([], [], Core.Compiler.CallInfo[], Int32[], UInt32[]) + insts = Compiler.InstructionStream([], [], Compiler.CallInfo[], Int32[], UInt32[]) di = Compiler.DebugInfoStream(insts.line) ir = Compiler.IRCode(insts, cfg, di, Any[], Expr[], Compiler.VarState[]) compact = Compiler.IncrementalCompact(ir, true) @@ -133,36 +132,36 @@ end let code = Any[ # block 1 Expr(:boundscheck), - Core.Compiler.GotoIfNot(SSAValue(1), 6), + Compiler.GotoIfNot(SSAValue(1), 6), # block 2 - Expr(:call, GlobalRef(Base, :size), Core.Compiler.Argument(3)), - Core.Compiler.ReturnNode(), + Expr(:call, GlobalRef(Base, :size), Compiler.Argument(3)), + Compiler.ReturnNode(), # block 3 Core.PhiNode(), - Core.Compiler.ReturnNode(), + Compiler.ReturnNode(), # block 4 GlobalRef(Main, :something), GlobalRef(Main, :somethingelse), Expr(:call, Core.SSAValue(7), Core.SSAValue(8)), - Core.Compiler.GotoIfNot(Core.SSAValue(9), 12), + Compiler.GotoIfNot(Core.SSAValue(9), 12), # block 5 - Core.Compiler.ReturnNode(Core.SSAValue(9)), + Compiler.ReturnNode(Core.SSAValue(9)), # block 6 - Core.Compiler.ReturnNode(Core.SSAValue(9)) + Compiler.ReturnNode(Core.SSAValue(9)) ] ir = make_ircode(code) - ir = Core.Compiler.compact!(ir, true) - @test Core.Compiler.verify_ir(ir) === nothing + ir = Compiler.compact!(ir, true) + @test Compiler.verify_ir(ir) === nothing end # Test that the verifier doesn't choke on cglobals (which aren't linearized) let code = Any[ Expr(:call, GlobalRef(Main, :cglobal), Expr(:call, Core.tuple, :(:c)), Nothing), - Core.Compiler.ReturnNode() + Compiler.ReturnNode() ] ir = make_ircode(code) - @test Core.Compiler.verify_ir(ir) === nothing + @test Compiler.verify_ir(ir) === nothing end # Test that GlobalRef in value position is non-canonical @@ -171,14 +170,14 @@ let code = Any[ ReturnNode(SSAValue(1)) ] ir = make_ircode(code; verify=false) - ir = Core.Compiler.compact!(ir, true) - @test_throws ["IR verification failed.", "Code location: "] Core.Compiler.verify_ir(ir, false) + ir = Compiler.compact!(ir, true) + @test_throws ["IR verification failed.", "Code location: "] Compiler.verify_ir(ir, false) end # Issue #29107 let code = Any[ # Block 1 - Core.Compiler.GotoNode(6), + Compiler.GotoNode(6), # Block 2 # The following phi node gets deleted because it only has one edge, so # the call to `something` is made to use the value of `something2()`, @@ -188,12 +187,12 @@ let code = Any[ Core.PhiNode(Int32[2], Any[Core.SSAValue(4)]), Expr(:call, :something, Core.SSAValue(2)), Expr(:call, :something2), - Core.Compiler.GotoNode(2), + Compiler.GotoNode(2), # Block 3 - Core.Compiler.ReturnNode(1000) + Compiler.ReturnNode(1000) ] ir = make_ircode(code) - ir = Core.Compiler.compact!(ir, true) + ir = Compiler.compact!(ir, true) # Make sure that if there is a call to `something` (block 2 should be # removed entirely with working DCE), it doesn't use any SSA values that # come after it. 
@@ -210,22 +209,22 @@ end # Make sure dead blocks that are removed are not still referenced in live phi nodes let code = Any[ # Block 1 - Core.Compiler.GotoNode(3), + Compiler.GotoNode(3), # Block 2 (no predecessors) - Core.Compiler.ReturnNode(3), + Compiler.ReturnNode(3), # Block 3 Core.PhiNode(Int32[1, 2], Any[100, 200]), - Core.Compiler.ReturnNode(Core.SSAValue(3)) + Compiler.ReturnNode(Core.SSAValue(3)) ] ir = make_ircode(code; verify=false) - ir = Core.Compiler.compact!(ir, true) - @test Core.Compiler.verify_ir(ir) === nothing + ir = Compiler.compact!(ir, true) + @test Compiler.verify_ir(ir) === nothing end # issue #37919 let ci = only(code_lowered(()->@isdefined(_not_def_37919_), ())) - ir = Core.Compiler.inflate_ir(ci) - @test Core.Compiler.verify_ir(ir) === nothing + ir = Compiler.inflate_ir(ci) + @test Compiler.verify_ir(ir) === nothing end let code = Any[ @@ -239,7 +238,7 @@ let code = Any[ ] ir = make_ircode(code; slottypes=Any[Any,Bool,Int]) visited = BitSet() - @test !Core.Compiler.visit_conditional_successors(ir, #=bb=#1) do succ::Int + @test !Compiler.visit_conditional_successors(ir, #=bb=#1) do succ::Int push!(visited, succ) return false end @@ -261,7 +260,7 @@ let code = Any[ ] ir = make_ircode(code; slottypes=Any[Any,Bool,Int]) visited = BitSet() - @test !Core.Compiler.visit_conditional_successors(ir, #=bb=#1) do succ::Int + @test !Compiler.visit_conditional_successors(ir, #=bb=#1) do succ::Int push!(visited, succ) return false end @@ -288,7 +287,7 @@ let code = Any[ ] ir = make_ircode(code; slottypes=Any[Any,Bool,Int,Int]) visited = BitSet() - @test !Core.Compiler.visit_conditional_successors(ir, #=bb=#1) do succ::Int + @test !Compiler.visit_conditional_successors(ir, #=bb=#1) do succ::Int push!(visited, succ) return false end @@ -394,6 +393,14 @@ f_if_typecheck() = (if nothing; end; unsafe_load(Ptr{Int}(0))) let # https://github.com/JuliaLang/julia/issues/42258 code = """ + if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end + end + function foo() a = @noinline rand(rand(0:10)) if isempty(a) @@ -405,7 +412,7 @@ let # https://github.com/JuliaLang/julia/issues/42258 end code_typed(foo; optimize=true) - code_typed(Core.Compiler.setindex!, (Core.Compiler.UseRef,Core.Compiler.NewSSAValue); optimize=true) + code_typed(Compiler.setindex!, (Compiler.UseRef,Compiler.NewSSAValue); optimize=true) """ cmd = `$(Base.julia_cmd()) -g 2 -e $code` stderr = IOBuffer() @@ -468,18 +475,18 @@ let function _test_userefs(@nospecialize stmt) ex = Expr(:call, :+, Core.SSAValue(3), 1) - urs = Core.Compiler.userefs(stmt)::Core.Compiler.UseRefIterator - it = Core.Compiler.iterate(urs) + urs = Compiler.userefs(stmt)::Compiler.UseRefIterator + it = Compiler.iterate(urs) while it !== nothing - ur = getfield(it, 1)::Core.Compiler.UseRef + ur = getfield(it, 1)::Compiler.UseRef op = getfield(it, 2)::Int - v1 = Core.Compiler.getindex(ur) + v1 = Compiler.getindex(ur) # set to dummy expression and then back to itself to test `_useref_setindex!` - v2 = Core.Compiler.setindex!(ur, ex) + v2 = Compiler.setindex!(ur, ex) test_useref(v2, ex, op) - Core.Compiler.setindex!(ur, v1) - @test Core.Compiler.getindex(ur) === v1 - it = Core.Compiler.iterate(urs, op) + Compiler.setindex!(ur, v1) + @test Compiler.getindex(ur) === v1 + it = Compiler.iterate(urs, op) end end @@ -531,25 +538,25 @@ let ir = Base.code_ircode((Bool,Any)) do c, x end end # domination analysis - domtree = Core.Compiler.construct_domtree(ir) - @test 
Core.Compiler.dominates(domtree, 1, 2) - @test Core.Compiler.dominates(domtree, 1, 3) - @test Core.Compiler.dominates(domtree, 1, 4) + domtree = Compiler.construct_domtree(ir) + @test Compiler.dominates(domtree, 1, 2) + @test Compiler.dominates(domtree, 1, 3) + @test Compiler.dominates(domtree, 1, 4) for i = 2:4 for j = 1:4 i == j && continue - @test !Core.Compiler.dominates(domtree, i, j) + @test !Compiler.dominates(domtree, i, j) end end # post domination analysis - post_domtree = Core.Compiler.construct_postdomtree(ir) - @test Core.Compiler.postdominates(post_domtree, 4, 1) - @test Core.Compiler.postdominates(post_domtree, 4, 2) - @test Core.Compiler.postdominates(post_domtree, 4, 3) + post_domtree = Compiler.construct_postdomtree(ir) + @test Compiler.postdominates(post_domtree, 4, 1) + @test Compiler.postdominates(post_domtree, 4, 2) + @test Compiler.postdominates(post_domtree, 4, 3) for i = 1:3 for j = 1:4 i == j && continue - @test !Core.Compiler.postdominates(post_domtree, i, j) + @test !Compiler.postdominates(post_domtree, i, j) end end end @@ -568,20 +575,20 @@ end @test Meta.isexpr(add_stmt[:stmt], :call) && add_stmt[:stmt].args[3] == 42 # replace the addition with a slightly different one - inst = Core.Compiler.NewInstruction(Expr(:call, add_stmt[:stmt].args[1], add_stmt[:stmt].args[2], 999), Int) - node = Core.Compiler.insert_node!(ir, 1, inst) - Core.Compiler.setindex!(add_stmt, node, :stmt) + inst = Compiler.NewInstruction(Expr(:call, add_stmt[:stmt].args[1], add_stmt[:stmt].args[2], 999), Int) + node = Compiler.insert_node!(ir, 1, inst) + Compiler.setindex!(add_stmt, node, :stmt) # perform compaction (not by calling compact! because with DCE the bug doesn't trigger) - compact = Core.Compiler.IncrementalCompact(ir) - state = Core.Compiler.iterate(compact) + compact = Compiler.IncrementalCompact(ir) + state = Compiler.iterate(compact) while state !== nothing - state = Core.Compiler.iterate(compact, state[2]) + state = Compiler.iterate(compact, state[2]) end - ir = Core.Compiler.complete(compact) + ir = Compiler.complete(compact) # test that the inserted node was compacted - @test Core.Compiler.length(ir.new_nodes) == 0 + @test Compiler.length(ir.new_nodes) == 0 # test that we performed copy propagation, but that the undef node was trimmed @test length(ir.stmts) == instructions @@ -593,7 +600,7 @@ end # ======================= import Core: SSAValue -import Core.Compiler: NewInstruction, insert_node! +import .Compiler: NewInstruction, insert_node! # insert_node! 
for pending node let ir = Base.code_ircode((Int,Int); optimize_until="inlining") do a, b @@ -607,7 +614,7 @@ let ir = Base.code_ircode((Int,Int); optimize_until="inlining") do a, b newssa = insert_node!(ir, invoke_ssa, NewInstruction(Expr(:call, println, invoke_ssa), Nothing), #=attach_after=#true) newssa = insert_node!(ir, newssa, NewInstruction(Expr(:call, println, newssa), Nothing), #=attach_after=#true) - ir = Core.Compiler.compact!(ir) + ir = Compiler.compact!(ir) @test length(ir.stmts) == nstmts + 2 @test Meta.isexpr(ir.stmts[invoke_idx][:stmt], :invoke) call1 = ir.stmts[invoke_idx+1][:stmt] @@ -622,28 +629,28 @@ end let code = Any[ # block 1 #= %1: =# Expr(:boundscheck), - #= %2: =# Core.Compiler.GotoIfNot(SSAValue(1), 4), + #= %2: =# Compiler.GotoIfNot(SSAValue(1), 4), # block 2 #= %3: =# Expr(:call, println, Argument(1)), # block 3 #= %4: =# Core.PhiNode(), - #= %5: =# Core.Compiler.ReturnNode(), + #= %5: =# Compiler.ReturnNode(), ] ir = make_ircode(code) # Insert another call at end of "block 2" - compact = Core.Compiler.IncrementalCompact(ir) + compact = Compiler.IncrementalCompact(ir) new_inst = NewInstruction(Expr(:call, println, Argument(1)), Nothing) insert_node!(compact, SSAValue(3), new_inst, #= attach_after =# true) # Complete iteration - x = Core.Compiler.iterate(compact) + x = Compiler.iterate(compact) while x !== nothing - x = Core.Compiler.iterate(compact, x[2]) + x = Compiler.iterate(compact, x[2]) end - ir = Core.Compiler.complete(compact) + ir = Compiler.complete(compact) - @test Core.Compiler.verify_ir(ir) === nothing + @test Compiler.verify_ir(ir) === nothing end # compact constant PiNode @@ -652,7 +659,7 @@ let code = Any[ ReturnNode(SSAValue(1)) ] ir = make_ircode(code) - ir = Core.Compiler.compact!(ir) + ir = Compiler.compact!(ir) @test fully_eliminated(ir) end @@ -666,13 +673,13 @@ let ir = Base.code_ircode((Int,Int); optimize_until="inlining") do a, b invoke_ssa = SSAValue(invoke_idx) # effect-ful node - let compact = Core.Compiler.IncrementalCompact(Core.Compiler.copy(ir)) + let compact = Compiler.IncrementalCompact(Compiler.copy(ir)) insert_node!(compact, invoke_ssa, NewInstruction(Expr(:call, println, invoke_ssa), Nothing), #=attach_after=#true) - state = Core.Compiler.iterate(compact) + state = Compiler.iterate(compact) while state !== nothing - state = Core.Compiler.iterate(compact, state[2]) + state = Compiler.iterate(compact, state[2]) end - ir = Core.Compiler.finish(compact) + ir = Compiler.finish(compact) new_invoke_idx = findfirst(@nospecialize(stmt)->stmt==invoke_expr, ir.stmts.stmt) @test new_invoke_idx !== nothing new_call_idx = findfirst(ir.stmts.stmt) do @nospecialize(stmt) @@ -683,15 +690,15 @@ let ir = Base.code_ircode((Int,Int); optimize_until="inlining") do a, b end # effect-free node - let compact = Core.Compiler.IncrementalCompact(Core.Compiler.copy(ir)) + let compact = Compiler.IncrementalCompact(Compiler.copy(ir)) insert_node!(compact, invoke_ssa, NewInstruction(Expr(:call, GlobalRef(Base, :add_int), invoke_ssa, invoke_ssa), Int), #=attach_after=#true) - state = Core.Compiler.iterate(compact) + state = Compiler.iterate(compact) while state !== nothing - state = Core.Compiler.iterate(compact, state[2]) + state = Compiler.iterate(compact, state[2]) end - ir = Core.Compiler.finish(compact) + ir = Compiler.finish(compact) - ir = Core.Compiler.finish(compact) + ir = Compiler.finish(compact) new_invoke_idx = findfirst(@nospecialize(stmt)->stmt==invoke_expr, ir.stmts.stmt) @test new_invoke_idx !== nothing new_call_idx = 
findfirst(ir.stmts.stmt) do @nospecialize(x) diff --git a/Compiler/test/tarjan.jl b/Compiler/test/tarjan.jl index 11c6b68e58b1b..49124bdf650fe 100644 --- a/Compiler/test/tarjan.jl +++ b/Compiler/test/tarjan.jl @@ -1,9 +1,9 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -using Core.Compiler: CFGReachability, DomTree, CFG, BasicBlock, StmtRange, dominates, - bb_unreachable, kill_edge! +include("irutils.jl") -const CC = Core.Compiler +using .Compiler: CFGReachability, DomTree, CFG, BasicBlock, StmtRange, dominates, + bb_unreachable, kill_edge! function reachable(g::CFG, a::Int, b::Int; domtree=nothing) visited = BitVector(false for _ = 1:length(g.blocks)) @@ -83,7 +83,7 @@ function test_reachability(V, E; deletions = 2E ÷ 3, all_checks=false) if all_checks # checks for internal data structures - O(E^2) - # Nodes should be mutually reachable iff they are in the same SCC. + # Nodes should be mutually reachable iff they are in the same SCompiler. scc = reachability.scc reachable_nodes = BitSet(v for v = 1:V if !bb_unreachable(reachability, v)) for i ∈ reachable_nodes @@ -96,13 +96,13 @@ function test_reachability(V, E; deletions = 2E ÷ 3, all_checks=false) irreducible = reachability.irreducible for i ∈ reachable_nodes in_nontrivial_scc = any(v != i && scc[v] == scc[i] for v = 1:V) - @test CC.getindex(irreducible, i) == in_nontrivial_scc + @test Compiler.getindex(irreducible, i) == in_nontrivial_scc end end end cfg = rand_cfg(V, E) - domtree = Core.Compiler.construct_domtree(cfg) + domtree = Compiler.construct_domtree(cfg) reachability = CFGReachability(cfg, domtree) check_reachability(reachability, cfg, domtree, all_checks) diff --git a/Compiler/test/validation.jl b/Compiler/test/validation.jl index 5fd074fee73ae..38dfa9705d542 100644 --- a/Compiler/test/validation.jl +++ b/Compiler/test/validation.jl @@ -2,6 +2,14 @@ using Test, Core.IR +if !@isdefined(Compiler) + if Base.identify_package("Compiler") === nothing + import Base.Compiler: Compiler + else + import Compiler + end +end + function f22938(a, b, x...) 
nothing nothing @@ -21,17 +29,17 @@ end msig = Tuple{typeof(f22938),Int,Int,Int,Int} world = Base.get_world_counter() match = only(Base._methods_by_ftype(msig, -1, world)) -mi = Core.Compiler.specialize_method(match) -c0 = Core.Compiler.retrieve_code_info(mi, world) +mi = Compiler.specialize_method(match) +c0 = Compiler.retrieve_code_info(mi, world) -@test isempty(Core.Compiler.validate_code(mi, c0)) +@test isempty(Compiler.validate_code(mi, c0)) @testset "INVALID_EXPR_HEAD" begin c = copy(c0) c.code[1] = Expr(:invalid, 1) - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 1 - @test errors[1].kind === Core.Compiler.INVALID_EXPR_HEAD + @test errors[1].kind === Compiler.INVALID_EXPR_HEAD end @testset "INVALID_LVALUE" begin @@ -39,9 +47,9 @@ end c.code[1] = Expr(:(=), GotoNode(1), 1) c.code[2] = Expr(:(=), :x, 1) c.code[3] = Expr(:(=), 3, 1) - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 3 - @test all(e.kind === Core.Compiler.INVALID_LVALUE for e in errors) + @test all(e.kind === Compiler.INVALID_LVALUE for e in errors) end @testset "INVALID_RVALUE" begin @@ -52,9 +60,9 @@ end for h in (:line, :const, :meta) c.code[i+=1] = Expr(:(=), SlotNumber(2), Expr(h)) end - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 5 - @test count(e.kind === Core.Compiler.INVALID_RVALUE for e in errors) == 5 + @test count(e.kind === Compiler.INVALID_RVALUE for e in errors) == 5 end @testset "INVALID_CALL_ARG" begin @@ -66,74 +74,74 @@ end for h in (:line, :const, :meta) c.code[i+=1] = Expr(:call, GlobalRef(@__MODULE__,:f), Expr(h)) end - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 6 - @test count(e.kind === Core.Compiler.INVALID_CALL_ARG for e in errors) == 6 + @test count(e.kind === Compiler.INVALID_CALL_ARG for e in errors) == 6 end @testset "EMPTY_SLOTNAMES" begin c = copy(c0) empty!(c.slotnames) - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 2 - @test any(e.kind === Core.Compiler.EMPTY_SLOTNAMES for e in errors) - @test any(e.kind === Core.Compiler.SLOTFLAGS_MISMATCH for e in errors) + @test any(e.kind === Compiler.EMPTY_SLOTNAMES for e in errors) + @test any(e.kind === Compiler.SLOTFLAGS_MISMATCH for e in errors) end @testset "SLOTFLAGS_MISMATCH" begin c = copy(c0) push!(c.slotflags, 0x00) - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 1 - @test errors[1].kind === Core.Compiler.SLOTFLAGS_MISMATCH + @test errors[1].kind === Compiler.SLOTFLAGS_MISMATCH end @testset "SSAVALUETYPES_MISMATCH" begin c = code_typed(f22938, (Int,Int,Int,Int))[1][1] empty!(c.ssavaluetypes) - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 1 - @test errors[1].kind === Core.Compiler.SSAVALUETYPES_MISMATCH + @test errors[1].kind === Compiler.SSAVALUETYPES_MISMATCH end @testset "SSAVALUETYPES_MISMATCH_UNINFERRED" begin c = copy(c0) c.ssavaluetypes -= 1 - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 1 - @test errors[1].kind === Core.Compiler.SSAVALUETYPES_MISMATCH_UNINFERRED + @test errors[1].kind === Compiler.SSAVALUETYPES_MISMATCH_UNINFERRED end @testset "SSAFLAGS_MISMATCH" begin c = copy(c0) empty!(c.ssaflags) - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test 
length(errors) == 1 - @test errors[1].kind === Core.Compiler.SSAFLAGS_MISMATCH + @test errors[1].kind === Compiler.SSAFLAGS_MISMATCH end @testset "SIGNATURE_NARGS_MISMATCH" begin old_sig = mi.def.sig mi.def.sig = Tuple{1,2} - errors = Core.Compiler.validate_code(mi, nothing) + errors = Compiler.validate_code(mi, nothing) mi.def.sig = old_sig @test length(errors) == 1 - @test errors[1].kind === Core.Compiler.SIGNATURE_NARGS_MISMATCH + @test errors[1].kind === Compiler.SIGNATURE_NARGS_MISMATCH end @testset "NON_TOP_LEVEL_METHOD" begin c = copy(c0) c.code[1] = Expr(:method, :dummy) - errors = Core.Compiler.validate_code(c) + errors = Compiler.validate_code(c) @test length(errors) == 1 - @test errors[1].kind === Core.Compiler.NON_TOP_LEVEL_METHOD + @test errors[1].kind === Compiler.NON_TOP_LEVEL_METHOD end @testset "SLOTNAMES_NARGS_MISMATCH" begin mi.def.nargs += 20 - errors = Core.Compiler.validate_code(mi, c0) + errors = Compiler.validate_code(mi, c0) mi.def.nargs -= 20 @test length(errors) == 2 - @test count(e.kind === Core.Compiler.SLOTNAMES_NARGS_MISMATCH for e in errors) == 1 - @test count(e.kind === Core.Compiler.SIGNATURE_NARGS_MISMATCH for e in errors) == 1 + @test count(e.kind === Compiler.SLOTNAMES_NARGS_MISMATCH for e in errors) == 1 + @test count(e.kind === Compiler.SIGNATURE_NARGS_MISMATCH for e in errors) == 1 end diff --git a/test/precompile_absint1.jl b/test/precompile_absint1.jl index 4202bf72b793f..98078ebf41098 100644 --- a/test/precompile_absint1.jl +++ b/test/precompile_absint1.jl @@ -1,6 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license using Test +import Base.Compiler: Compiler include("precompile_utils.jl") @@ -15,6 +16,7 @@ precompile_test_harness() do load_path import SimpleModule: basic_caller, basic_callee module Custom + import Base.Compiler: Compiler include($newinterp_path) @newinterp PrecompileInterpreter end @@ -36,7 +38,7 @@ precompile_test_harness() do load_path @eval let using TestAbsIntPrecompile1 - cache_owner = Core.Compiler.cache_owner( + cache_owner = Compiler.cache_owner( TestAbsIntPrecompile1.Custom.PrecompileInterpreter()) let m = only(methods(TestAbsIntPrecompile1.basic_callee)) mi = only(Base.specializations(m)) diff --git a/test/precompile_absint2.jl b/test/precompile_absint2.jl index 19317bf7b0683..4aa84e0992f7c 100644 --- a/test/precompile_absint2.jl +++ b/test/precompile_absint2.jl @@ -15,30 +15,30 @@ precompile_test_harness() do load_path import SimpleModule: basic_caller, basic_callee module Custom - const CC = Core.Compiler + import Base.Compiler: Compiler include($newinterp_path) @newinterp PrecompileInterpreter struct CustomData inferred CustomData(@nospecialize inferred) = new(inferred) end - function CC.transform_result_for_cache(interp::PrecompileInterpreter, result::CC.InferenceResult) - inferred_result = @invoke CC.transform_result_for_cache( - interp::CC.AbstractInterpreter, result::CC.InferenceResult) + function Compiler.transform_result_for_cache(interp::PrecompileInterpreter, result::Compiler.InferenceResult) + inferred_result = @invoke Compiler.transform_result_for_cache( + interp::Compiler.AbstractInterpreter, result::Compiler.InferenceResult) return CustomData(inferred_result) end - function CC.src_inlining_policy(interp::PrecompileInterpreter, @nospecialize(src), - @nospecialize(info::CC.CallInfo), stmt_flag::UInt32) + function Compiler.src_inlining_policy(interp::PrecompileInterpreter, @nospecialize(src), + @nospecialize(info::Compiler.CallInfo), stmt_flag::UInt32) if src isa CustomData 
src = src.inferred end - return @invoke CC.src_inlining_policy(interp::CC.AbstractInterpreter, src::Any, - info::CC.CallInfo, stmt_flag::UInt32) + return @invoke Compiler.src_inlining_policy(interp::Compiler.AbstractInterpreter, src::Any, + info::Compiler.CallInfo, stmt_flag::UInt32) end - CC.retrieve_ir_for_inlining(cached_result::Core.CodeInstance, src::CustomData) = - CC.retrieve_ir_for_inlining(cached_result, src.inferred) - CC.retrieve_ir_for_inlining(mi::Core.MethodInstance, src::CustomData, preserve_local_sources::Bool) = - CC.retrieve_ir_for_inlining(mi, src.inferred, preserve_local_sources) + Compiler.retrieve_ir_for_inlining(cached_result::Core.CodeInstance, src::CustomData) = + Compiler.retrieve_ir_for_inlining(cached_result, src.inferred) + Compiler.retrieve_ir_for_inlining(mi::Core.MethodInstance, src::CustomData, preserve_local_sources::Bool) = + Compiler.retrieve_ir_for_inlining(mi, src.inferred, preserve_local_sources) end Base.return_types((Float64,)) do x From 858cb629e9ed476a4f76662676366400b95e5ced Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Thu, 21 Nov 2024 22:06:37 -0500 Subject: [PATCH 485/537] Profile: Fix short names (#56627) --- stdlib/Profile/Project.toml | 3 ++- stdlib/Profile/src/Profile.jl | 18 ++++++++++-------- stdlib/Profile/test/runtests.jl | 18 ++++++++++++++++++ 3 files changed, 30 insertions(+), 9 deletions(-) diff --git a/stdlib/Profile/Project.toml b/stdlib/Profile/Project.toml index 13cd11f70d9b4..6b70f9c7cd19d 100644 --- a/stdlib/Profile/Project.toml +++ b/stdlib/Profile/Project.toml @@ -10,9 +10,10 @@ StyledStrings = "1.11.0" [extras] Base64 = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" +InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240" Logging = "56ddb016-857b-54e1-b83d-db4d58db5568" Serialization = "9e88b42a-f829-5b0c-bbe9-9e923198166b" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] -test = ["Base64", "Logging", "Serialization", "Test"] +test = ["Base64", "InteractiveUtils", "Logging", "Serialization", "Test"] diff --git a/stdlib/Profile/src/Profile.jl b/stdlib/Profile/src/Profile.jl index 409696c8c9354..f59b49d8a4a36 100644 --- a/stdlib/Profile/src/Profile.jl +++ b/stdlib/Profile/src/Profile.jl @@ -43,6 +43,8 @@ using StyledStrings: @styled_str const nmeta = 4 # number of metadata fields per block (threadid, taskid, cpu_cycle_clock, thread_sleeping) +const slash = Sys.iswindows() ? "\\" : "/" + # deprecated functions: use `getdict` instead lookup(ip::UInt) = lookup(convert(Ptr{Cvoid}, ip)) @@ -537,7 +539,7 @@ function flatten(data::Vector, lidict::LineInfoDict) end const SRC_DIR = normpath(joinpath(Sys.BUILD_ROOT_PATH, "src")) -const COMPILER_DIR = "././../usr/share/julia/Compiler/" +const COMPILER_DIR = "../usr/share/julia/Compiler/" # Take a file-system path and try to form a concise representation of it # based on the package ecosystem @@ -554,8 +556,8 @@ function short_path(spath::Symbol, filenamecache::Dict{Symbol, Tuple{String,Stri elseif startswith(path_norm, lib_dir) remainder = only(split(path_norm, lib_dir, keepempty=false)) return (isfile(path_norm) ? path_norm : ""), "@julialib", remainder - elseif startswith(path, COMPILER_DIR) - remainder = only(split(path, COMPILER_DIR, keepempty=false)) + elseif contains(path, COMPILER_DIR) + remainder = split(path, COMPILER_DIR, keepempty=false)[end] possible_compiler_path = normpath(joinpath(Sys.BINDIR, Base.DATAROOTDIR, "julia", "Compiler", remainder)) return (isfile(possible_compiler_path) ? 
possible_compiler_path : ""), "@Compiler", remainder elseif isabspath(path) @@ -572,7 +574,7 @@ function short_path(spath::Symbol, filenamecache::Dict{Symbol, Tuple{String,Stri project_file = joinpath(root, proj) if Base.isfile_casesensitive(project_file) pkgid = Base.project_file_name_uuid(project_file, "") - isempty(pkgid.name) && return path # bad Project file + isempty(pkgid.name) && return path, "", path # bad Project file # return the joined the module name prefix and path suffix _short_path = path[nextind(path, sizeof(root)):end] return path, string("@", pkgid.name), _short_path @@ -944,8 +946,8 @@ function print_flat(io::IO, lilist::Vector{StackFrame}, Base.printstyled(io, pkgname, color=pkgcolor) file_trunc = ltruncate(file, max(1, wfile)) wpad = wfile - textwidth(pkgname) - if !isempty(pkgname) && !startswith(file_trunc, "/") - Base.print(io, "/") + if !isempty(pkgname) && !startswith(file_trunc, slash) + Base.print(io, slash) wpad -= 1 end if isempty(path) @@ -1048,8 +1050,8 @@ function tree_format(frames::Vector{<:StackFrameTree}, level::Int, cols::Int, ma pkgcolor = get!(() -> popfirst!(Base.STACKTRACE_MODULECOLORS), PACKAGE_FIXEDCOLORS, pkgname) remaining_path = ltruncate(filename, max(1, widthfile - textwidth(pkgname) - 1)) linenum = li.line == -1 ? "?" : string(li.line) - slash = (!isempty(pkgname) && !startswith(remaining_path, "/")) ? "/" : "" - styled_path = styled"{$pkgcolor:$pkgname}$slash$remaining_path:$linenum" + _slash = (!isempty(pkgname) && !startswith(remaining_path, slash)) ? slash : "" + styled_path = styled"{$pkgcolor:$pkgname}$(_slash)$remaining_path:$linenum" rich_file = if isempty(path) styled_path else diff --git a/stdlib/Profile/test/runtests.jl b/stdlib/Profile/test/runtests.jl index c1cb86d84975a..b73a2a618011b 100644 --- a/stdlib/Profile/test/runtests.jl +++ b/stdlib/Profile/test/runtests.jl @@ -204,6 +204,24 @@ end @test getline(values(fdictc)) == getline(values(fdict0)) + 2 end +import InteractiveUtils + +@testset "Module short names" begin + Profile.clear() + @profile InteractiveUtils.peakflops() + io = IOBuffer() + ioc = IOContext(io, :displaysize=>(1000,1000)) + Profile.print(ioc, C=true) + str = String(take!(io)) + slash = Sys.iswindows() ? "\\" : "/" + @test occursin("@Compiler" * slash, str) + @test occursin("@Base" * slash, str) + @test occursin("@InteractiveUtils" * slash, str) + @test occursin("@LinearAlgebra" * slash, str) + @test occursin("@juliasrc" * slash, str) + @test occursin("@julialib" * slash, str) +end + # Profile deadlocking in compilation (debuginfo registration) let cmd = Base.julia_cmd() script = """ From e624440e6ca7b4f19db3f02cb6506405be68b800 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 22 Nov 2024 13:45:28 +0900 Subject: [PATCH 486/537] inference: add missing tfuncs to the `xxxglobal` builtins (#56641) These builtins are now special-cased within `abstract_call_known` after JuliaLang/julia#56299, making them unnecessary for basic inference. As a result, their tfuncs have been removed in the PR. However the algorithm for calculating inlining costs still looks up these tfuncs, so they need to be recovered. Additionally, the `generate_builtins.jl` script in JuliaInterpreter also uses these tfuncs, so it would be worthwhile to register even simple placeholder tfuncs for now. 
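For reference, these entries follow the usual `add_tfunc(f, min_nargs, max_nargs, tfunc, cost)` registration pattern in `Compiler/src/tfuncs.jl`; a minimal sketch of one placeholder, reading the trailing arguments as the accepted argument counts and the inlining cost, looks like:

```julia
# Placeholder registration: the tfunc itself is deliberately coarse (returns `Any`),
# since precise handling of these builtins lives in `abstract_call_known`; the entry
# mainly exists so that cost/tfunc lookups keep finding something.
add_tfunc(setglobal!, 3, 4, @nospecs((𝕃::AbstractLattice, args...)->Any), 3)
```
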
@nanosoldier `runbenchmarks("inference", vs=":master")` --- Compiler/src/tfuncs.jl | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Compiler/src/tfuncs.jl b/Compiler/src/tfuncs.jl index 87dad13c50a30..60a3332030069 100644 --- a/Compiler/src/tfuncs.jl +++ b/Compiler/src/tfuncs.jl @@ -2469,8 +2469,6 @@ function getfield_effects(𝕃::AbstractLattice, argtypes::Vector{Any}, @nospeci return Effects(EFFECTS_TOTAL; consistent, nothrow, inaccessiblememonly, noub) end - - """ builtin_effects(𝕃::AbstractLattice, f::Builtin, argtypes::Vector{Any}, rt) -> Effects @@ -3065,6 +3063,14 @@ end return M ⊑ Module && s ⊑ Symbol end +add_tfunc(getglobal, 2, 3, @nospecs((𝕃::AbstractLattice, args...)->Any), 1) +add_tfunc(setglobal!, 3, 4, @nospecs((𝕃::AbstractLattice, args...)->Any), 3) +add_tfunc(swapglobal!, 3, 4, @nospecs((𝕃::AbstractLattice, args...)->Any), 3) +add_tfunc(modifyglobal!, 4, 5, @nospecs((𝕃::AbstractLattice, args...)->Any), 3) +add_tfunc(replaceglobal!, 4, 6, @nospecs((𝕃::AbstractLattice, args...)->Any), 3) +add_tfunc(setglobalonce!, 3, 5, @nospecs((𝕃::AbstractLattice, args...)->Bool), 3) +add_tfunc(Core.get_binding_type, 2, 2, @nospecs((𝕃::AbstractLattice, args...)->Type), 0) + # foreigncall # =========== From 1bf2ef9c5e146d60ae7929561b09ea55b7f4f08f Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 22 Nov 2024 16:05:55 +0900 Subject: [PATCH 487/537] allow `apply_type_tfunc` to handle argtypes with `Union` (#56617) This is an alternative to JuliaLang/julia#56532 and can resolve #31909. Currently `apply_type_tfunc` is unable to handle `Union`-argtypes with any precision. With this change, `apply_type_tfunc` now performs union-splitting on `Union`-argtypes and returns the merged result of the splits. While this can improve inference precision, we might need to be cautious about potential inference time bloat. --------- Co-authored-by: Jameson Nash --- Compiler/src/tfuncs.jl | 64 +++++++++++++++++++++++++------------- Compiler/test/inference.jl | 43 +++++++++++++++++++------ 2 files changed, 76 insertions(+), 31 deletions(-) diff --git a/Compiler/src/tfuncs.jl b/Compiler/src/tfuncs.jl index 60a3332030069..af8863c1b3a3a 100644 --- a/Compiler/src/tfuncs.jl +++ b/Compiler/src/tfuncs.jl @@ -1350,14 +1350,14 @@ end T = _fieldtype_tfunc(𝕃, o′, f, isconcretetype(o′)) T === Bottom && return Bottom PT = Const(Pair) - return instanceof_tfunc(apply_type_tfunc(𝕃, PT, T, T), true)[1] + return instanceof_tfunc(apply_type_tfunc(𝕃, Any[PT, T, T]), true)[1] end @nospecs function replacefield!_tfunc(𝕃::AbstractLattice, o, f, x, v, success_order=Symbol, failure_order=Symbol) o′ = widenconst(o) T = _fieldtype_tfunc(𝕃, o′, f, isconcretetype(o′)) T === Bottom && return Bottom PT = Const(ccall(:jl_apply_cmpswap_type, Any, (Any,), T) where T) - return instanceof_tfunc(apply_type_tfunc(𝕃, PT, T), true)[1] + return instanceof_tfunc(apply_type_tfunc(𝕃, Any[PT, T]), true)[1] end @nospecs function setfieldonce!_tfunc(𝕃::AbstractLattice, o, f, v, success_order=Symbol, failure_order=Symbol) setfield!_tfunc(𝕃, o, f, v) === Bottom && return Bottom @@ -1713,8 +1713,12 @@ end const _tvarnames = Symbol[:_A, :_B, :_C, :_D, :_E, :_F, :_G, :_H, :_I, :_J, :_K, :_L, :_M, :_N, :_O, :_P, :_Q, :_R, :_S, :_T, :_U, :_V, :_W, :_X, :_Y, :_Z] -# TODO: handle e.g. apply_type(T, R::Union{Type{Int32},Type{Float64}}) -@nospecs function apply_type_tfunc(𝕃::AbstractLattice, headtypetype, args...) 
+function apply_type_tfunc(𝕃::AbstractLattice, argtypes::Vector{Any}; + max_union_splitting::Int=InferenceParams().max_union_splitting) + if isempty(argtypes) + return Bottom + end + headtypetype = argtypes[1] headtypetype = widenslotwrapper(headtypetype) if isa(headtypetype, Const) headtype = headtypetype.val @@ -1723,15 +1727,15 @@ const _tvarnames = Symbol[:_A, :_B, :_C, :_D, :_E, :_F, :_G, :_H, :_I, :_J, :_K, else return Any end - if !isempty(args) && isvarargtype(args[end]) + largs = length(argtypes) + if largs > 1 && isvarargtype(argtypes[end]) return isvarargtype(headtype) ? TypeofVararg : Type end - largs = length(args) if headtype === Union - largs == 0 && return Const(Bottom) + largs == 1 && return Const(Bottom) hasnonType = false - for i = 1:largs - ai = args[i] + for i = 2:largs + ai = argtypes[i] if isa(ai, Const) if !isa(ai.val, Type) if isa(ai.val, TypeVar) @@ -1750,14 +1754,14 @@ const _tvarnames = Symbol[:_A, :_B, :_C, :_D, :_E, :_F, :_G, :_H, :_I, :_J, :_K, end end end - if largs == 1 # Union{T} --> T - return tmeet(widenconst(args[1]), Union{Type,TypeVar}) + if largs == 2 # Union{T} --> T + return tmeet(widenconst(argtypes[2]), Union{Type,TypeVar}) end hasnonType && return Type ty = Union{} allconst = true - for i = 1:largs - ai = args[i] + for i = 2:largs + ai = argtypes[i] if isType(ai) aty = ai.parameters[1] allconst &= hasuniquerep(aty) @@ -1768,6 +1772,18 @@ const _tvarnames = Symbol[:_A, :_B, :_C, :_D, :_E, :_F, :_G, :_H, :_I, :_J, :_K, end return allconst ? Const(ty) : Type{ty} end + if 1 < unionsplitcost(𝕃, argtypes) ≤ max_union_splitting + rt = Bottom + for split_argtypes = switchtupleunion(𝕃, argtypes) + this_rt = widenconst(_apply_type_tfunc(𝕃, headtype, split_argtypes)) + rt = Union{rt, this_rt} + end + return rt + end + return _apply_type_tfunc(𝕃, headtype, argtypes) +end +@nospecs function _apply_type_tfunc(𝕃::AbstractLattice, headtype, argtypes::Vector{Any}) + largs = length(argtypes) istuple = headtype === Tuple if !istuple && !isa(headtype, UnionAll) && !isvarargtype(headtype) return Union{} @@ -1781,20 +1797,20 @@ const _tvarnames = Symbol[:_A, :_B, :_C, :_D, :_E, :_F, :_G, :_H, :_I, :_J, :_K, # first push the tailing vars from headtype into outervars outer_start, ua = 0, headtype while isa(ua, UnionAll) - if (outer_start += 1) > largs + if (outer_start += 1) > largs - 1 push!(outervars, ua.var) end ua = ua.body end - if largs > outer_start && isa(headtype, UnionAll) # e.g. !isvarargtype(ua) && !istuple + if largs - 1 > outer_start && isa(headtype, UnionAll) # e.g. !isvarargtype(ua) && !istuple return Bottom # too many arguments end - outer_start = outer_start - largs + 1 + outer_start = outer_start - largs + 2 varnamectr = 1 ua = headtype - for i = 1:largs - ai = widenslotwrapper(args[i]) + for i = 2:largs + ai = widenslotwrapper(argtypes[i]) if isType(ai) aip1 = ai.parameters[1] canconst &= !has_free_typevars(aip1) @@ -1868,7 +1884,7 @@ const _tvarnames = Symbol[:_A, :_B, :_C, :_D, :_E, :_F, :_G, :_H, :_I, :_J, :_K, # If the names are known, keep the upper bound, but otherwise widen to Tuple. # This is a widening heuristic to avoid keeping type information # that's unlikely to be useful. - if !(uw.parameters[1] isa Tuple || (i == 2 && tparams[1] isa Tuple)) + if !(uw.parameters[1] isa Tuple || (i == 3 && tparams[1] isa Tuple)) ub = Any end else @@ -1910,7 +1926,7 @@ const _tvarnames = Symbol[:_A, :_B, :_C, :_D, :_E, :_F, :_G, :_H, :_I, :_J, :_K, # throwing errors. 
appl = headtype if isa(appl, UnionAll) - for _ = 1:largs + for _ = 2:largs appl = appl::UnionAll push!(outervars, appl.var) appl = appl.body @@ -1930,6 +1946,8 @@ const _tvarnames = Symbol[:_A, :_B, :_C, :_D, :_E, :_F, :_G, :_H, :_I, :_J, :_K, end return ans end +@nospecs apply_type_tfunc(𝕃::AbstractLattice, headtypetype, args...) = + apply_type_tfunc(𝕃, Any[i == 0 ? headtypetype : args[i] for i in 0:length(args)]) add_tfunc(apply_type, 1, INT_INF, apply_type_tfunc, 10) # convert the dispatch tuple type argtype to the real (concrete) type of @@ -2016,7 +2034,7 @@ end T = _memoryref_elemtype(mem) T === Bottom && return Bottom PT = Const(Pair) - return instanceof_tfunc(apply_type_tfunc(𝕃, PT, T, T), true)[1] + return instanceof_tfunc(apply_type_tfunc(𝕃, Any[PT, T, T]), true)[1] end @nospecs function memoryrefreplace!_tfunc(𝕃::AbstractLattice, mem, x, v, success_order, failure_order, boundscheck) memoryrefset!_tfunc(𝕃, mem, v, success_order, boundscheck) === Bottom && return Bottom @@ -2024,7 +2042,7 @@ end T = _memoryref_elemtype(mem) T === Bottom && return Bottom PT = Const(ccall(:jl_apply_cmpswap_type, Any, (Any,), T) where T) - return instanceof_tfunc(apply_type_tfunc(𝕃, PT, T), true)[1] + return instanceof_tfunc(apply_type_tfunc(𝕃, Any[PT, T]), true)[1] end @nospecs function memoryrefsetonce!_tfunc(𝕃::AbstractLattice, mem, v, success_order, failure_order, boundscheck) memoryrefset!_tfunc(𝕃, mem, v, success_order, boundscheck) === Bottom && return Bottom @@ -2666,6 +2684,8 @@ function builtin_tfunction(interp::AbstractInterpreter, @nospecialize(f), argtyp end end return current_scope_tfunc(interp, sv) + elseif f === Core.apply_type + return apply_type_tfunc(𝕃ᵢ, argtypes; max_union_splitting=InferenceParams(interp).max_union_splitting) end fidx = find_tfunc(f) if fidx === nothing diff --git a/Compiler/test/inference.jl b/Compiler/test/inference.jl index 26fc80470795f..c896c0c390285 100644 --- a/Compiler/test/inference.jl +++ b/Compiler/test/inference.jl @@ -3,7 +3,7 @@ include("irutils.jl") # tests for Compiler correctness and precision -import .Compiler: Const, Conditional, ⊑, ReturnNode, GotoIfNot +using .Compiler: Conditional, ⊑ isdispatchelem(@nospecialize x) = !isa(x, Type) || Compiler.isdispatchelem(x) using Random, Core.IR @@ -1721,7 +1721,7 @@ g_test_constant() = (f_constant(3) == 3 && f_constant(4) == 4 ? true : "BAD") f_pure_add() = (1 + 1 == 2) ? true : "FAIL" @test @inferred f_pure_add() -import Core: Const +using Core: Const mutable struct ARef{T} @atomic x::T end @@ -1762,7 +1762,7 @@ let getfield_tfunc(@nospecialize xs...) 
= @test getfield_tfunc(ARef{Int},Const(:x),Bool,Bool) === Union{} end -import .Compiler: Const +using Core: Const mutable struct XY{X,Y} x::X y::Y @@ -2765,10 +2765,10 @@ end |> only === Int # `apply_type_tfunc` accuracy for constrained type construction # https://github.com/JuliaLang/julia/issues/47089 -import Core: Const -import .Compiler: apply_type_tfunc struct Issue47089{A<:Number,B<:Number} end -let 𝕃 = Compiler.fallback_lattice +let apply_type_tfunc = Compiler.apply_type_tfunc + 𝕃 = Compiler.fallback_lattice + Const = Core.Const A = Type{<:Integer} @test apply_type_tfunc(𝕃, Const(Issue47089), A, A) <: (Type{Issue47089{A,B}} where {A<:Integer, B<:Integer}) @test apply_type_tfunc(𝕃, Const(Issue47089), Const(Int), Const(Int), Const(Int)) === Union{} @@ -4554,7 +4554,8 @@ end |> only == Tuple{Int,Int} end |> only == Int # form PartialStruct for mutables with `const` field -import .Compiler: Const, ⊑ +using Core: Const +using .Compiler: ⊑ mutable struct PartialMutable{S,T} const s::S t::T @@ -5700,7 +5701,8 @@ let x = 1, _Any = Any end # Issue #51927 -let 𝕃 = Compiler.fallback_lattice +let apply_type_tfunc = Compiler.apply_type_tfunc + 𝕃 = Compiler.fallback_lattice @test apply_type_tfunc(𝕃, Const(Tuple{Vararg{Any,N}} where N), Int) == Type{NTuple{_A, Any}} where _A end @@ -6074,6 +6076,29 @@ function issue56387(nt::NamedTuple, field::Symbol=:a) end @test Base.infer_return_type(issue56387, (typeof((;a=1)),)) == Type{Int} +# `apply_type_tfunc` with `Union` in its arguments +let apply_type_tfunc = Compiler.apply_type_tfunc + 𝕃 = Compiler.fallback_lattice + Const = Core.Const + @test apply_type_tfunc(𝕃, Any[Const(Vector), Union{Type{Int},Type{Nothing}}]) == Union{Type{Vector{Int}},Type{Vector{Nothing}}} +end + +@test Base.infer_return_type((Bool,Int,)) do b, y + x = b ? 1 : missing + inner = y -> x + y + return inner(y) +end == Union{Int,Missing} + +function issue31909(ys) + x = if @noinline rand(Bool) + 1 + else + missing + end + map(y -> x + y, ys) +end +@test Base.infer_return_type(issue31909, (Vector{Int},)) == Union{Vector{Int},Vector{Missing}} + global setglobal!_refine::Int @test Base.infer_return_type((Integer,)) do x setglobal!(@__MODULE__, :setglobal!_refine, x) @@ -6098,4 +6123,4 @@ function func_swapglobal!_must_throw(x) swapglobal!(@__MODULE__, :swapglobal!_must_throw, x) end @test Base.infer_return_type(func_swapglobal!_must_throw, (Int,); interp=SwapGlobalInterp()) === Union{} -@test !Base.Compiler.is_effect_free(Base.infer_effects(func_swapglobal!_must_throw, (Int,); interp=SwapGlobalInterp()) ) +@test !Compiler.is_effect_free(Base.infer_effects(func_swapglobal!_must_throw, (Int,); interp=SwapGlobalInterp()) ) From 215189e5c28b671f0ed7347f540ea5fe9dd61c2e Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Fri, 22 Nov 2024 17:41:00 +0900 Subject: [PATCH 488/537] complete the tests for Compiler.jl as the stdlib. (#56648) With JuliaLang/julia#56632, Compiler.jl as the stdlib can now be tested. However, the PR was incomplete, and when tests are actually run on `Compiler`, which is `!== Base.Compiler`, various errors occur, including issues caused by JuliaLang/julia#56647. This commit resolves all these issues: - manage the code for loading `Compiler` in `setup_Compiler.jl`, ensuring that the stdlib version of `Compiler` is loaded when `@activate Compiler` is used beforehand - replace `Base.IRShow` with `Compiler.IRShow` - test `Base.Compiler.return_type` instead of `Compiler.return_type` This was split off from JuliaLang/julia#56636. 
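For a picture of how the pieces fit together, a minimal sketch of the test entry point (assuming the `Compiler` stdlib is on the load path) is:

```julia
# Compiler/test/runtests.jl, roughly: activate the loaded Compiler package first,
# then let every test file pick it up through setup_Compiler.jl.
using Test, Compiler
using InteractiveUtils: @activate
@activate Compiler
# Each test file starts with `include("setup_Compiler.jl")`, which binds `Compiler`
# to the activated copy via `Base.REFLECTION_COMPILER[]` rather than falling back
# to `Base.Compiler`.
```
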
--- Compiler/src/ssair/verify.jl | 2 +- Compiler/test/AbstractInterpreter.jl | 8 ------ Compiler/test/EAUtils.jl | 18 +++++-------- Compiler/test/codegen.jl | 9 ++----- Compiler/test/compact.jl | 12 ++++----- Compiler/test/contextual.jl | 9 +------ Compiler/test/datastructures.jl | 10 ++------ Compiler/test/effects.jl | 2 ++ Compiler/test/inference.jl | 38 +++++++++++++++------------- Compiler/test/inline.jl | 4 +-- Compiler/test/interpreter_exec.jl | 10 ++------ Compiler/test/invalidation.jl | 6 ++--- Compiler/test/irutils.jl | 10 ++------ Compiler/test/runtests.jl | 9 ++++--- Compiler/test/setup_Compiler.jl | 9 +++++++ Compiler/test/ssair.jl | 12 +++------ Compiler/test/tarjan.jl | 2 ++ Compiler/test/validation.jl | 8 +----- base/Base_compiler.jl | 1 - 19 files changed, 69 insertions(+), 110 deletions(-) create mode 100644 Compiler/test/setup_Compiler.jl diff --git a/Compiler/src/ssair/verify.jl b/Compiler/src/ssair/verify.jl index 14ca6ef2dbe9a..59051058e1750 100644 --- a/Compiler/src/ssair/verify.jl +++ b/Compiler/src/ssair/verify.jl @@ -104,7 +104,7 @@ function verify_ir(ir::IRCode, print::Bool=true, error_args = Any["IR verification failed."] if isdefined(Core, :Main) && isdefined(Core.Main, :Base) # ensure we use I/O that does not yield, as this gets called during compilation - firstline = invokelatest(Core.Main.Base.IRShow.debuginfo_firstline, ir.debuginfo) + firstline = invokelatest(IRShow.debuginfo_firstline, ir.debuginfo) else firstline = nothing end diff --git a/Compiler/test/AbstractInterpreter.jl b/Compiler/test/AbstractInterpreter.jl index 81659443038e4..533eaf93937a3 100644 --- a/Compiler/test/AbstractInterpreter.jl +++ b/Compiler/test/AbstractInterpreter.jl @@ -2,14 +2,6 @@ using Test -if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end - include("irutils.jl") include("newinterp.jl") diff --git a/Compiler/test/EAUtils.jl b/Compiler/test/EAUtils.jl index cec33ca265a80..5a5c42fc89106 100644 --- a/Compiler/test/EAUtils.jl +++ b/Compiler/test/EAUtils.jl @@ -2,13 +2,7 @@ module EAUtils export code_escapes, @code_escapes, __clear_cache! 
-if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end +include("setup_Compiler.jl") using ..EscapeAnalysis const EA = EscapeAnalysis @@ -267,22 +261,22 @@ end function print_with_info(preprint, postprint, io::IO, ir::IRCode, source::Bool) io = IOContext(io, :displaysize=>displaysize(io)) - used = Base.IRShow.stmts_used(io, ir) + used = Compiler.IRShow.stmts_used(io, ir) if source line_info_preprinter = function (io::IO, indent::String, idx::Int) - r = Base.IRShow.inline_linfo_printer(ir)(io, indent, idx) + r = Compiler.IRShow.inline_linfo_printer(ir)(io, indent, idx) idx ≠ 0 && preprint(io, idx) return r end else - line_info_preprinter = Base.IRShow.lineinfo_disabled + line_info_preprinter = Compiler.IRShow.lineinfo_disabled end - line_info_postprinter = Base.IRShow.default_expr_type_printer + line_info_postprinter = Compiler.IRShow.default_expr_type_printer preprint(io) bb_idx_prev = bb_idx = 1 for idx = 1:length(ir.stmts) preprint(io, idx) - bb_idx = Base.IRShow.show_ir_stmt(io, ir, idx, line_info_preprinter, line_info_postprinter, ir.sptypes, used, ir.cfg, bb_idx) + bb_idx = Compiler.IRShow.show_ir_stmt(io, ir, idx, line_info_preprinter, line_info_postprinter, ir.sptypes, used, ir.cfg, bb_idx) postprint(io, idx, bb_idx != bb_idx_prev) bb_idx_prev = bb_idx end diff --git a/Compiler/test/codegen.jl b/Compiler/test/codegen.jl index 90ec16ca3b7ac..b6805a77124ca 100644 --- a/Compiler/test/codegen.jl +++ b/Compiler/test/codegen.jl @@ -5,14 +5,9 @@ using Random using InteractiveUtils using Libdl +using Test -if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end +include("setup_Compiler.jl") const opt_level = Base.JLOptions().opt_level const coverage = (Base.JLOptions().code_coverage > 0) || (Base.JLOptions().malloc_log > 0) diff --git a/Compiler/test/compact.jl b/Compiler/test/compact.jl index a636ab8172d63..b01e209d5ce9b 100644 --- a/Compiler/test/compact.jl +++ b/Compiler/test/compact.jl @@ -1,10 +1,8 @@ -if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end +# This file is a part of Julia. License is MIT: https://julialang.org/license + +using Test + +include("irutils.jl") using .Compiler: IncrementalCompact, insert_node_here!, finish, NewInstruction, verify_ir, ReturnNode, SSAValue diff --git a/Compiler/test/contextual.jl b/Compiler/test/contextual.jl index 08dc68ba42b34..a9c63ab34c0c0 100644 --- a/Compiler/test/contextual.jl +++ b/Compiler/test/contextual.jl @@ -2,14 +2,7 @@ # N.B.: This file is also run from interpreter.jl, so needs to be standalone-executable using Test - -if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end +include("setup_Compiler.jl") # Cassette # ======== diff --git a/Compiler/test/datastructures.jl b/Compiler/test/datastructures.jl index 6b37d7c89e684..608e4e770998a 100644 --- a/Compiler/test/datastructures.jl +++ b/Compiler/test/datastructures.jl @@ -1,12 +1,6 @@ -using Test +# This file is a part of Julia. 
License is MIT: https://julialang.org/license -if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end +include("setup_Compiler.jl") @testset "CachedMethodTable" begin # cache result should be separated per `limit` and `sig` diff --git a/Compiler/test/effects.jl b/Compiler/test/effects.jl index e4677daf0c483..a7a1d18159137 100644 --- a/Compiler/test/effects.jl +++ b/Compiler/test/effects.jl @@ -1,3 +1,5 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + using Test include("irutils.jl") diff --git a/Compiler/test/inference.jl b/Compiler/test/inference.jl index c896c0c390285..b3099897faf51 100644 --- a/Compiler/test/inference.jl +++ b/Compiler/test/inference.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +using Test + include("irutils.jl") # tests for Compiler correctness and precision @@ -823,7 +825,7 @@ end # Issue 19641 foo19641() = let a = 1.0 - Compiler.return_type(x -> x + a, Tuple{Float64}) + Base._return_type(x -> x + a, Tuple{Float64}) end @inferred foo19641() @@ -977,15 +979,15 @@ test_no_apply(::Any) = true # issue #20033 # check return_type_tfunc for calls where no method matches -bcast_eltype_20033(f, A) = Compiler.return_type(f, Tuple{eltype(A)}) +bcast_eltype_20033(f, A) = Base._return_type(f, Tuple{eltype(A)}) err20033(x::Float64...) = prod(x) @test bcast_eltype_20033(err20033, [1]) === Union{} @test Base.return_types(bcast_eltype_20033, (typeof(err20033), Vector{Int},)) == Any[Type{Union{}}] # return_type on builtins -@test Compiler.return_type(tuple, Tuple{Int,Int8,Int}) === Tuple{Int,Int8,Int} +@test Base._return_type(tuple, Tuple{Int,Int8,Int}) === Tuple{Int,Int8,Int} # issue #21088 -@test Compiler.return_type(typeof, Tuple{Int}) == Type{Int} +@test Base._return_type(typeof, Tuple{Int}) == Type{Int} # Inference of constant svecs @eval fsvecinf() = $(QuoteNode(Core.svec(Tuple{Int,Int}, Int)))[1] @@ -1535,7 +1537,7 @@ let nfields_tfunc(@nospecialize xs...) 
= @test sizeof_nothrow(String) @test !sizeof_nothrow(Type{String}) @test sizeof_tfunc(Type{Union{Int64, Int32}}) == Const(Core.sizeof(Union{Int64, Int32})) - let PT = Core.PartialStruct(Base.Compiler.fallback_lattice, Tuple{Int64,UInt64}, Any[Const(10), UInt64]) + let PT = Core.PartialStruct(Compiler.fallback_lattice, Tuple{Int64,UInt64}, Any[Const(10), UInt64]) @test sizeof_tfunc(PT) === Const(16) @test nfields_tfunc(PT) === Const(2) @test sizeof_nothrow(PT) @@ -2235,7 +2237,7 @@ end end |> only == Int # the `fargs = nothing` edge case @test Base.return_types((Any,)) do a - Compiler.return_type(invoke, Tuple{typeof(ispositive), Type{Tuple{Any}}, Any}) + Base._return_type(invoke, Tuple{typeof(ispositive), Type{Tuple{Any}}, Any}) end |> only == Type{Bool} # `InterConditional` handling: `abstract_call_opaque_closure` @@ -3324,8 +3326,8 @@ _rttf_test(::Int16) = 0 _rttf_test(::Int32) = 0 _rttf_test(::Int64) = 0 _rttf_test(::Int128) = 0 -_call_rttf_test() = Compiler.return_type(_rttf_test, Tuple{Any}) -@test Compiler.return_type(_rttf_test, Tuple{Any}) === Int +_call_rttf_test() = Base._return_type(_rttf_test, Tuple{Any}) +@test Base._return_type(_rttf_test, Tuple{Any}) === Int @test _call_rttf_test() === Int f_with_Type_arg(::Type{T}) where {T} = T @@ -3379,9 +3381,9 @@ struct FooPartial b::Int c::Int end -let PT1 = PartialStruct(Base.Compiler.fallback_lattice, FooPartial, Any[Const(1), Const(2), Int]), - PT2 = PartialStruct(Base.Compiler.fallback_lattice, FooPartial, Any[Const(1), Int, Int]), - PT3 = PartialStruct(Base.Compiler.fallback_lattice, FooPartial, Any[Const(1), Int, Const(3)]) +let PT1 = PartialStruct(Compiler.fallback_lattice, FooPartial, Any[Const(1), Const(2), Int]), + PT2 = PartialStruct(Compiler.fallback_lattice, FooPartial, Any[Const(1), Int, Int]), + PT3 = PartialStruct(Compiler.fallback_lattice, FooPartial, Any[Const(1), Int, Const(3)]) @test PT1 ⊑ PT2 @test !(PT1 ⊑ PT3) && !(PT2 ⊑ PT1) @@ -4788,7 +4790,7 @@ end # at top level. @test let Base.Experimental.@force_compile - Compiler.return_type(+, NTuple{2, Rational}) + Base._return_type(+, NTuple{2, Rational}) end == Rational # vararg-tuple comparison within `Compiler.PartialStruct` @@ -5186,9 +5188,9 @@ end |> only === Tuple{Int,Symbol} end end) == Type{Nothing} -# Test that Compiler.return_type inference works for the 1-arg version +# Test that Base._return_type inference works for the 1-arg version @test Base.return_types() do - Compiler.return_type(Tuple{typeof(+), Int, Int}) + Base._return_type(Tuple{typeof(+), Int, Int}) end |> only == Type{Int} # Test that NamedTuple abstract iteration works for PartialStruct/Const @@ -5725,7 +5727,7 @@ end @eval function has_tuin() $(Expr(:throw_undef_if_not, :x, false)) end -@test Compiler.return_type(has_tuin, Tuple{}) === Union{} +@test Base.infer_return_type(has_tuin, Tuple{}) === Union{} @test_throws UndefVarError has_tuin() function gen_tuin_from_arg(world::UInt, source, _, _) @@ -5780,7 +5782,7 @@ end # We want to make sure that both this returns `Tuple` and that # it doesn't infinite loop inside inference. -@test Compiler.return_type(gen_infinite_loop_ssa, Tuple{}) === Tuple +@test Base.infer_return_type(gen_infinite_loop_ssa, Tuple{}) === Tuple # inference local cache lookup with extended lattice elements that may be transformed # by `matching_cache_argtypes` @@ -5816,7 +5818,7 @@ function foo54341(a, b, c, d, args...) end bar54341(args...) = foo54341(4, args...) 
-@test Compiler.return_type(bar54341, Tuple{Vararg{Int}}) === Int +@test Base.infer_return_type(bar54341, Tuple{Vararg{Int}}) === Int # `PartialStruct` for partially initialized structs: struct PartiallyInitialized1 @@ -5953,7 +5955,7 @@ end # InterConditional rt with Vararg argtypes fcondvarargs(a, b, c, d) = isa(d, Int64) gcondvarargs(a, x...) = return fcondvarargs(a, x...) ? isa(a, Int64) : !isa(a, Int64) -@test Compiler.return_type(gcondvarargs, Tuple{Vararg{Any}}) === Bool +@test Base.infer_return_type(gcondvarargs, Tuple{Vararg{Any}}) === Bool # JuliaLang/julia#55627: argtypes check in `abstract_call_opaque_closure` issue55627_make_oc() = Base.Experimental.@opaque (x::Int) -> 2x diff --git a/Compiler/test/inline.jl b/Compiler/test/inline.jl index 158d9f545220a..46b78db3b781c 100644 --- a/Compiler/test/inline.jl +++ b/Compiler/test/inline.jl @@ -1857,7 +1857,7 @@ let i::Int, continue_::Bool ir = Compiler.ssa_inlining_pass!(ir, inlining, false) @test findfirst(isinvoke(:func_mul_int), ir.stmts.stmt) === nothing @test (i = findfirst(iscall((ir, Core.Intrinsics.mul_int)), ir.stmts.stmt)) !== nothing - lins = Base.IRShow.buildLineInfoNode(ir.debuginfo, nothing, i) + lins = Compiler.IRShow.buildLineInfoNode(ir.debuginfo, nothing, i) @test (continue_ = length(lins) == 2) # :multi_inlining1 -> :func_mul_int if continue_ def1 = lins[1].method @@ -1881,7 +1881,7 @@ let i::Int, continue_::Bool ir = Compiler.ssa_inlining_pass!(ir, inlining, false) @test findfirst(isinvoke(:func_mul_int), ir.stmts.stmt) === nothing @test (i = findfirst(iscall((ir, Core.Intrinsics.mul_int)), ir.stmts.stmt)) !== nothing - lins = Base.IRShow.buildLineInfoNode(ir.debuginfo, nothing, i) + lins = Compiler.IRShow.buildLineInfoNode(ir.debuginfo, nothing, i) @test_broken (continue_ = length(lins) == 3) # see TODO in `ir_inline_linetable!` if continue_ def1 = lins[1].method diff --git a/Compiler/test/interpreter_exec.jl b/Compiler/test/interpreter_exec.jl index 65f42a0c7b89b..4972df1a27202 100644 --- a/Compiler/test/interpreter_exec.jl +++ b/Compiler/test/interpreter_exec.jl @@ -1,17 +1,11 @@ # This file is a part of Julia. 
License is MIT: https://julialang.org/license # tests that interpreter matches codegen +include("setup_Compiler.jl") + using Test using Core.IR -if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end - # test that interpreter correctly handles PhiNodes (#29262) let m = Meta.@lower 1 + 1 @assert Meta.isexpr(m, :thunk) diff --git a/Compiler/test/invalidation.jl b/Compiler/test/invalidation.jl index c986cb298369f..2642c1647a682 100644 --- a/Compiler/test/invalidation.jl +++ b/Compiler/test/invalidation.jl @@ -104,7 +104,7 @@ begin let rt = only(Base.return_types(pr48932_callee, (Any,))) @test rt === Any effects = Base.infer_effects(pr48932_callee, (Any,)) - @test Compiler.Effects(effects) == Compiler.Effects() + @test effects == Compiler.Effects() end # run inference on both `pr48932_caller` and `pr48932_callee` @@ -171,7 +171,7 @@ begin take!(GLOBAL_BUFFER) let rt = only(Base.return_types(pr48932_callee_inferable, (Any,))) @test rt === Int effects = Base.infer_effects(pr48932_callee_inferable, (Any,)) - @test Compiler.Effects(effects) == Compiler.Effects() + @test effects == Compiler.Effects() end # run inference on both `pr48932_caller` and `pr48932_callee`: @@ -233,7 +233,7 @@ begin take!(GLOBAL_BUFFER) let rt = only(Base.return_types(pr48932_callee_inlined, (Any,))) @test rt === Any effects = Base.infer_effects(pr48932_callee_inlined, (Any,)) - @test Compiler.Effects(effects) == Compiler.Effects() + @test effects == Compiler.Effects() end # run inference on `pr48932_caller_inlined` and `pr48932_callee_inlined` diff --git a/Compiler/test/irutils.jl b/Compiler/test/irutils.jl index d1a3a2ea57c35..c1616ad4a8fd0 100644 --- a/Compiler/test/irutils.jl +++ b/Compiler/test/irutils.jl @@ -1,12 +1,6 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license -if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end +include("setup_Compiler.jl") using Core.IR using .Compiler: IRCode, IncrementalCompact, singleton_type, VarState @@ -68,7 +62,7 @@ macro fully_eliminated(ex0...) end let m = Meta.@lower 1 + 1 - @assert Meta.isexpr(m, :thunk) + @assert isexpr(m, :thunk) orig_src = m.args[1]::CodeInfo global function make_codeinfo(code::Vector{Any}; ssavaluetypes::Union{Nothing,Vector{Any}}=nothing, diff --git a/Compiler/test/runtests.jl b/Compiler/test/runtests.jl index ea3df3aa2855d..6a38fce678ba0 100644 --- a/Compiler/test/runtests.jl +++ b/Compiler/test/runtests.jl @@ -3,7 +3,10 @@ using Test, Compiler using InteractiveUtils: @activate @activate Compiler -for file in readlines(joinpath(@__DIR__, "testgroups")) - file == "special_loading" && continue # Only applicable to Base.Compiler - include(file * ".jl") +@testset "Compiler.jl" begin + for file in readlines(joinpath(@__DIR__, "testgroups")) + file == "special_loading" && continue # Only applicable to Base.Compiler + testfile = file * ".jl" + @eval @testset $testfile include($testfile) + end end diff --git a/Compiler/test/setup_Compiler.jl b/Compiler/test/setup_Compiler.jl new file mode 100644 index 0000000000000..a28a3f918aaf9 --- /dev/null +++ b/Compiler/test/setup_Compiler.jl @@ -0,0 +1,9 @@ +# This file is a part of Julia. 
License is MIT: https://julialang.org/license + +if !@isdefined(Compiler) + if Base.REFLECTION_COMPILER[] === nothing + using Base.Compiler: Compiler + else + const Compiler = Base.REFLECTION_COMPILER[] + end +end diff --git a/Compiler/test/ssair.jl b/Compiler/test/ssair.jl index d6707e4dec9c2..6100aad673040 100644 --- a/Compiler/test/ssair.jl +++ b/Compiler/test/ssair.jl @@ -2,8 +2,8 @@ include("irutils.jl") -using Base.Meta -using Core.IR +using Test + using .Compiler: CFG, BasicBlock, NewSSAValue make_bb(preds, succs) = BasicBlock(Compiler.StmtRange(0, 0), preds, succs) @@ -393,13 +393,7 @@ f_if_typecheck() = (if nothing; end; unsafe_load(Ptr{Int}(0))) let # https://github.com/JuliaLang/julia/issues/42258 code = """ - if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end - end + using Base: Compiler function foo() a = @noinline rand(rand(0:10)) diff --git a/Compiler/test/tarjan.jl b/Compiler/test/tarjan.jl index 49124bdf650fe..aa04bd94a6f6a 100644 --- a/Compiler/test/tarjan.jl +++ b/Compiler/test/tarjan.jl @@ -1,5 +1,7 @@ # This file is a part of Julia. License is MIT: https://julialang.org/license +using Test + include("irutils.jl") using .Compiler: CFGReachability, DomTree, CFG, BasicBlock, StmtRange, dominates, diff --git a/Compiler/test/validation.jl b/Compiler/test/validation.jl index 38dfa9705d542..5328516f63d36 100644 --- a/Compiler/test/validation.jl +++ b/Compiler/test/validation.jl @@ -2,13 +2,7 @@ using Test, Core.IR -if !@isdefined(Compiler) - if Base.identify_package("Compiler") === nothing - import Base.Compiler: Compiler - else - import Compiler - end -end +include("setup_Compiler.jl") function f22938(a, b, x...) nothing diff --git a/base/Base_compiler.jl b/base/Base_compiler.jl index 14edf3e93aad6..6014a6b7c9dd0 100644 --- a/base/Base_compiler.jl +++ b/base/Base_compiler.jl @@ -294,7 +294,6 @@ function isready end include(strcat(DATAROOT, "julia/Compiler/src/Compiler.jl")) - const _return_type = Compiler.return_type # Enable compiler From 522f496994d374afe4e38db31cbb6543cda144ef Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Fri, 22 Nov 2024 11:49:57 +0100 Subject: [PATCH 489/537] only import REPL in runtests if we are actually going to use it (#56635) External stdlibs that want to use this might not want to have to load REPL etc. REPL is used in https://github.com/JuliaLang/julia/blob/4709b6c48e79f6226e6dbee1b49bf7e563058ff7/test/runtests.jl#L215. 
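Concretely, the guard added below keeps REPL out of non-interactive runs (for example an external stdlib's CI job); a minimal sketch of the change on the runtests.jl side:

```julia
# Load REPL only when the interactive (non-Windows, TTY stdin) code path that
# needs it can actually run.
if !Sys.iswindows() && isa(stdin, Base.TTY)
    import REPL
end
```
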
--- test/runtests.jl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/runtests.jl b/test/runtests.jl index 67a15c0a03a1f..fd0326d48ee6c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -3,7 +3,9 @@ using Test using Distributed using Dates -import REPL +if !Sys.iswindows() && isa(stdin, Base.TTY) + import REPL +end using Printf: @sprintf using Base: Experimental From bdf78c9ece6f46d71ef78801410deb6fe99af642 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 22 Nov 2024 09:42:11 -0500 Subject: [PATCH 490/537] fix jl_mutex_lock deadlock under rr (#56644) --- src/gc-stock.c | 6 +++--- src/julia_internal.h | 8 ++++---- src/safepoint.c | 32 +++++++++++++++----------------- src/signals-unix.c | 2 +- src/signals-win.c | 2 +- src/threading.c | 17 ++++++++++------- 6 files changed, 34 insertions(+), 33 deletions(-) diff --git a/src/gc-stock.c b/src/gc-stock.c index 1a8d85e249c29..61a013f347975 100644 --- a/src/gc-stock.c +++ b/src/gc-stock.c @@ -3333,10 +3333,10 @@ JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection) jl_atomic_store_release(&ptls->gc_state, JL_GC_STATE_WAITING); // `jl_safepoint_start_gc()` makes sure only one thread can run the GC. uint64_t t0 = jl_hrtime(); - if (!jl_safepoint_start_gc()) { + if (!jl_safepoint_start_gc(ct)) { // either another thread is running GC, or the GC got disabled just now. jl_gc_state_set(ptls, old_state, JL_GC_STATE_WAITING); - jl_safepoint_wait_thread_resume(); // block in thread-suspend now if requested, after clearing the gc_state + jl_safepoint_wait_thread_resume(ct); // block in thread-suspend now if requested, after clearing the gc_state return; } @@ -3390,7 +3390,7 @@ JL_DLLEXPORT void jl_gc_collect(jl_gc_collection_t collection) jl_safepoint_end_gc(); jl_gc_state_set(ptls, old_state, JL_GC_STATE_WAITING); JL_PROBE_GC_END(); - jl_safepoint_wait_thread_resume(); // block in thread-suspend now if requested, after clearing the gc_state + jl_safepoint_wait_thread_resume(ct); // block in thread-suspend now if requested, after clearing the gc_state // Only disable finalizers on current thread // Doing this on all threads is racy (it's impossible to check diff --git a/src/julia_internal.h b/src/julia_internal.h index cd101533f1b8d..05256fec5bb6d 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -1133,7 +1133,7 @@ void jl_safepoint_init(void); // before calling this function. If the calling thread is to run the GC, // it should also wait for the mutator threads to hit a safepoint **AFTER** // this function returns -int jl_safepoint_start_gc(void); +int jl_safepoint_start_gc(jl_task_t *ct); // Can only be called by the thread that have got a `1` return value from // `jl_safepoint_start_gc()`. This disables the safepoint (for GC, // the `mprotect` may not be removed if there's pending SIGINT) and wake @@ -1143,8 +1143,8 @@ void jl_safepoint_end_gc(void); // Wait for the GC to finish // This function does **NOT** modify the `gc_state` to inform the GC thread // The caller should set it **BEFORE** calling this function. -void jl_safepoint_wait_gc(void) JL_NOTSAFEPOINT; -void jl_safepoint_wait_thread_resume(void) JL_NOTSAFEPOINT; +void jl_safepoint_wait_gc(jl_task_t *ct) JL_NOTSAFEPOINT; +void jl_safepoint_wait_thread_resume(jl_task_t *ct) JL_NOTSAFEPOINT; int8_t jl_safepoint_take_sleep_lock(jl_ptls_t ptls) JL_NOTSAFEPOINT_ENTER; // Set pending sigint and enable the mechanisms to deliver the sigint. 
void jl_safepoint_enable_sigint(void); @@ -1170,7 +1170,7 @@ JL_DLLEXPORT void jl_pgcstack_getkey(jl_get_pgcstack_func **f, jl_pgcstack_key_t extern pthread_mutex_t in_signal_lock; #endif -void jl_set_gc_and_wait(void); // n.b. not used on _OS_DARWIN_ +void jl_set_gc_and_wait(jl_task_t *ct); // n.b. not used on _OS_DARWIN_ // Query if a Julia object is if a permalloc region (due to part of a sys- pkg-image) STATIC_INLINE size_t n_linkage_blobs(void) JL_NOTSAFEPOINT diff --git a/src/safepoint.c b/src/safepoint.c index 8e24543c6769d..7eab653edd089 100644 --- a/src/safepoint.c +++ b/src/safepoint.c @@ -158,17 +158,16 @@ void jl_gc_wait_for_the_world(jl_ptls_t* gc_all_tls_states, int gc_n_threads) } } -int jl_safepoint_start_gc(void) +int jl_safepoint_start_gc(jl_task_t *ct) { // The thread should have just set this before entry - assert(jl_atomic_load_relaxed(&jl_current_task->ptls->gc_state) == JL_GC_STATE_WAITING); + assert(jl_atomic_load_relaxed(&ct->ptls->gc_state) == JL_GC_STATE_WAITING); uv_mutex_lock(&safepoint_lock); uv_cond_broadcast(&safepoint_cond_begin); // make sure we are permitted to run GC now (we might be required to stop instead) - jl_task_t *ct = jl_current_task; while (jl_atomic_load_relaxed(&ct->ptls->suspend_count)) { uv_mutex_unlock(&safepoint_lock); - jl_safepoint_wait_thread_resume(); + jl_safepoint_wait_thread_resume(ct); uv_mutex_lock(&safepoint_lock); } // In case multiple threads enter the GC at the same time, only allow @@ -178,7 +177,7 @@ int jl_safepoint_start_gc(void) uint32_t running = 0; if (!jl_atomic_cmpswap(&jl_gc_running, &running, 1)) { uv_mutex_unlock(&safepoint_lock); - jl_safepoint_wait_gc(); + jl_safepoint_wait_gc(ct); return 0; } // Foreign thread adoption disables the GC and waits for it to finish, however, that may @@ -213,9 +212,8 @@ void jl_safepoint_end_gc(void) uv_cond_broadcast(&safepoint_cond_end); } -void jl_set_gc_and_wait(void) // n.b. not used on _OS_DARWIN_ +void jl_set_gc_and_wait(jl_task_t *ct) // n.b. not used on _OS_DARWIN_ { - jl_task_t *ct = jl_current_task; // reading own gc state doesn't need atomic ops since no one else // should store to it. int8_t state = jl_atomic_load_relaxed(&ct->ptls->gc_state); @@ -223,18 +221,19 @@ void jl_set_gc_and_wait(void) // n.b. not used on _OS_DARWIN_ uv_mutex_lock(&safepoint_lock); uv_cond_broadcast(&safepoint_cond_begin); uv_mutex_unlock(&safepoint_lock); - jl_safepoint_wait_gc(); + jl_safepoint_wait_gc(ct); jl_atomic_store_release(&ct->ptls->gc_state, state); - jl_safepoint_wait_thread_resume(); // block in thread-suspend now if requested, after clearing the gc_state + jl_safepoint_wait_thread_resume(ct); // block in thread-suspend now if requested, after clearing the gc_state } // this is the core of jl_set_gc_and_wait -void jl_safepoint_wait_gc(void) JL_NOTSAFEPOINT +void jl_safepoint_wait_gc(jl_task_t *ct) JL_NOTSAFEPOINT { - jl_task_t *ct = jl_current_task; (void)ct; - JL_TIMING_SUSPEND_TASK(GC_SAFEPOINT, ct); - // The thread should have set this is already - assert(jl_atomic_load_relaxed(&ct->ptls->gc_state) != JL_GC_STATE_UNSAFE); + if (ct) { + JL_TIMING_SUSPEND_TASK(GC_SAFEPOINT, ct); + // The thread should have set this is already + assert(jl_atomic_load_relaxed(&ct->ptls->gc_state) != JL_GC_STATE_UNSAFE); + } // Use normal volatile load in the loop for speed until GC finishes. // Then use an acquire load to make sure the GC result is visible on this thread. 
while (jl_atomic_load_relaxed(&jl_gc_running) || jl_atomic_load_acquire(&jl_gc_running)) { @@ -249,9 +248,8 @@ void jl_safepoint_wait_gc(void) JL_NOTSAFEPOINT } // equivalent to jl_set_gc_and_wait, but waiting on resume-thread lock instead -void jl_safepoint_wait_thread_resume(void) +void jl_safepoint_wait_thread_resume(jl_task_t *ct) { - jl_task_t *ct = jl_current_task; // n.b. we do not permit a fast-path here that skips the lock acquire since // we otherwise have no synchronization point to ensure that this thread // will observe the change to the safepoint, even though the other thread @@ -333,7 +331,7 @@ int jl_safepoint_suspend_thread(int tid, int waitstate) // It will be unable to reenter helping with GC because we have // changed its safepoint page. uv_mutex_unlock(&safepoint_lock); - jl_set_gc_and_wait(); + jl_set_gc_and_wait(jl_current_task); uv_mutex_lock(&safepoint_lock); } while (jl_atomic_load_acquire(&ptls2->suspend_count) != 0) { diff --git a/src/signals-unix.c b/src/signals-unix.c index 301b875018c1c..394c4a108b647 100644 --- a/src/signals-unix.c +++ b/src/signals-unix.c @@ -392,7 +392,7 @@ JL_NO_ASAN static void segv_handler(int sig, siginfo_t *info, void *context) return; } if (sig == SIGSEGV && info->si_code == SEGV_ACCERR && jl_addr_is_safepoint((uintptr_t)info->si_addr) && !is_write_fault(context)) { - jl_set_gc_and_wait(); + jl_set_gc_and_wait(ct); // Do not raise sigint on worker thread if (jl_atomic_load_relaxed(&ct->tid) != 0) return; diff --git a/src/signals-win.c b/src/signals-win.c index 2a594bc92b9b7..dbf95fdb19791 100644 --- a/src/signals-win.c +++ b/src/signals-win.c @@ -256,7 +256,7 @@ LONG WINAPI jl_exception_handler(struct _EXCEPTION_POINTERS *ExceptionInfo) break; case EXCEPTION_ACCESS_VIOLATION: if (jl_addr_is_safepoint(ExceptionInfo->ExceptionRecord->ExceptionInformation[1])) { - jl_set_gc_and_wait(); + jl_set_gc_and_wait(ct); // Do not raise sigint on worker thread if (ptls->tid != 0) return EXCEPTION_CONTINUE_EXECUTION; diff --git a/src/threading.c b/src/threading.c index 42174830d9b43..8f0dfb3330885 100644 --- a/src/threading.c +++ b/src/threading.c @@ -427,11 +427,9 @@ JL_DLLEXPORT jl_gcframe_t **jl_adopt_thread(void) { // `jl_init_threadtls` puts us in a GC unsafe region, so ensure GC isn't running. // we can't use a normal safepoint because we don't have signal handlers yet. - // we also can't use jl_safepoint_wait_gc because that assumes we're in a task. jl_atomic_fetch_add(&jl_gc_disable_counter, 1); - while (jl_atomic_load_acquire(&jl_gc_running)) { - jl_cpu_pause(); - } + // pass NULL as a special token to indicate we are running on an unmanaged task + jl_safepoint_wait_gc(NULL); // this check is coupled with the one in `jl_safepoint_wait_gc`, where we observe if a // foreign thread has asked to disable the GC, guaranteeing the order of events. 
@@ -915,15 +913,20 @@ void _jl_mutex_wait(jl_task_t *self, jl_mutex_t *lock, int safepoint) jl_profile_lock_acquired(lock); return; } - if (safepoint) { - jl_gc_safepoint_(self->ptls); - } if (jl_running_under_rr(0)) { // when running under `rr`, use system mutexes rather than spin locking + int8_t gc_state; + if (safepoint) + gc_state = jl_gc_safe_enter(self->ptls); uv_mutex_lock(&tls_lock); if (jl_atomic_load_relaxed(&lock->owner)) uv_cond_wait(&cond, &tls_lock); uv_mutex_unlock(&tls_lock); + if (safepoint) + jl_gc_safe_leave(self->ptls, gc_state); + } + else if (safepoint) { + jl_gc_safepoint_(self->ptls); } jl_cpu_suspend(); owner = jl_atomic_load_relaxed(&lock->owner); From 2a02fc34af060430083bafe06fdda2a12968c7e9 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Fri, 22 Nov 2024 10:31:11 -0500 Subject: [PATCH 491/537] More testsets, fix commented out test, add Array{UInt8} conversion test (#56586) Moved some `let...end` blocks into `@testset begin ... end` format. Added a test for converting a string to `Array{UInt8}`. Restored a commented out testset. --- test/strings/basic.jl | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/test/strings/basic.jl b/test/strings/basic.jl index ee92995bd2e11..b7d7f0c039711 100644 --- a/test/strings/basic.jl +++ b/test/strings/basic.jl @@ -343,9 +343,7 @@ end @test_throws StringIndexError get(utf8_str, 2, 'X') end -#= -# issue #7764 -let +@testset "issue #7764" begin srep = repeat("Σβ",2) s="Σβ" ss=SubString(s,1,lastindex(s)) @@ -358,16 +356,15 @@ let @test iterate(srep, 7) == ('β',9) @test srep[7] == 'β' - @test_throws BoundsError srep[8] + @test_throws StringIndexError srep[8] end -=# # This caused JuliaLang/JSON.jl#82 @test first('\x00':'\x7f') === '\x00' @test last('\x00':'\x7f') === '\x7f' -# make sure substrings do not accept code unit if it is not start of codepoint -let s = "x\u0302" +@testset "make sure substrings do not accept code unit if it is not start of codepoint" begin + s = "x\u0302" @test s[1:2] == s @test_throws BoundsError s[0:3] @test_throws BoundsError s[1:4] @@ -1076,8 +1073,8 @@ let s = "∀x∃y", u = codeunits(s) @test Base.elsize(u) == Base.elsize(typeof(u)) == 1 end -# issue #24388 -let v = unsafe_wrap(Vector{UInt8}, "abc") +@testset "issue #24388" begin + v = unsafe_wrap(Vector{UInt8}, "abc") s = String(v) @test_throws BoundsError v[1] push!(v, UInt8('x')) @@ -1093,8 +1090,8 @@ let v = [0x40,0x41,0x42] @test String(view(v, 2:3)) == "AB" end -# issue #54369 -let v = Base.StringMemory(3) +@testset "issue #54369" begin + v = Base.StringMemory(3) v .= [0x41,0x42,0x43] s = String(v) @test s == "ABC" @@ -1116,8 +1113,8 @@ let rng = MersenneTwister(1), strs = ["∀εa∀aε"*String(rand(rng, UInt8, 100 end end -# conversion of SubString to the same type, issue #25525 -let x = SubString("ab", 1, 1) +@testset "conversion of SubString to the same type, issue #25525" begin + x = SubString("ab", 1, 1) y = convert(SubString{String}, x) @test y === x chop("ab") === chop.(["ab"])[1] @@ -1170,6 +1167,9 @@ end apple_uint8 = Vector{UInt8}("Apple") @test apple_uint8 == [0x41, 0x70, 0x70, 0x6c, 0x65] + apple_uint8 = Array{UInt8}("Apple") + @test apple_uint8 == [0x41, 0x70, 0x70, 0x6c, 0x65] + Base.String(::tstStringType) = "Test" abstract_apple = tstStringType(apple_uint8) @test hash(abstract_apple, UInt(1)) == hash("Test", UInt(1)) From e29d211547be67704643e5c9014502ac5a7802e4 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Fri, 22 Nov 2024 10:31:29 -0500 Subject: [PATCH 492/537] Test lastindex for 
LazyString (#56585) Should get the lazy strings file to 100% coverage --- test/strings/basic.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/test/strings/basic.jl b/test/strings/basic.jl index b7d7f0c039711..bc4e5ae66419a 100644 --- a/test/strings/basic.jl +++ b/test/strings/basic.jl @@ -1195,6 +1195,7 @@ end @test codeunit(l) == UInt8 @test codeunit(l,2) == 0x2b @test isvalid(l, 1) + @test lastindex(l) == lastindex("1+2") @test Base.infer_effects((Any,)) do a throw(lazy"a is $a") end |> Core.Compiler.is_foldable From 5fab51a38cbb41da5d2619a6923619144725c437 Mon Sep 17 00:00:00 2001 From: Katharine Hyatt Date: Fri, 22 Nov 2024 11:12:45 -0500 Subject: [PATCH 493/537] Tests for substrings of annotated strings (#56584) Codecov shows these as not covered yet --- test/strings/annotated.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/test/strings/annotated.jl b/test/strings/annotated.jl index 85acab74abf7b..3379452d3e871 100644 --- a/test/strings/annotated.jl +++ b/test/strings/annotated.jl @@ -34,6 +34,9 @@ @test str[3:4] == SubString(str, 3, 4) @test str[3:4] != SubString("me") @test SubString("me") != str[3:4] + @test Base.AnnotatedString(str[3:4]) == SubString(str, 3, 4) + @test repeat(SubString(str, 3, 4), 2) == repeat(Base.AnnotatedString(str[3:4]), 2) + @test reverse(SubString(str, 3, 4)) == reverse(Base.AnnotatedString(str[3:4])) @test Base.AnnotatedString(str[3:4]) == Base.AnnotatedString("me", [(1:2, :thing, 0x01), (1:2, :all, 0x03)]) @test Base.AnnotatedString(str[3:6]) == From 7354be369c15fe0ab6b0a7b66f82457cc6d2e12a Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Fri, 22 Nov 2024 14:30:41 -0500 Subject: [PATCH 494/537] codegen: make more judicious use of global rooting (#56625) Only temporarily root objects during codegen, so that it is the responsibility of the caller to ensure the values live (including write barriers and old generations) only as long as necessary for correct execution, and not preserve values that never make it into the IR. 
--- src/aotcompile.cpp | 123 +++++++++++++++++++++++++++++++++++++++---- src/ccall.cpp | 9 ++-- src/cgutils.cpp | 12 +++-- src/codegen.cpp | 46 +++++++--------- src/jitlayers.cpp | 60 ++++++++++++++++++++- src/jitlayers.h | 1 + src/julia_internal.h | 2 +- 7 files changed, 204 insertions(+), 49 deletions(-) diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index 4b3f1f1171ded..6af5227aafd92 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -320,14 +320,106 @@ static jl_code_instance_t *jl_ci_cache_lookup(jl_method_instance_t *mi, size_t w return codeinst; } +namespace { // file-local namespace +class egal_set { +public: + jl_genericmemory_t *list = (jl_genericmemory_t*)jl_an_empty_memory_any; + jl_genericmemory_t *keyset = (jl_genericmemory_t*)jl_an_empty_memory_any; + egal_set(egal_set&) = delete; + egal_set(egal_set&&) = delete; + egal_set() = default; + void insert(jl_value_t *val) + { + jl_value_t *rval = jl_idset_get(list, keyset, val); + if (rval == NULL) { + ssize_t idx; + list = jl_idset_put_key(list, val, &idx); + keyset = jl_idset_put_idx(list, keyset, idx); + } + } + jl_value_t *get(jl_value_t *val) + { + return jl_idset_get(list, keyset, val); + } +}; +} +using ::egal_set; typedef DenseMap> jl_compiled_functions_t; -static void compile_workqueue(jl_codegen_params_t ¶ms, CompilationPolicy policy, jl_compiled_functions_t &compiled_functions) + +static void record_method_roots(egal_set &method_roots, jl_method_instance_t *mi) +{ + jl_method_t *m = mi->def.method; + if (!jl_is_method(m)) + return; + // the method might have a root for this already; use it if so + JL_LOCK(&m->writelock); + if (m->roots) { + size_t j, len = jl_array_dim0(m->roots); + for (j = 0; j < len; j++) { + jl_value_t *v = jl_array_ptr_ref(m->roots, j); + if (jl_is_globally_rooted(v)) + continue; + method_roots.insert(v); + } + } + JL_UNLOCK(&m->writelock); +} + +static void aot_optimize_roots(jl_codegen_params_t ¶ms, egal_set &method_roots, jl_compiled_functions_t &compiled_functions) +{ + for (size_t i = 0; i < jl_array_dim0(params.temporary_roots); i++) { + jl_value_t *val = jl_array_ptr_ref(params.temporary_roots, i); + auto ref = params.global_targets.find((void*)val); + if (ref == params.global_targets.end()) + continue; + auto get_global_root = [val, &method_roots]() { + if (jl_is_globally_rooted(val)) + return val; + jl_value_t *mval = method_roots.get(val); + if (mval) + return mval; + return jl_as_global_root(val, 1); + }; + jl_value_t *mval = get_global_root(); + if (mval != val) { + GlobalVariable *GV = ref->second; + params.global_targets.erase(ref); + auto mref = params.global_targets.find((void*)mval); + if (mref != params.global_targets.end()) { + // replace ref with mref in all Modules + std::string OldName(GV->getName()); + StringRef NewName(mref->second->getName()); + for (auto &def : compiled_functions) { + orc::ThreadSafeModule &TSM = std::get<0>(def.second); + Module &M = *TSM.getModuleUnlocked(); + if (GlobalValue *GV2 = M.getNamedValue(OldName)) { + if (GV2 == GV) + GV = nullptr; + // either replace or rename the old value to use the other equivalent name + if (GlobalValue *GV3 = M.getNamedValue(NewName)) { + GV2->replaceAllUsesWith(GV3); + GV2->eraseFromParent(); + } + else { + GV2->setName(NewName); + } + } + } + assert(GV == nullptr); + } + else { + params.global_targets[(void*)mval] = GV; + } + } + } +} + +static void compile_workqueue(jl_codegen_params_t ¶ms, egal_set &method_roots, CompilationPolicy policy, jl_compiled_functions_t &compiled_functions) { 
decltype(params.workqueue) workqueue; std::swap(params.workqueue, workqueue); - jl_code_info_t *src = NULL; jl_code_instance_t *codeinst = NULL; - JL_GC_PUSH2(&src, &codeinst); + JL_GC_PUSH1(&codeinst); assert(!params.cache); while (!workqueue.empty()) { auto it = workqueue.pop_back_val(); @@ -352,6 +444,7 @@ static void compile_workqueue(jl_codegen_params_t ¶ms, CompilationPolicy pol jl_create_ts_module(name_from_method_instance(codeinst->def), params.tsctx, params.DL, params.TargetTriple); auto decls = jl_emit_codeinst(result_m, codeinst, NULL, params); + record_method_roots(method_roots, codeinst->def); if (result_m) it = compiled_functions.insert(std::make_pair(codeinst, std::make_pair(std::move(result_m), std::move(decls)))).first; } @@ -432,7 +525,6 @@ static void compile_workqueue(jl_codegen_params_t ¶ms, CompilationPolicy pol JL_GC_POP(); } - // takes the running content that has collected in the shadow module and dump it to disk // this builds the object file portion of the sysimage files for fast startup, and can // also be used be extern consumers like GPUCompiler.jl to obtain a module containing @@ -454,8 +546,6 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm CompilationPolicy policy = (CompilationPolicy) _policy; bool imaging = imaging_default() || _imaging_mode == 1; jl_method_instance_t *mi = NULL; - jl_code_info_t *src = NULL; - JL_GC_PUSH1(&src); auto ct = jl_current_task; bool timed = (ct->reentrant_timing & 1) == 0; if (timed) @@ -479,11 +569,14 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm auto target_info = clone.withModuleDo([&](Module &M) { return std::make_pair(M.getDataLayout(), Triple(M.getTargetTriple())); }); + egal_set method_roots; jl_codegen_params_t params(ctxt, std::move(target_info.first), std::move(target_info.second)); params.params = cgparams; params.imaging_mode = imaging; params.debug_level = cgparams->debug_info_level; params.external_linkage = _external_linkage; + params.temporary_roots = jl_alloc_array_1d(jl_array_any_type, 0); + JL_GC_PUSH3(¶ms.temporary_roots, &method_roots.list, &method_roots.keyset); size_t compile_for[] = { jl_typeinf_world, _world }; int worlds = 0; if (jl_options.trim != JL_TRIM_NO) @@ -508,13 +601,13 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm continue; } mi = (jl_method_instance_t*)item; - src = NULL; // if this method is generally visible to the current compilation world, // and this is either the primary world, or not applicable in the primary world // then we want to compile and emit this if (jl_atomic_load_relaxed(&mi->def.method->primary_world) <= this_world && this_world <= jl_atomic_load_relaxed(&mi->def.method->deleted_world)) { // find and prepare the source code to compile jl_code_instance_t *codeinst = jl_ci_cache_lookup(mi, this_world, lookup); + JL_GC_PROMISE_ROOTED(codeinst); if (jl_options.trim != JL_TRIM_NO && !codeinst) { // If we're building a small image, we need to compile everything // to ensure that we have all the information we need. 
@@ -529,11 +622,12 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm data->jl_fvar_map[codeinst] = std::make_tuple((uint32_t)-3, (uint32_t)-3); } else { - JL_GC_PROMISE_ROOTED(codeinst->rettype); orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(codeinst->def), params.tsctx, clone.getModuleUnlocked()->getDataLayout(), Triple(clone.getModuleUnlocked()->getTargetTriple())); jl_llvm_functions_t decls = jl_emit_codeinst(result_m, codeinst, NULL, params); + JL_GC_PROMISE_ROOTED(codeinst->def); // analyzer seems confused + record_method_roots(method_roots, codeinst->def); if (result_m) compiled_functions[codeinst] = {std::move(result_m), std::move(decls)}; else if (jl_options.trim != JL_TRIM_NO) { @@ -555,9 +649,11 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm } } } - JL_GC_POP(); // finally, make sure all referenced methods also get compiled or fixed up - compile_workqueue(params, policy, compiled_functions); + compile_workqueue(params, method_roots, policy, compiled_functions); + aot_optimize_roots(params, method_roots, compiled_functions); + params.temporary_roots = nullptr; + JL_GC_POP(); // process the globals array, before jl_merge_module destroys them SmallVector gvars(params.global_targets.size()); @@ -2161,7 +2257,11 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, jl_ // This would also be nice, but it seems to cause OOMs on the windows32 builder // To get correct names in the IR this needs to be at least 2 output.debug_level = params.debug_info_level; + output.temporary_roots = jl_alloc_array_1d(jl_array_any_type, 0); + JL_GC_PUSH1(&output.temporary_roots); auto decls = jl_emit_code(m, mi, src, output); + output.temporary_roots = nullptr; + JL_GC_POP(); // GC the global_targets array contents now since reflection doesn't need it Function *F = NULL; if (m) { @@ -2171,7 +2271,8 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, jl_ for (auto &global : output.global_targets) { if (jl_options.image_codegen) { global.second->setLinkage(GlobalValue::ExternalLinkage); - } else { + } + else { auto p = literal_static_pointer_val(global.first, global.second->getValueType()); #if JL_LLVM_VERSION >= 170000 Type *elty = PointerType::get(output.getContext(), 0); diff --git a/src/ccall.cpp b/src/ccall.cpp index f559ddbe93a43..952625a71287b 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -1548,7 +1548,7 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs) return jl_cgval_t(); } if (rt != args[2] && rt != (jl_value_t*)jl_any_type) - rt = jl_ensure_rooted(ctx, rt); + jl_temporary_root(ctx, rt); function_sig_t sig("ccall", lrt, rt, retboxed, (jl_svec_t*)at, unionall, nreqargs, cc, llvmcall, &ctx.emission_context); @@ -2036,8 +2036,11 @@ jl_cgval_t function_sig_t::emit_a_ccall( if (ctx.spvals_ptr == NULL && !toboxed && unionall_env && jl_has_typevar_from_unionall(jargty, unionall_env) && jl_svec_len(ctx.linfo->sparam_vals) > 0) { jargty_in_env = jl_instantiate_type_in_env(jargty_in_env, unionall_env, jl_svec_data(ctx.linfo->sparam_vals)); - if (jargty_in_env != jargty) - jargty_in_env = jl_ensure_rooted(ctx, jargty_in_env); + if (jargty_in_env != jargty) { + JL_GC_PUSH1(&jargty_in_env); + jl_temporary_root(ctx, jargty_in_env); + JL_GC_POP(); + } } Value *v; diff --git a/src/cgutils.cpp b/src/cgutils.cpp index 157d253ba4f21..7d4bd917eff30 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -397,7 +397,8 @@ static 
llvm::SmallVector get_gc_roots_for(jl_codectx_t &ctx, const jl_ // --- emitting pointers directly into code --- - +static void jl_temporary_root(jl_codegen_params_t &ctx, jl_value_t *val); +static void jl_temporary_root(jl_codectx_t &ctx, jl_value_t *val); static inline Constant *literal_static_pointer_val(const void *p, Type *T); static Constant *julia_pgv(jl_codectx_t &ctx, const char *cname, void *addr) @@ -777,7 +778,8 @@ static Type *_julia_struct_to_llvm(jl_codegen_params_t *ctx, LLVMContext &ctxt, if (ntypes == 0 || jl_datatype_nbits(jst) == 0) return getVoidTy(ctxt); Type *_struct_decl = NULL; - // TODO: we should probably make a temporary root for `jst` somewhere + if (ctx) + jl_temporary_root(*ctx, jt); // don't use pre-filled struct_decl for llvmcall (f16, etc. may be different) Type *&struct_decl = (ctx && !llvmcall ? ctx->llvmtypes[jst] : _struct_decl); if (struct_decl) @@ -3506,8 +3508,6 @@ static Value *call_with_attrs(jl_codectx_t &ctx, JuliaFunction *intr, return Call; } -static jl_value_t *jl_ensure_rooted(jl_codectx_t &ctx, jl_value_t *val); - static Value *as_value(jl_codectx_t &ctx, Type *to, const jl_cgval_t &v) { assert(!v.isboxed); @@ -3540,7 +3540,9 @@ static Value *_boxed_special(jl_codectx_t &ctx, const jl_cgval_t &vinfo, Type *t if (Constant *c = dyn_cast(vinfo.V)) { jl_value_t *s = static_constant_instance(jl_Module->getDataLayout(), c, jt); if (s) { - s = jl_ensure_rooted(ctx, s); + JL_GC_PUSH1(&s); + jl_temporary_root(ctx, s); + JL_GC_POP(); return track_pjlvalue(ctx, literal_pointer_val(ctx, s)); } } diff --git a/src/codegen.cpp b/src/codegen.cpp index e3225a1a7dec2..3645a0b25827e 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -3410,27 +3410,20 @@ static void simple_use_analysis(jl_codectx_t &ctx, jl_value_t *expr) // ---- Get Element Pointer (GEP) instructions within the GC frame ---- -static jl_value_t *jl_ensure_rooted(jl_codectx_t &ctx, jl_value_t *val) -{ - if (jl_is_globally_rooted(val)) - return val; - jl_method_t *m = ctx.linfo->def.method; - if (!jl_options.strip_ir && jl_is_method(m)) { - // the method might have a root for this already; use it if so - JL_LOCK(&m->writelock); - if (m->roots) { - size_t i, len = jl_array_dim0(m->roots); - for (i = 0; i < len; i++) { - jl_value_t *mval = jl_array_ptr_ref(m->roots, i); - if (mval == val || jl_egal(mval, val)) { - JL_UNLOCK(&m->writelock); - return mval; - } - } +static void jl_temporary_root(jl_codegen_params_t &ctx, jl_value_t *val) +{ + if (!jl_is_globally_rooted(val)) { + jl_array_t *roots = ctx.temporary_roots; + for (size_t i = 0; i < jl_array_dim0(roots); i++) { + if (jl_array_ptr_ref(roots, i) == val) + return; } - JL_UNLOCK(&m->writelock); + jl_array_ptr_1d_push(roots, val); } - return jl_as_global_root(val, 1); +} +static void jl_temporary_root(jl_codectx_t &ctx, jl_value_t *val) +{ + jl_temporary_root(ctx.emission_context, val); } // --- generating function calls --- @@ -5060,7 +5053,7 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, jl_value_t *ty = static_apply_type(ctx, argv, nargs + 1); if (ty != NULL) { JL_GC_PUSH1(&ty); - ty = jl_ensure_rooted(ctx, ty); + jl_temporary_root(ctx, ty); JL_GC_POP(); *ret = mark_julia_const(ctx, ty); return true; @@ -6785,7 +6778,7 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_ val = jl_fieldref_noalloc(expr, 0); // Toplevel exprs are rooted but because codegen assumes this is constant, it removes the write barriers for this code. 
// This means we have to globally root the value here. (The other option would be to change how we optimize toplevel code) - val = jl_ensure_rooted(ctx, val); + jl_temporary_root(ctx, val); return mark_julia_const(ctx, val); } @@ -7905,7 +7898,7 @@ static jl_cgval_t emit_cfunction(jl_codectx_t &ctx, jl_value_t *output_type, con return jl_cgval_t(); } if (rt != declrt && rt != (jl_value_t*)jl_any_type) - rt = jl_ensure_rooted(ctx, rt); + jl_temporary_root(ctx, rt); function_sig_t sig("cfunction", lrt, rt, retboxed, argt, unionall_env, false, CallingConv::C, false, &ctx.emission_context); assert(sig.fargt.size() + sig.sret == sig.fargt_sig.size()); @@ -7975,12 +7968,12 @@ static jl_cgval_t emit_cfunction(jl_codectx_t &ctx, jl_value_t *output_type, con if (closure_types) { assert(ctx.spvals_ptr); size_t n = jl_array_nrows(closure_types); - jl_svec_t *fill_i = jl_alloc_svec_uninit(n); + fill = jl_alloc_svec_uninit(n); for (size_t i = 0; i < n; i++) { - jl_svecset(fill_i, i, jl_array_ptr_ref(closure_types, i)); + jl_svecset(fill, i, jl_array_ptr_ref(closure_types, i)); } - JL_GC_PUSH1(&fill_i); - fill = (jl_svec_t*)jl_ensure_rooted(ctx, (jl_value_t*)fill_i); + JL_GC_PUSH1(&fill); + jl_temporary_root(ctx, (jl_value_t*)fill); JL_GC_POP(); } Type *T_htable = ArrayType::get(ctx.types().T_size, sizeof(htable_t) / sizeof(void*)); @@ -10106,7 +10099,6 @@ static jl_llvm_functions_t jl_emit_oc_wrapper(orc::ThreadSafeModule &m, jl_codeg return declarations; } - jl_llvm_functions_t jl_emit_codeinst( orc::ThreadSafeModule &m, jl_code_instance_t *codeinst, diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index 03c919f57da3f..d7e8ca4a4850a 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -170,6 +170,53 @@ void jl_link_global(GlobalVariable *GV, void *addr) JL_NOTSAFEPOINT } } +// convert local roots into global roots, if they are needed +static void jl_optimize_roots(jl_codegen_params_t ¶ms, jl_method_instance_t *mi, Module &M) +{ + JL_GC_PROMISE_ROOTED(params.temporary_roots); // rooted by caller + if (jl_array_dim0(params.temporary_roots) == 0) + return; + jl_method_t *m = mi->def.method; + if (jl_is_method(m)) + // the method might have a root for this already; use it if so + JL_LOCK(&m->writelock); + for (size_t i = 0; i < jl_array_dim0(params.temporary_roots); i++) { + jl_value_t *val = jl_array_ptr_ref(params.temporary_roots, i); + auto ref = params.global_targets.find((void*)val); + if (ref == params.global_targets.end()) + continue; + auto get_global_root = [val, m]() { + if (jl_is_globally_rooted(val)) + return val; + if (jl_is_method(m) && m->roots) { + size_t j, len = jl_array_dim0(m->roots); + for (j = 0; j < len; j++) { + jl_value_t *mval = jl_array_ptr_ref(m->roots, j); + if (jl_egal(mval, val)) { + return mval; + } + } + } + return jl_as_global_root(val, 1); + }; + jl_value_t *mval = get_global_root(); + if (mval != val) { + GlobalVariable *GV = ref->second; + params.global_targets.erase(ref); + auto mref = params.global_targets.find((void*)mval); + if (mref != params.global_targets.end()) { + GV->replaceAllUsesWith(mref->second); + GV->eraseFromParent(); + } + else { + params.global_targets[(void*)mval] = GV; + } + } + } + if (jl_is_method(m)) + JL_UNLOCK(&m->writelock); +} + void jl_jit_globals(std::map &globals) JL_NOTSAFEPOINT { for (auto &global : globals) { @@ -648,9 +695,16 @@ static void jl_emit_codeinst_to_jit( params.debug_level = jl_options.debug_level; orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(codeinst->def), params.tsctx, params.DL, 
params.TargetTriple); + params.temporary_roots = jl_alloc_array_1d(jl_array_any_type, 0); + JL_GC_PUSH1(¶ms.temporary_roots); jl_llvm_functions_t decls = jl_emit_codeinst(result_m, codeinst, src, params); // contains safepoints - if (!result_m) + if (!result_m) { + JL_GC_POP(); return; + } + jl_optimize_roots(params, codeinst->def, *result_m.getModuleUnlocked()); // contains safepoints + params.temporary_roots = nullptr; + JL_GC_POP(); { // drop lock before acquiring engine_lock auto release = std::move(params.tsctx_lock); } @@ -683,8 +737,10 @@ static void recursive_compile_graph( while (!workqueue.empty()) { auto this_code = workqueue.pop_back_val(); if (Seen.insert(this_code).second) { - if (this_code != codeinst) + if (this_code != codeinst) { + JL_GC_PROMISE_ROOTED(this_code); // rooted transitively from following edges from original argument jl_emit_codeinst_to_jit(this_code, nullptr); // contains safepoints + } jl_unique_gcsafe_lock lock(engine_lock); auto edges = complete_graph.find(this_code); if (edges != complete_graph.end()) { diff --git a/src/jitlayers.h b/src/jitlayers.h index ba4ac3081795e..d5fa878211200 100644 --- a/src/jitlayers.h +++ b/src/jitlayers.h @@ -229,6 +229,7 @@ struct jl_codegen_params_t { // outputs jl_workqueue_t workqueue; std::map global_targets; + jl_array_t *temporary_roots = nullptr; std::map, GlobalVariable*> external_fns; std::map ditypes; std::map llvmtypes; diff --git a/src/julia_internal.h b/src/julia_internal.h index 05256fec5bb6d..ca3f63b274968 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -1657,7 +1657,7 @@ void smallintset_empty(const jl_genericmemory_t *a) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_genericmemory_t *jl_idset_rehash(jl_genericmemory_t *keys, jl_genericmemory_t *idxs, size_t newsz); JL_DLLEXPORT ssize_t jl_idset_peek_bp(jl_genericmemory_t *keys, jl_genericmemory_t *idxs, jl_value_t *key) JL_NOTSAFEPOINT; -jl_value_t *jl_idset_get(jl_genericmemory_t *keys JL_PROPAGATES_ROOT, jl_genericmemory_t *idxs, jl_value_t *key) JL_NOTSAFEPOINT; +JL_DLLEXPORT jl_value_t *jl_idset_get(jl_genericmemory_t *keys JL_PROPAGATES_ROOT, jl_genericmemory_t *idxs, jl_value_t *key) JL_NOTSAFEPOINT; JL_DLLEXPORT jl_genericmemory_t *jl_idset_put_key(jl_genericmemory_t *keys, jl_value_t *key, ssize_t *newidx); JL_DLLEXPORT jl_genericmemory_t *jl_idset_put_idx(jl_genericmemory_t *keys, jl_genericmemory_t *idxs, ssize_t idx); JL_DLLEXPORT ssize_t jl_idset_pop(jl_genericmemory_t *keys, jl_genericmemory_t *idxs, jl_value_t *key) JL_NOTSAFEPOINT; From 0bedaae82864ff2c6f2642ff7230d63a82b58792 Mon Sep 17 00:00:00 2001 From: Ian Butterworth Date: Fri, 22 Nov 2024 16:08:44 -0500 Subject: [PATCH 495/537] precompilepkgs: make the circular dep warning clearer and more informative (#56621) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Was e.g. ``` ┌ Warning: Circular dependency detected. 
Precompilation will be skipped for: │ Base.PkgId(Base.UUID("eb0c05c4-6780-5852-a67e-5d31d2970b9a"), "ArrayInterfaceTrackerExt") │ Base.PkgId(Base.UUID("f517fe37-dbe3-4b94-8317-1923a5111588"), "Polyester") │ Base.PkgId(Base.UUID("0d7ed370-da01-4f52-bd93-41d350b8b718"), "StaticArrayInterface") │ Base.PkgId(Base.UUID("6a4ca0a5-0e36-4168-a932-d9be78d558f1"), "AcceleratedKernels") │ Base.PkgId(Base.UUID("244f68ed-b92b-5712-87ae-6c617c41e16a"), "NNlibAMDGPUExt") │ Base.PkgId(Base.UUID("06b0261c-7a9b-5753-9bdf-fd6840237b4a"), "StaticArrayInterfaceStaticArraysExt") │ Base.PkgId(Base.UUID("21141c5a-9bdb-4563-92ae-f87d6854732e"), "AMDGPU") │ Base.PkgId(Base.UUID("9f7883ad-71c0-57eb-9f7f-b5c9e6d3789c"), "Tracker") └ @ Base.Precompilation precompilation.jl:511 ``` Now ![Screenshot 2024-11-21 at 11 20 50 PM](https://github.com/user-attachments/assets/6939d834-90c3-4d87-baa9-cf6a4931ca03) Thanks to @topolarity figuring out proper cycles tracking. --------- Co-authored-by: Cody Tapscott --- base/precompilation.jl | 99 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 81 insertions(+), 18 deletions(-) diff --git a/base/precompilation.jl b/base/precompilation.jl index 77e088f455fea..6eebded8b2f93 100644 --- a/base/precompilation.jl +++ b/base/precompilation.jl @@ -364,6 +364,50 @@ end const Config = Pair{Cmd, Base.CacheFlags} const PkgConfig = Tuple{PkgId,Config} +# name or parent → ext +function full_name(ext_to_parent::Dict{PkgId, PkgId}, pkg::PkgId) + if haskey(ext_to_parent, pkg) + return string(ext_to_parent[pkg].name, " → ", pkg.name) + else + return pkg.name + end +end + +function excluded_circular_deps_explanation(io::IOContext{IO}, ext_to_parent::Dict{PkgId, PkgId}, circular_deps, cycles) + outer_deps = copy(circular_deps) + cycles_names = "" + for cycle in cycles + filter!(!in(cycle), outer_deps) + cycle_str = "" + for (i, pkg) in enumerate(cycle) + j = max(0, i - 1) + if length(cycle) == 1 + line = " ─ " + elseif i == 1 + line = " ┌ " + elseif i < length(cycle) + line = " │ " * " " ^j + else + line = " └" * "─" ^j * " " + end + hascolor = get(io, :color, false)::Bool + line = _color_string(line, :light_black, hascolor) * full_name(ext_to_parent, pkg) * "\n" + cycle_str *= line + end + cycles_names *= cycle_str + end + plural1 = length(cycles) > 1 ? "these cycles" : "this cycle" + plural2 = length(cycles) > 1 ? "cycles" : "cycle" + msg = """Circular dependency detected. + Precompilation will be skipped for dependencies in $plural1: + $cycles_names""" + if !isempty(outer_deps) + msg *= "Precompilation will also be skipped for the following, which depend on the above $plural2:\n" + msg *= join((" " * full_name(ext_to_parent, pkg) for pkg in outer_deps), "\n") + end + return msg +end + function precompilepkgs(pkgs::Vector{String}=String[]; internal_call::Bool=false, strict::Bool = false, @@ -426,7 +470,7 @@ function _precompilepkgs(pkgs::Vector{String}, ext_to_parent = Dict{Base.PkgId, Base.PkgId}() function describe_pkg(pkg::PkgId, is_project_dep::Bool, flags::Cmd, cacheflags::Base.CacheFlags) - name = haskey(ext_to_parent, pkg) ? string(ext_to_parent[pkg].name, " → ", pkg.name) : pkg.name + name = full_name(ext_to_parent, pkg) name = is_project_dep ? 
name : color_string(name, :light_black) if nconfigs > 1 && !isempty(flags) config_str = join(flags, " ") @@ -566,32 +610,51 @@ function _precompilepkgs(pkgs::Vector{String}, @debug "precompile: signalling initialized" # find and guard against circular deps - circular_deps = Base.PkgId[] - # Three states - # !haskey -> never visited - # true -> cannot be compiled due to a cycle (or not yet determined) - # false -> not depending on a cycle + cycles = Vector{Base.PkgId}[] + # For every scanned package, true if pkg found to be in a cycle + # or depends on packages in a cycle and false otherwise. could_be_cycle = Dict{Base.PkgId, Bool}() + # temporary stack for the SCC-like algorithm below + stack = Base.PkgId[] function scan_pkg!(pkg, dmap) - did_visit_dep = true - inpath = get!(could_be_cycle, pkg) do - did_visit_dep = false - return true - end - if did_visit_dep ? inpath : scan_deps!(pkg, dmap) - # Found a cycle. Delete this and all parents - return true + if haskey(could_be_cycle, pkg) + return could_be_cycle[pkg] + else + return scan_deps!(pkg, dmap) end - return false end function scan_deps!(pkg, dmap) + push!(stack, pkg) + cycle = nothing for dep in dmap[pkg] - scan_pkg!(dep, dmap) && return true + if dep in stack + # Created fresh cycle + cycle′ = stack[findlast(==(dep), stack):end] + if cycle === nothing || length(cycle′) < length(cycle) + cycle = cycle′ # try to report smallest cycle possible + end + elseif scan_pkg!(dep, dmap) + # Reaches an existing cycle + could_be_cycle[pkg] = true + pop!(stack) + return true + end + end + pop!(stack) + if cycle !== nothing + push!(cycles, cycle) + could_be_cycle[pkg] = true + return true end could_be_cycle[pkg] = false return false end + # set of packages that depend on a cycle (either because they are + # a part of a cycle themselves or because they transitively depend + # on a package in some cycle) + circular_deps = Base.PkgId[] for pkg in keys(direct_deps) + @assert isempty(stack) if scan_pkg!(pkg, direct_deps) push!(circular_deps, pkg) for pkg_config in keys(was_processed) @@ -601,7 +664,7 @@ function _precompilepkgs(pkgs::Vector{String}, end end if !isempty(circular_deps) - @warn """Circular dependency detected. Precompilation will be skipped for:\n $(join(string.(circular_deps), "\n "))""" + @warn excluded_circular_deps_explanation(io, ext_to_parent, circular_deps, cycles) end @debug "precompile: circular dep check done" @@ -1002,7 +1065,7 @@ function _precompilepkgs(pkgs::Vector{String}, else join(split(err, "\n"), color_string("\n│ ", Base.warn_color())) end - name = haskey(ext_to_parent, pkg) ? string(ext_to_parent[pkg].name, " → ", pkg.name) : pkg.name + name = full_name(ext_to_parent, pkg) print(iostr, color_string("\n┌ ", Base.warn_color()), name, color_string("\n│ ", Base.warn_color()), err, color_string("\n└ ", Base.warn_color())) end end From a1dbfd0aaed0977ffd97674d460ebde56ca78223 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Sun, 24 Nov 2024 12:09:33 +0000 Subject: [PATCH 496/537] Test extensions of "parent" dependencies These are the main correctness fix from #55910, so it's important that we have test coverage for it. 
--- test/loading.jl | 34 +++++++++++++++++++ .../DepWithParentExt.jl/Project.toml | 9 +++++ .../DepWithParentExt.jl/ext/ParentExt.jl | 6 ++++ .../src/DepWithParentExt.jl | 5 +++ .../Extensions/Parent.jl/Manifest.toml | 20 +++++++++++ .../project/Extensions/Parent.jl/Project.toml | 7 ++++ .../Extensions/Parent.jl/src/Parent.jl | 7 ++++ 7 files changed, 88 insertions(+) create mode 100644 test/project/Extensions/DepWithParentExt.jl/Project.toml create mode 100644 test/project/Extensions/DepWithParentExt.jl/ext/ParentExt.jl create mode 100644 test/project/Extensions/DepWithParentExt.jl/src/DepWithParentExt.jl create mode 100644 test/project/Extensions/Parent.jl/Manifest.toml create mode 100644 test/project/Extensions/Parent.jl/Project.toml create mode 100644 test/project/Extensions/Parent.jl/src/Parent.jl diff --git a/test/loading.jl b/test/loading.jl index 1cc20548d9bc8..09f96e1f43578 100644 --- a/test/loading.jl +++ b/test/loading.jl @@ -1220,6 +1220,40 @@ end @test occursin("Hello x-package ext-to-ext!", String(read(cmd))) end + # Extensions for "parent" dependencies + # (i.e. an `ExtAB` where A depends on / loads B, but B provides the extension) + + mktempdir() do depot # Parallel pre-compilation + code = """ + Base.disable_parallel_precompile = false + using Parent + Base.get_extension(getfield(Parent, :DepWithParentExt), :ParentExt) isa Module || error("expected extension to load") + Parent.greet() + """ + proj = joinpath(@__DIR__, "project", "Extensions", "Parent.jl") + cmd = `$(Base.julia_cmd()) --startup-file=no -e $code` + cmd = addenv(cmd, + "JULIA_LOAD_PATH" => proj, + "JULIA_DEPOT_PATH" => depot * Base.Filesystem.pathsep(), + ) + @test occursin("Hello parent!", String(read(cmd))) + end + mktempdir() do depot # Serial pre-compilation + code = """ + Base.disable_parallel_precompile = true + using Parent + Base.get_extension(getfield(Parent, :DepWithParentExt), :ParentExt) isa Module || error("expected extension to load") + Parent.greet() + """ + proj = joinpath(@__DIR__, "project", "Extensions", "Parent.jl") + cmd = `$(Base.julia_cmd()) --startup-file=no -e $code` + cmd = addenv(cmd, + "JULIA_LOAD_PATH" => proj, + "JULIA_DEPOT_PATH" => depot * Base.Filesystem.pathsep(), + ) + @test occursin("Hello parent!", String(read(cmd))) + end + finally try rm(depot_path, force=true, recursive=true) diff --git a/test/project/Extensions/DepWithParentExt.jl/Project.toml b/test/project/Extensions/DepWithParentExt.jl/Project.toml new file mode 100644 index 0000000000000..bc487252ced4e --- /dev/null +++ b/test/project/Extensions/DepWithParentExt.jl/Project.toml @@ -0,0 +1,9 @@ +name = "DepWithParentExt" +uuid = "8a35c396-5ffc-40d2-b7ec-e8ed2248da32" +version = "0.1.0" + +[weakdeps] +Parent = "58cecb9c-f68a-426e-b92a-89d456ae7acc" + +[extensions] +ParentExt = "Parent" diff --git a/test/project/Extensions/DepWithParentExt.jl/ext/ParentExt.jl b/test/project/Extensions/DepWithParentExt.jl/ext/ParentExt.jl new file mode 100644 index 0000000000000..56176d2f5921d --- /dev/null +++ b/test/project/Extensions/DepWithParentExt.jl/ext/ParentExt.jl @@ -0,0 +1,6 @@ +module ParentExt + +using Parent +using DepWithParentExt + +end diff --git a/test/project/Extensions/DepWithParentExt.jl/src/DepWithParentExt.jl b/test/project/Extensions/DepWithParentExt.jl/src/DepWithParentExt.jl new file mode 100644 index 0000000000000..3d4ebc4ebf8a0 --- /dev/null +++ b/test/project/Extensions/DepWithParentExt.jl/src/DepWithParentExt.jl @@ -0,0 +1,5 @@ +module DepWithParentExt + +greet() = print("Hello dep w/ ext for parent dep!") 
+ +end # module DepWithParentExt diff --git a/test/project/Extensions/Parent.jl/Manifest.toml b/test/project/Extensions/Parent.jl/Manifest.toml new file mode 100644 index 0000000000000..eb0c323ac36f5 --- /dev/null +++ b/test/project/Extensions/Parent.jl/Manifest.toml @@ -0,0 +1,20 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.12.0-DEV" +manifest_format = "2.0" +project_hash = "b6ac643184d62cc94427c9aa665ff1fb63d66038" + +[[deps.DepWithParentExt]] +path = "../DepWithParentExt.jl" +uuid = "8a35c396-5ffc-40d2-b7ec-e8ed2248da32" +version = "0.1.0" +weakdeps = ["Parent"] + + [deps.DepWithParentExt.extensions] + ParentExt = "Parent" + +[[deps.Parent]] +deps = ["DepWithParentExt"] +path = "." +uuid = "58cecb9c-f68a-426e-b92a-89d456ae7acc" +version = "0.1.0" diff --git a/test/project/Extensions/Parent.jl/Project.toml b/test/project/Extensions/Parent.jl/Project.toml new file mode 100644 index 0000000000000..d62594cf15d3f --- /dev/null +++ b/test/project/Extensions/Parent.jl/Project.toml @@ -0,0 +1,7 @@ +name = "Parent" +uuid = "58cecb9c-f68a-426e-b92a-89d456ae7acc" +version = "0.1.0" +authors = ["Cody Tapscott "] + +[deps] +DepWithParentExt = "8a35c396-5ffc-40d2-b7ec-e8ed2248da32" diff --git a/test/project/Extensions/Parent.jl/src/Parent.jl b/test/project/Extensions/Parent.jl/src/Parent.jl new file mode 100644 index 0000000000000..471f4b13ecca3 --- /dev/null +++ b/test/project/Extensions/Parent.jl/src/Parent.jl @@ -0,0 +1,7 @@ +module Parent + +using DepWithParentExt + +greet() = print("Hello parent!") + +end # module Parent From 06f851903a85eecaa06ad1caf59f4a07af1c9d54 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Sun, 24 Nov 2024 12:11:06 +0000 Subject: [PATCH 497/537] Prevent pre-compilation target package from triggering extensions It is possible for an extension `ExtAB` to be loadable by one of its triggers, e.g. if A loads B. However this loading is only supposed to happen after loading for A is finished, so it shouldn't be included as part of pre-compiling A. Getting this wrong means disagreeing with the scheduled pre-compile jobs (A is not scheduled to depend on or generate a cache file for ExtAB but accidentally does both) and leads to confusing errors about missing cache files. To avoid trying to use / generate a cache file for ExtAB while still pre-compiling A, this change tracks the package being currently pre- compiled so that its extension triggers can be ignored. 
--- base/loading.jl | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/base/loading.jl b/base/loading.jl index ae54ba19038e9..0a70564077692 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1433,6 +1433,7 @@ function run_module_init(mod::Module, i::Int=1) end function run_package_callbacks(modkey::PkgId) + @assert modkey != precompilation_target run_extension_callbacks(modkey) assert_havelock(require_lock) unlock(require_lock) @@ -1562,7 +1563,7 @@ function _insert_extension_triggers(parent::PkgId, extensions::Dict{String, Any} uuid_trigger = UUID(totaldeps[trigger]::String) trigger_id = PkgId(uuid_trigger, trigger) push!(trigger_ids, trigger_id) - if !haskey(Base.loaded_modules, trigger_id) || haskey(package_locks, trigger_id) + if !haskey(Base.loaded_modules, trigger_id) || haskey(package_locks, trigger_id) || (trigger_id == precompilation_target) trigger1 = get!(Vector{ExtensionId}, EXT_DORMITORY, trigger_id) push!(trigger1, gid) else @@ -1575,6 +1576,7 @@ end loading_extension::Bool = false loadable_extensions::Union{Nothing,Vector{PkgId}} = nothing precompiling_extension::Bool = false +precompilation_target::Union{Nothing,PkgId} = nothing function run_extension_callbacks(extid::ExtensionId) assert_havelock(require_lock) succeeded = try @@ -3081,6 +3083,7 @@ function create_expr_cache(pkg::PkgId, input::String, output::String, output_o:: Base.track_nested_precomp($(_pkg_str(vcat(Base.precompilation_stack, pkg)))) Base.loadable_extensions = $(_pkg_str(loadable_exts)) Base.precompiling_extension = $(loading_extension) + Base.precompilation_target = $(_pkg_str(pkg)) Base.include_package_for_output($(_pkg_str(pkg)), $(repr(abspath(input))), $(repr(depot_path)), $(repr(dl_load_path)), $(repr(load_path)), $(_pkg_str(concrete_deps)), $(repr(source_path(nothing)))) """) From fa1895126543d5bb9dbd7183a2dfb3bf3aef6454 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Sun, 24 Nov 2024 22:12:08 +0900 Subject: [PATCH 498/537] make EAUtils.jl loadable even if `Main.EscapeAnalysis` isn't defined (#56665) --- Compiler/test/EAUtils.jl | 3 +-- doc/src/devdocs/EscapeAnalysis.md | 3 ++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Compiler/test/EAUtils.jl b/Compiler/test/EAUtils.jl index 5a5c42fc89106..f124aea2544fd 100644 --- a/Compiler/test/EAUtils.jl +++ b/Compiler/test/EAUtils.jl @@ -4,8 +4,7 @@ export code_escapes, @code_escapes, __clear_cache! 
include("setup_Compiler.jl") -using ..EscapeAnalysis -const EA = EscapeAnalysis +using .Compiler: EscapeAnalysis as EA # AbstractInterpreter # ------------------- diff --git a/doc/src/devdocs/EscapeAnalysis.md b/doc/src/devdocs/EscapeAnalysis.md index 484af9c2780f2..d8efd759fa131 100644 --- a/doc/src/devdocs/EscapeAnalysis.md +++ b/doc/src/devdocs/EscapeAnalysis.md @@ -20,7 +20,8 @@ This escape analysis aims to: You can give a try to the escape analysis by loading the `EAUtils.jl` utility script that defines the convenience entries `code_escapes` and `@code_escapes` for testing and debugging purposes: ```@repl EAUtils -using Base.Compiler: EscapeAnalysis # or `using Compiler: EscapeAnalysis` to use the stdlib version +# InteractiveUtils.@activate Compiler # to use the stdlib version of the Compiler + let JULIA_DIR = normpath(Sys.BINDIR, "..", "share", "julia") include(normpath(JULIA_DIR, "Compiler", "test", "EAUtils.jl")) using .EAUtils From 377643f9848cf88b79e7b24d1fce852eccec5e43 Mon Sep 17 00:00:00 2001 From: Jakob Peters Date: Sun, 24 Nov 2024 08:27:25 -0800 Subject: [PATCH 499/537] Highlight circular references (#56663) The text `"#= circular reference @-$d =#"` is printed yellow. Adds a test with the context `:color => true`. --- base/show.jl | 2 +- test/show.jl | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/base/show.jl b/base/show.jl index e6c2367e438b3..23957d6e29b2d 100644 --- a/base/show.jl +++ b/base/show.jl @@ -443,7 +443,7 @@ function show_circular(io::IOContext, @nospecialize(x)) for (k, v) in io.dict if k === :SHOWN_SET if v === x - print(io, "#= circular reference @-$d =#") + printstyled(io, "#= circular reference @-$d =#"; color = :yellow) return true end d += 1 diff --git a/test/show.jl b/test/show.jl index de5cf32b726ee..07916c249d533 100644 --- a/test/show.jl +++ b/test/show.jl @@ -1275,6 +1275,7 @@ let x = [], y = [], z = Base.ImmutableDict(x => y) push!(y, x) push!(y, z) @test replstr(x) == "1-element Vector{Any}:\n Any[Any[#= circular reference @-2 =#], Base.ImmutableDict{Vector{Any}, Vector{Any}}([#= circular reference @-3 =#] => [#= circular reference @-2 =#])]" + @test replstr(x, :color => true) == "1-element Vector{Any}:\n Any[Any[\e[33m#= circular reference @-2 =#\e[39m], Base.ImmutableDict{Vector{Any}, Vector{Any}}([\e[33m#= circular reference @-3 =#\e[39m] => [\e[33m#= circular reference @-2 =#\e[39m])]" @test repr(z) == "Base.ImmutableDict{Vector{Any}, Vector{Any}}([Any[Any[#= circular reference @-2 =#], Base.ImmutableDict{Vector{Any}, Vector{Any}}(#= circular reference @-3 =#)]] => [Any[Any[#= circular reference @-2 =#]], Base.ImmutableDict{Vector{Any}, Vector{Any}}(#= circular reference @-2 =#)])" @test sprint(dump, x) == """ Array{Any}((1,)) From f892a9e428b817c4e835647926f17d92d2429f87 Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Sun, 24 Nov 2024 12:03:06 -0600 Subject: [PATCH 500/537] Fix typo in nextfloat and prevfloat docs (#56670) --- base/float.jl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/base/float.jl b/base/float.jl index 90a5d8b1c66f4..c7230459d0822 100644 --- a/base/float.jl +++ b/base/float.jl @@ -924,8 +924,8 @@ end """ nextfloat(x::AbstractFloat) -Return the smallest floating point number `y` of the same type as `x` such `x < y`. If no -such `y` exists (e.g. if `x` is `Inf` or `NaN`), then return `x`. +Return the smallest floating point number `y` of the same type as `x` such that `x < y`. +If no such `y` exists (e.g. if `x` is `Inf` or `NaN`), then return `x`. 
See also: [`prevfloat`](@ref), [`eps`](@ref), [`issubnormal`](@ref). """ @@ -942,8 +942,8 @@ prevfloat(x::AbstractFloat, d::Integer) = nextfloat(x, -d) """ prevfloat(x::AbstractFloat) -Return the largest floating point number `y` of the same type as `x` such `y < x`. If no -such `y` exists (e.g. if `x` is `-Inf` or `NaN`), then return `x`. +Return the largest floating point number `y` of the same type as `x` such that `y < x`. +If no such `y` exists (e.g. if `x` is `-Inf` or `NaN`), then return `x`. """ prevfloat(x::AbstractFloat) = nextfloat(x,-1) From e5e7be25b07252fb7a9eba42b05ad8a49753339a Mon Sep 17 00:00:00 2001 From: Kristoffer Carlsson Date: Sun, 24 Nov 2024 19:44:57 +0100 Subject: [PATCH 501/537] move out LinearAlgebra into its own repository (#56637) This moves out LinearAlgebra into its own repo https://github.com/JuliaLang/LinearAlgebra.jl. This repo is still a bit bare (README needs to be added) but it has CI set up to run on buildkite (https://buildkite.com/julialang/linearalgebra-dot-jl/builds/18) and doc building on GHA. The external repo has all commits up to https://github.com/JuliaLang/julia/commit/4709b6c48e79f6226e6dbee1b49bf7e563058ff7 included in it. The reason for the move is to be able to focus issues and PRs and development regarding LinearAlgebra in one place. --- .../md5 | 1 + .../sha512 | 1 + julia.spdx.json | 12 + stdlib/.gitignore | 2 + stdlib/LinearAlgebra.version | 4 + stdlib/LinearAlgebra/Project.toml | 15 - stdlib/LinearAlgebra/docs/src/index.md | 903 --- stdlib/LinearAlgebra/src/LinearAlgebra.jl | 843 -- stdlib/LinearAlgebra/src/abstractq.jl | 642 -- stdlib/LinearAlgebra/src/adjtrans.jl | 524 -- stdlib/LinearAlgebra/src/bidiag.jl | 1489 ---- stdlib/LinearAlgebra/src/bitarray.jl | 272 - stdlib/LinearAlgebra/src/blas.jl | 2258 ------ stdlib/LinearAlgebra/src/bunchkaufman.jl | 1601 ---- stdlib/LinearAlgebra/src/cholesky.jl | 1038 --- stdlib/LinearAlgebra/src/dense.jl | 1885 ----- stdlib/LinearAlgebra/src/deprecated.jl | 7 - stdlib/LinearAlgebra/src/diagonal.jl | 1148 --- stdlib/LinearAlgebra/src/eigen.jl | 682 -- stdlib/LinearAlgebra/src/exceptions.jl | 76 - stdlib/LinearAlgebra/src/factorization.jl | 202 - stdlib/LinearAlgebra/src/generic.jl | 2093 ----- stdlib/LinearAlgebra/src/givens.jl | 429 - stdlib/LinearAlgebra/src/hessenberg.jl | 624 -- stdlib/LinearAlgebra/src/lapack.jl | 7218 ----------------- stdlib/LinearAlgebra/src/lbt.jl | 348 - stdlib/LinearAlgebra/src/ldlt.jl | 224 - stdlib/LinearAlgebra/src/lq.jl | 203 - stdlib/LinearAlgebra/src/lu.jl | 834 -- stdlib/LinearAlgebra/src/matmul.jl | 1339 --- stdlib/LinearAlgebra/src/qr.jl | 769 -- stdlib/LinearAlgebra/src/schur.jl | 449 - stdlib/LinearAlgebra/src/special.jl | 595 -- .../LinearAlgebra/src/structuredbroadcast.jl | 297 - stdlib/LinearAlgebra/src/svd.jl | 578 -- stdlib/LinearAlgebra/src/symmetric.jl | 1064 --- stdlib/LinearAlgebra/src/symmetriceigen.jl | 410 - stdlib/LinearAlgebra/src/transpose.jl | 257 - stdlib/LinearAlgebra/src/triangular.jl | 2990 ------- stdlib/LinearAlgebra/src/tridiag.jl | 1099 --- stdlib/LinearAlgebra/src/uniformscaling.jl | 448 - stdlib/LinearAlgebra/test/abstractq.jl | 156 - stdlib/LinearAlgebra/test/addmul.jl | 273 - stdlib/LinearAlgebra/test/adjtrans.jl | 721 -- stdlib/LinearAlgebra/test/ambiguous_exec.jl | 21 - stdlib/LinearAlgebra/test/bidiag.jl | 1141 --- stdlib/LinearAlgebra/test/blas.jl | 783 -- stdlib/LinearAlgebra/test/bunchkaufman.jl | 260 - stdlib/LinearAlgebra/test/cholesky.jl | 661 -- stdlib/LinearAlgebra/test/dense.jl | 1331 --- 
stdlib/LinearAlgebra/test/diagonal.jl | 1455 ---- stdlib/LinearAlgebra/test/eigen.jl | 282 - stdlib/LinearAlgebra/test/factorization.jl | 94 - stdlib/LinearAlgebra/test/generic.jl | 840 -- stdlib/LinearAlgebra/test/givens.jl | 124 - stdlib/LinearAlgebra/test/hessenberg.jl | 308 - stdlib/LinearAlgebra/test/lapack.jl | 902 -- stdlib/LinearAlgebra/test/ldlt.jl | 41 - stdlib/LinearAlgebra/test/lq.jl | 237 - stdlib/LinearAlgebra/test/lu.jl | 502 -- stdlib/LinearAlgebra/test/matmul.jl | 1151 --- stdlib/LinearAlgebra/test/pinv.jl | 186 - stdlib/LinearAlgebra/test/qr.jl | 543 -- stdlib/LinearAlgebra/test/runtests.jl | 10 - stdlib/LinearAlgebra/test/schur.jl | 221 - stdlib/LinearAlgebra/test/special.jl | 862 -- .../LinearAlgebra/test/structuredbroadcast.jl | 379 - stdlib/LinearAlgebra/test/svd.jl | 297 - stdlib/LinearAlgebra/test/symmetric.jl | 1181 --- stdlib/LinearAlgebra/test/symmetriceigen.jl | 187 - stdlib/LinearAlgebra/test/testgroups | 30 - stdlib/LinearAlgebra/test/testutils.jl | 27 - stdlib/LinearAlgebra/test/triangular.jl | 1419 ---- stdlib/LinearAlgebra/test/trickyarithmetic.jl | 66 - stdlib/LinearAlgebra/test/tridiag.jl | 1078 --- stdlib/LinearAlgebra/test/uniformscaling.jl | 577 -- stdlib/Makefile | 4 +- 77 files changed, 22 insertions(+), 54201 deletions(-) create mode 100644 deps/checksums/LinearAlgebra-56d561c22e1ab8e0421160edbdd42f3f194ecfa8.tar.gz/md5 create mode 100644 deps/checksums/LinearAlgebra-56d561c22e1ab8e0421160edbdd42f3f194ecfa8.tar.gz/sha512 create mode 100644 stdlib/LinearAlgebra.version delete mode 100644 stdlib/LinearAlgebra/Project.toml delete mode 100644 stdlib/LinearAlgebra/docs/src/index.md delete mode 100644 stdlib/LinearAlgebra/src/LinearAlgebra.jl delete mode 100644 stdlib/LinearAlgebra/src/abstractq.jl delete mode 100644 stdlib/LinearAlgebra/src/adjtrans.jl delete mode 100644 stdlib/LinearAlgebra/src/bidiag.jl delete mode 100644 stdlib/LinearAlgebra/src/bitarray.jl delete mode 100644 stdlib/LinearAlgebra/src/blas.jl delete mode 100644 stdlib/LinearAlgebra/src/bunchkaufman.jl delete mode 100644 stdlib/LinearAlgebra/src/cholesky.jl delete mode 100644 stdlib/LinearAlgebra/src/dense.jl delete mode 100644 stdlib/LinearAlgebra/src/deprecated.jl delete mode 100644 stdlib/LinearAlgebra/src/diagonal.jl delete mode 100644 stdlib/LinearAlgebra/src/eigen.jl delete mode 100644 stdlib/LinearAlgebra/src/exceptions.jl delete mode 100644 stdlib/LinearAlgebra/src/factorization.jl delete mode 100644 stdlib/LinearAlgebra/src/generic.jl delete mode 100644 stdlib/LinearAlgebra/src/givens.jl delete mode 100644 stdlib/LinearAlgebra/src/hessenberg.jl delete mode 100644 stdlib/LinearAlgebra/src/lapack.jl delete mode 100644 stdlib/LinearAlgebra/src/lbt.jl delete mode 100644 stdlib/LinearAlgebra/src/ldlt.jl delete mode 100644 stdlib/LinearAlgebra/src/lq.jl delete mode 100644 stdlib/LinearAlgebra/src/lu.jl delete mode 100644 stdlib/LinearAlgebra/src/matmul.jl delete mode 100644 stdlib/LinearAlgebra/src/qr.jl delete mode 100644 stdlib/LinearAlgebra/src/schur.jl delete mode 100644 stdlib/LinearAlgebra/src/special.jl delete mode 100644 stdlib/LinearAlgebra/src/structuredbroadcast.jl delete mode 100644 stdlib/LinearAlgebra/src/svd.jl delete mode 100644 stdlib/LinearAlgebra/src/symmetric.jl delete mode 100644 stdlib/LinearAlgebra/src/symmetriceigen.jl delete mode 100644 stdlib/LinearAlgebra/src/transpose.jl delete mode 100644 stdlib/LinearAlgebra/src/triangular.jl delete mode 100644 stdlib/LinearAlgebra/src/tridiag.jl delete mode 100644 stdlib/LinearAlgebra/src/uniformscaling.jl delete mode 
100644 stdlib/LinearAlgebra/test/abstractq.jl delete mode 100644 stdlib/LinearAlgebra/test/addmul.jl delete mode 100644 stdlib/LinearAlgebra/test/adjtrans.jl delete mode 100644 stdlib/LinearAlgebra/test/ambiguous_exec.jl delete mode 100644 stdlib/LinearAlgebra/test/bidiag.jl delete mode 100644 stdlib/LinearAlgebra/test/blas.jl delete mode 100644 stdlib/LinearAlgebra/test/bunchkaufman.jl delete mode 100644 stdlib/LinearAlgebra/test/cholesky.jl delete mode 100644 stdlib/LinearAlgebra/test/dense.jl delete mode 100644 stdlib/LinearAlgebra/test/diagonal.jl delete mode 100644 stdlib/LinearAlgebra/test/eigen.jl delete mode 100644 stdlib/LinearAlgebra/test/factorization.jl delete mode 100644 stdlib/LinearAlgebra/test/generic.jl delete mode 100644 stdlib/LinearAlgebra/test/givens.jl delete mode 100644 stdlib/LinearAlgebra/test/hessenberg.jl delete mode 100644 stdlib/LinearAlgebra/test/lapack.jl delete mode 100644 stdlib/LinearAlgebra/test/ldlt.jl delete mode 100644 stdlib/LinearAlgebra/test/lq.jl delete mode 100644 stdlib/LinearAlgebra/test/lu.jl delete mode 100644 stdlib/LinearAlgebra/test/matmul.jl delete mode 100644 stdlib/LinearAlgebra/test/pinv.jl delete mode 100644 stdlib/LinearAlgebra/test/qr.jl delete mode 100644 stdlib/LinearAlgebra/test/runtests.jl delete mode 100644 stdlib/LinearAlgebra/test/schur.jl delete mode 100644 stdlib/LinearAlgebra/test/special.jl delete mode 100644 stdlib/LinearAlgebra/test/structuredbroadcast.jl delete mode 100644 stdlib/LinearAlgebra/test/svd.jl delete mode 100644 stdlib/LinearAlgebra/test/symmetric.jl delete mode 100644 stdlib/LinearAlgebra/test/symmetriceigen.jl delete mode 100644 stdlib/LinearAlgebra/test/testgroups delete mode 100644 stdlib/LinearAlgebra/test/testutils.jl delete mode 100644 stdlib/LinearAlgebra/test/triangular.jl delete mode 100644 stdlib/LinearAlgebra/test/trickyarithmetic.jl delete mode 100644 stdlib/LinearAlgebra/test/tridiag.jl delete mode 100644 stdlib/LinearAlgebra/test/uniformscaling.jl diff --git a/deps/checksums/LinearAlgebra-56d561c22e1ab8e0421160edbdd42f3f194ecfa8.tar.gz/md5 b/deps/checksums/LinearAlgebra-56d561c22e1ab8e0421160edbdd42f3f194ecfa8.tar.gz/md5 new file mode 100644 index 0000000000000..e240a1083833c --- /dev/null +++ b/deps/checksums/LinearAlgebra-56d561c22e1ab8e0421160edbdd42f3f194ecfa8.tar.gz/md5 @@ -0,0 +1 @@ +00198e6d92d033fb33e75cf4eac34dca diff --git a/deps/checksums/LinearAlgebra-56d561c22e1ab8e0421160edbdd42f3f194ecfa8.tar.gz/sha512 b/deps/checksums/LinearAlgebra-56d561c22e1ab8e0421160edbdd42f3f194ecfa8.tar.gz/sha512 new file mode 100644 index 0000000000000..5eeceaf1dbed3 --- /dev/null +++ b/deps/checksums/LinearAlgebra-56d561c22e1ab8e0421160edbdd42f3f194ecfa8.tar.gz/sha512 @@ -0,0 +1 @@ +ba4b390d99644c31d64594352da888e9ef18021cc9b7700c37a6cdb0c1ff2532eb208ecaccf93217e3183e1db8e6c089456fa5d93633b8ff037e8796199934e7 diff --git a/julia.spdx.json b/julia.spdx.json index 63683dd302a39..0e0067f00efb1 100644 --- a/julia.spdx.json +++ b/julia.spdx.json @@ -86,6 +86,18 @@ "copyrightText": "Copyright (c) 2020 Stefan Karpinski and contributors", "summary": "ArgTools provides tools for creating consistent, flexible APIs that work with various kinds of function arguments." 
}, + { + "name": "LinearAlgebra.jl", + "SPDXID": "SPDXRef-JuliaLinearAlgebra", + "downloadLocation": "git+https://github.com/JuliaLang/LinearAlgebra.jl.git", + "filesAnalyzed": false, + "homepage": "https://juliastats.org", + "sourceInfo": "The git hash of the version in use can be found in the file stdlib/LinearAlgebra.version", + "licenseConcluded": "MIT", + "licenseDeclared": "MIT", + "copyrightText": "Copyright (c) 2009-2024: Jeff Bezanson, Stefan Karpinski, Viral B. Shah, and other contributors: https://github.com/JuliaLang/julia/contributors", + "summary": "Development repository for the LinearAlgebra standard library (stdlib) that ships with Julia." + }, { "name": "Tar.jl", "SPDXID": "SPDXRef-JuliaTar", diff --git a/stdlib/.gitignore b/stdlib/.gitignore index 93668857189af..5996091c5a0ef 100644 --- a/stdlib/.gitignore +++ b/stdlib/.gitignore @@ -29,6 +29,8 @@ /StyledStrings /JuliaSyntaxHighlighting-* /JuliaSyntaxHighlighting +/LinearAlgebra-* +/LinearAlgebra /*_jll/StdlibArtifacts.toml /*/Manifest.toml /*.image diff --git a/stdlib/LinearAlgebra.version b/stdlib/LinearAlgebra.version new file mode 100644 index 0000000000000..d6a33ea421adf --- /dev/null +++ b/stdlib/LinearAlgebra.version @@ -0,0 +1,4 @@ +LINEARALGEBRA_BRANCH = master +LINEARALGEBRA_SHA1 = 56d561c22e1ab8e0421160edbdd42f3f194ecfa8 +LINEARALGEBRA_GIT_URL := https://github.com/JuliaLang/LinearAlgebra.jl.git +LINEARALGEBRA_TAR_URL = https://api.github.com/repos/JuliaLang/LinearAlgebra.jl/tarball/$1 diff --git a/stdlib/LinearAlgebra/Project.toml b/stdlib/LinearAlgebra/Project.toml deleted file mode 100644 index 892de0397c219..0000000000000 --- a/stdlib/LinearAlgebra/Project.toml +++ /dev/null @@ -1,15 +0,0 @@ -name = "LinearAlgebra" -uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" -version = "1.11.0" - -[deps] -Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" -libblastrampoline_jll = "8e850b90-86db-534c-a0d3-1478176c7d93" -OpenBLAS_jll = "4536629a-c528-5b80-bd46-f80d51c5b363" - -[extras] -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" -Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" - -[targets] -test = ["Test", "Random"] diff --git a/stdlib/LinearAlgebra/docs/src/index.md b/stdlib/LinearAlgebra/docs/src/index.md deleted file mode 100644 index 3e18a45752aeb..0000000000000 --- a/stdlib/LinearAlgebra/docs/src/index.md +++ /dev/null @@ -1,903 +0,0 @@ -```@meta -EditURL = "https://github.com/JuliaLang/julia/blob/master/stdlib/LinearAlgebra/docs/src/index.md" -``` - -# [Linear Algebra](@id man-linalg) - -```@meta -DocTestSetup = :(using LinearAlgebra) -``` - -In addition to (and as part of) its support for multi-dimensional arrays, Julia provides native implementations -of many common and useful linear algebra operations which can be loaded with `using LinearAlgebra`. Basic operations, such as [`tr`](@ref), [`det`](@ref), -and [`inv`](@ref) are all supported: - -```jldoctest -julia> A = [1 2 3; 4 1 6; 7 8 1] -3×3 Matrix{Int64}: - 1 2 3 - 4 1 6 - 7 8 1 - -julia> tr(A) -3 - -julia> det(A) -104.0 - -julia> inv(A) -3×3 Matrix{Float64}: - -0.451923 0.211538 0.0865385 - 0.365385 -0.192308 0.0576923 - 0.240385 0.0576923 -0.0673077 -``` - -As well as other useful operations, such as finding eigenvalues or eigenvectors: - -```jldoctest -julia> A = [-4. -17.; 2. 2.] 
-2×2 Matrix{Float64}: - -4.0 -17.0 - 2.0 2.0 - -julia> eigvals(A) -2-element Vector{ComplexF64}: - -1.0 - 5.0im - -1.0 + 5.0im - -julia> eigvecs(A) -2×2 Matrix{ComplexF64}: - 0.945905-0.0im 0.945905+0.0im - -0.166924+0.278207im -0.166924-0.278207im -``` - -In addition, Julia provides many [factorizations](@ref man-linalg-factorizations) which can be used to -speed up problems such as linear solve or matrix exponentiation by pre-factorizing a matrix into a form -more amenable (for performance or memory reasons) to the problem. See the documentation on [`factorize`](@ref) -for more information. As an example: - -```jldoctest -julia> A = [1.5 2 -4; 3 -1 -6; -10 2.3 4] -3×3 Matrix{Float64}: - 1.5 2.0 -4.0 - 3.0 -1.0 -6.0 - -10.0 2.3 4.0 - -julia> factorize(A) -LU{Float64, Matrix{Float64}, Vector{Int64}} -L factor: -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - -0.15 1.0 0.0 - -0.3 -0.132196 1.0 -U factor: -3×3 Matrix{Float64}: - -10.0 2.3 4.0 - 0.0 2.345 -3.4 - 0.0 0.0 -5.24947 -``` - -Since `A` is not Hermitian, symmetric, triangular, tridiagonal, or bidiagonal, an LU factorization may be the -best we can do. Compare with: - -```jldoctest -julia> B = [1.5 2 -4; 2 -1 -3; -4 -3 5] -3×3 Matrix{Float64}: - 1.5 2.0 -4.0 - 2.0 -1.0 -3.0 - -4.0 -3.0 5.0 - -julia> factorize(B) -BunchKaufman{Float64, Matrix{Float64}, Vector{Int64}} -D factor: -3×3 Tridiagonal{Float64, Vector{Float64}}: - -1.64286 0.0 ⋅ - 0.0 -2.8 0.0 - ⋅ 0.0 5.0 -U factor: -3×3 UnitUpperTriangular{Float64, Matrix{Float64}}: - 1.0 0.142857 -0.8 - ⋅ 1.0 -0.6 - ⋅ ⋅ 1.0 -permutation: -3-element Vector{Int64}: - 1 - 2 - 3 -``` - -Here, Julia was able to detect that `B` is in fact symmetric, and used a more appropriate factorization. -Often it's possible to write more efficient code for a matrix that is known to have certain properties e.g. -it is symmetric, or tridiagonal. Julia provides some special types so that you can "tag" matrices as having -these properties. For instance: - -```jldoctest -julia> B = [1.5 2 -4; 2 -1 -3; -4 -3 5] -3×3 Matrix{Float64}: - 1.5 2.0 -4.0 - 2.0 -1.0 -3.0 - -4.0 -3.0 5.0 - -julia> sB = Symmetric(B) -3×3 Symmetric{Float64, Matrix{Float64}}: - 1.5 2.0 -4.0 - 2.0 -1.0 -3.0 - -4.0 -3.0 5.0 -``` - -`sB` has been tagged as a matrix that's (real) symmetric, so for later operations we might perform on it, -such as eigenfactorization or computing matrix-vector products, efficiencies can be found by only referencing -half of it. For example: - -```jldoctest -julia> B = [1.5 2 -4; 2 -1 -3; -4 -3 5] -3×3 Matrix{Float64}: - 1.5 2.0 -4.0 - 2.0 -1.0 -3.0 - -4.0 -3.0 5.0 - -julia> sB = Symmetric(B) -3×3 Symmetric{Float64, Matrix{Float64}}: - 1.5 2.0 -4.0 - 2.0 -1.0 -3.0 - -4.0 -3.0 5.0 - -julia> x = [1; 2; 3] -3-element Vector{Int64}: - 1 - 2 - 3 - -julia> sB\x -3-element Vector{Float64}: - -1.7391304347826084 - -1.1086956521739126 - -1.4565217391304346 -``` - -The `\` operation here performs the linear solution. The left-division operator is pretty -powerful and it's easy to write compact, readable code that is flexible enough to solve all -sorts of systems of linear equations. - -## Special matrices - -[Matrices with special symmetries and structures](https://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274) -arise often in linear algebra and are frequently associated with various matrix factorizations. -Julia features a rich collection of special matrix types, which allow for fast computation with -specialized routines that are specially developed for particular matrix types. 
- -The following tables summarize the types of special matrices that have been implemented in Julia, -as well as whether hooks to various optimized methods for them in LAPACK are available. - -| Type | Description | -|:----------------------------- |:--------------------------------------------------------------------------------------------- | -| [`Symmetric`](@ref) | [Symmetric matrix](https://en.wikipedia.org/wiki/Symmetric_matrix) | -| [`Hermitian`](@ref) | [Hermitian matrix](https://en.wikipedia.org/wiki/Hermitian_matrix) | -| [`UpperTriangular`](@ref) | Upper [triangular matrix](https://en.wikipedia.org/wiki/Triangular_matrix) | -| [`UnitUpperTriangular`](@ref) | Upper [triangular matrix](https://en.wikipedia.org/wiki/Triangular_matrix) with unit diagonal | -| [`LowerTriangular`](@ref) | Lower [triangular matrix](https://en.wikipedia.org/wiki/Triangular_matrix) | | -| [`UnitLowerTriangular`](@ref) | Lower [triangular matrix](https://en.wikipedia.org/wiki/Triangular_matrix) with unit diagonal | -| [`UpperHessenberg`](@ref) | Upper [Hessenberg matrix](https://en.wikipedia.org/wiki/Hessenberg_matrix) -| [`Tridiagonal`](@ref) | [Tridiagonal matrix](https://en.wikipedia.org/wiki/Tridiagonal_matrix) | -| [`SymTridiagonal`](@ref) | Symmetric tridiagonal matrix | -| [`Bidiagonal`](@ref) | Upper/lower [bidiagonal matrix](https://en.wikipedia.org/wiki/Bidiagonal_matrix) | -| [`Diagonal`](@ref) | [Diagonal matrix](https://en.wikipedia.org/wiki/Diagonal_matrix) | -| [`UniformScaling`](@ref) | [Uniform scaling operator](https://en.wikipedia.org/wiki/Uniform_scaling) | - -### Elementary operations - -| Matrix type | `+` | `-` | `*` | `\` | Other functions with optimized methods | -|:----------------------------- |:--- |:--- |:--- |:--- |:----------------------------------------------------------- | -| [`Symmetric`](@ref) | | | | MV | [`inv`](@ref), [`sqrt`](@ref), [`cbrt`](@ref), [`exp`](@ref) | -| [`Hermitian`](@ref) | | | | MV | [`inv`](@ref), [`sqrt`](@ref), [`cbrt`](@ref), [`exp`](@ref) | -| [`UpperTriangular`](@ref) | | | MV | MV | [`inv`](@ref), [`det`](@ref), [`logdet`](@ref) | -| [`UnitUpperTriangular`](@ref) | | | MV | MV | [`inv`](@ref), [`det`](@ref), [`logdet`](@ref) | -| [`LowerTriangular`](@ref) | | | MV | MV | [`inv`](@ref), [`det`](@ref), [`logdet`](@ref) | -| [`UnitLowerTriangular`](@ref) | | | MV | MV | [`inv`](@ref), [`det`](@ref), [`logdet`](@ref) | -| [`UpperHessenberg`](@ref) | | | | MM | [`inv`](@ref), [`det`](@ref) | -| [`SymTridiagonal`](@ref) | M | M | MS | MV | [`eigmax`](@ref), [`eigmin`](@ref) | -| [`Tridiagonal`](@ref) | M | M | MS | MV | | -| [`Bidiagonal`](@ref) | M | M | MS | MV | | -| [`Diagonal`](@ref) | M | M | MV | MV | [`inv`](@ref), [`det`](@ref), [`logdet`](@ref), [`/`](@ref) | -| [`UniformScaling`](@ref) | M | M | MVS | MVS | [`/`](@ref) | - -Legend: - -| Key | Description | -|:---------- |:------------------------------------------------------------- | -| M (matrix) | An optimized method for matrix-matrix operations is available | -| V (vector) | An optimized method for matrix-vector operations is available | -| S (scalar) | An optimized method for matrix-scalar operations is available | - -### Matrix factorizations - -| Matrix type | LAPACK | [`eigen`](@ref) | [`eigvals`](@ref) | [`eigvecs`](@ref) | [`svd`](@ref) | [`svdvals`](@ref) | -|:----------------------------- |:------ |:------------- |:----------------- |:----------------- |:------------- |:----------------- | -| [`Symmetric`](@ref) | SY | | ARI | | | | -| [`Hermitian`](@ref) | HE | | ARI | | | | 
-| [`UpperTriangular`](@ref) | TR | A | A | A | | | -| [`UnitUpperTriangular`](@ref) | TR | A | A | A | | | -| [`LowerTriangular`](@ref) | TR | A | A | A | | | -| [`UnitLowerTriangular`](@ref) | TR | A | A | A | | | -| [`SymTridiagonal`](@ref) | ST | A | ARI | AV | | | -| [`Tridiagonal`](@ref) | GT | | | | | | -| [`Bidiagonal`](@ref) | BD | | | | A | A | -| [`Diagonal`](@ref) | DI | | A | | | | - -Legend: - -| Key | Description | Example | -|:------------ |:------------------------------------------------------------------------------------------------------------------------------- |:-------------------- | -| A (all) | An optimized method to find all the characteristic values and/or vectors is available | e.g. `eigvals(M)` | -| R (range) | An optimized method to find the `il`th through the `ih`th characteristic values are available | `eigvals(M, il, ih)` | -| I (interval) | An optimized method to find the characteristic values in the interval [`vl`, `vh`] is available | `eigvals(M, vl, vh)` | -| V (vectors) | An optimized method to find the characteristic vectors corresponding to the characteristic values `x=[x1, x2,...]` is available | `eigvecs(M, x)` | - -### The uniform scaling operator - -A [`UniformScaling`](@ref) operator represents a scalar times the identity operator, `λ*I`. The identity -operator `I` is defined as a constant and is an instance of `UniformScaling`. The size of these -operators are generic and match the other matrix in the binary operations [`+`](@ref), [`-`](@ref), -[`*`](@ref) and [`\`](@ref). For `A+I` and `A-I` this means that `A` must be square. Multiplication -with the identity operator `I` is a noop (except for checking that the scaling factor is one) -and therefore almost without overhead. - -To see the `UniformScaling` operator in action: - -```jldoctest -julia> U = UniformScaling(2); - -julia> a = [1 2; 3 4] -2×2 Matrix{Int64}: - 1 2 - 3 4 - -julia> a + U -2×2 Matrix{Int64}: - 3 2 - 3 6 - -julia> a * U -2×2 Matrix{Int64}: - 2 4 - 6 8 - -julia> [a U] -2×4 Matrix{Int64}: - 1 2 2 0 - 3 4 0 2 - -julia> b = [1 2 3; 4 5 6] -2×3 Matrix{Int64}: - 1 2 3 - 4 5 6 - -julia> b - U -ERROR: DimensionMismatch: matrix is not square: dimensions are (2, 3) -Stacktrace: -[...] -``` - -If you need to solve many systems of the form `(A+μI)x = b` for the same `A` and different `μ`, it might be beneficial -to first compute the Hessenberg factorization `F` of `A` via the [`hessenberg`](@ref) function. -Given `F`, Julia employs an efficient algorithm for `(F+μ*I) \ b` (equivalent to `(A+μ*I)x \ b`) and related -operations like determinants. - -## [Matrix factorizations](@id man-linalg-factorizations) - -[Matrix factorizations (a.k.a. matrix decompositions)](https://en.wikipedia.org/wiki/Matrix_decomposition) -compute the factorization of a matrix into a product of matrices, and are one of the central concepts -in (numerical) linear algebra. - -The following table summarizes the types of matrix factorizations that have been implemented in -Julia. Details of their associated methods can be found in the [Standard functions](@ref) section -of the Linear Algebra documentation. 
- -| Type | Description | -|:------------------ |:-------------------------------------------------------------------------------------------------------------- | -| `BunchKaufman` | Bunch-Kaufman factorization | -| `Cholesky` | [Cholesky factorization](https://en.wikipedia.org/wiki/Cholesky_decomposition) | -| `CholeskyPivoted` | [Pivoted](https://en.wikipedia.org/wiki/Pivot_element) Cholesky factorization | -| `LDLt` | [LDL(T) factorization](https://en.wikipedia.org/wiki/Cholesky_decomposition#LDL_decomposition) | -| `LU` | [LU factorization](https://en.wikipedia.org/wiki/LU_decomposition) | -| `QR` | [QR factorization](https://en.wikipedia.org/wiki/QR_decomposition) | -| `QRCompactWY` | Compact WY form of the QR factorization | -| `QRPivoted` | Pivoted [QR factorization](https://en.wikipedia.org/wiki/QR_decomposition) | -| `LQ` | [QR factorization](https://en.wikipedia.org/wiki/QR_decomposition) of `transpose(A)` | -| `Hessenberg` | [Hessenberg decomposition](https://mathworld.wolfram.com/HessenbergDecomposition.html) | -| `Eigen` | [Spectral decomposition](https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix) | -| `GeneralizedEigen` | [Generalized spectral decomposition](https://en.wikipedia.org/wiki/Eigendecomposition_of_a_matrix#Generalized_eigenvalue_problem) | -| `SVD` | [Singular value decomposition](https://en.wikipedia.org/wiki/Singular_value_decomposition) | -| `GeneralizedSVD` | [Generalized SVD](https://en.wikipedia.org/wiki/Generalized_singular_value_decomposition#Higher_order_version) | -| `Schur` | [Schur decomposition](https://en.wikipedia.org/wiki/Schur_decomposition) | -| `GeneralizedSchur` | [Generalized Schur decomposition](https://en.wikipedia.org/wiki/Schur_decomposition#Generalized_Schur_decomposition) | - -Adjoints and transposes of [`Factorization`](@ref) objects are lazily wrapped in -`AdjointFactorization` and `TransposeFactorization` objects, respectively. Generically, -transpose of real `Factorization`s are wrapped as `AdjointFactorization`. - -## [Orthogonal matrices (`AbstractQ`)](@id man-linalg-abstractq) - -Some matrix factorizations generate orthogonal/unitary "matrix" factors. These -factorizations include QR-related factorizations obtained from calls to [`qr`](@ref), i.e., -`QR`, `QRCompactWY` and `QRPivoted`, the Hessenberg factorization obtained from calls to -[`hessenberg`](@ref), and the LQ factorization obtained from [`lq`](@ref). While these -orthogonal/unitary factors admit a matrix representation, their internal representation -is, for performance and memory reasons, different. Hence, they should be rather viewed as -matrix-backed, function-based linear operators. In particular, reading, for instance, a -column of its matrix representation requires running "matrix"-vector multiplication code, -rather than simply reading out data from memory (possibly filling parts of the vector with -structural zeros). Another clear distinction from other, non-triangular matrix types is -that the underlying multiplication code allows for in-place modification during multiplication. 
-Furthermore, objects of specific `AbstractQ` subtypes as those created via [`qr`](@ref), -[`hessenberg`](@ref) and [`lq`](@ref) can behave like a square or a rectangular matrix -depending on context: - -```julia -julia> using LinearAlgebra - -julia> Q = qr(rand(3,2)).Q -3×3 LinearAlgebra.QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}} - -julia> Matrix(Q) -3×2 Matrix{Float64}: - -0.320597 0.865734 - -0.765834 -0.475694 - -0.557419 0.155628 - -julia> Q*I -3×3 Matrix{Float64}: - -0.320597 0.865734 -0.384346 - -0.765834 -0.475694 -0.432683 - -0.557419 0.155628 0.815514 - -julia> Q*ones(2) -3-element Vector{Float64}: - 0.5451367118802273 - -1.241527373086654 - -0.40179067589600226 - -julia> Q*ones(3) -3-element Vector{Float64}: - 0.16079054743832022 - -1.674209978965636 - 0.41372375588835797 - -julia> ones(1,2) * Q' -1×3 Matrix{Float64}: - 0.545137 -1.24153 -0.401791 - -julia> ones(1,3) * Q' -1×3 Matrix{Float64}: - 0.160791 -1.67421 0.413724 -``` - -Due to this distinction from dense or structured matrices, the abstract `AbstractQ` type -does not subtype `AbstractMatrix`, but instead has its own type hierarchy. Custom types -that subtype `AbstractQ` can rely on generic fallbacks if the following interface is satisfied. -For example, for - -```julia -struct MyQ{T} <: LinearAlgebra.AbstractQ{T} - # required fields -end -``` - -provide overloads for - -```julia -Base.size(Q::MyQ) # size of corresponding square matrix representation -Base.convert(::Type{AbstractQ{T}}, Q::MyQ) # eltype promotion [optional] -LinearAlgebra.lmul!(Q::MyQ, x::AbstractVecOrMat) # left-multiplication -LinearAlgebra.rmul!(A::AbstractMatrix, Q::MyQ) # right-multiplication -``` - -If `eltype` promotion is not of interest, the `convert` method is unnecessary, since by -default `convert(::Type{AbstractQ{T}}, Q::AbstractQ{T})` returns `Q` itself. -Adjoints of `AbstractQ`-typed objects are lazily wrapped in an `AdjointQ` wrapper type, -which requires its own `LinearAlgebra.lmul!` and `LinearAlgebra.rmul!` methods. Given this -set of methods, any `Q::MyQ` can be used like a matrix, preferably in a multiplicative -context: multiplication via `*` with scalars, vectors and matrices from left and right, -obtaining a matrix representation of `Q` via `Matrix(Q)` (or `Q*I`) and indexing into the -matrix representation all work. In contrast, addition and subtraction as well as more -generally broadcasting over elements in the matrix representation fail because that would -be highly inefficient. For such use cases, consider computing the matrix representation -up front and cache it for future reuse. - -## [Pivoting Strategies](@id man-linalg-pivoting-strategies) - -Several of Julia's [matrix factorizations](@ref man-linalg-factorizations) support -[pivoting](https://en.wikipedia.org/wiki/Pivot_element), which can be used to improve their -numerical stability. In fact, some matrix factorizations, such as the LU -factorization, may fail without pivoting. - -In pivoting, first, a [pivot element](https://en.wikipedia.org/wiki/Pivot_element) -with good numerical properties is chosen based on a pivoting strategy. Next, the rows and -columns of the original matrix are permuted to bring the chosen element in place for -subsequent computation. Furthermore, the process is repeated for each stage of the factorization. - -Consequently, besides the conventional matrix factors, the outputs of -pivoted factorization schemes also include permutation matrices. - -In the following, the pivoting strategies implemented in Julia are briefly described. 
Note -that not all matrix factorizations may support them. Consult the documentation of the -respective [matrix factorization](@ref man-linalg-factorizations) for details on the -supported pivoting strategies. - -See also [`LinearAlgebra.ZeroPivotException`](@ref). - -```@docs -LinearAlgebra.NoPivot -LinearAlgebra.RowNonZero -LinearAlgebra.RowMaximum -LinearAlgebra.ColumnNorm -``` - -## Standard functions - -Linear algebra functions in Julia are largely implemented by calling functions from [LAPACK](https://www.netlib.org/lapack/). -Sparse matrix factorizations call functions from [SuiteSparse](http://suitesparse.com). -Other sparse solvers are available as Julia packages. - -```@docs -Base.:*(::AbstractMatrix, ::AbstractMatrix) -Base.:*(::AbstractMatrix, ::AbstractMatrix, ::AbstractVector) -Base.:\(::AbstractMatrix, ::AbstractVecOrMat) -Base.:/(::AbstractVecOrMat, ::AbstractVecOrMat) -LinearAlgebra.SingularException -LinearAlgebra.PosDefException -LinearAlgebra.ZeroPivotException -LinearAlgebra.RankDeficientException -LinearAlgebra.LAPACKException -LinearAlgebra.dot -LinearAlgebra.dot(::Any, ::Any, ::Any) -LinearAlgebra.cross -LinearAlgebra.axpy! -LinearAlgebra.axpby! -LinearAlgebra.rotate! -LinearAlgebra.reflect! -LinearAlgebra.factorize -LinearAlgebra.Diagonal -LinearAlgebra.Bidiagonal -LinearAlgebra.SymTridiagonal -LinearAlgebra.Tridiagonal -LinearAlgebra.Symmetric -LinearAlgebra.Hermitian -LinearAlgebra.LowerTriangular -LinearAlgebra.UpperTriangular -LinearAlgebra.UnitLowerTriangular -LinearAlgebra.UnitUpperTriangular -LinearAlgebra.UpperHessenberg -LinearAlgebra.UniformScaling -LinearAlgebra.I -LinearAlgebra.UniformScaling(::Integer) -LinearAlgebra.Factorization -LinearAlgebra.LU -LinearAlgebra.lu -LinearAlgebra.lu! -LinearAlgebra.Cholesky -LinearAlgebra.CholeskyPivoted -LinearAlgebra.cholesky -LinearAlgebra.cholesky! -LinearAlgebra.lowrankupdate -LinearAlgebra.lowrankdowndate -LinearAlgebra.lowrankupdate! -LinearAlgebra.lowrankdowndate! -LinearAlgebra.LDLt -LinearAlgebra.ldlt -LinearAlgebra.ldlt! -LinearAlgebra.QR -LinearAlgebra.QRCompactWY -LinearAlgebra.QRPivoted -LinearAlgebra.qr -LinearAlgebra.qr! -LinearAlgebra.LQ -LinearAlgebra.lq -LinearAlgebra.lq! -LinearAlgebra.BunchKaufman -LinearAlgebra.bunchkaufman -LinearAlgebra.bunchkaufman! -LinearAlgebra.Eigen -LinearAlgebra.GeneralizedEigen -LinearAlgebra.eigvals -LinearAlgebra.eigvals! -LinearAlgebra.eigmax -LinearAlgebra.eigmin -LinearAlgebra.eigvecs -LinearAlgebra.eigen -LinearAlgebra.eigen! -LinearAlgebra.Hessenberg -LinearAlgebra.hessenberg -LinearAlgebra.hessenberg! -LinearAlgebra.Schur -LinearAlgebra.GeneralizedSchur -LinearAlgebra.schur -LinearAlgebra.schur! -LinearAlgebra.ordschur -LinearAlgebra.ordschur! -LinearAlgebra.SVD -LinearAlgebra.GeneralizedSVD -LinearAlgebra.svd -LinearAlgebra.svd! -LinearAlgebra.svdvals -LinearAlgebra.svdvals! -LinearAlgebra.Givens -LinearAlgebra.givens -LinearAlgebra.triu -LinearAlgebra.triu! -LinearAlgebra.tril -LinearAlgebra.tril! -LinearAlgebra.diagind -LinearAlgebra.diag -LinearAlgebra.diagm -LinearAlgebra.rank -LinearAlgebra.norm -LinearAlgebra.opnorm -LinearAlgebra.normalize! -LinearAlgebra.normalize -LinearAlgebra.cond -LinearAlgebra.condskeel -LinearAlgebra.tr -LinearAlgebra.det -LinearAlgebra.logdet -LinearAlgebra.logabsdet -Base.inv(::AbstractMatrix) -LinearAlgebra.pinv -LinearAlgebra.nullspace -Base.kron -Base.kron! 
-LinearAlgebra.exp(::StridedMatrix{<:LinearAlgebra.BlasFloat}) -Base.cis(::AbstractMatrix) -Base.:^(::AbstractMatrix, ::Number) -Base.:^(::Number, ::AbstractMatrix) -LinearAlgebra.log(::StridedMatrix) -LinearAlgebra.sqrt(::StridedMatrix) -LinearAlgebra.cbrt(::AbstractMatrix{<:Real}) -LinearAlgebra.cos(::StridedMatrix{<:Real}) -LinearAlgebra.sin(::StridedMatrix{<:Real}) -LinearAlgebra.sincos(::StridedMatrix{<:Real}) -LinearAlgebra.tan(::StridedMatrix{<:Real}) -LinearAlgebra.sec(::StridedMatrix) -LinearAlgebra.csc(::StridedMatrix) -LinearAlgebra.cot(::StridedMatrix) -LinearAlgebra.cosh(::StridedMatrix) -LinearAlgebra.sinh(::StridedMatrix) -LinearAlgebra.tanh(::StridedMatrix) -LinearAlgebra.sech(::StridedMatrix) -LinearAlgebra.csch(::StridedMatrix) -LinearAlgebra.coth(::StridedMatrix) -LinearAlgebra.acos(::StridedMatrix) -LinearAlgebra.asin(::StridedMatrix) -LinearAlgebra.atan(::StridedMatrix) -LinearAlgebra.asec(::StridedMatrix) -LinearAlgebra.acsc(::StridedMatrix) -LinearAlgebra.acot(::StridedMatrix) -LinearAlgebra.acosh(::StridedMatrix) -LinearAlgebra.asinh(::StridedMatrix) -LinearAlgebra.atanh(::StridedMatrix) -LinearAlgebra.asech(::StridedMatrix) -LinearAlgebra.acsch(::StridedMatrix) -LinearAlgebra.acoth(::StridedMatrix) -LinearAlgebra.lyap -LinearAlgebra.sylvester -LinearAlgebra.issuccess -LinearAlgebra.issymmetric -LinearAlgebra.isposdef -LinearAlgebra.isposdef! -LinearAlgebra.istril -LinearAlgebra.istriu -LinearAlgebra.isdiag -LinearAlgebra.ishermitian -Base.transpose -LinearAlgebra.transpose! -LinearAlgebra.Transpose -LinearAlgebra.TransposeFactorization -Base.adjoint -LinearAlgebra.adjoint! -LinearAlgebra.Adjoint -LinearAlgebra.AdjointFactorization -Base.copy(::Union{Transpose,Adjoint}) -LinearAlgebra.stride1 -LinearAlgebra.checksquare -LinearAlgebra.peakflops -LinearAlgebra.hermitianpart -LinearAlgebra.hermitianpart! -LinearAlgebra.copy_adjoint! -LinearAlgebra.copy_transpose! -``` - -## Low-level matrix operations - -In many cases there are in-place versions of matrix operations that allow you to supply -a pre-allocated output vector or matrix. This is useful when optimizing critical code in order -to avoid the overhead of repeated allocations. These in-place operations are suffixed with `!` -below (e.g. `mul!`) according to the usual Julia convention. - -```@docs -LinearAlgebra.mul! -LinearAlgebra.lmul! -LinearAlgebra.rmul! -LinearAlgebra.ldiv! -LinearAlgebra.rdiv! -``` - -## BLAS functions - -In Julia (as in much of scientific computation), dense linear-algebra operations are based on -the [LAPACK library](https://www.netlib.org/lapack/), which in turn is built on top of basic linear-algebra -building-blocks known as the [BLAS](https://www.netlib.org/blas/). There are highly optimized -implementations of BLAS available for every computer architecture, and sometimes in high-performance -linear algebra routines it is useful to call the BLAS functions directly. - -`LinearAlgebra.BLAS` provides wrappers for some of the BLAS functions. Those BLAS functions -that overwrite one of the input arrays have names ending in `'!'`. Usually, a BLAS function has -four methods defined, for [`Float32`](@ref), [`Float64`](@ref), [`ComplexF32`](@ref Complex), -and [`ComplexF64`](@ref Complex) arrays. 
- -### [BLAS character arguments](@id stdlib-blas-chars) - -Many BLAS functions accept arguments that determine whether to transpose an argument (`trans`), -which triangle of a matrix to reference (`uplo` or `ul`), -whether the diagonal of a triangular matrix can be assumed to -be all ones (`dA`) or which side of a matrix multiplication -the input argument belongs on (`side`). The possibilities are: - -#### [Multiplication order](@id stdlib-blas-side) - -| `side` | Meaning | -|:-------|:--------------------------------------------------------------------| -| `'L'` | The argument goes on the *left* side of a matrix-matrix operation. | -| `'R'` | The argument goes on the *right* side of a matrix-matrix operation. | - -#### [Triangle referencing](@id stdlib-blas-uplo) - -| `uplo`/`ul` | Meaning | -|:------------|:------------------------------------------------------| -| `'U'` | Only the *upper* triangle of the matrix will be used. | -| `'L'` | Only the *lower* triangle of the matrix will be used. | - -#### [Transposition operation](@id stdlib-blas-trans) - -| `trans`/`tX` | Meaning | -|:-------------|:--------------------------------------------------------| -| `'N'` | The input matrix `X` is not transposed or conjugated. | -| `'T'` | The input matrix `X` will be transposed. | -| `'C'` | The input matrix `X` will be conjugated and transposed. | - -#### [Unit diagonal](@id stdlib-blas-diag) - -| `diag`/`dX` | Meaning | -|:------------|:----------------------------------------------------------| -| `'N'` | The diagonal values of the matrix `X` will be read. | -| `'U'` | The diagonal of the matrix `X` is assumed to be all ones. | - -```@docs -LinearAlgebra.BLAS -LinearAlgebra.BLAS.set_num_threads -LinearAlgebra.BLAS.get_num_threads -``` - -BLAS functions can be divided into three groups, also called three levels, -depending on when they were first proposed, the type of input parameters, -and the complexity of the operation. - -### Level 1 BLAS functions - -The level 1 BLAS functions were first proposed in ([Lawson, 1979](https://dl.acm.org/doi/10.1145/355841.355847)) and -define operations between scalars and vectors. - -```@docs -# xROTG -# xROTMG -LinearAlgebra.BLAS.rot! -# xROTM -# xSWAP -LinearAlgebra.BLAS.scal! -LinearAlgebra.BLAS.scal -LinearAlgebra.BLAS.blascopy! -# xAXPY! -# xAXPBY! -LinearAlgebra.BLAS.dot -LinearAlgebra.BLAS.dotu -LinearAlgebra.BLAS.dotc -# xxDOT -LinearAlgebra.BLAS.nrm2 -LinearAlgebra.BLAS.asum -LinearAlgebra.BLAS.iamax -``` - -### Level 2 BLAS functions - -The level 2 BLAS functions were published in ([Dongarra, 1988](https://dl.acm.org/doi/10.1145/42288.42291)) -and define matrix-vector operations. - -**return a vector** - -```@docs -LinearAlgebra.BLAS.gemv! -LinearAlgebra.BLAS.gemv(::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.gemv(::Any, ::Any, ::Any) -LinearAlgebra.BLAS.gbmv! -LinearAlgebra.BLAS.gbmv -LinearAlgebra.BLAS.hemv! -LinearAlgebra.BLAS.hemv(::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.hemv(::Any, ::Any, ::Any) -# hbmv!, hbmv -LinearAlgebra.BLAS.hpmv! -LinearAlgebra.BLAS.symv! -LinearAlgebra.BLAS.symv(::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.symv(::Any, ::Any, ::Any) -LinearAlgebra.BLAS.sbmv! -LinearAlgebra.BLAS.sbmv(::Any, ::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.sbmv(::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.spmv! -LinearAlgebra.BLAS.trmv! -LinearAlgebra.BLAS.trmv -# xTBMV -# xTPMV -LinearAlgebra.BLAS.trsv! -LinearAlgebra.BLAS.trsv -# xTBSV -# xTPSV -``` - -**return a matrix** - -```@docs -LinearAlgebra.BLAS.ger! 
-# xGERU -# xGERC -LinearAlgebra.BLAS.her! -# xHPR -# xHER2 -# xHPR2 -LinearAlgebra.BLAS.syr! -LinearAlgebra.BLAS.spr! -# xSYR2 -# xSPR2 -``` - -### Level 3 BLAS functions - -The level 3 BLAS functions were published in ([Dongarra, 1990](https://dl.acm.org/doi/10.1145/77626.79170)) -and define matrix-matrix operations. - -```@docs -LinearAlgebra.BLAS.gemmt! -LinearAlgebra.BLAS.gemmt(::Any, ::Any, ::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.gemmt(::Any, ::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.gemm! -LinearAlgebra.BLAS.gemm(::Any, ::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.gemm(::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.symm! -LinearAlgebra.BLAS.symm(::Any, ::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.symm(::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.hemm! -LinearAlgebra.BLAS.hemm(::Any, ::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.hemm(::Any, ::Any, ::Any, ::Any) -LinearAlgebra.BLAS.syrk! -LinearAlgebra.BLAS.syrk -LinearAlgebra.BLAS.herk! -LinearAlgebra.BLAS.herk -LinearAlgebra.BLAS.syr2k! -LinearAlgebra.BLAS.syr2k -LinearAlgebra.BLAS.her2k! -LinearAlgebra.BLAS.her2k -LinearAlgebra.BLAS.trmm! -LinearAlgebra.BLAS.trmm -LinearAlgebra.BLAS.trsm! -LinearAlgebra.BLAS.trsm -``` - -## [LAPACK functions](@id man-linalg-lapack-functions) - -`LinearAlgebra.LAPACK` provides wrappers for some of the LAPACK functions for linear algebra. - Those functions that overwrite one of the input arrays have names ending in `'!'`. - -Usually a function has 4 methods defined, one each for [`Float64`](@ref), [`Float32`](@ref), -`ComplexF64` and `ComplexF32` arrays. - -Note that the LAPACK API provided by Julia can and will change in the future. Since this API is -not user-facing, there is no commitment to support/deprecate this specific set of functions in -future releases. - -```@docs -LinearAlgebra.LAPACK -LinearAlgebra.LAPACK.gbtrf! -LinearAlgebra.LAPACK.gbtrs! -LinearAlgebra.LAPACK.gebal! -LinearAlgebra.LAPACK.gebak! -LinearAlgebra.LAPACK.gebrd! -LinearAlgebra.LAPACK.gelqf! -LinearAlgebra.LAPACK.geqlf! -LinearAlgebra.LAPACK.geqrf! -LinearAlgebra.LAPACK.geqp3! -LinearAlgebra.LAPACK.gerqf! -LinearAlgebra.LAPACK.geqrt! -LinearAlgebra.LAPACK.geqrt3! -LinearAlgebra.LAPACK.getrf! -LinearAlgebra.LAPACK.tzrzf! -LinearAlgebra.LAPACK.ormrz! -LinearAlgebra.LAPACK.gels! -LinearAlgebra.LAPACK.gesv! -LinearAlgebra.LAPACK.getrs! -LinearAlgebra.LAPACK.getri! -LinearAlgebra.LAPACK.gesvx! -LinearAlgebra.LAPACK.gelsd! -LinearAlgebra.LAPACK.gelsy! -LinearAlgebra.LAPACK.gglse! -LinearAlgebra.LAPACK.geev! -LinearAlgebra.LAPACK.gesdd! -LinearAlgebra.LAPACK.gesvd! -LinearAlgebra.LAPACK.ggsvd! -LinearAlgebra.LAPACK.ggsvd3! -LinearAlgebra.LAPACK.geevx! -LinearAlgebra.LAPACK.ggev! -LinearAlgebra.LAPACK.ggev3! -LinearAlgebra.LAPACK.gtsv! -LinearAlgebra.LAPACK.gttrf! -LinearAlgebra.LAPACK.gttrs! -LinearAlgebra.LAPACK.orglq! -LinearAlgebra.LAPACK.orgqr! -LinearAlgebra.LAPACK.orgql! -LinearAlgebra.LAPACK.orgrq! -LinearAlgebra.LAPACK.ormlq! -LinearAlgebra.LAPACK.ormqr! -LinearAlgebra.LAPACK.ormql! -LinearAlgebra.LAPACK.ormrq! -LinearAlgebra.LAPACK.gemqrt! -LinearAlgebra.LAPACK.posv! -LinearAlgebra.LAPACK.potrf! -LinearAlgebra.LAPACK.potri! -LinearAlgebra.LAPACK.potrs! -LinearAlgebra.LAPACK.pstrf! -LinearAlgebra.LAPACK.ptsv! -LinearAlgebra.LAPACK.pttrf! -LinearAlgebra.LAPACK.pttrs! -LinearAlgebra.LAPACK.trtri! -LinearAlgebra.LAPACK.trtrs! -LinearAlgebra.LAPACK.trcon! -LinearAlgebra.LAPACK.trevc! -LinearAlgebra.LAPACK.trrfs! -LinearAlgebra.LAPACK.stev! -LinearAlgebra.LAPACK.stebz! -LinearAlgebra.LAPACK.stegr! 
-LinearAlgebra.LAPACK.stein! -LinearAlgebra.LAPACK.syconv! -LinearAlgebra.LAPACK.sysv! -LinearAlgebra.LAPACK.sytrf! -LinearAlgebra.LAPACK.sytri! -LinearAlgebra.LAPACK.sytrs! -LinearAlgebra.LAPACK.hesv! -LinearAlgebra.LAPACK.hetrf! -LinearAlgebra.LAPACK.hetri! -LinearAlgebra.LAPACK.hetrs! -LinearAlgebra.LAPACK.syev! -LinearAlgebra.LAPACK.syevr! -LinearAlgebra.LAPACK.syevd! -LinearAlgebra.LAPACK.sygvd! -LinearAlgebra.LAPACK.bdsqr! -LinearAlgebra.LAPACK.bdsdc! -LinearAlgebra.LAPACK.gecon! -LinearAlgebra.LAPACK.gehrd! -LinearAlgebra.LAPACK.orghr! -LinearAlgebra.LAPACK.gees! -LinearAlgebra.LAPACK.gges! -LinearAlgebra.LAPACK.gges3! -LinearAlgebra.LAPACK.trexc! -LinearAlgebra.LAPACK.trsen! -LinearAlgebra.LAPACK.tgsen! -LinearAlgebra.LAPACK.trsyl! -LinearAlgebra.LAPACK.hseqr! -``` - -## Developer Documentation - -```@docs -LinearAlgebra.matprod_dest -LinearAlgebra.haszero -``` - -```@meta -DocTestSetup = nothing -``` diff --git a/stdlib/LinearAlgebra/src/LinearAlgebra.jl b/stdlib/LinearAlgebra/src/LinearAlgebra.jl deleted file mode 100644 index fc1081e007da2..0000000000000 --- a/stdlib/LinearAlgebra/src/LinearAlgebra.jl +++ /dev/null @@ -1,843 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -""" -Linear algebra module. Provides array arithmetic, -matrix factorizations and other linear algebra related -functionality. -""" -module LinearAlgebra - -import Base: \, /, //, *, ^, +, -, == -import Base: USE_BLAS64, abs, acos, acosh, acot, acoth, acsc, acsch, adjoint, asec, asech, - asin, asinh, atan, atanh, axes, big, broadcast, cbrt, ceil, cis, collect, conj, convert, - copy, copyto!, copymutable, cos, cosh, cot, coth, csc, csch, eltype, exp, fill!, floor, - getindex, hcat, getproperty, imag, inv, invpermuterows!, isapprox, isequal, isone, iszero, - IndexStyle, kron, kron!, length, log, map, ndims, one, oneunit, parent, permutecols!, - permutedims, permuterows!, power_by_squaring, promote_rule, real, sec, sech, setindex!, - show, similar, sin, sincos, sinh, size, sqrt, strides, stride, tan, tanh, transpose, trunc, - typed_hcat, vec, view, zero -using Base: IndexLinear, promote_eltype, promote_op, print_matrix, - @propagate_inbounds, reduce, typed_hvcat, typed_vcat, require_one_based_indexing, - splat, BitInteger -using Base.Broadcast: Broadcasted, broadcasted -using Base.PermutedDimsArrays: CommutativeOps -using OpenBLAS_jll -using libblastrampoline_jll -import Libdl - -export -# Modules - BLAS, - LAPACK, - -# Types - Adjoint, - Bidiagonal, - BunchKaufman, - Cholesky, - CholeskyPivoted, - ColumnNorm, - Diagonal, - Eigen, - Factorization, - GeneralizedEigen, - GeneralizedSVD, - GeneralizedSchur, - Hermitian, - Hessenberg, - LDLt, - LQ, - LU, - LowerTriangular, - NoPivot, - QR, - QRPivoted, - RowMaximum, - RowNonZero, - SVD, - Schur, - SymTridiagonal, - Symmetric, - Transpose, - Tridiagonal, - UniformScaling, - UnitLowerTriangular, - UnitUpperTriangular, - UpperHessenberg, - UpperTriangular, - - -# Functions - adjoint!, - adjoint, - axpby!, - axpy!, - bunchkaufman!, - bunchkaufman, - cholesky!, - cholesky, - cond, - condskeel, - copy_adjoint!, - copy_transpose!, - copyto!, - copytrito!, - cross, - det, - diag, - diagind, - diagm, - diagview, - dot, - eigen!, - eigen, - eigmax, - eigmin, - eigvals!, - eigvals, - eigvecs, - factorize, - givens, - hermitianpart!, - hermitianpart, - hessenberg!, - hessenberg, - isdiag, - ishermitian, - isposdef!, - isposdef, - issuccess, - issymmetric, - istril, - istriu, - kron!, - kron, - ldiv!, - ldlt!, - ldlt, - lmul!, - logabsdet, 
- logdet, - lowrankdowndate!, - lowrankdowndate, - lowrankupdate!, - lowrankupdate, - lq!, - lq, - lu!, - lu, - lyap, - mul!, - norm, - normalize!, - normalize, - nullspace, - opnorm, - ordschur!, - ordschur, - pinv, - qr!, - qr, - rank, - rdiv!, - reflect!, - rmul!, - rotate!, - schur!, - schur, - svd!, - svd, - svdvals!, - svdvals, - sylvester, - tr, - transpose!, - transpose, - tril!, - tril, - triu!, - triu, - - -# Operators - \, - /, - -# Constants - I - -# not exported, but public names -public AbstractTriangular, - Givens, - checksquare, - haszero, - hermitian, - hermitian_type, - isbanded, - peakflops, - symmetric, - symmetric_type, - zeroslike, - matprod_dest - -const BlasFloat = Union{Float64,Float32,ComplexF64,ComplexF32} -const BlasReal = Union{Float64,Float32} -const BlasComplex = Union{ComplexF64,ComplexF32} - -if USE_BLAS64 - const BlasInt = Int64 -else - const BlasInt = Int32 -end - - -abstract type Algorithm end -struct DivideAndConquer <: Algorithm end -struct QRIteration <: Algorithm end -struct RobustRepresentations <: Algorithm end - -# Pivoting strategies for matrix factorization algorithms. -abstract type PivotingStrategy end - -""" - NoPivot - -Pivoting is not performed. This is the default strategy for [`cholesky`](@ref) and -[`qr`](@ref) factorizations. Note, however, that other matrix factorizations such as the LU -factorization may fail without pivoting, and may also be numerically unstable for -floating-point matrices in the face of roundoff error. In such cases, this pivot strategy -is mainly useful for pedagogical purposes. -""" -struct NoPivot <: PivotingStrategy end - -""" - RowNonZero - -First non-zero element in the remaining rows is chosen as the pivot element. - -Beware that for floating-point matrices, the resulting LU algorithm is numerically unstable -— this strategy is mainly useful for comparison to hand calculations (which typically use -this strategy) or for other algebraic types (e.g. rational numbers) not susceptible to -roundoff errors. Otherwise, the default `RowMaximum` pivoting strategy should be generally -preferred in Gaussian elimination. - -Note that the [element type](@ref eltype) of the matrix must admit an [`iszero`](@ref) -method. -""" -struct RowNonZero <: PivotingStrategy end - -""" - RowMaximum - -A row (and potentially also column) pivot is chosen based on a maximum property. -This is the default strategy for LU factorization and for pivoted Cholesky factorization -(though [`NoPivot`] is the default for [`cholesky`](@ref)). - -In the LU case, the maximum-magnitude element within the current column in the remaining -rows is chosen as the pivot element. This is sometimes referred to as the "partial -pivoting" algorithm. In this case, the [element type](@ref eltype) of the matrix must admit -an [`abs`](@ref) method, whose result type must admit a [`<`](@ref) method. - -In the Cholesky case, the maximal element among the remaining diagonal elements is -chosen as the pivot element. This is sometimes referred to as the "diagonal pivoting" -algorithm, and leads to _complete pivoting_ (i.e., of both rows and columns by the same -permutation). In this case, the (real part of the) [element type](@ref eltype) of the -matrix must admit a [`<`](@ref) method. -""" -struct RowMaximum <: PivotingStrategy end - -""" - ColumnNorm - -The column with the maximum norm is used for subsequent computation. This is used for -pivoted QR factorization. 
- -Note that the [element type](@ref eltype) of the matrix must admit [`norm`](@ref) and -[`abs`](@ref) methods, whose respective result types must admit a [`<`](@ref) method. -""" -struct ColumnNorm <: PivotingStrategy end - -using Base: DimOrInd - -# Check that stride of matrix/vector is 1 -# Writing like this to avoid splatting penalty when called with multiple arguments, -# see PR 16416 -""" - stride1(A) -> Int - -Return the distance between successive array elements -in dimension 1 in units of element size. - -# Examples -```jldoctest -julia> A = [1,2,3,4] -4-element Vector{Int64}: - 1 - 2 - 3 - 4 - -julia> LinearAlgebra.stride1(A) -1 - -julia> B = view(A, 2:2:4) -2-element view(::Vector{Int64}, 2:2:4) with eltype Int64: - 2 - 4 - -julia> LinearAlgebra.stride1(B) -2 -``` -""" -stride1(x) = stride(x,1) -stride1(x::Array) = 1 -stride1(x::DenseArray) = stride(x, 1)::Int - -@inline chkstride1(A...) = _chkstride1(true, A...) -@noinline _chkstride1(ok::Bool) = ok || error("matrix does not have contiguous columns") -@inline _chkstride1(ok::Bool, A, B...) = _chkstride1(ok & (stride1(A) == 1), B...) - -# Subtypes of StridedArrays that satisfy certain properties on their strides -# Similar to Base.RangeIndex, but only include range types where the step is statically known to be non-zero -const IncreasingRangeIndex = Union{BitInteger, AbstractUnitRange{<:BitInteger}} -const NonConstRangeIndex = Union{IncreasingRangeIndex, StepRange{<:BitInteger, <:BitInteger}} -# StridedArray subtypes for which _fullstride2(::T) === true is known from the type -DenseOrStridedReshapedReinterpreted{T,N} = - Union{DenseArray{T,N}, Base.StridedReshapedArray{T,N}, Base.StridedReinterpretArray{T,N}} -# Similar to Base.StridedSubArray, except with a NonConstRangeIndex instead of a RangeIndex -StridedSubArrayStandard{T,N,A<:DenseOrStridedReshapedReinterpreted, - I<:Tuple{Vararg{Union{NonConstRangeIndex, Base.ReshapedUnitRange, Base.AbstractCartesianIndex}}}} = Base.StridedSubArray{T,N,A,I} -StridedArrayStdSubArray{T,N} = Union{DenseOrStridedReshapedReinterpreted{T,N},StridedSubArrayStandard{T,N}} -# Similar to Base.StridedSubArray, except with a IncreasingRangeIndex instead of a RangeIndex -StridedSubArrayIncr{T,N,A<:DenseOrStridedReshapedReinterpreted, - I<:Tuple{Vararg{Union{IncreasingRangeIndex, Base.ReshapedUnitRange, Base.AbstractCartesianIndex}}}} = Base.StridedSubArray{T,N,A,I} -StridedArrayStdSubArrayIncr{T,N} = Union{DenseOrStridedReshapedReinterpreted{T,N},StridedSubArrayIncr{T,N}} -# These subarrays have a stride of 1 along the first dimension -StridedSubArrayAUR{T,N,A<:DenseOrStridedReshapedReinterpreted, - I<:Tuple{AbstractUnitRange{<:BitInteger}}} = Base.StridedSubArray{T,N,A,I} -StridedArrayStride1{T,N} = Union{DenseOrStridedReshapedReinterpreted{T,N},StridedSubArrayIncr{T,N}} -# StridedMatrixStride1 may typically be forwarded to LAPACK methods -StridedMatrixStride1{T} = StridedArrayStride1{T,2} - -""" - LinearAlgebra.checksquare(A) - -Check that a matrix is square, then return its common dimension. -For multiple arguments, return a vector. - -# Examples -```jldoctest -julia> A = fill(1, (4,4)); B = fill(1, (5,5)); - -julia> LinearAlgebra.checksquare(A, B) -2-element Vector{Int64}: - 4 - 5 -``` -""" -function checksquare(A) - m,n = size(A) - m == n || throw(DimensionMismatch(lazy"matrix is not square: dimensions are $(size(A))")) - m -end - -function checksquare(A...) 
- sizes = Int[] - for a in A - size(a,1)==size(a,2) || throw(DimensionMismatch(lazy"matrix is not square: dimensions are $(size(a))")) - push!(sizes, size(a,1)) - end - return sizes -end - -function char_uplo(uplo::Symbol) - if uplo === :U - return 'U' - elseif uplo === :L - return 'L' - else - throw_uplo() - end -end - -function sym_uplo(uplo::Char) - if uplo == 'U' - return :U - elseif uplo == 'L' - return :L - else - throw_uplo() - end -end - -@noinline throw_uplo() = throw(ArgumentError("uplo argument must be either :U (upper) or :L (lower)")) - -""" - ldiv!(Y, A, B) -> Y - -Compute `A \\ B` in-place and store the result in `Y`, returning the result. - -The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholesky`](@ref)). -The reason for this is that factorization itself is both expensive and typically allocates memory -(although it can also be done in-place via, e.g., [`lu!`](@ref)), -and performance-critical situations requiring `ldiv!` usually also require fine-grained -control over the factorization of `A`. - -!!! note - Certain structured matrix types, such as `Diagonal` and `UpperTriangular`, are permitted, as - these are already in a factorized form - -# Examples -```jldoctest -julia> A = [1 2.2 4; 3.1 0.2 3; 4 1 2]; - -julia> X = [1; 2.5; 3]; - -julia> Y = zero(X); - -julia> ldiv!(Y, qr(A), X); - -julia> Y ≈ A\\X -true -``` -""" -ldiv!(Y, A, B) - -""" - ldiv!(A, B) - -Compute `A \\ B` in-place and overwriting `B` to store the result. - -The argument `A` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholesky`](@ref)). -The reason for this is that factorization itself is both expensive and typically allocates memory -(although it can also be done in-place via, e.g., [`lu!`](@ref)), -and performance-critical situations requiring `ldiv!` usually also require fine-grained -control over the factorization of `A`. - -!!! note - Certain structured matrix types, such as `Diagonal` and `UpperTriangular`, are permitted, as - these are already in a factorized form - -# Examples -```jldoctest -julia> A = [1 2.2 4; 3.1 0.2 3; 4 1 2]; - -julia> X = [1; 2.5; 3]; - -julia> Y = copy(X); - -julia> ldiv!(qr(A), X); - -julia> X ≈ A\\Y -true -``` -""" -ldiv!(A, B) - - -""" - rdiv!(A, B) - -Compute `A / B` in-place and overwriting `A` to store the result. - -The argument `B` should *not* be a matrix. Rather, instead of matrices it should be a -factorization object (e.g. produced by [`factorize`](@ref) or [`cholesky`](@ref)). -The reason for this is that factorization itself is both expensive and typically allocates memory -(although it can also be done in-place via, e.g., [`lu!`](@ref)), -and performance-critical situations requiring `rdiv!` usually also require fine-grained -control over the factorization of `B`. - -!!! note - Certain structured matrix types, such as `Diagonal` and `UpperTriangular`, are permitted, as - these are already in a factorized form -""" -rdiv!(A, B) - -""" - copy_oftype(A, T) - -Creates a copy of `A` with eltype `T`. No assertions about mutability of the result are -made. When `eltype(A) == T`, then this calls `copy(A)` which may be overloaded for custom -array types. Otherwise, this calls `convert(AbstractArray{T}, A)`. 
-""" -copy_oftype(A::AbstractArray{T}, ::Type{T}) where {T} = copy(A) -copy_oftype(A::AbstractArray{T,N}, ::Type{S}) where {T,N,S} = convert(AbstractArray{S,N}, A) - -""" - copymutable_oftype(A, T) - -Copy `A` to a mutable array with eltype `T` based on `similar(A, T)`. - -The resulting matrix typically has similar algebraic structure as `A`. For -example, supplying a tridiagonal matrix results in another tridiagonal matrix. -In general, the type of the output corresponds to that of `similar(A, T)`. - -In LinearAlgebra, mutable copies (of some desired eltype) are created to be passed -to in-place algorithms (such as `ldiv!`, `rdiv!`, `lu!` and so on). If the specific -algorithm is known to preserve the algebraic structure, use `copymutable_oftype`. -If the algorithm is known to return a dense matrix (or some wrapper backed by a dense -matrix), then use `copy_similar`. - -See also: `Base.copymutable`, `copy_similar`. -""" -copymutable_oftype(A::AbstractArray, ::Type{S}) where {S} = copyto!(similar(A, S), A) - -""" - copy_similar(A, T) - -Copy `A` to a mutable array with eltype `T` based on `similar(A, T, size(A))`. - -Compared to `copymutable_oftype`, the result can be more flexible. In general, the type -of the output corresponds to that of the three-argument method `similar(A, T, size(A))`. - -See also: `copymutable_oftype`. -""" -copy_similar(A::AbstractArray, ::Type{T}) where {T} = copyto!(similar(A, T, size(A)), A) - -""" - BandIndex(band, index) - -Represent a Cartesian index as a linear index along a band. -This type is primarily meant to index into a specific band without branches, -so, for best performance, `band` should be a compile-time constant. -""" -struct BandIndex - band :: Int - index :: Int -end -function _cartinds(b::BandIndex) - (; band, index) = b - bandg0 = max(band,0) - row = index - band + bandg0 - col = index + bandg0 - CartesianIndex(row, col) -end -function Base.to_indices(A, inds, t::Tuple{BandIndex, Vararg{Any}}) - to_indices(A, inds, (_cartinds(first(t)), Base.tail(t)...)) -end -function Base.checkbounds(::Type{Bool}, A::AbstractMatrix, b::BandIndex) - checkbounds(Bool, A, _cartinds(b)) -end -function Base.checkbounds(A::Broadcasted, b::BandIndex) - checkbounds(A, _cartinds(b)) -end - -include("adjtrans.jl") -include("transpose.jl") - -include("exceptions.jl") -include("generic.jl") - -include("blas.jl") -include("matmul.jl") -include("lapack.jl") - -include("dense.jl") -include("tridiag.jl") -include("triangular.jl") - -include("factorization.jl") -include("eigen.jl") -include("svd.jl") -include("symmetric.jl") -include("cholesky.jl") -include("lu.jl") -include("bunchkaufman.jl") -include("diagonal.jl") -include("symmetriceigen.jl") -include("bidiag.jl") -include("uniformscaling.jl") -include("qr.jl") -include("lq.jl") -include("hessenberg.jl") -include("abstractq.jl") -include("givens.jl") -include("special.jl") -include("bitarray.jl") -include("ldlt.jl") -include("schur.jl") -include("structuredbroadcast.jl") -include("deprecated.jl") - -const ⋅ = dot -const × = cross -export ⋅, × - -# Separate the char corresponding to the wrapper from that corresponding to the uplo -# In most cases, the former may be constant-propagated, while the latter usually can't be. -# This improves type-inference in wrap for Symmetric/Hermitian matrices -# A WrapperChar is equivalent to `isuppertri ? 
uppercase(wrapperchar) : lowercase(wrapperchar)` -struct WrapperChar <: AbstractChar - wrapperchar :: Char - isuppertri :: Bool -end -function Base.Char(w::WrapperChar) - T = w.wrapperchar - if T ∈ ('N', 'T', 'C') # known cases where isuppertri is true - T - else - _isuppertri(w) ? uppercase(T) : lowercase(T) - end -end -Base.codepoint(w::WrapperChar) = codepoint(Char(w)) -WrapperChar(n::UInt32) = WrapperChar(Char(n)) -WrapperChar(c::Char) = WrapperChar(c, isuppercase(c)) -# We extract the wrapperchar so that the result may be constant-propagated -# This doesn't return a value of the same type on purpose -Base.uppercase(w::WrapperChar) = uppercase(w.wrapperchar) -Base.lowercase(w::WrapperChar) = lowercase(w.wrapperchar) -_isuppertri(w::WrapperChar) = w.isuppertri -_isuppertri(x::AbstractChar) = isuppercase(x) # compatibility with earlier Char-based implementation -_uplosym(x) = _isuppertri(x) ? (:U) : (:L) - -wrapper_char(::AbstractArray) = 'N' -wrapper_char(::Adjoint) = 'C' -wrapper_char(::Adjoint{<:Real}) = 'T' -wrapper_char(::Transpose) = 'T' -wrapper_char(A::Hermitian) = WrapperChar('H', A.uplo == 'U') -wrapper_char(A::Hermitian{<:Real}) = WrapperChar('S', A.uplo == 'U') -wrapper_char(A::Symmetric) = WrapperChar('S', A.uplo == 'U') - -wrapper_char_NTC(A::AbstractArray) = uppercase(wrapper_char(A)) == 'N' -wrapper_char_NTC(A::Union{StridedArray, Adjoint, Transpose}) = true -wrapper_char_NTC(A::Union{Symmetric, Hermitian}) = false - -Base.@constprop :aggressive function wrap(A::AbstractVecOrMat, tA::AbstractChar) - # merge the result of this before return, so that we can type-assert the return such - # that even if the tmerge is inaccurate, inference can still identify that the - # `_generic_matmatmul` signature still matches and doesn't require missing backedges - tA_uc = uppercase(tA) - B = if tA_uc == 'N' - A - elseif tA_uc == 'T' - transpose(A) - elseif tA_uc == 'C' - adjoint(A) - elseif tA_uc == 'H' - Hermitian(A, _uplosym(tA)) - elseif tA_uc == 'S' - Symmetric(A, _uplosym(tA)) - end - return B::AbstractVecOrMat -end - -_unwrap(A::AbstractVecOrMat) = A - -## convenience methods -## return only the solution of a least squares problem while avoiding promoting -## vectors to matrices. -_cut_B(x::AbstractVector, r::UnitRange) = length(x) > length(r) ? x[r] : x -_cut_B(X::AbstractMatrix, r::UnitRange) = size(X, 1) > length(r) ? X[r,:] : X - -# SymTridiagonal ev can be the same length as dv, but the last element is -# ignored. However, some methods can fail if they read the entire ev -# rather than just the meaningful elements. This is a helper function -# for getting only the meaningful elements of ev. 
See #41089 -_evview(S::SymTridiagonal) = @view S.ev[begin:begin + length(S.dv) - 2] - -## append right hand side with zeros if necessary -_zeros(::Type{T}, b::AbstractVector, n::Integer) where {T} = zeros(T, max(length(b), n)) -_zeros(::Type{T}, B::AbstractMatrix, n::Integer) where {T} = zeros(T, max(size(B, 1), n), size(B, 2)) - -# append a zero element / drop the last element -_pushzero(A) = (B = similar(A, length(A)+1); @inbounds B[begin:end-1] .= A; @inbounds B[end] = zero(eltype(B)); B) -_droplast!(A) = deleteat!(A, lastindex(A)) - -# destination type for matmul -matprod_dest(A::StructuredMatrix, B::StructuredMatrix, TS) = similar(B, TS, size(B)) -matprod_dest(A, B::StructuredMatrix, TS) = similar(A, TS, size(A)) -matprod_dest(A::StructuredMatrix, B, TS) = similar(B, TS, size(B)) -# diagonal is special, as it does not change the structure of the other matrix -# we call similar without a size to preserve the type of the matrix wherever possible -# reroute through _matprod_dest_diag to allow speicalizing on the type of the StructuredMatrix -# without defining methods for both the orderings -matprod_dest(A::StructuredMatrix, B::Diagonal, TS) = _matprod_dest_diag(A, TS) -matprod_dest(A::Diagonal, B::StructuredMatrix, TS) = _matprod_dest_diag(B, TS) -matprod_dest(A::Diagonal, B::Diagonal, TS) = _matprod_dest_diag(B, TS) -_matprod_dest_diag(A, TS) = similar(A, TS) -_matprod_dest_diag(A::UnitUpperTriangular, TS) = UpperTriangular(similar(parent(A), TS)) -_matprod_dest_diag(A::UnitLowerTriangular, TS) = LowerTriangular(similar(parent(A), TS)) -function _matprod_dest_diag(A::SymTridiagonal, TS) - n = size(A, 1) - ev = similar(A, TS, max(0, n-1)) - dv = similar(A, TS, n) - Tridiagonal(ev, dv, similar(ev)) -end - -# Special handling for adj/trans vec -matprod_dest(A::Diagonal, B::AdjOrTransAbsVec, TS) = similar(B, TS) - -# General fallback definition for handling under- and overdetermined system as well as square problems -# While this definition is pretty general, it does e.g. promote to common element type of lhs and rhs -# which is required by LAPACK but not SuiteSparse which allows real-complex solves in some cases. Hence, -# we restrict this method to only the LAPACK factorizations in LinearAlgebra. -# The definition is put here since it explicitly references all the Factorization structs so it has -# to be located after all the files that define the structs. -const LAPACKFactorizations{T,S} = Union{ - BunchKaufman{T,S}, - Cholesky{T,S}, - LQ{T,S}, - LU{T,S}, - QR{T,S}, - QRCompactWY{T,S}, - QRPivoted{T,S}, - SVD{T,<:Real,S}} - -(\)(F::LAPACKFactorizations, B::AbstractVecOrMat) = ldiv(F, B) -(\)(F::AdjointFactorization{<:Any,<:LAPACKFactorizations}, B::AbstractVecOrMat) = ldiv(F, B) -(\)(F::TransposeFactorization{<:Any,<:LU}, B::AbstractVecOrMat) = ldiv(F, B) - -function ldiv(F::Factorization, B::AbstractVecOrMat) - require_one_based_indexing(B) - m, n = size(F) - if m != size(B, 1) - throw(DimensionMismatch("arguments must have the same number of rows")) - end - - TFB = typeof(oneunit(eltype(B)) / oneunit(eltype(F))) - FF = Factorization{TFB}(F) - - # For wide problem we (often) compute a minimum norm solution. The solution - # is larger than the right hand side so we use size(F, 2). - BB = _zeros(TFB, B, n) - - if n > size(B, 1) - # Underdetermined - copyto!(view(BB, 1:m, :), B) - else - copyto!(BB, B) - end - - ldiv!(FF, BB) - - # For tall problems, we compute a least squares solution so only part - # of the rhs should be returned from \ while ldiv! 
uses (and returns) - # the complete rhs - return _cut_B(BB, 1:n) -end -# disambiguate -(\)(F::LAPACKFactorizations{T}, B::VecOrMat{Complex{T}}) where {T<:BlasReal} = - @invoke \(F::Factorization{T}, B::VecOrMat{Complex{T}}) -(\)(F::AdjointFactorization{T,<:LAPACKFactorizations}, B::VecOrMat{Complex{T}}) where {T<:BlasReal} = - ldiv(F, B) -(\)(F::TransposeFactorization{T,<:LU}, B::VecOrMat{Complex{T}}) where {T<:BlasReal} = - ldiv(F, B) - -""" - LinearAlgebra.peakflops(n::Integer=4096; eltype::DataType=Float64, ntrials::Integer=3, parallel::Bool=false) - -`peakflops` computes the peak flop rate of the computer by using double precision -[`gemm!`](@ref LinearAlgebra.BLAS.gemm!). By default, if no arguments are specified, it -multiplies two `Float64` matrices of size `n x n`, where `n = 4096`. If the underlying BLAS is using -multiple threads, higher flop rates are realized. The number of BLAS threads can be set with -[`BLAS.set_num_threads(n)`](@ref). - -If the keyword argument `eltype` is provided, `peakflops` will construct matrices with elements -of type `eltype` for calculating the peak flop rate. - -By default, `peakflops` will use the best timing from 3 trials. If the `ntrials` keyword argument -is provided, `peakflops` will use those many trials for picking the best timing. - -If the keyword argument `parallel` is set to `true`, `peakflops` is run in parallel on all -the worker processors. The flop rate of the entire parallel computer is returned. When -running in parallel, only 1 BLAS thread is used. The argument `n` still refers to the size -of the problem that is solved on each processor. - -!!! compat "Julia 1.1" - This function requires at least Julia 1.1. In Julia 1.0 it is available from - the standard library `InteractiveUtils`. -""" -function peakflops(n::Integer=4096; eltype::DataType=Float64, ntrials::Integer=3, parallel::Bool=false) - t = zeros(Float64, ntrials) - for i=1:ntrials - a = ones(eltype,n,n) - t[i] = @elapsed a2 = a*a - @assert a2[1,1] == n - end - - if parallel - let Distributed = Base.require(Base.PkgId( - Base.UUID((0x8ba89e20_285c_5b6f, 0x9357_94700520ee1b)), "Distributed")) - nworkers = @invokelatest Distributed.nworkers() - results = @invokelatest Distributed.pmap(peakflops, fill(n, nworkers)) - return sum(results) - end - else - return 2*Float64(n)^3 / minimum(t) - end -end - - -function versioninfo(io::IO=stdout) - indent = " " - config = BLAS.get_config() - build_flags = join(string.(config.build_flags), ", ") - println(io, "BLAS: ", BLAS.libblastrampoline, " (", build_flags, ")") - for lib in config.loaded_libs - interface = uppercase(string(lib.interface)) - println(io, indent, "--> ", lib.libname, " (", interface, ")") - end - println(io, "Threading:") - println(io, indent, "Threads.threadpoolsize() = ", Threads.threadpoolsize()) - println(io, indent, "Threads.maxthreadid() = ", Base.Threads.maxthreadid()) - println(io, indent, "LinearAlgebra.BLAS.get_num_threads() = ", BLAS.get_num_threads()) - println(io, "Relevant environment variables:") - env_var_names = [ - "JULIA_NUM_THREADS", - "MKL_DYNAMIC", - "MKL_NUM_THREADS", - # OpenBLAS has a hierarchy of environment variables for setting the - # number of threads, see - # https://github.com/xianyi/OpenBLAS/blob/c43ec53bdd00d9423fc609d7b7ecb35e7bf41b85/README.md#setting-the-number-of-threads-using-environment-variables - ("OPENBLAS_NUM_THREADS", "GOTO_NUM_THREADS", "OMP_NUM_THREADS"), - ] - printed_at_least_one_env_var = false - print_var(io, indent, name) = println(io, indent, name, " = ", ENV[name]) - 
for name in env_var_names - if name isa Tuple - # If `name` is a Tuple, then find the first environment which is - # defined, and disregard the following ones. - for nm in name - if haskey(ENV, nm) - print_var(io, indent, nm) - printed_at_least_one_env_var = true - break - end - end - else - if haskey(ENV, name) - print_var(io, indent, name) - printed_at_least_one_env_var = true - end - end - end - if !printed_at_least_one_env_var - println(io, indent, "[none]") - end - return nothing -end - -function __init__() - try - verbose = parse(Bool, get(ENV, "LBT_VERBOSE", "false")) - BLAS.lbt_forward(OpenBLAS_jll.libopenblas_path; clear=true, verbose) - BLAS.check() - catch ex - Base.showerror_nostdio(ex, "WARNING: Error during initialization of module LinearAlgebra") - end - # register a hook to disable BLAS threading - Base.at_disable_library_threading(() -> BLAS.set_num_threads(1)) - - # https://github.com/xianyi/OpenBLAS/blob/c43ec53bdd00d9423fc609d7b7ecb35e7bf41b85/README.md#setting-the-number-of-threads-using-environment-variables - if !haskey(ENV, "OPENBLAS_NUM_THREADS") && !haskey(ENV, "GOTO_NUM_THREADS") && !haskey(ENV, "OMP_NUM_THREADS") - @static if Sys.isapple() && Base.BinaryPlatforms.arch(Base.BinaryPlatforms.HostPlatform()) == "aarch64" - BLAS.set_num_threads(max(1, @ccall(jl_effective_threads()::Cint))) - else - BLAS.set_num_threads(max(1, @ccall(jl_effective_threads()::Cint) ÷ 2)) - end - end -end - -end # module LinearAlgebra diff --git a/stdlib/LinearAlgebra/src/abstractq.jl b/stdlib/LinearAlgebra/src/abstractq.jl deleted file mode 100644 index 0fa2233b89593..0000000000000 --- a/stdlib/LinearAlgebra/src/abstractq.jl +++ /dev/null @@ -1,642 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -abstract type AbstractQ{T} end - -struct AdjointQ{T,S<:AbstractQ{T}} <: AbstractQ{T} - Q::S -end - -parent(adjQ::AdjointQ) = adjQ.Q -eltype(::Type{<:AbstractQ{T}}) where {T} = T -Base.eltypeof(Q::AbstractQ) = eltype(Q) -ndims(::AbstractQ) = 2 - -# inversion/adjoint/transpose -inv(Q::AbstractQ) = Q' -adjoint(Q::AbstractQ) = AdjointQ(Q) -transpose(Q::AbstractQ{<:Real}) = AdjointQ(Q) -transpose(Q::AbstractQ) = error("transpose not implemented for $(typeof(Q)). Consider using adjoint instead of transpose.") -adjoint(adjQ::AdjointQ) = adjQ.Q - -(^)(Q::AbstractQ, p::Integer) = p < 0 ? power_by_squaring(inv(Q), -p) : power_by_squaring(Q, p) -@inline Base.literal_pow(::typeof(^), Q::AbstractQ, ::Val{1}) = Q -@inline Base.literal_pow(::typeof(^), Q::AbstractQ, ::Val{-1}) = inv(Q) - -# promotion with AbstractMatrix, at least for equal eltypes -promote_rule(::Type{<:AbstractMatrix{T}}, ::Type{<:AbstractQ{T}}) where {T} = - (@inline; Union{AbstractMatrix{T},AbstractQ{T}}) - -# conversion -# the following eltype promotion should be defined for each subtype `QType` -# convert(::Type{AbstractQ{T}}, Q::QType) where {T} = QType{T}(Q) -# and then care has to be taken that -# QType{T}(Q::QType{T}) where T = ... -# is implemented as a no-op - -# the following conversion method ensures functionality when the above method is not defined -# (as for HessenbergQ), but no eltype conversion is required either (say, in multiplication) -convert(::Type{AbstractQ{T}}, Q::AbstractQ{T}) where {T} = Q -convert(::Type{AbstractQ{T}}, adjQ::AdjointQ{T}) where {T} = adjQ -convert(::Type{AbstractQ{T}}, adjQ::AdjointQ) where {T} = convert(AbstractQ{T}, adjQ.Q)' - -# ... 
to matrix -collect(Q::AbstractQ) = copyto!(Matrix{eltype(Q)}(undef, size(Q)), Q) -Matrix{T}(Q::AbstractQ) where {T} = convert(Matrix{T}, Q*I) # generic fallback, yields square matrix -Matrix{T}(adjQ::AdjointQ{S}) where {T,S} = convert(Matrix{T}, lmul!(adjQ, Matrix{S}(I, size(adjQ)))) -Matrix(Q::AbstractQ{T}) where {T} = Matrix{T}(Q) -Array{T}(Q::AbstractQ) where {T} = Matrix{T}(Q) -Array(Q::AbstractQ) = Matrix(Q) -convert(::Type{T}, Q::AbstractQ) where {T<:AbstractArray} = T(Q) -# legacy -@deprecate(convert(::Type{AbstractMatrix{T}}, Q::AbstractQ) where {T}, - convert(LinearAlgebra.AbstractQ{T}, Q)) - -function size(Q::AbstractQ, dim::Integer) - if dim < 1 - throw(BoundsError()) - elseif dim <= 2 # && 1 <= dim - return size(Q)[dim] - else # 2 < dim - return 1 - end -end -size(adjQ::AdjointQ) = reverse(size(adjQ.Q)) - -# comparison -(==)(Q::AbstractQ, A::AbstractMatrix) = lmul!(Q, Matrix{eltype(Q)}(I, size(A))) == A -(==)(A::AbstractMatrix, Q::AbstractQ) = Q == A -(==)(Q::AbstractQ, P::AbstractQ) = Matrix(Q) == Matrix(P) -isapprox(Q::AbstractQ, A::AbstractMatrix; kwargs...) = - isapprox(lmul!(Q, Matrix{eltype(Q)}(I, size(A))), A, kwargs...) -isapprox(A::AbstractMatrix, Q::AbstractQ; kwargs...) = isapprox(Q, A, kwargs...) -isapprox(Q::AbstractQ, P::AbstractQ; kwargs...) = isapprox(Matrix(Q), Matrix(P), kwargs...) - -# pseudo-array behaviour, required for indexing with `begin` or `end` -axes(Q::AbstractQ) = map(Base.oneto, size(Q)) -axes(Q::AbstractQ, d::Integer) = d in (1, 2) ? axes(Q)[d] : Base.OneTo(1) - -copymutable(Q::AbstractQ{T}) where {T} = lmul!(Q, Matrix{T}(I, size(Q))) -copy(Q::AbstractQ) = copymutable(Q) - -# legacy compatibility -similar(Q::AbstractQ) = similar(Q, eltype(Q), size(Q)) -similar(Q::AbstractQ, ::Type{T}) where {T} = similar(Q, T, size(Q)) -similar(Q::AbstractQ, size::DimOrInd...) = similar(Q, eltype(Q), size...) -similar(Q::AbstractQ, ::Type{T}, size::DimOrInd...) where {T} = similar(Q, T, Base.to_shape(size)) -similar(Q::AbstractQ, size::Tuple{Vararg{DimOrInd}}) = similar(Q, eltype(Q), Base.to_shape(size)) -similar(Q::AbstractQ, ::Type{T}, size::NTuple{N,Integer}) where {T,N} = Array{T,N}(undef, size) - -# getindex -@inline function getindex(Q::AbstractQ, inds...) - @boundscheck Base.checkbounds_indices(Bool, axes(Q), inds) || Base.throw_boundserror(Q, inds) - return _getindex(Q, inds...) -end -@inline getindex(Q::AbstractQ, ::Colon) = copymutable(Q)[:] -@inline getindex(Q::AbstractQ, ::Colon, ::Colon) = copy(Q) - -@inline _getindex(Q::AbstractQ, inds...) = @inbounds copymutable(Q)[inds...] -@inline function _getindex(Q::AbstractQ, ::Colon, J::AbstractVector{<:Integer}) - Y = zeros(eltype(Q), size(Q, 2), length(J)) - @inbounds for (i,j) in enumerate(J) - Y[j,i] = oneunit(eltype(Q)) - end - lmul!(Q, Y) -end -@inline _getindex(Q::AbstractQ, I::AbstractVector{Int}, J::AbstractVector{Int}) = @inbounds Q[:,J][I,:] -@inline function _getindex(Q::AbstractQ, ::Colon, j::Int) - y = zeros(eltype(Q), size(Q, 2)) - y[j] = oneunit(eltype(Q)) - lmul!(Q, y) -end -@inline _getindex(Q::AbstractQ, i::Int, j::Int) = @inbounds Q[:,j][i] - -# needed because AbstractQ does not subtype AbstractMatrix -qr(Q::AbstractQ{T}, arg...; kwargs...) where {T} = qr!(Matrix{_qreltype(T)}(Q), arg...; kwargs...) -lq(Q::AbstractQ{T}, arg...; kwargs...) where {T} = lq!(Matrix{lq_eltype(T)}(Q), arg...; kwargs...) -hessenberg(Q::AbstractQ{T}) where {T} = hessenberg!(Matrix{eigtype(T)}(Q)) - -# needed when used interchangeably with AbstractMatrix (analogous to views of ranges) -view(A::AbstractQ, I...) 
= getindex(A, I...)
-
-# specialization avoiding the fallback using slow `getindex`
-function copyto!(dest::AbstractMatrix, src::AbstractQ)
-    copyto!(dest, I)
-    lmul!(src, dest)
-end
-# needed to resolve method ambiguities
-function copyto!(dest::PermutedDimsArray{T,2,perm}, src::AbstractQ) where {T,perm}
-    if perm == (1, 2)
-        copyto!(parent(dest), src)
-    else
-        @assert perm == (2, 1) # there are no other permutations of two indices
-        if T <: Real
-            copyto!(parent(dest), I)
-            lmul!(src', parent(dest))
-        else
-            # LAPACK does not offer inplace lmul!(transpose(Q), B) for complex Q
-            tmp = similar(parent(dest))
-            copyto!(tmp, I)
-            rmul!(tmp, src)
-            permutedims!(parent(dest), tmp, (2, 1))
-        end
-    end
-    return dest
-end
-# used in concatenations: Base.__cat_offset1!
-Base._copy_or_fill!(A, inds, Q::AbstractQ) = (A[inds...] = collect(Q))
-# overloads of helper functions
-Base.cat_size(A::AbstractQ) = size(A)
-Base.cat_size(A::AbstractQ, d) = size(A, d)
-Base.cat_length(a::AbstractQ) = prod(size(a))
-Base.cat_ndims(a::AbstractQ) = ndims(a)
-Base.cat_indices(A::AbstractQ, d) = axes(A, d)
-Base.cat_similar(A::AbstractQ, T::Type, shape::Tuple) = Array{T}(undef, shape)
-Base.cat_similar(A::AbstractQ, T::Type, shape::Vector) = Array{T}(undef, shape...)
-
-function show(io::IO, ::MIME{Symbol("text/plain")}, Q::AbstractQ)
-    print(io, Base.dims2string(size(Q)), ' ', summary(Q))
-end
-
-# multiplication
-# generically, treat AbstractQ like a matrix with its definite size
-qsize_check(Q::AbstractQ, B::AbstractVecOrMat) =
-    size(Q, 2) == size(B, 1) ||
-    throw(DimensionMismatch(lazy"second dimension of Q, $(size(Q,2)), must coincide with first dimension of B, $(size(B,1))"))
-qsize_check(A::AbstractVecOrMat, Q::AbstractQ) =
-    size(A, 2) == size(Q, 1) ||
-    throw(DimensionMismatch(lazy"second dimension of A, $(size(A,2)), must coincide with first dimension of Q, $(size(Q,1))"))
-qsize_check(Q::AbstractQ, P::AbstractQ) =
-    size(Q, 2) == size(P, 1) ||
-    throw(DimensionMismatch(lazy"second dimension of A, $(size(Q,2)), must coincide with first dimension of B, $(size(P,1))"))
-
-# mimic the AbstractArray fallback
-*(Q::AbstractQ{<:Number}) = Q
-
-(*)(Q::AbstractQ, J::UniformScaling) = Q*J.λ
-function (*)(Q::AbstractQ, b::Number)
-    T = promote_type(eltype(Q), typeof(b))
-    lmul!(convert(AbstractQ{T}, Q), Matrix{T}(b*I, size(Q)))
-end
-function (*)(Q::AbstractQ, B::AbstractVector)
-    T = promote_type(eltype(Q), eltype(B))
-    qsize_check(Q, B)
-    mul!(similar(B, T, size(Q, 1)), convert(AbstractQ{T}, Q), B)
-end
-function (*)(Q::AbstractQ, B::AbstractMatrix)
-    T = promote_type(eltype(Q), eltype(B))
-    qsize_check(Q, B)
-    mul!(similar(B, T, (size(Q, 1), size(B, 2))), convert(AbstractQ{T}, Q), B)
-end
-
-(*)(J::UniformScaling, Q::AbstractQ) = J.λ*Q
-function (*)(a::Number, Q::AbstractQ)
-    T = promote_type(typeof(a), eltype(Q))
-    rmul!(Matrix{T}(a*I, size(Q)), convert(AbstractQ{T}, Q))
-end
-function (*)(A::AbstractVector, Q::AbstractQ)
-    T = promote_type(eltype(A), eltype(Q))
-    qsize_check(A, Q)
-    return mul!(similar(A, T, length(A)), A, convert(AbstractQ{T}, Q))
-end
-function (*)(A::AbstractMatrix, Q::AbstractQ)
-    T = promote_type(eltype(A), eltype(Q))
-    qsize_check(A, Q)
-    return mul!(similar(A, T, (size(A, 1), size(Q, 2))), A, convert(AbstractQ{T}, Q))
-end
-(*)(u::AdjointAbsVec, Q::AbstractQ) = (Q'u')'
-
-### Q*Q (including adjoints)
-(*)(Q::AbstractQ, P::AbstractQ) = Q * (P*I)
-
-### mul!
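The `*` methods above allocate an appropriately promoted destination and then forward to `mul!`. A minimal usage sketch of that pattern, assuming only a `Q` obtained from an ordinary `qr` factorization:

```julia
using LinearAlgebra

A = randn(5, 3)
Q = qr(A).Q          # an AbstractQ: applied via lmul!/rmul!, not materialized
B = randn(5, 2)
C = similar(B)
mul!(C, Q, B)        # copies B into C, then applies Q in place
@assert C ≈ Q * B    # the allocating `*` method gives the same result
```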
-function mul!(C::AbstractVecOrMat{T}, Q::AbstractQ{T}, B::Union{AbstractVecOrMat,AbstractQ}) where {T} - require_one_based_indexing(C, B) - mB, nB = size(B, 1), size(B, 2) - mC, nC = size(C, 1), size(C, 2) - qsize_check(Q, B) - nB != nC && throw(DimensionMismatch()) - if mB < mC - inds = CartesianIndices(axes(B)) - copyto!(view(C, inds), B) - C[CartesianIndices((mB+1:mC, axes(C, 2)))] .= zero(T) - return lmul!(Q, C) - else - return lmul!(Q, copyto!(C, B)) - end -end -function mul!(C::AbstractVecOrMat{T}, A::AbstractVecOrMat, Q::AbstractQ{T}) where {T} - require_one_based_indexing(C, A) - mA, nA = size(A, 1), size(A, 2) - mC, nC = size(C, 1), size(C, 2) - mA != mC && throw(DimensionMismatch()) - qsize_check(A, Q) - if nA < nC - inds = CartesianIndices(axes(A)) - copyto!(view(C, inds), A) - C[CartesianIndices((axes(C, 1), nA+1:nC))] .= zero(T) - return rmul!(C, Q) - else - return rmul!(copyto!(C, A), Q) - end -end - -### division -\(Q::AbstractQ, A::AbstractVecOrMat) = Q'*A -/(A::AbstractVecOrMat, Q::AbstractQ) = A*Q' -/(Q::AbstractQ, A::AbstractVecOrMat) = Matrix(Q) / A -ldiv!(Q::AbstractQ, A::AbstractVecOrMat) = lmul!(Q', A) -ldiv!(C::AbstractVecOrMat, Q::AbstractQ, A::AbstractVecOrMat) = mul!(C, Q', A) -rdiv!(A::AbstractVecOrMat, Q::AbstractQ) = rmul!(A, Q') - -logabsdet(Q::AbstractQ) = (d = det(Q); return log(abs(d)), sign(d)) -function logdet(A::AbstractQ) - d, s = logabsdet(A) - return d + log(s) -end - -########################################################### -################ Q from QR decompositions ################# -########################################################### - -""" - QRPackedQ <: LinearAlgebra.AbstractQ - -The orthogonal/unitary ``Q`` matrix of a QR factorization stored in [`QR`](@ref) or -[`QRPivoted`](@ref) format. -""" -struct QRPackedQ{T,S<:AbstractMatrix{T},C<:AbstractVector{T}} <: AbstractQ{T} - factors::S - τ::C - - function QRPackedQ{T,S,C}(factors, τ) where {T,S<:AbstractMatrix{T},C<:AbstractVector{T}} - require_one_based_indexing(factors, τ) - new{T,S,C}(factors, τ) - end -end -QRPackedQ(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T} = - QRPackedQ{T,typeof(factors),typeof(τ)}(factors, τ) -QRPackedQ{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} = - QRPackedQ(convert(AbstractMatrix{T}, factors), convert(AbstractVector{T}, τ)) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(QRPackedQ{T,S}(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T,S}, - QRPackedQ{T,S,typeof(τ)}(factors, τ), false) - -""" - QRCompactWYQ <: LinearAlgebra.AbstractQ - -The orthogonal/unitary ``Q`` matrix of a QR factorization stored in [`QRCompactWY`](@ref) -format. 
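For orientation, a small sketch (not one of the docstring's doctests) of how these implicit `Q` types arise in practice; it assumes a dense `Float64` input so that `qr` takes the compact WY path:

```julia
using LinearAlgebra

F = qr(randn(4, 3))                          # dense Float64 input: compact WY storage
@assert F.Q isa LinearAlgebra.QRCompactWYQ   # the implicit Q described above
@assert size(F.Q) == (4, 4)                  # square form of Q
@assert abs(det(F.Q)) ≈ 1                    # orthogonal, up to sign
```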
-""" -struct QRCompactWYQ{S, M<:AbstractMatrix{S}, C<:AbstractMatrix{S}} <: AbstractQ{S} - factors::M - T::C - - function QRCompactWYQ{S,M,C}(factors, T) where {S,M<:AbstractMatrix{S},C<:AbstractMatrix{S}} - require_one_based_indexing(factors, T) - new{S,M,C}(factors, T) - end -end -QRCompactWYQ(factors::AbstractMatrix{S}, T::AbstractMatrix{S}) where {S} = - QRCompactWYQ{S,typeof(factors),typeof(T)}(factors, T) -QRCompactWYQ{S}(factors::AbstractMatrix, T::AbstractMatrix) where {S} = - QRCompactWYQ(convert(AbstractMatrix{S}, factors), convert(AbstractMatrix{S}, T)) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(QRCompactWYQ{S,M}(factors::AbstractMatrix{S}, T::AbstractMatrix{S}) where {S,M}, - QRCompactWYQ{S,M,typeof(T)}(factors, T), false) - -QRPackedQ{T}(Q::QRPackedQ) where {T} = QRPackedQ(convert(AbstractMatrix{T}, Q.factors), convert(AbstractVector{T}, Q.τ)) -QRCompactWYQ{S}(Q::QRCompactWYQ) where {S} = QRCompactWYQ(convert(AbstractMatrix{S}, Q.factors), convert(AbstractMatrix{S}, Q.T)) - -# override generic square fallback -Matrix{T}(Q::Union{QRCompactWYQ{S},QRPackedQ{S}}) where {T,S} = - convert(Matrix{T}, lmul!(Q, Matrix{S}(I, size(Q, 1), min(size(Q.factors)...)))) -Matrix(Q::Union{QRCompactWYQ{S},QRPackedQ{S}}) where {S} = Matrix{S}(Q) - -convert(::Type{AbstractQ{T}}, Q::QRPackedQ) where {T} = QRPackedQ{T}(Q) -convert(::Type{AbstractQ{T}}, Q::QRCompactWYQ) where {T} = QRCompactWYQ{T}(Q) - -size(Q::Union{QRCompactWYQ,QRPackedQ}, dim::Integer) = - size(Q.factors, dim == 2 ? 1 : dim) -size(Q::Union{QRCompactWYQ,QRPackedQ}) = (n = size(Q.factors, 1); (n, n)) - -## Multiplication -### QB -lmul!(A::QRCompactWYQ{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = - LAPACK.gemqrt!('L', 'N', A.factors, A.T, B) -lmul!(A::QRPackedQ{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = - LAPACK.ormqr!('L', 'N', A.factors, A.τ, B) -function lmul!(A::QRPackedQ, B::AbstractVecOrMat) - require_one_based_indexing(B) - mA, nA = size(A.factors) - mB, nB = size(B,1), size(B,2) - if mA != mB - throw(DimensionMismatch(lazy"matrix A has dimensions ($mA,$nA) but B has dimensions ($mB, $nB)")) - end - Afactors = A.factors - @inbounds begin - for k = min(mA,nA):-1:1 - for j = 1:nB - vBj = B[k,j] - for i = k+1:mB - vBj += conj(Afactors[i,k])*B[i,j] - end - vBj = A.τ[k]*vBj - B[k,j] -= vBj - for i = k+1:mB - B[i,j] -= Afactors[i,k]*vBj - end - end - end - end - B -end - -### QcB -lmul!(adjQ::AdjointQ{<:Any,<:QRCompactWYQ{T,<:StridedMatrix}}, B::StridedVecOrMat{T}) where {T<:BlasReal} = - (Q = adjQ.Q; LAPACK.gemqrt!('L', 'T', Q.factors, Q.T, B)) -lmul!(adjQ::AdjointQ{<:Any,<:QRCompactWYQ{T,<:StridedMatrix}}, B::StridedVecOrMat{T}) where {T<:BlasComplex} = - (Q = adjQ.Q; LAPACK.gemqrt!('L', 'C', Q.factors, Q.T, B)) -lmul!(adjQ::AdjointQ{<:Any,<:QRPackedQ{T,<:StridedMatrix}}, B::StridedVecOrMat{T}) where {T<:BlasReal} = - (Q = adjQ.Q; LAPACK.ormqr!('L', 'T', Q.factors, Q.τ, B)) -lmul!(adjQ::AdjointQ{<:Any,<:QRPackedQ{T,<:StridedMatrix}}, B::StridedVecOrMat{T}) where {T<:BlasComplex} = - (Q = adjQ.Q; LAPACK.ormqr!('L', 'C', Q.factors, Q.τ, B)) -function lmul!(adjA::AdjointQ{<:Any,<:QRPackedQ}, B::AbstractVecOrMat) - require_one_based_indexing(B) - A = adjA.Q - mA, nA = size(A.factors) - mB, nB = size(B,1), size(B,2) - if mA != mB - throw(DimensionMismatch(lazy"matrix A has dimensions ($mA,$nA) but B has dimensions ($mB, $nB)")) - end - Afactors = A.factors - @inbounds begin - for k = 1:min(mA,nA) - for j = 1:nB - vBj = B[k,j] - for i = k+1:mB - vBj += 
conj(Afactors[i,k])*B[i,j] - end - vBj = conj(A.τ[k])*vBj - B[k,j] -= vBj - for i = k+1:mB - B[i,j] -= Afactors[i,k]*vBj - end - end - end - end - B -end - -### AQ -rmul!(A::StridedVecOrMat{T}, B::QRCompactWYQ{T,<:StridedMatrix}) where {T<:BlasFloat} = - LAPACK.gemqrt!('R', 'N', B.factors, B.T, A) -rmul!(A::StridedVecOrMat{T}, B::QRPackedQ{T,<:StridedMatrix}) where {T<:BlasFloat} = - LAPACK.ormqr!('R', 'N', B.factors, B.τ, A) -function rmul!(A::AbstractVecOrMat, Q::QRPackedQ) - require_one_based_indexing(A) - mQ, nQ = size(Q.factors) - mA, nA = size(A,1), size(A,2) - if nA != mQ - throw(DimensionMismatch(lazy"matrix A has dimensions ($mA,$nA) but matrix Q has dimensions ($mQ, $nQ)")) - end - Qfactors = Q.factors - @inbounds begin - for k = 1:min(mQ,nQ) - for i = 1:mA - vAi = A[i,k] - for j = k+1:mQ - vAi += A[i,j]*Qfactors[j,k] - end - vAi = vAi*Q.τ[k] - A[i,k] -= vAi - for j = k+1:nA - A[i,j] -= vAi*conj(Qfactors[j,k]) - end - end - end - end - A -end - -### AQc -rmul!(A::StridedVecOrMat{T}, adjQ::AdjointQ{<:Any,<:QRCompactWYQ{T}}) where {T<:BlasReal} = - (Q = adjQ.Q; LAPACK.gemqrt!('R', 'T', Q.factors, Q.T, A)) -rmul!(A::StridedVecOrMat{T}, adjQ::AdjointQ{<:Any,<:QRCompactWYQ{T}}) where {T<:BlasComplex} = - (Q = adjQ.Q; LAPACK.gemqrt!('R', 'C', Q.factors, Q.T, A)) -rmul!(A::StridedVecOrMat{T}, adjQ::AdjointQ{<:Any,<:QRPackedQ{T}}) where {T<:BlasReal} = - (Q = adjQ.Q; LAPACK.ormqr!('R', 'T', Q.factors, Q.τ, A)) -rmul!(A::StridedVecOrMat{T}, adjQ::AdjointQ{<:Any,<:QRPackedQ{T}}) where {T<:BlasComplex} = - (Q = adjQ.Q; LAPACK.ormqr!('R', 'C', Q.factors, Q.τ, A)) -function rmul!(A::AbstractVecOrMat, adjQ::AdjointQ{<:Any,<:QRPackedQ}) - require_one_based_indexing(A) - Q = adjQ.Q - mQ, nQ = size(Q.factors) - mA, nA = size(A,1), size(A,2) - if nA != mQ - throw(DimensionMismatch(lazy"matrix A has dimensions ($mA,$nA) but matrix Q has dimensions ($mQ, $nQ)")) - end - Qfactors = Q.factors - @inbounds begin - for k = min(mQ,nQ):-1:1 - for i = 1:mA - vAi = A[i,k] - for j = k+1:mQ - vAi += A[i,j]*Qfactors[j,k] - end - vAi = vAi*conj(Q.τ[k]) - A[i,k] -= vAi - for j = k+1:nA - A[i,j] -= vAi*conj(Qfactors[j,k]) - end - end - end - end - A -end - -det(Q::QRPackedQ) = _det_tau(Q.τ) -det(Q::QRCompactWYQ) = - prod(i -> _det_tau(diagview(Q.T[:, i:min(i + size(Q.T, 1), size(Q.T, 2))])), - 1:size(Q.T, 1):size(Q.T, 2)) - -# Compute `det` from the number of Householder reflections. Handle -# the case `Q.τ` contains zeros. -_det_tau(τs::AbstractVector{<:Real}) = - isodd(count(!iszero, τs)) ? -one(eltype(τs)) : one(eltype(τs)) - -# In complex case, we need to compute the non-unit eigenvalue `λ = 1 - c*τ` -# (where `c = v'v`) of each Householder reflector. As we know that the -# reflector must have the determinant of 1, it must satisfy `abs2(λ) == 1`. -# Combining this with the constraint `c > 0`, it turns out that the eigenvalue -# (hence the determinant) can be computed as `λ = -sign(τ)^2`. -# See: https://github.com/JuliaLang/julia/pull/32887#issuecomment-521935716 -_det_tau(τs) = prod(τ -> iszero(τ) ? one(τ) : -sign(τ)^2, τs) - -########################################################### -######## Q from Hessenberg decomposition ################## -########################################################### - -""" - HessenbergQ <: AbstractQ - -Given a [`Hessenberg`](@ref) factorization object `F`, `F.Q` returns -a `HessenbergQ` object, which is an implicit representation of the unitary -matrix `Q` in the Hessenberg factorization `QHQ'` represented by `F`. 
-This `F.Q` object can be efficiently multiplied by matrices or vectors, -and can be converted to an ordinary matrix type with `Matrix(F.Q)`. -""" -struct HessenbergQ{T,S<:AbstractMatrix,W<:AbstractVector,sym} <: AbstractQ{T} - uplo::Char - factors::S - τ::W - function HessenbergQ{T,S,W,sym}(uplo::AbstractChar, factors, τ) where {T,S<:AbstractMatrix,W<:AbstractVector,sym} - new(uplo, factors, τ) - end -end -HessenbergQ(F::Hessenberg{<:Any,<:UpperHessenberg,S,W}) where {S,W} = HessenbergQ{eltype(F.factors),S,W,false}(F.uplo, F.factors, F.τ) -HessenbergQ(F::Hessenberg{<:Any,<:SymTridiagonal,S,W}) where {S,W} = HessenbergQ{eltype(F.factors),S,W,true}(F.uplo, F.factors, F.τ) - -size(Q::HessenbergQ, dim::Integer) = size(getfield(Q, :factors), dim == 2 ? 1 : dim) -size(Q::HessenbergQ) = size(Q, 1), size(Q, 2) - -# HessenbergQ from LAPACK/BLAS (as opposed to Julia libraries like GenericLinearAlgebra) -const BlasHessenbergQ{T,sym} = HessenbergQ{T,<:StridedMatrix{T},<:StridedVector{T},sym} where {T<:BlasFloat,sym} - -## reconstruct the original matrix -Matrix{T}(Q::BlasHessenbergQ{<:Any,false}) where {T} = convert(Matrix{T}, LAPACK.orghr!(1, size(Q.factors, 1), copy(Q.factors), Q.τ)) -Matrix{T}(Q::BlasHessenbergQ{<:Any,true}) where {T} = convert(Matrix{T}, LAPACK.orgtr!(Q.uplo, copy(Q.factors), Q.τ)) - -lmul!(Q::BlasHessenbergQ{T,false}, X::StridedVecOrMat{T}) where {T<:BlasFloat} = - LAPACK.ormhr!('L', 'N', 1, size(Q.factors, 1), Q.factors, Q.τ, X) -rmul!(X::StridedVecOrMat{T}, Q::BlasHessenbergQ{T,false}) where {T<:BlasFloat} = - LAPACK.ormhr!('R', 'N', 1, size(Q.factors, 1), Q.factors, Q.τ, X) -lmul!(adjQ::AdjointQ{<:Any,<:BlasHessenbergQ{T,false}}, X::StridedVecOrMat{T}) where {T<:BlasFloat} = - (Q = adjQ.Q; LAPACK.ormhr!('L', ifelse(T<:Real, 'T', 'C'), 1, size(Q.factors, 1), Q.factors, Q.τ, X)) -rmul!(X::StridedVecOrMat{T}, adjQ::AdjointQ{<:Any,<:BlasHessenbergQ{T,false}}) where {T<:BlasFloat} = - (Q = adjQ.Q; LAPACK.ormhr!('R', ifelse(T<:Real, 'T', 'C'), 1, size(Q.factors, 1), Q.factors, Q.τ, X)) - -lmul!(Q::BlasHessenbergQ{T,true}, X::StridedVecOrMat{T}) where {T<:BlasFloat} = - LAPACK.ormtr!('L', Q.uplo, 'N', Q.factors, Q.τ, X) -rmul!(X::StridedVecOrMat{T}, Q::BlasHessenbergQ{T,true}) where {T<:BlasFloat} = - LAPACK.ormtr!('R', Q.uplo, 'N', Q.factors, Q.τ, X) -lmul!(adjQ::AdjointQ{<:Any,<:BlasHessenbergQ{T,true}}, X::StridedVecOrMat{T}) where {T<:BlasFloat} = - (Q = adjQ.Q; LAPACK.ormtr!('L', Q.uplo, ifelse(T<:Real, 'T', 'C'), Q.factors, Q.τ, X)) -rmul!(X::StridedVecOrMat{T}, adjQ::AdjointQ{<:Any,<:BlasHessenbergQ{T,true}}) where {T<:BlasFloat} = - (Q = adjQ.Q; LAPACK.ormtr!('R', Q.uplo, ifelse(T<:Real, 'T', 'C'), Q.factors, Q.τ, X)) - -lmul!(Q::HessenbergQ{T}, X::Adjoint{T,<:StridedVecOrMat{T}}) where {T} = rmul!(X', Q')' -rmul!(X::Adjoint{T,<:StridedVecOrMat{T}}, Q::HessenbergQ{T}) where {T} = lmul!(Q', X')' -lmul!(adjQ::AdjointQ{<:Any,<:HessenbergQ{T}}, X::Adjoint{T,<:StridedVecOrMat{T}}) where {T} = rmul!(X', adjQ')' -rmul!(X::Adjoint{T,<:StridedVecOrMat{T}}, adjQ::AdjointQ{<:Any,<:HessenbergQ{T}}) where {T} = lmul!(adjQ', X')' - -# division by a matrix -function /(Q::Union{QRPackedQ,QRCompactWYQ,HessenbergQ}, B::AbstractVecOrMat) - size(B, 2) in size(Q.factors) || - throw(DimensionMismatch(lazy"second dimension of B, $(size(B,2)), must equal one of the dimensions of Q, $(size(Q.factors))")) - if size(B, 2) == size(Q.factors, 2) - return Matrix(Q) / B - else - return collect(Q) / B - end -end -function \(A::AbstractVecOrMat, 
adjQ::AdjointQ{<:Any,<:Union{QRPackedQ,QRCompactWYQ,HessenbergQ}}) - Q = adjQ.Q - size(A, 1) in size(Q.factors) || - throw(DimensionMismatch(lazy"first dimension of A, $(size(A,1)), must equal one of the dimensions of Q, $(size(Q.factors))")) - if size(A, 1) == size(Q.factors, 2) - return A \ Matrix(Q)' - else - return A \ collect(Q)' - end -end - -# flexible left-multiplication (and adjoint right-multiplication) -qsize_check(Q::Union{QRPackedQ,QRCompactWYQ,HessenbergQ}, B::AbstractVecOrMat) = - size(B, 1) in size(Q.factors) || - throw(DimensionMismatch(lazy"first dimension of B, $(size(B,1)), must equal one of the dimensions of Q, $(size(Q.factors))")) -qsize_check(A::AbstractVecOrMat, adjQ::AdjointQ{<:Any,<:Union{QRPackedQ,QRCompactWYQ,HessenbergQ}}) = - (Q = adjQ.Q; size(A, 2) in size(Q.factors) || - throw(DimensionMismatch(lazy"second dimension of A, $(size(A,2)), must equal one of the dimensions of Q, $(size(Q.factors))"))) - -det(Q::HessenbergQ) = _det_tau(Q.τ) - -########################################################### -################ Q from LQ decomposition ################## -########################################################### - -struct LQPackedQ{T,S<:AbstractMatrix{T},C<:AbstractVector{T}} <: AbstractQ{T} - factors::S - τ::C -end - -LQPackedQ{T}(Q::LQPackedQ) where {T} = LQPackedQ(convert(AbstractMatrix{T}, Q.factors), convert(AbstractVector{T}, Q.τ)) -@deprecate(AbstractMatrix{T}(Q::LQPackedQ) where {T}, - convert(AbstractQ{T}, Q), - false) -Matrix{T}(A::LQPackedQ) where {T} = convert(Matrix{T}, LAPACK.orglq!(copy(A.factors), A.τ)) -convert(::Type{AbstractQ{T}}, Q::LQPackedQ) where {T} = LQPackedQ{T}(Q) - -# size(Q::LQPackedQ) yields the shape of Q's square form -size(Q::LQPackedQ) = (n = size(Q.factors, 2); return n, n) - -## Multiplication -# out-of-place right application of LQPackedQs -# -# these methods: (1) check whether the applied-to matrix's (A's) appropriate dimension -# (columns for A_*, rows for Ac_*) matches the number of columns (nQ) of the LQPackedQ (Q), -# and if so effectively apply Q's square form to A without additional shenanigans; and -# (2) if the preceding dimensions do not match, check whether the appropriate dimension of -# A instead matches the number of rows of the matrix of which Q is a factor (i.e. 
-# size(Q.factors, 1)), and if so implicitly apply Q's truncated form to A by zero extending -# A as necessary for check (1) to pass (if possible) and then applying Q's square form - -qsize_check(adjQ::AdjointQ{<:Any,<:LQPackedQ}, B::AbstractVecOrMat) = - size(B, 1) in size(adjQ.Q.factors) || - throw(DimensionMismatch(lazy"first dimension of B, $(size(B,1)), must equal one of the dimensions of Q, $(size(adjQ.Q.factors))")) -qsize_check(A::AbstractVecOrMat, Q::LQPackedQ) = - size(A, 2) in size(Q.factors) || - throw(DimensionMismatch(lazy"second dimension of A, $(size(A,2)), must equal one of the dimensions of Q, $(size(Q.factors))")) - -# in-place right-application of LQPackedQs -# these methods require that the applied-to matrix's (A's) number of columns -# match the number of columns (nQ) of the LQPackedQ (Q) (necessary for in-place -# operation, and the underlying LAPACK routine (ormlq) treats the implicit Q -# as its (nQ-by-nQ) square form) -rmul!(A::StridedVecOrMat{T}, B::LQPackedQ{T}) where {T<:BlasFloat} = - LAPACK.ormlq!('R', 'N', B.factors, B.τ, A) -rmul!(A::StridedVecOrMat{T}, adjB::AdjointQ{<:Any,<:LQPackedQ{T}}) where {T<:BlasReal} = - (B = adjB.Q; LAPACK.ormlq!('R', 'T', B.factors, B.τ, A)) -rmul!(A::StridedVecOrMat{T}, adjB::AdjointQ{<:Any,<:LQPackedQ{T}}) where {T<:BlasComplex} = - (B = adjB.Q; LAPACK.ormlq!('R', 'C', B.factors, B.τ, A)) - -### QB / QcB -lmul!(A::LQPackedQ{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = LAPACK.ormlq!('L','N',A.factors,A.τ,B) -lmul!(adjA::AdjointQ{<:Any,<:LQPackedQ{T}}, B::StridedVecOrMat{T}) where {T<:BlasReal} = - (A = adjA.Q; LAPACK.ormlq!('L', 'T', A.factors, A.τ, B)) -lmul!(adjA::AdjointQ{<:Any,<:LQPackedQ{T}}, B::StridedVecOrMat{T}) where {T<:BlasComplex} = - (A = adjA.Q; LAPACK.ormlq!('L', 'C', A.factors, A.τ, B)) - -# division by a matrix -function /(adjQ::AdjointQ{<:Any,<:LQPackedQ}, B::AbstractVecOrMat) - Q = adjQ.Q - size(B, 2) in size(Q.factors) || - throw(DimensionMismatch(lazy"second dimension of B, $(size(B,2)), must equal one of the dimensions of Q, $(size(Q.factors))")) - if size(B, 2) == size(Q.factors, 1) - return Matrix(Q)' / B - else - return collect(Q)' / B - end -end -function \(A::AbstractVecOrMat, Q::LQPackedQ) - size(A, 1) in size(Q.factors) || - throw(DimensionMismatch(lazy"first dimension of A, $(size(A,1)), must equal one of the dimensions of Q, $(size(Q.factors))")) - if size(A, 1) == size(Q.factors, 1) - return A \ Matrix(Q) - else - return A \ collect(Q) - end -end - -# In LQ factorization, `Q` is expressed as the product of the adjoint of the -# reflectors. Thus, `det` has to be conjugated. -det(Q::LQPackedQ) = conj(_det_tau(Q.τ)) diff --git a/stdlib/LinearAlgebra/src/adjtrans.jl b/stdlib/LinearAlgebra/src/adjtrans.jl deleted file mode 100644 index b722e49bb2c3d..0000000000000 --- a/stdlib/LinearAlgebra/src/adjtrans.jl +++ /dev/null @@ -1,524 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -### basic definitions (types, aliases, constructors, abstractarray interface, sundry similar) - -# note that Adjoint and Transpose must be able to wrap not only vectors and matrices -# but also factorizations, rotations, and other linear algebra objects, including -# user-defined such objects. so do not restrict the wrapped type. -""" - Adjoint - -Lazy wrapper type for an adjoint view of the underlying linear algebra object, -usually an `AbstractVector`/`AbstractMatrix`. -Usually, the `Adjoint` constructor should not be called directly, use [`adjoint`](@ref) -instead. 
To materialize the view use [`copy`](@ref). - -This type is intended for linear algebra usage - for general data manipulation see -[`permutedims`](@ref Base.permutedims). - -# Examples -```jldoctest -julia> A = [3+2im 9+2im; 0 0] -2×2 Matrix{Complex{Int64}}: - 3+2im 9+2im - 0+0im 0+0im - -julia> Adjoint(A) -2×2 adjoint(::Matrix{Complex{Int64}}) with eltype Complex{Int64}: - 3-2im 0+0im - 9-2im 0+0im -``` -""" -struct Adjoint{T,S} <: AbstractMatrix{T} - parent::S -end -""" - Transpose - -Lazy wrapper type for a transpose view of the underlying linear algebra object, -usually an `AbstractVector`/`AbstractMatrix`. -Usually, the `Transpose` constructor should not be called directly, use [`transpose`](@ref) -instead. To materialize the view use [`copy`](@ref). - -This type is intended for linear algebra usage - for general data manipulation see -[`permutedims`](@ref Base.permutedims). - -# Examples -```jldoctest -julia> A = [2 3; 0 0] -2×2 Matrix{Int64}: - 2 3 - 0 0 - -julia> Transpose(A) -2×2 transpose(::Matrix{Int64}) with eltype Int64: - 2 0 - 3 0 -``` -""" -struct Transpose{T,S} <: AbstractMatrix{T} - parent::S -end - -# basic outer constructors -Adjoint(A) = Adjoint{Base.promote_op(adjoint,eltype(A)),typeof(A)}(A) -Transpose(A) = Transpose{Base.promote_op(transpose,eltype(A)),typeof(A)}(A) - -""" - inplace_adj_or_trans(::AbstractArray) -> adjoint!|transpose!|copyto! - inplace_adj_or_trans(::Type{<:AbstractArray}) -> adjoint!|transpose!|copyto! - -Return [`adjoint!`](@ref) from an `Adjoint` type or object and -[`transpose!`](@ref) from a `Transpose` type or object. Otherwise, -return [`copyto!`](@ref). Note that `Adjoint` and `Transpose` have -to be the outer-most wrapper object for a non-`identity` function to be -returned. -""" -inplace_adj_or_trans(::T) where {T <: AbstractArray} = inplace_adj_or_trans(T) -inplace_adj_or_trans(::Type{<:AbstractArray}) = copyto! -inplace_adj_or_trans(::Type{<:Adjoint}) = adjoint! -inplace_adj_or_trans(::Type{<:Transpose}) = transpose! - -# unwraps Adjoint, Transpose, Symmetric, Hermitian -_unwrap(A::Adjoint) = parent(A) -_unwrap(A::Transpose) = parent(A) - -# unwraps Adjoint and Transpose only -_unwrap_at(A) = A -_unwrap_at(A::Adjoint) = parent(A) -_unwrap_at(A::Transpose) = parent(A) - -Base.dataids(A::Union{Adjoint, Transpose}) = Base.dataids(A.parent) -Base.unaliascopy(A::Union{Adjoint,Transpose}) = typeof(A)(Base.unaliascopy(A.parent)) - -# wrapping lowercase quasi-constructors -""" - A' - adjoint(A) - -Lazy adjoint (conjugate transposition). Note that `adjoint` is applied recursively to -elements. - -For number types, `adjoint` returns the complex conjugate, and therefore it is equivalent to -the identity function for real numbers. - -This operation is intended for linear algebra usage - for general data manipulation see -[`permutedims`](@ref Base.permutedims). 
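A quick scalar-level sketch of the behaviour stated above, complementing the matrix examples that follow:

```julia
# adjoint of a number is its complex conjugate; on reals it is the identity
@assert adjoint(3 + 2im) == 3 - 2im
@assert adjoint(5.0) == 5.0
@assert (3 + 2im)' == 3 - 2im   # the postfix ' operator lowers to adjoint
```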
- -# Examples -```jldoctest -julia> A = [3+2im 9+2im; 0 0] -2×2 Matrix{Complex{Int64}}: - 3+2im 9+2im - 0+0im 0+0im - -julia> B = A' # equivalently adjoint(A) -2×2 adjoint(::Matrix{Complex{Int64}}) with eltype Complex{Int64}: - 3-2im 0+0im - 9-2im 0+0im - -julia> B isa Adjoint -true - -julia> adjoint(B) === A # the adjoint of an adjoint unwraps the parent -true - -julia> Adjoint(B) # however, the constructor always wraps its argument -2×2 adjoint(adjoint(::Matrix{Complex{Int64}})) with eltype Complex{Int64}: - 3+2im 9+2im - 0+0im 0+0im - -julia> B[1,2] = 4 + 5im; # modifying B will modify A automatically - -julia> A -2×2 Matrix{Complex{Int64}}: - 3+2im 9+2im - 4-5im 0+0im -``` - -For real matrices, the `adjoint` operation is equivalent to a `transpose`. - -```jldoctest -julia> A = reshape([x for x in 1:4], 2, 2) -2×2 Matrix{Int64}: - 1 3 - 2 4 - -julia> A' -2×2 adjoint(::Matrix{Int64}) with eltype Int64: - 1 2 - 3 4 - -julia> adjoint(A) == transpose(A) -true -``` - -The adjoint of an `AbstractVector` is a row-vector: -```jldoctest -julia> x = [3, 4im] -2-element Vector{Complex{Int64}}: - 3 + 0im - 0 + 4im - -julia> x' -1×2 adjoint(::Vector{Complex{Int64}}) with eltype Complex{Int64}: - 3+0im 0-4im - -julia> x'x # compute the dot product, equivalently x' * x -25 + 0im -``` - -For a matrix of matrices, the individual blocks are recursively operated on: -```jldoctest -julia> A = reshape([x + im*x for x in 1:4], 2, 2) -2×2 Matrix{Complex{Int64}}: - 1+1im 3+3im - 2+2im 4+4im - -julia> C = reshape([A, 2A, 3A, 4A], 2, 2) -2×2 Matrix{Matrix{Complex{Int64}}}: - [1+1im 3+3im; 2+2im 4+4im] [3+3im 9+9im; 6+6im 12+12im] - [2+2im 6+6im; 4+4im 8+8im] [4+4im 12+12im; 8+8im 16+16im] - -julia> C' -2×2 adjoint(::Matrix{Matrix{Complex{Int64}}}) with eltype Adjoint{Complex{Int64}, Matrix{Complex{Int64}}}: - [1-1im 2-2im; 3-3im 4-4im] [2-2im 4-4im; 6-6im 8-8im] - [3-3im 6-6im; 9-9im 12-12im] [4-4im 8-8im; 12-12im 16-16im] -``` -""" -adjoint(A::AbstractVecOrMat) = Adjoint(A) - -""" - transpose(A) - -Lazy transpose. Mutating the returned object should appropriately mutate `A`. Often, -but not always, yields `Transpose(A)`, where `Transpose` is a lazy transpose wrapper. Note -that this operation is recursive. - -This operation is intended for linear algebra usage - for general data manipulation see -[`permutedims`](@ref Base.permutedims), which is non-recursive. - -# Examples -```jldoctest -julia> A = [3 2; 0 0] -2×2 Matrix{Int64}: - 3 2 - 0 0 - -julia> B = transpose(A) -2×2 transpose(::Matrix{Int64}) with eltype Int64: - 3 0 - 2 0 - -julia> B isa Transpose -true - -julia> transpose(B) === A # the transpose of a transpose unwraps the parent -true - -julia> Transpose(B) # however, the constructor always wraps its argument -2×2 transpose(transpose(::Matrix{Int64})) with eltype Int64: - 3 2 - 0 0 - -julia> B[1,2] = 4; # modifying B will modify A automatically - -julia> A -2×2 Matrix{Int64}: - 3 2 - 4 0 -``` - -For complex matrices, the `adjoint` operation is equivalent to a conjugate-transpose. 
-```jldoctest -julia> A = reshape([Complex(x, x) for x in 1:4], 2, 2) -2×2 Matrix{Complex{Int64}}: - 1+1im 3+3im - 2+2im 4+4im - -julia> adjoint(A) == conj(transpose(A)) -true -``` - -The `transpose` of an `AbstractVector` is a row-vector: -```jldoctest -julia> v = [1,2,3] -3-element Vector{Int64}: - 1 - 2 - 3 - -julia> transpose(v) # returns a row-vector -1×3 transpose(::Vector{Int64}) with eltype Int64: - 1 2 3 - -julia> transpose(v) * v # compute the dot product -14 -``` - -For a matrix of matrices, the individual blocks are recursively operated on: -```jldoctest -julia> C = [1 3; 2 4] -2×2 Matrix{Int64}: - 1 3 - 2 4 - -julia> D = reshape([C, 2C, 3C, 4C], 2, 2) # construct a block matrix -2×2 Matrix{Matrix{Int64}}: - [1 3; 2 4] [3 9; 6 12] - [2 6; 4 8] [4 12; 8 16] - -julia> transpose(D) # blocks are recursively transposed -2×2 transpose(::Matrix{Matrix{Int64}}) with eltype Transpose{Int64, Matrix{Int64}}: - [1 2; 3 4] [2 4; 6 8] - [3 6; 9 12] [4 8; 12 16] -``` -""" -transpose(A::AbstractVecOrMat) = Transpose(A) - -# unwrapping lowercase quasi-constructors -adjoint(A::Adjoint) = A.parent -transpose(A::Transpose) = A.parent -adjoint(A::Transpose{<:Real}) = A.parent -transpose(A::Adjoint{<:Real}) = A.parent -adjoint(A::Transpose{<:Any,<:Adjoint}) = transpose(A.parent.parent) -transpose(A::Adjoint{<:Any,<:Transpose}) = adjoint(A.parent.parent) -# disambiguation -adjoint(A::Transpose{<:Real,<:Adjoint}) = transpose(A.parent.parent) -transpose(A::Adjoint{<:Real,<:Transpose}) = A.parent - -# printing -function Base.showarg(io::IO, v::Adjoint, toplevel) - print(io, "adjoint(") - Base.showarg(io, parent(v), false) - print(io, ')') - toplevel && print(io, " with eltype ", eltype(v)) - return nothing -end -function Base.showarg(io::IO, v::Transpose, toplevel) - print(io, "transpose(") - Base.showarg(io, parent(v), false) - print(io, ')') - toplevel && print(io, " with eltype ", eltype(v)) - return nothing -end -function Base.show(io::IO, v::Adjoint{<:Real, <:AbstractVector}) - print(io, "adjoint(") - show(io, parent(v)) - print(io, ")") -end -function Base.show(io::IO, v::Transpose{<:Number, <:AbstractVector}) - print(io, "transpose(") - show(io, parent(v)) - print(io, ")") -end - -# some aliases for internal convenience use -const AdjOrTrans{T,S} = Union{Adjoint{T,S},Transpose{T,S}} where {T,S} -const AdjointAbsVec{T} = Adjoint{T,<:AbstractVector} -const AdjointAbsMat{T} = Adjoint{T,<:AbstractMatrix} -const TransposeAbsVec{T} = Transpose{T,<:AbstractVector} -const TransposeAbsMat{T} = Transpose{T,<:AbstractMatrix} -const AdjOrTransAbsVec{T} = AdjOrTrans{T,<:AbstractVector} -const AdjOrTransAbsMat{T} = AdjOrTrans{T,<:AbstractMatrix} - -# for internal use below -wrapperop(_) = identity -wrapperop(::Adjoint) = adjoint -wrapperop(::Transpose) = transpose - -# the following fallbacks can be removed if Adjoint/Transpose are restricted to AbstractVecOrMat -size(A::AdjOrTrans) = reverse(size(A.parent)) -axes(A::AdjOrTrans) = reverse(axes(A.parent)) -# AbstractArray interface, basic definitions -length(A::AdjOrTrans) = length(A.parent) -size(v::AdjOrTransAbsVec) = (1, length(v.parent)) -size(A::AdjOrTransAbsMat) = reverse(size(A.parent)) -axes(v::AdjOrTransAbsVec) = (axes(v.parent,2), axes(v.parent)...) 
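A short sketch of the shape and element conventions implemented by the surrounding `size`/`axes`/`getindex` definitions, assuming nothing beyond `LinearAlgebra` itself:

```julia
using LinearAlgebra

v = [1 + 2im, 3 - 1im]
@assert size(v') == (1, 2)                  # a wrapped vector acts as a 1×n row
@assert v'[1, 2] == conj(v[2])              # entries are conjugated lazily on access
A = rand(2, 3)
@assert axes(transpose(A)) == (axes(A, 2), axes(A, 1))   # axes come back reversed
```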
-axes(A::AdjOrTransAbsMat) = reverse(axes(A.parent)) -IndexStyle(::Type{<:AdjOrTransAbsVec}) = IndexLinear() -@propagate_inbounds Base.isassigned(v::AdjOrTransAbsVec, i::Int) = isassigned(v.parent, i-1+first(axes(v.parent)[1])) -@propagate_inbounds Base.isassigned(v::AdjOrTransAbsMat, i::Int, j::Int) = isassigned(v.parent, j, i) -@propagate_inbounds getindex(v::AdjOrTransAbsVec{T}, i::Int) where {T} = wrapperop(v)(v.parent[i-1+first(axes(v.parent)[1])])::T -@propagate_inbounds getindex(A::AdjOrTransAbsMat{T}, i::Int, j::Int) where {T} = wrapperop(A)(A.parent[j, i])::T -@propagate_inbounds setindex!(v::AdjOrTransAbsVec, x, i::Int) = (setindex!(v.parent, wrapperop(v)(x), i-1+first(axes(v.parent)[1])); v) -@propagate_inbounds setindex!(A::AdjOrTransAbsMat, x, i::Int, j::Int) = (setindex!(A.parent, wrapperop(A)(x), j, i); A) -# AbstractArray interface, additional definitions to retain wrapper over vectors where appropriate -@propagate_inbounds getindex(v::AdjOrTransAbsVec, ::Colon, is::AbstractArray{Int}) = wrapperop(v)(v.parent[is]) -@propagate_inbounds getindex(v::AdjOrTransAbsVec, ::Colon, ::Colon) = wrapperop(v)(v.parent[:]) - -# conversion of underlying storage -convert(::Type{Adjoint{T,S}}, A::Adjoint) where {T,S} = Adjoint{T,S}(convert(S, A.parent))::Adjoint{T,S} -convert(::Type{Transpose{T,S}}, A::Transpose) where {T,S} = Transpose{T,S}(convert(S, A.parent))::Transpose{T,S} - -# Strides and pointer for transposed strided arrays — but only if the elements are actually stored in memory -Base.strides(A::Adjoint{<:Real, <:AbstractVector}) = (stride(A.parent, 2), stride(A.parent, 1)) -Base.strides(A::Transpose{<:Any, <:AbstractVector}) = (stride(A.parent, 2), stride(A.parent, 1)) -# For matrices it's slightly faster to use reverse and avoid calling stride twice -Base.strides(A::Adjoint{<:Real, <:AbstractMatrix}) = reverse(strides(A.parent)) -Base.strides(A::Transpose{<:Any, <:AbstractMatrix}) = reverse(strides(A.parent)) - -Base.cconvert(::Type{Ptr{T}}, A::Adjoint{<:Real, <:AbstractVecOrMat}) where {T} = Base.cconvert(Ptr{T}, A.parent) -Base.cconvert(::Type{Ptr{T}}, A::Transpose{<:Any, <:AbstractVecOrMat}) where {T} = Base.cconvert(Ptr{T}, A.parent) - -Base.elsize(::Type{<:Adjoint{<:Real, P}}) where {P<:AbstractVecOrMat} = Base.elsize(P) -Base.elsize(::Type{<:Transpose{<:Any, P}}) where {P<:AbstractVecOrMat} = Base.elsize(P) - -# for vectors, the semantics of the wrapped and unwrapped types differ -# so attempt to maintain both the parent and wrapper type insofar as possible -similar(A::AdjOrTransAbsVec) = wrapperop(A)(similar(A.parent)) -similar(A::AdjOrTransAbsVec, ::Type{T}) where {T} = wrapperop(A)(similar(A.parent, Base.promote_op(wrapperop(A), T))) -# for matrices, the semantics of the wrapped and unwrapped types are generally the same -# and as you are allocating with similar anyway, you might as well get something unwrapped -similar(A::AdjOrTrans) = similar(A.parent, eltype(A), axes(A)) -similar(A::AdjOrTrans, ::Type{T}) where {T} = similar(A.parent, T, axes(A)) -similar(A::AdjOrTrans, ::Type{T}, dims::Dims{N}) where {T,N} = similar(A.parent, T, dims) - -# AbstractMatrix{T} constructor for adjtrans vector: preserve wrapped type -AbstractMatrix{T}(A::AdjOrTransAbsVec) where {T} = wrapperop(A)(AbstractVector{T}(A.parent)) - -# sundry basic definitions -parent(A::AdjOrTrans) = A.parent -vec(v::TransposeAbsVec{<:Number}) = parent(v) -vec(v::AdjointAbsVec{<:Real}) = parent(v) - -### concatenation -# preserve Adjoint/Transpose wrapper around vectors -# to retain the associated semantics 
post-concatenation -hcat(avs::Union{Number,AdjointAbsVec}...) = _adjoint_hcat(avs...) -hcat(tvs::Union{Number,TransposeAbsVec}...) = _transpose_hcat(tvs...) -_adjoint_hcat(avs::Union{Number,AdjointAbsVec}...) = adjoint(vcat(map(adjoint, avs)...)) -_transpose_hcat(tvs::Union{Number,TransposeAbsVec}...) = transpose(vcat(map(transpose, tvs)...)) -typed_hcat(::Type{T}, avs::Union{Number,AdjointAbsVec}...) where {T} = adjoint(typed_vcat(T, map(adjoint, avs)...)) -typed_hcat(::Type{T}, tvs::Union{Number,TransposeAbsVec}...) where {T} = transpose(typed_vcat(T, map(transpose, tvs)...)) -# otherwise-redundant definitions necessary to prevent hitting the concat methods in LinearAlgebra/special.jl -hcat(avs::Adjoint{<:Any,<:Vector}...) = _adjoint_hcat(avs...) -hcat(tvs::Transpose{<:Any,<:Vector}...) = _transpose_hcat(tvs...) -hcat(avs::Adjoint{T,Vector{T}}...) where {T} = _adjoint_hcat(avs...) -hcat(tvs::Transpose{T,Vector{T}}...) where {T} = _transpose_hcat(tvs...) -# TODO unify and allow mixed combinations - - -### higher order functions -# preserve Adjoint/Transpose wrapper around vectors -# to retain the associated semantics post-map/broadcast -# -# note that the caller's operation f operates in the domain of the wrapped vectors' entries. -# hence the adjoint->f->adjoint shenanigans applied to the parent vectors' entries. -function map(f, av::AdjointAbsVec, avs::AdjointAbsVec...) - s = (av, avs...) - adjoint(map((xs...) -> adjoint(f(adjoint.(xs)...)), parent.(s)...)) -end -function map(f, tv::TransposeAbsVec, tvs::TransposeAbsVec...) - s = (tv, tvs...) - transpose(map((xs...) -> transpose(f(transpose.(xs)...)), parent.(s)...)) -end -quasiparentt(x) = parent(x); quasiparentt(x::Number) = x # to handle numbers in the defs below -quasiparenta(x) = parent(x); quasiparenta(x::Number) = conj(x) # to handle numbers in the defs below -quasiparentc(x) = parent(parent(x)); quasiparentc(x::Number) = conj(x) # to handle numbers in the defs below -broadcast(f, avs::Union{Number,AdjointAbsVec}...) = adjoint(broadcast((xs...) -> adjoint(f(adjoint.(xs)...)), quasiparenta.(avs)...)) -broadcast(f, tvs::Union{Number,TransposeAbsVec}...) = transpose(broadcast((xs...) -> transpose(f(transpose.(xs)...)), quasiparentt.(tvs)...)) -# Hack to preserve behavior after #32122; this needs to be done with a broadcast style instead to support dotted fusion -Broadcast.broadcast_preserving_zero_d(f, avs::Union{Number,AdjointAbsVec}...) = adjoint(broadcast((xs...) -> adjoint(f(adjoint.(xs)...)), quasiparenta.(avs)...)) -Broadcast.broadcast_preserving_zero_d(f, tvs::Union{Number,TransposeAbsVec}...) = transpose(broadcast((xs...) -> transpose(f(transpose.(xs)...)), quasiparentt.(tvs)...)) -Broadcast.broadcast_preserving_zero_d(f, tvs::Union{Number,Transpose{<:Any,<:AdjointAbsVec}}...) = - transpose(adjoint(broadcast((xs...) -> adjoint(transpose(f(conj.(xs)...))), quasiparentc.(tvs)...))) -Broadcast.broadcast_preserving_zero_d(f, tvs::Union{Number,Adjoint{<:Any,<:TransposeAbsVec}}...) = - adjoint(transpose(broadcast((xs...) 
-> transpose(adjoint(f(conj.(xs)...))), quasiparentc.(tvs)...))) -# TODO unify and allow mixed combinations with a broadcast style - - -### reductions -# faster to sum the Array than to work through the wrapper (but only in commutative reduction ops as in Base/permuteddimsarray.jl) -Base._mapreduce_dim(f, op::CommutativeOps, init::Base._InitialValue, A::Transpose, dims::Colon) = - Base._mapreduce_dim(f∘transpose, op, init, parent(A), dims) -Base._mapreduce_dim(f, op::CommutativeOps, init::Base._InitialValue, A::Adjoint, dims::Colon) = - Base._mapreduce_dim(f∘adjoint, op, init, parent(A), dims) -# in prod, use fast path only in the commutative case to avoid surprises -Base._mapreduce_dim(f::typeof(identity), op::Union{typeof(*),typeof(Base.mul_prod)}, init::Base._InitialValue, A::Transpose{<:Union{Real,Complex}}, dims::Colon) = - Base._mapreduce_dim(f∘transpose, op, init, parent(A), dims) -Base._mapreduce_dim(f::typeof(identity), op::Union{typeof(*),typeof(Base.mul_prod)}, init::Base._InitialValue, A::Adjoint{<:Union{Real,Complex}}, dims::Colon) = - Base._mapreduce_dim(f∘adjoint, op, init, parent(A), dims) -# count allows for optimization only if the parent array has Bool eltype -Base._count(::typeof(identity), A::Transpose{Bool}, ::Colon, init) = Base._count(identity, parent(A), :, init) -Base._count(::typeof(identity), A::Adjoint{Bool}, ::Colon, init) = Base._count(identity, parent(A), :, init) -Base._any(f, A::Transpose, ::Colon) = Base._any(f∘transpose, parent(A), :) -Base._any(f, A::Adjoint, ::Colon) = Base._any(f∘adjoint, parent(A), :) -Base._all(f, A::Transpose, ::Colon) = Base._all(f∘transpose, parent(A), :) -Base._all(f, A::Adjoint, ::Colon) = Base._all(f∘adjoint, parent(A), :) -# sum(A'; dims) -Base.mapreducedim!(f, op::CommutativeOps, B::AbstractArray, A::TransposeAbsMat) = - (Base.mapreducedim!(f∘transpose, op, switch_dim12(B), parent(A)); B) -Base.mapreducedim!(f, op::CommutativeOps, B::AbstractArray, A::AdjointAbsMat) = - (Base.mapreducedim!(f∘adjoint, op, switch_dim12(B), parent(A)); B) -Base.mapreducedim!(f::typeof(identity), op::Union{typeof(*),typeof(Base.mul_prod)}, B::AbstractArray, A::TransposeAbsMat{<:Union{Real,Complex}}) = - (Base.mapreducedim!(f∘transpose, op, switch_dim12(B), parent(A)); B) -Base.mapreducedim!(f::typeof(identity), op::Union{typeof(*),typeof(Base.mul_prod)}, B::AbstractArray, A::AdjointAbsMat{<:Union{Real,Complex}}) = - (Base.mapreducedim!(f∘adjoint, op, switch_dim12(B), parent(A)); B) - -switch_dim12(B::AbstractVector) = permutedims(B) -switch_dim12(B::AbstractVector{<:Number}) = transpose(B) # avoid allocs due to permutedims -switch_dim12(B::AbstractArray{<:Any,0}) = B -switch_dim12(B::AbstractArray) = PermutedDimsArray(B, (2, 1, ntuple(Base.Fix1(+,2), ndims(B) - 2)...)) - -### linear algebra - -(-)(A::Adjoint) = Adjoint( -A.parent) -(-)(A::Transpose) = Transpose(-A.parent) - -tr(A::Adjoint) = adjoint(tr(parent(A))) -tr(A::Transpose) = transpose(tr(parent(A))) - -## multiplication * - -function _dot_nonrecursive(u, v) - lu = length(u) - if lu != length(v) - throw(DimensionMismatch(lazy"first array has length $(lu) which does not match the length of the second, $(length(v)).")) - end - if lu == 0 - zero(eltype(u)) * zero(eltype(v)) - else - sum(uu*vv for (uu, vv) in zip(u, v)) - end -end - -# Adjoint/Transpose-vector * vector -*(u::AdjointAbsVec{<:Number}, v::AbstractVector{<:Number}) = dot(u.parent, v) -*(u::TransposeAbsVec{T}, v::AbstractVector{T}) where {T<:Real} = dot(u.parent, v) -*(u::AdjOrTransAbsVec, v::AbstractVector) = 
_dot_nonrecursive(u, v) - - -# vector * Adjoint/Transpose-vector -*(u::AbstractVector, v::AdjOrTransAbsVec) = broadcast(*, u, v) - -# AdjOrTransAbsVec{<:Any,<:AdjOrTransAbsVec} is a lazy conj vectors -# We need to expand the combinations to avoid ambiguities -(*)(u::TransposeAbsVec, v::AdjointAbsVec{<:Any,<:TransposeAbsVec}) = _dot_nonrecursive(u, v) -(*)(u::AdjointAbsVec, v::AdjointAbsVec{<:Any,<:TransposeAbsVec}) = _dot_nonrecursive(u, v) -(*)(u::TransposeAbsVec, v::TransposeAbsVec{<:Any,<:AdjointAbsVec}) = _dot_nonrecursive(u, v) -(*)(u::AdjointAbsVec, v::TransposeAbsVec{<:Any,<:AdjointAbsVec}) = _dot_nonrecursive(u, v) - -## pseudoinversion -pinv(v::AdjointAbsVec, tol::Real = 0) = pinv(v.parent, tol).parent -pinv(v::TransposeAbsVec, tol::Real = 0) = pinv(conj(v.parent)).parent - - -## left-division \ -\(u::AdjOrTransAbsVec, v::AdjOrTransAbsVec) = pinv(u) * v - - -## right-division / -/(u::AdjointAbsVec, A::AbstractMatrix) = adjoint(adjoint(A) \ u.parent) -/(u::TransposeAbsVec, A::AbstractMatrix) = transpose(transpose(A) \ u.parent) -/(u::AdjointAbsVec, A::TransposeAbsMat) = adjoint(conj(A.parent) \ u.parent) # technically should be adjoint(copy(adjoint(copy(A))) \ u.parent) -/(u::TransposeAbsVec, A::AdjointAbsMat) = transpose(conj(A.parent) \ u.parent) # technically should be transpose(copy(transpose(copy(A))) \ u.parent) - -## complex conjugate -conj(A::Transpose) = adjoint(A.parent) -conj(A::Adjoint) = transpose(A.parent) - -## structured matrix methods ## -function Base.replace_in_print_matrix(A::AdjOrTrans,i::Integer,j::Integer,s::AbstractString) - Base.replace_in_print_matrix(parent(A), j, i, s) -end diff --git a/stdlib/LinearAlgebra/src/bidiag.jl b/stdlib/LinearAlgebra/src/bidiag.jl deleted file mode 100644 index 5b7264558f9ae..0000000000000 --- a/stdlib/LinearAlgebra/src/bidiag.jl +++ /dev/null @@ -1,1489 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# Bidiagonal matrices -struct Bidiagonal{T,V<:AbstractVector{T}} <: AbstractMatrix{T} - dv::V # diagonal - ev::V # sub/super diagonal - uplo::Char # upper bidiagonal ('U') or lower ('L') - function Bidiagonal{T,V}(dv, ev, uplo::AbstractChar) where {T,V<:AbstractVector{T}} - require_one_based_indexing(dv, ev) - if length(ev) != max(length(dv)-1, 0) - throw(DimensionMismatch(lazy"length of diagonal vector is $(length(dv)), length of off-diagonal vector is $(length(ev))")) - end - (uplo != 'U' && uplo != 'L') && throw_uplo() - new{T,V}(dv, ev, uplo) - end -end -function Bidiagonal{T,V}(dv, ev, uplo::Symbol) where {T,V<:AbstractVector{T}} - Bidiagonal{T,V}(dv, ev, char_uplo(uplo)) -end -function Bidiagonal{T}(dv::AbstractVector, ev::AbstractVector, uplo::Union{Symbol,AbstractChar}) where {T} - Bidiagonal(convert(AbstractVector{T}, dv)::AbstractVector{T}, - convert(AbstractVector{T}, ev)::AbstractVector{T}, - uplo) -end -function Bidiagonal{T,V}(A::Bidiagonal) where {T,V<:AbstractVector{T}} - Bidiagonal{T,V}(A.dv, A.ev, A.uplo) -end - -""" - Bidiagonal(dv::V, ev::V, uplo::Symbol) where V <: AbstractVector - -Constructs an upper (`uplo=:U`) or lower (`uplo=:L`) bidiagonal matrix using the -given diagonal (`dv`) and off-diagonal (`ev`) vectors. The result is of type `Bidiagonal` -and provides efficient specialized linear solvers, but may be converted into a regular -matrix with [`convert(Array, _)`](@ref) (or `Array(_)` for short). The length of `ev` -must be one less than the length of `dv`. 
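A brief sketch of the specialized solvers mentioned above (the doctests below only cover construction); it assumes a nonsingular upper-bidiagonal system:

```julia
using LinearAlgebra

dv = [1.0, 2.0, 3.0, 4.0]
ev = [7.0, 8.0, 9.0]
B = Bidiagonal(dv, ev, :U)
b = ones(4)
x = B \ b                    # dispatches to the specialized bidiagonal solver
@assert Matrix(B) * x ≈ b    # agrees with the equivalent dense system
```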
- -# Examples -```jldoctest -julia> dv = [1, 2, 3, 4] -4-element Vector{Int64}: - 1 - 2 - 3 - 4 - -julia> ev = [7, 8, 9] -3-element Vector{Int64}: - 7 - 8 - 9 - -julia> Bu = Bidiagonal(dv, ev, :U) # ev is on the first superdiagonal -4×4 Bidiagonal{Int64, Vector{Int64}}: - 1 7 ⋅ ⋅ - ⋅ 2 8 ⋅ - ⋅ ⋅ 3 9 - ⋅ ⋅ ⋅ 4 - -julia> Bl = Bidiagonal(dv, ev, :L) # ev is on the first subdiagonal -4×4 Bidiagonal{Int64, Vector{Int64}}: - 1 ⋅ ⋅ ⋅ - 7 2 ⋅ ⋅ - ⋅ 8 3 ⋅ - ⋅ ⋅ 9 4 -``` -""" -function Bidiagonal(dv::V, ev::V, uplo::Symbol) where {T,V<:AbstractVector{T}} - Bidiagonal{T,V}(dv, ev, uplo) -end -function Bidiagonal(dv::V, ev::V, uplo::AbstractChar) where {T,V<:AbstractVector{T}} - Bidiagonal{T,V}(dv, ev, uplo) -end - -#To allow Bidiagonal's where the "dv" is Vector{T} and "ev" Vector{S}, -#where T and S can be promoted -function Bidiagonal(dv::Vector{T}, ev::Vector{S}, uplo::Symbol) where {T,S} - TS = promote_type(T,S) - return Bidiagonal{TS,Vector{TS}}(dv, ev, uplo) -end - -""" - Bidiagonal(A, uplo::Symbol) - -Construct a `Bidiagonal` matrix from the main diagonal of `A` and -its first super- (if `uplo=:U`) or sub-diagonal (if `uplo=:L`). - -# Examples -```jldoctest -julia> A = [1 1 1 1; 2 2 2 2; 3 3 3 3; 4 4 4 4] -4×4 Matrix{Int64}: - 1 1 1 1 - 2 2 2 2 - 3 3 3 3 - 4 4 4 4 - -julia> Bidiagonal(A, :U) # contains the main diagonal and first superdiagonal of A -4×4 Bidiagonal{Int64, Vector{Int64}}: - 1 1 ⋅ ⋅ - ⋅ 2 2 ⋅ - ⋅ ⋅ 3 3 - ⋅ ⋅ ⋅ 4 - -julia> Bidiagonal(A, :L) # contains the main diagonal and first subdiagonal of A -4×4 Bidiagonal{Int64, Vector{Int64}}: - 1 ⋅ ⋅ ⋅ - 2 2 ⋅ ⋅ - ⋅ 3 3 ⋅ - ⋅ ⋅ 4 4 -``` -""" -function Bidiagonal(A::AbstractMatrix, uplo::Symbol) - Bidiagonal(diag(A, 0), diag(A, uplo === :U ? 1 : -1), uplo) -end - - -Bidiagonal(A::Bidiagonal) = A -Bidiagonal{T}(A::Bidiagonal{T}) where {T} = A -Bidiagonal{T}(A::Bidiagonal) where {T} = Bidiagonal{T}(A.dv, A.ev, A.uplo) - -_offdiagind(uplo) = uplo == 'U' ? 1 : -1 - -@inline function Base.isassigned(A::Bidiagonal, i::Int, j::Int) - @boundscheck checkbounds(Bool, A, i, j) || return false - if i == j - return @inbounds isassigned(A.dv, i) - elseif i == j - _offdiagind(A.uplo) - return @inbounds isassigned(A.ev, A.uplo == 'U' ? i : j) - else - return true - end -end - -@inline function Base.isstored(A::Bidiagonal, i::Int, j::Int) - @boundscheck checkbounds(A, i, j) - if i == j - return @inbounds Base.isstored(A.dv, i) - elseif i == j - _offdiagind(A.uplo) - return @inbounds Base.isstored(A.ev, A.uplo == 'U' ? i : j) - else - return false - end -end - -@inline function getindex(A::Bidiagonal{T}, i::Int, j::Int) where T - @boundscheck checkbounds(A, i, j) - if i == j - return @inbounds A.dv[i] - elseif i == j - _offdiagind(A.uplo) - return @inbounds A.ev[A.uplo == 'U' ? i : j] - else - return diagzero(A, i, j) - end -end - -@inline function getindex(A::Bidiagonal{T}, b::BandIndex) where T - @boundscheck checkbounds(A, b) - if b.band == 0 - return @inbounds A.dv[b.index] - elseif b.band ∈ (-1,1) && b.band == _offdiagind(A.uplo) - # we explicitly compare the possible bands as b.band may be constant-propagated - return @inbounds A.ev[b.index] - else - return diagzero(A, Tuple(_cartinds(b))...) - end -end - -@inline function setindex!(A::Bidiagonal, x, i::Integer, j::Integer) - @boundscheck checkbounds(A, i, j) - if i == j - @inbounds A.dv[i] = x - elseif i == j - _offdiagind(A.uplo) - @inbounds A.ev[A.uplo == 'U' ? i : j] = x - elseif !iszero(x) - throw(ArgumentError(LazyString(lazy"cannot set entry ($i, $j) off the ", - A.uplo == 'U' ? 
"upper" : "lower", " bidiagonal band to a nonzero value ", x))) - end - return A -end - -Base._reverse(A::Bidiagonal, dims) = reverse!(Matrix(A); dims) -Base._reverse(A::Bidiagonal, ::Colon) = Bidiagonal(reverse(A.dv), reverse(A.ev), A.uplo == 'U' ? :L : :U) - -## structured matrix methods ## -function Base.replace_in_print_matrix(A::Bidiagonal,i::Integer,j::Integer,s::AbstractString) - i==j || i==j-_offdiagind(A.uplo) ? s : Base.replace_with_centered_mark(s) -end - -#Converting from Bidiagonal to dense Matrix -function Matrix{T}(A::Bidiagonal) where T - B = Matrix{T}(undef, size(A)) - if haszero(T) # optimized path for types with zero(T) defined - size(B,1) > 1 && fill!(B, zero(T)) - copyto!(diagview(B), A.dv) - copyto!(diagview(B, _offdiagind(A.uplo)), A.ev) - else - copyto!(B, A) - end - return B -end -Matrix(A::Bidiagonal{T}) where {T} = Matrix{promote_type(T, typeof(zero(T)))}(A) -Array(A::Bidiagonal) = Matrix(A) -promote_rule(::Type{Matrix{T}}, ::Type{<:Bidiagonal{S}}) where {T,S} = - @isdefined(T) && @isdefined(S) ? Matrix{promote_type(T,S)} : Matrix -promote_rule(::Type{<:Matrix}, ::Type{<:Bidiagonal}) = Matrix - -#Converting from Bidiagonal to Tridiagonal -function Tridiagonal{T}(A::Bidiagonal) where T - dv = convert(AbstractVector{T}, A.dv) - ev = convert(AbstractVector{T}, A.ev) - # ensure that the types are identical, even if zero returns a different type - z = oftype(ev, zero(ev)) - A.uplo == 'U' ? Tridiagonal(z, dv, ev) : Tridiagonal(ev, dv, z) -end -promote_rule(::Type{<:Tridiagonal{T}}, ::Type{<:Bidiagonal{S}}) where {T,S} = - @isdefined(T) && @isdefined(S) ? Tridiagonal{promote_type(T,S)} : Tridiagonal -promote_rule(::Type{<:Tridiagonal}, ::Type{<:Bidiagonal}) = Tridiagonal - -# When asked to convert Bidiagonal to AbstractMatrix{T}, preserve structure by converting to Bidiagonal{T} <: AbstractMatrix{T} -AbstractMatrix{T}(A::Bidiagonal) where {T} = Bidiagonal{T}(A) -AbstractMatrix{T}(A::Bidiagonal{T}) where {T} = copy(A) - -convert(::Type{T}, m::AbstractMatrix) where {T<:Bidiagonal} = m isa T ? m : T(m)::T - -similar(B::Bidiagonal, ::Type{T}) where {T} = Bidiagonal(similar(B.dv, T), similar(B.ev, T), B.uplo) -similar(B::Bidiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = similar(B.dv, T, dims) - -tr(B::Bidiagonal) = sum(B.dv) - -function kron(A::Diagonal, B::Bidiagonal) - # `_droplast!` is only guaranteed to work with `Vector` - kdv = convert(Vector, kron(diag(A), B.dv)) - kev = _droplast!(convert(Vector, kron(diag(A), _pushzero(B.ev)))) - Bidiagonal(kdv, kev, B.uplo) -end - -################### -# LAPACK routines # -################### - -#Singular values -svdvals!(M::Bidiagonal{<:BlasReal}) = LAPACK.bdsdc!(M.uplo, 'N', M.dv, M.ev)[1] -function svd!(M::Bidiagonal{<:BlasReal}; full::Bool = false) - d, e, U, Vt, Q, iQ = LAPACK.bdsdc!(M.uplo, 'I', M.dv, M.ev) - SVD(U, d, Vt) -end -function svd(M::Bidiagonal; kw...) - svd!(copy(M), kw...) -end - -#################### -# Generic routines # -#################### - -function show(io::IO, M::Bidiagonal) - print(io, "Bidiagonal(") - show(io, M.dv) - print(io, ", ") - show(io, M.ev) - print(io, ", ") - show(io, sym_uplo(M.uplo)) - print(io, ")") -end - -size(M::Bidiagonal) = (n = length(M.dv); (n, n)) -axes(M::Bidiagonal) = (ax = axes(M.dv, 1); (ax, ax)) - -#Elementary operations -for func in (:conj, :copy, :real, :imag) - @eval ($func)(M::Bidiagonal) = Bidiagonal(($func)(M.dv), ($func)(M.ev), M.uplo) -end - -adjoint(B::Bidiagonal{<:Number}) = Bidiagonal(vec(adjoint(B.dv)), vec(adjoint(B.ev)), B.uplo == 'U' ? 
:L : :U) -adjoint(B::Bidiagonal{<:Number, <:Base.ReshapedArray{<:Number,1,<:Adjoint}}) = - Bidiagonal(adjoint(parent(B.dv)), adjoint(parent(B.ev)), B.uplo == 'U' ? :L : :U) -transpose(B::Bidiagonal{<:Number}) = Bidiagonal(B.dv, B.ev, B.uplo == 'U' ? :L : :U) -permutedims(B::Bidiagonal) = Bidiagonal(B.dv, B.ev, B.uplo == 'U' ? 'L' : 'U') -function permutedims(B::Bidiagonal, perm) - Base.checkdims_perm(axes(B), axes(B), perm) - NTuple{2}(perm) == (2, 1) ? permutedims(B) : B -end -function Base.copy(aB::Adjoint{<:Any,<:Bidiagonal}) - B = aB.parent - return Bidiagonal(map(x -> copy.(adjoint.(x)), (B.dv, B.ev))..., B.uplo == 'U' ? :L : :U) -end -function Base.copy(tB::Transpose{<:Any,<:Bidiagonal}) - B = tB.parent - return Bidiagonal(map(x -> copy.(transpose.(x)), (B.dv, B.ev))..., B.uplo == 'U' ? :L : :U) -end - -@noinline function throw_zeroband_error(A) - uplo = A.uplo - zeroband = uplo == 'U' ? "lower" : "upper" - throw(ArgumentError(LazyString("cannot set the ", - zeroband, " bidiagonal band to a nonzero value for uplo=:", uplo))) -end - -# copyto! for matching axes -function _copyto_banded!(A::Bidiagonal, B::Bidiagonal) - A.dv .= B.dv - if A.uplo == B.uplo - A.ev .= B.ev - elseif iszero(B.ev) # diagonal source - A.ev .= B.ev - else - throw_zeroband_error(A) - end - return A -end - -iszero(M::Bidiagonal) = iszero(M.dv) && iszero(M.ev) -isone(M::Bidiagonal) = all(isone, M.dv) && iszero(M.ev) -Base.@constprop :aggressive function istriu(M::Bidiagonal, k::Integer=0) - if M.uplo == 'U' - if k <= 0 - return true - elseif k == 1 - return iszero(M.dv) - else # k >= 2 - return iszero(M.dv) && iszero(M.ev) - end - else # M.uplo == 'L' - if k <= -1 - return true - elseif k == 0 - return iszero(M.ev) - else # k >= 1 - return iszero(M.ev) && iszero(M.dv) - end - end -end -Base.@constprop :aggressive function istril(M::Bidiagonal, k::Integer=0) - if M.uplo == 'U' - if k >= 1 - return true - elseif k == 0 - return iszero(M.ev) - else # k <= -1 - return iszero(M.ev) && iszero(M.dv) - end - else # M.uplo == 'L' - if k >= 0 - return true - elseif k == -1 - return iszero(M.dv) - else # k <= -2 - return iszero(M.dv) && iszero(M.ev) - end - end -end -isdiag(M::Bidiagonal) = iszero(M.ev) -issymmetric(M::Bidiagonal) = isdiag(M) && all(issymmetric, M.dv) -ishermitian(M::Bidiagonal) = isdiag(M) && all(ishermitian, M.dv) - -function tril!(M::Bidiagonal{T}, k::Integer=0) where T - n = length(M.dv) - if !(-n - 1 <= k <= n - 1) - throw(ArgumentError(LazyString(lazy"the requested diagonal, $k, must be at least ", - lazy"$(-n - 1) and at most $(n - 1) in an $n-by-$n matrix"))) - elseif M.uplo == 'U' && k < 0 - fill!(M.dv, zero(T)) - fill!(M.ev, zero(T)) - elseif k < -1 - fill!(M.dv, zero(T)) - fill!(M.ev, zero(T)) - elseif M.uplo == 'U' && k == 0 - fill!(M.ev, zero(T)) - elseif M.uplo == 'L' && k == -1 - fill!(M.dv, zero(T)) - end - return M -end - -function triu!(M::Bidiagonal{T}, k::Integer=0) where T - n = length(M.dv) - if !(-n + 1 <= k <= n + 1) - throw(ArgumentError(LazyString(lazy"the requested diagonal, $k, must be at least", - lazy"$(-n + 1) and at most $(n + 1) in an $n-by-$n matrix"))) - elseif M.uplo == 'L' && k > 0 - fill!(M.dv, zero(T)) - fill!(M.ev, zero(T)) - elseif k > 1 - fill!(M.dv, zero(T)) - fill!(M.ev, zero(T)) - elseif M.uplo == 'L' && k == 0 - fill!(M.ev, zero(T)) - elseif M.uplo == 'U' && k == 1 - fill!(M.dv, zero(T)) - end - return M -end - -function diag(M::Bidiagonal, n::Integer=0) - # every branch call similar(..., ::Int) to make sure the - # same vector type is returned independent of n - 
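As a reading aid for the band-aware accessors being removed above (illustrative only, not part of the deleted source; the values below are arbitrary): `diag(B, k)` hands back the stored vectors for the two nonzero bands and materializes zeros elsewhere, and `transpose`/`adjoint` flip the stored band.

```julia
using LinearAlgebra

B = Bidiagonal([1, 2, 3, 4], [7, 8, 9], :U)

diag(B)            # [1, 2, 3, 4]: the stored diagonal dv
diag(B, 1)         # [7, 8, 9]: the stored band ev, since uplo == 'U'
diag(B, -1)        # [0, 0, 0]: a structurally zero band, built on demand
istriu(B)          # true: an upper bidiagonal is upper triangular
transpose(B).uplo  # 'L': transposition flips the stored band
```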
v = similar(M.dv, max(0, length(M.dv)-abs(n))) - if n == 0 - copyto!(v, M.dv) - elseif (n == 1 && M.uplo == 'U') || (n == -1 && M.uplo == 'L') - copyto!(v, M.ev) - elseif -size(M,1) <= n <= size(M,1) - for i in eachindex(v) - v[i] = M[BandIndex(n,i)] - end - end - return v -end - -function +(A::Bidiagonal, B::Bidiagonal) - if A.uplo == B.uplo || length(A.dv) == 0 - Bidiagonal(A.dv+B.dv, A.ev+B.ev, A.uplo) - else - newdv = A.dv+B.dv - Tridiagonal((A.uplo == 'U' ? (typeof(newdv)(B.ev), newdv, typeof(newdv)(A.ev)) : (typeof(newdv)(A.ev), newdv, typeof(newdv)(B.ev)))...) - end -end - -function -(A::Bidiagonal, B::Bidiagonal) - if A.uplo == B.uplo || length(A.dv) == 0 - Bidiagonal(A.dv-B.dv, A.ev-B.ev, A.uplo) - else - newdv = A.dv-B.dv - Tridiagonal((A.uplo == 'U' ? (typeof(newdv)(-B.ev), newdv, typeof(newdv)(A.ev)) : (typeof(newdv)(A.ev), newdv, typeof(newdv)(-B.ev)))...) - end -end - --(A::Bidiagonal)=Bidiagonal(-A.dv,-A.ev,A.uplo) -*(A::Bidiagonal, B::Number) = Bidiagonal(A.dv*B, A.ev*B, A.uplo) -*(B::Number, A::Bidiagonal) = Bidiagonal(B*A.dv, B*A.ev, A.uplo) -function rmul!(B::Bidiagonal, x::Number) - if size(B,1) > 1 - isupper = B.uplo == 'U' - row, col = 1 + isupper, 1 + !isupper - # ensure that zeros are preserved on scaling - y = B[row,col] * x - iszero(y) || throw(ArgumentError(LazyString(lazy"cannot set index ($row, $col) off ", - lazy"the tridiagonal band to a nonzero value ($y)"))) - end - @. B.dv *= x - @. B.ev *= x - return B -end -function lmul!(x::Number, B::Bidiagonal) - if size(B,1) > 1 - isupper = B.uplo == 'U' - row, col = 1 + isupper, 1 + !isupper - # ensure that zeros are preserved on scaling - y = x * B[row,col] - iszero(y) || throw(ArgumentError(LazyString(lazy"cannot set index ($row, $col) off ", - lazy"the tridiagonal band to a nonzero value ($y)"))) - end - @. B.dv = x * B.dv - @. 
B.ev = x * B.ev - return B -end -/(A::Bidiagonal, B::Number) = Bidiagonal(A.dv/B, A.ev/B, A.uplo) -\(B::Number, A::Bidiagonal) = Bidiagonal(B\A.dv, B\A.ev, A.uplo) - -function ==(A::Bidiagonal, B::Bidiagonal) - if A.uplo == B.uplo - return A.dv == B.dv && A.ev == B.ev - else - return iszero(A.ev) && iszero(B.ev) && A.dv == B.dv - end -end - -const BandedMatrix = Union{Bidiagonal,Diagonal,Tridiagonal,SymTridiagonal} # or BiDiTriSym -const BiTriSym = Union{Bidiagonal,Tridiagonal,SymTridiagonal} -const TriSym = Union{Tridiagonal,SymTridiagonal} -const BiTri = Union{Bidiagonal,Tridiagonal} -@inline _mul!(C::AbstractVector, A::BandedMatrix, B::AbstractVector, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) -@inline _mul!(C::AbstractMatrix, A::BandedMatrix, B::AbstractVector, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) -for T in (:AbstractMatrix, :Diagonal) - @eval begin - @inline _mul!(C::AbstractMatrix, A::BandedMatrix, B::$T, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) - @inline _mul!(C::AbstractMatrix, A::$T, B::BandedMatrix, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) - end -end -@inline _mul!(C::AbstractMatrix, A::BandedMatrix, B::BandedMatrix, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) - -# B .= A * B -function lmul!(A::Bidiagonal, B::AbstractVecOrMat) - _muldiag_size_check(size(A), size(B)) - (; dv, ev) = A - if A.uplo == 'U' - for k in axes(B,2) - for i in axes(ev,1) - B[i,k] = dv[i] * B[i,k] + ev[i] * B[i+1,k] - end - B[end,k] = dv[end] * B[end,k] - end - else - for k in axes(B,2) - for i in reverse(axes(dv,1)[2:end]) - B[i,k] = dv[i] * B[i,k] + ev[i-1] * B[i-1,k] - end - B[1,k] = dv[1] * B[1,k] - end - end - return B -end -# B .= D * B -function lmul!(D::Diagonal, B::Bidiagonal) - _muldiag_size_check(size(D), size(B)) - (; dv, ev) = B - isL = B.uplo == 'L' - dv[1] = D.diag[1] * dv[1] - for i in axes(ev,1) - ev[i] = D.diag[i + isL] * ev[i] - dv[i+1] = D.diag[i+1] * dv[i+1] - end - return B -end -# B .= B * A -function rmul!(B::AbstractMatrix, A::Bidiagonal) - _muldiag_size_check(size(A), size(B)) - (; dv, ev) = A - if A.uplo == 'U' - for k in reverse(axes(dv,1)[2:end]) - for i in axes(B,1) - B[i,k] = B[i,k] * dv[k] + B[i,k-1] * ev[k-1] - end - end - for i in axes(B,1) - B[i,1] *= dv[1] - end - else - for k in axes(ev,1) - for i in axes(B,1) - B[i,k] = B[i,k] * dv[k] + B[i,k+1] * ev[k] - end - end - for i in axes(B,1) - B[i,end] *= dv[end] - end - end - return B -end -# B .= B * D -function rmul!(B::Bidiagonal, D::Diagonal) - _muldiag_size_check(size(B), size(D)) - (; dv, ev) = B - isU = B.uplo == 'U' - dv[1] *= D.diag[1] - for i in axes(ev,1) - ev[i] *= D.diag[i + isU] - dv[i+1] *= D.diag[i+1] - end - return B -end - -@noinline function check_A_mul_B!_sizes((mC, nC)::NTuple{2,Integer}, (mA, nA)::NTuple{2,Integer}, (mB, nB)::NTuple{2,Integer}) - # check for matching sizes in one column of B and C - check_A_mul_B!_sizes((mC,), (mA, nA), (mB,)) - # ensure that the number of columns in B and C match - if nB != nC - throw(DimensionMismatch(lazy"second dimension of output C, $nC, and second dimension of B, $nB, must match")) - end -end -@noinline function check_A_mul_B!_sizes((mC,)::Tuple{Integer}, (mA, nA)::NTuple{2,Integer}, (mB,)::Tuple{Integer}) - if mA != mC - throw(DimensionMismatch(lazy"first dimension of A, $mA, and first dimension of output C, $mC, must 
match")) - elseif nA != mB - throw(DimensionMismatch(lazy"second dimension of A, $nA, and first dimension of B, $mB, must match")) - end -end - -# function to get the internally stored vectors for Bidiagonal and [Sym]Tridiagonal -# to avoid allocations in _mul! below (#24324, #24578) -_diag(A::Tridiagonal, k) = k == -1 ? A.dl : k == 0 ? A.d : A.du -_diag(A::SymTridiagonal{<:Number}, k) = k == 0 ? A.dv : A.ev -_diag(A::SymTridiagonal, k) = diagview(A,k) -function _diag(A::Bidiagonal, k) - if k == 0 - return A.dv - elseif k == _offdiagind(A.uplo) - return A.ev - else - return diag(A, k) - end -end - -_mul!(C::AbstractMatrix, A::BiTriSym, B::TriSym, _add::MulAddMul) = - _bibimul!(C, A, B, _add) -_mul!(C::AbstractMatrix, A::BiTriSym, B::Bidiagonal, _add::MulAddMul) = - _bibimul!(C, A, B, _add) -function _bibimul!(C, A, B, _add) - require_one_based_indexing(C) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - n = size(A,1) - iszero(n) && return C - # We use `_rmul_or_fill!` instead of `_modify!` here since using - # `_modify!` in the following loop will not update the - # off-diagonal elements for non-zero beta. - _rmul_or_fill!(C, _add.beta) - iszero(_add.alpha) && return C - if n <= 3 - # naive multiplication - for I in CartesianIndices(C) - C[I] += _add(sum(A[I[1], k] * B[k, I[2]] for k in axes(A,2))) - end - return C - end - @inbounds begin - # first column of C - C[1,1] += _add(A[1,1]*B[1,1] + A[1, 2]*B[2,1]) - C[2,1] += _add(A[2,1]*B[1,1] + A[2,2]*B[2,1]) - C[3,1] += _add(A[3,2]*B[2,1]) - # second column of C - C[1,2] += _add(A[1,1]*B[1,2] + A[1,2]*B[2,2]) - C[2,2] += _add(A[2,1]*B[1,2] + A[2,2]*B[2,2] + A[2,3]*B[3,2]) - C[3,2] += _add(A[3,2]*B[2,2] + A[3,3]*B[3,2]) - C[4,2] += _add(A[4,3]*B[3,2]) - end # inbounds - # middle columns - __bibimul!(C, A, B, _add) - @inbounds begin - C[n-3,n-1] += _add(A[n-3,n-2]*B[n-2,n-1]) - C[n-2,n-1] += _add(A[n-2,n-2]*B[n-2,n-1] + A[n-2,n-1]*B[n-1,n-1]) - C[n-1,n-1] += _add(A[n-1,n-2]*B[n-2,n-1] + A[n-1,n-1]*B[n-1,n-1] + A[n-1,n]*B[n,n-1]) - C[n, n-1] += _add(A[n,n-1]*B[n-1,n-1] + A[n,n]*B[n,n-1]) - # last column of C - C[n-2, n] += _add(A[n-2,n-1]*B[n-1,n]) - C[n-1, n] += _add(A[n-1,n-1]*B[n-1,n ] + A[n-1,n]*B[n,n ]) - C[n, n] += _add(A[n,n-1]*B[n-1,n ] + A[n,n]*B[n,n ]) - end # inbounds - C -end -function __bibimul!(C, A, B, _add) - n = size(A,1) - Al = _diag(A, -1) - Ad = _diag(A, 0) - Au = _diag(A, 1) - Bl = _diag(B, -1) - Bd = _diag(B, 0) - Bu = _diag(B, 1) - @inbounds begin - for j in 3:n-2 - Aj₋2j₋1 = Au[j-2] - Aj₋1j = Au[j-1] - Ajj₊1 = Au[j] - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Ajj₋1 = Al[j-1] - Aj₊1j = Al[j] - Aj₊2j₊1 = Al[j+1] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) - C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) - C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) - end - end - C -end -function __bibimul!(C, A, B::Bidiagonal, _add) - n = size(A,1) - Al = _diag(A, -1) - Ad = _diag(A, 0) - Au = _diag(A, 1) - Bd = _diag(B, 0) - if B.uplo == 'U' - Bu = _diag(B, 1) - @inbounds begin - for j in 3:n-2 - Aj₋2j₋1 = Au[j-2] - Aj₋1j = Au[j-1] - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Ajj₋1 = Al[j-1] - Aj₊1j = Al[j] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - - C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) - C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) - C[j+1, j] += _add(Aj₊1j*Bjj) - end - end - else # B.uplo == 'L' - Bl = _diag(B, -1) - @inbounds begin - for j in 3:n-2 - Aj₋1j = Au[j-1] - Ajj₊1 
= Au[j] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Aj₊1j = Al[j] - Aj₊2j₊1 = Al[j+1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-1, j] += _add(Aj₋1j*Bjj) - C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) - C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) - end - end - end - C -end -function __bibimul!(C, A::Bidiagonal, B, _add) - n = size(A,1) - Bl = _diag(B, -1) - Bd = _diag(B, 0) - Bu = _diag(B, 1) - Ad = _diag(A, 0) - if A.uplo == 'U' - Au = _diag(A, 1) - @inbounds begin - for j in 3:n-2 - Aj₋2j₋1 = Au[j-2] - Aj₋1j = Au[j-1] - Ajj₊1 = Au[j] - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) - C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j+1, j] += _add(Aj₊1j₊1*Bj₊1j) - end - end - else # A.uplo == 'L' - Al = _diag(A, -1) - @inbounds begin - for j in 3:n-2 - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Ajj₋1 = Al[j-1] - Aj₊1j = Al[j] - Aj₊2j₊1 = Al[j+1] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j) - C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) - C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) - C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) - end - end - end - C -end -function __bibimul!(C, A::Bidiagonal, B::Bidiagonal, _add) - n = size(A,1) - Ad = _diag(A, 0) - Bd = _diag(B, 0) - if A.uplo == 'U' && B.uplo == 'U' - Au = _diag(A, 1) - Bu = _diag(B, 1) - @inbounds begin - for j in 3:n-2 - Aj₋2j₋1 = Au[j-2] - Aj₋1j = Au[j-1] - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - - C[j-2, j] += _add(Aj₋2j₋1*Bj₋1j) - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j + Aj₋1j*Bjj) - C[j, j] += _add(Ajj*Bjj) - end - end - elseif A.uplo == 'U' && B.uplo == 'L' - Au = _diag(A, 1) - Bl = _diag(B, -1) - @inbounds begin - for j in 3:n-2 - Aj₋1j = Au[j-1] - Ajj₊1 = Au[j] - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j-1, j] += _add(Aj₋1j*Bjj) - C[j, j] += _add(Ajj*Bjj + Ajj₊1*Bj₊1j) - C[j+1, j] += _add(Aj₊1j₊1*Bj₊1j) - end - end - elseif A.uplo == 'L' && B.uplo == 'U' - Al = _diag(A, -1) - Bu = _diag(B, 1) - @inbounds begin - for j in 3:n-2 - Aj₋1j₋1 = Ad[j-1] - Ajj = Ad[j] - Ajj₋1 = Al[j-1] - Aj₊1j = Al[j] - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - - C[j-1, j] += _add(Aj₋1j₋1*Bj₋1j) - C[j, j] += _add(Ajj₋1*Bj₋1j + Ajj*Bjj) - C[j+1, j] += _add(Aj₊1j*Bjj) - end - end - else # A.uplo == 'L' && B.uplo == 'L' - Al = _diag(A, -1) - Bl = _diag(B, -1) - @inbounds begin - for j in 3:n-2 - Ajj = Ad[j] - Aj₊1j₊1 = Ad[j+1] - Aj₊1j = Al[j] - Aj₊2j₊1 = Al[j+1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - - C[j, j] += _add(Ajj*Bjj) - C[j+1, j] += _add(Aj₊1j*Bjj + Aj₊1j₊1*Bj₊1j) - C[j+2, j] += _add(Aj₊2j₊1*Bj₊1j) - end - end - end - C -end - -_mul!(C::AbstractMatrix, A::BiTriSym, B::Diagonal, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) -function _mul!(C::AbstractMatrix, A::BiTriSym, B::Diagonal, _add::MulAddMul) - require_one_based_indexing(C) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - n = size(A,1) - iszero(n) && return C - _rmul_or_fill!(C, _add.beta) # see the same use above - iszero(_add.alpha) && return C - Al = _diag(A, -1) - Ad = _diag(A, 0) - Au = _diag(A, 1) - Bd = B.diag - @inbounds begin - # first row of C - for j in 1:min(2, n) - C[1,j] += _add(A[1,j]*B[j,j]) - end - # second row of C - if n > 1 - for j in 1:min(3, n) - C[2,j] += _add(A[2,j]*B[j,j]) - end - end - for j in 3:n-2 - C[j, j-1] += _add(Al[j-1]*Bd[j-1]) - C[j, j ] += _add(Ad[j ]*Bd[j ]) - C[j, j+1] += _add(Au[j ]*Bd[j+1]) - 
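The banded kernels in this region only write entries inside the result's bandwidth instead of materializing either operand. A minimal sanity sketch (illustrative only, not part of the deleted source; sizes and values are arbitrary) checks one such product against the dense reference:

```julia
using LinearAlgebra

n = 6
A = Tridiagonal(rand(n - 1), rand(n), rand(n - 1))
B = Bidiagonal(rand(n), rand(n - 1), :U)
C = zeros(n, n)

mul!(C, A, B)               # dispatches to a banded kernel rather than a dense GEMM
C ≈ Matrix(A) * Matrix(B)   # true: same result as the dense product
```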
end - if n > 3 - # row before last of C - for j in n-2:n - C[n-1,j] += _add(A[n-1,j]*B[j,j]) - end - end - # last row of C - if n > 2 - for j in n-1:n - C[n,j] += _add(A[n,j]*B[j,j]) - end - end - end # inbounds - C -end - -function _mul!(C::AbstractMatrix, A::Bidiagonal, B::Diagonal, _add::MulAddMul) - require_one_based_indexing(C) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - n = size(A,1) - iszero(n) && return C - _rmul_or_fill!(C, _add.beta) # see the same use above - iszero(_add.alpha) && return C - (; dv, ev) = A - Bd = B.diag - rowshift = A.uplo == 'U' ? -1 : 1 - evshift = Int(A.uplo == 'U') - @inbounds begin - # first row of C - C[1,1] += _add(dv[1]*Bd[1]) - if n > 1 - if A.uplo == 'L' - C[2,1] += _add(ev[1]*Bd[1]) - end - for col in 2:n-1 - C[col+rowshift, col] += _add(ev[col - evshift]*Bd[col]) - C[col, col] += _add(dv[col]*Bd[col]) - end - if A.uplo == 'U' - C[n-1,n] += _add(ev[n-1]*Bd[n]) - end - C[n, n] += _add(dv[n]*Bd[n]) - end - end # inbounds - C -end - -function _mul!(C::Bidiagonal, A::Bidiagonal, B::Diagonal, _add::MulAddMul) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - n = size(A,1) - iszero(n) && return C - iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - Adv, Aev = A.dv, A.ev - Cdv, Cev = C.dv, C.ev - Bd = B.diag - shift = Int(A.uplo == 'U') - if C.uplo == A.uplo - @inbounds begin - _modify!(_add, Adv[1]*Bd[1], Cdv, 1) - for j in eachindex(IndexLinear(), Aev, Cev) - _modify!(_add, Aev[j]*Bd[j+shift], Cev, j) - _modify!(_add, Adv[j+1]*Bd[j+1], Cdv, j+1) - end - end # inbounds - else - @inbounds begin - _modify!(_add, Adv[1]*Bd[1], Cdv, 1) - for j in eachindex(IndexLinear(), Aev, Cev) - _modify!(_add, Adv[j+1]*Bd[j+1], Cdv, j+1) - # this branch will error unless the value is zero - _modify!(_add, Aev[j]*Bd[j+shift], C, (j+1-shift, j+shift)) - # zeros of the correct type - _modify!(_add, A[j+shift, j+1-shift]*Bd[j+1-shift], Cev, j) - end - end - end - C -end - -function _mul!(C::AbstractVecOrMat, A::BiTriSym, B::AbstractVecOrMat, _add::MulAddMul) - require_one_based_indexing(C, B) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - nA = size(A,1) - nB = size(B,2) - (iszero(nA) || iszero(nB)) && return C - iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - if nA <= 3 - # naive multiplication - for I in CartesianIndices(C) - col = Base.tail(Tuple(I)) - _modify!(_add, sum(A[I[1], k] * B[k, col...] 
for k in axes(A,2)), C, I) - end - return C - end - _mul_bitrisym!(C, A, B, _add) -end -function _mul_bitrisym!(C::AbstractVecOrMat, A::Bidiagonal, B::AbstractVecOrMat, _add::MulAddMul) - nA = size(A,1) - nB = size(B,2) - d = A.dv - if A.uplo == 'U' - u = A.ev - @inbounds begin - for j = 1:nB - b₀, b₊ = B[1, j], B[2, j] - _modify!(_add, d[1]*b₀ + u[1]*b₊, C, (1, j)) - for i = 2:nA - 1 - b₀, b₊ = b₊, B[i + 1, j] - _modify!(_add, d[i]*b₀ + u[i]*b₊, C, (i, j)) - end - _modify!(_add, d[nA]*b₊, C, (nA, j)) - end - end - else - l = A.ev - @inbounds begin - for j = 1:nB - b₀, b₊ = B[1, j], B[2, j] - _modify!(_add, d[1]*b₀, C, (1, j)) - for i = 2:nA - 1 - b₋, b₀, b₊ = b₀, b₊, B[i + 1, j] - _modify!(_add, l[i - 1]*b₋ + d[i]*b₀, C, (i, j)) - end - _modify!(_add, l[nA - 1]*b₀ + d[nA]*b₊, C, (nA, j)) - end - end - end - C -end -function _mul_bitrisym!(C::AbstractVecOrMat, A::TriSym, B::AbstractVecOrMat, _add::MulAddMul) - nA = size(A,1) - nB = size(B,2) - l = _diag(A, -1) - d = _diag(A, 0) - u = _diag(A, 1) - @inbounds begin - for j = 1:nB - b₀, b₊ = B[1, j], B[2, j] - _modify!(_add, d[1]*b₀ + u[1]*b₊, C, (1, j)) - for i = 2:nA - 1 - b₋, b₀, b₊ = b₀, b₊, B[i + 1, j] - _modify!(_add, l[i - 1]*b₋ + d[i]*b₀ + u[i]*b₊, C, (i, j)) - end - _modify!(_add, l[nA - 1]*b₀ + d[nA]*b₊, C, (nA, j)) - end - end - C -end - -function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::TriSym, _add::MulAddMul) - require_one_based_indexing(C, A) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - n = size(A,1) - m = size(B,2) - (iszero(_add.alpha) || iszero(m)) && return _rmul_or_fill!(C, _add.beta) - if m == 1 - B11 = B[1,1] - return mul!(C, A, B11, _add.alpha, _add.beta) - end - Bl = _diag(B, -1) - Bd = _diag(B, 0) - Bu = _diag(B, 1) - @inbounds begin - # first and last column of C - B11 = Bd[1] - B21 = Bl[1] - Bmm = Bd[m] - Bm₋1m = Bu[m-1] - for i in 1:n - _modify!(_add, A[i,1] * B11 + A[i, 2] * B21, C, (i, 1)) - _modify!(_add, A[i, m-1] * Bm₋1m + A[i, m] * Bmm, C, (i, m)) - end - # middle columns of C - for j = 2:m-1 - Bj₋1j = Bu[j-1] - Bjj = Bd[j] - Bj₊1j = Bl[j] - for i = 1:n - _modify!(_add, A[i, j-1] * Bj₋1j + A[i, j]*Bjj + A[i, j+1] * Bj₊1j, C, (i, j)) - end - end - end # inbounds - C -end - -function _mul!(C::AbstractMatrix, A::AbstractMatrix, B::Bidiagonal, _add::MulAddMul) - require_one_based_indexing(C, A) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - m, n = size(A) - (iszero(m) || iszero(n)) && return C - iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - @inbounds if B.uplo == 'U' - for j in n:-1:2, i in 1:m - _modify!(_add, A[i,j] * B.dv[j] + A[i,j-1] * B.ev[j-1], C, (i, j)) - end - for i in 1:m - _modify!(_add, A[i,1] * B.dv[1], C, (i, 1)) - end - else # uplo == 'L' - for j in 1:n-1, i in 1:m - _modify!(_add, A[i,j] * B.dv[j] + A[i,j+1] * B.ev[j], C, (i, j)) - end - for i in 1:m - _modify!(_add, A[i,n] * B.dv[n], C, (i, n)) - end - end - C -end - -_mul!(C::AbstractMatrix, A::Diagonal, B::BiTriSym, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) -_mul!(C::AbstractMatrix, A::Diagonal, B::Bidiagonal, _add::MulAddMul) = - _dibimul!(C, A, B, _add) -_mul!(C::AbstractMatrix, A::Diagonal, B::TriSym, _add::MulAddMul) = - _dibimul!(C, A, B, _add) -function _dibimul!(C, A, B, _add) - require_one_based_indexing(C) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - n = size(A,1) - iszero(n) && return C - # ensure that we fill off-band elements in the destination - _rmul_or_fill!(C, _add.beta) - iszero(_add.alpha) && return C - if n <= 3 - # For simplicity, use a 
naive multiplication for small matrices - # that loops over all elements. - for I in CartesianIndices(C) - C[I] += _add(A.diag[I[1]] * B[I[1], I[2]]) - end - return C - end - Ad = A.diag - Bl = _diag(B, -1) - Bd = _diag(B, 0) - Bu = _diag(B, 1) - @inbounds begin - # first row of C - C[1,1] += _add(A[1,1]*B[1,1]) - C[1,2] += _add(A[1,1]*B[1,2]) - # second row of C - C[2,1] += _add(A[2,2]*B[2,1]) - C[2,2] += _add(A[2,2]*B[2,2]) - C[2,3] += _add(A[2,2]*B[2,3]) - for j in 3:n-2 - Ajj = Ad[j] - C[j, j-1] += _add(Ajj*Bl[j-1]) - C[j, j ] += _add(Ajj*Bd[j]) - C[j, j+1] += _add(Ajj*Bu[j]) - end - # row before last of C - C[n-1,n-2] += _add(A[n-1,n-1]*B[n-1,n-2]) - C[n-1,n-1] += _add(A[n-1,n-1]*B[n-1,n-1]) - C[n-1,n ] += _add(A[n-1,n-1]*B[n-1,n ]) - # last row of C - C[n,n-1] += _add(A[n,n]*B[n,n-1]) - C[n,n ] += _add(A[n,n]*B[n,n ]) - end # inbounds - C -end -function _dibimul!(C::AbstractMatrix, A::Diagonal, B::Bidiagonal, _add) - require_one_based_indexing(C) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - n = size(A,1) - iszero(n) && return C - # ensure that we fill off-band elements in the destination - _rmul_or_fill!(C, _add.beta) - iszero(_add.alpha) && return C - Ad = A.diag - Bdv, Bev = B.dv, B.ev - rowshift = B.uplo == 'U' ? -1 : 1 - evshift = Int(B.uplo == 'U') - @inbounds begin - # first row of C - C[1,1] += _add(Ad[1]*Bdv[1]) - if n > 1 - if B.uplo == 'L' - C[2,1] += _add(Ad[2]*Bev[1]) - end - for col in 2:n-1 - evrow = col+rowshift - C[evrow, col] += _add(Ad[evrow]*Bev[col - evshift]) - C[col, col] += _add(Ad[col]*Bdv[col]) - end - if B.uplo == 'U' - C[n-1,n] += _add(Ad[n-1]*Bev[n-1]) - end - C[n, n] += _add(Ad[n]*Bdv[n]) - end - end # inbounds - C -end -function _dibimul!(C::Bidiagonal, A::Diagonal, B::Bidiagonal, _add) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - n = size(A,1) - n == 0 && return C - iszero(_add.alpha) && return _rmul_or_fill!(C, _add.beta) - Ad = A.diag - Bdv, Bev = B.dv, B.ev - Cdv, Cev = C.dv, C.ev - shift = Int(B.uplo == 'L') - if C.uplo == B.uplo - @inbounds begin - _modify!(_add, Ad[1]*Bdv[1], Cdv, 1) - for j in eachindex(IndexLinear(), Bev, Cev) - _modify!(_add, Ad[j+shift]*Bev[j], Cev, j) - _modify!(_add, Ad[j+1]*Bdv[j+1], Cdv, j+1) - end - end # inbounds - else - @inbounds begin - _modify!(_add, Ad[1]*Bdv[1], Cdv, 1) - for j in eachindex(IndexLinear(), Bev, Cev) - _modify!(_add, Ad[j+1]*Bdv[j+1], Cdv, j+1) - # this branch will error unless the value is zero - _modify!(_add, Ad[j+shift]*Bev[j], C, (j+shift, j+1-shift)) - # zeros of the correct type - _modify!(_add, Ad[j+1-shift]*B[j+1-shift,j+shift], Cev, j) - end - end - end - C -end - -function *(A::UpperOrUnitUpperTriangular, B::Bidiagonal) - TS = promote_op(matprod, eltype(A), eltype(B)) - C = mul!(similar(A, TS, size(A)), A, B) - return B.uplo == 'U' ? UpperTriangular(C) : C -end - -function *(A::LowerOrUnitLowerTriangular, B::Bidiagonal) - TS = promote_op(matprod, eltype(A), eltype(B)) - C = mul!(similar(A, TS, size(A)), A, B) - return B.uplo == 'L' ? LowerTriangular(C) : C -end - -function *(A::Bidiagonal, B::UpperOrUnitUpperTriangular) - TS = promote_op(matprod, eltype(A), eltype(B)) - C = mul!(similar(B, TS, size(B)), A, B) - return A.uplo == 'U' ? UpperTriangular(C) : C -end - -function *(A::Bidiagonal, B::LowerOrUnitLowerTriangular) - TS = promote_op(matprod, eltype(A), eltype(B)) - C = mul!(similar(B, TS, size(B)), A, B) - return A.uplo == 'L' ? 
LowerTriangular(C) : C -end - -function dot(x::AbstractVector, B::Bidiagonal, y::AbstractVector) - require_one_based_indexing(x, y) - nx, ny = length(x), length(y) - (nx == size(B, 1) == ny) || throw(DimensionMismatch()) - if nx ≤ 1 - nx == 0 && return dot(zero(eltype(x)), zero(eltype(B)), zero(eltype(y))) - return dot(x[1], B.dv[1], y[1]) - end - ev, dv = B.ev, B.dv - @inbounds if B.uplo == 'U' - x₀ = x[1] - r = dot(x[1], dv[1], y[1]) - for j in 2:nx-1 - x₋, x₀ = x₀, x[j] - r += dot(adjoint(ev[j-1])*x₋ + adjoint(dv[j])*x₀, y[j]) - end - r += dot(adjoint(ev[nx-1])*x₀ + adjoint(dv[nx])*x[nx], y[nx]) - return r - else # B.uplo == 'L' - x₀ = x[1] - x₊ = x[2] - r = dot(adjoint(dv[1])*x₀ + adjoint(ev[1])*x₊, y[1]) - for j in 2:nx-1 - x₀, x₊ = x₊, x[j+1] - r += dot(adjoint(dv[j])*x₀ + adjoint(ev[j])*x₊, y[j]) - end - r += dot(x₊, dv[nx], y[nx]) - return r - end -end - -#Linear solvers -#Generic solver using naive substitution -ldiv!(A::Bidiagonal, b::AbstractVecOrMat) = @inline ldiv!(b, A, b) -function ldiv!(c::AbstractVecOrMat, A::Bidiagonal, b::AbstractVecOrMat) - require_one_based_indexing(c, A, b) - N = size(A, 2) - mb, nb = size(b, 1), size(b, 2) - if N != mb - throw(DimensionMismatch(lazy"second dimension of A, $N, does not match first dimension of b, $mb")) - end - mc, nc = size(c, 1), size(c, 2) - if mc != mb || nc != nb - throw(DimensionMismatch(lazy"size of result, ($mc, $nc), does not match the size of b, ($mb, $nb)")) - end - - if N == 0 - return copyto!(c, b) - end - - zi = findfirst(iszero, A.dv) - isnothing(zi) || throw(SingularException(zi)) - - @inbounds for j in 1:nb - if A.uplo == 'L' #do colwise forward substitution - c[1,j] = bi1 = A.dv[1] \ b[1,j] - for i in 2:N - c[i,j] = bi1 = A.dv[i] \ (b[i,j] - A.ev[i - 1] * bi1) - end - else #do colwise backward substitution - c[N,j] = bi1 = A.dv[N] \ b[N,j] - for i in (N - 1):-1:1 - c[i,j] = bi1 = A.dv[i] \ (b[i,j] - A.ev[i] * bi1) - end - end - end - return c -end -ldiv!(A::AdjOrTrans{<:Any,<:Bidiagonal}, b::AbstractVecOrMat) = @inline ldiv!(b, A, b) -ldiv!(c::AbstractVecOrMat, A::AdjOrTrans{<:Any,<:Bidiagonal}, b::AbstractVecOrMat) = - (t = wrapperop(A); _rdiv!(t(c), t(b), t(A)); return c) - -### Generic promotion methods and fallbacks -\(A::Bidiagonal, B::AbstractVecOrMat) = - ldiv!(matprod_dest(A, B, promote_op(\, eltype(A), eltype(B))), A, B) -\(xA::AdjOrTrans{<:Any,<:Bidiagonal}, B::AbstractVecOrMat) = copy(xA) \ B - -### Triangular specializations -for tri in (:UpperTriangular, :UnitUpperTriangular) - @eval function \(B::Bidiagonal, U::$tri) - A = ldiv!(matprod_dest(B, U, promote_op(\, eltype(B), eltype(U))), B, U) - return B.uplo == 'U' ? UpperTriangular(A) : A - end - @eval function \(U::$tri, B::Bidiagonal) - A = ldiv!(matprod_dest(U, B, promote_op(\, eltype(U), eltype(B))), U, B) - return B.uplo == 'U' ? UpperTriangular(A) : A - end -end -for tri in (:LowerTriangular, :UnitLowerTriangular) - @eval function \(B::Bidiagonal, L::$tri) - A = ldiv!(matprod_dest(B, L, promote_op(\, eltype(B), eltype(L))), B, L) - return B.uplo == 'L' ? LowerTriangular(A) : A - end - @eval function \(L::$tri, B::Bidiagonal) - A = ldiv!(matprod_dest(L, B, promote_op(\, eltype(L), eltype(B))), L, B) - return B.uplo == 'L' ? LowerTriangular(A) : A - end -end - -### Diagonal specialization -function \(B::Bidiagonal, D::Diagonal) - A = ldiv!(similar(D, promote_op(\, eltype(B), eltype(D)), size(D)), B, D) - return B.uplo == 'U' ? 
UpperTriangular(A) : LowerTriangular(A) -end - -function _rdiv!(C::AbstractMatrix, A::AbstractMatrix, B::Bidiagonal) - require_one_based_indexing(C, A, B) - m, n = size(A) - if size(B, 1) != n - throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $n, has size $(size(B,1))")) - end - mc, nc = size(C) - if mc != m || nc != n - throw(DimensionMismatch(lazy"expect output to have size ($m, $n), but got ($mc, $nc)")) - end - - zi = findfirst(iszero, B.dv) - isnothing(zi) || throw(SingularException(zi)) - - if B.uplo == 'L' - diagB = B.dv[n] - for i in 1:m - C[i,n] = A[i,n] / diagB - end - for j in n-1:-1:1 - diagB = B.dv[j] - offdiagB = B.ev[j] - for i in 1:m - C[i,j] = (A[i,j] - C[i,j+1]*offdiagB)/diagB - end - end - else - diagB = B.dv[1] - for i in 1:m - C[i,1] = A[i,1] / diagB - end - for j in 2:n - diagB = B.dv[j] - offdiagB = B.ev[j-1] - for i = 1:m - C[i,j] = (A[i,j] - C[i,j-1]*offdiagB)/diagB - end - end - end - C -end -rdiv!(A::AbstractMatrix, B::Bidiagonal) = @inline _rdiv!(A, A, B) -rdiv!(A::AbstractMatrix, B::AdjOrTrans{<:Any,<:Bidiagonal}) = @inline _rdiv!(A, A, B) -_rdiv!(C::AbstractMatrix, A::AbstractMatrix, B::AdjOrTrans{<:Any,<:Bidiagonal}) = - (t = wrapperop(B); ldiv!(t(C), t(B), t(A)); return C) - -/(A::AbstractMatrix, B::Bidiagonal) = - _rdiv!(similar(A, promote_op(/, eltype(A), eltype(B)), size(A)), A, B) - -### Triangular specializations -for tri in (:UpperTriangular, :UnitUpperTriangular) - @eval function /(U::$tri, B::Bidiagonal) - A = _rdiv!(matprod_dest(U, B, promote_op(/, eltype(U), eltype(B))), U, B) - return B.uplo == 'U' ? UpperTriangular(A) : A - end - @eval function /(B::Bidiagonal, U::$tri) - A = _rdiv!(matprod_dest(B, U, promote_op(/, eltype(B), eltype(U))), B, U) - return B.uplo == 'U' ? UpperTriangular(A) : A - end -end -for tri in (:LowerTriangular, :UnitLowerTriangular) - @eval function /(L::$tri, B::Bidiagonal) - A = _rdiv!(matprod_dest(L, B, promote_op(/, eltype(L), eltype(B))), L, B) - return B.uplo == 'L' ? LowerTriangular(A) : A - end - @eval function /(B::Bidiagonal, L::$tri) - A = _rdiv!(matprod_dest(B, L, promote_op(/, eltype(B), eltype(L))), B, L) - return B.uplo == 'L' ? LowerTriangular(A) : A - end -end - -### Diagonal specialization -function /(D::Diagonal, B::Bidiagonal) - A = _rdiv!(similar(D, promote_op(/, eltype(D), eltype(B)), size(D)), D, B) - return B.uplo == 'U' ? UpperTriangular(A) : LowerTriangular(A) -end - -/(A::AbstractMatrix, B::Transpose{<:Any,<:Bidiagonal}) = A / copy(B) -/(A::AbstractMatrix, B::Adjoint{<:Any,<:Bidiagonal}) = A / copy(B) -# disambiguation -/(A::AdjointAbsVec, B::Bidiagonal) = adjoint(adjoint(B) \ parent(A)) -/(A::TransposeAbsVec, B::Bidiagonal) = transpose(transpose(B) \ parent(A)) -/(A::AdjointAbsVec, B::Transpose{<:Any,<:Bidiagonal}) = adjoint(adjoint(B) \ parent(A)) -/(A::TransposeAbsVec, B::Transpose{<:Any,<:Bidiagonal}) = transpose(transpose(B) \ parent(A)) -/(A::AdjointAbsVec, B::Adjoint{<:Any,<:Bidiagonal}) = adjoint(adjoint(B) \ parent(A)) -/(A::TransposeAbsVec, B::Adjoint{<:Any,<:Bidiagonal}) = transpose(transpose(B) \ parent(A)) - -factorize(A::Bidiagonal) = A -function inv(B::Bidiagonal{T}) where T - n = size(B, 1) - dest = zeros(typeof(inv(oneunit(T))), (n, n)) - ldiv!(dest, B, Diagonal{typeof(one(T)/one(T))}(I, n)) - return B.uplo == 'U' ? 
UpperTriangular(dest) : LowerTriangular(dest) -end - -# Eigensystems -eigvals(M::Bidiagonal) = copy(M.dv) -function eigvecs(M::Bidiagonal{T}) where T - n = length(M.dv) - Q = Matrix{T}(undef, n,n) - blks = [0; findall(iszero, M.ev); n] - v = zeros(T, n) - if M.uplo == 'U' - for idx_block = 1:length(blks) - 1, i = blks[idx_block] + 1:blks[idx_block + 1] #index of eigenvector - fill!(v, zero(T)) - v[blks[idx_block] + 1] = one(T) - for j = blks[idx_block] + 1:i - 1 #Starting from j=i, eigenvector elements will be 0 - v[j+1] = (M.dv[i] - M.dv[j])/M.ev[j] * v[j] - end - c = norm(v) - for j = 1:n - Q[j, i] = v[j] / c - end - end - else - for idx_block = 1:length(blks) - 1, i = blks[idx_block + 1]:-1:blks[idx_block] + 1 #index of eigenvector - fill!(v, zero(T)) - v[blks[idx_block+1]] = one(T) - for j = (blks[idx_block+1] - 1):-1:max(1, (i - 1)) #Starting from j=i, eigenvector elements will be 0 - v[j] = (M.dv[i] - M.dv[j+1])/M.ev[j] * v[j+1] - end - c = norm(v) - for j = 1:n - Q[j, i] = v[j] / c - end - end - end - Q #Actually Triangular -end -eigen(M::Bidiagonal) = Eigen(eigvals(M), eigvecs(M)) - -Base._sum(A::Bidiagonal, ::Colon) = sum(A.dv) + sum(A.ev) -function Base._sum(A::Bidiagonal, dims::Integer) - res = Base.reducedim_initarray(A, dims, zero(eltype(A))) - n = length(A.dv) - if n == 0 - # Just to be sure. This shouldn't happen since there is a check whether - # length(A.dv) == length(A.ev) + 1 in the constructor. - return res - elseif n == 1 - res[1] = A.dv[1] - return res - end - @inbounds begin - if (dims == 1 && A.uplo == 'U') || (dims == 2 && A.uplo == 'L') - res[1] = A.dv[1] - for i = 2:length(A.dv) - res[i] = A.ev[i-1] + A.dv[i] - end - elseif (dims == 1 && A.uplo == 'L') || (dims == 2 && A.uplo == 'U') - for i = 1:length(A.dv)-1 - res[i] = A.ev[i] + A.dv[i] - end - res[end] = A.dv[end] - elseif dims >= 3 - if A.uplo == 'U' - for i = 1:length(A.dv)-1 - res[i,i] = A.dv[i] - res[i,i+1] = A.ev[i] - end - else - for i = 1:length(A.dv)-1 - res[i,i] = A.dv[i] - res[i+1,i] = A.ev[i] - end - end - res[end,end] = A.dv[end] - end - end - res -end diff --git a/stdlib/LinearAlgebra/src/bitarray.jl b/stdlib/LinearAlgebra/src/bitarray.jl deleted file mode 100644 index ccc9138d227a3..0000000000000 --- a/stdlib/LinearAlgebra/src/bitarray.jl +++ /dev/null @@ -1,272 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -function dot(x::BitVector, y::BitVector) - # simplest way to mimic Array dot behavior - length(x) == length(y) || throw(DimensionMismatch()) - s = 0 - xc = x.chunks - yc = y.chunks - @inbounds for i = 1:length(xc) - s += count_ones(xc[i] & yc[i]) - end - s -end - -## slower than the unpacked version, which is MUCH slower -# than blas'd (this one saves storage though, keeping it commented -# just in case) -#function aTb(A::BitMatrix, B::BitMatrix) - #(mA, nA) = size(A) - #(mB, nB) = size(B) - #C = falses(nA, nB) - #if mA != mB; throw(DimensionMismatch()) end - #if mA == 0; return C; end - #col_ch = num_bit_chunks(mA) - ## TODO: avoid using aux chunks and copy (?) 
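Behaviourally, the packed-chunk `dot` for `BitVector`s above is just a popcount of the bitwise AND. A small sketch with made-up inputs (illustrative only, not part of the deleted source):

```julia
using LinearAlgebra

x = BitVector([true, false, true, true])
y = BitVector([true, true, false, true])

dot(x, y)                   # 2: the number of positions where both bits are set
dot(x, y) == count(x .& y)  # true
```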
- #aux_chunksA = zeros(UInt64, col_ch) - #aux_chunksB = [zeros(UInt64, col_ch) for j=1:nB] - #for j = 1:nB - #Base.copy_chunks!(aux_chunksB[j], 1, B.chunks, (j-1)*mA+1, mA) - #end - #for i = 1:nA - #Base.copy_chunks!(aux_chunksA, 1, A.chunks, (i-1)*mA+1, mA) - #for j = 1:nB - #for k = 1:col_ch - ## TODO: improve - #C[i, j] += count_ones(aux_chunksA[k] & aux_chunksB[j][k]) - #end - #end - #end - #C -#end - -#aCb(A::BitMatrix{T}, B::BitMatrix{S}) where {T,S} = aTb(A, B) - -function triu(B::BitMatrix, k::Integer=0) - m,n = size(B) - if !(-m + 1 <= k <= n + 1) - throw(ArgumentError(string("the requested diagonal, $k, must be at least", - "$(-m + 1) and at most $(n + 1) in an $m-by-$n matrix"))) - end - A = falses(m,n) - Ac = A.chunks - Bc = B.chunks - for i = max(k+1,1):n - j = clamp((i - 1) * m + 1, 1, i * m) - Base.copy_chunks!(Ac, j, Bc, j, min(i-k, m)) - end - A -end - -function tril(B::BitMatrix, k::Integer=0) - m,n = size(B) - if !(-m - 1 <= k <= n - 1) - throw(ArgumentError(string("the requested diagonal, $k, must be at least ", - "$(-m - 1) and at most $(n - 1) in an $m-by-$n matrix"))) - end - A = falses(m, n) - Ac = A.chunks - Bc = B.chunks - for i = 1:min(n, m+k) - j = clamp((i - 1) * m + i - k, 1, i * m) - Base.copy_chunks!(Ac, j, Bc, j, max(m-i+k+1, 0)) - end - A -end - -## diag - -function diag(B::BitMatrix) - n = minimum(size(B)) - v = similar(B, n) - for i = 1:n - v[i] = B[i,i] - end - v -end - -## norm and rank - -svd(A::BitMatrix) = svd(float(A)) -qr(A::BitMatrix) = qr(float(A)) - -## kron - -@inline function kron!(R::BitVector, a::BitVector, b::BitVector) - m = length(a) - n = length(b) - @boundscheck length(R) == n*m || throw(DimensionMismatch()) - Rc = R.chunks - bc = b.chunks - for j = 1:m - a[j] && Base.copy_chunks!(Rc, (j-1)*n+1, bc, 1, n) - end - return R -end - -function kron(a::BitVector, b::BitVector) - m = length(a) - n = length(b) - R = falses(n * m) - return @inbounds kron!(R, a, b) -end - -function kron!(R::BitMatrix, a::BitMatrix, b::BitMatrix) - mA,nA = size(a) - mB,nB = size(b) - @boundscheck size(R) == (mA*mB, nA*nB) || throw(DimensionMismatch()) - - for i = 1:mA - ri = (1:mB) .+ ((i-1)*mB) - for j = 1:nA - if a[i,j] - rj = (1:nB) .+ ((j-1)*nB) - R[ri,rj] = b - end - end - end - return R -end - -function kron(a::BitMatrix, b::BitMatrix) - mA,nA = size(a) - mB,nB = size(b) - R = falses(mA*mB, nA*nB) - return @inbounds kron!(R, a, b) -end - -## Structure query functions - -issymmetric(A::BitMatrix) = size(A, 1)==size(A, 2) && count(!iszero, A - copy(A'))==0 -ishermitian(A::BitMatrix) = issymmetric(A) - -function nonzero_chunks(chunks::Vector{UInt64}, pos0::Int, pos1::Int) - k0, l0 = Base.get_chunks_id(pos0) - k1, l1 = Base.get_chunks_id(pos1) - - delta_k = k1 - k0 - - z = UInt64(0) - u = ~z - if delta_k == 0 - msk_0 = (u << l0) & ~(u << l1 << 1) - else - msk_0 = (u << l0) - msk_1 = ~(u << l1 << 1) - end - - @inbounds begin - (chunks[k0] & msk_0) == z || return true - delta_k == 0 && return false - for i = k0 + 1 : k1 - 1 - chunks[i] == z || return true - end - (chunks[k1] & msk_1)==z || return true - end - return false -end - -function istriu(A::BitMatrix) - m, n = size(A) - for j = 1:min(n,m-1) - stride = (j-1) * m - nonzero_chunks(A.chunks, stride+j+1, stride+m) && return false - end - return true -end - -function istril(A::BitMatrix) - m, n = size(A) - (m == 0 || n == 0) && return true - for j = 2:n - stride = (j-1) * m - nonzero_chunks(A.chunks, stride+1, stride+min(j-1,m)) && return false - end - return true -end - -# fast 8x8 bit transpose from Henry S. 
Warrens's "Hacker's Delight" -# https://www.hackersdelight.org/hdcodetxt/transpose8.c.txt -function transpose8x8(x::UInt64) - y = x - t = xor(y, y >>> 7) & 0x00aa00aa00aa00aa - y = xor(y, t, t << 7) - t = xor(y, y >>> 14) & 0x0000cccc0000cccc - y = xor(y, t, t << 14) - t = xor(y, y >>> 28) & 0x00000000f0f0f0f0 - return xor(y, t, t << 28) -end - -function form_8x8_chunk(Bc::Vector{UInt64}, i1::Int, i2::Int, m::Int, cgap::Int, cinc::Int, nc::Int, msk8::UInt64) - x = UInt64(0) - - k, l = Base.get_chunks_id(i1 + (i2 - 1) * m) - r = 0 - for j = 1:8 - k > nc && break - x |= ((Bc[k] >>> l) & msk8) << r - if l + 8 >= 64 && nc > k - r0 = 8 - Base._mod64(l + 8) - x |= (Bc[k + 1] & (msk8 >>> r0)) << (r + r0) - end - k += cgap + (l + cinc >= 64 ? 1 : 0) - l = Base._mod64(l + cinc) - r += 8 - end - return x -end - -# note: assumes B is filled with 0's -function put_8x8_chunk(Bc::Vector{UInt64}, i1::Int, i2::Int, x::UInt64, m::Int, cgap::Int, cinc::Int, nc::Int, msk8::UInt64) - k, l = Base.get_chunks_id(i1 + (i2 - 1) * m) - r = 0 - for j = 1:8 - k > nc && break - Bc[k] |= ((x >>> r) & msk8) << l - if l + 8 >= 64 && nc > k - r0 = 8 - Base._mod64(l + 8) - Bc[k + 1] |= ((x >>> (r + r0)) & (msk8 >>> r0)) - end - k += cgap + (l + cinc >= 64 ? 1 : 0) - l = Base._mod64(l + cinc) - r += 8 - end - return -end - -adjoint(B::Union{BitVector,BitMatrix}) = Adjoint(B) -transpose(B::Union{BitVector,BitMatrix}) = Transpose(B) -Base.copy(B::Adjoint{Bool,BitMatrix}) = transpose!(falses(size(B)), B.parent) -Base.copy(B::Transpose{Bool,BitMatrix}) = transpose!(falses(size(B)), B.parent) -function transpose!(C::BitMatrix, B::BitMatrix) - @boundscheck size(C) == reverse(size(B)) || throw(DimensionMismatch()) - l1, l2 = size(B) - - cgap1, cinc1 = Base._div64(l1), Base._mod64(l1) - cgap2, cinc2 = Base._div64(l2), Base._mod64(l2) - - Bc = B.chunks - Cc = C.chunks - - nc = length(Bc) - - for i = 1:8:l1 - msk8_1 = UInt64(0xff) - if (l1 < i + 7) - msk8_1 >>>= i + 7 - l1 - end - - for j = 1:8:l2 - x = form_8x8_chunk(Bc, i, j, l1, cgap1, cinc1, nc, msk8_1) - x = transpose8x8(x) - - msk8_2 = UInt64(0xff) - if (l2 < j + 7) - msk8_2 >>>= j + 7 - l2 - end - - put_8x8_chunk(Cc, j, i, x, l2, cgap2, cinc2, nc, msk8_2) - end - end - return C -end diff --git a/stdlib/LinearAlgebra/src/blas.jl b/stdlib/LinearAlgebra/src/blas.jl deleted file mode 100644 index 3c15630091162..0000000000000 --- a/stdlib/LinearAlgebra/src/blas.jl +++ /dev/null @@ -1,2258 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -""" -Interface to BLAS subroutines. 
-""" -module BLAS - -using Base: require_one_based_indexing, USE_BLAS64 - -export -# Note: `xFUNC_NAME` is a placeholder for not exported BLAS functions -# ref: https://www.netlib.org/blas/blasqr.pdf -# Level 1 - # xROTG - # xROTMG - rot!, - # xROTM - # xSWAP - scal!, - scal, - blascopy!, - # xAXPY!, - # xAXPBY!, - # xDOT - dotc, - dotu, - # xxDOT - nrm2, - asum, - iamax, -# Level 2 - gemv!, - gemv, - gbmv!, - gbmv, - hemv!, - hemv, - # xHBMV - hpmv!, - symv!, - symv, - sbmv!, - sbmv, - spmv!, - trmv!, - trmv, - # xTBMV - # xTPMV - trsv!, - trsv, - # xTBSV - # xTPSV - ger!, - geru!, - # xGERU - # xGERC - her!, - # xHPR - # xHER2 - # xHPR2 - syr!, - spr!, - # xSYR2 - # xSPR2 -# Level 3 - gemmt!, - gemmt, - gemm!, - gemm, - symm!, - symm, - hemm!, - hemm, - syrk!, - syrk, - herk!, - herk, - syr2k!, - syr2k, - her2k!, - her2k, - trmm!, - trmm, - trsm!, - trsm - -using ..LinearAlgebra: libblastrampoline, BlasReal, BlasComplex, BlasFloat, BlasInt, DimensionMismatch, checksquare, chkstride1 - -include("lbt.jl") - -# Legacy bindings that some packages (such as NNlib.jl) use. -# We maintain these for backwards-compatibility but new packages -# should not look at these, instead preferring to parse the output -# of BLAS.get_config() -const libblas = libblastrampoline -const liblapack = libblastrampoline - -vendor() = :lbt - -""" - get_config() - -Return an object representing the current `libblastrampoline` configuration. - -!!! compat "Julia 1.7" - `get_config()` requires at least Julia 1.7. -""" -get_config() = lbt_get_config() - -if USE_BLAS64 - macro blasfunc(x) - return Expr(:quote, Symbol(x, "64_")) - end -else - macro blasfunc(x) - return Expr(:quote, x) - end -end - -_tryparse_env_int(key) = tryparse(Int, get(ENV, key, "")) - - -""" - set_num_threads(n::Integer) - set_num_threads(::Nothing) - -Set the number of threads the BLAS library should use equal to `n::Integer`. - -Also accepts `nothing`, in which case julia tries to guess the default number of threads. -Passing `nothing` is discouraged and mainly exists for historical reasons. -""" -set_num_threads(nt::Integer)::Nothing = lbt_set_num_threads(Int32(nt)) -function set_num_threads(::Nothing) - nt = something( - _tryparse_env_int("OPENBLAS_NUM_THREADS"), - _tryparse_env_int("OMP_NUM_THREADS"), - _tryparse_env_int("VECLIB_MAXIMUM_THREADS"), - max(1, Sys.CPU_THREADS ÷ 2), - ) - return set_num_threads(nt) -end - -""" - get_num_threads() - -Get the number of threads the BLAS library is using. - -!!! compat "Julia 1.6" - `get_num_threads` requires at least Julia 1.6. -""" -get_num_threads()::Int = lbt_get_num_threads() - -function check() - # TODO: once we have bitfields of the BLAS functions that are actually forwarded, - # ensure that we have a complete set here (warning on an incomplete BLAS implementation) - config = get_config() - - # Ensure that one of our loaded libraries satisfies our interface requirement - interface = USE_BLAS64 ? :ilp64 : :lp64 - if !any(lib.interface == interface for lib in config.loaded_libs) - interfacestr = uppercase(string(interface)) - println(Core.stderr, "No loaded BLAS libraries were built with $interfacestr support.") - exit(1) - end -end - -"Check that upper/lower (for special matrices) is correctly specified" -function chkuplo(uplo::AbstractChar) - if !(uplo == 'U' || uplo == 'L') - throw(ArgumentError(lazy"uplo argument must be 'U' (upper) or 'L' (lower), got '$uplo'")) - end - uplo -end - -# Level 1 -# A help function to pick the pointer and inc for 1d like inputs. 
-@inline function vec_pointer_stride(x::AbstractArray, stride0check = nothing) - Base._checkcontiguous(Bool, x) && return pointer(x), 1 # simplify runtime check when possible - st, ptr = checkedstride(x), pointer(x) - isnothing(stride0check) || (st == 0 && throw(stride0check)) - ptr += min(st, 0) * sizeof(eltype(x)) * (length(x) - 1) - ptr, st -end -function checkedstride(x::AbstractArray) - szs::Dims = size(x) - sts::Dims = strides(x) - _, st, n = Base.merge_adjacent_dim(szs, sts) - n === ndims(x) && return st - throw(ArgumentError("only support vector like inputs")) -end -## copy - -""" - blascopy!(n, X, incx, Y, incy) - -Copy `n` elements of array `X` with stride `incx` to array `Y` with stride `incy`. Returns `Y`. -""" -function blascopy! end - -for (fname, elty) in ((:dcopy_,:Float64), - (:scopy_,:Float32), - (:zcopy_,:ComplexF64), - (:ccopy_,:ComplexF32)) - @eval begin - # SUBROUTINE DCOPY(N,DX,INCX,DY,INCY) - function blascopy!(n::Integer, DX::Union{Ptr{$elty},AbstractArray{$elty}}, incx::Integer, DY::Union{Ptr{$elty},AbstractArray{$elty}}, incy::Integer) - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}), - n, DX, incx, DY, incy) - DY - end - end -end - - -## rot - -""" - rot!(n, X, incx, Y, incy, c, s) - -Overwrite `X` with `c*X + s*Y` and `Y` with `-conj(s)*X + c*Y` for the first `n` elements of array `X` with stride `incx` and -first `n` elements of array `Y` with stride `incy`. Returns `X` and `Y`. - -!!! compat "Julia 1.5" - `rot!` requires at least Julia 1.5. -""" -function rot! end - -for (fname, elty, cty, sty, lib) in ((:drot_, :Float64, :Float64, :Float64, libblastrampoline), - (:srot_, :Float32, :Float32, :Float32, libblastrampoline), - (:zdrot_, :ComplexF64, :Float64, :Float64, libblastrampoline), - (:csrot_, :ComplexF32, :Float32, :Float32, libblastrampoline), - (:zrot_, :ComplexF64, :Float64, :ComplexF64, libblastrampoline), - (:crot_, :ComplexF32, :Float32, :ComplexF32, libblastrampoline)) - @eval begin - # SUBROUTINE DROT(N,DX,INCX,DY,INCY,C,S) - function rot!(n::Integer, DX::Union{Ptr{$elty},AbstractArray{$elty}}, incx::Integer, DY::Union{Ptr{$elty},AbstractArray{$elty}}, incy::Integer, C::$cty, S::$sty) - ccall((@blasfunc($fname), $lib), Cvoid, - (Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{$cty}, Ref{$sty}), - n, DX, incx, DY, incy, C, S) - DX, DY - end - end -end - -## scal - -""" - scal!(n, a, X, incx) - scal!(a, X) - -Overwrite `X` with `a*X` for the first `n` elements of array `X` with stride `incx`. Returns `X`. - -If `n` and `incx` are not provided, `length(X)` and `stride(X,1)` are used. -""" -function scal! end - -""" - scal(n, a, X, incx) - scal(a, X) - -Return `X` scaled by `a` for the first `n` elements of array `X` with stride `incx`. - -If `n` and `incx` are not provided, `length(X)` and `stride(X,1)` are used. 
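A short usage sketch for the scaling routines documented above (illustrative only, not part of the deleted source; the input vector is arbitrary):

```julia
using LinearAlgebra: BLAS

x = [1.0, 2.0, 3.0]
BLAS.scal!(2.0, x)      # in place: x is now [2.0, 4.0, 6.0]
y = BLAS.scal(2.0, x)   # out of place: x is left untouched
```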
-""" -function scal end - -for (fname, elty) in ((:dscal_,:Float64), - (:sscal_,:Float32), - (:zscal_,:ComplexF64), - (:cscal_,:ComplexF32)) - @eval begin - # SUBROUTINE DSCAL(N,DA,DX,INCX) - function scal!(n::Integer, DA::$elty, DX::Union{Ptr{$elty},AbstractArray{$elty}}, incx::Integer) - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}), - n, DA, DX, incx) - DX - end - - function scal!(DA::$elty, DX::AbstractArray{$elty}) - p, st = vec_pointer_stride(DX, ArgumentError("dest vector with 0 stride is not allowed")) - GC.@preserve DX scal!(length(DX), DA, p, abs(st)) - DX - end - end -end -scal(n, DA, DX, incx) = scal!(n, DA, copy(DX), incx) -scal(DA, DX) = scal!(DA, copy(DX)) - -## dot - -""" - dot(n, X, incx, Y, incy) - -Dot product of two vectors consisting of `n` elements of array `X` with stride `incx` and -`n` elements of array `Y` with stride `incy`. - -# Examples -```jldoctest -julia> BLAS.dot(10, fill(1.0, 10), 1, fill(1.0, 20), 2) -10.0 -``` -""" -function dot end - -""" - dotc(n, X, incx, U, incy) - -Dot function for two complex vectors, consisting of `n` elements of array `X` -with stride `incx` and `n` elements of array `U` with stride `incy`, -conjugating the first vector. - -# Examples -```jldoctest -julia> BLAS.dotc(10, fill(1.0im, 10), 1, fill(1.0+im, 20), 2) -10.0 - 10.0im -``` -""" -function dotc end - -""" - dotu(n, X, incx, Y, incy) - -Dot function for two complex vectors consisting of `n` elements of array `X` -with stride `incx` and `n` elements of array `Y` with stride `incy`. - -# Examples -```jldoctest -julia> BLAS.dotu(10, fill(1.0im, 10), 1, fill(1.0+im, 20), 2) --10.0 + 10.0im -``` -""" -function dotu end - -for (fname, elty) in ((:cblas_ddot,:Float64), - (:cblas_sdot,:Float32)) - @eval begin - # DOUBLE PRECISION FUNCTION DDOT(N,DX,INCX,DY,INCY) - # * .. Scalar Arguments .. - # INTEGER INCX,INCY,N - # * .. - # * .. Array Arguments .. - # DOUBLE PRECISION DX(*),DY(*) - function dot(n::Integer, DX::Union{Ptr{$elty},AbstractArray{$elty}}, incx::Integer, DY::Union{Ptr{$elty},AbstractArray{$elty}}, incy::Integer) - ccall((@blasfunc($fname), libblastrampoline), $elty, - (BlasInt, Ptr{$elty}, BlasInt, Ptr{$elty}, BlasInt), - n, DX, incx, DY, incy) - end - end -end -for (fname, elty) in ((:cblas_zdotc_sub,:ComplexF64), - (:cblas_cdotc_sub,:ComplexF32)) - @eval begin - # DOUBLE PRECISION FUNCTION DDOT(N,DX,INCX,DY,INCY) - # * .. Scalar Arguments .. - # INTEGER INCX,INCY,N - # * .. - # * .. Array Arguments .. - # DOUBLE PRECISION DX(*),DY(*) - function dotc(n::Integer, DX::Union{Ptr{$elty},AbstractArray{$elty}}, incx::Integer, DY::Union{Ptr{$elty},AbstractArray{$elty}}, incy::Integer) - result = Ref{$elty}() - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (BlasInt, Ptr{$elty}, BlasInt, Ptr{$elty}, BlasInt, Ptr{$elty}), - n, DX, incx, DY, incy, result) - result[] - end - end -end -for (fname, elty) in ((:cblas_zdotu_sub,:ComplexF64), - (:cblas_cdotu_sub,:ComplexF32)) - @eval begin - # DOUBLE PRECISION FUNCTION DDOT(N,DX,INCX,DY,INCY) - # * .. Scalar Arguments .. - # INTEGER INCX,INCY,N - # * .. - # * .. Array Arguments .. 
- # DOUBLE PRECISION DX(*),DY(*) - function dotu(n::Integer, DX::Union{Ptr{$elty},AbstractArray{$elty}}, incx::Integer, DY::Union{Ptr{$elty},AbstractArray{$elty}}, incy::Integer) - result = Ref{$elty}() - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (BlasInt, Ptr{$elty}, BlasInt, Ptr{$elty}, BlasInt, Ptr{$elty}), - n, DX, incx, DY, incy, result) - result[] - end - end -end - -for (elty, f) in ((Float32, :dot), (Float64, :dot), - (ComplexF32, :dotc), (ComplexF64, :dotc), - (ComplexF32, :dotu), (ComplexF64, :dotu)) - @eval begin - function $f(x::AbstractArray{$elty}, y::AbstractArray{$elty}) - n, m = length(x), length(y) - n == m || throw(DimensionMismatch(lazy"dot product arguments have lengths $n and $m")) - GC.@preserve x y $f(n, vec_pointer_stride(x)..., vec_pointer_stride(y)...) - end - end -end - -## nrm2 - -""" - nrm2(n, X, incx) - -2-norm of a vector consisting of `n` elements of array `X` with stride `incx`. - -# Examples -```jldoctest -julia> BLAS.nrm2(4, fill(1.0, 8), 2) -2.0 - -julia> BLAS.nrm2(1, fill(1.0, 8), 2) -1.0 -``` -""" -function nrm2 end - -for (fname, elty, ret_type) in ((:dnrm2_,:Float64,:Float64), - (:snrm2_,:Float32,:Float32), - (:dznrm2_,:ComplexF64,:Float64), - (:scnrm2_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE DNRM2(N,X,INCX) - function nrm2(n::Integer, X::Union{Ptr{$elty},AbstractArray{$elty}}, incx::Integer) - ccall((@blasfunc($fname), libblastrampoline), $ret_type, - (Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}), - n, X, incx) - end - end -end -# openblas returns 0 for negative stride -function nrm2(x::AbstractArray) - p, st = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - GC.@preserve x nrm2(length(x), p, abs(st)) -end - -## asum - -""" - asum(n, X, incx) - -Sum of the magnitudes of the first `n` elements of array `X` with stride `incx`. - -For a real array, the magnitude is the absolute value. For a complex array, the -magnitude is the sum of the absolute value of the real part and the absolute value -of the imaginary part. - -# Examples -```jldoctest -julia> BLAS.asum(5, fill(1.0im, 10), 2) -5.0 - -julia> BLAS.asum(2, fill(1.0im, 10), 5) -2.0 -``` -""" -function asum end - -for (fname, elty, ret_type) in ((:dasum_,:Float64,:Float64), - (:sasum_,:Float32,:Float32), - (:dzasum_,:ComplexF64,:Float64), - (:scasum_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE ASUM(N, X, INCX) - function asum(n::Integer, X::Union{Ptr{$elty},AbstractArray{$elty}}, incx::Integer) - ccall((@blasfunc($fname), libblastrampoline), $ret_type, - (Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}), - n, X, incx) - end - end -end -function asum(x::AbstractArray) - p, st = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - GC.@preserve x asum(length(x), p, abs(st)) -end - -## axpy - -""" - axpy!(a, X, Y) - -Overwrite `Y` with `X*a + Y`, where `a` is a scalar. Return `Y`. - -# Examples -```jldoctest -julia> x = [1.; 2; 3]; - -julia> y = [4. ;; 5 ;; 6]; - -julia> BLAS.axpy!(2, x, y) -1×3 Matrix{Float64}: - 6.0 9.0 12.0 -``` -""" -function axpy! end - -for (fname, elty) in ((:daxpy_,:Float64), - (:saxpy_,:Float32), - (:zaxpy_,:ComplexF64), - (:caxpy_,:ComplexF32)) - @eval begin - # SUBROUTINE DAXPY(N,DA,DX,INCX,DY,INCY) - # DY <- DA*DX + DY - #* .. Scalar Arguments .. - # DOUBLE PRECISION DA - # INTEGER INCX,INCY,N - #* .. Array Arguments .. 
- # DOUBLE PRECISION DX(*),DY(*) - function axpy!(n::Integer, alpha::($elty), dx::Union{Ptr{$elty}, AbstractArray{$elty}}, incx::Integer, dy::Union{Ptr{$elty}, AbstractArray{$elty}}, incy::Integer) - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}), - n, alpha, dx, incx, dy, incy) - dy - end - end -end - -function axpy!(alpha::Number, x::AbstractArray{T}, y::AbstractArray{T}) where T<:BlasFloat - if length(x) != length(y) - throw(DimensionMismatch(lazy"x has length $(length(x)), but y has length $(length(y))")) - end - GC.@preserve x y axpy!(length(x), T(alpha), vec_pointer_stride(x)..., - vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed"))...) - y -end - -function axpy!(alpha::Number, x::Array{T}, rx::AbstractRange{Ti}, - y::Array{T}, ry::AbstractRange{Ti}) where {T<:BlasFloat,Ti<:Integer} - if length(rx) != length(ry) - throw(DimensionMismatch("ranges of differing lengths")) - end - if minimum(rx) < 1 || maximum(rx) > length(x) - throw(ArgumentError(lazy"range out of bounds for x, of length $(length(x))")) - end - if minimum(ry) < 1 || maximum(ry) > length(y) - throw(ArgumentError(lazy"range out of bounds for y, of length $(length(y))")) - end - GC.@preserve x y axpy!( - length(rx), - T(alpha), - pointer(x, minimum(rx)), - step(rx), - pointer(y, minimum(ry)), - step(ry)) - - return y -end - -""" - axpby!(a, X, b, Y) - -Overwrite `Y` with `X*a + Y*b`, where `a` and `b` are scalars. Return `Y`. - -# Examples -```jldoctest -julia> x = [1., 2, 3]; - -julia> y = [4., 5, 6]; - -julia> BLAS.axpby!(2., x, 3., y) -3-element Vector{Float64}: - 14.0 - 19.0 - 24.0 -``` -""" -function axpby! end - -for (fname, elty) in ((:daxpby_,:Float64), (:saxpby_,:Float32), - (:zaxpby_,:ComplexF64), (:caxpby_,:ComplexF32)) - @eval begin - # SUBROUTINE DAXPBY(N,DA,DX,INCX,DB,DY,INCY) - # DY <- DA*DX + DB*DY - #* .. Scalar Arguments .. - # DOUBLE PRECISION DA,DB - # INTEGER INCX,INCY,N - #* .. Array Arguments .. - # DOUBLE PRECISION DX(*),DY(*) - function axpby!(n::Integer, alpha::($elty), dx::Union{Ptr{$elty}, - AbstractArray{$elty}}, incx::Integer, beta::($elty), - dy::Union{Ptr{$elty}, AbstractArray{$elty}}, incy::Integer) - ccall((@blasfunc($fname), libblastrampoline), Cvoid, (Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}), - n, alpha, dx, incx, beta, dy, incy) - dy - end - end -end - -function axpby!(alpha::Number, x::AbstractArray{T}, beta::Number, y::AbstractArray{T}) where T<:BlasFloat - require_one_based_indexing(x, y) - if length(x) != length(y) - throw(DimensionMismatch(lazy"x has length $(length(x)), but y has length $(length(y))")) - end - GC.@preserve x y axpby!(length(x), T(alpha), vec_pointer_stride(x)..., T(beta), - vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed"))...) - y -end - -## iamax -for (fname, elty) in ((:idamax_,:Float64), - (:isamax_,:Float32), - (:izamax_,:ComplexF64), - (:icamax_,:ComplexF32)) - @eval begin - function iamax(n::Integer, dx::Union{Ptr{$elty}, AbstractArray{$elty}}, incx::Integer) - ccall((@blasfunc($fname), libblastrampoline),BlasInt, - (Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}), - n, dx, incx) - end - end -end -function iamax(dx::AbstractArray) - p, st = vec_pointer_stride(dx) - st <= 0 && return BlasInt(0) - iamax(length(dx), p, st) -end - -""" - iamax(n, dx, incx) - iamax(dx) - -Find the index of the element of `dx` with the maximum absolute value. 
`n` is the length of `dx`, and `incx` is the -stride. If `n` and `incx` are not provided, they assume default values of `n=length(dx)` and `incx=stride1(dx)`. -""" -iamax - -# Level 2 -## mv -### gemv -for (fname, elty) in ((:dgemv_,:Float64), - (:sgemv_,:Float32), - (:zgemv_,:ComplexF64), - (:cgemv_,:ComplexF32)) - @eval begin - #SUBROUTINE DGEMV(TRANS,M,N,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) - #* .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER INCX,INCY,LDA,M,N - # CHARACTER TRANS - #* .. Array Arguments .. - # DOUBLE PRECISION A(LDA,*),X(*),Y(*) - function gemv!(trans::AbstractChar, alpha::Union{($elty), Bool}, - A::AbstractVecOrMat{$elty}, X::AbstractVector{$elty}, - beta::Union{($elty), Bool}, Y::AbstractVector{$elty}) - require_one_based_indexing(A, X, Y) - m,n = size(A,1),size(A,2) - if trans == 'N' && (length(X) != n || length(Y) != m) - throw(DimensionMismatch(lazy"A has dimensions $(size(A)), X has length $(length(X)) and Y has length $(length(Y))")) - elseif trans == 'C' && (length(X) != m || length(Y) != n) - throw(DimensionMismatch(lazy"the adjoint of A has dimensions $n, $m, X has length $(length(X)) and Y has length $(length(Y))")) - elseif trans == 'T' && (length(X) != m || length(Y) != n) - throw(DimensionMismatch(lazy"the transpose of A has dimensions $n, $m, X has length $(length(X)) and Y has length $(length(Y))")) - end - chkstride1(A) - lda = stride(A,2) - pX, sX = vec_pointer_stride(X, ArgumentError("input vector with 0 stride is not allowed")) - pY, sY = vec_pointer_stride(Y, ArgumentError("dest vector with 0 stride is not allowed")) - pA = pointer(A) - if lda < 0 - pA += (size(A, 2) - 1) * lda * sizeof($elty) - lda = -lda - trans == 'N' ? (sX = -sX) : (sY = -sY) - end - lda >= size(A,1) || size(A,2) <= 1 || error("when `size(A,2) > 1`, `abs(stride(A,2))` must be at least `size(A,1)`") - lda = max(1, size(A,1), lda) - GC.@preserve A X Y ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Clong), - trans, size(A,1), size(A,2), alpha, - pA, lda, pX, sX, - beta, pY, sY, 1) - Y - end - function gemv(trans::AbstractChar, alpha::($elty), A::AbstractMatrix{$elty}, X::AbstractVector{$elty}) - gemv!(trans, alpha, A, X, zero($elty), similar(X, $elty, size(A, (trans == 'N' ? 1 : 2)))) - end - function gemv(trans::AbstractChar, A::AbstractMatrix{$elty}, X::AbstractVector{$elty}) - gemv!(trans, one($elty), A, X, zero($elty), similar(X, $elty, size(A, (trans == 'N' ? 1 : 2)))) - end - end -end - -""" - gemv!(tA, alpha, A, x, beta, y) - -Update the vector `y` as `alpha*A*x + beta*y` or `alpha*A'x + beta*y` -according to [`tA`](@ref stdlib-blas-trans). -`alpha` and `beta` are scalars. Return the updated `y`. -""" -gemv! - -""" - gemv(tA, alpha, A, x) - -Return `alpha*A*x` or `alpha*A'x` according to [`tA`](@ref stdlib-blas-trans). -`alpha` is a scalar. -""" -gemv(tA, alpha, A, x) - -""" - gemv(tA, A, x) - -Return `A*x` or `A'x` according to [`tA`](@ref stdlib-blas-trans). -""" -gemv(tA, A, x) - -### (GB) general banded matrix-vector multiplication - -""" - gbmv!(trans, m, kl, ku, alpha, A, x, beta, y) - -Update vector `y` as `alpha*A*x + beta*y` or `alpha*A'*x + beta*y` according to [`trans`](@ref stdlib-blas-trans). -The matrix `A` is a general band matrix of dimension `m` by `size(A,2)` with `kl` -sub-diagonals and `ku` super-diagonals. `alpha` and `beta` are scalars. Return the updated `y`. -""" -function gbmv! 
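# Illustrative sketch for the `gemv`/`gemv!` wrappers documented above (values assumed):
using LinearAlgebra: BLAS
A = [1.0 2.0; 3.0 4.0]
x = [1.0, 1.0]
BLAS.gemv('N', A, x)                 # A*x  == [3.0, 7.0]
BLAS.gemv('T', A, x)                 # A'x  == [4.0, 6.0]
y = zeros(2)
BLAS.gemv!('N', 2.0, A, x, 0.0, y)   # y .= 2*A*x == [6.0, 14.0]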
end - -""" - gbmv(trans, m, kl, ku, alpha, A, x) - -Return `alpha*A*x` or `alpha*A'*x` according to [`trans`](@ref stdlib-blas-trans). -The matrix `A` is a general band matrix of dimension `m` by `size(A,2)` with `kl` sub-diagonals and `ku` -super-diagonals, and `alpha` is a scalar. -""" -function gbmv end - -for (fname, elty) in ((:dgbmv_,:Float64), - (:sgbmv_,:Float32), - (:zgbmv_,:ComplexF64), - (:cgbmv_,:ComplexF32)) - @eval begin - # SUBROUTINE DGBMV(TRANS,M,N,KL,KU,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER INCX,INCY,KL,KU,LDA,M,N - # CHARACTER TRANS - # * .. Array Arguments .. - # DOUBLE PRECISION A(LDA,*),X(*),Y(*) - function gbmv!(trans::AbstractChar, m::Integer, kl::Integer, ku::Integer, - alpha::Union{($elty), Bool}, A::AbstractMatrix{$elty}, - x::AbstractVector{$elty}, beta::Union{($elty), Bool}, - y::AbstractVector{$elty}) - require_one_based_indexing(A, x, y) - chkstride1(A) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed")) - GC.@preserve x y ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Clong), - trans, m, size(A,2), kl, - ku, alpha, A, max(1,stride(A,2)), - px, stx, beta, py, sty, 1) - y - end - function gbmv(trans::AbstractChar, m::Integer, kl::Integer, ku::Integer, alpha::($elty), A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - n = size(A,2) - leny = trans == 'N' ? m : n - gbmv!(trans, m, kl, ku, alpha, A, x, zero($elty), similar(x, $elty, leny)) - end - function gbmv(trans::AbstractChar, m::Integer, kl::Integer, ku::Integer, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - gbmv(trans, m, kl, ku, one($elty), A, x) - end - end -end - -### symv - -""" - symv!(ul, alpha, A, x, beta, y) - -Update the vector `y` as `alpha*A*x + beta*y`. `A` is assumed to be symmetric. -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -`alpha` and `beta` are scalars. Return the updated `y`. -""" -function symv! end - -for (fname, elty, lib) in ((:dsymv_,:Float64,libblastrampoline), - (:ssymv_,:Float32,libblastrampoline), - (:zsymv_,:ComplexF64,libblastrampoline), - (:csymv_,:ComplexF32,libblastrampoline)) - # Note that the complex symv are not BLAS but auiliary functions in LAPACK - @eval begin - # SUBROUTINE DSYMV(UPLO,N,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) - # .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER INCX,INCY,LDA,N - # CHARACTER UPLO - # .. Array Arguments .. 
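# Illustrative sketch of the band storage expected by `gbmv` above. Assumption:
# the standard BLAS band layout A[ku+1+i-j, j] = M[i, j]; with kl = ku = 0 the
# band array is a single row holding the main diagonal.
using LinearAlgebra: BLAS
band = reshape([2.0, 3.0, 4.0], 1, 3)     # band storage of Diagonal([2, 3, 4])
x = [1.0, 1.0, 1.0]
BLAS.gbmv('N', 3, 0, 0, 1.0, band, x)     # == [2.0, 3.0, 4.0]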
- # DOUBLE PRECISION A(LDA,*),X(*),Y(*) - function symv!(uplo::AbstractChar, alpha::Union{($elty), Bool}, - A::AbstractMatrix{$elty}, x::AbstractVector{$elty}, - beta::Union{($elty), Bool}, y::AbstractVector{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, x, y) - m, n = size(A) - if m != n - throw(DimensionMismatch(lazy"matrix A is $m by $n but must be square")) - end - if n != length(x) - throw(DimensionMismatch(lazy"A has size $(size(A)), and x has length $(length(x))")) - end - if m != length(y) - throw(DimensionMismatch(lazy"A has size $(size(A)), and y has length $(length(y))")) - end - chkstride1(A) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed")) - GC.@preserve x y ccall((@blasfunc($fname), $lib), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, - Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, alpha, A, - max(1,stride(A,2)), px, stx, beta, - py, sty, 1) - y - end - function symv(uplo::AbstractChar, alpha::($elty), A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - symv!(uplo, alpha, A, x, zero($elty), similar(x)) - end - function symv(uplo::AbstractChar, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - symv(uplo, one($elty), A, x) - end - end -end - -""" - symv(ul, alpha, A, x) - -Return `alpha*A*x`. `A` is assumed to be symmetric. -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -`alpha` is a scalar. -""" -symv(ul, alpha, A, x) - -""" - symv(ul, A, x) - -Return `A*x`. `A` is assumed to be symmetric. -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -""" -symv(ul, A, x) - -### hemv -""" - hemv!(ul, alpha, A, x, beta, y) - -Update the vector `y` as `alpha*A*x + beta*y`. `A` is assumed to be Hermitian. -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -`alpha` and `beta` are scalars. Return the updated `y`. -""" -function hemv! end - -for (fname, elty) in ((:zhemv_,:ComplexF64), - (:chemv_,:ComplexF32)) - @eval begin - function hemv!(uplo::AbstractChar, α::Union{$elty, Bool}, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}, β::Union{$elty, Bool}, y::AbstractVector{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, x, y) - m, n = size(A) - if m != n - throw(DimensionMismatch(lazy"matrix A is $m by $n but must be square")) - end - if n != length(x) - throw(DimensionMismatch(lazy"A has size $(size(A)), and x has length $(length(x))")) - end - if m != length(y) - throw(DimensionMismatch(lazy"A has size $(size(A)), and y has length $(length(y))")) - end - chkstride1(A) - lda = max(1, stride(A, 2)) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed")) - GC.@preserve x y ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, - Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, α, A, - lda, px, stx, β, - py, sty, 1) - y - end - function hemv(uplo::AbstractChar, α::($elty), A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - hemv!(uplo, α, A, x, zero($elty), similar(x)) - end - function hemv(uplo::AbstractChar, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - hemv(uplo, one($elty), A, x) - end - end -end - -""" - hemv(ul, alpha, A, x) - -Return `alpha*A*x`. `A` is assumed to be Hermitian. 
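# Illustrative sketch for `symv` above: only the named triangle is read, so the
# strict lower triangle can hold arbitrary values (99.0 here is a dummy sentinel).
using LinearAlgebra: BLAS
A = [1.0  2.0;
     99.0 3.0]                     # read as Symmetric([1 2; 2 3]) with uplo = 'U'
BLAS.symv('U', A, [1.0, 1.0])      # == [3.0, 5.0]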
-Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -`alpha` is a scalar. -""" -hemv(ul, alpha, A, x) - -""" - hemv(ul, A, x) - -Return `A*x`. `A` is assumed to be Hermitian. -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -""" -hemv(ul, A, x) - -### hpmv!, (HP) Hermitian packed matrix-vector operation defined as y := alpha*A*x + beta*y. -for (fname, elty) in ((:zhpmv_, :ComplexF64), - (:chpmv_, :ComplexF32)) - @eval begin - # SUBROUTINE ZHPMV(UPLO,N,ALPHA,AP,X,INCX,BETA,Y,INCY) - # Y <- ALPHA*AP*X + BETA*Y - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER INCX,INCY,N - # CHARACTER UPLO - # * .. Array Arguments .. - # DOUBLE PRECISION A(N,N),X(N),Y(N) - function hpmv!(uplo::AbstractChar, - n::Integer, - α::$elty, - AP::Union{Ptr{$elty}, AbstractArray{$elty}}, - x::Union{Ptr{$elty}, AbstractArray{$elty}}, - incx::Integer, - β::$elty, - y::Union{Ptr{$elty}, AbstractArray{$elty}}, - incy::Integer) - - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, # uplo, - Ref{BlasInt}, # n, - Ref{$elty}, # α, - Ptr{$elty}, # AP, - Ptr{$elty}, # x, - Ref{BlasInt}, # incx, - Ref{$elty}, # β, - Ptr{$elty}, # y, output - Ref{BlasInt}, # incy - Clong), # length of uplo - uplo, - n, - α, - AP, - x, - incx, - β, - y, - incy, - 1) - return y - end - end -end - -function hpmv!(uplo::AbstractChar, - α::Number, AP::AbstractArray{T}, x::AbstractArray{T}, - β::Number, y::AbstractArray{T}) where {T <: BlasComplex} - require_one_based_indexing(AP, x, y) - N = length(x) - if N != length(y) - throw(DimensionMismatch(lazy"x has length $(N), but y has length $(length(y))")) - end - if 2*length(AP) < N*(N + 1) - throw(DimensionMismatch(lazy"Packed hermitian matrix A has size smaller than length(x) = $(N).")) - end - chkstride1(AP) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed")) - GC.@preserve x y hpmv!(uplo, N, T(α), AP, px, stx, T(β), py, sty) - y -end - -""" - hpmv!(uplo, α, AP, x, β, y) - -Update vector `y` as `α*A*x + β*y`, where `A` is a Hermitian matrix provided -in packed format `AP`. - -With `uplo = 'U'`, the array AP must contain the upper triangular part of the -Hermitian matrix packed sequentially, column by column, so that `AP[1]` -contains `A[1, 1]`, `AP[2]` and `AP[3]` contain `A[1, 2]` and `A[2, 2]` -respectively, and so on. - -With `uplo = 'L'`, the array AP must contain the lower triangular part of the -Hermitian matrix packed sequentially, column by column, so that `AP[1]` -contains `A[1, 1]`, `AP[2]` and `AP[3]` contain `A[2, 1]` and `A[3, 1]` -respectively, and so on. - -The scalar inputs `α` and `β` must be complex or real numbers. - -The array inputs `x`, `y` and `AP` must all be of `ComplexF32` or `ComplexF64` type. - -Return the updated `y`. - -!!! compat "Julia 1.5" - `hpmv!` requires at least Julia 1.5. -""" -hpmv! - -### sbmv, (SB) symmetric banded matrix-vector multiplication -for (fname, elty) in ((:dsbmv_,:Float64), - (:ssbmv_,:Float32)) - @eval begin - # SUBROUTINE DSBMV(UPLO,N,K,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER INCX,INCY,K,LDA,N - # CHARACTER UPLO - # * .. Array Arguments .. 
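# Illustrative sketch of the packed layout used by `hpmv!` above: with uplo = 'U'
# the upper triangle is packed column by column, so for the 2×2 Hermitian matrix
# [1 2; 2 3] the packed vector is AP = [A[1,1], A[1,2], A[2,2]]. Values assumed.
using LinearAlgebra: BLAS
AP = ComplexF64[1, 2, 3]
x  = ComplexF64[1, 1]
y  = zeros(ComplexF64, 2)
BLAS.hpmv!('U', 1.0, AP, x, 0.0, y)   # y == [3.0 + 0.0im, 5.0 + 0.0im]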
- # DOUBLE PRECISION A(LDA,*),X(*),Y(*) - function sbmv!(uplo::AbstractChar, k::Integer, alpha::($elty), A::AbstractMatrix{$elty}, x::AbstractVector{$elty}, beta::($elty), y::AbstractVector{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, x, y) - chkstride1(A) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed")) - GC.@preserve x y ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, size(A,2), k, alpha, - A, max(1,stride(A,2)), px, stx, - beta, py, sty, 1) - y - end - function sbmv(uplo::AbstractChar, k::Integer, alpha::($elty), A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - n = size(A,2) - sbmv!(uplo, k, alpha, A, x, zero($elty), similar(x, $elty, n)) - end - function sbmv(uplo::AbstractChar, k::Integer, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - sbmv(uplo, k, one($elty), A, x) - end - end -end - -""" - sbmv(uplo, k, alpha, A, x) - -Return `alpha*A*x` where `A` is a symmetric band matrix of order `size(A,2)` with `k` -super-diagonals stored in the argument `A`. -Only the [`uplo`](@ref stdlib-blas-uplo) triangle of `A` is used. -""" -sbmv(uplo, k, alpha, A, x) - -""" - sbmv(uplo, k, A, x) - -Return `A*x` where `A` is a symmetric band matrix of order `size(A,2)` with `k` -super-diagonals stored in the argument `A`. -Only the [`uplo`](@ref stdlib-blas-uplo) triangle of `A` is used. -""" -sbmv(uplo, k, A, x) - -""" - sbmv!(uplo, k, alpha, A, x, beta, y) - -Update vector `y` as `alpha*A*x + beta*y` where `A` is a symmetric band matrix of order -`size(A,2)` with `k` super-diagonals stored in the argument `A`. The storage layout for `A` -is described the reference BLAS module, level-2 BLAS at -. -Only the [`uplo`](@ref stdlib-blas-uplo) triangle of `A` is used. - -Return the updated `y`. -""" -sbmv! - -### spmv!, (SP) symmetric packed matrix-vector operation defined as y := alpha*A*x + beta*y. -for (fname, elty) in ((:dspmv_, :Float64), - (:sspmv_, :Float32)) - @eval begin - # SUBROUTINE DSPMV(UPLO,N,ALPHA,AP,X,INCX,BETA,Y,INCY) - # Y <- ALPHA*AP*X + BETA*Y - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER INCX,INCY,N - # CHARACTER UPLO - # * .. Array Arguments .. 
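# Illustrative sketch of the symmetric band storage used by `sbmv` above.
# Assumption: standard LAPACK layout for uplo = 'U' with k = 1 — row k+1 holds the
# main diagonal, row 1 the superdiagonal, and A[1,1] is unused. The band below
# encodes SymTridiagonal([2, 2, 2], [1, 1]).
using LinearAlgebra: BLAS
band = [0.0 1.0 1.0;
        2.0 2.0 2.0]
BLAS.sbmv('U', 1, band, [1.0, 1.0, 1.0])   # == [3.0, 4.0, 3.0]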
- # DOUBLE PRECISION A(N,N),X(N),Y(N) - function spmv!(uplo::AbstractChar, - n::Integer, - α::$elty, - AP::Union{Ptr{$elty}, AbstractArray{$elty}}, - x::Union{Ptr{$elty}, AbstractArray{$elty}}, - incx::Integer, - β::$elty, - y::Union{Ptr{$elty}, AbstractArray{$elty}}, - incy::Integer) - - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, # uplo, - Ref{BlasInt}, # n, - Ref{$elty}, # α, - Ptr{$elty}, # AP, - Ptr{$elty}, # x, - Ref{BlasInt}, # incx, - Ref{$elty}, # β, - Ptr{$elty}, # y, out - Ref{BlasInt}, # incy - Clong), # length of uplo - uplo, - n, - α, - AP, - x, - incx, - β, - y, - incy, - 1) - return y - end - end -end - -function spmv!(uplo::AbstractChar, - α::Real, AP::AbstractArray{T}, x::AbstractArray{T}, - β::Real, y::AbstractArray{T}) where {T <: BlasReal} - require_one_based_indexing(AP, x, y) - N = length(x) - if N != length(y) - throw(DimensionMismatch(lazy"x has length $(N), but y has length $(length(y))")) - end - if 2*length(AP) < N*(N + 1) - throw(DimensionMismatch(lazy"Packed symmetric matrix A has size smaller than length(x) = $(N).")) - end - chkstride1(AP) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed")) - GC.@preserve x y spmv!(uplo, N, T(α), AP, px, stx, T(β), py, sty) - y -end - -""" - spmv!(uplo, α, AP, x, β, y) - -Update vector `y` as `α*A*x + β*y`, where `A` is a symmetric matrix provided -in packed format `AP`. - -With `uplo = 'U'`, the array AP must contain the upper triangular part of the -symmetric matrix packed sequentially, column by column, so that `AP[1]` -contains `A[1, 1]`, `AP[2]` and `AP[3]` contain `A[1, 2]` and `A[2, 2]` -respectively, and so on. - -With `uplo = 'L'`, the array AP must contain the lower triangular part of the -symmetric matrix packed sequentially, column by column, so that `AP[1]` -contains `A[1, 1]`, `AP[2]` and `AP[3]` contain `A[2, 1]` and `A[3, 1]` -respectively, and so on. - -The scalar inputs `α` and `β` must be real. - -The array inputs `x`, `y` and `AP` must all be of `Float32` or `Float64` type. - -Return the updated `y`. - -!!! compat "Julia 1.5" - `spmv!` requires at least Julia 1.5. -""" -spmv! - -### spr!, (SP) symmetric packed matrix-vector operation defined as A := alpha*x*x' + A -for (fname, elty) in ((:dspr_, :Float64), - (:sspr_, :Float32)) - @eval begin - function spr!(uplo::AbstractChar, - n::Integer, - α::$elty, - x::Union{Ptr{$elty}, AbstractArray{$elty}}, - incx::Integer, - AP::Union{Ptr{$elty}, AbstractArray{$elty}}) - - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, # uplo, - Ref{BlasInt}, # n, - Ref{$elty}, # α, - Ptr{$elty}, # x, - Ref{BlasInt}, # incx, - Ptr{$elty}, # AP, - Clong), # length of uplo - uplo, - n, - α, - x, - incx, - AP, - 1) - return AP - end - end -end - -function spr!(uplo::AbstractChar, - α::Real, x::AbstractArray{T}, - AP::AbstractArray{T}) where {T <: BlasReal} - chkuplo(uplo) - require_one_based_indexing(AP, x) - N = length(x) - if 2*length(AP) < N*(N + 1) - throw(DimensionMismatch(lazy"Packed symmetric matrix A has size smaller than length(x) = $(N).")) - end - chkstride1(AP) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - return GC.@preserve x spr!(uplo, N, T(α), px, stx , AP) -end - -""" - spr!(uplo, α, x, AP) - -Update matrix `A` as `A+α*x*x'`, where `A` is a symmetric matrix provided -in packed format `AP` and `x` is a vector. 
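# Real analogue of the packed Hermitian update shown earlier, using the same
# 'U' packing convention for `spmv!` above (illustrative values only):
using LinearAlgebra: BLAS
AP = [1.0, 2.0, 3.0]                            # packed upper triangle of [1 2; 2 3]
y  = zeros(2)
BLAS.spmv!('U', 1.0, AP, [1.0, 1.0], 0.0, y)    # y == [3.0, 5.0]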
- -With `uplo = 'U'`, the array AP must contain the upper triangular part of the -symmetric matrix packed sequentially, column by column, so that `AP[1]` -contains `A[1, 1]`, `AP[2]` and `AP[3]` contain `A[1, 2]` and `A[2, 2]` -respectively, and so on. - -With `uplo = 'L'`, the array AP must contain the lower triangular part of the -symmetric matrix packed sequentially, column by column, so that `AP[1]` -contains `A[1, 1]`, `AP[2]` and `AP[3]` contain `A[2, 1]` and `A[3, 1]` -respectively, and so on. - -The scalar input `α` must be real. - -The array inputs `x` and `AP` must all be of `Float32` or `Float64` type. -Return the updated `AP`. - -!!! compat "Julia 1.8" - `spr!` requires at least Julia 1.8. -""" -spr! - -### hbmv, (HB) Hermitian banded matrix-vector multiplication -for (fname, elty) in ((:zhbmv_,:ComplexF64), - (:chbmv_,:ComplexF32)) - @eval begin - # SUBROUTINE ZHBMV(UPLO,N,K,ALPHA,A,LDA,X,INCX,BETA,Y,INCY) - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER INCX,INCY,K,LDA,N - # CHARACTER UPLO - # * .. Array Arguments .. - # DOUBLE PRECISION A(LDA,*),X(*),Y(*) - function hbmv!(uplo::AbstractChar, k::Integer, alpha::($elty), A::AbstractMatrix{$elty}, x::AbstractVector{$elty}, beta::($elty), y::AbstractVector{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, x, y) - chkstride1(A) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("dest vector with 0 stride is not allowed")) - GC.@preserve x y ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, size(A,2), k, alpha, - A, max(1,stride(A,2)), px, stx, - beta, py, sty, 1) - y - end - function hbmv(uplo::AbstractChar, k::Integer, alpha::($elty), A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - n = size(A,2) - hbmv!(uplo, k, alpha, A, x, zero($elty), similar(x, $elty, n)) - end - function hbmv(uplo::AbstractChar, k::Integer, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - hbmv(uplo, k, one($elty), A, x) - end - end -end - -### trmv, Triangular matrix-vector multiplication - -""" - trmv(ul, tA, dA, A, b) - -Return `op(A)*b`, where `op` is determined by [`tA`](@ref stdlib-blas-trans). -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -[`dA`](@ref stdlib-blas-diag) determines if the diagonal values are read or -are assumed to be all ones. -""" -function trmv end - -""" - trmv!(ul, tA, dA, A, b) - -Return `op(A)*b`, where `op` is determined by [`tA`](@ref stdlib-blas-trans). -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -[`dA`](@ref stdlib-blas-diag) determines if the diagonal values are read or -are assumed to be all ones. -The multiplication occurs in-place on `b`. -""" -function trmv! end - -for (fname, elty) in ((:dtrmv_,:Float64), - (:strmv_,:Float32), - (:ztrmv_,:ComplexF64), - (:ctrmv_,:ComplexF32)) - @eval begin - # SUBROUTINE DTRMV(UPLO,TRANS,DIAG,N,A,LDA,X,INCX) - # * .. Scalar Arguments .. - # INTEGER INCX,LDA,N - # CHARACTER DIAG,TRANS,UPLO - # * .. Array Arguments .. 
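# Illustrative sketch for the packed rank-1 update `spr!` and the triangular
# multiply `trmv` documented above (values assumed): x*x' for x = [1, 2] is
# [1 2; 2 4], whose packed upper triangle is [1, 2, 4].
using LinearAlgebra: BLAS
AP = zeros(3)
BLAS.spr!('U', 1.0, [1.0, 2.0], AP)                        # AP == [1.0, 2.0, 4.0]
BLAS.trmv('U', 'N', 'N', [1.0 2.0; 0.0 3.0], [1.0, 1.0])   # == [3.0, 3.0]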
- # DOUBLE PRECISION A(LDA,*),X(*) - function trmv!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, x) - n = checksquare(A) - if n != length(x) - throw(DimensionMismatch(lazy"A has size ($n,$n), x has length $(length(x))")) - end - chkstride1(A) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - GC.@preserve x ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Clong, Clong, Clong), - uplo, trans, diag, n, - A, max(1,stride(A,2)), px, stx, 1, 1, 1) - x - end - function trmv(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - trmv!(uplo, trans, diag, A, copy(x)) - end - end -end - -### trsv, Triangular matrix-vector solve - -""" - trsv!(ul, tA, dA, A, b) - -Overwrite `b` with the solution to `A*x = b` or one of the other two variants determined by -[`tA`](@ref stdlib-blas-trans) and [`ul`](@ref stdlib-blas-uplo). -[`dA`](@ref stdlib-blas-diag) determines if the diagonal values are read or -are assumed to be all ones. -Return the updated `b`. -""" -function trsv! end - -""" - trsv(ul, tA, dA, A, b) - -Return the solution to `A*x = b` or one of the other two variants determined by -[`tA`](@ref stdlib-blas-trans) and [`ul`](@ref stdlib-blas-uplo). -[`dA`](@ref stdlib-blas-diag) determines if the diagonal values are read or -are assumed to be all ones. -""" -function trsv end - -for (fname, elty) in ((:dtrsv_,:Float64), - (:strsv_,:Float32), - (:ztrsv_,:ComplexF64), - (:ctrsv_,:ComplexF32)) - @eval begin - # SUBROUTINE DTRSV(UPLO,TRANS,DIAG,N,A,LDA,X,INCX) - # .. Scalar Arguments .. - # INTEGER INCX,LDA,N - # CHARACTER DIAG,TRANS,UPLO - # .. Array Arguments .. - # DOUBLE PRECISION A(LDA,*),X(*) - function trsv!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, x) - n = checksquare(A) - if n != length(x) - throw(DimensionMismatch(lazy"size of A is $n != length(x) = $(length(x))")) - end - chkstride1(A) - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - GC.@preserve x ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Clong, Clong, Clong), - uplo, trans, diag, n, - A, max(1,stride(A,2)), px, stx, 1, 1, 1) - x - end - function trsv(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::AbstractMatrix{$elty}, x::AbstractVector{$elty}) - trsv!(uplo, trans, diag, A, copy(x)) - end - end -end - -### ger - -""" - ger!(alpha, x, y, A) - -Rank-1 update of the matrix `A` with vectors `x` and `y` as `alpha*x*y' + A`. -""" -function ger! 
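# Illustrative sketch for the triangular solve `trsv` above (values assumed):
using LinearAlgebra: BLAS
A = [2.0 1.0;
     0.0 4.0]
BLAS.trsv('U', 'N', 'N', A, [3.0, 4.0])   # solves A*x = b, giving [1.0, 1.0]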
end - -for (fname, elty) in ((:dger_,:Float64), - (:sger_,:Float32), - (:zgerc_,:ComplexF64), - (:cgerc_,:ComplexF32)) - @eval begin - function ger!(α::$elty, x::AbstractVector{$elty}, y::AbstractVector{$elty}, A::AbstractMatrix{$elty}) - require_one_based_indexing(A, x, y) - m, n = size(A) - if m != length(x) || n != length(y) - throw(DimensionMismatch(lazy"A has size ($m,$n), x has length $(length(x)), y has length $(length(y))")) - end - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("input vector with 0 stride is not allowed")) - GC.@preserve x y ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}), - m, n, α, px, stx, py, sty, A, max(1,stride(A,2))) - A - end - end -end - -### geru - -""" - geru!(alpha, x, y, A) - -Rank-1 update of the matrix `A` with vectors `x` and `y` as `alpha*x*transpose(y) + A`. -""" -function geru! end - -for (fname, elty) in ((:zgeru_,:ComplexF64), (:cgeru_,:ComplexF32)) - @eval begin - function geru!(α::$elty, x::AbstractVector{$elty}, y::AbstractVector{$elty}, A::AbstractMatrix{$elty}) - require_one_based_indexing(A, x, y) - m, n = size(A) - if m != length(x) || n != length(y) - throw(DimensionMismatch(lazy"A has size ($m,$n), x has length $(length(x)), y has length $(length(y))")) - end - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - py, sty = vec_pointer_stride(y, ArgumentError("input vector with 0 stride is not allowed")) - GC.@preserve x y ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}), - m, n, α, px, stx, py, sty, A, max(1,stride(A,2))) - A - end - end -end -for elty in (:Float64, :Float32) - @eval begin - geru!(α::$elty, x::AbstractVector{$elty}, y::AbstractVector{$elty}, A::AbstractMatrix{$elty}) = - ger!(α, x, y, A) - end -end - -### syr - -""" - syr!(uplo, alpha, x, A) - -Rank-1 update of the symmetric matrix `A` with vector `x` as `alpha*x*transpose(x) + A`. -[`uplo`](@ref stdlib-blas-uplo) controls which triangle of `A` is updated. Returns `A`. -""" -function syr! end - -for (fname, elty, lib) in ((:dsyr_,:Float64,libblastrampoline), - (:ssyr_,:Float32,libblastrampoline), - (:zsyr_,:ComplexF64,libblastrampoline), - (:csyr_,:ComplexF32,libblastrampoline)) - @eval begin - function syr!(uplo::AbstractChar, α::$elty, x::AbstractVector{$elty}, A::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, x) - n = checksquare(A) - if length(x) != n - throw(DimensionMismatch(lazy"A has size ($n,$n), x has length $(length(x))")) - end - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - GC.@preserve x ccall((@blasfunc($fname), $lib), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}), - uplo, n, α, px, stx, A, max(1,stride(A, 2))) - A - end - end -end - -### her - -""" - her!(uplo, alpha, x, A) - -Methods for complex arrays only. Rank-1 update of the Hermitian matrix `A` with vector `x` -as `alpha*x*x' + A`. -[`uplo`](@ref stdlib-blas-uplo) controls which triangle of `A` is updated. Returns `A`. -""" -function her! 
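# Illustrative sketch for the rank-1 updates `ger!` and `syr!` defined above
# (values assumed; `syr!` touches only the requested triangle):
using LinearAlgebra: BLAS
A = zeros(2, 2)
BLAS.ger!(1.0, [1.0, 2.0], [3.0, 4.0], A)   # A == [3.0 4.0; 6.0 8.0]
S = zeros(2, 2)
BLAS.syr!('U', 1.0, [1.0, 2.0], S)          # upper triangle of x*x': S == [1.0 2.0; 0.0 4.0]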
end - -for (fname, elty, relty) in ((:zher_,:ComplexF64, :Float64), - (:cher_,:ComplexF32, :Float32)) - @eval begin - function her!(uplo::AbstractChar, α::$relty, x::AbstractVector{$elty}, A::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, x) - n = checksquare(A) - if length(x) != n - throw(DimensionMismatch(lazy"A has size ($n,$n), x has length $(length(x))")) - end - px, stx = vec_pointer_stride(x, ArgumentError("input vector with 0 stride is not allowed")) - GC.@preserve x ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{$relty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, α, px, stx, A, max(1,stride(A,2)), 1) - A - end - end -end - -# Level 3 -## (GE) general matrix-matrix multiplication - -""" - gemmt!(uplo, tA, tB, alpha, A, B, beta, C) - -Update the lower or upper triangular part specified by [`uplo`](@ref stdlib-blas-uplo) of `C` as -`alpha*A*B + beta*C` or the other variants according to [`tA`](@ref stdlib-blas-trans) and `tB`. -Return the updated `C`. - -!!! compat "Julia 1.11" - `gemmt!` requires at least Julia 1.11. -""" -function gemmt! end - -for (gemmt, elty) in - ((:dgemmt_,:Float64), - (:sgemmt_,:Float32), - (:zgemmt_,:ComplexF64), - (:cgemmt_,:ComplexF32)) - @eval begin - # SUBROUTINE DGEMMT(UPLO,TRANSA,TRANSB,N,K,ALPHA,A,LDA,B,LDB,BETA,C,LDC) - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER K,LDA,LDB,LDC,N - # CHARACTER UPLO,TRANSA,TRANSB - # * .. Array Arguments .. - # DOUBLE PRECISION A(LDA,*),B(LDB,*),C(LDC,*) - function gemmt!(uplo::AbstractChar, transA::AbstractChar, transB::AbstractChar, - alpha::Union{($elty), Bool}, - A::AbstractVecOrMat{$elty}, B::AbstractVecOrMat{$elty}, - beta::Union{($elty), Bool}, - C::AbstractVecOrMat{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, B, C) - m = size(A, transA == 'N' ? 1 : 2) - ka = size(A, transA == 'N' ? 2 : 1) - kb = size(B, transB == 'N' ? 1 : 2) - n = size(B, transB == 'N' ? 2 : 1) - if ka != kb || m != n || m != size(C,1) || n != size(C,2) - throw(DimensionMismatch(lazy"A has size ($m,$ka), B has size ($kb,$n), C has size $(size(C))")) - end - chkstride1(A) - chkstride1(B) - chkstride1(C) - ccall((@blasfunc($gemmt), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Clong, Clong, Clong), - uplo, transA, transB, n, - ka, alpha, A, max(1,stride(A,2)), - B, max(1,stride(B,2)), beta, C, - max(1,stride(C,2)), 1, 1, 1) - C - end - function gemmt(uplo::AbstractChar, transA::AbstractChar, transB::AbstractChar, alpha::($elty), A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - gemmt!(uplo, transA, transB, alpha, A, B, zero($elty), similar(B, $elty, (size(A, transA == 'N' ? 1 : 2), size(B, transB == 'N' ? 2 : 1)))) - end - function gemmt(uplo::AbstractChar, transA::AbstractChar, transB::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - gemmt(uplo, transA, transB, one($elty), A, B) - end - end -end - -""" - gemmt(uplo, tA, tB, alpha, A, B) - -Return the lower or upper triangular part specified by [`uplo`](@ref stdlib-blas-uplo) of `A*B` or the other three variants according to [`tA`](@ref stdlib-blas-trans) and `tB`. - -!!! compat "Julia 1.11" - `gemmt` requires at least Julia 1.11. 
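# Illustrative sketch for `gemmt!` above. Assumptions: Julia ≥ 1.11 and a BLAS
# backend that actually provides *gemmt; only the requested triangle of C is
# written, so the strict lower triangle keeps its previous contents.
using LinearAlgebra: BLAS
A = [1.0 2.0; 3.0 4.0]
C = zeros(2, 2)
BLAS.gemmt!('U', 'N', 'T', 1.0, A, A, 0.0, C)   # upper triangle of A*A': C == [5.0 11.0; 0.0 25.0]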
-""" -gemmt(uplo, tA, tB, alpha, A, B) - -""" - gemmt(uplo, tA, tB, A, B) - -Return the lower or upper triangular part specified by [`uplo`](@ref stdlib-blas-uplo) of `A*B` or the other three variants according to [`tA`](@ref stdlib-blas-trans) and `tB`. - -!!! compat "Julia 1.11" - `gemmt` requires at least Julia 1.11. -""" -gemmt(uplo, tA, tB, A, B) - -""" - gemm!(tA, tB, alpha, A, B, beta, C) - -Update `C` as `alpha*A*B + beta*C` or the other three variants according to -[`tA`](@ref stdlib-blas-trans) and `tB`. Return the updated `C`. -""" -function gemm! end - -for (gemm, elty) in - ((:dgemm_,:Float64), - (:sgemm_,:Float32), - (:zgemm_,:ComplexF64), - (:cgemm_,:ComplexF32)) - @eval begin - # SUBROUTINE DGEMM(TRANSA,TRANSB,M,N,K,ALPHA,A,LDA,B,LDB,BETA,C,LDC) - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER K,LDA,LDB,LDC,M,N - # CHARACTER TRANSA,TRANSB - # * .. Array Arguments .. - # DOUBLE PRECISION A(LDA,*),B(LDB,*),C(LDC,*) - function gemm!(transA::AbstractChar, transB::AbstractChar, - alpha::Union{($elty), Bool}, - A::AbstractVecOrMat{$elty}, B::AbstractVecOrMat{$elty}, - beta::Union{($elty), Bool}, - C::AbstractVecOrMat{$elty}) -# if any([stride(A,1), stride(B,1), stride(C,1)] .!= 1) -# error("gemm!: BLAS module requires contiguous matrix columns") -# end # should this be checked on every call? - require_one_based_indexing(A, B, C) - m = size(A, transA == 'N' ? 1 : 2) - ka = size(A, transA == 'N' ? 2 : 1) - kb = size(B, transB == 'N' ? 1 : 2) - n = size(B, transB == 'N' ? 2 : 1) - if ka != kb || m != size(C,1) || n != size(C,2) - throw(DimensionMismatch(lazy"A has size ($m,$ka), B has size ($kb,$n), C has size $(size(C))")) - end - chkstride1(A) - chkstride1(B) - chkstride1(C) - ccall((@blasfunc($gemm), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Clong, Clong), - transA, transB, m, n, - ka, alpha, A, max(1,stride(A,2)), - B, max(1,stride(B,2)), beta, C, - max(1,stride(C,2)), 1, 1) - C - end - function gemm(transA::AbstractChar, transB::AbstractChar, alpha::($elty), A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - gemm!(transA, transB, alpha, A, B, zero($elty), similar(B, $elty, (size(A, transA == 'N' ? 1 : 2), size(B, transB == 'N' ? 2 : 1)))) - end - function gemm(transA::AbstractChar, transB::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - gemm(transA, transB, one($elty), A, B) - end - end -end - -""" - gemm(tA, tB, alpha, A, B) - -Return `alpha*A*B` or the other three variants according to [`tA`](@ref stdlib-blas-trans) and `tB`. -""" -gemm(tA, tB, alpha, A, B) - -""" - gemm(tA, tB, A, B) - -Return `A*B` or the other three variants according to [`tA`](@ref stdlib-blas-trans) and `tB`. -""" -gemm(tA, tB, A, B) - - -## (SY) symmetric matrix-matrix and matrix-vector multiplication -for (mfname, elty) in ((:dsymm_,:Float64), - (:ssymm_,:Float32), - (:zsymm_,:ComplexF64), - (:csymm_,:ComplexF32)) - @eval begin - # SUBROUTINE DSYMM(SIDE,UPLO,M,N,ALPHA,A,LDA,B,LDB,BETA,C,LDC) - # .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER LDA,LDB,LDC,M,N - # CHARACTER SIDE,UPLO - # .. Array Arguments .. 
- # DOUBLE PRECISION A(LDA,*),B(LDB,*),C(LDC,*) - function symm!(side::AbstractChar, uplo::AbstractChar, alpha::Union{($elty), Bool}, - A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}, - beta::Union{($elty), Bool}, C::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, B, C) - m, n = size(C) - j = checksquare(A) - M, N = size(B) - if side == 'L' - if j != m - throw(DimensionMismatch(lazy"A has first dimension $j but needs to match first dimension of C, $m")) - end - if N != n - throw(DimensionMismatch(lazy"B has second dimension $N but needs to match second dimension of C, $n")) - end - if j != M - throw(DimensionMismatch(lazy"A has second dimension $j but needs to match first dimension of B, $M")) - end - else - if j != n - throw(DimensionMismatch(lazy"B has second dimension $j but needs to match second dimension of C, $n")) - end - if N != j - throw(DimensionMismatch(lazy"A has second dimension $N but needs to match first dimension of B, $j")) - end - if M != m - throw(DimensionMismatch(lazy"A has first dimension $M but needs to match first dimension of C, $m")) - end - end - chkstride1(A) - chkstride1(B) - chkstride1(C) - ccall((@blasfunc($mfname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, - Clong, Clong), - side, uplo, m, n, - alpha, A, max(1,stride(A,2)), B, - max(1,stride(B,2)), beta, C, max(1,stride(C,2)), - 1, 1) - C - end - function symm(side::AbstractChar, uplo::AbstractChar, alpha::($elty), A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - symm!(side, uplo, alpha, A, B, zero($elty), similar(B)) - end - function symm(side::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - symm(side, uplo, one($elty), A, B) - end - end -end - -""" - symm(side, ul, alpha, A, B) - -Return `alpha*A*B` or `alpha*B*A` according to [`side`](@ref stdlib-blas-side). -`A` is assumed to be symmetric. Only -the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -""" -symm(side, ul, alpha, A, B) - -""" - symm(side, ul, A, B) - -Return `A*B` or `B*A` according to [`side`](@ref stdlib-blas-side). -`A` is assumed to be symmetric. Only the [`ul`](@ref stdlib-blas-uplo) -triangle of `A` is used. -""" -symm(side, ul, A, B) - -""" - symm!(side, ul, alpha, A, B, beta, C) - -Update `C` as `alpha*A*B + beta*C` or `alpha*B*A + beta*C` according to [`side`](@ref stdlib-blas-side). -`A` is assumed to be symmetric. Only the [`ul`](@ref stdlib-blas-uplo) triangle of -`A` is used. Return the updated `C`. -""" -symm! - -## (HE) Hermitian matrix-matrix and matrix-vector multiplication -for (mfname, elty) in ((:zhemm_,:ComplexF64), - (:chemm_,:ComplexF32)) - @eval begin - # SUBROUTINE DHEMM(SIDE,UPLO,M,N,ALPHA,A,LDA,B,LDB,BETA,C,LDC) - # .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA,BETA - # INTEGER LDA,LDB,LDC,M,N - # CHARACTER SIDE,UPLO - # .. Array Arguments .. 
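# Illustrative sketch for `symm` above: with side = 'L' and uplo = 'U' only the
# upper triangle of A is read (values assumed).
using LinearAlgebra: BLAS
A = [1.0 2.0; 0.0 3.0]            # read as Symmetric([1 2; 2 3])
B = ones(2, 2)
BLAS.symm('L', 'U', A, B)         # == [1 2; 2 3]*B == [3.0 3.0; 5.0 5.0]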
- # DOUBLE PRECISION A(LDA,*),B(LDB,*),C(LDC,*) - function hemm!(side::AbstractChar, uplo::AbstractChar, alpha::Union{($elty), Bool}, - A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}, - beta::Union{($elty), Bool}, C::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, B, C) - m, n = size(C) - j = checksquare(A) - M, N = size(B) - if side == 'L' - if j != m - throw(DimensionMismatch(lazy"A has first dimension $j but needs to match first dimension of C, $m")) - end - if N != n - throw(DimensionMismatch(lazy"B has second dimension $N but needs to match second dimension of C, $n")) - end - if j != M - throw(DimensionMismatch(lazy"A has second dimension $j but needs to match first dimension of B, $M")) - end - else - if j != n - throw(DimensionMismatch(lazy"B has second dimension $j but needs to match second dimension of C, $n")) - end - if N != j - throw(DimensionMismatch(lazy"A has second dimension $N but needs to match first dimension of B, $j")) - end - if M != m - throw(DimensionMismatch(lazy"A has first dimension $M but needs to match first dimension of C, $m")) - end - end - chkstride1(A) - chkstride1(B) - chkstride1(C) - ccall((@blasfunc($mfname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, - Clong, Clong), - side, uplo, m, n, - alpha, A, max(1,stride(A,2)), B, - max(1,stride(B,2)), beta, C, max(1,stride(C,2)), - 1, 1) - C - end - function hemm(side::AbstractChar, uplo::AbstractChar, alpha::($elty), A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - hemm!(side, uplo, alpha, A, B, zero($elty), similar(B)) - end - function hemm(side::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - hemm(side, uplo, one($elty), A, B) - end - end -end - -""" - hemm(side, ul, alpha, A, B) - -Return `alpha*A*B` or `alpha*B*A` according to [`side`](@ref stdlib-blas-side). -`A` is assumed to be Hermitian. Only the [`ul`](@ref stdlib-blas-uplo) triangle -of `A` is used. -""" -hemm(side, ul, alpha, A, B) - -""" - hemm(side, ul, A, B) - -Return `A*B` or `B*A` according to [`side`](@ref stdlib-blas-side). `A` is assumed -to be Hermitian. Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -""" -hemm(side, ul, A, B) - -""" - hemm!(side, ul, alpha, A, B, beta, C) - -Update `C` as `alpha*A*B + beta*C` or `alpha*B*A + beta*C` according to -[`side`](@ref stdlib-blas-side). `A` is assumed to be Hermitian. Only the -[`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. Return the updated `C`. -""" -hemm! - -## syrk - -""" - syrk!(uplo, trans, alpha, A, beta, C) - -Rank-k update of the symmetric matrix `C` as `alpha*A*transpose(A) + beta*C` or -`alpha*transpose(A)*A + beta*C` according to [`trans`](@ref stdlib-blas-trans). -Only the [`uplo`](@ref stdlib-blas-uplo) triangle of `C` is used. Return `C`. -""" -function syrk! end - -""" - syrk(uplo, trans, alpha, A) - -Return either the upper triangle or the lower triangle of `A`, -according to [`uplo`](@ref stdlib-blas-uplo), -of `alpha*A*transpose(A)` or `alpha*transpose(A)*A`, -according to [`trans`](@ref stdlib-blas-trans). -""" -function syrk end - -for (fname, elty) in ((:dsyrk_,:Float64), - (:ssyrk_,:Float32), - (:zsyrk_,:ComplexF64), - (:csyrk_,:ComplexF32)) - @eval begin - # SUBROUTINE DSYRK(UPLO,TRANS,N,K,ALPHA,A,LDA,BETA,C,LDC) - # * .. Scalar Arguments .. - # REAL ALPHA,BETA - # INTEGER K,LDA,LDC,N - # CHARACTER TRANS,UPLO - # * .. 
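# Illustrative sketch for the Hermitian multiply `hemm` above (values assumed;
# multiplying by the identity just reconstructs the full Hermitian matrix):
using LinearAlgebra: BLAS
A = ComplexF64[2 1+im; 0 3]       # read as Hermitian([2 1+im; 1-im 3]) with uplo = 'U'
B = ComplexF64[1 0; 0 1]
BLAS.hemm('L', 'U', A, B)         # == [2.0+0.0im 1.0+1.0im; 1.0-1.0im 3.0+0.0im]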
Array Arguments .. - # REAL A(LDA,*),C(LDC,*) - function syrk!(uplo::AbstractChar, trans::AbstractChar, - alpha::Union{($elty), Bool}, A::AbstractVecOrMat{$elty}, - beta::Union{($elty), Bool}, C::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, C) - n = checksquare(C) - nn = size(A, trans == 'N' ? 1 : 2) - if nn != n throw(DimensionMismatch(lazy"C has size ($n,$n), corresponding dimension of A is $nn")) end - k = size(A, trans == 'N' ? 2 : 1) - chkstride1(A) - chkstride1(C) - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, - Ptr{$elty}, Ref{BlasInt}, Clong, Clong), - uplo, trans, n, k, - alpha, A, max(1,stride(A,2)), beta, - C, max(1,stride(C,2)), 1, 1) - C - end - end -end -function syrk(uplo::AbstractChar, trans::AbstractChar, alpha::Number, A::AbstractVecOrMat) - T = eltype(A) - n = size(A, trans == 'N' ? 1 : 2) - syrk!(uplo, trans, convert(T,alpha), A, zero(T), similar(A, T, (n, n))) -end -syrk(uplo::AbstractChar, trans::AbstractChar, A::AbstractVecOrMat) = syrk(uplo, trans, one(eltype(A)), A) - -""" - herk!(uplo, trans, alpha, A, beta, C) - -Methods for complex arrays only. Rank-k update of the Hermitian matrix `C` as -`alpha*A*A' + beta*C` or `alpha*A'*A + beta*C` according to [`trans`](@ref stdlib-blas-trans). -Only the [`uplo`](@ref stdlib-blas-uplo) triangle of `C` is updated. Returns `C`. -""" -function herk! end - -""" - herk(uplo, trans, alpha, A) - -Methods for complex arrays only. Returns the [`uplo`](@ref stdlib-blas-uplo) -triangle of `alpha*A*A'` or `alpha*A'*A`, according to [`trans`](@ref stdlib-blas-trans). -""" -function herk end - -for (fname, elty, relty) in ((:zherk_, :ComplexF64, :Float64), - (:cherk_, :ComplexF32, :Float32)) - @eval begin - # SUBROUTINE CHERK(UPLO,TRANS,N,K,ALPHA,A,LDA,BETA,C,LDC) - # * .. Scalar Arguments .. - # REAL ALPHA,BETA - # INTEGER K,LDA,LDC,N - # CHARACTER TRANS,UPLO - # * .. - # * .. Array Arguments .. - # COMPLEX A(LDA,*),C(LDC,*) - function herk!(uplo::AbstractChar, trans::AbstractChar, - α::Union{$relty, Bool}, A::AbstractVecOrMat{$elty}, - β::Union{$relty, Bool}, C::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, C) - n = checksquare(C) - nn = size(A, trans == 'N' ? 1 : 2) - if nn != n - throw(DimensionMismatch(lazy"the matrix to update has dimension $n but the implied dimension of the update is $(size(A, trans == 'N' ? 1 : 2))")) - end - chkstride1(A) - chkstride1(C) - k = size(A, trans == 'N' ? 2 : 1) - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{$relty}, Ptr{$elty}, Ref{BlasInt}, Ref{$relty}, - Ptr{$elty}, Ref{BlasInt}, Clong, Clong), - uplo, trans, n, k, - α, A, max(1,stride(A,2)), β, - C, max(1,stride(C,2)), 1, 1) - C - end - function herk(uplo::AbstractChar, trans::AbstractChar, α::$relty, A::AbstractVecOrMat{$elty}) - n = size(A, trans == 'N' ? 1 : 2) - herk!(uplo, trans, α, A, zero($relty), similar(A, (n,n))) - end - herk(uplo::AbstractChar, trans::AbstractChar, A::AbstractVecOrMat{$elty}) = herk(uplo, trans, one($relty), A) - end -end - -## syr2k -for (fname, elty) in ((:dsyr2k_,:Float64), - (:ssyr2k_,:Float32), - (:zsyr2k_,:ComplexF64), - (:csyr2k_,:ComplexF32)) - @eval begin - # SUBROUTINE DSYR2K(UPLO,TRANS,N,K,ALPHA,A,LDA,B,LDB,BETA,C,LDC) - # - # .. Scalar Arguments .. - # REAL PRECISION ALPHA,BETA - # INTEGER K,LDA,LDB,LDC,N - # CHARACTER TRANS,UPLO - # .. - # .. Array Arguments .. 
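# Illustrative sketch for the rank-k update `syrk` above: only the requested
# triangle of the result is computed, so the other triangle should be treated
# as undefined (values assumed).
using LinearAlgebra: BLAS
A = [1.0 2.0; 3.0 4.0]
C = BLAS.syrk('U', 'T', 1.0, A)   # upper triangle of A'A: C[1,1] == 10.0, C[1,2] == 14.0, C[2,2] == 20.0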
- # REAL PRECISION A(LDA,*),B(LDB,*),C(LDC,*) - function syr2k!(uplo::AbstractChar, trans::AbstractChar, - alpha::($elty), A::AbstractVecOrMat{$elty}, B::AbstractVecOrMat{$elty}, - beta::($elty), C::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, B, C) - n = checksquare(C) - nn = size(A, trans == 'N' ? 1 : 2) - if nn != n throw(DimensionMismatch(lazy"C has size ($n,$n), corresponding dimension of A is $nn")) end - k = size(A, trans == 'N' ? 2 : 1) - chkstride1(A) - chkstride1(B) - chkstride1(C) - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, - Ptr{$elty}, Ref{BlasInt}, Clong, Clong), - uplo, trans, n, k, - alpha, A, max(1,stride(A,2)), B, max(1,stride(B,2)), beta, - C, max(1,stride(C,2)), 1, 1) - C - end - end -end - -""" - syr2k!(uplo, trans, alpha, A, B, beta, C) - -Rank-2k update of the symmetric matrix `C` as -`alpha*A*transpose(B) + alpha*B*transpose(A) + beta*C` or -`alpha*transpose(A)*B + alpha*transpose(B)*A + beta*C` -according to [`trans`](@ref stdlib-blas-trans). -Only the [`uplo`](@ref stdlib-blas-uplo) triangle of `C` is used. Returns `C`. -""" -function syr2k! end - -""" - syr2k(uplo, trans, alpha, A, B) - -Returns the [`uplo`](@ref stdlib-blas-uplo) triangle of -`alpha*A*transpose(B) + alpha*B*transpose(A)` or -`alpha*transpose(A)*B + alpha*transpose(B)*A`, -according to [`trans`](@ref stdlib-blas-trans). -""" -function syr2k(uplo::AbstractChar, trans::AbstractChar, alpha::Number, A::AbstractVecOrMat, B::AbstractVecOrMat) - T = eltype(A) - n = size(A, trans == 'N' ? 1 : 2) - syr2k!(uplo, trans, convert(T,alpha), A, B, zero(T), similar(A, T, (n, n))) -end -""" - syr2k(uplo, trans, A, B) - -Return the [`uplo`](@ref stdlib-blas-uplo) triangle of `A*transpose(B) + B*transpose(A)` -or `transpose(A)*B + transpose(B)*A`, according to [`trans`](@ref stdlib-blas-trans). -""" -syr2k(uplo::AbstractChar, trans::AbstractChar, A::AbstractVecOrMat, B::AbstractVecOrMat) = syr2k(uplo, trans, one(eltype(A)), A, B) - -for (fname, elty1, elty2) in ((:zher2k_,:ComplexF64,:Float64), (:cher2k_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE CHER2K(UPLO,TRANS,N,K,ALPHA,A,LDA,B,LDB,BETA,C,LDC) - # - # .. Scalar Arguments .. - # COMPLEX ALPHA - # REAL BETA - # INTEGER K,LDA,LDB,LDC,N - # CHARACTER TRANS,UPLO - # .. - # .. Array Arguments .. - # COMPLEX A(LDA,*),B(LDB,*),C(LDC,*) - function her2k!(uplo::AbstractChar, trans::AbstractChar, alpha::($elty1), - A::AbstractVecOrMat{$elty1}, B::AbstractVecOrMat{$elty1}, - beta::($elty2), C::AbstractMatrix{$elty1}) - chkuplo(uplo) - require_one_based_indexing(A, B, C) - n = checksquare(C) - nn = size(A, trans == 'N' ? 1 : 2) - if nn != n throw(DimensionMismatch(lazy"C has size ($n,$n), corresponding dimension of A is $nn")) end - chkstride1(A) - chkstride1(B) - chkstride1(C) - k = size(A, trans == 'N' ? 2 : 1) - ccall((@blasfunc($fname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{$elty1}, Ptr{$elty1}, Ref{BlasInt}, Ptr{$elty1}, Ref{BlasInt}, - Ref{$elty2}, Ptr{$elty1}, Ref{BlasInt}, Clong, Clong), - uplo, trans, n, k, - alpha, A, max(1,stride(A,2)), B, max(1,stride(B,2)), - beta, C, max(1,stride(C,2)), 1, 1) - C - end - function her2k(uplo::AbstractChar, trans::AbstractChar, alpha::($elty1), A::AbstractVecOrMat{$elty1}, B::AbstractVecOrMat{$elty1}) - n = size(A, trans == 'N' ? 
1 : 2) - her2k!(uplo, trans, alpha, A, B, zero($elty2), similar(A, $elty1, (n,n))) - end - her2k(uplo::AbstractChar, trans::AbstractChar, A::AbstractVecOrMat{$elty1}, B::AbstractVecOrMat{$elty1}) = - her2k(uplo, trans, one($elty1), A, B) - end -end - -""" - her2k!(uplo, trans, alpha, A, B, beta, C) - -Rank-2k update of the Hermitian matrix `C` as -`alpha*A*B' + alpha*B*A' + beta*C` or `alpha*A'*B + alpha*B'*A + beta*C` -according to [`trans`](@ref stdlib-blas-trans). The scalar `beta` has to be real. -Only the [`uplo`](@ref stdlib-blas-uplo) triangle of `C` is used. Return `C`. -""" -function her2k! end - -""" - her2k(uplo, trans, alpha, A, B) - -Return the [`uplo`](@ref stdlib-blas-uplo) triangle of `alpha*A*B' + alpha*B*A'` -or `alpha*A'*B + alpha*B'*A`, according to [`trans`](@ref stdlib-blas-trans). -""" -her2k(uplo, trans, alpha, A, B) - -""" - her2k(uplo, trans, A, B) - -Return the [`uplo`](@ref stdlib-blas-uplo) triangle of `A*B' + B*A'` -or `A'*B + B'*A`, according to [`trans`](@ref stdlib-blas-trans). -""" -her2k(uplo, trans, A, B) - -## (TR) Triangular matrix and vector multiplication and solution - -""" - trmm!(side, ul, tA, dA, alpha, A, B) - -Update `B` as `alpha*A*B` or one of the other three variants determined by -[`side`](@ref stdlib-blas-side) and [`tA`](@ref stdlib-blas-trans). -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -[`dA`](@ref stdlib-blas-diag) determines if the diagonal values are read or -are assumed to be all ones. -Return the updated `B`. -""" -function trmm! end - -""" - trmm(side, ul, tA, dA, alpha, A, B) - -Return `alpha*A*B` or one of the other three variants determined by -[`side`](@ref stdlib-blas-side) and [`tA`](@ref stdlib-blas-trans). -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -[`dA`](@ref stdlib-blas-diag) determines if the diagonal values are read or -are assumed to be all ones. -""" -function trmm end - -""" - trsm!(side, ul, tA, dA, alpha, A, B) - -Overwrite `B` with the solution to `A*X = alpha*B` or one of the other three variants -determined by [`side`](@ref stdlib-blas-side) and [`tA`](@ref stdlib-blas-trans). -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -[`dA`](@ref stdlib-blas-diag) determines if the diagonal values are read or -are assumed to be all ones. -Returns the updated `B`. -""" -function trsm! end - -""" - trsm(side, ul, tA, dA, alpha, A, B) - -Return the solution to `A*X = alpha*B` or one of the other three variants determined by -determined by [`side`](@ref stdlib-blas-side) and [`tA`](@ref stdlib-blas-trans). -Only the [`ul`](@ref stdlib-blas-uplo) triangle of `A` is used. -[`dA`](@ref stdlib-blas-diag) determines if the diagonal values are read or -are assumed to be all ones. -""" -function trsm end - -for (mmname, smname, elty) in - ((:dtrmm_,:dtrsm_,:Float64), - (:strmm_,:strsm_,:Float32), - (:ztrmm_,:ztrsm_,:ComplexF64), - (:ctrmm_,:ctrsm_,:ComplexF32)) - @eval begin - # SUBROUTINE DTRMM(SIDE,UPLO,TRANSA,DIAG,M,N,ALPHA,A,LDA,B,LDB) - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA - # INTEGER LDA,LDB,M,N - # CHARACTER DIAG,SIDE,TRANSA,UPLO - # * .. Array Arguments .. - # DOUBLE PRECISION A(LDA,*),B(LDB,*) - function trmm!(side::AbstractChar, uplo::AbstractChar, transa::AbstractChar, diag::AbstractChar, alpha::Number, - A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, B) - m, n = size(B) - nA = checksquare(A) - if nA != (side == 'L' ? 
m : n) - throw(DimensionMismatch(lazy"size of A, $(size(A)), doesn't match $side size of B with dims, $(size(B))")) - end - chkstride1(A) - chkstride1(B) - ccall((@blasfunc($mmname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Clong, Clong, Clong, Clong), - side, uplo, transa, diag, m, n, - alpha, A, max(1,stride(A,2)), B, max(1,stride(B,2)), - 1, 1, 1, 1) - B - end - function trmm(side::AbstractChar, uplo::AbstractChar, transa::AbstractChar, diag::AbstractChar, - alpha::$elty, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - trmm!(side, uplo, transa, diag, alpha, A, copy(B)) - end - # SUBROUTINE DTRSM(SIDE,UPLO,TRANSA,DIAG,M,N,ALPHA,A,LDA,B,LDB) - # * .. Scalar Arguments .. - # DOUBLE PRECISION ALPHA - # INTEGER LDA,LDB,M,N - # CHARACTER DIAG,SIDE,TRANSA,UPLO - # * .. Array Arguments .. - # DOUBLE PRECISION A(LDA,*),B(LDB,*) - function trsm!(side::AbstractChar, uplo::AbstractChar, transa::AbstractChar, diag::AbstractChar, - alpha::$elty, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - chkuplo(uplo) - require_one_based_indexing(A, B) - m, n = size(B) - k = checksquare(A) - if k != (side == 'L' ? m : n) - throw(DimensionMismatch(lazy"size of A is ($k,$k), size of B is ($m,$n), side is $side, and transa='$transa'")) - end - chkstride1(A) - chkstride1(B) - ccall((@blasfunc($smname), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, - Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Clong, Clong, Clong, Clong), - side, uplo, transa, diag, - m, n, alpha, A, - max(1,stride(A,2)), B, max(1,stride(B,2)), - 1, 1, 1, 1) - B - end - function trsm(side::AbstractChar, uplo::AbstractChar, transa::AbstractChar, diag::AbstractChar, alpha::$elty, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - trsm!(side, uplo, transa, diag, alpha, A, copy(B)) - end - end -end - -end # module - -function copyto!(dest::Array{T}, rdest::AbstractRange{Ti}, - src::Array{T}, rsrc::AbstractRange{Ti}) where {T<:BlasFloat,Ti<:Integer} - if minimum(rdest) < 1 || maximum(rdest) > length(dest) - throw(ArgumentError(lazy"range out of bounds for dest, of length $(length(dest))")) - end - if minimum(rsrc) < 1 || maximum(rsrc) > length(src) - throw(ArgumentError(lazy"range out of bounds for src, of length $(length(src))")) - end - if length(rdest) != length(rsrc) - throw(DimensionMismatch(lazy"ranges must be of the same length")) - end - GC.@preserve src dest BLAS.blascopy!( - length(rsrc), - pointer(src, minimum(rsrc)), - step(rsrc), - pointer(dest, minimum(rdest)), - step(rdest)) - - return dest -end diff --git a/stdlib/LinearAlgebra/src/bunchkaufman.jl b/stdlib/LinearAlgebra/src/bunchkaufman.jl deleted file mode 100644 index a44f1a1c99094..0000000000000 --- a/stdlib/LinearAlgebra/src/bunchkaufman.jl +++ /dev/null @@ -1,1601 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -## Create an extractor that extracts the modified original matrix, e.g. -## LD for BunchKaufman, UL for CholeskyDense, LU for LUDense and -## define size methods for Factorization types using it. - -##----------- Type utilities for generic Bunch-Kaufman implementation ------------ -# Generic real type. Any real number type should able to approximate -# real numbers, and thus be closed under arithmetic operations. -# Therefore so Int, Complex{Int}, etc. are excluded. 
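# Illustrative sketch for the triangular solve/multiply wrappers `trsm`/`trmm`
# defined above (values assumed):
using LinearAlgebra: BLAS
A = [2.0 1.0;
     0.0 4.0]
B = [2.0 4.0;
     4.0 8.0]
BLAS.trsm('L', 'U', 'N', 'N', 1.0, A, B)   # solves A*X = B: X == [0.5 1.0; 1.0 2.0]
BLAS.trmm('L', 'U', 'N', 'N', 1.0, A, B)   # == A*B == [8.0 16.0; 16.0 32.0]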
-ClosedReal = T where T <: Union{AbstractFloat, Rational} -# Similarly, we also use a closed scalar type -ClosedScalar = Union{T, Complex{T}} where T <: ClosedReal -##-------------------------------------------------------------------------------- - -""" - BunchKaufman <: Factorization - -Matrix factorization type of the Bunch-Kaufman factorization of a symmetric or -Hermitian matrix `A` as `P'UDU'P` or `P'LDL'P`, depending on whether the upper -(the default) or the lower triangle is stored in `A`. If `A` is complex symmetric -then `U'` and `L'` denote the unconjugated transposes, i.e. `transpose(U)` and -`transpose(L)`, respectively. This is the return type of [`bunchkaufman`](@ref), -the corresponding matrix factorization function. - -If `S::BunchKaufman` is the factorization object, the components can be obtained -via `S.D`, `S.U` or `S.L` as appropriate given `S.uplo`, and `S.p`. - -Iterating the decomposition produces the components `S.D`, `S.U` or `S.L` -as appropriate given `S.uplo`, and `S.p`. - -# Examples -```jldoctest -julia> A = Float64.([1 2; 2 3]) -2×2 Matrix{Float64}: - 1.0 2.0 - 2.0 3.0 - -julia> S = bunchkaufman(A) # A gets wrapped internally by Symmetric(A) -BunchKaufman{Float64, Matrix{Float64}, Vector{Int64}} -D factor: -2×2 Tridiagonal{Float64, Vector{Float64}}: - -0.333333 0.0 - 0.0 3.0 -U factor: -2×2 UnitUpperTriangular{Float64, Matrix{Float64}}: - 1.0 0.666667 - ⋅ 1.0 -permutation: -2-element Vector{Int64}: - 1 - 2 - -julia> d, u, p = S; # destructuring via iteration - -julia> d == S.D && u == S.U && p == S.p -true - -julia> S = bunchkaufman(Symmetric(A, :L)) -BunchKaufman{Float64, Matrix{Float64}, Vector{Int64}} -D factor: -2×2 Tridiagonal{Float64, Vector{Float64}}: - 3.0 0.0 - 0.0 -0.333333 -L factor: -2×2 UnitLowerTriangular{Float64, Matrix{Float64}}: - 1.0 ⋅ - 0.666667 1.0 -permutation: -2-element Vector{Int64}: - 2 - 1 -``` -""" -struct BunchKaufman{T,S<:AbstractMatrix,P<:AbstractVector{<:Integer}} <: Factorization{T} - LD::S - ipiv::P - uplo::Char - symmetric::Bool - rook::Bool - info::BlasInt - - function BunchKaufman{T,S,P}(LD, ipiv, uplo, symmetric, rook, info) where {T,S<:AbstractMatrix,P<:AbstractVector} - require_one_based_indexing(LD) - new{T,S,P}(LD, ipiv, uplo, symmetric, rook, info) - end -end -BunchKaufman(A::AbstractMatrix{T}, ipiv::AbstractVector{<:Integer}, uplo::AbstractChar, - symmetric::Bool, rook::Bool, info::BlasInt) where {T} = - BunchKaufman{T,typeof(A),typeof(ipiv)}(A, ipiv, uplo, symmetric, rook, info) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(BunchKaufman{T,S}(LD, ipiv, uplo, symmetric, rook, info) where {T,S}, - BunchKaufman{T,S,typeof(ipiv)}(LD, ipiv, uplo, symmetric, rook, info), false) - -# iteration for destructuring into components -Base.iterate(S::BunchKaufman) = (S.D, Val(:UL)) -Base.iterate(S::BunchKaufman, ::Val{:UL}) = (S.uplo == 'L' ? S.L : S.U, Val(:p)) -Base.iterate(S::BunchKaufman, ::Val{:p}) = (S.p, Val(:done)) -Base.iterate(S::BunchKaufman, ::Val{:done}) = nothing -copy(S::BunchKaufman) = BunchKaufman(copy(S.LD), copy(S.ipiv), S.uplo, S.symmetric, S.rook, S.info) - -""" - bunchkaufman!(A, rook::Bool=false; check = true) -> BunchKaufman - -`bunchkaufman!` is the same as [`bunchkaufman`](@ref), but saves space by overwriting the -input `A`, instead of creating a copy. -""" -function bunchkaufman!(A::RealHermSymComplexSym{<:BlasReal,<:StridedMatrix}, - rook::Bool = false; check::Bool = true) - LD, ipiv, info = rook ? 
LAPACK.sytrf_rook!(A.uplo, A.data) : LAPACK.sytrf!(A.uplo, A.data) - check && checknonsingular(info) - BunchKaufman(LD, ipiv, A.uplo, true, rook, info) -end -function bunchkaufman!(A::Hermitian{<:BlasComplex,<:StridedMatrix}, - rook::Bool = false; check::Bool = true) - LD, ipiv, info = rook ? LAPACK.hetrf_rook!(A.uplo, A.data) : LAPACK.hetrf!(A.uplo, A.data) - check && checknonsingular(info) - BunchKaufman(LD, ipiv, A.uplo, false, rook, info) -end -function bunchkaufman!(A::StridedMatrix{<:BlasFloat}, rook::Bool = false; check::Bool = true) - if ishermitian(A) - return bunchkaufman!(Hermitian(A), rook; check = check) - elseif issymmetric(A) - return bunchkaufman!(Symmetric(A), rook; check = check) - else - throw(ArgumentError("Bunch-Kaufman decomposition is only valid for symmetric or Hermitian matrices")) - end -end - -bkcopy_oftype(A, S) = eigencopy_oftype(A, S) -bkcopy_oftype(A::Symmetric{<:Complex}, S) = Symmetric(copytrito!(similar(parent(A), S, size(A)), A.data, A.uplo), sym_uplo(A.uplo)) - -""" - bunchkaufman(A, rook::Bool=false; check = true) -> S::BunchKaufman - -Compute the Bunch-Kaufman [^Bunch1977] factorization of a symmetric or -Hermitian matrix `A` as `P'*U*D*U'*P` or `P'*L*D*L'*P`, depending on -which triangle is stored in `A`, and return a [`BunchKaufman`](@ref) object. -Note that if `A` is complex symmetric then `U'` and `L'` denote -the unconjugated transposes, i.e. `transpose(U)` and `transpose(L)`. - -Iterating the decomposition produces the components `S.D`, `S.U` or `S.L` -as appropriate given `S.uplo`, and `S.p`. - -If `rook` is `true`, rook pivoting is used. If `rook` is false, -rook pivoting is not used. - -When `check = true`, an error is thrown if the decomposition fails. -When `check = false`, responsibility for checking the decomposition's -validity (via [`issuccess`](@ref)) lies with the user. - -The following functions are available for `BunchKaufman` objects: -[`size`](@ref), `\\`, [`inv`](@ref), [`issymmetric`](@ref), -[`ishermitian`](@ref), [`getindex`](@ref). - -[^Bunch1977]: J R Bunch and L Kaufman, Some stable methods for calculating inertia and solving symmetric linear systems, Mathematics of Computation 31:137 (1977), 163-179. [url](https://www.ams.org/journals/mcom/1977-31-137/S0025-5718-1977-0428694-0/). 
- -# Examples -```jldoctest -julia> A = Float64.([1 2; 2 3]) -2×2 Matrix{Float64}: - 1.0 2.0 - 2.0 3.0 - -julia> S = bunchkaufman(A) # A gets wrapped internally by Symmetric(A) -BunchKaufman{Float64, Matrix{Float64}, Vector{Int64}} -D factor: -2×2 Tridiagonal{Float64, Vector{Float64}}: - -0.333333 0.0 - 0.0 3.0 -U factor: -2×2 UnitUpperTriangular{Float64, Matrix{Float64}}: - 1.0 0.666667 - ⋅ 1.0 -permutation: -2-element Vector{Int64}: - 1 - 2 - -julia> d, u, p = S; # destructuring via iteration - -julia> d == S.D && u == S.U && p == S.p -true - -julia> S.U*S.D*S.U' - S.P*A*S.P' -2×2 Matrix{Float64}: - 0.0 0.0 - 0.0 0.0 - -julia> S = bunchkaufman(Symmetric(A, :L)) -BunchKaufman{Float64, Matrix{Float64}, Vector{Int64}} -D factor: -2×2 Tridiagonal{Float64, Vector{Float64}}: - 3.0 0.0 - 0.0 -0.333333 -L factor: -2×2 UnitLowerTriangular{Float64, Matrix{Float64}}: - 1.0 ⋅ - 0.666667 1.0 -permutation: -2-element Vector{Int64}: - 2 - 1 - -julia> S.L*S.D*S.L' - A[S.p, S.p] -2×2 Matrix{Float64}: - 0.0 0.0 - 0.0 0.0 -``` -""" -bunchkaufman(A::AbstractMatrix{T}, rook::Bool=false; check::Bool = true) where {T} = - bunchkaufman!(bkcopy_oftype(A, typeof(sqrt(oneunit(T)))), rook; check = check) - -BunchKaufman{T}(B::BunchKaufman) where {T} = - BunchKaufman(convert(Matrix{T}, B.LD), B.ipiv, B.uplo, B.symmetric, B.rook, B.info) -Factorization{T}(B::BunchKaufman) where {T} = BunchKaufman{T}(B) - -size(B::BunchKaufman) = size(getfield(B, :LD)) -size(B::BunchKaufman, d::Integer) = size(getfield(B, :LD), d) -issymmetric(B::BunchKaufman) = B.symmetric -ishermitian(B::BunchKaufman{T}) where T = T<:Real || !B.symmetric - -function _ipiv2perm_bk(v::AbstractVector{T}, maxi::Integer, uplo::AbstractChar, rook::Bool) where T - require_one_based_indexing(v) - p = T[1:maxi;] - uploL = uplo == 'L' - i = uploL ? 1 : maxi - # if uplo == 'U' we construct the permutation backwards - @inbounds while 1 <= i <= length(v) - vi = v[i] - if vi > 0 # the 1x1 blocks - p[i], p[vi] = p[vi], p[i] - i += uploL ? 1 : -1 - else # the 2x2 blocks - if rook - p[i], p[-vi] = p[-vi], p[i] - end - if uploL - vp = rook ? -v[i+1] : -vi - p[i + 1], p[vp] = p[vp], p[i + 1] - i += 2 - else # 'U' - vp = rook ? -v[i-1] : -vi - p[i - 1], p[vp] = p[vp], p[i - 1] - i -= 2 - end - end - end - return p -end - -function getproperty(B::BunchKaufman{TS}, - d::Symbol) where TS <: ClosedScalar{TR} where TR <: ClosedReal - n = size(B, 1) - if d === :p - return _ipiv2perm_bk(getfield(B, :ipiv), n, getfield(B, :uplo), B.rook) - elseif d === :P - return Matrix{TS}(I, n, n)[:,invperm(B.p)] - elseif d === :L || d === :U || d === :D - if d === :D - _, od, md = generic_syconv(B, false) - elseif typeof(B) <: BunchKaufman{T,<:StridedMatrix} where {T<:BlasFloat} - # We use LAPACK whenever we can - if getfield(B, :rook) - LUD, _ = LAPACK.syconvf_rook!(getfield(B, :uplo), 'C', - copy(getfield(B, :LD)), getfield(B, :ipiv)) - else - LUD, _ = LAPACK.syconv!(getfield(B, :uplo), copy(getfield(B, :LD)), - getfield(B, :ipiv)) - end - else - LUD, _ = generic_syconv(B) - end - if d === :D - if getfield(B, :uplo) == 'L' - odl = od[1:n - 1] - return Tridiagonal(odl, md, getfield(B, :symmetric) ? odl : conj.(odl)) - else # 'U' - odu = od[2:n] - return Tridiagonal(getfield(B, :symmetric) ? 
odu : conj.(odu), md, odu) - end - elseif d === :L - if getfield(B, :uplo) == 'L' - return UnitLowerTriangular(LUD) - else - throw(ArgumentError("factorization is U*D*U' but you requested L")) - end - else # :U - if B.uplo == 'U' - return UnitUpperTriangular(LUD) - else - throw(ArgumentError("factorization is L*D*L' but you requested U")) - end - end - else - getfield(B, d) - end -end - -Base.propertynames(B::BunchKaufman, private::Bool=false) = - (:p, :P, :L, :U, :D, (private ? fieldnames(typeof(B)) : ())...) - -function Base.:(==)(B1::BunchKaufman, B2::BunchKaufman) - # check for the equality between properties instead of fields - B1.p == B2.p || return false - if B1.uplo == 'L' - B1.L == B2.L || return false - else - B1.U == B2.U || return false - end - return (B1.D == B2.D) -end - -function getproperties!(B::BunchKaufman{T,<:StridedMatrix}) where {T<:BlasFloat} - # NOTE: Unlike in the 'getproperty' function, in this function L/U and D are computed in place. - if B.rook - LUD, od = LAPACK.syconvf_rook!(B.uplo, 'C', B.LD, B.ipiv) - else - LUD, od = LAPACK.syconv!(B.uplo, B.LD, B.ipiv) - end - if B.uplo == 'U' - M = UnitUpperTriangular(LUD) - du = od[2:end] - # Avoid aliasing dl and du. - dl = B.symmetric ? du : conj.(du) - else - M = UnitLowerTriangular(LUD) - dl = od[1:end-1] - # Avoid aliasing dl and du. - du = B.symmetric ? dl : conj.(dl) - end - return (M, Tridiagonal(dl, diag(LUD), du), B.p) -end - -issuccess(B::BunchKaufman) = B.info == 0 - -function adjoint(B::BunchKaufman) - if ishermitian(B) - return B - else - throw(ArgumentError("adjoint not implemented for complex symmetric matrices")) - end -end - -function Base.show(io::IO, mime::MIME{Symbol("text/plain")}, B::BunchKaufman) - if issuccess(B) - summary(io, B); println(io) - println(io, "D factor:") - show(io, mime, B.D) - println(io, "\n$(B.uplo) factor:") - show(io, mime, B.uplo == 'L' ? 
B.L : B.U) - println(io, "\npermutation:") - show(io, mime, B.p) - else - print(io, "Failed factorization of type $(typeof(B))") - end -end - -function inv(B::BunchKaufman{<:BlasReal,<:StridedMatrix}) - if B.rook - copytri!(LAPACK.sytri_rook!(B.uplo, copy(B.LD), B.ipiv), B.uplo, true) - else - copytri!(LAPACK.sytri!(B.uplo, copy(B.LD), B.ipiv), B.uplo, true) - end -end - -function inv(B::BunchKaufman{<:BlasComplex,<:StridedMatrix}) - if issymmetric(B) - if B.rook - copytri!(LAPACK.sytri_rook!(B.uplo, copy(B.LD), B.ipiv), B.uplo) - else - copytri!(LAPACK.sytri!(B.uplo, copy(B.LD), B.ipiv), B.uplo) - end - else - if B.rook - copytri!(LAPACK.hetri_rook!(B.uplo, copy(B.LD), B.ipiv), B.uplo, true) - else - copytri!(LAPACK.hetri!(B.uplo, copy(B.LD), B.ipiv), B.uplo, true) - end - end -end - -function ldiv!(B::BunchKaufman{T,<:StridedMatrix}, R::StridedVecOrMat{T}) where {T<:BlasReal} - if B.rook - LAPACK.sytrs_rook!(B.uplo, B.LD, B.ipiv, R) - else - LAPACK.sytrs!(B.uplo, B.LD, B.ipiv, R) - end -end -function ldiv!(B::BunchKaufman{T,<:StridedMatrix}, R::StridedVecOrMat{T}) where {T<:BlasComplex} - if B.rook - if issymmetric(B) - LAPACK.sytrs_rook!(B.uplo, B.LD, B.ipiv, R) - else - LAPACK.hetrs_rook!(B.uplo, B.LD, B.ipiv, R) - end - else - if issymmetric(B) - LAPACK.sytrs!(B.uplo, B.LD, B.ipiv, R) - else - LAPACK.hetrs!(B.uplo, B.LD, B.ipiv, R) - end - end -end - -function logabsdet(F::BunchKaufman) - M = F.LD - p = F.ipiv - n = size(F.LD, 1) - - if !issuccess(F) - return eltype(F)(-Inf), zero(eltype(F)) - end - s = one(real(eltype(F))) - i = 1 - abs_det = zero(real(eltype(F))) - while i <= n - if p[i] > 0 - elm = M[i,i] - s *= sign(elm) - abs_det += log(abs(elm)) - i += 1 - else - # 2x2 pivot case. Make sure not to square before the subtraction by scaling - # with the off-diagonal element. This is safe because the off diagonal is - # always large for 2x2 pivots. - if F.uplo == 'U' - elm = M[i, i + 1]*(M[i,i]/M[i, i + 1]*M[i + 1, i + 1] - - (issymmetric(F) ? M[i, i + 1] : conj(M[i, i + 1]))) - s *= sign(elm) - abs_det += log(abs(elm)) - else - elm = M[i + 1,i]*(M[i, i]/M[i + 1, i]*M[i + 1, i + 1] - - (issymmetric(F) ? M[i + 1, i] : conj(M[i + 1, i]))) - s *= sign(elm) - abs_det += log(abs(elm)) - end - i += 2 - end - end - return abs_det, s -end - -## reconstruct the original matrix -## TODO: understand the procedure described at -## https://www.nag.com/numeric/FL/nagdoc_fl22/pdf/F07/f07mdf.pdf - - -##-------------------------------------------------------------------------- -##------------- Start of generic Bunch-Kaufman Implementation -------------- -##-------------------------------------------------------------------------- - -export inertia - -function arg_illegal(fun_name::AbstractString, - info::Integer, - waer::AbstractChar) - if waer == 'W' - @warn " ** On entry to '$(fun_name)' parameter number " * - "$(info) had an illegal value" - else - error(" ** On entry to '$(fun_name)' parameter number " * - "$(info) had an illegal value") - end -end - - -function cabs1(z::T) where T <: Complex - return abs(real(z)) + abs(imag(z)) -end - - -function cabsr(z::T) where T <: Complex - return abs(real(z)) -end - - -""" -generic_adr1!(uplo, alpha, x, y, A, syhe) -> nothing - -`generic_adr1!` performs the following adjoint (symmetric or Hermitian) -rank 1 operation - -`A[1:K,1:L] = alpha*x*y' + A[1:K,1:L]` - -in-place, where `alpha` is a scalar, `x` is a K element vector, `y` -is an L element vector and `A` is an `NxM` matrix. Note that `y'` can -denote either the transpose, i.e. 
`transpose(y)` or the conjugate -transpose , i.e. `adjoint(y)`. - -`uplo` is a character, either `'U'`, `'L'` or `'F'`, indicating whether -the matrix is stored in the upper triangular part (`uplo=='U'`), the -lower triangular part (`uplo=='L'`), or the full storage space is used -(`uplo=='F'`). If `uplo!='F'` then only the corresponding triangular -part is updated. The values `'U'` or `'L'` can only be used when A is -square (`N==M`). - -`syhe` is a character, either `'S'` or `'H'`, indicating whether the -symmetric adjoint (`syhe=='S'`, and `y'==transpose(y)`) or the hermitian -adjoint (`syhe=='H'`, and `y'==adjoint(y)`) must be used. -""" -function generic_adr1!(uplo::AbstractChar, - alpha::ClosedScalar{TR}, - x::AbstractVector{TS}, - y::AbstractVector{TS}, - A::AbstractMatrix{TS}, - syhe::AbstractChar - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - - # Inputs must be 1-indexed; bounds may not be checked. - Base.require_one_based_indexing(x, A) - - # Check argument validity - K = length(x) - L = length(y) - N, M = size(A) - info = 0::BlasInt - if (uplo != 'U' && uplo != 'L' && uplo != 'F') || (uplo != 'F' && N != M) - info = (-1)::BlasInt - elseif K > N - info = (-3)::BlasInt - elseif L > M - info = (-4)::BlasInt - elseif syhe != 'S' && syhe != 'H' - info = (-6)::BlasInt - end - if info < 0 - arg_illegal("generic_sadr1!", -info, 'E') - end - - # Load the requested adjoining operator - adj_op = syhe == 'S' ? identity : conj - - # Define loop range function according to the type of storage - # TODO: can we adjust the range without anonymous functions, - # but without having to write the same code thrice? - i_range = uplo == 'F' ? _ -> (1:K) : uplo == 'U' ? j -> (1:min(j,K)) : j -> (j:K) - - # Compute rank update of A - for j in 1:L; @inbounds begin - if y[j] != 0 - temp = alpha * adj_op(y[j]) - for i in i_range(j) - A[i,j] += x[i] * temp - end - end - end; end - return -end - - -""" -generic_mvpv!(trans, alpha, A, x, beta, y) -> nothing - -`generic_mvpv!` performs the following matrix-vector operation: - -`y[1:K] = alpha*A'*x[1:L] + beta*y[1:K]` - -in-place, where `alpha` and `beta` are scalars, `x` is a vector with at -least L elements, `y` is a vector with at least K elements, and `A` is -an `NxM` matrix. `A'` can denote the transpose, i.e. `transpose(A)` or -the conjugate transpose, i.e. `adjoint(A)`, and then `M==K && N==L`. -`A'` can also denote no adjoining at all, i.e. `A'==A`, and then -`N==K && M==L`. - -`trans` is a character, either `'T'`, `'C'` or `'N'`, indicating whether -`A'=transpose(A)`, `A'=adjoint(A)` or `A'=A`, respectively. -""" -function generic_mvpv!(trans::AbstractChar, - alpha::ClosedScalar{TR}, - A::AbstractMatrix{TS}, - x::AbstractVector{TS}, - beta::ClosedScalar{TR}, - y::AbstractVector{TS}, - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - - # Inputs must be 1-indexed; bounds may not be checked. - Base.require_one_based_indexing(A, x, y) - - # Check argument validity - M, N = size(A) - K = trans == 'N' ? M : N - L = trans == 'N' ? N : M - info = 0::BlasInt - if trans != 'T' && trans != 'C' && trans != 'N' - info = (-1)::BlasInt - elseif length(y) < K - info = (-3)::BlasInt - elseif length(x) < L - info = (-4)::BlasInt - end - if info < 0 - arg_illegal("generic_sadr1!", -info, 'E') - end - - # Quick return if possible. - if K == 0 || (alpha == 0 && beta == 1); return; end - - # Start the operations. In this version the elements of A are - # accessed sequentially with one pass through A. - # First form y := beta*y. 
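A minimal sketch of what this routine computes for the `'T'` case, stated with ordinary array operations; it assumes the unexported helper is still reachable as `LinearAlgebra.generic_mvpv!`:

```julia
using LinearAlgebra

A = rand(3, 4); x = rand(3); y = rand(4)
alpha, beta = 2.0, 0.5

yref = alpha .* (transpose(A) * x) .+ beta .* y   # y[1:K] = alpha*A'*x[1:L] + beta*y[1:K]
ygen = copy(y)
LinearAlgebra.generic_mvpv!('T', alpha, A, x, beta, ygen)
ygen ≈ yref                                       # true
```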
- @inbounds begin - if beta != 1 - if beta == 0 - # Way less allocations and way faster for BigFloat. - # For Float64 there is some (acceptable IMO) performance loss. - y[1:K] .= 0 - else - for i in 1:K; y[i] *= beta; end - end - end - if alpha == 0 || L == 0; return; end - - if trans == 'N' - # Form y := alpha*A*x + y. - for j in 1:L - # Faster than a loop - axpy!(alpha*x[j], view(A, 1:K, j), view(y, 1:K)) - end - else - # Form y := alpha*A**T*x + y or y := alpha*A**H*x + y. - noconj = (trans == 'T') - for i = 1:K - temp = 0 - if noconj - for j = 1:L - temp = temp + A[j,i]*x[j] - end - else - for j = 1:L - temp = temp + conj(A[j,i])*x[j] - end - end - y[i] += alpha*temp - end - end - end - return -end - - -""" -bk_rowcol_swap!(A, k, kp, kstep, upper, herm) -> did_swap::Bool - -Performs the row and column interchange of the Bunch-Kaufman factorization. -If `upper==true` then the rows and columns `kp` of `A[1:k,1:k]` are -interchanged with either rows and columns `k` or `k-1` of `A[1:k,1:k]`, -depending on whether `kstep==1` or `kstep==2`, respectively. If -`upper==false` then the rows and columns `kp-k+1` of `A[k:N,k:N]` are -interchanged with either rows and columns `1` or `2` of `A[k:N,k:N]`, -depending on whether `kstep==1` or `kstep==2`, respectively. `herm=true` -then it is assumed that `A` is Hermitian, and conjugation is applied to -the appropriate entries of the interchanged rows and columns. If -`herm=false` no conjugation is performed. - -This is an internal helper function for the main Bunch-Kaufman -factorization function, `generic_bunchkaufman!`. As such, validity of the -input values is not verified. -""" -function bk_rowcol_swap!( - A::AbstractMatrix{TS}, - k::Integer, - kp::Integer, - kstep::Integer, - upper::Bool, - herm::Bool - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - kk = upper ? k - kstep + 1 : k + kstep - 1 - if kp != kk - if kp > 1 - thisview = upper ? view(A, 1:(kp-1), :) : view(A, (kp+1):size(A,1), :) - Base.swapcols!(thisview, kp, kk) - end - thisrange = upper ? ((kp+1):(kk-1)) : ((kk+1):(kp-1)) - if !herm - # Real/complex symmetric case - for j in thisrange - A[j,kk], A[kp,j] = A[kp,j], A[j,kk] - end - A[kk,kk], A[kp,kp] = A[kp,kp], A[kk,kk] - else - # Hermitian case - for j in thisrange - A[j,kk], A[kp,j] = conj(A[kp,j]), conj(A[j,kk]) - end - A[kp,kk] = conj(A[kp,kk]) - A[kk,kk], A[kp,kp] = real(A[kp,kp]), real(A[kk,kk]) - end - if kstep == 2 - if herm - # Force diagonal entry to be purely real - A[k,k] = real(A[k,k]) - end - if upper - A[k-1,k], A[kp,k] = A[kp,k], A[k-1,k] - else - A[k+1,k], A[kp,k] = A[kp,k], A[k+1,k] - end - end - return true - else - return false - end -end - - -""" -generic_bunchkaufman!(uplo, A, syhe, rook::Bool=false) -> -LD<:AbstractMatrix, ipiv<:AbstractVector{Integer}, info::BlasInt - -Computes the Bunch-Kaufman factorization of a symmetric or Hermitian -matrix `A` of size `NxN` as `P'*U*D*U'*P` or `P'*L*D*L'*P`, depending on -which triangle is stored in `A`. Note that if `A` is complex symmetric -then `U'` and `L'` denote the unconjugated transposes, i.e. -`transpose(U)` and `transpose(L)`. The resulting `U` or `L` and D are -stored in-place in `A`, LAPACK style. `LD` is just a reference to `A` -(that is, `LD===A`). `ipiv` stores the permutation information of the -algorithm in LAPACK format. `info` indicates whether the factorization -was successful and non-singular when `info==0`, or else `info` takes a -different value. 
The outputs `LD`, `ipiv`, `info` follow the format of -the LAPACK functions of the Bunch-Kaufman factorization (`dsytrf`, -`csytrf`, `chetrf`, etc.), so this function can (ideally) be used -interchangeably with its LAPACK counterparts `LAPACK.sytrf!`, -`LAPACK.sytrf_rook!`, etc. - -`uplo` is a character, either `'U'` or `'L'`, indicating whether the -matrix is stored in the upper triangular part (`uplo=='U'`) or in the -lower triangular part (`uplo=='L'`). - -`syhe` is a character, either `'S'` or `'H'`, indicating whether the -matrix is real/complex symmetric (`syhe=='S'`, and the symmetric -Bunch-Kaufman factorization is performed) or complex hermitian -(`syhe=='H'`, and the hermitian Bunch-Kaufman factorization is -performed). - -If `rook` is `true`, rook pivoting is used (also called bounded -Bunch-Kaufman factorization). If `rook` is `false`, rook pivoting is -not used (standard Bunch-Kaufman factorization). Rook pivoting can -require up to `~N^3/6` extra comparisons in addition to the `~N^3/3` -additions and `~N^3/3` multiplications of the standard Bunch-Kaufman -factorization. However, rook pivoting guarantees that the entries of -`U` or `L` are bounded. - -This function implements the factorization algorithm entirely in -native Julia, so it supports any number type representing real or -complex numbers. -""" -function generic_bunchkaufman!( - uplo::AbstractChar, - A::AbstractMatrix{TS}, - syhe::AbstractChar, - rook::Bool=false - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - - # Inputs must be 1-indexed; bounds may not be checked. - Base.require_one_based_indexing(A) - - # Initialize info integer as 0 - info = 0::BlasInt - # Get size of matrix - N, N2 = size(A) - # Initialize permutation vector - ipiv = Vector{BlasInt}(undef, N) - - # Check input correctness - if uplo != 'U' && uplo != 'L' - info = (-1)::BlasInt - elseif N != N2 - info = (-2)::BlasInt - elseif syhe != 'S' && syhe != 'H' - info = (-3)::BlasInt - end - if info < 0 - arg_illegal("generic_bunchkaufman!", -info, 'W') - return A, ipiv, info - end - # if rook - # error("Rook pivoting not implemented yet.") - # end - - # Initialize `alpha` for use in choosing pivot block size. - # The exact value is - # (1 + sqrt(17)) / 8 ~= 0.6404 - # For rational matrices we a the small denominator approximation: - # 16/25 = 0.64 ~= (1 + sqrt(17)) / 8 - # in order to not increase the denominator size too much in computations. - # The error of this approximation is ≤0.1%, and it still guarantees that a - # 2x2 block in the D factor has a positive-negative eigenvalue pair, as long - # as the approximation lies in (0,1). - alpha = TR <: AbstractFloat ? (1 + sqrt(TR(17))) / 8 : TR(16//25) - # Use complex 1-norm for pivot selection, as in LAPACK - abs1_fun = TS <: Real ? abs : cabs1 - - # Check if the matrix is symmetric of hermitian - if syhe == 'S' || (syhe == 'H' && TS <: Real) - # Use symmetric variant if matrix is real, regardless of 'syhe' value - syhe = 'S' - diag_abs_fun = abs1_fun - else - diag_abs_fun = cabsr - end - - # Compute machine safe minimum when working with floating point numbers. - # LAPACK doesn't use this for diagonal pivoting though... - if rook - if TR <: AbstractFloat - # eps(0) gives the smallest subnormal number, and eps(1) gives the floating - # point type epsilon. eps(0)/eps(1) gives the smallest normal number, plus - # possibly some rounding error. 
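A quick numerical check, for `Float64`, of the two constants discussed in the comments above:

```julia
(1 + sqrt(17)) / 8   # ≈ 0.6403882; the rational stand-in 16//25 = 0.64 differs by < 0.1%
eps(0.0) / eps(1.0)  # == floatmin(Float64) ≈ 2.2250738585072014e-308 (smallest normal number)
```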
- sfmin = nextfloat(eps(TR(0)) / eps(TR(1)), 2) - small = 1 / prevfloat(typemax(TR), 2) - if small >= sfmin - # 1/sfmin may overflow, so use 'small' plus a bit as the safe minimum - sfmin = nextfloat(small * (1 + eps(TR(1))), 2) - end - else - # We're working with rationals in this case, so the all results are exact. - sfmin = TR(0) - end - end - - # Run factorization depending on where the data is stored - upper = (uplo == 'U') - herm = (syhe == 'H') - # TODO: Is this gonna inline properly? - @inline k_cond = upper ? k -> k >= 1 : k -> k <= N - @inline irange = upper ? j -> (j:-1:1) : j -> (j:N) - @inline conj_op = herm ? conj : identity - @inline diagreal_op = herm ? (j -> A[j,j] = TS(real(A[j,j]))) : _ -> () - k = upper ? N : 1 - # Main loop, comments refer to the upper triangular version of the factorization. - # The lower triangular version is analogous. - while k_cond(k); @inbounds begin - kstep = 1 - knext = upper ? k - 1 : k + 1 - p = k - # Determine rows and columns to be interchanged and whether - # a 1-by-1 or 2-by-2 pivot block will be used - absakk = diag_abs_fun(A[k,k]) - # IMAX is the row-index of the largest off-diagonal element in - # column K, and COLMAX is its absolute value. - # Determine both COLMAX and IMAX. - if upper && k > 1 - colmax, imax = findmax(abs1_fun, view(A, 1:(k-1), k)) - elseif (!upper) && k < N - colmax, imax = findmax(abs1_fun, view(A, (k+1):N, k)) - imax += k - else - colmax = 0 - end - if (max(absakk, colmax) == 0) || isnan(absakk) - # Column K is zero or underflow, or contains a NaN: - # set INFO and continue - if info == 0 - info = k::BlasInt - end - kp = k - if herm - # Force diagonal entry to be purely real - A[k,k] = real(A[k,k]) - end - else - if absakk >= alpha*colmax - # no interchange, use 1-by-1 pivot block - kp = k - elseif rook - # Loop until pivot found - while true - # Begin pivot search loop body - # JMAX is the column-index of the largest off-diagonal - # element in row IMAX, and ROWMAX is its absolute value. - # Determine both ROWMAX and JMAX. - if imax != k - thisview = upper ? view(A, imax, (imax+1):k) : - view(A, imax, k:(imax-1)) - rowmax, jmax = findmax(abs1_fun, thisview) - jmax += upper ? imax : k - 1 - else - # LAPACK makes rowmax=0 in this case, but I believe it's - # better to make rowmax=-1, so that we guarantee that jmax - # will be define in the next if-block. - # TODO: is this correct/safe? - rowmax = 0 - end - if (upper && imax > 1) || ((!upper) && imax < N) - # Remember that we only have the upper triangular part - # of the matrix. We inspect the part of the row in the - # lower triangular part by traversing the corresponding - # part of the transpose column. - if upper - stemp, itemp = findmax(abs1_fun, view(A, 1:(imax-1), imax)) - else - stemp, itemp = findmax(abs1_fun, view(A, (imax+1):N, imax)) - itemp += imax - end - if stemp > rowmax - rowmax = stemp - jmax = itemp - end - end - # Equivalent to testing for (used to handle NaN and Inf) - # CABS1( A( IMAX, IMAX ) ).GE.ALPHA*ROWMAX - if !(diag_abs_fun(A[imax,imax]) < alpha*rowmax) - # interchange rows and columns K and IMAX, - # use 1-by-1 pivot block - kp = imax - break - # Equivalent to testing for ROWMAX .EQ. 
COLMAX, - # used to handle NaN and Inf - elseif (p == jmax || rowmax <= colmax) - # interchange rows and columns K+1 and IMAX, - # use 2-by-2 pivot block - kp = imax - kstep = 2 - break - else - # Pivot NOT found, set variables and repeat - p = imax - colmax = rowmax - imax = jmax - end - # End pivot search loop body - end - else - # JMAX is the column-index of the largest off-diagonal - # element in row IMAX, and ROWMAX is its absolute value - # We don't really need JMAX, se we don't store it - thisview = upper ? view(A, imax, (imax+1):k) : view(A, imax, k:(imax-1)) - rowmax = findmax(abs1_fun, thisview)[1] - if (upper && imax > 1) || ((!upper) && imax < N) - # Remember that we only have the upper triangular part - # of the matrix. We inspect the part of the row in the - # lower triangular part by traversing the corresponding - # part of the transpose column. - thisview = upper ? view(A, 1:(imax-1), imax) : - view(A, (imax+1):N, imax) - rowmax = max(rowmax, findmax(abs1_fun, thisview)[1]) - end - if absakk >= alpha * colmax * (colmax/rowmax) - # no interchange, use 1-by-1 pivot block - kp = k - elseif diag_abs_fun(A[imax,imax]) >= alpha * rowmax - # interchange rows and columns K and IMAX, use 1-by-1 - # pivot block - kp = imax - else - # interchange rows and columns K-1 and IMAX, use 2-by-2 - # pivot block - kp = imax - p = imax - kstep = 2 - end - end - # Swap TWO rows and TWO columns - # First swap - # The first swap only needs to be done when using rook pivoting - if rook && kstep == 2 - # Interchange rows and column K and P in the leading - # submatrix A(1:k,1:k) if we have a 2-by-2 pivot - bk_rowcol_swap!(A, k, p, 1, upper, herm) - end - # Second swap - did_swap = bk_rowcol_swap!(A, k, kp, kstep, upper, herm) - if herm && (!did_swap) - # Force diagonal entries to be purely real - A[k,k] = real(A[k,k]) - if kstep == 2 - A[knext,knext] = real(A[knext,knext]) - end - end - if kstep == 1 - # 1-by-1 pivot block D(k): column k now holds - # W(k) = U(k)*D(k) - # where U(k) is the k-th column of U - # When rook=false, sfmin is not defined, but the short-circuit - # evaluation of the conditional avoids an error. - if (!rook) || absakk >= sfmin - # Perform a rank-1 update of A(1:k-1,1:k-1) as - # A := A - U(k)*D(k)*U(k)' = A - W(k)*1/D(k)*W(k)' - # Compute 1/D(k) - r1 = !herm ? 1 / A[k,k] : 1 / real(A[k,k]) - # Perform rank-1 update to store the Schur complement - # in a submatrix of A - x = upper ? view(A, 1:(k-1), k) : view(A, (k+1):N, k) - # if 'upper' this should assign by reference - thisview = upper ? A : view(A, (k+1):N, (k+1):N) - generic_adr1!(uplo, -r1, x, x, thisview, syhe) - # Store U(k) in column k - thisrange = upper ? (1:(k-1)) : ((k+1):N) - for i in thisrange - A[i,k] *= r1 - end - else - # Compute D(k) - r1 = !herm ? A[k,k] : real(A[k,k]) - # Store U(k) in column k - thisrange = upper ? (1:(k-1)) : ((k+1):N) - for i in thisrange - A[i,k] /= r1 - end - # Perform a rank-1 update of A(k+1:n,k+1:n) as - # A := A - U(k)*D(k)*U(k)**T - # = A - W(k)*(1/D(k))*W(k)**T - # = A - (W(k)/D(k))*(D(k))*(W(k)/D(K))**T - # Perform rank-1 update to store the Schur complement - # in a submatrix of A - x = upper ? view(A, 1:(k-1), k) : view(A, (k+1):N, k) - # if 'upper' this should assign by reference - thisview = upper ? 
A : view(A, (k+1):N, (k+1):N) - generic_adr1!(uplo, -r1, x, x, thisview, syhe) - end - elseif (upper && k > 2) || ((!upper) && k < N - 1) - # 2-by-2 pivot block D(k): columns k and k-1 now hold - # ( W(k-1) W(k) ) = ( U(k-1) U(k) )*D(k) - # where U(k) and U(k-1) are the k-th and (k-1)-th columns - # of U - # Perform a rank-2 update of A(1:k-2,1:k-2) as - # A := A - ( U(k-1) U(k) )*D(k)*( U(k-1) U(k) )' - # = A - ( W(k-1) W(k) )*inv(D(k))*( W(k-1) W(k) )' - thisrange = upper ? ((k-2):-1:1) : ((k+2):N) - if !herm - # Real/complex symmetric case - #TODO: is this way to compute the inverse backward stable? - # (it probably is as it comes from LAPACK) - dxk = A[knext,k] - dxx = A[knext,knext] / dxk - dkk = A[k,k] / dxk - t = 1 / (dkk * dxx - 1) - dxk = t / dxk - dkx = dxk - else - # Hermitian case - # TODO: is this way to compute the inverse backward stable? - # (it probably is as it is a small modification of LAPACK's - # method) - dxk = A[knext,k] - dxx = real(A[knext,knext]) / dxk - dkk = real(A[k,k]) / conj(dxk) - t = 1 / (real(dkk * dxx) - 1) - dkx = t / conj(dxk) - dxk = t / dxk - end - for j in thisrange - wknext = dxk * (dkk*A[j,knext] - A[j,k]) - wk = dkx * (dxx*A[j,k] - A[j,knext]) - for i in irange(j) - A[i,j] -= (A[i,k]*conj_op(wk) + A[i,knext]*conj_op(wknext)) - end - A[j,k] = wk - A[j,knext] = wknext - # Force diagonal entry to be purely real, but still of - # complex type TS (I don't know why in LAPACK this - # case, unlike the rest, enforces a complex type - # explicitly). - diagreal_op(j) - end - end - end - # Store details of the interchanges in IPIV - if kstep == 1 - ipiv[k] = kp - else - ipiv[k] = -p - ipiv[knext] = -kp - end - # Decrease K and return to the start of the main loop - # k -= upper ? kstep : -kstep - if upper; k -= kstep; else; k += kstep; end - end; end - return A, ipiv, info -end - - -""" -generic_syconv(F, gettri::Bool=true) -> -(TLU<:Union{AbstractMatrix,Nothing}, e<:AbstractVector, - d<:Union{AbstractVector,Nothing}) - -`generic_syconv` takes the Bunch-Kaufman object `F` and returns the -block-diagonal factor `D`, and the triangular factor `L` (or `U`) if -requested. If the `L` or `U` factor is requested then both `L` (or `U`) and -the main diagonal of `D` will be stored in `TLU`, following LAPACK format, -and `d` will be set to `nothing`. `e` contains the first subdiagonal of -`D`. If the triangular factor is not requested, then `TLU` will not be set -to `nothing`, and the main diagonal of `D` will be stored in `d`. - -`gettri` is a `Bool`, indicating whether the `L` (or `U`) triangular factor -should be computed (`gettri==true`) or not (`gettri==false`). If the -triangular factor is required, a copy of `A.LD` will be created, and the -triangular factor will be computed in-place in said copy. -""" -function generic_syconv( - F::BunchKaufman{TS}, - gettri::Bool=true - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - - # Inputs must be 1-indexed; bounds may not be checked. - Base.require_one_based_indexing(F.LD, F.ipiv) - - # Extract necessary variables - A, ipiv, rook = gettri ? deepcopy(F.LD) : F.LD, F.ipiv, F.rook - - # Get size of matrix - N = size(A)[1] - - # Initialize off-diagonal and diagonal vector - e = Vector{TS}(undef, N) - d = gettri ? nothing : diag(A, 0) - - # Quick return if possible - if N == 0; return gettri ? A : nothing, e, d; end - - # Main loops - upper = (F.uplo == 'U') - @inline icond_d = upper ? i -> i > 1 : i -> i < N - @inline icond_T = upper ? i -> i >= 1 : i -> i <= N - @inline inext = upper ? 
i -> i - 1 : i -> i + 1 - # Convert VALUE - i = upper ? N : 1 - e[N+1-i] = 0 - while icond_d(i); @inbounds begin - if ipiv[i] < 0 - ix = inext(i) - e[i] = A[ix,i] - e[ix] = 0 - if gettri; A[ix,i] = 0; end - if upper; i -= 1; else; i += 1; end - else - e[i] = 0 - end - if upper; i -= 1; else; i += 1; end - end; end - # Convert PERMUTATIONS - if gettri - i = upper ? N : 1 - while icond_T(i); @inbounds begin - thisview = upper ? view(A, :, (i+1):N) : view(A, :, 1:(i-1)) - ip = ipiv[i] - if ip > 0 || rook - Base.swaprows!(thisview, abs(ip), i) - end - if ip <= 0 - ix = inext(i) - Base.swaprows!(thisview, -ipiv[ix], ix) - if upper; i -= 1; else; i += 1; end - end - if upper; i -= 1; else; i += 1; end - end; end - end - return gettri ? A : nothing, e, d -end - - -""" -generic_bksolve!(F, B) -> X<:AbstractVecOrMat - -`generic_bksolve!` solves a system of linear equations `A*X = B` where -the Bunch-Kaufman factorization of `A` is provided by `F`. -""" -function generic_bksolve!( - F::BunchKaufman{TS}, - B0::AbstractVecOrMat{TS}, - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - - # Inputs must be 1-indexed; bounds may not be checked. - Base.require_one_based_indexing(F.LD, F.ipiv, B0) - - # Get size of matrices - N = size(F.LD)[1] - if typeof(B0) <: AbstractVector - N3 = size(B0)[1] - M = 1 - B = view(B0, :, :) - else - N3, M = size(B0) - B = B0 - end - - # Initialize info integer as 0 - info = 0::BlasInt - - # Check input correctness - if N3 != N - info = (-2)::BlasInt - end - if info < 0 - arg_illegal("generic_bksolve!", -info, 'E') - end - - # Quick return if possible - if N == 0 || M == 0; return B; end - - # Extract necessary variables - A, ipiv, symm, rook = F.LD, F.ipiv, issymmetric(F), F.rook - - # Load the requested adjoining operator - adj_op = symm ? identity : conj - - R1 = TR(1) - upper = (F.uplo == 'U') - @inline kcond1 = upper ? k -> k >= 1 : k -> k <= N - @inline kcond2 = upper ? k -> k <= N : k -> k >= 1 - @inline knext = upper ? k -> k - 1 : k -> k + 1 - @inline knext2 = upper ? k -> k + 1 : k -> k - 1 - k = upper ? N : 1 - while kcond1(k); @inbounds begin - kp = ipiv[k] - if kp > 0 - # 1 x 1 diagonal block - # Interchange rows K and IPIV(K). - Base.swaprows!(B, k, kp) - # Multiply by inv(U(K)), where U(K) is the transformation - # stored in column K of A. - Aview = upper ? view(A, 1:(k-1), k) : view(A, (k+1):N, k) - Bview = upper ? B : view(B, (k+1):N, :) - generic_adr1!('F', -R1, Aview, view(B, k, :), Bview, 'S') - # Multiply by the inverse of the diagonal block. - s = symm ? 1 / A[k,k] : 1 / real(A[k,k]) - for j in 1:M; B[k,j] *= s; end - if upper; k -= 1; else; k += 1; end - else - # 2 x 2 diagonal block - # Interchange rows K and -IPIV(K) THEN K-1 and -IPIV(K-1) - # The first interchange is only needed when rook pivoting is used - if rook; Base.swaprows!(B, k, -kp); end - kx = knext(k) - Base.swaprows!(B, kx, -ipiv[kx]) - # Multiply by inv(U(K)), where U(K) is the transformation - # stored in columns K-1 and K of A. - Aview = upper ? view(A, 1:(k-2), k) : view(A, (k+2):N, k) - Bview = upper ? B : view(B, (k+2):N, :) - generic_adr1!('F', -R1, Aview, view(B, k, :), Bview, 'S') - Aview = upper ? view(A, 1:(k-2), kx) : view(A, (k+2):N, kx) - generic_adr1!('F', -R1, Aview, view(B, kx, :), Bview, 'S') - # Multiply by the inverse of the diagonal block. 
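A minimal standalone sketch, with hypothetical values `a`, `c`, `d`, of the scaled 2×2 diagonal-block solve performed next (real symmetric case), checked against a direct solve:

```julia
a, c, d = 4.0, 2.0, -3.0   # block D2 = [a c; c d]; a = A[kx,kx], c = A[kx,k], d = A[k,k]
D2 = [a c; c d]
b  = [1.0, 5.0]            # right-hand side entries for rows kx and k

axx = a / c
akk = d / c
den = axx * akk - 1
bx, bk = b[1] / c, b[2] / c
x = [(akk * bx - bk) / den, (axx * bk - bx) / den]

x ≈ D2 \ b                 # true: matches inverting the 2x2 block directly
```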
- axk = A[kx,k] - axx = A[kx,kx] / axk - akk = A[k,k] / adj_op(axk) - denom = axx*akk - 1 - for j in 1:M - bx = B[kx,j] / axk - bk = B[k,j] / adj_op(axk) - B[kx,j] = (akk*bx - bk) / denom - B[k,j] = (axx*bk - bx) / denom - end - if upper; k -= 2; else; k += 2; end - end - end; end - # Next solve U'*X = B, overwriting B with X. - # K is the main loop index, increasing from 1 to N in steps of - # 1 or 2, depending on the size of the diagonal blocks. - k = upper ? 1 : N - while kcond2(k); @inbounds begin - Aview = upper ? view(A, 1:(k-1), k) : view(A, (k+1):N, k) - Bview = upper ? view(B, 1:(k-1), :) : view(B, (k+1):N, :) - B_row = view(B, k, :) - kp = ipiv[k] - if kp > 0 - # 1 x 1 diagonal block - # Multiply by inv(U**T(K)), where U(K) is the transformation - # stored in column K of A. - if symm - generic_mvpv!('T', -R1, Bview, Aview, R1, B_row) - else - conj!(B_row) - generic_mvpv!('C', -R1, Bview, Aview, R1, B_row) - conj!(B_row) - end - # Interchange rows K and IPIV(K). - Base.swaprows!(B, k, kp) - if upper; k += 1; else; k -= 1; end - else - # 2 x 2 diagonal block - # Multiply by inv(U**T(K+1)), where U(K+1) is the transformation - # stored in columns K and K+1 of A. - kx = knext2(k) - if symm - generic_mvpv!('T', -R1, Bview, Aview, R1, B_row) - Aview = upper ? view(A, 1:(k-1), kx) : view(A, (k+1):N, kx) - B_row = view(B, kx, :) - generic_mvpv!('T', -R1, Bview, Aview, R1, B_row) - elseif k > 1 - conj!(B_row) - generic_mvpv!('C', -R1, Bview, Aview, R1, B_row) - conj!(B_row) - Aview = upper ? view(A, 1:(k-1), kx) : view(A, (k+1):N, kx) - B_row = view(B, kx, :) - conj!(B_row) - generic_mvpv!('C', -R1, Bview, Aview, R1, B_row) - conj!(B_row) - end - # Interchange rows K and -IPIV(K) THEN K+1 and -IPIV(K+1). - # The second interchange is only needed when rook pivoting is used - Base.swaprows!(B, k, -kp) - if rook; Base.swaprows!(B, kx, -ipiv[kx]); end - if upper; k += 2; else; k -= 2; end - end - end; end - return B -end - - -""" -inertia(B::BunchKaufman; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ) -> - np::Union{Nothing,Integer}, nn::Union{Nothing,Integer}, nz::Integer - -`inertia` computes the numerical inertia (the number of positive, -negative and zero eigenvalues, given by `np`, `nn` and `nz`, -respectively) of a real symmetric of Hermitian matrix `B` that has been -factored using the Bunch-Kaufman algorithm. For complex symmetric -matrices the inertia is not defined. in that case `np` and `nn` are set -to `nothing`, but the function still returns the number of zero -eigenvalues. The inertia is computed by counting the eigenvalues signs -of `B.D`. The number of zero eigenvalues is computed as the number of -estimated eigenvalues with complex 1-norm (defined as `|re(.)|+|im(.)|`) -less or equal than `max(atol, rtol*s₁)`, where `s₁` is an upper bound of -the largest singular value of `B.D`, `σ₁` (more specifically, -`0.5*s₁ <= σ₁ <= s₁` for real matrices and `0.35*s₁ <= σ₁ <= s₁` for -complex matrices). `atol` and `rtol` are the absolute and relative -tolerances, respectively. The default relative tolerance is `n*ϵ`, where -`n` is the size of of `A`, and `ϵ` is the [`eps`](@ref) of the number -type of `A`, if this type is a subtype of `AbstractFloat`. In any other -case (if the number type of `A` is `Rational`, for example) `ϵ` is set -to `0`. - -!!! note - Numerical inertia can be a sensitive and imprecise characterization of - ill-conditioned matrices with eigenvalues that are close in magnitude to the - threshold tolerance `max(atol, rtol*s₁)`. 
In such cases, slight perturbations - to the Bunch-Kaufman computation or to the matrix can change the result of - `rank` by pushing one or more eigenvalues across the threshold. These - variations can even occur due to changes in floating-point errors between - different Julia versions, architectures, compilers, or operating systems. - In particular, the size of the entries of the tringular factor directly - influende the scale of the eigenvalues of the diagonal factor, so it is - strongly recommended to use rook pivoting is the inertia is going to be - computed. - On the other hand, if the matrix has rational entries, the inertia - computation is guaranteed is to be exact, as long as there is no - under/overflow in the underlying integer type (and in such cases Julia itself - throws an error), or a positive tolerance (absolute or relative) is - specified. -""" -function inertia(B::BunchKaufman{TS}; - atol::TR = TR(0), - rtol::TR = TR(0) - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - - # Check if matrix is complex symmetric - get_inertia = !(issymmetric(B) && TS <: Complex) - - # Initialize outputs - np, nn, nz = get_inertia ? (0, 0, 0) : (nothing, nothing, 0) - - # Compute matrix size - N = size(B, 1) - - # Quick return if possible - if N == 0; return np, nn, nz; end - - # Compute default relative tolerance - if rtol <= 0 && atol <= 0 - rtol = TR <: AbstractFloat ? (N * eps(TR)) : TR(0) - end - - # We use the complex 1-norm for complex matrices - real_matrix = (TS <: Real) - abs1_fun = real_matrix ? abs : cabs1 - real_fun = real_matrix ? identity : real - - # Check if we must track the largest singular value - get_s1 = (rtol > 0) - - # Constant for lower bound estimation of the smallest eigenvalue in 2x2 blocks. - # The best (largest) value for complex matrices is 1/sqrt(2), but for rational - # matrices we use the small denominator approximation 12/17, in order to not - # increase the denominator size too much in computations. The error of this - # approximation is ≤0.2%, and we still get a valid lower bound. - c = real_matrix ? TR(1) : (TR <: AbstractFloat ? 1/sqrt(TR(2)) : TR(12//17)) - - # First pass, estimate largest singular value and group together size-1 blocks - D = B.D - s1 = TR(0) - i = 1 - while i <= N; @inbounds begin - if i < N && D[i,i+1] != 0 - # 2x2 block - # The largest singular value of a 2x2 matrix is between [1, 2] times - # its complex max-norm, which is between [c, 1] times the largest - # complex 1-norm among the entries of the 2x2 matrix. See "Roger - # Horn and Charles Johnson. Matrix Analysis, 2nd Edition, 5.6.P23". - abs_Dii = abs1_fun(D[i,i]) - abs_Dxx = abs1_fun(D[i+1,i+1]) - s1_block = 2 * max(abs_Dii, abs1_fun(D[i,i+1]), abs_Dxx) - if get_s1; s1 = max(s1, s1_block); end - # Lower bound on the smallest eigenvalue complex 2-norm is - # abs(λ₂) ≥ abs(det(block)) / s1_block - # so the bound in terms of the complex 1-norm becomes - # abs1_fun(λ₂) ≥ c * abs1_fun(det(block)) / s1_block - # For rational matrices, if λ₂=0 then det(block)=0 and then the bound - # becomes zero too. If λ₁=0 too then the block has all zero entries - # and 's1_block'=0, but 'D[i,i+1]' != 0 and so 's1_block' > 0. However, we - # may still have that 'smin_block'≈0, then the value of 'smin_block' may not - # be accurate. In that case the counting routine will detect that both - # eigenvalues are zero without using 'smin_block', so it doesn't matter. - # TODO: is this the most numerically stable way to compute the determinant? 
- # TODO: is this the best way to avoid under/overflow? - if abs_Dii >= abs_Dxx - smin_block = c * abs1_fun((D[i,i]/s1_block)*D[i+1,i+1] - - (D[i,i+1]/s1_block)*D[i+1,i]) - else - smin_block = c * abs1_fun(D[i,i]*(D[i+1,i+1]/s1_block) - - (D[i,i+1]/s1_block)*D[i+1,i]) - end - # Store lower bound in-place in the lower off-diagonal and upper bound - # in-place in the upper off-diagonal. The trace is stored in the first - # diagonal entry block, but only if the full inertia is needed. - D[i,i+1] = s1_block - D[i+1,i] = smin_block - if get_inertia; D[i,i] += D[i+1,i+1]; end - i += 2 - else - # 1x1 block - if get_s1; s1 = max(s1, abs1_fun(D[i,i])); end - i += 1 - end - end; end - - # Second pass, count eigenvalue signs - tol = max(atol, rtol * s1) - i = 1 - while i <= N; @inbounds begin - if i < N && D[i,i+1] != 0 - # 2x2 block. For the counting of zero eigenvalues we use the lower bound on the - # eigenvalues' magnitude. This way, if an eigenvalue is deemed non-zero, then - # it is guaranteed that its magnitude is greater than the tolerance. - s1_block = real_fun(D[i,i+1]) - if (c / 2) * s1_block <= tol - # Lower bound of largest eigenvalue is smaller than the tolerance, - # we consider the both eigenvalues of this block to be zero. - nz += 2 - i += 2 - continue - end - # Reaching this part of the lopp implies that 's1_block' != 0. - smin_block = real_fun(D[i+1,i]) - trace_block = real_fun(D[i,i]) - if smin_block > tol || trace_block == 0 - # If first condition holds then the lower bound of the smallest eigenvalue - # is larger than the tolerance. If the second condition holds then the trace - # is exactly zero, so both eigenvalues have the same magnitude, and we - # already know that the largest one is non-zero. In any case we conclude - # that both eigenvalues are non-zero. - if get_inertia - # The eigenvalues of a 2x2 block are guaranteed to be a - # positive-negative pair. - np += 1 - nn += 1 - end - else - # The lower bound of smallest eigenvalue is smaller than the tolerance and - # the trace is non-zero, so we consider the smallest eigenvalues of this - # block to be zero. - nz += 1 - if get_inertia - # The trace is non-zero, and its sign is the same of the largest - # eigenvalue. - if trace_block >= 0 - np += 1 - else - nn += 1 - end - end - end - i += 2 - else - # 1x1 block - if get_inertia - eig = real_fun(D[i,i]) - if eig > tol - np += 1 - elseif eig < -tol - nn += 1 - else - nz += 1 - end - elseif abs1_fun(D[i,i]) <= tol - nz += 1 - end - i += 1 - end - end; end - - return np, nn, nz -end - - -""" - bunchkaufman_native!(A, rook::Bool=false; check = true) -> BunchKaufman - -`bunchkaufman_native!` is the same as [`bunchkaufman!`](@ref), but it performs -the factorization in native Julia code instead of calling LAPACK. 
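The native path can also be reached through the public `bunchkaufman` front end by using a non-BLAS element type. A minimal sketch, assuming a Julia build that still carries this file and its `inertia` helper:

```julia
using LinearAlgebra

A = Symmetric(big.(rand(5, 5)) + 5I)   # BigFloat forces the generic, native-Julia path
F = bunchkaufman(A)                    # rook pivoting off by default

F.U * F.D * F.U' ≈ A[F.p, F.p]         # true: the factors reassemble the permuted matrix
np, nn, nz = LinearAlgebra.inertia(F)  # positive/negative/zero eigenvalue counts
np + nn + nz == 5                      # true
```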
-""" -function bunchkaufman_native!(A::AbstractMatrix{TS}, - rook::Bool = false; - check::Bool = true, - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - if A isa RealHermSymComplexSym{TR} - syhe = 'S' - elseif ishermitian(A) - syhe = 'H' - elseif issymmetric(A) - syhe = 'S' - else - throw(ArgumentError("Bunch-Kaufman decomposition is only valid for " * - "symmetric or Hermitian matrices")) - end - if A isa HermOrSym - Adata = A.data - uplo = A.uplo - else - Adata = A - uplo = 'U' - end - LD, ipiv, info = generic_bunchkaufman!(uplo, Adata, syhe, rook) - check && checknonsingular(info) - return BunchKaufman(LD, ipiv, uplo, syhe == 'S', rook, info) -end - - -""" -Overload 'bunchkaufman.jl' methods through multiple dispatch -""" - -function bunchkaufman!(A::AbstractMatrix{TS}, - rook::Bool = false; - check::Bool = true - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - return bunchkaufman_native!(A, rook; check) -end - -function bunchkaufman(A::AbstractMatrix{TS}, - rook::Bool = false; - check::Bool = true - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - return bunchkaufman!(bkcopy_oftype(A, TS), rook; check) -end - -function bunchkaufman(A::AbstractMatrix{TS}, - rook::Bool = false; - check::Bool = true - ) where TS <:Union{TI, Complex{TI}} where TI <: Integer - - # Identity whether matrix is symmetric or Hermitian or none - if A isa Symmetric - TA = Symmetric - elseif A isa Hermitian - TA = Hermitian - else - TA = Nothing - end - - # Create a rational copy of input integer matrix, as the Bunch-Kaufman - # algorithm is closed over the rationals but not over the integers. - # We promote input to BigInt to avoid overflow problems - if TA == Nothing - if TS <: Integer - M = Rational{BigInt}.(bkcopy_oftype(A, TS)) - else - M = Complex{Rational{BigInt}}.(bkcopy_oftype(A, TS)) - end - else - if TS <: Integer - M = TA(Rational{BigInt}.(bkcopy_oftype(A, TS)), Symbol(A.uplo)) - else - M = TA(Complex{Rational{BigInt}}.(bkcopy_oftype(A, TS)), - Symbol(A.uplo)) - end - end - - return bunchkaufman_native!(M, rook; check) -end - -function ldiv!(B::BunchKaufman{TS}, - R::AbstractVecOrMat{TS} - ) where TS <: ClosedScalar{TR} where TR <: ClosedReal - return generic_bksolve!(B, R) -end - -function inv(B::BunchKaufman{TS}) where TS <: ClosedScalar{TR} where TR <: ClosedReal - # I don't think there's value in implementing tha LAPACK in-place inverse - # functions `dsytri`, `chetri`, etc., unless of course an efficient - # in-place inverse function `inv!` is needed. - # TODO: reduce the operation count of the inverse by not computing the - # lower/upper triangular part. - if issymmetric(B) - return copytri!(B \ I, B.uplo) - else - return copytri!(B \ I, B.uplo, true) - end -end diff --git a/stdlib/LinearAlgebra/src/cholesky.jl b/stdlib/LinearAlgebra/src/cholesky.jl deleted file mode 100644 index 03f7c273ccbef..0000000000000 --- a/stdlib/LinearAlgebra/src/cholesky.jl +++ /dev/null @@ -1,1038 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -########################## -# Cholesky Factorization # -########################## - -# The dispatch structure in the cholesky, and cholesky! methods is a bit -# complicated and some explanation is therefore provided in the following -# -# In the methods below, LAPACK is called when possible, i.e. StridedMatrices with Float32, -# Float64, ComplexF32, and ComplexF64 element types. For other element or -# matrix types, the unblocked Julia implementation in _chol! is used. For cholesky -# and cholesky! 
pivoting is supported through a RowMaximum() argument. A type argument is -# necessary for type stability since the output of cholesky and cholesky! is either -# Cholesky or CholeskyPivoted. The latter is only -# supported for the four LAPACK element types. For other types, e.g. BigFloats RowMaximum() will -# give an error. It is required that the input is Hermitian (including real symmetric) either -# through the Hermitian and Symmetric views or exact symmetric or Hermitian elements which -# is checked for and an error is thrown if the check fails. - -# The internal structure is as follows -# - _chol! returns the factor and info without checking positive definiteness -# - cholesky/cholesky! returns Cholesky without checking positive definiteness - -# FixMe? The dispatch below seems overly complicated. One simplification could be to -# merge the two Cholesky types into one. It would remove the need for Val completely but -# the cost would be extra unnecessary/unused fields for the unpivoted Cholesky and runtime -# checks of those fields before calls to LAPACK to check which version of the Cholesky -# factorization the type represents. -""" - Cholesky <: Factorization - -Matrix factorization type of the Cholesky factorization of a dense symmetric/Hermitian -positive definite matrix `A`. This is the return type of [`cholesky`](@ref), -the corresponding matrix factorization function. - -The triangular Cholesky factor can be obtained from the factorization `F::Cholesky` -via `F.L` and `F.U`, where `A ≈ F.U' * F.U ≈ F.L * F.L'`. - -The following functions are available for `Cholesky` objects: [`size`](@ref), [`\\`](@ref), -[`inv`](@ref), [`det`](@ref), [`logdet`](@ref) and [`isposdef`](@ref). - -Iterating the decomposition produces the components `L` and `U`. - -# Examples -```jldoctest -julia> A = [4. 12. -16.; 12. 37. -43.; -16. -43. 98.] -3×3 Matrix{Float64}: - 4.0 12.0 -16.0 - 12.0 37.0 -43.0 - -16.0 -43.0 98.0 - -julia> C = cholesky(A) -Cholesky{Float64, Matrix{Float64}} -U factor: -3×3 UpperTriangular{Float64, Matrix{Float64}}: - 2.0 6.0 -8.0 - ⋅ 1.0 5.0 - ⋅ ⋅ 3.0 - -julia> C.U -3×3 UpperTriangular{Float64, Matrix{Float64}}: - 2.0 6.0 -8.0 - ⋅ 1.0 5.0 - ⋅ ⋅ 3.0 - -julia> C.L -3×3 LowerTriangular{Float64, Matrix{Float64}}: - 2.0 ⋅ ⋅ - 6.0 1.0 ⋅ - -8.0 5.0 3.0 - -julia> C.L * C.U == A -true - -julia> l, u = C; # destructuring via iteration - -julia> l == C.L && u == C.U -true -``` -""" -struct Cholesky{T,S<:AbstractMatrix} <: Factorization{T} - factors::S - uplo::Char - info::BlasInt - - function Cholesky{T,S}(factors, uplo, info) where {T,S<:AbstractMatrix} - require_one_based_indexing(factors) - new(factors, uplo, info) - end -end -Cholesky(A::AbstractMatrix{T}, uplo::Symbol, info::Integer) where {T} = - Cholesky{T,typeof(A)}(A, char_uplo(uplo), info) -Cholesky(A::AbstractMatrix{T}, uplo::AbstractChar, info::Integer) where {T} = - Cholesky{T,typeof(A)}(A, uplo, info) -Cholesky(U::UpperTriangular{T}) where {T} = Cholesky{T,typeof(U.data)}(U.data, 'U', 0) -Cholesky(L::LowerTriangular{T}) where {T} = Cholesky{T,typeof(L.data)}(L.data, 'L', 0) - -# iteration for destructuring into components -Base.iterate(C::Cholesky) = (C.L, Val(:U)) -Base.iterate(C::Cholesky, ::Val{:U}) = (C.U, Val(:done)) -Base.iterate(C::Cholesky, ::Val{:done}) = nothing - - -""" - CholeskyPivoted - -Matrix factorization type of the pivoted Cholesky factorization of a dense symmetric/Hermitian -positive semi-definite matrix `A`. 
This is the return type of [`cholesky(_, ::RowMaximum)`](@ref), -the corresponding matrix factorization function. - -The triangular Cholesky factor can be obtained from the factorization `F::CholeskyPivoted` -via `F.L` and `F.U`, and the permutation via `F.p`, where `A[F.p, F.p] ≈ Ur' * Ur ≈ Lr * Lr'` -with `Ur = F.U[1:F.rank, :]` and `Lr = F.L[:, 1:F.rank]`, or alternatively -`A ≈ Up' * Up ≈ Lp * Lp'` with `Up = F.U[1:F.rank, invperm(F.p)]` and -`Lp = F.L[invperm(F.p), 1:F.rank]`. - -The following functions are available for `CholeskyPivoted` objects: -[`size`](@ref), [`\\`](@ref), [`inv`](@ref), [`det`](@ref), and [`rank`](@ref). - -Iterating the decomposition produces the components `L` and `U`. - -# Examples -```jldoctest -julia> X = [1.0, 2.0, 3.0, 4.0]; - -julia> A = X * X'; - -julia> C = cholesky(A, RowMaximum(), check = false) -CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}} -U factor with rank 1: -4×4 UpperTriangular{Float64, Matrix{Float64}}: - 4.0 2.0 3.0 1.0 - ⋅ 0.0 6.0 2.0 - ⋅ ⋅ 9.0 3.0 - ⋅ ⋅ ⋅ 1.0 -permutation: -4-element Vector{Int64}: - 4 - 2 - 3 - 1 - -julia> C.U[1:C.rank, :]' * C.U[1:C.rank, :] ≈ A[C.p, C.p] -true - -julia> l, u = C; # destructuring via iteration - -julia> l == C.L && u == C.U -true -``` -""" -struct CholeskyPivoted{T,S<:AbstractMatrix,P<:AbstractVector{<:Integer}} <: Factorization{T} - factors::S - uplo::Char - piv::P - rank::BlasInt - tol::Real - info::BlasInt - - function CholeskyPivoted{T,S,P}(factors, uplo, piv, rank, tol, info) where {T,S<:AbstractMatrix,P<:AbstractVector} - require_one_based_indexing(factors) - new{T,S,P}(factors, uplo, piv, rank, tol, info) - end -end -CholeskyPivoted(A::AbstractMatrix{T}, uplo::AbstractChar, piv::AbstractVector{<:Integer}, - rank::Integer, tol::Real, info::Integer) where T = - CholeskyPivoted{T,typeof(A),typeof(piv)}(A, uplo, piv, rank, tol, info) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(CholeskyPivoted{T,S}(factors, uplo, piv, rank, tol, info) where {T,S<:AbstractMatrix}, - CholeskyPivoted{T,S,typeof(piv)}(factors, uplo, piv, rank, tol, info), false) - - -# iteration for destructuring into components -Base.iterate(C::CholeskyPivoted) = (C.L, Val(:U)) -Base.iterate(C::CholeskyPivoted, ::Val{:U}) = (C.U, Val(:done)) -Base.iterate(C::CholeskyPivoted, ::Val{:done}) = nothing - - -# make a copy that allow inplace Cholesky factorization -choltype(A) = promote_type(typeof(sqrt(oneunit(eltype(A)))), Float32) -cholcopy(A::AbstractMatrix) = eigencopy_oftype(A, choltype(A)) - -# _chol!. Internal methods for calling unpivoted Cholesky -## BLAS/LAPACK element types -function _chol!(A::StridedMatrix{<:BlasFloat}, ::Type{UpperTriangular}) - C, info = LAPACK.potrf!('U', A) - return UpperTriangular(C), info -end -function _chol!(A::StridedMatrix{<:BlasFloat}, ::Type{LowerTriangular}) - C, info = LAPACK.potrf!('L', A) - return LowerTriangular(C), info -end - -## Non BLAS/LAPACK element types (generic) -function _chol!(A::AbstractMatrix, ::Type{UpperTriangular}) - require_one_based_indexing(A) - n = checksquare(A) - realdiag = eltype(A) <: Complex - @inbounds begin - for k = 1:n - Akk = realdiag ? real(A[k,k]) : A[k,k] - for i = 1:k - 1 - Akk -= realdiag ? 
abs2(A[i,k]) : A[i,k]'A[i,k] - end - A[k,k] = Akk - Akk, info = _chol!(Akk, UpperTriangular) - if info != 0 - return UpperTriangular(A), convert(BlasInt, k) - end - A[k,k] = Akk - AkkInv = inv(copy(Akk')) - for j = k + 1:n - @simd for i = 1:k - 1 - A[k,j] -= A[i,k]'A[i,j] - end - A[k,j] = AkkInv*A[k,j] - end - end - end - return UpperTriangular(A), convert(BlasInt, 0) -end -function _chol!(A::AbstractMatrix, ::Type{LowerTriangular}) - require_one_based_indexing(A) - n = checksquare(A) - realdiag = eltype(A) <: Complex - @inbounds begin - for k = 1:n - Akk = realdiag ? real(A[k,k]) : A[k,k] - for i = 1:k - 1 - Akk -= realdiag ? abs2(A[k,i]) : A[k,i]*A[k,i]' - end - A[k,k] = Akk - Akk, info = _chol!(Akk, LowerTriangular) - if info != 0 - return LowerTriangular(A), convert(BlasInt, k) - end - A[k,k] = Akk - AkkInv = inv(copy(Akk')) - for j = 1:k - 1 - Akjc = A[k,j]' - @simd for i = k + 1:n - A[i,k] -= A[i,j]*Akjc - end - end - @simd for i = k + 1:n - A[i,k] *= AkkInv - end - end - end - return LowerTriangular(A), convert(BlasInt, 0) -end - -## Numbers -function _chol!(x::Number, _) - rx = real(x) - iszero(rx) && return (rx, convert(BlasInt, 1)) - rxr = sqrt(abs(rx)) - rval = convert(promote_type(typeof(x), typeof(rxr)), rxr) - return (rval, convert(BlasInt, rx != abs(x))) -end - -# _cholpivoted!. Internal methods for calling pivoted Cholesky -Base.@propagate_inbounds function _swap_rowcols!(A, ::Type{UpperTriangular}, n, j, q) - j == q && return - @assert j < q - # swap rows and cols without touching the possibly undef-ed triangle - A[q, q] = A[j, j] - for k in 1:j-1 # initial vertical segments - A[k,j], A[k,q] = A[k,q], A[k,j] - end - for k in j+1:q-1 # intermediate segments - A[j,k], A[k,q] = conj(A[k,q]), conj(A[j,k]) - end - A[j,q] = conj(A[j,q]) # corner case - for k in q+1:n # final horizontal segments - A[j,k], A[q,k] = A[q,k], A[j,k] - end - return -end -Base.@propagate_inbounds function _swap_rowcols!(A, ::Type{LowerTriangular}, n, j, q) - j == q && return - @assert j < q - # swap rows and cols without touching the possibly undef-ed triangle - A[q, q] = A[j, j] - for k in 1:j-1 # initial horizontal segments - A[j,k], A[q,k] = A[q,k], A[j,k] - end - for k in j+1:q-1 # intermediate segments - A[k,j], A[q,k] = conj(A[q,k]), conj(A[k,j]) - end - A[q,j] = conj(A[q,j]) # corner case - for k in q+1:n # final vertical segments - A[k,j], A[k,q] = A[k,q], A[k,j] - end - return -end -### BLAS/LAPACK element types -_cholpivoted!(A::StridedMatrix{<:BlasFloat}, ::Type{UpperTriangular}, tol::Real, check::Bool) = - LAPACK.pstrf!('U', A, tol) -_cholpivoted!(A::StridedMatrix{<:BlasFloat}, ::Type{LowerTriangular}, tol::Real, check::Bool) = - LAPACK.pstrf!('L', A, tol) -## Non BLAS/LAPACK element types (generic) -function _cholpivoted!(A::AbstractMatrix, ::Type{UpperTriangular}, tol::Real, check::Bool) - rTA = real(eltype(A)) - # checks - Base.require_one_based_indexing(A) - n = LinearAlgebra.checksquare(A) - # initialization - piv = collect(1:n) - dots = zeros(rTA, n) - temp = similar(dots) - - @inbounds begin - # first step - Akk, q = findmax(i -> real(A[i,i]), 1:n) - stop = tol < 0 ? 
eps(rTA)*n*abs(Akk) : tol - Akk ≤ stop && return A, piv, convert(BlasInt, 0), convert(BlasInt, 1) - # swap - _swap_rowcols!(A, UpperTriangular, n, 1, q) - piv[1], piv[q] = piv[q], piv[1] - A[1,1] = Akk = sqrt(Akk) - AkkInv = inv(copy(Akk')) - @simd for j in 2:n - A[1, j] *= AkkInv - end - - for k in 2:n - @simd for j in k:n - dots[j] += abs2(A[k-1, j]) - temp[j] = real(A[j,j]) - dots[j] - end - Akk, q = findmax(j -> temp[j], k:n) - Akk ≤ stop && return A, piv, convert(BlasInt, k - 1), convert(BlasInt, 1) - q += k - 1 - # swap - _swap_rowcols!(A, UpperTriangular, n, k, q) - dots[k], dots[q] = dots[q], dots[k] - piv[k], piv[q] = piv[q], piv[k] - # update - A[k,k] = Akk = sqrt(Akk) - AkkInv = inv(copy(Akk')) - for j in (k+1):n - @simd for i in 1:(k-1) - A[k,j] -= A[i,k]'A[i,j] - end - A[k,j] = AkkInv * A[k,j] - end - end - return A, piv, convert(BlasInt, n), convert(BlasInt, 0) - end -end -function _cholpivoted!(A::AbstractMatrix, ::Type{LowerTriangular}, tol::Real, check::Bool) - rTA = real(eltype(A)) - # checks - Base.require_one_based_indexing(A) - n = LinearAlgebra.checksquare(A) - # initialization - piv = collect(1:n) - dots = zeros(rTA, n) - temp = similar(dots) - - @inbounds begin - # first step - Akk, q = findmax(i -> real(A[i,i]), 1:n) - stop = tol < 0 ? eps(rTA)*n*abs(Akk) : tol - Akk ≤ stop && return A, piv, convert(BlasInt, 0), convert(BlasInt, 1) - # swap - _swap_rowcols!(A, LowerTriangular, n, 1, q) - piv[1], piv[q] = piv[q], piv[1] - A[1,1] = Akk = sqrt(Akk) - AkkInv = inv(copy(Akk')) - @simd for i in 2:n - A[i,1] *= AkkInv - end - - for k in 2:n - @simd for j in k:n - dots[j] += abs2(A[j, k-1]) - temp[j] = real(A[j,j]) - dots[j] - end - Akk, q = findmax(i -> temp[i], k:n) - Akk ≤ stop && return A, piv, convert(BlasInt, k-1), convert(BlasInt, 1) - q += k - 1 - # swap - _swap_rowcols!(A, LowerTriangular, n, k, q) - dots[k], dots[q] = dots[q], dots[k] - piv[k], piv[q] = piv[q], piv[k] - # update - A[k,k] = Akk = sqrt(Akk) - for j in 1:(k-1) - Akjc = A[k,j]' - @simd for i in (k+1):n - A[i,k] -= A[i,j]*Akjc - end - end - AkkInv = inv(copy(Akk')) - @simd for i in (k+1):n - A[i, k] *= AkkInv - end - end - return A, piv, convert(BlasInt, n), convert(BlasInt, 0) - end -end -function _cholpivoted!(x::Number, tol) - rx = real(x) - iszero(rx) && return (rx, convert(BlasInt, 1)) - rxr = sqrt(abs(rx)) - rval = convert(promote_type(typeof(x), typeof(rxr)), rxr) - return (rval, convert(BlasInt, !(rx == abs(x) > tol))) -end - -# cholesky!. Destructive methods for computing Cholesky factorization of real symmetric -# or Hermitian matrix -## No pivoting (default) -function cholesky!(A::SelfAdjoint, ::NoPivot = NoPivot(); check::Bool = true) - C, info = _chol!(A.data, A.uplo == 'U' ? UpperTriangular : LowerTriangular) - check && checkpositivedefinite(info) - return Cholesky(C.data, A.uplo, info) -end - -### for AbstractMatrix, check that matrix is symmetric/Hermitian -""" - cholesky!(A::AbstractMatrix, NoPivot(); check = true) -> Cholesky - -The same as [`cholesky`](@ref), but saves space by overwriting the input `A`, -instead of creating a copy. An [`InexactError`](@ref) exception is thrown if -the factorization produces a number not representable by the element type of -`A`, e.g. for integer types. - -# Examples -```jldoctest -julia> A = [1 2; 2 50] -2×2 Matrix{Int64}: - 1 2 - 2 50 - -julia> cholesky!(A) -ERROR: InexactError: Int64(6.782329983125268) -Stacktrace: -[...] 
-``` -""" -function cholesky!(A::AbstractMatrix, ::NoPivot = NoPivot(); check::Bool = true) - checksquare(A) - if !ishermitian(A) # return with info = -1 if not Hermitian - check && checkpositivedefinite(convert(BlasInt, -1)) - return Cholesky(A, 'U', convert(BlasInt, -1)) - else - return cholesky!(Hermitian(A), NoPivot(); check = check) - end -end -@deprecate cholesky!(A::StridedMatrix, ::Val{false}; check::Bool = true) cholesky!(A, NoPivot(); check) false -@deprecate cholesky!(A::RealHermSymComplexHerm, ::Val{false}; check::Bool = true) cholesky!(A, NoPivot(); check) false - -## With pivoting -### Non BLAS/LAPACK element types (generic). -function cholesky!(A::SelfAdjoint, ::RowMaximum; tol = 0.0, check::Bool = true) - AA, piv, rank, info = _cholpivoted!(A.data, A.uplo == 'U' ? UpperTriangular : LowerTriangular, tol, check) - C = CholeskyPivoted(AA, A.uplo, piv, rank, tol, info) - check && chkfullrank(C) - return C -end -@deprecate cholesky!(A::RealHermSymComplexHerm{<:Real}, ::Val{true}; kwargs...) cholesky!(A, RowMaximum(); kwargs...) false - -""" - cholesky!(A::AbstractMatrix, RowMaximum(); tol = 0.0, check = true) -> CholeskyPivoted - -The same as [`cholesky`](@ref), but saves space by overwriting the input `A`, -instead of creating a copy. An [`InexactError`](@ref) exception is thrown if the -factorization produces a number not representable by the element type of `A`, -e.g. for integer types. -""" -function cholesky!(A::AbstractMatrix, ::RowMaximum; tol = 0.0, check::Bool = true) - checksquare(A) - if !ishermitian(A) - C = CholeskyPivoted(A, 'U', Vector{BlasInt}(), convert(BlasInt, 1), - tol, convert(BlasInt, -1)) - check && checkpositivedefinite(convert(BlasInt, -1)) - return C - else - return cholesky!(Hermitian(A), RowMaximum(); tol, check) - end -end -@deprecate cholesky!(A::StridedMatrix, ::Val{true}; kwargs...) cholesky!(A, RowMaximum(); kwargs...) false - -# cholesky. Non-destructive methods for computing Cholesky factorization of real symmetric -# or Hermitian matrix -## No pivoting (default) -""" - cholesky(A, NoPivot(); check = true) -> Cholesky - -Compute the Cholesky factorization of a dense symmetric positive definite matrix `A` -and return a [`Cholesky`](@ref) factorization. The matrix `A` can either be a [`Symmetric`](@ref) or [`Hermitian`](@ref) -[`AbstractMatrix`](@ref) or a *perfectly* symmetric or Hermitian `AbstractMatrix`. - -The triangular Cholesky factor can be obtained from the factorization `F` via `F.L` and `F.U`, -where `A ≈ F.U' * F.U ≈ F.L * F.L'`. - -The following functions are available for `Cholesky` objects: [`size`](@ref), [`\\`](@ref), -[`inv`](@ref), [`det`](@ref), [`logdet`](@ref) and [`isposdef`](@ref). - -If you have a matrix `A` that is slightly non-Hermitian due to roundoff errors in its construction, -wrap it in `Hermitian(A)` before passing it to `cholesky` in order to treat it as perfectly Hermitian. - -When `check = true`, an error is thrown if the decomposition fails. -When `check = false`, responsibility for checking the decomposition's -validity (via [`issuccess`](@ref)) lies with the user. - -# Examples -```jldoctest -julia> A = [4. 12. -16.; 12. 37. -43.; -16. -43. 98.] 
-3×3 Matrix{Float64}: - 4.0 12.0 -16.0 - 12.0 37.0 -43.0 - -16.0 -43.0 98.0 - -julia> C = cholesky(A) -Cholesky{Float64, Matrix{Float64}} -U factor: -3×3 UpperTriangular{Float64, Matrix{Float64}}: - 2.0 6.0 -8.0 - ⋅ 1.0 5.0 - ⋅ ⋅ 3.0 - -julia> C.U -3×3 UpperTriangular{Float64, Matrix{Float64}}: - 2.0 6.0 -8.0 - ⋅ 1.0 5.0 - ⋅ ⋅ 3.0 - -julia> C.L -3×3 LowerTriangular{Float64, Matrix{Float64}}: - 2.0 ⋅ ⋅ - 6.0 1.0 ⋅ - -8.0 5.0 3.0 - -julia> C.L * C.U == A -true -``` -""" -cholesky(A::AbstractMatrix, ::NoPivot=NoPivot(); check::Bool = true) = - _cholesky(cholcopy(A); check) -@deprecate cholesky(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{false}; check::Bool = true) cholesky(A, NoPivot(); check) false - -function cholesky(A::AbstractMatrix{Float16}, ::NoPivot=NoPivot(); check::Bool = true) - X = _cholesky(cholcopy(A); check = check) - return Cholesky{Float16}(X) -end -@deprecate cholesky(A::Union{StridedMatrix{Float16},RealHermSymComplexHerm{Float16,<:StridedMatrix}}, ::Val{false}; check::Bool = true) cholesky(A, NoPivot(); check) false -# allow packages like SparseArrays.jl to hook into here and redirect to out-of-place `cholesky` -_cholesky(A::AbstractMatrix, args...; kwargs...) = cholesky!(A, args...; kwargs...) - -# allow cholesky of cholesky -cholesky(A::Cholesky) = A - -## With pivoting -""" - cholesky(A, RowMaximum(); tol = 0.0, check = true) -> CholeskyPivoted - -Compute the pivoted Cholesky factorization of a dense symmetric positive semi-definite matrix `A` -and return a [`CholeskyPivoted`](@ref) factorization. The matrix `A` can either be a [`Symmetric`](@ref) -or [`Hermitian`](@ref) [`AbstractMatrix`](@ref) or a *perfectly* symmetric or Hermitian `AbstractMatrix`. - -The triangular Cholesky factor can be obtained from the factorization `F` via `F.L` and `F.U`, -and the permutation via `F.p`, where `A[F.p, F.p] ≈ Ur' * Ur ≈ Lr * Lr'` with `Ur = F.U[1:F.rank, :]` -and `Lr = F.L[:, 1:F.rank]`, or alternatively `A ≈ Up' * Up ≈ Lp * Lp'` with -`Up = F.U[1:F.rank, invperm(F.p)]` and `Lp = F.L[invperm(F.p), 1:F.rank]`. - -The following functions are available for `CholeskyPivoted` objects: -[`size`](@ref), [`\\`](@ref), [`inv`](@ref), [`det`](@ref), and [`rank`](@ref). - -The argument `tol` determines the tolerance for determining the rank. -For negative values, the tolerance is equal to `eps()*size(A,1)*maximum(diag(A))`. - -If you have a matrix `A` that is slightly non-Hermitian due to roundoff errors in its construction, -wrap it in `Hermitian(A)` before passing it to `cholesky` in order to treat it as perfectly Hermitian. - -When `check = true`, an error is thrown if the decomposition fails. -When `check = false`, responsibility for checking the decomposition's -validity (via [`issuccess`](@ref)) lies with the user. 
- -# Examples -```jldoctest -julia> X = [1.0, 2.0, 3.0, 4.0]; - -julia> A = X * X'; - -julia> C = cholesky(A, RowMaximum(), check = false) -CholeskyPivoted{Float64, Matrix{Float64}, Vector{Int64}} -U factor with rank 1: -4×4 UpperTriangular{Float64, Matrix{Float64}}: - 4.0 2.0 3.0 1.0 - ⋅ 0.0 6.0 2.0 - ⋅ ⋅ 9.0 3.0 - ⋅ ⋅ ⋅ 1.0 -permutation: -4-element Vector{Int64}: - 4 - 2 - 3 - 1 - -julia> C.U[1:C.rank, :]' * C.U[1:C.rank, :] ≈ A[C.p, C.p] -true - -julia> l, u = C; # destructuring via iteration - -julia> l == C.L && u == C.U -true -``` -""" -cholesky(A::AbstractMatrix, ::RowMaximum; tol = 0.0, check::Bool = true) = - _cholesky(cholcopy(A), RowMaximum(); tol, check) -@deprecate cholesky(A::Union{StridedMatrix,RealHermSymComplexHerm{<:Real,<:StridedMatrix}}, ::Val{true}; tol = 0.0, check::Bool = true) cholesky(A, RowMaximum(); tol, check) false - -function cholesky(A::AbstractMatrix{Float16}, ::RowMaximum; tol = 0.0, check::Bool = true) - X = _cholesky(cholcopy(A), RowMaximum(); tol, check) - return CholeskyPivoted{Float16}(X) -end - -## Number -function cholesky(x::Number, uplo::Symbol=:U) - C, info = _chol!(x, uplo) - xf = fill(C, 1, 1) - Cholesky(xf, uplo, info) -end - - -function Cholesky{T}(C::Cholesky) where T - Cnew = convert(AbstractMatrix{T}, C.factors) - Cholesky{T, typeof(Cnew)}(Cnew, C.uplo, C.info) -end -Cholesky{T,S}(C::Cholesky) where {T,S<:AbstractMatrix} = Cholesky{T,S}(C.factors, C.uplo, C.info) -Factorization{T}(C::Cholesky{T}) where {T} = C -Factorization{T}(C::Cholesky) where {T} = Cholesky{T}(C) -CholeskyPivoted{T}(C::CholeskyPivoted{T}) where {T} = C -CholeskyPivoted{T}(C::CholeskyPivoted) where {T} = - CholeskyPivoted(AbstractMatrix{T}(C.factors), C.uplo, C.piv, C.rank, C.tol, C.info) -CholeskyPivoted{T,S}(C::CholeskyPivoted) where {T,S<:AbstractMatrix} = - CholeskyPivoted{T,S,typeof(C.piv)}(C.factors, C.uplo, C.piv, C.rank, C.tol, C.info) -CholeskyPivoted{T,S,P}(C::CholeskyPivoted) where {T,S<:AbstractMatrix,P<:AbstractVector{<:Integer}} = - CholeskyPivoted{T,S,P}(C.factors, C.uplo, C.piv, C.rank, C.tol, C.info) -Factorization{T}(C::CholeskyPivoted{T}) where {T} = C -Factorization{T}(C::CholeskyPivoted) where {T} = CholeskyPivoted{T}(C) - -AbstractMatrix(C::Cholesky) = C.uplo == 'U' ? C.U'C.U : C.L*C.L' -AbstractArray(C::Cholesky) = AbstractMatrix(C) -Matrix(C::Cholesky) = Array(AbstractArray(C)) -Array(C::Cholesky) = Matrix(C) - -function AbstractMatrix(F::CholeskyPivoted) - ip = invperm(F.p) - U = F.U[1:F.rank,ip] - U'U -end -AbstractArray(F::CholeskyPivoted) = AbstractMatrix(F) -Matrix(F::CholeskyPivoted) = Array(AbstractArray(F)) -Array(F::CholeskyPivoted) = Matrix(F) - -copy(C::Cholesky) = Cholesky(copy(C.factors), C.uplo, C.info) -copy(C::CholeskyPivoted) = CholeskyPivoted(copy(C.factors), C.uplo, C.piv, C.rank, C.tol, C.info) - -size(C::Union{Cholesky, CholeskyPivoted}) = size(C.factors) -size(C::Union{Cholesky, CholeskyPivoted}, d::Integer) = size(C.factors, d) - -function _choleskyUfactor(Cfactors, Cuplo) - if Cuplo === 'U' - return UpperTriangular(Cfactors) - else - return copy(LowerTriangular(Cfactors)') - end -end -function _choleskyLfactor(Cfactors, Cuplo) - if Cuplo === 'L' - return LowerTriangular(Cfactors) - else - return copy(UpperTriangular(Cfactors)') - end -end - -function getproperty(C::Cholesky, d::Symbol) - Cfactors = getfield(C, :factors) - Cuplo = getfield(C, :uplo) - if d === :U - _choleskyUfactor(Cfactors, Cuplo) - elseif d === :L - _choleskyLfactor(Cfactors, Cuplo) - elseif d === :UL - return (Cuplo === 'U' ? 
UpperTriangular(Cfactors) : LowerTriangular(Cfactors)) - else - return getfield(C, d) - end -end -Base.propertynames(F::Cholesky, private::Bool=false) = - (:U, :L, :UL, (private ? fieldnames(typeof(F)) : ())...) - -function Base.:(==)(C1::Cholesky, C2::Cholesky) - C1.uplo == C2.uplo || return false - C1.uplo == 'L' ? (C1.L == C2.L) : (C1.U == C2.U) -end - -function getproperty(C::CholeskyPivoted{T}, d::Symbol) where {T} - Cfactors = getfield(C, :factors) - Cuplo = getfield(C, :uplo) - if d === :U - _choleskyUfactor(Cfactors, Cuplo) - elseif d === :L - _choleskyLfactor(Cfactors, Cuplo) - elseif d === :p - return getfield(C, :piv) - elseif d === :P - n = size(C, 1) - P = zeros(T, n, n) - for i = 1:n - P[getfield(C, :piv)[i], i] = one(T) - end - return P - else - return getfield(C, d) - end -end -Base.propertynames(F::CholeskyPivoted, private::Bool=false) = - (:U, :L, :p, :P, (private ? fieldnames(typeof(F)) : ())...) - -function Base.:(==)(C1::CholeskyPivoted, C2::CholeskyPivoted) - (C1.uplo == C2.uplo && C1.p == C2.p) || return false - C1.uplo == 'L' ? (C1.L == C2.L) : (C1.U == C2.U) -end - -issuccess(C::Union{Cholesky,CholeskyPivoted}) = C.info == 0 - -adjoint(C::Union{Cholesky,CholeskyPivoted}) = C - -function show(io::IO, mime::MIME{Symbol("text/plain")}, C::Cholesky) - if issuccess(C) - summary(io, C); println(io) - println(io, "$(C.uplo) factor:") - show(io, mime, C.UL) - else - print(io, "Failed factorization of type $(typeof(C))") - end -end - -function show(io::IO, mime::MIME{Symbol("text/plain")}, C::CholeskyPivoted) - summary(io, C); println(io) - println(io, "$(C.uplo) factor with rank $(rank(C)):") - show(io, mime, C.uplo == 'U' ? C.U : C.L) - println(io, "\npermutation:") - show(io, mime, C.p) -end - -ldiv!(C::Cholesky{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = - LAPACK.potrs!(C.uplo, C.factors, B) - -function ldiv!(C::Cholesky, B::AbstractVecOrMat) - if C.uplo == 'L' - return ldiv!(adjoint(LowerTriangular(C.factors)), ldiv!(LowerTriangular(C.factors), B)) - else - return ldiv!(UpperTriangular(C.factors), ldiv!(adjoint(UpperTriangular(C.factors)), B)) - end -end - -function ldiv!(C::CholeskyPivoted{T,<:StridedMatrix}, B::StridedVector{T}) where T<:BlasFloat - invpermute!(LAPACK.potrs!(C.uplo, C.factors, permute!(B, C.piv)), C.piv) -end -function ldiv!(C::CholeskyPivoted{T,<:StridedMatrix}, B::StridedMatrix{T}) where T<:BlasFloat - n = size(C, 1) - for i=1:size(B, 2) - permute!(view(B, 1:n, i), C.piv) - end - LAPACK.potrs!(C.uplo, C.factors, B) - for i=1:size(B, 2) - invpermute!(view(B, 1:n, i), C.piv) - end - B -end - -function ldiv!(C::CholeskyPivoted, B::AbstractVector) - if C.uplo == 'L' - ldiv!(adjoint(LowerTriangular(C.factors)), - ldiv!(LowerTriangular(C.factors), permute!(B, C.piv))) - else - ldiv!(UpperTriangular(C.factors), - ldiv!(adjoint(UpperTriangular(C.factors)), permute!(B, C.piv))) - end - invpermute!(B, C.piv) -end - -function ldiv!(C::CholeskyPivoted, B::AbstractMatrix) - n = size(C, 1) - for i in 1:size(B, 2) - permute!(view(B, 1:n, i), C.piv) - end - if C.uplo == 'L' - ldiv!(adjoint(LowerTriangular(C.factors)), - ldiv!(LowerTriangular(C.factors), B)) - else - ldiv!(UpperTriangular(C.factors), - ldiv!(adjoint(UpperTriangular(C.factors)), B)) - end - for i in 1:size(B, 2) - invpermute!(view(B, 1:n, i), C.piv) - end - B -end - -function rdiv!(B::AbstractMatrix, C::Cholesky) - if C.uplo == 'L' - return rdiv!(rdiv!(B, adjoint(LowerTriangular(C.factors))), LowerTriangular(C.factors)) - else - return rdiv!(rdiv!(B, 
UpperTriangular(C.factors)), adjoint(UpperTriangular(C.factors))) - end -end - -function LinearAlgebra.rdiv!(B::AbstractMatrix, C::CholeskyPivoted) - n = size(C, 2) - for i in 1:size(B, 1) - permute!(view(B, i, 1:n), C.piv) - end - if C.uplo == 'L' - rdiv!(rdiv!(B, adjoint(LowerTriangular(C.factors))), - LowerTriangular(C.factors)) - else - rdiv!(rdiv!(B, UpperTriangular(C.factors)), - adjoint(UpperTriangular(C.factors))) - end - for i in 1:size(B, 1) - invpermute!(view(B, i, 1:n), C.piv) - end - B -end - -isposdef(C::Union{Cholesky,CholeskyPivoted}) = C.info == 0 - -function det(C::Cholesky) - dd = one(real(eltype(C))) - @inbounds for i in 1:size(C.factors,1) - dd *= real(C.factors[i,i])^2 - end - return dd -end - -function logdet(C::Cholesky) - dd = zero(real(eltype(C))) - @inbounds for i in 1:size(C.factors,1) - dd += log(real(C.factors[i,i])) - end - dd + dd # instead of 2.0dd which can change the type -end - -function det(C::CholeskyPivoted) - if C.rank < size(C.factors, 1) - return zero(real(eltype(C))) - else - dd = one(real(eltype(C))) - for i in 1:size(C.factors,1) - dd *= real(C.factors[i,i])^2 - end - return dd - end -end - -function logdet(C::CholeskyPivoted) - if C.rank < size(C.factors, 1) - return real(eltype(C))(-Inf) - else - dd = zero(real(eltype(C))) - for i in 1:size(C.factors,1) - dd += log(real(C.factors[i,i])) - end - return dd + dd # instead of 2.0dd which can change the type - end -end - -logabsdet(C::Union{Cholesky, CholeskyPivoted}) = logdet(C), one(eltype(C)) # since C is p.s.d. - -inv!(C::Cholesky{<:BlasFloat,<:StridedMatrix}) = - copytri!(LAPACK.potri!(C.uplo, C.factors), C.uplo, true) - -inv(C::Cholesky{<:BlasFloat,<:StridedMatrix}) = inv!(copy(C)) - -function inv(C::CholeskyPivoted{<:BlasFloat,<:StridedMatrix}) - ipiv = invperm(C.piv) - copytri!(LAPACK.potri!(C.uplo, copy(C.factors)), C.uplo, true)[ipiv, ipiv] -end - -function chkfullrank(C::CholeskyPivoted) - if C.rank < size(C.factors, 1) - throw(RankDeficientException(C.rank)) - end -end - -rank(C::CholeskyPivoted) = C.rank - -""" - lowrankupdate!(C::Cholesky, v::AbstractVector) -> CC::Cholesky - -Update a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` then -`CC = cholesky(C.U'C.U + v*v')` but the computation of `CC` only uses `O(n^2)` -operations. The input factorization `C` is updated in place such that on exit `C == CC`. -The vector `v` is destroyed during the computation. -""" -function lowrankupdate!(C::Cholesky, v::AbstractVector) - A = C.factors - n = length(v) - if size(C, 1) != n - throw(DimensionMismatch("updating vector must fit size of factorization")) - end - if C.uplo == 'U' - conj!(v) - end - - for i = 1:n - - # Compute Givens rotation - c, s, r = givensAlgorithm(A[i,i], v[i]) - - # Store new diagonal element - A[i,i] = r - - # Update remaining elements in row/column - if C.uplo == 'U' - for j = i + 1:n - Aij = A[i,j] - vj = v[j] - A[i,j] = c*Aij + s*vj - v[j] = -s'*Aij + c*vj - end - else - for j = i + 1:n - Aji = A[j,i] - vj = v[j] - A[j,i] = c*Aji + s*vj - v[j] = -s'*Aji + c*vj - end - end - end - return C -end - -""" - lowrankdowndate!(C::Cholesky, v::AbstractVector) -> CC::Cholesky - -Downdate a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` then -`CC = cholesky(C.U'C.U - v*v')` but the computation of `CC` only uses `O(n^2)` -operations. The input factorization `C` is updated in place such that on exit `C == CC`. -The vector `v` is destroyed during the computation. 
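Update and downdate can be sanity-checked against a fresh factorization of the modified matrix; a minimal sketch using the non-mutating variants (defined below), with `A` and `v` chosen only for illustration:

```julia
julia> using LinearAlgebra

julia> A = [4.0 2.0; 2.0 3.0]; v = [1.0, 2.0];  # any positive definite A works here

julia> C = cholesky(A);

julia> lowrankupdate(C, v).U ≈ cholesky(A + v * v').U   # update agrees with refactorizing A + v*v'
true

julia> lowrankdowndate(lowrankupdate(C, v), v).U ≈ C.U  # downdating undoes the update
true
```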
-""" -function lowrankdowndate!(C::Cholesky, v::AbstractVector) - A = C.factors - n = length(v) - if size(C, 1) != n - throw(DimensionMismatch("updating vector must fit size of factorization")) - end - if C.uplo == 'U' - conj!(v) - end - - for i = 1:n - - Aii = A[i,i] - - # Compute Givens rotation - s = conj(v[i]/Aii) - s2 = abs2(s) - if s2 > 1 - throw(LinearAlgebra.PosDefException(i)) - end - c = sqrt(1 - abs2(s)) - - # Store new diagonal element - A[i,i] = c*Aii - - # Update remaining elements in row/column - if C.uplo == 'U' - for j = i + 1:n - vj = v[j] - Aij = (A[i,j] - s*vj)/c - A[i,j] = Aij - v[j] = -s'*Aij + c*vj - end - else - for j = i + 1:n - vj = v[j] - Aji = (A[j,i] - s*vj)/c - A[j,i] = Aji - v[j] = -s'*Aji + c*vj - end - end - end - return C -end - -""" - lowrankupdate(C::Cholesky, v::AbstractVector) -> CC::Cholesky - -Update a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` -then `CC = cholesky(C.U'C.U + v*v')` but the computation of `CC` only uses -`O(n^2)` operations. -""" -lowrankupdate(C::Cholesky, v::AbstractVector) = lowrankupdate!(copy(C), copy(v)) - -""" - lowrankdowndate(C::Cholesky, v::AbstractVector) -> CC::Cholesky - -Downdate a Cholesky factorization `C` with the vector `v`. If `A = C.U'C.U` -then `CC = cholesky(C.U'C.U - v*v')` but the computation of `CC` only uses -`O(n^2)` operations. -""" -lowrankdowndate(C::Cholesky, v::AbstractVector) = lowrankdowndate!(copy(C), copy(v)) - -function diag(C::Cholesky{T}, k::Int = 0) where {T} - N = size(C, 1) - absk = abs(k) - iabsk = N - absk - z = Vector{T}(undef, iabsk) - UL = C.factors - if C.uplo == 'U' - for i in 1:iabsk - z[i] = zero(T) - for j in 1:min(i, i+absk) - z[i] += UL[j, i]'UL[j, i+absk] - end - end - else - for i in 1:iabsk - z[i] = zero(T) - for j in 1:min(i, i+absk) - z[i] += UL[i, j]*UL[i+absk, j]' - end - end - end - if !(T <: Real) && k < 0 - z .= adjoint.(z) - end - return z -end diff --git a/stdlib/LinearAlgebra/src/dense.jl b/stdlib/LinearAlgebra/src/dense.jl deleted file mode 100644 index 5e47984120196..0000000000000 --- a/stdlib/LinearAlgebra/src/dense.jl +++ /dev/null @@ -1,1885 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -# Linear algebra functions for dense matrices in column major format - -## BLAS cutoff threshold constants - -#TODO const DOT_CUTOFF = 128 -const ASUM_CUTOFF = 32 -const NRM2_CUTOFF = 32 - -# Generic cross-over constant based on benchmarking on a single thread with an i7 CPU @ 2.5GHz -# L1 cache: 32K, L2 cache: 256K, L3 cache: 6144K -# This constant should ideally be determined by the actual CPU cache size -const ISONE_CUTOFF = 2^21 # 2M - -function isone(A::AbstractMatrix) - require_one_based_indexing(A) # multiplication not defined yet among offset matrices - m, n = size(A) - m != n && return false # only square matrices can satisfy x == one(x) - if sizeof(A) < ISONE_CUTOFF - _isone_triacheck(A) - else - _isone_cachefriendly(A) - end -end - -@inline function _isone_triacheck(A::AbstractMatrix) - @inbounds for i in axes(A,2), j in axes(A,1) - if i == j - isone(A[i,i]) || return false - else - iszero(A[i,j]) && iszero(A[j,i]) || return false - end - end - return true -end - -# Inner loop over rows to be friendly to the CPU cache -@inline function _isone_cachefriendly(A::AbstractMatrix) - @inbounds for i in axes(A,2), j in axes(A,1) - if i == j - isone(A[i,i]) || return false - else - iszero(A[j,i]) || return false - end - end - return true -end - - -""" - isposdef!(A) -> Bool - -Test whether a matrix is positive definite (and Hermitian) by trying to perform a -Cholesky factorization of `A`, overwriting `A` in the process. -See also [`isposdef`](@ref). - -# Examples -```jldoctest -julia> A = [1. 2.; 2. 50.]; - -julia> isposdef!(A) -true - -julia> A -2×2 Matrix{Float64}: - 1.0 2.0 - 2.0 6.78233 -``` -""" -isposdef!(A::AbstractMatrix) = - ishermitian(A) && isposdef(cholesky!(Hermitian(A); check = false)) - -""" - isposdef(A) -> Bool - -Test whether a matrix is positive definite (and Hermitian) by trying to perform a -Cholesky factorization of `A`. - -See also [`isposdef!`](@ref), [`cholesky`](@ref). - -# Examples -```jldoctest -julia> A = [1 2; 2 50] -2×2 Matrix{Int64}: - 1 2 - 2 50 - -julia> isposdef(A) -true -``` -""" -isposdef(A::AbstractMatrix) = - ishermitian(A) && isposdef(cholesky(Hermitian(A); check = false)) -isposdef(x::Number) = imag(x)==0 && real(x) > 0 - -function norm(x::StridedVector{T}, rx::Union{UnitRange{TI},AbstractRange{TI}}) where {T<:BlasFloat,TI<:Integer} - if minimum(rx) < 1 || maximum(rx) > length(x) - throw(BoundsError(x, rx)) - end - GC.@preserve x BLAS.nrm2(length(rx), pointer(x)+(first(rx)-1)*sizeof(T), step(rx)) -end - -norm1(x::Union{Array{T},StridedVector{T}}) where {T<:BlasReal} = - length(x) < ASUM_CUTOFF ? generic_norm1(x) : BLAS.asum(x) - -norm2(x::Union{Array{T},StridedVector{T}}) where {T<:BlasFloat} = - length(x) < NRM2_CUTOFF ? generic_norm2(x) : BLAS.nrm2(x) - -# Conservative assessment of types that have zero(T) defined for themselves -""" - haszero(T::Type) - -Return whether a type `T` has a unique zero element defined using `zero(T)`. -If a type `M` specializes `zero(M)`, it may also choose to set `haszero(M)` to `true`. -By default, `haszero` is assumed to be `false`, in which case the zero elements -are deduced from values rather than the type. - -!!! note - `haszero` is a conservative check that is used to dispatch to - optimized paths. Extending it is optional, but encouraged. -""" -haszero(::Type) = false -haszero(::Type{T}) where {T<:Number} = isconcretetype(T) -haszero(::Type{Union{Missing,T}}) where {T<:Number} = haszero(T) -@propagate_inbounds _zero(M::AbstractArray{T}, inds...) 
where {T} = haszero(T) ? zero(T) : zero(M[inds...]) - -""" - triu!(M, k::Integer) - -Return the upper triangle of `M` starting from the `k`th superdiagonal, -overwriting `M` in the process. - -# Examples -```jldoctest -julia> M = [1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5] -5×5 Matrix{Int64}: - 1 2 3 4 5 - 1 2 3 4 5 - 1 2 3 4 5 - 1 2 3 4 5 - 1 2 3 4 5 - -julia> triu!(M, 1) -5×5 Matrix{Int64}: - 0 2 3 4 5 - 0 0 3 4 5 - 0 0 0 4 5 - 0 0 0 0 5 - 0 0 0 0 0 -``` -""" -function triu!(M::AbstractMatrix, k::Integer) - require_one_based_indexing(M) - m, n = size(M) - for j in 1:min(n, m + k) - for i in max(1, j - k + 1):m - @inbounds M[i,j] = _zero(M, i,j) - end - end - M -end - -triu(M::Matrix, k::Integer) = triu!(copy(M), k) - -""" - tril!(M, k::Integer) - -Return the lower triangle of `M` starting from the `k`th superdiagonal, overwriting `M` in -the process. - -# Examples -```jldoctest -julia> M = [1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5; 1 2 3 4 5] -5×5 Matrix{Int64}: - 1 2 3 4 5 - 1 2 3 4 5 - 1 2 3 4 5 - 1 2 3 4 5 - 1 2 3 4 5 - -julia> tril!(M, 2) -5×5 Matrix{Int64}: - 1 2 3 0 0 - 1 2 3 4 0 - 1 2 3 4 5 - 1 2 3 4 5 - 1 2 3 4 5 -``` -""" -function tril!(M::AbstractMatrix, k::Integer) - require_one_based_indexing(M) - m, n = size(M) - for j in max(1, k + 1):n - for i in 1:min(j - k - 1, m) - @inbounds M[i,j] = _zero(M, i,j) - end - end - M -end - -tril(M::Matrix, k::Integer) = tril!(copy(M), k) - -""" - fillband!(A::AbstractMatrix, x, l, u) - -Fill the band between diagonals `l` and `u` with the value `x`. -""" -function fillband!(A::AbstractMatrix{T}, x, l, u) where T - require_one_based_indexing(A) - m, n = size(A) - xT = convert(T, x) - for j in axes(A,2) - for i in max(1,j-u):min(m,j-l) - @inbounds A[i, j] = xT - end - end - return A -end - -diagind(m::Integer, n::Integer, k::Integer=0) = diagind(IndexLinear(), m, n, k) -diagind(::IndexLinear, m::Integer, n::Integer, k::Integer=0) = - k <= 0 ? range(1-k, step=m+1, length=min(m+k, n)) : range(k*m+1, step=m+1, length=min(m, n-k)) - -function diagind(::IndexCartesian, m::Integer, n::Integer, k::Integer=0) - Cstart = CartesianIndex(1 + max(0,-k), 1 + max(0,k)) - Cstep = CartesianIndex(1, 1) - length = max(0, k <= 0 ? min(m+k, n) : min(m, n-k)) - StepRangeLen(Cstart, Cstep, length) -end - -""" - diagind(M::AbstractMatrix, k::Integer = 0, indstyle::IndexStyle = IndexLinear()) - diagind(M::AbstractMatrix, indstyle::IndexStyle = IndexLinear()) - -An `AbstractRange` giving the indices of the `k`th diagonal of the matrix `M`. -Optionally, an index style may be specified which determines the type of the range returned. -If `indstyle isa IndexLinear` (default), this returns an `AbstractRange{Integer}`. -On the other hand, if `indstyle isa IndexCartesian`, this returns an `AbstractRange{CartesianIndex{2}}`. - -If `k` is not provided, it is assumed to be `0` (corresponding to the main diagonal). - -See also: [`diag`](@ref), [`diagm`](@ref), [`Diagonal`](@ref). - -# Examples -```jldoctest -julia> A = [1 2 3; 4 5 6; 7 8 9] -3×3 Matrix{Int64}: - 1 2 3 - 4 5 6 - 7 8 9 - -julia> diagind(A, -1) -2:4:6 - -julia> diagind(A, IndexCartesian()) -StepRangeLen(CartesianIndex(1, 1), CartesianIndex(1, 1), 3) -``` - -!!! compat "Julia 1.11" - Specifying an `IndexStyle` requires at least Julia 1.11. 
-""" -function diagind(A::AbstractMatrix, k::Integer=0, indexstyle::IndexStyle = IndexLinear()) - require_one_based_indexing(A) - diagind(indexstyle, size(A,1), size(A,2), k) -end - -diagind(A::AbstractMatrix, indexstyle::IndexStyle) = diagind(A, 0, indexstyle) - -""" - diag(M, k::Integer=0) - -The `k`th diagonal of a matrix, as a vector. - -See also [`diagm`](@ref), [`diagind`](@ref), [`Diagonal`](@ref), [`isdiag`](@ref). - -# Examples -```jldoctest -julia> A = [1 2 3; 4 5 6; 7 8 9] -3×3 Matrix{Int64}: - 1 2 3 - 4 5 6 - 7 8 9 - -julia> diag(A,1) -2-element Vector{Int64}: - 2 - 6 -``` -""" -diag(A::AbstractMatrix, k::Integer=0) = A[diagind(A, k, IndexStyle(A))] - -""" - diagview(M, k::Integer=0) - -Return a view into the `k`th diagonal of the matrix `M`. - -See also [`diag`](@ref), [`diagind`](@ref). - -# Examples -```jldoctest -julia> A = [1 2 3; 4 5 6; 7 8 9] -3×3 Matrix{Int64}: - 1 2 3 - 4 5 6 - 7 8 9 - -julia> diagview(A) -3-element view(::Vector{Int64}, 1:4:9) with eltype Int64: - 1 - 5 - 9 - -julia> diagview(A, 1) -2-element view(::Vector{Int64}, 4:4:8) with eltype Int64: - 2 - 6 -``` -""" -diagview(A::AbstractMatrix, k::Integer=0) = @view A[diagind(A, k, IndexStyle(A))] - -""" - diagm(kv::Pair{<:Integer,<:AbstractVector}...) - diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:AbstractVector}...) - -Construct a matrix from `Pair`s of diagonals and vectors. -Vector `kv.second` will be placed on the `kv.first` diagonal. -By default the matrix is square and its size is inferred -from `kv`, but a non-square size `m`×`n` (padded with zeros as needed) -can be specified by passing `m,n` as the first arguments. -For repeated diagonal indices `kv.first` the values in the corresponding -vectors `kv.second` will be added. - -`diagm` constructs a full matrix; if you want storage-efficient -versions with fast arithmetic, see [`Diagonal`](@ref), [`Bidiagonal`](@ref) -[`Tridiagonal`](@ref) and [`SymTridiagonal`](@ref). - -# Examples -```jldoctest -julia> diagm(1 => [1,2,3]) -4×4 Matrix{Int64}: - 0 1 0 0 - 0 0 2 0 - 0 0 0 3 - 0 0 0 0 - -julia> diagm(1 => [1,2,3], -1 => [4,5]) -4×4 Matrix{Int64}: - 0 1 0 0 - 4 0 2 0 - 0 5 0 3 - 0 0 0 0 - -julia> diagm(1 => [1,2,3], 1 => [1,2,3]) -4×4 Matrix{Int64}: - 0 2 0 0 - 0 0 4 0 - 0 0 0 6 - 0 0 0 0 -``` -""" -diagm(kv::Pair{<:Integer,<:AbstractVector}...) = _diagm(nothing, kv...) -diagm(m::Integer, n::Integer, kv::Pair{<:Integer,<:AbstractVector}...) = _diagm((Int(m),Int(n)), kv...) -function _diagm(size, kv::Pair{<:Integer,<:AbstractVector}...) - A = diagm_container(size, kv...) - for p in kv - inds = diagind(A, p.first) - for (i, val) in enumerate(p.second) - A[inds[i]] += val - end - end - return A -end -function diagm_size(size::Nothing, kv::Pair{<:Integer,<:AbstractVector}...) - mnmax = mapreduce(x -> length(x.second) + abs(Int(x.first)), max, kv; init=0) - return mnmax, mnmax -end -function diagm_size(size::Tuple{Int,Int}, kv::Pair{<:Integer,<:AbstractVector}...) - mmax = mapreduce(x -> length(x.second) - min(0,Int(x.first)), max, kv; init=0) - nmax = mapreduce(x -> length(x.second) + max(0,Int(x.first)), max, kv; init=0) - m, n = size - (m ≥ mmax && n ≥ nmax) || throw(DimensionMismatch(lazy"invalid size=$size")) - return m, n -end -function diagm_container(size, kv::Pair{<:Integer,<:AbstractVector}...) - T = promote_type(map(x -> eltype(x.second), kv)...) - # For some type `T`, `zero(T)` is not a `T` and `zeros(T, ...)` fails. - U = promote_type(T, typeof(zero(T))) - return zeros(U, diagm_size(size, kv...)...) 
-end -diagm_container(size, kv::Pair{<:Integer,<:BitVector}...) = - falses(diagm_size(size, kv...)...) - -""" - diagm(v::AbstractVector) - diagm(m::Integer, n::Integer, v::AbstractVector) - -Construct a matrix with elements of the vector as diagonal elements. -By default, the matrix is square and its size is given by -`length(v)`, but a non-square size `m`×`n` can be specified -by passing `m,n` as the first arguments. - -# Examples -```jldoctest -julia> diagm([1,2,3]) -3×3 Matrix{Int64}: - 1 0 0 - 0 2 0 - 0 0 3 -``` -""" -diagm(v::AbstractVector) = diagm(0 => v) -diagm(m::Integer, n::Integer, v::AbstractVector) = diagm(m, n, 0 => v) - -function tr(A::StridedMatrix{T}) where T - checksquare(A) - isempty(A) && return zero(T) - reduce(+, (A[i] for i in diagind(A, IndexStyle(A)))) -end - -_kronsize(A::AbstractMatrix, B::AbstractMatrix) = map(*, size(A), size(B)) -_kronsize(A::AbstractMatrix, B::AbstractVector) = (size(A, 1)*length(B), size(A, 2)) -_kronsize(A::AbstractVector, B::AbstractMatrix) = (length(A)*size(B, 1), size(B, 2)) - -""" - kron!(C, A, B) - -Computes the Kronecker product of `A` and `B` and stores the result in `C`, -overwriting the existing content of `C`. This is the in-place version of [`kron`](@ref). - -!!! compat "Julia 1.6" - This function requires Julia 1.6 or later. -""" -function kron!(C::AbstractVecOrMat, A::AbstractVecOrMat, B::AbstractVecOrMat) - size(C) == _kronsize(A, B) || throw(DimensionMismatch("kron!")) - _kron!(C, A, B) -end -function kron!(c::AbstractVector, a::AbstractVector, b::AbstractVector) - length(c) == length(a) * length(b) || throw(DimensionMismatch("kron!")) - m = firstindex(c) - @inbounds for i in eachindex(a) - ai = a[i] - for k in eachindex(b) - c[m] = ai*b[k] - m += 1 - end - end - return c -end -kron!(c::AbstractVecOrMat, a::AbstractVecOrMat, b::Number) = mul!(c, a, b) -kron!(c::AbstractVecOrMat, a::Number, b::AbstractVecOrMat) = mul!(c, a, b) - -function _kron!(C, A::AbstractMatrix, B::AbstractMatrix) - m = firstindex(C) - @inbounds for j in axes(A,2), l in axes(B,2), i in axes(A,1) - Aij = A[i,j] - for k in axes(B,1) - C[m] = Aij*B[k,l] - m += 1 - end - end - return C -end -function _kron!(C, A::AbstractMatrix, b::AbstractVector) - m = firstindex(C) - @inbounds for j in axes(A,2), i in axes(A,1) - Aij = A[i,j] - for k in eachindex(b) - C[m] = Aij*b[k] - m += 1 - end - end - return C -end -function _kron!(C, a::AbstractVector, B::AbstractMatrix) - m = firstindex(C) - @inbounds for l in axes(B,2), i in eachindex(a) - ai = a[i] - for k in axes(B,1) - C[m] = ai*B[k,l] - m += 1 - end - end - return C -end - -""" - kron(A, B) - -Computes the Kronecker product of two vectors, matrices or numbers. - -For real vectors `v` and `w`, the Kronecker product is related to the outer product by -`kron(v,w) == vec(w * transpose(v))` or -`w * transpose(v) == reshape(kron(v,w), (length(w), length(v)))`. -Note how the ordering of `v` and `w` differs on the left and right -of these expressions (due to column-major storage). -For complex vectors, the outer product `w * v'` also differs by conjugation of `v`. 
- -# Examples -```jldoctest -julia> A = [1 2; 3 4] -2×2 Matrix{Int64}: - 1 2 - 3 4 - -julia> B = [im 1; 1 -im] -2×2 Matrix{Complex{Int64}}: - 0+1im 1+0im - 1+0im 0-1im - -julia> kron(A, B) -4×4 Matrix{Complex{Int64}}: - 0+1im 1+0im 0+2im 2+0im - 1+0im 0-1im 2+0im 0-2im - 0+3im 3+0im 0+4im 4+0im - 3+0im 0-3im 4+0im 0-4im - -julia> v = [1, 2]; w = [3, 4, 5]; - -julia> w*transpose(v) -3×2 Matrix{Int64}: - 3 6 - 4 8 - 5 10 - -julia> reshape(kron(v,w), (length(w), length(v))) -3×2 Matrix{Int64}: - 3 6 - 4 8 - 5 10 -``` -""" -function kron(A::AbstractVecOrMat{T}, B::AbstractVecOrMat{S}) where {T,S} - C = Matrix{promote_op(*,T,S)}(undef, _kronsize(A, B)) - return kron!(C, A, B) -end -function kron(a::AbstractVector{T}, b::AbstractVector{S}) where {T,S} - c = Vector{promote_op(*,T,S)}(undef, length(a)*length(b)) - return kron!(c, a, b) -end -kron(a::Number, b::Union{Number, AbstractVecOrMat}) = a * b -kron(a::AbstractVecOrMat, b::Number) = a * b -kron(a::AdjointAbsVec, b::AdjointAbsVec) = adjoint(kron(adjoint(a), adjoint(b))) -kron(a::AdjOrTransAbsVec, b::AdjOrTransAbsVec) = transpose(kron(transpose(a), transpose(b))) - -# Matrix power -(^)(A::AbstractMatrix, p::Integer) = p < 0 ? power_by_squaring(inv(A), -p) : power_by_squaring(A, p) -function (^)(A::AbstractMatrix{T}, p::Integer) where T<:Integer - # make sure that e.g. [1 1;1 0]^big(3) - # gets promotes in a similar way as 2^big(3) - TT = promote_op(^, T, typeof(p)) - return power_by_squaring(convert(AbstractMatrix{TT}, A), p) -end -function integerpow(A::AbstractMatrix{T}, p) where T - TT = promote_op(^, T, typeof(p)) - return (TT == T ? A : convert(AbstractMatrix{TT}, A))^Integer(p) -end -function schurpow(A::AbstractMatrix, p) - if istriu(A) - # Integer part - retmat = A ^ floor(Integer, p) - # Real part - if p - floor(p) == 0.5 - # special case: A^0.5 === sqrt(A) - retmat = retmat * sqrt(A) - else - retmat = retmat * powm!(UpperTriangular(float.(A)), real(p - floor(p))) - end - else - S,Q,d = Schur{Complex}(schur(A)) - # Integer part - R = S ^ floor(Integer, p) - # Real part - if p - floor(p) == 0.5 - # special case: A^0.5 === sqrt(A) - R = R * sqrt(S) - else - R = R * powm!(UpperTriangular(float.(S)), real(p - floor(p))) - end - retmat = Q * R * Q' - end - - # if A has nonpositive real eigenvalues, retmat is a nonprincipal matrix power. - if isreal(retmat) - return real(retmat) - else - return retmat - end -end -function (^)(A::AbstractMatrix{T}, p::Real) where T - checksquare(A) - # Quicker return if A is diagonal - if isdiag(A) - TT = promote_op(^, T, typeof(p)) - retmat = copymutable_oftype(A, TT) - for i in diagind(retmat, IndexStyle(retmat)) - retmat[i] = retmat[i] ^ p - end - return retmat - end - - # For integer powers, use power_by_squaring - isinteger(p) && return integerpow(A, p) - - # If possible, use diagonalization - if ishermitian(A) - return (Hermitian(A)^p) - end - - # Otherwise, use Schur decomposition - return schurpow(A, p) -end - -""" - ^(A::AbstractMatrix, p::Number) - -Matrix power, equivalent to ``\\exp(p\\log(A))`` - -# Examples -```jldoctest -julia> [1 2; 0 3]^3 -2×2 Matrix{Int64}: - 1 26 - 0 27 -``` -""" -(^)(A::AbstractMatrix, p::Number) = exp(p*log(A)) - -# Matrix exponential - -""" - exp(A::AbstractMatrix) - -Compute the matrix exponential of `A`, defined by - -```math -e^A = \\sum_{n=0}^{\\infty} \\frac{A^n}{n!}. -``` - -For symmetric or Hermitian `A`, an eigendecomposition ([`eigen`](@ref)) is -used, otherwise the scaling and squaring algorithm (see [^H05]) is chosen. - -[^H05]: Nicholas J. 
Higham, "The squaring and scaling method for the matrix exponential revisited", SIAM Journal on Matrix Analysis and Applications, 26(4), 2005, 1179-1193. [doi:10.1137/090768539](https://doi.org/10.1137/090768539) - -# Examples -```jldoctest -julia> A = Matrix(1.0I, 2, 2) -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 1.0 - -julia> exp(A) -2×2 Matrix{Float64}: - 2.71828 0.0 - 0.0 2.71828 -``` -""" -exp(A::AbstractMatrix) = exp!(copy_similar(A, eigtype(eltype(A)))) -exp(A::AdjointAbsMat) = adjoint(exp(parent(A))) -exp(A::TransposeAbsMat) = transpose(exp(parent(A))) - -""" - cis(A::AbstractMatrix) - -More efficient method for `exp(im*A)` of square matrix `A` -(especially if `A` is `Hermitian` or real-`Symmetric`). - -See also [`cispi`](@ref), [`sincos`](@ref), [`exp`](@ref). - -!!! compat "Julia 1.7" - Support for using `cis` with matrices was added in Julia 1.7. - -# Examples -```jldoctest -julia> cis([π 0; 0 π]) ≈ -I -true -``` -""" -cis(A::AbstractMatrix) = exp(im * A) # fallback -cis(A::AbstractMatrix{<:Base.HWNumber}) = exp_maybe_inplace(float.(im .* A)) - -exp_maybe_inplace(A::StridedMatrix{<:Union{ComplexF32, ComplexF64}}) = exp!(A) -exp_maybe_inplace(A) = exp(A) - -""" - ^(b::Number, A::AbstractMatrix) - -Matrix exponential, equivalent to ``\\exp(\\log(b)A)``. - -!!! compat "Julia 1.1" - Support for raising `Irrational` numbers (like `ℯ`) - to a matrix was added in Julia 1.1. - -# Examples -```jldoctest -julia> 2^[1 2; 0 3] -2×2 Matrix{Float64}: - 2.0 6.0 - 0.0 8.0 - -julia> ℯ^[1 2; 0 3] -2×2 Matrix{Float64}: - 2.71828 17.3673 - 0.0 20.0855 -``` -""" -Base.:^(b::Number, A::AbstractMatrix) = exp!(log(b)*A) -# method for ℯ to explicitly elide the log(b) multiplication -Base.:^(::Irrational{:ℯ}, A::AbstractMatrix) = exp(A) - -## Destructive matrix exponential using algorithm from Higham, 2008, -## "Functions of Matrices: Theory and Computation", SIAM -function exp!(A::StridedMatrix{T}) where T<:BlasFloat - n = checksquare(A) - if isdiag(A) - for i in diagind(A, IndexStyle(A)) - A[i] = exp(A[i]) - end - return A - elseif ishermitian(A) - return copytri!(parent(exp(Hermitian(A))), 'U', true) - end - ilo, ihi, scale = LAPACK.gebal!('B', A) # modifies A - nA = opnorm(A, 1) - ## For sufficiently small nA, use lower order Padé-Approximations - if (nA <= 2.1) - if nA > 0.95 - C = T[17643225600.,8821612800.,2075673600.,302702400., - 30270240., 2162160., 110880., 3960., - 90., 1.] - elseif nA > 0.25 - C = T[17297280.,8648640.,1995840.,277200., - 25200., 1512., 56., 1.] - elseif nA > 0.015 - C = T[30240.,15120.,3360., - 420., 30., 1.] - else - C = T[120.,60.,12.,1.] 
- end - A2 = A * A - # Compute U and V: Even/odd terms in Padé numerator & denom - # Expansion of k=1 in for loop - P = A2 - U = similar(P) - V = similar(P) - for ind in CartesianIndices(P) - U[ind] = C[4]*P[ind] + C[2]*I[ind] - V[ind] = C[3]*P[ind] + C[1]*I[ind] - end - for k in 2:(div(length(C), 2) - 1) - P *= A2 - for ind in eachindex(P, U, V) - U[ind] += C[2k + 2] * P[ind] - V[ind] += C[2k + 1] * P[ind] - end - end - - # U = A * U, but we overwrite P to avoid an allocation - mul!(P, A, U) - # P may be seen as an alias for U in the following code - - # Padé approximant: (V-U)\(V+U) - VminU, VplusU = V, U # Reuse already allocated arrays - for ind in eachindex(V, U) - vi, ui = V[ind], P[ind] - VminU[ind] = vi - ui - VplusU[ind] = vi + ui - end - X = LAPACK.gesv!(VminU, VplusU)[1] - else - s = log2(nA/5.4) # power of 2 later reversed by squaring - if s > 0 - si = ceil(Int,s) - twopowsi = convert(T,2^si) - for ind in eachindex(A) - A[ind] /= twopowsi - end - end - CC = T[64764752532480000.,32382376266240000.,7771770303897600., - 1187353796428800., 129060195264000., 10559470521600., - 670442572800., 33522128640., 1323241920., - 40840800., 960960., 16380., - 182., 1.] - A2 = A * A - A4 = A2 * A2 - A6 = A2 * A4 - tmp1, tmp2 = similar(A6), similar(A6) - - # Allocation economical version of: - # U = A * (A6 * (CC[14].*A6 .+ CC[12].*A4 .+ CC[10].*A2) .+ - # CC[8].*A6 .+ CC[6].*A4 .+ CC[4]*A2+CC[2]*I) - for ind in eachindex(tmp1) - tmp1[ind] = CC[14]*A6[ind] + CC[12]*A4[ind] + CC[10]*A2[ind] - tmp2[ind] = CC[8]*A6[ind] + CC[6]*A4[ind] + CC[4]*A2[ind] - end - mul!(tmp2, true,CC[2]*I, true, true) # tmp2 .+= CC[2]*I - U = mul!(tmp2, A6, tmp1, true, true) - U, tmp1 = mul!(tmp1, A, U), A # U = A * U0 - - # Allocation economical version of: - # V = A6 * (CC[13].*A6 .+ CC[11].*A4 .+ CC[9].*A2) .+ - # CC[7].*A6 .+ CC[5].*A4 .+ CC[3]*A2 .+ CC[1]*I - for ind in eachindex(tmp1) - tmp1[ind] = CC[13]*A6[ind] + CC[11]*A4[ind] + CC[9]*A2[ind] - tmp2[ind] = CC[7]*A6[ind] + CC[5]*A4[ind] + CC[3]*A2[ind] - end - mul!(tmp2, true, CC[1]*I, true, true) # tmp2 .+= CC[1]*I - V = mul!(tmp2, A6, tmp1, true, true) - - for ind in eachindex(tmp1) - tmp1[ind] = V[ind] + U[ind] - tmp2[ind] = V[ind] - U[ind] # tmp2 already contained V but this seems more readable - end - X = LAPACK.gesv!(tmp2, tmp1)[1] # X now contains r_13 in Higham 2008 - - if s > 0 - # Repeated squaring to compute X = r_13^(2^si) - for t=1:si - mul!(tmp2, X, X) - X, tmp2 = tmp2, X - end - end - end - - # Undo the balancing - for j = ilo:ihi - scj = scale[j] - for i = 1:n - X[j,i] *= scj - end - for i = 1:n - X[i,j] /= scj - end - end - - if ilo > 1 # apply lower permutations in reverse order - for j in (ilo-1):-1:1 - rcswap!(j, Int(scale[j]), X) - end - end - if ihi < n # apply upper permutations in forward order - for j in (ihi+1):n - rcswap!(j, Int(scale[j]), X) - end - end - X -end - -## Swap rows i and j and columns i and j in X -function rcswap!(i::Integer, j::Integer, X::AbstractMatrix{<:Number}) - for k = axes(X,1) - X[k,i], X[k,j] = X[k,j], X[k,i] - end - for k = axes(X,2) - X[i,k], X[j,k] = X[j,k], X[i,k] - end -end - -""" - log(A::AbstractMatrix) - -If `A` has no negative real eigenvalue, compute the principal matrix logarithm of `A`, i.e. -the unique matrix ``X`` such that ``e^X = A`` and ``-\\pi < Im(\\lambda) < \\pi`` for all -the eigenvalues ``\\lambda`` of ``X``. If `A` has nonpositive eigenvalues, a nonprincipal -matrix function is returned whenever possible. 
- -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is -used, if `A` is triangular an improved version of the inverse scaling and squaring method is -employed (see [^AH12] and [^AHR13]). If `A` is real with no negative eigenvalues, then -the real Schur form is computed. Otherwise, the complex Schur form is computed. Then -the upper (quasi-)triangular algorithm in [^AHR13] is used on the upper (quasi-)triangular -factor. - -[^AH12]: Awad H. Al-Mohy and Nicholas J. Higham, "Improved inverse scaling and squaring algorithms for the matrix logarithm", SIAM Journal on Scientific Computing, 34(4), 2012, C153-C169. [doi:10.1137/110852553](https://doi.org/10.1137/110852553) - -[^AHR13]: Awad H. Al-Mohy, Nicholas J. Higham and Samuel D. Relton, "Computing the Fréchet derivative of the matrix logarithm and estimating the condition number", SIAM Journal on Scientific Computing, 35(4), 2013, C394-C410. [doi:10.1137/120885991](https://doi.org/10.1137/120885991) - -# Examples -```jldoctest -julia> A = Matrix(2.7182818*I, 2, 2) -2×2 Matrix{Float64}: - 2.71828 0.0 - 0.0 2.71828 - -julia> log(A) -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 1.0 -``` -""" -function log(A::AbstractMatrix) - # If possible, use diagonalization - if ishermitian(A) - logHermA = log(Hermitian(A)) - return ishermitian(logHermA) ? copytri!(parent(logHermA), 'U', true) : parent(logHermA) - elseif istriu(A) - return triu!(parent(log(UpperTriangular(A)))) - elseif isreal(A) - SchurF = schur(real(A)) - if istriu(SchurF.T) - logA = SchurF.Z * log(UpperTriangular(SchurF.T)) * SchurF.Z' - else - # real log exists whenever all eigenvalues are positive - is_log_real = !any(x -> isreal(x) && real(x) ≤ 0, SchurF.values) - if is_log_real - logA = SchurF.Z * log_quasitriu(SchurF.T) * SchurF.Z' - else - SchurS = Schur{Complex}(SchurF) - logA = SchurS.Z * log(UpperTriangular(SchurS.T)) * SchurS.Z' - end - end - return eltype(A) <: Complex ? complex(logA) : logA - else - SchurF = schur(A) - return SchurF.vectors * log(UpperTriangular(SchurF.T)) * SchurF.vectors' - end -end - -log(A::AdjointAbsMat) = adjoint(log(parent(A))) -log(A::TransposeAbsMat) = transpose(log(parent(A))) - -""" - sqrt(A::AbstractMatrix) - -If `A` has no negative real eigenvalues, compute the principal matrix square root of `A`, -that is the unique matrix ``X`` with eigenvalues having positive real part such that -``X^2 = A``. Otherwise, a nonprincipal square root is returned. - -If `A` is real-symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is -used to compute the square root. For such matrices, eigenvalues λ that -appear to be slightly negative due to roundoff errors are treated as if they were zero. -More precisely, matrices with all eigenvalues `≥ -rtol*(max |λ|)` are treated as semidefinite -(yielding a Hermitian square root), with negative eigenvalues taken to be zero. -`rtol` is a keyword argument to `sqrt` (in the Hermitian/real-symmetric case only) that -defaults to machine precision scaled by `size(A,1)`. - -Otherwise, the square root is determined by means of the -Björck-Hammarling method [^BH83], which computes the complex Schur form ([`schur`](@ref)) -and then the complex square root of the triangular factor. -If a real square root exists, then an extension of this method [^H87] that computes the real -Schur form and then the real square root of the quasi-triangular factor is instead used. 
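For a rank-deficient but positive semi-definite Hermitian matrix, the default tolerance yields a Hermitian square root rather than an error; a minimal sketch, with the matrix chosen only for illustration:

```julia
julia> using LinearAlgebra

julia> A = Hermitian([1.0 1.0; 1.0 1.0]);  # eigenvalues 2 and 0, so only semi-definite

julia> S = sqrt(A);                        # tiny negative roundoff eigenvalues are treated as zero

julia> S * S ≈ A
true
```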
- -[^BH83]: - - Åke Björck and Sven Hammarling, "A Schur method for the square root of a matrix", - Linear Algebra and its Applications, 52-53, 1983, 127-140. - [doi:10.1016/0024-3795(83)80010-X](https://doi.org/10.1016/0024-3795(83)80010-X) - -[^H87]: - - Nicholas J. Higham, "Computing real square roots of a real matrix", - Linear Algebra and its Applications, 88-89, 1987, 405-430. - [doi:10.1016/0024-3795(87)90118-2](https://doi.org/10.1016/0024-3795(87)90118-2) - -# Examples -```jldoctest -julia> A = [4 0; 0 4] -2×2 Matrix{Int64}: - 4 0 - 0 4 - -julia> sqrt(A) -2×2 Matrix{Float64}: - 2.0 0.0 - 0.0 2.0 -``` -""" -sqrt(::AbstractMatrix) - -function sqrt(A::AbstractMatrix{T}) where {T<:Union{Real,Complex}} - if checksquare(A) == 0 - return copy(A) - elseif ishermitian(A) - sqrtHermA = sqrt(Hermitian(A)) - return ishermitian(sqrtHermA) ? copytri!(parent(sqrtHermA), 'U', true) : parent(sqrtHermA) - elseif istriu(A) - return triu!(parent(sqrt(UpperTriangular(A)))) - elseif isreal(A) - SchurF = schur(real(A)) - if istriu(SchurF.T) - sqrtA = SchurF.Z * sqrt(UpperTriangular(SchurF.T)) * SchurF.Z' - else - # real sqrt exists whenever no eigenvalues are negative - is_sqrt_real = !any(x -> isreal(x) && real(x) < 0, SchurF.values) - # sqrt_quasitriu uses LAPACK functions for non-triu inputs - if typeof(sqrt(zero(T))) <: BlasFloat && is_sqrt_real - sqrtA = SchurF.Z * sqrt_quasitriu(SchurF.T) * SchurF.Z' - else - SchurS = Schur{Complex}(SchurF) - sqrtA = SchurS.Z * sqrt(UpperTriangular(SchurS.T)) * SchurS.Z' - end - end - return eltype(A) <: Complex ? complex(sqrtA) : sqrtA - else - SchurF = schur(A) - return SchurF.vectors * sqrt(UpperTriangular(SchurF.T)) * SchurF.vectors' - end -end - -sqrt(A::AdjointAbsMat) = adjoint(sqrt(parent(A))) -sqrt(A::TransposeAbsMat) = transpose(sqrt(parent(A))) - -""" - cbrt(A::AbstractMatrix{<:Real}) - -Computes the real-valued cube root of a real-valued matrix `A`. If `T = cbrt(A)`, then -we have `T*T*T ≈ A`, see example given below. - -If `A` is symmetric, i.e., of type `HermOrSym{<:Real}`, then ([`eigen`](@ref)) is used to -find the cube root. Otherwise, a specialized version of the p-th root algorithm [^S03] is -utilized, which exploits the real-valued Schur decomposition ([`schur`](@ref)) -to compute the cube root. - -[^S03]: - - Matthew I. Smith, "A Schur Algorithm for Computing Matrix pth Roots", - SIAM Journal on Matrix Analysis and Applications, vol. 24, 2003, pp. 971–989. 
- [doi:10.1137/S0895479801392697](https://doi.org/10.1137/s0895479801392697) - -# Examples -```jldoctest -julia> A = [0.927524 -0.15857; -1.3677 -1.01172] -2×2 Matrix{Float64}: - 0.927524 -0.15857 - -1.3677 -1.01172 - -julia> T = cbrt(A) -2×2 Matrix{Float64}: - 0.910077 -0.151019 - -1.30257 -0.936818 - -julia> T*T*T ≈ A -true -``` -""" -function cbrt(A::AbstractMatrix{<:Real}) - if checksquare(A) == 0 - return copy(A) - elseif issymmetric(A) - return cbrt(Symmetric(A, :U)) - else - S = schur(A) - return S.Z * _cbrt_quasi_triu!(S.T) * S.Z' - end -end - -# Cube roots of adjoint and transpose matrices -cbrt(A::AdjointAbsMat) = adjoint(cbrt(parent(A))) -cbrt(A::TransposeAbsMat) = transpose(cbrt(parent(A))) - -function applydiagonal(f, A) - dinv = f(Diagonal(A)) - copyto!(similar(A, eltype(dinv)), dinv) -end - -function inv(A::StridedMatrix{T}) where T - checksquare(A) - if isdiag(A) - Ai = applydiagonal(inv, A) - elseif istriu(A) - Ai = triu!(parent(inv(UpperTriangular(A)))) - elseif istril(A) - Ai = tril!(parent(inv(LowerTriangular(A)))) - else - Ai = inv!(lu(A)) - Ai = convert(typeof(parent(Ai)), Ai) - end - return Ai -end - -# helper function to perform a broadcast in-place if the destination is strided -# otherwise, this performs an out-of-place broadcast -@inline _broadcast!!(f, dest::StridedArray, args...) = broadcast!(f, dest, args...) -@inline _broadcast!!(f, dest, args...) = broadcast(f, args...) - -""" - cos(A::AbstractMatrix) - -Compute the matrix cosine of a square matrix `A`. - -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to -compute the cosine. Otherwise, the cosine is determined by calling [`exp`](@ref). - -# Examples -```jldoctest -julia> cos(fill(1.0, (2,2))) -2×2 Matrix{Float64}: - 0.291927 -0.708073 - -0.708073 0.291927 -``` -""" -function cos(A::AbstractMatrix{<:Real}) - if isdiag(A) - return applydiagonal(cos, A) - elseif issymmetric(A) - return copytri!(parent(cos(Symmetric(A))), 'U') - end - M = im .* float.(A) - return real(exp_maybe_inplace(M)) -end -function cos(A::AbstractMatrix{<:Complex}) - if isdiag(A) - return applydiagonal(cos, A) - elseif ishermitian(A) - return copytri!(parent(cos(Hermitian(A))), 'U', true) - end - M = im .* float.(A) - N = -M - X = exp_maybe_inplace(M) - Y = exp_maybe_inplace(N) - # Compute (X + Y)/2 and return the result. - # Compute the result in-place if X is strided - _broadcast!!((x,y) -> (x + y)/2, X, X, Y) -end - -""" - sin(A::AbstractMatrix) - -Compute the matrix sine of a square matrix `A`. - -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to -compute the sine. Otherwise, the sine is determined by calling [`exp`](@ref). - -# Examples -```jldoctest -julia> sin(fill(1.0, (2,2))) -2×2 Matrix{Float64}: - 0.454649 0.454649 - 0.454649 0.454649 -``` -""" -function sin(A::AbstractMatrix{<:Real}) - if isdiag(A) - return applydiagonal(sin, A) - elseif issymmetric(A) - return copytri!(parent(sin(Symmetric(A))), 'U') - end - M = im .* float.(A) - return imag(exp_maybe_inplace(M)) -end -function sin(A::AbstractMatrix{<:Complex}) - if isdiag(A) - return applydiagonal(sin, A) - elseif ishermitian(A) - return copytri!(parent(sin(Hermitian(A))), 'U', true) - end - M = im .* float.(A) - Mneg = -M - X = exp_maybe_inplace(M) - Y = exp_maybe_inplace(Mneg) - # Compute (X - Y)/2im and return the result. 
- # Compute the result in-place if X is strided - _broadcast!!((x,y) -> (x - y)/2im, X, X, Y) -end - -""" - sincos(A::AbstractMatrix) - -Compute the matrix sine and cosine of a square matrix `A`. - -# Examples -```jldoctest -julia> S, C = sincos(fill(1.0, (2,2))); - -julia> S -2×2 Matrix{Float64}: - 0.454649 0.454649 - 0.454649 0.454649 - -julia> C -2×2 Matrix{Float64}: - 0.291927 -0.708073 - -0.708073 0.291927 -``` -""" -function sincos(A::AbstractMatrix{<:Real}) - if issymmetric(A) - symsinA, symcosA = sincos(Symmetric(A)) - sinA = copytri!(parent(symsinA), 'U') - cosA = copytri!(parent(symcosA), 'U') - return sinA, cosA - end - M = im .* float.(A) - c, s = reim(exp_maybe_inplace(M)) - return s, c -end -function sincos(A::AbstractMatrix{<:Complex}) - if ishermitian(A) - hermsinA, hermcosA = sincos(Hermitian(A)) - sinA = copytri!(parent(hermsinA), 'U', true) - cosA = copytri!(parent(hermcosA), 'U', true) - return sinA, cosA - end - M = im .* float.(A) - Mneg = -M - X = exp_maybe_inplace(M) - Y = exp_maybe_inplace(Mneg) - _sincos(X, Y) -end -function _sincos(X::StridedMatrix, Y::StridedMatrix) - @inbounds for i in eachindex(X, Y) - x, y = X[i]/2, Y[i]/2 - X[i] = Complex(imag(x)-imag(y), real(y)-real(x)) - Y[i] = x+y - end - return X, Y -end -function _sincos(X, Y) - T = eltype(X) - S = T(0.5)*im .* (Y .- X) - C = T(0.5) .* (X .+ Y) - S, C -end - -""" - tan(A::AbstractMatrix) - -Compute the matrix tangent of a square matrix `A`. - -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to -compute the tangent. Otherwise, the tangent is determined by calling [`exp`](@ref). - -# Examples -```jldoctest -julia> tan(fill(1.0, (2,2))) -2×2 Matrix{Float64}: - -1.09252 -1.09252 - -1.09252 -1.09252 -``` -""" -function tan(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(tan, A) - elseif ishermitian(A) - return copytri!(parent(tan(Hermitian(A))), 'U', true) - end - S, C = sincos(A) - S /= C - return S -end - -""" - cosh(A::AbstractMatrix) - -Compute the matrix hyperbolic cosine of a square matrix `A`. -""" -function cosh(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(cosh, A) - elseif ishermitian(A) - return copytri!(parent(cosh(Hermitian(A))), 'U', true) - end - X = exp(A) - negA = @. float(-A) - Y = exp_maybe_inplace(negA) - _broadcast!!((x,y) -> (x + y)/2, X, X, Y) -end - -""" - sinh(A::AbstractMatrix) - -Compute the matrix hyperbolic sine of a square matrix `A`. -""" -function sinh(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(sinh, A) - elseif ishermitian(A) - return copytri!(parent(sinh(Hermitian(A))), 'U', true) - end - X = exp(A) - negA = @. float(-A) - Y = exp_maybe_inplace(negA) - _broadcast!!((x,y) -> (x - y)/2, X, X, Y) -end - -""" - tanh(A::AbstractMatrix) - -Compute the matrix hyperbolic tangent of a square matrix `A`. -""" -function tanh(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(tanh, A) - elseif ishermitian(A) - return copytri!(parent(tanh(Hermitian(A))), 'U', true) - end - X = exp(A) - negA = @. float(-A) - Y = exp_maybe_inplace(negA) - X′, Y′ = _subadd!!(X, Y) - return X′ / Y′ -end -function _subadd!!(X::StridedMatrix, Y::StridedMatrix) - @inbounds for i in eachindex(X, Y) - x, y = X[i], Y[i] - X[i] = x - y - Y[i] = x + y - end - return X, Y -end -_subadd!!(X, Y) = X - Y, X + Y - -""" - acos(A::AbstractMatrix) - -Compute the inverse matrix cosine of a square matrix `A`. - -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to -compute the inverse cosine. 
Otherwise, the inverse cosine is determined by using -[`log`](@ref) and [`sqrt`](@ref). For the theory and logarithmic formulas used to compute -this function, see [^AH16_1]. - -[^AH16_1]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) - -# Examples -```julia-repl -julia> acos(cos([0.5 0.1; -0.2 0.3])) -2×2 Matrix{ComplexF64}: - 0.5-8.32667e-17im 0.1+0.0im - -0.2+2.63678e-16im 0.3-3.46945e-16im -``` -""" -function acos(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(acos, A) - elseif ishermitian(A) - acosHermA = acos(Hermitian(A)) - return isa(acosHermA, Hermitian) ? copytri!(parent(acosHermA), 'U', true) : parent(acosHermA) - end - SchurF = Schur{Complex}(schur(A)) - U = UpperTriangular(SchurF.T) - R = triu!(parent(-im * log(U + im * sqrt(I - U^2)))) - return SchurF.Z * R * SchurF.Z' -end - -""" - asin(A::AbstractMatrix) - -Compute the inverse matrix sine of a square matrix `A`. - -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to -compute the inverse sine. Otherwise, the inverse sine is determined by using [`log`](@ref) -and [`sqrt`](@ref). For the theory and logarithmic formulas used to compute this function, -see [^AH16_2]. - -[^AH16_2]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) - -# Examples -```julia-repl -julia> asin(sin([0.5 0.1; -0.2 0.3])) -2×2 Matrix{ComplexF64}: - 0.5-4.16334e-17im 0.1-5.55112e-17im - -0.2+9.71445e-17im 0.3-1.249e-16im -``` -""" -function asin(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(asin, A) - elseif ishermitian(A) - asinHermA = asin(Hermitian(A)) - return isa(asinHermA, Hermitian) ? copytri!(parent(asinHermA), 'U', true) : parent(asinHermA) - end - SchurF = Schur{Complex}(schur(A)) - U = UpperTriangular(SchurF.T) - R = triu!(parent(-im * log(im * U + sqrt(I - U^2)))) - return SchurF.Z * R * SchurF.Z' -end - -""" - atan(A::AbstractMatrix) - -Compute the inverse matrix tangent of a square matrix `A`. - -If `A` is symmetric or Hermitian, its eigendecomposition ([`eigen`](@ref)) is used to -compute the inverse tangent. Otherwise, the inverse tangent is determined by using -[`log`](@ref). For the theory and logarithmic formulas used to compute this function, see -[^AH16_3]. - -[^AH16_3]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) - -# Examples -```julia-repl -julia> atan(tan([0.5 0.1; -0.2 0.3])) -2×2 Matrix{ComplexF64}: - 0.5+1.38778e-17im 0.1-2.77556e-17im - -0.2+6.93889e-17im 0.3-4.16334e-17im -``` -""" -function atan(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(atan, A) - elseif ishermitian(A) - return copytri!(parent(atan(Hermitian(A))), 'U', true) - end - SchurF = Schur{Complex}(schur(A)) - U = im * UpperTriangular(SchurF.T) - R = triu!(parent(log((I + U) / (I - U)) / 2im)) - return SchurF.Z * R * SchurF.Z' -end - -""" - acosh(A::AbstractMatrix) - -Compute the inverse hyperbolic matrix cosine of a square matrix `A`. For the theory and -logarithmic formulas used to compute this function, see [^AH16_4]. - -[^AH16_4]: Mary Aprahamian and Nicholas J. 
Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) -""" -function acosh(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(acosh, A) - elseif ishermitian(A) - acoshHermA = acosh(Hermitian(A)) - return isa(acoshHermA, Hermitian) ? copytri!(parent(acoshHermA), 'U', true) : parent(acoshHermA) - end - SchurF = Schur{Complex}(schur(A)) - U = UpperTriangular(SchurF.T) - R = triu!(parent(log(U + sqrt(U - I) * sqrt(U + I)))) - return SchurF.Z * R * SchurF.Z' -end - -""" - asinh(A::AbstractMatrix) - -Compute the inverse hyperbolic matrix sine of a square matrix `A`. For the theory and -logarithmic formulas used to compute this function, see [^AH16_5]. - -[^AH16_5]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) -""" -function asinh(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(asinh, A) - elseif ishermitian(A) - return copytri!(parent(asinh(Hermitian(A))), 'U', true) - end - SchurF = Schur{Complex}(schur(A)) - U = UpperTriangular(SchurF.T) - R = triu!(parent(log(U + sqrt(I + U^2)))) - return SchurF.Z * R * SchurF.Z' -end - -""" - atanh(A::AbstractMatrix) - -Compute the inverse hyperbolic matrix tangent of a square matrix `A`. For the theory and -logarithmic formulas used to compute this function, see [^AH16_6]. - -[^AH16_6]: Mary Aprahamian and Nicholas J. Higham, "Matrix Inverse Trigonometric and Inverse Hyperbolic Functions: Theory and Algorithms", MIMS EPrint: 2016.4. [https://doi.org/10.1137/16M1057577](https://doi.org/10.1137/16M1057577) -""" -function atanh(A::AbstractMatrix) - if isdiag(A) - return applydiagonal(atanh, A) - elseif ishermitian(A) - return copytri!(parent(atanh(Hermitian(A))), 'U', true) - end - SchurF = Schur{Complex}(schur(A)) - U = UpperTriangular(SchurF.T) - R = triu!(parent(log((I + U) / (I - U)) / 2)) - return SchurF.Z * R * SchurF.Z' -end - -for (finv, f, finvh, fh, fn) in ((:sec, :cos, :sech, :cosh, "secant"), - (:csc, :sin, :csch, :sinh, "cosecant"), - (:cot, :tan, :coth, :tanh, "cotangent")) - name = string(finv) - hname = string(finvh) - @eval begin - @doc """ - $($name)(A::AbstractMatrix) - - Compute the matrix $($fn) of a square matrix `A`. - """ ($finv)(A::AbstractMatrix{T}) where {T} = inv(($f)(A)) - @doc """ - $($hname)(A::AbstractMatrix) - - Compute the matrix hyperbolic $($fn) of square matrix `A`. - """ ($finvh)(A::AbstractMatrix{T}) where {T} = inv(($fh)(A)) - end -end - -for (tfa, tfainv, hfa, hfainv, fn) in ((:asec, :acos, :asech, :acosh, "secant"), - (:acsc, :asin, :acsch, :asinh, "cosecant"), - (:acot, :atan, :acoth, :atanh, "cotangent")) - tname = string(tfa) - hname = string(hfa) - @eval begin - @doc """ - $($tname)(A::AbstractMatrix) - Compute the inverse matrix $($fn) of `A`. """ ($tfa)(A::AbstractMatrix{T}) where {T} = ($tfainv)(inv(A)) - @doc """ - $($hname)(A::AbstractMatrix) - Compute the inverse matrix hyperbolic $($fn) of `A`. """ ($hfa)(A::AbstractMatrix{T}) where {T} = ($hfainv)(inv(A)) - end -end - -""" - factorize(A) - -Compute a convenient factorization of `A`, based upon the type of the input matrix. -If `A` is passed as a generic matrix, `factorize` checks to see if it is -symmetric/triangular/etc. To this end, `factorize` may check every element of `A` to -verify/rule out each property. 
It will short-circuit as soon as it can rule out -symmetry/triangular structure. The return value can be reused for efficient solving -of multiple systems. For example: `A=factorize(A); x=A\\b; y=A\\C`. - -| Properties of `A` | type of factorization | -|:---------------------------|:-----------------------------------------------| -| Dense Symmetric/Hermitian | Bunch-Kaufman (see [`bunchkaufman`](@ref)) | -| Sparse Symmetric/Hermitian | LDLt (see [`ldlt`](@ref)) | -| Triangular | Triangular | -| Diagonal | Diagonal | -| Bidiagonal | Bidiagonal | -| Tridiagonal | LU (see [`lu`](@ref)) | -| Symmetric real tridiagonal | LDLt (see [`ldlt`](@ref)) | -| General square | LU (see [`lu`](@ref)) | -| General non-square | QR (see [`qr`](@ref)) | - -# Examples -```jldoctest -julia> A = Array(Bidiagonal(fill(1.0, (5, 5)), :U)) -5×5 Matrix{Float64}: - 1.0 1.0 0.0 0.0 0.0 - 0.0 1.0 1.0 0.0 0.0 - 0.0 0.0 1.0 1.0 0.0 - 0.0 0.0 0.0 1.0 1.0 - 0.0 0.0 0.0 0.0 1.0 - -julia> factorize(A) # factorize will check to see that A is already factorized -5×5 Bidiagonal{Float64, Vector{Float64}}: - 1.0 1.0 ⋅ ⋅ ⋅ - ⋅ 1.0 1.0 ⋅ ⋅ - ⋅ ⋅ 1.0 1.0 ⋅ - ⋅ ⋅ ⋅ 1.0 1.0 - ⋅ ⋅ ⋅ ⋅ 1.0 -``` - -This returns a `5×5 Bidiagonal{Float64}`, which can now be passed to other linear algebra -functions (e.g. eigensolvers) which will use specialized methods for `Bidiagonal` types. -""" -function factorize(A::AbstractMatrix{T}) where T - m, n = size(A) - if m == n - if m == 1 return A[1] end - utri = true - utri1 = true - herm = true - sym = true - for j = 1:n-1, i = j+1:m - if utri1 - if A[i,j] != 0 - utri1 = i == j + 1 - utri = false - end - end - if sym - sym &= A[i,j] == A[j,i] - end - if herm - herm &= A[i,j] == conj(A[j,i]) - end - if !(utri1|herm|sym) break end - end - ltri = true - ltri1 = true - for j = 3:n, i = 1:j-2 - ltri1 &= A[i,j] == 0 - if !ltri1 break end - end - if ltri1 - for i = 1:n-1 - if A[i,i+1] != 0 - ltri &= false - break - end - end - if ltri - if utri - return Diagonal(A) - end - if utri1 - return Bidiagonal(diag(A), diag(A, -1), :L) - end - return LowerTriangular(A) - end - if utri - return Bidiagonal(diag(A), diag(A, 1), :U) - end - if utri1 - # TODO: enable once a specialized, non-dense bunchkaufman method exists - # if (herm & (T <: Complex)) | sym - # return bunchkaufman(SymTridiagonal(diag(A), diag(A, -1))) - # end - return lu(Tridiagonal(diag(A, -1), diag(A), diag(A, 1))) - end - end - if utri - return UpperTriangular(A) - end - if herm - return factorize(Hermitian(A)) - end - if sym - return factorize(Symmetric(A)) - end - return lu(A) - end - qr(A, ColumnNorm()) -end -factorize(A::Adjoint) = adjoint(factorize(parent(A))) -factorize(A::Transpose) = transpose(factorize(parent(A))) -factorize(a::Number) = a # same as how factorize behaves on Diagonal types - -## Moore-Penrose pseudoinverse - -""" - pinv(M; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ) - pinv(M, rtol::Real) = pinv(M; rtol=rtol) # to be deprecated in Julia 2.0 - -Computes the Moore-Penrose pseudoinverse. - -For matrices `M` with floating point elements, it is convenient to compute -the pseudoinverse by inverting only singular values greater than -`max(atol, rtol*σ₁)` where `σ₁` is the largest singular value of `M`. - -The optimal choice of absolute (`atol`) and relative tolerance (`rtol`) varies -both with the value of `M` and the intended application of the pseudoinverse. -The default relative tolerance is `n*ϵ`, where `n` is the size of the smallest -dimension of `M`, and `ϵ` is the [`eps`](@ref) of the element type of `M`. 
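As a concrete illustration of that cutoff (a sketch added here, not part of the original docstring; the matrix and tolerances are arbitrary), a tiny singular value is still inverted under the default `rtol`, but dropped once `rtol` exceeds it:

```julia
using LinearAlgebra

# An ill-conditioned 2×2 matrix: singular values are 1.0 and 1e-12.
M = [1.0 0.0; 0.0 1e-12]

# Default rtol ≈ n*eps() ≈ 4.4e-16, so the tiny singular value sits above
# the cutoff, gets inverted, and dominates the pseudoinverse.
pinv(M)               # ≈ [1.0 0.0; 0.0 1e12]

# Raising rtol above 1e-12 treats that singular value as zero instead,
# which is usually what one wants in a least-squares setting.
pinv(M; rtol = 1e-8)  # ≈ [1.0 0.0; 0.0 0.0]
```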
- -For inverting dense ill-conditioned matrices in a least-squares sense, -`rtol = sqrt(eps(real(float(oneunit(eltype(M))))))` is recommended. - -For more information, see [^issue8859], [^B96], [^S84], [^KY88]. - -# Examples -```jldoctest -julia> M = [1.5 1.3; 1.2 1.9] -2×2 Matrix{Float64}: - 1.5 1.3 - 1.2 1.9 - -julia> N = pinv(M) -2×2 Matrix{Float64}: - 1.47287 -1.00775 - -0.930233 1.16279 - -julia> M * N -2×2 Matrix{Float64}: - 1.0 -2.22045e-16 - 4.44089e-16 1.0 -``` - -[^issue8859]: Issue 8859, "Fix least squares", [https://github.com/JuliaLang/julia/pull/8859](https://github.com/JuliaLang/julia/pull/8859) - -[^B96]: Åke Björck, "Numerical Methods for Least Squares Problems", SIAM Press, Philadelphia, 1996, "Other Titles in Applied Mathematics", Vol. 51. [doi:10.1137/1.9781611971484](http://epubs.siam.org/doi/book/10.1137/1.9781611971484) - -[^S84]: G. W. Stewart, "Rank Degeneracy", SIAM Journal on Scientific and Statistical Computing, 5(2), 1984, 403-413. [doi:10.1137/0905030](http://epubs.siam.org/doi/abs/10.1137/0905030) - -[^KY88]: Konstantinos Konstantinides and Kung Yao, "Statistical analysis of effective singular values in matrix rank determination", IEEE Transactions on Acoustics, Speech and Signal Processing, 36(5), 1988, 757-763. [doi:10.1109/29.1585](https://doi.org/10.1109/29.1585) -""" -function pinv(A::AbstractMatrix{T}; atol::Real = 0.0, rtol::Real = (eps(real(float(oneunit(T))))*min(size(A)...))*iszero(atol)) where T - m, n = size(A) - Tout = typeof(zero(T)/sqrt(oneunit(T) + oneunit(T))) - if m == 0 || n == 0 - return similar(A, Tout, (n, m)) - end - if isdiag(A) - dA = diagview(A) - maxabsA = maximum(abs, dA) - tol = max(rtol * maxabsA, atol) - B = fill!(similar(A, Tout, (n, m)), 0) - diagview(B) .= (x -> abs(x) > tol ? pinv(x) : zero(x)).(dA) - return B - end - SVD = svd(A) - tol2 = max(rtol*maximum(SVD.S), atol) - Stype = eltype(SVD.S) - Sinv = fill!(similar(A, Stype, length(SVD.S)), 0) - index = SVD.S .> tol2 - Sinv[index] .= pinv.(view(SVD.S, index)) - return SVD.Vt' * (Diagonal(Sinv) * SVD.U') -end -function pinv(x::Number) - xi = inv(x) - return ifelse(isfinite(xi), xi, zero(xi)) -end - -## Basis for null space - -""" - nullspace(M; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ) - nullspace(M, rtol::Real) = nullspace(M; rtol=rtol) # to be deprecated in Julia 2.0 - -Computes a basis for the nullspace of `M` by including the singular -vectors of `M` whose singular values have magnitudes smaller than `max(atol, rtol*σ₁)`, -where `σ₁` is `M`'s largest singular value. - -By default, the relative tolerance `rtol` is `n*ϵ`, where `n` -is the size of the smallest dimension of `M`, and `ϵ` is the [`eps`](@ref) of -the element type of `M`. 
- -# Examples -```jldoctest -julia> M = [1 0 0; 0 1 0; 0 0 0] -3×3 Matrix{Int64}: - 1 0 0 - 0 1 0 - 0 0 0 - -julia> nullspace(M) -3×1 Matrix{Float64}: - 0.0 - 0.0 - 1.0 - -julia> nullspace(M, rtol=3) -3×3 Matrix{Float64}: - 0.0 1.0 0.0 - 1.0 0.0 0.0 - 0.0 0.0 1.0 - -julia> nullspace(M, atol=0.95) -3×1 Matrix{Float64}: - 0.0 - 0.0 - 1.0 -``` -""" -function nullspace(A::AbstractVecOrMat; atol::Real = 0.0, rtol::Real = (min(size(A, 1), size(A, 2))*eps(real(float(oneunit(eltype(A))))))*iszero(atol)) - m, n = size(A, 1), size(A, 2) - (m == 0 || n == 0) && return Matrix{eigtype(eltype(A))}(I, n, n) - SVD = svd(A; full=true) - tol = max(atol, SVD.S[1]*rtol) - indstart = sum(s -> s .> tol, SVD.S) + 1 - return copy((@view SVD.Vt[indstart:end,:])') -end - -""" - cond(M, p::Real=2) - -Condition number of the matrix `M`, computed using the operator `p`-norm. Valid values for -`p` are `1`, `2` (default), or `Inf`. -""" -function cond(A::AbstractMatrix, p::Real=2) - if p == 2 - v = svdvals(A) - maxv = maximum(v) - return iszero(maxv) ? oftype(real(maxv), Inf) : maxv / minimum(v) - elseif p == 1 || p == Inf - checksquare(A) - try - Ainv = inv(A) - return opnorm(A, p)*opnorm(Ainv, p) - catch e - if isa(e, LAPACKException) || isa(e, SingularException) - return convert(float(real(eltype(A))), Inf) - else - rethrow() - end - end - end - throw(ArgumentError(lazy"p-norm must be 1, 2 or Inf, got $p")) -end - -## Lyapunov and Sylvester equation - -# AX + XB + C = 0 - -""" - sylvester(A, B, C) - -Computes the solution `X` to the Sylvester equation `AX + XB + C = 0`, where `A`, `B` and -`C` have compatible dimensions and `A` and `-B` have no eigenvalues with equal real part. - -# Examples -```jldoctest -julia> A = [3. 4.; 5. 6] -2×2 Matrix{Float64}: - 3.0 4.0 - 5.0 6.0 - -julia> B = [1. 1.; 1. 2.] -2×2 Matrix{Float64}: - 1.0 1.0 - 1.0 2.0 - -julia> C = [1. 2.; -2. 1] -2×2 Matrix{Float64}: - 1.0 2.0 - -2.0 1.0 - -julia> X = sylvester(A, B, C) -2×2 Matrix{Float64}: - -4.46667 1.93333 - 3.73333 -1.8 - -julia> A*X + X*B ≈ -C -true -``` -""" -function sylvester(A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix) - T = promote_type(float(eltype(A)), float(eltype(B)), float(eltype(C))) - return sylvester(copy_similar(A, T), copy_similar(B, T), copy_similar(C, T)) -end -function sylvester(A::AbstractMatrix{T}, B::AbstractMatrix{T}, C::AbstractMatrix{T}) where {T<:BlasFloat} - RA, QA = schur(A) - RB, QB = schur(B) - D = QA' * C * QB - D .= .-D - Y, scale = LAPACK.trsyl!('N', 'N', RA, RB, D) - rmul!(QA * Y * QB', inv(scale)) -end - -Base.@propagate_inbounds function _sylvester_2x1!(A, B, C) - b = B[1] - a21, a12 = A[2, 1], A[1, 2] - m11 = b + A[1, 1] - m22 = b + A[2, 2] - d = m11 * m22 - a12 * a21 - c1, c2 = C - C[1] = (a12 * c2 - m22 * c1) / d - C[2] = (a21 * c1 - m11 * c2) / d - return C -end -Base.@propagate_inbounds function _sylvester_1x2!(A, B, C) - a = A[1] - b21, b12 = B[2, 1], B[1, 2] - m11 = a + B[1, 1] - m22 = a + B[2, 2] - d = m11 * m22 - b21 * b12 - c1, c2 = C - C[1] = (b21 * c2 - m22 * c1) / d - C[2] = (b12 * c1 - m11 * c2) / d - return C -end -function _sylvester_2x2!(A, B, C) - _, scale = LAPACK.trsyl!('N', 'N', A, B, C) - rmul!(C, -inv(scale)) - return C -end - -sylvester(a::Union{Real,Complex}, b::Union{Real,Complex}, c::Union{Real,Complex}) = -c / (a + b) - -# AX + XA' + C = 0 - -""" - lyap(A, C) - -Computes the solution `X` to the continuous Lyapunov equation `AX + XA' + C = 0`, where no -eigenvalue of `A` has a zero real part and no two eigenvalues are negative complex -conjugates of each other. 
- -# Examples -```jldoctest -julia> A = [3. 4.; 5. 6] -2×2 Matrix{Float64}: - 3.0 4.0 - 5.0 6.0 - -julia> B = [1. 1.; 1. 2.] -2×2 Matrix{Float64}: - 1.0 1.0 - 1.0 2.0 - -julia> X = lyap(A, B) -2×2 Matrix{Float64}: - 0.5 -0.5 - -0.5 0.25 - -julia> A*X + X*A' ≈ -B -true -``` -""" -function lyap(A::AbstractMatrix, C::AbstractMatrix) - T = promote_type(float(eltype(A)), float(eltype(C))) - return lyap(copy_similar(A, T), copy_similar(C, T)) -end -function lyap(A::AbstractMatrix{T}, C::AbstractMatrix{T}) where {T<:BlasFloat} - R, Q = schur(A) - D = Q' * C * Q - D .= .-D - Y, scale = LAPACK.trsyl!('N', T <: Complex ? 'C' : 'T', R, R, D) - rmul!(Q * Y * Q', inv(scale)) -end -lyap(a::Union{Real,Complex}, c::Union{Real,Complex}) = -c/(2real(a)) diff --git a/stdlib/LinearAlgebra/src/deprecated.jl b/stdlib/LinearAlgebra/src/deprecated.jl deleted file mode 100644 index 28c090634a2d8..0000000000000 --- a/stdlib/LinearAlgebra/src/deprecated.jl +++ /dev/null @@ -1,7 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# To be deprecated in 2.0 -rank(A::AbstractMatrix, tol::Real) = rank(A,rtol=tol) -nullspace(A::AbstractVector, tol::Real) = nullspace(reshape(A, length(A), 1), rtol= tol) -nullspace(A::AbstractMatrix, tol::Real) = nullspace(A, rtol=tol) -pinv(A::AbstractMatrix{T}, tol::Real) where T = pinv(A, rtol=tol) diff --git a/stdlib/LinearAlgebra/src/diagonal.jl b/stdlib/LinearAlgebra/src/diagonal.jl deleted file mode 100644 index 243df4d82eec2..0000000000000 --- a/stdlib/LinearAlgebra/src/diagonal.jl +++ /dev/null @@ -1,1148 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -## Diagonal matrices - -struct Diagonal{T,V<:AbstractVector{T}} <: AbstractMatrix{T} - diag::V - - function Diagonal{T,V}(diag) where {T,V<:AbstractVector{T}} - require_one_based_indexing(diag) - new{T,V}(diag) - end -end -Diagonal(v::AbstractVector{T}) where {T} = Diagonal{T,typeof(v)}(v) -Diagonal{T}(v::AbstractVector) where {T} = Diagonal(convert(AbstractVector{T}, v)::AbstractVector{T}) - -function Base.promote_rule(A::Type{<:Diagonal{<:Any,V}}, B::Type{<:Diagonal{<:Any,W}}) where {V,W} - X = promote_type(V, W) - T = eltype(X) - isconcretetype(T) && return Diagonal{T,X} - return typejoin(A, B) -end - -""" - Diagonal(V::AbstractVector) - -Construct a lazy matrix with `V` as its diagonal. - -See also [`UniformScaling`](@ref) for the lazy identity matrix `I`, -[`diagm`](@ref) to make a dense matrix, and [`diag`](@ref) to extract diagonal elements. - -# Examples -```jldoctest -julia> d = Diagonal([1, 10, 100]) -3×3 Diagonal{$Int, Vector{$Int}}: - 1 ⋅ ⋅ - ⋅ 10 ⋅ - ⋅ ⋅ 100 - -julia> diagm([7, 13]) -2×2 Matrix{$Int}: - 7 0 - 0 13 - -julia> ans + I -2×2 Matrix{Int64}: - 8 0 - 0 14 - -julia> I(2) -2×2 Diagonal{Bool, Vector{Bool}}: - 1 ⋅ - ⋅ 1 -``` - -!!! note - A one-column matrix is not treated like a vector, but instead calls the - method `Diagonal(A::AbstractMatrix)` which extracts 1-element `diag(A)`: - -```jldoctest -julia> A = transpose([7.0 13.0]) -2×1 transpose(::Matrix{Float64}) with eltype Float64: - 7.0 - 13.0 - -julia> Diagonal(A) -1×1 Diagonal{Float64, Vector{Float64}}: - 7.0 -``` -""" -Diagonal(V::AbstractVector) - -""" - Diagonal(A::AbstractMatrix) - -Construct a matrix from the principal diagonal of `A`. -The input matrix `A` may be rectangular, but the output will -be square. 
- -# Examples -```jldoctest -julia> A = [1 2; 3 4] -2×2 Matrix{Int64}: - 1 2 - 3 4 - -julia> D = Diagonal(A) -2×2 Diagonal{Int64, Vector{Int64}}: - 1 ⋅ - ⋅ 4 - -julia> A = [1 2 3; 4 5 6] -2×3 Matrix{Int64}: - 1 2 3 - 4 5 6 - -julia> Diagonal(A) -2×2 Diagonal{Int64, Vector{Int64}}: - 1 ⋅ - ⋅ 5 -``` -""" -Diagonal(A::AbstractMatrix) = Diagonal(diag(A)) -Diagonal{T}(A::AbstractMatrix) where T = Diagonal{T}(diag(A)) -Diagonal{T,V}(A::AbstractMatrix) where {T,V<:AbstractVector{T}} = Diagonal{T,V}(diag(A)) -function convert(::Type{T}, A::AbstractMatrix) where T<:Diagonal - checksquare(A) - isdiag(A) ? T(A) : throw(InexactError(:convert, T, A)) -end - -Diagonal(D::Diagonal) = D -Diagonal{T}(D::Diagonal{T}) where {T} = D -Diagonal{T}(D::Diagonal) where {T} = Diagonal{T}(D.diag) - -AbstractMatrix{T}(D::Diagonal) where {T} = Diagonal{T}(D) -AbstractMatrix{T}(D::Diagonal{T}) where {T} = copy(D) -Matrix(D::Diagonal{T}) where {T} = Matrix{promote_type(T, typeof(zero(T)))}(D) -Matrix(D::Diagonal{Any}) = Matrix{Any}(D) -Array(D::Diagonal{T}) where {T} = Matrix(D) -function Matrix{T}(D::Diagonal) where {T} - B = Matrix{T}(undef, size(D)) - if haszero(T) # optimized path for types with zero(T) defined - size(B,1) > 1 && fill!(B, zero(T)) - copyto!(diagview(B), D.diag) - else - copyto!(B, D) - end - return B -end - -""" - Diagonal{T}(undef, n) - -Construct an uninitialized `Diagonal{T}` of length `n`. See `undef`. -""" -Diagonal{T}(::UndefInitializer, n::Integer) where T = Diagonal(Vector{T}(undef, n)) - -similar(D::Diagonal, ::Type{T}) where {T} = Diagonal(similar(D.diag, T)) -similar(D::Diagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = similar(D.diag, T, dims) - -# copyto! for matching axes -_copyto_banded!(D1::Diagonal, D2::Diagonal) = (copyto!(D1.diag, D2.diag); D1) - -size(D::Diagonal) = (n = length(D.diag); (n,n)) - -axes(D::Diagonal) = (ax = axes(D.diag, 1); (ax, ax)) - -@inline function Base.isassigned(D::Diagonal, i::Int, j::Int) - @boundscheck checkbounds(Bool, D, i, j) || return false - if i == j - @inbounds r = isassigned(D.diag, i) - else - r = true - end - r -end - -@inline function Base.isstored(D::Diagonal, i::Int, j::Int) - @boundscheck checkbounds(D, i, j) - if i == j - @inbounds r = Base.isstored(D.diag, i) - else - r = false - end - r -end - -function Base.minimum(D::Diagonal{T}) where T <: Number - mindiag = minimum(D.diag) - size(D, 1) > 1 && return (min(zero(T), mindiag)) - return mindiag -end - -function Base.maximum(D::Diagonal{T}) where T <: Number - maxdiag = Base.maximum(D.diag) - size(D, 1) > 1 && return (max(zero(T), maxdiag)) - return maxdiag -end - -@inline function getindex(D::Diagonal, i::Int, j::Int) - @boundscheck checkbounds(D, i, j) - if i == j - @inbounds r = D.diag[i] - else - r = diagzero(D, i, j) - end - r -end -""" - diagzero(A::AbstractMatrix, i, j) - -Return the appropriate zero element `A[i, j]` corresponding to a banded matrix `A`. -""" -diagzero(A::AbstractMatrix, i, j) = zero(eltype(A)) -diagzero(A::AbstractMatrix{M}, i, j) where {M<:AbstractMatrix} = - zeroslike(M, axes(A[i,i], 1), axes(A[j,j], 2)) -diagzero(A::AbstractMatrix, inds...) = diagzero(A, to_indices(A, inds)...) 
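The `diagzero` methods above are what make a `Diagonal` whose elements are themselves matrices behave like a block-diagonal operator: reading an off-diagonal entry manufactures a zero block of the right shape rather than a scalar zero. A minimal sketch of that behaviour (the 2×2 blocks are arbitrary):

```julia
using LinearAlgebra

# A Diagonal of matrices, i.e. a lazy block-diagonal operator.
D = Diagonal([ [1 2; 3 4], [5 6; 7 8] ])

D[1, 1]                    # the stored block [1 2; 3 4]
D[1, 2]                    # off-diagonal: diagzero/zeroslike build a 2×2 zero block
size(D[1, 2]) == (2, 2)    # true — the zero block matches the neighbouring blocks
```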
-# dispatching on the axes permits specializing on the axis types to return something other than an Array -zeroslike(M::Type, ax::Vararg{Union{AbstractUnitRange, Integer}}) = zeroslike(M, ax) -""" - zeroslike(::Type{M}, ax::Tuple{AbstractUnitRange, Vararg{AbstractUnitRange}}) where {M<:AbstractMatrix} - zeroslike(::Type{M}, sz::Tuple{Integer, Vararg{Integer}}) where {M<:AbstractMatrix} - -Return an appropriate zero-ed array similar to `M`, with either the axes `ax` or the size `sz`. -This will be used as a structural zero element of a matrix-valued banded matrix. -By default, `zeroslike` falls back to using the size along each axis to construct the array. -""" -zeroslike(M::Type, ax::Tuple{AbstractUnitRange, Vararg{AbstractUnitRange}}) = zeroslike(M, map(length, ax)) -zeroslike(M::Type, sz::Tuple{Integer, Vararg{Integer}}) = zeros(M, sz) -zeroslike(::Type{M}, sz::Tuple{Integer, Vararg{Integer}}) where {M<:AbstractMatrix} = zeros(eltype(M), sz) - -@inline function getindex(D::Diagonal, b::BandIndex) - @boundscheck checkbounds(D, b) - if b.band == 0 - @inbounds r = D.diag[b.index] - else - r = diagzero(D, Tuple(_cartinds(b))...) - end - r -end - -function setindex!(D::Diagonal, v, i::Int, j::Int) - @boundscheck checkbounds(D, i, j) - if i == j - @inbounds D.diag[i] = v - elseif !iszero(v) - throw(ArgumentError(lazy"cannot set off-diagonal entry ($i, $j) to a nonzero value ($v)")) - end - return D -end - - -## structured matrix methods ## -function Base.replace_in_print_matrix(A::Diagonal,i::Integer,j::Integer,s::AbstractString) - i==j ? s : Base.replace_with_centered_mark(s) -end -function Base.show(io::IO, A::Diagonal) - print(io, "Diagonal(") - show(io, A.diag) - print(io, ")") -end - -parent(D::Diagonal) = D.diag - -copy(D::Diagonal) = Diagonal(copy(D.diag)) - -Base._reverse(A::Diagonal, dims) = reverse!(Matrix(A); dims) -Base._reverse(A::Diagonal, ::Colon) = Diagonal(reverse(A.diag)) -Base._reverse!(A::Diagonal, ::Colon) = (reverse!(A.diag); A) - -ishermitian(D::Diagonal{<:Number}) = isreal(D.diag) -ishermitian(D::Diagonal) = all(ishermitian, D.diag) -issymmetric(D::Diagonal{<:Number}) = true -issymmetric(D::Diagonal) = all(issymmetric, D.diag) -isposdef(D::Diagonal) = all(isposdef, D.diag) - -factorize(D::Diagonal) = D - -real(D::Diagonal) = Diagonal(real(D.diag)) -imag(D::Diagonal) = Diagonal(imag(D.diag)) - -iszero(D::Diagonal) = all(iszero, D.diag) -isone(D::Diagonal) = all(isone, D.diag) -isdiag(D::Diagonal) = all(isdiag, D.diag) -isdiag(D::Diagonal{<:Number}) = true -Base.@constprop :aggressive istriu(D::Diagonal, k::Integer=0) = k <= 0 || iszero(D.diag) ? true : false -Base.@constprop :aggressive istril(D::Diagonal, k::Integer=0) = k >= 0 || iszero(D.diag) ? 
true : false -function triu!(D::Diagonal{T}, k::Integer=0) where T - n = size(D,1) - if !(-n + 1 <= k <= n + 1) - throw(ArgumentError(string("the requested diagonal, $k, must be at least ", - "$(-n + 1) and at most $(n + 1) in an $n-by-$n matrix"))) - elseif k > 0 - fill!(D.diag, zero(T)) - end - return D -end - -function tril!(D::Diagonal{T}, k::Integer=0) where T - n = size(D,1) - if !(-n - 1 <= k <= n - 1) - throw(ArgumentError(LazyString(lazy"the requested diagonal, $k, must be at least ", - lazy"$(-n - 1) and at most $(n - 1) in an $n-by-$n matrix"))) - elseif k < 0 - fill!(D.diag, zero(T)) - end - return D -end - -(==)(Da::Diagonal, Db::Diagonal) = Da.diag == Db.diag -(-)(A::Diagonal) = Diagonal(-A.diag) -(+)(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag + Db.diag) -(-)(Da::Diagonal, Db::Diagonal) = Diagonal(Da.diag - Db.diag) - -(*)(x::Number, D::Diagonal) = Diagonal(x * D.diag) -(*)(D::Diagonal, x::Number) = Diagonal(D.diag * x) -function lmul!(x::Number, D::Diagonal) - if size(D,1) > 1 - # ensure that zeros are preserved on scaling - y = D[2,1] * x - iszero(y) || throw(ArgumentError(LazyString("cannot set index (2, 1) off ", - lazy"the tridiagonal band to a nonzero value ($y)"))) - end - @. D.diag = x * D.diag - return D -end -function rmul!(D::Diagonal, x::Number) - if size(D,1) > 1 - # ensure that zeros are preserved on scaling - y = x * D[2,1] - iszero(y) || throw(ArgumentError(LazyString("cannot set index (2, 1) off ", - lazy"the tridiagonal band to a nonzero value ($y)"))) - end - @. D.diag *= x - return D -end -(/)(D::Diagonal, x::Number) = Diagonal(D.diag / x) -(\)(x::Number, D::Diagonal) = Diagonal(x \ D.diag) -(^)(D::Diagonal, a::Number) = Diagonal(D.diag .^ a) -(^)(D::Diagonal, a::Real) = Diagonal(D.diag .^ a) # for disambiguation -(^)(D::Diagonal, a::Integer) = Diagonal(D.diag .^ a) # for disambiguation -Base.literal_pow(::typeof(^), D::Diagonal, valp::Val) = - Diagonal(Base.literal_pow.(^, D.diag, valp)) # for speed -Base.literal_pow(::typeof(^), D::Diagonal, ::Val{-1}) = inv(D) # for disambiguation - -function _muldiag_size_check(szA::NTuple{2,Integer}, szB::Tuple{Integer,Vararg{Integer}}) - nA = szA[2] - mB = szB[1] - @noinline throw_dimerr(szB::NTuple{2}, nA, mB) = throw(DimensionMismatch(lazy"second dimension of A, $nA, does not match first dimension of B, $mB")) - @noinline throw_dimerr(szB::NTuple{1}, nA, mB) = throw(DimensionMismatch(lazy"second dimension of D, $nA, does not match length of V, $mB")) - nA == mB || throw_dimerr(szB, nA, mB) - return nothing -end -# the output matrix should have the same size as the non-diagonal input matrix or vector -@noinline throw_dimerr(szC, szA) = throw(DimensionMismatch(lazy"output matrix has size: $szC, but should have size $szA")) -function _size_check_out(szC::NTuple{2}, szA::NTuple{2}, szB::NTuple{2}) - (szC[1] == szA[1] && szC[2] == szB[2]) || throw_dimerr(szC, (szA[1], szB[2])) -end -function _size_check_out(szC::NTuple{1}, szA::NTuple{2}, szB::NTuple{1}) - szC[1] == szA[1] || throw_dimerr(szC, (szA[1],)) -end -function _muldiag_size_check(szC::Tuple{Vararg{Integer}}, szA::Tuple{Vararg{Integer}}, szB::Tuple{Vararg{Integer}}) - _muldiag_size_check(szA, szB) - _size_check_out(szC, szA, szB) -end - -function (*)(Da::Diagonal, Db::Diagonal) - _muldiag_size_check(size(Da), size(Db)) - return Diagonal(Da.diag .* Db.diag) -end - -function (*)(D::Diagonal, V::AbstractVector) - _muldiag_size_check(size(D), size(V)) - return D.diag .* V -end - -function rmul!(A::AbstractMatrix, D::Diagonal) - _muldiag_size_check(size(A), 
size(D)) - for I in CartesianIndices(A) - row, col = Tuple(I) - @inbounds A[row, col] *= D.diag[col] - end - return A -end -# T .= T * D -function rmul!(T::Tridiagonal, D::Diagonal) - _muldiag_size_check(size(T), size(D)) - (; dl, d, du) = T - d[1] *= D.diag[1] - for i in axes(dl,1) - dl[i] *= D.diag[i] - du[i] *= D.diag[i+1] - d[i+1] *= D.diag[i+1] - end - return T -end - -function lmul!(D::Diagonal, B::AbstractVecOrMat) - _muldiag_size_check(size(D), size(B)) - for I in CartesianIndices(B) - row = I[1] - @inbounds B[I] = D.diag[row] * B[I] - end - return B -end - -# in-place multiplication with a diagonal -# T .= D * T -function lmul!(D::Diagonal, T::Tridiagonal) - _muldiag_size_check(size(D), size(T)) - (; dl, d, du) = T - d[1] = D.diag[1] * d[1] - for i in axes(dl,1) - dl[i] = D.diag[i+1] * dl[i] - du[i] = D.diag[i] * du[i] - d[i+1] = D.diag[i+1] * d[i+1] - end - return T -end - -@inline function __muldiag_nonzeroalpha!(out, D::Diagonal, B, alpha::Number, beta::Number) - @inbounds for j in axes(B, 2) - @simd for i in axes(B, 1) - @stable_muladdmul _modify!(MulAddMul(alpha,beta), D.diag[i] * B[i,j], out, (i,j)) - end - end - return out -end -_has_matching_zeros(out::UpperOrUnitUpperTriangular, A::UpperOrUnitUpperTriangular) = true -_has_matching_zeros(out::LowerOrUnitLowerTriangular, A::LowerOrUnitLowerTriangular) = true -_has_matching_zeros(out, A) = false -function _rowrange_tri_stored(B::UpperOrUnitUpperTriangular, col) - isunit = B isa UnitUpperTriangular - 1:min(col-isunit, size(B,1)) -end -function _rowrange_tri_stored(B::LowerOrUnitLowerTriangular, col) - isunit = B isa UnitLowerTriangular - col+isunit:size(B,1) -end -_rowrange_tri_zeros(B::UpperOrUnitUpperTriangular, col) = col+1:size(B,1) -_rowrange_tri_zeros(B::LowerOrUnitLowerTriangular, col) = 1:col-1 -function __muldiag_nonzeroalpha!(out, D::Diagonal, B::UpperOrLowerTriangular, alpha::Number, beta::Number) - isunit = B isa UnitUpperOrUnitLowerTriangular - out_maybeparent, B_maybeparent = _has_matching_zeros(out, B) ? 
(parent(out), parent(B)) : (out, B) - for j in axes(B, 2) - # store the diagonal separately for unit triangular matrices - if isunit - @inbounds @stable_muladdmul _modify!(MulAddMul(alpha,beta), D.diag[j] * B[j,j], out, (j,j)) - end - # The indices of out corresponding to the stored indices of B - rowrange = _rowrange_tri_stored(B, j) - @inbounds @simd for i in rowrange - @stable_muladdmul _modify!(MulAddMul(alpha,beta), D.diag[i] * B_maybeparent[i,j], out_maybeparent, (i,j)) - end - # Fill the indices of out corresponding to the zeros of B - # we only fill these if out and B don't have matching zeros - if !_has_matching_zeros(out, B) - rowrange = _rowrange_tri_zeros(B, j) - @inbounds @simd for i in rowrange - @stable_muladdmul _modify!(MulAddMul(alpha,beta), D.diag[i] * B[i,j], out, (i,j)) - end - end - end - return out -end - -@inline function __muldiag_nonzeroalpha_right!(out, A, D::Diagonal, alpha::Number, beta::Number) - @inbounds for j in axes(A, 2) - dja = @stable_muladdmul MulAddMul(alpha,false)(D.diag[j]) - @simd for i in axes(A, 1) - @stable_muladdmul _modify!(MulAddMul(true,beta), A[i,j] * dja, out, (i,j)) - end - end - return out -end - -function __muldiag_nonzeroalpha!(out, A, D::Diagonal, alpha::Number, beta::Number) - __muldiag_nonzeroalpha_right!(out, A, D, alpha, beta) -end -function __muldiag_nonzeroalpha!(out, A::UpperOrLowerTriangular, D::Diagonal, alpha::Number, beta::Number) - isunit = A isa UnitUpperOrUnitLowerTriangular - # if both A and out have the same upper/lower triangular structure, - # we may directly read and write from the parents - out_maybeparent, A_maybeparent = _has_matching_zeros(out, A) ? (parent(out), parent(A)) : (out, A) - for j in axes(A, 2) - dja = @stable_muladdmul MulAddMul(alpha,false)(@inbounds D.diag[j]) - # store the diagonal separately for unit triangular matrices - if isunit - # since alpha is multiplied to the diagonal element of D, - # we may skip alpha in the second multiplication by setting ais1 to true - @inbounds @stable_muladdmul _modify!(MulAddMul(true,beta), A[j,j] * dja, out, (j,j)) - end - # indices of out corresponding to the stored indices of A - rowrange = _rowrange_tri_stored(A, j) - @inbounds @simd for i in rowrange - # since alpha is multiplied to the diagonal element of D, - # we may skip alpha in the second multiplication by setting ais1 to true - @stable_muladdmul _modify!(MulAddMul(true,beta), A_maybeparent[i,j] * dja, out_maybeparent, (i,j)) - end - # Fill the indices of out corresponding to the zeros of A - # we only fill these if out and A don't have matching zeros - if !_has_matching_zeros(out, A) - rowrange = _rowrange_tri_zeros(A, j) - @inbounds @simd for i in rowrange - @stable_muladdmul _modify!(MulAddMul(true,beta), A[i,j] * dja, out, (i,j)) - end - end - end - return out -end - -# ambiguity resolution -function __muldiag_nonzeroalpha!(out, D1::Diagonal, D2::Diagonal, alpha::Number, beta::Number) - __muldiag_nonzeroalpha_right!(out, D1, D2, alpha, beta) -end - -@inline function __muldiag_nonzeroalpha!(out::Diagonal, D1::Diagonal, D2::Diagonal, alpha::Number, beta::Number) - d1 = D1.diag - d2 = D2.diag - outd = out.diag - @inbounds @simd for i in eachindex(d1, d2, outd) - @stable_muladdmul _modify!(MulAddMul(alpha,beta), d1[i] * d2[i], outd, i) - end - return out -end - -# muldiag handles the zero-alpha case, so that we need only -# specialize the non-trivial case -function _mul_diag!(out, A, B, alpha, beta) - require_one_based_indexing(out, A, B) - _muldiag_size_check(size(out), size(A), size(B)) - if 
iszero(alpha) - _rmul_or_fill!(out, beta) - else - __muldiag_nonzeroalpha!(out, A, B, alpha, beta) - end - return out -end - -_mul!(out::AbstractVector, D::Diagonal, V::AbstractVector, alpha::Number, beta::Number) = - _mul_diag!(out, D, V, alpha, beta) -_mul!(out::AbstractMatrix, D::Diagonal, V::AbstractVector, alpha::Number, beta::Number) = - _mul_diag!(out, D, V, alpha, beta) -for MT in (:AbstractMatrix, :AbstractTriangular) - @eval begin - _mul!(out::AbstractMatrix, D::Diagonal, B::$MT, alpha::Number, beta::Number) = - _mul_diag!(out, D, B, alpha, beta) - _mul!(out::AbstractMatrix, A::$MT, D::Diagonal, alpha::Number, beta::Number) = - _mul_diag!(out, A, D, alpha, beta) - end -end -_mul!(C::AbstractMatrix, Da::Diagonal, Db::Diagonal, alpha::Number, beta::Number) = - _mul_diag!(C, Da, Db, alpha, beta) - -function (*)(Da::Diagonal, A::AbstractMatrix, Db::Diagonal) - _muldiag_size_check(size(Da), size(A)) - _muldiag_size_check(size(A), size(Db)) - return broadcast(*, Da.diag, A, permutedims(Db.diag)) -end - -function (*)(Da::Diagonal, Db::Diagonal, Dc::Diagonal) - _muldiag_size_check(size(Da), size(Db)) - _muldiag_size_check(size(Db), size(Dc)) - return Diagonal(Da.diag .* Db.diag .* Dc.diag) -end - -/(A::AbstractVecOrMat, D::Diagonal) = _rdiv!(matprod_dest(A, D, promote_op(/, eltype(A), eltype(D))), A, D) - -rdiv!(A::AbstractVecOrMat, D::Diagonal) = @inline _rdiv!(A, A, D) -# avoid copy when possible via internal 3-arg backend -function _rdiv!(B::AbstractVecOrMat, A::AbstractVecOrMat, D::Diagonal) - require_one_based_indexing(A) - dd = D.diag - m, n = size(A, 1), size(A, 2) - if (k = length(dd)) != n - throw(DimensionMismatch(lazy"left hand side has $n columns but D is $k by $k")) - end - @inbounds for j in 1:n - ddj = dd[j] - iszero(ddj) && throw(SingularException(j)) - for i in 1:m - B[i, j] = A[i, j] / ddj - end - end - B -end - -function \(D::Diagonal, B::AbstractVector) - j = findfirst(iszero, D.diag) - isnothing(j) || throw(SingularException(j)) - return D.diag .\ B -end -\(D::Diagonal, B::AbstractMatrix) = ldiv!(matprod_dest(D, B, promote_op(\, eltype(D), eltype(B))), D, B) - -ldiv!(D::Diagonal, B::AbstractVecOrMat) = @inline ldiv!(B, D, B) -function ldiv!(B::AbstractVecOrMat, D::Diagonal, A::AbstractVecOrMat) - require_one_based_indexing(A, B) - dd = D.diag - d = length(dd) - m, n = size(A, 1), size(A, 2) - m′, n′ = size(B, 1), size(B, 2) - m == d || throw(DimensionMismatch(lazy"right hand side has $m rows but D is $d by $d")) - (m, n) == (m′, n′) || throw(DimensionMismatch(lazy"expect output to be $m by $n, but got $m′ by $n′")) - j = findfirst(iszero, D.diag) - isnothing(j) || throw(SingularException(j)) - @inbounds for j = 1:n, i = 1:m - B[i, j] = dd[i] \ A[i, j] - end - B -end - -function _rdiv!(Dc::Diagonal, Db::Diagonal, Da::Diagonal) - n, k = length(Db.diag), length(Da.diag) - n == k || throw(DimensionMismatch(lazy"left hand side has $n columns but D is $k by $k")) - j = findfirst(iszero, Da.diag) - isnothing(j) || throw(SingularException(j)) - Dc.diag .= Db.diag ./ Da.diag - Dc -end -ldiv!(Dc::Diagonal, Da::Diagonal, Db::Diagonal) = Diagonal(ldiv!(Dc.diag, Da, Db.diag)) - -# optimizations for (Sym)Tridiagonal and Diagonal -@propagate_inbounds _getudiag(T::Tridiagonal, i) = T.du[i] -@propagate_inbounds _getudiag(S::SymTridiagonal, i) = S.ev[i] -@propagate_inbounds _getdiag(T::Tridiagonal, i) = T.d[i] -@propagate_inbounds _getdiag(S::SymTridiagonal, i) = symmetric(S.dv[i], :U)::symmetric_type(eltype(S.dv)) -@propagate_inbounds _getldiag(T::Tridiagonal, i) = T.dl[i] 
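These solver methods (together with the `(Sym)Tridiagonal` accessor helpers that continue just below) let `D \ A` and `A / D` be evaluated by simple row- and column-scaling, without ever forming `inv(D)`. A rough usage sketch with plain dense operands; the names here are illustrative only:

```julia
using LinearAlgebra

D = Diagonal([2.0, 4.0, 5.0])
A = rand(3, 3)
B = similar(A)

ldiv!(B, D, A)            # B .= D \ A: row i of A is divided by D[i,i]
B ≈ inv(Matrix(D)) * A    # true, up to floating point

rdiv!(A, D)               # A .= A / D in place: column j is divided by D[j,j]
```

A zero on the diagonal makes these throw `SingularException`, matching the checks in the methods above.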
-@propagate_inbounds _getldiag(S::SymTridiagonal, i) = transpose(S.ev[i]) - -function (\)(D::Diagonal, S::SymTridiagonal) - T = promote_op(\, eltype(D), eltype(S)) - du = similar(S.ev, T, max(length(S.dv)-1, 0)) - d = similar(S.dv, T, length(S.dv)) - dl = similar(S.ev, T, max(length(S.dv)-1, 0)) - ldiv!(Tridiagonal(dl, d, du), D, S) -end -(\)(D::Diagonal, T::Tridiagonal) = ldiv!(similar(T, promote_op(\, eltype(D), eltype(T))), D, T) -function ldiv!(T::Tridiagonal, D::Diagonal, S::Union{SymTridiagonal,Tridiagonal}) - m = size(S, 1) - dd = D.diag - if (k = length(dd)) != m - throw(DimensionMismatch(lazy"diagonal matrix is $k by $k but right hand side has $m rows")) - end - if length(T.d) != m - throw(DimensionMismatch(lazy"target matrix size $(size(T)) does not match input matrix size $(size(S))")) - end - m == 0 && return T - j = findfirst(iszero, dd) - isnothing(j) || throw(SingularException(j)) - ddj = dd[1] - T.d[1] = ddj \ _getdiag(S, 1) - @inbounds if m > 1 - T.du[1] = ddj \ _getudiag(S, 1) - for j in 2:m-1 - ddj = dd[j] - T.dl[j-1] = ddj \ _getldiag(S, j-1) - T.d[j] = ddj \ _getdiag(S, j) - T.du[j] = ddj \ _getudiag(S, j) - end - ddj = dd[m] - T.dl[m-1] = ddj \ _getldiag(S, m-1) - T.d[m] = ddj \ _getdiag(S, m) - end - return T -end - -function (/)(S::SymTridiagonal, D::Diagonal) - T = promote_op(\, eltype(D), eltype(S)) - du = similar(S.ev, T, max(length(S.dv)-1, 0)) - d = similar(S.dv, T, length(S.dv)) - dl = similar(S.ev, T, max(length(S.dv)-1, 0)) - _rdiv!(Tridiagonal(dl, d, du), S, D) -end -(/)(T::Tridiagonal, D::Diagonal) = _rdiv!(matprod_dest(T, D, promote_op(/, eltype(T), eltype(D))), T, D) -function _rdiv!(T::Tridiagonal, S::Union{SymTridiagonal,Tridiagonal}, D::Diagonal) - n = size(S, 2) - dd = D.diag - if (k = length(dd)) != n - throw(DimensionMismatch(lazy"left hand side has $n columns but D is $k by $k")) - end - if length(T.d) != n - throw(DimensionMismatch(lazy"target matrix size $(size(T)) does not match input matrix size $(size(S))")) - end - n == 0 && return T - j = findfirst(iszero, dd) - isnothing(j) || throw(SingularException(j)) - ddj = dd[1] - T.d[1] = _getdiag(S, 1) / ddj - @inbounds if n > 1 - T.dl[1] = _getldiag(S, 1) / ddj - for j in 2:n-1 - ddj = dd[j] - T.dl[j] = _getldiag(S, j) / ddj - T.d[j] = _getdiag(S, j) / ddj - T.du[j-1] = _getudiag(S, j-1) / ddj - end - ddj = dd[n] - T.d[n] = _getdiag(S, n) / ddj - T.du[n-1] = _getudiag(S, n-1) / ddj - end - return T -end - -# Optimizations for [l/r]mul!, l/rdiv!, *, / and \ between Triangular and Diagonal. -# These functions are generally more efficient if we calculate the whole data field. -# The following code implements them in a unified pattern to avoid missing. -@inline function _setdiag!(data, f, diag, diag′ = nothing) - @inbounds for i in 1:length(diag) - data[i,i] = isnothing(diag′) ? 
f(diag[i]) : f(diag[i],diag′[i]) - end - data -end -for Tri in (:UpperTriangular, :LowerTriangular) - UTri = Symbol(:Unit, Tri) - # 2 args - for (fun, f) in zip((:*, :rmul!, :rdiv!, :/), (:identity, :identity, :inv, :inv)) - @eval $fun(A::$Tri, D::Diagonal) = $Tri($fun(A.data, D)) - @eval $fun(A::$UTri, D::Diagonal) = $Tri(_setdiag!($fun(A.data, D), $f, D.diag)) - end - @eval *(A::$Tri{<:Any, <:StridedMaybeAdjOrTransMat}, D::Diagonal) = - @invoke *(A::AbstractMatrix, D::Diagonal) - @eval *(A::$UTri{<:Any, <:StridedMaybeAdjOrTransMat}, D::Diagonal) = - @invoke *(A::AbstractMatrix, D::Diagonal) - for (fun, f) in zip((:*, :lmul!, :ldiv!, :\), (:identity, :identity, :inv, :inv)) - @eval $fun(D::Diagonal, A::$Tri) = $Tri($fun(D, A.data)) - @eval $fun(D::Diagonal, A::$UTri) = $Tri(_setdiag!($fun(D, A.data), $f, D.diag)) - end - @eval *(D::Diagonal, A::$Tri{<:Any, <:StridedMaybeAdjOrTransMat}) = - @invoke *(D::Diagonal, A::AbstractMatrix) - @eval *(D::Diagonal, A::$UTri{<:Any, <:StridedMaybeAdjOrTransMat}) = - @invoke *(D::Diagonal, A::AbstractMatrix) - # 3-arg ldiv! - @eval ldiv!(C::$Tri, D::Diagonal, A::$Tri) = $Tri(ldiv!(C.data, D, A.data)) - @eval ldiv!(C::$Tri, D::Diagonal, A::$UTri) = $Tri(_setdiag!(ldiv!(C.data, D, A.data), inv, D.diag)) -end - -@inline function kron!(C::AbstractMatrix, A::Diagonal, B::Diagonal) - valA = A.diag; mA, nA = size(A) - valB = B.diag; mB, nB = size(B) - nC = checksquare(C) - @boundscheck nC == nA*nB || - throw(DimensionMismatch(lazy"expect C to be a $(nA*nB)x$(nA*nB) matrix, got size $(nC)x$(nC)")) - zerofilled = false - if !(isempty(A) || isempty(B)) - z = A[1,1] * B[1,1] - if haszero(typeof(z)) - # in this case, the zero is unique - fill!(C, zero(z)) - zerofilled = true - end - end - for i in eachindex(valA), j in eachindex(valB) - idx = (i-1)*nB+j - @inbounds C[idx, idx] = valA[i] * valB[j] - end - if !zerofilled - for j in axes(A,2), i in axes(A,1) - Δrow, Δcol = (i-1)*mB, (j-1)*nB - for k in axes(B,2), l in axes(B,1) - i == j && k == l && continue - @inbounds C[Δrow + l, Δcol + k] = A[i,j] * B[l,k] - end - end - end - return C -end - -kron(A::Diagonal, B::Diagonal) = Diagonal(kron(A.diag, B.diag)) - -function kron(A::Diagonal, B::SymTridiagonal) - kdv = kron(diag(A), B.dv) - # We don't need to drop the last element - kev = kron(diag(A), _pushzero(_evview(B))) - SymTridiagonal(kdv, kev) -end -function kron(A::Diagonal, B::Tridiagonal) - # `_droplast!` is only guaranteed to work with `Vector` - kd = convert(Vector, kron(diag(A), B.d)) - kdl = _droplast!(convert(Vector, kron(diag(A), _pushzero(B.dl)))) - kdu = _droplast!(convert(Vector, kron(diag(A), _pushzero(B.du)))) - Tridiagonal(kdl, kd, kdu) -end - -@inline function kron!(C::AbstractMatrix, A::Diagonal, B::AbstractMatrix) - require_one_based_indexing(B) - (mA, nA) = size(A) - (mB, nB) = size(B) - (mC, nC) = size(C) - @boundscheck (mC, nC) == (mA * mB, nA * nB) || - throw(DimensionMismatch(lazy"expect C to be a $(mA * mB)x$(nA * nB) matrix, got size $(mC)x$(nC)")) - zerofilled = false - if !(isempty(A) || isempty(B)) - z = A[1,1] * B[1,1] - if haszero(typeof(z)) - # in this case, the zero is unique - fill!(C, zero(z)) - zerofilled = true - end - end - m = 1 - for j in axes(A,2) - A_jj = @inbounds A[j,j] - for k in axes(B,2) - for l in axes(B,1) - @inbounds C[m] = A_jj * B[l,k] - m += 1 - end - m += (nA - 1) * mB - end - if !zerofilled - # populate the zero elements - for i in axes(A,1) - i == j && continue - A_ij = @inbounds A[i, j] - Δrow, Δcol = (i-1)*mB, (j-1)*nB - for k in axes(B,2), l in axes(B,1) - 
B_lk = @inbounds B[l, k] - @inbounds C[Δrow + l, Δcol + k] = A_ij * B_lk - end - end - end - m += mB - end - return C -end - -@inline function kron!(C::AbstractMatrix, A::AbstractMatrix, B::Diagonal) - require_one_based_indexing(A) - (mA, nA) = size(A) - (mB, nB) = size(B) - (mC, nC) = size(C) - @boundscheck (mC, nC) == (mA * mB, nA * nB) || - throw(DimensionMismatch(lazy"expect C to be a $(mA * mB)x$(nA * nB) matrix, got size $(mC)x$(nC)")) - zerofilled = false - if !(isempty(A) || isempty(B)) - z = A[1,1] * B[1,1] - if haszero(typeof(z)) - # in this case, the zero is unique - fill!(C, zero(z)) - zerofilled = true - end - end - m = 1 - for j in axes(A,2) - for l in axes(B,1) - Bll = @inbounds B[l,l] - for i in axes(A,1) - @inbounds C[m] = A[i,j] * Bll - m += nB - end - m += 1 - end - if !zerofilled - for i in axes(A,1) - A_ij = @inbounds A[i, j] - Δrow, Δcol = (i-1)*mB, (j-1)*nB - for k in axes(B,2), l in axes(B,1) - l == k && continue - B_lk = @inbounds B[l, k] - @inbounds C[Δrow + l, Δcol + k] = A_ij * B_lk - end - end - end - m -= nB - end - return C -end - -conj(D::Diagonal) = Diagonal(conj(D.diag)) -transpose(D::Diagonal{<:Number}) = D -transpose(D::Diagonal) = Diagonal(transpose.(D.diag)) -adjoint(D::Diagonal{<:Number}) = Diagonal(vec(adjoint(D.diag))) -adjoint(D::Diagonal{<:Number,<:Base.ReshapedArray{<:Number,1,<:Adjoint}}) = Diagonal(adjoint(parent(D.diag))) -adjoint(D::Diagonal) = Diagonal(adjoint.(D.diag)) -permutedims(D::Diagonal) = D -permutedims(D::Diagonal, perm) = (Base.checkdims_perm(axes(D), axes(D), perm); D) - -function diag(D::Diagonal, k::Integer=0) - # every branch call similar(..., ::Int) to make sure the - # same vector type is returned independent of k - v = similar(D.diag, max(0, length(D.diag)-abs(k))) - if k == 0 - copyto!(v, D.diag) - else - for i in eachindex(v) - v[i] = D[BandIndex(k, i)] - end - end - return v -end -tr(D::Diagonal) = sum(tr, D.diag) -det(D::Diagonal) = prod(det, D.diag) -function logdet(D::Diagonal{<:Complex}) # make sure branch cut is correct - z = sum(log, D.diag) - complex(real(z), rem2pi(imag(z), RoundNearest)) -end - -# Matrix functions -for f in (:exp, :cis, :log, :sqrt, - :cos, :sin, :tan, :csc, :sec, :cot, - :cosh, :sinh, :tanh, :csch, :sech, :coth, - :acos, :asin, :atan, :acsc, :asec, :acot, - :acosh, :asinh, :atanh, :acsch, :asech, :acoth) - @eval $f(D::Diagonal) = Diagonal($f.(D.diag)) -end - -# Cube root of a real-valued diagonal matrix -cbrt(A::Diagonal{<:Real}) = Diagonal(cbrt.(A.diag)) - -function inv(D::Diagonal{T}) where T - Di = similar(D.diag, typeof(inv(oneunit(T)))) - for i = 1:length(D.diag) - if iszero(D.diag[i]) - throw(SingularException(i)) - end - Di[i] = inv(D.diag[i]) - end - Diagonal(Di) -end - -function pinv(D::Diagonal{T}) where T - Di = similar(D.diag, typeof(inv(oneunit(T)))) - for i = 1:length(D.diag) - if !iszero(D.diag[i]) - invD = inv(D.diag[i]) - if isfinite(invD) - Di[i] = invD - continue - end - end - # fallback - Di[i] = zero(T) - end - Diagonal(Di) -end -function pinv(D::Diagonal{T}, tol::Real) where T - Di = similar(D.diag, typeof(inv(oneunit(T)))) - if !isempty(D.diag) - maxabsD = maximum(abs, D.diag) - for i = 1:length(D.diag) - if abs(D.diag[i]) > tol*maxabsD - invD = inv(D.diag[i]) - if isfinite(invD) - Di[i] = invD - continue - end - end - # fallback - Di[i] = zero(T) - end - end - Diagonal(Di) -end - -# TODO Docstrings for eigvals, eigvecs, eigen all mention permute, scale, sortby as keyword args -# but not all of them below provide them. Do we need to fix that? 
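Because a matrix function of a diagonal matrix is just that function applied entrywise to the diagonal, the `@eval` loop above forwards `exp`, `log`, the trigonometric functions, and friends to a broadcast over `D.diag`. A quick sanity check (values arbitrary):

```julia
using LinearAlgebra

D = Diagonal([0.5, 1.0, 2.0])

exp(D) == Diagonal(exp.([0.5, 1.0, 2.0]))   # true: no dense matrix exponential needed
log(D) == Diagonal(log.([0.5, 1.0, 2.0]))   # true
exp(log(D)) ≈ D                             # round-trips up to floating point
```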
-#Eigensystem -eigvals(D::Diagonal{<:Number}; permute::Bool=true, scale::Bool=true) = copy(D.diag) -eigvals(D::Diagonal; permute::Bool=true, scale::Bool=true) = - reduce(vcat, eigvals(x) for x in D.diag) #For block matrices, etc. -function eigvecs(D::Diagonal{T}) where T<:AbstractMatrix - diag_vecs = [ eigvecs(x) for x in D.diag ] - matT = reduce((a,b) -> promote_type(typeof(a),typeof(b)), diag_vecs) - ncols_diag = [ size(x, 2) for x in D.diag ] - nrows = size(D, 1) - vecs = Matrix{Vector{eltype(matT)}}(undef, nrows, sum(ncols_diag)) - for j in axes(D, 2), i in axes(D, 1) - jj = sum(view(ncols_diag,1:j-1)) - if i == j - for k in 1:ncols_diag[j] - vecs[i,jj+k] = diag_vecs[i][:,k] - end - else - for k in 1:ncols_diag[j] - vecs[i,jj+k] = zeros(eltype(T), ncols_diag[i]) - end - end - end - return vecs -end -function eigen(D::Diagonal; permute::Bool=true, scale::Bool=true, sortby::Union{Function,Nothing}=nothing) - if any(!isfinite, D.diag) - throw(ArgumentError("matrix contains Infs or NaNs")) - end - Td = Base.promote_op(/, eltype(D), eltype(D)) - λ = eigvals(D) - if !isnothing(sortby) - p = sortperm(λ; alg=QuickSort, by=sortby) - λ = λ[p] - evecs = zeros(Td, size(D)) - @inbounds for i in eachindex(p) - evecs[p[i],i] = one(Td) - end - else - evecs = Diagonal(ones(Td, length(λ))) - end - Eigen(λ, evecs) -end -function eigen(D::Diagonal{<:AbstractMatrix}; permute::Bool=true, scale::Bool=true, sortby::Union{Function,Nothing}=nothing) - if any(any(!isfinite, x) for x in D.diag) - throw(ArgumentError("matrix contains Infs or NaNs")) - end - λ = eigvals(D) - evecs = eigvecs(D) - if !isnothing(sortby) - p = sortperm(λ; alg=QuickSort, by=sortby) - λ = λ[p] - evecs = evecs[:,p] - end - Eigen(λ, evecs) -end -function eigen(Da::Diagonal, Db::Diagonal; sortby::Union{Function,Nothing}=nothing) - if any(!isfinite, Da.diag) || any(!isfinite, Db.diag) - throw(ArgumentError("matrices contain Infs or NaNs")) - end - if any(iszero, Db.diag) - throw(ArgumentError("right-hand side diagonal matrix is singular")) - end - return GeneralizedEigen(eigen(Db \ Da; sortby)...) -end -function eigen(A::AbstractMatrix, D::Diagonal; sortby::Union{Function,Nothing}=nothing) - if any(iszero, D.diag) - throw(ArgumentError("right-hand side diagonal matrix is singular")) - end - if size(A, 1) == size(A, 2) && isdiag(A) - return eigen(Diagonal(A), D; sortby) - elseif all(isposdef, D.diag) - S = promote_type(eigtype(eltype(A)), eltype(D)) - return eigen(A, cholesky(Diagonal{S}(D)); sortby) - else - return eigen!(D \ A; sortby) - end -end - -#Singular system -svdvals(D::Diagonal{<:Number}) = sort!(abs.(D.diag), rev = true) -svdvals(D::Diagonal) = [svdvals(v) for v in D.diag] -function svd(D::Diagonal{T}) where {T<:Number} - d = D.diag - s = abs.(d) - piv = sortperm(s, rev = true) - S = s[piv] - Td = typeof(oneunit(T)/oneunit(T)) - U = zeros(Td, size(D)) - Vt = copy(U) - for i in 1:length(d) - j = piv[i] - U[j,i] = d[j] / S[i] - Vt[i,j] = one(Td) - end - return SVD(U, S, Vt) -end - -*(x::AdjointAbsVec, D::Diagonal, y::AbstractVector) = _mapreduce_prod(*, x, D, y) -*(x::TransposeAbsVec, D::Diagonal, y::AbstractVector) = _mapreduce_prod(*, x, D, y) -/(u::AdjointAbsVec, D::Diagonal) = (D' \ u')' -/(u::TransposeAbsVec, D::Diagonal) = transpose(transpose(D) \ transpose(u)) -# disambiguation methods: Call unoptimized version for user defined AbstractTriangular. 
-*(A::AbstractTriangular, D::Diagonal) = @invoke *(A::AbstractMatrix, D::Diagonal) -*(D::Diagonal, A::AbstractTriangular) = @invoke *(D::Diagonal, A::AbstractMatrix) - -dot(x::AbstractVector, D::Diagonal, y::AbstractVector) = _mapreduce_prod(dot, x, D, y) - -dot(A::Diagonal, B::Diagonal) = dot(A.diag, B.diag) -function dot(D::Diagonal, B::AbstractMatrix) - size(D) == size(B) || throw(DimensionMismatch(lazy"Matrix sizes $(size(D)) and $(size(B)) differ")) - return dot(D.diag, diagview(B)) -end - -dot(A::AbstractMatrix, B::Diagonal) = conj(dot(B, A)) - -function _mapreduce_prod(f, x, D::Diagonal, y) - if !(length(x) == length(D.diag) == length(y)) - throw(DimensionMismatch(lazy"x has length $(length(x)), D has size $(size(D)), and y has $(length(y))")) - end - if isempty(x) && isempty(D) && isempty(y) - return zero(promote_op(f, eltype(x), eltype(D), eltype(y))) - else - return mapreduce(t -> f(t[1], t[2], t[3]), +, zip(x, D.diag, y)) - end -end - -function cholesky!(A::Diagonal, ::NoPivot = NoPivot(); check::Bool = true) - info = 0 - for (i, di) in enumerate(A.diag) - if isreal(di) && real(di) > 0 - A.diag[i] = √di - elseif check - throw(PosDefException(i)) - else - info = i - break - end - end - Cholesky(A, 'U', convert(BlasInt, info)) -end -@deprecate cholesky!(A::Diagonal, ::Val{false}; check::Bool = true) cholesky!(A::Diagonal, NoPivot(); check) false -@deprecate cholesky(A::Diagonal, ::Val{false}; check::Bool = true) cholesky(A::Diagonal, NoPivot(); check) false - -function cholesky!(A::Diagonal, ::RowMaximum; tol=0.0, check=true) - if !ishermitian(A) - C = CholeskyPivoted(A, 'U', Vector{BlasInt}(), convert(BlasInt, 1), - tol, convert(BlasInt, -1)) - check && checkpositivedefinite(convert(BlasInt, -1)) - else - d = A.diag - n = length(d) - info = 0 - rank = n - p = sortperm(d, rev = true, by = real) - tol = tol < 0 ? n*eps(eltype(A))*real(d[p[1]]) : tol # LAPACK behavior - permute!(d, p) - @inbounds for i in eachindex(d) - di = d[i] - rootdi, j = _cholpivoted!(di, tol) - if j == 0 - d[i] = rootdi - else - rank = i - 1 - info = 1 - break - end - end - C = CholeskyPivoted(A, 'U', p, convert(BlasInt, rank), tol, convert(BlasInt, info)) - check && chkfullrank(C) - end - return C -end - -inv(C::Cholesky{<:Any,<:Diagonal}) = Diagonal(map(inv∘abs2, C.factors.diag)) - -cholcopy(A::Diagonal) = copymutable_oftype(A, choltype(A)) -cholcopy(A::RealHermSymComplexHerm{<:Any,<:Diagonal}) = Diagonal(copy_similar(diag(A), choltype(A))) - -function getproperty(C::Cholesky{<:Any,<:Diagonal}, d::Symbol) - Cfactors = getfield(C, :factors) - if d in (:U, :L, :UL) - return Cfactors - else - return getfield(C, d) - end -end - -Base._sum(A::Diagonal, ::Colon) = sum(A.diag) -function Base._sum(A::Diagonal, dims::Integer) - res = Base.reducedim_initarray(A, dims, zero(eltype(A))) - if dims <= 2 - for i = 1:length(A.diag) - @inbounds res[i] = A.diag[i] - end - else - for i = 1:length(A.diag) - @inbounds res[i,i] = A.diag[i] - end - end - res -end - -function logabsdet(A::Diagonal) - mapreduce(x -> (log(abs(x)), sign(x)), ((d1, s1), (d2, s2)) -> (d1 + d2, s1 * s2), - A.diag) -end - -function Base.muladd(A::Diagonal, B::Diagonal, z::Diagonal) - Diagonal(A.diag .* B.diag .+ z.diag) -end - -uppertriangular(D::Diagonal) = D -lowertriangular(D::Diagonal) = D diff --git a/stdlib/LinearAlgebra/src/eigen.jl b/stdlib/LinearAlgebra/src/eigen.jl deleted file mode 100644 index e0124f2e9d870..0000000000000 --- a/stdlib/LinearAlgebra/src/eigen.jl +++ /dev/null @@ -1,682 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -# Eigendecomposition -""" - Eigen <: Factorization - -Matrix factorization type of the eigenvalue/spectral decomposition of a square -matrix `A`. This is the return type of [`eigen`](@ref), the corresponding matrix -factorization function. - -If `F::Eigen` is the factorization object, the eigenvalues can be obtained via -`F.values` and the eigenvectors as the columns of the matrix `F.vectors`. -(The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) - -Iterating the decomposition produces the components `F.values` and `F.vectors`. - -# Examples -```jldoctest -julia> F = eigen([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) -Eigen{Float64, Float64, Matrix{Float64}, Vector{Float64}} -values: -3-element Vector{Float64}: - 1.0 - 3.0 - 18.0 -vectors: -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 1.0 - -julia> F.values -3-element Vector{Float64}: - 1.0 - 3.0 - 18.0 - -julia> F.vectors -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 1.0 - -julia> vals, vecs = F; # destructuring via iteration - -julia> vals == F.values && vecs == F.vectors -true -``` -""" -struct Eigen{T,V,S<:AbstractMatrix,U<:AbstractVector} <: Factorization{T} - values::U - vectors::S - Eigen{T,V,S,U}(values::AbstractVector{V}, vectors::AbstractMatrix{T}) where {T,V,S,U} = - new(values, vectors) -end -Eigen(values::AbstractVector{V}, vectors::AbstractMatrix{T}) where {T,V} = - Eigen{T,V,typeof(vectors),typeof(values)}(values, vectors) - -# Generalized eigenvalue problem. -""" - GeneralizedEigen <: Factorization - -Matrix factorization type of the generalized eigenvalue/spectral decomposition of -`A` and `B`. This is the return type of [`eigen`](@ref), the corresponding -matrix factorization function, when called with two matrix arguments. - -If `F::GeneralizedEigen` is the factorization object, the eigenvalues can be obtained via -`F.values` and the eigenvectors as the columns of the matrix `F.vectors`. -(The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) - -Iterating the decomposition produces the components `F.values` and `F.vectors`. 
- -# Examples -```jldoctest -julia> A = [1 0; 0 -1] -2×2 Matrix{Int64}: - 1 0 - 0 -1 - -julia> B = [0 1; 1 0] -2×2 Matrix{Int64}: - 0 1 - 1 0 - -julia> F = eigen(A, B) -GeneralizedEigen{ComplexF64, ComplexF64, Matrix{ComplexF64}, Vector{ComplexF64}} -values: -2-element Vector{ComplexF64}: - 0.0 - 1.0im - 0.0 + 1.0im -vectors: -2×2 Matrix{ComplexF64}: - 0.0+1.0im 0.0-1.0im - -1.0+0.0im -1.0-0.0im - -julia> F.values -2-element Vector{ComplexF64}: - 0.0 - 1.0im - 0.0 + 1.0im - -julia> F.vectors -2×2 Matrix{ComplexF64}: - 0.0+1.0im 0.0-1.0im - -1.0+0.0im -1.0-0.0im - -julia> vals, vecs = F; # destructuring via iteration - -julia> vals == F.values && vecs == F.vectors -true -``` -""" -struct GeneralizedEigen{T,V,S<:AbstractMatrix,U<:AbstractVector} <: Factorization{T} - values::U - vectors::S - GeneralizedEigen{T,V,S,U}(values::AbstractVector{V}, vectors::AbstractMatrix{T}) where {T,V,S,U} = - new(values, vectors) -end -GeneralizedEigen(values::AbstractVector{V}, vectors::AbstractMatrix{T}) where {T,V} = - GeneralizedEigen{T,V,typeof(vectors),typeof(values)}(values, vectors) - -# iteration for destructuring into components -Base.iterate(S::Union{Eigen,GeneralizedEigen}) = (S.values, Val(:vectors)) -Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:vectors}) = (S.vectors, Val(:done)) -Base.iterate(S::Union{Eigen,GeneralizedEigen}, ::Val{:done}) = nothing - -isposdef(A::Union{Eigen,GeneralizedEigen}) = isreal(A.values) && all(x -> x > 0, A.values) - -# pick a canonical ordering to avoid returning eigenvalues in "random" order -# as is the LAPACK default (for complex λ — LAPACK sorts by λ for the Hermitian/Symmetric case) -eigsortby(λ::Real) = λ -eigsortby(λ::Complex) = (real(λ),imag(λ)) -function sorteig!(λ::AbstractVector, X::AbstractMatrix, sortby::Union{Function,Nothing}=eigsortby) - if sortby !== nothing && !issorted(λ, by=sortby) - p = sortperm(λ; alg=QuickSort, by=sortby) - permute!(λ, p) - Base.permutecols!!(X, p) - end - return λ, X -end -sorteig!(λ::AbstractVector, sortby::Union{Function,Nothing}=eigsortby) = sortby === nothing ? λ : sort!(λ, by=sortby) - -""" - eigen!(A; permute, scale, sortby) - eigen!(A, B; sortby) - -Same as [`eigen`](@ref), but saves space by overwriting the input `A` (and -`B`), instead of creating a copy. -""" -function eigen!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true, sortby::Union{Function,Nothing}=eigsortby) where T<:BlasReal - n = size(A, 2) - n == 0 && return Eigen(zeros(T, 0), zeros(T, 0, 0)) - issymmetric(A) && return eigen!(Symmetric(A), sortby=sortby) - A, WR, WI, VL, VR, _ = LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'V', 'N', A) - iszero(WI) && return Eigen(sorteig!(WR, VR, sortby)...) - evec = zeros(Complex{T}, n, n) - j = 1 - while j <= n - if WI[j] == 0 - evec[:,j] = view(VR, :, j) - else - for i = 1:n - evec[i,j] = VR[i,j] + im*VR[i,j+1] - evec[i,j+1] = VR[i,j] - im*VR[i,j+1] - end - j += 1 - end - j += 1 - end - return Eigen(sorteig!(complex.(WR, WI), evec, sortby)...) -end - -function eigen!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true, sortby::Union{Function,Nothing}=eigsortby) where T<:BlasComplex - n = size(A, 2) - n == 0 && return Eigen(zeros(T, 0), zeros(T, 0, 0)) - ishermitian(A) && return eigen!(Hermitian(A), sortby=sortby) - E = LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'V', 'N', A) - eval, evec = E[2], E[4] - return Eigen(sorteig!(eval, evec, sortby)...) 
-end - -""" - eigen(A; permute::Bool=true, scale::Bool=true, sortby) -> Eigen - -Compute the eigenvalue decomposition of `A`, returning an [`Eigen`](@ref) factorization object `F` -which contains the eigenvalues in `F.values` and the eigenvectors in the columns of the -matrix `F.vectors`. This corresponds to solving an eigenvalue problem of the form -`Ax = λx`, where `A` is a matrix, `x` is an eigenvector, and `λ` is an eigenvalue. -(The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) - -Iterating the decomposition produces the components `F.values` and `F.vectors`. - -The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). - -For general nonsymmetric matrices it is possible to specify how the matrix is balanced -before the eigenvector calculation. The option `permute=true` permutes the matrix to become -closer to upper triangular, and `scale=true` scales the matrix by its diagonal elements to -make rows and columns more equal in norm. The default is `true` for both options. - -By default, the eigenvalues and vectors are sorted lexicographically by `(real(λ),imag(λ))`. -A different comparison function `by(λ)` can be passed to `sortby`, or you can pass -`sortby=nothing` to leave the eigenvalues in an arbitrary order. Some special matrix types -(e.g. [`Diagonal`](@ref) or [`SymTridiagonal`](@ref)) may implement their own sorting convention and not -accept a `sortby` keyword. - -# Examples -```jldoctest -julia> F = eigen([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) -Eigen{Float64, Float64, Matrix{Float64}, Vector{Float64}} -values: -3-element Vector{Float64}: - 1.0 - 3.0 - 18.0 -vectors: -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 1.0 - -julia> F.values -3-element Vector{Float64}: - 1.0 - 3.0 - 18.0 - -julia> F.vectors -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 1.0 - -julia> vals, vecs = F; # destructuring via iteration - -julia> vals == F.values && vecs == F.vectors -true -``` -""" -function eigen(A::AbstractMatrix{T}; permute::Bool=true, scale::Bool=true, sortby::Union{Function,Nothing}=eigsortby) where T - _eigen(A; permute, scale, sortby) -end -function eigen(A::AbstractMatrix{T}; permute::Bool=true, scale::Bool=true, sortby::Union{Function,Nothing}=eigsortby) where {T <: Union{Float16,Complex{Float16}}} - E = _eigen(A; permute, scale, sortby) - values = convert(AbstractVector{isreal(E.values) ? Float16 : Complex{Float16}}, E.values) - vectors = convert(AbstractMatrix{isreal(E.vectors) ? Float16 : Complex{Float16}}, E.vectors) - return Eigen(values, vectors) -end -function _eigen(A::AbstractMatrix{T}; permute=true, scale=true, sortby=eigsortby) where {T} - isdiag(A) && return eigen(Diagonal{eigtype(T)}(diag(A)); sortby) - if ishermitian(A) - eigen!(eigencopy_oftype(Hermitian(A), eigtype(T)); sortby) - else - eigen!(eigencopy_oftype(A, eigtype(T)); permute, scale, sortby) - end -end - -eigen(x::Number) = Eigen([x], fill(one(x), 1, 1)) - -""" - eigvecs(A; permute::Bool=true, scale::Bool=true, `sortby`) -> Matrix - -Return a matrix `M` whose columns are the eigenvectors of `A`. (The `k`th eigenvector can -be obtained from the slice `M[:, k]`.) The `permute`, `scale`, and `sortby` keywords are the same as -for [`eigen`](@ref). - -# Examples -```jldoctest -julia> eigvecs([1.0 0.0 0.0; 0.0 3.0 0.0; 0.0 0.0 18.0]) -3×3 Matrix{Float64}: - 1.0 0.0 0.0 - 0.0 1.0 0.0 - 0.0 0.0 1.0 -``` -""" -eigvecs(A::Union{Number, AbstractMatrix}; kws...) 
= - eigvecs(eigen(A; kws...)) -eigvecs(F::Union{Eigen, GeneralizedEigen}) = F.vectors - -eigvals(F::Union{Eigen, GeneralizedEigen}) = F.values - -""" - eigvals!(A; permute::Bool=true, scale::Bool=true, sortby) -> values - -Same as [`eigvals`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. -The `permute`, `scale`, and `sortby` keywords are the same as for [`eigen`](@ref). - -!!! note - The input matrix `A` will not contain its eigenvalues after `eigvals!` is - called on it - `A` is used as a workspace. - -# Examples -```jldoctest -julia> A = [1. 2.; 3. 4.] -2×2 Matrix{Float64}: - 1.0 2.0 - 3.0 4.0 - -julia> eigvals!(A) -2-element Vector{Float64}: - -0.3722813232690143 - 5.372281323269014 - -julia> A -2×2 Matrix{Float64}: - -0.372281 -1.0 - 0.0 5.37228 -``` -""" -function eigvals!(A::StridedMatrix{<:BlasReal}; permute::Bool=true, scale::Bool=true, sortby::Union{Function,Nothing}=eigsortby) - issymmetric(A) && return sorteig!(eigvals!(Symmetric(A)), sortby) - _, valsre, valsim, _ = LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'N', 'N', A) - return sorteig!(iszero(valsim) ? valsre : complex.(valsre, valsim), sortby) -end -function eigvals!(A::StridedMatrix{<:BlasComplex}; permute::Bool=true, scale::Bool=true, sortby::Union{Function,Nothing}=eigsortby) - ishermitian(A) && return sorteig!(eigvals(Hermitian(A)), sortby) - return sorteig!(LAPACK.geevx!(permute ? (scale ? 'B' : 'P') : (scale ? 'S' : 'N'), 'N', 'N', 'N', A)[2], sortby) -end - -# promotion type to use for eigenvalues of a Matrix{T} -eigtype(T) = promote_type(Float32, typeof(zero(T)/sqrt(abs2(one(T))))) - -""" - eigvals(A; permute::Bool=true, scale::Bool=true, sortby) -> values - -Return the eigenvalues of `A`. - -For general non-symmetric matrices it is possible to specify how the matrix is balanced -before the eigenvalue calculation. The `permute`, `scale`, and `sortby` keywords are -the same as for [`eigen`](@ref). - -# Examples -```jldoctest -julia> diag_matrix = [1 0; 0 4] -2×2 Matrix{Int64}: - 1 0 - 0 4 - -julia> eigvals(diag_matrix) -2-element Vector{Float64}: - 1.0 - 4.0 -``` -""" -eigvals(A::AbstractMatrix{T}; kws...) where T = - eigvals!(eigencopy_oftype(A, eigtype(T)); kws...) - -""" -For a scalar input, `eigvals` will return a scalar. - -# Examples -```jldoctest -julia> eigvals(-2) --2 -``` -""" -eigvals(x::Number; kwargs...) = imag(x) == 0 ? real(x) : x - -""" - eigmax(A; permute::Bool=true, scale::Bool=true) - -Return the largest eigenvalue of `A`. -The option `permute=true` permutes the matrix to become -closer to upper triangular, and `scale=true` scales the matrix by its diagonal elements to -make rows and columns more equal in norm. -Note that if the eigenvalues of `A` are complex, -this method will fail, since complex numbers cannot -be sorted. - -# Examples -```jldoctest -julia> A = [0 im; -im 0] -2×2 Matrix{Complex{Int64}}: - 0+0im 0+1im - 0-1im 0+0im - -julia> eigmax(A) -1.0 - -julia> A = [0 im; -1 0] -2×2 Matrix{Complex{Int64}}: - 0+0im 0+1im - -1+0im 0+0im - -julia> eigmax(A) -ERROR: DomainError with Complex{Int64}[0+0im 0+1im; -1+0im 0+0im]: -`A` cannot have complex eigenvalues. -Stacktrace: -[...] 
-``` -""" -function eigmax(A::Union{Number, AbstractMatrix}; permute::Bool=true, scale::Bool=true) - v = eigvals(A, permute = permute, scale = scale) - if eltype(v)<:Complex - throw(DomainError(A, "`A` cannot have complex eigenvalues.")) - end - maximum(v) -end - -""" - eigmin(A; permute::Bool=true, scale::Bool=true) - -Return the smallest eigenvalue of `A`. -The option `permute=true` permutes the matrix to become -closer to upper triangular, and `scale=true` scales the matrix by its diagonal elements to -make rows and columns more equal in norm. -Note that if the eigenvalues of `A` are complex, -this method will fail, since complex numbers cannot -be sorted. - -# Examples -```jldoctest -julia> A = [0 im; -im 0] -2×2 Matrix{Complex{Int64}}: - 0+0im 0+1im - 0-1im 0+0im - -julia> eigmin(A) --1.0 - -julia> A = [0 im; -1 0] -2×2 Matrix{Complex{Int64}}: - 0+0im 0+1im - -1+0im 0+0im - -julia> eigmin(A) -ERROR: DomainError with Complex{Int64}[0+0im 0+1im; -1+0im 0+0im]: -`A` cannot have complex eigenvalues. -Stacktrace: -[...] -``` -""" -function eigmin(A::Union{Number, AbstractMatrix}; - permute::Bool=true, scale::Bool=true) - v = eigvals(A, permute = permute, scale = scale) - if eltype(v)<:Complex - throw(DomainError(A, "`A` cannot have complex eigenvalues.")) - end - minimum(v) -end - -inv(A::Eigen) = A.vectors * inv(Diagonal(A.values)) / A.vectors -det(A::Eigen) = prod(A.values) - -# Generalized eigenproblem -function eigen!(A::StridedMatrix{T}, B::StridedMatrix{T}; sortby::Union{Function,Nothing}=eigsortby) where T<:BlasReal - issymmetric(A) && isposdef(B) && return eigen!(Symmetric(A), Symmetric(B), sortby=sortby) - n = size(A, 1) - if LAPACK.version() < v"3.6.0" - alphar, alphai, beta, _, vr = LAPACK.ggev!('N', 'V', A, B) - else - alphar, alphai, beta, _, vr = LAPACK.ggev3!('N', 'V', A, B) - end - iszero(alphai) && return GeneralizedEigen(sorteig!(alphar ./ beta, vr, sortby)...) - - vecs = zeros(Complex{T}, n, n) - j = 1 - while j <= n - if alphai[j] == 0 - vecs[:,j] = view(vr, :, j) - else - for i = 1:n - vecs[i,j ] = vr[i,j] + im*vr[i,j+1] - vecs[i,j+1] = vr[i,j] - im*vr[i,j+1] - end - j += 1 - end - j += 1 - end - return GeneralizedEigen(sorteig!(complex.(alphar, alphai)./beta, vecs, sortby)...) -end - -function eigen!(A::StridedMatrix{T}, B::StridedMatrix{T}; sortby::Union{Function,Nothing}=eigsortby) where T<:BlasComplex - ishermitian(A) && isposdef(B) && return eigen!(Hermitian(A), Hermitian(B), sortby=sortby) - if LAPACK.version() < v"3.6.0" - alpha, beta, _, vr = LAPACK.ggev!('N', 'V', A, B) - else - alpha, beta, _, vr = LAPACK.ggev3!('N', 'V', A, B) - end - return GeneralizedEigen(sorteig!(alpha./beta, vr, sortby)...) -end - -""" - eigen(A, B; sortby) -> GeneralizedEigen - -Compute the generalized eigenvalue decomposition of `A` and `B`, returning a -[`GeneralizedEigen`](@ref) factorization object `F` which contains the generalized eigenvalues in -`F.values` and the generalized eigenvectors in the columns of the matrix `F.vectors`. -This corresponds to solving a generalized eigenvalue problem of the form -`Ax = λBx`, where `A, B` are matrices, `x` is an eigenvector, and `λ` is an eigenvalue. -(The `k`th generalized eigenvector can be obtained from the slice `F.vectors[:, k]`.) - -Iterating the decomposition produces the components `F.values` and `F.vectors`. - -By default, the eigenvalues and vectors are sorted lexicographically by `(real(λ),imag(λ))`. 
-A different comparison function `by(λ)` can be passed to `sortby`, or you can pass -`sortby=nothing` to leave the eigenvalues in an arbitrary order. - -# Examples -```jldoctest -julia> A = [1 0; 0 -1] -2×2 Matrix{Int64}: - 1 0 - 0 -1 - -julia> B = [0 1; 1 0] -2×2 Matrix{Int64}: - 0 1 - 1 0 - -julia> F = eigen(A, B); - -julia> F.values -2-element Vector{ComplexF64}: - 0.0 - 1.0im - 0.0 + 1.0im - -julia> F.vectors -2×2 Matrix{ComplexF64}: - 0.0+1.0im 0.0-1.0im - -1.0+0.0im -1.0-0.0im - -julia> vals, vecs = F; # destructuring via iteration - -julia> vals == F.values && vecs == F.vectors -true -``` -""" -function eigen(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}; kws...) where {TA,TB} - S = promote_type(eigtype(TA), TB) - eigen!(copy_similar(A, S), copy_similar(B, S); kws...) -end -eigen(A::Number, B::Number) = eigen(fill(A,1,1), fill(B,1,1)) - -""" - LinearAlgebra.eigencopy_oftype(A::AbstractMatrix, ::Type{S}) - -Creates a dense copy of `A` with eltype `S` by calling `copy_similar(A, S)`. -In the case of `Hermitian` or `Symmetric` matrices additionally retains the wrapper, -together with the `uplo` field. -""" -eigencopy_oftype(A, S) = copy_similar(A, S) - -""" - eigvals!(A, B; sortby) -> values - -Same as [`eigvals`](@ref), but saves space by overwriting the input `A` (and `B`), -instead of creating copies. - -!!! note - The input matrices `A` and `B` will not contain their eigenvalues after - `eigvals!` is called. They are used as workspaces. - -# Examples -```jldoctest -julia> A = [1. 0.; 0. -1.] -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 -1.0 - -julia> B = [0. 1.; 1. 0.] -2×2 Matrix{Float64}: - 0.0 1.0 - 1.0 0.0 - -julia> eigvals!(A, B) -2-element Vector{ComplexF64}: - 0.0 - 1.0im - 0.0 + 1.0im - -julia> A -2×2 Matrix{Float64}: - -0.0 -1.0 - 1.0 -0.0 - -julia> B -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 1.0 -``` -""" -function eigvals!(A::StridedMatrix{T}, B::StridedMatrix{T}; sortby::Union{Function,Nothing}=eigsortby) where T<:BlasReal - issymmetric(A) && isposdef(B) && return sorteig!(eigvals!(Symmetric(A), Symmetric(B)), sortby) - if LAPACK.version() < v"3.6.0" - alphar, alphai, beta, vl, vr = LAPACK.ggev!('N', 'N', A, B) - else - alphar, alphai, beta, vl, vr = LAPACK.ggev3!('N', 'N', A, B) - end - return sorteig!((iszero(alphai) ? alphar : complex.(alphar, alphai))./beta, sortby) -end -function eigvals!(A::StridedMatrix{T}, B::StridedMatrix{T}; sortby::Union{Function,Nothing}=eigsortby) where T<:BlasComplex - ishermitian(A) && isposdef(B) && return sorteig!(eigvals!(Hermitian(A), Hermitian(B)), sortby) - if LAPACK.version() < v"3.6.0" - alpha, beta, vl, vr = LAPACK.ggev!('N', 'N', A, B) - else - alpha, beta, vl, vr = LAPACK.ggev3!('N', 'N', A, B) - end - return sorteig!(alpha./beta, sortby) -end - -""" - eigvals(A, B) -> values - -Compute the generalized eigenvalues of `A` and `B`. - -# Examples -```jldoctest -julia> A = [1 0; 0 -1] -2×2 Matrix{Int64}: - 1 0 - 0 -1 - -julia> B = [0 1; 1 0] -2×2 Matrix{Int64}: - 0 1 - 1 0 - -julia> eigvals(A,B) -2-element Vector{ComplexF64}: - 0.0 - 1.0im - 0.0 + 1.0im -``` -""" -function eigvals(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}; kws...) where {TA,TB} - S = promote_type(eigtype(TA), TB) - return eigvals!(copy_similar(A, S), copy_similar(B, S); kws...) -end - -""" - eigvecs(A, B) -> Matrix - -Return a matrix `M` whose columns are the generalized eigenvectors of `A` and `B`. (The `k`th eigenvector can -be obtained from the slice `M[:, k]`.) 
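A small sketch of the `eigencopy_oftype` contract described above; it assumes the `Symmetric` specialization that the docstring refers to (defined elsewhere in the library), so treat it as illustrative rather than normative:

```julia
using LinearAlgebra

A = Symmetric([1 2; 2 3])   # integer-valued symmetric wrapper

# eigencopy_oftype produces a dense copy with the requested eltype;
# per its docstring, the Symmetric wrapper (and its uplo) is retained
B = LinearAlgebra.eigencopy_oftype(A, Float64)
@assert B isa Symmetric
@assert eltype(B) == Float64
@assert B == A
```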
- -# Examples -```jldoctest -julia> A = [1 0; 0 -1] -2×2 Matrix{Int64}: - 1 0 - 0 -1 - -julia> B = [0 1; 1 0] -2×2 Matrix{Int64}: - 0 1 - 1 0 - -julia> eigvecs(A, B) -2×2 Matrix{ComplexF64}: - 0.0+1.0im 0.0-1.0im - -1.0+0.0im -1.0-0.0im -``` -""" -eigvecs(A::AbstractMatrix, B::AbstractMatrix; kws...) = eigvecs(eigen(A, B; kws...)) - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::Union{Eigen,GeneralizedEigen}) - summary(io, F); println(io) - println(io, "values:") - show(io, mime, F.values) - println(io, "\nvectors:") - show(io, mime, F.vectors) -end - -_equalcheck(f, Avalues, Avectors, Bvalues, Bvectors) = f(Avalues, Bvalues) && f(Avectors, Bvectors) -for T in (Eigen, GeneralizedEigen) - @eval begin - function Base.hash(F::$T, h::UInt) - return hash(F.values, hash(F.vectors, hash($T, h))) - end - function Base.:(==)(A::$T, B::$T) - return _equalcheck(==, A..., B...) - end - function Base.isequal(A::$T, B::$T) - return _equalcheck(isequal, A..., B...) - end - end -end - -# Conversion methods - -## Can we determine the source/result is Real? This is not stored in the type Eigen -AbstractMatrix(F::Eigen) = F.vectors * Diagonal(F.values) / F.vectors -AbstractArray(F::Eigen) = AbstractMatrix(F) -Matrix(F::Eigen) = Array(AbstractArray(F)) -Array(F::Eigen) = Matrix(F) diff --git a/stdlib/LinearAlgebra/src/exceptions.jl b/stdlib/LinearAlgebra/src/exceptions.jl deleted file mode 100644 index 7791b1ddef416..0000000000000 --- a/stdlib/LinearAlgebra/src/exceptions.jl +++ /dev/null @@ -1,76 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -export LAPACKException, - SingularException, - PosDefException, - RankDeficientException, - ZeroPivotException - -""" - LAPACKException - -Generic LAPACK exception thrown either during direct calls to the [LAPACK functions](@ref man-linalg-lapack-functions) -or during calls to other functions that use the LAPACK functions internally but lack specialized error handling. The `info` field -contains additional information on the underlying error and depends on the LAPACK function that was invoked. -""" -struct LAPACKException <: Exception - info::BlasInt -end - -""" - SingularException - -Exception thrown when the input matrix has one or more zero-valued eigenvalues, and is not invertible. -A linear solve involving such a matrix cannot be computed. -The `info` field indicates the location of (one of) the singular value(s). -""" -struct SingularException <: Exception - info::BlasInt -end - -""" - PosDefException - -Exception thrown when the input matrix was not [positive definite](https://en.wikipedia.org/wiki/Definiteness_of_a_matrix). -Some linear algebra functions and factorizations are only applicable to positive definite matrices. -The `info` field indicates the location of (one of) the eigenvalue(s) which is (are) less than/equal to 0. -""" -struct PosDefException <: Exception - info::BlasInt -end -function Base.showerror(io::IO, ex::PosDefException) - print(io, "PosDefException: matrix is not ") - if ex.info == -1 - print(io, "Hermitian") - else - print(io, "positive definite") - end - print(io, "; Factorization failed.") -end - -""" - RankDeficientException - -Exception thrown when the input matrix is [rank deficient](https://en.wikipedia.org/wiki/Rank_(linear_algebra)). Some -linear algebra functions, such as the Cholesky decomposition, are only applicable to matrices that are not rank -deficient. The `info` field indicates the computed rank of the matrix. 
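To make the exception types above concrete, a short sketch of two common ways they surface (illustrative; the exact `info` values assume the standard triangular-solve and Cholesky checks):

```julia
using LinearAlgebra

# A triangular solve with a zero pivot throws SingularException;
# `info` records the position of the zero pivot
err = try
    UpperTriangular([1.0 2.0; 0.0 0.0]) \ [1.0, 1.0]
catch e
    e
end
@assert err isa SingularException
@assert err.info == 2

# cholesky of a matrix that is not positive definite throws
# PosDefException unless called with check=false
F = cholesky([1.0 2.0; 2.0 1.0]; check = false)
@assert !issuccess(F)
```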
-""" -struct RankDeficientException <: Exception - info::BlasInt -end - -""" - ZeroPivotException <: Exception - -Exception thrown when a matrix factorization/solve encounters a zero in a pivot (diagonal) -position and cannot proceed. This may *not* mean that the matrix is singular: -it may be fruitful to switch to a different factorization such as pivoted LU -that can re-order variables to eliminate spurious zero pivots. -The `info` field indicates the location of (one of) the zero pivot(s). -""" -struct ZeroPivotException <: Exception - info::BlasInt -end -function Base.showerror(io::IO, ex::ZeroPivotException) - print(io, "ZeroPivotException: factorization encountered one or more zero pivots. Consider switching to a pivoted LU factorization.") -end diff --git a/stdlib/LinearAlgebra/src/factorization.jl b/stdlib/LinearAlgebra/src/factorization.jl deleted file mode 100644 index 4cefc661741be..0000000000000 --- a/stdlib/LinearAlgebra/src/factorization.jl +++ /dev/null @@ -1,202 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -## Matrix factorizations and decompositions -""" - LinearAlgebra.Factorization - -Abstract type for [matrix factorizations](https://en.wikipedia.org/wiki/Matrix_decomposition) -a.k.a. matrix decompositions. -See [online documentation](@ref man-linalg-factorizations) for a list of available -matrix factorizations. -""" -abstract type Factorization{T} end - -""" - AdjointFactorization - -Lazy wrapper type for the adjoint of the underlying `Factorization` object. Usually, the -`AdjointFactorization` constructor should not be called directly, use -[`adjoint(:: Factorization)`](@ref) instead. -""" -struct AdjointFactorization{T,S<:Factorization} <: Factorization{T} - parent::S -end -AdjointFactorization(F::Factorization) = - AdjointFactorization{Base.promote_op(adjoint,eltype(F)),typeof(F)}(F) - -""" - TransposeFactorization - -Lazy wrapper type for the transpose of the underlying `Factorization` object. Usually, the -`TransposeFactorization` constructor should not be called directly, use -[`transpose(:: Factorization)`](@ref) instead. -""" -struct TransposeFactorization{T,S<:Factorization} <: Factorization{T} - parent::S -end -TransposeFactorization(F::Factorization) = - TransposeFactorization{Base.promote_op(adjoint,eltype(F)),typeof(F)}(F) - -eltype(::Type{<:Factorization{T}}) where {T} = T -size(F::AdjointFactorization) = reverse(size(parent(F))) -size(F::TransposeFactorization) = reverse(size(parent(F))) -size(F::Union{AdjointFactorization,TransposeFactorization}, d::Integer) = d in (1, 2) ? size(F)[d] : 1 -parent(F::Union{AdjointFactorization,TransposeFactorization}) = F.parent - -""" - adjoint(F::Factorization) - -Lazy adjoint of the factorization `F`. By default, returns an -[`AdjointFactorization`](@ref) wrapper. -""" -adjoint(F::Factorization) = AdjointFactorization(F) -""" - transpose(F::Factorization) - -Lazy transpose of the factorization `F`. By default, returns a [`TransposeFactorization`](@ref), -except for `Factorization`s with real `eltype`, in which case returns an [`AdjointFactorization`](@ref). 
-""" -transpose(F::Factorization) = TransposeFactorization(F) -transpose(F::Factorization{<:Real}) = AdjointFactorization(F) -adjoint(F::AdjointFactorization) = F.parent -transpose(F::TransposeFactorization) = F.parent -transpose(F::AdjointFactorization{<:Real}) = F.parent -conj(A::TransposeFactorization) = adjoint(A.parent) -conj(A::AdjointFactorization) = transpose(A.parent) - -# These functions expect a non-zero info to be positive, indicating the position where a problem was detected -checkpositivedefinite(info) = info == 0 || throw(PosDefException(info)) -checknonsingular(info) = info == 0 || throw(SingularException(info)) -checknozeropivot(info) = info == 0 || throw(ZeroPivotException(info)) - -""" - issuccess(F::Factorization) - -Test that a factorization of a matrix succeeded. - -!!! compat "Julia 1.6" - `issuccess(::CholeskyPivoted)` requires Julia 1.6 or later. - -# Examples - -```jldoctest -julia> F = cholesky([1 0; 0 1]); - -julia> issuccess(F) -true -``` -""" -issuccess(F::Factorization) - -function logdet(F::Factorization) - d, s = logabsdet(F) - return d + log(s) -end - -function det(F::Factorization) - d, s = logabsdet(F) - return exp(d)*s -end - -convert(::Type{T}, f::T) where {T<:Factorization} = f -convert(::Type{T}, f::Factorization) where {T<:Factorization} = T(f)::T - -convert(::Type{T}, f::Factorization) where {T<:AbstractArray} = T(f)::T - -### General promotion rules -Factorization{T}(F::Factorization{T}) where {T} = F -# This no longer looks odd since the return _is_ a Factorization! -Factorization{T}(A::AdjointFactorization) where {T} = - adjoint(Factorization{T}(parent(A))) -Factorization{T}(A::TransposeFactorization) where {T} = - transpose(Factorization{T}(parent(A))) -inv(F::Factorization{T}) where {T} = (n = size(F, 1); ldiv!(F, Matrix{T}(I, n, n))) - -Base.hash(F::Factorization, h::UInt) = mapreduce(f -> hash(getfield(F, f)), hash, 1:nfields(F); init=h) -Base.:(==)( F::T, G::T) where {T<:Factorization} = all(f -> getfield(F, f) == getfield(G, f), 1:nfields(F)) -Base.isequal(F::T, G::T) where {T<:Factorization} = all(f -> isequal(getfield(F, f), getfield(G, f)), 1:nfields(F))::Bool - -function Base.show(io::IO, x::AdjointFactorization) - print(io, "adjoint of ") - show(io, parent(x)) -end -function Base.show(io::IO, x::TransposeFactorization) - print(io, "transpose of ") - show(io, parent(x)) -end -function Base.show(io::IO, ::MIME"text/plain", x::AdjointFactorization) - print(io, "adjoint of ") - show(io, MIME"text/plain"(), parent(x)) -end -function Base.show(io::IO, ::MIME"text/plain", x::TransposeFactorization) - print(io, "transpose of ") - show(io, MIME"text/plain"(), parent(x)) -end - -function (\)(F::Factorization, B::AbstractVecOrMat) - require_one_based_indexing(B) - TFB = typeof(oneunit(eltype(F)) \ oneunit(eltype(B))) - ldiv!(F, copy_similar(B, TFB)) -end -(\)(F::TransposeFactorization, B::AbstractVecOrMat) = conj!(adjoint(F.parent) \ conj.(B)) -# With a real lhs and complex rhs with the same precision, we can reinterpret -# the complex rhs as a real rhs with twice the number of columns or rows -function (\)(F::Factorization{T}, B::VecOrMat{Complex{T}}) where {T<:BlasReal} - require_one_based_indexing(B) - c2r = reshape(copy(transpose(reinterpret(T, reshape(B, (1, length(B)))))), size(B, 1), 2*size(B, 2)) - x = ldiv!(F, c2r) - return reshape(copy(reinterpret(Complex{T}, copy(transpose(reshape(x, div(length(x), 2), 2))))), _ret_size(F, B)) -end -# don't do the reinterpretation for [Adjoint/Transpose]Factorization -(\)(F::TransposeFactorization{T}, 
B::VecOrMat{Complex{T}}) where {T<:BlasReal} = - conj!(adjoint(parent(F)) \ conj.(B)) -(\)(F::AdjointFactorization{T}, B::VecOrMat{Complex{T}}) where {T<:BlasReal} = - @invoke \(F::typeof(F), B::VecOrMat) - -function ldiv!(Y::AbstractVector, A::Factorization, B::AbstractVector) - require_one_based_indexing(Y, B) - m, n = size(A) - if m > n - Bc = copy(B) - ldiv!(A, Bc) - return copyto!(Y, 1, Bc, 1, n) - else - return ldiv!(A, copyto!(Y, B)) - end -end -function ldiv!(Y::AbstractMatrix, A::Factorization, B::AbstractMatrix) - require_one_based_indexing(Y, B) - m, n = size(A) - if m > n - Bc = copy(B) - ldiv!(A, Bc) - return copyto!(Y, view(Bc, 1:n, :)) - else - copyto!(view(Y, 1:m, :), view(B, 1:m, :)) - return ldiv!(A, Y) - end -end - -function (/)(B::AbstractMatrix, F::Factorization) - require_one_based_indexing(B) - TFB = typeof(oneunit(eltype(B)) / oneunit(eltype(F))) - rdiv!(copy_similar(B, TFB), F) -end -# reinterpretation trick for complex lhs and real factorization -function (/)(B::Union{Matrix{Complex{T}},AdjOrTrans{Complex{T},Vector{Complex{T}}}}, F::Factorization{T}) where {T<:BlasReal} - require_one_based_indexing(B) - x = rdiv!(copy(reinterpret(T, B)), F) - return copy(reinterpret(Complex{T}, x)) -end -# don't do the reinterpretation for [Adjoint/Transpose]Factorization -(/)(B::Union{Matrix{Complex{T}},AdjOrTrans{Complex{T},Vector{Complex{T}}}}, F::TransposeFactorization{T}) where {T<:BlasReal} = - @invoke /(B::AbstractMatrix, F::Factorization) -(/)(B::Matrix{Complex{T}}, F::AdjointFactorization{T}) where {T<:BlasReal} = - @invoke /(B::AbstractMatrix, F::Factorization) -(/)(B::Adjoint{Complex{T},Vector{Complex{T}}}, F::AdjointFactorization{T}) where {T<:BlasReal} = - (F' \ B')' -(/)(B::Transpose{Complex{T},Vector{Complex{T}}}, F::TransposeFactorization{T}) where {T<:BlasReal} = - transpose(transpose(F) \ transpose(B)) - -rdiv!(B::AbstractMatrix, A::TransposeFactorization) = transpose(ldiv!(A.parent, transpose(B))) -rdiv!(B::AbstractMatrix, A::AdjointFactorization) = adjoint(ldiv!(A.parent, adjoint(B))) diff --git a/stdlib/LinearAlgebra/src/generic.jl b/stdlib/LinearAlgebra/src/generic.jl deleted file mode 100644 index 2b03b24932c80..0000000000000 --- a/stdlib/LinearAlgebra/src/generic.jl +++ /dev/null @@ -1,2093 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -## linalg.jl: Some generic Linear Algebra definitions - -# Elements of `out` may not be defined (e.g., for `BigFloat`). To make -# `mul!(out, A, B)` work for such cases, `out .*ₛ beta` short-circuits -# `out * beta`. Using `broadcasted` to avoid the multiplication -# inside this function. -function *ₛ end -Broadcast.broadcasted(::typeof(*ₛ), out, beta) = - iszero(beta::Number) ? false : broadcasted(*, out, beta) - -""" - MulAddMul(alpha, beta) - -A callable for operating short-circuiting version of `x * alpha + y * beta`. 
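`MulAddMul` packages the `(alpha, beta)` pair that 5-argument `mul!` threads through the multiplication kernels; a sketch of the contract it implements, using only the public `mul!` API (illustrative values):

```julia
using LinearAlgebra

A = [1.0 2.0; 3.0 4.0]
B = [5.0 6.0; 7.0 8.0]
C = [1.0 1.0; 1.0 1.0]
alpha, beta = 2.0, 3.0

expected = A * B * alpha + C * beta

# 5-arg mul! updates C in place as C = A*B*alpha + C*beta; MulAddMul
# carries (alpha, beta) and short-circuits the isone/iszero special cases
mul!(C, A, B, alpha, beta)
@assert C ≈ expected
```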
- -# Examples -```jldoctest -julia> using LinearAlgebra: MulAddMul - -julia> _add = MulAddMul(1, 0); - -julia> _add(123, nothing) -123 - -julia> MulAddMul(12, 34)(56, 78) == 56 * 12 + 78 * 34 -true -``` -""" -struct MulAddMul{ais1, bis0, TA, TB} - alpha::TA - beta::TB -end - -@inline function MulAddMul(alpha::TA, beta::TB) where {TA,TB} - if isone(alpha) - if iszero(beta) - return MulAddMul{true,true,TA,TB}(alpha, beta) - else - return MulAddMul{true,false,TA,TB}(alpha, beta) - end - else - if iszero(beta) - return MulAddMul{false,true,TA,TB}(alpha, beta) - else - return MulAddMul{false,false,TA,TB}(alpha, beta) - end - end -end - -""" - @stable_muladdmul - -Replaces a function call, that has a `MulAddMul(alpha, beta)` constructor as an -argument, with a branch over possible values of `isone(alpha)` and `iszero(beta)` -and constructs `MulAddMul{isone(alpha), iszero(beta)}` explicitly in each branch. -For example, 'f(x, y, MulAddMul(alpha, beta))` is transformed into -``` -if isone(alpha) - if iszero(beta) - f(x, y, MulAddMul{true, true, typeof(alpha), typeof(beta)}(alpha, beta)) - else - f(x, y, MulAddMul{true, false, typeof(alpha), typeof(beta)}(alpha, beta)) - end -else - if iszero(beta) - f(x, y, MulAddMul{false, true, typeof(alpha), typeof(beta)}(alpha, beta)) - else - f(x, y, MulAddMul{false, false, typeof(alpha), typeof(beta)}(alpha, beta)) - end -end -``` -This avoids the type instability of the `MulAddMul(alpha, beta)` constructor, -which causes runtime dispatch in case alpha and zero are not constants. -""" -macro stable_muladdmul(expr) - expr.head == :call || throw(ArgumentError("Can only handle function calls.")) - for (i, e) in enumerate(expr.args) - e isa Expr || continue - if e.head == :call && e.args[1] == :MulAddMul && length(e.args) == 3 - local asym = e.args[2] - local bsym = e.args[3] - - local e_sub11 = copy(expr) - e_sub11.args[i] = :(MulAddMul{true, true, typeof($asym), typeof($bsym)}($asym, $bsym)) - - local e_sub10 = copy(expr) - e_sub10.args[i] = :(MulAddMul{true, false, typeof($asym), typeof($bsym)}($asym, $bsym)) - - local e_sub01 = copy(expr) - e_sub01.args[i] = :(MulAddMul{false, true, typeof($asym), typeof($bsym)}($asym, $bsym)) - - local e_sub00 = copy(expr) - e_sub00.args[i] = :(MulAddMul{false, false, typeof($asym), typeof($bsym)}($asym, $bsym)) - - local e_out = quote - if isone($asym) - if iszero($bsym) - $e_sub11 - else - $e_sub10 - end - else - if iszero($bsym) - $e_sub01 - else - $e_sub00 - end - end - end - return esc(e_out) - end - end - throw(ArgumentError("No valid MulAddMul expression found.")) -end - -MulAddMul() = MulAddMul{true,true,Bool,Bool}(true, false) - -@inline (::MulAddMul{true})(x) = x -@inline (p::MulAddMul{false})(x) = x * p.alpha -@inline (::MulAddMul{true, true})(x, _) = x -@inline (p::MulAddMul{false, true})(x, _) = x * p.alpha -@inline (p::MulAddMul{true, false})(x, y) = x + y * p.beta -@inline (p::MulAddMul{false, false})(x, y) = x * p.alpha + y * p.beta - -""" - _modify!(_add::MulAddMul, x, C, idx) - -Short-circuiting version of `C[idx] = _add(x, C[idx])`. - -Short-circuiting the indexing `C[idx]` is necessary for avoiding `UndefRefError` -when mutating an array of non-primitive numbers such as `BigFloat`. - -# Examples -```jldoctest -julia> using LinearAlgebra: MulAddMul, _modify! 
- -julia> _add = MulAddMul(1, 0); - C = Vector{BigFloat}(undef, 1); - -julia> _modify!(_add, 123, C, 1) - -julia> C -1-element Vector{BigFloat}: - 123.0 -``` -""" -@inline @propagate_inbounds function _modify!(p::MulAddMul{ais1, bis0}, - x, C, idx′) where {ais1, bis0} - # `idx′` may be an integer, a tuple of integer, or a `CartesianIndex`. - # Let `CartesianIndex` constructor normalize them so that it can be - # used uniformly. It also acts as a workaround for performance penalty - # of splatting a number (#29114): - idx = CartesianIndex(idx′) - if bis0 - C[idx] = p(x) - else - C[idx] = p(x, C[idx]) - end - return -end - -@inline function _rmul_or_fill!(C::AbstractArray, beta::Number) - if isempty(C) - return C - end - if iszero(beta) - fill!(C, zero(eltype(C))) - else - rmul!(C, beta) - end - return C -end - - -function generic_mul!(C::AbstractArray, X::AbstractArray, s::Number, alpha::Number, beta::Number) - if length(C) != length(X) - throw(DimensionMismatch(lazy"first array has length $(length(C)) which does not match the length of the second, $(length(X)).")) - end - for (IC, IX) in zip(eachindex(C), eachindex(X)) - @inbounds @stable_muladdmul _modify!(MulAddMul(alpha,beta), X[IX] * s, C, IC) - end - C -end - -function generic_mul!(C::AbstractArray, s::Number, X::AbstractArray, alpha::Number, beta::Number) - if length(C) != length(X) - throw(DimensionMismatch(LazyString(lazy"first array has length $(length(C)) which does not", - lazy"match the length of the second, $(length(X))."))) - end - for (IC, IX) in zip(eachindex(C), eachindex(X)) - @inbounds @stable_muladdmul _modify!(MulAddMul(alpha,beta), s * X[IX], C, IC) - end - C -end - -@inline mul!(C::AbstractArray, s::Number, X::AbstractArray, alpha::Number, beta::Number) = - _lscale_add!(C, s, X, alpha, beta) - -_lscale_add!(C::StridedArray, s::Number, X::StridedArray, alpha::Number, beta::Number) = - generic_mul!(C, s, X, alpha, beta) -@inline function _lscale_add!(C::AbstractArray, s::Number, X::AbstractArray, alpha::Number, beta::Number) - if axes(C) == axes(X) - if isone(alpha) - if iszero(beta) - @. C = s * X - else - @. C = s * X + C * beta - end - else - if iszero(beta) - @. C = s * X * alpha - else - @. C = s * X * alpha + C * beta - end - end - else - generic_mul!(C, s, X, alpha, beta) - end - return C -end -@inline mul!(C::AbstractArray, X::AbstractArray, s::Number, alpha::Number, beta::Number) = - _rscale_add!(C, X, s, alpha, beta) - -_rscale_add!(C::StridedArray, X::StridedArray, s::Number, alpha::Number, beta::Number) = - generic_mul!(C, X, s, alpha, beta) -@inline function _rscale_add!(C::AbstractArray, X::AbstractArray, s::Number, alpha::Number, beta::Number) - if axes(C) == axes(X) - if isone(alpha) - if iszero(beta) - @. C = X * s - else - @. C = X * s + C * beta - end - else - s_alpha = s * alpha - if iszero(beta) - @. C = X * s_alpha - else - @. C = X * s_alpha + C * beta - end - end - else - generic_mul!(C, X, s, alpha, beta) - end - return C -end - -# For better performance when input and output are the same array -# See https://github.com/JuliaLang/julia/issues/8415#issuecomment-56608729 -""" - rmul!(A::AbstractArray, b::Number) - -Scale an array `A` by a scalar `b` overwriting `A` in-place. Use -[`lmul!`](@ref) to multiply scalar from left. The scaling operation -respects the semantics of the multiplication [`*`](@ref) between an -element of `A` and `b`. In particular, this also applies to -multiplication involving non-finite numbers such as `NaN` and `±Inf`. - -!!! 
compat "Julia 1.1" - Prior to Julia 1.1, `NaN` and `±Inf` entries in `A` were treated - inconsistently. - -# Examples -```jldoctest -julia> A = [1 2; 3 4] -2×2 Matrix{Int64}: - 1 2 - 3 4 - -julia> rmul!(A, 2) -2×2 Matrix{Int64}: - 2 4 - 6 8 - -julia> rmul!([NaN], 0.0) -1-element Vector{Float64}: - NaN -``` -""" -function rmul!(X::AbstractArray, s::Number) - @simd for I in eachindex(X) - @inbounds X[I] *= s - end - X -end - - -""" - lmul!(a::Number, B::AbstractArray) - -Scale an array `B` by a scalar `a` overwriting `B` in-place. Use -[`rmul!`](@ref) to multiply scalar from right. The scaling operation -respects the semantics of the multiplication [`*`](@ref) between `a` -and an element of `B`. In particular, this also applies to -multiplication involving non-finite numbers such as `NaN` and `±Inf`. - -!!! compat "Julia 1.1" - Prior to Julia 1.1, `NaN` and `±Inf` entries in `B` were treated - inconsistently. - -# Examples -```jldoctest -julia> B = [1 2; 3 4] -2×2 Matrix{Int64}: - 1 2 - 3 4 - -julia> lmul!(2, B) -2×2 Matrix{Int64}: - 2 4 - 6 8 - -julia> lmul!(0.0, [Inf]) -1-element Vector{Float64}: - NaN -``` -""" -function lmul!(s::Number, X::AbstractArray) - @simd for I in eachindex(X) - @inbounds X[I] = s*X[I] - end - X -end - -""" - rdiv!(A::AbstractArray, b::Number) - -Divide each entry in an array `A` by a scalar `b` overwriting `A` -in-place. Use [`ldiv!`](@ref) to divide scalar from left. - -# Examples -```jldoctest -julia> A = [1.0 2.0; 3.0 4.0] -2×2 Matrix{Float64}: - 1.0 2.0 - 3.0 4.0 - -julia> rdiv!(A, 2.0) -2×2 Matrix{Float64}: - 0.5 1.0 - 1.5 2.0 -``` -""" -function rdiv!(X::AbstractArray, s::Number) - @simd for I in eachindex(X) - @inbounds X[I] /= s - end - X -end - -""" - ldiv!(a::Number, B::AbstractArray) - -Divide each entry in an array `B` by a scalar `a` overwriting `B` -in-place. Use [`rdiv!`](@ref) to divide scalar from right. - -# Examples -```jldoctest -julia> B = [1.0 2.0; 3.0 4.0] -2×2 Matrix{Float64}: - 1.0 2.0 - 3.0 4.0 - -julia> ldiv!(2.0, B) -2×2 Matrix{Float64}: - 0.5 1.0 - 1.5 2.0 -``` -""" -function ldiv!(s::Number, X::AbstractArray) - @simd for I in eachindex(X) - @inbounds X[I] = s\X[I] - end - X -end -ldiv!(Y::AbstractArray, s::Number, X::AbstractArray) = Y .= s .\ X - -# Generic fallback. This assumes that B and Y have the same sizes. -ldiv!(Y::AbstractArray, A::AbstractMatrix, B::AbstractArray) = ldiv!(A, copyto!(Y, B)) - - -""" - cross(x, y) - ×(x,y) - -Compute the cross product of two 3-vectors. - -# Examples -```jldoctest -julia> a = [0;1;0] -3-element Vector{Int64}: - 0 - 1 - 0 - -julia> b = [0;0;1] -3-element Vector{Int64}: - 0 - 0 - 1 - -julia> cross(a,b) -3-element Vector{Int64}: - 1 - 0 - 0 -``` -""" -function cross(a::AbstractVector, b::AbstractVector) - if !(length(a) == length(b) == 3) - throw(DimensionMismatch("cross product is only defined for vectors of length 3")) - end - a1, a2, a3 = a - b1, b2, b3 = b - [a2*b3-a3*b2, a3*b1-a1*b3, a1*b2-a2*b1] -end - -""" - triu(M, k::Integer = 0) - -Return the upper triangle of `M` starting from the `k`th superdiagonal. 
- -# Examples -```jldoctest -julia> a = fill(1.0, (4,4)) -4×4 Matrix{Float64}: - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - -julia> triu(a,3) -4×4 Matrix{Float64}: - 0.0 0.0 0.0 1.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - -julia> triu(a,-3) -4×4 Matrix{Float64}: - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 -``` -""" -function triu(M::AbstractMatrix, k::Integer = 0) - d = similar(M) - A = triu!(d,k) - if iszero(k) - copytrito!(A, M, 'U') - else - for col in axes(A,2) - rows = firstindex(A,1):min(col-k, lastindex(A,1)) - A[rows, col] = @view M[rows, col] - end - end - return A -end - -""" - tril(M, k::Integer = 0) - -Return the lower triangle of `M` starting from the `k`th superdiagonal. - -# Examples -```jldoctest -julia> a = fill(1.0, (4,4)) -4×4 Matrix{Float64}: - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - -julia> tril(a,3) -4×4 Matrix{Float64}: - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - 1.0 1.0 1.0 1.0 - -julia> tril(a,-3) -4×4 Matrix{Float64}: - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 -``` -""" -function tril(M::AbstractMatrix,k::Integer=0) - d = similar(M) - A = tril!(d,k) - if iszero(k) - copytrito!(A, M, 'L') - else - for col in axes(A,2) - rows = max(firstindex(A,1),col-k):lastindex(A,1) - A[rows, col] = @view M[rows, col] - end - end - return A -end - -""" - triu!(M) - -Upper triangle of a matrix, overwriting `M` in the process. -See also [`triu`](@ref). -""" -triu!(M::AbstractMatrix) = triu!(M,0) - -""" - tril!(M) - -Lower triangle of a matrix, overwriting `M` in the process. -See also [`tril`](@ref). -""" -tril!(M::AbstractMatrix) = tril!(M,0) - -diag(A::AbstractVector) = throw(ArgumentError("use diagm instead of diag to construct a diagonal matrix")) - -########################################################################################### -# Dot products and norms - -# special cases of norm; note that they don't need to handle isempty(x) -generic_normMinusInf(x) = float(mapreduce(norm, min, x)) - -generic_normInf(x) = float(mapreduce(norm, max, x)) - -generic_norm1(x) = mapreduce(float ∘ norm, +, x) - -# faster computation of norm(x)^2, avoiding overflow for integers -norm_sqr(x) = norm(x)^2 -norm_sqr(x::Number) = abs2(x) -norm_sqr(x::Union{T,Complex{T},Rational{T}}) where {T<:Integer} = abs2(float(x)) - -function generic_norm2(x) - maxabs = normInf(x) - (ismissing(maxabs) || iszero(maxabs) || isinf(maxabs)) && return maxabs - (v, s) = iterate(x)::Tuple - T = typeof(maxabs) - if isfinite(length(x)*maxabs*maxabs) && !iszero(maxabs*maxabs) # Scaling not necessary - sum::promote_type(Float64, T) = norm_sqr(v) - while true - y = iterate(x, s) - y === nothing && break - (v, s) = y - sum += norm_sqr(v) - end - ismissing(sum) && return missing - return convert(T, sqrt(sum)) - else - sum = abs2(norm(v)/maxabs) - while true - y = iterate(x, s) - y === nothing && break - (v, s) = y - sum += (norm(v)/maxabs)^2 - end - ismissing(sum) && return missing - return convert(T, maxabs*sqrt(sum)) - end -end - -# Compute L_p norm ‖x‖ₚ = sum(abs(x).^p)^(1/p) -# (Not technically a "norm" for p < 1.) -function generic_normp(x, p) - (v, s) = iterate(x)::Tuple - if p > 1 || p < -1 # might need to rescale to avoid overflow - maxabs = p > 1 ? 
normInf(x) : normMinusInf(x) - (ismissing(maxabs) || iszero(maxabs) || isinf(maxabs)) && return maxabs - T = typeof(maxabs) - else - T = typeof(float(norm(v))) - end - spp::promote_type(Float64, T) = p - if -1 <= p <= 1 || (isfinite(length(x)*maxabs^spp) && !iszero(maxabs^spp)) # scaling not necessary - sum::promote_type(Float64, T) = norm(v)^spp - while true - y = iterate(x, s) - y === nothing && break - (v, s) = y - ismissing(v) && return missing - sum += norm(v)^spp - end - return convert(T, sum^inv(spp)) - else # rescaling - sum = (norm(v)/maxabs)^spp - ismissing(sum) && return missing - while true - y = iterate(x, s) - y === nothing && break - (v, s) = y - ismissing(v) && return missing - sum += (norm(v)/maxabs)^spp - end - return convert(T, maxabs*sum^inv(spp)) - end -end - -normMinusInf(x) = generic_normMinusInf(x) -normInf(x) = generic_normInf(x) -norm1(x) = generic_norm1(x) -norm2(x) = generic_norm2(x) -normp(x, p) = generic_normp(x, p) - - -""" - norm(A, p::Real=2) - -For any iterable container `A` (including arrays of any dimension) of numbers (or any -element type for which `norm` is defined), compute the `p`-norm (defaulting to `p=2`) as if -`A` were a vector of the corresponding length. - -The `p`-norm is defined as -```math -\\|A\\|_p = \\left( \\sum_{i=1}^n | a_i | ^p \\right)^{1/p} -``` -with ``a_i`` the entries of ``A``, ``| a_i |`` the [`norm`](@ref) of ``a_i``, and -``n`` the length of ``A``. Since the `p`-norm is computed using the [`norm`](@ref)s -of the entries of `A`, the `p`-norm of a vector of vectors is not compatible with -the interpretation of it as a block vector in general if `p != 2`. - -`p` can assume any numeric value (even though not all values produce a -mathematically valid vector norm). In particular, `norm(A, Inf)` returns the largest value -in `abs.(A)`, whereas `norm(A, -Inf)` returns the smallest. If `A` is a matrix and `p=2`, -then this is equivalent to the Frobenius norm. - -The second argument `p` is not necessarily a part of the interface for `norm`, i.e. a custom -type may only implement `norm(A)` without second argument. - -Use [`opnorm`](@ref) to compute the operator norm of a matrix. 
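A quick check of why `generic_norm2` and `generic_normp` above rescale by the largest magnitude: the naive sum of squares overflows for large entries, while the rescaled accumulation stays finite (illustrative only):

```julia
using LinearAlgebra

x = [1e300, 1e300]

# the textbook formula overflows to Inf ...
@assert sqrt(sum(abs2, x)) == Inf

# ... but norm rescales by maximum(abs, x) internally and stays finite
@assert norm(x) ≈ 1e300 * sqrt(2)

# the same rescaling protects the general p-norm branch
@assert norm(x, 4) ≈ 1e300 * 2^(1/4)
```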
- -# Examples -```jldoctest -julia> v = [3, -2, 6] -3-element Vector{Int64}: - 3 - -2 - 6 - -julia> norm(v) -7.0 - -julia> norm(v, 1) -11.0 - -julia> norm(v, Inf) -6.0 - -julia> norm([1 2 3; 4 5 6; 7 8 9]) -16.881943016134134 - -julia> norm([1 2 3 4 5 6 7 8 9]) -16.881943016134134 - -julia> norm(1:9) -16.881943016134134 - -julia> norm(hcat(v,v), 1) == norm(vcat(v,v), 1) != norm([v,v], 1) -true - -julia> norm(hcat(v,v), 2) == norm(vcat(v,v), 2) == norm([v,v], 2) -true - -julia> norm(hcat(v,v), Inf) == norm(vcat(v,v), Inf) != norm([v,v], Inf) -true -``` -""" -Base.@constprop :aggressive function norm(itr, p::Real) - isempty(itr) && return float(norm(zero(eltype(itr)))) - norm_recursive_check(itr) - if p == 2 - return norm2(itr) - elseif p == 1 - return norm1(itr) - elseif p == Inf - return normInf(itr) - elseif p == 0 - return typeof(float(norm(first(itr))))(count(!iszero, itr)) - elseif p == -Inf - return normMinusInf(itr) - else - normp(itr, p) - end -end -# Split into a separate method to reduce latency in norm(x) calls (#56330) -function norm(itr) - isempty(itr) && return float(norm(zero(eltype(itr)))) - norm_recursive_check(itr) - norm2(itr) -end -function norm_recursive_check(itr) - v, s = iterate(itr) - !isnothing(s) && !ismissing(v) && v == itr && throw(ArgumentError( - "cannot evaluate norm recursively if the type of the initial element is identical to that of the container")) - return nothing -end - -""" - norm(x::Number, p::Real=2) - -For numbers, return ``\\left( |x|^p \\right)^{1/p}``. - -# Examples -```jldoctest -julia> norm(2, 1) -2.0 - -julia> norm(-2, 1) -2.0 - -julia> norm(2, 2) -2.0 - -julia> norm(-2, 2) -2.0 - -julia> norm(2, Inf) -2.0 - -julia> norm(-2, Inf) -2.0 -``` -""" -@inline function norm(x::Number, p::Real=2) - afx = abs(float(x)) - if p == 0 - if iszero(x) - return zero(afx) - elseif !isnan(x) - return oneunit(afx) - else - return afx - end - else - return afx - end -end -norm(::Missing, p::Real=2) = missing - -# special cases of opnorm -function opnorm1(A::AbstractMatrix{T}) where T - require_one_based_indexing(A) - Tnorm = typeof(float(real(zero(T)))) - Tsum = promote_type(Float64, Tnorm) - nrm::Tsum = 0 - for j in axes(A,2) - nrmj::Tsum = 0 - for i in axes(A,1) - nrmj += norm(@inbounds A[i,j]) - end - nrm = max(nrm,nrmj) - end - return convert(Tnorm, nrm) -end - -function opnorm2(A::AbstractMatrix{T}) where T - require_one_based_indexing(A) - m,n = size(A) - Tnorm = typeof(float(real(zero(T)))) - if m == 0 || n == 0 return zero(Tnorm) end - if m == 1 || n == 1 return norm2(A) end - return svdvals(A)[1] -end - -function opnormInf(A::AbstractMatrix{T}) where T - require_one_based_indexing(A) - Tnorm = typeof(float(real(zero(T)))) - Tsum = promote_type(Float64, Tnorm) - nrm::Tsum = 0 - for i in axes(A,1) - nrmi::Tsum = 0 - for j in axes(A,2) - nrmi += norm(@inbounds A[i,j]) - end - nrm = max(nrm,nrmi) - end - return convert(Tnorm, nrm) -end - - -""" - opnorm(A::AbstractMatrix, p::Real=2) - -Compute the operator norm (or matrix norm) induced by the vector `p`-norm, -where valid values of `p` are `1`, `2`, or `Inf`. (Note that for sparse matrices, -`p=2` is currently not implemented.) Use [`norm`](@ref) to compute the Frobenius -norm. - -When `p=1`, the operator norm is the maximum absolute column sum of `A`: -```math -\\|A\\|_1 = \\max_{1 ≤ j ≤ n} \\sum_{i=1}^m | a_{ij} | -``` -with ``a_{ij}`` the entries of ``A``, and ``m`` and ``n`` its dimensions. - -When `p=2`, the operator norm is the spectral norm, equal to the largest -singular value of `A`. 
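A short illustration of the distinction drawn above between the entrywise Frobenius norm (`norm`) and the induced operator norm (`opnorm`), using arbitrary data:

```julia
using LinearAlgebra

A = [1.0 2.0; 3.0 4.0]

# norm(A) is the Frobenius norm of the entries, opnorm(A) the induced
# (spectral) 2-norm; the spectral norm never exceeds the Frobenius norm
@assert norm(A) ≈ sqrt(sum(abs2, A))
@assert opnorm(A) ≈ maximum(svdvals(A))
@assert opnorm(A) <= norm(A)
```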
- -When `p=Inf`, the operator norm is the maximum absolute row sum of `A`: -```math -\\|A\\|_\\infty = \\max_{1 ≤ i ≤ m} \\sum _{j=1}^n | a_{ij} | -``` - -# Examples -```jldoctest -julia> A = [1 -2 -3; 2 3 -1] -2×3 Matrix{Int64}: - 1 -2 -3 - 2 3 -1 - -julia> opnorm(A, Inf) -6.0 - -julia> opnorm(A, 1) -5.0 -``` -""" -Base.@constprop :aggressive function opnorm(A::AbstractMatrix, p::Real) - if p == 2 - return opnorm2(A) - elseif p == 1 - return opnorm1(A) - elseif p == Inf - return opnormInf(A) - else - throw(ArgumentError(lazy"invalid p-norm p=$p. Valid: 1, 2, Inf")) - end -end -opnorm(A::AbstractMatrix) = opnorm2(A) - -""" - opnorm(x::Number, p::Real=2) - -For numbers, return ``\\left( |x|^p \\right)^{1/p}``. -This is equivalent to [`norm`](@ref). -""" -@inline opnorm(x::Number, p::Real=2) = norm(x, p) - -""" - opnorm(A::Adjoint{<:Any,<:AbstractVector}, q::Real=2) - opnorm(A::Transpose{<:Any,<:AbstractVector}, q::Real=2) - -For Adjoint/Transpose-wrapped vectors, return the operator ``q``-norm of `A`, which is -equivalent to the `p`-norm with value `p = q/(q-1)`. They coincide at `p = q = 2`. -Use [`norm`](@ref) to compute the `p` norm of `A` as a vector. - -The difference in norm between a vector space and its dual arises to preserve -the relationship between duality and the dot product, and the result is -consistent with the operator `p`-norm of a `1 × n` matrix. - -# Examples -```jldoctest -julia> v = [1; im]; - -julia> vc = v'; - -julia> opnorm(vc, 1) -1.0 - -julia> norm(vc, 1) -2.0 - -julia> norm(v, 1) -2.0 - -julia> opnorm(vc, 2) -1.4142135623730951 - -julia> norm(vc, 2) -1.4142135623730951 - -julia> norm(v, 2) -1.4142135623730951 - -julia> opnorm(vc, Inf) -2.0 - -julia> norm(vc, Inf) -1.0 - -julia> norm(v, Inf) -1.0 -``` -""" -opnorm(v::TransposeAbsVec, q::Real) = q == Inf ? norm(v.parent, 1) : norm(v.parent, q/(q-1)) -opnorm(v::AdjointAbsVec, q::Real) = q == Inf ? norm(conj(v.parent), 1) : norm(conj(v.parent), q/(q-1)) -opnorm(v::AdjointAbsVec) = norm(conj(v.parent)) -opnorm(v::TransposeAbsVec) = norm(v.parent) - -norm(v::AdjOrTrans, p::Real) = norm(v.parent, p) - -""" - dot(x, y) - x ⋅ y - -Compute the dot product between two vectors. For complex vectors, the first -vector is conjugated. - -`dot` also works on arbitrary iterable objects, including arrays of any dimension, -as long as `dot` is defined on the elements. - -`dot` is semantically equivalent to `sum(dot(vx,vy) for (vx,vy) in zip(x, y))`, -with the added restriction that the arguments must have equal lengths. - -`x ⋅ y` (where `⋅` can be typed by tab-completing `\\cdot` in the REPL) is a synonym for -`dot(x, y)`. 
- -# Examples -```jldoctest -julia> dot([1; 1], [2; 3]) -5 - -julia> dot([im; im], [1; 1]) -0 - 2im - -julia> dot(1:5, 2:6) -70 - -julia> x = fill(2., (5,5)); - -julia> y = fill(3., (5,5)); - -julia> dot(x, y) -150.0 -``` -""" -function dot end - -function dot(x, y) # arbitrary iterables - ix = iterate(x) - iy = iterate(y) - if ix === nothing - if iy !== nothing - throw(DimensionMismatch("x and y are of different lengths!")) - end - return dot(zero(eltype(x)), zero(eltype(y))) - end - if iy === nothing - throw(DimensionMismatch("x and y are of different lengths!")) - end - (vx, xs) = ix - (vy, ys) = iy - typeof(vx) == typeof(x) && typeof(vy) == typeof(y) && throw(ArgumentError( - "cannot evaluate dot recursively if the type of an element is identical to that of the container")) - s = dot(vx, vy) - while true - ix = iterate(x, xs) - iy = iterate(y, ys) - ix === nothing && break - iy === nothing && break - (vx, xs), (vy, ys) = ix, iy - s += dot(vx, vy) - end - if !(iy === nothing && ix === nothing) - throw(DimensionMismatch("x and y are of different lengths!")) - end - return s -end - -dot(x::Number, y::Number) = conj(x) * y - -function dot(x::AbstractArray, y::AbstractArray) - lx = length(x) - if lx != length(y) - throw(DimensionMismatch(lazy"first array has length $(lx) which does not match the length of the second, $(length(y)).")) - end - if lx == 0 - return dot(zero(eltype(x)), zero(eltype(y))) - end - s = zero(dot(first(x), first(y))) - for (Ix, Iy) in zip(eachindex(x), eachindex(y)) - s += dot(@inbounds(x[Ix]), @inbounds(y[Iy])) - end - s -end - -function dot(x::Adjoint{<:Union{Real,Complex}}, y::Adjoint{<:Union{Real,Complex}}) - return conj(dot(parent(x), parent(y))) -end -dot(x::Transpose, y::Transpose) = dot(parent(x), parent(y)) - -""" - dot(x, A, y) - -Compute the generalized dot product `dot(x, A*y)` between two vectors `x` and `y`, -without storing the intermediate result of `A*y`. As for the two-argument -[`dot(_,_)`](@ref), this acts recursively. Moreover, for complex vectors, the -first vector is conjugated. - -!!! compat "Julia 1.4" - Three-argument `dot` requires at least Julia 1.4. - -# Examples -```jldoctest -julia> dot([1; 1], [1 2; 3 4], [2; 3]) -26 - -julia> dot(1:5, reshape(1:25, 5, 5), 2:6) -4850 - -julia> ⋅(1:5, reshape(1:25, 5, 5), 2:6) == dot(1:5, reshape(1:25, 5, 5), 2:6) -true -``` -""" -dot(x, A, y) = dot(x, A*y) # generic fallback for cases that are not covered by specialized methods - -function dot(x::AbstractVector, A::AbstractMatrix, y::AbstractVector) - (axes(x)..., axes(y)...) == axes(A) || throw(DimensionMismatch()) - T = typeof(dot(first(x), first(A), first(y))) - s = zero(T) - i₁ = first(eachindex(x)) - x₁ = first(x) - for j in eachindex(y) - yj = @inbounds y[j] - if !iszero(yj) - temp = zero(adjoint(@inbounds A[i₁,j]) * x₁) - @inbounds @simd for i in eachindex(x) - temp += adjoint(A[i,j]) * x[i] - end - s += dot(temp, yj) - end - end - return s -end -dot(x::AbstractVector, adjA::Adjoint, y::AbstractVector) = adjoint(dot(y, adjA.parent, x)) -dot(x::AbstractVector, transA::Transpose{<:Real}, y::AbstractVector) = adjoint(dot(y, transA.parent, x)) - -########################################################################################### - -""" - rank(A::AbstractMatrix; atol::Real=0, rtol::Real=atol>0 ? 0 : n*ϵ) - rank(A::AbstractMatrix, rtol::Real) - -Compute the numerical rank of a matrix by counting how many outputs of -`svdvals(A)` are greater than `max(atol, rtol*σ₁)` where `σ₁` is `A`'s largest -calculated singular value. 
`atol` and `rtol` are the absolute and relative -tolerances, respectively. The default relative tolerance is `n*ϵ`, where `n` -is the size of the smallest dimension of `A`, and `ϵ` is the [`eps`](@ref) of -the element type of `A`. - -!!! note - Numerical rank can be a sensitive and imprecise characterization of - ill-conditioned matrices with singular values that are close to the threshold - tolerance `max(atol, rtol*σ₁)`. In such cases, slight perturbations to the - singular-value computation or to the matrix can change the result of `rank` - by pushing one or more singular values across the threshold. These variations - can even occur due to changes in floating-point errors between different Julia - versions, architectures, compilers, or operating systems. - -!!! compat "Julia 1.1" - The `atol` and `rtol` keyword arguments requires at least Julia 1.1. - In Julia 1.0 `rtol` is available as a positional argument, but this - will be deprecated in Julia 2.0. - -# Examples -```jldoctest -julia> rank(Matrix(I, 3, 3)) -3 - -julia> rank(diagm(0 => [1, 0, 2])) -2 - -julia> rank(diagm(0 => [1, 0.001, 2]), rtol=0.1) -2 - -julia> rank(diagm(0 => [1, 0.001, 2]), rtol=0.00001) -3 - -julia> rank(diagm(0 => [1, 0.001, 2]), atol=1.5) -1 -``` -""" -function rank(A::AbstractMatrix; atol::Real = 0.0, rtol::Real = (min(size(A)...)*eps(real(float(one(eltype(A))))))*iszero(atol)) - isempty(A) && return 0 # 0-dimensional case - s = svdvals(A) - tol = max(atol, rtol*s[1]) - count(>(tol), s) -end -rank(x::Union{Number,AbstractVector}) = iszero(x) ? 0 : 1 - -""" - tr(M) - -Matrix trace. Sums the diagonal elements of `M`. - -# Examples -```jldoctest -julia> A = [1 2; 3 4] -2×2 Matrix{Int64}: - 1 2 - 3 4 - -julia> tr(A) -5 -``` -""" -function tr(A) - checksquare(A) - sum(diag(A)) -end -tr(x::Number) = x - -#kron(a::AbstractVector, b::AbstractVector) -#kron(a::AbstractMatrix{T}, b::AbstractMatrix{S}) where {T,S} - -#det(a::AbstractMatrix) - -""" - inv(M) - -Matrix inverse. Computes matrix `N` such that -`M * N = I`, where `I` is the identity matrix. -Computed by solving the left-division -`N = M \\ I`. 
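The left-division form mentioned above can be checked directly; a minimal sketch (matrix values arbitrary):

```julia
using LinearAlgebra

M = [2.0 5.0; 1.0 3.0]
N = M \ Matrix{Float64}(I, 2, 2)   # solve M*N = I by left division
N ≈ inv(M)                          # true
```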
- -# Examples -```jldoctest -julia> M = [2 5; 1 3] -2×2 Matrix{Int64}: - 2 5 - 1 3 - -julia> N = inv(M) -2×2 Matrix{Float64}: - 3.0 -5.0 - -1.0 2.0 - -julia> M*N == N*M == Matrix(I, 2, 2) -true -``` -""" -function inv(A::AbstractMatrix{T}) where T - n = checksquare(A) - S = typeof(zero(T)/one(T)) # dimensionful - S0 = typeof(zero(T)/oneunit(T)) # dimensionless - dest = Matrix{S0}(I, n, n) - ldiv!(factorize(convert(AbstractMatrix{S}, A)), dest) -end -inv(A::Adjoint) = adjoint(inv(parent(A))) -inv(A::Transpose) = transpose(inv(parent(A))) - -pinv(v::AbstractVector{T}, tol::Real = real(zero(T))) where {T<:Real} = _vectorpinv(transpose, v, tol) -pinv(v::AbstractVector{T}, tol::Real = real(zero(T))) where {T<:Complex} = _vectorpinv(adjoint, v, tol) -pinv(v::AbstractVector{T}, tol::Real = real(zero(T))) where {T} = _vectorpinv(adjoint, v, tol) -function _vectorpinv(dualfn::Tf, v::AbstractVector{Tv}, tol) where {Tv,Tf} - res = dualfn(similar(v, typeof(zero(Tv) / (abs2(one(Tv)) + abs2(one(Tv)))))) - den = sum(abs2, v) - # as tol is the threshold relative to the maximum singular value, for a vector with - # single singular value σ=√den, σ ≦ tol*σ is equivalent to den=0 ∨ tol≥1 - if iszero(den) || tol >= one(tol) - fill!(res, zero(eltype(res))) - else - res .= dualfn(v) ./ den - end - return res -end - -# this method is just an optimization: literal negative powers of A are -# already turned by literal_pow into powers of inv(A), but for A^-1 this -# would turn into inv(A)^1 = copy(inv(A)), which makes an extra copy. -@inline Base.literal_pow(::typeof(^), A::AbstractMatrix, ::Val{-1}) = inv(A) - -""" - \\(A, B) - -Matrix division using a polyalgorithm. For input matrices `A` and `B`, the result `X` is -such that `A*X == B` when `A` is square. The solver that is used depends upon the structure -of `A`. If `A` is upper or lower triangular (or diagonal), no factorization of `A` is -required and the system is solved with either forward or backward substitution. -For non-triangular square matrices, an LU factorization is used. - -For rectangular `A` the result is the minimum-norm least squares solution computed by a -pivoted QR factorization of `A` and a rank estimate of `A` based on the R factor. - -When `A` is sparse, a similar polyalgorithm is used. For indefinite matrices, the `LDLt` -factorization does not use pivoting during the numerical factorization and therefore the -procedure can fail even for invertible matrices. - -See also: [`factorize`](@ref), [`pinv`](@ref). - -# Examples -```jldoctest -julia> A = [1 0; 1 -2]; B = [32; -4]; - -julia> X = A \\ B -2-element Vector{Float64}: - 32.0 - 18.0 - -julia> A * X == B -true -``` -""" -function (\)(A::AbstractMatrix, B::AbstractVecOrMat) - require_one_based_indexing(A, B) - m, n = size(A) - if m == n - if istril(A) - if istriu(A) - return Diagonal(A) \ B - else - return LowerTriangular(A) \ B - end - end - if istriu(A) - return UpperTriangular(A) \ B - end - return lu(A) \ B - end - return qr(A, ColumnNorm()) \ B -end - -(\)(a::AbstractVector, b::AbstractArray) = pinv(a) * b -""" - A / B - -Matrix right-division: `A / B` is equivalent to `(B' \\ A')'` where [`\\`](@ref) is the left-division operator. -For square matrices, the result `X` is such that `A == X*B`. - -See also: [`rdiv!`](@ref). 
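The adjoint identity stated above is easy to verify numerically; a minimal sketch (matrix values arbitrary):

```julia
using LinearAlgebra

A = [1.0 2.0; 3.0 4.0]
B = [2.0 0.0; 1.0 3.0]
X = A / B                 # right division
X ≈ copy((B' \ A')')      # the equivalent left-division form
A ≈ X * B                 # X satisfies A == X*B for square B
```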
- -# Examples -```jldoctest -julia> A = Float64[1 4 5; 3 9 2]; B = Float64[1 4 2; 3 4 2; 8 7 1]; - -julia> X = A / B -2×3 Matrix{Float64}: - -0.65 3.75 -1.2 - 3.25 -2.75 1.0 - -julia> isapprox(A, X*B) -true - -julia> isapprox(X, A*pinv(B)) -true -``` -""" -function (/)(A::AbstractVecOrMat, B::AbstractVecOrMat) - size(A,2) != size(B,2) && throw(DimensionMismatch("Both inputs should have the same number of columns")) - return copy(adjoint(adjoint(B) \ adjoint(A))) -end -# \(A::StridedMatrix,x::Number) = inv(A)*x Should be added at some point when the old elementwise version has been deprecated long enough -# /(x::Number,A::StridedMatrix) = x*inv(A) -/(x::Number, v::AbstractVector) = x*pinv(v) - -cond(x::Number) = iszero(x) ? Inf : 1.0 -cond(x::Number, p) = cond(x) - -#Skeel condition numbers -condskeel(A::AbstractMatrix, p::Real=Inf) = opnorm(abs.(inv(A))*abs.(A), p) - -""" - condskeel(M, [x, p::Real=Inf]) - -```math -\\kappa_S(M, p) = \\left\\Vert \\left\\vert M \\right\\vert \\left\\vert M^{-1} \\right\\vert \\right\\Vert_p \\\\ -\\kappa_S(M, x, p) = \\frac{\\left\\Vert \\left\\vert M \\right\\vert \\left\\vert M^{-1} \\right\\vert \\left\\vert x \\right\\vert \\right\\Vert_p}{\\left \\Vert x \\right \\Vert_p} -``` - -Skeel condition number ``\\kappa_S`` of the matrix `M`, optionally with respect to the -vector `x`, as computed using the operator `p`-norm. ``\\left\\vert M \\right\\vert`` -denotes the matrix of (entry wise) absolute values of ``M``; -``\\left\\vert M \\right\\vert_{ij} = \\left\\vert M_{ij} \\right\\vert``. -Valid values for `p` are `1`, `2` and `Inf` (default). - -This quantity is also known in the literature as the Bauer condition number, relative -condition number, or componentwise relative condition number. -""" -function condskeel(A::AbstractMatrix, x::AbstractVector, p::Real=Inf) - norm(abs.(inv(A))*(abs.(A)*abs.(x)), p) / norm(x, p) -end - -issymmetric(A::AbstractMatrix{<:Real}) = ishermitian(A) - -""" - issymmetric(A) -> Bool - -Test whether a matrix is symmetric. - -# Examples -```jldoctest -julia> a = [1 2; 2 -1] -2×2 Matrix{Int64}: - 1 2 - 2 -1 - -julia> issymmetric(a) -true - -julia> b = [1 im; -im 1] -2×2 Matrix{Complex{Int64}}: - 1+0im 0+1im - 0-1im 1+0im - -julia> issymmetric(b) -false -``` -""" -function issymmetric(A::AbstractMatrix) - indsm, indsn = axes(A) - if indsm != indsn - return false - end - for i = first(indsn):last(indsn), j = (i):last(indsn) - if A[i,j] != transpose(A[j,i]) - return false - end - end - return true -end - -issymmetric(x::Number) = x == x - -""" - ishermitian(A) -> Bool - -Test whether a matrix is Hermitian. 
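Because the generic `issymmetric` loop above compares `A[i,j]` with `transpose(A[j,i])`, the check also applies blockwise to matrices of matrices; a small sketch with arbitrary blocks:

```julia
using LinearAlgebra

a = [1 2; 2 1]; c = [5 6; 6 5]; b = [0 1; 2 3]
Ablock = Matrix{Matrix{Int}}(undef, 2, 2)
Ablock[1, 1] = a;             Ablock[1, 2] = b
Ablock[2, 1] = transpose(b);  Ablock[2, 2] = c
issymmetric(Ablock)   # true: diagonal blocks are symmetric, off-diagonal blocks are transposes
```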
- -# Examples -```jldoctest -julia> a = [1 2; 2 -1] -2×2 Matrix{Int64}: - 1 2 - 2 -1 - -julia> ishermitian(a) -true - -julia> b = [1 im; -im 1] -2×2 Matrix{Complex{Int64}}: - 1+0im 0+1im - 0-1im 1+0im - -julia> ishermitian(b) -true -``` -""" -function ishermitian(A::AbstractMatrix) - indsm, indsn = axes(A) - if indsm != indsn - return false - end - for i = indsn, j = i:last(indsn) - if A[i,j] != adjoint(A[j,i]) - return false - end - end - return true -end - -ishermitian(x::Number) = (x == conj(x)) - -# helper function equivalent to `iszero(v)`, but potentially without the fast exit feature -# of `all` if this improves performance -_iszero(V) = iszero(V) -# A Base.FastContiguousSubArray view of a StridedArray -FastContiguousSubArrayStrided{T,N,P<:StridedArray,I<:Tuple{AbstractUnitRange, Vararg{Any}}} = Base.SubArray{T,N,P,I,true} -# using mapreduce instead of all permits vectorization -_iszero(V::FastContiguousSubArrayStrided) = mapreduce(iszero, &, V, init=true) - -""" - istriu(A::AbstractMatrix, k::Integer = 0) -> Bool - -Test whether `A` is upper triangular starting from the `k`th superdiagonal. - -# Examples -```jldoctest -julia> a = [1 2; 2 -1] -2×2 Matrix{Int64}: - 1 2 - 2 -1 - -julia> istriu(a) -false - -julia> istriu(a, -1) -true - -julia> c = [1 1 1; 1 1 1; 0 1 1] -3×3 Matrix{Int64}: - 1 1 1 - 1 1 1 - 0 1 1 - -julia> istriu(c) -false - -julia> istriu(c, -1) -true -``` -""" -istriu(A::AbstractMatrix, k::Integer = 0) = _isbanded_impl(A, k, size(A,2)-1) -istriu(x::Number) = true - -""" - istril(A::AbstractMatrix, k::Integer = 0) -> Bool - -Test whether `A` is lower triangular starting from the `k`th superdiagonal. - -# Examples -```jldoctest -julia> a = [1 2; 2 -1] -2×2 Matrix{Int64}: - 1 2 - 2 -1 - -julia> istril(a) -false - -julia> istril(a, 1) -true - -julia> c = [1 1 0; 1 1 1; 1 1 1] -3×3 Matrix{Int64}: - 1 1 0 - 1 1 1 - 1 1 1 - -julia> istril(c) -false - -julia> istril(c, 1) -true -``` -""" -istril(A::AbstractMatrix, k::Integer = 0) = _isbanded_impl(A, -size(A,1)+1, k) -istril(x::Number) = true - -""" - isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) -> Bool - -Test whether `A` is banded with lower bandwidth starting from the `kl`th superdiagonal -and upper bandwidth extending through the `ku`th superdiagonal. - -# Examples -```jldoctest -julia> a = [1 2; 2 -1] -2×2 Matrix{Int64}: - 1 2 - 2 -1 - -julia> LinearAlgebra.isbanded(a, 0, 0) -false - -julia> LinearAlgebra.isbanded(a, -1, 1) -true - -julia> b = [1 0; -im -1] # lower bidiagonal -2×2 Matrix{Complex{Int64}}: - 1+0im 0+0im - 0-1im -1+0im - -julia> LinearAlgebra.isbanded(b, 0, 0) -false - -julia> LinearAlgebra.isbanded(b, -1, 0) -true -``` -""" -isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) = _isbanded(A, kl, ku) -_isbanded(A::AbstractMatrix, kl::Integer, ku::Integer) = istriu(A, kl) && istril(A, ku) -# Performance optimization for StridedMatrix by better utilizing cache locality -# The istriu and istril loops are merged -# the additional indirection allows us to reuse the isbanded loop within istriu/istril -# without encountering cycles -_isbanded(A::StridedMatrix, kl::Integer, ku::Integer) = _isbanded_impl(A, kl, ku) -function _isbanded_impl(A, kl, ku) - Base.require_one_based_indexing(A) - - #= - We split the column range into four possible groups, depending on the values of kl and ku. - - The first is the bottom left triangle, where bands below kl must be zero, - but there are no bands above ku in that column. - - The second is where there are both bands below kl and above ku in the column. 
- These are the middle columns typically. - - The third is the top right, where there are bands above ku but no bands below kl - in the column. - - The fourth is mainly relevant for wide matrices, where there is a block to the right - beyond ku, where the elements should all be zero. The reason we separate this from the - third group is that we may loop over all the rows using A[:, col] instead of A[rowrange, col], - which is usually faster. - =# - - last_col_nonzeroblocks = size(A,1) + ku # fully zero rectangular block beyond this column - last_col_emptytoprows = ku + 1 # empty top rows before this column - last_col_nonemptybottomrows = size(A,1) + kl - 1 # empty bottom rows after this column - - colrange_onlybottomrows = firstindex(A,2):min(last_col_nonemptybottomrows, last_col_emptytoprows) - colrange_topbottomrows = max(last_col_emptytoprows, last(colrange_onlybottomrows))+1:last_col_nonzeroblocks - colrange_onlytoprows_nonzero = last(colrange_topbottomrows)+1:last_col_nonzeroblocks - colrange_zero_block = last_col_nonzeroblocks+1:lastindex(A,2) - - for col in intersect(axes(A,2), colrange_onlybottomrows) # only loop over the bottom rows - botrowinds = max(firstindex(A,1), col-kl+1):lastindex(A,1) - bottomrows = @view A[botrowinds, col] - _iszero(bottomrows) || return false - end - for col in intersect(axes(A,2), colrange_topbottomrows) - toprowinds = firstindex(A,1):min(col-ku-1, lastindex(A,1)) - toprows = @view A[toprowinds, col] - _iszero(toprows) || return false - botrowinds = max(firstindex(A,1), col-kl+1):lastindex(A,1) - bottomrows = @view A[botrowinds, col] - _iszero(bottomrows) || return false - end - for col in intersect(axes(A,2), colrange_onlytoprows_nonzero) - toprowinds = firstindex(A,1):min(col-ku-1, lastindex(A,1)) - toprows = @view A[toprowinds, col] - _iszero(toprows) || return false - end - for col in intersect(axes(A,2), colrange_zero_block) - _iszero(@view A[:, col]) || return false - end - return true -end - -""" - isdiag(A) -> Bool - -Test whether a matrix is diagonal in the sense that `iszero(A[i,j])` is true unless `i == j`. -Note that it is not necessary for `A` to be square; -if you would also like to check that, you need to check that `size(A, 1) == size(A, 2)`. - -# Examples -```jldoctest -julia> a = [1 2; 2 -1] -2×2 Matrix{Int64}: - 1 2 - 2 -1 - -julia> isdiag(a) -false - -julia> b = [im 0; 0 -im] -2×2 Matrix{Complex{Int64}}: - 0+1im 0+0im - 0+0im 0-1im - -julia> isdiag(b) -true - -julia> c = [1 0 0; 0 2 0] -2×3 Matrix{Int64}: - 1 0 0 - 0 2 0 - -julia> isdiag(c) -true - -julia> d = [1 0 0; 0 2 3] -2×3 Matrix{Int64}: - 1 0 0 - 0 2 3 - -julia> isdiag(d) -false -``` -""" -isdiag(A::AbstractMatrix) = isbanded(A, 0, 0) -isdiag(x::Number) = true - -""" - axpy!(α, x::AbstractArray, y::AbstractArray) - -Overwrite `y` with `x * α + y` and return `y`. -If `x` and `y` have the same axes, it's equivalent with `y .+= x .* a`. 
- -# Examples -```jldoctest -julia> x = [1; 2; 3]; - -julia> y = [4; 5; 6]; - -julia> axpy!(2, x, y) -3-element Vector{Int64}: - 6 - 9 - 12 -``` -""" -function axpy!(α, x::AbstractArray, y::AbstractArray) - n = length(x) - if n != length(y) - throw(DimensionMismatch(lazy"x has length $n, but y has length $(length(y))")) - end - iszero(α) && return y - for (IY, IX) in zip(eachindex(y), eachindex(x)) - @inbounds y[IY] += x[IX]*α - end - return y -end - -function axpy!(α, x::AbstractArray, rx::AbstractArray{<:Integer}, y::AbstractArray, ry::AbstractArray{<:Integer}) - if length(rx) != length(ry) - throw(DimensionMismatch(lazy"rx has length $(length(rx)), but ry has length $(length(ry))")) - elseif !checkindex(Bool, eachindex(IndexLinear(), x), rx) - throw(BoundsError(x, rx)) - elseif !checkindex(Bool, eachindex(IndexLinear(), y), ry) - throw(BoundsError(y, ry)) - end - iszero(α) && return y - for (IY, IX) in zip(eachindex(ry), eachindex(rx)) - @inbounds y[ry[IY]] += x[rx[IX]]*α - end - return y -end - -""" - axpby!(α, x::AbstractArray, β, y::AbstractArray) - -Overwrite `y` with `x * α + y * β` and return `y`. -If `x` and `y` have the same axes, it's equivalent with `y .= x .* a .+ y .* β`. - -# Examples -```jldoctest -julia> x = [1; 2; 3]; - -julia> y = [4; 5; 6]; - -julia> axpby!(2, x, 2, y) -3-element Vector{Int64}: - 10 - 14 - 18 -``` -""" -function axpby!(α, x::AbstractArray, β, y::AbstractArray) - if length(x) != length(y) - throw(DimensionMismatch(lazy"x has length $(length(x)), but y has length $(length(y))")) - end - iszero(α) && isone(β) && return y - for (IX, IY) in zip(eachindex(x), eachindex(y)) - @inbounds y[IY] = x[IX]*α + y[IY]*β - end - y -end - -DenseLike{T} = Union{DenseArray{T}, Base.StridedReshapedArray{T}, Base.StridedReinterpretArray{T}} -StridedVecLike{T} = Union{DenseLike{T}, Base.FastSubArray{T,<:Any,<:DenseLike{T}}} -axpy!(α::Number, x::StridedVecLike{T}, y::StridedVecLike{T}) where {T<:BlasFloat} = BLAS.axpy!(α, x, y) -axpby!(α::Number, x::StridedVecLike{T}, β::Number, y::StridedVecLike{T}) where {T<:BlasFloat} = BLAS.axpby!(α, x, β, y) -function axpy!(α::Number, - x::StridedVecLike{T}, rx::AbstractRange{<:Integer}, - y::StridedVecLike{T}, ry::AbstractRange{<:Integer}, -) where {T<:BlasFloat} - if Base.has_offset_axes(rx, ry) - return @invoke axpy!(α, - x::AbstractArray, rx::AbstractArray{<:Integer}, - y::AbstractArray, ry::AbstractArray{<:Integer}, - ) - end - @views BLAS.axpy!(α, x[rx], y[ry]) - return y -end - -""" - rotate!(x, y, c, s) - -Overwrite `x` with `c*x + s*y` and `y` with `-conj(s)*x + c*y`. -Returns `x` and `y`. - -!!! compat "Julia 1.5" - `rotate!` requires at least Julia 1.5. -""" -function rotate!(x::AbstractVector, y::AbstractVector, c, s) - require_one_based_indexing(x, y) - n = length(x) - if n != length(y) - throw(DimensionMismatch(lazy"x has length $(length(x)), but y has length $(length(y))")) - end - for i in eachindex(x,y) - @inbounds begin - xi, yi = x[i], y[i] - x[i] = c *xi + s*yi - y[i] = -conj(s)*xi + c*yi - end - end - return x, y -end - -""" - reflect!(x, y, c, s) - -Overwrite `x` with `c*x + s*y` and `y` with `conj(s)*x - c*y`. -Returns `x` and `y`. - -!!! compat "Julia 1.5" - `reflect!` requires at least Julia 1.5. 
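For illustration, a minimal sketch of the plane rotation performed by `rotate!` (vectors and angle chosen arbitrarily):

```julia
using LinearAlgebra

x = [1.0, 0.0];  y = [0.0, 1.0]
c, s = cos(pi/4), sin(pi/4)
LinearAlgebra.rotate!(x, y, c, s)   # x ← c*x + s*y,  y ← -conj(s)*x + c*y, in place
x ≈ [c, s]                          # true
y ≈ [-s, c]                         # true
```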
-""" -function reflect!(x::AbstractVector, y::AbstractVector, c, s) - require_one_based_indexing(x, y) - n = length(x) - if n != length(y) - throw(DimensionMismatch(lazy"x has length $(length(x)), but y has length $(length(y))")) - end - for i in eachindex(x,y) - @inbounds begin - xi, yi = x[i], y[i] - x[i] = c *xi + s*yi - y[i] = conj(s)*xi - c*yi - end - end - return x, y -end - -# Elementary reflection similar to LAPACK. The reflector is not Hermitian but -# ensures that tridiagonalization of Hermitian matrices become real. See lawn72 -@inline function reflector!(x::AbstractVector{T}) where {T} - require_one_based_indexing(x) - n = length(x) - n == 0 && return zero(eltype(x)) - ξ1 = @inbounds x[1] - normu = norm(x) - if iszero(normu) - return zero(ξ1/normu) - end - ν = T(copysign(normu, real(ξ1))) - ξ1 += ν - @inbounds x[1] = -ν - for i in 2:n - @inbounds x[i] /= ξ1 - end - ξ1/ν -end - -""" - reflectorApply!(x, τ, A) - -Multiplies `A` in-place by a Householder reflection on the left. It is equivalent to `A .= (I - conj(τ)*[1; x[2:end]]*[1; x[2:end]]')*A`. -""" -@inline function reflectorApply!(x::AbstractVector, τ::Number, A::AbstractVecOrMat) - require_one_based_indexing(x, A) - m, n = size(A, 1), size(A, 2) - if length(x) != m - throw(DimensionMismatch(lazy"reflector has length $(length(x)), which must match the first dimension of matrix A, $m")) - end - m == 0 && return A - for j in axes(A,2) - Aj, xj = @inbounds view(A, 2:m, j), view(x, 2:m) - vAj = conj(τ)*(@inbounds(A[1, j]) + dot(xj, Aj)) - @inbounds A[1, j] -= vAj - axpy!(-vAj, xj, Aj) - end - return A -end - -""" - det(M) - -Matrix determinant. - -See also: [`logdet`](@ref) and [`logabsdet`](@ref). - -# Examples -```jldoctest -julia> M = [1 0; 2 2] -2×2 Matrix{Int64}: - 1 0 - 2 2 - -julia> det(M) -2.0 -``` -Note that, in general, `det` computes a floating-point approximation of the -determinant, even for integer matrices, typically via Gaussian elimination. -Julia includes an exact algorithm for integer determinants (the Bareiss algorithm), -but only uses it by default for `BigInt` matrices (since determinants quickly -overflow any fixed integer precision): -```jldoctest -julia> det(BigInt[1 0; 2 2]) # exact integer determinant -2 -``` -""" -function det(A::AbstractMatrix{T}) where {T} - if istriu(A) || istril(A) - S = promote_type(T, typeof((one(T)*zero(T) + zero(T))/one(T))) - return convert(S, det(UpperTriangular(A))) - end - return det(lu(A; check = false)) -end -det(x::Number) = x - -# Resolve Issue #40128 -det(A::AbstractMatrix{BigInt}) = det_bareiss(A) - -""" - logabsdet(M) - -Log of absolute value of matrix determinant. Equivalent to -`(log(abs(det(M))), sign(det(M)))`, but may provide increased accuracy and/or speed. - -# Examples -```jldoctest -julia> A = [-1. 0.; 0. 1.] -2×2 Matrix{Float64}: - -1.0 0.0 - 0.0 1.0 - -julia> det(A) --1.0 - -julia> logabsdet(A) -(0.0, -1.0) - -julia> B = [2. 0.; 0. 1.] -2×2 Matrix{Float64}: - 2.0 0.0 - 0.0 1.0 - -julia> det(B) -2.0 - -julia> logabsdet(B) -(0.6931471805599453, 1.0) -``` -""" -function logabsdet(A::AbstractMatrix) - if istriu(A) || istril(A) - return logabsdet(UpperTriangular(A)) - end - return logabsdet(lu(A, check=false)) -end -logabsdet(a::Number) = log(abs(a)), sign(a) - -""" - logdet(M) - -Logarithm of matrix determinant. Equivalent to `log(det(M))`, but may provide -increased accuracy and avoids overflow/underflow. 
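The overflow point made above is easy to see on a matrix whose determinant exceeds `floatmax(Float64)`; a minimal sketch:

```julia
using LinearAlgebra

M = diagm(0 => fill(10.0, 400))   # determinant would be 10^400
det(M)                             # Inf — overflows Float64
logdet(M)                          # ≈ 400*log(10) ≈ 921.03, computed without overflow
```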
- -# Examples -```jldoctest -julia> M = [1 0; 2 2] -2×2 Matrix{Int64}: - 1 0 - 2 2 - -julia> logdet(M) -0.6931471805599453 - -julia> logdet(Matrix(I, 3, 3)) -0.0 -``` -""" -function logdet(A::AbstractMatrix) - d,s = logabsdet(A) - return d + log(s) -end - -logdet(A) = log(det(A)) - -const NumberArray{T<:Number} = AbstractArray{T} - -exactdiv(a, b) = a/b -exactdiv(a::Integer, b::Integer) = div(a, b) - -""" - det_bareiss!(M) - -Calculates the determinant of a matrix using the -[Bareiss Algorithm](https://en.wikipedia.org/wiki/Bareiss_algorithm) using -inplace operations. - -# Examples -```jldoctest -julia> M = [1 0; 2 2] -2×2 Matrix{Int64}: - 1 0 - 2 2 - -julia> LinearAlgebra.det_bareiss!(M) -2 -``` -""" -function det_bareiss!(M) - Base.require_one_based_indexing(M) - n = checksquare(M) - sign, prev = Int8(1), one(eltype(M)) - for i in axes(M,2)[begin:end-1] - if iszero(M[i,i]) # swap with another col to make nonzero - swapto = findfirst(!iszero, @view M[i,i+1:end]) - isnothing(swapto) && return zero(prev) - sign = -sign - Base.swapcols!(M, i, i + swapto) - end - for k in i+1:n, j in i+1:n - M[j,k] = exactdiv(M[j,k]*M[i,i] - M[j,i]*M[i,k], prev) - end - prev = M[i,i] - end - return sign * M[end,end] -end -""" - LinearAlgebra.det_bareiss(M) - -Calculates the determinant of a matrix using the -[Bareiss Algorithm](https://en.wikipedia.org/wiki/Bareiss_algorithm). -Also refer to [`det_bareiss!`](@ref). -""" -det_bareiss(M) = det_bareiss!(copymutable(M)) - - - -""" - promote_leaf_eltypes(itr) - -For an (possibly nested) iterable object `itr`, promote the types of leaf -elements. Equivalent to `promote_type(typeof(leaf1), typeof(leaf2), ...)`. -Currently supports only numeric leaf elements. - -# Examples -```jldoctest -julia> a = [[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]] -3-element Vector{Any}: - Any[1, 2, [3, 4]] - 5.0 - Any[0 + 6im, [7.0, 8.0]] - -julia> LinearAlgebra.promote_leaf_eltypes(a) -ComplexF64 (alias for Complex{Float64}) -``` -""" -promote_leaf_eltypes(x::Union{AbstractArray{T},Tuple{T,Vararg{T}}}) where {T<:Number} = T -promote_leaf_eltypes(x::Union{AbstractArray{T},Tuple{T,Vararg{T}}}) where {T<:NumberArray} = eltype(T) -promote_leaf_eltypes(x::T) where {T} = T -promote_leaf_eltypes(x::Union{AbstractArray,Tuple}) = mapreduce(promote_leaf_eltypes, promote_type, x; init=Bool) - -# isapprox: approximate equality of arrays [like isapprox(Number,Number)] -# Supports nested arrays; e.g., for `a = [[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]` -# `a ≈ a` is `true`. -function isapprox(x::AbstractArray, y::AbstractArray; - atol::Real=0, - rtol::Real=Base.rtoldefault(promote_leaf_eltypes(x),promote_leaf_eltypes(y),atol), - nans::Bool=false, norm::Function=norm) - d = norm(x - y) - if isfinite(d) - return iszero(rtol) ? d <= atol : d <= max(atol, rtol*max(norm(x), norm(y))) - else - # Fall back to a component-wise approximate comparison - # (mapreduce instead of all for greater generality [#44893]) - return mapreduce((a, b) -> isapprox(a, b; rtol=rtol, atol=atol, nans=nans), &, x, y) - end -end - -""" - normalize!(a::AbstractArray, p::Real=2) - -Normalize the array `a` in-place so that its `p`-norm equals unity, -i.e. `norm(a, p) == 1`. -See also [`normalize`](@ref) and [`norm`](@ref). 
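A minimal usage sketch of the in-place normalization just described (vector values arbitrary):

```julia
using LinearAlgebra

v = [3.0, 4.0]
normalize!(v)   # rescales v in place by inv(norm(v, 2))
v               # [0.6, 0.8]
norm(v)         # 1.0
```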
-""" -function normalize!(a::AbstractArray, p::Real=2) - nrm = norm(a, p) - __normalize!(a, nrm) -end - -@inline function __normalize!(a::AbstractArray, nrm) - # The largest positive floating point number whose inverse is less than infinity - δ = inv(prevfloat(typemax(nrm))) - if nrm ≥ δ # Safe to multiply with inverse - invnrm = inv(nrm) - rmul!(a, invnrm) - else # scale elements to avoid overflow - εδ = eps(one(nrm))/δ - rmul!(a, εδ) - rmul!(a, inv(nrm*εδ)) - end - return a -end - -""" - normalize(a, p::Real=2) - -Normalize `a` so that its `p`-norm equals unity, -i.e. `norm(a, p) == 1`. For scalars, this is similar to sign(a), -except normalize(0) = NaN. -See also [`normalize!`](@ref), [`norm`](@ref), and [`sign`](@ref). - -# Examples -```jldoctest -julia> a = [1,2,4]; - -julia> b = normalize(a) -3-element Vector{Float64}: - 0.2182178902359924 - 0.4364357804719848 - 0.8728715609439696 - -julia> norm(b) -1.0 - -julia> c = normalize(a, 1) -3-element Vector{Float64}: - 0.14285714285714285 - 0.2857142857142857 - 0.5714285714285714 - -julia> norm(c, 1) -1.0 - -julia> a = [1 2 4 ; 1 2 4] -2×3 Matrix{Int64}: - 1 2 4 - 1 2 4 - -julia> norm(a) -6.48074069840786 - -julia> normalize(a) -2×3 Matrix{Float64}: - 0.154303 0.308607 0.617213 - 0.154303 0.308607 0.617213 - -julia> normalize(3, 1) -1.0 - -julia> normalize(-8, 1) --1.0 - -julia> normalize(0, 1) -NaN -``` -""" -function normalize(a::AbstractArray, p::Real = 2) - nrm = norm(a, p) - if !isempty(a) - aa = copymutable_oftype(a, typeof(first(a)/nrm)) - return __normalize!(aa, nrm) - else - T = typeof(zero(eltype(a))/nrm) - return T[] - end -end - -normalize(x) = x / norm(x) -normalize(x, p::Real) = x / norm(x, p) - -""" - copytrito!(B, A, uplo) -> B - -Copies a triangular part of a matrix `A` to another matrix `B`. -`uplo` specifies the part of the matrix `A` to be copied to `B`. -Set `uplo = 'L'` for the lower triangular part or `uplo = 'U'` -for the upper triangular part. - -!!! compat "Julia 1.11" - `copytrito!` requires at least Julia 1.11. - -# Examples -```jldoctest -julia> A = [1 2 ; 3 4]; - -julia> B = [0 0 ; 0 0]; - -julia> copytrito!(B, A, 'L') -2×2 Matrix{Int64}: - 1 0 - 3 4 -``` -""" -function copytrito!(B::AbstractMatrix, A::AbstractMatrix, uplo::AbstractChar) - require_one_based_indexing(A, B) - BLAS.chkuplo(uplo) - m,n = size(A) - A = Base.unalias(B, A) - if uplo == 'U' - LAPACK.lacpy_size_check(size(B), (n < m ? n : m, n)) - for j in axes(A,2), i in axes(A,1)[begin : min(j,end)] - # extract the parents for UpperTriangular matrices - Bv, Av = uppertridata(B), uppertridata(A) - @inbounds Bv[i,j] = Av[i,j] - end - else # uplo == 'L' - LAPACK.lacpy_size_check(size(B), (m, m < n ? m : n)) - for j in axes(A,2), i in axes(A,1)[j:end] - # extract the parents for LowerTriangular matrices - Bv, Av = lowertridata(B), lowertridata(A) - @inbounds Bv[i,j] = Av[i,j] - end - end - return B -end -# Forward LAPACK-compatible strided matrices to lacpy -function copytrito!(B::StridedMatrixStride1{T}, A::StridedMatrixStride1{T}, uplo::AbstractChar) where {T<:BlasFloat} - LAPACK.lacpy!(B, A, uplo) -end diff --git a/stdlib/LinearAlgebra/src/givens.jl b/stdlib/LinearAlgebra/src/givens.jl deleted file mode 100644 index 4239c8dc4ed48..0000000000000 --- a/stdlib/LinearAlgebra/src/givens.jl +++ /dev/null @@ -1,429 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -# givensAlgorithm functions are derived from LAPACK, see below - -abstract type AbstractRotation{T} end -struct AdjointRotation{T,S<:AbstractRotation{T}} <: AbstractRotation{T} - R::S -end - -transpose(R::AbstractRotation) = error("transpose not implemented for $(typeof(R)). Consider using adjoint instead of transpose.") - -(*)(R::AbstractRotation, A::AbstractVector) = _rot_mul_vecormat(R, A) -(*)(R::AbstractRotation, A::AbstractMatrix) = _rot_mul_vecormat(R, A) -function _rot_mul_vecormat(R::AbstractRotation{T}, A::AbstractVecOrMat{S}) where {T,S} - TS = typeof(zero(T)*zero(S) + zero(T)*zero(S)) - lmul!(convert(AbstractRotation{TS}, R), copy_similar(A, TS)) -end - -(*)(A::AbstractVector, R::AbstractRotation) = _vecormat_mul_rot(A, R) -(*)(A::AbstractMatrix, R::AbstractRotation) = _vecormat_mul_rot(A, R) -function _vecormat_mul_rot(A::AbstractVecOrMat{T}, R::AbstractRotation{S}) where {T,S} - TS = typeof(zero(T)*zero(S) + zero(T)*zero(S)) - rmul!(copy_similar(A, TS), convert(AbstractRotation{TS}, R)) -end - -""" - LinearAlgebra.Givens(i1,i2,c,s) -> G - -A Givens rotation linear operator. The fields `c` and `s` represent the cosine and sine of -the rotation angle, respectively. The `Givens` type supports left multiplication `G*A` and -conjugated transpose right multiplication `A*G'`. The type doesn't have a `size` and can -therefore be multiplied with matrices of arbitrary size as long as `i2<=size(A,2)` for -`G*A` or `i2<=size(A,1)` for `A*G'`. - -See also [`givens`](@ref). -""" -struct Givens{T} <: AbstractRotation{T} - i1::Int - i2::Int - c::T - s::T -end -struct Rotation{T} <: AbstractRotation{T} - rotations::Vector{Givens{T}} -end - -convert(::Type{T}, r::T) where {T<:AbstractRotation} = r -convert(::Type{T}, r::AbstractRotation) where {T<:AbstractRotation} = T(r)::T -convert(::Type{AbstractRotation{T}}, r::AdjointRotation) where {T} = convert(AbstractRotation{T}, r.R)' -convert(::Type{AbstractRotation{T}}, r::AdjointRotation{T}) where {T} = r - -Givens(i1, i2, c, s) = Givens(i1, i2, promote(c, s)...) -Givens{T}(G::Givens{T}) where {T} = G -Givens{T}(G::Givens) where {T} = Givens(G.i1, G.i2, convert(T, G.c), convert(T, G.s)) -Rotation{T}(R::Rotation{T}) where {T} = R -Rotation{T}(R::Rotation) where {T} = Rotation{T}([Givens{T}(g) for g in R.rotations]) -AbstractRotation{T}(G::Givens) where {T} = Givens{T}(G) -AbstractRotation{T}(R::Rotation) where {T} = Rotation{T}(R) - -adjoint(G::Givens) = Givens(G.i1, G.i2, G.c', -G.s) -adjoint(R::AbstractRotation) = AdjointRotation(R) -adjoint(adjR::AdjointRotation) = adjR.R - -Base.copy(aR::AdjointRotation{T,Rotation{T}}) where {T} = - Rotation{T}([r' for r in Iterators.reverse(aR.R.rotations)]) - -floatmin2(::Type{Float32}) = reinterpret(Float32, 0x26000000) -floatmin2(::Type{Float64}) = reinterpret(Float64, 0x21a0000000000000) -floatmin2(::Type{T}) where {T} = (twopar = 2one(T); twopar^trunc(Integer,log(floatmin(T)/eps(T))/log(twopar)/twopar)) - -# derived from LAPACK's dlartg -# Copyright: -# Univ. of Tennessee -# Univ. of California Berkeley -# Univ. of Colorado Denver -# NAG Ltd. 
-function givensAlgorithm(f::T, g::T) where T<:AbstractFloat - onepar = one(T) - T0 = typeof(onepar) # dimensionless - zeropar = T0(zero(T)) # must be dimensionless - - # need both dimensionful and dimensionless versions of these: - safmn2 = floatmin2(T0) - safmn2u = floatmin2(T) - safmx2 = one(T)/safmn2 - safmx2u = oneunit(T)/safmn2 - - if g == 0 - cs = onepar - sn = zeropar - r = f - elseif f == 0 - cs = zeropar - sn = onepar - r = g - else - f1 = f - g1 = g - scalepar = max(abs(f1), abs(g1)) - if scalepar >= safmx2u - count = 0 - while true - count += 1 - f1 *= safmn2 - g1 *= safmn2 - scalepar = max(abs(f1), abs(g1)) - if scalepar < safmx2u || count >= 20 break end - end - r = sqrt(f1*f1 + g1*g1) - cs = f1/r - sn = g1/r - for i = 1:count - r *= safmx2 - end - elseif scalepar <= safmn2u - count = 0 - while true - count += 1 - f1 *= safmx2 - g1 *= safmx2 - scalepar = max(abs(f1), abs(g1)) - if scalepar > safmn2u break end - end - r = sqrt(f1*f1 + g1*g1) - cs = f1/r - sn = g1/r - for i = 1:count - r *= safmn2 - end - else - r = sqrt(f1*f1 + g1*g1) - cs = f1/r - sn = g1/r - end - if abs(f) > abs(g) && cs < 0 - cs = -cs - sn = -sn - r = -r - end - end - return cs, sn, r -end - -# derived from LAPACK's zlartg -# Copyright: -# Univ. of Tennessee -# Univ. of California Berkeley -# Univ. of Colorado Denver -# NAG Ltd. -function givensAlgorithm(f::Complex{T}, g::Complex{T}) where T<:AbstractFloat - onepar = one(T) - T0 = typeof(onepar) # dimensionless - zeropar = T0(zero(T)) # must be dimensionless - czero = complex(zeropar) - - abs1(ff) = max(abs(real(ff)), abs(imag(ff))) - safmin = floatmin(T0) - safmn2 = floatmin2(T0) - safmn2u = floatmin2(T) - safmx2 = one(T)/safmn2 - safmx2u = oneunit(T)/safmn2 - scalepar = max(abs1(f), abs1(g)) - fs = f - gs = g - count = 0 - if scalepar >= safmx2u - while true - count += 1 - fs *= safmn2 - gs *= safmn2 - scalepar *= safmn2 - if scalepar < safmx2u || count >= 20 break end - end - elseif scalepar <= safmn2u - if g == 0 - cs = onepar - sn = czero - r = f - return cs, sn, r - end - while true - count -= 1 - fs *= safmx2 - gs *= safmx2 - scalepar *= safmx2 - if scalepar > safmn2u break end - end - end - f2 = abs2(fs) - g2 = abs2(gs) - if f2 <= max(g2, oneunit(T))*safmin - # This is a rare case: F is very small. - if f == 0 - cs = zero(T) - r = complex(abs(g)) - # do complex/real division explicitly with two real divisions - d = abs(gs) - sn = complex(real(gs)/d, -imag(gs)/d) - return cs, sn, r - end - f2s = abs(fs) - # g2 and g2s are accurate - # g2 is at least safmin, and g2s is at least safmn2 - g2s = sqrt(g2) - # error in cs from underflow in f2s is at most - # unfl / safmn2 .lt. sqrt(unfl*eps) .lt. eps - # if max(g2,one)=g2, then f2 .lt. g2*safmin, - # and so cs .lt. sqrt(safmin) - # if max(g2,one)=one, then f2 .lt. safmin - # and so cs .lt. sqrt(safmin)/safmn2 = sqrt(eps) - # therefore, cs = f2s/g2s / sqrt( 1 + (f2s/g2s)**2 ) = f2s/g2s - cs = f2s/g2s - # make sure abs(ff) = 1 - # do complex/real division explicitly with 2 real divisions - if abs1(f) > 1 - d = abs(f) - ff = complex(real(f)/d, imag(f)/d) - else - dr = safmx2*real(f) - di = safmx2*imag(f) - d = hypot(dr, di) - ff = complex(dr/d, di/d) - end - sn = ff*complex(real(gs)/g2s, -imag(gs)/g2s) - r = cs*f + sn*g - else - # This is the most common case. 
- # Neither F2 nor F2/G2 are less than SAFMIN - # F2S cannot overflow, and it is accurate - f2s = sqrt(onepar + g2/f2) - # do the f2s(real)*fs(complex) multiply with two real multiplies - r = complex(f2s*real(fs), f2s*imag(fs)) - cs = onepar/f2s - d = f2 + g2 - # do complex/real division explicitly with two real divisions - sn = complex(real(r)/d, imag(r)/d) - sn *= conj(gs) - if count != 0 - if count > 0 - for i = 1:count - r *= safmx2 - end - else - for i = 1:-count - r *= safmn2 - end - end - end - end - return cs, sn, r -end - -# enable for unitful quantities -function givensAlgorithm(f::T, g::T) where T - fs = f / oneunit(T) - gs = g / oneunit(T) - typeof(fs) === T && typeof(gs) === T && - !isa(fs, Union{AbstractFloat,Complex{<:AbstractFloat}}) && - throw(MethodError(givensAlgorithm, (fs, gs))) - - c, s, r = givensAlgorithm(fs, gs) - return c, s, r * oneunit(T) -end - -givensAlgorithm(f, g) = givensAlgorithm(promote(float(f), float(g))...) - -""" - - givens(f::T, g::T, i1::Integer, i2::Integer) where {T} -> (G::Givens, r::T) - -Computes the Givens rotation `G` and scalar `r` such that for any vector `x` where -``` -x[i1] = f -x[i2] = g -``` -the result of the multiplication -``` -y = G*x -``` -has the property that -``` -y[i1] = r -y[i2] = 0 -``` - -See also [`LinearAlgebra.Givens`](@ref). -""" -function givens(f::T, g::T, i1::Integer, i2::Integer) where T - if i1 == i2 - throw(ArgumentError("Indices must be distinct.")) - end - c, s, r = givensAlgorithm(f, g) - if i1 > i2 - s = -conj(s) - i1, i2 = i2, i1 - end - Givens(i1, i2, c, s), r -end -""" - givens(A::AbstractArray, i1::Integer, i2::Integer, j::Integer) -> (G::Givens, r) - -Computes the Givens rotation `G` and scalar `r` such that the result of the multiplication -``` -B = G*A -``` -has the property that -``` -B[i1,j] = r -B[i2,j] = 0 -``` - -See also [`LinearAlgebra.Givens`](@ref). -""" -givens(A::AbstractMatrix, i1::Integer, i2::Integer, j::Integer) = - givens(A[i1,j], A[i2,j], i1, i2) - - -""" - givens(x::AbstractVector, i1::Integer, i2::Integer) -> (G::Givens, r) - -Computes the Givens rotation `G` and scalar `r` such that the result of the multiplication -``` -B = G*x -``` -has the property that -``` -B[i1] = r -B[i2] = 0 -``` - -See also [`LinearAlgebra.Givens`](@ref). 
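A minimal usage sketch of the vector method just described (vector values arbitrary):

```julia
using LinearAlgebra

x = [3.0, 4.0]
G, r = givens(x, 1, 2)   # rotation chosen so that (G*x)[2] == 0
G * x                     # ≈ [5.0, 0.0]
r                         # 5.0
```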
-""" -givens(x::AbstractVector, i1::Integer, i2::Integer) = givens(x[i1], x[i2], i1, i2) - -function getindex(G::Givens, i::Integer, j::Integer) - if i == j - if i == G.i1 || i == G.i2 - G.c - else - oneunit(G.c) - end - elseif i == G.i1 && j == G.i2 - G.s - elseif i == G.i2 && j == G.i1 - -conj(G.s) - else - zero(G.s) - end -end - -@inline function lmul!(G::Givens, A::AbstractVecOrMat) - require_one_based_indexing(A) - m, n = size(A, 1), size(A, 2) - if G.i2 > m - throw(DimensionMismatch("column indices for rotation are outside the matrix")) - end - @inbounds for i = 1:n - a1, a2 = A[G.i1,i], A[G.i2,i] - A[G.i1,i] = G.c *a1 + G.s*a2 - A[G.i2,i] = -conj(G.s)*a1 + G.c*a2 - end - return A -end -@inline function rmul!(A::AbstractMatrix, G::Givens) - require_one_based_indexing(A) - m, n = size(A, 1), size(A, 2) - if G.i2 > n - throw(DimensionMismatch("column indices for rotation are outside the matrix")) - end - @inbounds for i = 1:m - a1, a2 = A[i,G.i1], A[i,G.i2] - A[i,G.i1] = a1*G.c - a2*G.s' - A[i,G.i2] = a1*G.s + a2*G.c - end - return A -end - -function lmul!(G::Givens, R::Rotation) - push!(R.rotations, G) - return R -end -function rmul!(R::Rotation, G::Givens) - pushfirst!(R.rotations, G) - return R -end - -function lmul!(R::Rotation, A::AbstractVecOrMat) - @inbounds for i in eachindex(R.rotations) - lmul!(R.rotations[i], A) - end - return A -end -function rmul!(A::AbstractMatrix, R::Rotation) - @inbounds for i in eachindex(R.rotations) - rmul!(A, R.rotations[i]) - end - return A -end - -function lmul!(adjR::AdjointRotation{<:Any,<:Rotation}, A::AbstractVecOrMat) - R = adjR.R - @inbounds for i in eachindex(R.rotations) - lmul!(adjoint(R.rotations[i]), A) - end - return A -end -function rmul!(A::AbstractMatrix, adjR::AdjointRotation{<:Any,<:Rotation}) - R = adjR.R - @inbounds for i in eachindex(R.rotations) - rmul!(A, adjoint(R.rotations[i])) - end - return A -end - -function *(G1::Givens{S}, G2::Givens{T}) where {S,T} - TS = promote_type(T, S) - Rotation{TS}([convert(AbstractRotation{TS}, G2), convert(AbstractRotation{TS}, G1)]) -end -function *(G::Givens{T}, Gs::Givens{T}...) where {T} - return Rotation([reverse(Gs)..., G]) -end -function *(G::Givens{S}, R::Rotation{T}) where {S,T} - TS = promote_type(T, S) - Rotation(vcat(convert(AbstractRotation{TS}, R).rotations, convert(AbstractRotation{TS}, G))) -end -function *(R::Rotation{S}, G::Givens{T}) where {S,T} - TS = promote_type(T, S) - Rotation(vcat(convert(AbstractRotation{TS}, G), convert(AbstractRotation{TS}, R).rotations)) -end diff --git a/stdlib/LinearAlgebra/src/hessenberg.jl b/stdlib/LinearAlgebra/src/hessenberg.jl deleted file mode 100644 index ed654c33aba55..0000000000000 --- a/stdlib/LinearAlgebra/src/hessenberg.jl +++ /dev/null @@ -1,624 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -###################################################################################### -# Upper-Hessenberg matrices H+μI, analogous to the UpperTriangular type - -""" - UpperHessenberg(A::AbstractMatrix) - -Construct an `UpperHessenberg` view of the matrix `A`. -Entries of `A` below the first subdiagonal are ignored. - -!!! compat "Julia 1.3" - This type was added in Julia 1.3. - -Efficient algorithms are implemented for `H \\ b`, `det(H)`, and similar. - -See also the [`hessenberg`](@ref) function to factor any matrix into a similar -upper-Hessenberg matrix. - -If `F::Hessenberg` is the factorization object, the unitary matrix can be accessed -with `F.Q` and the Hessenberg matrix with `F.H`. 
When `Q` is extracted, the resulting -type is the `HessenbergQ` object, and may be converted to a regular matrix with -[`convert(Array, _)`](@ref) (or `Array(_)` for short). - -Iterating the decomposition produces the factors `F.Q` and `F.H`. - -# Examples -```jldoctest -julia> A = [1 2 3 4; 5 6 7 8; 9 10 11 12; 13 14 15 16] -4×4 Matrix{Int64}: - 1 2 3 4 - 5 6 7 8 - 9 10 11 12 - 13 14 15 16 - -julia> UpperHessenberg(A) -4×4 UpperHessenberg{Int64, Matrix{Int64}}: - 1 2 3 4 - 5 6 7 8 - ⋅ 10 11 12 - ⋅ ⋅ 15 16 -``` -""" -struct UpperHessenberg{T,S<:AbstractMatrix{T}} <: AbstractMatrix{T} - data::S - - function UpperHessenberg{T,S}(data) where {T,S<:AbstractMatrix{T}} - require_one_based_indexing(data) - new{T,S}(data) - end -end -UpperHessenberg(H::UpperHessenberg) = H -UpperHessenberg{T}(A::AbstractMatrix) where {T} = UpperHessenberg(convert(AbstractMatrix{T}, A)) -UpperHessenberg{T}(H::UpperHessenberg) where {T} = UpperHessenberg{T}(H.data) -UpperHessenberg(A::AbstractMatrix) = UpperHessenberg{eltype(A),typeof(A)}(A) -Matrix(H::UpperHessenberg{T}) where {T} = Matrix{T}(H) -Array(H::UpperHessenberg) = Matrix(H) -size(H::UpperHessenberg) = size(H.data) -axes(H::UpperHessenberg) = axes(H.data) -parent(H::UpperHessenberg) = H.data - -# similar behaves like UpperTriangular -similar(H::UpperHessenberg, ::Type{T}) where {T} = UpperHessenberg(similar(H.data, T)) -similar(H::UpperHessenberg, ::Type{T}, dims::Dims{N}) where {T,N} = similar(H.data, T, dims) - -AbstractMatrix{T}(H::UpperHessenberg) where {T} = UpperHessenberg{T}(H) -AbstractMatrix{T}(H::UpperHessenberg{T}) where {T} = copy(H) - -Base.dataids(A::UpperHessenberg) = Base.dataids(parent(A)) -Base.unaliascopy(A::UpperHessenberg) = UpperHessenberg(Base.unaliascopy(parent(A))) - -copy(H::UpperHessenberg) = UpperHessenberg(copy(H.data)) -real(H::UpperHessenberg{<:Complex}) = UpperHessenberg(triu!(real(H.data),-1)) -imag(H::UpperHessenberg) = UpperHessenberg(triu!(imag(H.data),-1)) - -Base.@constprop :aggressive function istriu(A::UpperHessenberg, k::Integer=0) - k <= -1 && return true - return _istriu(A, k) -end -# additional indirection to dispatch to optimized method for banded parents (defined in special.jl) -@inline function _istriu(A::UpperHessenberg, k) - P = parent(A) - m = size(A, 1) - for j in firstindex(P,2):min(m + k - 1, lastindex(P,2)) - Prows = @view P[max(begin, j - k + 1):min(j+1,end), j] - _iszero(Prows) || return false - end - return true -end - -function Matrix{T}(H::UpperHessenberg) where T - m,n = size(H) - return triu!(copyto!(Matrix{T}(undef, m, n), H.data), -1) -end - -Base.isassigned(H::UpperHessenberg, i::Int, j::Int) = - i <= j+1 ? isassigned(H.data, i, j) : true - -Base.@propagate_inbounds getindex(H::UpperHessenberg{T}, i::Int, j::Int) where {T} = - i <= j+1 ? convert(T, H.data[i,j]) : zero(T) - -Base._reverse(A::UpperHessenberg, dims) = reverse!(Matrix(A); dims) - -Base.@propagate_inbounds function setindex!(A::UpperHessenberg, x, i::Integer, j::Integer) - if i > j+1 - x == 0 || throw(ArgumentError("cannot set index in the lower triangular part " * - lazy"($i, $j) of an UpperHessenberg matrix to a nonzero value ($x)")) - else - A.data[i,j] = x - end - return A -end - -function Base.replace_in_print_matrix(A::UpperHessenberg, i::Integer, j::Integer, s::AbstractString) - return i <= j+1 ? 
s : Base.replace_with_centered_mark(s) -end - -Base.copy(A::Adjoint{<:Any,<:UpperHessenberg}) = tril!(adjoint!(similar(A.parent.data), A.parent.data), 1) -Base.copy(A::Transpose{<:Any,<:UpperHessenberg}) = tril!(transpose!(similar(A.parent.data), A.parent.data), 1) - --(A::UpperHessenberg) = UpperHessenberg(-A.data) -rmul!(H::UpperHessenberg, x::Number) = (rmul!(H.data, x); H) -lmul!(x::Number, H::UpperHessenberg) = (lmul!(x, H.data); H) - -fillstored!(H::UpperHessenberg, x) = (fillband!(H.data, x, -1, size(H,2)-1); H) - -+(A::UpperHessenberg, B::UpperHessenberg) = UpperHessenberg(A.data+B.data) --(A::UpperHessenberg, B::UpperHessenberg) = UpperHessenberg(A.data-B.data) - -for T = (:UniformScaling, :Diagonal, :Bidiagonal, :Tridiagonal, :SymTridiagonal, - :UpperTriangular, :UnitUpperTriangular) - for op = (:+, :-) - @eval begin - $op(H::UpperHessenberg, x::$T) = UpperHessenberg($op(H.data, x)) - $op(x::$T, H::UpperHessenberg) = UpperHessenberg($op(x, H.data)) - end - end -end - -for T = (:Number, :UniformScaling, :Diagonal) - @eval begin - *(H::UpperHessenberg, x::$T) = UpperHessenberg(H.data * x) - *(x::$T, H::UpperHessenberg) = UpperHessenberg(x * H.data) - /(H::UpperHessenberg, x::$T) = UpperHessenberg(H.data / x) - \(x::$T, H::UpperHessenberg) = UpperHessenberg(x \ H.data) - end -end - -function *(H::UpperHessenberg, U::UpperOrUnitUpperTriangular) - HH = mul!(matprod_dest(H, U, promote_op(matprod, eltype(H), eltype(U))), H, U) - UpperHessenberg(HH) -end -function *(U::UpperOrUnitUpperTriangular, H::UpperHessenberg) - HH = mul!(matprod_dest(U, H, promote_op(matprod, eltype(U), eltype(H))), U, H) - UpperHessenberg(HH) -end - -function /(H::UpperHessenberg, U::UpperTriangular) - HH = _rdiv!(matprod_dest(H, U, promote_op(/, eltype(H), eltype(U))), H, U) - UpperHessenberg(HH) -end -function /(H::UpperHessenberg, U::UnitUpperTriangular) - HH = _rdiv!(matprod_dest(H, U, promote_op(/, eltype(H), eltype(U))), H, U) - UpperHessenberg(HH) -end - -function \(U::UpperTriangular, H::UpperHessenberg) - HH = ldiv!(matprod_dest(U, H, promote_op(\, eltype(U), eltype(H))), U, H) - UpperHessenberg(HH) -end -function \(U::UnitUpperTriangular, H::UpperHessenberg) - HH = ldiv!(matprod_dest(U, H, promote_op(\, eltype(U), eltype(H))), U, H) - UpperHessenberg(HH) -end - -# Solving (H+µI)x = b: we can do this in O(m²) time and O(m) memory -# (in-place in x) by the RQ algorithm from: -# -# G. Henry, "The shifted Hessenberg system solve computation," Tech. Rep. 94–163, -# Center for Appl. Math., Cornell University (1994). -# -# as reviewed in -# -# C. Beattie et al., "A note on shifted Hessenberg systems and frequency -# response computation," ACM Trans. Math. Soft. 38, pp. 12:6–12:16 (2011) -# -# (Note, however, that there is apparently a typo in Algorithm 1 of the -# Beattie paper: the Givens rotation uses u(k), not H(k,k) - σ.) -# -# Essentially, it works by doing a Givens RQ factorization of H+µI from -# right to left, and doing backsubstitution *simultaneously*. 
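In user-facing terms this is what makes repeated shifted solves cheap: one `hessenberg` factorization serves every shift. A minimal sketch with arbitrary data:

```julia
using LinearAlgebra

A = [4.0 9.0 7.0; 4.0 4.0 1.0; 4.0 3.0 2.0]
b = [1.0, 2.0, 3.0]
F = hessenberg(A)
for μ in (0.5, 2.0)
    # each shifted solve reuses F; A + μ*I is never refactorized
    @assert (F + μ*I) \ b ≈ (A + μ*I) \ b
end
```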
- -# solve (H+μI)X = B, storing result in B -function ldiv!(F::UpperHessenberg, B::AbstractVecOrMat; shift::Number=false) - checksquare(F) - m = size(F,1) - m != size(B,1) && throw(DimensionMismatch(lazy"wrong right-hand-side # rows != $m")) - require_one_based_indexing(B) - n = size(B,2) - H = F.data - μ = shift - u = Vector{typeof(zero(eltype(H))+μ)}(undef, m) # for last rotated col of H-μI - copyto!(u, 1, H, m*(m-1)+1, m) # u .= H[:,m] - u[m] += μ - X = B # not a copy, just rename to match paper - cs = Vector{Tuple{real(eltype(u)),eltype(u)}}(undef, length(u)) # store Givens rotations - @inbounds for k = m:-1:2 - c, s, ρ = givensAlgorithm(u[k], H[k,k-1]) - cs[k] = (c, s) - for i = 1:n - X[k,i] /= ρ - t₁ = s * X[k,i]; t₂ = c * X[k,i] - @simd for j = 1:k-2 - X[j,i] -= u[j]*t₂ + H[j,k-1]*t₁ - end - X[k-1,i] -= u[k-1]*t₂ + (H[k-1,k-1] + μ) * t₁ - end - @simd for j = 1:k-2 - u[j] = H[j,k-1]*c - u[j]*s' - end - u[k-1] = (H[k-1,k-1] + μ) * c - u[k-1]*s' - end - for i = 1:n - τ₁ = X[1,i] / u[1] - @inbounds for j = 2:m - τ₂ = X[j,i] - c, s = cs[j] - X[j-1,i] = c*τ₁ + s*τ₂ - τ₁ = c*τ₂ - s'τ₁ - end - X[m,i] = τ₁ - end - return X -end - -# solve X(H+μI) = B, storing result in B -# -# Note: this can be derived from the Henry (1994) algorithm -# by transformation to F(Hᵀ+µI)F FXᵀ = FBᵀ, where -# F is the permutation matrix that reverses the order -# of rows/cols. Essentially, we take the ldiv! algorithm, -# swap indices of H and X to transpose, and reverse the -# order of the H indices (or the order of the loops). -function rdiv!(B::AbstractMatrix, F::UpperHessenberg; shift::Number=false) - checksquare(F) - m = size(F,1) - m != size(B,2) && throw(DimensionMismatch(lazy"wrong right-hand-side # cols != $m")) - require_one_based_indexing(B) - n = size(B,1) - H = F.data - μ = shift - u = Vector{typeof(zero(eltype(H))+μ)}(undef, m) # for last rotated row of H-μI - u .= @view H[1,:] - u[1] += μ - X = B # not a copy, just rename to match paper - cs = Vector{Tuple{real(eltype(u)),eltype(u)}}(undef, length(u)) # store Givens rotations - @inbounds for k = 1:m-1 - c, s, ρ = givensAlgorithm(u[k], H[k+1,k]) - cs[k] = (c, s) - for i = 1:n - X[i,k] /= ρ - t₁ = s * X[i,k]; t₂ = c * X[i,k] - @simd for j = k+2:m - X[i,j] -= u[j]*t₂ + H[k+1,j]*t₁ - end - X[i,k+1] -= u[k+1]*t₂ + (H[k+1,k+1] + μ) * t₁ - end - @simd for j = k+2:m - u[j] = H[k+1,j]*c - u[j]*s' - end - u[k+1] = (H[k+1,k+1] + μ) * c - u[k+1]*s' - end - for i = 1:n - τ₁ = X[i,m] / u[m] - @inbounds for j = m-1:-1:1 - τ₂ = X[i,j] - c, s = cs[j] - X[i,j+1] = c*τ₁ + s*τ₂ - τ₁ = c*τ₂ - s'τ₁ - end - X[i,1] = τ₁ - end - return X -end - -# Hessenberg-matrix determinant formula for H+μI based on: -# -# N. D. Cahill, J. R. D’Errico, D. A. Narayan, and J. Y. Narayan, "Fibonacci determinants," -# College Math. J. 33, pp. 221-225 (2003). -# -# as reviewed in Theorem 2.1 of: -# -# K. Kaygisiz and A. Sahin, "Determinant and permanent of Hessenberg matrix and generalized Lucas polynomials," -# arXiv:1111.4067 (2011). -# -# Cost is O(m²) with O(m) storage. 
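A minimal sketch of the determinant recurrence in use, including the `shift` keyword defined just below (matrix values arbitrary):

```julia
using LinearAlgebra

H = UpperHessenberg([4.0 9.0 7.0 2.0;
                     5.0 4.0 1.0 6.0;
                     0.0 3.0 2.0 8.0;
                     0.0 0.0 1.0 3.0])
det(H) ≈ det(Matrix(H))                      # true, without an O(m³) factorization
det(H; shift=2.0) ≈ det(Matrix(H) + 2.0I)    # det(H + μI) from the same O(m²) recurrence
```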
-function det(F::UpperHessenberg; shift::Number=false) - checksquare(F) - H = F.data - m = size(H,1) - μ = shift - m == 0 && return one(zero(eltype(H)) + μ) - determinant = H[1,1] + μ - prevdeterminant = one(determinant) - m == 1 && return determinant - prods = Vector{typeof(determinant)}(undef, m-1) # temporary storage for partial products - @inbounds for n = 2:m - prods[n-1] = prevdeterminant - prevdeterminant = determinant - determinant *= H[n,n] + μ - h = H[n,n-1] - @simd for r = n-1:-2:2 - determinant -= H[r,n] * (prods[r] *= h) - H[r-1,n] * (prods[r-1] *= h) - end - if iseven(n) - determinant -= H[1,n] * (prods[1] *= h) - end - end - return determinant -end - -# O(m²) log-determinant based on first doing Givens RQ to put H+μI into upper-triangular form and then -# taking the product of the diagonal entries. The trick is that we only need O(m) temporary storage, -# because we don't need to store the whole Givens-rotated matrix, only the most recent column. -# We do RQ (column rotations) rather than QR (row rotations) for more consecutive memory access. -# (We could also use it for det instead of the Cahill algorithm above. Cahill is slightly faster -# for very small matrices where you are likely to use det, and also uses only ± and * so it can -# be applied to Hessenberg matrices over other number fields.) -function logabsdet(F::UpperHessenberg; shift::Number=false) - checksquare(F) - H = F.data - m = size(H,1) - μ = shift - P = one(zero(eltype(H)) + μ) - logdeterminant = zero(real(P)) - m == 0 && return (logdeterminant, P) - g = Vector{typeof(P)}(undef, m) # below, g is the k-th col of Givens-rotated H+μI matrix - copyto!(g, 1, H, m*(m-1)+1, m) # g .= H[:,m] - g[m] += μ - @inbounds for k = m:-1:2 - c, s, ρ = givensAlgorithm(g[k], H[k,k-1]) - logdeterminant += log(abs(ρ)) - P *= sign(ρ) - g[k-1] = c*(H[k-1,k-1] + μ) - s'*g[k-1] - @simd for j = 1:k-2 - g[j] = c*H[j,k-1] - s'*g[j] - end - end - logdeterminant += log(abs(g[1])) - P *= sign(g[1]) - return (logdeterminant, P) -end - -function dot(x::AbstractVector, H::UpperHessenberg, y::AbstractVector) - require_one_based_indexing(x, y) - m = size(H, 1) - (length(x) == m == length(y)) || throw(DimensionMismatch()) - if iszero(m) - return dot(zero(eltype(x)), zero(eltype(H)), zero(eltype(y))) - end - x₁ = x[1] - r = dot(x₁, H[1,1], y[1]) - r += dot(x[2], H[2,1], y[1]) - @inbounds for j in 2:m-1 - yj = y[j] - if !iszero(yj) - temp = adjoint(H[1,j]) * x₁ - @simd for i in 2:j+1 - temp += adjoint(H[i,j]) * x[i] - end - r += dot(temp, yj) - end - end - ym = y[m] - if !iszero(ym) - temp = adjoint(H[1,m]) * x₁ - @simd for i in 2:m - temp += adjoint(H[i,m]) * x[i] - end - r += dot(temp, ym) - end - return r -end - -###################################################################################### -# Hessenberg factorizations Q(H+μI)Q' of A+μI: - -""" - Hessenberg <: Factorization - -A `Hessenberg` object represents the Hessenberg factorization `QHQ'` of a square -matrix, or a shift `Q(H+μI)Q'` thereof, which is produced by the [`hessenberg`](@ref) function. 
-""" -struct Hessenberg{T,SH<:AbstractMatrix,S<:AbstractMatrix,W<:AbstractVector,V<:Number} <: Factorization{T} - H::SH # UpperHessenberg or SymTridiagonal - uplo::Char - factors::S # reflector data in uplo triangle, may share data with H - τ::W # more Q (reflector) data - μ::V # diagonal shift for copy-free (F+μI) \ b solves and similar -end -Hessenberg(factors::AbstractMatrix, τ::AbstractVector, H::AbstractMatrix=UpperHessenberg(factors), uplo::AbstractChar='L'; μ::Number=false) = - Hessenberg{typeof(zero(eltype(factors))+μ),typeof(H),typeof(factors),typeof(τ),typeof(μ)}(H, uplo, factors, τ, μ) -Hessenberg(F::Hessenberg) = F -Hessenberg(F::Hessenberg, μ::Number) = Hessenberg(F.factors, F.τ, F.H, F.uplo; μ=μ) - -copy(F::Hessenberg{<:Any,<:UpperHessenberg}) = Hessenberg(copy(F.factors), copy(F.τ); μ=F.μ) -copy(F::Hessenberg{<:Any,<:SymTridiagonal}) = Hessenberg(copy(F.factors), copy(F.τ), copy(F.H), F.uplo; μ=F.μ) -size(F::Hessenberg, d::Integer) = size(F.H, d) -size(F::Hessenberg) = size(F.H) - -transpose(F::Hessenberg{<:Real}) = F' -transpose(::Hessenberg) = - throw(ArgumentError("transpose of Hessenberg decomposition is not supported, consider using adjoint")) - -# iteration for destructuring into components -Base.iterate(S::Hessenberg) = (S.Q, Val(:H)) -Base.iterate(S::Hessenberg, ::Val{:H}) = (S.H, Val(:μ)) -Base.iterate(S::Hessenberg, ::Val{:μ}) = (S.μ, Val(:done)) -Base.iterate(S::Hessenberg, ::Val{:done}) = nothing - -hessenberg!(A::StridedMatrix{<:BlasFloat}) = Hessenberg(LAPACK.gehrd!(A)...) - -function hessenberg!(A::Union{Symmetric{<:BlasReal,<:StridedMatrix},Hermitian{<:BlasFloat,<:StridedMatrix}}) - factors, τ, d, e = LAPACK.hetrd!(A.uplo, A.data) - return Hessenberg(factors, τ, SymTridiagonal(d, e), A.uplo) -end - -""" - hessenberg!(A) -> Hessenberg - -`hessenberg!` is the same as [`hessenberg`](@ref), but saves space by overwriting -the input `A`, instead of creating a copy. -""" -hessenberg!(A::AbstractMatrix) - -""" - hessenberg(A) -> Hessenberg - -Compute the Hessenberg decomposition of `A` and return a `Hessenberg` object. If `F` is the -factorization object, the unitary matrix can be accessed with `F.Q` (of type `LinearAlgebra.HessenbergQ`) -and the Hessenberg matrix with `F.H` (of type [`UpperHessenberg`](@ref)), either of -which may be converted to a regular matrix with `Matrix(F.H)` or `Matrix(F.Q)`. - -If `A` is [`Hermitian`](@ref) or real-[`Symmetric`](@ref), then the Hessenberg -decomposition produces a real-symmetric tridiagonal matrix and `F.H` is of type -[`SymTridiagonal`](@ref). - -Note that the shifted factorization `A+μI = Q (H+μI) Q'` can be -constructed efficiently by `F + μ*I` using the [`UniformScaling`](@ref) -object [`I`](@ref), which creates a new `Hessenberg` object with shared storage -and a modified shift. The shift of a given `F` is obtained by `F.μ`. -This is useful because multiple shifted solves `(F + μ*I) \\ b` -(for different `μ` and/or `b`) can be performed efficiently once `F` is created. - -Iterating the decomposition produces the factors `F.Q, F.H, F.μ`. - -# Examples -```julia-repl -julia> A = [4. 9. 7.; 4. 4. 1.; 4. 3. 2.] 
-3×3 Matrix{Float64}: - 4.0 9.0 7.0 - 4.0 4.0 1.0 - 4.0 3.0 2.0 - -julia> F = hessenberg(A) -Hessenberg{Float64, UpperHessenberg{Float64, Matrix{Float64}}, Matrix{Float64}, Vector{Float64}, Bool} -Q factor: 3×3 LinearAlgebra.HessenbergQ{Float64, Matrix{Float64}, Vector{Float64}, false} -H factor: -3×3 UpperHessenberg{Float64, Matrix{Float64}}: - 4.0 -11.3137 -1.41421 - -5.65685 5.0 2.0 - ⋅ -8.88178e-16 1.0 - -julia> F.Q * F.H * F.Q' -3×3 Matrix{Float64}: - 4.0 9.0 7.0 - 4.0 4.0 1.0 - 4.0 3.0 2.0 - -julia> q, h = F; # destructuring via iteration - -julia> q == F.Q && h == F.H -true -``` -""" -hessenberg(A::AbstractMatrix{T}) where T = - hessenberg!(eigencopy_oftype(A, eigtype(T))) - -function show(io::IO, mime::MIME"text/plain", F::Hessenberg) - summary(io, F) - if !iszero(F.μ) - print("\nwith shift μI for μ = ", F.μ) - end - print(io, "\nQ factor: ") - show(io, mime, F.Q) - println(io, "\nH factor:") - show(io, mime, F.H) -end - -function getproperty(F::Hessenberg, d::Symbol) - d === :Q && return HessenbergQ(F) - return getfield(F, d) -end - -Base.propertynames(F::Hessenberg, private::Bool=false) = - (:Q, :H, :μ, (private ? (:τ, :factors, :uplo) : ())...) - -AbstractArray(F::Hessenberg) = AbstractMatrix(F) -Matrix(F::Hessenberg) = Array(AbstractArray(F)) -Array(F::Hessenberg) = Matrix(F) -function AbstractMatrix(F::Hessenberg) - Q = F.Q - A = rmul!(lmul!(Q, Matrix{eltype(Q)}(F.H)), Q') - μ = F.μ - if iszero(μ) - return A - elseif typeof(zero(eltype(A))+μ) <: eltype(A) # can shift A in-place - for i = 1:size(A,1) - @inbounds A[i,i] += μ - end - return A - else - return A + μ*I # allocate another matrix, e.g. if A is real and μ is complex - end -end - -# multiply x by the entries of M in the upper-k triangle, which contains -# the entries of the upper-Hessenberg matrix H for k=-1 -function rmul_triu!(M::AbstractMatrix, x, k::Integer=0) - require_one_based_indexing(M) - m, n = size(M) - for j = 1:n, i = 1:min(j-k,m) - @inbounds M[i,j] *= x - end - return M -end -function lmul_triu!(x, M::AbstractMatrix, k::Integer=0) - require_one_based_indexing(M) - m, n = size(M) - for j = 1:n, i = 1:min(j-k,m) - @inbounds M[i,j] = x * M[i,j] - end - return M -end - -# when H is UpperHessenberg, it shares data with F.factors -# multiply Hessenberg by scalar (but don't modify lower triangle of F.H.data) -rmul!(F::Hessenberg{<:Any,<:UpperHessenberg{T}}, x::T) where {T<:Number} = Hessenberg(rmul_triu!(F.factors, x, -1), F.τ; μ=F.μ*x) -lmul!(x::T, F::Hessenberg{<:Any,<:UpperHessenberg{T}}) where {T<:Number} = Hessenberg(lmul_triu!(x, F.factors, -1), F.τ; μ=x*F.μ) - -rmul!(F::Hessenberg{<:Any,<:SymTridiagonal{T}}, x::T) where {T<:Number} = Hessenberg(F.factors, F.τ, SymTridiagonal(F.H.dv*x, F.H.ev*x), F.uplo; μ=F.μ*x) -lmul!(x::T, F::Hessenberg{<:Any,<:SymTridiagonal{T}}) where {T<:Number} = Hessenberg(F.factors, F.τ, SymTridiagonal(x*F.H.dv, x*F.H.ev), F.uplo; μ=x*F.μ) - -# Promote F * x or x * F. In general, we don't know how to do promotions -# that would change the element type of F.H, however. 
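A small sketch of the promotion rule described above: scaling is allowed when the scalar keeps the element type, and refused otherwise (matrix values arbitrary):

```julia
using LinearAlgebra

F = hessenberg([4.0 9.0; 4.0 4.0])
G = 2.0 * F                              # Float64 scalar: element type is unchanged
Matrix(G) ≈ 2.0 * [4.0 9.0; 4.0 4.0]     # true
# 2im * F throws a MethodError, since Complex entries would change eltype(F.H)
```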
-function (*)(F::Hessenberg{<:Any,<:AbstractMatrix{T}}, x::S) where {T,S<:Number} - TS = typeof(zero(T) * x) - if TS === T - return rmul!(copy(F), convert(T, x)) - else - throw(MethodError(*, (F, x))) - end -end -function (*)(x::S, F::Hessenberg{<:Any,<:AbstractMatrix{T}}) where {T,S<:Number} - TS = typeof(zero(T) * x) - if TS === T - return lmul!(convert(T, x), copy(F)) - else - throw(MethodError(*, (x, F))) - end -end --(F::Hessenberg) = F * -one(eltype(F.H)) - -# shift Hessenberg by λI -+(F::Hessenberg, J::UniformScaling) = Hessenberg(F, F.μ + J.λ) -+(J::UniformScaling, F::Hessenberg) = Hessenberg(F, J.λ + F.μ) --(F::Hessenberg, J::UniformScaling) = Hessenberg(F, F.μ - J.λ) --(J::UniformScaling, F::Hessenberg) = Hessenberg(-F, J.λ - F.μ) - -function ldiv!(F::Hessenberg, B::AbstractVecOrMat) - Q = F.Q - if iszero(F.μ) - return lmul!(Q, ldiv!(F.H, lmul!(Q', B))) - else - return lmul!(Q, ldiv!(F.H, lmul!(Q', B); shift=F.μ)) - end -end - -function rdiv!(B::AbstractMatrix, F::Hessenberg) - Q = F.Q - return rmul!(rdiv!(rmul!(B, Q), F.H; shift=F.μ), Q') -end - -# handle case of real H and complex μ — we need to work around the -# fact that we can't multiple a real F.Q by a complex matrix directly in LAPACK -function ldiv!(F::Hessenberg{<:Complex,<:Any,<:AbstractMatrix{<:Real}}, B::AbstractVecOrMat{<:Complex}) - Q = F.Q - Br = lmul!(Q', real(B)) - Bi = lmul!(Q', imag(B)) - ldiv!(F.H, B .= Complex.(Br,Bi); shift=F.μ) - Br .= real.(B); Bi .= imag.(B) - Br = lmul!(Q, Br) - Bi = lmul!(Q, Bi) - return B .= Complex.(Br,Bi) -end -function rdiv!(B::AbstractVecOrMat{<:Complex}, F::Hessenberg{<:Complex,<:Any,<:AbstractMatrix{<:Real}}) - Q = F.Q - Br = rmul!(real(B), Q) - Bi = rmul!(imag(B), Q) - rdiv!(B .= Complex.(Br,Bi), F.H; shift=F.μ) - Br .= real.(B); Bi .= imag.(B) - Br = rmul!(Br, Q') - Bi = rmul!(Bi, Q') - return B .= Complex.(Br,Bi) -end - -ldiv!(F::AdjointFactorization{<:Any,<:Hessenberg}, B::AbstractVecOrMat) = rdiv!(B', F')' - -det(F::Hessenberg) = det(F.H; shift=F.μ) -logabsdet(F::Hessenberg) = logabsdet(F.H; shift=F.μ) -function logdet(F::Hessenberg) - d,s = logabsdet(F) - return d + log(s) -end diff --git a/stdlib/LinearAlgebra/src/lapack.jl b/stdlib/LinearAlgebra/src/lapack.jl deleted file mode 100644 index f53e8bd98454d..0000000000000 --- a/stdlib/LinearAlgebra/src/lapack.jl +++ /dev/null @@ -1,7218 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module LAPACK -@doc """ -Interfaces to LAPACK subroutines. -""" LAPACK - -using ..LinearAlgebra.BLAS: @blasfunc, chkuplo - -using ..LinearAlgebra: libblastrampoline, BlasFloat, BlasInt, LAPACKException, DimensionMismatch, - SingularException, PosDefException, chkstride1, checksquare, triu, tril, dot - -using Base: iszero, require_one_based_indexing - - -# Legacy binding maintained for backwards-compatibility but new packages -# should not look at this, instead preferring to parse the output -# of BLAS.get_config() -const liblapack = libblastrampoline - -#Generic LAPACK error handlers -""" -Handle only negative LAPACK error codes - -*NOTE* use only if the positive error code is useful. -""" -function chkargsok(ret::BlasInt) - if ret < 0 - throw(ArgumentError(lazy"invalid argument #$(-ret) to LAPACK call")) - end -end - -"Handle all nonzero info codes" -function chklapackerror(ret::BlasInt, f...) - if ret == 0 - return - elseif ret < 0 - throw(ArgumentError(lazy"invalid argument #$(-ret) to LAPACK call")) - else # ret > 0 - chklapackerror_positive(ret, f...) - end -end - -chklapackerror_positive(ret, f...) 
= throw(LAPACKException(ret)) - -function chknonsingular(ret::BlasInt) - if ret > 0 - throw(SingularException(ret)) - end -end - -function chkposdef(ret::BlasInt) - if ret > 0 - throw(PosDefException(ret)) - end -end - -# Generic fallback function to assert that parameters are valid -# In specific cases, the following functions may be more useful -macro chkvalidparam(position::Int, param, validvalues) - :(chkvalidparam($position, $(string(param)), $(esc(param)), $validvalues)) -end -function chkvalidparam(position::Int, var::String, val, validvals) - # mimic `repr` for chars without explicitly calling it - # This is because `repr` introduces dynamic dispatch - _repr(c::AbstractChar) = "'$c'" - _repr(c) = c - if val ∉ validvals - throw(ArgumentError( - lazy"argument #$position: $var must be one of $validvals, but $(_repr(val)) was passed")) - end - return val -end - -"Check that {c}transpose is correctly specified" -function chktrans(trans::AbstractChar) - if !(trans == 'N' || trans == 'C' || trans == 'T') - throw(ArgumentError(lazy"trans argument must be 'N' (no transpose), 'T' (transpose), or 'C' (conjugate transpose), got '$trans'")) - end - trans -end - -"Check that left/right hand side multiply is correctly specified" -function chkside(side::AbstractChar) - if !(side == 'L' || side == 'R') - throw(ArgumentError(lazy"side argument must be 'L' (left hand multiply) or 'R' (right hand multiply), got '$side'")) - end - side -end - -"Check that unit diagonal flag is correctly specified" -function chkdiag(diag::AbstractChar) - if !(diag == 'U' || diag =='N') - throw(ArgumentError(lazy"diag argument must be 'U' (unit diagonal) or 'N' (non-unit diagonal), got '$diag'")) - end - diag -end - -subsetrows(X::AbstractVector, Y::AbstractArray, k) = Y[1:k] -subsetrows(X::AbstractMatrix, Y::AbstractArray, k) = Y[1:k, :] - -function chkfinite(A::AbstractMatrix) - for a in A - if !isfinite(a) - throw(ArgumentError("matrix contains Infs or NaNs")) - end - end - return true -end - -function chkuplofinite(A::AbstractMatrix, uplo::AbstractChar) - require_one_based_indexing(A) - chkuplo(uplo) - m, n = size(A) - if uplo == 'U' - @inbounds for j in 1:n, i in 1:j - if !isfinite(A[i,j]) - throw(ArgumentError("matrix contains Infs or NaNs")) - end - end - else - @inbounds for j in 1:n, i in j:m - if !isfinite(A[i,j]) - throw(ArgumentError("matrix contains Infs or NaNs")) - end - end - end -end - -# LAPACK version number -function version() - major = Ref{BlasInt}(0) - minor = Ref{BlasInt}(0) - patch = Ref{BlasInt}(0) - ccall((@blasfunc(ilaver_), libblastrampoline), Cvoid, - (Ptr{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}), - major, minor, patch) - return VersionNumber(major[], minor[], patch[]) -end - -# (GB) general banded matrices, LU decomposition and solver -for (gbtrf, gbtrs, elty) in - ((:dgbtrf_,:dgbtrs_,:Float64), - (:sgbtrf_,:sgbtrs_,:Float32), - (:zgbtrf_,:zgbtrs_,:ComplexF64), - (:cgbtrf_,:cgbtrs_,:ComplexF32)) - @eval begin - # SUBROUTINE DGBTRF( M, N, KL, KU, AB, LDAB, IPIV, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, KL, KU, LDAB, M, N - # * .. Array Arguments .. 
- # INTEGER IPIV( * ) - # DOUBLE PRECISION AB( LDAB, * ) - function gbtrf!(kl::Integer, ku::Integer, m::Integer, AB::AbstractMatrix{$elty}) - require_one_based_indexing(AB) - chkstride1(AB) - n = size(AB, 2) - mnmn = min(m, n) - ipiv = similar(AB, BlasInt, mnmn) - info = Ref{BlasInt}() - ccall((@blasfunc($gbtrf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}), - m, n, kl, ku, AB, max(1,stride(AB,2)), ipiv, info) - chklapackerror(info[]) - AB, ipiv - end - - # SUBROUTINE DGBTRS( TRANS, N, KL, KU, NRHS, AB, LDAB, IPIV, B, LDB, INFO) - # * .. Scalar Arguments .. - # CHARACTER TRANS - # INTEGER INFO, KL, KU, LDAB, LDB, N, NRHS - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION AB( LDAB, * ), B( LDB, * ) - function gbtrs!(trans::AbstractChar, kl::Integer, ku::Integer, m::Integer, - AB::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}, - B::AbstractVecOrMat{$elty}) - require_one_based_indexing(AB, B) - chkstride1(AB, B, ipiv) - chktrans(trans) - info = Ref{BlasInt}() - n = size(AB,2) - if m != n || m != size(B,1) - throw(DimensionMismatch(lazy"matrix AB has dimensions $(size(AB)), but right hand side matrix B has dimensions $(size(B))")) - end - ccall((@blasfunc($gbtrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Clong), - trans, n, kl, ku, size(B,2), AB, max(1,stride(AB,2)), ipiv, - B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - end -end - -""" - gbtrf!(kl, ku, m, AB) -> (AB, ipiv) - -Compute the LU factorization of a banded matrix `AB`. `kl` is the first -subdiagonal containing a nonzero band, `ku` is the last superdiagonal -containing one, and `m` is the first dimension of the matrix `AB`. Returns -the LU factorization in-place and `ipiv`, the vector of pivots used. -""" -gbtrf!(kl::Integer, ku::Integer, m::Integer, AB::AbstractMatrix) - -""" - gbtrs!(trans, kl, ku, m, AB, ipiv, B) - -Solve the equation `AB * X = B`. `trans` determines the orientation of `AB`. It may -be `N` (no transpose), `T` (transpose), or `C` (conjugate transpose). `kl` is the -first subdiagonal containing a nonzero band, `ku` is the last superdiagonal -containing one, and `m` is the first dimension of the matrix `AB`. `ipiv` is the vector -of pivots returned from `gbtrf!`. Returns the vector or matrix `X`, overwriting `B` in-place. -""" -gbtrs!(trans::AbstractChar, kl::Integer, ku::Integer, m::Integer, AB::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) - -## (GE) general matrices: balancing and back-transforming -for (gebal, gebak, elty, relty) in - ((:dgebal_, :dgebak_, :Float64, :Float64), - (:sgebal_, :sgebak_, :Float32, :Float32), - (:zgebal_, :zgebak_, :ComplexF64, :Float64), - (:cgebal_, :cgebak_, :ComplexF32, :Float32)) - @eval begin - # SUBROUTINE DGEBAL( JOB, N, A, LDA, ILO, IHI, SCALE, INFO ) - #* .. Scalar Arguments .. - # CHARACTER JOB - # INTEGER IHI, ILP, INFO, LDA, N - # .. Array Arguments .. 
- # DOUBLE PRECISION A( LDA, * ), SCALE( * ) - function gebal!(job::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - @chkvalidparam 1 job ('N', 'P', 'S', 'B') - n = checksquare(A) - chkfinite(A) # balancing routines don't support NaNs and Infs - ihi = Ref{BlasInt}() - ilo = Ref{BlasInt}() - scale = similar(A, $relty, n) - info = Ref{BlasInt}() - ccall((@blasfunc($gebal), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$relty}, Ref{BlasInt}, Clong), - job, n, A, max(1,stride(A,2)), ilo, ihi, scale, info, 1) - chklapackerror(info[]) - ilo[], ihi[], scale - end - - # SUBROUTINE DGEBAK( JOB, SIDE, N, ILO, IHI, SCALE, M, V, LDV, INFO ) - #* .. Scalar Arguments .. - # CHARACTER JOB, SIDE - # INTEGER IHI, ILP, INFO, LDV, M, N - # .. Array Arguments .. - # DOUBLE PRECISION SCALE( * ), V( LDV, * ) - function gebak!(job::AbstractChar, side::AbstractChar, - ilo::BlasInt, ihi::BlasInt, scale::AbstractVector{$relty}, - V::AbstractMatrix{$elty}) - require_one_based_indexing(scale, V) - @chkvalidparam 1 job ('N', 'P', 'S', 'B') - chkstride1(scale, V) - chkside(side) - chkfinite(V) # balancing routines don't support NaNs and Infs - n = checksquare(V) - info = Ref{BlasInt}() - ccall((@blasfunc($gebak), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$relty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Clong, Clong), - job, side, size(V,1), ilo, ihi, scale, n, V, max(1,stride(V,2)), info, - 1, 1) - chklapackerror(info[]) - V - end - end -end - -""" - gebal!(job, A) -> (ilo, ihi, scale) - -Balance the matrix `A` before computing its eigensystem or Schur factorization. -`job` can be one of `N` (`A` will not be permuted or scaled), `P` (`A` will only -be permuted), `S` (`A` will only be scaled), or `B` (`A` will be both permuted -and scaled). Modifies `A` in-place and returns `ilo`, `ihi`, and `scale`. If -permuting was turned on, `A[i,j] = 0` if `j > i` and `1 < j < ilo` or `j > ihi`. -`scale` contains information about the scaling/permutations performed. -""" -gebal!(job::AbstractChar, A::AbstractMatrix) - -""" - gebak!(job, side, ilo, ihi, scale, V) - -Transform the eigenvectors `V` of a matrix balanced using `gebal!` to -the unscaled/unpermuted eigenvectors of the original matrix. Modifies `V` -in-place. `side` can be `L` (left eigenvectors are transformed) or `R` -(right eigenvectors are transformed). -""" -gebak!(job::AbstractChar, side::AbstractChar, ilo::BlasInt, ihi::BlasInt, scale::AbstractVector, V::AbstractMatrix) - -# (GE) general matrices, direct decompositions -# -# These mutating functions take as arguments all the values they -# return, even if the value of the function does not depend on them -# (e.g. the tau argument). This is so that a factorization can be -# updated in place. The condensed mutating functions, usually a -# function of A only, are defined after this block. 
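A minimal sketch of the in-place update pattern described in the comment above, assuming a hypothetical square `Float64` matrix; the preallocated `tau` buffer is reused across factorizations (`geqrf!` is defined further below):

```julia
using LinearAlgebra: LAPACK
A   = rand(4, 4)
tau = similar(A, 4)       # min(m, n) reflector scalars, allocated once
LAPACK.geqrf!(A, tau)     # factorizes in place, filling both A and tau
copyto!(A, rand(4, 4))    # new data in the same storage
LAPACK.geqrf!(A, tau)     # refactorizes into the same A and tau buffers
```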
-for (gebrd, gelqf, geqlf, geqrf, geqp3, geqrt, geqrt3, gerqf, getrf, elty, relty) in - ((:dgebrd_,:dgelqf_,:dgeqlf_,:dgeqrf_,:dgeqp3_,:dgeqrt_,:dgeqrt3_,:dgerqf_,:dgetrf_,:Float64,:Float64), - (:sgebrd_,:sgelqf_,:sgeqlf_,:sgeqrf_,:sgeqp3_,:sgeqrt_,:sgeqrt3_,:sgerqf_,:sgetrf_,:Float32,:Float32), - (:zgebrd_,:zgelqf_,:zgeqlf_,:zgeqrf_,:zgeqp3_,:zgeqrt_,:zgeqrt3_,:zgerqf_,:zgetrf_,:ComplexF64,:Float64), - (:cgebrd_,:cgelqf_,:cgeqlf_,:cgeqrf_,:cgeqp3_,:cgeqrt_,:cgeqrt3_,:cgerqf_,:cgetrf_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE DGEBRD( M, N, A, LDA, D, E, TAUQ, TAUP, WORK, LWORK, - # INFO ) - # .. Scalar Arguments .. - # INTEGER INFO, LDA, LWORK, M, N - # .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), D( * ), E( * ), TAUP( * ), - # TAUQ( * ), WORK( * ) - function gebrd!(A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - m, n = size(A) - k = min(m, n) - d = similar(A, $relty, k) - e = similar(A, $relty, k) - tauq = similar(A, $elty, k) - taup = similar(A, $elty, k) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gebrd), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, A, max(1,stride(A,2)), - d, e, tauq, taup, - work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, d, e, tauq, taup - end - - # SUBROUTINE DGELQF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LWORK, M, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function gelqf!(A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}) - require_one_based_indexing(A, tau) - chkstride1(A,tau) - m = BlasInt(size(A, 1)) - n = BlasInt(size(A, 2)) - lda = BlasInt(max(1,stride(A, 2))) - if length(tau) != min(m,n) - throw(DimensionMismatch(lazy"tau has length $(length(tau)), but needs length $(min(m,n))")) - end - lwork = BlasInt(-1) - work = Vector{$elty}(undef, 1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gelqf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, A, lda, tau, work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, tau - end - - # SUBROUTINE DGEQLF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LWORK, M, N - # * .. Array Arguments .. 
- # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function geqlf!(A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}) - require_one_based_indexing(A, tau) - chkstride1(A,tau) - m = BlasInt(size(A, 1)) - n = BlasInt(size(A, 2)) - lda = BlasInt(max(1,stride(A, 2))) - if length(tau) != min(m,n) - throw(DimensionMismatch(lazy"tau has length $(length(tau)), but needs length $(min(m,n))")) - end - lwork = BlasInt(-1) - work = Vector{$elty}(undef, 1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($geqlf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, A, lda, tau, work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, tau - end - - # SUBROUTINE DGEQP3( M, N, A, LDA, JPVT, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LWORK, M, N - # * .. Array Arguments .. - # INTEGER JPVT( * ) - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function geqp3!(A::AbstractMatrix{$elty}, jpvt::AbstractVector{BlasInt}, tau::AbstractVector{$elty}) - require_one_based_indexing(A, jpvt, tau) - chkstride1(A,jpvt,tau) - m,n = size(A) - if length(tau) != min(m,n) - throw(DimensionMismatch(lazy"tau has length $(length(tau)), but needs length $(min(m,n))")) - end - if length(jpvt) != n - throw(DimensionMismatch(lazy"jpvt has length $(length(jpvt)), but needs length $n")) - end - lda = stride(A,2) - if lda == 0 - return A, tau, jpvt - end # Early exit - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - cmplx = eltype(A)<:Complex - if cmplx - rwork = Vector{$relty}(undef, 2n) - end - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - if cmplx - ccall((@blasfunc($geqp3), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{BlasInt}), - m, n, A, lda, - jpvt, tau, work, lwork, - rwork, info) - else - ccall((@blasfunc($geqp3), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}), - m, n, A, lda, - jpvt, tau, work, - lwork, info) - end - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - return A, tau, jpvt - end - - function geqrt!(A::AbstractMatrix{$elty}, T::AbstractMatrix{$elty}) - require_one_based_indexing(A, T) - chkstride1(A) - m, n = size(A) - minmn = min(m, n) - nb = size(T, 1) - if nb > minmn - throw(ArgumentError(lazy"block size $nb > $minmn too large")) - end - lda = max(1, stride(A,2)) - work = Vector{$elty}(undef, nb*n) - if minmn > 0 - info = Ref{BlasInt}() - ccall((@blasfunc($geqrt), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{BlasInt}), - m, n, nb, A, - lda, T, max(1,stride(T,2)), work, - info) - chklapackerror(info[]) - end - A, T - end - - function geqrt3!(A::AbstractMatrix{$elty}, T::AbstractMatrix{$elty}) - require_one_based_indexing(A, T) - chkstride1(A) - chkstride1(T) - m, n = size(A) - p, q = size(T) - if m < n - throw(DimensionMismatch(lazy"input matrix A has dimensions ($m,$n), but should have more rows than columns")) - end - if p != n || q != n - throw(DimensionMismatch(lazy"block reflector T has dimensions ($p,$q), but should have dimensions ($n,$n)")) - end 
- if n > 0 # this implies `m > 0` because of `m >= n` - info = Ref{BlasInt}() - ccall((@blasfunc($geqrt3), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, A, max(1, stride(A, 2)), - T, max(1,stride(T,2)), info) - chklapackerror(info[]) - end - A, T - end - - ## geqrfp! - positive elements on diagonal of R - not defined yet - # SUBROUTINE DGEQRFP( M, N, A, LDA, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LWORK, M, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function geqrf!(A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}) - require_one_based_indexing(A, tau) - chkstride1(A,tau) - m, n = size(A) - if length(tau) != min(m,n) - throw(DimensionMismatch(lazy"tau has length $(length(tau)), but needs length $(min(m,n))")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($geqrf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, A, max(1,stride(A,2)), tau, work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = max(BlasInt(1),BlasInt(real(work[1]))) - resize!(work, lwork) - end - end - A, tau - end - - # SUBROUTINE DGERQF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LWORK, M, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function gerqf!(A::AbstractMatrix{$elty},tau::AbstractVector{$elty}) - require_one_based_indexing(A, tau) - chkstride1(A,tau) - m, n = size(A) - if length(tau) != min(m,n) - throw(DimensionMismatch(lazy"tau has length $(length(tau)), but needs length $(min(m,n))")) - end - lwork = BlasInt(-1) - work = Vector{$elty}(undef, 1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gerqf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, A, max(1,stride(A,2)), tau, work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = max(BlasInt(m), BlasInt(real(work[1]))) - resize!(work, lwork) - end - end - A, tau - end - - # SUBROUTINE DGETRF( M, N, A, LDA, IPIV, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, M, N - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ) - function getrf!(A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}; check::Bool=true) - require_one_based_indexing(A) - check && chkfinite(A) - chkstride1(A) - m, n = size(A) - lda = max(1,stride(A, 2)) - info = Ref{BlasInt}() - ccall((@blasfunc($getrf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}), - m, n, A, lda, ipiv, info) - chkargsok(info[]) - A, ipiv, info[] #Error code is stored in LU factorization type - end - end -end - -""" - gebrd!(A) -> (A, d, e, tauq, taup) - -Reduce `A` in-place to bidiagonal form `A = QBP'`. Returns `A`, containing the -bidiagonal matrix `B`; `d`, containing the diagonal elements of `B`; `e`, -containing the off-diagonal elements of `B`; `tauq`, containing the -elementary reflectors representing `Q`; and `taup`, containing the -elementary reflectors representing `P`. -""" -gebrd!(A::AbstractMatrix) - -""" - gelqf!(A, tau) - -Compute the `LQ` factorization of `A`, `A = LQ`. 
`tau` contains scalars -which parameterize the elementary reflectors of the factorization. `tau` -must have length greater than or equal to the smallest dimension of `A`. - -Returns -`A` and `tau` modified in-place. -""" -gelqf!(A::AbstractMatrix, tau::AbstractVector) - -""" - geqlf!(A, tau) - -Compute the `QL` factorization of `A`, `A = QL`. `tau` contains scalars -which parameterize the elementary reflectors of the factorization. `tau` -must have length greater than or equal to the smallest dimension of `A`. - -Returns `A` and `tau` modified in-place. -""" -geqlf!(A::AbstractMatrix, tau::AbstractVector) - -""" - geqp3!(A, [jpvt, tau]) -> (A, tau, jpvt) - -Compute the pivoted `QR` factorization of `A`, `AP = QR` using BLAS level 3. -`P` is a pivoting matrix, represented by `jpvt`. `tau` stores the elementary -reflectors. The arguments `jpvt` and `tau` are optional and allow -for passing preallocated arrays. When passed, `jpvt` must have length greater -than or equal to `n` if `A` is an `(m x n)` matrix and `tau` must have length -greater than or equal to the smallest dimension of `A`. On entry, if `jpvt[j]` -does not equal zero then the `j`th column of `A` is permuted to the front of -`AP`. - -`A`, `jpvt`, and `tau` are modified in-place. -""" -geqp3!(A::AbstractMatrix, jpvt::AbstractVector{BlasInt}, tau::AbstractVector) - -function geqp3!(A::AbstractMatrix{<:BlasFloat}, jpvt::AbstractVector{BlasInt}) - require_one_based_indexing(A, jpvt) - m, n = size(A) - geqp3!(A, jpvt, similar(A, min(m, n))) -end - -function geqp3!(A::AbstractMatrix{<:BlasFloat}) - require_one_based_indexing(A) - m, n = size(A) - geqp3!(A, zeros(BlasInt, n), similar(A, min(m, n))) -end - -""" - geqrt!(A, T) - -Compute the blocked `QR` factorization of `A`, `A = QR`. `T` contains upper -triangular block reflectors which parameterize the elementary reflectors of -the factorization. The first dimension of `T` sets the block size and it must -be between 1 and `n`. The second dimension of `T` must equal the smallest -dimension of `A`. - -Returns `A` and `T` modified in-place. -""" -geqrt!(A::AbstractMatrix, T::AbstractMatrix) - -""" - geqrt3!(A, T) - -Recursively computes the blocked `QR` factorization of `A`, `A = QR`. `T` -contains upper triangular block reflectors which parameterize the -elementary reflectors of the factorization. The first dimension of `T` sets the -block size and it must be between 1 and `n`. The second dimension of `T` must -equal the smallest dimension of `A`. - -Returns `A` and `T` modified in-place. -""" -geqrt3!(A::AbstractMatrix, T::AbstractMatrix) - -""" - geqrf!(A, tau) - -Compute the `QR` factorization of `A`, `A = QR`. `tau` contains scalars -which parameterize the elementary reflectors of the factorization. `tau` -must have length greater than or equal to the smallest dimension of `A`. - -Returns `A` and `tau` modified in-place. -""" -geqrf!(A::AbstractMatrix, tau::AbstractVector) - -""" - gerqf!(A, tau) - -Compute the `RQ` factorization of `A`, `A = RQ`. `tau` contains scalars -which parameterize the elementary reflectors of the factorization. `tau` -must have length greater than or equal to the smallest dimension of `A`. - -Returns `A` and `tau` modified in-place. -""" -gerqf!(A::AbstractMatrix, tau::AbstractVector) - -""" - getrf!(A, ipiv) -> (A, ipiv, info) - -Compute the pivoted `LU` factorization of `A`, `A = LU`. 
`ipiv` contains the pivoting -information and `info` a code which indicates success (`info = 0`), a singular value -in `U` (`info = i`, in which case `U[i,i]` is singular), or an error code (`info < 0`). -""" -getrf!(A::AbstractMatrix, ipiv::AbstractVector; check::Bool=true) - -""" - gelqf!(A) -> (A, tau) - -Compute the `LQ` factorization of `A`, `A = LQ`. - -Returns `A`, modified in-place, and `tau`, which contains scalars -which parameterize the elementary reflectors of the factorization. -""" -gelqf!(A::AbstractMatrix{<:BlasFloat}) = ((m,n) = size(A); gelqf!(A, similar(A, min(m, n)))) - -""" - geqlf!(A) -> (A, tau) - -Compute the `QL` factorization of `A`, `A = QL`. - -Returns `A`, modified in-place, and `tau`, which contains scalars -which parameterize the elementary reflectors of the factorization. -""" -geqlf!(A::AbstractMatrix{<:BlasFloat}) = ((m,n) = size(A); geqlf!(A, similar(A, min(m, n)))) - -""" - geqrt!(A, nb) -> (A, T) - -Compute the blocked `QR` factorization of `A`, `A = QR`. `nb` sets the block size -and it must be between 1 and `n`, the second dimension of `A`. - -Returns `A`, modified in-place, and `T`, which contains upper -triangular block reflectors which parameterize the elementary reflectors of -the factorization. -""" -geqrt!(A::AbstractMatrix{<:BlasFloat}, nb::Integer) = geqrt!(A, similar(A, nb, minimum(size(A)))) - -""" - geqrt3!(A) -> (A, T) - -Recursively computes the blocked `QR` factorization of `A`, `A = QR`. - -Returns `A`, modified in-place, and `T`, which contains upper triangular block -reflectors which parameterize the elementary reflectors of the factorization. -""" -geqrt3!(A::AbstractMatrix{<:BlasFloat}) = (n = size(A, 2); geqrt3!(A, similar(A, n, n))) - -""" - geqrf!(A) -> (A, tau) - -Compute the `QR` factorization of `A`, `A = QR`. - -Returns `A`, modified in-place, and `tau`, which contains scalars -which parameterize the elementary reflectors of the factorization. -""" -geqrf!(A::AbstractMatrix{<:BlasFloat}) = ((m,n) = size(A); geqrf!(A, similar(A, min(m, n)))) - -""" - gerqf!(A) -> (A, tau) - -Compute the `RQ` factorization of `A`, `A = RQ`. - -Returns `A`, modified in-place, and `tau`, which contains scalars -which parameterize the elementary reflectors of the factorization. -""" -gerqf!(A::AbstractMatrix{<:BlasFloat}) = ((m,n) = size(A); gerqf!(A, similar(A, min(m, n)))) - -""" - getrf!(A) -> (A, ipiv, info) - -Compute the pivoted `LU` factorization of `A`, `A = LU`. - -Returns `A`, modified in-place, `ipiv`, the pivoting information, and an `info` -code which indicates success (`info = 0`), a singular value in `U` -(`info = i`, in which case `U[i,i]` is singular), or an error code (`info < 0`). -""" -getrf!(A::AbstractMatrix{T}; check::Bool=true) where {T <: BlasFloat} = ((m,n) = size(A); getrf!(A, similar(A, BlasInt, min(m, n)); check)) - -## Tools to compute and apply elementary reflectors -for (larfg, elty) in - ((:dlarfg_, Float64), - (:slarfg_, Float32), - (:zlarfg_, ComplexF64), - (:clarfg_, ComplexF32)) - @eval begin - # .. Scalar Arguments .. - # INTEGER incx, n - # DOUBLE PRECISION alpha, tau - # .. - # .. Array Arguments .. 
- # DOUBLE PRECISION x( * ) - function larfg!(x::AbstractVector{$elty}) - require_one_based_indexing(x) - N = BlasInt(length(x)) - α = Ref{$elty}(x[1]) - incx = BlasInt(1) - τ = Ref{$elty}(0) - ccall((@blasfunc($larfg), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}), - N, α, pointer(x, 2), incx, τ) - @inbounds x[1] = one($elty) - return τ[] - end - end -end - -for (larf, elty) in - ((:dlarf_, Float64), - (:slarf_, Float32), - (:zlarf_, ComplexF64), - (:clarf_, ComplexF32)) - @eval begin - # .. Scalar Arguments .. - # CHARACTER side - # INTEGER incv, ldc, m, n - # DOUBLE PRECISION tau - # .. - # .. Array Arguments .. - # DOUBLE PRECISION c( ldc, * ), v( * ), work( * ) - function larf!(side::AbstractChar, v::AbstractVector{$elty}, - τ::$elty, C::AbstractMatrix{$elty}, work::AbstractVector{$elty}) - require_one_based_indexing(v, C, work) - m, n = size(C) - chkside(side) - ldc = max(1, stride(C, 2)) - l = side == 'L' ? n : m - incv = BlasInt(1) - ccall((@blasfunc($larf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Clong), - side, m, n, v, incv, - τ, C, ldc, work, 1) - return C - end - - function larf!(side::AbstractChar, v::AbstractVector{$elty}, - τ::$elty, C::AbstractMatrix{$elty}) - require_one_based_indexing(v, C) - m, n = size(C) - chkside(side) - lwork = side == 'L' ? n : m - return larf!(side, v, τ, C, Vector{$elty}(undef,lwork)) - end - end -end - -## Complete orthogonaliztion tools -for (tzrzf, ormrz, elty) in - ((:dtzrzf_,:dormrz_,:Float64), - (:stzrzf_,:sormrz_,:Float32), - (:ztzrzf_,:zunmrz_,:ComplexF64), - (:ctzrzf_,:cunmrz_,:ComplexF32)) - @eval begin - # SUBROUTINE ZTZRZF( M, N, A, LDA, TAU, WORK, LWORK, INFO ) - # - # .. Scalar Arguments .. - # INTEGER INFO, LDA, LWORK, M, N - # .. - # .. Array Arguments .. - # COMPLEX*16 A( LDA, * ), TAU( * ), WORK( * ) - function tzrzf!(A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - m, n = size(A) - if n < m - throw(DimensionMismatch(lazy"input matrix A has dimensions ($m,$n), but cannot have fewer columns than rows")) - end - lda = max(1, stride(A,2)) - tau = similar(A, $elty, m) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($tzrzf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, A, lda, - tau, work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, tau - end - - # SUBROUTINE ZUNMRZ( SIDE, TRANS, M, N, K, L, A, LDA, TAU, C, LDC, - # WORK, LWORK, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER SIDE, TRANS - # INTEGER INFO, K, L, LDA, LDC, LWORK, M, N - # .. - # .. Array Arguments .. 
- # COMPLEX*16 A( LDA, * ), C( LDC, * ), TAU( * ), WORK( * ) - function ormrz!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix{$elty}, - tau::AbstractVector{$elty}, C::AbstractMatrix{$elty}) - require_one_based_indexing(A, tau, C) - chktrans(trans) - chkside(side) - chkstride1(A, tau, C) - m, n = size(C) - k = length(tau) - l = size(A, 2) - size(A, 1) - lda = max(1, stride(A,2)) - ldc = max(1, stride(C,2)) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ormrz), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - side, trans, m, n, - k, l, A, lda, - tau, C, ldc, work, - lwork, info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - C - end - end -end - -""" - ormrz!(side, trans, A, tau, C) - -Multiplies the matrix `C` by `Q` from the transformation supplied by -`tzrzf!`. Depending on `side` or `trans` the multiplication can be -left-sided (`side = L, Q*C`) or right-sided (`side = R, C*Q`) and `Q` -can be unmodified (`trans = N`), transposed (`trans = T`), or conjugate -transposed (`trans = C`). Returns matrix `C` which is modified in-place -with the result of the multiplication. -""" -ormrz!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractMatrix) - -""" - tzrzf!(A) -> (A, tau) - -Transforms the upper trapezoidal matrix `A` to upper triangular form in-place. -Returns `A` and `tau`, the scalar parameters for the elementary reflectors -of the transformation. -""" -tzrzf!(A::AbstractMatrix) - -## (GE) general matrices, solvers with factorization, solver and inverse -for (gels, gesv, getrs, getri, elty) in - ((:dgels_,:dgesv_,:dgetrs_,:dgetri_,:Float64), - (:sgels_,:sgesv_,:sgetrs_,:sgetri_,:Float32), - (:zgels_,:zgesv_,:zgetrs_,:zgetri_,:ComplexF64), - (:cgels_,:cgesv_,:cgetrs_,:cgetri_,:ComplexF32)) - @eval begin - # SUBROUTINE DGELS( TRANS, M, N, NRHS, A, LDA, B, LDB, WORK, LWORK,INFO) - # * .. Scalar Arguments .. - # CHARACTER TRANS - # INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS - function gels!(trans::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chktrans(trans) - chkstride1(A, B) - btrn = trans == 'T' - m, n = size(A) - if size(B,1) != (btrn ? n : m) - throw(DimensionMismatch(lazy"matrix A has dimensions ($m,$n), transposed: $btrn, but leading dimension of B is $(size(B,1))")) - end - info = Ref{BlasInt}() - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gels), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - (btrn ? 'T' : 'N'), m, n, size(B,2), A, max(1,stride(A,2)), - B, max(1,stride(B,2)), work, lwork, info, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - k = min(m, n) - F = m < n ? 
tril(A[1:k, 1:k]) : triu(A[1:k, 1:k]) - ssr = Vector{$elty}(undef, size(B, 2)) - for i = 1:size(B,2) - x = zero($elty) - for j = k+1:size(B,1) - x += abs2(B[j,i]) - end - ssr[i] = x - end - F, subsetrows(B, B, k), ssr - end - - # SUBROUTINE DGESV( N, NRHS, A, LDA, IPIV, B, LDB, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LDB, N, NRHS - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ) - function gesv!(A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A, B) - n = checksquare(A) - if size(B,1) != n - throw(DimensionMismatch(lazy"B has leading dimension $(size(B,1)), but needs $n")) - end - ipiv = similar(A, BlasInt, n) - info = Ref{BlasInt}() - ccall((@blasfunc($gesv), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info) - chklapackerror(info[]) - B, A, ipiv - end - - # SUBROUTINE DGETRS( TRANS, N, NRHS, A, LDA, IPIV, B, LDB, INFO ) - #* .. Scalar Arguments .. - # CHARACTER TRANS - # INTEGER INFO, LDA, LDB, N, NRHS - # .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ) - function getrs!(trans::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, ipiv, B) - chktrans(trans) - chkstride1(A, B, ipiv) - n = checksquare(A) - if n != size(B, 1) - throw(DimensionMismatch(lazy"B has leading dimension $(size(B,1)), but needs $n")) - end - if n != length(ipiv) - throw(DimensionMismatch(lazy"ipiv has length $(length(ipiv)), but needs to be $n")) - end - nrhs = size(B, 2) - info = Ref{BlasInt}() - ccall((@blasfunc($getrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - trans, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - - # SUBROUTINE DGETRI( N, A, LDA, IPIV, WORK, LWORK, INFO ) - #* .. Scalar Arguments .. - # INTEGER INFO, LDA, LWORK, N - #* .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) - function getri!(A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A, ipiv) - chkstride1(A, ipiv) - n = checksquare(A) - if n != length(ipiv) - throw(DimensionMismatch(lazy"ipiv has length $(length(ipiv)), but needs $n")) - end - lda = max(1,stride(A, 2)) - lwork = BlasInt(-1) - work = Vector{$elty}(undef, 1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($getri), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - n, A, lda, ipiv, work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A - end - end -end - -""" - gels!(trans, A, B) -> (F, B, ssr) - -Solves the linear equation `A * X = B`, `transpose(A) * X = B`, or `adjoint(A) * X = B` using -a QR or LQ factorization. Modifies the matrix/vector `B` in place with the -solution. `A` is overwritten with its `QR` or `LQ` factorization. `trans` -may be one of `N` (no modification), `T` (transpose), or `C` (conjugate -transpose). `gels!` searches for the minimum norm/least squares solution. -`A` may be under or over determined. 
The solution is returned in `B`. -""" -gels!(trans::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) - -""" - gesv!(A, B) -> (B, A, ipiv) - -Solves the linear equation `A * X = B` where `A` is a square matrix using -the `LU` factorization of `A`. `A` is overwritten with its `LU` -factorization and `B` is overwritten with the solution `X`. `ipiv` contains the -pivoting information for the `LU` factorization of `A`. -""" -gesv!(A::AbstractMatrix, B::AbstractVecOrMat) - -""" - getrs!(trans, A, ipiv, B) - -Solves the linear equation `A * X = B`, `transpose(A) * X = B`, or `adjoint(A) * X = B` for -square `A`. Modifies the matrix/vector `B` in place with the solution. `A` -is the `LU` factorization from `getrf!`, with `ipiv` the pivoting -information. `trans` may be one of `N` (no modification), `T` (transpose), -or `C` (conjugate transpose). -""" -getrs!(trans::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) - -""" - getri!(A, ipiv) - -Computes the inverse of `A`, using its `LU` factorization found by -`getrf!`. `ipiv` is the pivot information output and `A` -contains the `LU` factorization of `getrf!`. `A` is overwritten with -its inverse. -""" -getri!(A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) - -for (gesvx, elty) in - ((:dgesvx_,:Float64), - (:sgesvx_,:Float32)) - @eval begin - # SUBROUTINE DGESVX( FACT, TRANS, N, NRHS, A, LDA, AF, LDAF, IPIV, - # EQUED, R, C, B, LDB, X, LDX, RCOND, FERR, BERR, - # WORK, IWORK, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER EQUED, FACT, TRANS - # INTEGER INFO, LDA, LDAF, LDB, LDX, N, NRHS - # DOUBLE PRECISION RCOND - # .. - # .. Array Arguments .. - # INTEGER IPIV( * ), IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), AF( LDAF, * ), B( LDB, * ), - # $ BERR( * ), C( * ), FERR( * ), R( * ), - # $ WORK( * ), X( LDX, * - # - function gesvx!(fact::AbstractChar, trans::AbstractChar, A::AbstractMatrix{$elty}, - AF::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}, equed::AbstractChar, - R::AbstractVector{$elty}, C::AbstractVector{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, AF, ipiv, R, C, B) - @chkvalidparam 1 fact ('F', 'N', 'E') - chktrans(trans) - chkstride1(ipiv, R, C, B) - n = checksquare(A) - lda = stride(A,2) - n = checksquare(AF) - ldaf = stride(AF,2) - nrhs = size(B,2) - ldb = stride(B,2) - rcond = Ref{$elty}() - ferr = similar(A, $elty, nrhs) - berr = similar(A, $elty, nrhs) - work = Vector{$elty}(undef, 4n) - iwork = Vector{BlasInt}(undef, n) - info = Ref{BlasInt}() - X = similar(A, $elty, n, nrhs) - ccall((@blasfunc($gesvx), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ref{UInt8}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong, Clong, Clong), - fact, trans, n, nrhs, A, lda, AF, ldaf, ipiv, equed, R, C, B, - ldb, X, n, rcond, ferr, berr, work, iwork, info, 1, 1, 1) - chklapackerror(info[]) - if info[] == n + 1 - @warn "Matrix is singular to working precision" - else - chknonsingular(info[]) - end - #WORK(1) contains the reciprocal pivot growth factor norm(A)/norm(U) - X, equed, R, C, B, rcond[], ferr, berr, work[1] - end - - function gesvx!(A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - n = size(A,1) - X, equed, R, C, B, rcond, ferr, berr, rpgf = - gesvx!('N', 'N', A, - similar(A, $elty, n, n), - similar(A, BlasInt, 
n), - 'N', - similar(A, $elty, n), - similar(A, $elty, n), - B) - X, rcond, ferr, berr, rpgf - end - end -end -for (gesvx, elty, relty) in - ((:zgesvx_,:ComplexF64,:Float64), - (:cgesvx_,:ComplexF32 ,:Float32)) - @eval begin - # SUBROUTINE ZGESVX( FACT, TRANS, N, NRHS, A, LDA, AF, LDAF, IPIV, - # EQUED, R, C, B, LDB, X, LDX, RCOND, FERR, BERR, - # WORK, RWORK, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER EQUED, FACT, TRANS - # INTEGER INFO, LDA, LDAF, LDB, LDX, N, NRHS - # DOUBLE PRECISION RCOND - # .. - # .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION BERR( * ), C( * ), FERR( * ), R( * ), - # $ RWORK( * ) - # COMPLEX*16 A( LDA, * ), AF( LDAF, * ), B( LDB, * ), - # $ WORK( * ), X( LDX, * ) - function gesvx!(fact::AbstractChar, trans::AbstractChar, A::AbstractMatrix{$elty}, - AF::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}, equed::AbstractChar, - R::AbstractVector{$relty}, C::AbstractVector{$relty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, AF, ipiv, R, C, B) - @chkvalidparam 1 fact ('F', 'N', 'E') - chktrans(trans) - chkstride1(A, AF, ipiv, R, C, B) - n = checksquare(A) - lda = stride(A,2) - n = checksquare(AF) - ldaf = stride(AF,2) - nrhs = size(B,2) - ldb = stride(B,2) - rcond = Ref{$relty}() - ferr = similar(A, $relty, nrhs) - berr = similar(A, $relty, nrhs) - work = Vector{$elty}(undef, 2n) - rwork = Vector{$relty}(undef, 2n) - info = Ref{BlasInt}() - X = similar(A, $elty, n, nrhs) - ccall((@blasfunc($gesvx), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ref{UInt8}, Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{$relty}, Ptr{$relty}, - Ptr{$elty}, Ptr{$relty}, Ref{BlasInt}, Clong, Clong, Clong), - fact, trans, n, nrhs, A, lda, AF, ldaf, ipiv, equed, R, C, B, - ldb, X, n, rcond, ferr, berr, work, rwork, info, 1, 1, 1) - chklapackerror(info[]) - if info[] == n + 1 - @warn "Matrix is singular to working precision" - else - chknonsingular(info[]) - end - #RWORK(1) contains the reciprocal pivot growth factor norm(A)/norm(U) - X, equed, R, C, B, rcond[], ferr, berr, rwork[1] - end - - #Wrapper for the no-equilibration, no-transpose calculation - function gesvx!(A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - n = size(A,1) - X, equed, R, C, B, rcond, ferr, berr, rpgf = - gesvx!('N', 'N', A, - similar(A, $elty, n, n), - similar(A, BlasInt, n), - 'N', - similar(A, $relty, n), - similar(A, $relty, n), - B) - X, rcond, ferr, berr, rpgf - end - end -end - -""" - gesvx!(fact, trans, A, AF, ipiv, equed, R, C, B) -> (X, equed, R, C, B, rcond, ferr, berr, work) - -Solves the linear equation `A * X = B` (`trans = N`), `transpose(A) * X = B` -(`trans = T`), or `adjoint(A) * X = B` (`trans = C`) using the `LU` factorization -of `A`. `fact` may be `E`, in which case `A` will be equilibrated and copied -to `AF`; `F`, in which case `AF` and `ipiv` from a previous `LU` factorization -are inputs; or `N`, in which case `A` will be copied to `AF` and then -factored. If `fact = F`, `equed` may be `N`, meaning `A` has not been -equilibrated; `R`, meaning `A` was multiplied by `Diagonal(R)` from the left; -`C`, meaning `A` was multiplied by `Diagonal(C)` from the right; or `B`, meaning -`A` was multiplied by `Diagonal(R)` from the left and `Diagonal(C)` from the right. -If `fact = F` and `equed = R` or `B` the elements of `R` must all be positive. 
-If `fact = F` and `equed = C` or `B` the elements of `C` must all be positive. - -Returns the solution `X`; `equed`, which is an output if `fact` is not `N`, -and describes the equilibration that was performed; `R`, the row equilibration -diagonal; `C`, the column equilibration diagonal; `B`, which may be overwritten -with its equilibrated form `Diagonal(R)*B` (if `trans = N` and `equed = R,B`) or -`Diagonal(C)*B` (if `trans = T,C` and `equed = C,B`); `rcond`, the reciprocal -condition number of `A` after equilbrating; `ferr`, the forward error bound for -each solution vector in `X`; `berr`, the forward error bound for each solution -vector in `X`; and `work`, the reciprocal pivot growth factor. -""" -gesvx!(fact::AbstractChar, trans::AbstractChar, A::AbstractMatrix, AF::AbstractMatrix, - ipiv::AbstractVector{BlasInt}, equed::AbstractChar, R::AbstractVector, C::AbstractVector, B::AbstractVecOrMat) - -""" - gesvx!(A, B) - -The no-equilibration, no-transpose simplification of `gesvx!`. -""" -gesvx!(A::AbstractMatrix, B::AbstractVecOrMat) - -for (gelsd, gelsy, elty) in - ((:dgelsd_,:dgelsy_,:Float64), - (:sgelsd_,:sgelsy_,:Float32)) - @eval begin - # SUBROUTINE DGELSD( M, N, NRHS, A, LDA, B, LDB, S, RCOND, RANK, - # $ WORK, LWORK, IWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK - # DOUBLE PRECISION RCOND - # * .. - # * .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ), S( * ), WORK( * ) - function gelsd!(A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}, rcond::Real=-one($elty)) - require_one_based_indexing(A, B) - chkstride1(A, B) - m, n = size(A) - if size(B, 1) != m - throw(DimensionMismatch(lazy"B has leading dimension $(size(B,1)) but needs $m")) - end - newB = [B; zeros($elty, max(0, n - size(B, 1)), size(B, 2))] - s = similar(A, $elty, min(m, n)) - rnk = Ref{BlasInt}() - info = Ref{BlasInt}() - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - for i = 1:2 # first call returns lwork as work[1] and iwork length as iwork[1] - ccall((@blasfunc($gelsd), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}), - m, n, size(B,2), - A, max(1,stride(A,2)), newB, max(1,stride(B,2),n), - s, $elty(rcond), rnk, work, - lwork, iwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - resize!(iwork, iwork[1]) - end - end - subsetrows(B, newB, n), rnk[] - end - - # SUBROUTINE DGELSY( M, N, NRHS, A, LDA, B, LDB, JPVT, RCOND, RANK, - # $ WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK - # DOUBLE PRECISION RCOND - # * .. - # * .. Array Arguments .. 
- # INTEGER JPVT( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ), WORK( * ) - function gelsy!(A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}, rcond::Real=eps($elty)) - require_one_based_indexing(A, B) - chkstride1(A) - m = size(A, 1) - n = size(A, 2) - nrhs = size(B, 2) - if size(B, 1) != m - throw(DimensionMismatch(lazy"B has leading dimension $(size(B,1)) but needs $m")) - end - newB = [B; zeros($elty, max(0, n - size(B, 1)), size(B, 2))] - lda = max(1, stride(A,2)) - ldb = max(1, stride(newB,2)) - jpvt = zeros(BlasInt, n) - rnk = Ref{BlasInt}() - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gelsy), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ref{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}), - m, n, nrhs, A, - lda, newB, ldb, jpvt, - $elty(rcond), rnk, work, lwork, - info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - subsetrows(B, newB, n), rnk[] - end - end -end - -for (gelsd, gelsy, elty, relty) in - ((:zgelsd_,:zgelsy_,:ComplexF64,:Float64), - (:cgelsd_,:cgelsy_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE ZGELSD( M, N, NRHS, A, LDA, B, LDB, S, RCOND, RANK, - # $ WORK, LWORK, RWORK, IWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK - # DOUBLE PRECISION RCOND - # * .. - # * .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION RWORK( * ), S( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( * ) - function gelsd!(A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}, rcond::Real=-one($relty)) - require_one_based_indexing(A, B) - chkstride1(A, B) - m, n = size(A) - if size(B, 1) != m - throw(DimensionMismatch(lazy"B has leading dimension $(size(B,1)) but needs $m")) - end - newB = [B; zeros($elty, max(0, n - size(B, 1)), size(B, 2))] - s = similar(A, $relty, min(m, n)) - rnk = Ref{BlasInt}() - info = Ref{BlasInt}() - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 1) - iwork = Vector{BlasInt}(undef, 1) - for i = 1:2 # first call returns lwork as work[1], rwork length as rwork[1] and iwork length as iwork[1] - ccall((@blasfunc($gelsd), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, - Ref{$relty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ref{BlasInt}, Ref{BlasInt}), - m, n, size(B,2), A, - max(1,stride(A,2)), newB, max(1,stride(B,2),n), s, - $relty(rcond), rnk, work, lwork, - rwork, iwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - resize!(rwork, BlasInt(rwork[1])) - resize!(iwork, iwork[1]) - end - end - subsetrows(B, newB, n), rnk[] - end - - # SUBROUTINE ZGELSY( M, N, NRHS, A, LDA, B, LDB, JPVT, RCOND, RANK, - # $ WORK, LWORK, RWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LDB, LWORK, M, N, NRHS, RANK - # DOUBLE PRECISION RCOND - # * .. - # * .. Array Arguments .. 
- # INTEGER JPVT( * ) - # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( * ) - function gelsy!(A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}, rcond::Real=eps($relty)) - require_one_based_indexing(A, B) - chkstride1(A, B) - m, n = size(A) - nrhs = size(B, 2) - if size(B, 1) != m - throw(DimensionMismatch(lazy"B has leading dimension $(size(B,1)) but needs $m")) - end - newB = [B; zeros($elty, max(0, n - size(B, 1)), size(B, 2))] - lda = max(1, m) - ldb = max(1, m, n) - jpvt = zeros(BlasInt, n) - rnk = Ref{BlasInt}(1) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 2n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gelsy), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ref{$relty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{BlasInt}), - m, n, nrhs, A, - lda, newB, ldb, jpvt, - $relty(rcond), rnk, work, lwork, - rwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - subsetrows(B, newB, n), rnk[] - end - end -end - -""" - gelsd!(A, B, rcond) -> (B, rnk) - -Computes the least norm solution of `A * X = B` by finding the `SVD` -factorization of `A`, then dividing-and-conquering the problem. `B` -is overwritten with the solution `X`. Singular values below `rcond` -will be treated as zero. Returns the solution in `B` and the effective rank -of `A` in `rnk`. -""" -gelsd!(A::AbstractMatrix, B::AbstractVecOrMat, rcond::Real) - -""" - gelsy!(A, B, rcond) -> (B, rnk) - -Computes the least norm solution of `A * X = B` by finding the full `QR` -factorization of `A`, then dividing-and-conquering the problem. `B` -is overwritten with the solution `X`. Singular values below `rcond` -will be treated as zero. Returns the solution in `B` and the effective rank -of `A` in `rnk`. -""" -gelsy!(A::AbstractMatrix, B::AbstractVecOrMat, rcond::Real) - -for (gglse, elty) in ((:dgglse_, :Float64), - (:sgglse_, :Float32), - (:zgglse_, :ComplexF64), - (:cgglse_, :ComplexF32)) - @eval begin - # SUBROUTINE DGGLSE( M, N, P, A, LDA, B, LDB, C, D, X, WORK, LWORK, - # $ INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, LDA, LDB, LWORK, M, N, P - # * .. - # * .. Array Arguments .. 
- # DOUBLE PRECISION A( LDA, * ), B( LDB, * ), C( * ), D( * ), - # $ WORK( * ), X( * ) - function gglse!(A::AbstractMatrix{$elty}, c::AbstractVector{$elty}, - B::AbstractMatrix{$elty}, d::AbstractVector{$elty}) - require_one_based_indexing(A, c, B, d) - chkstride1(A, c, B, d) - m, n = size(A) - p = size(B, 1) - if size(B, 2) != n - throw(DimensionMismatch(lazy"B has second dimension $(size(B,2)), needs $n")) - end - if length(c) != m - throw(DimensionMismatch(lazy"c has length $(length(c)), needs $m")) - end - if length(d) != p - throw(DimensionMismatch(lazy"d has length $(length(d)), needs $p")) - end - X = zeros($elty, n) - info = Ref{BlasInt}() - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gglse), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}), - m, n, p, A, max(1,stride(A,2)), B, max(1,stride(B,2)), c, d, X, - work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - X, dot(view(c, n - p + 1:m), view(c, n - p + 1:m)) - end - end -end - -""" - gglse!(A, c, B, d) -> (X,res) - -Solves the equation `A * x = c` where `x` is subject to the equality -constraint `B * x = d`. Uses the formula `||c - A*x||^2 = 0` to solve. -Returns `X` and the residual sum-of-squares. -""" -gglse!(A::AbstractMatrix, c::AbstractVector, B::AbstractMatrix, d::AbstractVector) - -# (GE) general matrices eigenvalue-eigenvector and singular value decompositions -for (geev, gesvd, gesdd, ggsvd, elty, relty) in - ((:dgeev_,:dgesvd_,:dgesdd_,:dggsvd_,:Float64,:Float64), - (:sgeev_,:sgesvd_,:sgesdd_,:sggsvd_,:Float32,:Float32), - (:zgeev_,:zgesvd_,:zgesdd_,:zggsvd_,:ComplexF64,:Float64), - (:cgeev_,:cgesvd_,:cgesdd_,:cggsvd_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE DGEEV( JOBVL, JOBVR, N, A, LDA, WR, WI, VL, LDVL, VR, - # $ LDVR, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBVL, JOBVR - # INTEGER INFO, LDA, LDVL, LDVR, LWORK, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), VL( LDVL, * ), VR( LDVR, * ), - # $ WI( * ), WORK( * ), WR( * ) - function geev!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - @chkvalidparam 1 jobvl ('N', 'V') - @chkvalidparam 2 jobvr ('N', 'V') - chkfinite(A) # balancing routines don't support NaNs and Infs - lvecs = jobvl == 'V' - rvecs = jobvr == 'V' - VL = similar(A, $elty, (n, lvecs ? n : 0)) - VR = similar(A, $elty, (n, rvecs ? 
n : 0)) - cmplx = eltype(A) <: Complex - if cmplx - W = similar(A, $elty, n) - rwork = similar(A, $relty, 2n) - else - WR = similar(A, $elty, n) - WI = similar(A, $elty, n) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - if cmplx - ccall((@blasfunc($geev), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ref{BlasInt}, Clong, Clong), - jobvl, jobvr, n, A, max(1,stride(A,2)), W, VL, n, VR, n, - work, lwork, rwork, info, 1, 1) - else - ccall((@blasfunc($geev), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - jobvl, jobvr, n, A, max(1,stride(A,2)), WR, WI, VL, n, - VR, n, work, lwork, info, 1, 1) - end - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - cmplx ? (W, VL, VR) : (WR, WI, VL, VR) - end - - # SUBROUTINE DGESDD( JOBZ, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK, - # LWORK, IWORK, INFO ) - #* .. Scalar Arguments .. - # CHARACTER JOBZ - # INTEGER INFO, LDA, LDU, LDVT, LWORK, M, N - #* .. - #* .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), S( * ), U( LDU, * ), - # VT( LDVT, * ), WORK( * ) - function gesdd!(job::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - @chkvalidparam 1 job ('A', 'S', 'O', 'N') - m, n = size(A) - minmn = min(m, n) - if job == 'A' - U = similar(A, $elty, (m, m)) - VT = similar(A, $elty, (n, n)) - elseif job == 'S' - U = similar(A, $elty, (m, minmn)) - VT = similar(A, $elty, (minmn, n)) - elseif job == 'O' - U = similar(A, $elty, (m, m >= n ? 0 : m)) - VT = similar(A, $elty, (n, m >= n ? n : 0)) - else - U = similar(A, $elty, (m, 0)) - VT = similar(A, $elty, (n, 0)) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - S = similar(A, $relty, minmn) - cmplx = eltype(A)<:Complex - if cmplx - rwork = Vector{$relty}(undef, job == 'N' ? 7*minmn : minmn*max(5*minmn+7, 2*max(m,n)+2*minmn+1)) - end - iwork = Vector{BlasInt}(undef, 8*minmn) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - if cmplx - ccall((@blasfunc($gesdd), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{BlasInt}, Ref{BlasInt}, Clong), - job, m, n, A, max(1,stride(A,2)), S, U, max(1,stride(U,2)), VT, max(1,stride(VT,2)), - work, lwork, rwork, iwork, info, 1) - else - ccall((@blasfunc($gesdd), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ref{BlasInt}, Clong), - job, m, n, A, max(1,stride(A,2)), S, U, max(1,stride(U,2)), VT, max(1,stride(VT,2)), - work, lwork, iwork, info, 1) - end - chklapackerror(info[]) - if i == 1 - # Work around issue with truncated Float32 representation of lwork in - # sgesdd by using nextfloat. 
See - # http://icl.cs.utk.edu/lapack-forum/viewtopic.php?f=13&t=4587&p=11036&hilit=sgesdd#p11036 - # and - # https://github.com/scipy/scipy/issues/5401 - lwork = round(BlasInt, nextfloat(real(work[1]))) - resize!(work, lwork) - end - end - if job == 'O' - if m >= n - return (A, S, VT) - else - # ()__ - # ||::Z__ - # ||::|:::Z____ - # ||::|:::|====| - # ||==|===|====| - # ||""|===|====| - # || `"""|====| - # || `""""` - return (U, S, A) - end - end - return (U, S, VT) - end - - # SUBROUTINE DGESVD( JOBU, JOBVT, M, N, A, LDA, S, U, LDU, VT, LDVT, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBU, JOBVT - # INTEGER INFO, LDA, LDU, LDVT, LWORK, M, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), S( * ), U( LDU, * ), - # $ VT( LDVT, * ), WORK( * ) - function gesvd!(jobu::AbstractChar, jobvt::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - @chkvalidparam 1 jobu ('A', 'S', 'O', 'N') - @chkvalidparam 2 jobvt ('A', 'S', 'O', 'N') - (jobu == jobvt == 'O') && throw(ArgumentError("jobu and jobvt cannot both be O")) - m, n = size(A) - minmn = min(m, n) - S = similar(A, $relty, minmn) - U = similar(A, $elty, jobu == 'A' ? (m, m) : (jobu == 'S' ? (m, minmn) : (m, 0))) - VT = similar(A, $elty, jobvt == 'A' ? (n, n) : (jobvt == 'S' ? (minmn, n) : (n, 0))) - work = Vector{$elty}(undef, 1) - cmplx = eltype(A) <: Complex - if cmplx - rwork = Vector{$relty}(undef, 5minmn) - end - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i in 1:2 # first call returns lwork as work[1] - if cmplx - ccall((@blasfunc($gesvd), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$relty}, Ref{BlasInt}, Clong, Clong), - jobu, jobvt, m, n, A, max(1,stride(A,2)), S, U, max(1,stride(U,2)), VT, max(1,stride(VT,2)), - work, lwork, rwork, info, 1, 1) - else - ccall((@blasfunc($gesvd), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - jobu, jobvt, m, n, A, max(1,stride(A,2)), S, U, max(1,stride(U,2)), VT, max(1,stride(VT,2)), - work, lwork, info, 1, 1) - end - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - if jobu == 'O' - return (A, S, VT) - elseif jobvt == 'O' - # =============|===========|() - # # # #:::::: - # # # #:::::: - # # # #:::::: - # # # #:::::: - # # # # # # # - # # # # # # # - # # # # # # # - return (U, S, A) # # # # # # # - else # # # # # # # - return (U, S, VT) # # # # # # # - - end - end - - # SUBROUTINE ZGGSVD( JOBU, JOBV, JOBQ, M, N, P, K, L, A, LDA, B, - # $ LDB, ALPHA, BETA, U, LDU, V, LDV, Q, LDQ, WORK, - # $ RWORK, IWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBQ, JOBU, JOBV - # INTEGER INFO, K, L, LDA, LDB, LDQ, LDU, LDV, M, N, P - # * .. - # * .. Array Arguments .. 
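# Editor's illustrative sketch (not part of the patch): a thin SVD via the
# divide-and-conquer driver gesdd! defined above ('S' requests the reduced factors).
using LinearAlgebra
A = rand(5, 3)
U, S, Vt = LinearAlgebra.LAPACK.gesdd!('S', copy(A))
U * Diagonal(S) * Vt ≈ A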
- # INTEGER IWORK( * ) - # DOUBLE PRECISION ALPHA( * ), BETA( * ), RWORK( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ), Q( LDQ, * ), - # $ U( LDU, * ), V( LDV, * ), WORK( * ) - function ggsvd!(jobu::AbstractChar, jobv::AbstractChar, jobq::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - chkstride1(A, B) - @chkvalidparam 1 jobu ('U', 'N') - @chkvalidparam 2 jobv ('V', 'N') - @chkvalidparam 3 jobq ('Q', 'N') - m, n = size(A) - if size(B, 2) != n - throw(DimensionMismatch(lazy"B has second dimension $(size(B,2)) but needs $n")) - end - p = size(B, 1) - k = Vector{BlasInt}(undef, 1) - l = Vector{BlasInt}(undef, 1) - lda = max(1,stride(A, 2)) - ldb = max(1,stride(B, 2)) - alpha = similar(A, $relty, n) - beta = similar(A, $relty, n) - ldu = max(1, m) - U = jobu == 'U' ? similar(A, $elty, ldu, m) : similar(A, $elty, 0) - ldv = max(1, p) - V = jobv == 'V' ? similar(A, $elty, ldv, p) : similar(A, $elty, 0) - ldq = max(1, n) - Q = jobq == 'Q' ? similar(A, $elty, ldq, n) : similar(A, $elty, 0) - work = Vector{$elty}(undef, max(3n, m, p) + n) - cmplx = eltype(A) <: Complex - if cmplx - rwork = Vector{$relty}(undef, 2n) - end - iwork = Vector{BlasInt}(undef, n) - info = Ref{BlasInt}() - if cmplx - ccall((@blasfunc($ggsvd), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$relty}, Ptr{BlasInt}, Ptr{BlasInt}, - Clong, Clong, Clong), - jobu, jobv, jobq, m, - n, p, k, l, - A, lda, B, ldb, - alpha, beta, U, ldu, - V, ldv, Q, ldq, - work, rwork, iwork, info, - 1, 1, 1) - else - ccall((@blasfunc($ggsvd), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, - Clong, Clong, Clong), - jobu, jobv, jobq, m, - n, p, k, l, - A, lda, B, ldb, - alpha, beta, U, ldu, - V, ldv, Q, ldq, - work, iwork, info, - 1, 1, 1) - end - chklapackerror(info[]) - if m - k[1] - l[1] >= 0 - R = triu(A[1:k[1] + l[1],n - k[1] - l[1] + 1:n]) - else - R = triu([A[1:m, n - k[1] - l[1] + 1:n]; B[m - k[1] + 1:l[1], n - k[1] - l[1] + 1:n]]) - end - U, V, Q, alpha, beta, k[1], l[1], R - end - end -end - -""" - geev!(jobvl, jobvr, A) -> (W, VL, VR) - -Finds the eigensystem of `A`. If `jobvl = N`, the left eigenvectors of -`A` aren't computed. If `jobvr = N`, the right eigenvectors of `A` -aren't computed. If `jobvl = V` or `jobvr = V`, the corresponding -eigenvectors are computed. Returns the eigenvalues in `W`, the right -eigenvectors in `VR`, and the left eigenvectors in `VL`. -""" -geev!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix) - -""" - gesdd!(job, A) -> (U, S, VT) - -Finds the singular value decomposition of `A`, `A = U * S * V'`, -using a divide and conquer approach. If `job = A`, all the columns of `U` and -the rows of `V'` are computed. If `job = N`, no columns of `U` or rows of `V'` -are computed. If `job = O`, `A` is overwritten with the columns of (thin) `U` -and the rows of (thin) `V'`. If `job = S`, the columns of (thin) `U` and the -rows of (thin) `V'` are computed and returned separately. 
-""" -gesdd!(job::AbstractChar, A::AbstractMatrix) - -""" - gesvd!(jobu, jobvt, A) -> (U, S, VT) - -Finds the singular value decomposition of `A`, `A = U * S * V'`. -If `jobu = A`, all the columns of `U` are computed. If `jobvt = A` all the rows -of `V'` are computed. If `jobu = N`, no columns of `U` are computed. If -`jobvt = N` no rows of `V'` are computed. If `jobu = O`, `A` is overwritten with -the columns of (thin) `U`. If `jobvt = O`, `A` is overwritten with the rows -of (thin) `V'`. If `jobu = S`, the columns of (thin) `U` are computed -and returned separately. If `jobvt = S` the rows of (thin) `V'` are -computed and returned separately. `jobu` and `jobvt` can't both be `O`. - -Returns `U`, `S`, and `Vt`, where `S` are the singular values of `A`. -""" -gesvd!(jobu::AbstractChar, jobvt::AbstractChar, A::AbstractMatrix) - -""" - ggsvd!(jobu, jobv, jobq, A, B) -> (U, V, Q, alpha, beta, k, l, R) - -Finds the generalized singular value decomposition of `A` and `B`, `U'*A*Q = D1*R` -and `V'*B*Q = D2*R`. `D1` has `alpha` on its diagonal and `D2` has `beta` on its -diagonal. If `jobu = U`, the orthogonal/unitary matrix `U` is computed. If -`jobv = V` the orthogonal/unitary matrix `V` is computed. If `jobq = Q`, -the orthogonal/unitary matrix `Q` is computed. If `jobu`, `jobv` or `jobq` is -`N`, that matrix is not computed. This function is only available in LAPACK -versions prior to 3.6.0. -""" -ggsvd!(jobu::AbstractChar, jobv::AbstractChar, jobq::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) - - -for (f, elty) in ((:dggsvd3_, :Float64), - (:sggsvd3_, :Float32)) - @eval begin - function ggsvd3!(jobu::AbstractChar, jobv::AbstractChar, jobq::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - chkstride1(A, B) - @chkvalidparam 1 jobu ('U', 'N') - @chkvalidparam 2 jobv ('V', 'N') - @chkvalidparam 3 jobq ('Q', 'N') - m, n = size(A) - if size(B, 2) != n - throw(DimensionMismatch(lazy"B has second dimension $(size(B,2)) but needs $n")) - end - p = size(B, 1) - k = Ref{BlasInt}() - l = Ref{BlasInt}() - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - alpha = similar(A, $elty, n) - beta = similar(A, $elty, n) - ldu = max(1, m) - U = jobu == 'U' ? similar(A, $elty, ldu, m) : similar(A, $elty, 0) - ldv = max(1, p) - V = jobv == 'V' ? similar(A, $elty, ldv, p) : similar(A, $elty, 0) - ldq = max(1, n) - Q = jobq == 'Q' ? 
similar(A, $elty, ldq, n) : similar(A, $elty, 0) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($f), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Clong, Clong, Clong), - jobu, jobv, jobq, m, - n, p, k, l, - A, lda, B, ldb, - alpha, beta, U, ldu, - V, ldv, Q, ldq, - work, lwork, iwork, info, - 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - if m - k[] - l[] >= 0 - R = triu(A[1:k[] + l[],n - k[] - l[] + 1:n]) - else - R = triu([A[1:m, n - k[] - l[] + 1:n]; B[m - k[] + 1:l[], n - k[] - l[] + 1:n]]) - end - return U, V, Q, alpha, beta, k[], l[], R - end - end -end - -for (f, elty, relty) in ((:zggsvd3_, :ComplexF64, :Float64), - (:cggsvd3_, :ComplexF32, :Float32)) - @eval begin - function ggsvd3!(jobu::AbstractChar, jobv::AbstractChar, jobq::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - chkstride1(A, B) - @chkvalidparam 1 jobu ('U', 'N') - @chkvalidparam 2 jobv ('V', 'N') - @chkvalidparam 3 jobq ('Q', 'N') - m, n = size(A) - if size(B, 2) != n - throw(DimensionMismatch(lazy"B has second dimension $(size(B,2)) but needs $n")) - end - p = size(B, 1) - k = Vector{BlasInt}(undef, 1) - l = Vector{BlasInt}(undef, 1) - lda = max(1,stride(A, 2)) - ldb = max(1,stride(B, 2)) - alpha = similar(A, $relty, n) - beta = similar(A, $relty, n) - ldu = max(1, m) - U = jobu == 'U' ? similar(A, $elty, ldu, m) : similar(A, $elty, 0) - ldv = max(1, p) - V = jobv == 'V' ? similar(A, $elty, ldv, p) : similar(A, $elty, 0) - ldq = max(1, n) - Q = jobq == 'Q' ? similar(A, $elty, ldq, n) : similar(A, $elty, 0) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 2n) - iwork = Vector{BlasInt}(undef, n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($f), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{BlasInt}, - Ref{BlasInt}, Clong, Clong, Clong), - jobu, jobv, jobq, m, - n, p, k, l, - A, lda, B, ldb, - alpha, beta, U, ldu, - V, ldv, Q, ldq, - work, lwork, rwork, iwork, - info, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - if m - k[1] - l[1] >= 0 - R = triu(A[1:k[1] + l[1],n - k[1] - l[1] + 1:n]) - else - R = triu([A[1:m, n - k[1] - l[1] + 1:n]; B[m - k[1] + 1:l[1], n - k[1] - l[1] + 1:n]]) - end - return U, V, Q, alpha, beta, k[1], l[1], R - end - end -end - -""" - ggsvd3!(jobu, jobv, jobq, A, B) -> (U, V, Q, alpha, beta, k, l, R) - -Finds the generalized singular value decomposition of `A` and `B`, `U'*A*Q = D1*R` -and `V'*B*Q = D2*R`. `D1` has `alpha` on its diagonal and `D2` has `beta` on its -diagonal. If `jobu = U`, the orthogonal/unitary matrix `U` is computed. If -`jobv = V` the orthogonal/unitary matrix `V` is computed. 
If `jobq = Q`, -the orthogonal/unitary matrix `Q` is computed. If `jobu`, `jobv`, or `jobq` is -`N`, that matrix is not computed. This function requires LAPACK 3.6.0. -""" -ggsvd3! - -## Expert driver and generalized eigenvalue problem -for (geevx, ggev, ggev3, elty) in - ((:dgeevx_,:dggev_,:dggev3_,:Float64), - (:sgeevx_,:sggev_,:sggev3_,:Float32)) - @eval begin - # SUBROUTINE DGEEVX( BALANC, JOBVL, JOBVR, SENSE, N, A, LDA, WR, WI, - # VL, LDVL, VR, LDVR, ILO, IHI, SCALE, ABNRM, - # RCONDE, RCONDV, WORK, LWORK, IWORK, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER BALANC, JOBVL, JOBVR, SENSE - # INTEGER IHI, ILO, INFO, LDA, LDVL, LDVR, LWORK, N - # DOUBLE PRECISION ABNRM - # .. - # .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), RCONDE( * ), RCONDV( * ), - # $ SCALE( * ), VL( LDVL, * ), VR( LDVR, * ), - # $ WI( * ), WORK( * ), WR( * ) - function geevx!(balanc::AbstractChar, jobvl::AbstractChar, jobvr::AbstractChar, sense::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - @chkvalidparam 1 balanc ('N', 'P', 'S', 'B') - @chkvalidparam 4 sense ('N', 'E', 'V', 'B') - if sense ∈ ('E', 'B') && !(jobvl == jobvr == 'V') - throw(ArgumentError(lazy"sense = '$sense' requires jobvl = 'V' and jobvr = 'V'")) - end - n = checksquare(A) - ldvl = 0 - if jobvl == 'V' - ldvl = n - elseif jobvl == 'N' - ldvl = 0 - else - throw(ArgumentError(lazy"jobvl must be 'V' or 'N', but $jobvl was passed")) - end - ldvr = 0 - if jobvr == 'V' - ldvr = n - elseif jobvr == 'N' - ldvr = 0 - else - throw(ArgumentError(lazy"jobvr must be 'V' or 'N', but $jobvr was passed")) - end - chkfinite(A) # balancing routines don't support NaNs and Infs - lda = max(1,stride(A,2)) - wr = similar(A, $elty, n) - wi = similar(A, $elty, n) - VL = similar(A, $elty, ldvl, n) - VR = similar(A, $elty, ldvr, n) - ilo = Ref{BlasInt}() - ihi = Ref{BlasInt}() - scale = similar(A, $elty, n) - abnrm = Ref{$elty}() - rconde = similar(A, $elty, n) - rcondv = similar(A, $elty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iworksize = 0 - if sense == 'N' || sense == 'E' - iworksize = 0 - elseif sense == 'V' || sense == 'B' - iworksize = 2*n - 2 - else - throw(ArgumentError(lazy"sense must be 'N', 'E', 'V' or 'B', but $sense was passed")) - end - iwork = Vector{BlasInt}(undef, iworksize) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($geevx), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Clong, Clong, Clong, Clong), - balanc, jobvl, jobvr, sense, - n, A, lda, wr, - wi, VL, max(1,ldvl), VR, - max(1,ldvr), ilo, ihi, scale, - abnrm, rconde, rcondv, work, - lwork, iwork, info, - 1, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - A, wr, wi, VL, VR, ilo[], ihi[], scale, abnrm[], rconde, rcondv - end - - # SUBROUTINE DGGEV( JOBVL, JOBVR, N, A, LDA, B, LDB, ALPHAR, ALPHAI, - # $ BETA, VL, LDVL, VR, LDVR, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBVL, JOBVR - # INTEGER INFO, LDA, LDB, LDVL, LDVR, LWORK, N - # * .. - # * .. Array Arguments .. 
- # DOUBLE PRECISION A( LDA, * ), ALPHAI( * ), ALPHAR( * ), - # $ B( LDB, * ), BETA( * ), VL( LDVL, * ), - # $ VR( LDVR, * ), WORK( * ) - function ggev!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - chkstride1(A,B) - n, m = checksquare(A,B) - if n != m - throw(DimensionMismatch(lazy"A has dimensions $(size(A)), and B has dimensions $(size(B)), but A and B must have the same size")) - end - ldvl = 0 - if jobvl == 'V' - ldvl = n - elseif jobvl == 'N' - ldvl = 1 - else - throw(ArgumentError(lazy"jobvl must be 'V' or 'N', but $jobvl was passed")) - end - ldvr = 0 - if jobvr == 'V' - ldvr = n - elseif jobvr == 'N' - ldvr = 1 - else - throw(ArgumentError(lazy"jobvr must be 'V' or 'N', but $jobvr was passed")) - end - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - alphar = similar(A, $elty, n) - alphai = similar(A, $elty, n) - beta = similar(A, $elty, n) - vl = similar(A, $elty, ldvl, n) - vr = similar(A, $elty, ldvr, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ggev), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong), - jobvl, jobvr, n, A, - lda, B, ldb, alphar, - alphai, beta, vl, ldvl, - vr, ldvr, work, lwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - alphar, alphai, beta, vl, vr - end - - # SUBROUTINE DGGEV3( JOBVL, JOBVR, N, A, LDA, B, LDB, ALPHAR, ALPHAI, - # $ BETA, VL, LDVL, VR, LDVR, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBVL, JOBVR - # INTEGER INFO, LDA, LDB, LDVL, LDVR, LWORK, N - # * .. - # * .. Array Arguments .. 
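# Editor's illustrative sketch (not part of the patch): generalized eigenvalues of
# the pencil (A, B) via the real ggev! wrapper above. For real inputs the driver
# returns the real and imaginary parts of alpha separately, so the eigenvalues are
# (alphar .+ im .* alphai) ./ beta.
using LinearAlgebra
A = rand(4, 4); B = rand(4, 4)
alphar, alphai, beta, vl, vr = LinearAlgebra.LAPACK.ggev!('N', 'V', copy(A), copy(B))
complex.(alphar, alphai) ./ beta     # compare with eigvals(A, B), up to ordering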
- # DOUBLE PRECISION A( LDA, * ), ALPHAI( * ), ALPHAR( * ), - # $ B( LDB, * ), BETA( * ), VL( LDVL, * ), - # $ VR( LDVR, * ), WORK( * ) - function ggev3!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - chkstride1(A,B) - n, m = checksquare(A,B) - if n != m - throw(DimensionMismatch(lazy"A has dimensions $(size(A)), and B has dimensions $(size(B)), but A and B must have the same size")) - end - ldvl = 0 - if jobvl == 'V' - ldvl = n - elseif jobvl == 'N' - ldvl = 1 - else - throw(ArgumentError(lazy"jobvl must be 'V' or 'N', but $jobvl was passed")) - end - ldvr = 0 - if jobvr == 'V' - ldvr = n - elseif jobvr == 'N' - ldvr = 1 - else - throw(ArgumentError(lazy"jobvr must be 'V' or 'N', but $jobvr was passed")) - end - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - alphar = similar(A, $elty, n) - alphai = similar(A, $elty, n) - beta = similar(A, $elty, n) - vl = similar(A, $elty, ldvl, n) - vr = similar(A, $elty, ldvr, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ggev3), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong), - jobvl, jobvr, n, A, - lda, B, ldb, alphar, - alphai, beta, vl, ldvl, - vr, ldvr, work, lwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - alphar, alphai, beta, vl, vr - end - end -end - -for (geevx, ggev, ggev3, elty, relty) in - ((:zgeevx_,:zggev_,:zggev3_,:ComplexF64,:Float64), - (:cgeevx_,:cggev_,:cggev3_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE ZGEEVX( BALANC, JOBVL, JOBVR, SENSE, N, A, LDA, W, VL, - # LDVL, VR, LDVR, ILO, IHI, SCALE, ABNRM, RCONDE, - # RCONDV, WORK, LWORK, RWORK, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER BALANC, JOBVL, JOBVR, SENSE - # INTEGER IHI, ILO, INFO, LDA, LDVL, LDVR, LWORK, N - # DOUBLE PRECISION ABNRM - # .. - # .. Array Arguments .. 
- # DOUBLE PRECISION RCONDE( * ), RCONDV( * ), RWORK( * ), - # $ SCALE( * ) - # COMPLEX*16 A( LDA, * ), VL( LDVL, * ), VR( LDVR, * ), - # $ W( * ), WORK( * ) - function geevx!(balanc::AbstractChar, jobvl::AbstractChar, jobvr::AbstractChar, sense::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - if balanc ∉ ('N', 'P', 'S', 'B') - throw(ArgumentError(lazy"balanc must be 'N', 'P', 'S', or 'B', but $balanc was passed")) - end - if sense ∉ ('N','E','V','B') - throw(ArgumentError(lazy"sense must be 'N', 'E', 'V' or 'B', but $sense was passed")) - end - if sense ∈ ('E', 'B') && !(jobvl == jobvr == 'V') - throw(ArgumentError(lazy"sense = '$sense' requires jobvl = 'V' and jobvr = 'V'")) - end - n = checksquare(A) - ldvl = 0 - if jobvl == 'V' - ldvl = n - elseif jobvl == 'N' - ldvl = 0 - else - throw(ArgumentError(lazy"jobvl must be 'V' or 'N', but $jobvl was passed")) - end - ldvr = 0 - if jobvr == 'V' - ldvr = n - elseif jobvr == 'N' - ldvr = 0 - else - throw(ArgumentError(lazy"jobvr must be 'V' or 'N', but $jobvr was passed")) - end - chkfinite(A) # balancing routines don't support NaNs and Infs - lda = max(1,stride(A,2)) - w = similar(A, $elty, n) - VL = similar(A, $elty, ldvl, n) - VR = similar(A, $elty, ldvr, n) - ilo = Ref{BlasInt}() - ihi = Ref{BlasInt}() - scale = similar(A, $relty, n) - abnrm = Ref{$relty}() - rconde = similar(A, $relty, n) - rcondv = similar(A, $relty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 2n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($geevx), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$relty}, Ptr{$relty}, - Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ref{BlasInt}, Clong, Clong, Clong, Clong), - balanc, jobvl, jobvr, sense, - n, A, lda, w, - VL, max(1,ldvl), VR, max(1,ldvr), - ilo, ihi, scale, abnrm, - rconde, rcondv, work, lwork, - rwork, info, 1, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - A, w, VL, VR, ilo[], ihi[], scale, abnrm[], rconde, rcondv - end - - # SUBROUTINE ZGGEV( JOBVL, JOBVR, N, A, LDA, B, LDB, ALPHA, BETA, - # $ VL, LDVL, VR, LDVR, WORK, LWORK, RWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBVL, JOBVR - # INTEGER INFO, LDA, LDB, LDVL, LDVR, LWORK, N - # * .. - # * .. Array Arguments .. 
- # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), ALPHA( * ), B( LDB, * ), - # $ BETA( * ), VL( LDVL, * ), VR( LDVR, * ), - # $ WORK( * ) - function ggev!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - chkstride1(A, B) - n, m = checksquare(A, B) - if n != m - throw(DimensionMismatch(lazy"A has dimensions $(size(A)), and B has dimensions $(size(B)), but A and B must have the same size")) - end - ldvl = 0 - if jobvl == 'V' - ldvl = n - elseif jobvl == 'N' - ldvl = 1 - else - throw(ArgumentError(lazy"jobvl must be 'V' or 'N', but $jobvl was passed")) - end - ldvr = 0 - if jobvr == 'V' - ldvr = n - elseif jobvr == 'N' - ldvr = 1 - else - throw(ArgumentError(lazy"jobvr must be 'V' or 'N', but $jobvr was passed")) - end - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - alpha = similar(A, $elty, n) - beta = similar(A, $elty, n) - vl = similar(A, $elty, ldvl, n) - vr = similar(A, $elty, ldvr, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 8n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ggev), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, - Ref{BlasInt}, Clong, Clong), - jobvl, jobvr, n, A, - lda, B, ldb, alpha, - beta, vl, ldvl, vr, - ldvr, work, lwork, rwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - alpha, beta, vl, vr - end - - # SUBROUTINE ZGGEV3( JOBVL, JOBVR, N, A, LDA, B, LDB, ALPHA, BETA, - # $ VL, LDVL, VR, LDVR, WORK, LWORK, RWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBVL, JOBVR - # INTEGER INFO, LDA, LDB, LDVL, LDVR, LWORK, N - # * .. - # * .. Array Arguments .. 
- # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), ALPHA( * ), B( LDB, * ), - # $ BETA( * ), VL( LDVL, * ), VR( LDVR, * ), - # $ WORK( * ) - function ggev3!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - chkstride1(A, B) - n, m = checksquare(A, B) - if n != m - throw(DimensionMismatch(lazy"A has dimensions $(size(A)), and B has dimensions $(size(B)), but A and B must have the same size")) - end - ldvl = 0 - if jobvl == 'V' - ldvl = n - elseif jobvl == 'N' - ldvl = 1 - else - throw(ArgumentError(lazy"jobvl must be 'V' or 'N', but $jobvl was passed")) - end - ldvr = 0 - if jobvr == 'V' - ldvr = n - elseif jobvr == 'N' - ldvr = 1 - else - throw(ArgumentError(lazy"jobvr must be 'V' or 'N', but $jobvr was passed")) - end - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - alpha = similar(A, $elty, n) - beta = similar(A, $elty, n) - vl = similar(A, $elty, ldvl, n) - vr = similar(A, $elty, ldvr, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 8n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ggev3), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, - Ref{BlasInt}, Clong, Clong), - jobvl, jobvr, n, A, - lda, B, ldb, alpha, - beta, vl, ldvl, vr, - ldvr, work, lwork, rwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - end - end - alpha, beta, vl, vr - end - end -end - -""" - geevx!(balanc, jobvl, jobvr, sense, A) -> (A, w, VL, VR, ilo, ihi, scale, abnrm, rconde, rcondv) - -Finds the eigensystem of `A` with matrix balancing. If `jobvl = N`, the -left eigenvectors of `A` aren't computed. If `jobvr = N`, the right -eigenvectors of `A` aren't computed. If `jobvl = V` or `jobvr = V`, the -corresponding eigenvectors are computed. If `balanc = N`, no balancing is -performed. If `balanc = P`, `A` is permuted but not scaled. If -`balanc = S`, `A` is scaled but not permuted. If `balanc = B`, `A` is -permuted and scaled. If `sense = N`, no reciprocal condition numbers are -computed. If `sense = E`, reciprocal condition numbers are computed for -the eigenvalues only. If `sense = V`, reciprocal condition numbers are -computed for the right eigenvectors only. If `sense = B`, reciprocal -condition numbers are computed for the right eigenvectors and the -eigenvectors. If `sense = E,B`, the right and left eigenvectors must be -computed. -""" -geevx!(balanc::AbstractChar, jobvl::AbstractChar, jobvr::AbstractChar, sense::AbstractChar, A::AbstractMatrix) - -""" - ggev!(jobvl, jobvr, A, B) -> (alpha, beta, vl, vr) - -Finds the generalized eigendecomposition of `A` and `B`. If `jobvl = N`, -the left eigenvectors aren't computed. If `jobvr = N`, the right -eigenvectors aren't computed. If `jobvl = V` or `jobvr = V`, the -corresponding eigenvectors are computed. -""" -ggev!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) - -""" - ggev3!(jobvl, jobvr, A, B) -> (alpha, beta, vl, vr) - -Finds the generalized eigendecomposition of `A` and `B` using a blocked -algorithm. If `jobvl = N`, the left eigenvectors aren't computed. If -`jobvr = N`, the right eigenvectors aren't computed. If `jobvl = V` or -`jobvr = V`, the corresponding eigenvectors are computed. 
This function -requires LAPACK 3.6.0. -""" -ggev3!(jobvl::AbstractChar, jobvr::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) - -# One step incremental condition estimation of max/min singular values -for (laic1, elty) in - ((:dlaic1_,:Float64), - (:slaic1_,:Float32)) - @eval begin - # SUBROUTINE DLAIC1( JOB, J, X, SEST, W, GAMMA, SESTPR, S, C ) - # - # .. Scalar Arguments .. - # INTEGER J, JOB - # DOUBLE PRECISION C, GAMMA, S, SEST, SESTPR - # .. - # .. Array Arguments .. - # DOUBLE PRECISION W( J ), X( J ) - function laic1!(job::Integer, x::AbstractVector{$elty}, - sest::$elty, w::AbstractVector{$elty}, gamma::$elty) - require_one_based_indexing(x, w) - @chkvalidparam 1 job (1,2) - j = length(x) - if j != length(w) - throw(DimensionMismatch(lazy"vectors must have same length, but length of x is $j and length of w is $(length(w))")) - end - sestpr = Ref{$elty}() - s = Ref{$elty}() - c = Ref{$elty}() - ccall((@blasfunc($laic1), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{$elty}, - Ptr{$elty}, Ref{$elty}, Ref{$elty}, Ref{$elty}, - Ref{$elty}), - job, j, x, sest, - w, gamma, sestpr, s, - c) - sestpr[], s[], c[] - end - end -end -for (laic1, elty, relty) in - ((:zlaic1_,:ComplexF64,:Float64), - (:claic1_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE ZLAIC1( JOB, J, X, SEST, W, GAMMA, SESTPR, S, C ) - # - # .. Scalar Arguments .. - # INTEGER J, JOB - # DOUBLE PRECISION SEST, SESTPR - # COMPLEX*16 C, GAMMA, S - # .. - # .. Array Arguments .. - # COMPLEX*16 W( J ), X( J ) - function laic1!(job::Integer, x::AbstractVector{$elty}, - sest::$relty, w::AbstractVector{$elty}, gamma::$elty) - require_one_based_indexing(x, w) - @chkvalidparam 1 job (1,2) - j = length(x) - if j != length(w) - throw(DimensionMismatch(lazy"vectors must have same length, but length of x is $j and length of w is $(length(w))")) - end - sestpr = Ref{$relty}() - s = Ref{$elty}() - c = Ref{$elty}() - ccall((@blasfunc($laic1), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{$relty}, - Ptr{$elty}, Ref{$elty}, Ref{$relty}, Ref{$elty}, - Ref{$elty}), - job, j, x, sest, - w, gamma, sestpr, s, - c) - sestpr[], s[], c[] - end - end -end - -# (GT) General tridiagonal, decomposition, solver and direct solver -for (gtsv, gttrf, gttrs, elty) in - ((:dgtsv_,:dgttrf_,:dgttrs_,:Float64), - (:sgtsv_,:sgttrf_,:sgttrs_,:Float32), - (:zgtsv_,:zgttrf_,:zgttrs_,:ComplexF64), - (:cgtsv_,:cgttrf_,:cgttrs_,:ComplexF32)) - @eval begin - # SUBROUTINE DGTSV( N, NRHS, DL, D, DU, B, LDB, INFO ) - # .. Scalar Arguments .. - # INTEGER INFO, LDB, N, NRHS - # .. Array Arguments .. 
- # DOUBLE PRECISION B( LDB, * ), D( * ), DL( * ), DU( * ) - function gtsv!(dl::AbstractVector{$elty}, d::AbstractVector{$elty}, du::AbstractVector{$elty}, - B::AbstractVecOrMat{$elty}) - require_one_based_indexing(dl, d, du, B) - chkstride1(B, dl, d, du) - n = length(d) - if !(n >= length(dl) >= n - 1) - throw(DimensionMismatch(lazy"subdiagonal has length $(length(dl)), but should be $n or $(n - 1)")) - end - if !(n >= length(du) >= n - 1) - throw(DimensionMismatch(lazy"superdiagonal has length $(length(du)), but should be $n or $(n - 1)")) - end - if n != size(B,1) - throw(DimensionMismatch(lazy"B has leading dimension $(size(B,1)), but should have $n")) - end - if n == 0 - return B # Early exit if possible - end - info = Ref{BlasInt}() - ccall((@blasfunc($gtsv), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - n, size(B,2), dl, d, du, B, max(1,stride(B,2)), info) - chklapackerror(info[]) - B - end - - # SUBROUTINE DGTTRF( N, DL, D, DU, DU2, IPIV, INFO ) - # .. Scalar Arguments .. - # INTEGER INFO, N - # .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION D( * ), DL( * ), DU( * ), DU2( * ) - function gttrf!(dl::AbstractVector{$elty}, d::AbstractVector{$elty}, du::AbstractVector{$elty}) - require_one_based_indexing(dl, d, du) - chkstride1(dl,d,du) - n = length(d) - if length(dl) != n - 1 - throw(DimensionMismatch(lazy"subdiagonal has length $(length(dl)), but should be $(n - 1)")) - end - if length(du) != n - 1 - throw(DimensionMismatch(lazy"superdiagonal has length $(length(du)), but should be $(n - 1)")) - end - du2 = similar(d, $elty, n-2) - ipiv = similar(d, BlasInt, n) - info = Ref{BlasInt}() - ccall((@blasfunc($gttrf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ptr{BlasInt}, Ref{BlasInt}), - n, dl, d, du, du2, ipiv, info) - chklapackerror(info[]) - dl, d, du, du2, ipiv - end - - # SUBROUTINE DGTTRS( TRANS, N, NRHS, DL, D, DU, DU2, IPIV, B, LDB, INFO ) - # .. Scalar Arguments .. - # CHARACTER TRANS - # INTEGER INFO, LDB, N, NRHS - # .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION B( LDB, * ), D( * ), DL( * ), DU( * ), DU2( * ) - function gttrs!(trans::AbstractChar, dl::AbstractVector{$elty}, d::AbstractVector{$elty}, - du::AbstractVector{$elty}, du2::AbstractVector{$elty}, ipiv::AbstractVector{BlasInt}, - B::AbstractVecOrMat{$elty}) - require_one_based_indexing(dl, d, du, du2, ipiv, B) - chktrans(trans) - chkstride1(B, ipiv, dl, d, du, du2) - n = length(d) - if length(dl) != n - 1 - throw(DimensionMismatch(lazy"subdiagonal has length $(length(dl)), but should be $(n - 1)")) - end - if length(du) != n - 1 - throw(DimensionMismatch(lazy"superdiagonal has length $(length(du)), but should be $(n - 1)")) - end - if n != size(B,1) - throw(DimensionMismatch(lazy"B has leading dimension $(size(B,1)), but should have $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($gttrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - trans, n, size(B,2), dl, d, du, du2, ipiv, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - end -end - -""" - gtsv!(dl, d, du, B) - -Solves the equation `A * X = B` where `A` is a tridiagonal matrix with -`dl` on the subdiagonal, `d` on the diagonal, and `du` on the -superdiagonal. - -Overwrites `B` with the solution `X` and returns it. 
-""" -gtsv!(dl::AbstractVector, d::AbstractVector, du::AbstractVector, B::AbstractVecOrMat) - -""" - gttrf!(dl, d, du) -> (dl, d, du, du2, ipiv) - -Finds the `LU` factorization of a tridiagonal matrix with `dl` on the -subdiagonal, `d` on the diagonal, and `du` on the superdiagonal. - -Modifies `dl`, `d`, and `du` in-place and returns them and the second -superdiagonal `du2` and the pivoting vector `ipiv`. -""" -gttrf!(dl::AbstractVector, d::AbstractVector, du::AbstractVector) - -""" - gttrs!(trans, dl, d, du, du2, ipiv, B) - -Solves the equation `A * X = B` (`trans = N`), `transpose(A) * X = B` (`trans = T`), -or `adjoint(A) * X = B` (`trans = C`) using the `LU` factorization computed by -`gttrf!`. `B` is overwritten with the solution `X`. -""" -gttrs!(trans::AbstractChar, dl::AbstractVector, d::AbstractVector, du::AbstractVector, du2::AbstractVector, - ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) - -## (OR) orthogonal (or UN, unitary) matrices, extractors and multiplication -for (orglq, orgqr, orgql, orgrq, ormlq, ormqr, ormql, ormrq, gemqrt, elty) in - ((:dorglq_,:dorgqr_,:dorgql_,:dorgrq_,:dormlq_,:dormqr_,:dormql_,:dormrq_,:dgemqrt_,:Float64), - (:sorglq_,:sorgqr_,:sorgql_,:sorgrq_,:sormlq_,:sormqr_,:sormql_,:sormrq_,:sgemqrt_,:Float32), - (:zunglq_,:zungqr_,:zungql_,:zungrq_,:zunmlq_,:zunmqr_,:zunmql_,:zunmrq_,:zgemqrt_,:ComplexF64), - (:cunglq_,:cungqr_,:cungql_,:cungrq_,:cunmlq_,:cunmqr_,:cunmql_,:cunmrq_,:cgemqrt_,:ComplexF32)) - @eval begin - # SUBROUTINE DORGLQ( M, N, K, A, LDA, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, K, LDA, LWORK, M, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function orglq!(A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}, k::Integer = length(tau)) - require_one_based_indexing(A, tau) - chkstride1(A,tau) - n = size(A, 2) - m = min(n, size(A, 1)) - if k > m - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= m = $m")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($orglq), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, k, A, max(1,stride(A,2)), tau, work, lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - if m < size(A,1) - A[1:m,:] - else - A - end - end - - # SUBROUTINE DORGQR( M, N, K, A, LDA, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, K, LDA, LWORK, M, N - # * .. Array Arguments .. 
- # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function orgqr!(A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}, k::Integer = length(tau)) - require_one_based_indexing(A, tau) - chkstride1(A,tau) - m = size(A, 1) - n = min(m, size(A, 2)) - if k > n - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= n = $n")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($orgqr), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, k, A, - max(1,stride(A,2)), tau, work, lwork, - info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - if n < size(A,2) - A[:,1:n] - else - A - end - end - - # SUBROUTINE DORGQL( M, N, K, A, LDA, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, K, LDA, LWORK, M, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function orgql!(A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}, k::Integer = length(tau)) - require_one_based_indexing(A, tau) - chkstride1(A,tau) - m = size(A, 1) - n = min(m, size(A, 2)) - if k > n - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= n = $n")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($orgql), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, k, A, - max(1,stride(A,2)), tau, work, lwork, - info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - if n < size(A,2) - A[:,1:n] - else - A - end - end - - # SUBROUTINE DORGRQ( M, N, K, A, LDA, TAU, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # INTEGER INFO, K, LDA, LWORK, M, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function orgrq!(A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}, k::Integer = length(tau)) - require_one_based_indexing(A, tau) - chkstride1(A,tau) - m, n = size(A) - if n < m - throw(DimensionMismatch(lazy"input matrix A has dimensions ($m,$n), but cannot have fewer columns than rows")) - end - if k > n - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= n = $n")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($orgrq), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - m, n, k, A, - max(1,stride(A,2)), tau, work, lwork, - info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A - end - - # SUBROUTINE DORMLQ( SIDE, TRANS, M, N, K, A, LDA, TAU, C, LDC, - # WORK, LWORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER SIDE, TRANS - # INTEGER INFO, K, LDA, LDC, LWORK, M, N - # .. Array Arguments .. 
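# Editor's illustrative sketch (not part of the patch): materializing the thin Q
# factor from the compact output of geqrf! (defined elsewhere in this file) using
# the orgqr! wrapper above.
using LinearAlgebra
A = rand(6, 4)
F, tau = LinearAlgebra.LAPACK.geqrf!(copy(A))
Q = LinearAlgebra.LAPACK.orgqr!(F, tau)   # 6×4 with orthonormal columns
Q' * Q ≈ I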
- # DOUBLE PRECISION A( LDA, * ), C( LDC, * ), TAU( * ), WORK( * ) - function ormlq!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix{$elty}, - tau::AbstractVector{$elty}, C::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, tau, C) - chktrans(trans) - chkside(side) - chkstride1(A, C, tau) - m,n = ndims(C) == 2 ? size(C) : (size(C, 1), 1) - nA = size(A, 2) - k = length(tau) - if side == 'L' && m != nA - throw(DimensionMismatch(lazy"for a left-sided multiplication, the first dimension of C, $m, must equal the second dimension of A, $nA")) - end - if side == 'R' && n != nA - throw(DimensionMismatch(lazy"for a right-sided multiplication, the second dimension of C, $n, must equal the second dimension of A, $nA")) - end - if side == 'L' && k > m - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= m = $m")) - end - if side == 'R' && k > n - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= n = $n")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ormlq), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - side, trans, m, n, k, A, max(1,stride(A,2)), tau, - C, max(1,stride(C,2)), work, lwork, info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - C - end - - # SUBROUTINE DORMQR( SIDE, TRANS, M, N, K, A, LDA, TAU, C, LDC, - # WORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER SIDE, TRANS - # INTEGER INFO, K, LDA, LDC, M, N - # .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), C( LDC, * ), TAU( * ), WORK( * ) - function ormqr!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix{$elty}, - tau::AbstractVector{$elty}, C::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, tau, C) - chktrans(trans) - chkside(side) - chkstride1(A, C, tau) - m,n = ndims(C) == 2 ? size(C) : (size(C, 1), 1) - mA = size(A, 1) - k = length(tau) - if side == 'L' && m != mA - throw(DimensionMismatch(lazy"for a left-sided multiplication, the first dimension of C, $m, must equal the second dimension of A, $mA")) - end - if side == 'R' && n != mA - throw(DimensionMismatch(lazy"for a right-sided multiplication, the second dimension of C, $m, must equal the second dimension of A, $mA")) - end - if side == 'L' && k > m - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= m = $m")) - end - if side == 'R' && k > n - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= n = $n")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ormqr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong), - side, trans, m, n, - k, A, max(1,stride(A,2)), tau, - C, max(1, stride(C,2)), work, lwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - C - end - - # SUBROUTINE DORMQL( SIDE, TRANS, M, N, K, A, LDA, TAU, C, LDC, - # WORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER SIDE, TRANS - # INTEGER INFO, K, LDA, LDC, M, N - # .. 
Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), C( LDC, * ), TAU( * ), WORK( * ) - function ormql!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix{$elty}, - tau::AbstractVector{$elty}, C::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, tau, C) - chktrans(trans) - chkside(side) - chkstride1(A, C, tau) - m,n = ndims(C) == 2 ? size(C) : (size(C, 1), 1) - mA = size(A, 1) - k = length(tau) - if side == 'L' && m != mA - throw(DimensionMismatch(lazy"for a left-sided multiplication, the first dimension of C, $m, must equal the second dimension of A, $mA")) - end - if side == 'R' && n != mA - throw(DimensionMismatch(lazy"for a right-sided multiplication, the second dimension of C, $m, must equal the second dimension of A, $mA")) - end - if side == 'L' && k > m - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= m = $m")) - end - if side == 'R' && k > n - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= n = $n")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ormql), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong), - side, trans, m, n, - k, A, max(1,stride(A,2)), tau, - C, max(1, stride(C,2)), work, lwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - C - end - - # SUBROUTINE DORMRQ( SIDE, TRANS, M, N, K, A, LDA, TAU, C, LDC, - # WORK, LWORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER SIDE, TRANS - # INTEGER INFO, K, LDA, LDC, LWORK, M, N - # .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), C( LDC, * ), TAU( * ), WORK( * ) - function ormrq!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix{$elty}, - tau::AbstractVector{$elty}, C::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, tau, C) - chktrans(trans) - chkside(side) - chkstride1(A, C, tau) - m,n = ndims(C) == 2 ? 
size(C) : (size(C, 1), 1) - nA = size(A, 2) - k = length(tau) - if side == 'L' && m != nA - throw(DimensionMismatch(lazy"for a left-sided multiplication, the first dimension of C, $m, must equal the second dimension of A, $nA")) - end - if side == 'R' && n != nA - throw(DimensionMismatch(lazy"for a right-sided multiplication, the second dimension of C, $m, must equal the second dimension of A, $nA")) - end - if side == 'L' && k > m - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= m = $m")) - end - if side == 'R' && k > n - throw(DimensionMismatch(lazy"invalid number of reflectors: k = $k should be <= n = $n")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ormrq), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - side, trans, m, n, k, A, max(1,stride(A,2)), tau, - C, max(1,stride(C,2)), work, lwork, info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - C - end - - function gemqrt!(side::AbstractChar, trans::AbstractChar, V::AbstractMatrix{$elty}, T::AbstractMatrix{$elty}, C::AbstractVecOrMat{$elty}) - require_one_based_indexing(V, T, C) - chktrans(trans) - chkside(side) - chkstride1(V, T, C) - m,n = ndims(C) == 2 ? size(C) : (size(C, 1), 1) - nb, k = size(T) - if k == 0 - return C - end - if side == 'L' - if !(0 <= k <= m) - throw(DimensionMismatch(lazy"wrong value for k = $k: must be between 0 and $m")) - end - if m != size(V,1) - throw(DimensionMismatch(lazy"first dimensions of C, $m, and V, $(size(V,1)) must match")) - end - ldv = stride(V,2) - if ldv < max(1, m) - throw(DimensionMismatch(lazy"Q and C don't fit! The stride of V, $ldv, is too small")) - end - wss = n*k - elseif side == 'R' - if !(0 <= k <= n) - throw(DimensionMismatch(lazy"wrong value for k = $k: must be between 0 and $n")) - end - if n != size(V,1) - throw(DimensionMismatch(lazy"second dimension of C, $n, and first dimension of V, $(size(V,1)) must match")) - end - ldv = stride(V,2) - if ldv < max(1, n) - throw(DimensionMismatch(lazy"Q and C don't fit! The stride of V, $ldv, is too small")) - end - wss = m*k - end - if !(1 <= nb <= k) - throw(DimensionMismatch(lazy"wrong value for nb = $nb, which must be between 1 and $k")) - end - ldc = stride(C, 2) - work = Vector{$elty}(undef, wss) - info = Ref{BlasInt}() - ccall((@blasfunc($gemqrt), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Clong, Clong), - side, trans, m, n, - k, nb, V, ldv, - T, max(1,stride(T,2)), C, max(1,ldc), - work, info, 1, 1) - chklapackerror(info[]) - return C - end - end -end - -""" - orglq!(A, tau, k = length(tau)) - -Explicitly finds the matrix `Q` of a `LQ` factorization after calling -`gelqf!` on `A`. Uses the output of `gelqf!`. `A` is overwritten by `Q`. -""" -orglq!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) - -""" - orgqr!(A, tau, k = length(tau)) - -Explicitly finds the matrix `Q` of a `QR` factorization after calling -`geqrf!` on `A`. Uses the output of `geqrf!`. `A` is overwritten by `Q`. 
-""" -orgqr!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) - -""" - orgql!(A, tau, k = length(tau)) - -Explicitly finds the matrix `Q` of a `QL` factorization after calling -`geqlf!` on `A`. Uses the output of `geqlf!`. `A` is overwritten by `Q`. -""" -orgql!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) - -""" - orgrq!(A, tau, k = length(tau)) - -Explicitly finds the matrix `Q` of a `RQ` factorization after calling -`gerqf!` on `A`. Uses the output of `gerqf!`. `A` is overwritten by `Q`. -""" -orgrq!(A::AbstractMatrix, tau::AbstractVector, k::Integer = length(tau)) - -""" - ormlq!(side, trans, A, tau, C) - -Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * C` -(`trans = C`) for `side = L` or the equivalent right-sided multiplication -for `side = R` using `Q` from a `LQ` factorization of `A` computed using -`gelqf!`. `C` is overwritten. -""" -ormlq!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractVecOrMat) - -""" - ormqr!(side, trans, A, tau, C) - -Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * C` -(`trans = C`) for `side = L` or the equivalent right-sided multiplication -for `side = R` using `Q` from a `QR` factorization of `A` computed using -`geqrf!`. `C` is overwritten. -""" -ormqr!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractVecOrMat) - -""" - ormql!(side, trans, A, tau, C) - -Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * C` -(`trans = C`) for `side = L` or the equivalent right-sided multiplication -for `side = R` using `Q` from a `QL` factorization of `A` computed using -`geqlf!`. `C` is overwritten. -""" -ormql!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractVecOrMat) - -""" - ormrq!(side, trans, A, tau, C) - -Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * C` -(`trans = C`) for `side = L` or the equivalent right-sided multiplication -for `side = R` using `Q` from a `RQ` factorization of `A` computed using -`gerqf!`. `C` is overwritten. -""" -ormrq!(side::AbstractChar, trans::AbstractChar, A::AbstractMatrix, tau::AbstractVector, C::AbstractVecOrMat) - -""" - gemqrt!(side, trans, V, T, C) - -Computes `Q * C` (`trans = N`), `transpose(Q) * C` (`trans = T`), `adjoint(Q) * C` -(`trans = C`) for `side = L` or the equivalent right-sided multiplication -for `side = R` using `Q` from a `QR` factorization of `A` computed using -`geqrt!`. `C` is overwritten. -""" -gemqrt!(side::AbstractChar, trans::AbstractChar, V::AbstractMatrix, T::AbstractMatrix, C::AbstractVecOrMat) - -# (PO) positive-definite symmetric matrices, -for (posv, potrf, potri, potrs, pstrf, elty, rtyp) in - ((:dposv_,:dpotrf_,:dpotri_,:dpotrs_,:dpstrf_,:Float64,:Float64), - (:sposv_,:spotrf_,:spotri_,:spotrs_,:spstrf_,:Float32,:Float32), - (:zposv_,:zpotrf_,:zpotri_,:zpotrs_,:zpstrf_,:ComplexF64,:Float64), - (:cposv_,:cpotrf_,:cpotri_,:cpotrs_,:cpstrf_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE DPOSV( UPLO, N, NRHS, A, LDA, B, LDB, INFO ) - #* .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # .. Array Arguments .. 
- # DOUBLE PRECISION A( LDA, * ), B( LDB, * ) - function posv!(uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A, B) - n = checksquare(A) - chkuplo(uplo) - if size(B,1) != n - throw(DimensionMismatch(lazy"first dimension of B, $(size(B,1)), and size of A, ($n,$n), must match!")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($posv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), B, max(1,stride(B,2)), info, 1) - chkargsok(info[]) - chkposdef(info[]) - A, B - end - - # SUBROUTINE DPOTRF( UPLO, N, A, LDA, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ) - function potrf!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - checksquare(A) - chkuplo(uplo) - lda = max(1,stride(A,2)) - if lda == 0 - return A, 0 - end - info = Ref{BlasInt}() - ccall((@blasfunc($potrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, size(A,1), A, lda, info, 1) - chkargsok(info[]) - #info[] > 0 means the leading minor of order info[] is not positive definite - #ordinarily, throw Exception here, but return error code here - #this simplifies isposdef! and factorize - return A, info[] # info stored in Cholesky - end - - # SUBROUTINE DPOTRI( UPLO, N, A, LDA, INFO ) - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, N - # .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ) - function potri!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - chkuplo(uplo) - info = Ref{BlasInt}() - ccall((@blasfunc($potri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, size(A,1), A, max(1,stride(A,2)), info, 1) - chkargsok(info[]) - chknonsingular(info[]) - A - end - - # SUBROUTINE DPOTRS( UPLO, N, NRHS, A, LDA, B, LDB, INFO ) - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ) - function potrs!(uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A, B) - n = checksquare(A) - chkuplo(uplo) - nrhs = size(B,2) - if size(B,1) != n - throw(DimensionMismatch(lazy"first dimension of B, $(size(B,1)), and size of A, ($n,$n), must match!")) - end - lda = max(1,stride(A,2)) - if lda == 0 || nrhs == 0 - return B - end - ldb = max(1,stride(B,2)) - info = Ref{BlasInt}() - ccall((@blasfunc($potrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, nrhs, A, - lda, B, ldb, info, 1) - chklapackerror(info[]) - return B - end - - # SUBROUTINE DPSTRF( UPLO, N, A, LDA, PIV, RANK, TOL, WORK, INFO ) - # .. Scalar Arguments .. - # DOUBLE PRECISION TOL - # INTEGER INFO, LDA, N, RANK - # CHARACTER UPLO - # .. Array Arguments .. 
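# Editor's illustrative sketch (not part of the patch): Cholesky factorization and
# solve with the potrf!/potrs! wrappers above; potrf! reports success through `info`.
using LinearAlgebra
A = rand(4, 4); S = A'A + 4I        # symmetric positive definite
b = rand(4)
C, info = LinearAlgebra.LAPACK.potrf!('U', copy(S))
x = LinearAlgebra.LAPACK.potrs!('U', C, copy(b))
info == 0 && S * x ≈ b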
- # DOUBLE PRECISION A( LDA, * ), WORK( 2*N ) - # INTEGER PIV( N ) - function pstrf!(uplo::AbstractChar, A::AbstractMatrix{$elty}, tol::Real) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - piv = similar(A, BlasInt, n) - rank = Vector{BlasInt}(undef, 1) - work = Vector{$rtyp}(undef, 2n) - info = Ref{BlasInt}() - ccall((@blasfunc($pstrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{BlasInt}, Ref{$rtyp}, Ptr{$rtyp}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), piv, rank, tol, work, info, 1) - chkargsok(info[]) - A, piv, rank[1], info[] #Stored in CholeskyPivoted - end - end -end - -""" - posv!(uplo, A, B) -> (A, B) - -Finds the solution to `A * X = B` where `A` is a symmetric or Hermitian -positive definite matrix. If `uplo = U` the upper Cholesky decomposition -of `A` is computed. If `uplo = L` the lower Cholesky decomposition of `A` -is computed. `A` is overwritten by its Cholesky decomposition. `B` is -overwritten with the solution `X`. -""" -posv!(uplo::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) - -""" - potrf!(uplo, A) - -Computes the Cholesky (upper if `uplo = U`, lower if `uplo = L`) -decomposition of positive-definite matrix `A`. `A` is overwritten and -returned with an info code. -""" -potrf!(uplo::AbstractChar, A::AbstractMatrix) - -""" - potri!(uplo, A) - -Computes the inverse of positive-definite matrix `A` after calling -`potrf!` to find its (upper if `uplo = U`, lower if `uplo = L`) Cholesky -decomposition. - -`A` is overwritten by its inverse and returned. -""" -potri!(uplo::AbstractChar, A::AbstractMatrix) - -""" - potrs!(uplo, A, B) - -Finds the solution to `A * X = B` where `A` is a symmetric or Hermitian -positive definite matrix whose Cholesky decomposition was computed by -`potrf!`. If `uplo = U` the upper Cholesky decomposition of `A` was -computed. If `uplo = L` the lower Cholesky decomposition of `A` was -computed. `B` is overwritten with the solution `X`. -""" -potrs!(uplo::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) - -""" - pstrf!(uplo, A, tol) -> (A, piv, rank, info) - -Computes the (upper if `uplo = U`, lower if `uplo = L`) pivoted Cholesky -decomposition of positive-definite matrix `A` with a user-set tolerance -`tol`. `A` is overwritten by its Cholesky decomposition. - -Returns `A`, the pivots `piv`, the rank of `A`, and an `info` code. If `info = 0`, -the factorization succeeded. If `info = i > 0 `, then `A` is indefinite or -rank-deficient. -""" -pstrf!(uplo::AbstractChar, A::AbstractMatrix, tol::Real) - -# (PT) positive-definite, symmetric, tri-diagonal matrices -# Direct solvers for general tridiagonal and symmetric positive-definite tridiagonal -for (ptsv, pttrf, elty, relty) in - ((:dptsv_,:dpttrf_,:Float64,:Float64), - (:sptsv_,:spttrf_,:Float32,:Float32), - (:zptsv_,:zpttrf_,:ComplexF64,:Float64), - (:cptsv_,:cpttrf_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE DPTSV( N, NRHS, D, E, B, LDB, INFO ) - # .. Scalar Arguments .. - # INTEGER INFO, LDB, N, NRHS - # .. Array Arguments .. 
- # DOUBLE PRECISION B( LDB, * ), D( * ), E( * ) - function ptsv!(D::AbstractVector{$relty}, E::AbstractVector{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(D, E, B) - chkstride1(B, D, E) - n = length(D) - if length(E) != n - 1 - throw(DimensionMismatch(lazy"E has length $(length(E)), but needs $(n - 1)")) - end - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)) but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($ptsv), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - n, size(B,2), D, E, B, max(1,stride(B,2)), info) - chklapackerror(info[]) - B - end - - # SUBROUTINE DPTTRF( N, D, E, INFO ) - # .. Scalar Arguments .. - # INTEGER INFO, N - # .. Array Arguments .. - # DOUBLE PRECISION D( * ), E( * ) - function pttrf!(D::AbstractVector{$relty}, E::AbstractVector{$elty}) - require_one_based_indexing(D, E) - chkstride1(D, E) - n = length(D) - if length(E) != n - 1 - throw(DimensionMismatch(lazy"E has length $(length(E)), but needs $(n - 1)")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($pttrf), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, Ptr{BlasInt}), - n, D, E, info) - chklapackerror(info[]) - D, E - end - end -end - -""" - ptsv!(D, E, B) - -Solves `A * X = B` for positive-definite tridiagonal `A`. `D` is the -diagonal of `A` and `E` is the off-diagonal. `B` is overwritten with the -solution `X` and returned. -""" -ptsv!(D::AbstractVector, E::AbstractVector, B::AbstractVecOrMat) - -""" - pttrf!(D, E) - -Computes the LDLt factorization of a positive-definite tridiagonal matrix -with `D` as diagonal and `E` as off-diagonal. `D` and `E` are overwritten -and returned. -""" -pttrf!(D::AbstractVector, E::AbstractVector) - -for (pttrs, elty, relty) in - ((:dpttrs_,:Float64,:Float64), - (:spttrs_,:Float32,:Float32)) - @eval begin - # SUBROUTINE DPTTRS( N, NRHS, D, E, B, LDB, INFO ) - # .. Scalar Arguments .. - # INTEGER INFO, LDB, N, NRHS - # .. Array Arguments .. - # DOUBLE PRECISION B( LDB, * ), D( * ), E( * ) - function pttrs!(D::AbstractVector{$relty}, E::AbstractVector{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(D, E, B) - chkstride1(B, D, E) - n = length(D) - if length(E) != n - 1 - throw(DimensionMismatch(lazy"E has length $(length(E)), but needs $(n - 1)")) - end - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)) but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($pttrs), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}), - n, size(B,2), D, E, B, max(1,stride(B,2)), info) - chklapackerror(info[]) - B - end - end -end - -for (pttrs, elty, relty) in - ((:zpttrs_,:ComplexF64,:Float64), - (:cpttrs_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE ZPTTRS( UPLO, N, NRHS, D, E, B, LDB, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDB, N, NRHS - # * .. - # * .. Array Arguments .. 
- # DOUBLE PRECISION D( * ) - # COMPLEX*16 B( LDB, * ), E( * ) - function pttrs!(uplo::AbstractChar, D::AbstractVector{$relty}, E::AbstractVector{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(D, E, B) - chkstride1(B, D, E) - chkuplo(uplo) - n = length(D) - if length(E) != n - 1 - throw(DimensionMismatch(lazy"E has length $(length(E)), but needs $(n - 1)")) - end - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)) but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($pttrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$relty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), D, E, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - end -end - -""" - pttrs!(D, E, B) - -Solves `A * X = B` for positive-definite tridiagonal `A` with diagonal -`D` and off-diagonal `E` after computing `A`'s LDLt factorization using -`pttrf!`. `B` is overwritten with the solution `X`. -""" -pttrs!(D::AbstractVector, E::AbstractVector, B::AbstractVecOrMat) - -## (TR) triangular matrices: solver and inverse -for (trtri, trtrs, elty) in - ((:dtrtri_,:dtrtrs_,:Float64), - (:strtri_,:strtrs_,:Float32), - (:ztrtri_,:ztrtrs_,:ComplexF64), - (:ctrtri_,:ctrtrs_,:ComplexF32)) - @eval begin - # SUBROUTINE DTRTRI( UPLO, DIAG, N, A, LDA, INFO ) - #* .. Scalar Arguments .. - # CHARACTER DIAG, UPLO - # INTEGER INFO, LDA, N - # .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ) - function trtri!(uplo::AbstractChar, diag::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - chkdiag(diag) - lda = max(1,stride(A, 2)) - info = Ref{BlasInt}() - ccall((@blasfunc($trtri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong), - uplo, diag, n, A, lda, info, 1, 1) - chklapackerror(info[]) - A - end - - # SUBROUTINE DTRTRS( UPLO, TRANS, DIAG, N, NRHS, A, LDA, B, LDB, INFO ) - # * .. Scalar Arguments .. - # CHARACTER DIAG, TRANS, UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ) - function trtrs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, - A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chktrans(trans) - chkdiag(diag) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)) but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($trtrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Clong, Clong, Clong), - uplo, trans, diag, n, size(B,2), A, max(1,stride(A,2)), - B, max(1,stride(B,2)), info, - 1, 1, 1) - chklapackerror(info[], trtrs!) - B - end - end -end -chklapackerror_positive(ret, ::typeof(trtrs!)) = chknonsingular(ret) - -""" - trtri!(uplo, diag, A) - -Finds the inverse of (upper if `uplo = U`, lower if `uplo = L`) -triangular matrix `A`. If `diag = N`, `A` has non-unit diagonal elements. -If `diag = U`, all diagonal elements of `A` are one. `A` is overwritten -with its inverse. 
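A minimal usage sketch (editorial illustration, not part of the original patch): assuming the `LAPACK` submodule of `LinearAlgebra` is in scope and using an arbitrary upper-triangular example matrix, the wrapper documented here can be exercised as

    using LinearAlgebra: LAPACK
    A = [2.0 1.0; 0.0 3.0]          # upper triangular example
    LAPACK.trtri!('U', 'N', A)      # overwrites the 'U' triangle of A with inv(A); 'N' = non-unit diagonal

The call mutates `A` in place; the companion solve wrapper `trtrs!` is documented just below.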
-""" -trtri!(uplo::AbstractChar, diag::AbstractChar, A::AbstractMatrix) - -""" - trtrs!(uplo, trans, diag, A, B) - -Solves `A * X = B` (`trans = N`), `transpose(A) * X = B` (`trans = T`), or -`adjoint(A) * X = B` (`trans = C`) for (upper if `uplo = U`, lower if `uplo = L`) -triangular matrix `A`. If `diag = N`, `A` has non-unit diagonal elements. -If `diag = U`, all diagonal elements of `A` are one. `B` is overwritten -with the solution `X`. -""" -trtrs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) - -#Eigenvector computation and condition number estimation -for (trcon, trevc, trrfs, elty) in - ((:dtrcon_,:dtrevc_,:dtrrfs_,:Float64), - (:strcon_,:strevc_,:strrfs_,:Float32)) - @eval begin - # SUBROUTINE DTRCON( NORM, UPLO, DIAG, N, A, LDA, RCOND, WORK, - # IWORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER DIAG, NORM, UPLO - # INTEGER INFO, LDA, N - # DOUBLE PRECISION RCOND - # .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) - function trcon!(norm::AbstractChar, uplo::AbstractChar, diag::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - chkdiag(diag) - n = checksquare(A) - chkuplo(uplo) - @chkvalidparam 1 norm ('O', '1', 'I') - rcond = Ref{$elty}() - work = Vector{$elty}(undef, 3n) - iwork = Vector{BlasInt}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($trcon), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, - Clong, Clong, Clong), - norm, uplo, diag, n, - A, max(1,stride(A,2)), rcond, work, iwork, info, - 1, 1, 1) - chklapackerror(info[]) - rcond[] - end - - # SUBROUTINE DTREVC( SIDE, HOWMNY, SELECT, N, T, LDT, VL, LDVL, VR, - # LDVR, MM, M, WORK, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER HOWMNY, SIDE - # INTEGER INFO, LDT, LDVL, LDVR, M, MM, N - # .. - # .. Array Arguments .. 
- # LOGICAL SELECT( * ) - # DOUBLE PRECISION T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), - #$ WORK( * ) - Base.@constprop :aggressive function trevc!(side::AbstractChar, howmny::AbstractChar, select::AbstractVector{BlasInt}, T::AbstractMatrix{$elty}, - VL::AbstractMatrix{$elty} = similar(T), - VR::AbstractMatrix{$elty} = similar(T)) - require_one_based_indexing(select, T, VL, VR) - # Extract - if side ∉ ('L','R','B') - throw(ArgumentError(lazy"side argument must be 'L' (left eigenvectors), 'R' (right eigenvectors), or 'B' (both), got $side")) - end - @chkvalidparam 2 howmny ('A', 'B', 'S') - n, mm = checksquare(T), size(VL, 2) - ldt, ldvl, ldvr = stride(T, 2), stride(VL, 2), stride(VR, 2) - - # Check - chkstride1(T, select, VL, VR) - - # Allocate - m = Ref{BlasInt}() - work = Vector{$elty}(undef, 3n) - info = Ref{BlasInt}() - - ccall((@blasfunc($trevc), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Clong, Clong), - side, howmny, select, n, - T, ldt, VL, ldvl, - VR, ldvr, mm, m, - work, info, 1, 1) - chklapackerror(info[]) - - #Decide what exactly to return - if howmny == 'S' #compute selected eigenvectors - if side == 'L' #left eigenvectors only - return select, VL[:,1:m[]] - elseif side == 'R' #right eigenvectors only - return select, VR[:,1:m[]] - else #side == 'B' #both eigenvectors - return select, VL[:,1:m[]], VR[:,1:m[]] - end - else #compute all eigenvectors - if side == 'L' #left eigenvectors only - return VL[:,1:m[]] - elseif side == 'R' #right eigenvectors only - return VR[:,1:m[]] - else #side == 'B' #both eigenvectors - return VL[:,1:m[]], VR[:,1:m[]] - end - end - end - - # SUBROUTINE DTRRFS( UPLO, TRANS, DIAG, N, NRHS, A, LDA, B, LDB, X, - # LDX, FERR, BERR, WORK, IWORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER DIAG, TRANS, UPLO - # INTEGER INFO, LDA, LDB, LDX, N, NRHS - # .. Array Arguments .. 
- # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ), BERR( * ), FERR( * ), - #$ WORK( * ), X( LDX, * ) - function trrfs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, - A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}, X::AbstractVecOrMat{$elty}, - Ferr::AbstractVector{$elty} = similar(B, $elty, size(B,2)), - Berr::AbstractVector{$elty} = similar(B, $elty, size(B,2))) - require_one_based_indexing(A, B, X, Ferr, Berr) - chkstride1(A, B, X, Ferr, Berr) - chktrans(trans) - chkuplo(uplo) - chkdiag(diag) - n = size(A,2) - nrhs = size(B,2) - if nrhs != size(X,2) - throw(DimensionMismatch(lazy"second dimensions of B, $nrhs, and X, $(size(X,2)), must match")) - end - work = Vector{$elty}(undef, 3n) - iwork = Vector{BlasInt}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($trrfs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong, Clong, Clong), - uplo, trans, diag, n, - nrhs, A, max(1,stride(A,2)), B, max(1,stride(B,2)), X, max(1,stride(X,2)), - Ferr, Berr, work, iwork, info, 1, 1, 1) - chklapackerror(info[]) - Ferr, Berr - end - end -end - -for (trcon, trevc, trrfs, elty, relty) in - ((:ztrcon_,:ztrevc_,:ztrrfs_,:ComplexF64,:Float64), - (:ctrcon_,:ctrevc_,:ctrrfs_,:ComplexF32, :Float32)) - @eval begin - # SUBROUTINE ZTRCON( NORM, UPLO, DIAG, N, A, LDA, RCOND, WORK, - # RWORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER DIAG, NORM, UPLO - # INTEGER INFO, LDA, N - # DOUBLE PRECISION RCOND - # .. Array Arguments .. - # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function trcon!(norm::AbstractChar, uplo::AbstractChar, diag::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - @chkvalidparam 1 norm ('O', '1', 'I') - chkuplo(uplo) - chkdiag(diag) - rcond = Ref{$relty}(1) - work = Vector{$elty}(undef, 2n) - rwork = Vector{$relty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($trcon), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{$relty}, Ptr{$elty}, Ptr{$relty}, Ptr{BlasInt}, - Clong, Clong, Clong), - norm, uplo, diag, n, - A, max(1,stride(A,2)), rcond, work, rwork, info, - 1, 1, 1) - chklapackerror(info[]) - rcond[] - end - - # SUBROUTINE ZTREVC( SIDE, HOWMNY, SELECT, N, T, LDT, VL, LDVL, VR, - # LDVR, MM, M, WORK, RWORK, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER HOWMNY, SIDE - # INTEGER INFO, LDT, LDVL, LDVR, M, MM, N - # .. - # .. Array Arguments .. 
- # LOGICAL SELECT( * ) - # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 T( LDT, * ), VL( LDVL, * ), VR( LDVR, * ), - #$ WORK( * ) - function trevc!(side::AbstractChar, howmny::AbstractChar, select::AbstractVector{BlasInt}, T::AbstractMatrix{$elty}, - VL::AbstractMatrix{$elty} = similar(T), - VR::AbstractMatrix{$elty} = similar(T)) - require_one_based_indexing(select, T, VL, VR) - # Extract - n, mm = checksquare(T), size(VL, 2) - ldt, ldvl, ldvr = stride(T, 2), stride(VL, 2), stride(VR, 2) - - # Check - chkstride1(T, select, VL, VR) - if side ∉ ('L','R','B') - throw(ArgumentError(lazy"side argument must be 'L' (left eigenvectors), 'R' (right eigenvectors), or 'B' (both), got $side")) - end - @chkvalidparam 2 howmny ('A', 'B', 'S') - - # Allocate - m = Ref{BlasInt}() - work = Vector{$elty}(undef, 2n) - rwork = Vector{$relty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($trevc), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ptr{$relty}, Ref{BlasInt}, Clong, Clong), - side, howmny, select, n, - T, ldt, VL, ldvl, - VR, ldvr, mm, m, - work, rwork, info, 1, 1) - chklapackerror(info[]) - - #Decide what exactly to return - if howmny == 'S' #compute selected eigenvectors - if side == 'L' #left eigenvectors only - return select, VL[:,1:m[]] - elseif side == 'R' #right eigenvectors only - return select, VR[:,1:m[]] - else #side=='B' #both eigenvectors - return select, VL[:,1:m[]], VR[:,1:m[]] - end - else #compute all eigenvectors - if side == 'L' #left eigenvectors only - return VL[:,1:m[]] - elseif side == 'R' #right eigenvectors only - return VR[:,1:m[]] - else #side=='B' #both eigenvectors - return VL[:,1:m[]], VR[:,1:m[]] - end - end - end - - # SUBROUTINE ZTRRFS( UPLO, TRANS, DIAG, N, NRHS, A, LDA, B, LDB, X, - # LDX, FERR, BERR, WORK, IWORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER DIAG, TRANS, UPLO - # INTEGER INFO, LDA, LDB, LDX, N, NRHS - # .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ), BERR( * ), FERR( * ), - #$ WORK( * ), X( LDX, * ) - function trrfs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, - A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}, X::AbstractVecOrMat{$elty}, - Ferr::AbstractVector{$relty} = similar(B, $relty, size(B,2)), - Berr::AbstractVector{$relty} = similar(B, $relty, size(B,2))) - require_one_based_indexing(A, B, X, Ferr, Berr) - chkstride1(A, B, X, Ferr, Berr) - chktrans(trans) - chkuplo(uplo) - chkdiag(diag) - n = size(A,2) - nrhs = size(B,2) - if nrhs != size(X,2) - throw(DimensionMismatch(lazy"second dimensions of B, $nrhs, and X, $(size(X,2)), must match")) - end - work = Vector{$elty}(undef, 2n) - rwork = Vector{$relty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($trrfs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, Ptr{$relty}, Ref{BlasInt}, Clong, Clong, Clong), - uplo, trans, diag, n, - nrhs, A, max(1,stride(A,2)), B, max(1,stride(B,2)), X, max(1,stride(X,2)), - Ferr, Berr, work, rwork, info, 1, 1, 1) - chklapackerror(info[]) - Ferr, Berr - end - end -end - -""" - trcon!(norm, uplo, diag, A) - -Finds the reciprocal condition number of (upper if `uplo = U`, lower if -`uplo = L`) triangular matrix `A`. If `diag = N`, `A` has non-unit -diagonal elements. 
If `diag = U`, all diagonal elements of `A` are one. -If `norm = I`, the condition number is found in the infinity norm. If -`norm = O` or `1`, the condition number is found in the one norm. -""" -trcon!(norm::AbstractChar, uplo::AbstractChar, diag::AbstractChar, A::AbstractMatrix) - -""" - trevc!(side, howmny, select, T, VL = similar(T), VR = similar(T)) - -Finds the eigensystem of an upper triangular matrix `T`. If `side = R`, -the right eigenvectors are computed. If `side = L`, the left -eigenvectors are computed. If `side = B`, both sets are computed. If -`howmny = A`, all eigenvectors are found. If `howmny = B`, all -eigenvectors are found and backtransformed using `VL` and `VR`. If -`howmny = S`, only the eigenvectors corresponding to the values in -`select` are computed. -""" -trevc!(side::AbstractChar, howmny::AbstractChar, select::AbstractVector{BlasInt}, T::AbstractMatrix, - VL::AbstractMatrix = similar(T), VR::AbstractMatrix = similar(T)) - -""" - trrfs!(uplo, trans, diag, A, B, X, Ferr, Berr) -> (Ferr, Berr) - -Estimates the error in the solution to `A * X = B` (`trans = N`), -`transpose(A) * X = B` (`trans = T`), `adjoint(A) * X = B` (`trans = C`) for `side = L`, -or the equivalent equations a right-handed `side = R` `X * A` after -computing `X` using `trtrs!`. If `uplo = U`, `A` is upper triangular. -If `uplo = L`, `A` is lower triangular. If `diag = N`, `A` has non-unit -diagonal elements. If `diag = U`, all diagonal elements of `A` are one. -`Ferr` and `Berr` are optional inputs. `Ferr` is the forward error and -`Berr` is the backward error, each component-wise. -""" -trrfs!(uplo::AbstractChar, trans::AbstractChar, diag::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat, - X::AbstractVecOrMat, Ferr::AbstractVector, Berr::AbstractVector) - -## (ST) Symmetric tridiagonal - eigendecomposition -for (stev, stebz, stegr, stein, elty) in - ((:dstev_,:dstebz_,:dstegr_,:dstein_,:Float64), - (:sstev_,:sstebz_,:sstegr_,:sstein_,:Float32) -# , (:zstev_,:ComplexF64) Need to rewrite for ZHEEV, rwork, etc. -# , (:cstev_,:ComplexF32) - ) - @eval begin - function stev!(job::AbstractChar, dv::AbstractVector{$elty}, ev::AbstractVector{$elty}) - require_one_based_indexing(dv, ev) - @chkvalidparam 1 job ('N', 'V') - chkstride1(dv, ev) - n = length(dv) - if length(ev) != n - 1 && length(ev) != n - throw(DimensionMismatch(lazy"ev has length $(length(ev)) but needs one less than or equal to dv's length, $n)")) - end - Zmat = similar(dv, $elty, (n, job != 'N' ? n : 0)) - work = Vector{$elty}(undef, max(1, 2n-2)) - info = Ref{BlasInt}() - ccall((@blasfunc($stev), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - job, n, dv, ev, Zmat, n, work, info, 1) - chklapackerror(info[]) - dv, Zmat - end - - #* DSTEBZ computes the eigenvalues of a symmetric tridiagonal - #* matrix T. The user may ask for all eigenvalues, all eigenvalues - #* in the half-open interval (VL, VU], or the IL-th through IU-th - #* eigenvalues. 
- function stebz!(range::AbstractChar, order::AbstractChar, vl::$elty, vu::$elty, il::Integer, iu::Integer, abstol::Real, dv::AbstractVector{$elty}, ev::AbstractVector{$elty}) - require_one_based_indexing(dv, ev) - @chkvalidparam 1 range ('A', 'V', 'I') - @chkvalidparam 2 order ('B', 'E') - chkstride1(dv, ev) - n = length(dv) - if length(ev) != n - 1 - throw(DimensionMismatch(lazy"ev has length $(length(ev)) but needs one less than dv's length, $n)")) - end - m = Ref{BlasInt}() - nsplit = Vector{BlasInt}(undef, 1) - w = similar(dv, $elty, n) - tmp = 0.0 - iblock = similar(dv, BlasInt,n) - isplit = similar(dv, BlasInt,n) - work = Vector{$elty}(undef, 4*n) - iwork = Vector{BlasInt}(undef, 3*n) - info = Ref{BlasInt}() - ccall((@blasfunc($stebz), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{$elty}, - Ref{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, - Ptr{$elty}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, - Ptr{BlasInt}, Ref{BlasInt}, Clong, Clong), - range, order, n, vl, - vu, il, iu, abstol, - dv, ev, m, nsplit, - w, iblock, isplit, work, - iwork, info, 1, 1) - chklapackerror(info[]) - w[1:m[]], iblock[1:m[]], isplit[1:nsplit[1]] - end - - function stegr!(jobz::AbstractChar, range::AbstractChar, dv::AbstractVector{$elty}, ev::AbstractVector{$elty}, vl::Real, vu::Real, il::Integer, iu::Integer) - require_one_based_indexing(dv, ev) - @chkvalidparam 1 jobz ('N', 'V') - @chkvalidparam 2 range ('A', 'V', 'I') - chkstride1(dv, ev) - n = length(dv) - ne = length(ev) - if ne == n - 1 - eev = [ev; zero($elty)] - elseif ne == n - eev = copy(ev) - eev[n] = zero($elty) - else - throw(DimensionMismatch(lazy"ev has length $ne but needs one less than or equal to dv's length, $n)")) - end - - abstol = Vector{$elty}(undef, 1) - m = Ref{BlasInt}() - w = similar(dv, $elty, n) - ldz = jobz == 'N' ? 1 : n - Z = similar(dv, $elty, ldz, range == 'I' ? iu-il+1 : n) - isuppz = similar(dv, BlasInt, 2*size(Z, 2)) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - liwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] and liwork as iwork[1] - ccall((@blasfunc($stegr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ref{$elty}, Ref{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Clong, Clong), - jobz, range, n, dv, - eev, vl, vu, il, - iu, abstol, m, w, - Z, ldz, isuppz, work, - lwork, iwork, liwork, info, - 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - liwork = iwork[1] - resize!(iwork, liwork) - end - end - m[] == length(w) ? w : w[1:m[]], m[] == size(Z, 2) ? 
Z : Z[:,1:m[]] - end - - function stein!(dv::AbstractVector{$elty}, ev_in::AbstractVector{$elty}, w_in::AbstractVector{$elty}, iblock_in::AbstractVector{BlasInt}, isplit_in::AbstractVector{BlasInt}) - require_one_based_indexing(dv, ev_in, w_in, iblock_in, isplit_in) - chkstride1(dv, ev_in, w_in, iblock_in, isplit_in) - n = length(dv) - ne = length(ev_in) - if ne == n - 1 - ev = [ev_in; zero($elty)] - elseif ne == n - ev = copy(ev_in) - ev[n] = zero($elty) - else - throw(DimensionMismatch(lazy"ev_in has length $ne but needs one less than or equal to dv's length, $n)")) - end - ldz = n #Leading dimension - #Number of eigenvalues to find - if !(1 <= length(w_in) <= n) - throw(DimensionMismatch(lazy"w_in has length $(length(w_in)), but needs to be between 1 and $n")) - end - m = length(w_in) - #If iblock and isplit are invalid input, assume worst-case block partitioning, - # i.e. set the block scheme to be the entire matrix - iblock = similar(dv, BlasInt,n) - isplit = similar(dv, BlasInt,n) - w = similar(dv, $elty,n) - if length(iblock_in) < m #Not enough block specifications - iblock[1:m] = fill(BlasInt(1), m) - w[1:m] = sort(w_in) - else - iblock[1:m] = iblock_in - w[1:m] = w_in #Assume user has sorted the eigenvalues properly - end - if length(isplit_in) < 1 #Not enough block specifications - isplit[1] = n - else - isplit[1:length(isplit_in)] = isplit_in - end - z = similar(dv, $elty,(n,m)) - work = Vector{$elty}(undef, 5*n) - iwork = Vector{BlasInt}(undef, n) - ifail = Vector{BlasInt}(undef, m) - info = Ref{BlasInt}() - ccall((@blasfunc($stein), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, - Ptr{BlasInt}), - n, dv, ev, m, w, iblock, isplit, z, ldz, work, iwork, ifail, info) - chklapackerror(info[]) - if any(ifail .!= 0) - # TODO: better error message / type - error("failed to converge eigenvectors:\n$(findall(!iszero, ifail))") - end - z - end - end -end -stegr!(jobz::AbstractChar, dv::AbstractVector, ev::AbstractVector) = stegr!(jobz, 'A', dv, ev, 0.0, 0.0, 0, 0) - -# Allow user to skip specification of iblock and isplit -stein!(dv::AbstractVector, ev::AbstractVector, w_in::AbstractVector) = stein!(dv, ev, w_in, zeros(BlasInt,0), zeros(BlasInt,0)) -# Allow user to specify just one eigenvector to get in stein! -stein!(dv::AbstractVector, ev::AbstractVector, eval::Real) = stein!(dv, ev, [eval], zeros(BlasInt,0), zeros(BlasInt,0)) - -""" - stev!(job, dv, ev) -> (dv, Zmat) - -Computes the eigensystem for a symmetric tridiagonal matrix with `dv` as -diagonal and `ev` as off-diagonal. If `job = N` only the eigenvalues are -found and returned in `dv`. If `job = V` then the eigenvectors are also found -and returned in `Zmat`. -""" -stev!(job::AbstractChar, dv::AbstractVector, ev::AbstractVector) - -""" - stebz!(range, order, vl, vu, il, iu, abstol, dv, ev) -> (dv, iblock, isplit) - -Computes the eigenvalues for a symmetric tridiagonal matrix with `dv` as -diagonal and `ev` as off-diagonal. If `range = A`, all the eigenvalues -are found. If `range = V`, the eigenvalues in the half-open interval -`(vl, vu]` are found. If `range = I`, the eigenvalues with indices between -`il` and `iu` are found. If `order = B`, eigvalues are ordered within a -block. If `order = E`, they are ordered across all the blocks. -`abstol` can be set as a tolerance for convergence. 
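A hedged example of the two symmetric-tridiagonal drivers (editorial illustration; the matrix values are made up, and `LAPACK` is assumed to be the `LinearAlgebra.LAPACK` submodule):

    using LinearAlgebra: LAPACK
    dv = [2.0, 2.0, 2.0]            # diagonal of the tridiagonal matrix
    ev = [-1.0, -1.0]               # off-diagonal
    w, Z = LAPACK.stev!('V', copy(dv), copy(ev))   # all eigenvalues w and eigenvectors Z
    w2, iblock, isplit = LAPACK.stebz!('A', 'E', 0.0, 0.0, 0, 0, 1e-12, copy(dv), copy(ev))

Both routines overwrite their vector arguments, so copies are passed here.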
-""" -stebz!(range::AbstractChar, order::AbstractChar, vl, vu, il::Integer, iu::Integer, abstol::Real, dv::AbstractVector, ev::AbstractVector) - -""" - stegr!(jobz, range, dv, ev, vl, vu, il, iu) -> (w, Z) - -Computes the eigenvalues (`jobz = N`) or eigenvalues and eigenvectors -(`jobz = V`) for a symmetric tridiagonal matrix with `dv` as diagonal -and `ev` as off-diagonal. If `range = A`, all the eigenvalues -are found. If `range = V`, the eigenvalues in the half-open interval -`(vl, vu]` are found. If `range = I`, the eigenvalues with indices between -`il` and `iu` are found. The eigenvalues are returned in `w` and the eigenvectors -in `Z`. -""" -stegr!(jobz::AbstractChar, range::AbstractChar, dv::AbstractVector, ev::AbstractVector, vl::Real, vu::Real, il::Integer, iu::Integer) - -""" - stein!(dv, ev_in, w_in, iblock_in, isplit_in) - -Computes the eigenvectors for a symmetric tridiagonal matrix with `dv` -as diagonal and `ev_in` as off-diagonal. `w_in` specifies the input -eigenvalues for which to find corresponding eigenvectors. `iblock_in` -specifies the submatrices corresponding to the eigenvalues in `w_in`. -`isplit_in` specifies the splitting points between the submatrix blocks. -""" -stein!(dv::AbstractVector, ev_in::AbstractVector, w_in::AbstractVector, iblock_in::AbstractVector{BlasInt}, isplit_in::AbstractVector{BlasInt}) - -## (SY) symmetric real matrices - Bunch-Kaufman decomposition, -## solvers (direct and factored) and inverse. -for (syconv, sysv, sytrf, sytri, sytrs, elty) in - ((:dsyconv_,:dsysv_,:dsytrf_,:dsytri_,:dsytrs_,:Float64), - (:ssyconv_,:ssysv_,:ssytrf_,:ssytri_,:ssytrs_,:Float32)) - @eval begin - # SUBROUTINE DSYCONV( UPLO, WAY, N, A, LDA, IPIV, WORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO, WAY - # INTEGER INFO, LDA, N - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) - function syconv!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A, ipiv) - chkstride1(A, ipiv) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($syconv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong, Clong), - uplo, 'C', n, A, max(1,stride(A,2)), ipiv, work, info, 1, 1) - chklapackerror(info[]) - A, work - end - - # SUBROUTINE DSYSV( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, WORK, - # LWORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, LWORK, N, NRHS - # .. Array Arguments .. 
- # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ), WORK( * ) - function sysv!(uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A,B) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - ipiv = similar(A, BlasInt, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($sysv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), - work, lwork, info, 1) - chkargsok(info[]) - chknonsingular(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - B, A, ipiv - end - - # SUBROUTINE DSYTRF( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) - function sytrf!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - if n == 0 - return A, ipiv, zero(BlasInt) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($sytrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, A, stride(A,2), ipiv, work, lwork, info, 1) - chkargsok(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - return A, ipiv, info[] - end - - function sytrf!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkuplo(uplo) - n = checksquare(A) - ipiv = similar(A, BlasInt, n) - sytrf!(uplo, A, ipiv) - end - - # SUBROUTINE DSYTRI2( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) -# function sytri!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::Vector{BlasInt}) -# chkstride1(A) -# n = checksquare(A) -# chkuplo(uplo) -# work = Vector{$elty}(undef, 1) -# lwork = BlasInt(-1) -# info = Ref{BlasInt}() -# for i in 1:2 -# ccall((@blasfunc($sytri), libblastrampoline), Cvoid, -# (Ptr{UInt8}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, -# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong), -# &uplo, &n, A, &max(1,stride(A,2)), ipiv, work, &lwork, info, 1) -# @assertargsok -# chknonsingular(info[]) -# if lwork < 0 -# lwork = BlasInt(real(work[1])) -# work = Vector{$elty}(undef, lwork) -# end -# end -# A -# end - - # SUBROUTINE DSYTRI( UPLO, N, A, LDA, IPIV, WORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, N - # .. Array Arguments .. 
- # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) - function sytri!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A, ipiv) - chkstride1(A, ipiv) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($sytri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) - chkargsok(info[]) - chknonsingular(info[]) - A - end - - # SUBROUTINE DSYTRS( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ) - function sytrs!(uplo::AbstractChar, A::AbstractMatrix{$elty}, - ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, ipiv, B) - chkstride1(A,B,ipiv) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($sytrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - end -end - -# Rook-pivoting variants of symmetric-matrix algorithms -for (sysv, sytrf, sytri, sytrs, syconvf, elty) in - ((:dsysv_rook_,:dsytrf_rook_,:dsytri_rook_,:dsytrs_rook_,:dsyconvf_rook_,:Float64), - (:ssysv_rook_,:ssytrf_rook_,:ssytri_rook_,:ssytrs_rook_,:ssyconvf_rook_,:Float32)) - @eval begin - # SUBROUTINE DSYSV_ROOK(UPLO, N, NRHS, A, LDA, IPIV, B, LDB, WORK, - # LWORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, LWORK, N, NRHS - # .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ), WORK( * ) - function sysv_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A,B) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - ipiv = similar(A, BlasInt, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($sysv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), - work, lwork, info, 1) - chkargsok(info[]) - chknonsingular(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - B, A, ipiv - end - - # SUBROUTINE DSYTRF_ROOK(UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. Array Arguments .. 
- # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) - function sytrf_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - ipiv = similar(A, BlasInt, n) - if n == 0 - return A, ipiv, zero(BlasInt) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($sytrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, A, stride(A,2), ipiv, work, lwork, info, 1) - chkargsok(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - return A, ipiv, info[] - end - - # SUBROUTINE DSYTRI_ROOK( UPLO, N, A, LDA, IPIV, WORK, INFO ) - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, N - # .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) - function sytri_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A, ipiv) - chkstride1(A, ipiv) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($sytri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) - chkargsok(info[]) - chknonsingular(info[]) - A - end - - # SUBROUTINE DSYTRS_ROOK( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # .. Array Arguments .. - # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ) - function sytrs_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, - ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, ipiv, B) - chkstride1(A,B,ipiv) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($sytrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - - # SUBROUTINE DSYCONVF_ROOK( UPLO, WAY, N, A, LDA, IPIV, E, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER UPLO, WAY - # INTEGER INFO, LDA, N - # .. - # .. Array Arguments .. 
- # INTEGER IPIV( * ) - # DOUBLE PRECISION A( LDA, * ), E( * ) - function syconvf_rook!(uplo::AbstractChar, way::AbstractChar, - A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}, - e::AbstractVector{$elty} = Vector{$elty}(undef, length(ipiv))) - require_one_based_indexing(A, ipiv, e) - # extract - n = checksquare(A) - lda = max(1, stride(A, 2)) - - # check - chkuplo(uplo) - if way != 'C' && way != 'R' - throw(ArgumentError("way must be C or R")) - end - if length(ipiv) != n - throw(ArgumentError(lazy"length of pivot vector was $(length(ipiv)) but should have been $n")) - end - if length(e) != n - throw(ArgumentError(lazy"length of e vector was $(length(e)) but should have been $n")) - end - - # allocate - info = Ref{BlasInt}() - - ccall((@blasfunc($syconvf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, - Clong, Clong), - uplo, way, n, A, - lda, e, ipiv, info, - 1, 1) - - chklapackerror(info[]) - return A, e - end - end -end - -## (SY) hermitian matrices - eigendecomposition, Bunch-Kaufman decomposition, -## solvers (direct and factored) and inverse. -for (syconv, hesv, hetrf, hetri, hetrs, elty, relty) in - ((:zsyconv_,:zhesv_,:zhetrf_,:zhetri_,:zhetrs_,:ComplexF64, :Float64), - (:csyconv_,:chesv_,:chetrf_,:chetri_,:chetrs_,:ComplexF32, :Float32)) - @eval begin - # SUBROUTINE ZSYCONV( UPLO, WAY, N, A, LDA, IPIV, WORK, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER UPLO, WAY - # INTEGER INFO, LDA, N - # .. - # .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function syconv!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A,ipiv) - chkstride1(A,ipiv) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($syconv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong, Clong), - uplo, 'C', n, A, max(1,stride(A,2)), ipiv, work, info, 1, 1) - chklapackerror(info[]) - A, work - end - - # SUBROUTINE ZHESV( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, WORK, - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, LWORK, N, NRHS - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( * ) - function hesv!(uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A,B) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - ipiv = similar(A, BlasInt, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($hesv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), - work, lwork, info, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - B, A, ipiv - end - - # SUBROUTINE ZHETRF( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. 
- # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function hetrf!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i in 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($hetrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, lwork, info, 1) - chkargsok(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, ipiv, info[] - end - - function hetrf!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkuplo(uplo) - n = checksquare(A) - ipiv = similar(A, BlasInt, n) - hetrf!(uplo, A, ipiv) - end - -# SUBROUTINE ZHETRI2( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) -# * .. Scalar Arguments .. -# CHARACTER UPLO -# INTEGER INFO, LDA, LWORK, N -# * .. -# * .. Array Arguments .. -# INTEGER IPIV( * ) -# COMPLEX*16 A( LDA, * ), WORK( * ) -# function hetri!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::Vector{BlasInt}) -# chkstride1(A) -# n = checksquare(A) -# chkuplo(uplo) -# work = Vector{$elty}(undef, 1) -# lwork = BlasInt(-1) -# info = Ref{BlasInt}() -# for i in 1:2 -# ccall((@blasfunc($hetri), libblastrampoline), Cvoid, -# (Ptr{UInt8}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, -# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong), -# &uplo, &n, A, &max(1,stride(A,2)), ipiv, work, &lwork, info, 1) -# chklapackerror(info[]) -# if lwork < 0 -# lwork = BlasInt(real(work[1])) -# work = Vector{$elty}(undef, lwork) -# end -# end -# A -# end - - - # SUBROUTINE ZHETRI( UPLO, N, A, LDA, IPIV, WORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, N - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function hetri!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A, ipiv) - chkstride1(A, ipiv) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($hetri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) - chklapackerror(info[]) - A - end - - # SUBROUTINE ZHETRS( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # * .. - # * .. Array Arguments .. 
- # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ) - function hetrs!(uplo::AbstractChar, A::AbstractMatrix{$elty}, - ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, ipiv, B) - chkuplo(uplo) - chkstride1(A,B,ipiv) - n = checksquare(A) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($hetrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - end -end - -for (hesv, hetrf, hetri, hetrs, elty, relty) in - ((:zhesv_rook_,:zhetrf_rook_,:zhetri_rook_,:zhetrs_rook_,:ComplexF64, :Float64), - (:chesv_rook_,:chetrf_rook_,:chetri_rook_,:chetrs_rook_,:ComplexF32, :Float32)) - @eval begin - # SUBROUTINE ZHESV_ROOK( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, WORK, - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, LWORK, N, NRHS - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( * ) - function hesv_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A,B) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - ipiv = similar(A, BlasInt, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($hesv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), - work, lwork, info, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - B, A, ipiv - end - - # SUBROUTINE ZHETRF_ROOK( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function hetrf_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - ipiv = similar(A, BlasInt, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i in 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($hetrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, lwork, info, 1) - chkargsok(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, ipiv, info[] - end - - # SUBROUTINE ZHETRI_ROOK( UPLO, N, A, LDA, IPIV, WORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, N - # * .. - # * .. Array Arguments .. 
- # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function hetri_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A,ipiv) - chkstride1(A,ipiv) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($hetri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) - chklapackerror(info[]) - A - end - - # SUBROUTINE ZHETRS_ROOK( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ) - function hetrs_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, - ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, ipiv, B) - chkstride1(A,B,ipiv) - chkuplo(uplo) - n = checksquare(A) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($hetrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - end -end - -for (sysv, sytrf, sytri, sytrs, elty, relty) in - ((:zsysv_,:zsytrf_,:zsytri_,:zsytrs_,:ComplexF64, :Float64), - (:csysv_,:csytrf_,:csytri_,:csytrs_,:ComplexF32, :Float32)) - @eval begin - # SUBROUTINE ZSYSV( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, WORK, - # $ LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, LWORK, N, NRHS - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( * ) - function sysv!(uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A,B) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - ipiv = similar(A, BlasInt, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($sysv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), - work, lwork, info, 1) - chkargsok(info[]) - chknonsingular(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - B, A, ipiv - end - - # SUBROUTINE ZSYTRF( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. 
- # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function sytrf!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - if n == 0 - return A, ipiv, zero(BlasInt) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($sytrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, lwork, info, 1) - chkargsok(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, ipiv, info[] - end - - function sytrf!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkuplo(uplo) - n = checksquare(A) - ipiv = similar(A, BlasInt, n) - sytrf!(uplo, A, ipiv) - end - -# SUBROUTINE ZSYTRI2( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) -# * .. Scalar Arguments .. -# CHARACTER UPLO -# INTEGER INFO, LDA, LWORK, N -# * .. -# * .. Array Arguments .. -# INTEGER IPIV( * ) -# COMPLEX*16 A( LDA, * ), WORK( * ) -# function sytri!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::Vector{BlasInt}) -# chkstride1(A) -# n = checksquare(A) -# chkuplo(uplo) -# work = Vector{$elty}(undef, 1) -# lwork = BlasInt(-1) -# info = Ref{BlasInt}() -# for i in 1:2 -# ccall((@blasfunc($sytri), libblastrampoline), Cvoid, -# (Ptr{UInt8}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, -# Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ref{BlasInt}, Clong), -# &uplo, &n, A, &max(1,stride(A,2)), ipiv, work, &lwork, info, 1) -# chklapackerror(info[]) -# if lwork < 0 -# lwork = BlasInt(real(work[1])) -# work = Vector{$elty}(undef, lwork) -# end -# end -# A -# end - - # SUBROUTINE ZSYTRI( UPLO, N, A, LDA, IPIV, WORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, N - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function sytri!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A, ipiv) - chkstride1(A, ipiv) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($sytri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) - chklapackerror(info[]) - A - end - - # SUBROUTINE ZSYTRS( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # * .. - # * .. Array Arguments .. 
- # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ) - function sytrs!(uplo::AbstractChar, A::AbstractMatrix{$elty}, - ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, ipiv, B) - chkstride1(A,B,ipiv) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($sytrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - end -end - -for (sysv, sytrf, sytri, sytrs, syconvf, elty, relty) in - ((:zsysv_rook_,:zsytrf_rook_,:zsytri_rook_,:zsytrs_rook_,:zsyconvf_rook_,:ComplexF64, :Float64), - (:csysv_rook_,:csytrf_rook_,:csytri_rook_,:csytrs_rook_,:csyconvf_rook_,:ComplexF32, :Float32)) - @eval begin - # SUBROUTINE ZSYSV_ROOK(UPLO, N, NRHS, A, LDA, IPIV, B, LDB, WORK, - # $ LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, LWORK, N, NRHS - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( * ) - function sysv_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, B) - chkstride1(A,B) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - ipiv = similar(A, BlasInt, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($sysv), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), - work, lwork, info, 1) - chkargsok(info[]) - chknonsingular(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - B, A, ipiv - end - - # SUBROUTINE ZSYTRF_ROOK( UPLO, N, A, LDA, IPIV, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function sytrf_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplo(uplo) - ipiv = similar(A, BlasInt, n) - if n == 0 - return A, ipiv, zero(BlasInt) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($sytrf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, lwork, info, 1) - chkargsok(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, ipiv, info[] - end - - # SUBROUTINE ZSYTRI_ROOK( UPLO, N, A, LDA, IPIV, WORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, N - # * .. - # * .. Array Arguments .. 
- # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function sytri_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}) - require_one_based_indexing(A, ipiv) - chkstride1(A, ipiv) - n = checksquare(A) - chkuplo(uplo) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($sytri), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, n, A, max(1,stride(A,2)), ipiv, work, info, 1) - chklapackerror(info[]) - A - end - - # SUBROUTINE ZSYTRS_ROOK( UPLO, N, NRHS, A, LDA, IPIV, B, LDB, INFO ) - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LDB, N, NRHS - # * .. - # * .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ) - function sytrs_rook!(uplo::AbstractChar, A::AbstractMatrix{$elty}, - ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat{$elty}) - require_one_based_indexing(A, ipiv, B) - chkstride1(A,B,ipiv) - n = checksquare(A) - chkuplo(uplo) - if n != size(B,1) - throw(DimensionMismatch(lazy"B has first dimension $(size(B,1)), but needs $n")) - end - info = Ref{BlasInt}() - ccall((@blasfunc($sytrs), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, size(B,2), A, max(1,stride(A,2)), ipiv, B, max(1,stride(B,2)), info, 1) - chklapackerror(info[]) - B - end - - # SUBROUTINE ZSYCONVF_ROOK( UPLO, WAY, N, A, LDA, IPIV, E, INFO ) - # - # .. Scalar Arguments .. - # CHARACTER UPLO, WAY - # INTEGER INFO, LDA, N - # .. - # .. Array Arguments .. - # INTEGER IPIV( * ) - # COMPLEX*16 A( LDA, * ), E( * ) - function syconvf_rook!(uplo::AbstractChar, way::AbstractChar, - A::AbstractMatrix{$elty}, ipiv::AbstractVector{BlasInt}, - e::AbstractVector{$elty} = Vector{$elty}(undef, length(ipiv))) - require_one_based_indexing(A, ipiv, e) - chkstride1(A, ipiv, e) - - # extract - n = checksquare(A) - lda = stride(A, 2) - - # check - chkuplo(uplo) - if way != 'C' && way != 'R' - throw(ArgumentError(lazy"way must be 'C' or 'R'")) - end - if length(ipiv) != n - throw(ArgumentError(lazy"length of pivot vector was $(length(ipiv)) but should have been $n")) - end - if length(e) != n - throw(ArgumentError(lazy"length of e vector was $(length(e)) but should have been $n")) - end - - # allocate - info = Ref{BlasInt}() - - ccall((@blasfunc($syconvf), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, - Clong, Clong), - uplo, way, n, A, - max(1, lda), e, ipiv, info, - 1, 1) - - chklapackerror(info[]) - return A, e - end - end -end - -""" - syconv!(uplo, A, ipiv) -> (A, work) - -Converts a symmetric matrix `A` (which has been factorized into a -triangular matrix) into two matrices `L` and `D`. If `uplo = U`, `A` -is upper triangular. If `uplo = L`, it is lower triangular. `ipiv` is -the pivot vector from the triangular factorization. `A` is overwritten -by `L` and `D`. -""" -syconv!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) - -""" - sysv!(uplo, A, B) -> (B, A, ipiv) - -Finds the solution to `A * X = B` for symmetric matrix `A`. If `uplo = U`, -the upper half of `A` is stored. If `uplo = L`, the lower half is stored. -`B` is overwritten by the solution `X`. `A` is overwritten by its -Bunch-Kaufman factorization. `ipiv` contains pivoting information about the -factorization. 
-""" -sysv!(uplo::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) - -""" - sytrf!(uplo, A) -> (A, ipiv, info) - -Computes the Bunch-Kaufman factorization of a symmetric matrix `A`. If -`uplo = U`, the upper half of `A` is stored. If `uplo = L`, the lower -half is stored. - -Returns `A`, overwritten by the factorization, a pivot vector `ipiv`, and -the error code `info` which is a non-negative integer. If `info` is positive -the matrix is singular and the diagonal part of the factorization is exactly -zero at position `info`. -""" -sytrf!(uplo::AbstractChar, A::AbstractMatrix) - -""" - sytrf!(uplo, A, ipiv) -> (A, ipiv, info) - -Computes the Bunch-Kaufman factorization of a symmetric matrix `A`. If -`uplo = U`, the upper half of `A` is stored. If `uplo = L`, the lower -half is stored. - -Returns `A`, overwritten by the factorization, the pivot vector `ipiv`, and -the error code `info` which is a non-negative integer. If `info` is positive -the matrix is singular and the diagonal part of the factorization is exactly -zero at position `info`. -""" -sytrf!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) - -""" - sytri!(uplo, A, ipiv) - -Computes the inverse of a symmetric matrix `A` using the results of -`sytrf!`. If `uplo = U`, the upper half of `A` is stored. If `uplo = L`, -the lower half is stored. `A` is overwritten by its inverse. -""" -sytri!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) - -""" - sytrs!(uplo, A, ipiv, B) - -Solves the equation `A * X = B` for a symmetric matrix `A` using the -results of `sytrf!`. If `uplo = U`, the upper half of `A` is stored. -If `uplo = L`, the lower half is stored. `B` is overwritten by the -solution `X`. -""" -sytrs!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) - - -""" - hesv!(uplo, A, B) -> (B, A, ipiv) - -Finds the solution to `A * X = B` for Hermitian matrix `A`. If `uplo = U`, -the upper half of `A` is stored. If `uplo = L`, the lower half is stored. -`B` is overwritten by the solution `X`. `A` is overwritten by its -Bunch-Kaufman factorization. `ipiv` contains pivoting information about the -factorization. -""" -hesv!(uplo::AbstractChar, A::AbstractMatrix, B::AbstractVecOrMat) - -""" - hetrf!(uplo, A) -> (A, ipiv, info) - -Computes the Bunch-Kaufman factorization of a Hermitian matrix `A`. If -`uplo = U`, the upper half of `A` is stored. If `uplo = L`, the lower -half is stored. - -Returns `A`, overwritten by the factorization, a pivot vector `ipiv`, and -the error code `info` which is a non-negative integer. If `info` is positive -the matrix is singular and the diagonal part of the factorization is exactly -zero at position `info`. -""" -hetrf!(uplo::AbstractChar, A::AbstractMatrix) - -""" - hetrf!(uplo, A, ipiv) -> (A, ipiv, info) - -Computes the Bunch-Kaufman factorization of a Hermitian matrix `A`. If -`uplo = U`, the upper half of `A` is stored. If `uplo = L`, the lower -half is stored. - -Returns `A`, overwritten by the factorization, the pivot vector `ipiv`, and -the error code `info` which is a non-negative integer. If `info` is positive -the matrix is singular and the diagonal part of the factorization is exactly -zero at position `info`. -""" -hetrf!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) - -""" - hetri!(uplo, A, ipiv) - -Computes the inverse of a Hermitian matrix `A` using the results of -`sytrf!`. If `uplo = U`, the upper half of `A` is stored. If `uplo = L`, -the lower half is stored. 
`A` is overwritten by its inverse. -""" -hetri!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}) - -""" - hetrs!(uplo, A, ipiv, B) - -Solves the equation `A * X = B` for a Hermitian matrix `A` using the -results of `sytrf!`. If `uplo = U`, the upper half of `A` is stored. -If `uplo = L`, the lower half is stored. `B` is overwritten by the -solution `X`. -""" -hetrs!(uplo::AbstractChar, A::AbstractMatrix, ipiv::AbstractVector{BlasInt}, B::AbstractVecOrMat) - -for f in (:syevd!, :syev!) - _f = Symbol(:_, f) - @eval function $f(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix) - W, A = $_f(jobz, uplo, A) - jobz == 'V' ? (W, A) : W - end -end - -# Symmetric (real) eigensolvers -for (syev, syevr, syevd, sygvd, elty) in - ((:dsyev_,:dsyevr_,:dsyevd_,:dsygvd_,:Float64), - (:ssyev_,:ssyevr_,:ssyevd_,:ssygvd_,:Float32)) - @eval begin - # SUBROUTINE DSYEV( JOBZ, UPLO, N, A, LDA, W, WORK, LWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBZ, UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), W( * ), WORK( * ) - Base.@constprop :none function _syev!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - @chkvalidparam 1 jobz ('N', 'V') - chkuplo(uplo) - chkstride1(A) - n = checksquare(A) - W = similar(A, $elty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($syev), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - jobz, uplo, n, A, max(1,stride(A,2)), W, work, lwork, info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - W, A - end - - # SUBROUTINE DSYEVR( JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, - # $ ABSTOL, M, W, Z, LDZ, ISUPPZ, WORK, LWORK, - # $ IWORK, LIWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBZ, RANGE, UPLO - # INTEGER IL, INFO, IU, LDA, LDZ, LIWORK, LWORK, M, N - # DOUBLE PRECISION ABSTOL, VL, VU - # * .. - # * .. Array Arguments .. 
- # INTEGER ISUPPZ( * ), IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), W( * ), WORK( * ), Z( LDZ, * ) - function syevr!(jobz::AbstractChar, range::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}, - vl::AbstractFloat, vu::AbstractFloat, il::Integer, iu::Integer, abstol::AbstractFloat) - require_one_based_indexing(A) - @chkvalidparam 1 jobz ('N', 'V') - @chkvalidparam 2 range ('A', 'V', 'I') - chkstride1(A) - n = checksquare(A) - if range == 'I' && !(1 <= il <= iu <= n) - throw(ArgumentError(lazy"illegal choice of eigenvalue indices (il = $il, iu = $iu), which must be between 1 and n = $n")) - end - if range == 'V' && vl >= vu - throw(ArgumentError(lazy"lower boundary, $vl, must be less than upper boundary, $vu")) - end - chkuplofinite(A, uplo) - lda = stride(A,2) - m = Ref{BlasInt}() - W = similar(A, $elty, n) - ldz = n - if jobz == 'N' - Z = similar(A, $elty, ldz, 0) - elseif jobz == 'V' - Z = similar(A, $elty, ldz, n) - end - isuppz = similar(A, BlasInt, 2*n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - liwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] and liwork as iwork[1] - ccall((@blasfunc($syevr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ref{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, Ptr{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong, Clong), - jobz, range, uplo, n, - A, max(1,lda), vl, vu, - il, iu, abstol, m, - W, Z, max(1,ldz), isuppz, - work, lwork, iwork, liwork, - info, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - liwork = iwork[1] - resize!(iwork, liwork) - end - end - W[1:m[]], Z[:,1:(jobz == 'V' ? m[] : 0)] - end - syevr!(jobz::AbstractChar, A::AbstractMatrix{$elty}) = - syevr!(jobz, 'A', 'U', A, 0.0, 0.0, 0, 0, -1.0) - - # SUBROUTINE DSYEVD( JOBZ, UPLO, N, A, LDA, W, WORK, LWORK, - # $ IWORK, LIWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBZ, UPLO - # INTEGER INFO, LDA, LIWORK, LWORK, N - # * .. - # * .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), W( * ), WORK( * ) - Base.@constprop :none function _syevd!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - @chkvalidparam 1 jobz ('N', 'V') - chkstride1(A) - n = checksquare(A) - chkuplofinite(A, uplo) - lda = stride(A,2) - m = Ref{BlasInt}() - W = similar(A, $elty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - liwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] and liwork as iwork[1] - ccall((@blasfunc($syevd), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong), - jobz, uplo, n, A, max(1,lda), - W, work, lwork, iwork, liwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - liwork = iwork[1] - resize!(iwork, liwork) - end - end - W, A - end - - # Generalized eigenproblem - # SUBROUTINE DSYGVD( ITYPE, JOBZ, UPLO, N, A, LDA, B, LDB, W, WORK, - # $ LWORK, IWORK, LIWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBZ, UPLO - # INTEGER INFO, ITYPE, LDA, LDB, LIWORK, LWORK, N - # * .. - # * .. 
Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ), W( * ), WORK( * ) - function sygvd!(itype::Integer, jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - @chkvalidparam 1 itype 1:3 - @chkvalidparam 2 jobz ('N', 'V') - chkuplo(uplo) - chkstride1(A, B) - n, m = checksquare(A, B) - if n != m - throw(DimensionMismatch(lazy"dimensions of A, ($n,$n), and B, ($m,$m), must match")) - end - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - w = similar(A, $elty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - liwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] and liwork as iwork[1] - ccall((@blasfunc($sygvd), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - itype, jobz, uplo, n, - A, lda, B, ldb, - w, work, lwork, iwork, - liwork, info, 1, 1) - chkargsok(info[]) - if i == 1 - lwork = BlasInt(work[1]) - resize!(work, lwork) - liwork = iwork[1] - resize!(iwork, liwork) - end - end - chkposdef(info[]) - w, A, B - end - end -end -# Hermitian eigensolvers -for (syev, syevr, syevd, sygvd, elty, relty) in - ((:zheev_,:zheevr_,:zheevd_,:zhegvd_,:ComplexF64,:Float64), - (:cheev_,:cheevr_,:cheevd_,:chegvd_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE ZHEEV( JOBZ, UPLO, N, A, LDA, W, WORK, LWORK, RWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBZ, UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. - # DOUBLE PRECISION RWORK( * ), W( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - Base.@constprop :none function _syev!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - @chkvalidparam 1 jobz ('N', 'V') - chkstride1(A) - chkuplofinite(A, uplo) - n = checksquare(A) - W = similar(A, $relty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, max(1, 3n-2)) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($syev), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{BlasInt}, - Clong, Clong), - jobz, uplo, n, A, stride(A,2), W, work, lwork, rwork, info, - 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - W, A - end - - # SUBROUTINE ZHEEVR( JOBZ, RANGE, UPLO, N, A, LDA, VL, VU, IL, IU, - # $ ABSTOL, M, W, Z, LDZ, ISUPPZ, WORK, LWORK, - # $ RWORK, LRWORK, IWORK, LIWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBZ, RANGE, UPLO - # INTEGER IL, INFO, IU, LDA, LDZ, LIWORK, LRWORK, LWORK, - # $ M, N - # DOUBLE PRECISION ABSTOL, VL, VU - # * .. - # * .. Array Arguments .. 
- # INTEGER ISUPPZ( * ), IWORK( * ) - # DOUBLE PRECISION RWORK( * ), W( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ), Z( LDZ, * ) - function syevr!(jobz::AbstractChar, range::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}, - vl::AbstractFloat, vu::AbstractFloat, il::Integer, iu::Integer, abstol::AbstractFloat) - require_one_based_indexing(A) - @chkvalidparam 1 jobz ('N', 'V') - @chkvalidparam 2 range ('A', 'V', 'I') - chkstride1(A) - chkuplofinite(A, uplo) - n = checksquare(A) - if range == 'I' && !(1 <= il <= iu <= n) - throw(ArgumentError(lazy"illegal choice of eigenvalue indices (il = $il, iu=$iu), which must be between 1 and n = $n")) - end - if range == 'V' && vl >= vu - throw(ArgumentError(lazy"lower boundary, $vl, must be less than upper boundary, $vu")) - end - lda = max(1,stride(A,2)) - m = Ref{BlasInt}() - W = similar(A, $relty, n) - if jobz == 'N' - ldz = 1 - Z = similar(A, $elty, ldz, 0) - elseif jobz == 'V' - ldz = n - Z = similar(A, $elty, ldz, n) - end - isuppz = similar(A, BlasInt, 2*n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 1) - lrwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - liwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1], lrwork as rwork[1] and liwork as iwork[1] - ccall((@blasfunc($syevr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ref{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Ref{$elty}, Ptr{BlasInt}, - Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ref{BlasInt}, - Ptr{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Clong, Clong, Clong), - jobz, range, uplo, n, - A, lda, vl, vu, - il, iu, abstol, m, - W, Z, ldz, isuppz, - work, lwork, rwork, lrwork, - iwork, liwork, info, - 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - lrwork = BlasInt(rwork[1]) - resize!(rwork, lrwork) - liwork = iwork[1] - resize!(iwork, liwork) - end - end - W[1:m[]], Z[:,1:(jobz == 'V' ? m[] : 0)] - end - syevr!(jobz::AbstractChar, A::AbstractMatrix{$elty}) = - syevr!(jobz, 'A', 'U', A, 0.0, 0.0, 0, 0, -1.0) - - # SUBROUTINE ZHEEVD( JOBZ, UPLO, N, A, LDA, W, WORK, LWORK, RWORK, - # $ LRWORK, IWORK, LIWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBZ, UPLO - # INTEGER INFO, LDA, LIWORK, LRWORK, LWORK, N - # * .. - # * .. Array Arguments .. 
- # INTEGER IWORK( * ) - # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - Base.@constprop :none function _syevd!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - @chkvalidparam 1 jobz ('N', 'V') - chkstride1(A) - chkuplofinite(A, uplo) - n = checksquare(A) - lda = max(1, stride(A,2)) - m = Ref{BlasInt}() - W = similar(A, $relty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 1) - lrwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - liwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1], lrwork as rwork[1] and liwork as iwork[1] - ccall((@blasfunc($syevd), liblapack), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ref{BlasInt}, - Ptr{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - jobz, uplo, n, A, stride(A,2), - W, work, lwork, rwork, lrwork, - iwork, liwork, info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - lrwork = BlasInt(rwork[1]) - resize!(rwork, lrwork) - liwork = iwork[1] - resize!(iwork, liwork) - end - end - W, A - end - - # SUBROUTINE ZHEGVD( ITYPE, JOBZ, UPLO, N, A, LDA, B, LDB, W, WORK, - # $ LWORK, RWORK, LRWORK, IWORK, LIWORK, INFO ) - # * .. Scalar Arguments .. - # CHARACTER JOBZ, UPLO - # INTEGER INFO, ITYPE, LDA, LDB, LIWORK, LRWORK, LWORK, N - # * .. - # * .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION RWORK( * ), W( * ) - # COMPLEX*16 A( LDA, * ), B( LDB, * ), WORK( * ) - function sygvd!(itype::Integer, jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - @chkvalidparam 1 itype 1:3 - @chkvalidparam 2 jobz ('N', 'V') - chkstride1(A, B) - chkuplofinite(A, uplo) - chkuplofinite(B, uplo) - n, m = checksquare(A, B) - if n != m - throw(DimensionMismatch(lazy"dimensions of A, ($n,$n), and B, ($m,$m), must match")) - end - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - w = similar(A, $relty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - liwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 1) - lrwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1], lrwork as rwork[1] and liwork as iwork[1] - ccall((@blasfunc($sygvd), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, - Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Clong, Clong), - itype, jobz, uplo, n, - A, lda, B, ldb, - w, work, lwork, rwork, - lrwork, iwork, liwork, info, - 1, 1) - chkargsok(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - liwork = iwork[1] - resize!(iwork, liwork) - lrwork = BlasInt(rwork[1]) - resize!(rwork, lrwork) - end - end - chkposdef(info[]) - w, A, B - end - end -end - -""" - syev!(jobz, uplo, A) - -Finds the eigenvalues (`jobz = N`) or eigenvalues and eigenvectors -(`jobz = V`) of a symmetric matrix `A`. If `uplo = U`, the upper triangle -of `A` is used. If `uplo = L`, the lower triangle of `A` is used. 
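A rough usage sketch of the symmetric eigensolver wrapper described here, assuming the `jobz`-dependent return shape defined by the dispatch above; the input matrix is illustrative:

    using LinearAlgebra: LAPACK
    A = [2.0 1.0; 1.0 2.0]                    # real symmetric example
    W, V = LAPACK.syev!('V', 'U', copy(A))    # eigenvalues in W; eigenvectors overwrite the copy, returned as V
    Wonly = LAPACK.syev!('N', 'U', copy(A))   # eigenvalues only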
-""" -syev!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix) - -""" - syevr!(jobz, range, uplo, A, vl, vu, il, iu, abstol) -> (W, Z) - -Finds the eigenvalues (`jobz = N`) or eigenvalues and eigenvectors -(`jobz = V`) of a symmetric matrix `A`. If `uplo = U`, the upper triangle -of `A` is used. If `uplo = L`, the lower triangle of `A` is used. If -`range = A`, all the eigenvalues are found. If `range = V`, the -eigenvalues in the half-open interval `(vl, vu]` are found. -If `range = I`, the eigenvalues with indices between `il` and `iu` are -found. `abstol` can be set as a tolerance for convergence. - -The eigenvalues are returned in `W` and the eigenvectors in `Z`. -""" -syevr!(jobz::AbstractChar, range::AbstractChar, uplo::AbstractChar, A::AbstractMatrix, - vl::AbstractFloat, vu::AbstractFloat, il::Integer, iu::Integer, abstol::AbstractFloat) - -""" - syevd!(jobz, uplo, A) - -Finds the eigenvalues (`jobz = N`) or eigenvalues and eigenvectors -(`jobz = V`) of a symmetric matrix `A`. If `uplo = U`, the upper triangle -of `A` is used. If `uplo = L`, the lower triangle of `A` is used. -""" -syevd!(jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix) - -""" - sygvd!(itype, jobz, uplo, A, B) -> (w, A, B) - -Finds the generalized eigenvalues (`jobz = N`) or eigenvalues and -eigenvectors (`jobz = V`) of a symmetric matrix `A` and symmetric -positive-definite matrix `B`. If `uplo = U`, the upper triangles -of `A` and `B` are used. If `uplo = L`, the lower triangles of `A` and -`B` are used. If `itype = 1`, the problem to solve is -`A * x = lambda * B * x`. If `itype = 2`, the problem to solve is -`A * B * x = lambda * x`. If `itype = 3`, the problem to solve is -`B * A * x = lambda * x`. -""" -sygvd!(itype::Integer, jobz::AbstractChar, uplo::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) - -## (BD) Bidiagonal matrices - singular value decomposition -for (bdsqr, relty, elty) in - ((:dbdsqr_,:Float64,:Float64), - (:sbdsqr_,:Float32,:Float32), - (:zbdsqr_,:Float64,:ComplexF64), - (:cbdsqr_,:Float32,:ComplexF32)) - @eval begin - function bdsqr!(uplo::AbstractChar, d::AbstractVector{$relty}, e_::AbstractVector{$relty}, - Vt::AbstractMatrix{$elty}, U::AbstractMatrix{$elty}, C::AbstractMatrix{$elty}) - require_one_based_indexing(d, e_, Vt, U, C) - chkstride1(d, e_, Vt, U, C) - # Extract number - n = length(d) - ncvt, nru, ncc = size(Vt, 2), size(U, 1), size(C, 2) - ldvt, ldu, ldc = max(1, stride(Vt,2)), max(1, stride(U, 2)), max(1, stride(C,2)) - # Do checks - chkuplo(uplo) - if length(e_) != n - 1 - throw(DimensionMismatch(lazy"off-diagonal has length $(length(e_)) but should have length $(n - 1)")) - end - if ncvt > 0 && ldvt < n - throw(DimensionMismatch(lazy"leading dimension of Vt, $ldvt, must be at least $n")) - end - if ldu < nru - throw(DimensionMismatch(lazy"leading dimension of U, $ldu, must be at least $nru")) - end - if size(U, 2) != n - throw(DimensionMismatch(lazy"U must have $n columns but has $(size(U, 2))")) - end - if ncc > 0 && ldc < n - throw(DimensionMismatch(lazy"leading dimension of C, $ldc, must be at least $n")) - end - # Allocate - work = Vector{$relty}(undef, 4n) - info = Ref{BlasInt}() - ccall((@blasfunc($bdsqr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{$relty}, Ptr{$relty}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$relty}, Ref{BlasInt}, Clong), - uplo, n, ncvt, nru, - ncc, d, e_, Vt, - ldvt, U, ldu, C, - ldc, work, info, 1) - chklapackerror(info[]) 
- d, Vt, U, C #singular values in descending order, P**T * VT, U * Q, Q**T * C - end - end -end - -""" - bdsqr!(uplo, d, e_, Vt, U, C) -> (d, Vt, U, C) - -Computes the singular value decomposition of a bidiagonal matrix with -`d` on the diagonal and `e_` on the off-diagonal. If `uplo = U`, `e_` is -the superdiagonal. If `uplo = L`, `e_` is the subdiagonal. Can optionally also -compute the product `Q' * C`. - -Returns the singular values in `d`, and the matrix `C` overwritten with `Q' * C`. -""" -bdsqr!(uplo::AbstractChar, d::AbstractVector, e_::AbstractVector, Vt::AbstractMatrix, U::AbstractMatrix, C::AbstractMatrix) - -#Defined only for real types -for (bdsdc, elty) in - ((:dbdsdc_,:Float64), - (:sbdsdc_,:Float32)) - @eval begin - #* DBDSDC computes the singular value decomposition (SVD) of a real - #* N-by-N (upper or lower) bidiagonal matrix B: B = U * S * VT, - #* using a divide and conquer method - #* .. Scalar Arguments .. - # CHARACTER COMPQ, UPLO - # INTEGER INFO, LDU, LDVT, N - #* .. - #* .. Array Arguments .. - # INTEGER IQ( * ), IWORK( * ) - # DOUBLE PRECISION D( * ), E( * ), Q( * ), U( LDU, * ), - # $ VT( LDVT, * ), WORK( * ) - function bdsdc!(uplo::AbstractChar, compq::AbstractChar, d::AbstractVector{$elty}, e_::AbstractVector{$elty}) - require_one_based_indexing(d, e_) - chkstride1(d, e_) - n, ldiq, ldq, ldu, ldvt = length(d), 1, 1, 1, 1 - chkuplo(uplo) - if compq == 'N' - lwork = 6*n - elseif compq == 'P' - @warn "COMPQ='P' is not tested" - #TODO turn this into an actual LAPACK call - #smlsiz=ilaenv(9, $elty === :Float64 ? 'dbdsqr' : 'sbdsqr', string(uplo, compq), n,n,n,n) - smlsiz=100 #For now, completely overkill - ldq = n*(11+2*smlsiz+8*round(Int,log((n/(smlsiz+1)))/log(2))) - ldiq = n*(3+3*round(Int,log(n/(smlsiz+1))/log(2))) - lwork = 6*n - elseif compq == 'I' - ldvt=ldu=max(1, n) - lwork=3*n^2 + 4*n - else - throw(ArgumentError(lazy"COMPQ argument must be 'N', 'P' or 'I', got $(repr(compq))")) - end - u = similar(d, $elty, (ldu, n)) - vt = similar(d, $elty, (ldvt, n)) - q = similar(d, $elty, ldq) - iq = similar(d, BlasInt, ldiq) - work = Vector{$elty}(undef, lwork) - iwork = Vector{BlasInt}(undef, 8n) - info = Ref{BlasInt}() - ccall((@blasfunc($bdsdc), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{BlasInt}, Ptr{$elty}, Ptr{BlasInt}, Ptr{BlasInt}, - Clong, Clong), - uplo, compq, n, d, e_, - u, ldu, vt, ldvt, - q, iq, work, iwork, info, - 1, 1) - chklapackerror(info[]) - d, e_, u, vt, q, iq - end - end -end - -""" - bdsdc!(uplo, compq, d, e_) -> (d, e, u, vt, q, iq) - -Computes the singular value decomposition of a bidiagonal matrix with `d` on the -diagonal and `e_` on the off-diagonal using a divide and conqueq method. -If `uplo = U`, `e_` is the superdiagonal. If `uplo = L`, `e_` is the subdiagonal. -If `compq = N`, only the singular values are found. If `compq = I`, the singular -values and vectors are found. If `compq = P`, the singular values -and vectors are found in compact form. Only works for real types. - -Returns the singular values in `d`, and if `compq = P`, the compact singular -vectors in `iq`. -""" -bdsdc!(uplo::AbstractChar, compq::AbstractChar, d::AbstractVector, e_::AbstractVector) - -for (gecon, elty) in - ((:dgecon_,:Float64), - (:sgecon_,:Float32)) - @eval begin - # SUBROUTINE DGECON( NORM, N, A, LDA, ANORM, RCOND, WORK, IWORK, - # $ INFO ) - # * .. Scalar Arguments .. 
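A small sketch of driving the bidiagonal divide-and-conquer SVD wrapper above through `LinearAlgebra.LAPACK`; the diagonals below are illustrative:

    using LinearAlgebra: LAPACK
    d  = [3.0, 2.0, 1.0]          # diagonal of an upper-bidiagonal matrix
    e_ = [0.5, 0.25]              # superdiagonal
    d2, e2, u, vt, q, iq = LAPACK.bdsdc!('U', 'I', copy(d), copy(e_))
    # d2 holds the singular values; u and vt hold the singular vectors when compq = 'I'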
- # CHARACTER NORM - # INTEGER INFO, LDA, N - # DOUBLE PRECISION ANORM, RCOND - # * .. - # * .. Array Arguments .. - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), WORK( * ) - function gecon!(normtype::AbstractChar, A::AbstractMatrix{$elty}, anorm::$elty) - require_one_based_indexing(A) - @chkvalidparam 1 normtype ('0', '1', 'I') - chkstride1(A) - n = checksquare(A) - lda = max(1, stride(A, 2)) - rcond = Ref{$elty}() - work = Vector{$elty}(undef, 4n) - iwork = Vector{BlasInt}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($gecon), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{$elty}, Ref{$elty}, Ptr{$elty}, Ptr{BlasInt}, - Ref{BlasInt}, Clong), - normtype, n, A, lda, anorm, rcond, work, iwork, - info, 1) - chklapackerror(info[]) - rcond[] - end - end -end - -for (gecon, elty, relty) in - ((:zgecon_,:ComplexF64,:Float64), - (:cgecon_,:ComplexF32,:Float32)) - @eval begin - # SUBROUTINE ZGECON( NORM, N, A, LDA, ANORM, RCOND, WORK, RWORK, - # $ INFO ) - # * .. Scalar Arguments .. - # CHARACTER NORM - # INTEGER INFO, LDA, N - # DOUBLE PRECISION ANORM, RCOND - # * .. - # * .. Array Arguments .. - # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), WORK( * ) - function gecon!(normtype::AbstractChar, A::AbstractMatrix{$elty}, anorm::$relty) - require_one_based_indexing(A) - @chkvalidparam 1 normtype ('0', '1', 'I') - chkstride1(A) - n = checksquare(A) - lda = max(1, stride(A, 2)) - rcond = Ref{$relty}() - work = Vector{$elty}(undef, 2n) - rwork = Vector{$relty}(undef, 2n) - info = Ref{BlasInt}() - ccall((@blasfunc($gecon), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{$relty}, Ref{$relty}, Ptr{$elty}, Ptr{$relty}, - Ref{BlasInt}, Clong), - normtype, n, A, lda, anorm, rcond, work, rwork, - info, 1) - chklapackerror(info[]) - rcond[] - end - end -end - -""" - gecon!(normtype, A, anorm) - -Finds the reciprocal condition number of matrix `A`. If `normtype = I`, -the condition number is found in the infinity norm. If `normtype = O` or -`1`, the condition number is found in the one norm. `A` must be the -result of `getrf!` and `anorm` is the norm of `A` in the relevant norm. -""" -gecon!(normtype::AbstractChar, A::AbstractMatrix, anorm) - -for (gehrd, elty) in - ((:dgehrd_,:Float64), - (:sgehrd_,:Float32), - (:zgehrd_,:ComplexF64), - (:cgehrd_,:ComplexF32)) - @eval begin - - # .. Scalar Arguments .. - # INTEGER IHI, ILO, INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function gehrd!(ilo::Integer, ihi::Integer, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkfinite(A) # balancing routines don't support NaNs and Infs - tau = similar(A, $elty, max(0,n - 1)) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gehrd), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}), - n, ilo, ihi, A, - max(1, stride(A, 2)), tau, work, lwork, - info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, tau - end - end -end -gehrd!(A::AbstractMatrix) = gehrd!(1, size(A, 1), A) - -""" - gehrd!(ilo, ihi, A) -> (A, tau) - -Converts a matrix `A` to Hessenberg form. If `A` is balanced with `gebal!` -then `ilo` and `ihi` are the outputs of `gebal!`. 
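As the `gecon!` docstring above notes, the input must already be factorized; a minimal sketch of that pattern, assuming the companion LU wrapper `getrf!` from the same module (example matrix is illustrative):

    using LinearAlgebra: LAPACK, opnorm
    A = [4.0 2.0; 1.0 3.0]
    anorm = opnorm(A, 1)                      # one-norm of the original matrix
    AF, ipiv, info = LAPACK.getrf!(copy(A))   # LU factorization
    rcond = LAPACK.gecon!('1', AF, anorm)     # reciprocal condition number in the one-norm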
Otherwise they should be -`ilo = 1` and `ihi = size(A,2)`. `tau` contains the elementary reflectors of -the factorization. -""" -gehrd!(ilo::Integer, ihi::Integer, A::AbstractMatrix) - -for (orghr, elty) in - ((:dorghr_,:Float64), - (:sorghr_,:Float32), - (:zunghr_,:ComplexF64), - (:cunghr_,:ComplexF32)) - @eval begin - # * .. Scalar Arguments .. - # INTEGER IHI, ILO, INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function orghr!(ilo::Integer, ihi::Integer, A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}) - require_one_based_indexing(A, tau) - chkstride1(A, tau) - n = checksquare(A) - if n - length(tau) != 1 - throw(DimensionMismatch(lazy"tau has length $(length(tau)), needs $(n - 1)")) - end - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($orghr), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}), - n, ilo, ihi, A, - max(1, stride(A, 2)), tau, work, lwork, - info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A - end - end -end - -""" - orghr!(ilo, ihi, A, tau) - -Explicitly finds `Q`, the orthogonal/unitary matrix from `gehrd!`. `ilo`, -`ihi`, `A`, and `tau` must correspond to the input/output to `gehrd!`. -""" -orghr!(ilo::Integer, ihi::Integer, A::AbstractMatrix, tau::AbstractVector) - -for (ormhr, elty) in - ((:dormhr_,:Float64), - (:sormhr_,:Float32), - (:zunmhr_,:ComplexF64), - (:cunmhr_,:ComplexF32)) - @eval begin - # .. Scalar Arguments .. - # CHARACTER side, trans - # INTEGER ihi, ilo, info, lda, ldc, lwork, m, n - # .. - # .. Array Arguments .. - # DOUBLE PRECISION a( lda, * ), c( ldc, * ), tau( * ), work( * ) - function ormhr!(side::AbstractChar, trans::AbstractChar, ilo::Integer, ihi::Integer, A::AbstractMatrix{$elty}, - tau::AbstractVector{$elty}, C::AbstractVecOrMat{$elty}) - - require_one_based_indexing(A, tau, C) - chkstride1(A, tau, C) - chkside(side) - chktrans(trans) - n = checksquare(A) - mC, nC = size(C, 1), size(C, 2) - - if n - length(tau) != 1 - throw(DimensionMismatch(lazy"tau has length $(length(tau)), needs $(n - 1)")) - end - if (side == 'L' && mC != n) || (side == 'R' && nC != n) - throw(DimensionMismatch("A and C matrices are not conformable")) - end - - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ormhr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Clong, Clong), - side, trans, mC, nC, - ilo, ihi, A, max(1, stride(A, 2)), - tau, C, max(1, stride(C, 2)), work, - lwork, info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - C - end - end -end - -for (hseqr, elty) in - ((:zhseqr_,:ComplexF64), - (:chseqr_,:ComplexF32)) - @eval begin - # * .. Scalar Arguments .. - # CHARACTER JOB, COMPZ - # INTEGER N, ILO, IHI, LWORK, LDH, LDZ, INFO - # * .. - # * .. Array Arguments .. 
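For reference, a sketch of pairing `gehrd!` with `orghr!` to obtain an explicit Hessenberg factorization, assuming the `LinearAlgebra.LAPACK` bindings; the random input is illustrative:

    using LinearAlgebra: LAPACK
    n = 5
    A = rand(n, n)
    H, tau = LAPACK.gehrd!(1, n, copy(A))   # reduce to Hessenberg form; reflectors stored below the subdiagonal and in tau
    Q = LAPACK.orghr!(1, n, copy(H), tau)   # accumulate the orthogonal factor from those reflectors
    # Q * triu(H, -1) * Q' ≈ A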
- # COMPLEX*16 H( LDH, * ), Z( LDZ, * ), WORK( * ) - function hseqr!(job::AbstractChar, compz::AbstractChar, ilo::Integer, ihi::Integer, - H::AbstractMatrix{$elty}, Z::AbstractMatrix{$elty}) - require_one_based_indexing(H, Z) - @chkvalidparam 1 job ('E', 'S') - @chkvalidparam 2 compz ('N', 'I', 'V') - chkstride1(H) - n = checksquare(H) - checksquare(Z) == n || throw(DimensionMismatch()) - ldh = max(1, stride(H, 2)) - ldz = max(1, stride(Z, 2)) - w = similar(H, $elty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($hseqr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}), - job, compz, n, ilo, ihi, - H, ldh, w, Z, ldz, work, - lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - H, Z, w - end - end -end - -for (hseqr, elty) in - ((:dhseqr_,:Float64), - (:shseqr_,:Float32)) - @eval begin - # * .. Scalar Arguments .. - # CHARACTER JOB, COMPZ - # INTEGER N, ILO, IHI, LWORK, LDH, LDZ, INFO - # * .. - # * .. Array Arguments .. - # COMPLEX*16 H( LDH, * ), Z( LDZ, * ), WORK( * ) - function hseqr!(job::AbstractChar, compz::AbstractChar, ilo::Integer, ihi::Integer, - H::AbstractMatrix{$elty}, Z::AbstractMatrix{$elty}) - require_one_based_indexing(H, Z) - @chkvalidparam 1 job ('E', 'S') - @chkvalidparam 2 compz ('N', 'I', 'V') - chkstride1(H) - n = checksquare(H) - checksquare(Z) == n || throw(DimensionMismatch()) - ldh = max(1, stride(H, 2)) - ldz = max(1, stride(Z, 2)) - wr = similar(H, $elty, n) - wi = similar(H, $elty, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($hseqr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{BlasInt}), - job, compz, n, ilo, ihi, - H, ldh, wr, wi, Z, ldz, work, - lwork, info) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - H, Z, complex.(wr, wi) - end - end -end -hseqr!(H::StridedMatrix{T}, Z::StridedMatrix{T}) where {T<:BlasFloat} = hseqr!('S', 'V', 1, size(H, 1), H, Z) -hseqr!(H::StridedMatrix{T}) where {T<:BlasFloat} = hseqr!('S', 'I', 1, size(H, 1), H, similar(H)) - -""" - hseqr!(job, compz, ilo, ihi, H, Z) -> (H, Z, w) - -Computes all eigenvalues and (optionally) the Schur factorization of a matrix -reduced to Hessenberg form. If `H` is balanced with `gebal!` -then `ilo` and `ihi` are the outputs of `gebal!`. Otherwise they should be -`ilo = 1` and `ihi = size(H,2)`. `tau` contains the elementary reflectors of -the factorization. -""" -hseqr!(job::AbstractChar, compz::AbstractChar, ilo::Integer, ihi::Integer, H::AbstractMatrix, Z::AbstractMatrix) - -for (hetrd, elty) in - ((:dsytrd_,Float64), - (:ssytrd_,Float32), - (:zhetrd_,ComplexF64), - (:chetrd_,ComplexF32)) - relty = real(elty) - @eval begin - - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. 
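A rough sketch of feeding a Hessenberg matrix from `gehrd!` into the `hseqr!` convenience method above (which starts from an identity `Z`); the input is illustrative:

    using LinearAlgebra: LAPACK, triu
    n = 4
    A = rand(n, n)
    H, tau = LAPACK.gehrd!(1, n, copy(A))
    Hs = triu(H, -1)                  # keep only the Hessenberg part; the reflectors are discarded here
    T, Z, w = LAPACK.hseqr!(Hs)       # Schur form T, Schur vectors Z, eigenvalues w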
- # DOUBLE PRECISION A( LDA, * ), D( * ), E( * ), TAU( * ), WORK( * ) - function hetrd!(uplo::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - chkstride1(A) - n = checksquare(A) - chkuplofinite(A, uplo) # balancing routines don't support NaNs and Infs - tau = similar(A, $elty, max(0,n - 1)) - d = Vector{$relty}(undef, n) - e = Vector{$relty}(undef, max(0,n - 1)) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($hetrd), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{$relty}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Clong), - uplo, n, A, max(1, stride(A, 2)), d, e, tau, work, lwork, info, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, tau, d, e - end - end -end - -""" - hetrd!(uplo, A) -> (A, tau, d, e) - -Converts a Hermitian matrix `A` to real-symmetric tridiagonal Hessenberg form. -If `uplo = U`, the upper half of `A` is stored; if `uplo = L`, the lower half is stored. -`tau` contains the elementary reflectors of the factorization, `d` contains the -diagonal and `e` contains the upper/lower diagonal. -""" -hetrd!(uplo::AbstractChar, A::AbstractMatrix) - -for (orgtr, elty) in - ((:dorgtr_,:Float64), - (:sorgtr_,:Float32), - (:zungtr_,:ComplexF64), - (:cungtr_,:ComplexF32)) - @eval begin - # * .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER INFO, LDA, LWORK, N - # * .. - # * .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), TAU( * ), WORK( * ) - function orgtr!(uplo::AbstractChar, A::AbstractMatrix{$elty}, tau::AbstractVector{$elty}) - require_one_based_indexing(A, tau) - chkstride1(A, tau) - n = checksquare(A) - if n - length(tau) != 1 - throw(DimensionMismatch(lazy"tau has length $(length(tau)), needs $(n - 1)")) - end - chkuplo(uplo) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($orgtr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Clong), - uplo, n, A, - max(1, stride(A, 2)), tau, work, lwork, - info, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A - end - end -end - -""" - orgtr!(uplo, A, tau) - -Explicitly finds `Q`, the orthogonal/unitary matrix from `hetrd!`. `uplo`, -`A`, and `tau` must correspond to the input/output to `hetrd!`. -""" -orgtr!(uplo::AbstractChar, A::AbstractMatrix, tau::AbstractVector) - -for (ormtr, elty) in - ((:dormtr_,:Float64), - (:sormtr_,:Float32), - (:zunmtr_,:ComplexF64), - (:cunmtr_,:ComplexF32)) - @eval begin - # .. Scalar Arguments .. - # CHARACTER side, trans, uplo - # INTEGER info, lda, ldc, lwork, m, n - # .. - # .. Array Arguments .. 
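A brief sketch of the `hetrd!`/`orgtr!` pair documented above: reduce a Hermitian matrix to real-symmetric tridiagonal form, then recover the unitary factor (input is illustrative):

    using LinearAlgebra: LAPACK
    A = [4.0+0im  1.0-2im;
         1.0+2im  3.0+0im]                       # small Hermitian example
    AF, tau, d, e = LAPACK.hetrd!('U', copy(A))  # d and e hold the real tridiagonal form
    Q = LAPACK.orgtr!('U', AF, tau)              # explicit unitary factor
    # Q * SymTridiagonal(d, e) * Q' ≈ A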
- # DOUBLE PRECISION a( lda, * ), c( ldc, * ), tau( * ), work( * ) - function ormtr!(side::AbstractChar, uplo::AbstractChar, trans::AbstractChar, A::AbstractMatrix{$elty}, - tau::AbstractVector{$elty}, C::AbstractVecOrMat{$elty}) - - require_one_based_indexing(A, tau, C) - chkstride1(A, tau, C) - n = checksquare(A) - chkside(side) - chkuplo(uplo) - chktrans(trans) - mC, nC = size(C, 1), size(C, 2) - - if n - length(tau) != 1 - throw(DimensionMismatch(lazy"tau has length $(length(tau)), needs $(n - 1)")) - end - if (side == 'L' && mC != n) || (side == 'R' && nC != n) - throw(DimensionMismatch(lazy"A and C matrices are not conformable")) - end - - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($ormtr), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Clong, Clong, Clong), - side, uplo, trans, mC, nC, - A, max(1, stride(A, 2)), - tau, C, max(1, stride(C, 2)), work, - lwork, info, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - C - end - end -end - -for (gees, gges, gges3, elty) in - ((:dgees_,:dgges_,:dgges3_,:Float64), - (:sgees_,:sgges_,:sgges3_,:Float32)) - @eval begin - # .. Scalar Arguments .. - # CHARACTER JOBVS, SORT - # INTEGER INFO, LDA, LDVS, LWORK, N, SDIM - # .. - # .. Array Arguments .. - # LOGICAL BWORK( * ) - # DOUBLE PRECISION A( LDA, * ), VS( LDVS, * ), WI( * ), WORK( * ), - # $ WR( * ) - function gees!(jobvs::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - @chkvalidparam 1 jobvs ('N', 'V') - chkstride1(A) - n = checksquare(A) - sdim = Vector{BlasInt}(undef, 1) - wr = similar(A, $elty, n) - wi = similar(A, $elty, n) - vs = similar(A, $elty, jobvs == 'V' ? n : 0, n) - ldvs = max(size(vs, 1), 1) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gees), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ptr{Cvoid}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{Cvoid}, Ref{BlasInt}, Clong, Clong), - jobvs, 'N', C_NULL, n, - A, max(1, stride(A, 2)), sdim, wr, - wi, vs, ldvs, work, - lwork, C_NULL, info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - iszero(wi) ? (A, vs, wr) : (A, vs, complex.(wr, wi)) - end - - # * .. Scalar Arguments .. - # CHARACTER JOBVSL, JOBVSR, SORT - # INTEGER INFO, LDA, LDB, LDVSL, LDVSR, LWORK, N, SDIM - # * .. - # * .. Array Arguments .. - # LOGICAL BWORK( * ) - # DOUBLE PRECISION A( LDA, * ), ALPHAI( * ), ALPHAR( * ), - # $ B( LDB, * ), BETA( * ), VSL( LDVSL, * ), - # $ VSR( LDVSR, * ), WORK( * ) - function gges!(jobvsl::AbstractChar, jobvsr::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - @chkvalidparam 1 jobvsl ('N', 'V') - @chkvalidparam 2 jobvsr ('N', 'V') - chkstride1(A, B) - n, m = checksquare(A, B) - if n != m - throw(DimensionMismatch(lazy"dimensions of A, ($n,$n), and B, ($m,$m), must match")) - end - sdim = BlasInt(0) - alphar = similar(A, $elty, n) - alphai = similar(A, $elty, n) - beta = similar(A, $elty, n) - ldvsl = jobvsl == 'V' ? 
max(1, n) : 1 - vsl = similar(A, $elty, ldvsl, n) - ldvsr = jobvsr == 'V' ? max(1, n) : 1 - vsr = similar(A, $elty, ldvsr, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gges), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ptr{Cvoid}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{Cvoid}, - Ref{BlasInt}, Clong, Clong, Clong), - jobvsl, jobvsr, 'N', C_NULL, - n, A, max(1,stride(A, 2)), B, - max(1,stride(B, 2)), sdim, alphar, alphai, - beta, vsl, ldvsl, vsr, - ldvsr, work, lwork, C_NULL, - info, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, B, complex.(alphar, alphai), beta, vsl[1:(jobvsl == 'V' ? n : 0),:], vsr[1:(jobvsr == 'V' ? n : 0),:] - end - - # * .. Scalar Arguments .. - # CHARACTER JOBVSL, JOBVSR, SORT - # INTEGER INFO, LDA, LDB, LDVSL, LDVSR, LWORK, N, SDIM - # * .. - # * .. Array Arguments .. - # LOGICAL BWORK( * ) - # DOUBLE PRECISION A( LDA, * ), ALPHAI( * ), ALPHAR( * ), - # $ B( LDB, * ), BETA( * ), VSL( LDVSL, * ), - # $ VSR( LDVSR, * ), WORK( * ) - function gges3!(jobvsl::AbstractChar, jobvsr::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - @chkvalidparam 1 jobvsl ('N', 'V') - @chkvalidparam 2 jobvsr ('N', 'V') - chkstride1(A, B) - n, m = checksquare(A, B) - if n != m - throw(DimensionMismatch(lazy"dimensions of A, ($n,$n), and B, ($m,$m), must match")) - end - sdim = BlasInt(0) - alphar = similar(A, $elty, n) - alphai = similar(A, $elty, n) - beta = similar(A, $elty, n) - ldvsl = jobvsl == 'V' ? max(1, n) : 1 - vsl = similar(A, $elty, ldvsl, n) - ldvsr = jobvsr == 'V' ? max(1, n) : 1 - vsr = similar(A, $elty, ldvsr, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gges3), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ptr{Cvoid}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{Cvoid}, - Ref{BlasInt}, Clong, Clong, Clong), - jobvsl, jobvsr, 'N', C_NULL, - n, A, max(1,stride(A, 2)), B, - max(1,stride(B, 2)), sdim, alphar, alphai, - beta, vsl, ldvsl, vsr, - ldvsr, work, lwork, C_NULL, - info, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, B, complex.(alphar, alphai), beta, vsl[1:(jobvsl == 'V' ? n : 0),:], vsr[1:(jobvsr == 'V' ? n : 0),:] - end - end -end - -for (gees, gges, gges3, elty, relty) in - ((:zgees_,:zgges_,:zgges3_,:ComplexF64,:Float64), - (:cgees_,:cgges_,:cgges3_,:ComplexF32,:Float32)) - @eval begin - # * .. Scalar Arguments .. - # CHARACTER JOBVS, SORT - # INTEGER INFO, LDA, LDVS, LWORK, N, SDIM - # * .. - # * .. Array Arguments .. - # LOGICAL BWORK( * ) - # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), VS( LDVS, * ), W( * ), WORK( * ) - function gees!(jobvs::AbstractChar, A::AbstractMatrix{$elty}) - require_one_based_indexing(A) - @chkvalidparam 1 jobvs ('N', 'V') - chkstride1(A) - n = checksquare(A) - sort = 'N' - sdim = BlasInt(0) - w = similar(A, $elty, n) - vs = similar(A, $elty, jobvs == 'V' ? 
n : 1, n) - ldvs = max(size(vs, 1), 1) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gees), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ptr{Cvoid}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ptr{Cvoid}, Ref{BlasInt}, Clong, Clong), - jobvs, sort, C_NULL, n, - A, max(1, stride(A, 2)), sdim, w, - vs, ldvs, work, lwork, - rwork, C_NULL, info, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, vs, w - end - - # * .. Scalar Arguments .. - # CHARACTER JOBVSL, JOBVSR, SORT - # INTEGER INFO, LDA, LDB, LDVSL, LDVSR, LWORK, N, SDIM - # * .. - # * .. Array Arguments .. - # LOGICAL BWORK( * ) - # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), ALPHA( * ), B( LDB, * ), - # $ BETA( * ), VSL( LDVSL, * ), VSR( LDVSR, * ), - # $ WORK( * ) - function gges!(jobvsl::AbstractChar, jobvsr::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - @chkvalidparam 1 jobvsl ('N', 'V') - @chkvalidparam 2 jobvsr ('N', 'V') - chkstride1(A, B) - n, m = checksquare(A, B) - if n != m - throw(DimensionMismatch(lazy"dimensions of A, ($n,$n), and B, ($m,$m), must match")) - end - sdim = BlasInt(0) - alpha = similar(A, $elty, n) - beta = similar(A, $elty, n) - ldvsl = jobvsl == 'V' ? max(1, n) : 1 - vsl = similar(A, $elty, ldvsl, n) - ldvsr = jobvsr == 'V' ? max(1, n) : 1 - vsr = similar(A, $elty, ldvsr, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 8n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gges), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ptr{Cvoid}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{Cvoid}, - Ref{BlasInt}, Clong, Clong, Clong), - jobvsl, jobvsr, 'N', C_NULL, - n, A, max(1, stride(A, 2)), B, - max(1, stride(B, 2)), sdim, alpha, beta, - vsl, ldvsl, vsr, ldvsr, - work, lwork, rwork, C_NULL, - info, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, B, alpha, beta, vsl[1:(jobvsl == 'V' ? n : 0),:], vsr[1:(jobvsr == 'V' ? n : 0),:] - end - - # * .. Scalar Arguments .. - # CHARACTER JOBVSL, JOBVSR, SORT - # INTEGER INFO, LDA, LDB, LDVSL, LDVSR, LWORK, N, SDIM - # * .. - # * .. Array Arguments .. - # LOGICAL BWORK( * ) - # DOUBLE PRECISION RWORK( * ) - # COMPLEX*16 A( LDA, * ), ALPHA( * ), B( LDB, * ), - # $ BETA( * ), VSL( LDVSL, * ), VSR( LDVSR, * ), - # $ WORK( * ) - function gges3!(jobvsl::AbstractChar, jobvsr::AbstractChar, A::AbstractMatrix{$elty}, B::AbstractMatrix{$elty}) - require_one_based_indexing(A, B) - @chkvalidparam 1 jobvsl ('N', 'V') - @chkvalidparam 2 jobvsr ('N', 'V') - chkstride1(A, B) - n, m = checksquare(A, B) - if n != m - throw(DimensionMismatch(lazy"dimensions of A, ($n,$n), and B, ($m,$m), must match")) - end - sdim = BlasInt(0) - alpha = similar(A, $elty, n) - beta = similar(A, $elty, n) - ldvsl = jobvsl == 'V' ? max(1, n) : 1 - vsl = similar(A, $elty, ldvsl, n) - ldvsr = jobvsr == 'V' ? 
max(1, n) : 1 - vsr = similar(A, $elty, ldvsr, n) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - rwork = Vector{$relty}(undef, 8n) - info = Ref{BlasInt}() - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($gges3), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{UInt8}, Ptr{Cvoid}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$relty}, Ptr{Cvoid}, - Ref{BlasInt}, Clong, Clong, Clong), - jobvsl, jobvsr, 'N', C_NULL, - n, A, max(1, stride(A, 2)), B, - max(1, stride(B, 2)), sdim, alpha, beta, - vsl, ldvsl, vsr, ldvsr, - work, lwork, rwork, C_NULL, - info, 1, 1, 1) - chklapackerror(info[]) - if i == 1 - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - A, B, alpha, beta, vsl[1:(jobvsl == 'V' ? n : 0),:], vsr[1:(jobvsr == 'V' ? n : 0),:] - end - end -end - -""" - gees!(jobvs, A) -> (A, vs, w) - -Computes the eigenvalues (`jobvs = N`) or the eigenvalues and Schur -vectors (`jobvs = V`) of matrix `A`. `A` is overwritten by its Schur form. - -Returns `A`, `vs` containing the Schur vectors, and `w`, containing the -eigenvalues. -""" -gees!(jobvs::AbstractChar, A::AbstractMatrix) - - -""" - gges!(jobvsl, jobvsr, A, B) -> (A, B, alpha, beta, vsl, vsr) - -Computes the generalized eigenvalues, generalized Schur form, left Schur -vectors (`jobsvl = V`), or right Schur vectors (`jobvsr = V`) of `A` and -`B`. - -The generalized eigenvalues are returned in `alpha` and `beta`. The left Schur -vectors are returned in `vsl` and the right Schur vectors are returned in `vsr`. -""" -gges!(jobvsl::AbstractChar, jobvsr::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) - -""" - gges3!(jobvsl, jobvsr, A, B) -> (A, B, alpha, beta, vsl, vsr) - -Computes the generalized eigenvalues, generalized Schur form, left Schur -vectors (`jobsvl = V`), or right Schur vectors (`jobvsr = V`) of `A` and -`B` using a blocked algorithm. This function requires LAPACK 3.6.0. - -The generalized eigenvalues are returned in `alpha` and `beta`. The left Schur -vectors are returned in `vsl` and the right Schur vectors are returned in `vsr`. -""" -gges3!(jobvsl::AbstractChar, jobvsr::AbstractChar, A::AbstractMatrix, B::AbstractMatrix) - -for (trexc, trsen, tgsen, elty) in - ((:dtrexc_, :dtrsen_, :dtgsen_, :Float64), - (:strexc_, :strsen_, :stgsen_, :Float32)) - @eval begin - # * .. Scalar Arguments .. - # CHARACTER COMPQ - # INTEGER IFST, ILST, INFO, LDQ, LDT, N - # * .. - # * .. Array Arguments .. - # DOUBLE PRECISION Q( LDQ, * ), T( LDT, * ), WORK( * ) - function trexc!(compq::AbstractChar, ifst::BlasInt, ilst::BlasInt, T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty}) - require_one_based_indexing(T, Q) - @chkvalidparam 1 compq ('V', 'N') - chkstride1(T, Q) - n = checksquare(T) - ldt = max(1, stride(T, 2)) - ldq = max(1, stride(Q, 2)) - work = Vector{$elty}(undef, n) - info = Ref{BlasInt}() - ccall((@blasfunc($trexc), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Clong), - compq, n, - T, ldt, Q, ldq, - ifst, ilst, - work, info, 1) - chklapackerror(info[]) - T, Q - end - trexc!(ifst::BlasInt, ilst::BlasInt, T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty}) = - trexc!('V', ifst, ilst, T, Q) - - # * .. Scalar Arguments .. 
- # CHARACTER COMPQ, JOB - # INTEGER INFO, LDQ, LDT, LIWORK, LWORK, M, N - # DOUBLE PRECISION S, SEP - # * .. - # * .. Array Arguments .. - # LOGICAL SELECT( * ) - # INTEGER IWORK( * ) - # DOUBLE PRECISION Q( LDQ, * ), T( LDT, * ), WI( * ), WORK( * ), WR( * ) - function trsen!(job::AbstractChar, compq::AbstractChar, select::AbstractVector{BlasInt}, - T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty}) - require_one_based_indexing(T, Q, select) - @chkvalidparam 1 job ('N', 'E', 'V', 'B') - @chkvalidparam 2 compq ('V', 'N') - chkstride1(T, Q, select) - n = checksquare(T) - ldt = max(1, stride(T, 2)) - ldq = max(1, stride(Q, 2)) - wr = similar(T, $elty, n) - wi = similar(T, $elty, n) - m = sum(select) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - liwork = BlasInt(-1) - info = Ref{BlasInt}() - select = convert(Array{BlasInt}, select) - s = Ref{$elty}(zero($elty)) - sep = Ref{$elty}(zero($elty)) - for i = 1:2 # first call returns lwork as work[1] and liwork as iwork[1] - ccall((@blasfunc($trsen), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ptr{$elty}, Ref{BlasInt}, Ref{$elty}, Ref{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong), - job, compq, select, n, - T, ldt, Q, ldq, - wr, wi, m, s, sep, - work, lwork, iwork, liwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 # only estimated optimal lwork, liwork - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - liwork = BlasInt(real(iwork[1])) - resize!(iwork, liwork) - end - end - iszero(wi) ? (T, Q, wr, s[], sep[]) : (T, Q, complex.(wr, wi), s[], sep[]) - end - trsen!(select::AbstractVector{BlasInt}, T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty}) = - trsen!('N', 'V', select, T, Q) - - # .. Scalar Arguments .. - # LOGICAL WANTQ, WANTZ - # INTEGER IJOB, INFO, LDA, LDB, LDQ, LDZ, LIWORK, LWORK, - # $ M, N - # DOUBLE PRECISION PL, PR - # .. - # .. Array Arguments .. - # LOGICAL SELECT( * ) - # INTEGER IWORK( * ) - # DOUBLE PRECISION A( LDA, * ), ALPHAI( * ), ALPHAR( * ), - # $ B( LDB, * ), BETA( * ), DIF( * ), Q( LDQ, * ), - # $ WORK( * ), Z( LDZ, * ) - # .. 
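And a rough sketch of reordering a real Schur factorization with the `trsen!` wrapper above, assuming a factorization from `schur` and the unexported `LinearAlgebra.BlasInt`; the selection below is illustrative:

    using LinearAlgebra: LAPACK, BlasInt, schur
    A = [1.0 2.0 0.0;
         0.0 3.0 1.0;
         0.0 0.0 5.0]
    F = schur(A)
    select = zeros(BlasInt, 3)
    select[3] = 1                          # move the third eigenvalue to the leading position
    T2, Q2, w, s, sep = LAPACK.trsen!(select, copy(F.T), copy(F.Z))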
- function tgsen!(select::AbstractVector{BlasInt}, S::AbstractMatrix{$elty}, T::AbstractMatrix{$elty}, - Q::AbstractMatrix{$elty}, Z::AbstractMatrix{$elty}) - require_one_based_indexing(select, S, T, Q, Z) - chkstride1(select, S, T, Q, Z) - n, nt, nq, nz = checksquare(S, T, Q, Z) - if n != nt - throw(DimensionMismatch(lazy"dimensions of S, ($n,$n), and T, ($nt,$nt), must match")) - end - if n != nq - throw(DimensionMismatch(lazy"dimensions of S, ($n,$n), and Q, ($nq,$nq), must match")) - end - if n != nz - throw(DimensionMismatch(lazy"dimensions of S, ($n,$n), and Z, ($nz,$nz), must match")) - end - lds = max(1, stride(S, 2)) - ldt = max(1, stride(T, 2)) - ldq = max(1, stride(Q, 2)) - ldz = max(1, stride(Z, 2)) - m = sum(select) - alphai = similar(T, $elty, n) - alphar = similar(T, $elty, n) - beta = similar(T, $elty, n) - lwork = BlasInt(-1) - work = Vector{$elty}(undef, 1) - liwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - info = Ref{BlasInt}() - select = convert(Array{BlasInt}, select) - for i = 1:2 # first call returns lwork as work[1] and liwork as iwork[1] - ccall((@blasfunc($tgsen), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{BlasInt}), - 0, 1, 1, select, - n, S, lds, T, - ldt, alphar, alphai, beta, - Q, ldq, Z, ldz, - m, C_NULL, C_NULL, C_NULL, - work, lwork, iwork, liwork, - info) - chklapackerror(info[]) - if i == 1 # only estimated optimal lwork, liwork - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - liwork = BlasInt(real(iwork[1])) - resize!(iwork, liwork) - end - end - S, T, complex.(alphar, alphai), beta, Q, Z - end - end -end - -for (trexc, trsen, tgsen, elty, relty) in - ((:ztrexc_, :ztrsen_, :ztgsen_, :ComplexF64, :Float64), - (:ctrexc_, :ctrsen_, :ctgsen_, :ComplexF32, :Float32)) - @eval begin - # .. Scalar Arguments .. - # CHARACTER COMPQ - # INTEGER IFST, ILST, INFO, LDQ, LDT, N - # .. - # .. Array Arguments .. - # DOUBLE PRECISION Q( LDQ, * ), T( LDT, * ), WORK( * ) - function trexc!(compq::AbstractChar, ifst::BlasInt, ilst::BlasInt, T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty}) - require_one_based_indexing(T, Q) - @chkvalidparam 1 compq ('V', 'N') - chkstride1(T, Q) - n = checksquare(T) - ldt = max(1, stride(T, 2)) - ldq = max(1, stride(Q, 2)) - info = Ref{BlasInt}() - ccall((@blasfunc($trexc), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Ref{BlasInt}, - Ref{BlasInt}, Clong), - compq, n, - T, ldt, Q, ldq, - ifst, ilst, - info, 1) - chklapackerror(info[]) - T, Q - end - trexc!(ifst::BlasInt, ilst::BlasInt, T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty}) = - trexc!('V', ifst, ilst, T, Q) - - # .. Scalar Arguments .. - # CHARACTER COMPQ, JOB - # INTEGER INFO, LDQ, LDT, LWORK, M, N - # DOUBLE PRECISION S, SEP - # .. - # .. Array Arguments .. 
- # LOGICAL SELECT( * ) - # COMPLEX Q( LDQ, * ), T( LDT, * ), W( * ), WORK( * ) - function trsen!(job::AbstractChar, compq::AbstractChar, select::AbstractVector{BlasInt}, - T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty}) - require_one_based_indexing(select, T, Q) - @chkvalidparam 1 job ('N', 'E', 'V', 'B') - @chkvalidparam 2 compq ('N', 'V') - chkstride1(select, T, Q) - n = checksquare(T) - ldt = max(1, stride(T, 2)) - ldq = max(1, stride(Q, 2)) - w = similar(T, $elty, n) - m = sum(select) - work = Vector{$elty}(undef, 1) - lwork = BlasInt(-1) - info = Ref{BlasInt}() - select = convert(Array{BlasInt}, select) - s = Ref{$relty}(zero($relty)) - sep = Ref{$relty}(zero($relty)) - for i = 1:2 # first call returns lwork as work[1] - ccall((@blasfunc($trsen), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ref{$relty}, Ref{$relty}, - Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Clong, Clong), - job, compq, select, n, - T, ldt, Q, ldq, - w, m, s, sep, - work, lwork, - info, 1, 1) - chklapackerror(info[]) - if i == 1 # only estimated optimal lwork, liwork - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - end - end - T, Q, w, s[], sep[] - end - trsen!(select::AbstractVector{BlasInt}, T::AbstractMatrix{$elty}, Q::AbstractMatrix{$elty}) = - trsen!('N', 'V', select, T, Q) - - # .. Scalar Arguments .. - # LOGICAL WANTQ, WANTZ - # INTEGER IJOB, INFO, LDA, LDB, LDQ, LDZ, LIWORK, LWORK, - # $ M, N - # DOUBLE PRECISION PL, PR - # .. - # .. Array Arguments .. - # LOGICAL SELECT( * ) - # INTEGER IWORK( * ) - # DOUBLE PRECISION DIF( * ) - # COMPLEX*16 A( LDA, * ), ALPHA( * ), B( LDB, * ), - # $ BETA( * ), Q( LDQ, * ), WORK( * ), Z( LDZ, * ) - # .. 
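Likewise, the complex `tgsen!` defined next underlies eigenvalue reordering for generalized Schur factorizations. A short sketch through the assumed public API (`schur(A, B)` plus `ordschur`), offered only as illustration:

```julia
using LinearAlgebra

A = ComplexF64[1 2; 3 4]
B = ComplexF64[1 0; 1 1]
F = schur(A, B)              # generalized Schur: A ≈ Q*S*Z', B ≈ Q*T*Z'
select = [false, true]       # move the second generalized eigenvalue to the front
F2 = ordschur(F, select)     # reaches the tgsen! wrapper for BLAS-compatible types
F2.Q * F2.S * F2.Z' ≈ A && F2.Q * F2.T * F2.Z' ≈ B
```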
- function tgsen!(select::AbstractVector{BlasInt}, S::AbstractMatrix{$elty}, T::AbstractMatrix{$elty}, - Q::AbstractMatrix{$elty}, Z::AbstractMatrix{$elty}) - require_one_based_indexing(select, S, T, Q, Z) - chkstride1(select, S, T, Q, Z) - n, nt, nq, nz = checksquare(S, T, Q, Z) - if n != nt - throw(DimensionMismatch(lazy"dimensions of S, ($n,$n), and T, ($nt,$nt), must match")) - end - if n != nq - throw(DimensionMismatch(lazy"dimensions of S, ($n,$n), and Q, ($nq,$nq), must match")) - end - if n != nz - throw(DimensionMismatch(lazy"dimensions of S, ($n,$n), and Z, ($nz,$nz), must match")) - end - lds = max(1, stride(S, 2)) - ldt = max(1, stride(T, 2)) - ldq = max(1, stride(Q, 2)) - ldz = max(1, stride(Z, 2)) - m = sum(select) - alpha = similar(T, $elty, n) - beta = similar(T, $elty, n) - lwork = BlasInt(-1) - work = Vector{$elty}(undef, 1) - liwork = BlasInt(-1) - iwork = Vector{BlasInt}(undef, 1) - info = Ref{BlasInt}() - select = convert(Array{BlasInt}, select) - for i = 1:2 # first call returns lwork as work[1] and liwork as iwork[1] - ccall((@blasfunc($tgsen), libblastrampoline), Cvoid, - (Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, Ptr{BlasInt}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ptr{$elty}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ref{BlasInt}, Ptr{Cvoid}, Ptr{Cvoid}, Ptr{Cvoid}, - Ptr{$elty}, Ref{BlasInt}, Ptr{BlasInt}, Ref{BlasInt}, - Ptr{BlasInt}), - 0, 1, 1, select, - n, S, lds, T, - ldt, alpha, beta, - Q, ldq, Z, ldz, - m, C_NULL, C_NULL, C_NULL, - work, lwork, iwork, liwork, - info) - chklapackerror(info[]) - if i == 1 # only estimated optimal lwork, liwork - lwork = BlasInt(real(work[1])) - resize!(work, lwork) - liwork = BlasInt(real(iwork[1])) - resize!(iwork, liwork) - end - end - S, T, alpha, beta, Q, Z - end - end -end - -""" - trexc!(compq, ifst, ilst, T, Q) -> (T, Q) - trexc!(ifst, ilst, T, Q) -> (T, Q) - -Reorder the Schur factorization `T` of a matrix, such that the diagonal block -of `T` with row index `ifst` is moved to row index `ilst`. If `compq = V`, the Schur -vectors `Q` are reordered. If `compq = N` they are not modified. The 4-arg method -calls the 5-arg method with `compq = V`. -""" -trexc!(compq::AbstractChar, ifst::BlasInt, ilst::BlasInt, T::AbstractMatrix, Q::AbstractMatrix) - -""" - trsen!(job, compq, select, T, Q) -> (T, Q, w, s, sep) - trsen!(select, T, Q) -> (T, Q, w, s, sep) - -Reorder the Schur factorization of a matrix and optionally finds reciprocal -condition numbers. If `job = N`, no condition numbers are found. If `job = E`, -only the condition number for this cluster of eigenvalues is found. If -`job = V`, only the condition number for the invariant subspace is found. -If `job = B` then the condition numbers for the cluster and subspace are -found. If `compq = V` the Schur vectors `Q` are updated. If `compq = N` -the Schur vectors are not modified. `select` determines which -eigenvalues are in the cluster. The 3-arg method calls the 5-arg method -with `job = N` and `compq = V`. - -Returns `T`, `Q`, reordered eigenvalues in `w`, the condition number of the -cluster of eigenvalues `s`, and the condition number of the invariant subspace -`sep`. -""" -trsen!(compq::AbstractChar, job::AbstractChar, select::AbstractVector{BlasInt}, T::AbstractMatrix, Q::AbstractMatrix) - -""" - tgsen!(select, S, T, Q, Z) -> (S, T, alpha, beta, Q, Z) - -Reorders the vectors of a generalized Schur decomposition. `select` specifies -the eigenvalues in each cluster. 
-""" -tgsen!(select::AbstractVector{BlasInt}, S::AbstractMatrix, T::AbstractMatrix, Q::AbstractMatrix, Z::AbstractMatrix) - -for (fn, elty, relty) in ((:dtrsyl_, :Float64, :Float64), - (:strsyl_, :Float32, :Float32), - (:ztrsyl_, :ComplexF64, :Float64), - (:ctrsyl_, :ComplexF32, :Float32)) - @eval begin - function trsyl!(transa::AbstractChar, transb::AbstractChar, A::AbstractMatrix{$elty}, - B::AbstractMatrix{$elty}, C::AbstractMatrix{$elty}, isgn::Int=1) - require_one_based_indexing(A, B, C) - chktrans(transa) - chktrans(transb) - chkstride1(A, B, C) - m, n = checksquare(A), checksquare(B) - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - m1, n1 = size(C) - if m != m1 || n != n1 - throw(DimensionMismatch(lazy"dimensions of A, ($m,$n), and C, ($m1,$n1), must match")) - end - ldc = max(1, stride(C, 2)) - scale = Ref{$relty}() - info = Ref{BlasInt}() - ccall((@blasfunc($fn), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ref{BlasInt}, - Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, - Ptr{$relty}, Ref{BlasInt}, Clong, Clong), - transa, transb, isgn, m, n, - A, lda, B, ldb, C, ldc, - scale, info, 1, 1) - chklapackerror(info[]) - C, scale[] - end - end -end - -""" - trsyl!(transa, transb, A, B, C, isgn=1) -> (C, scale) - -Solves the Sylvester matrix equation `A * X +/- X * B = scale*C` where `A` and -`B` are both quasi-upper triangular. If `transa = N`, `A` is not modified. -If `transa = T`, `A` is transposed. If `transa = C`, `A` is conjugate -transposed. Similarly for `transb` and `B`. If `isgn = 1`, the equation -`A * X + X * B = scale * C` is solved. If `isgn = -1`, the equation -`A * X - X * B = scale * C` is solved. - -Returns `X` (overwriting `C`) and `scale`. -""" -trsyl!(transa::AbstractChar, transb::AbstractChar, A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix, isgn::Int=1) - -for (fn, elty) in ((:dlacpy_, :Float64), - (:slacpy_, :Float32), - (:zlacpy_, :ComplexF64), - (:clacpy_, :ComplexF32)) - @eval begin - # SUBROUTINE DLACPY( UPLO, M, N, A, LDA, B, LDB ) - # .. Scalar Arguments .. - # CHARACTER UPLO - # INTEGER LDA, LDB, M, N - # .. - # .. Array Arguments .. - # DOUBLE PRECISION A( LDA, * ), B( LDB, * ) - # .. - function lacpy!(B::AbstractMatrix{$elty}, A::AbstractMatrix{$elty}, uplo::AbstractChar) - require_one_based_indexing(A, B) - chkstride1(A, B) - m, n = size(A) - m1, n1 = size(B) - if uplo == 'U' - lacpy_size_check((m1, n1), (n < m ? n : m, n)) - elseif uplo == 'L' - lacpy_size_check((m1, n1), (m, m < n ? m : n)) - else - lacpy_size_check((m1, n1), (m, n)) - end - lda = max(1, stride(A, 2)) - ldb = max(1, stride(B, 2)) - ccall((@blasfunc($fn), libblastrampoline), Cvoid, - (Ref{UInt8}, Ref{BlasInt}, Ref{BlasInt}, Ptr{$elty}, - Ref{BlasInt}, Ptr{$elty}, Ref{BlasInt}, Clong), - uplo, m, n, A, lda, B, ldb, 1) - B - end - end -end - -# The noinline annotation reduces latency -@noinline lacpy_size_check((m1, n1), (m, n)) = (m1 < m || n1 < n) && throw(DimensionMismatch(lazy"B of size ($m1,$n1) should have at least size ($m,$n)")) - -""" - lacpy!(B, A, uplo) -> B - -Copies all or part of a matrix `A` to another matrix `B`. -uplo specifies the part of the matrix `A` to be copied to `B`. -Set `uplo = 'L'` for the lower triangular part, `uplo = 'U'` -for the upper triangular part, any other character for all -the matrix `A`. - -# Examples -```jldoctest -julia> A = [1. 2. ; 3. 4.]; - -julia> B = [0. 0. ; 0. 
0.]; - -julia> LAPACK.lacpy!(B, A, 'U') -2×2 Matrix{Float64}: - 1.0 2.0 - 0.0 4.0 -``` -""" -lacpy!(B::AbstractMatrix, A::AbstractMatrix, uplo::AbstractChar) - -end # module diff --git a/stdlib/LinearAlgebra/src/lbt.jl b/stdlib/LinearAlgebra/src/lbt.jl deleted file mode 100644 index 81d10f930c8c5..0000000000000 --- a/stdlib/LinearAlgebra/src/lbt.jl +++ /dev/null @@ -1,348 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -## This file contains libblastrampoline-specific APIs - -# Keep these in sync with `src/libblastrampoline_internal.h` -struct lbt_library_info_t - libname::Cstring - handle::Ptr{Cvoid} - suffix::Cstring - active_forwards::Ptr{UInt8} - interface::Int32 - complex_retstyle::Int32 - f2c::Int32 - cblas::Int32 -end - -macro get_warn(map, key) - return quote - if !haskey($(esc(map)), $(esc(key))) - println(Core.stderr, string("Warning: [LBT] Unknown key into ", $(string(map)), ": ", $(esc(key)), ", defaulting to :unknown")) - # All the unknown values share a common value: `-1` - $(esc(map))[$(esc(LBT_INTERFACE_UNKNOWN))] - else - $(esc(map))[$(esc(key))] - end - end -end - -const LBT_INTERFACE_LP64 = 32 -const LBT_INTERFACE_ILP64 = 64 -const LBT_INTERFACE_UNKNOWN = -1 -const LBT_INTERFACE_MAP = Dict( - LBT_INTERFACE_LP64 => :lp64, - LBT_INTERFACE_ILP64 => :ilp64, - LBT_INTERFACE_UNKNOWN => :unknown, -) -const LBT_INV_INTERFACE_MAP = Dict(v => k for (k, v) in LBT_INTERFACE_MAP) - -const LBT_F2C_PLAIN = 0 -const LBT_F2C_REQUIRED = 1 -const LBT_F2C_UNKNOWN = -1 -const LBT_F2C_MAP = Dict( - LBT_F2C_PLAIN => :plain, - LBT_F2C_REQUIRED => :required, - LBT_F2C_UNKNOWN => :unknown, -) -const LBT_INV_F2C_MAP = Dict(v => k for (k, v) in LBT_F2C_MAP) - -const LBT_COMPLEX_RETSTYLE_NORMAL = 0 -const LBT_COMPLEX_RETSTYLE_ARGUMENT = 1 -const LBT_COMPLEX_RETSTYLE_FNDA = 2 -const LBT_COMPLEX_RETSTYLE_UNKNOWN = -1 -const LBT_COMPLEX_RETSTYLE_MAP = Dict( - LBT_COMPLEX_RETSTYLE_NORMAL => :normal, - LBT_COMPLEX_RETSTYLE_ARGUMENT => :argument, - LBT_COMPLEX_RETSTYLE_FNDA => :float_normal_double_argument, - LBT_COMPLEX_RETSTYLE_UNKNOWN => :unknown, -) -const LBT_INV_COMPLEX_RETSTYLE_MAP = Dict(v => k for (k, v) in LBT_COMPLEX_RETSTYLE_MAP) - -const LBT_CBLAS_CONFORMANT = 0 -const LBT_CBLAS_DIVERGENT = 1 -const LBT_CBLAS_UNKNOWN = -1 -const LBT_CBLAS_MAP = Dict( - LBT_CBLAS_CONFORMANT => :conformant, - LBT_CBLAS_DIVERGENT => :divergent, - LBT_CBLAS_UNKNOWN => :unknown, -) -const LBT_INV_CBLAS_MAP = Dict(v => k for (k, v) in LBT_CBLAS_MAP) - -struct LBTLibraryInfo - libname::String - handle::Ptr{Cvoid} - suffix::String - active_forwards::Vector{UInt8} - interface::Symbol - complex_retstyle::Symbol - f2c::Symbol - cblas::Symbol - - function LBTLibraryInfo(lib_info::lbt_library_info_t, num_exported_symbols::UInt32) - return new( - unsafe_string(lib_info.libname), - lib_info.handle, - unsafe_string(lib_info.suffix), - unsafe_wrap(Vector{UInt8}, lib_info.active_forwards, div(num_exported_symbols,8)+1), - @get_warn(LBT_INTERFACE_MAP, lib_info.interface), - @get_warn(LBT_COMPLEX_RETSTYLE_MAP, lib_info.complex_retstyle), - @get_warn(LBT_F2C_MAP, lib_info.f2c), - @get_warn(LBT_CBLAS_MAP, lib_info.cblas), - ) - end -end - -struct lbt_config_t - loaded_libs::Ptr{Ptr{lbt_library_info_t}} - build_flags::UInt32 - exported_symbols::Ptr{Cstring} - num_exported_symbols::UInt32 -end -const LBT_BUILDFLAGS_DEEPBINDLESS = 0x01 -const LBT_BUILDFLAGS_F2C_CAPABLE = 0x02 -const LBT_BUILDFLAGS_CBLAS_DIVERGENCE = 0x04 -const LBT_BUILDFLAGS_COMPLEX_RETSTYLE = 0x08 -const 
LBT_BUILDFLAGS_SYMBOL_TRIMMING = 0x10 -const LBT_BUILDFLAGS_MAP = Dict( - LBT_BUILDFLAGS_DEEPBINDLESS => :deepbindless, - LBT_BUILDFLAGS_F2C_CAPABLE => :f2c_capable, - LBT_BUILDFLAGS_CBLAS_DIVERGENCE => :cblas_divergence, - LBT_BUILDFLAGS_COMPLEX_RETSTYLE => :complex_retstyle, - LBT_BUILDFLAGS_SYMBOL_TRIMMING => :symbol_trimming, -) - -struct LBTConfig - loaded_libs::Vector{LBTLibraryInfo} - build_flags::Vector{Symbol} - exported_symbols::Vector{String} - - function LBTConfig(config::lbt_config_t) - # Decode OR'ed flags into a list of names - build_flag_names = Symbol[] - for (flag, name) in LBT_BUILDFLAGS_MAP - if config.build_flags & flag != 0x00 - push!(build_flag_names, name) - end - end - - # Load all exported symbol names - exported_symbols = String[] - for sym_idx in 1:config.num_exported_symbols - str_ptr = unsafe_load(config.exported_symbols, sym_idx) - if str_ptr != C_NULL - push!(exported_symbols, unsafe_string(str_ptr)) - else - println(Core.stderr, "Error: NULL string in lbt_config.exported_symbols[$(sym_idx)]") - end - end - - # Unpack library info structures - libs = LBTLibraryInfo[] - idx = 1 - lib_ptr = unsafe_load(config.loaded_libs, idx) - while lib_ptr != C_NULL - push!(libs, LBTLibraryInfo(unsafe_load(lib_ptr), config.num_exported_symbols)) - - idx += 1 - lib_ptr = unsafe_load(config.loaded_libs, idx) - end - return new( - libs, - build_flag_names, - exported_symbols, - ) - end -end - -Base.show(io::IO, lbt::LBTLibraryInfo) = print(io, "LBTLibraryInfo(", basename(lbt.libname), ", ", lbt.interface, ")") -function Base.show(io::IO, mime::MIME{Symbol("text/plain")}, lbt::LBTLibraryInfo) - summary(io, lbt); println(io) - println(io, "├ Library: ", basename(lbt.libname)) - println(io, "├ Interface: ", lbt.interface) - println(io, "├ Complex return style: ", lbt.complex_retstyle) - println(io, "├ F2C: ", lbt.f2c) - print(io, "└ CBLAS: ", lbt.cblas) -end - -function Base.show(io::IO, lbt::LBTConfig) - if length(lbt.loaded_libs) <= 3 - print(io, "LBTConfig(") - gen = (string("[", uppercase(string(l.interface)), "] ", - basename(l.libname)) for l in lbt.loaded_libs) - print(io, join(gen, ", ")) - print(io, ")") - else - print(io, "LBTConfig(...)") - end -end -function Base.show(io::IO, mime::MIME{Symbol("text/plain")}, lbt::LBTConfig) - summary(io, lbt); println(io) - println(io, "Libraries: ") - for (i,l) in enumerate(lbt.loaded_libs) - char = i == length(lbt.loaded_libs) ? "└" : "├" - interface_str = if l.interface === :ilp64 - "ILP64" - elseif l.interface === :lp64 - " LP64" - else - "UNKWN" - end - print(io, char, " [", interface_str,"] ", basename(l.libname)) - i !== length(lbt.loaded_libs) && println() - end -end - -mutable struct ConfigCache - @atomic config::Union{Nothing,LBTConfig} - lock::ReentrantLock -end - -# In the event that users want to call `lbt_get_config()` multiple times (e.g. for -# runtime checks of which BLAS vendor is providing a symbol), let's cache the value -# and clear it only when someone calls something that would cause it to change. 
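The cached configuration managed below is what the (unexported) `BLAS.lbt_get_config` returns, assuming this file is included into the `BLAS` submodule as in the existing stdlib layout. A small, illustrative sketch of inspecting it; the output depends entirely on which BLAS/LAPACK libraries are loaded at runtime:

```julia
using LinearAlgebra

cfg = BLAS.lbt_get_config()   # LBTConfig: loaded libraries, build flags, exported symbols
for lib in cfg.loaded_libs
    # each entry is an LBTLibraryInfo with the library path and its LP64/ILP64 interface
    println(basename(lib.libname), " => ", lib.interface)
end
```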
-const _CACHED_CONFIG = ConfigCache(nothing, ReentrantLock()) - -function lbt_get_config() - config = @atomic :acquire _CACHED_CONFIG.config - config === nothing || return config - return lock(_CACHED_CONFIG.lock) do - local config = @atomic :monotonic _CACHED_CONFIG.config - config === nothing || return config - config_ptr = ccall((:lbt_get_config, libblastrampoline), Ptr{lbt_config_t}, ()) - @atomic :release _CACHED_CONFIG.config = LBTConfig(unsafe_load(config_ptr)) - end -end - -function _clear_config_with(f) - lock(_CACHED_CONFIG.lock) do - @atomic :release _CACHED_CONFIG.config = nothing - f() - end -end - -function lbt_get_num_threads() - return ccall((:lbt_get_num_threads, libblastrampoline), Int32, ()) -end - -function lbt_set_num_threads(nthreads) - return ccall((:lbt_set_num_threads, libblastrampoline), Cvoid, (Int32,), nthreads) -end - -function lbt_forward(path::AbstractString; clear::Bool = false, verbose::Bool = false, suffix_hint::Union{String,Nothing} = nothing) - _clear_config_with() do - return ccall((:lbt_forward, libblastrampoline), Int32, (Cstring, Int32, Int32, Cstring), - path, clear ? 1 : 0, verbose ? 1 : 0, something(suffix_hint, C_NULL)) - end -end - -function lbt_set_default_func(addr) - _clear_config_with() do - return ccall((:lbt_set_default_func, libblastrampoline), Cvoid, (Ptr{Cvoid},), addr) - end -end - -function lbt_get_default_func() - return ccall((:lbt_get_default_func, libblastrampoline), Ptr{Cvoid}, ()) -end - -""" - lbt_find_backing_library(symbol_name, interface; config::LBTConfig = lbt_get_config()) - -Return the `LBTLibraryInfo` that represents the backing library for the given symbol -exported from libblastrampoline. This allows us to discover which library will service -a particular BLAS call from Julia code. This method returns `nothing` if either of the -following conditions are met: - - * No loaded library exports the desired symbol (the default function will be called) - * The symbol was set via `lbt_set_forward()`, which does not track library provenance. - -If the given `symbol_name` is not contained within the list of exported symbols, an -`ArgumentError` will be thrown. -""" -function lbt_find_backing_library(symbol_name, interface::Symbol; - config::LBTConfig = lbt_get_config()) - if interface ∉ (:ilp64, :lp64) - throw(ArgumentError(lazy"Invalid interface specification: '$(interface)'")) - end - symbol_idx = findfirst(s -> s == symbol_name, config.exported_symbols) - if symbol_idx === nothing - throw(ArgumentError(lazy"Invalid exported symbol name '$(symbol_name)'")) - end - # Convert to zero-indexed - symbol_idx -= 1 - - forward_byte_offset = div(symbol_idx, 8) - forward_byte_mask = 1 << mod(symbol_idx, 8) - for lib in filter(l -> l.interface == interface, config.loaded_libs) - if lib.active_forwards[forward_byte_offset+1] & forward_byte_mask != 0x00 - return lib - end - end - - # No backing library was found - return nothing -end - - -""" - lbt_forwarded_funcs(config::LBTConfig, lib::LBTLibraryInfo) - -Given a backing library `lib`, return the list of all functions that are -forwarded to that library, as a vector of `String`s. 
-""" -function lbt_forwarded_funcs(config::LBTConfig, lib::LBTLibraryInfo) - forwarded_funcs = String[] - for (symbol_idx, symbol) in enumerate(config.exported_symbols) - forward_byte_offset = div(symbol_idx - 1, 8) - forward_byte_mask = 1 << mod(symbol_idx - 1, 8) - if lib.active_forwards[forward_byte_offset+1] & forward_byte_mask != 0x00 - push!(forwarded_funcs, symbol) - end - end - return forwarded_funcs -end - - -## NOTE: Manually setting forwards is referred to as the 'footgun API'. It allows truly -## bizarre and complex setups to be created. If you run into strange errors while using -## it, the first thing you should ask yourself is whether you've set things up properly. -function lbt_set_forward(symbol_name, addr, interface, - complex_retstyle = LBT_COMPLEX_RETSTYLE_NORMAL, - f2c = LBT_F2C_PLAIN; verbose::Bool = false) - _clear_config_with() do - return ccall( - (:lbt_set_forward, libblastrampoline), - Int32, - (Cstring, Ptr{Cvoid}, Int32, Int32, Int32, Int32), - string(symbol_name), - addr, - Int32(interface), - Int32(complex_retstyle), - Int32(f2c), - verbose ? Int32(1) : Int32(0), - ) - end -end -function lbt_set_forward(symbol_name, addr, interface::Symbol, - complex_retstyle::Symbol = :normal, - f2c::Symbol = :plain; kwargs...) - return lbt_set_forward(symbol_name, addr, - LBT_INV_INTERFACE_MAP[interface], - LBT_INV_COMPLEX_RETSTYLE_MAP[complex_retstyle], - LBT_INV_F2C_MAP[f2c]; - kwargs...) -end - -function lbt_get_forward(symbol_name, interface, f2c = LBT_F2C_PLAIN) - return ccall( - (:lbt_get_forward, libblastrampoline), - Ptr{Cvoid}, - (Cstring, Int32, Int32), - string(symbol_name), - Int32(interface), - Int32(f2c), - ) -end -function lbt_get_forward(symbol_name, interface::Symbol, f2c::Symbol = :plain) - return lbt_get_forward(symbol_name, LBT_INV_INTERFACE_MAP[interface], LBT_INV_F2C_MAP[f2c]) -end diff --git a/stdlib/LinearAlgebra/src/ldlt.jl b/stdlib/LinearAlgebra/src/ldlt.jl deleted file mode 100644 index 89e57d0dd27eb..0000000000000 --- a/stdlib/LinearAlgebra/src/ldlt.jl +++ /dev/null @@ -1,224 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -""" - LDLt <: Factorization - -Matrix factorization type of the `LDLt` factorization of a real [`SymTridiagonal`](@ref) -matrix `S` such that `S = L*Diagonal(d)*L'`, where `L` is a [`UnitLowerTriangular`](@ref) -matrix and `d` is a vector. The main use of an `LDLt` factorization `F = ldlt(S)` -is to solve the linear system of equations `Sx = b` with `F\\b`. This is the -return type of [`ldlt`](@ref), the corresponding matrix factorization function. 
- -The individual components of the factorization `F::LDLt` can be accessed via `getproperty`: - -| Component | Description | -|:---------:|:--------------------------------------------| -| `F.L` | `L` (unit lower triangular) part of `LDLt` | -| `F.D` | `D` (diagonal) part of `LDLt` | -| `F.Lt` | `Lt` (unit upper triangular) part of `LDLt` | -| `F.d` | diagonal values of `D` as a `Vector` | - -# Examples -```jldoctest -julia> S = SymTridiagonal([3., 4., 5.], [1., 2.]) -3×3 SymTridiagonal{Float64, Vector{Float64}}: - 3.0 1.0 ⋅ - 1.0 4.0 2.0 - ⋅ 2.0 5.0 - -julia> F = ldlt(S) -LDLt{Float64, SymTridiagonal{Float64, Vector{Float64}}} -L factor: -3×3 UnitLowerTriangular{Float64, SymTridiagonal{Float64, Vector{Float64}}}: - 1.0 ⋅ ⋅ - 0.333333 1.0 ⋅ - 0.0 0.545455 1.0 -D factor: -3×3 Diagonal{Float64, Vector{Float64}}: - 3.0 ⋅ ⋅ - ⋅ 3.66667 ⋅ - ⋅ ⋅ 3.90909 -``` -""" -struct LDLt{T,S<:AbstractMatrix{T}} <: Factorization{T} - data::S - - function LDLt{T,S}(data) where {T,S<:AbstractMatrix{T}} - require_one_based_indexing(data) - new{T,S}(data) - end -end -LDLt(data::AbstractMatrix{T}) where {T} = LDLt{T,typeof(data)}(data) -LDLt{T}(data::AbstractMatrix) where {T} = LDLt(convert(AbstractMatrix{T}, data)::AbstractMatrix{T}) - -size(S::LDLt) = size(S.data) -size(S::LDLt, i::Integer) = size(S.data, i) - -LDLt{T,S}(F::LDLt{T,S}) where {T,S<:AbstractMatrix{T}} = F -LDLt{T,S}(F::LDLt) where {T,S<:AbstractMatrix{T}} = LDLt{T,S}(convert(S, F.data)::S) -LDLt{T}(F::LDLt{T}) where {T} = F -LDLt{T}(F::LDLt) where {T} = LDLt(convert(AbstractMatrix{T}, F.data)::AbstractMatrix{T}) - -Factorization{T}(F::LDLt{T}) where {T} = F -Factorization{T}(F::LDLt) where {T} = LDLt{T}(F) - -function getproperty(F::LDLt{<:Any, <:SymTridiagonal}, d::Symbol) - Fdata = getfield(F, :data) - if d === :d - return Fdata.dv - elseif d === :D - return Diagonal(Fdata.dv) - elseif d === :L - return UnitLowerTriangular(Fdata) - elseif d === :Lt - return UnitUpperTriangular(Fdata) - else - return getfield(F, d) - end -end - -adjoint(F::LDLt{<:Real,<:SymTridiagonal}) = F -adjoint(F::LDLt) = LDLt(copy(adjoint(F.data))) - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::LDLt) - summary(io, F); println(io) - println(io, "L factor:") - show(io, mime, F.L) - println(io, "\nD factor:") - show(io, mime, F.D) -end - -# SymTridiagonal -""" - ldlt!(S::SymTridiagonal) -> LDLt - -Same as [`ldlt`](@ref), but saves space by overwriting the input `S`, instead of creating a copy. - -# Examples -```jldoctest -julia> S = SymTridiagonal([3., 4., 5.], [1., 2.]) -3×3 SymTridiagonal{Float64, Vector{Float64}}: - 3.0 1.0 ⋅ - 1.0 4.0 2.0 - ⋅ 2.0 5.0 - -julia> ldltS = ldlt!(S); - -julia> ldltS === S -false - -julia> S -3×3 SymTridiagonal{Float64, Vector{Float64}}: - 3.0 0.333333 ⋅ - 0.333333 3.66667 0.545455 - ⋅ 0.545455 3.90909 -``` -""" -function ldlt!(S::SymTridiagonal{T,V}) where {T,V} - n = size(S,1) - d = S.dv - e = S.ev - @inbounds for i in 1:n-1 - iszero(d[i]) && throw(ZeroPivotException(i)) - e[i] /= d[i] - d[i+1] -= e[i]^2*d[i] - end - return LDLt{T,SymTridiagonal{T,V}}(S) -end - -""" - ldlt(S::SymTridiagonal) -> LDLt - -Compute an `LDLt` (i.e., ``LDL^T``) factorization of the real symmetric tridiagonal matrix `S` such that `S = L*Diagonal(d)*L'` -where `L` is a unit lower triangular matrix and `d` is a vector. The main use of an `LDLt` -factorization `F = ldlt(S)` is to solve the linear system of equations `Sx = b` with `F\\b`. 
- -See also [`bunchkaufman`](@ref) for a similar, but pivoted, factorization of arbitrary symmetric or Hermitian matrices. - -# Examples -```jldoctest -julia> S = SymTridiagonal([3., 4., 5.], [1., 2.]) -3×3 SymTridiagonal{Float64, Vector{Float64}}: - 3.0 1.0 ⋅ - 1.0 4.0 2.0 - ⋅ 2.0 5.0 - -julia> ldltS = ldlt(S); - -julia> b = [6., 7., 8.]; - -julia> ldltS \\ b -3-element Vector{Float64}: - 1.7906976744186047 - 0.627906976744186 - 1.3488372093023255 - -julia> S \\ b -3-element Vector{Float64}: - 1.7906976744186047 - 0.627906976744186 - 1.3488372093023255 -``` -""" -function ldlt(M::SymTridiagonal{T}; shift::Number=false) where T - S = typeof((zero(T)+shift)/one(T)) - Mₛ = SymTridiagonal{S}(copymutable_oftype(M.dv, S), copymutable_oftype(M.ev, S)) - if !iszero(shift) - Mₛ.dv .+= shift - end - return ldlt!(Mₛ) -end - -factorize(S::SymTridiagonal) = ldlt(S) - -function ldiv!(S::LDLt{<:Any,<:SymTridiagonal}, B::AbstractVecOrMat) - require_one_based_indexing(B) - n, nrhs = size(B, 1), size(B, 2) - if size(S,1) != n - throw(DimensionMismatch(lazy"Matrix has dimensions $(size(S)) but right hand side has first dimension $n")) - end - d = S.data.dv - l = S.data.ev - @inbounds begin - for i = 2:n - li1 = l[i-1] - @simd for j = 1:nrhs - B[i,j] -= li1*B[i-1,j] - end - end - dn = d[n] - @simd for j = 1:nrhs - B[n,j] /= dn - end - for i = n-1:-1:1 - di = d[i] - li = l[i] - @simd for j = 1:nrhs - B[i,j] /= di - B[i,j] -= li*B[i+1,j] - end - end - end - return B -end - -rdiv!(B::AbstractVecOrMat, S::LDLt{<:Any,<:SymTridiagonal}) = - transpose(ldiv!(S, transpose(B))) - -function logabsdet(F::LDLt{<:Any,<:SymTridiagonal}) - it = (F.data[i,i] for i in 1:size(F, 1)) - return sum(log∘abs, it), prod(sign, it) -end - -# Conversion methods -function SymTridiagonal(F::LDLt{<:Any, <:SymTridiagonal}) - e = copy(F.data.ev) - d = copy(F.data.dv) - e .*= d[1:end-1] - d[2:end] += e .* F.data.ev - SymTridiagonal(d, e) -end -AbstractMatrix(F::LDLt) = SymTridiagonal(F) -AbstractArray(F::LDLt) = AbstractMatrix(F) -Matrix(F::LDLt) = Array(AbstractArray(F)) -Array(F::LDLt) = Matrix(F) diff --git a/stdlib/LinearAlgebra/src/lq.jl b/stdlib/LinearAlgebra/src/lq.jl deleted file mode 100644 index 07d918c4374a5..0000000000000 --- a/stdlib/LinearAlgebra/src/lq.jl +++ /dev/null @@ -1,203 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# LQ Factorizations -""" - LQ <: Factorization - -Matrix factorization type of the `LQ` factorization of a matrix `A`. The `LQ` -decomposition is the [`QR`](@ref) decomposition of `transpose(A)`. This is the return -type of [`lq`](@ref), the corresponding matrix factorization function. - -If `S::LQ` is the factorization object, the lower triangular component can be -obtained via `S.L`, and the orthogonal/unitary component via `S.Q`, such that -`A ≈ S.L*S.Q`. - -Iterating the decomposition produces the components `S.L` and `S.Q`. - -# Examples -```jldoctest -julia> A = [5. 7.; -2. -4.] 
-2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> S = lq(A) -LQ{Float64, Matrix{Float64}, Vector{Float64}} -L factor: -2×2 Matrix{Float64}: - -8.60233 0.0 - 4.41741 -0.697486 -Q factor: 2×2 LinearAlgebra.LQPackedQ{Float64, Matrix{Float64}, Vector{Float64}} - -julia> S.L * S.Q -2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> l, q = S; # destructuring via iteration - -julia> l == S.L && q == S.Q -true -``` -""" -struct LQ{T,S<:AbstractMatrix{T},C<:AbstractVector{T}} <: Factorization{T} - factors::S - τ::C - - function LQ{T,S,C}(factors, τ) where {T,S<:AbstractMatrix{T},C<:AbstractVector{T}} - require_one_based_indexing(factors) - new{T,S,C}(factors, τ) - end -end -LQ(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T} = - LQ{T,typeof(factors),typeof(τ)}(factors, τ) -LQ{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} = - LQ(convert(AbstractMatrix{T}, factors), convert(AbstractVector{T}, τ)) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(LQ{T,S}(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T,S}, - LQ{T,S,typeof(τ)}(factors, τ), false) - -# iteration for destructuring into components -Base.iterate(S::LQ) = (S.L, Val(:Q)) -Base.iterate(S::LQ, ::Val{:Q}) = (S.Q, Val(:done)) -Base.iterate(S::LQ, ::Val{:done}) = nothing - -""" - lq!(A) -> LQ - -Compute the [`LQ`](@ref) factorization of `A`, using the input -matrix as a workspace. See also [`lq`](@ref). -""" -lq!(A::StridedMatrix{<:BlasFloat}) = LQ(LAPACK.gelqf!(A)...) - -""" - lq(A) -> S::LQ - -Compute the LQ decomposition of `A`. The decomposition's lower triangular -component can be obtained from the [`LQ`](@ref) object `S` via `S.L`, and the -orthogonal/unitary component via `S.Q`, such that `A ≈ S.L*S.Q`. - -Iterating the decomposition produces the components `S.L` and `S.Q`. - -The LQ decomposition is the QR decomposition of `transpose(A)`, and it is useful -in order to compute the minimum-norm solution `lq(A) \\ b` to an underdetermined -system of equations (`A` has more columns than rows, but has full row rank). - -# Examples -```jldoctest -julia> A = [5. 7.; -2. -4.] 
-2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> S = lq(A) -LQ{Float64, Matrix{Float64}, Vector{Float64}} -L factor: -2×2 Matrix{Float64}: - -8.60233 0.0 - 4.41741 -0.697486 -Q factor: 2×2 LinearAlgebra.LQPackedQ{Float64, Matrix{Float64}, Vector{Float64}} - -julia> S.L * S.Q -2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> l, q = S; # destructuring via iteration - -julia> l == S.L && q == S.Q -true -``` -""" -lq(A::AbstractMatrix{T}) where {T} = lq!(copy_similar(A, lq_eltype(T))) -lq(x::Number) = lq!(fill(convert(lq_eltype(typeof(x)), x), 1, 1)) - -lq_eltype(::Type{T}) where {T} = typeof(zero(T) / sqrt(abs2(one(T)))) - -copy(A::LQ) = LQ(copy(A.factors), copy(A.τ)) - -LQ{T}(A::LQ) where {T} = LQ(convert(AbstractMatrix{T}, A.factors), convert(Vector{T}, A.τ)) -Factorization{T}(A::LQ) where {T} = LQ{T}(A) - -AbstractMatrix(A::LQ) = A.L*A.Q -AbstractArray(A::LQ) = AbstractMatrix(A) -Matrix(A::LQ) = Array(AbstractArray(A)) -Array(A::LQ) = Matrix(A) - -transpose(F::LQ{<:Real}) = F' -transpose(::LQ) = - throw(ArgumentError("transpose of LQ decomposition is not supported, consider using adjoint")) - -Base.copy(F::AdjointFactorization{T,<:LQ{T}}) where {T} = - QR{T,typeof(F.parent.factors),typeof(F.parent.τ)}(copy(adjoint(F.parent.factors)), copy(F.parent.τ)) - -function getproperty(F::LQ, d::Symbol) - m, n = size(F) - if d === :L - return tril!(getfield(F, :factors)[1:m, 1:min(m,n)]) - elseif d === :Q - return LQPackedQ(getfield(F, :factors), getfield(F, :τ)) - else - return getfield(F, d) - end -end - -Base.propertynames(F::LQ, private::Bool=false) = - (:L, :Q, (private ? fieldnames(typeof(F)) : ())...) - -# getindex(A::LQPackedQ, i::Integer, j::Integer) = -# lmul!(A, setindex!(zeros(eltype(A), size(A, 2)), 1, j))[i] - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::LQ) - summary(io, F); println(io) - println(io, "L factor:") - show(io, mime, F.L) - print(io, "\nQ factor: ") - show(io, mime, F.Q) -end - -size(F::LQ, dim::Integer) = size(getfield(F, :factors), dim) -size(F::LQ) = size(getfield(F, :factors)) - -## Multiplication by LQ -function lmul!(A::LQ, B::AbstractVecOrMat) - lmul!(LowerTriangular(A.L), view(lmul!(A.Q, B), 1:size(A,1), axes(B,2))) - return B -end -function *(A::LQ{TA}, B::AbstractVecOrMat{TB}) where {TA,TB} - TAB = promote_type(TA, TB) - _cut_B(lmul!(convert(Factorization{TAB}, A), copy_similar(B, TAB)), 1:size(A,1)) -end - -# With a real lhs and complex rhs with the same precision, we can reinterpret -# the complex rhs as a real rhs with twice the number of columns -function (\)(F::LQ{T}, B::VecOrMat{Complex{T}}) where T<:BlasReal - require_one_based_indexing(B) - X = zeros(T, size(F,2), 2*size(B,2)) - X[1:size(B,1), 1:size(B,2)] .= real.(B) - X[1:size(B,1), size(B,2)+1:size(X,2)] .= imag.(B) - ldiv!(F, X) - return reshape(copy(reinterpret(Complex{T}, copy(transpose(reshape(X, div(length(X), 2), 2))))), - isa(B, AbstractVector) ? 
(size(F,2),) : (size(F,2), size(B,2))) -end - - -function ldiv!(A::LQ, B::AbstractVecOrMat) - require_one_based_indexing(B) - m, n = size(A) - m ≤ n || throw(DimensionMismatch("LQ solver does not support overdetermined systems (more rows than columns)")) - - ldiv!(LowerTriangular(A.L), view(B, 1:size(A,1), axes(B,2))) - return lmul!(adjoint(A.Q), B) -end - -function ldiv!(Fadj::AdjointFactorization{<:Any,<:LQ}, B::AbstractVecOrMat) - require_one_based_indexing(B) - m, n = size(Fadj) - m >= n || throw(DimensionMismatch("solver does not support underdetermined systems (more columns than rows)")) - - F = parent(Fadj) - lmul!(F.Q, B) - ldiv!(UpperTriangular(adjoint(F.L)), view(B, 1:size(F,1), axes(B,2))) - return B -end diff --git a/stdlib/LinearAlgebra/src/lu.jl b/stdlib/LinearAlgebra/src/lu.jl deleted file mode 100644 index 0837ac08e74ea..0000000000000 --- a/stdlib/LinearAlgebra/src/lu.jl +++ /dev/null @@ -1,834 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -#################### -# LU Factorization # -#################### -""" - LU <: Factorization - -Matrix factorization type of the `LU` factorization of a square matrix `A`. This -is the return type of [`lu`](@ref), the corresponding matrix factorization function. - -The individual components of the factorization `F::LU` can be accessed via [`getproperty`](@ref): - -| Component | Description | -|:----------|:-----------------------------------------| -| `F.L` | `L` (unit lower triangular) part of `LU` | -| `F.U` | `U` (upper triangular) part of `LU` | -| `F.p` | (right) permutation `Vector` | -| `F.P` | (right) permutation `Matrix` | - -Iterating the factorization produces the components `F.L`, `F.U`, and `F.p`. - -# Examples - -```jldoctest -julia> A = [4 3; 6 3] -2×2 Matrix{Int64}: - 4 3 - 6 3 - -julia> F = lu(A) -LU{Float64, Matrix{Float64}, Vector{Int64}} -L factor: -2×2 Matrix{Float64}: - 1.0 0.0 - 0.666667 1.0 -U factor: -2×2 Matrix{Float64}: - 6.0 3.0 - 0.0 1.0 - -julia> F.L * F.U == A[F.p, :] -true - -julia> l, u, p = lu(A); # destructuring via iteration - -julia> l == F.L && u == F.U && p == F.p -true -``` -""" -struct LU{T,S<:AbstractMatrix{T},P<:AbstractVector{<:Integer}} <: Factorization{T} - factors::S - ipiv::P - info::BlasInt # Can be negative to indicate failed unpivoted factorization - - function LU{T,S,P}(factors, ipiv, info) where {T, S<:AbstractMatrix{T}, P<:AbstractVector{<:Integer}} - require_one_based_indexing(factors) - new{T,S,P}(factors, ipiv, info) - end -end -LU(factors::AbstractMatrix{T}, ipiv::AbstractVector{<:Integer}, info::BlasInt) where {T} = - LU{T,typeof(factors),typeof(ipiv)}(factors, ipiv, info) -LU{T}(factors::AbstractMatrix, ipiv::AbstractVector{<:Integer}, info::Integer) where {T} = - LU(convert(AbstractMatrix{T}, factors), ipiv, BlasInt(info)) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(LU{T,S}(factors::AbstractMatrix{T}, ipiv::AbstractVector{<:Integer}, - info::BlasInt) where {T,S}, - LU{T,S,typeof(ipiv)}(factors, ipiv, info), false) - -# iteration for destructuring into components -Base.iterate(S::LU) = (S.L, Val(:U)) -Base.iterate(S::LU, ::Val{:U}) = (S.U, Val(:p)) -Base.iterate(S::LU, ::Val{:p}) = (S.p, Val(:done)) -Base.iterate(S::LU, ::Val{:done}) = nothing - -# LU prefers transpose over adjoint in the real case, override the generic fallback -adjoint(F::LU{<:Real}) = TransposeFactorization(F) -transpose(F::LU{<:Real}) = TransposeFactorization(F) - -function _check_lu_success(info, allowsingular) - if info < 0 # zero pivot 
error from unpivoted LU - checknozeropivot(-info) - else - allowsingular || checknonsingular(info) - end -end - -# the following method is meant to catch calls to lu!(A::LAPACKArray) without a pivoting strategy -lu!(A::StridedMatrix{<:BlasFloat}; check::Bool = true, allowsingular::Bool = false) = lu!(A, RowMaximum(); check, allowsingular) -function lu!(A::StridedMatrix{T}, ::RowMaximum; check::Bool = true, allowsingular::Bool = false) where {T<:BlasFloat} - lpt = LAPACK.getrf!(A; check) - check && _check_lu_success(lpt[3], allowsingular) - return LU{T,typeof(lpt[1]),typeof(lpt[2])}(lpt[1], lpt[2], lpt[3]) -end -function lu!(A::HermOrSym{T}, pivot::Union{RowMaximum,NoPivot,RowNonZero} = lupivottype(T); - check::Bool = true, allowsingular::Bool = false) where {T} - copytri!(A.data, A.uplo, isa(A, Hermitian)) - @inbounds if isa(A, Hermitian) # realify diagonal - for i in axes(A, 1) - A.data[i,i] = A[i,i] - end - end - lu!(A.data, pivot; check, allowsingular) -end -# for backward compatibility -# TODO: remove towards Julia v2 -@deprecate lu!(A::Union{StridedMatrix,HermOrSym,Tridiagonal}, ::Val{true}; check::Bool = true) lu!(A, RowMaximum(); check=check) -@deprecate lu!(A::Union{StridedMatrix,HermOrSym,Tridiagonal}, ::Val{false}; check::Bool = true) lu!(A, NoPivot(); check=check) - -""" - lu!(A, pivot = RowMaximum(); check = true, allowsingular = false) -> LU - -`lu!` is the same as [`lu`](@ref), but saves space by overwriting the -input `A`, instead of creating a copy. An [`InexactError`](@ref) -exception is thrown if the factorization produces a number not representable by the -element type of `A`, e.g. for integer types. - -!!! compat "Julia 1.11" - The `allowsingular` keyword argument was added in Julia 1.11. - -# Examples -```jldoctest -julia> A = [4. 3.; 6. 3.] -2×2 Matrix{Float64}: - 4.0 3.0 - 6.0 3.0 - -julia> F = lu!(A) -LU{Float64, Matrix{Float64}, Vector{Int64}} -L factor: -2×2 Matrix{Float64}: - 1.0 0.0 - 0.666667 1.0 -U factor: -2×2 Matrix{Float64}: - 6.0 3.0 - 0.0 1.0 - -julia> iA = [4 3; 6 3] -2×2 Matrix{Int64}: - 4 3 - 6 3 - -julia> lu!(iA) -ERROR: InexactError: Int64(0.6666666666666666) -Stacktrace: -[...] 
-``` -""" -lu!(A::AbstractMatrix, pivot::Union{RowMaximum,NoPivot,RowNonZero} = lupivottype(eltype(A)); - check::Bool = true, allowsingular::Bool = false) = generic_lufact!(A, pivot; check, allowsingular) -function generic_lufact!(A::AbstractMatrix{T}, pivot::Union{RowMaximum,NoPivot,RowNonZero} = lupivottype(T); - check::Bool = true, allowsingular::Bool = false) where {T} - check && LAPACK.chkfinite(A) - # Extract values - m, n = size(A) - minmn = min(m,n) - - # Initialize variables - info = 0 - ipiv = Vector{BlasInt}(undef, minmn) - @inbounds begin - for k = 1:minmn - # find index max - kp = k - if pivot === RowMaximum() && k < m - amax = abs(A[k, k]) - for i = k+1:m - absi = abs(A[i,k]) - if absi > amax - kp = i - amax = absi - end - end - elseif pivot === RowNonZero() - for i = k:m - if !iszero(A[i,k]) - kp = i - break - end - end - end - ipiv[k] = kp - if !iszero(A[kp,k]) - if k != kp - # Interchange - for i = 1:n - tmp = A[k,i] - A[k,i] = A[kp,i] - A[kp,i] = tmp - end - end - # Scale first column - Akkinv = inv(A[k,k]) - for i = k+1:m - A[i,k] *= Akkinv - end - elseif info == 0 - info = k - end - # Update the rest - for j = k+1:n - for i = k+1:m - A[i,j] -= A[i,k]*A[k,j] - end - end - end - end - if pivot === NoPivot() - # Use a negative value to distinguish a failed factorization (zero in pivot - # position during unpivoted LU) from a valid but rank-deficient factorization - info = -info - end - check && _check_lu_success(info, allowsingular) - return LU{T,typeof(A),typeof(ipiv)}(A, ipiv, convert(BlasInt, info)) -end - -function lutype(T::Type) - # In generic_lufact!, the elements of the lower part of the matrix are - # obtained using the division of two matrix elements. Hence their type can - # be different (e.g. the division of two types with the same unit is a type - # without unit). - # The elements of the upper part are obtained by U - U * L - # where U is an upper part element and L is a lower part element. - # Therefore, the types LT, UT should be invariant under the map: - # (LT, UT) -> begin - # L = oneunit(UT) / oneunit(UT) - # U = oneunit(UT) - oneunit(UT) * L - # typeof(L), typeof(U) - # end - # The following should handle most cases - UT = typeof(oneunit(T) - oneunit(T) * (oneunit(T) / (oneunit(T) + zero(T)))) - LT = typeof(oneunit(UT) / oneunit(UT)) - S = promote_type(T, LT, UT) -end - -lupivottype(::Type{T}) where {T} = RowMaximum() - -# for all other types we must promote to a type which is stable under division -""" - lu(A, pivot = RowMaximum(); check = true, allowsingular = false) -> F::LU - -Compute the LU factorization of `A`. - -When `check = true`, an error is thrown if the decomposition fails. -When `check = false`, responsibility for checking the decomposition's -validity (via [`issuccess`](@ref)) lies with the user. - -By default, with `check = true`, an error is also thrown when the decomposition -produces valid factors, but the upper-triangular factor `U` is rank-deficient. This may be changed by -passing `allowsingular = true`. - -In most cases, if `A` is a subtype `S` of `AbstractMatrix{T}` with an element -type `T` supporting `+`, `-`, `*` and `/`, the return type is `LU{T,S{T}}`. - -In general, LU factorization involves a permutation of the rows of the matrix -(corresponding to the `F.p` output described below), known as "pivoting" (because it -corresponds to choosing which row contains the "pivot", the diagonal entry of `F.U`). 
-One of the following pivoting strategies can be selected via the optional `pivot` argument: - -* `RowMaximum()` (default): the standard pivoting strategy; the pivot corresponds - to the element of maximum absolute value among the remaining, to be factorized rows. - This pivoting strategy requires the element type to also support [`abs`](@ref) and - [`<`](@ref). (This is generally the only numerically stable option for floating-point - matrices.) -* `RowNonZero()`: the pivot corresponds to the first non-zero element among the remaining, - to be factorized rows. (This corresponds to the typical choice in hand calculations, and - is also useful for more general algebraic number types that support [`iszero`](@ref) but - not `abs` or `<`.) -* `NoPivot()`: pivoting turned off (will fail if a zero entry is encountered in - a pivot position, even when `allowsingular = true`). - -The individual components of the factorization `F` can be accessed via [`getproperty`](@ref): - -| Component | Description | -|:----------|:------------------------------------| -| `F.L` | `L` (lower triangular) part of `LU` | -| `F.U` | `U` (upper triangular) part of `LU` | -| `F.p` | (right) permutation `Vector` | -| `F.P` | (right) permutation `Matrix` | - -Iterating the factorization produces the components `F.L`, `F.U`, and `F.p`. - -The relationship between `F` and `A` is - -`F.L*F.U == A[F.p, :]` - -`F` further supports the following functions: - -| Supported function | `LU` | `LU{T,Tridiagonal{T}}` | -|:---------------------------------|:-----|:-----------------------| -| [`/`](@ref) | ✓ | | -| [`\\`](@ref) | ✓ | ✓ | -| [`inv`](@ref) | ✓ | ✓ | -| [`det`](@ref) | ✓ | ✓ | -| [`logdet`](@ref) | ✓ | ✓ | -| [`logabsdet`](@ref) | ✓ | ✓ | -| [`size`](@ref) | ✓ | ✓ | - -!!! compat "Julia 1.11" - The `allowsingular` keyword argument was added in Julia 1.11. - -# Examples -```jldoctest -julia> A = [4 3; 6 3] -2×2 Matrix{Int64}: - 4 3 - 6 3 - -julia> F = lu(A) -LU{Float64, Matrix{Float64}, Vector{Int64}} -L factor: -2×2 Matrix{Float64}: - 1.0 0.0 - 0.666667 1.0 -U factor: -2×2 Matrix{Float64}: - 6.0 3.0 - 0.0 1.0 - -julia> F.L * F.U == A[F.p, :] -true - -julia> l, u, p = lu(A); # destructuring via iteration - -julia> l == F.L && u == F.U && p == F.p -true - -julia> lu([1 2; 1 2], allowsingular = true) -LU{Float64, Matrix{Float64}, Vector{Int64}} -L factor: -2×2 Matrix{Float64}: - 1.0 0.0 - 1.0 1.0 -U factor (rank-deficient): -2×2 Matrix{Float64}: - 1.0 2.0 - 0.0 0.0 -``` -""" -lu(A::AbstractMatrix{T}, args...; kwargs...) where {T} = - _lu(_lucopy(A, lutype(T)), args...; kwargs...) -# TODO: remove for Julia v2.0 -@deprecate lu(A::AbstractMatrix, ::Val{true}; check::Bool = true) lu(A, RowMaximum(); check=check) -@deprecate lu(A::AbstractMatrix, ::Val{false}; check::Bool = true) lu(A, NoPivot(); check=check) -# allow packages like SparseArrays.jl to interfere here and call their own `lu` -_lu(A::AbstractMatrix, args...; kwargs...) = lu!(A, args...; kwargs...) - -_lucopy(A::AbstractMatrix, T) = copy_similar(A, T) -_lucopy(A::HermOrSym, T) = copymutable_oftype(A, T) -_lucopy(A::Tridiagonal, T) = copymutable_oftype(A, T) - -lu(S::LU) = S -function lu(x::Number; check::Bool=true, allowsingular::Bool=false) - info = x == 0 ? 
one(BlasInt) : zero(BlasInt) - check && _check_lu_success(info, allowsingular) - return LU(fill(x, 1, 1), BlasInt[1], info) -end - -function LU{T}(F::LU) where T - M = convert(AbstractMatrix{T}, F.factors) - LU{T,typeof(M),typeof(F.ipiv)}(M, F.ipiv, F.info) -end -LU{T,S,P}(F::LU) where {T,S,P} = LU{T,S,P}(convert(S, F.factors), convert(P, F.ipiv), F.info) -Factorization{T}(F::LU{T}) where {T} = F -Factorization{T}(F::LU) where {T} = LU{T}(F) - -copy(A::LU{T,S,P}) where {T,S,P} = LU{T,S,P}(copy(A.factors), copy(A.ipiv), A.info) - -size(A::LU) = size(getfield(A, :factors)) -size(A::LU, i::Integer) = size(getfield(A, :factors), i) - -function ipiv2perm(v::AbstractVector{T}, maxi::Integer) where T - require_one_based_indexing(v) - p = T[1:maxi;] - @inbounds for i in 1:length(v) - p[i], p[v[i]] = p[v[i]], p[i] - end - return p -end - -function getproperty(F::LU{T}, d::Symbol) where T - m, n = size(F) - if d === :L - L = tril!(getfield(F, :factors)[1:m, 1:min(m,n)]) - for i = 1:min(m,n); L[i,i] = one(T); end - return L - elseif d === :U - return triu!(getfield(F, :factors)[1:min(m,n), 1:n]) - elseif d === :p - return ipiv2perm(getfield(F, :ipiv), m) - elseif d === :P - return Matrix{T}(I, m, m)[:,invperm(F.p)] - else - getfield(F, d) - end -end - -Base.propertynames(F::LU, private::Bool=false) = - (:L, :U, :p, :P, (private ? fieldnames(typeof(F)) : ())...) - - -""" - issuccess(F::LU; allowsingular = false) - -Test that the LU factorization of a matrix succeeded. By default a -factorization that produces a valid but rank-deficient U factor is considered a -failure. This can be changed by passing `allowsingular = true`. - -!!! compat "Julia 1.11" - The `allowsingular` keyword argument was added in Julia 1.11. - -# Examples - -```jldoctest -julia> F = lu([1 2; 1 2], check = false); - -julia> issuccess(F) -false - -julia> issuccess(F, allowsingular = true) -true -``` -""" -function issuccess(F::LU; allowsingular::Bool=false) - # A negative info is always a failure, a positive info indicates a valid but rank-deficient U factor - F.info == 0 || (allowsingular && F.info > 0) -end - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::LU) - if F.info < 0 - print(io, "Failed factorization of type $(typeof(F))") - else - summary(io, F); println(io) - println(io, "L factor:") - show(io, mime, F.L) - if F.info > 0 - println(io, "\nU factor (rank-deficient):") - else - println(io, "\nU factor:") - end - show(io, mime, F.U) - end -end - -_apply_ipiv_rows!(A::LU, B::AbstractVecOrMat) = _ipiv_rows!(A, 1 : length(A.ipiv), B) -_apply_inverse_ipiv_rows!(A::LU, B::AbstractVecOrMat) = _ipiv_rows!(A, length(A.ipiv) : -1 : 1, B) - -function _ipiv_rows!(A::LU, order::OrdinalRange, B::AbstractVecOrMat) - for i = order - if i != A.ipiv[i] - _swap_rows!(B, i, A.ipiv[i]) - end - end - B -end - -function _swap_rows!(B::AbstractVector, i::Integer, j::Integer) - B[i], B[j] = B[j], B[i] - B -end - -function _swap_rows!(B::AbstractMatrix, i::Integer, j::Integer) - for col = 1 : size(B, 2) - B[i,col], B[j,col] = B[j,col], B[i,col] - end - B -end - -_apply_ipiv_cols!(A::LU, B::AbstractVecOrMat) = _ipiv_cols!(A, 1 : length(A.ipiv), B) -_apply_inverse_ipiv_cols!(A::LU, B::AbstractVecOrMat) = _ipiv_cols!(A, length(A.ipiv) : -1 : 1, B) - -function _ipiv_cols!(A::LU, order::OrdinalRange, B::AbstractVecOrMat) - for i = order - if i != A.ipiv[i] - _swap_cols!(B, i, A.ipiv[i]) - end - end - B -end - -function _swap_cols!(B::AbstractVector, i::Integer, j::Integer) - _swap_rows!(B, i, j) -end - -function 
_swap_cols!(B::AbstractMatrix, i::Integer, j::Integer) - for row = 1 : size(B, 1) - B[row,i], B[row,j] = B[row,j], B[row,i] - end - B -end - -function rdiv!(A::AbstractVecOrMat, B::LU) - rdiv!(rdiv!(A, UpperTriangular(B.factors)), UnitLowerTriangular(B.factors)) - _apply_inverse_ipiv_cols!(B, A) -end - -ldiv!(A::LU{T,<:StridedMatrix}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = - LAPACK.getrs!('N', A.factors, A.ipiv, B) - -function ldiv!(A::LU, B::AbstractVecOrMat) - _apply_ipiv_rows!(A, B) - ldiv!(UpperTriangular(A.factors), ldiv!(UnitLowerTriangular(A.factors), B)) -end - -ldiv!(transA::TransposeFactorization{T,<:LU{T,<:StridedMatrix}}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = - (A = transA.parent; LAPACK.getrs!('T', A.factors, A.ipiv, B)) - -function ldiv!(transA::TransposeFactorization{<:Any,<:LU}, B::AbstractVecOrMat) - A = transA.parent - ldiv!(transpose(UnitLowerTriangular(A.factors)), ldiv!(transpose(UpperTriangular(A.factors)), B)) - _apply_inverse_ipiv_rows!(A, B) -end - -ldiv!(adjA::AdjointFactorization{T,<:LU{T,<:StridedMatrix}}, B::StridedVecOrMat{T}) where {T<:BlasComplex} = - (A = adjA.parent; LAPACK.getrs!('C', A.factors, A.ipiv, B)) - -function ldiv!(adjA::AdjointFactorization{<:Any,<:LU}, B::AbstractVecOrMat) - A = adjA.parent - ldiv!(adjoint(UnitLowerTriangular(A.factors)), ldiv!(adjoint(UpperTriangular(A.factors)), B)) - _apply_inverse_ipiv_rows!(A, B) -end - -(\)(A::AdjointFactorization{T,<:LU{T,<:StridedMatrix}}, B::Adjoint{T,<:StridedVecOrMat{T}}) where {T<:BlasComplex} = - LAPACK.getrs!('C', A.parent.factors, A.parent.ipiv, copy(B)) -(\)(A::TransposeFactorization{T,<:LU{T,<:StridedMatrix}}, B::Transpose{T,<:StridedVecOrMat{T}}) where {T<:BlasFloat} = - LAPACK.getrs!('T', A.parent.factors, A.parent.ipiv, copy(B)) - -function det(F::LU{T}) where T - n = checksquare(F) - issuccess(F) || return zero(T) - P = one(T) - c = 0 - @inbounds for i = 1:n - P *= F.factors[i,i] - if F.ipiv[i] != i - c += 1 - end - end - s = (isodd(c) ? 
-one(T) : one(T)) - return P * s -end - -function logabsdet(F::LU{T}) where T # return log(abs(det)) and sign(det) - n = checksquare(F) - issuccess(F) || return log(zero(real(T))), log(one(T)) - c = 0 - P = one(T) - abs_det = zero(real(T)) - @inbounds for i = 1:n - dg_ii = F.factors[i,i] - P *= sign(dg_ii) - if F.ipiv[i] != i - c += 1 - end - abs_det += log(abs(dg_ii)) - end - s = ifelse(isodd(c), -one(real(T)), one(real(T))) * P - abs_det, s -end - -inv!(A::LU{<:BlasFloat,<:StridedMatrix}) = - LAPACK.getri!(A.factors, A.ipiv) -inv!(A::LU{T,<:StridedMatrix}) where {T} = - ldiv!(A.factors, copy(A), Matrix{T}(I, size(A, 1), size(A, 1))) -inv(A::LU{<:BlasFloat,<:StridedMatrix}) = inv!(copy(A)) - -# Tridiagonal -function lu!(A::Tridiagonal{T,V}, pivot::Union{RowMaximum,NoPivot} = RowMaximum(); - check::Bool = true, allowsingular::Bool = false) where {T,V} - n = size(A, 1) - has_du2_defined = isdefined(A, :du2) && length(A.du2) == max(0, n-2) - if has_du2_defined - du2 = A.du2::V - else - du2 = similar(A.d, max(0, n-2))::V - end - _lu_tridiag!(A.dl, A.d, A.du, du2, Vector{BlasInt}(undef, n), pivot, check, allowsingular) -end -function lu!(F::LU{<:Any,<:Tridiagonal}, A::Tridiagonal, pivot::Union{RowMaximum,NoPivot} = RowMaximum(); - check::Bool = true, allowsingular::Bool = false) - B = F.factors - size(B) == size(A) || throw(DimensionMismatch()) - copyto!(B, A) - _lu_tridiag!(B.dl, B.d, B.du, B.du2, F.ipiv, pivot, check, allowsingular) -end -# See dgttrf.f -@inline function _lu_tridiag!(dl, d, du, du2, ipiv, pivot, check, allowsingular) - T = eltype(d) - V = typeof(d) - - # Extract values - n = length(d) - - # Initialize variables - info = 0 - fill!(du2, 0) - - @inbounds begin - for i = 1:n - ipiv[i] = i - end - for i = 1:n-2 - # pivot or not? - if pivot === NoPivot() || abs(d[i]) >= abs(dl[i]) - # No interchange - if d[i] != 0 - fact = dl[i]/d[i] - dl[i] = fact - d[i+1] -= fact*du[i] - du2[i] = 0 - end - else - # Interchange - fact = d[i]/dl[i] - d[i] = dl[i] - dl[i] = fact - tmp = du[i] - du[i] = d[i+1] - d[i+1] = tmp - fact*d[i+1] - du2[i] = du[i+1] - du[i+1] = -fact*du[i+1] - ipiv[i] = i+1 - end - end - if n > 1 - i = n-1 - if pivot === NoPivot() || abs(d[i]) >= abs(dl[i]) - if d[i] != 0 - fact = dl[i]/d[i] - dl[i] = fact - d[i+1] -= fact*du[i] - end - else - fact = d[i]/dl[i] - d[i] = dl[i] - dl[i] = fact - tmp = du[i] - du[i] = d[i+1] - d[i+1] = tmp - fact*d[i+1] - ipiv[i] = i+1 - end - end - # check for a zero on the diagonal of U - for i = 1:n - if d[i] == 0 - info = i - break - end - end - end - check && _check_lu_success(info, allowsingular) - return LU{T,Tridiagonal{T,V},typeof(ipiv)}(Tridiagonal{T,V}(dl, d, du, du2), ipiv, convert(BlasInt, info)) -end - -factorize(A::Tridiagonal) = lu(A) - -function getproperty(F::LU{T,Tridiagonal{T,V}}, d::Symbol) where {T,V} - m, n = size(F) - if d === :L - dl = getfield(getfield(F, :factors), :dl) - L = Array(Bidiagonal(fill!(similar(dl, n), one(T)), dl, d)) - for i = 2:n - tmp = L[getfield(F, :ipiv)[i], 1:i - 1] - L[getfield(F, :ipiv)[i], 1:i - 1] = L[i, 1:i - 1] - L[i, 1:i - 1] = tmp - end - return L - elseif d === :U - U = Array(Bidiagonal(getfield(getfield(F, :factors), :d), getfield(getfield(F, :factors), :du), d)) - for i = 1:n - 2 - U[i,i + 2] = getfield(getfield(F, :factors), :du2)[i] - end - return U - elseif d === :p - return ipiv2perm(getfield(F, :ipiv), m) - elseif d === :P - return Matrix{T}(I, m, m)[:,invperm(F.p)] - end - return getfield(F, d) -end - -# See dgtts2.f -function ldiv!(A::LU{T,Tridiagonal{T,V}}, B::AbstractVecOrMat) 
where {T,V} - require_one_based_indexing(B) - n = size(A,1) - if n != size(B,1) - throw(DimensionMismatch(lazy"matrix has dimensions ($n,$n) but right hand side has $(size(B,1)) rows")) - end - nrhs = size(B,2) - dl = A.factors.dl - d = A.factors.d - du = A.factors.du - du2 = A.factors.du2 - ipiv = A.ipiv - @inbounds begin - for j = 1:nrhs - for i = 1:n-1 - ip = ipiv[i] - tmp = B[i+1-ip+i,j] - dl[i]*B[ip,j] - B[i,j] = B[ip,j] - B[i+1,j] = tmp - end - B[n,j] /= d[n] - if n > 1 - B[n-1,j] = (B[n-1,j] - du[n-1]*B[n,j])/d[n-1] - end - for i = n-2:-1:1 - B[i,j] = (B[i,j] - du[i]*B[i+1,j] - du2[i]*B[i+2,j])/d[i] - end - end - end - return B -end - -function ldiv!(transA::TransposeFactorization{<:Any,<:LU{T,Tridiagonal{T,V}}}, B::AbstractVecOrMat) where {T,V} - require_one_based_indexing(B) - A = transA.parent - n = size(A,1) - if n != size(B,1) - throw(DimensionMismatch(lazy"matrix has dimensions ($n,$n) but right hand side has $(size(B,1)) rows")) - end - nrhs = size(B,2) - dl = A.factors.dl - d = A.factors.d - du = A.factors.du - du2 = A.factors.du2 - ipiv = A.ipiv - @inbounds begin - for j = 1:nrhs - B[1,j] /= d[1] - if n > 1 - B[2,j] = (B[2,j] - du[1]*B[1,j])/d[2] - end - for i = 3:n - B[i,j] = (B[i,j] - du[i-1]*B[i-1,j] - du2[i-2]*B[i-2,j])/d[i] - end - for i = n-1:-1:1 - if ipiv[i] == i - B[i,j] = B[i,j] - dl[i]*B[i+1,j] - else - tmp = B[i+1,j] - B[i+1,j] = B[i,j] - dl[i]*tmp - B[i,j] = tmp - end - end - end - end - return B -end - -# Ac_ldiv_B!(A::LU{T,Tridiagonal{T}}, B::AbstractVecOrMat) where {T<:Real} = At_ldiv_B!(A,B) -function ldiv!(adjA::AdjointFactorization{<:Any,<:LU{T,Tridiagonal{T,V}}}, B::AbstractVecOrMat) where {T,V} - require_one_based_indexing(B) - A = adjA.parent - n = size(A,1) - if n != size(B,1) - throw(DimensionMismatch(lazy"matrix has dimensions ($n,$n) but right hand side has $(size(B,1)) rows")) - end - nrhs = size(B,2) - dl = A.factors.dl - d = A.factors.d - du = A.factors.du - du2 = A.factors.du2 - ipiv = A.ipiv - @inbounds begin - for j = 1:nrhs - B[1,j] /= conj(d[1]) - if n > 1 - B[2,j] = (B[2,j] - conj(du[1])*B[1,j])/conj(d[2]) - end - for i = 3:n - B[i,j] = (B[i,j] - conj(du[i-1])*B[i-1,j] - conj(du2[i-2])*B[i-2,j])/conj(d[i]) - end - for i = n-1:-1:1 - if ipiv[i] == i - B[i,j] = B[i,j] - conj(dl[i])*B[i+1,j] - else - tmp = B[i+1,j] - B[i+1,j] = B[i,j] - conj(dl[i])*tmp - B[i,j] = tmp - end - end - end - end - return B -end - -rdiv!(B::AbstractMatrix, A::LU{T,Tridiagonal{T,V}}) where {T,V} = transpose(ldiv!(transpose(A), transpose(B))) - -# Conversions -AbstractMatrix(F::LU) = (F.L * F.U)[invperm(F.p),:] -AbstractArray(F::LU) = AbstractMatrix(F) -Matrix(F::LU) = Array(AbstractArray(F)) -Array(F::LU) = Matrix(F) - -function Tridiagonal(F::LU{T,Tridiagonal{T,V}}) where {T,V} - n = size(F, 1) - - dl = copy(F.factors.dl) - d = copy(F.factors.d) - du = copy(F.factors.du) - du2 = copy(F.factors.du2) - - for i = n - 1:-1:1 - li = dl[i] - dl[i] = li*d[i] - d[i + 1] += li*du[i] - if i < n - 1 - du[i + 1] += li*du2[i] - end - - if F.ipiv[i] != i - tmp = dl[i] - dl[i] = d[i] - d[i] = tmp - - tmp = d[i + 1] - d[i + 1] = du[i] - du[i] = tmp - - if i < n - 1 - tmp = du[i + 1] - du[i + 1] = du2[i] - du2[i] = tmp - end - end - end - return Tridiagonal(dl, d, du) -end -AbstractMatrix(F::LU{T,Tridiagonal{T,V}}) where {T,V} = Tridiagonal(F) -AbstractArray(F::LU{T,Tridiagonal{T,V}}) where {T,V} = AbstractMatrix(F) -Matrix(F::LU{T,Tridiagonal{T,V}}) where {T,V} = Array(AbstractArray(F)) -Array(F::LU{T,Tridiagonal{T,V}}) where {T,V} = Matrix(F) diff --git 
a/stdlib/LinearAlgebra/src/matmul.jl b/stdlib/LinearAlgebra/src/matmul.jl deleted file mode 100644 index e22b6dce4bb03..0000000000000 --- a/stdlib/LinearAlgebra/src/matmul.jl +++ /dev/null @@ -1,1339 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# matmul.jl: Everything to do with dense matrix multiplication - -# unused internal constant, here for legacy reasons -const tilebufsize = 10800 # Approximately 32k/3 - -# Matrix-matrix multiplication - -AdjOrTransStridedMat{T} = Union{Adjoint{<:Any, <:StridedMatrix{T}}, Transpose{<:Any, <:StridedMatrix{T}}} -StridedMaybeAdjOrTransMat{T} = Union{StridedMatrix{T}, Adjoint{<:Any, <:StridedMatrix{T}}, Transpose{<:Any, <:StridedMatrix{T}}} -StridedMaybeAdjOrTransVecOrMat{T} = Union{StridedVecOrMat{T}, AdjOrTrans{<:Any, <:StridedVecOrMat{T}}} - -matprod(x, y) = x*y + x*y - -# dot products - -dot(x::StridedVecLike{T}, y::StridedVecLike{T}) where {T<:BlasReal} = BLAS.dot(x, y) -dot(x::StridedVecLike{T}, y::StridedVecLike{T}) where {T<:BlasComplex} = BLAS.dotc(x, y) - -function dot(x::Vector{T}, rx::AbstractRange{TI}, y::Vector{T}, ry::AbstractRange{TI}) where {T<:BlasReal,TI<:Integer} - if length(rx) != length(ry) - throw(DimensionMismatch(lazy"length of rx, $(length(rx)), does not equal length of ry, $(length(ry))")) - end - if minimum(rx) < 1 || maximum(rx) > length(x) - throw(BoundsError(x, rx)) - end - if minimum(ry) < 1 || maximum(ry) > length(y) - throw(BoundsError(y, ry)) - end - GC.@preserve x y BLAS.dot(length(rx), pointer(x)+(first(rx)-1)*sizeof(T), step(rx), pointer(y)+(first(ry)-1)*sizeof(T), step(ry)) -end - -function dot(x::Vector{T}, rx::AbstractRange{TI}, y::Vector{T}, ry::AbstractRange{TI}) where {T<:BlasComplex,TI<:Integer} - if length(rx) != length(ry) - throw(DimensionMismatch(lazy"length of rx, $(length(rx)), does not equal length of ry, $(length(ry))")) - end - if minimum(rx) < 1 || maximum(rx) > length(x) - throw(BoundsError(x, rx)) - end - if minimum(ry) < 1 || maximum(ry) > length(y) - throw(BoundsError(y, ry)) - end - GC.@preserve x y BLAS.dotc(length(rx), pointer(x)+(first(rx)-1)*sizeof(T), step(rx), pointer(y)+(first(ry)-1)*sizeof(T), step(ry)) -end - -function *(transx::Transpose{<:Any,<:StridedVector{T}}, y::StridedVector{T}) where {T<:BlasComplex} - x = transx.parent - return BLAS.dotu(x, y) -end - -# Matrix-vector multiplication -function (*)(A::StridedMaybeAdjOrTransMat{T}, x::StridedVector{S}) where {T<:BlasFloat,S<:Real} - TS = promote_op(matprod, T, S) - y = isconcretetype(TS) ? convert(AbstractVector{TS}, x) : x - mul!(similar(x, TS, size(A,1)), A, y) -end -function (*)(A::AbstractMatrix{T}, x::AbstractVector{S}) where {T,S} - TS = promote_op(matprod, T, S) - mul!(similar(x, TS, axes(A,1)), A, x) -end - -# these will throw a DimensionMismatch unless B has 1 row (or 1 col for transposed case): -function (*)(a::AbstractVector, B::AbstractMatrix) - require_one_based_indexing(a) - reshape(a, length(a), 1) * B -end - -# Add a level of indirection and specialize _mul! to avoid ambiguities in mul! 
-@inline mul!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector, - alpha::Number, beta::Number) = _mul!(y, A, x, alpha, beta) - -_mul!(y::AbstractVector, A::AbstractVecOrMat, x::AbstractVector, - alpha::Number, beta::Number) = - generic_matvecmul!(y, wrapper_char(A), _unwrap(A), x, alpha, beta) -# BLAS cases -# equal eltypes -generic_matvecmul!(y::StridedVector{T}, tA, A::StridedVecOrMat{T}, x::StridedVector{T}, - alpha::Number, beta::Number) where {T<:BlasFloat} = - gemv!(y, tA, A, x, alpha, beta) - -# Real (possibly transposed) matrix times complex vector. -# Multiply the matrix with the real and imaginary parts separately -generic_matvecmul!(y::StridedVector{Complex{T}}, tA, A::StridedVecOrMat{T}, x::StridedVector{Complex{T}}, - alpha::Number, beta::Number) where {T<:BlasReal} = - gemv!(y, tA, A, x, alpha, beta) - -# Complex matrix times real vector. -# Reinterpret the matrix as a real matrix and do real matvec computation. -# works only in cooperation with BLAS when A is untransposed (tA == 'N') -# but that check is included in gemv! anyway -generic_matvecmul!(y::StridedVector{Complex{T}}, tA, A::StridedVecOrMat{Complex{T}}, x::StridedVector{T}, - alpha::Number, beta::Number) where {T<:BlasReal} = - gemv!(y, tA, A, x, alpha, beta) - -# Vector-Matrix multiplication -(*)(x::AdjointAbsVec, A::AbstractMatrix) = (A'*x')' -(*)(x::TransposeAbsVec, A::AbstractMatrix) = transpose(transpose(A)*transpose(x)) - -# Matrix-matrix multiplication -""" - *(A::AbstractMatrix, B::AbstractMatrix) - -Matrix multiplication. - -# Examples -```jldoctest -julia> [1 1; 0 1] * [1 0; 1 1] -2×2 Matrix{Int64}: - 2 1 - 1 1 -``` -""" -function (*)(A::AbstractMatrix, B::AbstractMatrix) - TS = promote_op(matprod, eltype(A), eltype(B)) - mul!(matprod_dest(A, B, TS), A, B) -end - -""" - matprod_dest(A, B, T) - -Return an appropriate `AbstractArray` with element type `T` that may be used to store the result of `A * B`. - -!!! compat - This function requires at least Julia 1.11 -""" -matprod_dest(A, B, T) = similar(B, T, (size(A, 1), size(B, 2))) - -# optimization for dispatching to BLAS, e.g. 
*(::Matrix{Float32}, ::Matrix{Float64}) -# but avoiding the case *(::Matrix{<:BlasComplex}, ::Matrix{<:BlasReal}) -# which is better handled by reinterpreting rather than promotion -function (*)(A::StridedMaybeAdjOrTransMat{<:BlasReal}, B::StridedMaybeAdjOrTransMat{<:BlasReal}) - TS = promote_type(eltype(A), eltype(B)) - mul!(similar(B, TS, (size(A, 1), size(B, 2))), - wrapperop(A)(convert(AbstractArray{TS}, _unwrap(A))), - wrapperop(B)(convert(AbstractArray{TS}, _unwrap(B)))) -end -function (*)(A::StridedMaybeAdjOrTransMat{<:BlasComplex}, B::StridedMaybeAdjOrTransMat{<:BlasComplex}) - TS = promote_type(eltype(A), eltype(B)) - mul!(similar(B, TS, (size(A, 1), size(B, 2))), - wrapperop(A)(convert(AbstractArray{TS}, _unwrap(A))), - wrapperop(B)(convert(AbstractArray{TS}, _unwrap(B)))) -end - -# Complex Matrix times real matrix: We use that it is generally faster to reinterpret the -# first matrix as a real matrix and carry out real matrix matrix multiply -function (*)(A::StridedMatrix{<:BlasComplex}, B::StridedMaybeAdjOrTransMat{<:BlasReal}) - TS = promote_type(eltype(A), eltype(B)) - mul!(similar(B, TS, (size(A, 1), size(B, 2))), - convert(AbstractArray{TS}, A), - wrapperop(B)(convert(AbstractArray{real(TS)}, _unwrap(B)))) -end -function (*)(A::AdjOrTransStridedMat{<:BlasComplex}, B::StridedMaybeAdjOrTransMat{<:BlasReal}) - TS = promote_type(eltype(A), eltype(B)) - mul!(similar(B, TS, (size(A, 1), size(B, 2))), - copymutable_oftype(A, TS), # remove AdjOrTrans to use reinterpret trick below - wrapperop(B)(convert(AbstractArray{real(TS)}, _unwrap(B)))) -end -# the following case doesn't seem to benefit from the translation A*B = (B' * A')' -function (*)(A::StridedMatrix{<:BlasReal}, B::StridedMatrix{<:BlasComplex}) - temp = real(B) - R = A * temp - temp .= imag.(B) - I = A * temp - Complex.(R, I) -end -(*)(A::AdjOrTransStridedMat{<:BlasReal}, B::StridedMatrix{<:BlasComplex}) = copy(transpose(transpose(B) * parent(A))) -(*)(A::StridedMaybeAdjOrTransMat{<:BlasReal}, B::AdjOrTransStridedMat{<:BlasComplex}) = copy(wrapperop(B)(parent(B) * transpose(A))) - -""" - muladd(A, y, z) - -Combined multiply-add, `A*y .+ z`, for matrix-matrix or matrix-vector multiplication. -The result is always the same size as `A*y`, but `z` may be smaller, or a scalar. - -!!! compat "Julia 1.6" - These methods require Julia 1.6 or later. 
- -# Examples -```jldoctest -julia> A=[1.0 2.0; 3.0 4.0]; B=[1.0 1.0; 1.0 1.0]; z=[0, 100]; - -julia> muladd(A, B, z) -2×2 Matrix{Float64}: - 3.0 3.0 - 107.0 107.0 -``` -""" -function Base.muladd(A::AbstractMatrix, y::AbstractVecOrMat, z::Union{Number, AbstractArray}) - Ay = A * y - for d in 1:ndims(Ay) - # Same error as Ay .+= z would give, to match StridedMatrix method: - size(z,d) > size(Ay,d) && throw(DimensionMismatch("array could not be broadcast to match destination")) - end - for d in ndims(Ay)+1:ndims(z) - # Similar error to what Ay + z would give, to match (Any,Any,Any) method: - size(z,d) > 1 && throw(DimensionMismatch(string("z has dims ", - axes(z), ", must have singleton at dim ", d))) - end - Ay .+ z -end - -function Base.muladd(u::AbstractVector, v::AdjOrTransAbsVec, z::Union{Number, AbstractArray}) - if size(z,1) > length(u) || size(z,2) > length(v) - # Same error as (u*v) .+= z: - throw(DimensionMismatch("array could not be broadcast to match destination")) - end - for d in 3:ndims(z) - # Similar error to (u*v) + z: - size(z,d) > 1 && throw(DimensionMismatch(string("z has dims ", - axes(z), ", must have singleton at dim ", d))) - end - (u .* v) .+ z -end - -Base.muladd(x::AdjointAbsVec, A::AbstractMatrix, z::Union{Number, AbstractVecOrMat}) = - muladd(A', x', z')' -Base.muladd(x::TransposeAbsVec, A::AbstractMatrix, z::Union{Number, AbstractVecOrMat}) = - transpose(muladd(transpose(A), transpose(x), transpose(z))) - -function Base.muladd(A::StridedMaybeAdjOrTransMat{<:Number}, y::AbstractVector{<:Number}, z::Union{Number, AbstractVector}) - T = promote_type(eltype(A), eltype(y), eltype(z)) - C = similar(A, T, axes(A,1)) - C .= z - mul!(C, A, y, true, true) -end - -function Base.muladd(A::StridedMaybeAdjOrTransMat{<:Number}, B::StridedMaybeAdjOrTransMat{<:Number}, z::Union{Number, AbstractVecOrMat}) - T = promote_type(eltype(A), eltype(B), eltype(z)) - C = similar(A, T, axes(A,1), axes(B,2)) - C .= z - mul!(C, A, B, true, true) -end - -""" - mul!(Y, A, B) -> Y - -Calculates the matrix-matrix or matrix-vector product ``A B`` and stores the result in `Y`, -overwriting the existing value of `Y`. Note that `Y` must not be aliased with either `A` or -`B`. - -# Examples -```jldoctest -julia> A = [1.0 2.0; 3.0 4.0]; B = [1.0 1.0; 1.0 1.0]; Y = similar(B); - -julia> mul!(Y, A, B) === Y -true - -julia> Y -2×2 Matrix{Float64}: - 3.0 3.0 - 7.0 7.0 - -julia> Y == A * B -true -``` - -# Implementation -For custom matrix and vector types, it is recommended to implement -5-argument `mul!` rather than implementing 3-argument `mul!` directly -if possible. -""" -mul!(C, A, B) = mul!(C, A, B, true, false) - -""" - mul!(C, A, B, α, β) -> C - -Combined inplace matrix-matrix or matrix-vector multiply-add ``A B α + C β``. -The result is stored in `C` by overwriting it. Note that `C` must not be -aliased with either `A` or `B`. - -!!! compat "Julia 1.3" - Five-argument `mul!` requires at least Julia 1.3. - -# Examples -```jldoctest -julia> A = [1.0 2.0; 3.0 4.0]; B = [1.0 1.0; 1.0 1.0]; C = [1.0 2.0; 3.0 4.0]; - -julia> α, β = 100.0, 10.0; - -julia> mul!(C, A, B, α, β) === C -true - -julia> C -2×2 Matrix{Float64}: - 310.0 320.0 - 730.0 740.0 - -julia> C_original = [1.0 2.0; 3.0 4.0]; # A copy of the original value of C - -julia> C == A * B * α + C_original * β -true -``` -""" -@inline mul!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat, α::Number, β::Number) = _mul!(C, A, B, α, β) -# Add a level of indirection and specialize _mul! to avoid ambiguities in mul! 
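-# A rough illustration of the classification performed by `_mul!` below, assuming
-# `wrapper_char` maps Transpose -> 'T', Adjoint (complex eltype) -> 'C',
-# Symmetric -> 'S'/'s', Hermitian -> 'H'/'h', and plain strided arrays -> 'N':
-#   mul!(C, transpose(A), A)   # ('T','N') -> BlasFlag.SYRK (BLAS.syrk! only when both unwrapped arguments are the same array, else gemm)
-#   mul!(C, A', A)             # ('C','N') -> BlasFlag.HERK (likewise, BLAS.herk! only when A === B; complex eltype)
-#   mul!(C, Symmetric(A), B)   # ('S','N') -> BlasFlag.SYMM
-#   mul!(C, Hermitian(A), B)   # ('H','N') -> BlasFlag.HEMM
-#   mul!(C, A, B)              # ('N','N') -> BlasFlag.GEMM
-# Any other combination falls through to BlasFlag.NONE and the generic kernels.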
-module BlasFlag -@enum BlasFunction SYRK HERK GEMM SYMM HEMM NONE -const SyrkHerkGemm = Union{Val{SYRK}, Val{HERK}, Val{GEMM}} -const SymmHemmGeneric = Union{Val{SYMM}, Val{HEMM}, Val{NONE}} -end -@inline function _mul!(C::AbstractMatrix, A::AbstractVecOrMat, B::AbstractVecOrMat, α::Number, β::Number) - tA = wrapper_char(A) - tB = wrapper_char(B) - tA_uc = uppercase(tA) - tB_uc = uppercase(tB) - isntc = wrapper_char_NTC(A) & wrapper_char_NTC(B) - blasfn = if isntc - if (tA_uc == 'T' && tB_uc == 'N') || (tA_uc == 'N' && tB_uc == 'T') - BlasFlag.SYRK - elseif (tA_uc == 'C' && tB_uc == 'N') || (tA_uc == 'N' && tB_uc == 'C') - BlasFlag.HERK - else isntc - BlasFlag.GEMM - end - else - if (tA_uc == 'S' && tB_uc == 'N') || (tA_uc == 'N' && tB_uc == 'S') - BlasFlag.SYMM - elseif (tA_uc == 'H' && tB_uc == 'N') || (tA_uc == 'N' && tB_uc == 'H') - BlasFlag.HEMM - else - BlasFlag.NONE - end - end - - generic_matmatmul_wrapper!( - C, - tA, - tB, - _unwrap(A), - _unwrap(B), - α, β, - Val(blasfn), - ) -end - -# this indirection allows is to specialize on the types of the wrappers of A and B to some extent, -# even though the wrappers are stripped off in mul! -# By default, we ignore the wrapper info and forward the arguments to generic_matmatmul! -function generic_matmatmul_wrapper!(C, tA, tB, A, B, α, β, @nospecialize(val)) - generic_matmatmul!(C, tA, tB, A, B, α, β) -end - - -""" - rmul!(A, B) - -Calculate the matrix-matrix product ``AB``, overwriting `A`, and return the result. -Here, `B` must be of special matrix type, like, e.g., [`Diagonal`](@ref), -[`UpperTriangular`](@ref) or [`LowerTriangular`](@ref), or of some orthogonal type, -see [`QR`](@ref). - -# Examples -```jldoctest -julia> A = [0 1; 1 0]; - -julia> B = UpperTriangular([1 2; 0 3]); - -julia> rmul!(A, B); - -julia> A -2×2 Matrix{Int64}: - 0 3 - 1 2 - -julia> A = [1.0 2.0; 3.0 4.0]; - -julia> F = qr([0 1; -1 0]); - -julia> rmul!(A, F.Q) -2×2 Matrix{Float64}: - 2.0 1.0 - 4.0 3.0 -``` -""" -rmul!(A, B) - -""" - lmul!(A, B) - -Calculate the matrix-matrix product ``AB``, overwriting `B`, and return the result. -Here, `A` must be of special matrix type, like, e.g., [`Diagonal`](@ref), -[`UpperTriangular`](@ref) or [`LowerTriangular`](@ref), or of some orthogonal type, -see [`QR`](@ref). - -# Examples -```jldoctest -julia> B = [0 1; 1 0]; - -julia> A = UpperTriangular([1 2; 0 3]); - -julia> lmul!(A, B); - -julia> B -2×2 Matrix{Int64}: - 2 1 - 3 0 - -julia> B = [1.0 2.0; 3.0 4.0]; - -julia> F = qr([0 1; -1 0]); - -julia> lmul!(F.Q, B) -2×2 Matrix{Float64}: - 3.0 4.0 - 1.0 2.0 -``` -""" -lmul!(A, B) - -# We may inline the matmul2x2! and matmul3x3! calls for `α == true` -# to simplify the @stable_muladdmul branches -function matmul2x2or3x3_nonzeroalpha!(C, tA, tB, A, B, α, β) - if size(C) == size(A) == size(B) == (2,2) - matmul2x2!(C, tA, tB, A, B, α, β) - return true - end - if size(C) == size(A) == size(B) == (3,3) - matmul3x3!(C, tA, tB, A, B, α, β) - return true - end - return false -end -function matmul2x2or3x3_nonzeroalpha!(C, tA, tB, A, B, α::Bool, β) - if size(C) == size(A) == size(B) == (2,2) - Aelements, Belements = _matmul2x2_elements(C, tA, tB, A, B) - @stable_muladdmul _modify2x2!(Aelements, Belements, C, MulAddMul(true, β)) - return true - end - if size(C) == size(A) == size(B) == (3,3) - Aelements, Belements = _matmul3x3_elements(C, tA, tB, A, B) - @stable_muladdmul _modify3x3!(Aelements, Belements, C, MulAddMul(true, β)) - return true - end - return false -end - -# THE one big BLAS dispatch. 
This is split into two methods to improve latency -Base.@constprop :aggressive function generic_matmatmul_wrapper!(C::StridedMatrix{T}, tA, tB, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}, - α::Number, β::Number, val::BlasFlag.SyrkHerkGemm) where {T<:BlasFloat} - mA, nA = lapack_size(tA, A) - mB, nB = lapack_size(tB, B) - if any(iszero, size(A)) || any(iszero, size(B)) || iszero(α) - if size(C) != (mA, nB) - throw(DimensionMismatch(lazy"C has dimensions $(size(C)), should have ($mA,$nB)")) - end - return _rmul_or_fill!(C, β) - end - matmul2x2or3x3_nonzeroalpha!(C, tA, tB, A, B, α, β) && return C - _syrk_herk_gemm_wrapper!(C, tA, tB, A, B, α, β, val) - return C -end -Base.@constprop :aggressive function _syrk_herk_gemm_wrapper!(C, tA, tB, A, B, α, β, ::Val{BlasFlag.SYRK}) - if A === B - tA_uc = uppercase(tA) # potentially strip a WrapperChar - return syrk_wrapper!(C, tA_uc, A, α, β) - else - return gemm_wrapper!(C, tA, tB, A, B, α, β) - end -end -Base.@constprop :aggressive function _syrk_herk_gemm_wrapper!(C, tA, tB, A, B, α, β, ::Val{BlasFlag.HERK}) - if A === B - tA_uc = uppercase(tA) # potentially strip a WrapperChar - return herk_wrapper!(C, tA_uc, A, α, β) - else - return gemm_wrapper!(C, tA, tB, A, B, α, β) - end -end -Base.@constprop :aggressive function _syrk_herk_gemm_wrapper!(C, tA, tB, A, B, α, β, ::Val{BlasFlag.GEMM}) - return gemm_wrapper!(C, tA, tB, A, B, α, β) -end -_valtypeparam(v::Val{T}) where {T} = T -Base.@constprop :aggressive function generic_matmatmul_wrapper!(C::StridedMatrix{T}, tA, tB, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}, - α::Number, β::Number, val::BlasFlag.SymmHemmGeneric) where {T<:BlasFloat} - mA, nA = lapack_size(tA, A) - mB, nB = lapack_size(tB, B) - if any(iszero, size(A)) || any(iszero, size(B)) || iszero(α) - if size(C) != (mA, nB) - throw(DimensionMismatch(lazy"C has dimensions $(size(C)), should have ($mA,$nB)")) - end - return _rmul_or_fill!(C, β) - end - matmul2x2or3x3_nonzeroalpha!(C, tA, tB, A, B, α, β) && return C - alpha, beta = promote(α, β, zero(T)) - blasfn = _valtypeparam(val) - if alpha isa Union{Bool,T} && beta isa Union{Bool,T} && blasfn ∈ (BlasFlag.SYMM, BlasFlag.HEMM) - _blasfn = blasfn - αβ = (alpha, beta) - else - _blasfn = BlasFlag.NONE - αβ = (α, β) - end - _symm_hemm_generic!(C, tA, tB, A, B, αβ..., Val(_blasfn)) - return C -end -Base.@constprop :aggressive function _lrchar_ulchar(tA, tB) - if uppercase(tA) == 'N' - lrchar = 'R' - ulchar = isuppercase(tB) ? 'U' : 'L' - else - lrchar = 'L' - ulchar = isuppercase(tA) ? 
'U' : 'L' - end - return lrchar, ulchar -end -function _symm_hemm_generic!(C, tA, tB, A, B, alpha, beta, ::Val{BlasFlag.SYMM}) - lrchar, ulchar = _lrchar_ulchar(tA, tB) - if lrchar == 'L' - BLAS.symm!(lrchar, ulchar, alpha, A, B, beta, C) - else - BLAS.symm!(lrchar, ulchar, alpha, B, A, beta, C) - end -end -function _symm_hemm_generic!(C, tA, tB, A, B, alpha, beta, ::Val{BlasFlag.HEMM}) - lrchar, ulchar = _lrchar_ulchar(tA, tB) - if lrchar == 'L' - BLAS.hemm!(lrchar, ulchar, alpha, A, B, beta, C) - else - BLAS.hemm!(lrchar, ulchar, alpha, B, A, beta, C) - end -end -Base.@constprop :aggressive function _symm_hemm_generic!(C, tA, tB, A, B, alpha, beta, ::Val{BlasFlag.NONE}) - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), alpha, beta) -end - -# legacy method -Base.@constprop :aggressive generic_matmatmul!(C::StridedMatrix{T}, tA, tB, A::StridedVecOrMat{T}, B::StridedVecOrMat{T}, - _add::MulAddMul = MulAddMul()) where {T<:BlasFloat} = - generic_matmatmul!(C, tA, tB, A, B, _add.alpha, _add.beta) - -function generic_matmatmul_wrapper!(C::StridedVecOrMat{Complex{T}}, tA, tB, A::StridedVecOrMat{Complex{T}}, B::StridedVecOrMat{T}, - α::Number, β::Number, ::Val{true}) where {T<:BlasReal} - gemm_wrapper!(C, tA, tB, A, B, α, β) -end -Base.@constprop :aggressive function generic_matmatmul_wrapper!(C::StridedVecOrMat{Complex{T}}, tA, tB, A::StridedVecOrMat{Complex{T}}, B::StridedVecOrMat{T}, - alpha::Number, beta::Number, ::Val{false}) where {T<:BlasReal} - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), alpha, beta) -end -# legacy method -Base.@constprop :aggressive generic_matmatmul!(C::StridedVecOrMat{Complex{T}}, tA, tB, A::StridedVecOrMat{Complex{T}}, B::StridedVecOrMat{T}, - _add::MulAddMul = MulAddMul()) where {T<:BlasReal} = - generic_matmatmul!(C, tA, tB, A, B, _add.alpha, _add.beta) - -# Supporting functions for matrix multiplication - -# copy transposed(adjoint) of upper(lower) side-diagonals. Optionally include diagonal. -@inline function copytri!(A::AbstractMatrix, uplo::AbstractChar, conjugate::Bool=false, diag::Bool=false) - n = checksquare(A) - off = diag ? 0 : 1 - if uplo == 'U' - for i = 1:n, j = (i+off):n - A[j,i] = conjugate ? adjoint(A[i,j]) : transpose(A[i,j]) - end - elseif uplo == 'L' - for i = 1:n, j = (i+off):n - A[i,j] = conjugate ? 
adjoint(A[j,i]) : transpose(A[j,i]) - end - else - throw(ArgumentError(lazy"uplo argument must be 'U' (upper) or 'L' (lower), got $uplo")) - end - A -end - -_fullstride2(A, f=identity) = f(stride(A, 2)) >= size(A, 1) -# for some standard StridedArrays, the _fullstride2 condition is known to hold at compile-time -# We specialize the function for certain StridedArray subtypes -_fullstride2(A::StridedArrayStdSubArray, ::typeof(abs)) = true -_fullstride2(A::StridedArrayStdSubArrayIncr, ::typeof(identity)) = true - -Base.@constprop :aggressive function gemv!(y::StridedVector{T}, tA::AbstractChar, - A::StridedVecOrMat{T}, x::StridedVector{T}, - α::Number=true, β::Number=false) where {T<:BlasFloat} - mA, nA = lapack_size(tA, A) - nA != length(x) && - throw(DimensionMismatch(lazy"second dimension of A, $nA, does not match length of x, $(length(x))")) - mA != length(y) && - throw(DimensionMismatch(lazy"first dimension of A, $mA, does not match length of y, $(length(y))")) - mA == 0 && return y - nA == 0 && return _rmul_or_fill!(y, β) - alpha, beta = promote(α, β, zero(T)) - tA_uc = uppercase(tA) # potentially convert a WrapperChar to a Char - if alpha isa Union{Bool,T} && beta isa Union{Bool,T} && - stride(A, 1) == 1 && _fullstride2(A, abs) && - !iszero(stride(x, 1)) && # We only check input's stride here. - if tA_uc in ('N', 'T', 'C') - return BLAS.gemv!(tA, alpha, A, x, beta, y) - elseif tA_uc == 'S' - return BLAS.symv!(tA == 'S' ? 'U' : 'L', alpha, A, x, beta, y) - elseif tA_uc == 'H' - return BLAS.hemv!(tA == 'H' ? 'U' : 'L', alpha, A, x, beta, y) - end - end - if tA_uc in ('S', 'H') - # re-wrap again and use plain ('N') matvec mul algorithm, - # because _generic_matvecmul! can't handle the HermOrSym cases specifically - return _generic_matvecmul!(y, 'N', wrap(A, tA), x, α, β) - else - return _generic_matvecmul!(y, tA, A, x, α, β) - end -end - -Base.@constprop :aggressive function gemv!(y::StridedVector{Complex{T}}, tA::AbstractChar, A::StridedVecOrMat{Complex{T}}, x::StridedVector{T}, - α::Number = true, β::Number = false) where {T<:BlasReal} - mA, nA = lapack_size(tA, A) - nA != length(x) && - throw(DimensionMismatch(lazy"second dimension of A, $nA, does not match length of x, $(length(x))")) - mA != length(y) && - throw(DimensionMismatch(lazy"first dimension of A, $mA, does not match length of y, $(length(y))")) - mA == 0 && return y - nA == 0 && return _rmul_or_fill!(y, β) - alpha, beta = promote(α, β, zero(T)) - tA_uc = uppercase(tA) # potentially convert a WrapperChar to a Char - if alpha isa Union{Bool,T} && beta isa Union{Bool,T} && - stride(A, 1) == 1 && _fullstride2(A, abs) && - stride(y, 1) == 1 && tA_uc == 'N' && # reinterpret-based optimization is valid only for contiguous `y` - !iszero(stride(x, 1)) - BLAS.gemv!(tA, alpha, reinterpret(T, A), x, beta, reinterpret(T, y)) - return y - else - Anew, ta = tA_uc in ('S', 'H') ? 
(wrap(A, tA), oftype(tA, 'N')) : (A, tA) - return _generic_matvecmul!(y, ta, Anew, x, α, β) - end -end - -Base.@constprop :aggressive function gemv!(y::StridedVector{Complex{T}}, tA::AbstractChar, - A::StridedVecOrMat{T}, x::StridedVector{Complex{T}}, - α::Number = true, β::Number = false) where {T<:BlasReal} - mA, nA = lapack_size(tA, A) - nA != length(x) && - throw(DimensionMismatch(lazy"second dimension of A, $nA, does not match length of x, $(length(x))")) - mA != length(y) && - throw(DimensionMismatch(lazy"first dimension of A, $mA, does not match length of y, $(length(y))")) - mA == 0 && return y - nA == 0 && return _rmul_or_fill!(y, β) - alpha, beta = promote(α, β, zero(T)) - tA_uc = uppercase(tA) # potentially convert a WrapperChar to a Char - @views if alpha isa Union{Bool,T} && beta isa Union{Bool,T} && - stride(A, 1) == 1 && _fullstride2(A, abs) && - !iszero(stride(x, 1)) && tA_uc in ('N', 'T', 'C') - xfl = reinterpret(reshape, T, x) # Use reshape here. - yfl = reinterpret(reshape, T, y) - BLAS.gemv!(tA, alpha, A, xfl[1, :], beta, yfl[1, :]) - BLAS.gemv!(tA, alpha, A, xfl[2, :], beta, yfl[2, :]) - return y - elseif tA_uc in ('S', 'H') - # re-wrap again and use plain ('N') matvec mul algorithm, - # because _generic_matvecmul! can't handle the HermOrSym cases specifically - return _generic_matvecmul!(y, 'N', wrap(A, tA), x, α, β) - else - return _generic_matvecmul!(y, tA, A, x, α, β) - end -end - -# the aggressive constprop pushes tA and tB into gemm_wrapper!, which is needed for wrap calls within it -# to be concretely inferred -Base.@constprop :aggressive function syrk_wrapper!(C::StridedMatrix{T}, tA::AbstractChar, A::StridedVecOrMat{T}, - alpha::Number, beta::Number) where {T<:BlasFloat} - nC = checksquare(C) - tA_uc = uppercase(tA) # potentially convert a WrapperChar to a Char - if tA_uc == 'T' - (nA, mA) = size(A,1), size(A,2) - tAt = 'N' - else - (mA, nA) = size(A,1), size(A,2) - tAt = 'T' - end - if nC != mA - throw(DimensionMismatch(lazy"output matrix has size: $(nC), but should have size $(mA)")) - end - - # BLAS.syrk! only updates symmetric C - # alternatively, make non-zero β a show-stopper for BLAS.syrk! 
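-    # In other words: BLAS.syrk!('U', ...) updates only the upper triangle of C, and the
-    # copytri! call below then mirrors that triangle into the lower half. A nonzero beta
-    # therefore only reaches the upper half, so the fast path is taken only when the
-    # existing contents of C are irrelevant (beta == 0) or already symmetric; otherwise
-    # the computation falls through to gemm_wrapper!.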
- if iszero(beta) || issymmetric(C) - α, β = promote(alpha, beta, zero(T)) - if (alpha isa Union{Bool,T} && - beta isa Union{Bool,T} && - stride(A, 1) == stride(C, 1) == 1 && - _fullstride2(A) && _fullstride2(C)) - return copytri!(BLAS.syrk!('U', tA, alpha, A, beta, C), 'U') - end - end - return gemm_wrapper!(C, tA, tAt, A, A, alpha, beta) -end -# legacy method -syrk_wrapper!(C::StridedMatrix{T}, tA::AbstractChar, A::StridedVecOrMat{T}, _add::MulAddMul = MulAddMul()) where {T<:BlasFloat} = - syrk_wrapper!(C, tA, A, _add.alpha, _add.beta) - -# the aggressive constprop pushes tA and tB into gemm_wrapper!, which is needed for wrap calls within it -# to be concretely inferred -Base.@constprop :aggressive function herk_wrapper!(C::Union{StridedMatrix{T}, StridedMatrix{Complex{T}}}, tA::AbstractChar, A::Union{StridedVecOrMat{T}, StridedVecOrMat{Complex{T}}}, - α::Number, β::Number) where {T<:BlasReal} - nC = checksquare(C) - tA_uc = uppercase(tA) # potentially convert a WrapperChar to a Char - if tA_uc == 'C' - (nA, mA) = size(A,1), size(A,2) - tAt = 'N' - else - (mA, nA) = size(A,1), size(A,2) - tAt = 'C' - end - if nC != mA - throw(DimensionMismatch(lazy"output matrix has size: $(nC), but should have size $(mA)")) - end - - # Result array does not need to be initialized as long as beta==0 - # C = Matrix{T}(undef, mA, mA) - - if iszero(β) || issymmetric(C) - alpha, beta = promote(α, β, zero(T)) - if (alpha isa Union{Bool,T} && - beta isa Union{Bool,T} && - stride(A, 1) == stride(C, 1) == 1 && - _fullstride2(A) && _fullstride2(C)) - return copytri!(BLAS.herk!('U', tA, alpha, A, beta, C), 'U', true) - end - end - return gemm_wrapper!(C, tA, tAt, A, A, α, β) -end -# legacy method -herk_wrapper!(C::Union{StridedMatrix{T}, StridedMatrix{Complex{T}}}, tA::AbstractChar, A::Union{StridedVecOrMat{T}, StridedVecOrMat{Complex{T}}}, - _add::MulAddMul = MulAddMul()) where {T<:BlasReal} = - herk_wrapper!(C, tA, A, _add.alpha, _add.beta) - -# Aggressive constprop helps propagate the values of tA and tB into wrap, which -# makes the calls concretely inferred -Base.@constprop :aggressive function gemm_wrapper(tA::AbstractChar, tB::AbstractChar, - A::StridedVecOrMat{T}, - B::StridedVecOrMat{T}) where {T<:BlasFloat} - mA, nA = lapack_size(tA, A) - mB, nB = lapack_size(tB, B) - C = similar(B, T, mA, nB) - # We convert the chars to uppercase to potentially unwrap a WrapperChar, - # and extract the char corresponding to the wrapper type - tA_uc, tB_uc = uppercase(tA), uppercase(tB) - # the map in all ensures constprop by acting on tA and tB individually, instead of looping over them. 
- if all(map(in(('N', 'T', 'C')), (tA_uc, tB_uc))) - gemm_wrapper!(C, tA, tB, A, B, true, false) - else - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), true, false) - end -end - -# Aggressive constprop helps propagate the values of tA and tB into wrap, which -# makes the calls concretely inferred -Base.@constprop :aggressive function gemm_wrapper!(C::StridedVecOrMat{T}, tA::AbstractChar, tB::AbstractChar, - A::StridedVecOrMat{T}, B::StridedVecOrMat{T}, - α::Number, β::Number) where {T<:BlasFloat} - mA, nA = lapack_size(tA, A) - mB, nB = lapack_size(tB, B) - - if nA != mB - throw(DimensionMismatch(lazy"A has dimensions ($mA,$nA) but B has dimensions ($mB,$nB)")) - end - - if C === A || B === C - throw(ArgumentError("output matrix must not be aliased with input matrix")) - end - - alpha, beta = promote(α, β, zero(T)) - if (alpha isa Union{Bool,T} && - beta isa Union{Bool,T} && - stride(A, 1) == stride(B, 1) == stride(C, 1) == 1 && - _fullstride2(A) && _fullstride2(B) && _fullstride2(C)) - return BLAS.gemm!(tA, tB, alpha, A, B, beta, C) - end - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), α, β) -end -# legacy method -gemm_wrapper!(C::StridedVecOrMat{T}, tA::AbstractChar, tB::AbstractChar, - A::StridedVecOrMat{T}, B::StridedVecOrMat{T}, _add::MulAddMul = MulAddMul()) where {T<:BlasFloat} = - gemm_wrapper!(C, tA, tB, A, B, _add.alpha, _add.beta) - -# Aggressive constprop helps propagate the values of tA and tB into wrap, which -# makes the calls concretely inferred -Base.@constprop :aggressive function gemm_wrapper!(C::StridedVecOrMat{Complex{T}}, tA::AbstractChar, tB::AbstractChar, - A::StridedVecOrMat{Complex{T}}, B::StridedVecOrMat{T}, - α::Number, β::Number) where {T<:BlasReal} - mA, nA = lapack_size(tA, A) - mB, nB = lapack_size(tB, B) - - if nA != mB - throw(DimensionMismatch(lazy"A has dimensions ($mA,$nA) but B has dimensions ($mB,$nB)")) - end - - if C === A || B === C - throw(ArgumentError("output matrix must not be aliased with input matrix")) - end - - alpha, beta = promote(α, β, zero(T)) - - tA_uc = uppercase(tA) # potentially convert a WrapperChar to a Char - - # Make-sure reinterpret-based optimization is BLAS-compatible. - if (alpha isa Union{Bool,T} && - beta isa Union{Bool,T} && - stride(A, 1) == stride(B, 1) == stride(C, 1) == 1 && - _fullstride2(A) && _fullstride2(B) && _fullstride2(C) && tA_uc == 'N') - BLAS.gemm!(tA, tB, alpha, reinterpret(T, A), B, beta, reinterpret(T, C)) - return C - end - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), α, β) -end -# legacy method -gemm_wrapper!(C::StridedVecOrMat{Complex{T}}, tA::AbstractChar, tB::AbstractChar, - A::StridedVecOrMat{Complex{T}}, B::StridedVecOrMat{T}, _add::MulAddMul = MulAddMul()) where {T<:BlasReal} = - gemm_wrapper!(C, tA, tB, A, B, _add.alpha, _add.beta) - -# blas.jl defines matmul for floats; other integer and mixed precision -# cases are handled here - -lapack_size(t::AbstractChar, M::AbstractVecOrMat) = (size(M, t=='N' ? 1 : 2), size(M, t=='N' ? 
2 : 1)) - -""" - copyto!(B::AbstractMatrix, ir_dest::AbstractUnitRange, jr_dest::AbstractUnitRange, - tM::AbstractChar, - M::AbstractVecOrMat, ir_src::AbstractUnitRange, jr_src::AbstractUnitRange) -> B - -Efficiently copy elements of matrix `M` to `B` conditioned on the character -parameter `tM` as follows: - -| `tM` | Destination | Source | -| --- | :--- | :--- | -| `'N'` | `B[ir_dest, jr_dest]` | `M[ir_src, jr_src]` | -| `'T'` | `B[ir_dest, jr_dest]` | `transpose(M)[ir_src, jr_src]` | -| `'C'` | `B[ir_dest, jr_dest]` | `adjoint(M)[ir_src, jr_src]` | - -The elements `B[ir_dest, jr_dest]` are overwritten. Furthermore, the index range -parameters must satisfy `length(ir_dest) == length(ir_src)` and -`length(jr_dest) == length(jr_src)`. - -See also [`copy_transpose!`](@ref) and [`copy_adjoint!`](@ref). -""" -function copyto!(B::AbstractVecOrMat, ir_dest::AbstractUnitRange{Int}, jr_dest::AbstractUnitRange{Int}, tM::AbstractChar, M::AbstractVecOrMat, ir_src::AbstractUnitRange{Int}, jr_src::AbstractUnitRange{Int}) - tM_uc = uppercase(tM) # potentially convert a WrapperChar to a Char - if tM_uc == 'N' - copyto!(B, ir_dest, jr_dest, M, ir_src, jr_src) - elseif tM_uc == 'T' - copy_transpose!(B, ir_dest, jr_dest, M, jr_src, ir_src) - else - copy_adjoint!(B, ir_dest, jr_dest, M, jr_src, ir_src) - end - B -end - -""" - copy_transpose!(B::AbstractMatrix, ir_dest::AbstractUnitRange, jr_dest::AbstractUnitRange, - tM::AbstractChar, - M::AbstractVecOrMat, ir_src::AbstractUnitRange, jr_src::AbstractUnitRange) -> B - -Efficiently copy elements of matrix `M` to `B` conditioned on the character -parameter `tM` as follows: - -| `tM` | Destination | Source | -| --- | :--- | :--- | -| `'N'` | `B[ir_dest, jr_dest]` | `transpose(M)[jr_src, ir_src]` | -| `'T'` | `B[ir_dest, jr_dest]` | `M[jr_src, ir_src]` | -| `'C'` | `B[ir_dest, jr_dest]` | `conj(M)[jr_src, ir_src]` | - -The elements `B[ir_dest, jr_dest]` are overwritten. Furthermore, the index -range parameters must satisfy `length(ir_dest) == length(jr_src)` and -`length(jr_dest) == length(ir_src)`. - -See also [`copyto!`](@ref) and [`copy_adjoint!`](@ref). -""" -function copy_transpose!(B::AbstractMatrix, ir_dest::AbstractUnitRange{Int}, jr_dest::AbstractUnitRange{Int}, tM::AbstractChar, M::AbstractVecOrMat, ir_src::AbstractUnitRange{Int}, jr_src::AbstractUnitRange{Int}) - tM_uc = uppercase(tM) # potentially convert a WrapperChar to a Char - if tM_uc == 'N' - copy_transpose!(B, ir_dest, jr_dest, M, ir_src, jr_src) - else - copyto!(B, ir_dest, jr_dest, M, jr_src, ir_src) - tM_uc == 'C' && conj!(@view B[ir_dest, jr_dest]) - end - B -end - -# TODO: It will be faster for large matrices to convert to float, -# call BLAS, and convert back to required type. - -# NOTE: the generic version is also called as fallback for -# strides != 1 cases - -# legacy method, retained for backward compatibility -generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, _add::MulAddMul = MulAddMul()) = - generic_matvecmul!(C, tA, A, B, _add.alpha, _add.beta) -@inline function generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, - alpha::Number, beta::Number) - tA_uc = uppercase(tA) # potentially convert a WrapperChar to a Char - Anew, ta = tA_uc in ('S', 'H') ? 
(wrap(A, tA), oftype(tA, 'N')) : (A, tA) - return _generic_matvecmul!(C, ta, Anew, B, alpha, beta) -end - -# legacy method, retained for backward compatibility -_generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, _add::MulAddMul = MulAddMul()) = - _generic_matvecmul!(C, tA, A, B, _add.alpha, _add.beta) -function __generic_matvecmul!(f::F, C::AbstractVector, A::AbstractVecOrMat, B::AbstractVector, - alpha::Number, beta::Number) where {F} - Astride = size(A, 1) - @inbounds begin - if length(B) == 0 - for k = eachindex(C) - @stable_muladdmul _modify!(MulAddMul(alpha,beta), false, C, k) - end - else - for k = eachindex(C) - aoffs = (k-1)*Astride - firstterm = f(A[aoffs + 1]) * B[1] - s = zero(firstterm + firstterm) - for i = eachindex(B) - s += f(A[aoffs+i]) * B[i] - end - @stable_muladdmul _modify!(MulAddMul(alpha,beta), s, C, k) - end - end - end -end -function __generic_matvecmul!(::typeof(identity), C::AbstractVector, A::AbstractVecOrMat, B::AbstractVector, - alpha::Number, beta::Number) - Astride = size(A, 1) - @inbounds begin - for i = eachindex(C) - if !iszero(beta) - C[i] *= beta - elseif length(B) == 0 - C[i] = false - else - C[i] = zero(A[i]*B[1] + A[i]*B[1]) - end - end - for k = eachindex(B) - aoffs = (k-1)*Astride - b = @stable_muladdmul MulAddMul(alpha,beta)(B[k]) - for i = eachindex(C) - C[i] += A[aoffs + i] * b - end - end - end - return C -end -function _generic_matvecmul!(C::AbstractVector, tA, A::AbstractVecOrMat, B::AbstractVector, - alpha::Number, beta::Number) - require_one_based_indexing(C, A, B) - @assert tA in ('N', 'T', 'C') - mB = length(B) - mA, nA = lapack_size(tA, A) - if mB != nA - throw(DimensionMismatch(lazy"matrix A has dimensions ($mA,$nA), vector B has length $mB")) - end - if mA != length(C) - throw(DimensionMismatch(lazy"result C has length $(length(C)), needs length $mA")) - end - - if tA == 'T' # fastest case - __generic_matvecmul!(transpose, C, A, B, alpha, beta) - elseif tA == 'C' - __generic_matvecmul!(adjoint, C, A, B, alpha, beta) - else # tA == 'N' - __generic_matvecmul!(identity, C, A, B, alpha, beta) - end - C -end - -function generic_matmatmul(tA, tB, A::AbstractVecOrMat{T}, B::AbstractMatrix{S}) where {T,S} - mA, nA = lapack_size(tA, A) - mB, nB = lapack_size(tB, B) - C = similar(B, promote_op(matprod, T, S), mA, nB) - generic_matmatmul!(C, tA, tB, A, B, true, false) -end - -# aggressive const prop makes mixed eltype mul!(C, A, B) invoke _generic_matmatmul! 
directly -# legacy method -Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::AbstractVecOrMat, B::AbstractVecOrMat, _add::MulAddMul = MulAddMul()) = - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), _add.alpha, _add.beta) -Base.@constprop :aggressive generic_matmatmul!(C::AbstractVecOrMat, tA, tB, A::AbstractVecOrMat, B::AbstractVecOrMat, alpha::Number, beta::Number) = - _generic_matmatmul!(C, wrap(A, tA), wrap(B, tB), alpha, beta) - -# legacy method -_generic_matmatmul!(C::AbstractVecOrMat, A::AbstractVecOrMat, B::AbstractVecOrMat, _add::MulAddMul) = - _generic_matmatmul!(C, A, B, _add.alpha, _add.beta) - -@noinline function _generic_matmatmul!(C::AbstractVecOrMat{R}, A::AbstractVecOrMat, B::AbstractVecOrMat, - alpha::Number, beta::Number) where {R} - AxM = axes(A, 1) - AxK = axes(A, 2) # we use two `axes` calls in case of `AbstractVector` - BxK = axes(B, 1) - BxN = axes(B, 2) - CxM = axes(C, 1) - CxN = axes(C, 2) - if AxM != CxM - throw(DimensionMismatch(lazy"matrix A has axes ($AxM,$AxK), matrix C has axes ($CxM,$CxN)")) - end - if AxK != BxK - throw(DimensionMismatch(lazy"matrix A has axes ($AxM,$AxK), matrix B has axes ($BxK,$CxN)")) - end - if BxN != CxN - throw(DimensionMismatch(lazy"matrix B has axes ($BxK,$BxN), matrix C has axes ($CxM,$CxN)")) - end - __generic_matmatmul!(C, A, B, alpha, beta, Val(isbitstype(R) && sizeof(R) ≤ 16)) - return C -end -__generic_matmatmul!(C, A::Adjoint, B::Adjoint, alpha, beta, ::Val{true}) = _generic_matmatmul_adjtrans!(C, A, B, alpha, beta) -__generic_matmatmul!(C, A::Transpose, B::Transpose, alpha, beta, ::Val{true}) = _generic_matmatmul_adjtrans!(C, A, B, alpha, beta) -__generic_matmatmul!(C, A::Union{Adjoint, Transpose}, B, alpha, beta, ::Val{true}) = _generic_matmatmul_generic!(C, A, B, alpha, beta) -__generic_matmatmul!(C, A, B, alpha, beta, ::Val{true}) = _generic_matmatmul_nonadjtrans!(C, A, B, alpha, beta) -__generic_matmatmul!(C, A, B, alpha, beta, ::Val{false}) = _generic_matmatmul_generic!(C, A, B, alpha, beta) - -function _generic_matmatmul_nonadjtrans!(C, A, B, alpha, beta) - _rmul_or_fill!(C, beta) - (iszero(alpha) || isempty(A) || isempty(B)) && return C - @inbounds for n in axes(B, 2), k in axes(B, 1) - # Balpha = B[k,n] * alpha, but we skip the multiplication in case isone(alpha) - Balpha = @stable_muladdmul MulAddMul(alpha, false)(B[k,n]) - @simd for m in axes(A, 1) - C[m,n] = muladd(A[m,k], Balpha, C[m,n]) - end - end - C -end -function _generic_matmatmul_adjtrans!(C, A, B, alpha, beta) - _rmul_or_fill!(C, beta) - (iszero(alpha) || isempty(A) || isempty(B)) && return C - t = wrapperop(A) - pB = parent(B) - pA = parent(A) - tmp = similar(C, axes(C, 2)) - ci = firstindex(C, 1) - ta = t(alpha) - for i in axes(A, 1) - mul!(tmp, pB, view(pA, :, i)) - @views C[ci,:] .+= t.(ta .* tmp) - ci += 1 - end - C -end -function _generic_matmatmul_generic!(C, A, B, alpha, beta) - if iszero(alpha) || isempty(A) || isempty(B) - return _rmul_or_fill!(C, beta) - end - a1 = firstindex(A, 2) - b1 = firstindex(B, 1) - @inbounds for i in axes(A, 1), j in axes(B, 2) - z2 = zero(A[i, a1]*B[b1, j] + A[i, a1]*B[b1, j]) - Ctmp = convert(promote_type(eltype(C), typeof(z2)), z2) - @simd for k in axes(A, 2) - Ctmp = muladd(A[i, k], B[k, j], Ctmp) - end - @stable_muladdmul _modify!(MulAddMul(alpha,beta), Ctmp, C, (i,j)) - end - C -end - -# multiply 2x2 matrices -function matmul2x2(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} - matmul2x2!(similar(B, promote_op(matprod, T, S), 2, 2), tA, tB, A, B) -end - -function 
__matmul_checks(C, A, B, sz) - require_one_based_indexing(C, A, B) - if C === A || B === C - throw(ArgumentError("output matrix must not be aliased with input matrix")) - end - if !(size(A) == size(B) == size(C) == sz) - throw(DimensionMismatch(lazy"A has size $(size(A)), B has size $(size(B)), C has size $(size(C))")) - end - return nothing -end - -# separate function with the core of matmul2x2! that doesn't depend on a MulAddMul -function _matmul2x2_elements(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix) - __matmul_checks(C, A, B, (2,2)) - __matmul2x2_elements(tA, tB, A, B) -end -function __matmul2x2_elements(tA, A::AbstractMatrix) - @inbounds begin - tA_uc = uppercase(tA) # possibly unwrap a WrapperChar - if tA_uc == 'N' - A11 = A[1,1]; A12 = A[1,2]; A21 = A[2,1]; A22 = A[2,2] - elseif tA_uc == 'T' - # TODO making these lazy could improve perf - A11 = copy(transpose(A[1,1])); A12 = copy(transpose(A[2,1])) - A21 = copy(transpose(A[1,2])); A22 = copy(transpose(A[2,2])) - elseif tA_uc == 'C' - # TODO making these lazy could improve perf - A11 = copy(A[1,1]'); A12 = copy(A[2,1]') - A21 = copy(A[1,2]'); A22 = copy(A[2,2]') - elseif tA_uc == 'S' - if isuppercase(tA) # tA == 'S' - A11 = symmetric(A[1,1], :U); A12 = A[1,2] - A21 = copy(transpose(A[1,2])); A22 = symmetric(A[2,2], :U) - else - A11 = symmetric(A[1,1], :L); A12 = copy(transpose(A[2,1])) - A21 = A[2,1]; A22 = symmetric(A[2,2], :L) - end - elseif tA_uc == 'H' - if isuppercase(tA) # tA == 'H' - A11 = hermitian(A[1,1], :U); A12 = A[1,2] - A21 = copy(adjoint(A[1,2])); A22 = hermitian(A[2,2], :U) - else # if tA == 'h' - A11 = hermitian(A[1,1], :L); A12 = copy(adjoint(A[2,1])) - A21 = A[2,1]; A22 = hermitian(A[2,2], :L) - end - end - end # inbounds - A11, A12, A21, A22 -end -__matmul2x2_elements(tA, tB, A, B) = __matmul2x2_elements(tA, A), __matmul2x2_elements(tB, B) - -function _modify2x2!(Aelements, Belements, C, _add) - (A11, A12, A21, A22), (B11, B12, B21, B22) = Aelements, Belements - @inbounds begin - _modify!(_add, A11*B11 + A12*B21, C, (1,1)) - _modify!(_add, A21*B11 + A22*B21, C, (2,1)) - _modify!(_add, A11*B12 + A12*B22, C, (1,2)) - _modify!(_add, A21*B12 + A22*B22, C, (2,2)) - end # inbounds - C -end -function matmul2x2!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, - α = true, β = false) - Aelements, Belements = _matmul2x2_elements(C, tA, tB, A, B) - @stable_muladdmul _modify2x2!(Aelements, Belements, C, MulAddMul(α, β)) - C -end - -# Multiply 3x3 matrices -function matmul3x3(tA, tB, A::AbstractMatrix{T}, B::AbstractMatrix{S}) where {T,S} - matmul3x3!(similar(B, promote_op(matprod, T, S), 3, 3), tA, tB, A, B) -end - -# separate function with the core of matmul3x3! 
that doesn't depend on a MulAddMul -function _matmul3x3_elements(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix) - __matmul_checks(C, A, B, (3,3)) - __matmul3x3_elements(tA, tB, A, B) -end -function __matmul3x3_elements(tA, A::AbstractMatrix) - @inbounds begin - tA_uc = uppercase(tA) # possibly unwrap a WrapperChar - if tA_uc == 'N' - A11 = A[1,1]; A12 = A[1,2]; A13 = A[1,3] - A21 = A[2,1]; A22 = A[2,2]; A23 = A[2,3] - A31 = A[3,1]; A32 = A[3,2]; A33 = A[3,3] - elseif tA_uc == 'T' - # TODO making these lazy could improve perf - A11 = copy(transpose(A[1,1])); A12 = copy(transpose(A[2,1])); A13 = copy(transpose(A[3,1])) - A21 = copy(transpose(A[1,2])); A22 = copy(transpose(A[2,2])); A23 = copy(transpose(A[3,2])) - A31 = copy(transpose(A[1,3])); A32 = copy(transpose(A[2,3])); A33 = copy(transpose(A[3,3])) - elseif tA_uc == 'C' - # TODO making these lazy could improve perf - A11 = copy(A[1,1]'); A12 = copy(A[2,1]'); A13 = copy(A[3,1]') - A21 = copy(A[1,2]'); A22 = copy(A[2,2]'); A23 = copy(A[3,2]') - A31 = copy(A[1,3]'); A32 = copy(A[2,3]'); A33 = copy(A[3,3]') - elseif tA_uc == 'S' - if isuppercase(tA) # tA == 'S' - A11 = symmetric(A[1,1], :U); A12 = A[1,2]; A13 = A[1,3] - A21 = copy(transpose(A[1,2])); A22 = symmetric(A[2,2], :U); A23 = A[2,3] - A31 = copy(transpose(A[1,3])); A32 = copy(transpose(A[2,3])); A33 = symmetric(A[3,3], :U) - else - A11 = symmetric(A[1,1], :L); A12 = copy(transpose(A[2,1])); A13 = copy(transpose(A[3,1])) - A21 = A[2,1]; A22 = symmetric(A[2,2], :L); A23 = copy(transpose(A[3,2])) - A31 = A[3,1]; A32 = A[3,2]; A33 = symmetric(A[3,3], :L) - end - elseif tA_uc == 'H' - if isuppercase(tA) # tA == 'H' - A11 = hermitian(A[1,1], :U); A12 = A[1,2]; A13 = A[1,3] - A21 = copy(adjoint(A[1,2])); A22 = hermitian(A[2,2], :U); A23 = A[2,3] - A31 = copy(adjoint(A[1,3])); A32 = copy(adjoint(A[2,3])); A33 = hermitian(A[3,3], :U) - else # if tA == 'h' - A11 = hermitian(A[1,1], :L); A12 = copy(adjoint(A[2,1])); A13 = copy(adjoint(A[3,1])) - A21 = A[2,1]; A22 = hermitian(A[2,2], :L); A23 = copy(adjoint(A[3,2])) - A31 = A[3,1]; A32 = A[3,2]; A33 = hermitian(A[3,3], :L) - end - end - end # inbounds - A11, A12, A13, A21, A22, A23, A31, A32, A33 -end -__matmul3x3_elements(tA, tB, A, B) = __matmul3x3_elements(tA, A), __matmul3x3_elements(tB, B) - -function _modify3x3!(Aelements, Belements, C, _add) - (A11, A12, A13, A21, A22, A23, A31, A32, A33), - (B11, B12, B13, B21, B22, B23, B31, B32, B33) = Aelements, Belements - @inbounds begin - _modify!(_add, A11*B11 + A12*B21 + A13*B31, C, (1,1)) - _modify!(_add, A21*B11 + A22*B21 + A23*B31, C, (2,1)) - _modify!(_add, A31*B11 + A32*B21 + A33*B31, C, (3,1)) - - _modify!(_add, A11*B12 + A12*B22 + A13*B32, C, (1,2)) - _modify!(_add, A21*B12 + A22*B22 + A23*B32, C, (2,2)) - _modify!(_add, A31*B12 + A32*B22 + A33*B32, C, (3,2)) - - _modify!(_add, A11*B13 + A12*B23 + A13*B33, C, (1,3)) - _modify!(_add, A21*B13 + A22*B23 + A23*B33, C, (2,3)) - _modify!(_add, A31*B13 + A32*B23 + A33*B33, C, (3,3)) - end # inbounds - C -end -function matmul3x3!(C::AbstractMatrix, tA, tB, A::AbstractMatrix, B::AbstractMatrix, - α = true, β = false) - - Aelements, Belements = _matmul3x3_elements(C, tA, tB, A, B) - @stable_muladdmul _modify3x3!(Aelements, Belements, C, MulAddMul(α, β)) - C -end - -const RealOrComplex = Union{Real,Complex} - -# Three-argument * -""" - *(A, B::AbstractMatrix, C) - A * B * C * D - -Chained multiplication of 3 or 4 matrices is done in the most efficient sequence, -based on the sizes of the arrays. 
That is, the number of scalar multiplications needed -for `(A * B) * C` (with 3 dense matrices) is compared to that for `A * (B * C)` -to choose which of these to execute. - -If the last factor is a vector, or the first a transposed vector, then it is efficient -to deal with these first. In particular `x' * B * y` means `(x' * B) * y` -for an ordinary column-major `B::Matrix`. Unlike `dot(x, B, y)`, this -allocates an intermediate array. - -If the first or last factor is a number, this will be fused with the matrix -multiplication, using 5-arg [`mul!`](@ref). - -See also [`muladd`](@ref), [`dot`](@ref). - -!!! compat "Julia 1.7" - These optimisations require at least Julia 1.7. -""" -*(A::AbstractMatrix, B::AbstractMatrix, x::AbstractVector) = A * (B*x) - -*(tu::AdjOrTransAbsVec, B::AbstractMatrix, v::AbstractVector) = (tu*B) * v -*(tu::AdjOrTransAbsVec, B::AdjOrTransAbsMat, v::AbstractVector) = tu * (B*v) - -*(A::AbstractMatrix, x::AbstractVector, γ::Number) = mat_vec_scalar(A,x,γ) -*(A::AbstractMatrix, B::AbstractMatrix, γ::Number) = mat_mat_scalar(A,B,γ) -*(α::RealOrComplex, B::AbstractMatrix{<:RealOrComplex}, C::AbstractVector{<:RealOrComplex}) = - mat_vec_scalar(B,C,α) -*(α::RealOrComplex, B::AbstractMatrix{<:RealOrComplex}, C::AbstractMatrix{<:RealOrComplex}) = - mat_mat_scalar(B,C,α) - -*(α::Number, u::AbstractVector, tv::AdjOrTransAbsVec) = broadcast(*, α, u, tv) -*(u::AbstractVector, tv::AdjOrTransAbsVec, γ::Number) = broadcast(*, u, tv, γ) -*(u::AbstractVector, tv::AdjOrTransAbsVec, C::AbstractMatrix) = u * (tv*C) - -*(A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix) = _tri_matmul(A,B,C) -*(tv::AdjOrTransAbsVec, B::AbstractMatrix, C::AbstractMatrix) = (tv*B) * C - -function _tri_matmul(A,B,C,δ=nothing) - n,m = size(A) - # m,k == size(B) - k,l = size(C) - costAB_C = n*m*k + n*k*l # multiplications, allocations n*k + n*l - costA_BC = m*k*l + n*m*l # m*l + n*l - if costA_BC < costAB_C - isnothing(δ) ? A * (B*C) : A * mat_mat_scalar(B,C,δ) - else - isnothing(δ) ? (A*B) * C : mat_mat_scalar(A*B, C, δ) - end -end - -# Fast path for two arrays * one scalar is opt-in, via mat_vec_scalar and mat_mat_scalar. 
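-# As a hedged illustration of this opt-in fast path (using only the methods defined in
-# this file): for strided arguments the leading or trailing scalar is folded into a
-# single five-argument mul! call instead of first materializing `x * γ`:
-#
-#     A = rand(4, 4); x = rand(4)
-#     A * x * 2.5    # *(A, x, γ) -> mat_vec_scalar -> mul!(C, A, x, 2.5, false)
-#     2.5 * A * x    # *(α, B, C) -> mat_vec_scalar -> same fused path
-#
-# Non-strided types keep the fallbacks `A * (x * γ)` and `(A * B) * γ`.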
- -mat_vec_scalar(A, x, γ) = A * (x * γ) # fallback -mat_vec_scalar(A::StridedMaybeAdjOrTransMat, x::StridedVector, γ) = _mat_vec_scalar(A, x, γ) -mat_vec_scalar(A::AdjOrTransAbsVec, x::StridedVector, γ) = (A * x) * γ - -function _mat_vec_scalar(A, x, γ) - T = promote_type(eltype(A), eltype(x), typeof(γ)) - C = similar(A, T, axes(A,1)) - mul!(C, A, x, γ, false) -end - -mat_mat_scalar(A, B, γ) = (A*B) * γ # fallback -mat_mat_scalar(A::StridedMaybeAdjOrTransMat, B::StridedMaybeAdjOrTransMat, γ) = - _mat_mat_scalar(A, B, γ) - -function _mat_mat_scalar(A, B, γ) - T = promote_type(eltype(A), eltype(B), typeof(γ)) - C = similar(A, T, axes(A,1), axes(B,2)) - mul!(C, A, B, γ, false) -end - -mat_mat_scalar(A::AdjointAbsVec, B, γ) = (γ' * (A * B)')' # preserving order, adjoint reverses -mat_mat_scalar(A::AdjointAbsVec{<:RealOrComplex}, B::StridedMaybeAdjOrTransMat{<:RealOrComplex}, γ::RealOrComplex) = - mat_vec_scalar(B', A', γ')' - -mat_mat_scalar(A::TransposeAbsVec, B, γ) = transpose(γ * transpose(A * B)) -mat_mat_scalar(A::TransposeAbsVec{<:RealOrComplex}, B::StridedMaybeAdjOrTransMat{<:RealOrComplex}, γ::RealOrComplex) = - transpose(mat_vec_scalar(transpose(B), transpose(A), γ)) - - -# Four-argument *, by type -*(α::Number, β::Number, C::AbstractMatrix, x::AbstractVector) = (α*β) * C * x -*(α::Number, β::Number, C::AbstractMatrix, D::AbstractMatrix) = (α*β) * C * D -*(α::Number, B::AbstractMatrix, C::AbstractMatrix, x::AbstractVector) = α * B * (C*x) -*(α::Number, vt::AdjOrTransAbsVec, C::AbstractMatrix, x::AbstractVector) = α * (vt*C*x) -*(α::RealOrComplex, vt::AdjOrTransAbsVec{<:RealOrComplex}, C::AbstractMatrix{<:RealOrComplex}, D::AbstractMatrix{<:RealOrComplex}) = - (α*vt*C) * D # solves an ambiguity - -*(A::AbstractMatrix, x::AbstractVector, γ::Number, δ::Number) = A * x * (γ*δ) -*(A::AbstractMatrix, B::AbstractMatrix, γ::Number, δ::Number) = A * B * (γ*δ) -*(A::AbstractMatrix, B::AbstractMatrix, x::AbstractVector, δ::Number, ) = A * (B*x*δ) -*(vt::AdjOrTransAbsVec, B::AbstractMatrix, x::AbstractVector, δ::Number) = (vt*B*x) * δ -*(vt::AdjOrTransAbsVec, B::AbstractMatrix, C::AbstractMatrix, δ::Number) = (vt*B) * C * δ - -*(A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix, x::AbstractVector) = A * B * (C*x) -*(vt::AdjOrTransAbsVec, B::AbstractMatrix, C::AbstractMatrix, D::AbstractMatrix) = (vt*B) * C * D -*(vt::AdjOrTransAbsVec, B::AbstractMatrix, C::AbstractMatrix, x::AbstractVector) = vt * B * (C*x) - -# Four-argument *, by size -*(A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix, δ::Number) = _tri_matmul(A,B,C,δ) -*(α::RealOrComplex, B::AbstractMatrix{<:RealOrComplex}, C::AbstractMatrix{<:RealOrComplex}, D::AbstractMatrix{<:RealOrComplex}) = - _tri_matmul(B,C,D,α) -*(A::AbstractMatrix, B::AbstractMatrix, C::AbstractMatrix, D::AbstractMatrix) = - _quad_matmul(A,B,C,D) - -function _quad_matmul(A,B,C,D) - c1 = _mul_cost((A,B),(C,D)) - c2 = _mul_cost(((A,B),C),D) - c3 = _mul_cost(A,(B,(C,D))) - c4 = _mul_cost((A,(B,C)),D) - c5 = _mul_cost(A,((B,C),D)) - cmin = min(c1,c2,c3,c4,c5) - if c1 == cmin - (A*B) * (C*D) - elseif c2 == cmin - ((A*B) * C) * D - elseif c3 == cmin - A * (B * (C*D)) - elseif c4 == cmin - (A * (B*C)) * D - else - A * ((B*C) * D) - end -end -@inline _mul_cost(A::AbstractMatrix) = 0 -@inline _mul_cost((A,B)::Tuple) = _mul_cost(A,B) -@inline _mul_cost(A,B) = _mul_cost(A) + _mul_cost(B) + *(_mul_sizes(A)..., last(_mul_sizes(B))) -@inline _mul_sizes(A::AbstractMatrix) = size(A) -@inline _mul_sizes((A,B)::Tuple) = first(_mul_sizes(A)), last(_mul_sizes(B)) diff 
--git a/stdlib/LinearAlgebra/src/qr.jl b/stdlib/LinearAlgebra/src/qr.jl deleted file mode 100644 index 9a89e58372d08..0000000000000 --- a/stdlib/LinearAlgebra/src/qr.jl +++ /dev/null @@ -1,769 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# QR Factorization -""" - QR <: Factorization - -A QR matrix factorization stored in a packed format, typically obtained from -[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then - -```math -A = Q R -``` - -where ``Q`` is an orthogonal/unitary matrix and ``R`` is upper triangular. -The matrix ``Q`` is stored as a sequence of Householder reflectors ``v_i`` -and coefficients ``\\tau_i`` where: - -```math -Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T). -``` - -Iterating the decomposition produces the components `Q` and `R`. - -The object has two fields: - -* `factors` is an `m`×`n` matrix. - - - The upper triangular part contains the elements of ``R``, that is `R = - triu(F.factors)` for a `QR` object `F`. - - - The subdiagonal part contains the reflectors ``v_i`` stored in a packed format where - ``v_i`` is the ``i``th column of the matrix `V = I + tril(F.factors, -1)`. - -* `τ` is a vector of length `min(m,n)` containing the coefficients ``\tau_i``. -""" -struct QR{T,S<:AbstractMatrix{T},C<:AbstractVector{T}} <: Factorization{T} - factors::S - τ::C - - function QR{T,S,C}(factors, τ) where {T,S<:AbstractMatrix{T},C<:AbstractVector{T}} - require_one_based_indexing(factors) - new{T,S,C}(factors, τ) - end -end -QR(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T} = - QR{T,typeof(factors),typeof(τ)}(factors, τ) -QR{T}(factors::AbstractMatrix, τ::AbstractVector) where {T} = - QR(convert(AbstractMatrix{T}, factors), convert(AbstractVector{T}, τ)) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(QR{T,S}(factors::AbstractMatrix{T}, τ::AbstractVector{T}) where {T,S}, - QR{T,S,typeof(τ)}(factors, τ), false) - -# iteration for destructuring into components -Base.iterate(S::QR) = (S.Q, Val(:R)) -Base.iterate(S::QR, ::Val{:R}) = (S.R, Val(:done)) -Base.iterate(S::QR, ::Val{:done}) = nothing - -# Note. For QRCompactWY factorization without pivoting, the WY representation based method introduced in LAPACK 3.4 -""" - QRCompactWY <: Factorization - -A QR matrix factorization stored in a compact blocked format, typically obtained from -[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then - -```math -A = Q R -``` - -where ``Q`` is an orthogonal/unitary matrix and ``R`` is upper triangular. It is similar -to the [`QR`](@ref) format except that the orthogonal/unitary matrix ``Q`` is stored in -*Compact WY* format [^Schreiber1989]. For the block size ``n_b``, it is stored as -a `m`×`n` lower trapezoidal matrix ``V`` and a matrix ``T = (T_1 \\; T_2 \\; ... \\; -T_{b-1} \\; T_b')`` composed of ``b = \\lceil \\min(m,n) / n_b \\rceil`` upper triangular -matrices ``T_j`` of size ``n_b``×``n_b`` (``j = 1, ..., b-1``) and an upper trapezoidal -``n_b``×``\\min(m,n) - (b-1) n_b`` matrix ``T_b'`` (``j=b``) whose upper square part -denoted with ``T_b`` satisfying - -```math -Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T) -= \\prod_{j=1}^{b} (I - V_j T_j V_j^T) -``` - -such that ``v_i`` is the ``i``th column of ``V``, ``\\tau_i`` is the ``i``th element -of `[diag(T_1); diag(T_2); …; diag(T_b)]`, and ``(V_1 \\; V_2 \\; ... \\; V_b)`` -is the left `m`×`min(m, n)` block of ``V``. When constructed using [`qr`](@ref), -the block size is given by ``n_b = \\min(m, n, 36)``. 
- -Iterating the decomposition produces the components `Q` and `R`. - -The object has two fields: - -* `factors`, as in the [`QR`](@ref) type, is an `m`×`n` matrix. - - - The upper triangular part contains the elements of ``R``, that is `R = - triu(F.factors)` for a `QR` object `F`. - - - The subdiagonal part contains the reflectors ``v_i`` stored in a packed format such - that `V = I + tril(F.factors, -1)`. - -* `T` is a ``n_b``-by-``\\min(m,n)`` matrix as described above. The subdiagonal elements - for each triangular matrix ``T_j`` are ignored. - -!!! note - - This format should not to be confused with the older *WY* representation - [^Bischof1987]. - - -[^Bischof1987]: C Bischof and C Van Loan, "The WY representation for products of Householder matrices", SIAM J Sci Stat Comput 8 (1987), s2-s13. [doi:10.1137/0908009](https://doi.org/10.1137/0908009) - -[^Schreiber1989]: R Schreiber and C Van Loan, "A storage-efficient WY representation for products of Householder transformations", SIAM J Sci Stat Comput 10 (1989), 53-57. [doi:10.1137/0910005](https://doi.org/10.1137/0910005) -""" -struct QRCompactWY{S,M<:AbstractMatrix{S},C<:AbstractMatrix{S}} <: Factorization{S} - factors::M - T::C - - function QRCompactWY{S,M,C}(factors, T) where {S,M<:AbstractMatrix{S},C<:AbstractMatrix{S}} - require_one_based_indexing(factors) - new{S,M,C}(factors, T) - end -end -QRCompactWY(factors::AbstractMatrix{S}, T::AbstractMatrix{S}) where {S} = - QRCompactWY{S,typeof(factors),typeof(T)}(factors, T) -QRCompactWY{S}(factors::AbstractMatrix, T::AbstractMatrix) where {S} = - QRCompactWY(convert(AbstractMatrix{S}, factors), convert(AbstractMatrix{S}, T)) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(QRCompactWY{S,M}(factors::AbstractMatrix{S}, T::AbstractMatrix{S}) where {S,M}, - QRCompactWY{S,M,typeof(T)}(factors, T), false) - -# iteration for destructuring into components -Base.iterate(S::QRCompactWY) = (S.Q, Val(:R)) -Base.iterate(S::QRCompactWY, ::Val{:R}) = (S.R, Val(:done)) -Base.iterate(S::QRCompactWY, ::Val{:done}) = nothing - -# returns upper triangular views of all non-undef values of `qr(A).T`: -# -# julia> sparse(qr(A).T .== qr(A).T) -# 36×100 SparseMatrixCSC{Bool, Int64} with 1767 stored entries: -# ⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ -# ⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ -# ⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿ -# ⠀⠀⠀⠀⠀⠂⠛⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿ -# ⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⢀⠐⠙⢿⣿⣿⣿⣿ -# ⠀⠀⠐⠀⠀⠀⠀⠀⠀⢀⢙⣿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠁⠀⡀⠀⠙⢿⣿⣿ -# ⠀⠀⠐⠀⠀⠀⠀⠀⠀⠀⠄⠀⠙⢿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣿⣿⣿⣿⠀⠀⠀⠀⠀⠀⡀⠀⠀⢀⠀⠀⠙⢿ -# ⠀⡀⠀⠀⠀⠀⠀⠀⠂⠒⠒⠀⠀⠀⠙⢿⣿⣿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⣿⣿⠀⠀⠀⠀⠀⠀⠀⢀⠀⠀⠀⡀⠀⠀ -# ⠀⠀⠀⠀⠀⠀⠀⠀⣈⡀⠀⠀⠀⠀⠀⠀⠙⢿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢿⠀⠀⠀⠀⠀⠀⠀⠀⠀⡀⠂⠀⢀⠀ -# -function _triuppers_qr(T) - blocksize, cols = size(T) - return Iterators.map(0:div(cols - 1, blocksize)) do i - n = min(blocksize, cols - i * blocksize) - return UpperTriangular(view(T, 1:n, (1:n) .+ i * blocksize)) - end -end - -function Base.hash(F::QRCompactWY, h::UInt) - return hash(F.factors, foldr(hash, _triuppers_qr(F.T); init=hash(QRCompactWY, h))) -end -function Base.:(==)(A::QRCompactWY, B::QRCompactWY) - return A.factors == B.factors && all(splat(==), zip(_triuppers_qr.((A.T, B.T))...)) -end -function Base.isequal(A::QRCompactWY, B::QRCompactWY) - return isequal(A.factors, B.factors) && all(zip(_triuppers_qr.((A.T, B.T))...)) do (a, b) - isequal(a, b)::Bool - end -end - -""" - QRPivoted <: Factorization - -A QR matrix factorization with column pivoting in a packed format, typically 
obtained from -[`qr`](@ref). If ``A`` is an `m`×`n` matrix, then - -```math -A P = Q R -``` - -where ``P`` is a permutation matrix, ``Q`` is an orthogonal/unitary matrix and ``R`` is -upper triangular. The matrix ``Q`` is stored as a sequence of Householder reflectors: - -```math -Q = \\prod_{i=1}^{\\min(m,n)} (I - \\tau_i v_i v_i^T). -``` - -Iterating the decomposition produces the components `Q`, `R`, and `p`. - -The object has three fields: - -* `factors` is an `m`×`n` matrix. - - - The upper triangular part contains the elements of ``R``, that is `R = - triu(F.factors)` for a `QR` object `F`. - - - The subdiagonal part contains the reflectors ``v_i`` stored in a packed format where - ``v_i`` is the ``i``th column of the matrix `V = I + tril(F.factors, -1)`. - -* `τ` is a vector of length `min(m,n)` containing the coefficients ``\tau_i``. - -* `jpvt` is an integer vector of length `n` corresponding to the permutation ``P``. -""" -struct QRPivoted{T,S<:AbstractMatrix{T},C<:AbstractVector{T},P<:AbstractVector{<:Integer}} <: Factorization{T} - factors::S - τ::C - jpvt::P - - function QRPivoted{T,S,C,P}(factors, τ, jpvt) where {T,S<:AbstractMatrix{T},C<:AbstractVector{T},P<:AbstractVector{<:Integer}} - require_one_based_indexing(factors, τ, jpvt) - new{T,S,C,P}(factors, τ, jpvt) - end -end -QRPivoted(factors::AbstractMatrix{T}, τ::AbstractVector{T}, - jpvt::AbstractVector{<:Integer}) where {T} = - QRPivoted{T,typeof(factors),typeof(τ),typeof(jpvt)}(factors, τ, jpvt) -QRPivoted{T}(factors::AbstractMatrix, τ::AbstractVector, - jpvt::AbstractVector{<:Integer}) where {T} = - QRPivoted(convert(AbstractMatrix{T}, factors), convert(AbstractVector{T}, τ), jpvt) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(QRPivoted{T,S}(factors::AbstractMatrix{T}, τ::AbstractVector{T}, - jpvt::AbstractVector{<:Integer}) where {T,S}, - QRPivoted{T,S,typeof(τ),typeof(jpvt)}(factors, τ, jpvt), false) - -# iteration for destructuring into components -Base.iterate(S::QRPivoted) = (S.Q, Val(:R)) -Base.iterate(S::QRPivoted, ::Val{:R}) = (S.R, Val(:p)) -Base.iterate(S::QRPivoted, ::Val{:p}) = (S.p, Val(:done)) -Base.iterate(S::QRPivoted, ::Val{:done}) = nothing - -function qrfactUnblocked!(A::AbstractMatrix{T}) where {T} - require_one_based_indexing(A) - m, n = size(A) - τ = zeros(T, min(m,n)) - for k = 1:min(m - 1 + !(T<:Real), n) - x = view(A, k:m, k) - τk = reflector!(x) - τ[k] = τk - reflectorApply!(x, τk, view(A, k:m, k + 1:n)) - end - QR(A, τ) -end - -# Find index for columns with largest two norm -function indmaxcolumn(A::AbstractMatrix) - mm = norm(view(A, :, 1)) - ii = 1 - for i = 2:size(A, 2) - mi = norm(view(A, :, i)) - if abs(mi) > mm - mm = mi - ii = i - end - end - return ii -end - -function qrfactPivotedUnblocked!(A::AbstractMatrix) - m, n = size(A) - piv = Vector(UnitRange{BlasInt}(1,n)) - τ = Vector{eltype(A)}(undef, min(m,n)) - for j = 1:min(m,n) - - # Find column with maximum norm in trailing submatrix - jm = indmaxcolumn(view(A, j:m, j:n)) + j - 1 - - if jm != j - # Flip elements in pivoting vector - tmpp = piv[jm] - piv[jm] = piv[j] - piv[j] = tmpp - - # Update matrix with - for i = 1:m - tmp = A[i,jm] - A[i,jm] = A[i,j] - A[i,j] = tmp - end - end - - # Compute reflector of columns j - x = view(A, j:m, j) - τj = reflector!(x) - τ[j] = τj - - # Update trailing submatrix with reflector - reflectorApply!(x, τj, view(A, j:m, j+1:n)) - end - return QRPivoted{eltype(A), typeof(A), typeof(τ), typeof(piv)}(A, τ, piv) -end - -# LAPACK version -qr!(A::StridedMatrix{<:BlasFloat}, 
::NoPivot; blocksize=36) = - QRCompactWY(LAPACK.geqrt!(A, min(min(size(A)...), blocksize))...) -qr!(A::StridedMatrix{<:BlasFloat}, ::ColumnNorm) = QRPivoted(LAPACK.geqp3!(A)...) - -# Generic fallbacks - -""" - qr!(A, pivot = NoPivot(); blocksize) - -`qr!` is the same as [`qr`](@ref) when `A` is a subtype of [`AbstractMatrix`](@ref), -but saves space by overwriting the input `A`, instead of creating a copy. -An [`InexactError`](@ref) exception is thrown if the factorization produces a number not -representable by the element type of `A`, e.g. for integer types. - -!!! compat "Julia 1.4" - The `blocksize` keyword argument requires Julia 1.4 or later. - -# Examples -```jldoctest -julia> a = [1. 2.; 3. 4.] -2×2 Matrix{Float64}: - 1.0 2.0 - 3.0 4.0 - -julia> qr!(a) -LinearAlgebra.QRCompactWY{Float64, Matrix{Float64}, Matrix{Float64}} -Q factor: 2×2 LinearAlgebra.QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}} -R factor: -2×2 Matrix{Float64}: - -3.16228 -4.42719 - 0.0 -0.632456 - -julia> a = [1 2; 3 4] -2×2 Matrix{Int64}: - 1 2 - 3 4 - -julia> qr!(a) -ERROR: InexactError: Int64(3.1622776601683795) -Stacktrace: -[...] -``` -""" -qr!(A::AbstractMatrix, ::NoPivot) = qrfactUnblocked!(A) -qr!(A::AbstractMatrix, ::ColumnNorm) = qrfactPivotedUnblocked!(A) -qr!(A::AbstractMatrix) = qr!(A, NoPivot()) -# TODO: Remove in Julia v2.0 -@deprecate qr!(A::AbstractMatrix, ::Val{true}) qr!(A, ColumnNorm()) -@deprecate qr!(A::AbstractMatrix, ::Val{false}) qr!(A, NoPivot()) - -_qreltype(::Type{T}) where T = typeof(zero(T)/sqrt(abs2(one(T)))) - -""" - qr(A, pivot = NoPivot(); blocksize) -> F - -Compute the QR factorization of the matrix `A`: an orthogonal (or unitary if `A` is -complex-valued) matrix `Q`, and an upper triangular matrix `R` such that - -```math -A = Q R -``` - -The returned object `F` stores the factorization in a packed format: - - - if `pivot == ColumnNorm()` then `F` is a [`QRPivoted`](@ref) object, - - - otherwise if the element type of `A` is a BLAS type ([`Float32`](@ref), [`Float64`](@ref), - `ComplexF32` or `ComplexF64`), then `F` is a [`QRCompactWY`](@ref) object, - - - otherwise `F` is a [`QR`](@ref) object. - -The individual components of the decomposition `F` can be retrieved via property accessors: - - - `F.Q`: the orthogonal/unitary matrix `Q` - - `F.R`: the upper triangular matrix `R` - - `F.p`: the permutation vector of the pivot ([`QRPivoted`](@ref) only) - - `F.P`: the permutation matrix of the pivot ([`QRPivoted`](@ref) only) - -!!! note - Each reference to the upper triangular factor via `F.R` allocates a new array. - It is therefore advisable to cache that array, say, by `R = F.R` and continue working - with `R`. - -Iterating the decomposition produces the components `Q`, `R`, and if extant `p`. - -The following functions are available for the `QR` objects: [`inv`](@ref), [`size`](@ref), -and [`\\`](@ref). When `A` is rectangular, `\\` will return a least squares -solution and if the solution is not unique, the one with smallest norm is returned. When -`A` is not full rank, factorization with (column) pivoting is required to obtain a minimum -norm solution. - -Multiplication with respect to either full/square or non-full/square `Q` is allowed, i.e. both `F.Q*F.R` -and `F.Q*A` are supported. A `Q` matrix can be converted into a regular matrix with -[`Matrix`](@ref). This operation returns the "thin" Q factor, i.e., if `A` is `m`×`n` with `m>=n`, then -`Matrix(F.Q)` yields an `m`×`n` matrix with orthonormal columns. 
To retrieve the "full" Q factor, an -`m`×`m` orthogonal matrix, use `F.Q*I` or `collect(F.Q)`. If `m<=n`, then `Matrix(F.Q)` yields an `m`×`m` -orthogonal matrix. - -The block size for QR decomposition can be specified by keyword argument -`blocksize :: Integer` when `pivot == NoPivot()` and `A isa StridedMatrix{<:BlasFloat}`. -It is ignored when `blocksize > minimum(size(A))`. See [`QRCompactWY`](@ref). - -!!! compat "Julia 1.4" - The `blocksize` keyword argument requires Julia 1.4 or later. - -# Examples -```jldoctest -julia> A = [3.0 -6.0; 4.0 -8.0; 0.0 1.0] -3×2 Matrix{Float64}: - 3.0 -6.0 - 4.0 -8.0 - 0.0 1.0 - -julia> F = qr(A) -LinearAlgebra.QRCompactWY{Float64, Matrix{Float64}, Matrix{Float64}} -Q factor: 3×3 LinearAlgebra.QRCompactWYQ{Float64, Matrix{Float64}, Matrix{Float64}} -R factor: -2×2 Matrix{Float64}: - -5.0 10.0 - 0.0 -1.0 - -julia> F.Q * F.R == A -true -``` - -!!! note - `qr` returns multiple types because LAPACK uses several representations - that minimize the memory storage requirements of products of Householder - elementary reflectors, so that the `Q` and `R` matrices can be stored - compactly rather than two separate dense matrices. -""" -function qr(A::AbstractMatrix{T}, arg...; kwargs...) where T - require_one_based_indexing(A) - AA = copy_similar(A, _qreltype(T)) - return _qr(AA, arg...; kwargs...) -end -# TODO: remove in Julia v2.0 -@deprecate qr(A::AbstractMatrix, ::Val{false}; kwargs...) qr(A, NoPivot(); kwargs...) -@deprecate qr(A::AbstractMatrix, ::Val{true}; kwargs...) qr(A, ColumnNorm(); kwargs...) - -# allow packages like SparseArrays.jl to hook into here and redirect to out-of-place `qr` -_qr(A::AbstractMatrix, args...; kwargs...) = qr!(A, args...; kwargs...) - -qr(x::Number) = qr(fill(x,1,1)) -function qr(v::AbstractVector) - require_one_based_indexing(v) - qr(reshape(v, (length(v), 1))) -end - -# Conversions -QR{T}(A::QR) where {T} = QR(convert(AbstractMatrix{T}, A.factors), convert(Vector{T}, A.τ)) -Factorization{T}(A::QR{T}) where {T} = A -Factorization{T}(A::QR) where {T} = QR{T}(A) -QRCompactWY{T}(A::QRCompactWY) where {T} = QRCompactWY(convert(AbstractMatrix{T}, A.factors), convert(AbstractMatrix{T}, A.T)) -Factorization{T}(A::QRCompactWY{T}) where {T} = A -Factorization{T}(A::QRCompactWY) where {T} = QRCompactWY{T}(A) -AbstractMatrix(F::Union{QR,QRCompactWY}) = F.Q * F.R -AbstractArray(F::Union{QR,QRCompactWY}) = AbstractMatrix(F) -Matrix(F::Union{QR,QRCompactWY}) = Array(AbstractArray(F)) -Array(F::Union{QR,QRCompactWY}) = Matrix(F) -QRPivoted{T}(A::QRPivoted) where {T} = QRPivoted(convert(AbstractMatrix{T}, A.factors), convert(Vector{T}, A.τ), A.jpvt) -Factorization{T}(A::QRPivoted{T}) where {T} = A -Factorization{T}(A::QRPivoted) where {T} = QRPivoted{T}(A) -AbstractMatrix(F::QRPivoted) = (F.Q * F.R)[:,invperm(F.p)] -AbstractArray(F::QRPivoted) = AbstractMatrix(F) -Matrix(F::QRPivoted) = Array(AbstractArray(F)) -Array(F::QRPivoted) = Matrix(F) - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::Union{QR, QRCompactWY, QRPivoted}) - summary(io, F); println(io) - print(io, "Q factor: ") - show(io, mime, F.Q) - println(io, "\nR factor:") - show(io, mime, F.R) - if F isa QRPivoted - println(io, "\npermutation:") - show(io, mime, F.p) - end -end - -function getproperty(F::QR, d::Symbol) - m, n = size(F) - if d === :R - return triu!(getfield(F, :factors)[1:min(m,n), 1:n]) - elseif d === :Q - return QRPackedQ(getfield(F, :factors), F.τ) - else - getfield(F, d) - end -end -function getproperty(F::QRCompactWY, d::Symbol) - m, n = size(F) - if 
d === :R - return triu!(getfield(F, :factors)[1:min(m,n), 1:n]) - elseif d === :Q - return QRCompactWYQ(getfield(F, :factors), F.T) - else - getfield(F, d) - end -end -Base.propertynames(F::Union{QR,QRCompactWY}, private::Bool=false) = - (:R, :Q, (private ? fieldnames(typeof(F)) : ())...) - -function getproperty(F::QRPivoted{T}, d::Symbol) where T - m, n = size(F) - if d === :R - return triu!(getfield(F, :factors)[1:min(m,n), 1:n]) - elseif d === :Q - return QRPackedQ(getfield(F, :factors), F.τ) - elseif d === :p - return getfield(F, :jpvt) - elseif d === :P - p = F.p - n = length(p) - P = zeros(T, n, n) - for i in 1:n - P[p[i],i] = one(T) - end - return P - else - getfield(F, d) - end -end -Base.propertynames(F::QRPivoted, private::Bool=false) = - (:R, :Q, :p, :P, (private ? fieldnames(typeof(F)) : ())...) - -transpose(F::Union{QR{<:Real},QRPivoted{<:Real},QRCompactWY{<:Real}}) = F' -transpose(::Union{QR,QRPivoted,QRCompactWY}) = - throw(ArgumentError("transpose of QR decomposition is not supported, consider using adjoint")) - -size(F::Union{QR,QRCompactWY,QRPivoted}) = size(getfield(F, :factors)) -size(F::Union{QR,QRCompactWY,QRPivoted}, dim::Integer) = size(getfield(F, :factors), dim) - - -function ldiv!(A::QRCompactWY{T}, b::AbstractVector{T}) where {T} - require_one_based_indexing(b) - m, n = size(A) - ldiv!(UpperTriangular(view(A.factors, 1:min(m,n), 1:n)), view(lmul!(adjoint(A.Q), b), 1:size(A, 2))) - return b -end -function ldiv!(A::QRCompactWY{T}, B::AbstractMatrix{T}) where {T} - require_one_based_indexing(B) - m, n = size(A) - ldiv!(UpperTriangular(view(A.factors, 1:min(m,n), 1:n)), view(lmul!(adjoint(A.Q), B), 1:size(A, 2), 1:size(B, 2))) - return B -end - -function rank(A::QRPivoted; atol::Real=0, rtol::Real=min(size(A)...) * eps(real(float(one(eltype(A.Q))))) * iszero(atol)) - m = min(size(A)...) - m == 0 && return 0 - tol = max(atol, rtol*abs(A.R[1,1])) - return something(findfirst(i -> abs(A.R[i,i]) <= tol, 1:m), m+1) - 1 -end - -# Julia implementation similar to xgelsy -function ldiv!(A::QRPivoted{T,<:StridedMatrix}, B::AbstractMatrix{T}, rcond::Real) where {T<:BlasFloat} - require_one_based_indexing(B) - m, n = size(A) - - if m > size(B, 1) || n > size(B, 1) - throw(DimensionMismatch(lazy"B has leading dimension $(size(B, 1)) but needs at least $(max(m, n))")) - end - - if length(A.factors) == 0 || length(B) == 0 - return B, 0 - end - - @inbounds begin - smin = smax = abs(A.factors[1]) - - if smax == 0 - return fill!(B, 0), 0 - end - - mn = min(m, n) - - # allocate temporary work space - tmp = Vector{T}(undef, 2mn) - wmin = view(tmp, 1:mn) - wmax = view(tmp, mn+1:2mn) - - rnk = 1 - wmin[1] = 1 - wmax[1] = 1 - - while rnk < mn - i = rnk + 1 - - smin, s1, c1 = LAPACK.laic1!(2, view(wmin, 1:rnk), smin, view(A.factors, 1:rnk, i), A.factors[i,i]) - smax, s2, c2 = LAPACK.laic1!(1, view(wmax, 1:rnk), smax, view(A.factors, 1:rnk, i), A.factors[i,i]) - - if smax*rcond > smin - break - end - - for j in 1:rnk - wmin[j] *= s1 - wmax[j] *= s2 - end - wmin[i] = c1 - wmax[i] = c2 - - rnk += 1 - end - - if rnk < n - C, τ = LAPACK.tzrzf!(A.factors[1:rnk, :]) - work = vec(C) - else - C, τ = A.factors, A.τ - work = resize!(tmp, n) - end - - lmul!(adjoint(A.Q), view(B, 1:m, :)) - ldiv!(UpperTriangular(view(C, 1:rnk, 1:rnk)), view(B, 1:rnk, :)) - - if rnk < n - B[rnk+1:n,:] .= zero(T) - LAPACK.ormrz!('L', T <: Complex ? 
'C' : 'T', C, τ, view(B, 1:n, :)) - end - - for j in axes(B, 2) - for i in 1:n - work[A.p[i]] = B[i,j] - end - for i in 1:n - B[i,j] = work[i] - end - end - end - - return B, rnk -end - -ldiv!(A::QRPivoted{T,<:StridedMatrix}, B::AbstractVector{T}) where {T<:BlasFloat} = - vec(ldiv!(A, reshape(B, length(B), 1))) -ldiv!(A::QRPivoted{T,<:StridedMatrix}, B::AbstractMatrix{T}) where {T<:BlasFloat} = - ldiv!(A, B, min(size(A)...)*eps(real(T)))[1] - -function _wide_qr_ldiv!(A::QR{T}, B::AbstractMatrix{T}) where T - m, n = size(A) - minmn = min(m,n) - mB, nB = size(B) - lmul!(adjoint(A.Q), view(B, 1:m, :)) - R = A.R # makes a copy, used as a buffer below - @inbounds begin - if n > m # minimum norm solution - τ = zeros(T,m) - for k = m:-1:1 # Trapezoid to triangular by elementary operation - x = view(R, k, [k; m + 1:n]) - τk = reflector!(x) - τ[k] = conj(τk) - for i = 1:k - 1 - vRi = R[i,k] - for j = m + 1:n - vRi += R[i,j]*x[j - m + 1]' - end - vRi *= τk - R[i,k] -= vRi - for j = m + 1:n - R[i,j] -= vRi*x[j - m + 1] - end - end - end - end - ldiv!(UpperTriangular(view(R, :, 1:minmn)), view(B, 1:minmn, :)) - if n > m # Apply elementary transformation to solution - B[m + 1:mB,1:nB] .= zero(T) - for j = 1:nB - for k = 1:m - vBj = B[k,j]' - for i = m + 1:n - vBj += B[i,j]'*R[k,i]' - end - vBj *= τ[k] - B[k,j] -= vBj' - for i = m + 1:n - B[i,j] -= R[k,i]'*vBj' - end - end - end - end - end - return B -end - - -function ldiv!(A::QR{T}, B::AbstractMatrix{T}) where T - m, n = size(A) - m < n && return _wide_qr_ldiv!(A, B) - - lmul!(adjoint(A.Q), view(B, 1:m, :)) - R = A.factors - ldiv!(UpperTriangular(view(R,1:n,:)), view(B, 1:n, :)) - return B -end -function ldiv!(A::QR, B::AbstractVector) - ldiv!(A, reshape(B, length(B), 1)) - return B -end - -function ldiv!(A::QRPivoted, b::AbstractVector) - ldiv!(QR(A.factors,A.τ), b) - b[1:size(A.factors, 2)] = view(b, 1:size(A.factors, 2))[invperm(A.jpvt)] - b -end -function ldiv!(A::QRPivoted, B::AbstractMatrix) - ldiv!(QR(A.factors, A.τ), B) - B[1:size(A.factors, 2),:] = view(B, 1:size(A.factors, 2), :)[invperm(A.jpvt),:] - B -end - -function _apply_permutation!(F::QRPivoted, B::AbstractVecOrMat) - # Apply permutation but only to the top part of the solution vector since - # it's padded with zeros for underdetermined problems - B[1:length(F.p), :] = B[F.p, :] - return B -end -_apply_permutation!(::Factorization, B::AbstractVecOrMat) = B - -function ldiv!(Fadj::AdjointFactorization{<:Any,<:Union{QR,QRCompactWY,QRPivoted}}, B::AbstractVecOrMat) - require_one_based_indexing(B) - m, n = size(Fadj) - - # We don't allow solutions overdetermined systems - if m > n - throw(DimensionMismatch("overdetermined systems are not supported")) - end - if n != size(B, 1) - throw(DimensionMismatch("inputs should have the same number of rows")) - end - F = parent(Fadj) - - B = _apply_permutation!(F, B) - - # For underdetermined system, the triangular solve should only be applied to the top - # part of B that contains the rhs. For square problems, the view corresponds to B itself - ldiv!(LowerTriangular(adjoint(F.R)), view(B, 1:size(F.R, 2), :)) - lmul!(F.Q, B) - - return B -end - -# With a real lhs and complex rhs with the same precision, we can reinterpret the complex -# rhs as a real rhs with twice the number of columns. 
- -# convenience methods to compute the return size correctly for vectors and matrices -_ret_size(A::Factorization, b::AbstractVector) = (max(size(A, 2), length(b)),) -_ret_size(A::Factorization, B::AbstractMatrix) = (max(size(A, 2), size(B, 1)), size(B, 2)) - -function (\)(A::Union{QR{T},QRCompactWY{T},QRPivoted{T}}, BIn::VecOrMat{Complex{T}}) where T<:BlasReal - require_one_based_indexing(BIn) - m, n = size(A) - m == size(BIn, 1) || throw(DimensionMismatch(lazy"left hand side has $m rows, but right hand side has $(size(BIn,1)) rows")) - -# |z1|z3| reinterpret |x1|x2|x3|x4| transpose |x1|y1| reshape |x1|y1|x3|y3| -# |z2|z4| -> |y1|y2|y3|y4| -> |x2|y2| -> |x2|y2|x4|y4| -# |x3|y3| -# |x4|y4| - B = reshape(copy(transpose(reinterpret(T, reshape(BIn, (1, length(BIn)))))), size(BIn, 1), 2*size(BIn, 2)) - - X = _zeros(T, B, n) - X[1:size(B, 1), :] = B - - ldiv!(A, X) - -# |z1|z3| reinterpret |x1|x2|x3|x4| transpose |x1|y1| reshape |x1|y1|x3|y3| -# |z2|z4| <- |y1|y2|y3|y4| <- |x2|y2| <- |x2|y2|x4|y4| -# |x3|y3| -# |x4|y4| - XX = reshape(collect(reinterpret(Complex{T}, copy(transpose(reshape(X, div(length(X), 2), 2))))), _ret_size(A, BIn)) - return _cut_B(XX, 1:n) -end - -##TODO: Add methods for rank(A::QRP{T}) and adjust the (\) method accordingly -## Add rcond methods for Cholesky, LU, QR and QRP types -## Lower priority: Add LQ, QL and RQ factorizations - -# FIXME! Should add balancing option through xgebal diff --git a/stdlib/LinearAlgebra/src/schur.jl b/stdlib/LinearAlgebra/src/schur.jl deleted file mode 100644 index 7257544ff872e..0000000000000 --- a/stdlib/LinearAlgebra/src/schur.jl +++ /dev/null @@ -1,449 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# Schur decomposition -""" - Schur <: Factorization - -Matrix factorization type of the Schur factorization of a matrix `A`. This is the -return type of [`schur(_)`](@ref), the corresponding matrix factorization function. - -If `F::Schur` is the factorization object, the (quasi) triangular Schur factor can -be obtained via either `F.Schur` or `F.T` and the orthogonal/unitary Schur vectors -via `F.vectors` or `F.Z` such that `A = F.vectors * F.Schur * F.vectors'`. The -eigenvalues of `A` can be obtained with `F.values`. - -Iterating the decomposition produces the components `F.T`, `F.Z`, and `F.values`. - -# Examples -```jldoctest -julia> A = [5. 7.; -2. -4.] 
-2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> F = schur(A) -Schur{Float64, Matrix{Float64}, Vector{Float64}} -T factor: -2×2 Matrix{Float64}: - 3.0 9.0 - 0.0 -2.0 -Z factor: -2×2 Matrix{Float64}: - 0.961524 0.274721 - -0.274721 0.961524 -eigenvalues: -2-element Vector{Float64}: - 3.0 - -2.0 - -julia> F.vectors * F.Schur * F.vectors' -2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> t, z, vals = F; # destructuring via iteration - -julia> t == F.T && z == F.Z && vals == F.values -true -``` -""" -struct Schur{Ty,S<:AbstractMatrix,C<:AbstractVector} <: Factorization{Ty} - T::S - Z::S - values::C - Schur{Ty,S,C}(T::AbstractMatrix{Ty}, Z::AbstractMatrix{Ty}, - values::AbstractVector) where {Ty,S,C} = new(T, Z, values) -end -Schur(T::AbstractMatrix{Ty}, Z::AbstractMatrix{Ty}, values::AbstractVector) where {Ty} = - Schur{Ty, typeof(T), typeof(values)}(T, Z, values) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(Schur{Ty,S}(T::AbstractMatrix{Ty}, Z::AbstractMatrix{Ty}, - values::AbstractVector) where {Ty,S}, - Schur{Ty,S,typeof(values)}(T, Z, values)) - -# iteration for destructuring into components -Base.iterate(S::Schur) = (S.T, Val(:Z)) -Base.iterate(S::Schur, ::Val{:Z}) = (S.Z, Val(:values)) -Base.iterate(S::Schur, ::Val{:values}) = (S.values, Val(:done)) -Base.iterate(S::Schur, ::Val{:done}) = nothing - -""" - schur!(A) -> F::Schur - -Same as [`schur`](@ref) but uses the input argument `A` as workspace. - -# Examples -```jldoctest -julia> A = [5. 7.; -2. -4.] -2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> F = schur!(A) -Schur{Float64, Matrix{Float64}, Vector{Float64}} -T factor: -2×2 Matrix{Float64}: - 3.0 9.0 - 0.0 -2.0 -Z factor: -2×2 Matrix{Float64}: - 0.961524 0.274721 - -0.274721 0.961524 -eigenvalues: -2-element Vector{Float64}: - 3.0 - -2.0 - -julia> A -2×2 Matrix{Float64}: - 3.0 9.0 - 0.0 -2.0 -``` -""" -schur!(A::StridedMatrix{<:BlasFloat}) = Schur(LinearAlgebra.LAPACK.gees!('V', A)...) - -schur!(A::UpperHessenberg{T}) where {T<:BlasFloat} = Schur(LinearAlgebra.LAPACK.hseqr!(parent(A))...) - -""" - schur(A) -> F::Schur - -Computes the Schur factorization of the matrix `A`. The (quasi) triangular Schur factor can -be obtained from the `Schur` object `F` with either `F.Schur` or `F.T` and the -orthogonal/unitary Schur vectors can be obtained with `F.vectors` or `F.Z` such that -`A = F.vectors * F.Schur * F.vectors'`. The eigenvalues of `A` can be obtained with `F.values`. - -For real `A`, the Schur factorization is "quasitriangular", which means that it -is upper-triangular except with 2×2 diagonal blocks for any conjugate pair -of complex eigenvalues; this allows the factorization to be purely real even -when there are complex eigenvalues. To obtain the (complex) purely upper-triangular -Schur factorization from a real quasitriangular factorization, you can use -`Schur{Complex}(schur(A))`. - -Iterating the decomposition produces the components `F.T`, `F.Z`, and `F.values`. - -# Examples -```jldoctest -julia> A = [5. 7.; -2. -4.] 
-2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> F = schur(A) -Schur{Float64, Matrix{Float64}, Vector{Float64}} -T factor: -2×2 Matrix{Float64}: - 3.0 9.0 - 0.0 -2.0 -Z factor: -2×2 Matrix{Float64}: - 0.961524 0.274721 - -0.274721 0.961524 -eigenvalues: -2-element Vector{Float64}: - 3.0 - -2.0 - -julia> F.vectors * F.Schur * F.vectors' -2×2 Matrix{Float64}: - 5.0 7.0 - -2.0 -4.0 - -julia> t, z, vals = F; # destructuring via iteration - -julia> t == F.T && z == F.Z && vals == F.values -true -``` -""" -schur(A::AbstractMatrix{T}) where {T} = schur!(copy_similar(A, eigtype(T))) -schur(A::UpperHessenberg{T}) where {T} = schur!(copy_similar(A, eigtype(T))) -function schur(A::RealHermSymComplexHerm) - F = eigen(A; sortby=nothing) - return Schur(typeof(F.vectors)(Diagonal(F.values)), F.vectors, F.values) -end -function schur(A::Union{UnitUpperTriangular{T},UpperTriangular{T}}) where {T} - t = eigtype(T) - Z = copy_similar(A, t) - return Schur(Z, Matrix{t}(I, size(A)), convert(Vector{t}, diag(A))) -end -function schur(A::Union{UnitLowerTriangular{T},LowerTriangular{T}}) where {T} - t = eigtype(T) - # double flip the matrix A - Z = copy_similar(A, t) - reverse!(reshape(Z, :)) - # construct "reverse" identity - n = size(A, 1) - J = zeros(t, n, n) - for i in axes(J, 2) - J[n+1-i, i] = oneunit(t) - end - return Schur(Z, J, convert(Vector{t}, diag(A))) -end -function schur(A::Bidiagonal{T}) where {T} - t = eigtype(T) - if A.uplo == 'U' - return Schur(Matrix{t}(A), Matrix{t}(I, size(A)), Vector{t}(A.dv)) - else # A.uplo == 'L' - # construct "reverse" identity - n = size(A, 1) - J = zeros(t, n, n) - for i in axes(J, 2) - J[n+1-i, i] = oneunit(t) - end - dv = reverse!(Vector{t}(A.dv)) - ev = reverse!(Vector{t}(A.ev)) - return Schur(Matrix{t}(Bidiagonal(dv, ev, 'U')), J, dv) - end -end - -function getproperty(F::Schur, d::Symbol) - if d === :Schur - return getfield(F, :T) - elseif d === :vectors - return getfield(F, :Z) - else - getfield(F, d) - end -end - -Base.propertynames(F::Schur) = - (:Schur, :vectors, fieldnames(typeof(F))...) - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::Schur) - summary(io, F); println(io) - println(io, "T factor:") - show(io, mime, F.T) - println(io, "\nZ factor:") - show(io, mime, F.Z) - println(io, "\neigenvalues:") - show(io, mime, F.values) -end - -# convert a (standard-form) quasi-triangular real Schur factorization into a -# triangular complex Schur factorization. -# -# Based on the "triangularize" function from GenericSchur.jl, -# released under the MIT "Expat" license by @RalphAS -function Schur{CT}(S::Schur{<:Real}) where {CT<:Complex} - Tr = S.T - T = CT.(Tr) - Z = CT.(S.Z) - n = size(T,1) - for j=n:-1:2 - if !iszero(Tr[j,j-1]) - # We want a unitary similarity transform from - # ┌ ┐ ┌ ┐ - # │a b│ │w₁ x│ - # │c a│ into │0 w₂│ where bc < 0 (a,b,c real) - # └ ┘ └ ┘ - # If we write it as - # ┌ ┐ - # │u v'│ - # │-v u'│ - # └ ┘ - # and make the Ansatz that u is real (so v is imaginary), - # we arrive at a Givens rotation: - # θ = atan(sqrt(-Tr[j,j-1]/Tr[j-1,j])) - # s,c = sin(θ), cos(θ) - s = sqrt(abs(Tr[j,j-1])) - c = sqrt(abs(Tr[j-1,j])) - r = hypot(s,c) - G = Givens(j-1,j,complex(c/r),im*(-s/r)) - lmul!(G,T) - rmul!(T,G') - rmul!(Z,G') - end - end - return Schur(triu!(T),Z,diag(T)) -end - -Schur{Complex}(S::Schur{<:Complex}) = S -Schur{T}(S::Schur{T}) where {T} = S -Schur{T}(S::Schur) where {T} = Schur(T.(S.T), T.(S.Z), T <: Real && !(eltype(S.values) <: Real) ? 
complex(T).(S.values) : T.(S.values)) - -""" - ordschur!(F::Schur, select::Union{Vector{Bool},BitVector}) -> F::Schur - -Same as [`ordschur`](@ref) but overwrites the factorization `F`. -""" -function ordschur!(schur::Schur, select::Union{Vector{Bool},BitVector}) - _, _, vals = _ordschur!(schur.T, schur.Z, select) - schur.values[:] = vals - return schur -end - -_ordschur(T::StridedMatrix{Ty}, Z::StridedMatrix{Ty}, select::Union{Vector{Bool},BitVector}) where {Ty<:BlasFloat} = - _ordschur!(copy(T), copy(Z), select) - -_ordschur!(T::StridedMatrix{Ty}, Z::StridedMatrix{Ty}, select::Union{Vector{Bool},BitVector}) where {Ty<:BlasFloat} = - LinearAlgebra.LAPACK.trsen!(convert(Vector{BlasInt}, select), T, Z)[1:3] - -""" - ordschur(F::Schur, select::Union{Vector{Bool},BitVector}) -> F::Schur - -Reorders the Schur factorization `F` of a matrix `A = Z*T*Z'` according to the logical array -`select` returning the reordered factorization `F` object. The selected eigenvalues appear -in the leading diagonal of `F.Schur` and the corresponding leading columns of -`F.vectors` form an orthogonal/unitary basis of the corresponding right invariant -subspace. In the real case, a complex conjugate pair of eigenvalues must be either both -included or both excluded via `select`. -""" -ordschur(schur::Schur, select::Union{Vector{Bool},BitVector}) = - Schur(_ordschur(schur.T, schur.Z, select)...) - -""" - GeneralizedSchur <: Factorization - -Matrix factorization type of the generalized Schur factorization of two matrices -`A` and `B`. This is the return type of [`schur(_, _)`](@ref), the corresponding -matrix factorization function. - -If `F::GeneralizedSchur` is the factorization object, the (quasi) triangular Schur -factors can be obtained via `F.S` and `F.T`, the left unitary/orthogonal Schur -vectors via `F.left` or `F.Q`, and the right unitary/orthogonal Schur vectors can -be obtained with `F.right` or `F.Z` such that `A=F.left*F.S*F.right'` and -`B=F.left*F.T*F.right'`. The generalized eigenvalues of `A` and `B` can be obtained -with `F.α./F.β`. - -Iterating the decomposition produces the components `F.S`, `F.T`, `F.Q`, `F.Z`, -`F.α`, and `F.β`. 
-""" -struct GeneralizedSchur{Ty,M<:AbstractMatrix,A<:AbstractVector,B<:AbstractVector{Ty}} <: Factorization{Ty} - S::M - T::M - α::A - β::B - Q::M - Z::M - function GeneralizedSchur{Ty,M,A,B}(S::AbstractMatrix{Ty}, T::AbstractMatrix{Ty}, - alpha::AbstractVector, beta::AbstractVector{Ty}, - Q::AbstractMatrix{Ty}, Z::AbstractMatrix{Ty}) where {Ty,M,A,B} - new{Ty,M,A,B}(S, T, alpha, beta, Q, Z) - end -end -function GeneralizedSchur(S::AbstractMatrix{Ty}, T::AbstractMatrix{Ty}, - alpha::AbstractVector, beta::AbstractVector{Ty}, - Q::AbstractMatrix{Ty}, Z::AbstractMatrix{Ty}) where Ty - GeneralizedSchur{Ty, typeof(S), typeof(alpha), typeof(beta)}(S, T, alpha, beta, Q, Z) -end -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(GeneralizedSchur{Ty,M}(S::AbstractMatrix{Ty}, T::AbstractMatrix{Ty}, - alpha::AbstractVector, beta::AbstractVector{Ty}, - Q::AbstractMatrix{Ty}, Z::AbstractMatrix{Ty}) where {Ty,M}, - GeneralizedSchur{Ty,M,typeof(alpha),typeof(beta)}(S, T, alpha, beta, Q, Z)) - -# iteration for destructuring into components -Base.iterate(S::GeneralizedSchur) = (S.S, Val(:T)) -Base.iterate(S::GeneralizedSchur, ::Val{:T}) = (S.T, Val(:Q)) -Base.iterate(S::GeneralizedSchur, ::Val{:Q}) = (S.Q, Val(:Z)) -Base.iterate(S::GeneralizedSchur, ::Val{:Z}) = (S.Z, Val(:α)) -Base.iterate(S::GeneralizedSchur, ::Val{:α}) = (S.α, Val(:β)) -Base.iterate(S::GeneralizedSchur, ::Val{:β}) = (S.β, Val(:done)) -Base.iterate(S::GeneralizedSchur, ::Val{:done}) = nothing - -""" - schur!(A::StridedMatrix, B::StridedMatrix) -> F::GeneralizedSchur - -Same as [`schur`](@ref) but uses the input matrices `A` and `B` as workspace. -""" -function schur!(A::StridedMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} - if LAPACK.version() < v"3.6.0" - GeneralizedSchur(LinearAlgebra.LAPACK.gges!('V', 'V', A, B)...) - else - GeneralizedSchur(LinearAlgebra.LAPACK.gges3!('V', 'V', A, B)...) - end -end - -""" - schur(A, B) -> F::GeneralizedSchur - -Computes the Generalized Schur (or QZ) factorization of the matrices `A` and `B`. The -(quasi) triangular Schur factors can be obtained from the `Schur` object `F` with `F.S` -and `F.T`, the left unitary/orthogonal Schur vectors can be obtained with `F.left` or -`F.Q` and the right unitary/orthogonal Schur vectors can be obtained with `F.right` or -`F.Z` such that `A=F.left*F.S*F.right'` and `B=F.left*F.T*F.right'`. The -generalized eigenvalues of `A` and `B` can be obtained with `F.α./F.β`. - -Iterating the decomposition produces the components `F.S`, `F.T`, `F.Q`, `F.Z`, -`F.α`, and `F.β`. -""" -function schur(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} - S = promote_type(eigtype(TA), TB) - return schur!(copy_similar(A, S), copy_similar(B, S)) -end - -""" - ordschur!(F::GeneralizedSchur, select::Union{Vector{Bool},BitVector}) -> F::GeneralizedSchur - -Same as `ordschur` but overwrites the factorization `F`. 
-""" -function ordschur!(gschur::GeneralizedSchur, select::Union{Vector{Bool},BitVector}) - _, _, α, β, _, _ = _ordschur!(gschur.S, gschur.T, gschur.Q, gschur.Z, select) - gschur.α[:] = α - gschur.β[:] = β - return gschur -end - -_ordschur(S::StridedMatrix{Ty}, T::StridedMatrix{Ty}, Q::StridedMatrix{Ty}, - Z::StridedMatrix{Ty}, select::Union{Vector{Bool},BitVector}) where {Ty<:BlasFloat} = - _ordschur!(copy(S), copy(T), copy(Q), copy(Z), select) - -_ordschur!(S::StridedMatrix{Ty}, T::StridedMatrix{Ty}, Q::StridedMatrix{Ty}, - Z::StridedMatrix{Ty}, select::Union{Vector{Bool},BitVector}) where {Ty<:BlasFloat} = - LinearAlgebra.LAPACK.tgsen!(convert(Vector{BlasInt}, select), S, T, Q, Z) - -""" - ordschur(F::GeneralizedSchur, select::Union{Vector{Bool},BitVector}) -> F::GeneralizedSchur - -Reorders the Generalized Schur factorization `F` of a matrix pair `(A, B) = (Q*S*Z', Q*T*Z')` -according to the logical array `select` and returns a GeneralizedSchur object `F`. The -selected eigenvalues appear in the leading diagonal of both `F.S` and `F.T`, and the -left and right orthogonal/unitary Schur vectors are also reordered such that -`(A, B) = F.Q*(F.S, F.T)*F.Z'` still holds and the generalized eigenvalues of `A` -and `B` can still be obtained with `F.α./F.β`. -""" -ordschur(gschur::GeneralizedSchur, select::Union{Vector{Bool},BitVector}) = - GeneralizedSchur(_ordschur(gschur.S, gschur.T, gschur.Q, gschur.Z, select)...) - -function getproperty(F::GeneralizedSchur, d::Symbol) - if d === :values - return getfield(F, :α) ./ getfield(F, :β) - elseif d === :alpha - return getfield(F, :α) - elseif d === :beta - return getfield(F, :β) - elseif d === :left - return getfield(F, :Q) - elseif d === :right - return getfield(F, :Z) - else - getfield(F, d) - end -end - -Base.propertynames(F::GeneralizedSchur) = - (:values, :left, :right, fieldnames(typeof(F))...) - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::GeneralizedSchur) - summary(io, F); println(io) - println(io, "S factor:") - show(io, mime, F.S) - println(io, "\nT factor:") - show(io, mime, F.T) - println(io, "\nQ factor:") - show(io, mime, F.Q) - println(io, "\nZ factor:") - show(io, mime, F.Z) - println(io, "\nα:") - show(io, mime, F.α) - println(io, "\nβ:") - show(io, mime, F.β) -end - -# Conversion -AbstractMatrix(F::Schur) = (F.Z * F.T) * F.Z' -AbstractArray(F::Schur) = AbstractMatrix(F) -Matrix(F::Schur) = Array(AbstractArray(F)) -Array(F::Schur) = Matrix(F) - -copy(F::Schur) = Schur(copy(F.T), copy(F.Z), copy(F.values)) -copy(F::GeneralizedSchur) = GeneralizedSchur(copy(F.S), copy(F.T), copy(F.α), copy(F.β), copy(F.Q), copy(F.Z)) diff --git a/stdlib/LinearAlgebra/src/special.jl b/stdlib/LinearAlgebra/src/special.jl deleted file mode 100644 index c61586a810140..0000000000000 --- a/stdlib/LinearAlgebra/src/special.jl +++ /dev/null @@ -1,595 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -# Methods operating on different special matrix types - -# Interconversion between special matrix types - -# conversions from Diagonal to other special matrix types -Bidiagonal(A::Diagonal) = Bidiagonal(A.diag, fill!(similar(A.diag, length(A.diag)-1), 0), :U) -SymTridiagonal(A::Diagonal) = SymTridiagonal(A.diag, fill!(similar(A.diag, length(A.diag)-1), 0)) -Tridiagonal(A::Diagonal) = Tridiagonal(fill!(similar(A.diag, length(A.diag)-1), 0), A.diag, - fill!(similar(A.diag, length(A.diag)-1), 0)) - -# conversions from Bidiagonal to other special matrix types -Diagonal(A::Bidiagonal) = Diagonal(A.dv) -SymTridiagonal(A::Bidiagonal) = - iszero(A.ev) ? SymTridiagonal(A.dv, A.ev) : - throw(ArgumentError("matrix cannot be represented as SymTridiagonal")) -function Tridiagonal(A::Bidiagonal) - # ensure that the types are identical, even if zero returns a different type - z = oftype(A.ev, zero(A.ev)) - Tridiagonal(A.uplo == 'U' ? z : A.ev, A.dv, A.uplo == 'U' ? A.ev : z) -end - -_diagview(S::SymTridiagonal{<:Number}) = S.dv -_diagview(S::SymTridiagonal) = diagview(S) - -# conversions from SymTridiagonal to other special matrix types -Diagonal(A::SymTridiagonal) = Diagonal(_diagview(A)) - -# These can fail when ev has the same length as dv -# TODO: Revisit when a good solution for #42477 is found -Bidiagonal(A::SymTridiagonal{<:Number}) = - iszero(A.ev) ? Bidiagonal(A.dv, A.ev, :U) : - throw(ArgumentError("matrix cannot be represented as Bidiagonal")) -Tridiagonal(A::SymTridiagonal{<:Number}) = - Tridiagonal(A.ev, A.dv, A.ev) - -# conversions from Tridiagonal to other special matrix types -Diagonal(A::Tridiagonal) = Diagonal(A.d) -Bidiagonal(A::Tridiagonal) = - iszero(A.dl) ? Bidiagonal(A.d, A.du, :U) : - iszero(A.du) ? Bidiagonal(A.d, A.dl, :L) : - throw(ArgumentError("matrix cannot be represented as Bidiagonal")) - -# conversions from AbstractTriangular to special matrix types -Bidiagonal(A::AbstractTriangular) = - isbanded(A, 0, 1) ? Bidiagonal(diag(A, 0), diag(A, 1), :U) : # is upper bidiagonal - isbanded(A, -1, 0) ? Bidiagonal(diag(A, 0), diag(A, -1), :L) : # is lower bidiagonal - throw(ArgumentError("matrix cannot be represented as Bidiagonal")) - -_lucopy(A::Bidiagonal, T) = copymutable_oftype(Tridiagonal(A), T) -_lucopy(A::Diagonal, T) = copymutable_oftype(Tridiagonal(A), T) -function _lucopy(A::SymTridiagonal, T) - du = copy_similar(_evview(A), T) - dl = copy.(transpose.(du)) - d = copy_similar(A.dv, T) - return Tridiagonal(dl, d, du) -end - -const ConvertibleSpecialMatrix = Union{Diagonal,Bidiagonal,SymTridiagonal,Tridiagonal,AbstractTriangular} -const PossibleTriangularMatrix = Union{Diagonal, Bidiagonal, AbstractTriangular} - -convert(::Type{T}, m::ConvertibleSpecialMatrix) where {T<:Diagonal} = m isa T ? m : - isdiag(m) ? T(m)::T : throw(ArgumentError("matrix cannot be represented as Diagonal")) -convert(::Type{T}, m::ConvertibleSpecialMatrix) where {T<:SymTridiagonal} = m isa T ? m : - issymmetric(m) && isbanded(m, -1, 1) ? T(m)::T : throw(ArgumentError("matrix cannot be represented as SymTridiagonal")) -convert(::Type{T}, m::ConvertibleSpecialMatrix) where {T<:Tridiagonal} = m isa T ? m : - isbanded(m, -1, 1) ? T(m)::T : throw(ArgumentError("matrix cannot be represented as Tridiagonal")) - -convert(::Type{T}, m::Union{LowerTriangular,UnitLowerTriangular}) where {T<:LowerTriangular} = m isa T ? m : T(m)::T -convert(::Type{T}, m::Union{UpperTriangular,UnitUpperTriangular}) where {T<:UpperTriangular} = m isa T ? 
m : T(m)::T - -convert(::Type{T}, m::PossibleTriangularMatrix) where {T<:LowerTriangular} = m isa T ? m : - istril(m) ? T(m)::T : throw(ArgumentError("matrix cannot be represented as LowerTriangular")) -convert(::Type{T}, m::PossibleTriangularMatrix) where {T<:UpperTriangular} = m isa T ? m : - istriu(m) ? T(m)::T : throw(ArgumentError("matrix cannot be represented as UpperTriangular")) - -# Constructs two method definitions taking into account (assumed) commutativity -# e.g. @commutative f(x::S, y::T) where {S,T} = x+y is the same is defining -# f(x::S, y::T) where {S,T} = x+y -# f(y::T, x::S) where {S,T} = f(x, y) -macro commutative(myexpr) - @assert Base.is_function_def(myexpr) # Make sure it is a function definition - y = copy(myexpr.args[1].args[2:end]) - reverse!(y) - reversed_call = Expr(:(=), Expr(:call,myexpr.args[1].args[1],y...), myexpr.args[1]) - esc(Expr(:block, myexpr, reversed_call)) -end - -for op in (:+, :-) - for (matrixtype, uplo, converttype) in ((:UpperTriangular, 'U', :UpperTriangular), - (:UnitUpperTriangular, 'U', :UpperTriangular), - (:LowerTriangular, 'L', :LowerTriangular), - (:UnitLowerTriangular, 'L', :LowerTriangular)) - @eval begin - function ($op)(A::$matrixtype, B::Bidiagonal) - if B.uplo == $uplo - ($op)(A, convert($converttype, B)) - else - ($op).(A, B) - end - end - - function ($op)(A::Bidiagonal, B::$matrixtype) - if A.uplo == $uplo - ($op)(convert($converttype, A), B) - else - ($op).(A, B) - end - end - end - end -end - -(*)(Da::Diagonal, A::BandedMatrix, Db::Diagonal) = _tri_matmul(Da, A, Db) - -# disambiguation between triangular and banded matrices, banded ones "dominate" -_mul!(C::AbstractMatrix, A::AbstractTriangular, B::BandedMatrix, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) -_mul!(C::AbstractMatrix, A::BandedMatrix, B::AbstractTriangular, alpha::Number, beta::Number) = - @stable_muladdmul _mul!(C, A, B, MulAddMul(alpha, beta)) - -function *(H::UpperHessenberg, B::Bidiagonal) - T = promote_op(matprod, eltype(H), eltype(B)) - A = mul!(similar(H, T, size(H)), H, B) - return B.uplo == 'U' ? UpperHessenberg(A) : A -end -function *(B::Bidiagonal, H::UpperHessenberg) - T = promote_op(matprod, eltype(B), eltype(H)) - A = mul!(similar(H, T, size(H)), B, H) - return B.uplo == 'U' ? UpperHessenberg(A) : A -end - -function /(H::UpperHessenberg, B::Bidiagonal) - T = typeof(oneunit(eltype(H))/oneunit(eltype(B))) - A = _rdiv!(similar(H, T, size(H)), H, B) - return B.uplo == 'U' ? UpperHessenberg(A) : A -end - -function \(B::Bidiagonal, H::UpperHessenberg) - T = typeof(oneunit(eltype(B))\oneunit(eltype(H))) - A = ldiv!(similar(H, T, size(H)), B, H) - return B.uplo == 'U' ? UpperHessenberg(A) : A -end - -# specialized +/- for structured matrices. If these are removed, it falls -# back to broadcasting which has ~2-10x speed regressions. -# For the other structure matrix pairs, broadcasting works well. - -# For structured matrix types with different non-zero diagonals the underlying -# representations must be promoted to the same type. -# For example, in Diagonal + Bidiagonal only the main diagonal is touched so -# the off diagonal could be a different type after the operation resulting in -# an error. 
See issue #28994 - -@commutative function (+)(A::Bidiagonal, B::Diagonal) - newdv = A.dv + B.diag - Bidiagonal(newdv, typeof(newdv)(A.ev), A.uplo) -end - -function (-)(A::Bidiagonal, B::Diagonal) - newdv = A.dv - B.diag - Bidiagonal(newdv, typeof(newdv)(A.ev), A.uplo) -end - -function (-)(A::Diagonal, B::Bidiagonal) - newdv = A.diag - B.dv - Bidiagonal(newdv, typeof(newdv)(-B.ev), B.uplo) -end - -# Return a SymTridiagonal if the elements of `newdv` are -# statically known to be symmetric. Return a Tridiagonal otherwise -function _symtri_or_tri(dl, d, du) - new_du = oftype(d, du) - new_dl = oftype(d, dl) - if symmetric_type(eltype(d)) == eltype(d) - SymTridiagonal(d, new_du) - else - Tridiagonal(new_dl, d, new_du) - end -end - -@commutative function (+)(A::Diagonal, B::SymTridiagonal) - newdv = A.diag + _diagview(B) - _symtri_or_tri(_evview_transposed(B), newdv, _evview(B)) -end - -function (-)(A::Diagonal, B::SymTridiagonal) - newdv = A.diag - _diagview(B) - _symtri_or_tri(-_evview_transposed(B), newdv, -_evview(B)) -end - -function (-)(A::SymTridiagonal, B::Diagonal) - newdv = _diagview(A) - B.diag - _symtri_or_tri(_evview_transposed(A), newdv, _evview(A)) -end - -# this set doesn't have the aforementioned problem -_evview_transposed(S::SymTridiagonal{<:Number}) = _evview(S) -_evview_transposed(S::SymTridiagonal) = transpose.(_evview(S)) -@commutative function (+)(A::Tridiagonal, B::SymTridiagonal) - Tridiagonal(A.dl+_evview_transposed(B), A.d+_diagview(B), A.du+_evview(B)) -end -function -(A::Tridiagonal, B::SymTridiagonal) - Tridiagonal(A.dl-_evview_transposed(B), A.d-_diagview(B), A.du-_evview(B)) -end -function -(A::SymTridiagonal, B::Tridiagonal) - Tridiagonal(_evview_transposed(A)-B.dl, _diagview(A)-B.d, _evview(A)-B.du) -end - -@commutative function (+)(A::Diagonal, B::Tridiagonal) - newdv = A.diag + B.d - Tridiagonal(typeof(newdv)(B.dl), newdv, typeof(newdv)(B.du)) -end - -function (-)(A::Diagonal, B::Tridiagonal) - newdv = A.diag - B.d - Tridiagonal(typeof(newdv)(-B.dl), newdv, typeof(newdv)(-B.du)) -end - -function (-)(A::Tridiagonal, B::Diagonal) - newdv = A.d - B.diag - Tridiagonal(typeof(newdv)(A.dl), newdv, typeof(newdv)(A.du)) -end - -@commutative function (+)(A::Bidiagonal, B::Tridiagonal) - newdv = A.dv + B.d - Tridiagonal((A.uplo == 'U' ? (typeof(newdv)(B.dl), newdv, A.ev+B.du) : (A.ev+B.dl, newdv, typeof(newdv)(B.du)))...) -end - -function (-)(A::Bidiagonal, B::Tridiagonal) - newdv = A.dv - B.d - Tridiagonal((A.uplo == 'U' ? (typeof(newdv)(-B.dl), newdv, A.ev-B.du) : (A.ev-B.dl, newdv, typeof(newdv)(-B.du)))...) -end - -function (-)(A::Tridiagonal, B::Bidiagonal) - newdv = A.d - B.dv - Tridiagonal((B.uplo == 'U' ? (typeof(newdv)(A.dl), newdv, A.du-B.ev) : (A.dl-B.ev, newdv, typeof(newdv)(A.du)))...) -end - -@commutative function (+)(A::Bidiagonal, B::SymTridiagonal) - newdv = A.dv + _diagview(B) - Tridiagonal((A.uplo == 'U' ? (typeof(newdv)(_evview_transposed(B)), newdv, A.ev+_evview(B)) : (A.ev+_evview_transposed(B), newdv, typeof(newdv)(_evview(B))))...) -end - -function (-)(A::Bidiagonal, B::SymTridiagonal) - newdv = A.dv - _diagview(B) - Tridiagonal((A.uplo == 'U' ? (typeof(newdv)(-_evview_transposed(B)), newdv, A.ev-_evview(B)) : (A.ev-_evview_transposed(B), newdv, typeof(newdv)(-_evview(B))))...) -end - -function (-)(A::SymTridiagonal, B::Bidiagonal) - newdv = _diagview(A) - B.dv - Tridiagonal((B.uplo == 'U' ? (typeof(newdv)(_evview_transposed(A)), newdv, _evview(A)-B.ev) : (_evview_transposed(A)-B.ev, newdv, typeof(newdv)(_evview(A))))...) 
-end - -@commutative function (+)(A::Tridiagonal, B::UniformScaling) - newd = A.d .+ Ref(B) - Tridiagonal(typeof(newd)(A.dl), newd, typeof(newd)(A.du)) -end - -@commutative function (+)(A::SymTridiagonal, B::UniformScaling) - newdv = A.dv .+ Ref(B) - SymTridiagonal(newdv, typeof(newdv)(A.ev)) -end - -@commutative function (+)(A::Bidiagonal, B::UniformScaling) - newdv = A.dv .+ Ref(B) - Bidiagonal(newdv, typeof(newdv)(A.ev), A.uplo) -end - -@commutative function (+)(A::Diagonal, B::UniformScaling) - Diagonal(A.diag .+ Ref(B)) -end - -# StructuredMatrix - UniformScaling = StructuredMatrix + (-UniformScaling) => -# no need to define reversed order -function (-)(A::UniformScaling, B::Tridiagonal) - d = Ref(A) .- B.d - Tridiagonal(convert(typeof(d), -B.dl), d, convert(typeof(d), -B.du)) -end -function (-)(A::UniformScaling, B::SymTridiagonal) - dv = Ref(A) .- B.dv - SymTridiagonal(dv, convert(typeof(dv), -_evview(B))) -end -function (-)(A::UniformScaling, B::Bidiagonal) - dv = Ref(A) .- B.dv - Bidiagonal(dv, convert(typeof(dv), -B.ev), B.uplo) -end -function (-)(A::UniformScaling, B::Diagonal) - Diagonal(Ref(A) .- B.diag) -end - -for f in (:+, :-) - @eval function $f(D::Diagonal{<:Number}, S::Symmetric) - uplo = sym_uplo(S.uplo) - return Symmetric(parentof_applytri($f, Symmetric(D, uplo), S), uplo) - end - @eval function $f(S::Symmetric, D::Diagonal{<:Number}) - uplo = sym_uplo(S.uplo) - return Symmetric(parentof_applytri($f, S, Symmetric(D, uplo)), uplo) - end - @eval function $f(D::Diagonal{<:Real}, H::Hermitian) - uplo = sym_uplo(H.uplo) - return Hermitian(parentof_applytri($f, Hermitian(D, uplo), H), uplo) - end - @eval function $f(H::Hermitian, D::Diagonal{<:Real}) - uplo = sym_uplo(H.uplo) - return Hermitian(parentof_applytri($f, H, Hermitian(D, uplo)), uplo) - end -end - -## Diagonal construction from UniformScaling -Diagonal{T}(s::UniformScaling, m::Integer) where {T} = Diagonal{T}(fill(T(s.λ), m)) -Diagonal(s::UniformScaling, m::Integer) = Diagonal{eltype(s)}(s, m) - -Base.muladd(A::Union{Diagonal, UniformScaling}, B::Union{Diagonal, UniformScaling}, z::Union{Diagonal, UniformScaling}) = - Diagonal(_diag_or_value(A) .* _diag_or_value(B) .+ _diag_or_value(z)) - -_diag_or_value(A::Diagonal) = A.diag -_diag_or_value(A::UniformScaling) = A.λ - -# fill[stored]! 
methods -fillstored!(A::Diagonal, x) = (fill!(A.diag, x); A) -fillstored!(A::Bidiagonal, x) = (fill!(A.dv, x); fill!(A.ev, x); A) -fillstored!(A::Tridiagonal, x) = (fill!(A.dl, x); fill!(A.d, x); fill!(A.du, x); A) -fillstored!(A::SymTridiagonal, x) = (fill!(A.dv, x); fill!(A.ev, x); A) - -_small_enough(A::Union{Diagonal, Bidiagonal}) = size(A, 1) <= 1 -_small_enough(A::Tridiagonal) = size(A, 1) <= 2 -_small_enough(A::SymTridiagonal) = size(A, 1) <= 2 - -function fill!(A::Union{Diagonal,Bidiagonal,Tridiagonal}, x) - xT = convert(eltype(A), x) - (iszero(xT) || _small_enough(A)) && return fillstored!(A, xT) - throw(ArgumentError(lazy"array of type $(typeof(A)) and size $(size(A)) can - not be filled with $x, since some of its entries are constrained.")) -end -function fill!(A::SymTridiagonal, x) - issymmetric(x) || throw(ArgumentError("cannot fill a SymTridiagonal with an asymmetric value")) - xT = convert(eltype(A), x) - (iszero(xT) || _small_enough(A)) && return fillstored!(A, xT) - throw(ArgumentError(lazy"array of type $(typeof(A)) and size $(size(A)) can - not be filled with $x, since some of its entries are constrained.")) -end - -one(D::Diagonal) = Diagonal(one.(D.diag)) -one(A::Bidiagonal{T}) where T = Bidiagonal(fill!(similar(A.dv, typeof(one(T))), one(T)), fill!(similar(A.ev, typeof(one(T))), zero(one(T))), A.uplo) -one(A::Tridiagonal{T}) where T = Tridiagonal(fill!(similar(A.du, typeof(one(T))), zero(one(T))), fill!(similar(A.d, typeof(one(T))), one(T)), fill!(similar(A.dl, typeof(one(T))), zero(one(T)))) -one(A::SymTridiagonal{T}) where T = SymTridiagonal(fill!(similar(A.dv, typeof(one(T))), one(T)), fill!(similar(A.ev, typeof(one(T))), zero(one(T)))) -for t in (:LowerTriangular, :UnitLowerTriangular, :UpperTriangular, :UnitUpperTriangular) - @eval one(A::$t) = $t(one(parent(A))) - @eval oneunit(A::$t) = $t(oneunit(parent(A))) -end - -zero(D::Diagonal) = Diagonal(zero.(D.diag)) -oneunit(D::Diagonal) = Diagonal(oneunit.(D.diag)) - -isdiag(A::HermOrSym{<:Any,<:Diagonal}) = isdiag(parent(A)) -dot(x::AbstractVector, A::RealHermSymComplexSym{<:Real,<:Diagonal}, y::AbstractVector) = - dot(x, A.data, y) - -# O(N) implementations using the banded structure -function copyto!(dest::BandedMatrix, src::BandedMatrix) - if axes(dest) == axes(src) - _copyto_banded!(dest, src) - else - @invoke copyto!(dest::AbstractMatrix, src::AbstractMatrix) - end - return dest -end -function _copyto_banded!(T::Tridiagonal, D::Diagonal) - T.d .= D.diag - T.dl .= diagview(D, -1) - T.du .= diagview(D, 1) - return T -end -function _copyto_banded!(SymT::SymTridiagonal, D::Diagonal) - issymmetric(D) || throw(ArgumentError("cannot copy a non-symmetric Diagonal matrix to a SymTridiagonal")) - SymT.dv .= D.diag - _ev = _evview(SymT) - _ev .= diagview(D, 1) - return SymT -end -function _copyto_banded!(B::Bidiagonal, D::Diagonal) - B.dv .= D.diag - B.ev .= diagview(D, _offdiagind(B.uplo)) - return B -end -function _copyto_banded!(D::Diagonal, B::Bidiagonal) - isdiag(B) || - throw(ArgumentError("cannot copy a Bidiagonal with a non-zero off-diagonal band to a Diagonal")) - D.diag .= B.dv - return D -end -function _copyto_banded!(D::Diagonal, T::Tridiagonal) - isdiag(T) || - throw(ArgumentError("cannot copy a Tridiagonal with a non-zero off-diagonal band to a Diagonal")) - D.diag .= T.d - return D -end -function _copyto_banded!(D::Diagonal, SymT::SymTridiagonal) - isdiag(SymT) || - throw(ArgumentError("cannot copy a SymTridiagonal with a non-zero off-diagonal band to a Diagonal")) - # we broadcast identity for numbers 
using the fact that symmetric(x::Number) = x - # this potentially allows us to access faster copyto! paths - _symmetric = eltype(SymT) <: Number ? identity : symmetric - D.diag .= _symmetric.(SymT.dv) - return D -end -function _copyto_banded!(T::Tridiagonal, B::Bidiagonal) - T.d .= B.dv - if B.uplo == 'U' - T.du .= B.ev - T.dl .= diagview(B,-1) - else - T.dl .= B.ev - T.du .= diagview(B, 1) - end - return T -end -function _copyto_banded!(SymT::SymTridiagonal, B::Bidiagonal) - issymmetric(B) || throw(ArgumentError("cannot copy a non-symmetric Bidiagonal matrix to a SymTridiagonal")) - SymT.dv .= B.dv - _ev = _evview(SymT) - _ev .= B.ev - return SymT -end -function _copyto_banded!(B::Bidiagonal, T::Tridiagonal) - if B.uplo == 'U' && !iszero(T.dl) - throw(ArgumentError("cannot copy a Tridiagonal with a non-zero subdiagonal to a Bidiagonal with uplo=:U")) - elseif B.uplo == 'L' && !iszero(T.du) - throw(ArgumentError("cannot copy a Tridiagonal with a non-zero superdiagonal to a Bidiagonal with uplo=:L")) - end - B.dv .= T.d - B.ev .= B.uplo == 'U' ? T.du : T.dl - return B -end -function _copyto_banded!(B::Bidiagonal, SymT::SymTridiagonal) - isdiag(SymT) || - throw(ArgumentError("cannot copy a SymTridiagonal with a non-zero off-diagonal band to a Bidiagonal")) - # we broadcast identity for numbers using the fact that symmetric(x::Number) = x - # this potentially allows us to access faster copyto! paths - _symmetric = eltype(SymT) <: Number ? identity : symmetric - B.dv .= _symmetric.(SymT.dv) - return B -end - -# equals and approx equals methods for structured matrices -# SymTridiagonal == Tridiagonal is already defined in tridiag.jl - -==(A::Diagonal, B::Bidiagonal) = iszero(B.ev) && A.diag == B.dv -==(A::Diagonal, B::SymTridiagonal) = iszero(_evview(B)) && A.diag == _diagview(B) -==(B::Bidiagonal, A::Diagonal) = A == B -==(A::Diagonal, B::Tridiagonal) = iszero(B.dl) && iszero(B.du) && A.diag == B.d -==(B::Tridiagonal, A::Diagonal) = A == B - -function ==(A::Bidiagonal, B::Tridiagonal) - if A.uplo == 'U' - return iszero(B.dl) && A.dv == B.d && A.ev == B.du - else - return iszero(B.du) && A.dv == B.d && A.ev == B.dl - end -end -==(B::Tridiagonal, A::Bidiagonal) = A == B - -==(A::Bidiagonal, B::SymTridiagonal) = iszero(_evview(B)) && iszero(A.ev) && A.dv == _diagview(B) -==(B::SymTridiagonal, A::Bidiagonal) = A == B - -# TODO: remove these deprecations (used by SparseArrays in the past) -const _DenseConcatGroup = Union{} -const _SpecialArrays = Union{} - -promote_to_array_type(::Tuple) = Matrix - -# promote_to_arrays(n,k, T, A...) promotes any UniformScaling matrices -# in A to matrices of type T and sizes given by n[k:end]. n is an array -# so that the same promotion code can be used for hvcat. We pass the type T -# so that we can re-use this code for sparse-matrix hcat etcetera. -promote_to_arrays_(n::Int, ::Type, a::Number) = a -promote_to_arrays_(n::Int, ::Type{Matrix}, J::UniformScaling{T}) where {T} = Matrix(J, n, n) -promote_to_arrays_(n::Int, ::Type, A::AbstractArray) = A -promote_to_arrays_(n::Int, ::Type, A::AbstractQ) = collect(A) -promote_to_arrays(n,k, ::Type) = () -promote_to_arrays(n,k, ::Type{T}, A) where {T} = (promote_to_arrays_(n[k], T, A),) -promote_to_arrays(n,k, ::Type{T}, A, B) where {T} = - (promote_to_arrays_(n[k], T, A), promote_to_arrays_(n[k+1], T, B)) -promote_to_arrays(n,k, ::Type{T}, A, B, C) where {T} = - (promote_to_arrays_(n[k], T, A), promote_to_arrays_(n[k+1], T, B), promote_to_arrays_(n[k+2], T, C)) -promote_to_arrays(n,k, ::Type{T}, A, B, Cs...) 
where {T} = - (promote_to_arrays_(n[k], T, A), promote_to_arrays_(n[k+1], T, B), promote_to_arrays(n,k+2, T, Cs...)...) - -_us2number(A) = A -_us2number(J::UniformScaling) = J.λ - -for (f, _f, dim, name) in ((:hcat, :_hcat, 1, "rows"), (:vcat, :_vcat, 2, "cols")) - @eval begin - @inline $f(A::Union{AbstractArray,AbstractQ,UniformScaling}...) = $_f(A...) - # if there's a Number present, J::UniformScaling must be 1x1-dimensional - @inline $f(A::Union{AbstractArray,AbstractQ,UniformScaling,Number}...) = $f(map(_us2number, A)...) - function $_f(A::Union{AbstractArray,AbstractQ,UniformScaling,Number}...; array_type = promote_to_array_type(A)) - n = -1 - for a in A - if !isa(a, UniformScaling) - require_one_based_indexing(a) - na = size(a,$dim) - n >= 0 && n != na && - throw(DimensionMismatch(string("number of ", $name, - " of each array must match (got ", n, " and ", na, ")"))) - n = na - end - end - n == -1 && throw(ArgumentError($("$f of only UniformScaling objects cannot determine the matrix size"))) - return cat(promote_to_arrays(fill(n, length(A)), 1, array_type, A...)..., dims=Val(3-$dim)) - end - end -end - -hvcat(rows::Tuple{Vararg{Int}}, A::Union{AbstractArray,AbstractQ,UniformScaling}...) = _hvcat(rows, A...) -hvcat(rows::Tuple{Vararg{Int}}, A::Union{AbstractArray,AbstractQ,UniformScaling,Number}...) = _hvcat(rows, A...) -function _hvcat(rows::Tuple{Vararg{Int}}, A::Union{AbstractArray,AbstractQ,UniformScaling,Number}...; array_type = promote_to_array_type(A)) - require_one_based_indexing(A...) - nr = length(rows) - sum(rows) == length(A) || throw(ArgumentError("mismatch between row sizes and number of arguments")) - n = fill(-1, length(A)) - needcols = false # whether we also need to infer some sizes from the column count - j = 0 - for i = 1:nr # infer UniformScaling sizes from row counts, if possible: - ni = -1 # number of rows in this block-row, -1 indicates unknown - for k = 1:rows[i] - if !isa(A[j+k], UniformScaling) - na = size(A[j+k], 1) - ni >= 0 && ni != na && - throw(DimensionMismatch("mismatch in number of rows")) - ni = na - end - end - if ni >= 0 - for k = 1:rows[i] - n[j+k] = ni - end - else # row consisted only of UniformScaling objects - needcols = true - end - j += rows[i] - end - if needcols # some sizes still unknown, try to infer from column count - nc = -1 - j = 0 - for i = 1:nr - nci = 0 - rows[i] > 0 && n[j+1] == -1 && (j += rows[i]; continue) - for k = 1:rows[i] - nci += isa(A[j+k], UniformScaling) ? n[j+k] : size(A[j+k], 2) - end - nc >= 0 && nc != nci && throw(DimensionMismatch("mismatch in number of columns")) - nc = nci - j += rows[i] - end - nc == -1 && throw(ArgumentError("sizes of UniformScalings could not be inferred")) - j = 0 - for i = 1:nr - if rows[i] > 0 && n[j+1] == -1 # this row consists entirely of UniformScalings - nci, r = divrem(nc, rows[i]) - r != 0 && throw(DimensionMismatch("indivisible UniformScaling sizes")) - for k = 1:rows[i] - n[j+k] = nci - end - end - j += rows[i] - end - end - Amat = promote_to_arrays(n, 1, array_type, A...) - # We have two methods for promote_to_array_type, one returning Matrix and - # another one returning SparseMatrixCSC (in SparseArrays.jl). In the dense - # case, we cannot call hvcat for the promoted UniformScalings because this - # causes a stack overflow. In the sparse case, however, we cannot call - # typed_hvcat because we need a sparse output. - if array_type == Matrix - return typed_hvcat(promote_eltype(Amat...), rows, Amat...) - else - return hvcat(rows, Amat...) 
- end -end - -# factorizations -function cholesky(S::RealHermSymComplexHerm{<:Real,<:SymTridiagonal}, ::NoPivot = NoPivot(); check::Bool = true) - T = choltype(S) - B = Bidiagonal{T}(diag(S, 0), diag(S, S.uplo == 'U' ? 1 : -1), sym_uplo(S.uplo)) - cholesky!(Hermitian(B, sym_uplo(S.uplo)), NoPivot(); check = check) -end - -# istriu/istril for triangular wrappers of structured matrices -_istril(A::LowerTriangular{<:Any, <:BandedMatrix}, k) = istril(parent(A), k) -_istriu(A::UpperTriangular{<:Any, <:BandedMatrix}, k) = istriu(parent(A), k) -_istriu(A::UpperHessenberg{<:Any, <:BandedMatrix}, k) = istriu(parent(A), k) diff --git a/stdlib/LinearAlgebra/src/structuredbroadcast.jl b/stdlib/LinearAlgebra/src/structuredbroadcast.jl deleted file mode 100644 index 9a4d55fd58bf0..0000000000000 --- a/stdlib/LinearAlgebra/src/structuredbroadcast.jl +++ /dev/null @@ -1,297 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -## Broadcast styles -import Base.Broadcast -using Base.Broadcast: DefaultArrayStyle, Broadcasted - -struct StructuredMatrixStyle{T} <: Broadcast.AbstractArrayStyle{2} end -StructuredMatrixStyle{T}(::Val{2}) where {T} = StructuredMatrixStyle{T}() -StructuredMatrixStyle{T}(::Val{N}) where {T,N} = Broadcast.DefaultArrayStyle{N}() - -const StructuredMatrix{T} = Union{Diagonal{T},Bidiagonal{T},SymTridiagonal{T},Tridiagonal{T},LowerTriangular{T},UnitLowerTriangular{T},UpperTriangular{T},UnitUpperTriangular{T}} -for ST in (Diagonal,Bidiagonal,SymTridiagonal,Tridiagonal,LowerTriangular,UnitLowerTriangular,UpperTriangular,UnitUpperTriangular) - @eval Broadcast.BroadcastStyle(::Type{<:$ST}) = $(StructuredMatrixStyle{ST}()) -end - -# Promotion of broadcasts between structured matrices. This is slightly unusual -# as we define them symmetrically. This allows us to have a fallback to DefaultArrayStyle{2}(). -# Diagonal can cavort with all the other structured matrix types. 
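A rough sketch of how this promotion plays out in practice (an illustration only; the result types are assumed from the style rules defined below, not doctested):

```julia
using LinearAlgebra

D  = Diagonal([1.0, 2.0, 3.0])
Bu = Bidiagonal([1.0, 2.0, 3.0], [4.0, 5.0], :U)
Bl = Bidiagonal([1.0, 2.0, 3.0], [4.0, 5.0], :L)

D .+ D    # stays a Diagonal
D .+ Bu   # Diagonal defers to the other operand: result is a Bidiagonal
Bu .+ Bl  # Bidiagonals with mixed uplo promote to a Tridiagonal
```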
-# Bidiagonal doesn't know if it's upper or lower, so it becomes Tridiagonal -Broadcast.BroadcastStyle(::StructuredMatrixStyle{Diagonal}, ::StructuredMatrixStyle{Diagonal}) = - StructuredMatrixStyle{Diagonal}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{Diagonal}, ::StructuredMatrixStyle{Bidiagonal}) = - StructuredMatrixStyle{Bidiagonal}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{Diagonal}, ::StructuredMatrixStyle{<:Union{SymTridiagonal,Tridiagonal}}) = - StructuredMatrixStyle{Tridiagonal}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{Diagonal}, ::StructuredMatrixStyle{<:Union{LowerTriangular,UnitLowerTriangular}}) = - StructuredMatrixStyle{LowerTriangular}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{Diagonal}, ::StructuredMatrixStyle{<:Union{UpperTriangular,UnitUpperTriangular}}) = - StructuredMatrixStyle{UpperTriangular}() - -Broadcast.BroadcastStyle(::StructuredMatrixStyle{Bidiagonal}, ::StructuredMatrixStyle{Diagonal}) = - StructuredMatrixStyle{Bidiagonal}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{Bidiagonal}, ::StructuredMatrixStyle{<:Union{Bidiagonal,SymTridiagonal,Tridiagonal}}) = - StructuredMatrixStyle{Tridiagonal}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{SymTridiagonal}, ::StructuredMatrixStyle{<:Union{Diagonal,Bidiagonal,SymTridiagonal,Tridiagonal}}) = - StructuredMatrixStyle{Tridiagonal}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{Tridiagonal}, ::StructuredMatrixStyle{<:Union{Diagonal,Bidiagonal,SymTridiagonal,Tridiagonal}}) = - StructuredMatrixStyle{Tridiagonal}() - -Broadcast.BroadcastStyle(::StructuredMatrixStyle{LowerTriangular}, ::StructuredMatrixStyle{<:Union{Diagonal,LowerTriangular,UnitLowerTriangular}}) = - StructuredMatrixStyle{LowerTriangular}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{UpperTriangular}, ::StructuredMatrixStyle{<:Union{Diagonal,UpperTriangular,UnitUpperTriangular}}) = - StructuredMatrixStyle{UpperTriangular}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{UnitLowerTriangular}, ::StructuredMatrixStyle{<:Union{Diagonal,LowerTriangular,UnitLowerTriangular}}) = - StructuredMatrixStyle{LowerTriangular}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{UnitUpperTriangular}, ::StructuredMatrixStyle{<:Union{Diagonal,UpperTriangular,UnitUpperTriangular}}) = - StructuredMatrixStyle{UpperTriangular}() - -Broadcast.BroadcastStyle(::StructuredMatrixStyle{<:Union{LowerTriangular,UnitLowerTriangular}}, ::StructuredMatrixStyle{<:Union{UpperTriangular,UnitUpperTriangular}}) = - StructuredMatrixStyle{Matrix}() -Broadcast.BroadcastStyle(::StructuredMatrixStyle{<:Union{UpperTriangular,UnitUpperTriangular}}, ::StructuredMatrixStyle{<:Union{LowerTriangular,UnitLowerTriangular}}) = - StructuredMatrixStyle{Matrix}() - -# Make sure that `StructuredMatrixStyle{Matrix}` doesn't ever end up falling -# through and give back `DefaultArrayStyle{2}` -Broadcast.BroadcastStyle(T::StructuredMatrixStyle{Matrix}, ::StructuredMatrixStyle) = T -Broadcast.BroadcastStyle(::StructuredMatrixStyle, T::StructuredMatrixStyle{Matrix}) = T -Broadcast.BroadcastStyle(T::StructuredMatrixStyle{Matrix}, ::StructuredMatrixStyle{Matrix}) = T - -# All other combinations fall back to the default style -Broadcast.BroadcastStyle(::StructuredMatrixStyle, ::StructuredMatrixStyle) = DefaultArrayStyle{2}() - -# And a definition akin to similar using the structured type: -structured_broadcast_alloc(bc, ::Type{Diagonal}, ::Type{ElType}, n) where {ElType} = - Diagonal(Array{ElType}(undef, n)) -# Bidiagonal is tricky as we need to know if it's 
upper or lower. The promotion -# system will return Tridiagonal when there's more than one Bidiagonal, but when -# there's only one, we need to make figure out upper or lower -merge_uplos(::Nothing, ::Nothing) = nothing -merge_uplos(a, ::Nothing) = a -merge_uplos(::Nothing, b) = b -merge_uplos(a, b) = a == b ? a : 'T' - -find_uplo(a::Bidiagonal) = a.uplo -find_uplo(a) = nothing -find_uplo(bc::Broadcasted) = mapfoldl(find_uplo, merge_uplos, Broadcast.cat_nested(bc), init=nothing) - -function structured_broadcast_alloc(bc, ::Type{Bidiagonal}, ::Type{ElType}, n) where {ElType} - uplo = n > 0 ? find_uplo(bc) : 'U' - n1 = max(n - 1, 0) - if count_structedmatrix(Bidiagonal, bc) > 1 && uplo == 'T' - return Tridiagonal(Array{ElType}(undef, n1), Array{ElType}(undef, n), Array{ElType}(undef, n1)) - end - return Bidiagonal(Array{ElType}(undef, n),Array{ElType}(undef, n1), uplo) -end -structured_broadcast_alloc(bc, ::Type{SymTridiagonal}, ::Type{ElType}, n) where {ElType} = - SymTridiagonal(Array{ElType}(undef, n),Array{ElType}(undef, n-1)) -structured_broadcast_alloc(bc, ::Type{Tridiagonal}, ::Type{ElType}, n) where {ElType} = - Tridiagonal(Array{ElType}(undef, n-1),Array{ElType}(undef, n),Array{ElType}(undef, n-1)) -structured_broadcast_alloc(bc, ::Type{LowerTriangular}, ::Type{ElType}, n) where {ElType} = - LowerTriangular(Array{ElType}(undef, n, n)) -structured_broadcast_alloc(bc, ::Type{UpperTriangular}, ::Type{ElType}, n) where {ElType} = - UpperTriangular(Array{ElType}(undef, n, n)) -structured_broadcast_alloc(bc, ::Type{UnitLowerTriangular}, ::Type{ElType}, n) where {ElType} = - UnitLowerTriangular(Array{ElType}(undef, n, n)) -structured_broadcast_alloc(bc, ::Type{UnitUpperTriangular}, ::Type{ElType}, n) where {ElType} = - UnitUpperTriangular(Array{ElType}(undef, n, n)) -structured_broadcast_alloc(bc, ::Type{Matrix}, ::Type{ElType}, n) where {ElType} = - Array{ElType}(undef, n, n) - -# A _very_ limited list of structure-preserving functions known at compile-time. This list is -# derived from the formerly-implemented `broadcast` methods in 0.6. Note that this must -# preserve both zeros and ones (for Unit***erTriangular) and symmetry (for SymTridiagonal) -const TypeFuncs = Union{typeof(round),typeof(trunc),typeof(floor),typeof(ceil)} -isstructurepreserving(bc::Broadcasted) = isstructurepreserving(bc.f, bc.args...) -isstructurepreserving(::Union{typeof(abs),typeof(big)}, ::StructuredMatrix) = true -isstructurepreserving(::TypeFuncs, ::StructuredMatrix) = true -isstructurepreserving(::TypeFuncs, ::Ref{<:Type}, ::StructuredMatrix) = true -function isstructurepreserving(::typeof(Base.literal_pow), ::Ref{typeof(^)}, ::StructuredMatrix, ::Ref{Val{N}}) where N - return N isa Integer && N > 0 -end -isstructurepreserving(f, args...) = false - -""" - iszerodefined(T::Type) - -Return a `Bool` indicating whether `iszero` is well-defined for objects of type -`T`. By default, this function returns `false` unless `T <: Number`. Note that -this function may return `true` even if `zero(::T)` is not defined as long as -`iszero(::T)` has a method that does not requires `zero(::T)`. - -This function is used to determine if mapping the elements of an array with -a specific structure of nonzero elements preserve this structure. -For instance, it is used to determine whether the output of -`tuple.(Diagonal([1, 2]))` is `Diagonal([(1,), (2,)])` or -`[(1,) (0,); (0,) (2,)]`. For this, we need to determine whether `(0,)` is -considered to be zero. 
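For concreteness, a small sketch of the two outcomes described here (illustrative, not a doctest):

```julia
using LinearAlgebra

abs.(Diagonal([1, -2]))   # abs preserves the structural zeros, so the result is still a Diagonal
tuple.(Diagonal([1, 2]))  # (0,) is not recognized as zero, so this falls back to a dense Matrix of tuples
```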
`iszero((0,))` falls back to `(0,) == zero((0,))` which -fails as `zero(::Tuple{Int})` is not defined. However, -`iszerodefined(::Tuple{Int})` is `false` hence we falls back to the comparison -`(0,) == 0` which returns `false` and decides that the correct output is -`[(1,) (0,); (0,) (2,)]`. -""" -iszerodefined(::Type) = false -iszerodefined(::Type{<:Number}) = true -iszerodefined(::Type{<:AbstractArray{T}}) where T = iszerodefined(T) -iszerodefined(::Type{<:UniformScaling{T}}) where T = iszerodefined(T) - -count_structedmatrix(T, bc::Broadcasted) = sum(Base.Fix2(isa, T), Broadcast.cat_nested(bc); init = 0) - -""" - fzeropreserving(bc) -> Bool - -Return true if the broadcasted function call evaluates to zero for structural zeros of the -structured arguments. - -For trivial broadcasted values such as `bc::Number`, this reduces to `iszero(bc)`. -""" -function fzeropreserving(bc) - v = fzero(bc) - isnothing(v) && return false - v2 = something(v) - iszerodefined(typeof(v2)) ? iszero(v2) : isequal(v2, 0) -end - -# Like sparse matrices, we assume that the zero-preservation property of a broadcasted -# expression is stable. We can test the zero-preservability by applying the function -# in cases where all other arguments are known scalars against a zero from the structured -# matrix. If any non-structured matrix argument is not a known scalar, we give up. -fzero(x::Number) = Some(x) -fzero(::Type{T}) where T = Some(T) -fzero(r::Ref) = Some(r[]) -fzero(t::Tuple{Any}) = Some(only(t)) -fzero(S::StructuredMatrix) = Some(zero(eltype(S))) -fzero(::StructuredMatrix{<:AbstractMatrix{T}}) where {T<:Number} = Some(haszero(T) ? zero(T)*I : nothing) -fzero(x) = nothing -function fzero(bc::Broadcast.Broadcasted) - args = map(fzero, bc.args) - return any(isnothing, args) ? nothing : Some(bc.f(map(something, args)...)) -end - -function Base.similar(bc::Broadcasted{StructuredMatrixStyle{T}}, ::Type{ElType}) where {T,ElType} - inds = axes(bc) - fzerobc = fzeropreserving(bc) - if isstructurepreserving(bc) || (fzerobc && !(T <: Union{UnitLowerTriangular,UnitUpperTriangular})) - return structured_broadcast_alloc(bc, T, ElType, length(inds[1])) - elseif fzerobc && T <: UnitLowerTriangular - return similar(convert(Broadcasted{StructuredMatrixStyle{LowerTriangular}}, bc), ElType) - elseif fzerobc && T <: UnitUpperTriangular - return similar(convert(Broadcasted{StructuredMatrixStyle{UpperTriangular}}, bc), ElType) - end - return similar(convert(Broadcasted{DefaultArrayStyle{ndims(bc)}}, bc), ElType) -end - -isvalidstructbc(dest, bc::Broadcasted{T}) where {T<:StructuredMatrixStyle} = - Broadcast.combine_styles(dest, bc) === Broadcast.combine_styles(dest) && - (isstructurepreserving(bc) || fzeropreserving(bc)) - -isvalidstructbc(dest::Bidiagonal, bc::Broadcasted{StructuredMatrixStyle{Bidiagonal}}) = - (size(dest, 1) < 2 || find_uplo(bc) == dest.uplo) && - (isstructurepreserving(bc) || fzeropreserving(bc)) - -@inline function getindex(bc::Broadcasted, b::BandIndex) - @boundscheck checkbounds(bc, b) - @inbounds Broadcast._broadcast_getindex(bc, b) -end - -function Broadcast.newindex(A::StructuredMatrix, b::BandIndex) - # we use the fact that a StructuredMatrix is square, - # and we apply newindex to both the axes at once to obtain the result - size(A,1) > 1 ? b : BandIndex(0, 1) -end -# All structured matrices are square, and therefore they only broadcast out if they are size (1, 1) -Broadcast.newindex(D::StructuredMatrix, I::CartesianIndex{2}) = size(D) == (1,1) ? 
CartesianIndex(1,1) : I - -function copyto!(dest::Diagonal, bc::Broadcasted{<:StructuredMatrixStyle}) - isvalidstructbc(dest, bc) || return copyto!(dest, convert(Broadcasted{Nothing}, bc)) - axs = axes(dest) - axes(bc) == axs || Broadcast.throwdm(axes(bc), axs) - for i in axs[1] - dest.diag[i] = @inbounds bc[BandIndex(0, i)] - end - return dest -end - -function copyto!(dest::Bidiagonal, bc::Broadcasted{<:StructuredMatrixStyle}) - isvalidstructbc(dest, bc) || return copyto!(dest, convert(Broadcasted{Nothing}, bc)) - axs = axes(dest) - axes(bc) == axs || Broadcast.throwdm(axes(bc), axs) - for i in axs[1] - dest.dv[i] = @inbounds bc[BandIndex(0, i)] - end - if dest.uplo == 'U' - for i = 1:size(dest, 1)-1 - dest.ev[i] = @inbounds bc[BandIndex(1, i)] - end - else - for i = 1:size(dest, 1)-1 - dest.ev[i] = @inbounds bc[BandIndex(-1, i)] - end - end - return dest -end - -function copyto!(dest::SymTridiagonal, bc::Broadcasted{<:StructuredMatrixStyle}) - isvalidstructbc(dest, bc) || return copyto!(dest, convert(Broadcasted{Nothing}, bc)) - axs = axes(dest) - axes(bc) == axs || Broadcast.throwdm(axes(bc), axs) - for i in axs[1] - dest.dv[i] = @inbounds bc[BandIndex(0, i)] - end - for i = 1:size(dest, 1)-1 - v = @inbounds bc[BandIndex(1, i)] - v == transpose(@inbounds bc[BandIndex(-1, i)]) || - throw(ArgumentError(lazy"broadcasted assignment breaks symmetry between locations ($i, $(i+1)) and ($(i+1), $i)")) - dest.ev[i] = v - end - return dest -end - -function copyto!(dest::Tridiagonal, bc::Broadcasted{<:StructuredMatrixStyle}) - isvalidstructbc(dest, bc) || return copyto!(dest, convert(Broadcasted{Nothing}, bc)) - axs = axes(dest) - axes(bc) == axs || Broadcast.throwdm(axes(bc), axs) - for i in axs[1] - dest.d[i] = @inbounds bc[BandIndex(0, i)] - end - for i = 1:size(dest, 1)-1 - dest.du[i] = @inbounds bc[BandIndex(1, i)] - end - for i = 1:size(dest, 1)-1 - dest.dl[i] = @inbounds bc[BandIndex(-1, i)] - end - return dest -end - -function copyto!(dest::LowerTriangular, bc::Broadcasted{<:StructuredMatrixStyle}) - isvalidstructbc(dest, bc) || return copyto!(dest, convert(Broadcasted{Nothing}, bc)) - axs = axes(dest) - axes(bc) == axs || Broadcast.throwdm(axes(bc), axs) - for j in axs[2] - for i in j:axs[1][end] - @inbounds dest.data[i,j] = bc[CartesianIndex(i, j)] - end - end - return dest -end - -function copyto!(dest::UpperTriangular, bc::Broadcasted{<:StructuredMatrixStyle}) - isvalidstructbc(dest, bc) || return copyto!(dest, convert(Broadcasted{Nothing}, bc)) - axs = axes(dest) - axes(bc) == axs || Broadcast.throwdm(axes(bc), axs) - for j in axs[2] - for i in 1:j - @inbounds dest.data[i,j] = bc[CartesianIndex(i, j)] - end - end - return dest -end - -# We can also implement `map` and its promotion in terms of broadcast with a stricter dimension check -function map(f, A::StructuredMatrix, Bs::StructuredMatrix...) - sz = size(A) - for B in Bs - size(B) == sz || Base.throw_promote_shape_mismatch(sz, size(B)) - end - return f.(A, Bs...) -end diff --git a/stdlib/LinearAlgebra/src/svd.jl b/stdlib/LinearAlgebra/src/svd.jl deleted file mode 100644 index 7a88c4a6e14c4..0000000000000 --- a/stdlib/LinearAlgebra/src/svd.jl +++ /dev/null @@ -1,578 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# Singular Value Decomposition -""" - SVD <: Factorization - -Matrix factorization type of the singular value decomposition (SVD) of a matrix `A`. -This is the return type of [`svd(_)`](@ref), the corresponding matrix factorization function. 
- -If `F::SVD` is the factorization object, `U`, `S`, `V` and `Vt` can be obtained -via `F.U`, `F.S`, `F.V` and `F.Vt`, such that `A = U * Diagonal(S) * Vt`. -The singular values in `S` are sorted in descending order. - -Iterating the decomposition produces the components `U`, `S`, and `V`. - -# Examples -```jldoctest -julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] -4×5 Matrix{Float64}: - 1.0 0.0 0.0 0.0 2.0 - 0.0 0.0 3.0 0.0 0.0 - 0.0 0.0 0.0 0.0 0.0 - 0.0 2.0 0.0 0.0 0.0 - -julia> F = svd(A) -SVD{Float64, Float64, Matrix{Float64}, Vector{Float64}} -U factor: -4×4 Matrix{Float64}: - 0.0 1.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - 0.0 0.0 -1.0 0.0 -singular values: -4-element Vector{Float64}: - 3.0 - 2.23606797749979 - 2.0 - 0.0 -Vt factor: -4×5 Matrix{Float64}: - -0.0 0.0 1.0 -0.0 0.0 - 0.447214 0.0 0.0 0.0 0.894427 - 0.0 -1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 0.0 - -julia> F.U * Diagonal(F.S) * F.Vt -4×5 Matrix{Float64}: - 1.0 0.0 0.0 0.0 2.0 - 0.0 0.0 3.0 0.0 0.0 - 0.0 0.0 0.0 0.0 0.0 - 0.0 2.0 0.0 0.0 0.0 - -julia> u, s, v = F; # destructuring via iteration - -julia> u == F.U && s == F.S && v == F.V -true -``` -""" -struct SVD{T,Tr,M<:AbstractArray{T},C<:AbstractVector{Tr}} <: Factorization{T} - U::M - S::C - Vt::M - function SVD{T,Tr,M,C}(U, S, Vt) where {T,Tr,M<:AbstractArray{T},C<:AbstractVector{Tr}} - require_one_based_indexing(U, S, Vt) - new{T,Tr,M,C}(U, S, Vt) - end -end -SVD(U::AbstractArray{T}, S::AbstractVector{Tr}, Vt::AbstractArray{T}) where {T,Tr} = - SVD{T,Tr,typeof(U),typeof(S)}(U, S, Vt) -SVD{T}(U::AbstractArray, S::AbstractVector{Tr}, Vt::AbstractArray) where {T,Tr} = - SVD(convert(AbstractArray{T}, U), - convert(AbstractVector{Tr}, S), - convert(AbstractArray{T}, Vt)) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(SVD{T,Tr,M}(U::AbstractArray{T}, S::AbstractVector{Tr}, Vt::AbstractArray{T}) where {T,Tr,M}, - SVD{T,Tr,M,typeof(S)}(U, S, Vt)) - -SVD{T}(F::SVD) where {T} = SVD( - convert(AbstractMatrix{T}, F.U), - convert(AbstractVector{real(T)}, F.S), - convert(AbstractMatrix{T}, F.Vt)) -Factorization{T}(F::SVD) where {T} = SVD{T}(F) - -# iteration for destructuring into components -Base.iterate(S::SVD) = (S.U, Val(:S)) -Base.iterate(S::SVD, ::Val{:S}) = (S.S, Val(:V)) -Base.iterate(S::SVD, ::Val{:V}) = (S.V, Val(:done)) -Base.iterate(S::SVD, ::Val{:done}) = nothing - - -default_svd_alg(A) = DivideAndConquer() - - -""" - svd!(A; full::Bool = false, alg::Algorithm = default_svd_alg(A)) -> SVD - -`svd!` is the same as [`svd`](@ref), but saves space by -overwriting the input `A`, instead of creating a copy. See documentation of [`svd`](@ref) for details. -""" -function svd!(A::StridedMatrix{T}; full::Bool = false, alg::Algorithm = default_svd_alg(A)) where {T<:BlasFloat} - m, n = size(A) - if m == 0 || n == 0 - u, s, vt = (Matrix{T}(I, m, full ? m : n), real(zeros(T,0)), Matrix{T}(I, n, n)) - else - u, s, vt = _svd!(A, full, alg) - end - SVD(u, s, vt) -end -function svd!(A::StridedVector{T}; full::Bool = false, alg::Algorithm = default_svd_alg(A)) where {T<:BlasFloat} - m = length(A) - normA = norm(A) - if iszero(normA) - return SVD(Matrix{T}(I, m, full ? 
m : 1), [normA], ones(T, 1, 1)) - elseif !full - normalize!(A) - return SVD(reshape(A, (m, 1)), [normA], ones(T, 1, 1)) - else - u, s, vt = _svd!(reshape(A, (m, 1)), full, alg) - return SVD(u, s, vt) - end -end - -_svd!(A::StridedMatrix{T}, full::Bool, alg::Algorithm) where {T<:BlasFloat} = - throw(ArgumentError("Unsupported value for `alg` keyword.")) -_svd!(A::StridedMatrix{T}, full::Bool, alg::DivideAndConquer) where {T<:BlasFloat} = - LAPACK.gesdd!(full ? 'A' : 'S', A) -function _svd!(A::StridedMatrix{T}, full::Bool, alg::QRIteration) where {T<:BlasFloat} - c = full ? 'A' : 'S' - u, s, vt = LAPACK.gesvd!(c, c, A) -end - - - -""" - svd(A; full::Bool = false, alg::Algorithm = default_svd_alg(A)) -> SVD - -Compute the singular value decomposition (SVD) of `A` and return an `SVD` object. - -`U`, `S`, `V` and `Vt` can be obtained from the factorization `F` with `F.U`, -`F.S`, `F.V` and `F.Vt`, such that `A = U * Diagonal(S) * Vt`. -The algorithm produces `Vt` and hence `Vt` is more efficient to extract than `V`. -The singular values in `S` are sorted in descending order. - -Iterating the decomposition produces the components `U`, `S`, and `V`. - -If `full = false` (default), a "thin" SVD is returned. For an ``M -\\times N`` matrix `A`, in the full factorization `U` is ``M \\times M`` -and `V` is ``N \\times N``, while in the thin factorization `U` is ``M -\\times K`` and `V` is ``N \\times K``, where ``K = \\min(M,N)`` is the -number of singular values. - -`alg` specifies which algorithm and LAPACK method to use for SVD: -- `alg = DivideAndConquer()` (default): Calls `LAPACK.gesdd!`. -- `alg = QRIteration()`: Calls `LAPACK.gesvd!` (typically slower but more accurate) . - -!!! compat "Julia 1.3" - The `alg` keyword argument requires Julia 1.3 or later. - -# Examples -```jldoctest -julia> A = rand(4,3); - -julia> F = svd(A); # Store the Factorization Object - -julia> A ≈ F.U * Diagonal(F.S) * F.Vt -true - -julia> U, S, V = F; # destructuring via iteration - -julia> A ≈ U * Diagonal(S) * V' -true - -julia> Uonly, = svd(A); # Store U only - -julia> Uonly == U -true -``` -""" -function svd(A::AbstractVecOrMat{T}; full::Bool = false, alg::Algorithm = default_svd_alg(A)) where {T} - svd!(eigencopy_oftype(A, eigtype(T)), full = full, alg = alg) -end -function svd(A::AbstractVecOrMat{T}; full::Bool = false, alg::Algorithm = default_svd_alg(A)) where {T <: Union{Float16,Complex{Float16}}} - A = svd!(eigencopy_oftype(A, eigtype(T)), full = full, alg = alg) - return SVD{T}(A) -end -function svd(x::Number; full::Bool = false, alg::Algorithm = default_svd_alg(x)) - SVD(x == 0 ? fill(one(x), 1, 1) : fill(x/abs(x), 1, 1), [abs(x)], fill(one(x), 1, 1)) -end -function svd(x::Integer; full::Bool = false, alg::Algorithm = default_svd_alg(x)) - svd(float(x), full = full, alg = alg) -end -function svd(A::Adjoint; full::Bool = false, alg::Algorithm = default_svd_alg(A)) - s = svd(A.parent, full = full, alg = alg) - return SVD(s.Vt', s.S, s.U') -end -function svd(A::Transpose; full::Bool = false, alg::Algorithm = default_svd_alg(A)) - s = svd(A.parent, full = full, alg = alg) - return SVD(transpose(s.Vt), s.S, transpose(s.U)) -end - -function getproperty(F::SVD, d::Symbol) - if d === :V - return getfield(F, :Vt)' - else - return getfield(F, d) - end -end - -Base.propertynames(F::SVD, private::Bool=false) = - private ? (:V, fieldnames(typeof(F))...) : (:U, :S, :V, :Vt) - -""" - svdvals!(A) - -Return the singular values of `A`, saving space by overwriting the input. -See also [`svdvals`](@ref) and [`svd`](@ref). 
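A minimal sketch of how this relates to the non-mutating variant (illustrative, not a doctest):

```julia
using LinearAlgebra

A = [1.0 0.0; 0.0 -3.0]
svdvals!(copy(A)) ≈ svdvals(A)  # same values; the mutating method reuses its argument as workspace
```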
-""" -svdvals!(A::StridedMatrix{T}) where {T<:BlasFloat} = isempty(A) ? zeros(real(T), 0) : LAPACK.gesdd!('N', A)[2] -svdvals!(A::StridedVector{T}) where {T<:BlasFloat} = svdvals!(reshape(A, (length(A), 1))) - -""" - svdvals(A) - -Return the singular values of `A` in descending order. - -# Examples -```jldoctest -julia> A = [1. 0. 0. 0. 2.; 0. 0. 3. 0. 0.; 0. 0. 0. 0. 0.; 0. 2. 0. 0. 0.] -4×5 Matrix{Float64}: - 1.0 0.0 0.0 0.0 2.0 - 0.0 0.0 3.0 0.0 0.0 - 0.0 0.0 0.0 0.0 0.0 - 0.0 2.0 0.0 0.0 0.0 - -julia> svdvals(A) -4-element Vector{Float64}: - 3.0 - 2.23606797749979 - 2.0 - 0.0 -``` -""" -svdvals(A::AbstractMatrix{T}) where {T} = svdvals!(eigencopy_oftype(A, eigtype(T))) -svdvals(A::AbstractVector{T}) where {T} = [convert(eigtype(T), norm(A))] -svdvals(x::Number) = abs(x) -svdvals(S::SVD{<:Any,T}) where {T} = (S.S)::Vector{T} - -### SVD least squares ### -function ldiv!(A::SVD{T}, B::AbstractVecOrMat) where T - m, n = size(A) - k = searchsortedlast(A.S, eps(real(T))*A.S[1], rev=true) - mul!(view(B, 1:n, :), view(A.Vt, 1:k, :)', view(A.S, 1:k) .\ (view(A.U, :, 1:k)' * _cut_B(B, 1:m))) - return B -end - -function inv(F::SVD{T}) where T - @inbounds for i in eachindex(F.S) - iszero(F.S[i]) && throw(SingularException(i)) - end - k = searchsortedlast(F.S, eps(real(T))*F.S[1], rev=true) - @views (F.S[1:k] .\ F.Vt[1:k, :])' * F.U[:,1:k]' -end - -size(A::SVD, dim::Integer) = dim == 1 ? size(A.U, dim) : size(A.Vt, dim) -size(A::SVD) = (size(A, 1), size(A, 2)) - -function adjoint(F::SVD) - return SVD(F.Vt', F.S, F.U') -end - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::SVD{<:Any,<:Any,<:AbstractArray,<:AbstractVector}) - summary(io, F); println(io) - println(io, "U factor:") - show(io, mime, F.U) - println(io, "\nsingular values:") - show(io, mime, F.S) - println(io, "\nVt factor:") - show(io, mime, F.Vt) -end - -# Generalized svd -""" - GeneralizedSVD <: Factorization - -Matrix factorization type of the generalized singular value decomposition (SVD) -of two matrices `A` and `B`, such that `A = F.U*F.D1*F.R0*F.Q'` and -`B = F.V*F.D2*F.R0*F.Q'`. This is the return type of [`svd(_, _)`](@ref), the -corresponding matrix factorization function. - -For an M-by-N matrix `A` and P-by-N matrix `B`, - -- `U` is a M-by-M orthogonal matrix, -- `V` is a P-by-P orthogonal matrix, -- `Q` is a N-by-N orthogonal matrix, -- `D1` is a M-by-(K+L) diagonal matrix with 1s in the first K entries, -- `D2` is a P-by-(K+L) matrix whose top right L-by-L block is diagonal, -- `R0` is a (K+L)-by-N matrix whose rightmost (K+L)-by-(K+L) block is - nonsingular upper block triangular, - -`K+L` is the effective numerical rank of the matrix `[A; B]`. - -Iterating the decomposition produces the components `U`, `V`, `Q`, `D1`, `D2`, and `R0`. - -The entries of `F.D1` and `F.D2` are related, as explained in the LAPACK -documentation for the -[generalized SVD](https://www.netlib.org/lapack/lug/node36.html) and the -[xGGSVD3](https://www.netlib.org/lapack/explore-html/d6/db3/dggsvd3_8f.html) -routine which is called underneath (in LAPACK 3.6.0 and newer). - -# Examples -```jldoctest -julia> A = [1. 0.; 0. -1.] -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 -1.0 - -julia> B = [0. 1.; 1. 0.] 
-2×2 Matrix{Float64}: - 0.0 1.0 - 1.0 0.0 - -julia> F = svd(A, B) -GeneralizedSVD{Float64, Matrix{Float64}, Float64, Vector{Float64}} -U factor: -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 1.0 -V factor: -2×2 Matrix{Float64}: - -0.0 -1.0 - 1.0 0.0 -Q factor: -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 1.0 -D1 factor: -2×2 Matrix{Float64}: - 0.707107 0.0 - 0.0 0.707107 -D2 factor: -2×2 Matrix{Float64}: - 0.707107 0.0 - 0.0 0.707107 -R0 factor: -2×2 Matrix{Float64}: - 1.41421 0.0 - 0.0 -1.41421 - -julia> F.U*F.D1*F.R0*F.Q' -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 -1.0 - -julia> F.V*F.D2*F.R0*F.Q' -2×2 Matrix{Float64}: - -0.0 1.0 - 1.0 0.0 -``` -""" -struct GeneralizedSVD{T,S<:AbstractMatrix,Tr,C<:AbstractVector{Tr}} <: Factorization{T} - U::S - V::S - Q::S - a::C - b::C - k::Int - l::Int - R::S - function GeneralizedSVD{T,S,Tr,C}(U, V, Q, a, b, k, l, R) where {T,S<:AbstractMatrix{T},Tr,C<:AbstractVector{Tr}} - new{T,S,Tr,C}(U, V, Q, a, b, k, l, R) - end -end -GeneralizedSVD(U::AbstractMatrix{T}, V::AbstractMatrix{T}, Q::AbstractMatrix{T}, - a::AbstractVector{Tr}, b::AbstractVector{Tr}, k::Int, l::Int, - R::AbstractMatrix{T}) where {T, Tr} = - GeneralizedSVD{T,typeof(U),Tr,typeof(a)}(U, V, Q, a, b, k, l, R) -# backwards-compatible constructors (remove with Julia 2.0) -@deprecate(GeneralizedSVD{T,S}(U, V, Q, a, b, k, l, R) where {T, S}, - GeneralizedSVD{T,S,real(T),typeof(a)}(U, V, Q, a, b, k, l, R)) - -# iteration for destructuring into components -Base.iterate(S::GeneralizedSVD) = (S.U, Val(:V)) -Base.iterate(S::GeneralizedSVD, ::Val{:V}) = (S.V, Val(:Q)) -Base.iterate(S::GeneralizedSVD, ::Val{:Q}) = (S.Q, Val(:D1)) -Base.iterate(S::GeneralizedSVD, ::Val{:D1}) = (S.D1, Val(:D2)) -Base.iterate(S::GeneralizedSVD, ::Val{:D2}) = (S.D2, Val(:R0)) -Base.iterate(S::GeneralizedSVD, ::Val{:R0}) = (S.R0, Val(:done)) -Base.iterate(S::GeneralizedSVD, ::Val{:done}) = nothing - -""" - svd!(A, B) -> GeneralizedSVD - -`svd!` is the same as [`svd`](@ref), but modifies the arguments -`A` and `B` in-place, instead of making copies. See documentation of [`svd`](@ref) for details. -""" -function svd!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat - # xggsvd3 replaced xggsvd in LAPACK 3.6.0 - if LAPACK.version() < v"3.6.0" - U, V, Q, a, b, k, l, R = LAPACK.ggsvd!('U', 'V', 'Q', A, B) - else - U, V, Q, a, b, k, l, R = LAPACK.ggsvd3!('U', 'V', 'Q', A, B) - end - GeneralizedSVD(U, V, Q, a, b, Int(k), Int(l), R) -end -svd(A::AbstractMatrix{T}, B::AbstractMatrix{T}) where {T<:BlasFloat} = - svd!(copy_similar(A, T), copy_similar(B, T)) - -""" - - svd(A, B) -> GeneralizedSVD - -Compute the generalized SVD of `A` and `B`, returning a `GeneralizedSVD` factorization -object `F` such that `[A;B] = [F.U * F.D1; F.V * F.D2] * F.R0 * F.Q'` - -- `U` is a M-by-M orthogonal matrix, -- `V` is a P-by-P orthogonal matrix, -- `Q` is a N-by-N orthogonal matrix, -- `D1` is a M-by-(K+L) diagonal matrix with 1s in the first K entries, -- `D2` is a P-by-(K+L) matrix whose top right L-by-L block is diagonal, -- `R0` is a (K+L)-by-N matrix whose rightmost (K+L)-by-(K+L) block is - nonsingular upper block triangular, - -`K+L` is the effective numerical rank of the matrix `[A; B]`. - -Iterating the decomposition produces the components `U`, `V`, `Q`, `D1`, `D2`, and `R0`. - -The generalized SVD is used in applications such as when one wants to compare how much belongs -to `A` vs. how much belongs to `B`, as in human vs yeast genome, or signal vs noise, or between -clusters vs within clusters. 
(See Edelman and Wang for discussion: https://arxiv.org/abs/1901.00485) - -It decomposes `[A; B]` into `[UC; VS]H`, where `[UC; VS]` is a natural orthogonal basis for the -column space of `[A; B]`, and `H = RQ'` is a natural non-orthogonal basis for the rowspace of `[A;B]`, -where the top rows are most closely attributed to the `A` matrix, and the bottom to the `B` matrix. -The multi-cosine/sine matrices `C` and `S` provide a multi-measure of how much `A` vs how much `B`, -and `U` and `V` provide directions in which these are measured. - -# Examples -```jldoctest -julia> A = randn(3,2); B=randn(4,2); - -julia> F = svd(A, B); - -julia> U,V,Q,C,S,R = F; - -julia> H = R*Q'; - -julia> [A; B] ≈ [U*C; V*S]*H -true - -julia> [A; B] ≈ [F.U*F.D1; F.V*F.D2]*F.R0*F.Q' -true - -julia> Uonly, = svd(A,B); - -julia> U == Uonly -true -``` -""" -function svd(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} - S = promote_type(eigtype(TA),TB) - return svd!(copy_similar(A, S), copy_similar(B, S)) -end -# This method can be heavily optimized but it is probably not critical -# and might introduce bugs or inconsistencies relative to the 1x1 matrix -# version -svd(x::Number, y::Number) = svd(fill(x, 1, 1), fill(y, 1, 1)) - -@inline function getproperty(F::GeneralizedSVD{T}, d::Symbol) where T - Fa = getfield(F, :a) - Fb = getfield(F, :b) - Fk = getfield(F, :k) - Fl = getfield(F, :l) - FU = getfield(F, :U) - FV = getfield(F, :V) - FQ = getfield(F, :Q) - FR = getfield(F, :R) - if d === :alpha - return Fa - elseif d === :beta - return Fb - elseif d === :vals || d === :S - return Fa[1:Fk + Fl] ./ Fb[1:Fk + Fl] - elseif d === :D1 - m = size(FU, 1) - if m - Fk - Fl >= 0 - return [Matrix{T}(I, Fk, Fk) zeros(T, Fk, Fl) ; - zeros(T, Fl, Fk) Diagonal(Fa[Fk + 1:Fk + Fl]); - zeros(T, m - Fk - Fl, Fk + Fl) ] - else - return [Matrix{T}(I, m, Fk) [zeros(T, Fk, m - Fk); Diagonal(Fa[Fk + 1:m])] zeros(T, m, Fk + Fl - m)] - end - elseif d === :D2 - m = size(FU, 1) - p = size(FV, 1) - if m - Fk - Fl >= 0 - return [zeros(T, Fl, Fk) Diagonal(Fb[Fk + 1:Fk + Fl]); zeros(T, p - Fl, Fk + Fl)] - else - return [zeros(T, p, Fk) [Diagonal(Fb[Fk + 1:m]); zeros(T, Fk + p - m, m - Fk)] [zeros(T, m - Fk, Fk + Fl - m); Matrix{T}(I, Fk + p - m, Fk + Fl - m)]] - end - elseif d === :R0 - n = size(FQ, 1) - return [zeros(T, Fk + Fl, n - Fk - Fl) FR] - else - getfield(F, d) - end -end - -Base.propertynames(F::GeneralizedSVD) = - (:alpha, :beta, :vals, :S, :D1, :D2, :R0, fieldnames(typeof(F))...) - -function show(io::IO, mime::MIME{Symbol("text/plain")}, F::GeneralizedSVD{<:Any,<:AbstractArray}) - summary(io, F); println(io) - println(io, "U factor:") - show(io, mime, F.U) - println(io, "\nV factor:") - show(io, mime, F.V) - println(io, "\nQ factor:") - show(io, mime, F.Q) - println(io, "\nD1 factor:") - show(io, mime, F.D1) - println(io, "\nD2 factor:") - show(io, mime, F.D2) - println(io, "\nR0 factor:") - show(io, mime, F.R0) -end - -""" - svdvals!(A, B) - -Return the generalized singular values from the generalized singular value -decomposition of `A` and `B`, saving space by overwriting `A` and `B`. -See also [`svd`](@ref) and [`svdvals`](@ref). 
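As a sketch of how these values relate to the full factorization above (illustrative, not a doctest):

```julia
using LinearAlgebra

A = randn(3, 2); B = randn(4, 2);
F = svd(A, B);           # GeneralizedSVD factorization
svdvals(A, B) ≈ F.vals   # the generalized singular values are the ratios F.a ./ F.b
```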
-""" -function svdvals!(A::StridedMatrix{T}, B::StridedMatrix{T}) where T<:BlasFloat - # xggsvd3 replaced xggsvd in LAPACK 3.6.0 - if LAPACK.version() < v"3.6.0" - _, _, _, a, b, k, l, _ = LAPACK.ggsvd!('N', 'N', 'N', A, B) - else - _, _, _, a, b, k, l, _ = LAPACK.ggsvd3!('N', 'N', 'N', A, B) - end - a[1:k + l] ./ b[1:k + l] -end - -""" - svdvals(A, B) - -Return the generalized singular values from the generalized singular value -decomposition of `A` and `B`. See also [`svd`](@ref). - -# Examples -```jldoctest -julia> A = [1. 0.; 0. -1.] -2×2 Matrix{Float64}: - 1.0 0.0 - 0.0 -1.0 - -julia> B = [0. 1.; 1. 0.] -2×2 Matrix{Float64}: - 0.0 1.0 - 1.0 0.0 - -julia> svdvals(A, B) -2-element Vector{Float64}: - 1.0 - 1.0 -``` -""" -function svdvals(A::AbstractMatrix{TA}, B::AbstractMatrix{TB}) where {TA,TB} - S = promote_type(eigtype(TA), TB) - return svdvals!(copy_similar(A, S), copy_similar(B, S)) -end -svdvals(x::Number, y::Number) = abs(x/y) - -# Conversion -AbstractMatrix(F::SVD) = (F.U * Diagonal(F.S)) * F.Vt -AbstractArray(F::SVD) = AbstractMatrix(F) -Matrix(F::SVD) = Array(AbstractArray(F)) -Array(F::SVD) = Matrix(F) diff --git a/stdlib/LinearAlgebra/src/symmetric.jl b/stdlib/LinearAlgebra/src/symmetric.jl deleted file mode 100644 index b059f31737b55..0000000000000 --- a/stdlib/LinearAlgebra/src/symmetric.jl +++ /dev/null @@ -1,1064 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# Symmetric and Hermitian matrices -struct Symmetric{T,S<:AbstractMatrix{<:T}} <: AbstractMatrix{T} - data::S - uplo::Char - - function Symmetric{T,S}(data, uplo::Char) where {T,S<:AbstractMatrix{<:T}} - require_one_based_indexing(data) - (uplo != 'U' && uplo != 'L') && throw_uplo() - new{T,S}(data, uplo) - end -end -""" - Symmetric(A::AbstractMatrix, uplo::Symbol=:U) - -Construct a `Symmetric` view of the upper (if `uplo = :U`) or lower (if `uplo = :L`) -triangle of the matrix `A`. - -`Symmetric` views are mainly useful for real-symmetric matrices, for which -specialized algorithms (e.g. for eigenproblems) are enabled for `Symmetric` types. -More generally, see also [`Hermitian(A)`](@ref) for Hermitian matrices `A == A'`, which -is effectively equivalent to `Symmetric` for real matrices but is also useful for -complex matrices. (Whereas complex `Symmetric` matrices are supported but have few -if any specialized algorithms.) - -To compute the symmetric part of a real matrix, or more generally the Hermitian part `(A + A') / 2` of -a real or complex matrix `A`, use [`hermitianpart`](@ref). - -# Examples -```jldoctest -julia> A = [1 2 3; 4 5 6; 7 8 9] -3×3 Matrix{Int64}: - 1 2 3 - 4 5 6 - 7 8 9 - -julia> Supper = Symmetric(A) -3×3 Symmetric{Int64, Matrix{Int64}}: - 1 2 3 - 2 5 6 - 3 6 9 - -julia> Slower = Symmetric(A, :L) -3×3 Symmetric{Int64, Matrix{Int64}}: - 1 4 7 - 4 5 8 - 7 8 9 - -julia> hermitianpart(A) -3×3 Hermitian{Float64, Matrix{Float64}}: - 1.0 3.0 5.0 - 3.0 5.0 7.0 - 5.0 7.0 9.0 -``` - -Note that `Supper` will not be equal to `Slower` unless `A` is itself symmetric (e.g. if -`A == transpose(A)`). -""" -function Symmetric(A::AbstractMatrix, uplo::Symbol=:U) - checksquare(A) - return symmetric_type(typeof(A))(A, char_uplo(uplo)) -end - -""" - symmetric(A, uplo::Symbol=:U) - -Construct a symmetric view of `A`. If `A` is a matrix, `uplo` controls whether the upper -(if `uplo = :U`) or lower (if `uplo = :L`) triangle of `A` is used to implicitly fill the -other one. If `A` is a `Number`, it is returned as is. 
- -If a symmetric view of a matrix is to be constructed of which the elements are neither -matrices nor numbers, an appropriate method of `symmetric` has to be implemented. In that -case, `symmetric_type` has to be implemented, too. -""" -symmetric(A::AbstractMatrix, uplo::Symbol=:U) = Symmetric(A, uplo) -symmetric(A::Number, ::Symbol=:U) = A - -""" - symmetric_type(T::Type) - -The type of the object returned by `symmetric(::T, ::Symbol)`. For matrices, this is an -appropriately typed `Symmetric`, for `Number`s, it is the original type. If `symmetric` is -implemented for a custom type, so should be `symmetric_type`, and vice versa. -""" -function symmetric_type(::Type{T}) where {S, T<:AbstractMatrix{S}} - return Symmetric{Union{S, promote_op(transpose, S), symmetric_type(S)}, T} -end -function symmetric_type(::Type{T}) where {S<:Number, T<:AbstractMatrix{S}} - return Symmetric{S, T} -end -function symmetric_type(::Type{T}) where {S<:AbstractMatrix, T<:AbstractMatrix{S}} - return Symmetric{AbstractMatrix, T} -end -symmetric_type(::Type{T}) where {T<:Number} = T - -struct Hermitian{T,S<:AbstractMatrix{<:T}} <: AbstractMatrix{T} - data::S - uplo::Char - - function Hermitian{T,S}(data, uplo::Char) where {T,S<:AbstractMatrix{<:T}} - require_one_based_indexing(data) - (uplo != 'U' && uplo != 'L') && throw_uplo() - new{T,S}(data, uplo) - end -end -""" - Hermitian(A::AbstractMatrix, uplo::Symbol=:U) - -Construct a `Hermitian` view of the upper (if `uplo = :U`) or lower (if `uplo = :L`) -triangle of the matrix `A`. - -To compute the Hermitian part of `A`, use [`hermitianpart`](@ref). - -# Examples -```jldoctest -julia> A = [1 2+2im 3-3im; 4 5 6-6im; 7 8+8im 9] -3×3 Matrix{Complex{Int64}}: - 1+0im 2+2im 3-3im - 4+0im 5+0im 6-6im - 7+0im 8+8im 9+0im - -julia> Hupper = Hermitian(A) -3×3 Hermitian{Complex{Int64}, Matrix{Complex{Int64}}}: - 1+0im 2+2im 3-3im - 2-2im 5+0im 6-6im - 3+3im 6+6im 9+0im - -julia> Hlower = Hermitian(A, :L) -3×3 Hermitian{Complex{Int64}, Matrix{Complex{Int64}}}: - 1+0im 4+0im 7+0im - 4+0im 5+0im 8-8im - 7+0im 8+8im 9+0im - -julia> hermitianpart(A) -3×3 Hermitian{ComplexF64, Matrix{ComplexF64}}: - 1.0+0.0im 3.0+1.0im 5.0-1.5im - 3.0-1.0im 5.0+0.0im 7.0-7.0im - 5.0+1.5im 7.0+7.0im 9.0+0.0im -``` - -Note that `Hupper` will not be equal to `Hlower` unless `A` is itself Hermitian (e.g. if `A == adjoint(A)`). - -All non-real parts of the diagonal will be ignored. - -```julia -Hermitian(fill(complex(1,1), 1, 1)) == fill(1, 1, 1) -``` -""" -function Hermitian(A::AbstractMatrix, uplo::Symbol=:U) - n = checksquare(A) - return hermitian_type(typeof(A))(A, char_uplo(uplo)) -end - -""" - hermitian(A, uplo::Symbol=:U) - -Construct a hermitian view of `A`. If `A` is a matrix, `uplo` controls whether the upper -(if `uplo = :U`) or lower (if `uplo = :L`) triangle of `A` is used to implicitly fill the -other one. If `A` is a `Number`, its real part is returned converted back to the input -type. - -If a hermitian view of a matrix is to be constructed of which the elements are neither -matrices nor numbers, an appropriate method of `hermitian` has to be implemented. In that -case, `hermitian_type` has to be implemented, too. -""" -hermitian(A::AbstractMatrix, uplo::Symbol=:U) = Hermitian(A, uplo) -hermitian(A::Number, ::Symbol=:U) = convert(typeof(A), real(A)) - -""" - hermitian_type(T::Type) - -The type of the object returned by `hermitian(::T, ::Symbol)`. For matrices, this is an -appropriately typed `Hermitian`, for `Number`s, it is the original type. 
If `hermitian` is -implemented for a custom type, so should be `hermitian_type`, and vice versa. -""" -function hermitian_type(::Type{T}) where {S, T<:AbstractMatrix{S}} - return Hermitian{Union{S, promote_op(adjoint, S), hermitian_type(S)}, T} -end -function hermitian_type(::Type{T}) where {S<:Number, T<:AbstractMatrix{S}} - return Hermitian{S, T} -end -function hermitian_type(::Type{T}) where {S<:AbstractMatrix, T<:AbstractMatrix{S}} - return Hermitian{AbstractMatrix, T} -end -hermitian_type(::Type{T}) where {T<:Number} = T - -_unwrap(A::Hermitian) = parent(A) -_unwrap(A::Symmetric) = parent(A) - -for (S, H) in ((:Symmetric, :Hermitian), (:Hermitian, :Symmetric)) - @eval begin - $S(A::$S) = A - function $S(A::$S, uplo::Symbol) - if A.uplo == char_uplo(uplo) - return A - else - throw(ArgumentError("Cannot construct $($S); uplo doesn't match")) - end - end - $S(A::$H) = $S(A, sym_uplo(A.uplo)) - function $S(A::$H, uplo::Symbol) - if A.uplo == char_uplo(uplo) - if $H === Hermitian && !(eltype(A) <: Real) && - any(!isreal, A.data[i] for i in diagind(A.data, IndexStyle(A.data))) - - throw(ArgumentError("Cannot construct $($S)($($H))); diagonal contains complex values")) - end - return $S(A.data, sym_uplo(A.uplo)) - else - throw(ArgumentError("Cannot construct $($S); uplo doesn't match")) - end - end - end -end - -convert(::Type{T}, m::Union{Symmetric,Hermitian}) where {T<:Symmetric} = m isa T ? m : T(m)::T -convert(::Type{T}, m::Union{Symmetric,Hermitian}) where {T<:Hermitian} = m isa T ? m : T(m)::T - -const HermOrSym{T, S} = Union{Hermitian{T,S}, Symmetric{T,S}} -const RealHermSym{T<:Real,S} = Union{Hermitian{T,S}, Symmetric{T,S}} -const SymSymTri{T} = Union{Symmetric{T}, SymTridiagonal{T}} -const RealHermSymSymTri{T<:Real} = Union{RealHermSym{T}, SymTridiagonal{T}} -const RealHermSymComplexHerm{T<:Real,S} = Union{Hermitian{T,S}, Symmetric{T,S}, Hermitian{Complex{T},S}} -const RealHermSymComplexSym{T<:Real,S} = Union{Hermitian{T,S}, Symmetric{T,S}, Symmetric{Complex{T},S}} -const RealHermSymSymTriComplexHerm{T<:Real} = Union{RealHermSymComplexSym{T}, SymTridiagonal{T}} -const SelfAdjoint = Union{Symmetric{<:Real}, Hermitian{<:Number}} - -wrappertype(::Union{Symmetric, SymTridiagonal}) = Symmetric -wrappertype(::Hermitian) = Hermitian - -size(A::HermOrSym) = size(A.data) -axes(A::HermOrSym) = axes(A.data) -@inline function Base.isassigned(A::HermOrSym, i::Int, j::Int) - @boundscheck checkbounds(Bool, A, i, j) || return false - @inbounds if i == j || ((A.uplo == 'U') == (i < j)) - return isassigned(A.data, i, j) - else - return isassigned(A.data, j, i) - end -end - -@inline function getindex(A::Symmetric, i::Int, j::Int) - @boundscheck checkbounds(A, i, j) - @inbounds if i == j - return symmetric(A.data[i, j], sym_uplo(A.uplo))::symmetric_type(eltype(A.data)) - elseif (A.uplo == 'U') == (i < j) - return A.data[i, j] - else - return transpose(A.data[j, i]) - end -end -@inline function getindex(A::Hermitian, i::Int, j::Int) - @boundscheck checkbounds(A, i, j) - @inbounds if i == j - return hermitian(A.data[i, j], sym_uplo(A.uplo))::hermitian_type(eltype(A.data)) - elseif (A.uplo == 'U') == (i < j) - return A.data[i, j] - else - return adjoint(A.data[j, i]) - end -end - -Base._reverse(A::Symmetric, dims::Integer) = reverse!(Matrix(A); dims) -Base._reverse(A::Symmetric, ::Colon) = Symmetric(reverse(A.data), A.uplo == 'U' ? 
:L : :U) - -@propagate_inbounds function setindex!(A::Symmetric, v, i::Integer, j::Integer) - i == j || throw(ArgumentError("Cannot set a non-diagonal index in a symmetric matrix")) - setindex!(A.data, v, i, j) - return A -end - -Base._reverse(A::Hermitian, dims) = reverse!(Matrix(A); dims) -Base._reverse(A::Hermitian, ::Colon) = Hermitian(reverse(A.data), A.uplo == 'U' ? :L : :U) - -@propagate_inbounds function setindex!(A::Hermitian, v, i::Integer, j::Integer) - if i != j - throw(ArgumentError("Cannot set a non-diagonal index in a Hermitian matrix")) - elseif !isreal(v) - throw(ArgumentError("Cannot set a diagonal entry in a Hermitian matrix to a nonreal value")) - else - setindex!(A.data, v, i, j) - end - return A -end - -Base.dataids(A::HermOrSym) = Base.dataids(parent(A)) -Base.unaliascopy(A::Hermitian) = Hermitian(Base.unaliascopy(parent(A)), sym_uplo(A.uplo)) -Base.unaliascopy(A::Symmetric) = Symmetric(Base.unaliascopy(parent(A)), sym_uplo(A.uplo)) - -_conjugation(::Symmetric) = transpose -_conjugation(::Hermitian) = adjoint - -diag(A::Symmetric) = symmetric.(diag(parent(A)), sym_uplo(A.uplo)) -diag(A::Hermitian) = hermitian.(diag(parent(A)), sym_uplo(A.uplo)) - -function applytri(f, A::HermOrSym) - if A.uplo == 'U' - f(uppertriangular(A.data)) - else - f(lowertriangular(A.data)) - end -end - -function applytri(f, A::HermOrSym, B::HermOrSym) - if A.uplo == B.uplo == 'U' - f(uppertriangular(A.data), uppertriangular(B.data)) - elseif A.uplo == B.uplo == 'L' - f(lowertriangular(A.data), lowertriangular(B.data)) - elseif A.uplo == 'U' - f(uppertriangular(A.data), uppertriangular(_conjugation(B)(B.data))) - else # A.uplo == 'L' - f(uppertriangular(_conjugation(A)(A.data)), uppertriangular(B.data)) - end -end -_parent_tri(U::UpperOrLowerTriangular) = parent(U) -_parent_tri(U) = U -parentof_applytri(f, args...) = _parent_tri(applytri(f, args...)) - -isdiag(A::HermOrSym) = applytri(isdiag, A) - -# For A<:Union{Symmetric,Hermitian}, similar(A[, neweltype]) should yield a matrix with the same -# symmetry type, uplo flag, and underlying storage type as A. The following methods cover these cases. -similar(A::Symmetric, ::Type{T}) where {T} = Symmetric(similar(parent(A), T), ifelse(A.uplo == 'U', :U, :L)) -# If the Hermitian constructor's check ascertaining that the wrapped matrix's -# diagonal is strictly real is removed, the following method can be simplified. -function similar(A::Hermitian, ::Type{T}) where T - B = similar(parent(A), T) - for i in 1:size(B, 1) B[i, i] = 0 end - return Hermitian(B, ifelse(A.uplo == 'U', :U, :L)) -end -# On the other hand, similar(A, [neweltype,] shape...) should yield a matrix of the underlying -# storage type of A (not wrapped in a symmetry type). The following method covers these cases. 
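A brief sketch of the behavior the two comments above describe (result types assumed, not doctested):

```julia
using LinearAlgebra

S = Symmetric(rand(3, 3), :L)
typeof(similar(S))        # Symmetric{Float64, Matrix{Float64}}: wrapper, uplo, and storage preserved
typeof(similar(S, Int))   # Symmetric{Int64, Matrix{Int64}}
typeof(similar(S, 2, 4))  # Matrix{Float64}: with an explicit shape, only the storage type is kept
```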
-similar(A::Union{Symmetric,Hermitian}, ::Type{T}, dims::Dims{N}) where {T,N} = similar(parent(A), T, dims) - -parent(A::HermOrSym) = A.data -Symmetric{T,S}(A::Symmetric{T,S}) where {T,S<:AbstractMatrix{T}} = A -Symmetric{T,S}(A::Symmetric) where {T,S<:AbstractMatrix{T}} = Symmetric{T,S}(convert(S,A.data),A.uplo) -AbstractMatrix{T}(A::Symmetric) where {T} = Symmetric(convert(AbstractMatrix{T}, A.data), sym_uplo(A.uplo)) -AbstractMatrix{T}(A::Symmetric{T}) where {T} = copy(A) -Hermitian{T,S}(A::Hermitian{T,S}) where {T,S<:AbstractMatrix{T}} = A -Hermitian{T,S}(A::Hermitian) where {T,S<:AbstractMatrix{T}} = Hermitian{T,S}(convert(S,A.data),A.uplo) -AbstractMatrix{T}(A::Hermitian) where {T} = Hermitian(convert(AbstractMatrix{T}, A.data), sym_uplo(A.uplo)) -AbstractMatrix{T}(A::Hermitian{T}) where {T} = copy(A) - -copy(A::Symmetric) = (Symmetric(parentof_applytri(copy, A), sym_uplo(A.uplo))) -copy(A::Hermitian) = (Hermitian(parentof_applytri(copy, A), sym_uplo(A.uplo))) - -function copyto!(dest::Symmetric, src::Symmetric) - if axes(dest) != axes(src) - @invoke copyto!(dest::AbstractMatrix, src::AbstractMatrix) - elseif src.uplo == dest.uplo - copytrito!(dest.data, src.data, src.uplo) - else - transpose!(dest.data, Base.unalias(dest.data, src.data)) - end - return dest -end - -function copyto!(dest::Hermitian, src::Hermitian) - if axes(dest) != axes(src) - @invoke copyto!(dest::AbstractMatrix, src::AbstractMatrix) - elseif src.uplo == dest.uplo - copytrito!(dest.data, src.data, src.uplo) - else - adjoint!(dest.data, Base.unalias(dest.data, src.data)) - end - return dest -end - -@propagate_inbounds function copyto!(dest::StridedMatrix, A::HermOrSym) - if axes(dest) != axes(A) - @invoke copyto!(dest::StridedMatrix, A::AbstractMatrix) - else - _copyto!(dest, Base.unalias(dest, A)) - end - return dest -end -@propagate_inbounds function _copyto!(dest::StridedMatrix, A::HermOrSym) - copytrito!(dest, parent(A), A.uplo) - conjugate = A isa Hermitian - copytri!(dest, A.uplo, conjugate) - _symmetrize_diagonal!(dest, A) - return dest -end -@inline function _symmetrize_diagonal!(B, A::Symmetric) - for i = 1:size(A, 1) - B[i,i] = symmetric(A[i,i], sym_uplo(A.uplo))::symmetric_type(eltype(A.data)) - end - return B -end -@inline function _symmetrize_diagonal!(B, A::Hermitian) - for i = 1:size(A, 1) - B[i,i] = hermitian(A[i,i], sym_uplo(A.uplo))::hermitian_type(eltype(A.data)) - end - return B -end - -# fill[stored]! 
-fill!(A::HermOrSym, x) = fillstored!(A, x) -function fillstored!(A::HermOrSym{T}, x) where T - xT = convert(T, x) - if isa(A, Hermitian) - isreal(xT) || throw(ArgumentError("cannot fill Hermitian matrix with a nonreal value")) - end - if A.uplo == 'U' - fillband!(A.data, xT, 0, size(A,2)-1) - else # A.uplo == 'L' - fillband!(A.data, xT, 1-size(A,1), 0) - end - return A -end - -Base.isreal(A::HermOrSym{<:Real}) = true -function Base.isreal(A::HermOrSym) - n = size(A, 1) - @inbounds if A.uplo == 'U' - for j in 1:n - for i in 1:(j - (A isa Hermitian)) - if !isreal(A.data[i,j]) - return false - end - end - end - else - for j in 1:n - for i in (j + (A isa Hermitian)):n - if !isreal(A.data[i,j]) - return false - end - end - end - end - return true -end - -ishermitian(A::Hermitian) = true -ishermitian(A::Symmetric{<:Real}) = true -ishermitian(A::Symmetric{<:Complex}) = isreal(A) -issymmetric(A::Hermitian{<:Real}) = true -issymmetric(A::Hermitian{<:Complex}) = isreal(A) -issymmetric(A::Symmetric) = true - -adjoint(A::Hermitian) = A -transpose(A::Symmetric) = A -adjoint(A::Symmetric{<:Real}) = A -transpose(A::Hermitian{<:Real}) = A -adjoint(A::Symmetric) = Adjoint(A) -transpose(A::Hermitian) = Transpose(A) - -real(A::Symmetric{<:Real}) = A -real(A::Hermitian{<:Real}) = A -real(A::Symmetric) = Symmetric(parentof_applytri(real, A), sym_uplo(A.uplo)) -real(A::Hermitian) = Hermitian(parentof_applytri(real, A), sym_uplo(A.uplo)) -imag(A::Symmetric) = Symmetric(parentof_applytri(imag, A), sym_uplo(A.uplo)) - -Base.copy(A::Adjoint{<:Any,<:Symmetric}) = - Symmetric(copy(adjoint(A.parent.data)), ifelse(A.parent.uplo == 'U', :L, :U)) -Base.copy(A::Transpose{<:Any,<:Hermitian}) = - Hermitian(copy(transpose(A.parent.data)), ifelse(A.parent.uplo == 'U', :L, :U)) - -tr(A::Symmetric{<:Number}) = tr(A.data) # to avoid AbstractMatrix fallback (incl. 
allocations) -tr(A::Hermitian{<:Number}) = real(tr(A.data)) - -Base.conj(A::Symmetric) = Symmetric(parentof_applytri(conj, A), sym_uplo(A.uplo)) -Base.conj(A::Hermitian) = Hermitian(parentof_applytri(conj, A), sym_uplo(A.uplo)) -Base.conj!(A::HermOrSym) = typeof(A)(parentof_applytri(conj!, A), A.uplo) - -# tril/triu -function tril(A::Hermitian, k::Integer=0) - if A.uplo == 'U' && k <= 0 - return tril!(copy(A.data'),k) - elseif A.uplo == 'U' && k > 0 - return tril!(copy(A.data'),-1) + tril!(triu(A.data),k) - elseif A.uplo == 'L' && k <= 0 - return tril(A.data,k) - else - return tril(A.data,-1) + tril!(triu!(copy(A.data')),k) - end -end - -function tril(A::Symmetric, k::Integer=0) - if A.uplo == 'U' && k <= 0 - return tril!(copy(transpose(A.data)),k) - elseif A.uplo == 'U' && k > 0 - return tril!(copy(transpose(A.data)),-1) + tril!(triu(A.data),k) - elseif A.uplo == 'L' && k <= 0 - return tril(A.data,k) - else - return tril(A.data,-1) + tril!(triu!(copy(transpose(A.data))),k) - end -end - -function triu(A::Hermitian, k::Integer=0) - if A.uplo == 'U' && k >= 0 - return triu(A.data,k) - elseif A.uplo == 'U' && k < 0 - return triu(A.data,1) + triu!(tril!(copy(A.data')),k) - elseif A.uplo == 'L' && k >= 0 - return triu!(copy(A.data'),k) - else - return triu!(copy(A.data'),1) + triu!(tril(A.data),k) - end -end - -function triu(A::Symmetric, k::Integer=0) - if A.uplo == 'U' && k >= 0 - return triu(A.data,k) - elseif A.uplo == 'U' && k < 0 - return triu(A.data,1) + triu!(tril!(copy(transpose(A.data))),k) - elseif A.uplo == 'L' && k >= 0 - return triu!(copy(transpose(A.data)),k) - else - return triu!(copy(transpose(A.data)),1) + triu!(tril(A.data),k) - end -end - -for (T, trans, real) in [(:Symmetric, :transpose, :identity), (:(Hermitian{<:Union{Real,Complex}}), :adjoint, :real)] - @eval begin - function dot(A::$T, B::$T) - n = size(A, 2) - if n != size(B, 2) - throw(DimensionMismatch(lazy"A has dimensions $(size(A)) but B has dimensions $(size(B))")) - end - - dotprod = $real(zero(dot(first(A), first(B)))) - @inbounds if A.uplo == 'U' && B.uplo == 'U' - for j in 1:n - for i in 1:(j - 1) - dotprod += 2 * $real(dot(A.data[i, j], B.data[i, j])) - end - dotprod += $real(dot(A[j, j], B[j, j])) - end - elseif A.uplo == 'L' && B.uplo == 'L' - for j in 1:n - dotprod += $real(dot(A[j, j], B[j, j])) - for i in (j + 1):n - dotprod += 2 * $real(dot(A.data[i, j], B.data[i, j])) - end - end - elseif A.uplo == 'U' && B.uplo == 'L' - for j in 1:n - for i in 1:(j - 1) - dotprod += 2 * $real(dot(A.data[i, j], $trans(B.data[j, i]))) - end - dotprod += $real(dot(A[j, j], B[j, j])) - end - else - for j in 1:n - dotprod += $real(dot(A[j, j], B[j, j])) - for i in (j + 1):n - dotprod += 2 * $real(dot(A.data[i, j], $trans(B.data[j, i]))) - end - end - end - return dotprod - end - end -end - -function kron(A::Hermitian{<:Union{Real,Complex},<:StridedMatrix}, B::Hermitian{<:Union{Real,Complex},<:StridedMatrix}) - resultuplo = A.uplo == 'U' || B.uplo == 'U' ? :U : :L - C = Hermitian(Matrix{promote_op(*, eltype(A), eltype(B))}(undef, _kronsize(A, B)), resultuplo) - return kron!(C, A, B) -end -function kron(A::Symmetric{<:Number,<:StridedMatrix}, B::Symmetric{<:Number,<:StridedMatrix}) - resultuplo = A.uplo == 'U' || B.uplo == 'U' ? 
:U : :L - C = Symmetric(Matrix{promote_op(*, eltype(A), eltype(B))}(undef, _kronsize(A, B)), resultuplo) - return kron!(C, A, B) -end - -function kron!(C::Hermitian{<:Union{Real,Complex},<:StridedMatrix}, A::Hermitian{<:Union{Real,Complex},<:StridedMatrix}, B::Hermitian{<:Union{Real,Complex},<:StridedMatrix}) - size(C) == _kronsize(A, B) || throw(DimensionMismatch("kron!")) - if ((A.uplo == 'U' || B.uplo == 'U') && C.uplo != 'U') || ((A.uplo == 'L' && B.uplo == 'L') && C.uplo != 'L') - throw(ArgumentError("C.uplo must match A.uplo and B.uplo, got $(C.uplo) $(A.uplo) $(B.uplo)")) - end - _hermkron!(C.data, A.data, B.data, conj, real, A.uplo, B.uplo) - return C -end -function kron!(C::Symmetric{<:Number,<:StridedMatrix}, A::Symmetric{<:Number,<:StridedMatrix}, B::Symmetric{<:Number,<:StridedMatrix}) - size(C) == _kronsize(A, B) || throw(DimensionMismatch("kron!")) - if ((A.uplo == 'U' || B.uplo == 'U') && C.uplo != 'U') || ((A.uplo == 'L' && B.uplo == 'L') && C.uplo != 'L') - throw(ArgumentError("C.uplo must match A.uplo and B.uplo, got $(C.uplo) $(A.uplo) $(B.uplo)")) - end - _hermkron!(C.data, A.data, B.data, identity, identity, A.uplo, B.uplo) - return C -end - -function _hermkron!(C, A, B, conj, real, Auplo, Buplo) - n_A = size(A, 1) - n_B = size(B, 1) - @inbounds if Auplo == 'U' && Buplo == 'U' - for j = 1:n_A - jnB = (j - 1) * n_B - for i = 1:(j-1) - Aij = A[i, j] - inB = (i - 1) * n_B - for l = 1:n_B - for k = 1:(l-1) - C[inB+k, jnB+l] = Aij * B[k, l] - C[inB+l, jnB+k] = Aij * conj(B[k, l]) - end - C[inB+l, jnB+l] = Aij * real(B[l, l]) - end - end - Ajj = real(A[j, j]) - for l = 1:n_B - for k = 1:(l-1) - C[jnB+k, jnB+l] = Ajj * B[k, l] - end - C[jnB+l, jnB+l] = Ajj * real(B[l, l]) - end - end - elseif Auplo == 'U' && Buplo == 'L' - for j = 1:n_A - jnB = (j - 1) * n_B - for i = 1:(j-1) - Aij = A[i, j] - inB = (i - 1) * n_B - for l = 1:n_B - C[inB+l, jnB+l] = Aij * real(B[l, l]) - for k = (l+1):n_B - C[inB+l, jnB+k] = Aij * conj(B[k, l]) - C[inB+k, jnB+l] = Aij * B[k, l] - end - end - end - Ajj = real(A[j, j]) - for l = 1:n_B - C[jnB+l, jnB+l] = Ajj * real(B[l, l]) - for k = (l+1):n_B - C[jnB+l, jnB+k] = Ajj * conj(B[k, l]) - end - end - end - elseif Auplo == 'L' && Buplo == 'U' - for j = 1:n_A - jnB = (j - 1) * n_B - Ajj = real(A[j, j]) - for l = 1:n_B - for k = 1:(l-1) - C[jnB+k, jnB+l] = Ajj * B[k, l] - end - C[jnB+l, jnB+l] = Ajj * real(B[l, l]) - end - for i = (j+1):n_A - conjAij = conj(A[i, j]) - inB = (i - 1) * n_B - for l = 1:n_B - for k = 1:(l-1) - C[jnB+k, inB+l] = conjAij * B[k, l] - C[jnB+l, inB+k] = conjAij * conj(B[k, l]) - end - C[jnB+l, inB+l] = conjAij * real(B[l, l]) - end - end - end - else #if Auplo == 'L' && Buplo == 'L' - for j = 1:n_A - jnB = (j - 1) * n_B - Ajj = real(A[j, j]) - for l = 1:n_B - C[jnB+l, jnB+l] = Ajj * real(B[l, l]) - for k = (l+1):n_B - C[jnB+k, jnB+l] = Ajj * B[k, l] - end - end - for i = (j+1):n_A - Aij = A[i, j] - inB = (i - 1) * n_B - for l = 1:n_B - C[inB+l, jnB+l] = Aij * real(B[l, l]) - for k = (l+1):n_B - C[inB+k, jnB+l] = Aij * B[k, l] - C[inB+l, jnB+k] = Aij * conj(B[k, l]) - end - end - end - end - end -end - -(-)(A::Symmetric) = Symmetric(parentof_applytri(-, A), sym_uplo(A.uplo)) -(-)(A::Hermitian) = Hermitian(parentof_applytri(-, A), sym_uplo(A.uplo)) - -## Addition/subtraction -for f ∈ (:+, :-), Wrapper ∈ (:Hermitian, :Symmetric) - @eval function $f(A::$Wrapper, B::$Wrapper) - uplo = A.uplo == B.uplo ? 
sym_uplo(A.uplo) : (:U) - $Wrapper(parentof_applytri($f, A, B), uplo) - end -end - -for f in (:+, :-) - @eval begin - $f(A::Hermitian, B::Symmetric{<:Real}) = $f(A, Hermitian(parent(B), sym_uplo(B.uplo))) - $f(A::Symmetric{<:Real}, B::Hermitian) = $f(Hermitian(parent(A), sym_uplo(A.uplo)), B) - $f(A::SymTridiagonal, B::Symmetric) = $f(Symmetric(A, sym_uplo(B.uplo)), B) - $f(A::Symmetric, B::SymTridiagonal) = $f(A, Symmetric(B, sym_uplo(A.uplo))) - $f(A::SymTridiagonal{<:Real}, B::Hermitian) = $f(Hermitian(A, sym_uplo(B.uplo)), B) - $f(A::Hermitian, B::SymTridiagonal{<:Real}) = $f(A, Hermitian(B, sym_uplo(A.uplo))) - end -end - -*(A::HermOrSym, B::HermOrSym) = A * copyto!(similar(parent(B)), B) - -function dot(x::AbstractVector, A::RealHermSymComplexHerm, y::AbstractVector) - require_one_based_indexing(x, y) - n = length(x) - (n == length(y) == size(A, 1)) || throw(DimensionMismatch()) - data = A.data - r = dot(zero(eltype(x)), zero(eltype(A)), zero(eltype(y))) - iszero(n) && return r - if A.uplo == 'U' - @inbounds for j = 1:length(y) - r += dot(x[j], real(data[j,j]), y[j]) - @simd for i = 1:j-1 - Aij = data[i,j] - r += dot(x[i], Aij, y[j]) + dot(x[j], adjoint(Aij), y[i]) - end - end - else # A.uplo == 'L' - @inbounds for j = 1:length(y) - r += dot(x[j], real(data[j,j]), y[j]) - @simd for i = j+1:length(y) - Aij = data[i,j] - r += dot(x[i], Aij, y[j]) + dot(x[j], adjoint(Aij), y[i]) - end - end - end - return r -end - -# Scaling with Number -*(A::Symmetric, x::Number) = Symmetric(parentof_applytri(y -> y * x, A), sym_uplo(A.uplo)) -*(x::Number, A::Symmetric) = Symmetric(parentof_applytri(y -> x * y, A), sym_uplo(A.uplo)) -*(A::Hermitian, x::Real) = Hermitian(parentof_applytri(y -> y * x, A), sym_uplo(A.uplo)) -*(x::Real, A::Hermitian) = Hermitian(parentof_applytri(y -> x * y, A), sym_uplo(A.uplo)) -/(A::Symmetric, x::Number) = Symmetric(parentof_applytri(y -> y/x, A), sym_uplo(A.uplo)) -/(A::Hermitian, x::Real) = Hermitian(parentof_applytri(y -> y/x, A), sym_uplo(A.uplo)) - -factorize(A::HermOrSym) = _factorize(A) -function _factorize(A::HermOrSym{T}; check::Bool=true) where T - TT = typeof(sqrt(oneunit(T))) - if isdiag(A) - return Diagonal(A) - elseif TT <: BlasFloat - return bunchkaufman(A; check=check) - else # fallback - return lu(A; check=check) - end -end - -logabsdet(A::RealHermSymComplexHerm) = ((l, s) = logabsdet(_factorize(A; check=false)); return real(l), s) -logabsdet(A::Symmetric{<:Real}) = logabsdet(_factorize(A; check=false)) -logabsdet(A::Symmetric) = logabsdet(_factorize(A; check=false)) -logdet(A::RealHermSymComplexHerm) = real(logdet(_factorize(A; check=false))) -logdet(A::Symmetric{<:Real}) = logdet(_factorize(A; check=false)) -logdet(A::Symmetric) = logdet(_factorize(A; check=false)) -det(A::RealHermSymComplexHerm) = real(det(_factorize(A; check=false))) -det(A::Symmetric{<:Real}) = det(_factorize(A; check=false)) -det(A::Symmetric) = det(_factorize(A; check=false)) - -\(A::HermOrSym, B::AbstractVector) = \(factorize(A), B) -# Bunch-Kaufman solves can not utilize BLAS-3 for multiple right hand sides -# so using LU is faster for AbstractMatrix right hand side -\(A::HermOrSym, B::AbstractMatrix) = \(isdiag(A) ? Diagonal(A) : lu(A), B) - -function _inv(A::HermOrSym) - n = checksquare(A) - B = inv!(lu(A)) - conjugate = isa(A, Hermitian) - # symmetrize - if A.uplo == 'U' # add to upper triangle - @inbounds for i = 1:n, j = i:n - B[i,j] = conjugate ? 
(B[i,j] + conj(B[j,i])) / 2 : (B[i,j] + B[j,i]) / 2 - end - else # A.uplo == 'L', add to lower triangle - @inbounds for i = 1:n, j = i:n - B[j,i] = conjugate ? (B[j,i] + conj(B[i,j])) / 2 : (B[j,i] + B[i,j]) / 2 - end - end - B -end -# StridedMatrix restriction seems necessary due to inv! call in _inv above -inv(A::Hermitian{<:Any,<:StridedMatrix}) = Hermitian(_inv(A), sym_uplo(A.uplo)) -inv(A::Symmetric{<:Any,<:StridedMatrix}) = Symmetric(_inv(A), sym_uplo(A.uplo)) - -function svd(A::RealHermSymComplexHerm; full::Bool=false) - vals, vecs = eigen(A) - I = sortperm(vals; by=abs, rev=true) - permute!(vals, I) - Base.permutecols!!(vecs, I) # left-singular vectors - V = copy(vecs) # right-singular vectors - # shifting -1 from singular values to right-singular vectors - @inbounds for i = 1:length(vals) - if vals[i] < 0 - vals[i] = -vals[i] - for j = 1:size(V,1); V[j,i] = -V[j,i]; end - end - end - return SVD(vecs, vals, V') -end -function svd(A::RealHermSymComplexHerm{Float16}; full::Bool = false) - T = eltype(A) - F = svd(eigencopy_oftype(A, eigtype(T)); full) - return SVD{T}(F) -end - -function svdvals!(A::RealHermSymComplexHerm) - vals = eigvals!(A) - for i = 1:length(vals) - vals[i] = abs(vals[i]) - end - return sort!(vals, rev = true) -end - -# Matrix functions -^(A::Symmetric{<:Real}, p::Integer) = sympow(A, p) -^(A::Symmetric{<:Complex}, p::Integer) = sympow(A, p) -^(A::SymTridiagonal{<:Real}, p::Integer) = sympow(A, p) -^(A::SymTridiagonal{<:Complex}, p::Integer) = sympow(A, p) -function sympow(A::SymSymTri, p::Integer) - if p < 0 - return Symmetric(Base.power_by_squaring(inv(A), -p)) - else - return Symmetric(Base.power_by_squaring(A, p)) - end -end -for hermtype in (:Symmetric, :SymTridiagonal) - @eval begin - function ^(A::$hermtype{<:Real}, p::Real) - isinteger(p) && return integerpow(A, p) - F = eigen(A) - if all(λ -> λ ≥ 0, F.values) - return Symmetric((F.vectors * Diagonal((F.values).^p)) * F.vectors') - else - return Symmetric((F.vectors * Diagonal(complex.(F.values).^p)) * F.vectors') - end - end - function ^(A::$hermtype{<:Complex}, p::Real) - isinteger(p) && return integerpow(A, p) - return Symmetric(schurpow(A, p)) - end - end -end -function ^(A::Hermitian, p::Integer) - if p < 0 - retmat = Base.power_by_squaring(inv(A), -p) - else - retmat = Base.power_by_squaring(A, p) - end - for i in diagind(retmat, IndexStyle(retmat)) - retmat[i] = real(retmat[i]) - end - return Hermitian(retmat) -end -function ^(A::Hermitian{T}, p::Real) where T - isinteger(p) && return integerpow(A, p) - F = eigen(A) - if all(λ -> λ ≥ 0, F.values) - retmat = (F.vectors * Diagonal((F.values).^p)) * F.vectors' - if T <: Real - return Hermitian(retmat) - else - for i in diagind(retmat, IndexStyle(retmat)) - retmat[i] = real(retmat[i]) - end - return Hermitian(retmat) - end - else - retmat = (F.vectors * Diagonal((complex.(F.values).^p))) * F.vectors' - if T <: Real - return Symmetric(retmat) - else - return retmat - end - end -end - -for func in (:exp, :cos, :sin, :tan, :cosh, :sinh, :tanh, :atan, :asinh, :atanh, :cbrt) - @eval begin - function ($func)(A::RealHermSymSymTri) - F = eigen(A) - return wrappertype(A)((F.vectors * Diagonal(($func).(F.values))) * F.vectors') - end - function ($func)(A::Hermitian{<:Complex}) - F = eigen(A) - retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' - for i in diagind(retmat, IndexStyle(retmat)) - retmat[i] = real(retmat[i]) - end - return Hermitian(retmat) - end - end -end - -function cis(A::RealHermSymSymTri) - F = eigen(A) - return Symmetric(F.vectors 
.* cis.(F.values') * F.vectors') -end -function cis(A::Hermitian{<:Complex}) - F = eigen(A) - return F.vectors .* cis.(F.values') * F.vectors' -end - - -for func in (:acos, :asin) - @eval begin - function ($func)(A::RealHermSymSymTri) - F = eigen(A) - if all(λ -> -1 ≤ λ ≤ 1, F.values) - return wrappertype(A)((F.vectors * Diagonal(($func).(F.values))) * F.vectors') - else - return Symmetric((F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors') - end - end - function ($func)(A::Hermitian{<:Complex}) - F = eigen(A) - if all(λ -> -1 ≤ λ ≤ 1, F.values) - retmat = (F.vectors * Diagonal(($func).(F.values))) * F.vectors' - for i in diagind(retmat, IndexStyle(retmat)) - retmat[i] = real(retmat[i]) - end - return Hermitian(retmat) - else - return (F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors' - end - end - end -end - -function acosh(A::RealHermSymSymTri) - F = eigen(A) - if all(λ -> λ ≥ 1, F.values) - return wrappertype(A)((F.vectors * Diagonal(acosh.(F.values))) * F.vectors') - else - return Symmetric((F.vectors * Diagonal(acosh.(complex.(F.values)))) * F.vectors') - end -end -function acosh(A::Hermitian{<:Complex}) - F = eigen(A) - if all(λ -> λ ≥ 1, F.values) - retmat = (F.vectors * Diagonal(acosh.(F.values))) * F.vectors' - for i in diagind(retmat, IndexStyle(retmat)) - retmat[i] = real(retmat[i]) - end - return Hermitian(retmat) - else - return (F.vectors * Diagonal(acosh.(complex.(F.values)))) * F.vectors' - end -end - -function sincos(A::RealHermSymSymTri) - n = checksquare(A) - F = eigen(A) - T = float(eltype(F.values)) - S, C = Diagonal(similar(A, T, (n,))), Diagonal(similar(A, T, (n,))) - for i in eachindex(S.diag, C.diag, F.values) - S.diag[i], C.diag[i] = sincos(F.values[i]) - end - return wrappertype(A)((F.vectors * S) * F.vectors'), wrappertype(A)((F.vectors * C) * F.vectors') -end -function sincos(A::Hermitian{<:Complex}) - n = checksquare(A) - F = eigen(A) - T = float(eltype(F.values)) - S, C = Diagonal(similar(A, T, (n,))), Diagonal(similar(A, T, (n,))) - for i in eachindex(S.diag, C.diag, F.values) - S.diag[i], C.diag[i] = sincos(F.values[i]) - end - retmatS, retmatC = (F.vectors * S) * F.vectors', (F.vectors * C) * F.vectors' - for i in diagind(retmatS, IndexStyle(retmatS)) - retmatS[i] = real(retmatS[i]) - retmatC[i] = real(retmatC[i]) - end - return Hermitian(retmatS), Hermitian(retmatC) -end - - -for func in (:log, :sqrt) - # sqrt has rtol arg to handle matrices that are semidefinite up to roundoff errors - rtolarg = func === :sqrt ? Any[Expr(:kw, :(rtol::Real), :(eps(real(float(one(T))))*size(A,1)))] : Any[] - rtolval = func === :sqrt ? 
:(-maximum(abs, F.values) * rtol) : 0 - @eval begin - function ($func)(A::RealHermSymSymTri{T}; $(rtolarg...)) where {T<:Real} - F = eigen(A) - λ₀ = $rtolval # treat λ ≥ λ₀ as "zero" eigenvalues up to roundoff - if all(λ -> λ ≥ λ₀, F.values) - return wrappertype(A)((F.vectors * Diagonal(($func).(max.(0, F.values)))) * F.vectors') - else - return Symmetric((F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors') - end - end - function ($func)(A::Hermitian{T}; $(rtolarg...)) where {T<:Complex} - n = checksquare(A) - F = eigen(A) - λ₀ = $rtolval # treat λ ≥ λ₀ as "zero" eigenvalues up to roundoff - if all(λ -> λ ≥ λ₀, F.values) - retmat = (F.vectors * Diagonal(($func).(max.(0, F.values)))) * F.vectors' - for i in diagind(retmat, IndexStyle(retmat)) - retmat[i] = real(retmat[i]) - end - return Hermitian(retmat) - else - retmat = (F.vectors * Diagonal(($func).(complex.(F.values)))) * F.vectors' - return retmat - end - end - end -end - -""" - hermitianpart(A::AbstractMatrix, uplo::Symbol=:U) -> Hermitian - -Return the Hermitian part of the square matrix `A`, defined as `(A + A') / 2`, as a -[`Hermitian`](@ref) matrix. For real matrices `A`, this is also known as the symmetric part -of `A`; it is also sometimes called the "operator real part". The optional argument `uplo` controls the corresponding argument of the -[`Hermitian`](@ref) view. For real matrices, the latter is equivalent to a -[`Symmetric`](@ref) view. - -See also [`hermitianpart!`](@ref) for the corresponding in-place operation. - -!!! compat "Julia 1.10" - This function requires Julia 1.10 or later. -""" -hermitianpart(A::AbstractMatrix, uplo::Symbol=:U) = Hermitian(_hermitianpart(A), uplo) - -""" - hermitianpart!(A::AbstractMatrix, uplo::Symbol=:U) -> Hermitian - -Overwrite the square matrix `A` in-place with its Hermitian part `(A + A') / 2`, and return -[`Hermitian(A, uplo)`](@ref). For real matrices `A`, this is also known as the symmetric -part of `A`. - -See also [`hermitianpart`](@ref) for the corresponding out-of-place operation. - -!!! compat "Julia 1.10" - This function requires Julia 1.10 or later. -""" -hermitianpart!(A::AbstractMatrix, uplo::Symbol=:U) = Hermitian(_hermitianpart!(A), uplo) - -_hermitianpart(A::AbstractMatrix) = _hermitianpart!(copy_similar(A, Base.promote_op(/, eltype(A), Int))) -_hermitianpart(a::Number) = real(a) - -function _hermitianpart!(A::AbstractMatrix) - require_one_based_indexing(A) - n = checksquare(A) - @inbounds for j in 1:n - A[j, j] = _hermitianpart(A[j, j]) - for i in 1:j-1 - A[i, j] = val = (A[i, j] + adjoint(A[j, i])) / 2 - A[j, i] = adjoint(val) - end - end - return A -end - -## structured matrix printing ## -function Base.replace_in_print_matrix(A::HermOrSym,i::Integer,j::Integer,s::AbstractString) - ijminmax = minmax(i, j) - inds = A.uplo == 'U' ? ijminmax : reverse(ijminmax) - Base.replace_in_print_matrix(parent(A), inds..., s) -end diff --git a/stdlib/LinearAlgebra/src/symmetriceigen.jl b/stdlib/LinearAlgebra/src/symmetriceigen.jl deleted file mode 100644 index 68a1b29f5dbc7..0000000000000 --- a/stdlib/LinearAlgebra/src/symmetriceigen.jl +++ /dev/null @@ -1,410 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -# preserve HermOrSym wrapper -# Call `copytrito!` instead of `copy_similar` to only copy the matching triangular half -eigencopy_oftype(A::Hermitian, S) = Hermitian(copytrito!(similar(parent(A), S, size(A)), A.data, A.uplo), sym_uplo(A.uplo)) -eigencopy_oftype(A::Symmetric, S) = Symmetric(copytrito!(similar(parent(A), S, size(A)), A.data, A.uplo), sym_uplo(A.uplo)) -eigencopy_oftype(A::Symmetric{<:Complex}, S) = copyto!(similar(parent(A), S), A) - -default_eigen_alg(A) = DivideAndConquer() - -# Eigensolvers for symmetric and Hermitian matrices -function eigen!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, alg::Algorithm = default_eigen_alg(A); sortby::Union{Function,Nothing}=nothing) - if alg === DivideAndConquer() - Eigen(sorteig!(LAPACK.syevd!('V', A.uplo, A.data)..., sortby)...) - elseif alg === QRIteration() - Eigen(sorteig!(LAPACK.syev!('V', A.uplo, A.data)..., sortby)...) - elseif alg === RobustRepresentations() - Eigen(sorteig!(LAPACK.syevr!('V', 'A', A.uplo, A.data, 0.0, 0.0, 0, 0, -1.0)..., sortby)...) - else - throw(ArgumentError("Unsupported value for `alg` keyword.")) - end -end - -""" - eigen(A::Union{Hermitian, Symmetric}, alg::Algorithm = default_eigen_alg(A)) -> Eigen - -Compute the eigenvalue decomposition of `A`, returning an [`Eigen`](@ref) factorization object `F` -which contains the eigenvalues in `F.values` and the eigenvectors in the columns of the -matrix `F.vectors`. (The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) - -Iterating the decomposition produces the components `F.values` and `F.vectors`. - -`alg` specifies which algorithm and LAPACK method to use for eigenvalue decomposition: -- `alg = DivideAndConquer()` (default): Calls `LAPACK.syevd!`. -- `alg = QRIteration()`: Calls `LAPACK.syev!`. -- `alg = RobustRepresentations()`: Multiple relatively robust representations method, Calls `LAPACK.syevr!`. - -See James W. Demmel et al, SIAM J. Sci. Comput. 30, 3, 1508 (2008) for -a comparison of the accuracy and performance of different algorithms. - -The default `alg` used may change in the future. - -!!! compat "Julia 1.12" - The `alg` keyword argument requires Julia 1.12 or later. - -The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). -""" -function eigen(A::RealHermSymComplexHerm, alg::Algorithm = default_eigen_alg(A); sortby::Union{Function,Nothing}=nothing) - _eigen(A, alg; sortby) -end - -# we dispatch on the eltype in an internal method to avoid ambiguities -function _eigen(A::RealHermSymComplexHerm, alg::Algorithm; sortby) - S = eigtype(eltype(A)) - eigen!(eigencopy_oftype(A, S), alg; sortby) -end - -function _eigen(A::RealHermSymComplexHerm{Float16}, alg::Algorithm; sortby::Union{Function,Nothing}=nothing) - S = eigtype(eltype(A)) - E = eigen!(eigencopy_oftype(A, S), alg, sortby=sortby) - values = convert(AbstractVector{Float16}, E.values) - vectors = convert(AbstractMatrix{isreal(E.vectors) ? Float16 : Complex{Float16}}, E.vectors) - return Eigen(values, vectors) -end - -eigen!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = - Eigen(LAPACK.syevr!('V', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)...) 
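For readers skimming this removed `symmetriceigen.jl` code: a minimal usage sketch of the symmetric eigendecomposition entry points described above, assuming only the exported `LinearAlgebra` API (illustrative only, not part of the patch hunk; the `alg` positional argument from the docstring is a 1.12+ addition and only referenced in a comment).

```julia
using LinearAlgebra

A = Symmetric([2.0 1.0 0.0; 1.0 3.0 1.0; 0.0 1.0 4.0])

F = eigen(A)                 # Eigen factorization: F.values and F.vectors
F.vectors * Diagonal(F.values) * F.vectors' ≈ A   # reconstructs A

# On Julia 1.12+ an algorithm can be selected explicitly, e.g.
# eigen(A, LinearAlgebra.QRIteration())
```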
- -""" - eigen(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> Eigen - -Compute the eigenvalue decomposition of `A`, returning an [`Eigen`](@ref) factorization object `F` -which contains the eigenvalues in `F.values` and the eigenvectors in the columns of the -matrix `F.vectors`. (The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) - -Iterating the decomposition produces the components `F.values` and `F.vectors`. - -The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). - -The [`UnitRange`](@ref) `irange` specifies indices of the sorted eigenvalues to search for. - -!!! note - If `irange` is not `1:n`, where `n` is the dimension of `A`, then the returned factorization - will be a *truncated* factorization. -""" -function eigen(A::RealHermSymComplexHerm, irange::UnitRange) - S = eigtype(eltype(A)) - eigen!(eigencopy_oftype(A, S), irange) -end - -eigen!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal} = - Eigen(LAPACK.syevr!('V', 'V', A.uplo, A.data, convert(T, vl), convert(T, vh), 0, 0, -1.0)...) - -""" - eigen(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> Eigen - -Compute the eigenvalue decomposition of `A`, returning an [`Eigen`](@ref) factorization object `F` -which contains the eigenvalues in `F.values` and the eigenvectors in the columns of the -matrix `F.vectors`. (The `k`th eigenvector can be obtained from the slice `F.vectors[:, k]`.) - -Iterating the decomposition produces the components `F.values` and `F.vectors`. - -The following functions are available for `Eigen` objects: [`inv`](@ref), [`det`](@ref), and [`isposdef`](@ref). - -`vl` is the lower bound of the window of eigenvalues to search for, and `vu` is the upper bound. - -!!! note - If [`vl`, `vu`] does not contain all eigenvalues of `A`, then the returned factorization - will be a *truncated* factorization. -""" -function eigen(A::RealHermSymComplexHerm, vl::Real, vh::Real) - S = eigtype(eltype(A)) - eigen!(eigencopy_oftype(A, S), vl, vh) -end - - -function eigvals!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, alg::Algorithm = default_eigen_alg(A); sortby::Union{Function,Nothing}=nothing) - vals::Vector{real(eltype(A))} = if alg === DivideAndConquer() - LAPACK.syevd!('N', A.uplo, A.data) - elseif alg === QRIteration() - LAPACK.syev!('N', A.uplo, A.data) - elseif alg === RobustRepresentations() - LAPACK.syevr!('N', 'A', A.uplo, A.data, 0.0, 0.0, 0, 0, -1.0)[1] - else - throw(ArgumentError("Unsupported value for `alg` keyword.")) - end - !isnothing(sortby) && sort!(vals, by=sortby) - return vals -end - -""" - eigvals(A::Union{Hermitian, Symmetric}, alg::Algorithm = default_eigen_alg(A))) -> values - -Return the eigenvalues of `A`. - -`alg` specifies which algorithm and LAPACK method to use for eigenvalue decomposition: -- `alg = DivideAndConquer()` (default): Calls `LAPACK.syevd!`. -- `alg = QRIteration()`: Calls `LAPACK.syev!`. -- `alg = RobustRepresentations()`: Multiple relatively robust representations method, Calls `LAPACK.syevr!`. - -See James W. Demmel et al, SIAM J. Sci. Comput. 30, 3, 1508 (2008) for -a comparison of the accuracy and performance of different methods. - -The default `alg` used may change in the future. 
-""" -function eigvals(A::RealHermSymComplexHerm, alg::Algorithm = default_eigen_alg(A); sortby::Union{Function,Nothing}=nothing) - S = eigtype(eltype(A)) - eigvals!(eigencopy_oftype(A, S), alg; sortby) -end - - -""" - eigvals!(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> values - -Same as [`eigvals`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. -`irange` is a range of eigenvalue *indices* to search for - for instance, the 2nd to 8th eigenvalues. -""" -eigvals!(A::RealHermSymComplexHerm{<:BlasReal,<:StridedMatrix}, irange::UnitRange) = - LAPACK.syevr!('N', 'I', A.uplo, A.data, 0.0, 0.0, irange.start, irange.stop, -1.0)[1] - -""" - eigvals(A::Union{SymTridiagonal, Hermitian, Symmetric}, irange::UnitRange) -> values - -Return the eigenvalues of `A`. It is possible to calculate only a subset of the -eigenvalues by specifying a [`UnitRange`](@ref) `irange` covering indices of the sorted eigenvalues, -e.g. the 2nd to 8th eigenvalues. - -# Examples -```jldoctest -julia> A = SymTridiagonal([1.; 2.; 1.], [2.; 3.]) -3×3 SymTridiagonal{Float64, Vector{Float64}}: - 1.0 2.0 ⋅ - 2.0 2.0 3.0 - ⋅ 3.0 1.0 - -julia> eigvals(A, 2:2) -1-element Vector{Float64}: - 0.9999999999999996 - -julia> eigvals(A) -3-element Vector{Float64}: - -2.1400549446402604 - 1.0000000000000002 - 5.140054944640259 -``` -""" -function eigvals(A::RealHermSymComplexHerm, irange::UnitRange) - S = eigtype(eltype(A)) - eigvals!(eigencopy_oftype(A, S), irange) -end - -""" - eigvals!(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> values - -Same as [`eigvals`](@ref), but saves space by overwriting the input `A`, instead of creating a copy. -`vl` is the lower bound of the interval to search for eigenvalues, and `vu` is the upper bound. -""" -eigvals!(A::RealHermSymComplexHerm{T,<:StridedMatrix}, vl::Real, vh::Real) where {T<:BlasReal} = - LAPACK.syevr!('N', 'V', A.uplo, A.data, convert(T, vl), convert(T, vh), 0, 0, -1.0)[1] - -""" - eigvals(A::Union{SymTridiagonal, Hermitian, Symmetric}, vl::Real, vu::Real) -> values - -Return the eigenvalues of `A`. It is possible to calculate only a subset of the eigenvalues -by specifying a pair `vl` and `vu` for the lower and upper boundaries of the eigenvalues. - -# Examples -```jldoctest -julia> A = SymTridiagonal([1.; 2.; 1.], [2.; 3.]) -3×3 SymTridiagonal{Float64, Vector{Float64}}: - 1.0 2.0 ⋅ - 2.0 2.0 3.0 - ⋅ 3.0 1.0 - -julia> eigvals(A, -1, 2) -1-element Vector{Float64}: - 1.0000000000000009 - -julia> eigvals(A) -3-element Vector{Float64}: - -2.1400549446402604 - 1.0000000000000002 - 5.140054944640259 -``` -""" -function eigvals(A::RealHermSymComplexHerm, vl::Real, vh::Real) - S = eigtype(eltype(A)) - eigvals!(eigencopy_oftype(A, S), vl, vh) -end - -eigmax(A::RealHermSymComplexHerm{<:Real}) = eigvals(A, size(A, 1):size(A, 1))[1] -eigmin(A::RealHermSymComplexHerm{<:Real}) = eigvals(A, 1:1)[1] - -function eigen(A::HermOrSym{TA}, B::HermOrSym{TB}; kws...) where {TA,TB} - S = promote_type(eigtype(TA), TB) - return eigen!(eigencopy_oftype(A, S), eigencopy_oftype(B, S); kws...) -end - -function eigen!(A::HermOrSym{T,S}, B::HermOrSym{T,S}; sortby::Union{Function,Nothing}=nothing) where {T<:BlasReal,S<:StridedMatrix} - vals, vecs, _ = LAPACK.sygvd!(1, 'V', A.uplo, A.data, B.uplo == A.uplo ? B.data : copy(B.data')) - GeneralizedEigen(sorteig!(vals, vecs, sortby)...) 
-end -function eigen!(A::Hermitian{T,S}, B::Hermitian{T,S}; sortby::Union{Function,Nothing}=nothing) where {T<:BlasComplex,S<:StridedMatrix} - vals, vecs, _ = LAPACK.sygvd!(1, 'V', A.uplo, A.data, B.uplo == A.uplo ? B.data : copy(B.data')) - GeneralizedEigen(sorteig!(vals, vecs, sortby)...) -end - -function eigen(A::AbstractMatrix, C::Cholesky; sortby::Union{Function,Nothing}=nothing) - if ishermitian(A) - eigen!(eigencopy_oftype(Hermitian(A), eigtype(eltype(A))), C; sortby) - else - eigen!(copy_similar(A, eigtype(eltype(A))), C; sortby) - end -end -function eigen!(A::AbstractMatrix, C::Cholesky; sortby::Union{Function,Nothing}=nothing) - # Cholesky decomposition based eigenvalues and eigenvectors - vals, w = eigen!(UtiAUi!(A, C.U)) - vecs = C.U \ w - GeneralizedEigen(sorteig!(vals, vecs, sortby)...) -end - -# Bunch-Kaufmann (LDLT) based solution for generalized eigenvalues and eigenvectors -function eigen(A::StridedMatrix{T}, B::BunchKaufman{T,<:AbstractMatrix}; sortby::Union{Function,Nothing}=nothing) where {T<:BlasFloat} - eigen!(copy(A), copy(B); sortby) -end -function eigen!(A::StridedMatrix{T}, B::BunchKaufman{T,<:StridedMatrix}; sortby::Union{Function,Nothing}=nothing) where {T<:BlasFloat} - M, TD, p = getproperties!(B) - # Compute generalized eigenvalues of equivalent matrix: - # A' = inv(Tridiagonal(dl,d,du))*inv(M)*P*A*P'*inv(M') - # See: https://github.com/JuliaLang/julia/pull/50471#issuecomment-1627836781 - permutecols!(A, p) - permuterows!(A, p) - ldiv!(M, A) - rdiv!(A, M') - ldiv!(TD, A) - vals, vecs = eigen!(A; sortby) - # Compute generalized eigenvectors from 'vecs': - # vecs = P'*inv(M')*vecs - # See: https://github.com/JuliaLang/julia/pull/50471#issuecomment-1627836781 - M = B.uplo == 'U' ? UnitUpperTriangular{eltype(vecs)}(M) : UnitLowerTriangular{eltype(vecs)}(M) ; - ldiv!(M', vecs) - invpermuterows!(vecs, p) - GeneralizedEigen(sorteig!(vals, vecs, sortby)...) -end - -# LU based solution for generalized eigenvalues and eigenvectors -function eigen(A::StridedMatrix{T}, F::LU{T,<:StridedMatrix}; sortby::Union{Function,Nothing}=nothing) where {T} - return eigen!(copy(A), copy(F); sortby) -end -function eigen!(A::StridedMatrix{T}, F::LU{T,<:StridedMatrix}; sortby::Union{Function,Nothing}=nothing) where {T} - L = UnitLowerTriangular(F.L) - U = UpperTriangular(F.U) - permuterows!(A, F.p) - ldiv!(L, A) - rdiv!(A, U) - vals, vecs = eigen!(A; sortby) - # Compute generalized eigenvectors from 'vecs': - # vecs = P'*inv(M')*vecs - # See: https://github.com/JuliaLang/julia/pull/50471#issuecomment-1627836781 - U = UpperTriangular{eltype(vecs)}(U) - ldiv!(U, vecs) - GeneralizedEigen(sorteig!(vals, vecs, sortby)...) -end - -# Perform U' \ A / U in-place, where U::Union{UpperTriangular,Diagonal} -UtiAUi!(A, U) = _UtiAUi!(A, U) -UtiAUi!(A::Symmetric, U) = Symmetric(_UtiAUi!(copytri!(parent(A), A.uplo), U), sym_uplo(A.uplo)) -UtiAUi!(A::Hermitian, U) = Hermitian(_UtiAUi!(copytri!(parent(A), A.uplo, true), U), sym_uplo(A.uplo)) -_UtiAUi!(A, U) = rdiv!(ldiv!(U', A), U) - -function eigvals(A::HermOrSym{TA}, B::HermOrSym{TB}; kws...) where {TA,TB} - S = promote_type(eigtype(TA), TB) - return eigvals!(eigencopy_oftype(A, S), eigencopy_oftype(B, S); kws...) -end - -function eigvals!(A::HermOrSym{T,S}, B::HermOrSym{T,S}; sortby::Union{Function,Nothing}=nothing) where {T<:BlasReal,S<:StridedMatrix} - vals = LAPACK.sygvd!(1, 'N', A.uplo, A.data, B.uplo == A.uplo ? 
B.data : copy(B.data'))[1] - isnothing(sortby) || sort!(vals, by=sortby) - return vals -end -function eigvals!(A::Hermitian{T,S}, B::Hermitian{T,S}; sortby::Union{Function,Nothing}=nothing) where {T<:BlasComplex,S<:StridedMatrix} - vals = LAPACK.sygvd!(1, 'N', A.uplo, A.data, B.uplo == A.uplo ? B.data : copy(B.data'))[1] - isnothing(sortby) || sort!(vals, by=sortby) - return vals -end -eigvecs(A::HermOrSym) = eigvecs(eigen(A)) - -function eigvals(A::AbstractMatrix, C::Cholesky; sortby::Union{Function,Nothing}=nothing) - if ishermitian(A) - eigvals!(eigencopy_oftype(Hermitian(A), eigtype(eltype(A))), C; sortby) - else - eigvals!(copy_similar(A, eigtype(eltype(A))), C; sortby) - end -end -function eigvals!(A::AbstractMatrix{T}, C::Cholesky{T, <:AbstractMatrix}; sortby::Union{Function,Nothing}=nothing) where {T<:Number} - # Cholesky decomposition based eigenvalues - return eigvals!(UtiAUi!(A, C.U); sortby) -end - -# Bunch-Kaufmann (LDLT) based solution for generalized eigenvalues -function eigvals(A::StridedMatrix{T}, B::BunchKaufman{T,<:AbstractMatrix}; sortby::Union{Function,Nothing}=nothing) where {T<:BlasFloat} - eigvals!(copy(A), copy(B); sortby) -end -function eigvals!(A::StridedMatrix{T}, B::BunchKaufman{T,<:StridedMatrix}; sortby::Union{Function,Nothing}=nothing) where {T<:BlasFloat} - M, TD, p = getproperties!(B) - # Compute generalized eigenvalues of equivalent matrix: - # A' = inv(Tridiagonal(dl,d,du))*inv(M)*P*A*P'*inv(M') - # See: https://github.com/JuliaLang/julia/pull/50471#issuecomment-1627836781 - permutecols!(A, p) - permuterows!(A, p) - ldiv!(M, A) - rdiv!(A, M') - ldiv!(TD, A) - return eigvals!(A; sortby) -end - -# LU based solution for generalized eigenvalues -function eigvals(A::StridedMatrix{T}, F::LU{T,<:StridedMatrix}; sortby::Union{Function,Nothing}=nothing) where {T} - return eigvals!(copy(A), copy(F); sortby) -end -function eigvals!(A::StridedMatrix{T}, F::LU{T,<:StridedMatrix}; sortby::Union{Function,Nothing}=nothing) where {T} - L = UnitLowerTriangular(F.L) - U = UpperTriangular(F.U) - # Compute generalized eigenvalues of equivalent matrix: - # A' = inv(L)*(P*A)*inv(U) - # See: https://github.com/JuliaLang/julia/pull/50471#issuecomment-1627836781 - permuterows!(A, F.p) - ldiv!(L, A) - rdiv!(A, U) - return eigvals!(A; sortby) -end - -eigen(A::Hermitian{<:Complex, <:Tridiagonal}; kwargs...) = - _eigenhermtridiag(A; kwargs...) -# disambiguation -function eigen(A::Hermitian{Complex{Float16}, <:Tridiagonal}; kwargs...) - E = _eigenhermtridiag(A; kwargs...) - values = convert(AbstractVector{Float16}, E.values) - vectors = convert(AbstractMatrix{ComplexF16}, E.vectors) - return Eigen(values, vectors) -end -function _eigenhermtridiag(A::Hermitian{<:Complex,<:Tridiagonal}; kwargs...) - (; dl, d, du) = parent(A) - N = length(d) - if N <= 1 - eigen(parent(A); kwargs...) - else - if A.uplo == 'U' - E = du' - Er = abs.(du) - else - E = dl - Er = abs.(E) - end - S = Vector{eigtype(eltype(A))}(undef, N) - S[1] = 1 - for i ∈ 1:N-1 - S[i+1] = iszero(Er[i]) ? oneunit(eltype(S)) : S[i] * sign(E[i]) - end - B = SymTridiagonal(float.(real.(d)), Er) - Λ, Φ = eigen(B; kwargs...) - return Eigen(Λ, Diagonal(S) * Φ) - end -end - -function eigvals(A::Hermitian{Complex{T}, <:Tridiagonal}; kwargs...) where {T} - (; dl, d, du) = parent(A) - Er = A.uplo == 'U' ? abs.(du) : abs.(dl) - eigvals(SymTridiagonal(float.(real.(d)), Er); kwargs...) 
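The `_eigenhermtridiag` path above rescales a complex Hermitian tridiagonal problem to a real `SymTridiagonal` one; a small sketch of the user-facing call (real diagonal, conjugate off-diagonals), for illustration only:

```julia
using LinearAlgebra

T = Tridiagonal([1.0 - 2.0im, 0.5 + 1.0im],               # subdiagonal
                [2.0 + 0.0im, 3.0 + 0.0im, 4.0 + 0.0im],   # real diagonal
                [1.0 + 2.0im, 0.5 - 1.0im])                # superdiagonal
H = Hermitian(T)

eigvals(H)                                    # real eigenvalues
F = eigen(H)
F.vectors * Diagonal(F.values) * F.vectors' ≈ H
```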
-end diff --git a/stdlib/LinearAlgebra/src/transpose.jl b/stdlib/LinearAlgebra/src/transpose.jl deleted file mode 100644 index a36919b2e557a..0000000000000 --- a/stdlib/LinearAlgebra/src/transpose.jl +++ /dev/null @@ -1,257 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -adjoint(a::AbstractArray) = error("adjoint not defined for $(typeof(a)). Consider using `permutedims` for higher-dimensional arrays.") -transpose(a::AbstractArray) = error("transpose not defined for $(typeof(a)). Consider using `permutedims` for higher-dimensional arrays.") - -## Matrix transposition ## - -""" - transpose!(dest,src) - -Transpose array `src` and store the result in the preallocated array `dest`, which should -have a size corresponding to `(size(src,2),size(src,1))`. No in-place transposition is -supported and unexpected results will happen if `src` and `dest` have overlapping memory -regions. - -# Examples -```jldoctest -julia> A = [3+2im 9+2im; 8+7im 4+6im] -2×2 Matrix{Complex{Int64}}: - 3+2im 9+2im - 8+7im 4+6im - -julia> B = zeros(Complex{Int64}, 2, 2) -2×2 Matrix{Complex{Int64}}: - 0+0im 0+0im - 0+0im 0+0im - -julia> transpose!(B, A); - -julia> B -2×2 Matrix{Complex{Int64}}: - 3+2im 8+7im - 9+2im 4+6im - -julia> A -2×2 Matrix{Complex{Int64}}: - 3+2im 9+2im - 8+7im 4+6im -``` -""" -transpose!(B::AbstractMatrix, A::AbstractMatrix) = transpose_f!(transpose, B, A) - -""" - adjoint!(dest,src) - -Conjugate transpose array `src` and store the result in the preallocated array `dest`, which -should have a size corresponding to `(size(src,2),size(src,1))`. No in-place transposition -is supported and unexpected results will happen if `src` and `dest` have overlapping memory -regions. - -# Examples -```jldoctest -julia> A = [3+2im 9+2im; 8+7im 4+6im] -2×2 Matrix{Complex{Int64}}: - 3+2im 9+2im - 8+7im 4+6im - -julia> B = zeros(Complex{Int64}, 2, 2) -2×2 Matrix{Complex{Int64}}: - 0+0im 0+0im - 0+0im 0+0im - -julia> adjoint!(B, A); - -julia> B -2×2 Matrix{Complex{Int64}}: - 3-2im 8-7im - 9-2im 4-6im - -julia> A -2×2 Matrix{Complex{Int64}}: - 3+2im 9+2im - 8+7im 4+6im -``` -""" -adjoint!(B::AbstractMatrix, A::AbstractMatrix) = transpose_f!(adjoint, B, A) - -@noinline function check_transpose_axes(axesA, axesB) - axesB == reverse(axesA) || throw(DimensionMismatch("axes of the destination are incompatible with that of the source")) -end - -function transpose!(B::AbstractVector, A::AbstractMatrix) - check_transpose_axes((axes(B,1), axes(B,2)), axes(A)) - copyto!(B, A) -end -function transpose!(B::AbstractMatrix, A::AbstractVector) - check_transpose_axes(axes(B), (axes(A,1), axes(A,2))) - copyto!(B, A) -end -function adjoint!(B::AbstractVector, A::AbstractMatrix) - check_transpose_axes((axes(B,1), axes(B,2)), axes(A)) - ccopy!(B, A) -end -function adjoint!(B::AbstractMatrix, A::AbstractVector) - check_transpose_axes(axes(B), (axes(A,1), axes(A,2))) - ccopy!(B, A) -end - -const transposebaselength=64 -function transpose_f!(f, B::AbstractMatrix, A::AbstractMatrix) - inds = axes(A) - check_transpose_axes(axes(B), inds) - - m, n = length(inds[1]), length(inds[2]) - if m*n<=4*transposebaselength - @inbounds begin - for j = inds[2] - for i = inds[1] - B[j,i] = f(A[i,j]) - end - end - end - else - transposeblock!(f,B,A,m,n,first(inds[1])-1,first(inds[2])-1) - end - return B -end -function transposeblock!(f, B::AbstractMatrix, A::AbstractMatrix, m::Int, n::Int, offseti::Int, offsetj::Int) - if m*n<=transposebaselength - @inbounds begin - for j = offsetj .+ (1:n) - for i = offseti .+ (1:m) 
- B[j,i] = f(A[i,j]) - end - end - end - elseif m>n - newm=m>>1 - transposeblock!(f,B,A,newm,n,offseti,offsetj) - transposeblock!(f,B,A,m-newm,n,offseti+newm,offsetj) - else - newn=n>>1 - transposeblock!(f,B,A,m,newn,offseti,offsetj) - transposeblock!(f,B,A,m,n-newn,offseti,offsetj+newn) - end - return B -end - -function ccopy!(B, A) - RB, RA = eachindex(B), eachindex(A) - if RB == RA - for i = RB - B[i] = adjoint(A[i]) - end - else - for (i,j) = zip(RB, RA) - B[i] = adjoint(A[j]) - end - end - return B -end - -""" - copy(A::Transpose) - copy(A::Adjoint) - -Eagerly evaluate the lazy matrix transpose/adjoint. -Note that the transposition is applied recursively to elements. - -This operation is intended for linear algebra usage - for general data manipulation see -[`permutedims`](@ref Base.permutedims), which is non-recursive. - -# Examples -```jldoctest -julia> A = [1 2im; -3im 4] -2×2 Matrix{Complex{Int64}}: - 1+0im 0+2im - 0-3im 4+0im - -julia> T = transpose(A) -2×2 transpose(::Matrix{Complex{Int64}}) with eltype Complex{Int64}: - 1+0im 0-3im - 0+2im 4+0im - -julia> copy(T) -2×2 Matrix{Complex{Int64}}: - 1+0im 0-3im - 0+2im 4+0im -``` -""" -copy(::Union{Transpose,Adjoint}) - -Base.copy(A::TransposeAbsMat) = transpose!(similar(A.parent, reverse(axes(A.parent))), A.parent) -Base.copy(A::AdjointAbsMat) = adjoint!(similar(A.parent, reverse(axes(A.parent))), A.parent) - -""" - copy_transpose!(B::AbstractVecOrMat, ir_dest::AbstractRange{Int}, jr_dest::AbstractRange{Int}, - A::AbstractVecOrMat, ir_src::AbstractRange{Int}, jr_src::AbstractRange{Int}) -> B - -Efficiently copy elements of matrix `A` to `B` with transposition as follows: - - B[ir_dest, jr_dest] = transpose(A)[jr_src, ir_src] - -The elements `B[ir_dest, jr_dest]` are overwritten. Furthermore, -the index range parameters must satisfy `length(ir_dest) == length(jr_src)` and -`length(jr_dest) == length(ir_src)`. -""" -copy_transpose!(B::AbstractVecOrMat, ir_dest::AbstractRange{Int}, jr_dest::AbstractRange{Int}, - A::AbstractVecOrMat, ir_src::AbstractRange{Int}, jr_src::AbstractRange{Int}) = - _copy_adjtrans!(B, ir_dest, jr_dest, A, ir_src, jr_src, transpose) - -""" - copy_adjoint!(B::AbstractVecOrMat, ir_dest::AbstractRange{Int}, jr_dest::AbstractRange{Int}, - A::AbstractVecOrMat, ir_src::AbstractRange{Int}, jr_src::AbstractRange{Int}) -> B - -Efficiently copy elements of matrix `A` to `B` with adjunction as follows: - - B[ir_dest, jr_dest] = adjoint(A)[jr_src, ir_src] - -The elements `B[ir_dest, jr_dest]` are overwritten. Furthermore, -the index range parameters must satisfy `length(ir_dest) == length(jr_src)` and -`length(jr_dest) == length(ir_src)`. 
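As the `copy(::Transpose)` docstring above stresses, transposition is applied recursively to the elements while `permutedims` is not; a tiny sketch of the difference:

```julia
using LinearAlgebra

A = [1 2im; -3im 4]
copy(transpose(A)) == permutedims(A)   # true: scalars transpose to themselves

B = fill([1 2; 3 4], 1, 2)             # a 1×2 matrix whose elements are matrices
copy(transpose(B)) == permutedims(B)   # false: transpose also transposes each block
```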
-""" -copy_adjoint!(B::AbstractVecOrMat, ir_dest::AbstractRange{Int}, jr_dest::AbstractRange{Int}, - A::AbstractVecOrMat, ir_src::AbstractRange{Int}, jr_src::AbstractRange{Int}) = - _copy_adjtrans!(B, ir_dest, jr_dest, A, ir_src, jr_src, adjoint) - -function _copy_adjtrans!(B::AbstractVecOrMat, ir_dest::AbstractRange{Int}, jr_dest::AbstractRange{Int}, - A::AbstractVecOrMat, ir_src::AbstractRange{Int}, jr_src::AbstractRange{Int}, - tfun::T) where {T} - if length(ir_dest) != length(jr_src) - throw(ArgumentError(LazyString("source and destination must have same size (got ", - length(jr_src)," and ",length(ir_dest),")"))) - end - if length(jr_dest) != length(ir_src) - throw(ArgumentError(LazyString("source and destination must have same size (got ", - length(ir_src)," and ",length(jr_dest),")"))) - end - @boundscheck checkbounds(B, ir_dest, jr_dest) - @boundscheck checkbounds(A, ir_src, jr_src) - idest = first(ir_dest) - for jsrc in jr_src - jdest = first(jr_dest) - for isrc in ir_src - B[idest,jdest] = tfun(A[isrc,jsrc]) - jdest += step(jr_dest) - end - idest += step(ir_dest) - end - return B -end - -function copy_similar(A::AdjOrTransAbsMat, ::Type{T}) where {T} - Ap = parent(A) - f! = inplace_adj_or_trans(A) - return f!(similar(Ap, T, reverse(axes(Ap))), Ap) -end - -function Base.copyto_unaliased!(deststyle::IndexStyle, dest::AbstractMatrix, srcstyle::IndexCartesian, src::AdjOrTransAbsMat) - if axes(dest) == axes(src) - f! = inplace_adj_or_trans(src) - f!(dest, parent(src)) - else - @invoke Base.copyto_unaliased!(deststyle::IndexStyle, dest::AbstractArray, srcstyle::IndexStyle, src::AbstractArray) - end - return dest -end diff --git a/stdlib/LinearAlgebra/src/triangular.jl b/stdlib/LinearAlgebra/src/triangular.jl deleted file mode 100644 index b602e08256afc..0000000000000 --- a/stdlib/LinearAlgebra/src/triangular.jl +++ /dev/null @@ -1,2990 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -## Triangular - -# could be renamed to Triangular when that name has been fully deprecated -""" - AbstractTriangular - -Supertype of triangular matrix types such as [`LowerTriangular`](@ref), [`UpperTriangular`](@ref), -[`UnitLowerTriangular`](@ref) and [`UnitUpperTriangular`](@ref). -""" -abstract type AbstractTriangular{T} <: AbstractMatrix{T} end - -# First loop through all methods that don't need special care for upper/lower and unit diagonal -for t in (:LowerTriangular, :UnitLowerTriangular, :UpperTriangular, :UnitUpperTriangular) - @eval begin - struct $t{T,S<:AbstractMatrix{T}} <: AbstractTriangular{T} - data::S - - function $t{T,S}(data) where {T,S<:AbstractMatrix{T}} - require_one_based_indexing(data) - checksquare(data) - new{T,S}(data) - end - end - $t(A::$t) = A - $t{T}(A::$t{T}) where {T} = A - $t(A::AbstractMatrix) = $t{eltype(A), typeof(A)}(A) - $t{T}(A::AbstractMatrix) where {T} = $t(convert(AbstractMatrix{T}, A)) - $t{T}(A::$t) where {T} = $t(convert(AbstractMatrix{T}, A.data)) - - AbstractMatrix{T}(A::$t) where {T} = $t{T}(A) - AbstractMatrix{T}(A::$t{T}) where {T} = copy(A) - - size(A::$t) = size(A.data) - axes(A::$t) = axes(A.data) - - # For A<:AbstractTriangular, similar(A[, neweltype]) should yield a matrix with the same - # triangular type and underlying storage type as A. The following method covers these cases. - similar(A::$t, ::Type{T}) where {T} = $t(similar(parent(A), T)) - # On the other hand, similar(A, [neweltype,] shape...) should yield a matrix of the underlying - # storage type of A (not wrapped in a triangular type). 
The following method covers these cases. - similar(A::$t, ::Type{T}, dims::Dims{N}) where {T,N} = similar(parent(A), T, dims) - - copy(A::$t) = $t(copy(A.data)) - Base.unaliascopy(A::$t) = $t(Base.unaliascopy(A.data)) - - real(A::$t{<:Complex}) = (B = real(A.data); $t(B)) - real(A::$t{<:Complex, <:StridedMaybeAdjOrTransMat}) = $t(real.(A)) - end -end - -""" - LowerTriangular(A::AbstractMatrix) - -Construct a `LowerTriangular` view of the matrix `A`. - -# Examples -```jldoctest -julia> A = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 9.0] -3×3 Matrix{Float64}: - 1.0 2.0 3.0 - 4.0 5.0 6.0 - 7.0 8.0 9.0 - -julia> LowerTriangular(A) -3×3 LowerTriangular{Float64, Matrix{Float64}}: - 1.0 ⋅ ⋅ - 4.0 5.0 ⋅ - 7.0 8.0 9.0 -``` -""" -LowerTriangular -""" - UpperTriangular(A::AbstractMatrix) - -Construct an `UpperTriangular` view of the matrix `A`. - -# Examples -```jldoctest -julia> A = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 9.0] -3×3 Matrix{Float64}: - 1.0 2.0 3.0 - 4.0 5.0 6.0 - 7.0 8.0 9.0 - -julia> UpperTriangular(A) -3×3 UpperTriangular{Float64, Matrix{Float64}}: - 1.0 2.0 3.0 - ⋅ 5.0 6.0 - ⋅ ⋅ 9.0 -``` -""" -UpperTriangular -""" - UnitLowerTriangular(A::AbstractMatrix) - -Construct a `UnitLowerTriangular` view of the matrix `A`. -Such a view has the [`oneunit`](@ref) of the [`eltype`](@ref) -of `A` on its diagonal. - -# Examples -```jldoctest -julia> A = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 9.0] -3×3 Matrix{Float64}: - 1.0 2.0 3.0 - 4.0 5.0 6.0 - 7.0 8.0 9.0 - -julia> UnitLowerTriangular(A) -3×3 UnitLowerTriangular{Float64, Matrix{Float64}}: - 1.0 ⋅ ⋅ - 4.0 1.0 ⋅ - 7.0 8.0 1.0 -``` -""" -UnitLowerTriangular -""" - UnitUpperTriangular(A::AbstractMatrix) - -Construct an `UnitUpperTriangular` view of the matrix `A`. -Such a view has the [`oneunit`](@ref) of the [`eltype`](@ref) -of `A` on its diagonal. 
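For context on the `Unit*Triangular` wrappers documented above: the stored diagonal is ignored and reads back as `oneunit(eltype)`. A minimal sketch, separate from the patch itself:

```julia
using LinearAlgebra

A = [9.0 5.0; 7.0 9.0]

UnitUpperTriangular(A)[1, 1] == 1.0   # diagonal reads as one, not the stored 9.0
UnitUpperTriangular(A)[2, 1] == 0.0   # below the diagonal reads as zero
UpperTriangular(A)[1, 1]     == 9.0   # the non-unit wrapper keeps the stored diagonal
```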
- -# Examples -```jldoctest -julia> A = [1.0 2.0 3.0; 4.0 5.0 6.0; 7.0 8.0 9.0] -3×3 Matrix{Float64}: - 1.0 2.0 3.0 - 4.0 5.0 6.0 - 7.0 8.0 9.0 - -julia> UnitUpperTriangular(A) -3×3 UnitUpperTriangular{Float64, Matrix{Float64}}: - 1.0 2.0 3.0 - ⋅ 1.0 6.0 - ⋅ ⋅ 1.0 -``` -""" -UnitUpperTriangular - -const UpperOrUnitUpperTriangular{T,S} = Union{UpperTriangular{T,S}, UnitUpperTriangular{T,S}} -const LowerOrUnitLowerTriangular{T,S} = Union{LowerTriangular{T,S}, UnitLowerTriangular{T,S}} -const UpperOrLowerTriangular{T,S} = Union{UpperOrUnitUpperTriangular{T,S}, LowerOrUnitLowerTriangular{T,S}} -const UnitUpperOrUnitLowerTriangular{T,S} = Union{UnitUpperTriangular{T,S}, UnitLowerTriangular{T,S}} - -uppertriangular(M) = UpperTriangular(M) -lowertriangular(M) = LowerTriangular(M) - -uppertriangular(U::UpperOrUnitUpperTriangular) = U -lowertriangular(U::LowerOrUnitLowerTriangular) = U - -Base.dataids(A::UpperOrLowerTriangular) = Base.dataids(A.data) - -imag(A::UpperTriangular) = UpperTriangular(imag(A.data)) -imag(A::LowerTriangular) = LowerTriangular(imag(A.data)) -imag(A::UpperTriangular{<:Any,<:StridedMaybeAdjOrTransMat}) = imag.(A) -imag(A::LowerTriangular{<:Any,<:StridedMaybeAdjOrTransMat}) = imag.(A) -function imag(A::UnitLowerTriangular) - L = LowerTriangular(A.data) - Lim = similar(L) # must be mutable to set diagonals to zero - Lim .= imag.(L) - for i in axes(Lim,1) - Lim[i,i] = zero(Lim[i,i]) - end - return Lim -end -function imag(A::UnitUpperTriangular) - U = UpperTriangular(A.data) - Uim = similar(U) # must be mutable to set diagonals to zero - Uim .= imag.(U) - for i in axes(Uim,1) - Uim[i,i] = zero(Uim[i,i]) - end - return Uim -end - -parent(A::UpperOrLowerTriangular) = A.data - -# For strided matrices, we may only loop over the filled triangle -copy(A::UpperOrLowerTriangular{<:Any, <:StridedMaybeAdjOrTransMat}) = copyto!(similar(A), A) - -# then handle all methods that requires specific handling of upper/lower and unit diagonal - -function full(A::Union{UpperTriangular,LowerTriangular}) - return _triangularize(A)(parent(A)) -end -function full(A::UnitUpperOrUnitLowerTriangular) - isupper = A isa UnitUpperTriangular - Ap = _triangularize(A)(parent(A), isupper ? 1 : -1) - diagview(Ap) .= diagview(A) - return Ap -end - -function full!(A::LowerTriangular) - B = A.data - tril!(B) - B -end -function full!(A::UnitLowerTriangular) - B = A.data - tril!(B) - for i in axes(A,1) - B[i,i] = oneunit(eltype(B)) - end - B -end -function full!(A::UpperTriangular) - B = A.data - triu!(B) - B -end -function full!(A::UnitUpperTriangular) - B = A.data - triu!(B) - for i in axes(A,1) - B[i,i] = oneunit(eltype(B)) - end - B -end - -_shouldforwardindex(U::UpperTriangular, row::Integer, col::Integer) = row <= col -_shouldforwardindex(U::LowerTriangular, row::Integer, col::Integer) = row >= col -_shouldforwardindex(U::UnitUpperTriangular, row::Integer, col::Integer) = row < col -_shouldforwardindex(U::UnitLowerTriangular, row::Integer, col::Integer) = row > col - -Base.isassigned(A::UpperOrLowerTriangular, i::Int, j::Int) = - _shouldforwardindex(A, i, j) ? isassigned(A.data, i, j) : true - -Base.isstored(A::UpperOrLowerTriangular, i::Int, j::Int) = - _shouldforwardindex(A, i, j) ? Base.isstored(A.data, i, j) : false - -@propagate_inbounds getindex(A::Union{UnitLowerTriangular{T}, UnitUpperTriangular{T}}, i::Int, j::Int) where {T} = - _shouldforwardindex(A, i, j) ? 
A.data[i,j] : ifelse(i == j, oneunit(T), zero(T)) -@propagate_inbounds getindex(A::Union{LowerTriangular, UpperTriangular}, i::Int, j::Int) = - _shouldforwardindex(A, i, j) ? A.data[i,j] : diagzero(A,i,j) - -_shouldforwardindex(U::UpperTriangular, b::BandIndex) = b.band >= 0 -_shouldforwardindex(U::LowerTriangular, b::BandIndex) = b.band <= 0 -_shouldforwardindex(U::UnitUpperTriangular, b::BandIndex) = b.band > 0 -_shouldforwardindex(U::UnitLowerTriangular, b::BandIndex) = b.band < 0 - -# these specialized getindex methods enable constant-propagation of the band -Base.@constprop :aggressive @propagate_inbounds function getindex(A::Union{UnitLowerTriangular{T}, UnitUpperTriangular{T}}, b::BandIndex) where {T} - _shouldforwardindex(A, b) ? A.data[b] : ifelse(b.band == 0, oneunit(T), zero(T)) -end -Base.@constprop :aggressive @propagate_inbounds function getindex(A::Union{LowerTriangular, UpperTriangular}, b::BandIndex) - _shouldforwardindex(A, b) ? A.data[b] : diagzero(A.data, b) -end - -_zero_triangular_half_str(::Type{<:UpperOrUnitUpperTriangular}) = "lower" -_zero_triangular_half_str(::Type{<:LowerOrUnitLowerTriangular}) = "upper" - -@noinline function throw_nonzeroerror(T, @nospecialize(x), i, j) - Ts = _zero_triangular_half_str(T) - Tn = nameof(T) - throw(ArgumentError( - lazy"cannot set index in the $Ts triangular part ($i, $j) of an $Tn matrix to a nonzero value ($x)")) -end -@noinline function throw_nononeerror(T, @nospecialize(x), i, j) - Tn = nameof(T) - throw(ArgumentError( - lazy"cannot set index on the diagonal ($i, $j) of an $Tn matrix to a non-unit value ($x)")) -end - -@propagate_inbounds function setindex!(A::UpperTriangular, x, i::Integer, j::Integer) - if i > j - iszero(x) || throw_nonzeroerror(typeof(A), x, i, j) - else - A.data[i,j] = x - end - return A -end - -@propagate_inbounds function setindex!(A::UnitUpperTriangular, x, i::Integer, j::Integer) - if i > j - iszero(x) || throw_nonzeroerror(typeof(A), x, i, j) - elseif i == j - x == oneunit(x) || throw_nononeerror(typeof(A), x, i, j) - else - A.data[i,j] = x - end - return A -end - -@propagate_inbounds function setindex!(A::LowerTriangular, x, i::Integer, j::Integer) - if i < j - iszero(x) || throw_nonzeroerror(typeof(A), x, i, j) - else - A.data[i,j] = x - end - return A -end - -@propagate_inbounds function setindex!(A::UnitLowerTriangular, x, i::Integer, j::Integer) - if i < j - iszero(x) || throw_nonzeroerror(typeof(A), x, i, j) - elseif i == j - x == oneunit(x) || throw_nononeerror(typeof(A), x, i, j) - else - A.data[i,j] = x - end - return A -end - -@noinline function throw_setindex_structuralzero_error(T, @nospecialize(x)) - Ts = _zero_triangular_half_str(T) - Tn = nameof(T) - throw(ArgumentError( - lazy"cannot set indices in the $Ts triangular part of an $Tn matrix to a nonzero value ($x)")) -end - -@inline function fill!(A::UpperTriangular, x) - iszero(x) || throw_setindex_structuralzero_error(typeof(A), x) - for col in axes(A,2), row in firstindex(A,1):col - @inbounds A.data[row, col] = x - end - A -end -@inline function fill!(A::LowerTriangular, x) - iszero(x) || throw_setindex_structuralzero_error(typeof(A), x) - for col in axes(A,2), row in col:lastindex(A,1) - @inbounds A.data[row, col] = x - end - A -end - -Base._reverse(A::UpperOrUnitUpperTriangular, dims::Integer) = reverse!(Matrix(A); dims) -Base._reverse(A::UpperTriangular, ::Colon) = LowerTriangular(reverse(A.data)) -Base._reverse(A::UnitUpperTriangular, ::Colon) = UnitLowerTriangular(reverse(A.data)) -Base._reverse(A::LowerOrUnitLowerTriangular, 
dims) = reverse!(Matrix(A); dims) -Base._reverse(A::LowerTriangular, ::Colon) = UpperTriangular(reverse(A.data)) -Base._reverse(A::UnitLowerTriangular, ::Colon) = UnitUpperTriangular(reverse(A.data)) - -## structured matrix methods ## -function Base.replace_in_print_matrix(A::Union{UpperTriangular,UnitUpperTriangular}, - i::Integer, j::Integer, s::AbstractString) - return i <= j ? s : Base.replace_with_centered_mark(s) -end -function Base.replace_in_print_matrix(A::Union{LowerTriangular,UnitLowerTriangular}, - i::Integer, j::Integer, s::AbstractString) - return i >= j ? s : Base.replace_with_centered_mark(s) -end - -istril(A::UnitLowerTriangular, k::Integer=0) = k >= 0 -istriu(A::UnitUpperTriangular, k::Integer=0) = k <= 0 -Base.@constprop :aggressive function istril(A::LowerTriangular, k::Integer=0) - k >= 0 && return true - return _istril(A, k) -end -# additional indirection to dispatch to optimized method for banded parents (defined in special.jl) -@inline function _istril(A::LowerTriangular, k) - P = parent(A) - for j in max(firstindex(P,2), k + 2):lastindex(P,2) - _iszero(@view P[max(j, begin):min(j - k - 1, end), j]) || return false - end - return true -end - -Base.@constprop :aggressive function istriu(A::UpperTriangular, k::Integer=0) - k <= 0 && return true - return _istriu(A, k) -end -# additional indirection to dispatch to optimized method for banded parents (defined in special.jl) -@inline function _istriu(A::UpperTriangular, k) - P = parent(A) - m = size(A, 1) - for j in firstindex(P,2):min(m + k - 1, lastindex(P,2)) - _iszero(@view P[max(begin, j - k + 1):min(j, end), j]) || return false - end - return true -end - -istril(A::Adjoint, k::Integer=0) = istriu(A.parent, -k) -istril(A::Transpose, k::Integer=0) = istriu(A.parent, -k) -istriu(A::Adjoint, k::Integer=0) = istril(A.parent, -k) -istriu(A::Transpose, k::Integer=0) = istril(A.parent, -k) - -function tril!(A::UpperTriangular{T}, k::Integer=0) where {T} - if k < 0 - fill!(A.data, zero(T)) - return A - elseif k == 0 - for j in axes(A.data,2), i in intersect(axes(A.data,1), 1:j-1) - A.data[i,j] = zero(T) - end - return A - else - return UpperTriangular(tril!(A.data,k)) - end -end -function triu!(A::UpperTriangular, k::Integer=0) - if k > 0 - for j in axes(A.data,2), i in intersect(axes(A.data,1), range(stop=j, length=k)) - A.data[i,j] = zero(eltype(A)) - end - end - return A -end - -function tril!(A::UnitUpperTriangular{T}, k::Integer=0) where {T} - if k < 0 - fill!(A.data, zero(T)) - return UpperTriangular(A.data) - elseif k == 0 - fill!(A.data, zero(T)) - for i in diagind(A.data, IndexStyle(A.data)) - A.data[i] = oneunit(T) - end - return UpperTriangular(A.data) - else - for i in diagind(A.data, IndexStyle(A.data)) - A.data[i] = oneunit(T) - end - return UpperTriangular(tril!(A.data,k)) - end -end - -function triu!(A::UnitUpperTriangular, k::Integer=0) - for i in diagind(A.data, IndexStyle(A.data)) - A.data[i] = oneunit(eltype(A)) - end - return triu!(UpperTriangular(A.data), k) -end - -function triu!(A::LowerTriangular{T}, k::Integer=0) where {T} - if k > 0 - fill!(A.data, zero(T)) - return A - elseif k == 0 - for j in axes(A.data,2), i in j+1:lastindex(A.data,1) - A.data[i,j] = zero(T) - end - return A - else - return LowerTriangular(triu!(A.data, k)) - end -end - -function tril!(A::LowerTriangular, k::Integer=0) - if k < 0 - for j in axes(A.data,2), i in intersect(range(j, length=-k), axes(A.data,1)) - A.data[i, j] = zero(eltype(A)) - end - end - A -end - -function triu!(A::UnitLowerTriangular{T}, k::Integer=0) where T - 
if k > 0 - fill!(A.data, zero(T)) - return LowerTriangular(A.data) - elseif k == 0 - fill!(A.data, zero(T)) - for i in diagind(A.data, IndexStyle(A.data)) - A.data[i] = oneunit(T) - end - return LowerTriangular(A.data) - else - for i in diagind(A.data, IndexStyle(A.data)) - A.data[i] = oneunit(T) - end - return LowerTriangular(triu!(A.data, k)) - end -end - -function tril!(A::UnitLowerTriangular, k::Integer=0) - for i in diagind(A.data, IndexStyle(A.data)) - A.data[i] = oneunit(eltype(A)) - end - return tril!(LowerTriangular(A.data), k) -end - -adjoint(A::LowerTriangular) = UpperTriangular(adjoint(A.data)) -adjoint(A::UpperTriangular) = LowerTriangular(adjoint(A.data)) -adjoint(A::UnitLowerTriangular) = UnitUpperTriangular(adjoint(A.data)) -adjoint(A::UnitUpperTriangular) = UnitLowerTriangular(adjoint(A.data)) -transpose(A::LowerTriangular) = UpperTriangular(transpose(A.data)) -transpose(A::UpperTriangular) = LowerTriangular(transpose(A.data)) -transpose(A::UnitLowerTriangular) = UnitUpperTriangular(transpose(A.data)) -transpose(A::UnitUpperTriangular) = UnitLowerTriangular(transpose(A.data)) - -transpose!(A::LowerTriangular) = UpperTriangular(copytri!(A.data, 'L', false, true)) -transpose!(A::UnitLowerTriangular) = UnitUpperTriangular(copytri!(A.data, 'L', false, false)) -transpose!(A::UpperTriangular) = LowerTriangular(copytri!(A.data, 'U', false, true)) -transpose!(A::UnitUpperTriangular) = UnitLowerTriangular(copytri!(A.data, 'U', false, false)) -adjoint!(A::LowerTriangular) = UpperTriangular(copytri!(A.data, 'L' , true, true)) -adjoint!(A::UnitLowerTriangular) = UnitUpperTriangular(copytri!(A.data, 'L' , true, false)) -adjoint!(A::UpperTriangular) = LowerTriangular(copytri!(A.data, 'U' , true, true)) -adjoint!(A::UnitUpperTriangular) = UnitLowerTriangular(copytri!(A.data, 'U' , true, false)) - -diag(A::UpperOrLowerTriangular) = diag(A.data) -diag(A::Union{UnitLowerTriangular, UnitUpperTriangular}) = fill(oneunit(eltype(A)), size(A,1)) - -# Unary operations --(A::LowerTriangular) = LowerTriangular(-A.data) --(A::UpperTriangular) = UpperTriangular(-A.data) -function -(A::UnitLowerTriangular) - Adata = A.data - Anew = similar(Adata) # must be mutable, even if Adata is not - @. Anew = -Adata - for i in axes(A, 1) - Anew[i, i] = -A[i, i] - end - LowerTriangular(Anew) -end -function -(A::UnitUpperTriangular) - Adata = A.data - Anew = similar(Adata) # must be mutable, even if Adata is not - @. Anew = -Adata - for i in axes(A, 1) - Anew[i, i] = -A[i, i] - end - UpperTriangular(Anew) -end - -# use broadcasting if the parents are strided, where we loop only over the triangular part -for TM in (:LowerTriangular, :UpperTriangular) - @eval -(A::$TM{<:Any, <:StridedMaybeAdjOrTransMat}) = broadcast(-, A) -end - -tr(A::UpperOrLowerTriangular) = tr(A.data) -tr(A::Union{UnitLowerTriangular, UnitUpperTriangular}) = size(A, 1) * oneunit(eltype(A)) - -for T in (:UpperOrUnitUpperTriangular, :LowerOrUnitLowerTriangular) - @eval @propagate_inbounds function copyto!(dest::$T, U::$T) - if axes(dest) != axes(U) - @invoke copyto!(dest::AbstractArray, U::AbstractArray) - else - _copyto!(dest, U) - end - return dest - end -end - -# copy and scale -for (T, UT) in ((:UpperTriangular, :UnitUpperTriangular), (:LowerTriangular, :UnitLowerTriangular)) - @eval @inline function _copyto!(A::$T, B::$T) - @boundscheck checkbounds(A, axes(B)...) 
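A brief illustrative sketch of the wrapper-swapping `adjoint`/`transpose` methods and the constant unit diagonal handled in the surrounding code (not part of this diff):

```julia
using LinearAlgebra

L = LowerTriangular([1 0; 2 3])
transpose(L) isa UpperTriangular   # transposing swaps Lower <-> Upper

U = UnitUpperTriangular([9 5; 0 9])
diag(U) == [1, 1]                  # unit diagonal, independent of the stored 9s
tr(U) == 2
```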
- copytrito!(parent(A), parent(B), uplo_char(A)) - return A - end - @eval @inline function _copyto!(A::$UT, B::$T) - for dind in diagind(A, IndexStyle(A)) - if A[dind] != B[dind] - throw_nononeerror(typeof(A), B[dind], Tuple(dind)...) - end - end - _copyto!($T(parent(A)), B) - return A - end -end -@inline function _copyto!(A::UpperOrUnitUpperTriangular, B::UnitUpperTriangular) - @boundscheck checkbounds(A, axes(B)...) - B2 = Base.unalias(A, B) - Ap = parent(A) - B2p = parent(B2) - for j in axes(B2,2) - for i in firstindex(Ap,1):j-1 - @inbounds Ap[i,j] = B2p[i,j] - end - if A isa UpperTriangular # copy diagonal - @inbounds Ap[j,j] = B2[j,j] - end - end - return A -end -@inline function _copyto!(A::LowerOrUnitLowerTriangular, B::UnitLowerTriangular) - @boundscheck checkbounds(A, axes(B)...) - B2 = Base.unalias(A, B) - Ap = parent(A) - B2p = parent(B2) - for j in axes(B2,2) - if A isa LowerTriangular # copy diagonal - @inbounds Ap[j,j] = B2[j,j] - end - for i in j+1:lastindex(Ap,1) - @inbounds Ap[i,j] = B2p[i,j] - end - end - return A -end - -_triangularize(::UpperOrUnitUpperTriangular) = triu -_triangularize(::LowerOrUnitLowerTriangular) = tril -_triangularize!(::UpperOrUnitUpperTriangular) = triu! -_triangularize!(::LowerOrUnitLowerTriangular) = tril! - -@propagate_inbounds function copyto!(dest::StridedMatrix, U::UpperOrLowerTriangular) - if axes(dest) != axes(U) - @invoke copyto!(dest::StridedMatrix, U::AbstractArray) - else - _copyto!(dest, U) - end - return dest -end -@propagate_inbounds function _copyto!(dest::StridedMatrix, U::UpperOrLowerTriangular) - copytrito!(dest, parent(U), U isa UpperOrUnitUpperTriangular ? 'U' : 'L') - copytrito!(dest, U, U isa UpperOrUnitUpperTriangular ? 'L' : 'U') - return dest -end -@propagate_inbounds function _copyto!(dest::StridedMatrix, U::UpperOrLowerTriangular{<:Any, <:StridedMatrix}) - U2 = Base.unalias(dest, U) - copyto_unaliased!(dest, U2) - return dest -end -# for strided matrices, we explicitly loop over the arrays to improve cache locality -# This fuses the copytrito! for the two halves -@inline function copyto_unaliased!(dest::StridedMatrix, U::UpperOrUnitUpperTriangular{<:Any, <:StridedMatrix}) - @boundscheck checkbounds(dest, axes(U)...) - isunit = U isa UnitUpperTriangular - for col in axes(dest,2) - for row in firstindex(dest,1):col-isunit - @inbounds dest[row,col] = U.data[row,col] - end - for row in col+!isunit:lastindex(dest,1) - @inbounds dest[row,col] = U[row,col] - end - end - return dest -end -@inline function copyto_unaliased!(dest::StridedMatrix, L::LowerOrUnitLowerTriangular{<:Any, <:StridedMatrix}) - @boundscheck checkbounds(dest, axes(L)...) - isunit = L isa UnitLowerTriangular - for col in axes(dest,2) - for row in firstindex(dest,1):col-!isunit - @inbounds dest[row,col] = L[row,col] - end - for row in col+isunit:lastindex(dest,1) - @inbounds dest[row,col] = L.data[row,col] - end - end - return dest -end - -Base.@constprop :aggressive function copytrito_triangular!(Bdata, Adata, uplo, uplomatch, sz) - if uplomatch - copytrito!(Bdata, Adata, uplo) - else - BLAS.chkuplo(uplo) - LAPACK.lacpy_size_check(size(Bdata), sz) - # only the diagonal is copied in this case - copyto!(diagview(Bdata), diagview(Adata)) - end - return Bdata -end - -function copytrito!(B::UpperTriangular, A::UpperTriangular, uplo::AbstractChar) - m,n = size(A) - copytrito_triangular!(B.data, A.data, uplo, uplo == 'U', (m, m < n ? 
m : n)) - return B -end -function copytrito!(B::LowerTriangular, A::LowerTriangular, uplo::AbstractChar) - m,n = size(A) - copytrito_triangular!(B.data, A.data, uplo, uplo == 'L', (n < m ? n : m, n)) - return B -end - -uppertridata(A) = A -lowertridata(A) = A -# we restrict these specializations only to strided matrices to avoid cases where an UpperTriangular type -# doesn't share its indexing with the parent -uppertridata(A::UpperTriangular{<:Any, <:StridedMatrix}) = parent(A) -lowertridata(A::LowerTriangular{<:Any, <:StridedMatrix}) = parent(A) - -@inline _rscale_add!(A::AbstractTriangular, B::AbstractTriangular, C::Number, alpha::Number, beta::Number) = - @stable_muladdmul _triscale!(A, B, C, MulAddMul(alpha, beta)) -@inline _lscale_add!(A::AbstractTriangular, B::Number, C::AbstractTriangular, alpha::Number, beta::Number) = - @stable_muladdmul _triscale!(A, B, C, MulAddMul(alpha, beta)) - -function checksize1(A, B) - szA, szB = size(A), size(B) - szA == szB || throw(DimensionMismatch(lazy"size of A, $szA, does not match size of B, $szB")) - checksquare(B) -end - -function _triscale!(A::UpperTriangular, B::UpperTriangular, c::Number, _add) - checksize1(A, B) - iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j in axes(B.data,2) - for i in firstindex(B.data,1):j - @inbounds _modify!(_add, B.data[i,j] * c, A.data, (i,j)) - end - end - return A -end -function _triscale!(A::UpperTriangular, c::Number, B::UpperTriangular, _add) - checksize1(A, B) - iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j in axes(B.data,2) - for i in firstindex(B.data,1):j - @inbounds _modify!(_add, c * B.data[i,j], A.data, (i,j)) - end - end - return A -end -function _triscale!(A::UpperOrUnitUpperTriangular, B::UnitUpperTriangular, c::Number, _add) - checksize1(A, B) - iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j in axes(B.data,2) - @inbounds _modify!(_add, c, A, (j,j)) - for i in firstindex(B.data,1):(j - 1) - @inbounds _modify!(_add, B.data[i,j] * c, A.data, (i,j)) - end - end - return A -end -function _triscale!(A::UpperOrUnitUpperTriangular, c::Number, B::UnitUpperTriangular, _add) - checksize1(A, B) - iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j in axes(B.data,2) - @inbounds _modify!(_add, c, A, (j,j)) - for i in firstindex(B.data,1):(j - 1) - @inbounds _modify!(_add, c * B.data[i,j], A.data, (i,j)) - end - end - return A -end -function _triscale!(A::LowerTriangular, B::LowerTriangular, c::Number, _add) - checksize1(A, B) - iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j in axes(B.data,2) - for i in j:lastindex(B.data,1) - @inbounds _modify!(_add, B.data[i,j] * c, A.data, (i,j)) - end - end - return A -end -function _triscale!(A::LowerTriangular, c::Number, B::LowerTriangular, _add) - checksize1(A, B) - iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j in axes(B.data,2) - for i in j:lastindex(B.data,1) - @inbounds _modify!(_add, c * B.data[i,j], A.data, (i,j)) - end - end - return A -end -function _triscale!(A::LowerOrUnitLowerTriangular, B::UnitLowerTriangular, c::Number, _add) - checksize1(A, B) - iszero(_add.alpha) && return _rmul_or_fill!(A, _add.beta) - for j in axes(B.data,2) - @inbounds _modify!(_add, c, A, (j,j)) - for i in (j + 1):lastindex(B.data,1) - @inbounds _modify!(_add, B.data[i,j] * c, A.data, (i,j)) - end - end - return A -end -function _triscale!(A::LowerOrUnitLowerTriangular, c::Number, B::UnitLowerTriangular, _add) - checksize1(A, B) - iszero(_add.alpha) && return _rmul_or_fill!(A, 
_add.beta) - for j in axes(B.data,2) - @inbounds _modify!(_add, c, A, (j,j)) - for i in (j + 1):lastindex(B.data,1) - @inbounds _modify!(_add, c * B.data[i,j], A.data, (i,j)) - end - end - return A -end - -function _trirdiv!(A::UpperTriangular, B::UpperOrUnitUpperTriangular, c::Number) - checksize1(A, B) - for j in axes(B,2) - for i in firstindex(B,1):j - @inbounds A[i, j] = B[i, j] / c - end - end - return A -end -function _trirdiv!(A::LowerTriangular, B::LowerOrUnitLowerTriangular, c::Number) - checksize1(A, B) - for j in axes(B,2) - for i in j:lastindex(B,1) - @inbounds A[i, j] = B[i, j] / c - end - end - return A -end -function _trildiv!(A::UpperTriangular, c::Number, B::UpperOrUnitUpperTriangular) - checksize1(A, B) - for j in axes(B,2) - for i in firstindex(B,1):j - @inbounds A[i, j] = c \ B[i, j] - end - end - return A -end -function _trildiv!(A::LowerTriangular, c::Number, B::LowerOrUnitLowerTriangular) - checksize1(A, B) - for j in axes(B,2) - for i in j:lastindex(B,1) - @inbounds A[i, j] = c \ B[i, j] - end - end - return A -end - -rmul!(A::UpperOrLowerTriangular, c::Number) = @inline _triscale!(A, A, c, MulAddMul()) -lmul!(c::Number, A::UpperOrLowerTriangular) = @inline _triscale!(A, c, A, MulAddMul()) - -function dot(x::AbstractVector, A::UpperTriangular, y::AbstractVector) - require_one_based_indexing(x, y) - m = size(A, 1) - (length(x) == m == length(y)) || throw(DimensionMismatch()) - if iszero(m) - return dot(zero(eltype(x)), zero(eltype(A)), zero(eltype(y))) - end - x₁ = x[1] - r = dot(x₁, A[1,1], y[1]) - @inbounds for j in axes(A, 2)[2:end] - yj = y[j] - if !iszero(yj) - temp = adjoint(A[1,j]) * x₁ - @simd for i in 2:j - temp += adjoint(A[i,j]) * x[i] - end - r += dot(temp, yj) - end - end - return r -end -function dot(x::AbstractVector, A::UnitUpperTriangular, y::AbstractVector) - require_one_based_indexing(x, y) - m = size(A, 1) - (length(x) == m == length(y)) || throw(DimensionMismatch()) - if iszero(m) - return dot(zero(eltype(x)), zero(eltype(A)), zero(eltype(y))) - end - x₁ = first(x) - r = dot(x₁, y[1]) - @inbounds for j in axes(A, 2)[2:end] - yj = y[j] - if !iszero(yj) - temp = adjoint(A[1,j]) * x₁ - @simd for i in 2:j-1 - temp += adjoint(A[i,j]) * x[i] - end - r += dot(temp, yj) - r += dot(x[j], yj) - end - end - return r -end -function dot(x::AbstractVector, A::LowerTriangular, y::AbstractVector) - require_one_based_indexing(x, y) - m = size(A, 1) - (length(x) == m == length(y)) || throw(DimensionMismatch()) - if iszero(m) - return dot(zero(eltype(x)), zero(eltype(A)), zero(eltype(y))) - end - r = zero(typeof(dot(first(x), first(A), first(y)))) - @inbounds for j in axes(A, 2) - yj = y[j] - if !iszero(yj) - temp = adjoint(A[j,j]) * x[j] - @simd for i in j+1:lastindex(A,1) - temp += adjoint(A[i,j]) * x[i] - end - r += dot(temp, yj) - end - end - return r -end -function dot(x::AbstractVector, A::UnitLowerTriangular, y::AbstractVector) - require_one_based_indexing(x, y) - m = size(A, 1) - (length(x) == m == length(y)) || throw(DimensionMismatch()) - if iszero(m) - return dot(zero(eltype(x)), zero(eltype(A)), zero(eltype(y))) - end - r = zero(typeof(dot(first(x), first(y)))) - @inbounds for j in axes(A, 2) - yj = y[j] - if !iszero(yj) - temp = x[j] - @simd for i in j+1:lastindex(A,1) - temp += adjoint(A[i,j]) * x[i] - end - r += dot(temp, yj) - end - end - return r -end - -fillstored!(A::LowerTriangular, x) = (fillband!(A.data, x, 1-size(A,1), 0); A) -fillstored!(A::UnitLowerTriangular, x) = (fillband!(A.data, x, 1-size(A,1), -1); A) -fillstored!(A::UpperTriangular, 
x) = (fillband!(A.data, x, 0, size(A,2)-1); A) -fillstored!(A::UnitUpperTriangular, x) = (fillband!(A.data, x, 1, size(A,2)-1); A) - -# Binary operations -# use broadcasting if the parents are strided, where we loop only over the triangular part -function +(A::UpperTriangular, B::UpperTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B - UpperTriangular(A.data + B.data) -end -function +(A::LowerTriangular, B::LowerTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B - LowerTriangular(A.data + B.data) -end -function +(A::UpperTriangular, B::UnitUpperTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B - UpperTriangular(A.data + triu(B.data, 1) + I) -end -function +(A::LowerTriangular, B::UnitLowerTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B - LowerTriangular(A.data + tril(B.data, -1) + I) -end -function +(A::UnitUpperTriangular, B::UpperTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B - UpperTriangular(triu(A.data, 1) + B.data + I) -end -function +(A::UnitLowerTriangular, B::LowerTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B - LowerTriangular(tril(A.data, -1) + B.data + I) -end -function +(A::UnitUpperTriangular, B::UnitUpperTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B - UpperTriangular(triu(A.data, 1) + triu(B.data, 1) + 2I) -end -function +(A::UnitLowerTriangular, B::UnitLowerTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .+ B - LowerTriangular(tril(A.data, -1) + tril(B.data, -1) + 2I) -end -+(A::UpperOrLowerTriangular, B::UpperOrLowerTriangular) = full(A) + full(B) -+(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A), size(A)), A) + copyto!(similar(parent(B), size(B)), B) - -function -(A::UpperTriangular, B::UpperTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B - UpperTriangular(A.data - B.data) -end -function -(A::LowerTriangular, B::LowerTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B - LowerTriangular(A.data - B.data) -end -function -(A::UpperTriangular, B::UnitUpperTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B - UpperTriangular(A.data - triu(B.data, 1) - I) -end -function -(A::LowerTriangular, B::UnitLowerTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B - LowerTriangular(A.data - tril(B.data, -1) - I) -end -function -(A::UnitUpperTriangular, B::UpperTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B - UpperTriangular(triu(A.data, 1) - B.data + I) -end -function -(A::UnitLowerTriangular, B::LowerTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B - LowerTriangular(tril(A.data, -1) - B.data + I) -end -function -(A::UnitUpperTriangular, B::UnitUpperTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B - UpperTriangular(triu(A.data, 1) - triu(B.data, 1)) -end -function -(A::UnitLowerTriangular, B::UnitLowerTriangular) - (parent(A) isa StridedMatrix || parent(B) isa StridedMatrix) && return A .- B - LowerTriangular(tril(A.data, -1) - tril(B.data, -1)) -end --(A::UpperOrLowerTriangular, B::UpperOrLowerTriangular) = full(A) - full(B) 
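A quick sketch of the structure preservation these binary methods are meant to provide (the values are purely illustrative; the unit-diagonal case shows that the implicit ones are folded into the sum):

```julia
using LinearAlgebra

A = UpperTriangular([1.0 2.0; 0.0 3.0])
B = UpperTriangular([4.0 5.0; 0.0 6.0])
U = UnitUpperTriangular([1.0 7.0; 0.0 1.0])

C = A + B
@assert C isa UpperTriangular         # the triangular wrapper type is preserved
@assert C == [5.0 7.0; 0.0 9.0]

D = A + U                             # the implicit unit diagonal of U is included
@assert D == [2.0 9.0; 0.0 4.0]
```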
--(A::AbstractTriangular, B::AbstractTriangular) = copyto!(similar(parent(A), size(A)), A) - copyto!(similar(parent(B), size(B)), B) - -function kron(A::UpperTriangular{T,<:StridedMaybeAdjOrTransMat}, B::UpperTriangular{S,<:StridedMaybeAdjOrTransMat}) where {T,S} - C = UpperTriangular(Matrix{promote_op(*, T, S)}(undef, _kronsize(A, B))) - return kron!(C, A, B) -end -function kron(A::LowerTriangular{T,<:StridedMaybeAdjOrTransMat}, B::LowerTriangular{S,<:StridedMaybeAdjOrTransMat}) where {T,S} - C = LowerTriangular(Matrix{promote_op(*, T, S)}(undef, _kronsize(A, B))) - return kron!(C, A, B) -end - -function kron!(C::UpperTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, A::UpperTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, B::UpperTriangular{<:Any,<:StridedMaybeAdjOrTransMat}) - size(C) == _kronsize(A, B) || throw(DimensionMismatch("kron!")) - _triukron!(C.data, A.data, B.data) - return C -end -function kron!(C::LowerTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, A::LowerTriangular{<:Any,<:StridedMaybeAdjOrTransMat}, B::LowerTriangular{<:Any,<:StridedMaybeAdjOrTransMat}) - size(C) == _kronsize(A, B) || throw(DimensionMismatch("kron!")) - _trilkron!(C.data, A.data, B.data) - return C -end - -function _triukron!(C, A, B) - n_B = size(B, 1) - @inbounds for j in axes(A,2) - jnB = (j - 1) * n_B - for i in firstindex(A,1):(j-1) - Aij = A[i, j] - inB = (i - 1) * n_B - for l in axes(B,2) - for k in firstindex(B,1):l - C[inB+k, jnB+l] = Aij * B[k, l] - end - for k in firstindex(B,1):(l-1) - C[inB+l, jnB+k] = zero(C[inB+k, jnB+l]) - end - end - end - Ajj = A[j, j] - for l in axes(B,2) - for k in firstindex(B,1):l - C[jnB+k, jnB+l] = Ajj * B[k, l] - end - end - end -end - -function _trilkron!(C, A, B) - n_A = size(A, 1) - n_B = size(B, 1) - @inbounds for j in axes(A,2) - jnB = (j - 1) * n_B - Ajj = A[j, j] - for l in axes(B,2) - for k in l:lastindex(B,1) - C[jnB+k, jnB+l] = Ajj * B[k, l] - end - end - for i in (j+1):n_A - Aij = A[i, j] - inB = (i - 1) * n_B - for l in axes(B,2) - for k in l:lastindex(B,1) - C[inB+k, jnB+l] = Aij * B[k, l] - end - for k in (l+1):lastindex(B,1) - C[inB+l, jnB+k] = zero(C[inB+k, jnB+l]) - end - end - end - end -end - -###################### -# BlasFloat routines # -###################### - -# which triangle to use of the underlying data -uplo_char(::UpperOrUnitUpperTriangular) = 'U' -uplo_char(::LowerOrUnitLowerTriangular) = 'L' -uplo_char(::UpperOrUnitUpperTriangular{<:Any,<:AdjOrTrans}) = 'L' -uplo_char(::LowerOrUnitLowerTriangular{<:Any,<:AdjOrTrans}) = 'U' -uplo_char(::UpperOrUnitUpperTriangular{<:Any,<:Adjoint{<:Any,<:Transpose}}) = 'U' -uplo_char(::LowerOrUnitLowerTriangular{<:Any,<:Adjoint{<:Any,<:Transpose}}) = 'L' -uplo_char(::UpperOrUnitUpperTriangular{<:Any,<:Transpose{<:Any,<:Adjoint}}) = 'U' -uplo_char(::LowerOrUnitLowerTriangular{<:Any,<:Transpose{<:Any,<:Adjoint}}) = 'L' - -isunit_char(::UpperTriangular) = 'N' -isunit_char(::UnitUpperTriangular) = 'U' -isunit_char(::LowerTriangular) = 'N' -isunit_char(::UnitLowerTriangular) = 'U' - -# generic fallback for AbstractTriangular matrices outside of the four subtypes provided here -_trimul!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVector) = - lmul!(A, copyto!(C, B)) -_trimul!(C::AbstractMatrix, A::AbstractTriangular, B::AbstractMatrix) = - lmul!(A, copyto!(C, B)) -_trimul!(C::AbstractMatrix, A::AbstractMatrix, B::AbstractTriangular) = - rmul!(copyto!(C, A), B) -_trimul!(C::AbstractMatrix, A::AbstractTriangular, B::AbstractTriangular) = - lmul!(A, copyto!(C, B)) -# redirect for UpperOrLowerTriangular 
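The structured `kron`/`kron!` methods above only iterate over the stored triangle and fill the rest with zeros. A minimal sketch of the contract, assuming strided (Matrix-backed) wrappers as in the method signatures; the data is illustrative:

```julia
using LinearAlgebra

A = UpperTriangular([1.0 2.0; 0.0 3.0])
B = UpperTriangular([4.0 5.0; 0.0 6.0])

K = kron(A, B)
@assert K isa UpperTriangular            # structure-preserving Kronecker product
@assert K ≈ kron(Matrix(A), Matrix(B))   # agrees with the dense result
```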
-_trimul!(C::AbstractVecOrMat, A::UpperOrLowerTriangular, B::AbstractVector) = - generic_trimatmul!(C, uplo_char(A), isunit_char(A), wrapperop(parent(A)), _unwrap_at(parent(A)), B) -_trimul!(C::AbstractMatrix, A::UpperOrLowerTriangular, B::AbstractMatrix) = - generic_trimatmul!(C, uplo_char(A), isunit_char(A), wrapperop(parent(A)), _unwrap_at(parent(A)), B) -_trimul!(C::AbstractMatrix, A::AbstractMatrix, B::UpperOrLowerTriangular) = - generic_mattrimul!(C, uplo_char(B), isunit_char(B), wrapperop(parent(B)), A, _unwrap_at(parent(B))) -_trimul!(C::AbstractMatrix, A::UpperOrLowerTriangular, B::UpperOrLowerTriangular) = - generic_trimatmul!(C, uplo_char(A), isunit_char(A), wrapperop(parent(A)), _unwrap_at(parent(A)), B) -# disambiguation with AbstractTriangular -_trimul!(C::AbstractMatrix, A::UpperOrLowerTriangular, B::AbstractTriangular) = - generic_trimatmul!(C, uplo_char(A), isunit_char(A), wrapperop(parent(A)), _unwrap_at(parent(A)), B) -_trimul!(C::AbstractMatrix, A::AbstractTriangular, B::UpperOrLowerTriangular) = - generic_mattrimul!(C, uplo_char(B), isunit_char(B), wrapperop(parent(B)), A, _unwrap_at(parent(B))) - -function lmul!(A::AbstractTriangular, B::AbstractVecOrMat) - if istriu(A) - _trimul!(B, uppertriangular(A), B) - else - _trimul!(B, lowertriangular(A), B) - end -end -function rmul!(A::AbstractMatrix, B::AbstractTriangular) - if istriu(B) - _trimul!(A, A, uppertriangular(B)) - else - _trimul!(A, A, lowertriangular(B)) - end -end - -for TC in (:AbstractVector, :AbstractMatrix) - @eval @inline function _mul!(C::$TC, A::AbstractTriangular, B::AbstractVector, alpha::Number, beta::Number) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - if isone(alpha) && iszero(beta) - return _trimul!(C, A, B) - else - return _generic_matvecmul!(C, 'N', A, B, alpha, beta) - end - end -end -for (TA, TB) in ((:AbstractTriangular, :AbstractMatrix), - (:AbstractMatrix, :AbstractTriangular), - (:AbstractTriangular, :AbstractTriangular) - ) - @eval @inline function _mul!(C::AbstractMatrix, A::$TA, B::$TB, alpha::Number, beta::Number) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - if isone(alpha) && iszero(beta) - return _trimul!(C, A, B) - else - return generic_matmatmul!(C, 'N', 'N', A, B, alpha, beta) - end - end -end - -ldiv!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) = _ldiv!(C, A, B) -# generic fallback for AbstractTriangular, directs to 2-arg [l/r]div! -_ldiv!(C::AbstractVecOrMat, A::AbstractTriangular, B::AbstractVecOrMat) = - ldiv!(A, copyto!(C, B)) -_rdiv!(C::AbstractMatrix, A::AbstractMatrix, B::AbstractTriangular) = - rdiv!(copyto!(C, A), B) -# redirect for UpperOrLowerTriangular to generic_*div! 
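These `_trimul!` entry points are what `lmul!`/`rmul!` with a triangular factor resolve to. A short sketch of the intended in-place behaviour, with illustrative data and an explicit dense reference for the check:

```julia
using LinearAlgebra

U  = UpperTriangular([2.0 1.0; 0.0 3.0])
B0 = [1.0 2.0; 3.0 4.0]

B = copy(B0)
lmul!(U, B)                   # overwrite B with U*B, no dense copy of U
@assert B ≈ Matrix(U) * B0

B = copy(B0)
rmul!(B, U)                   # overwrite B with B*U
@assert B ≈ B0 * Matrix(U)
```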
-_ldiv!(C::AbstractVecOrMat, A::UpperOrLowerTriangular, B::AbstractVecOrMat) = - generic_trimatdiv!(C, uplo_char(A), isunit_char(A), wrapperop(parent(A)), _unwrap_at(parent(A)), B) -_rdiv!(C::AbstractMatrix, A::AbstractMatrix, B::UpperOrLowerTriangular) = - generic_mattridiv!(C, uplo_char(B), isunit_char(B), wrapperop(parent(B)), A, _unwrap_at(parent(B))) - -function ldiv!(A::AbstractTriangular, B::AbstractVecOrMat) - if istriu(A) - _ldiv!(B, uppertriangular(A), B) - else - _ldiv!(B, lowertriangular(A), B) - end -end -function rdiv!(A::AbstractMatrix, B::AbstractTriangular) - if istriu(B) - _rdiv!(A, A, uppertriangular(B)) - else - _rdiv!(A, A, lowertriangular(B)) - end -end - -# preserve triangular structure in in-place multiplication/division -for (cty, aty, bty) in ((:UpperTriangular, :UpperTriangular, :UpperTriangular), - (:UpperTriangular, :UpperTriangular, :UnitUpperTriangular), - (:UpperTriangular, :UnitUpperTriangular, :UpperTriangular), - (:UnitUpperTriangular, :UnitUpperTriangular, :UnitUpperTriangular), - (:LowerTriangular, :LowerTriangular, :LowerTriangular), - (:LowerTriangular, :LowerTriangular, :UnitLowerTriangular), - (:LowerTriangular, :UnitLowerTriangular, :LowerTriangular), - (:UnitLowerTriangular, :UnitLowerTriangular, :UnitLowerTriangular)) - @eval begin - function _trimul!(C::$cty, A::$aty, B::$bty) - _trimul!(parent(C), A, B) - return C - end - function _ldiv!(C::$cty, A::$aty, B::$bty) - _ldiv!(parent(C), A, B) - return C - end - function _rdiv!(C::$cty, A::$aty, B::$bty) - _rdiv!(parent(C), A, B) - return C - end - end -end - -for (t, uploc, isunitc) in ((:LowerTriangular, 'L', 'N'), - (:UnitLowerTriangular, 'L', 'U'), - (:UpperTriangular, 'U', 'N'), - (:UnitUpperTriangular, 'U', 'U')) - @eval begin - # Matrix inverse - inv!(A::$t{T,S}) where {T<:BlasFloat,S<:StridedMatrix} = - $t{T,S}(LAPACK.trtri!($uploc, $isunitc, A.data)) - - function inv(A::$t{T}) where {T} - S = typeof(inv(oneunit(T))) - if S <: BlasFloat || S === T # i.e. A is unitless - $t(ldiv!(convert(AbstractArray{S}, A), Matrix{S}(I, size(A)))) - else - J = (one(T)*I)(size(A, 1)) - $t(ldiv!(similar(A, S, size(A)), A, J)) - end - end - - # Error bounds for triangular solve - errorbounds(A::$t{T,<:StridedMatrix}, X::StridedVecOrMat{T}, B::StridedVecOrMat{T}) where {T<:BlasFloat} = - LAPACK.trrfs!($uploc, 'N', $isunitc, A.data, B, X) - - # Condition numbers - function cond(A::$t{<:BlasFloat,<:StridedMatrix}, p::Real=2) - checksquare(A) - if p == 1 - return inv(LAPACK.trcon!('O', $uploc, $isunitc, A.data)) - elseif p == Inf - return inv(LAPACK.trcon!('I', $uploc, $isunitc, A.data)) - else # use fallback - return cond(copyto!(similar(parent(A)), A), p) - end - end - end -end - -# multiplication -generic_trimatmul!(c::StridedVector{T}, uploc, isunitc, tfun::Function, A::StridedMatrix{T}, b::AbstractVector{T}) where {T<:BlasFloat} = - BLAS.trmv!(uploc, tfun === identity ? 'N' : tfun === transpose ? 'T' : 'C', isunitc, A, c === b ? c : copyto!(c, b)) -generic_trimatmul!(C::StridedMatrix{T}, uploc, isunitc, tfun::Function, A::StridedMatrix{T}, B::AbstractMatrix{T}) where {T<:BlasFloat} = - BLAS.trmm!('L', uploc, tfun === identity ? 'N' : tfun === transpose ? 'T' : 'C', isunitc, one(T), A, C === B ? C : copyto!(C, B)) -generic_mattrimul!(C::StridedMatrix{T}, uploc, isunitc, tfun::Function, A::AbstractMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = - BLAS.trmm!('R', uploc, tfun === identity ? 'N' : tfun === transpose ? 'T' : 'C', isunitc, one(T), B, C === A ? 
C : copyto!(C, A)) -# division -generic_trimatdiv!(C::StridedVecOrMat{T}, uploc, isunitc, tfun::Function, A::StridedMatrix{T}, B::AbstractVecOrMat{T}) where {T<:BlasFloat} = - LAPACK.trtrs!(uploc, tfun === identity ? 'N' : tfun === transpose ? 'T' : 'C', isunitc, A, C === B ? C : copyto!(C, B)) -generic_mattridiv!(C::StridedMatrix{T}, uploc, isunitc, tfun::Function, A::AbstractMatrix{T}, B::StridedMatrix{T}) where {T<:BlasFloat} = - BLAS.trsm!('R', uploc, tfun === identity ? 'N' : tfun === transpose ? 'T' : 'C', isunitc, one(T), B, C === A ? C : copyto!(C, A)) - -errorbounds(A::AbstractTriangular{T}, X::AbstractVecOrMat{T}, B::AbstractVecOrMat{T}) where {T<:Union{BigFloat,Complex{BigFloat}}} = - error("not implemented yet! Please submit a pull request.") -function errorbounds(A::AbstractTriangular{TA}, X::AbstractVecOrMat{TX}, B::AbstractVecOrMat{TB}) where {TA<:Number,TX<:Number,TB<:Number} - TAXB = promote_type(TA, TB, TX, Float32) - errorbounds(convert(AbstractMatrix{TAXB}, A), convert(AbstractArray{TAXB}, X), convert(AbstractArray{TAXB}, B)) -end - -# Eigensystems -## Notice that trecv works for quasi-triangular matrices and therefore the lower sub diagonal must be zeroed before calling the subroutine -function eigvecs(A::UpperTriangular{<:BlasFloat,<:StridedMatrix}) - LAPACK.trevc!('R', 'A', BlasInt[], triu!(A.data)) -end -function eigvecs(A::UnitUpperTriangular{<:BlasFloat,<:StridedMatrix}) - for i in axes(A, 1) - A.data[i,i] = 1 - end - LAPACK.trevc!('R', 'A', BlasInt[], triu!(A.data)) -end -function eigvecs(A::LowerTriangular{<:BlasFloat,<:StridedMatrix}) - LAPACK.trevc!('L', 'A', BlasInt[], copy(tril!(A.data)')) -end -function eigvecs(A::UnitLowerTriangular{<:BlasFloat,<:StridedMatrix}) - for i in axes(A, 1) - A.data[i,i] = 1 - end - LAPACK.trevc!('L', 'A', BlasInt[], copy(tril!(A.data)')) -end - -#################### -# Generic routines # -#################### - -for (t, unitt) in ((UpperTriangular, UnitUpperTriangular), - (LowerTriangular, UnitLowerTriangular)) - tstrided = t{<:Any, <:StridedMaybeAdjOrTransMat} - @eval begin - (*)(A::$t, x::Number) = $t(A.data*x) - function (*)(A::$tstrided, x::Number) - eltype_dest = promote_op(*, eltype(A), typeof(x)) - dest = $t(similar(parent(A), eltype_dest)) - _triscale!(dest, x, A, MulAddMul()) - end - - function (*)(A::$unitt, x::Number) - B = $t(A.data)*x - for i in axes(A, 1) - B.data[i,i] = x - end - return B - end - - (*)(x::Number, A::$t) = $t(x*A.data) - function (*)(x::Number, A::$tstrided) - eltype_dest = promote_op(*, typeof(x), eltype(A)) - dest = $t(similar(parent(A), eltype_dest)) - _triscale!(dest, x, A, MulAddMul()) - end - - function (*)(x::Number, A::$unitt) - B = x*$t(A.data) - for i in axes(A, 1) - B.data[i,i] = x - end - return B - end - - (/)(A::$t, x::Number) = $t(A.data/x) - function (/)(A::$tstrided, x::Number) - eltype_dest = promote_op(/, eltype(A), typeof(x)) - dest = $t(similar(parent(A), eltype_dest)) - _trirdiv!(dest, A, x) - end - - function (/)(A::$unitt, x::Number) - B = $t(A.data)/x - invx = inv(x) - for i in axes(A, 1) - B.data[i,i] = invx - end - return B - end - - (\)(x::Number, A::$t) = $t(x\A.data) - function (\)(x::Number, A::$tstrided) - eltype_dest = promote_op(\, typeof(x), eltype(A)) - dest = $t(similar(parent(A), eltype_dest)) - _trildiv!(dest, x, A) - end - - function (\)(x::Number, A::$unitt) - B = x\$t(A.data) - invx = inv(x) - for i in axes(A, 1) - B.data[i,i] = invx - end - return B - end - end -end - -## Generic triangular multiplication -function generic_trimatmul!(C::AbstractVecOrMat, 
uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractVecOrMat) - require_one_based_indexing(C, A, B) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - oA = oneunit(eltype(A)) - unit = isunitc == 'U' - @inbounds if uploc == 'U' - if tfun === identity - for j in axes(B,2) - for i in axes(B,1) - Cij = (unit ? oA : A[i,i]) * B[i,j] - for k in i + 1:lastindex(B,1) - Cij += A[i,k] * B[k,j] - end - C[i,j] = Cij - end - end - else # tfun in (transpose, adjoint) - for j in axes(B,2) - for i in reverse(axes(B,1)) - Cij = (unit ? oA : tfun(A[i,i])) * B[i,j] - for k in firstindex(B,1):i - 1 - Cij += tfun(A[k,i]) * B[k,j] - end - C[i,j] = Cij - end - end - end - else # uploc == 'L' - if tfun === identity - for j in axes(B,2) - for i in reverse(axes(B,1)) - Cij = (unit ? oA : A[i,i]) * B[i,j] - for k in firstindex(B,1):i - 1 - Cij += A[i,k] * B[k,j] - end - C[i,j] = Cij - end - end - else # tfun in (transpose, adjoint) - for j in axes(B,2) - for i in axes(B,1) - Cij = (unit ? oA : tfun(A[i,i])) * B[i,j] - for k in i + 1:lastindex(B,1) - Cij += tfun(A[k,i]) * B[k,j] - end - C[i,j] = Cij - end - end - end - end - return C -end -# conjugate cases -function generic_trimatmul!(C::AbstractVecOrMat, uploc, isunitc, ::Function, xA::AdjOrTrans, B::AbstractVecOrMat) - require_one_based_indexing(C, xA, B) - check_A_mul_B!_sizes(size(C), size(xA), size(B)) - A = parent(xA) - oA = oneunit(eltype(A)) - unit = isunitc == 'U' - @inbounds if uploc == 'U' - for j in axes(B,2) - for i in axes(B,1) - Cij = (unit ? oA : conj(A[i,i])) * B[i,j] - for k in i + 1:lastindex(B,1) - Cij += conj(A[i,k]) * B[k,j] - end - C[i,j] = Cij - end - end - else # uploc == 'L' - for j in axes(B,2) - for i in reverse(axes(B,1)) - Cij = (unit ? oA : conj(A[i,i])) * B[i,j] - for k in firstindex(B,1):i - 1 - Cij += conj(A[i,k]) * B[k,j] - end - C[i,j] = Cij - end - end - end - return C -end - -function generic_mattrimul!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractMatrix) - require_one_based_indexing(C, A, B) - check_A_mul_B!_sizes(size(C), size(A), size(B)) - oB = oneunit(eltype(B)) - unit = isunitc == 'U' - @inbounds if uploc == 'U' - if tfun === identity - for i in axes(A,1) - for j in reverse(axes(A,2)) - Cij = A[i,j] * (unit ? oB : B[j,j]) - for k in firstindex(A,2):j - 1 - Cij += A[i,k] * B[k,j] - end - C[i,j] = Cij - end - end - else # tfun in (transpose, adjoint) - for i in axes(A,1) - for j in axes(A,2) - Cij = A[i,j] * (unit ? oB : tfun(B[j,j])) - for k in j + 1:lastindex(A,2) - Cij += A[i,k] * tfun(B[j,k]) - end - C[i,j] = Cij - end - end - end - else # uploc == 'L' - if tfun === identity - for i in axes(A,1) - for j in axes(A,2) - Cij = A[i,j] * (unit ? oB : B[j,j]) - for k in j + 1:lastindex(A,2) - Cij += A[i,k] * B[k,j] - end - C[i,j] = Cij - end - end - else # tfun in (transpose, adjoint) - for i in axes(A,1) - for j in reverse(axes(A,2)) - Cij = A[i,j] * (unit ? oB : tfun(B[j,j])) - for k in firstindex(A,2):j - 1 - Cij += A[i,k] * tfun(B[j,k]) - end - C[i,j] = Cij - end - end - end - end - return C -end -# conjugate cases -function generic_mattrimul!(C::AbstractMatrix, uploc, isunitc, ::Function, A::AbstractMatrix, xB::AdjOrTrans) - require_one_based_indexing(C, A, xB) - check_A_mul_B!_sizes(size(C), size(A), size(xB)) - B = parent(xB) - oB = oneunit(eltype(B)) - unit = isunitc == 'U' - @inbounds if uploc == 'U' - for i in axes(A,1) - for j in reverse(axes(A,2)) - Cij = A[i,j] * (unit ? 
oB : conj(B[j,j])) - for k in firstindex(A,2):j - 1 - Cij += A[i,k] * conj(B[k,j]) - end - C[i,j] = Cij - end - end - else # uploc == 'L' - for i in axes(A,1) - for j in axes(A,2) - Cij = A[i,j] * (unit ? oB : conj(B[j,j])) - for k in j + 1:lastindex(A,2) - Cij += A[i,k] * conj(B[k,j]) - end - C[i,j] = Cij - end - end - end - return C -end - -#Generic solver using naive substitution - -@inline _ustrip(a) = oneunit(a) \ a -@inline _ustrip(a::Union{AbstractFloat,Integer,Complex,Rational}) = a - -# manually hoisting b[j] significantly improves performance as of Dec 2015 -# manually eliding bounds checking significantly improves performance as of Dec 2015 -# replacing repeated references to A.data[j,j] with [Ajj = A.data[j,j] and references to Ajj] -# does not significantly impact performance as of Dec 2015 -# in the transpose and conjugate transpose naive substitution variants, -# accumulating in z rather than b[j,k] significantly improves performance as of Dec 2015 -function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractVecOrMat) - require_one_based_indexing(C, A, B) - mA, nA = size(A) - m = size(B, 1) - if nA != m - throw(DimensionMismatch(lazy"second dimension of left hand side A, $nA, and first dimension of right hand side B, $m, must be equal")) - end - if size(C) != size(B) - throw(DimensionMismatch(lazy"size of output, $(size(C)), does not match size of right hand side, $(size(B))")) - end - iszero(mA) && return C - oA = oneunit(eltype(A)) - @inbounds if uploc == 'U' - if isunitc == 'N' - if tfun === identity - for k in axes(B,2) - amm = A[m,m] - iszero(amm) && throw(SingularException(m)) - Cm = C[m,k] = amm \ B[m,k] - # fill C-column - for i in reverse(axes(B,1))[2:end] - C[i,k] = oA \ B[i,k] - _ustrip(A[i,m]) * Cm - end - for j in reverse(axes(B,1))[2:end] - ajj = A[j,j] - iszero(ajj) && throw(SingularException(j)) - Cj = C[j,k] = _ustrip(ajj) \ C[j,k] - for i in j-1:-1:firstindex(B,1) - C[i,k] -= _ustrip(A[i,j]) * Cj - end - end - end - else # tfun in (adjoint, transpose) - for k in axes(B,2) - for j in axes(B,1) - ajj = A[j,j] - iszero(ajj) && throw(SingularException(j)) - Bj = B[j,k] - for i in firstindex(A,1):j-1 - Bj -= tfun(A[i,j]) * C[i,k] - end - C[j,k] = tfun(ajj) \ Bj - end - end - end - else # isunitc == 'U' - if tfun === identity - for k in axes(B,2) - Cm = C[m,k] = oA \ B[m,k] - # fill C-column - for i in reverse(axes(B,1))[2:end] - C[i,k] = oA \ B[i,k] - _ustrip(A[i,m]) * Cm - end - for j in reverse(axes(B,1))[2:end] - Cj = C[j,k] - for i in firstindex(A,1):j-1 - C[i,k] -= _ustrip(A[i,j]) * Cj - end - end - end - else # tfun in (adjoint, transpose) - for k in axes(B,2) - for j in axes(B,1) - Bj = B[j,k] - for i in firstindex(A,1):j-1 - Bj -= tfun(A[i,j]) * C[i,k] - end - C[j,k] = oA \ Bj - end - end - end - end - else # uploc == 'L' - if isunitc == 'N' - if tfun === identity - for k in axes(B,2) - a11 = A[1,1] - iszero(a11) && throw(SingularException(1)) - C1 = C[1,k] = a11 \ B[1,k] - # fill C-column - for i in axes(B,1)[2:end] - C[i,k] = oA \ B[i,k] - _ustrip(A[i,1]) * C1 - end - for j in axes(B,1)[2:end] - ajj = A[j,j] - iszero(ajj) && throw(SingularException(j)) - Cj = C[j,k] = _ustrip(ajj) \ C[j,k] - for i in j+1:lastindex(A,1) - C[i,k] -= _ustrip(A[i,j]) * Cj - end - end - end - else # tfun in (adjoint, transpose) - for k in axes(B,2) - for j in reverse(axes(B,1)) - ajj = A[j,j] - iszero(ajj) && throw(SingularException(j)) - Bj = B[j,k] - for i in j+1:lastindex(A,1) - Bj -= tfun(A[i,j]) * C[i,k] - end - C[j,k] 
= tfun(ajj) \ Bj - end - end - end - else # isunitc == 'U' - if tfun === identity - for k in axes(B,2) - C1 = C[1,k] = oA \ B[1,k] - # fill C-column - for i in axes(B,1)[2:end] - C[i,k] = oA \ B[i,k] - _ustrip(A[i,1]) * C1 - end - for j in axes(B,1)[2:end] - Cj = C[j,k] - for i in j+1:lastindex(A,1) - C[i,k] -= _ustrip(A[i,j]) * Cj - end - end - end - else # tfun in (adjoint, transpose) - for k in axes(B,2) - for j in reverse(axes(B,1)) - Bj = B[j,k] - for i in j+1:lastindex(A,1) - Bj -= tfun(A[i,j]) * C[i,k] - end - C[j,k] = oA \ Bj - end - end - end - end - end - return C -end -# conjugate cases -function generic_trimatdiv!(C::AbstractVecOrMat, uploc, isunitc, ::Function, xA::AdjOrTrans, B::AbstractVecOrMat) - A = parent(xA) - require_one_based_indexing(C, A, B) - mA, nA = size(A) - m = size(B, 1) - if nA != m - throw(DimensionMismatch(lazy"second dimension of left hand side A, $nA, and first dimension of right hand side B, $m, must be equal")) - end - if size(C) != size(B) - throw(DimensionMismatch(lazy"size of output, $(size(C)), does not match size of right hand side, $(size(B))")) - end - iszero(mA) && return C - oA = oneunit(eltype(A)) - @inbounds if uploc == 'U' - if isunitc == 'N' - for k in axes(B,2) - amm = conj(A[m,m]) - iszero(amm) && throw(SingularException(m)) - Cm = C[m,k] = amm \ B[m,k] - # fill C-column - for i in reverse(axes(B,1))[2:end] - C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,m])) * Cm - end - for j in reverse(axes(B,1))[2:end] - ajj = conj(A[j,j]) - iszero(ajj) && throw(SingularException(j)) - Cj = C[j,k] = _ustrip(ajj) \ C[j,k] - for i in j-1:-1:firstindex(A,1) - C[i,k] -= _ustrip(conj(A[i,j])) * Cj - end - end - end - else # isunitc == 'U' - for k in axes(B,2) - Cm = C[m,k] = oA \ B[m,k] - # fill C-column - for i in reverse(axes(B,1))[2:end] - C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,m])) * Cm - end - for j in reverse(axes(B,1))[2:end] - Cj = C[j,k] - for i in firstindex(A,1):j-1 - C[i,k] -= _ustrip(conj(A[i,j])) * Cj - end - end - end - end - else # uploc == 'L' - if isunitc == 'N' - for k in axes(B,2) - a11 = conj(A[1,1]) - iszero(a11) && throw(SingularException(1)) - C1 = C[1,k] = a11 \ B[1,k] - # fill C-column - for i in axes(B,1)[2:end] - C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,1])) * C1 - end - for j in axes(A,2)[2:end] - ajj = conj(A[j,j]) - iszero(ajj) && throw(SingularException(j)) - Cj = C[j,k] = _ustrip(ajj) \ C[j,k] - for i in j+1:lastindex(A,1) - C[i,k] -= _ustrip(conj(A[i,j])) * Cj - end - end - end - else # isunitc == 'U' - for k in axes(B,2) - C1 = C[1,k] = oA \ B[1,k] - # fill C-column - for i in axes(B,1)[2:end] - C[i,k] = oA \ B[i,k] - _ustrip(conj(A[i,1])) * C1 - end - for j in axes(A,2) - Cj = C[j,k] - for i in j+1:lastindex(A,1) - C[i,k] -= _ustrip(conj(A[i,j])) * Cj - end - end - end - end - end - return C -end - -function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, tfun::Function, A::AbstractMatrix, B::AbstractMatrix) - require_one_based_indexing(C, A, B) - n = size(A,2) - if size(B, 1) != n - throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $n, has size $(size(B,1))")) - end - if size(C) != size(A) - throw(DimensionMismatch(lazy"size of output, $(size(C)), does not match size of left hand side, $(size(A))")) - end - oB = oneunit(eltype(B)) - unit = isunitc == 'U' - @inbounds if uploc == 'U' - if tfun === identity - for i in axes(A,1) - for j in axes(A,2) - Aij = A[i,j] - for k in firstindex(B,1):j - 1 - Aij -= C[i,k]*B[k,j] - end - unit || (iszero(B[j,j]) && throw(SingularException(j))) - C[i,j] = Aij / 
(unit ? oB : B[j,j]) - end - end - else # tfun in (adjoint, transpose) - for i in axes(A,1) - for j in reverse(axes(A,2)) - Aij = A[i,j] - for k in j + 1:lastindex(B,2) - Aij -= C[i,k]*tfun(B[j,k]) - end - unit || (iszero(B[j,j]) && throw(SingularException(j))) - C[i,j] = Aij / (unit ? oB : tfun(B[j,j])) - end - end - end - else # uploc == 'L' - if tfun === identity - for i in axes(A,1) - for j in reverse(axes(A,2)) - Aij = A[i,j] - for k in j + 1:lastindex(B,1) - Aij -= C[i,k]*B[k,j] - end - unit || (iszero(B[j,j]) && throw(SingularException(j))) - C[i,j] = Aij / (unit ? oB : B[j,j]) - end - end - else # tfun in (adjoint, transpose) - for i in axes(A,1) - for j in axes(A,2) - Aij = A[i,j] - for k in firstindex(B,2):j - 1 - Aij -= C[i,k]*tfun(B[j,k]) - end - unit || (iszero(B[j,j]) && throw(SingularException(j))) - C[i,j] = Aij / (unit ? oB : tfun(B[j,j])) - end - end - end - end - return C -end -function generic_mattridiv!(C::AbstractMatrix, uploc, isunitc, ::Function, A::AbstractMatrix, xB::AdjOrTrans) - B = parent(xB) - require_one_based_indexing(C, A, B) - n = size(A,2) - if size(B, 1) != n - throw(DimensionMismatch(lazy"right hand side B needs first dimension of size $n, has size $(size(B,1))")) - end - if size(C) != size(A) - throw(DimensionMismatch(lazy"size of output, $(size(C)), does not match size of left hand side, $(size(A))")) - end - oB = oneunit(eltype(B)) - unit = isunitc == 'U' - if uploc == 'U' - @inbounds for i in axes(A,1) - for j in axes(A,2) - Aij = A[i,j] - for k in firstindex(B,1):j - 1 - Aij -= C[i,k]*conj(B[k,j]) - end - unit || (iszero(B[j,j]) && throw(SingularException(j))) - C[i,j] = Aij / (unit ? oB : conj(B[j,j])) - end - end - else # uploc == 'L' - @inbounds for i in axes(A,1) - for j in reverse(axes(A,2)) - Aij = A[i,j] - for k in j + 1:lastindex(B,1) - Aij -= C[i,k]*conj(B[k,j]) - end - unit || (iszero(B[j,j]) && throw(SingularException(j))) - C[i,j] = Aij / (unit ? oB : conj(B[j,j])) - end - end - end - return C -end - -# these are needed because we don't keep track of left- and right-multiplication in tritrimul! -rmul!(A::UpperTriangular, B::UpperTriangular) = UpperTriangular(rmul!(triu!(A.data), B)) -rmul!(A::UpperTriangular, B::UnitUpperTriangular) = UpperTriangular(rmul!(triu!(A.data), B)) -rmul!(A::LowerTriangular, B::LowerTriangular) = LowerTriangular(rmul!(tril!(A.data), B)) -rmul!(A::LowerTriangular, B::UnitLowerTriangular) = LowerTriangular(rmul!(tril!(A.data), B)) - -# Promotion -## Promotion methods in matmul don't apply to triangular multiplication since -## it is inplace. Hence we have to make very similar definitions, but without -## allocation of a result array. For multiplication and unit diagonal division -## the element type doesn't have to be stable under division whereas that is -## necessary in the general triangular solve problem. - -_inner_type_promotion(op, ::Type{TA}, ::Type{TB}) where {TA<:Integer,TB<:Integer} = - promote_op(matprod, TA, TB) -_inner_type_promotion(op, ::Type{TA}, ::Type{TB}) where {TA,TB} = - promote_op(op, TA, TB) -## The general promotion methods -for mat in (:AbstractVector, :AbstractMatrix) - ### Left division with triangle to the left hence rhs cannot be transposed. No quotients. - @eval function \(A::Union{UnitUpperTriangular,UnitLowerTriangular}, B::$mat) - require_one_based_indexing(B) - TAB = _inner_type_promotion(\, eltype(A), eltype(B)) - ldiv!(similar(B, TAB, size(B)), A, B) - end - ### Left division with triangle to the left hence rhs cannot be transposed. Quotients. 
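At the user level, the promotion rules above (and the quotient case that follows) back plain substitution solves: for unit-diagonal triangles no quotients are needed, and for the general case the result element type must be closed under division. A small sketch with illustrative values:

```julia
using LinearAlgebra

U = UpperTriangular([2.0 1.0; 0.0 4.0])
b = [4.0, 8.0]

x = U \ b                 # back substitution, no factorization object needed
@assert U * x ≈ b
@assert x ≈ [1.0, 2.0]
```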
- @eval function \(A::Union{UpperTriangular,LowerTriangular}, B::$mat) - require_one_based_indexing(B) - TAB = promote_op(\, eltype(A), eltype(B)) - ldiv!(similar(B, TAB, size(B)), A, B) - end - ### Right division with triangle to the right hence lhs cannot be transposed. No quotients. - @eval function /(A::$mat, B::Union{UnitUpperTriangular, UnitLowerTriangular}) - require_one_based_indexing(A) - TAB = _inner_type_promotion(/, eltype(A), eltype(B)) - _rdiv!(similar(A, TAB, size(A)), A, B) - end - ### Right division with triangle to the right hence lhs cannot be transposed. Quotients. - @eval function /(A::$mat, B::Union{UpperTriangular,LowerTriangular}) - require_one_based_indexing(A) - TAB = promote_op(/, eltype(A), eltype(B)) - _rdiv!(similar(A, TAB, size(A)), A, B) - end -end - -## Some Triangular-Triangular cases. We might want to write tailored methods -## for these cases, but I'm not sure it is worth it. -for f in (:*, :\) - @eval begin - ($f)(A::LowerTriangular, B::LowerTriangular) = - LowerTriangular(@invoke $f(A::LowerTriangular, B::AbstractMatrix)) - ($f)(A::LowerTriangular, B::UnitLowerTriangular) = - LowerTriangular(@invoke $f(A::LowerTriangular, B::AbstractMatrix)) - ($f)(A::UnitLowerTriangular, B::LowerTriangular) = - LowerTriangular(@invoke $f(A::UnitLowerTriangular, B::AbstractMatrix)) - ($f)(A::UnitLowerTriangular, B::UnitLowerTriangular) = - UnitLowerTriangular(@invoke $f(A::UnitLowerTriangular, B::AbstractMatrix)) - ($f)(A::UpperTriangular, B::UpperTriangular) = - UpperTriangular(@invoke $f(A::UpperTriangular, B::AbstractMatrix)) - ($f)(A::UpperTriangular, B::UnitUpperTriangular) = - UpperTriangular(@invoke $f(A::UpperTriangular, B::AbstractMatrix)) - ($f)(A::UnitUpperTriangular, B::UpperTriangular) = - UpperTriangular(@invoke $f(A::UnitUpperTriangular, B::AbstractMatrix)) - ($f)(A::UnitUpperTriangular, B::UnitUpperTriangular) = - UnitUpperTriangular(@invoke $f(A::UnitUpperTriangular, B::AbstractMatrix)) - end -end -(/)(A::LowerTriangular, B::LowerTriangular) = - LowerTriangular(@invoke /(A::AbstractMatrix, B::LowerTriangular)) -(/)(A::LowerTriangular, B::UnitLowerTriangular) = - LowerTriangular(@invoke /(A::AbstractMatrix, B::UnitLowerTriangular)) -(/)(A::UnitLowerTriangular, B::LowerTriangular) = - LowerTriangular(@invoke /(A::AbstractMatrix, B::LowerTriangular)) -(/)(A::UnitLowerTriangular, B::UnitLowerTriangular) = - UnitLowerTriangular(@invoke /(A::AbstractMatrix, B::UnitLowerTriangular)) -(/)(A::UpperTriangular, B::UpperTriangular) = - UpperTriangular(@invoke /(A::AbstractMatrix, B::UpperTriangular)) -(/)(A::UpperTriangular, B::UnitUpperTriangular) = - UpperTriangular(@invoke /(A::AbstractMatrix, B::UnitUpperTriangular)) -(/)(A::UnitUpperTriangular, B::UpperTriangular) = - UpperTriangular(@invoke /(A::AbstractMatrix, B::UpperTriangular)) -(/)(A::UnitUpperTriangular, B::UnitUpperTriangular) = - UnitUpperTriangular(@invoke /(A::AbstractMatrix, B::UnitUpperTriangular)) - -# Complex matrix power for upper triangular factor, see: -# Higham and Lin, "A Schur-Padé algorithm for fractional powers of a Matrix", -# SIAM J. Matrix Anal. & Appl., 32 (3), (2011) 1056–1078. -# Higham and Lin, "An improved Schur-Padé algorithm for fractional powers of -# a matrix and their Fréchet derivatives", SIAM. J. Matrix Anal. & Appl., -# 34(3), (2013) 1341–1360. 
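Before the Schur–Padé kernel itself, a quick sanity sketch of the user-facing behaviour it backs: for a non-integer exponent, `A^p` is computed from the (upper triangular) Schur factor, so squaring the half power should recover the original matrix. The matrix here is arbitrary, chosen with positive eigenvalues so the result stays real:

```julia
using LinearAlgebra

A = [4.0 2.0; 0.0 9.0]     # upper triangular, eigenvalues 4 and 9

S = A^0.5                  # fractional power via the Schur factor
@assert S^2 ≈ A
```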
-function powm!(A0::UpperTriangular, p::Real) - if abs(p) >= 1 - throw(ArgumentError(lazy"p must be a real number in (-1,1), got $p")) - end - - normA0 = opnorm(A0, 1) - rmul!(A0, 1/normA0) - - theta = [1.53e-5, 2.25e-3, 1.92e-2, 6.08e-2, 1.25e-1, 2.03e-1, 2.84e-1] - checksquare(A0) - - A, m, s = invsquaring(A0, theta) - A = I - A - - # Compute accurate diagonal of I - T - sqrt_diag!(A0, A, s) - for i in axes(A,1) - A[i, i] = -A[i, i] - end - # Compute the Padé approximant - c = 0.5 * (p - m) / (2 * m - 1) - triu!(A) - S = c * A - Stmp = similar(S) - for j in m-1:-1:1 - j4 = 4 * j - c = (-p - j) / (j4 + 2) - for i in axes(S,1) - @inbounds S[i, i] = S[i, i] + 1 - end - copyto!(Stmp, S) - mul!(S, A, c) - ldiv!(Stmp, S) - - c = (p - j) / (j4 - 2) - for i in axes(S,1) - @inbounds S[i, i] = S[i, i] + 1 - end - copyto!(Stmp, S) - mul!(S, A, c) - ldiv!(Stmp, S) - end - for i in axes(S,1) - S[i, i] = S[i, i] + 1 - end - copyto!(Stmp, S) - mul!(S, A, -p) - ldiv!(Stmp, S) - for i in axes(S,1) - @inbounds S[i, i] = S[i, i] + 1 - end - - blockpower!(A0, S, p/(2^s)) - for m = 1:s - mul!(Stmp.data, S, S) - copyto!(S, Stmp) - blockpower!(A0, S, p/(2^(s-m))) - end - rmul!(S, normA0^p) - return S -end -powm(A::LowerTriangular, p::Real) = copy(transpose(powm!(copy(transpose(A)), p::Real))) - -# Complex matrix logarithm for the upper triangular factor, see: -# Al-Mohy and Higham, "Improved inverse scaling and squaring algorithms for -# the matrix logarithm", SIAM J. Sci. Comput., 34(4), (2012), pp. C153–C169. -# Al-Mohy, Higham and Relton, "Computing the Frechet derivative of the matrix -# logarithm and estimating the condition number", SIAM J. Sci. Comput., -# 35(4), (2013), C394–C410. -# -# Based on the code available at http://eprints.ma.man.ac.uk/1851/02/logm.zip, -# Copyright (c) 2011, Awad H. Al-Mohy and Nicholas J. Higham -# Julia version relicensed with permission from original authors -log(A::UpperTriangular{T}) where {T<:BlasFloat} = log_quasitriu(A) -log(A::UnitUpperTriangular{T}) where {T<:BlasFloat} = log_quasitriu(A) -log(A::LowerTriangular) = copy(transpose(log(copy(transpose(A))))) -log(A::UnitLowerTriangular) = copy(transpose(log(copy(transpose(A))))) - -function log_quasitriu(A0::AbstractMatrix{T}) where T<:BlasFloat - # allocate real A if log(A) will be real and complex A otherwise - checksquare(A0) - if isreal(A0) && (!istriu(A0) || !any(x -> real(x) < zero(real(T)), diag(A0))) - A = T <: Complex ? real(A0) : copy(A0) - else - A = T <: Complex ? copy(A0) : complex(A0) - end - if A0 isa UnitUpperTriangular - A = UpperTriangular(parent(A)) - @inbounds for i in axes(A,1) - A[i,i] = 1 - end - end - Y0 = _log_quasitriu!(A0, A) - # return complex result for complex input - Y = T <: Complex ? complex(Y0) : Y0 - - if A0 isa UpperTriangular || A0 isa UnitUpperTriangular - return UpperTriangular(Y) - else - return Y - end -end -# type-stable implementation of log_quasitriu -# A is a copy of A0 that is overwritten while computing the result. It has the same eltype -# as the result. 
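A minimal sketch of what `log` on a triangular wrapper is expected to satisfy (values illustrative; the check round-trips through the dense matrix exponential):

```julia
using LinearAlgebra

A = UpperTriangular([1.0 0.5; 0.0 2.0])

L = log(A)
@assert L isa UpperTriangular          # the result keeps the triangular wrapper
@assert exp(Matrix(L)) ≈ Matrix(A)     # exp undoes the principal logarithm
```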
-function _log_quasitriu!(A0, A) - # Find Padé degree m and s while replacing A with A^(1/2^s) - m, s = _find_params_log_quasitriu!(A) - - # Compute accurate superdiagonal of A - _pow_superdiag_quasitriu!(A, A0, 0.5^s) - - # Compute accurate block diagonal of A - _sqrt_pow_diag_quasitriu!(A, A0, s) - - # Get the Gauss-Legendre quadrature points and weights - R = zeros(Float64, m, m) - for i in 1:m - 1 - R[i,i+1] = i / sqrt((2 * i)^2 - 1) - R[i+1,i] = R[i,i+1] - end - x,V = eigen(R) - w = Vector{Float64}(undef, m) - for i in 1:m - x[i] = (x[i] + 1) / 2 - w[i] = V[1,i]^2 - end - - # Compute the Padé approximation - t = eltype(A) - n = size(A, 1) - Y = zeros(t, n, n) - B = similar(A) - for k in 1:m - B .= t(x[k]) .* A - @inbounds for i in axes(B,1) - B[i,i] += 1 - end - Y .+= t(w[k]) .* rdiv_quasitriu!(A, B) - end - - # Scale back - lmul!(2.0^s, Y) - - # Compute accurate diagonal and superdiagonal of log(A) - _log_diag_quasitriu!(Y, A0) - - return Y -end - -# Auxiliary functions for matrix logarithm and matrix power - -# Find Padé degree m and s while replacing A with A^(1/2^s) -# Al-Mohy and Higham, "Improved inverse scaling and squaring algorithms for -# the matrix logarithm", SIAM J. Sci. Comput., 34(4), (2012), pp. C153–C169. -# from Algorithm 4.1 -function _find_params_log_quasitriu!(A) - maxsqrt = 100 - theta = [1.586970738772063e-005, - 2.313807884242979e-003, - 1.938179313533253e-002, - 6.209171588994762e-002, - 1.276404810806775e-001, - 2.060962623452836e-001, - 2.879093714241194e-001] - tmax = size(theta, 1) - p = 0 - m = 0 - - # Find s0, the smallest s such that the ρ(triu(A)^(1/2^s) - I) ≤ theta[tmax], where ρ(X) - # is the spectral radius of X - d = complex.(diagview(A)) - dm1 = d .- 1 - s = 0 - while norm(dm1, Inf) > theta[tmax] && s < maxsqrt - d .= sqrt.(d) - dm1 .= d .- 1 - s = s + 1 - end - s0 = s - - # Compute repeated roots - for k in 1:min(s, maxsqrt) - _sqrt_quasitriu!(A isa UpperTriangular ? parent(A) : A, A) - end - - # these three never needed at the same time, so reuse the same temporary - AmI = AmI4 = AmI5 = A - I - AmI2 = AmI * AmI - AmI3 = AmI2 * AmI - d2 = sqrt(opnorm(AmI2, 1)) - d3 = cbrt(opnorm(AmI3, 1)) - alpha2 = max(d2, d3) - foundm = false - if alpha2 <= theta[2] - m = alpha2 <= theta[1] ? 1 : 2 - foundm = true - end - - while !foundm - more_sqrt = false - mul!(AmI4, AmI2, AmI2) - d4 = opnorm(AmI4, 1)^(1/4) - alpha3 = max(d3, d4) - if alpha3 <= theta[tmax] - local j - for outer j = 3:tmax - if alpha3 <= theta[j] - break - end - end - if j <= 6 - m = j - break - elseif alpha3 / 2 <= theta[5] && p < 2 - more_sqrt = true - p = p + 1 - end - end - - if !more_sqrt - mul!(AmI5, AmI3, AmI2) - d5 = opnorm(AmI5, 1)^(1/5) - alpha4 = max(d4, d5) - eta = min(alpha3, alpha4) - if eta <= theta[tmax] - j = 0 - for outer j = 6:tmax - if eta <= theta[j] - m = j - break - end - end - break - end - end - - if s == maxsqrt - m = tmax - break - end - _sqrt_quasitriu!(A isa UpperTriangular ? parent(A) : A, A) - copyto!(AmI, A) - for i in axes(AmI,1) - @inbounds AmI[i,i] -= 1 - end - mul!(AmI2, AmI, AmI) - mul!(AmI3, AmI2, AmI) - d3 = cbrt(opnorm(AmI3, 1)) - s = s + 1 - end - return m, s -end - -# Compute accurate diagonal of A = A0^s - I -function sqrt_diag!(A0::UpperTriangular, A::UpperTriangular, s) - checksquare(A0) - @inbounds for i in axes(A0,1) - a = complex(A0[i,i]) - A[i,i] = _sqrt_pow(a, s) - end -end -# Compute accurate block diagonal of A = A0^s - I for upper quasi-triangular A0 produced -# by the Schur decomposition. Diagonal is made of 1x1 and 2x2 blocks. 
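The parameter search above relies on the rescaling identity log(A) = 2^s · log(A^(1/2^s)): repeated square roots bring the argument close to the identity, and the logarithm is scaled back afterwards. A tiny numerical sketch of that identity with an arbitrary well-behaved matrix (s = 3 here):

```julia
using LinearAlgebra

A = UpperTriangular([2.0 1.0; 0.0 3.0])

R = sqrt(sqrt(sqrt(A)))                 # A^(1/2^3), still upper triangular
@assert 8 * log(Matrix(R)) ≈ log(Matrix(A))
```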
-# 2x2 blocks are real with non-negative conjugate pair eigenvalues -function _sqrt_pow_diag_quasitriu!(A, A0, s) - n = checksquare(A0) - t = typeof(sqrt(zero(eltype(A)))) - i = 1 - @inbounds while i < n - if iszero(A0[i+1,i]) # 1x1 block - A[i,i] = _sqrt_pow(t(A0[i,i]), s) - i += 1 - else # real 2x2 block - @views _sqrt_pow_diag_block_2x2!(A[i:i+1,i:i+1], A0[i:i+1,i:i+1], s) - i += 2 - end - end - if i == n # last block is 1x1 - @inbounds A[n,n] = _sqrt_pow(t(A0[n,n]), s) - end - return A -end -# compute a^(1/2^s)-1 -# Al-Mohy, "A more accurate Briggs method for the logarithm", -# Numer. Algorithms, 59, (2012), 393–402. -# Algorithm 2 -function _sqrt_pow(a::Number, s) - T = typeof(sqrt(zero(a))) - s == 0 && return T(a) - 1 - s0 = s - if imag(a) >= 0 && real(a) <= 0 && !iszero(a) # angle(a) ≥ π / 2 - a = sqrt(a) - s0 = s - 1 - end - z0 = a - 1 - a = sqrt(a) - r = 1 + a - for j in 1:s0-1 - a = sqrt(a) - r = r * (1 + a) - end - return z0 / r -end -# compute A0 = A^(1/2^s)-I for 2x2 real matrices A and A0 -# A has non-negative conjugate pair eigenvalues -# "Improved Inverse Scaling and Squaring Algorithms for the Matrix Logarithm" -# SIAM J. Sci. Comput., 34(4), (2012) C153–C169. doi: 10.1137/110852553 -# Algorithm 5.1 -Base.@propagate_inbounds function _sqrt_pow_diag_block_2x2!(A, A0, s) - if iszero(s) - A[1,1] -= 1 - A[2,2] -= 1 - return A - end - _sqrt_real_2x2!(A, A0) - if isone(s) - A[1,1] -= 1 - A[2,2] -= 1 - else - # Z = A - I - z11, z21, z12, z22 = A[1,1] - 1, A[2,1], A[1,2], A[2,2] - 1 - # A = sqrt(A) - _sqrt_real_2x2!(A, A) - # P = A + I - p11, p21, p12, p22 = A[1,1] + 1, A[2,1], A[1,2], A[2,2] + 1 - for i in 1:(s - 2) - # A = sqrt(A) - _sqrt_real_2x2!(A, A) - a11, a21, a12, a22 = A[1,1], A[2,1], A[1,2], A[2,2] - # P += P * A - r11 = p11*(1 + a11) + p12*a21 - r22 = p21*a12 + p22*(1 + a22) - p21 = p21*(1 + a11) + p22*a21 - p12 = p11*a12 + p12*(1 + a22) - p11 = r11 - p22 = r22 - end - # A = Z / P - c = inv(p11*p22 - p21*p12) - A[1,1] = (p22*z11 - p21*z12) * c - A[2,1] = (p22*z21 - p21*z22) * c - A[1,2] = (p11*z12 - p12*z11) * c - A[2,2] = (p11*z22 - p12*z21) * c - end - return A -end -# Compute accurate superdiagonal of A = A0^s - I for upper quasi-triangular A0 produced -# by a Schur decomposition. -# Higham and Lin, "A Schur–Padé Algorithm for Fractional Powers of a Matrix" -# SIAM J. Matrix Anal. Appl., 32(3), (2011), 1056–1078. -# Equation 5.6 -# see also blockpower for when A0 is upper triangular -function _pow_superdiag_quasitriu!(A, A0, p) - n = checksquare(A0) - t = eltype(A) - k = 1 - @inbounds while k < n - if !iszero(A[k+1,k]) - k += 2 - continue - end - if !(k == n - 1 || iszero(A[k+2,k+1])) - k += 3 - continue - end - Ak = t(A0[k,k]) - Akp1 = t(A0[k+1,k+1]) - - Akp = Ak^p - Akp1p = Akp1^p - - if Ak == Akp1 - A[k,k+1] = p * A0[k,k+1] * Ak^(p-1) - elseif 2 * abs(Ak) < abs(Akp1) || 2 * abs(Akp1) < abs(Ak) || iszero(Akp1 + Ak) - A[k,k+1] = A0[k,k+1] * (Akp1p - Akp) / (Akp1 - Ak) - else - logAk = log(Ak) - logAkp1 = log(Akp1) - z = (Akp1 - Ak)/(Akp1 + Ak) - if abs(z) > 1 - A[k,k+1] = A0[k,k+1] * (Akp1p - Akp) / (Akp1 - Ak) - else - w = atanh(z) + im * pi * (unw(logAkp1-logAk) - unw(log1p(z)-log1p(-z))) - dd = 2 * exp(p*(logAk+logAkp1)/2) * sinh(p*w) / (Akp1 - Ak); - A[k,k+1] = A0[k,k+1] * dd - end - end - k += 1 - end -end - -# Compute accurate block diagonal and superdiagonal of A = log(A0) for upper -# quasi-triangular A0 produced by the Schur decomposition. 
-function _log_diag_quasitriu!(A, A0) - n = checksquare(A0) - t = eltype(A) - k = 1 - @inbounds while k < n - if iszero(A0[k+1,k]) # 1x1 block - Ak = t(A0[k,k]) - logAk = log(Ak) - A[k,k] = logAk - if k < n - 2 && iszero(A0[k+2,k+1]) - Akp1 = t(A0[k+1,k+1]) - logAkp1 = log(Akp1) - A[k+1,k+1] = logAkp1 - if Ak == Akp1 - A[k,k+1] = A0[k,k+1] / Ak - elseif 2 * abs(Ak) < abs(Akp1) || 2 * abs(Akp1) < abs(Ak) || iszero(Akp1 + Ak) - A[k,k+1] = A0[k,k+1] * (logAkp1 - logAk) / (Akp1 - Ak) - else - z = (Akp1 - Ak)/(Akp1 + Ak) - if abs(z) > 1 - A[k,k+1] = A0[k,k+1] * (logAkp1 - logAk) / (Akp1 - Ak) - else - w = atanh(z) + im * pi * (unw(logAkp1-logAk) - unw(log1p(z)-log1p(-z))) - A[k,k+1] = 2 * A0[k,k+1] * w / (Akp1 - Ak) - end - end - k += 2 - else - k += 1 - end - else # real 2x2 block - @views _log_diag_block_2x2!(A[k:k+1,k:k+1], A0[k:k+1,k:k+1]) - k += 2 - end - end - if k == n # last 1x1 block - @inbounds A[n,n] = log(t(A0[n,n])) - end - return A -end -# compute A0 = log(A) for 2x2 real matrices A and A0, where A0 is a diagonal 2x2 block -# produced by real Schur decomposition. -# Al-Mohy, Higham and Relton, "Computing the Frechet derivative of the matrix -# logarithm and estimating the condition number", SIAM J. Sci. Comput., -# 35(4), (2013), C394–C410. -# Eq. 6.1 -Base.@propagate_inbounds function _log_diag_block_2x2!(A, A0) - a, b, c = A0[1,1], A0[1,2], A0[2,1] - # avoid underflow/overflow for large/small b and c - s = sqrt(abs(b)) * sqrt(abs(c)) - θ = atan(s, a) - t = θ / s - au = abs(a) - if au > s - a1 = log1p((s / au)^2) / 2 + log(au) - else - a1 = log1p((au / s)^2) / 2 + log(s) - end - A[1,1] = a1 - A[2,1] = c*t - A[1,2] = b*t - A[2,2] = a1 - return A -end - -# Used only by powm at the moment -# Repeatedly compute the square roots of A so that in the end its -# eigenvalues are close enough to the positive real line -function invsquaring(A0::UpperTriangular, theta) - require_one_based_indexing(theta) - # assumes theta is in ascending order - maxsqrt = 100 - tmax = size(theta, 1) - checksquare(A0) - A = complex(copy(A0)) - p = 0 - m = 0 - - # Compute repeated roots - d = complex(diag(A)) - dm1 = d .- 1 - s = 0 - while norm(dm1, Inf) > theta[tmax] && s < maxsqrt - d .= sqrt.(d) - dm1 .= d .- 1 - s = s + 1 - end - s0 = s - for k in 1:min(s, maxsqrt) - A = sqrt(A) - end - - AmI = A - I - d2 = sqrt(opnorm(AmI^2, 1)) - d3 = cbrt(opnorm(AmI^3, 1)) - alpha2 = max(d2, d3) - foundm = false - if alpha2 <= theta[2] - m = alpha2 <= theta[1] ? 
1 : 2 - foundm = true - end - - while !foundm - more = false - if s > s0 - d3 = cbrt(opnorm(AmI^3, 1)) - end - d4 = opnorm(AmI^4, 1)^(1/4) - alpha3 = max(d3, d4) - if alpha3 <= theta[tmax] - local j - for outer j = 3:tmax - if alpha3 <= theta[j] - break - elseif alpha3 / 2 <= theta[5] && p < 2 - more = true - p = p + 1 - end - end - if j <= 6 - m = j - foundm = true - break - elseif alpha3 / 2 <= theta[5] && p < 2 - more = true - p = p + 1 - end - end - - if !more - d5 = opnorm(AmI^5, 1)^(1/5) - alpha4 = max(d4, d5) - eta = min(alpha3, alpha4) - if eta <= theta[tmax] - j = 0 - for outer j = 6:tmax - if eta <= theta[j] - m = j - break - end - break - end - end - if s == maxsqrt - m = tmax - break - end - A = sqrt(A) - AmI = A - I - s = s + 1 - end - end - - # Compute accurate superdiagonal of T - p = 1 / 2^s - A = complex(A) - blockpower!(A, A0, p) - return A,m,s -end - -# Compute accurate diagonal and superdiagonal of A = A0^p -function blockpower!(A::UpperTriangular, A0::UpperTriangular, p) - checksquare(A0) - @inbounds for k in axes(A0,1)[1:end-1] - Ak = complex(A0[k,k]) - Akp1 = complex(A0[k+1,k+1]) - - Akp = Ak^p - Akp1p = Akp1^p - - A[k,k] = Akp - A[k+1,k+1] = Akp1p - - if Ak == Akp1 - A[k,k+1] = p * A0[k,k+1] * Ak^(p-1) - elseif 2 * abs(Ak) < abs(Akp1) || 2 * abs(Akp1) < abs(Ak) || iszero(Akp1 + Ak) - A[k,k+1] = A0[k,k+1] * (Akp1p - Akp) / (Akp1 - Ak) - else - logAk = log(Ak) - logAkp1 = log(Akp1) - z = (Akp1 - Ak)/(Akp1 + Ak) - if abs(z) > 1 - A[k,k+1] = A0[k,k+1] * (Akp1p - Akp) / (Akp1 - Ak) - else - w = atanh(z) + im * pi * (unw(logAkp1-logAk) - unw(log1p(z)-log1p(-z))) - dd = 2 * exp(p*(logAk+logAkp1)/2) * sinh(p*w) / (Akp1 - Ak); - A[k,k+1] = A0[k,k+1] * dd - end - end - end -end - -# Unwinding number -unw(x::Real) = 0 -unw(x::Number) = ceil((imag(x) - pi) / (2 * pi)) - -# compute A / B for upper quasi-triangular B, possibly overwriting B -function rdiv_quasitriu!(A, B) - checksquare(A) - AG = copy(A) - # use Givens rotations to annihilate 2x2 blocks - @inbounds for k in axes(B,2)[1:end-1] - s = B[k+1,k] - iszero(s) && continue # 1x1 block - G = first(givens(B[k+1,k+1], s, k, k+1)) - rmul!(B, G) - rmul!(AG, G) - end - return rdiv!(AG, UpperTriangular(B)) -end - -# End of auxiliary functions for matrix logarithm and matrix power - -sqrt(A::UpperTriangular) = sqrt_quasitriu(A) -function sqrt(A::UnitUpperTriangular{T}) where T - B = A.data - t = typeof(sqrt(zero(T))) - R = Matrix{t}(I, size(A)) - tt = typeof(oneunit(t)*oneunit(t)) - half = inv(R[1,1]+R[1,1]) # for general, algebraic cases. PR#20214 - @inbounds for j in axes(B,2) - for i in j-1:-1:firstindex(B) - r::tt = B[i,j] - @simd for k in i+1:j-1 - r -= R[i,k]*R[k,j] - end - iszero(r) || (R[i,j] = half*r) - end - end - return UnitUpperTriangular(R) -end -sqrt(A::LowerTriangular) = copy(transpose(sqrt(copy(transpose(A))))) -sqrt(A::UnitLowerTriangular) = copy(transpose(sqrt(copy(transpose(A))))) - -# Auxiliary functions for matrix square root - -# square root of upper triangular or real upper quasitriangular matrix -function sqrt_quasitriu(A0; blockwidth = eltype(A0) <: Complex ? 
512 : 256) - n = checksquare(A0) - T = eltype(A0) - Tr = typeof(sqrt(real(zero(T)))) - Tc = typeof(sqrt(complex(zero(T)))) - if isreal(A0) - is_sqrt_real = true - if istriu(A0) - for i in axes(A0,1) - Aii = real(A0[i,i]) - if Aii < zero(Aii) - is_sqrt_real = false - break - end - end - end - if is_sqrt_real - R = zeros(Tr, size(A0)) - A = real(A0) - else - R = zeros(Tc, size(A0)) - A = A0 - end - else - A = A0 - R = zeros(Tc, size(A0)) - end - _sqrt_quasitriu!(R, A; blockwidth=blockwidth, n=n) - Rc = eltype(A0) <: Real ? R : complex(R) - if A0 isa UpperTriangular - return UpperTriangular(Rc) - elseif A0 isa UnitUpperTriangular - return UnitUpperTriangular(Rc) - else - return Rc - end -end - -# in-place recursive sqrt of upper quasi-triangular matrix A from -# Deadman E., Higham N.J., Ralha R. (2013) Blocked Schur Algorithms for Computing the Matrix -# Square Root. Applied Parallel and Scientific Computing. PARA 2012. Lecture Notes in -# Computer Science, vol 7782. https://doi.org/10.1007/978-3-642-36803-5_12 -function _sqrt_quasitriu!(R, A; blockwidth=64, n=checksquare(A)) - if n ≤ blockwidth || !(eltype(R) <: BlasFloat) # base case, perform "point" algorithm - _sqrt_quasitriu_block!(R, A) - else # compute blockwise recursion - split = div(n, 2) - iszero(A[split+1, split]) || (split += 1) # don't split 2x2 diagonal block - r1 = 1:split - r2 = (split + 1):n - n1, n2 = split, n - split - A11, A12, A22 = @views A[r1,r1], A[r1,r2], A[r2,r2] - R11, R12, R22 = @views R[r1,r1], R[r1,r2], R[r2,r2] - # solve diagonal blocks recursively - _sqrt_quasitriu!(R11, A11; blockwidth=blockwidth, n=n1) - _sqrt_quasitriu!(R22, A22; blockwidth=blockwidth, n=n2) - # solve off-diagonal block - R12 .= .- A12 - _sylvester_quasitriu!(R11, R22, R12; blockwidth=blockwidth, nA=n1, nB=n2, raise=false) - end - return R -end - -function _sqrt_quasitriu_block!(R, A) - _sqrt_quasitriu_diag_block!(R, A) - _sqrt_quasitriu_offdiag_block!(R, A) - return R -end - -function _sqrt_quasitriu_diag_block!(R, A) - n = size(R, 1) - ta = eltype(R) <: Complex ? complex(eltype(A)) : eltype(A) - i = 1 - @inbounds while i < n - if iszero(A[i + 1, i]) - R[i, i] = sqrt(ta(A[i, i])) - i += 1 - else - # This branch is never reached when A is complex triangular - @assert eltype(A) <: Real - @views _sqrt_real_2x2!(R[i:(i + 1), i:(i + 1)], A[i:(i + 1), i:(i + 1)]) - i += 2 - end - end - if i == n - R[n, n] = sqrt(ta(A[n, n])) - end - return R -end - -function _sqrt_quasitriu_offdiag_block!(R, A) - n = size(R, 1) - j = 1 - @inbounds while j ≤ n - jsize_is_2 = j < n && !iszero(A[j + 1, j]) - i = j - 1 - while i > 0 - isize_is_2 = i > 1 && !iszero(A[i, i - 1]) - if isize_is_2 - if jsize_is_2 - _sqrt_quasitriu_offdiag_block_2x2!(R, A, i - 1, j) - else - _sqrt_quasitriu_offdiag_block_2x1!(R, A, i - 1, j) - end - i -= 2 - else - if jsize_is_2 - _sqrt_quasitriu_offdiag_block_1x2!(R, A, i, j) - else - _sqrt_quasitriu_offdiag_block_1x1!(R, A, i, j) - end - i -= 1 - end - end - j += 2 - !jsize_is_2 - end - return R -end - -# real square root of 2x2 diagonal block of quasi-triangular matrix from real Schur -# decomposition. Eqs 6.8-6.9 and Algorithm 6.5 of -# Higham, 2008, "Functions of Matrices: Theory and Computation", SIAM. 
-Base.@propagate_inbounds function _sqrt_real_2x2!(R, A) - # in the real Schur form, A[1, 1] == A[2, 2], and A[2, 1] * A[1, 2] < 0 - θ, a21, a12 = A[1, 1], A[2, 1], A[1, 2] - # avoid overflow/underflow of μ - # for real sqrt, |d| ≤ 2 max(|a12|,|a21|) - μ = sqrt(abs(a12)) * sqrt(abs(a21)) - α = _real_sqrt(θ, μ) - c = 2α - R[1, 1] = α - R[2, 1] = a21 / c - R[1, 2] = a12 / c - R[2, 2] = α - return R -end - -# real part of square root of θ+im*μ -@inline function _real_sqrt(θ, μ) - t = sqrt((abs(θ) + hypot(θ, μ)) / 2) - return θ ≥ 0 ? t : μ / 2t -end - -Base.@propagate_inbounds function _sqrt_quasitriu_offdiag_block_1x1!(R, A, i, j) - Rii = R[i, i] - Rjj = R[j, j] - iszero(Rii) && iszero(Rjj) && return R - t = eltype(R) - tt = typeof(zero(t)*zero(t)) - r = tt(-A[i, j]) - @simd for k in (i + 1):(j - 1) - r += R[i, k] * R[k, j] - end - iszero(r) && return R - R[i, j] = sylvester(Rii, Rjj, r) - return R -end - -Base.@propagate_inbounds function _sqrt_quasitriu_offdiag_block_1x2!(R, A, i, j) - jrange = j:(j + 1) - t = eltype(R) - tt = typeof(zero(t)*zero(t)) - r1 = tt(-A[i, j]) - r2 = tt(-A[i, j + 1]) - @simd for k in (i + 1):(j - 1) - rik = R[i, k] - r1 += rik * R[k, j] - r2 += rik * R[k, j + 1] - end - Rjj = @view R[jrange, jrange] - Rij = @view R[i, jrange] - Rij[1] = r1 - Rij[2] = r2 - _sylvester_1x2!(R[i, i], Rjj, Rij) - return R -end - -Base.@propagate_inbounds function _sqrt_quasitriu_offdiag_block_2x1!(R, A, i, j) - irange = i:(i + 1) - t = eltype(R) - tt = typeof(zero(t)*zero(t)) - r1 = tt(-A[i, j]) - r2 = tt(-A[i + 1, j]) - @simd for k in (i + 2):(j - 1) - rkj = R[k, j] - r1 += R[i, k] * rkj - r2 += R[i + 1, k] * rkj - end - Rii = @view R[irange, irange] - Rij = @view R[irange, j] - Rij[1] = r1 - Rij[2] = r2 - @views _sylvester_2x1!(Rii, R[j, j], Rij) - return R -end - -Base.@propagate_inbounds function _sqrt_quasitriu_offdiag_block_2x2!(R, A, i, j) - irange = i:(i + 1) - jrange = j:(j + 1) - t = eltype(R) - tt = typeof(zero(t)*zero(t)) - for i′ in irange, j′ in jrange - Cij = tt(-A[i′, j′]) - @simd for k in (i + 2):(j - 1) - Cij += R[i′, k] * R[k, j′] - end - R[i′, j′] = Cij - end - Rii = @view R[irange, irange] - Rjj = @view R[jrange, jrange] - Rij = @view R[irange, jrange] - if !iszero(Rij) && !all(isnan, Rij) - _sylvester_2x2!(Rii, Rjj, Rij) - end - return R -end - -# solve Sylvester's equation AX + XB = -C using blockwise recursion until the dimension of -# A and B are no greater than blockwidth, based on Algorithm 1 from -# Jonsson I, Kågström B. Recursive blocked algorithms for solving triangular systems— -# Part I: one-sided and coupled Sylvester-type matrix equations. (2002) ACM Trans Math Softw. -# 28(4), https://doi.org/10.1145/592843.592845. -# specify raise=false to avoid breaking the recursion if a LAPACKException is thrown when -# computing one of the blocks. 
-function _sylvester_quasitriu!(A, B, C; blockwidth=64, nA=checksquare(A), nB=checksquare(B), raise=true) - if 1 ≤ nA ≤ blockwidth && 1 ≤ nB ≤ blockwidth - _sylvester_quasitriu_base!(A, B, C; raise=raise) - elseif nA ≥ 2nB ≥ 2 - _sylvester_quasitriu_split1!(A, B, C; blockwidth=blockwidth, nA=nA, nB=nB, raise=raise) - elseif nB ≥ 2nA ≥ 2 - _sylvester_quasitriu_split2!(A, B, C; blockwidth=blockwidth, nA=nA, nB=nB, raise=raise) - else - _sylvester_quasitriu_splitall!(A, B, C; blockwidth=blockwidth, nA=nA, nB=nB, raise=raise) - end - return C -end -function _sylvester_quasitriu_base!(A, B, C; raise=true) - try - _, scale = LAPACK.trsyl!('N', 'N', A, B, C) - rmul!(C, -inv(scale)) - catch e - if !(e isa LAPACKException) || raise - throw(e) - end - end - return C -end -function _sylvester_quasitriu_split1!(A, B, C; nA=checksquare(A), kwargs...) - iA = div(nA, 2) - iszero(A[iA + 1, iA]) || (iA += 1) # don't split 2x2 diagonal block - rA1, rA2 = 1:iA, (iA + 1):nA - nA1, nA2 = iA, nA-iA - A11, A12, A22 = @views A[rA1,rA1], A[rA1,rA2], A[rA2,rA2] - C1, C2 = @views C[rA1,:], C[rA2,:] - _sylvester_quasitriu!(A22, B, C2; nA=nA2, kwargs...) - mul!(C1, A12, C2, true, true) - _sylvester_quasitriu!(A11, B, C1; nA=nA1, kwargs...) - return C -end -function _sylvester_quasitriu_split2!(A, B, C; nB=checksquare(B), kwargs...) - iB = div(nB, 2) - iszero(B[iB + 1, iB]) || (iB += 1) # don't split 2x2 diagonal block - rB1, rB2 = 1:iB, (iB + 1):nB - nB1, nB2 = iB, nB-iB - B11, B12, B22 = @views B[rB1,rB1], B[rB1,rB2], B[rB2,rB2] - C1, C2 = @views C[:,rB1], C[:,rB2] - _sylvester_quasitriu!(A, B11, C1; nB=nB1, kwargs...) - mul!(C2, C1, B12, true, true) - _sylvester_quasitriu!(A, B22, C2; nB=nB2, kwargs...) - return C -end -function _sylvester_quasitriu_splitall!(A, B, C; nA=checksquare(A), nB=checksquare(B), kwargs...) - iA = div(nA, 2) - iszero(A[iA + 1, iA]) || (iA += 1) # don't split 2x2 diagonal block - iB = div(nB, 2) - iszero(B[iB + 1, iB]) || (iB += 1) # don't split 2x2 diagonal block - rA1, rA2 = 1:iA, (iA + 1):nA - nA1, nA2 = iA, nA-iA - rB1, rB2 = 1:iB, (iB + 1):nB - nB1, nB2 = iB, nB-iB - A11, A12, A22 = @views A[rA1,rA1], A[rA1,rA2], A[rA2,rA2] - B11, B12, B22 = @views B[rB1,rB1], B[rB1,rB2], B[rB2,rB2] - C11, C21, C12, C22 = @views C[rA1,rB1], C[rA2,rB1], C[rA1,rB2], C[rA2,rB2] - _sylvester_quasitriu!(A22, B11, C21; nA=nA2, nB=nB1, kwargs...) - mul!(C11, A12, C21, true, true) - _sylvester_quasitriu!(A11, B11, C11; nA=nA1, nB=nB1, kwargs...) - mul!(C22, C21, B12, true, true) - _sylvester_quasitriu!(A22, B22, C22; nA=nA2, nB=nB2, kwargs...) - mul!(C12, A12, C22, true, true) - mul!(C12, C11, B12, true, true) - _sylvester_quasitriu!(A11, B22, C12; nA=nA1, nB=nB2, kwargs...) - return C -end - -# End of auxiliary functions for matrix square root - -# Generic eigensystems -eigvals(A::AbstractTriangular) = diag(A) -# fallback for unknown types -function eigvecs(A::AbstractTriangular{<:BlasFloat}) - if istriu(A) - eigvecs(UpperTriangular(Matrix(A))) - else # istril(A) - eigvecs(LowerTriangular(Matrix(A))) - end -end -function eigvecs(A::AbstractTriangular{T}) where T - TT = promote_type(T, Float32) - if TT <: BlasFloat - return eigvecs(convert(AbstractMatrix{TT}, A)) - else - throw(ArgumentError(lazy"eigvecs type $(typeof(A)) not supported. 
Please submit a pull request.")) - end -end -det(A::UnitUpperTriangular{T}) where {T} = one(T) -det(A::UnitLowerTriangular{T}) where {T} = one(T) -logdet(A::UnitUpperTriangular{T}) where {T} = zero(T) -logdet(A::UnitLowerTriangular{T}) where {T} = zero(T) -logabsdet(A::UnitUpperTriangular{T}) where {T} = zero(T), one(T) -logabsdet(A::UnitLowerTriangular{T}) where {T} = zero(T), one(T) -det(A::UpperTriangular) = prod(diag(A.data)) -det(A::LowerTriangular) = prod(diag(A.data)) -function logabsdet(A::Union{UpperTriangular{T},LowerTriangular{T}}) where T - sgn = one(T) - abs_det = zero(real(T)) - @inbounds for i in axes(A.data,1) - diag_i = A.data[i,i] - sgn *= sign(diag_i) - abs_det += log(abs(diag_i)) - end - return abs_det, sgn -end - -eigen(A::AbstractTriangular) = Eigen(eigvals(A), eigvecs(A)) - -# Generic singular systems -for func in (:svd, :svd!, :svdvals) - @eval begin - ($func)(A::AbstractTriangular; kwargs...) = ($func)(copyto!(similar(parent(A)), A); kwargs...) - end -end - -factorize(A::AbstractTriangular) = A - -# disambiguation methods: /(Adjoint of AbsVec, <:AbstractTriangular) -/(u::AdjointAbsVec, A::Union{LowerTriangular,UpperTriangular}) = adjoint(adjoint(A) \ u.parent) -/(u::AdjointAbsVec, A::Union{UnitLowerTriangular,UnitUpperTriangular}) = adjoint(adjoint(A) \ u.parent) -# disambiguation methods: /(Transpose of AbsVec, <:AbstractTriangular) -/(u::TransposeAbsVec, A::Union{LowerTriangular,UpperTriangular}) = transpose(transpose(A) \ u.parent) -/(u::TransposeAbsVec, A::Union{UnitLowerTriangular,UnitUpperTriangular}) = transpose(transpose(A) \ u.parent) -# disambiguation methods: /(Transpose of AbsVec, Adj/Trans of <:AbstractTriangular) -for (tritype, comptritype) in ((:LowerTriangular, :UpperTriangular), - (:UnitLowerTriangular, :UnitUpperTriangular), - (:UpperTriangular, :LowerTriangular), - (:UnitUpperTriangular, :UnitLowerTriangular)) - @eval /(u::TransposeAbsVec, A::$tritype{<:Any,<:Adjoint}) = transpose($comptritype(conj(parent(parent(A)))) \ u.parent) - @eval /(u::TransposeAbsVec, A::$tritype{<:Any,<:Transpose}) = transpose(transpose(A) \ u.parent) -end - -# Cube root of a 2x2 real-valued matrix with complex conjugate eigenvalues and equal diagonal values. -# Reference [1]: Smith, M. I. (2003). A Schur Algorithm for Computing Matrix pth Roots. -# SIAM Journal on Matrix Analysis and Applications (Vol. 24, Issue 4, pp. 971–989). -# https://doi.org/10.1137/s0895479801392697 -function _cbrt_2x2!(A::AbstractMatrix{T}) where {T<:Real} - @assert checksquare(A) == 2 - @inbounds begin - (A[1,1] == A[2,2]) || throw(ArgumentError("_cbrt_2x2!: Matrix A must have equal diagonal values.")) - (A[1,2]*A[2,1] < 0) || throw(ArgumentError("_cbrt_2x2!: Matrix A must have complex conjugate eigenvalues.")) - μ = sqrt(-A[1,2]*A[2,1]) - r = cbrt(hypot(A[1,1], μ)) - θ = atan(μ, A[1,1]) - s, c = sincos(θ/3) - α, β′ = r*c, r*s/µ - A[1,1] = α - A[2,2] = α - A[1,2] = β′*A[1,2] - A[2,1] = β′*A[2,1] - end - return A -end - -# Cube root of a quasi upper triangular matrix (output of Schur decomposition) -# Reference [1]: Smith, M. I. (2003). A Schur Algorithm for Computing Matrix pth Roots. -# SIAM Journal on Matrix Analysis and Applications (Vol. 24, Issue 4, pp. 971–989). 
-# https://doi.org/10.1137/s0895479801392697 -@views function _cbrt_quasi_triu!(A::AbstractMatrix{T}) where {T<:Real} - m, n = size(A) - (m == n) || throw(ArgumentError("_cbrt_quasi_triu!: Matrix A must be square.")) - # Cube roots of 1x1 and 2x2 diagonal blocks - i = 1 - sizes = ones(Int,n) - S = zeros(T,2,n) - while i < n - if !iszero(A[i+1,i]) - _cbrt_2x2!(A[i:i+1,i:i+1]) - mul!(S[1:2,i:i+1], A[i:i+1,i:i+1], A[i:i+1,i:i+1]) - sizes[i] = 2 - sizes[i+1] = 0 - i += 2 - else - A[i,i] = cbrt(A[i,i]) - S[1,i] = A[i,i]*A[i,i] - i += 1 - end - end - if i == n - A[n,n] = cbrt(A[n,n]) - S[1,n] = A[n,n]*A[n,n] - end - # Algorithm 4.3 in Reference [1] - Δ = I(4) - M_L₀ = zeros(T,4,4) - M_L₁ = zeros(T,4,4) - M_Bᵢⱼ⁽⁰⁾ = zeros(T,2,2) - M_Bᵢⱼ⁽¹⁾ = zeros(T,2,2) - for k in axes(A,2)[1:end-1] - for i in axes(A,2)[1:end-k] - if sizes[i] == 0 || sizes[i+k] == 0 continue end - k₁, k₂ = i+1+(sizes[i+1]==0), i+k-1 - i₁, i₂, j₁, j₂, s₁, s₂ = i, i+sizes[i]-1, i+k, i+k+sizes[i+k]-1, sizes[i], sizes[i+k] - L₀ = M_L₀[1:s₁*s₂,1:s₁*s₂] - L₁ = M_L₁[1:s₁*s₂,1:s₁*s₂] - Bᵢⱼ⁽⁰⁾ = M_Bᵢⱼ⁽⁰⁾[1:s₁, 1:s₂] - Bᵢⱼ⁽¹⁾ = M_Bᵢⱼ⁽¹⁾[1:s₁, 1:s₂] - # Compute Bᵢⱼ⁽⁰⁾ and Bᵢⱼ⁽¹⁾ - mul!(Bᵢⱼ⁽⁰⁾, A[i₁:i₂,k₁:k₂], A[k₁:k₂,j₁:j₂]) - # Retrieve Rᵢ,ᵢ₊ₖ as A[i+k,i]' - mul!(Bᵢⱼ⁽¹⁾, A[i₁:i₂,k₁:k₂], A[j₁:j₂,k₁:k₂]') - # Solve Uᵢ,ᵢ₊ₖ using Reference [1, (4.10)] - kron!(L₀, Δ[1:s₂,1:s₂], S[1:s₁,i₁:i₂]) - L₀ .+= kron!(L₁, A[j₁:j₂,j₁:j₂]', A[i₁:i₂,i₁:i₂]) - L₀ .+= kron!(L₁, S[1:s₂,j₁:j₂]', Δ[1:s₁,1:s₁]) - mul!(A[i₁:i₂,j₁:j₂], A[i₁:i₂,i₁:i₂], Bᵢⱼ⁽⁰⁾, -1.0, 1.0) - A[i₁:i₂,j₁:j₂] .-= Bᵢⱼ⁽¹⁾ - ldiv!(lu!(L₀), A[i₁:i₂,j₁:j₂][:]) - # Compute and store Rᵢ,ᵢ₊ₖ' in A[i+k,i] - mul!(Bᵢⱼ⁽⁰⁾, A[i₁:i₂,i₁:i₂], A[i₁:i₂,j₁:j₂], 1.0, 1.0) - mul!(Bᵢⱼ⁽⁰⁾, A[i₁:i₂,j₁:j₂], A[j₁:j₂,j₁:j₂], 1.0, 1.0) - A[j₁:j₂,i₁:i₂] .= Bᵢⱼ⁽⁰⁾' - end - end - # Make quasi triangular - for j in axes(A,2) - for i=j+1+(sizes[j]==2):lastindex(A,1) - A[i,j] = 0 - end - end - return A -end - -# Cube roots of real-valued triangular matrices -cbrt(A::UpperTriangular{T}) where {T<:Real} = UpperTriangular(_cbrt_quasi_triu!(Matrix{T}(A))) -cbrt(A::LowerTriangular{T}) where {T<:Real} = LowerTriangular(_cbrt_quasi_triu!(Matrix{T}(A'))') diff --git a/stdlib/LinearAlgebra/src/tridiag.jl b/stdlib/LinearAlgebra/src/tridiag.jl deleted file mode 100644 index 0d73e6dd46fdb..0000000000000 --- a/stdlib/LinearAlgebra/src/tridiag.jl +++ /dev/null @@ -1,1099 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -#### Specialized matrix types #### - -## (complex) symmetric tridiagonal matrices -struct SymTridiagonal{T, V<:AbstractVector{T}} <: AbstractMatrix{T} - dv::V # diagonal - ev::V # superdiagonal - function SymTridiagonal{T, V}(dv, ev) where {T, V<:AbstractVector{T}} - require_one_based_indexing(dv, ev) - if !(length(dv) - 1 <= length(ev) <= length(dv)) - throw(DimensionMismatch(lazy"subdiagonal has wrong length. Has length $(length(ev)), but should be either $(length(dv) - 1) or $(length(dv)).")) - end - new{T, V}(dv, ev) - end -end - -""" - SymTridiagonal(dv::V, ev::V) where V <: AbstractVector - -Construct a symmetric tridiagonal matrix from the diagonal (`dv`) and first -sub/super-diagonal (`ev`), respectively. The result is of type `SymTridiagonal` -and provides efficient specialized eigensolvers, but may be converted into a -regular matrix with [`convert(Array, _)`](@ref) (or `Array(_)` for short). - -For `SymTridiagonal` block matrices, the elements of `dv` are symmetrized. -The argument `ev` is interpreted as the superdiagonal. 
Blocks from the -subdiagonal are (materialized) transpose of the corresponding superdiagonal blocks. - -# Examples -```jldoctest -julia> dv = [1, 2, 3, 4] -4-element Vector{Int64}: - 1 - 2 - 3 - 4 - -julia> ev = [7, 8, 9] -3-element Vector{Int64}: - 7 - 8 - 9 - -julia> SymTridiagonal(dv, ev) -4×4 SymTridiagonal{Int64, Vector{Int64}}: - 1 7 ⋅ ⋅ - 7 2 8 ⋅ - ⋅ 8 3 9 - ⋅ ⋅ 9 4 - -julia> A = SymTridiagonal(fill([1 2; 3 4], 3), fill([1 2; 3 4], 2)); - -julia> A[1,1] -2×2 Symmetric{Int64, Matrix{Int64}}: - 1 2 - 2 4 - -julia> A[1,2] -2×2 Matrix{Int64}: - 1 2 - 3 4 - -julia> A[2,1] -2×2 Matrix{Int64}: - 1 3 - 2 4 -``` -""" -SymTridiagonal(dv::V, ev::V) where {T,V<:AbstractVector{T}} = SymTridiagonal{T}(dv, ev) -SymTridiagonal{T}(dv::V, ev::V) where {T,V<:AbstractVector{T}} = SymTridiagonal{T,V}(dv, ev) -function SymTridiagonal{T}(dv::AbstractVector, ev::AbstractVector) where {T} - d = convert(AbstractVector{T}, dv)::AbstractVector{T} - e = convert(AbstractVector{T}, ev)::AbstractVector{T} - typeof(d) == typeof(e) ? - SymTridiagonal{T}(d, e) : - throw(ArgumentError("diagonal vectors needed to be convertible to same type")) -end -SymTridiagonal(d::AbstractVector{T}, e::AbstractVector{S}) where {T,S} = - SymTridiagonal{promote_type(T, S)}(d, e) - -""" - SymTridiagonal(A::AbstractMatrix) - -Construct a symmetric tridiagonal matrix from the diagonal and first superdiagonal -of the symmetric matrix `A`. - -# Examples -```jldoctest -julia> A = [1 2 3; 2 4 5; 3 5 6] -3×3 Matrix{Int64}: - 1 2 3 - 2 4 5 - 3 5 6 - -julia> SymTridiagonal(A) -3×3 SymTridiagonal{Int64, Vector{Int64}}: - 1 2 ⋅ - 2 4 5 - ⋅ 5 6 - -julia> B = reshape([[1 2; 2 3], [1 2; 3 4], [1 3; 2 4], [1 2; 2 3]], 2, 2); - -julia> SymTridiagonal(B) -2×2 SymTridiagonal{Matrix{Int64}, Vector{Matrix{Int64}}}: - [1 2; 2 3] [1 3; 2 4] - [1 2; 3 4] [1 2; 2 3] -``` -""" -function SymTridiagonal(A::AbstractMatrix) - checksquare(A) - du = diag(A, 1) - d = diag(A) - dl = diag(A, -1) - if all(((x, y),) -> x == transpose(y), zip(du, dl)) && all(issymmetric, d) - SymTridiagonal(d, du) - else - throw(ArgumentError("matrix is not symmetric; cannot convert to SymTridiagonal")) - end -end - -SymTridiagonal{T,V}(S::SymTridiagonal{T,V}) where {T,V<:AbstractVector{T}} = S -SymTridiagonal{T,V}(S::SymTridiagonal) where {T,V<:AbstractVector{T}} = - SymTridiagonal(convert(V, S.dv)::V, convert(V, S.ev)::V) -SymTridiagonal{T}(S::SymTridiagonal{T}) where {T} = S -SymTridiagonal{T}(S::SymTridiagonal) where {T} = - SymTridiagonal(convert(AbstractVector{T}, S.dv)::AbstractVector{T}, - convert(AbstractVector{T}, S.ev)::AbstractVector{T}) -SymTridiagonal(S::SymTridiagonal) = S - -AbstractMatrix{T}(S::SymTridiagonal) where {T} = SymTridiagonal{T}(S) -AbstractMatrix{T}(S::SymTridiagonal{T}) where {T} = copy(S) - -function Matrix{T}(M::SymTridiagonal) where T - n = size(M, 1) - Mf = Matrix{T}(undef, n, n) - n == 0 && return Mf - if haszero(T) # optimized path for types with zero(T) defined - n > 2 && fill!(Mf, zero(T)) - @inbounds for i = 1:n-1 - Mf[i,i] = symmetric(M.dv[i], :U) - Mf[i+1,i] = transpose(M.ev[i]) - Mf[i,i+1] = M.ev[i] - end - Mf[n,n] = symmetric(M.dv[n], :U) - else - copyto!(Mf, M) - end - return Mf -end -Matrix(M::SymTridiagonal{T}) where {T} = Matrix{promote_type(T, typeof(zero(T)))}(M) -Array(M::SymTridiagonal) = Matrix(M) - -size(A::SymTridiagonal) = (n = length(A.dv); (n, n)) -axes(M::SymTridiagonal) = (ax = axes(M.dv, 1); (ax, ax)) - -similar(S::SymTridiagonal, ::Type{T}) where {T} = SymTridiagonal(similar(S.dv, T), similar(S.ev, T)) 
-similar(S::SymTridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = similar(S.dv, T, dims) - -# copyto! for matching axes -_copyto_banded!(dest::SymTridiagonal, src::SymTridiagonal) = - (copyto!(dest.dv, src.dv); copyto!(dest.ev, _evview(src)); dest) - -#Elementary operations -for func in (:conj, :copy, :real, :imag) - @eval ($func)(M::SymTridiagonal) = SymTridiagonal(($func)(M.dv), ($func)(M.ev)) -end - -transpose(S::SymTridiagonal) = S -adjoint(S::SymTridiagonal{<:Number}) = SymTridiagonal(vec(adjoint(S.dv)), vec(adjoint(S.ev))) -adjoint(S::SymTridiagonal{<:Number, <:Base.ReshapedArray{<:Number,1,<:Adjoint}}) = - SymTridiagonal(adjoint(parent(S.dv)), adjoint(parent(S.ev))) - -permutedims(S::SymTridiagonal) = S -function permutedims(S::SymTridiagonal, perm) - Base.checkdims_perm(axes(S), axes(S), perm) - NTuple{2}(perm) == (2, 1) ? permutedims(S) : S -end -Base.copy(S::Adjoint{<:Any,<:SymTridiagonal}) = SymTridiagonal(map(x -> copy.(adjoint.(x)), (S.parent.dv, S.parent.ev))...) - -ishermitian(S::SymTridiagonal) = isreal(S.dv) && isreal(_evview(S)) -issymmetric(S::SymTridiagonal) = true - -tr(S::SymTridiagonal) = sum(symmetric, S.dv) - -_diagiter(M::SymTridiagonal{<:Number}) = M.dv -_diagiter(M::SymTridiagonal) = (symmetric(x, :U) for x in M.dv) -_eviter_transposed(M::SymTridiagonal{<:Number}) = _evview(M) -_eviter_transposed(M::SymTridiagonal) = (transpose(x) for x in _evview(M)) - -function diag(M::SymTridiagonal, n::Integer=0) - # every branch call similar(..., ::Int) to make sure the - # same vector type is returned independent of n - v = similar(M.dv, max(0, length(M.dv)-abs(n))) - if n == 0 - return copyto!(v, _diagiter(M)) - elseif n == 1 - return copyto!(v, _evview(M)) - elseif n == -1 - return copyto!(v, _eviter_transposed(M)) - else - for i in eachindex(v) - v[i] = M[BandIndex(n,i)] - end - end - return v -end - -+(A::SymTridiagonal, B::SymTridiagonal) = SymTridiagonal(A.dv+B.dv, _evview(A)+_evview(B)) --(A::SymTridiagonal, B::SymTridiagonal) = SymTridiagonal(A.dv-B.dv, _evview(A)-_evview(B)) --(A::SymTridiagonal) = SymTridiagonal(-A.dv, -A.ev) -*(A::SymTridiagonal, B::Number) = SymTridiagonal(A.dv*B, A.ev*B) -*(B::Number, A::SymTridiagonal) = SymTridiagonal(B*A.dv, B*A.ev) -function rmul!(A::SymTridiagonal, x::Number) - if size(A,1) > 2 - # ensure that zeros are preserved on scaling - y = A[3,1] * x - iszero(y) || throw(ArgumentError(LazyString("cannot set index (3, 1) off ", - lazy"the tridiagonal band to a nonzero value ($y)"))) - end - A.dv .*= x - _evview(A) .*= x - return A -end -function lmul!(x::Number, B::SymTridiagonal) - if size(B,1) > 2 - # ensure that zeros are preserved on scaling - y = x * B[3,1] - iszero(y) || throw(ArgumentError(LazyString("cannot set index (3, 1) off ", - lazy"the tridiagonal band to a nonzero value ($y)"))) - end - @. B.dv = x * B.dv - ev = _evview(B) - @. 
ev = x * ev - return B -end -/(A::SymTridiagonal, B::Number) = SymTridiagonal(A.dv/B, A.ev/B) -\(B::Number, A::SymTridiagonal) = SymTridiagonal(B\A.dv, B\A.ev) -==(A::SymTridiagonal{<:Number}, B::SymTridiagonal{<:Number}) = - (A.dv == B.dv) && (_evview(A) == _evview(B)) -==(A::SymTridiagonal, B::SymTridiagonal) = - size(A) == size(B) && all(i -> A[i,i] == B[i,i], axes(A, 1)) && (_evview(A) == _evview(B)) - -function dot(x::AbstractVector, S::SymTridiagonal, y::AbstractVector) - require_one_based_indexing(x, y) - nx, ny = length(x), length(y) - (nx == size(S, 1) == ny) || throw(DimensionMismatch("dot")) - if nx ≤ 1 - nx == 0 && return dot(zero(eltype(x)), zero(eltype(S)), zero(eltype(y))) - return dot(x[1], S.dv[1], y[1]) - end - dv, ev = S.dv, S.ev - @inbounds begin - x₀ = x[1] - x₊ = x[2] - sub = transpose(ev[1]) - r = dot(adjoint(dv[1])*x₀ + adjoint(sub)*x₊, y[1]) - for j in 2:nx-1 - x₋, x₀, x₊ = x₀, x₊, x[j+1] - sup, sub = transpose(sub), transpose(ev[j]) - r += dot(adjoint(sup)*x₋ + adjoint(dv[j])*x₀ + adjoint(sub)*x₊, y[j]) - end - r += dot(adjoint(transpose(sub))*x₀ + adjoint(dv[nx])*x₊, y[nx]) - end - return r -end - -(\)(T::SymTridiagonal, B::AbstractVecOrMat) = ldlt(T)\B - -# division with optional shift for use in shifted-Hessenberg solvers (hessenberg.jl): -ldiv!(A::SymTridiagonal, B::AbstractVecOrMat; shift::Number=false) = ldiv!(ldlt(A, shift=shift), B) -rdiv!(B::AbstractVecOrMat, A::SymTridiagonal; shift::Number=false) = rdiv!(B, ldlt(A, shift=shift)) - -eigen!(A::SymTridiagonal{<:BlasReal,<:StridedVector}) = Eigen(LAPACK.stegr!('V', A.dv, A.ev)...) -eigen(A::SymTridiagonal{T}) where T = eigen!(copymutable_oftype(A, eigtype(T))) - -eigen!(A::SymTridiagonal{<:BlasReal,<:StridedVector}, irange::UnitRange) = - Eigen(LAPACK.stegr!('V', 'I', A.dv, A.ev, 0.0, 0.0, irange.start, irange.stop)...) -eigen(A::SymTridiagonal{T}, irange::UnitRange) where T = - eigen!(copymutable_oftype(A, eigtype(T)), irange) - -eigen!(A::SymTridiagonal{<:BlasReal,<:StridedVector}, vl::Real, vu::Real) = - Eigen(LAPACK.stegr!('V', 'V', A.dv, A.ev, vl, vu, 0, 0)...) -eigen(A::SymTridiagonal{T}, vl::Real, vu::Real) where T = - eigen!(copymutable_oftype(A, eigtype(T)), vl, vu) - -eigvals!(A::SymTridiagonal{<:BlasReal,<:StridedVector}) = LAPACK.stev!('N', A.dv, A.ev)[1] -eigvals(A::SymTridiagonal{T}) where T = eigvals!(copymutable_oftype(A, eigtype(T))) - -eigvals!(A::SymTridiagonal{<:BlasReal,<:StridedVector}, irange::UnitRange) = - LAPACK.stegr!('N', 'I', A.dv, A.ev, 0.0, 0.0, irange.start, irange.stop)[1] -eigvals(A::SymTridiagonal{T}, irange::UnitRange) where T = - eigvals!(copymutable_oftype(A, eigtype(T)), irange) - -eigvals!(A::SymTridiagonal{<:BlasReal,<:StridedVector}, vl::Real, vu::Real) = - LAPACK.stegr!('N', 'V', A.dv, A.ev, vl, vu, 0, 0)[1] -eigvals(A::SymTridiagonal{T}, vl::Real, vu::Real) where T = - eigvals!(copymutable_oftype(A, eigtype(T)), vl, vu) - -#Computes largest and smallest eigenvalue -eigmax(A::SymTridiagonal) = eigvals(A, size(A, 1):size(A, 1))[1] -eigmin(A::SymTridiagonal) = eigvals(A, 1:1)[1] - -#Compute selected eigenvectors only corresponding to particular eigenvalues -""" - eigvecs(A::SymTridiagonal[, eigvals]) -> Matrix - -Return a matrix `M` whose columns are the eigenvectors of `A`. (The `k`th eigenvector can -be obtained from the slice `M[:, k]`.) - -If the optional vector of eigenvalues `eigvals` is specified, `eigvecs` -returns the specific corresponding eigenvectors. 
- -# Examples -```jldoctest -julia> A = SymTridiagonal([1.; 2.; 1.], [2.; 3.]) -3×3 SymTridiagonal{Float64, Vector{Float64}}: - 1.0 2.0 ⋅ - 2.0 2.0 3.0 - ⋅ 3.0 1.0 - -julia> eigvals(A) -3-element Vector{Float64}: - -2.1400549446402604 - 1.0000000000000002 - 5.140054944640259 - -julia> eigvecs(A) -3×3 Matrix{Float64}: - 0.418304 -0.83205 0.364299 - -0.656749 -7.39009e-16 0.754109 - 0.627457 0.5547 0.546448 - -julia> eigvecs(A, [1.]) -3×1 Matrix{Float64}: - 0.8320502943378438 - 4.263514128092366e-17 - -0.5547001962252291 -``` -""" -eigvecs(A::SymTridiagonal{<:BlasFloat,<:StridedVector}, eigvals::Vector{<:Real}) = LAPACK.stein!(A.dv, A.ev, eigvals) - -function svdvals!(A::SymTridiagonal) - vals = eigvals!(A) - return sort!(map!(abs, vals, vals); rev=true) -end - -# tril and triu - -Base.@constprop :aggressive function istriu(M::SymTridiagonal, k::Integer=0) - if k <= -1 - return true - elseif k == 0 - return iszero(_evview(M)) - else # k >= 1 - return iszero(_evview(M)) && iszero(M.dv) - end -end -Base.@constprop :aggressive istril(M::SymTridiagonal, k::Integer) = istriu(M, -k) -iszero(M::SymTridiagonal) = iszero(_evview(M)) && iszero(M.dv) -isone(M::SymTridiagonal) = iszero(_evview(M)) && all(isone, M.dv) -isdiag(M::SymTridiagonal) = iszero(_evview(M)) - - -function tril!(M::SymTridiagonal{T}, k::Integer=0) where T - n = length(M.dv) - if !(-n - 1 <= k <= n - 1) - throw(ArgumentError(LazyString(lazy"the requested diagonal, $k, must be at least ", - lazy"$(-n - 1) and at most $(n - 1) in an $n-by-$n matrix"))) - elseif k < -1 - fill!(M.ev, zero(T)) - fill!(M.dv, zero(T)) - return Tridiagonal(M.ev,M.dv,copy(M.ev)) - elseif k == -1 - fill!(M.dv, zero(T)) - return Tridiagonal(M.ev,M.dv,zero(M.ev)) - elseif k == 0 - return Tridiagonal(M.ev,M.dv,zero(M.ev)) - else # if k >= 1 - return Tridiagonal(M.ev,M.dv,copy(M.ev)) - end -end - -function triu!(M::SymTridiagonal{T}, k::Integer=0) where T - n = length(M.dv) - if !(-n + 1 <= k <= n + 1) - throw(ArgumentError(LazyString(lazy"the requested diagonal, $k, must be at least ", - lazy"$(-n + 1) and at most $(n + 1) in an $n-by-$n matrix"))) - elseif k > 1 - fill!(M.ev, zero(T)) - fill!(M.dv, zero(T)) - return Tridiagonal(M.ev,M.dv,copy(M.ev)) - elseif k == 1 - fill!(M.dv, zero(T)) - return Tridiagonal(zero(M.ev),M.dv,M.ev) - elseif k == 0 - return Tridiagonal(zero(M.ev),M.dv,M.ev) - else # if k <= -1 - return Tridiagonal(M.ev,M.dv,copy(M.ev)) - end -end - -################### -# Generic methods # -################### - -## structured matrix methods ## -function Base.replace_in_print_matrix(A::SymTridiagonal, i::Integer, j::Integer, s::AbstractString) - i==j-1||i==j||i==j+1 ? s : Base.replace_with_centered_mark(s) -end - -# Implements the determinant using principal minors -# a, b, c are assumed to be the subdiagonal, diagonal, and superdiagonal of -# a tridiagonal matrix. -#Reference: -# R. 
Usmani, "Inversion of a tridiagonal Jacobi matrix", -# Linear Algebra and its Applications 212-213 (1994), pp.413-414 -# doi:10.1016/0024-3795(94)90414-6 -function det_usmani(a::V, b::V, c::V, shift::Number=0) where {T,V<:AbstractVector{T}} - require_one_based_indexing(a, b, c) - n = length(b) - θa = oneunit(T)+zero(shift) - if n == 0 - return θa - end - θb = b[1]+shift - for i in 2:n - θb, θa = (b[i]+shift)*θb - a[i-1]*c[i-1]*θa, θb - end - return θb -end - -# det with optional diagonal shift for use with shifted Hessenberg factorizations -det(A::SymTridiagonal; shift::Number=false) = det_usmani(A.ev, A.dv, A.ev, shift) -logabsdet(A::SymTridiagonal; shift::Number=false) = logabsdet(ldlt(A; shift=shift)) - -@inline function Base.isassigned(A::SymTridiagonal, i::Int, j::Int) - @boundscheck checkbounds(Bool, A, i, j) || return false - if i == j - return @inbounds isassigned(A.dv, i) - elseif i == j + 1 - return @inbounds isassigned(A.ev, j) - elseif i + 1 == j - return @inbounds isassigned(A.ev, i) - else - return true - end -end - -@inline function Base.isstored(A::SymTridiagonal, i::Int, j::Int) - @boundscheck checkbounds(A, i, j) - if i == j - return @inbounds Base.isstored(A.dv, i) - elseif i == j + 1 - return @inbounds Base.isstored(A.ev, j) - elseif i + 1 == j - return @inbounds Base.isstored(A.ev, i) - else - return false - end -end - -@inline function getindex(A::SymTridiagonal{T}, i::Int, j::Int) where T - @boundscheck checkbounds(A, i, j) - if i == j - return symmetric((@inbounds A.dv[i]), :U)::symmetric_type(eltype(A.dv)) - elseif i == j + 1 - return copy(transpose(@inbounds A.ev[j])) # materialized for type stability - elseif i + 1 == j - return @inbounds A.ev[i] - else - return zero(T) - end -end - -Base._reverse(A::SymTridiagonal, dims) = reverse!(Matrix(A); dims) -Base._reverse(A::SymTridiagonal, dims::Colon) = SymTridiagonal(reverse(A.dv), reverse(A.ev)) -Base._reverse!(A::SymTridiagonal, dims::Colon) = (reverse!(A.dv); reverse!(A.ev); A) - -@inline function setindex!(A::SymTridiagonal, x, i::Integer, j::Integer) - @boundscheck checkbounds(A, i, j) - if i == j - issymmetric(x) || throw(ArgumentError("cannot set a diagonal entry of a SymTridiagonal to an asymmetric value")) - @inbounds A.dv[i] = x - else - throw(ArgumentError(lazy"cannot set off-diagonal entry ($i, $j)")) - end - return A -end - -## Tridiagonal matrices ## -struct Tridiagonal{T,V<:AbstractVector{T}} <: AbstractMatrix{T} - dl::V # sub-diagonal - d::V # diagonal - du::V # sup-diagonal - du2::V # supsup-diagonal for pivoting in LU - function Tridiagonal{T,V}(dl, d, du) where {T,V<:AbstractVector{T}} - require_one_based_indexing(dl, d, du) - n = length(d) - if (length(dl) != n-1 || length(du) != n-1) && !(length(d) == 0 && length(dl) == 0 && length(du) == 0) - throw(ArgumentError(LazyString("cannot construct Tridiagonal from incompatible ", - "lengths of subdiagonal, diagonal and superdiagonal: ", - lazy"($(length(dl)), $(length(d)), $(length(du)))"))) - end - new{T,V}(dl, d, Base.unalias(dl, du)) - end - # constructor used in lu! - function Tridiagonal{T,V}(dl, d, du, du2) where {T,V<:AbstractVector{T}} - require_one_based_indexing(dl, d, du, du2) - # length checks? - new{T,V}(dl, d, Base.unalias(dl, du), du2) - end -end - -""" - Tridiagonal(dl::V, d::V, du::V) where V <: AbstractVector - -Construct a tridiagonal matrix from the first subdiagonal, diagonal, and first superdiagonal, -respectively. 
The result is of type `Tridiagonal` and provides efficient specialized linear -solvers, but may be converted into a regular matrix with -[`convert(Array, _)`](@ref) (or `Array(_)` for short). -The lengths of `dl` and `du` must be one less than the length of `d`. - -!!! note - The subdiagonal `dl` and the superdiagonal `du` must not be aliased to each other. - If aliasing is detected, the constructor will use a copy of `du` as its argument. - -# Examples -```jldoctest -julia> dl = [1, 2, 3]; - -julia> du = [4, 5, 6]; - -julia> d = [7, 8, 9, 0]; - -julia> Tridiagonal(dl, d, du) -4×4 Tridiagonal{Int64, Vector{Int64}}: - 7 4 ⋅ ⋅ - 1 8 5 ⋅ - ⋅ 2 9 6 - ⋅ ⋅ 3 0 -``` -""" -Tridiagonal(dl::V, d::V, du::V) where {T,V<:AbstractVector{T}} = Tridiagonal{T,V}(dl, d, du) -Tridiagonal(dl::V, d::V, du::V, du2::V) where {T,V<:AbstractVector{T}} = Tridiagonal{T,V}(dl, d, du, du2) -Tridiagonal(dl::AbstractVector{T}, d::AbstractVector{S}, du::AbstractVector{U}) where {T,S,U} = - Tridiagonal{promote_type(T, S, U)}(dl, d, du) -Tridiagonal(dl::AbstractVector{T}, d::AbstractVector{S}, du::AbstractVector{U}, du2::AbstractVector{V}) where {T,S,U,V} = - Tridiagonal{promote_type(T, S, U, V)}(dl, d, du, du2) -function Tridiagonal{T}(dl::AbstractVector, d::AbstractVector, du::AbstractVector) where {T} - l, d, u = map(x->convert(AbstractVector{T}, x), (dl, d, du)) - typeof(l) == typeof(d) == typeof(u) ? - Tridiagonal(l, d, u) : - throw(ArgumentError("diagonal vectors needed to be convertible to same type")) -end -function Tridiagonal{T}(dl::AbstractVector, d::AbstractVector, du::AbstractVector, du2::AbstractVector) where {T} - l, d, u, u2 = map(x->convert(AbstractVector{T}, x), (dl, d, du, du2)) - typeof(l) == typeof(d) == typeof(u) == typeof(u2) ? - Tridiagonal(l, d, u, u2) : - throw(ArgumentError("diagonal vectors needed to be convertible to same type")) -end - -""" - Tridiagonal(A) - -Construct a tridiagonal matrix from the first sub-diagonal, -diagonal and first super-diagonal of the matrix `A`. 
- -# Examples -```jldoctest -julia> A = [1 2 3 4; 1 2 3 4; 1 2 3 4; 1 2 3 4] -4×4 Matrix{Int64}: - 1 2 3 4 - 1 2 3 4 - 1 2 3 4 - 1 2 3 4 - -julia> Tridiagonal(A) -4×4 Tridiagonal{Int64, Vector{Int64}}: - 1 2 ⋅ ⋅ - 1 2 3 ⋅ - ⋅ 2 3 4 - ⋅ ⋅ 3 4 -``` -""" -Tridiagonal(A::AbstractMatrix) = Tridiagonal(diag(A,-1), diag(A,0), diag(A,1)) - -Tridiagonal(A::Tridiagonal) = A -Tridiagonal{T}(A::Tridiagonal{T}) where {T} = A -function Tridiagonal{T}(A::Tridiagonal) where {T} - dl, d, du = map(x -> convert(AbstractVector{T}, x)::AbstractVector{T}, (A.dl, A.d, A.du)) - if isdefined(A, :du2) - Tridiagonal{T}(dl, d, du, convert(AbstractVector{T}, A.du2)::AbstractVector{T}) - else - Tridiagonal{T}(dl, d, du) - end -end -Tridiagonal{T,V}(A::Tridiagonal{T,V}) where {T,V<:AbstractVector{T}} = A -function Tridiagonal{T,V}(A::Tridiagonal) where {T,V<:AbstractVector{T}} - dl, d, du = map(x -> convert(V, x)::V, (A.dl, A.d, A.du)) - if isdefined(A, :du2) - Tridiagonal{T,V}(dl, d, du, convert(V, A.du2)::V) - else - Tridiagonal{T,V}(dl, d, du) - end -end - -size(M::Tridiagonal) = (n = length(M.d); (n, n)) -axes(M::Tridiagonal) = (ax = axes(M.d,1); (ax, ax)) - -function Matrix{T}(M::Tridiagonal) where {T} - A = Matrix{T}(undef, size(M)) - if haszero(T) # optimized path for types with zero(T) defined - size(A,1) > 2 && fill!(A, zero(T)) - copyto!(diagview(A), M.d) - copyto!(diagview(A,1), M.du) - copyto!(diagview(A,-1), M.dl) - else - copyto!(A, M) - end - A -end -Matrix(M::Tridiagonal{T}) where {T} = Matrix{promote_type(T, typeof(zero(T)))}(M) -Array(M::Tridiagonal) = Matrix(M) - -similar(M::Tridiagonal, ::Type{T}) where {T} = Tridiagonal(similar(M.dl, T), similar(M.d, T), similar(M.du, T)) -similar(M::Tridiagonal, ::Type{T}, dims::Union{Dims{1},Dims{2}}) where {T} = similar(M.d, T, dims) - -# Operations on Tridiagonal matrices -# copyto! for matching axes -function _copyto_banded!(dest::Tridiagonal, src::Tridiagonal) - copyto!(dest.dl, src.dl) - copyto!(dest.d, src.d) - copyto!(dest.du, src.du) - dest -end - -#Elementary operations -for func in (:conj, :copy, :real, :imag) - @eval function ($func)(M::Tridiagonal) - Tridiagonal(($func)(M.dl), ($func)(M.d), ($func)(M.du)) - end -end - -adjoint(S::Tridiagonal{<:Number}) = Tridiagonal(vec(adjoint(S.du)), vec(adjoint(S.d)), vec(adjoint(S.dl))) -adjoint(S::Tridiagonal{<:Number, <:Base.ReshapedArray{<:Number,1,<:Adjoint}}) = - Tridiagonal(adjoint(parent(S.du)), adjoint(parent(S.d)), adjoint(parent(S.dl))) -transpose(S::Tridiagonal{<:Number}) = Tridiagonal(S.du, S.d, S.dl) -permutedims(T::Tridiagonal) = Tridiagonal(T.du, T.d, T.dl) -function permutedims(T::Tridiagonal, perm) - Base.checkdims_perm(axes(T), axes(T), perm) - NTuple{2}(perm) == (2, 1) ? 
permutedims(T) : T -end -Base.copy(aS::Adjoint{<:Any,<:Tridiagonal}) = (S = aS.parent; Tridiagonal(map(x -> copy.(adjoint.(x)), (S.du, S.d, S.dl))...)) -Base.copy(tS::Transpose{<:Any,<:Tridiagonal}) = (S = tS.parent; Tridiagonal(map(x -> copy.(transpose.(x)), (S.du, S.d, S.dl))...)) - -ishermitian(S::Tridiagonal) = all(ishermitian, S.d) && all(Iterators.map((x, y) -> x == y', S.du, S.dl)) -issymmetric(S::Tridiagonal) = all(issymmetric, S.d) && all(Iterators.map((x, y) -> x == transpose(y), S.du, S.dl)) - -\(A::Adjoint{<:Any,<:Tridiagonal}, B::Adjoint{<:Any,<:AbstractVecOrMat}) = copy(A) \ B - -function diag(M::Tridiagonal, n::Integer=0) - # every branch call similar(..., ::Int) to make sure the - # same vector type is returned independent of n - v = similar(M.d, max(0, length(M.d)-abs(n))) - if n == 0 - copyto!(v, M.d) - elseif n == -1 - copyto!(v, M.dl) - elseif n == 1 - copyto!(v, M.du) - elseif abs(n) <= size(M,1) - for i in eachindex(v) - v[i] = M[BandIndex(n,i)] - end - end - return v -end - -@inline function Base.isassigned(A::Tridiagonal, i::Int, j::Int) - @boundscheck checkbounds(Bool, A, i, j) || return false - if i == j - return @inbounds isassigned(A.d, i) - elseif i == j + 1 - return @inbounds isassigned(A.dl, j) - elseif i + 1 == j - return @inbounds isassigned(A.du, i) - else - return true - end -end - -@inline function Base.isstored(A::Tridiagonal, i::Int, j::Int) - @boundscheck checkbounds(A, i, j) - if i == j - return @inbounds Base.isstored(A.d, i) - elseif i == j + 1 - return @inbounds Base.isstored(A.dl, j) - elseif i + 1 == j - return @inbounds Base.isstored(A.du, i) - else - return false - end -end - -@inline function getindex(A::Tridiagonal{T}, i::Int, j::Int) where T - @boundscheck checkbounds(A, i, j) - if i == j - return @inbounds A.d[i] - elseif i == j + 1 - return @inbounds A.dl[j] - elseif i + 1 == j - return @inbounds A.du[i] - else - return zero(T) - end -end - -@inline function getindex(A::Tridiagonal{T}, b::BandIndex) where T - @boundscheck checkbounds(A, b) - if b.band == 0 - return @inbounds A.d[b.index] - elseif b.band == -1 - return @inbounds A.dl[b.index] - elseif b.band == 1 - return @inbounds A.du[b.index] - else - return zero(T) - end -end - -@inline function setindex!(A::Tridiagonal, x, i::Integer, j::Integer) - @boundscheck checkbounds(A, i, j) - if i == j - @inbounds A.d[i] = x - elseif i - j == 1 - @inbounds A.dl[j] = x - elseif j - i == 1 - @inbounds A.du[i] = x - elseif !iszero(x) - throw(ArgumentError(LazyString(lazy"cannot set entry ($i, $j) off ", - lazy"the tridiagonal band to a nonzero value ($x)"))) - end - return A -end - -## structured matrix methods ## -function Base.replace_in_print_matrix(A::Tridiagonal,i::Integer,j::Integer,s::AbstractString) - i==j-1||i==j||i==j+1 ? 
s : Base.replace_with_centered_mark(s) -end - -# reverse - -Base._reverse(A::Tridiagonal, dims) = reverse!(Matrix(A); dims) -Base._reverse(A::Tridiagonal, dims::Colon) = Tridiagonal(reverse(A.du), reverse(A.d), reverse(A.dl)) -function Base._reverse!(A::Tridiagonal, dims::Colon) - n = length(A.du) # == length(A.dl), & always 1-based - # reverse and swap A.dl and A.du: - @inbounds for i in 1:n - A.dl[i], A.du[n+1-i] = A.du[n+1-i], A.dl[i] - end - reverse!(A.d) - return A -end - -#tril and triu - -iszero(M::Tridiagonal) = iszero(M.dl) && iszero(M.d) && iszero(M.du) -isone(M::Tridiagonal) = iszero(M.dl) && all(isone, M.d) && iszero(M.du) -Base.@constprop :aggressive function istriu(M::Tridiagonal, k::Integer=0) - if k <= -1 - return true - elseif k == 0 - return iszero(M.dl) - elseif k == 1 - return iszero(M.dl) && iszero(M.d) - else # k >= 2 - return iszero(M.dl) && iszero(M.d) && iszero(M.du) - end -end -Base.@constprop :aggressive function istril(M::Tridiagonal, k::Integer=0) - if k >= 1 - return true - elseif k == 0 - return iszero(M.du) - elseif k == -1 - return iszero(M.du) && iszero(M.d) - else # k <= -2 - return iszero(M.du) && iszero(M.d) && iszero(M.dl) - end -end -isdiag(M::Tridiagonal) = iszero(M.dl) && iszero(M.du) - -function tril!(M::Tridiagonal{T}, k::Integer=0) where T - n = length(M.d) - if !(-n - 1 <= k <= n - 1) - throw(ArgumentError(LazyString(lazy"the requested diagonal, $k, must be at least ", - lazy"$(-n - 1) and at most $(n - 1) in an $n-by-$n matrix"))) - elseif k < -1 - fill!(M.dl, zero(T)) - fill!(M.d, zero(T)) - fill!(M.du, zero(T)) - elseif k == -1 - fill!(M.d, zero(T)) - fill!(M.du, zero(T)) - elseif k == 0 - fill!(M.du, zero(T)) - end - return M -end - -function triu!(M::Tridiagonal{T}, k::Integer=0) where T - n = length(M.d) - if !(-n + 1 <= k <= n + 1) - throw(ArgumentError(LazyString(lazy"the requested diagonal, $k, must be at least ", - lazy"$(-n + 1) and at most $(n + 1) in an $n-by-$n matrix"))) - elseif k > 1 - fill!(M.dl, zero(T)) - fill!(M.d, zero(T)) - fill!(M.du, zero(T)) - elseif k == 1 - fill!(M.dl, zero(T)) - fill!(M.d, zero(T)) - elseif k == 0 - fill!(M.dl, zero(T)) - end - return M -end - -tr(M::Tridiagonal) = sum(M.d) - -################### -# Generic methods # -################### - -+(A::Tridiagonal, B::Tridiagonal) = Tridiagonal(A.dl+B.dl, A.d+B.d, A.du+B.du) --(A::Tridiagonal, B::Tridiagonal) = Tridiagonal(A.dl-B.dl, A.d-B.d, A.du-B.du) --(A::Tridiagonal) = Tridiagonal(-A.dl, -A.d, -A.du) -*(A::Tridiagonal, B::Number) = Tridiagonal(A.dl*B, A.d*B, A.du*B) -*(B::Number, A::Tridiagonal) = Tridiagonal(B*A.dl, B*A.d, B*A.du) -function rmul!(T::Tridiagonal, x::Number) - if size(T,1) > 2 - # ensure that zeros are preserved on scaling - y = T[3,1] * x - iszero(y) || throw(ArgumentError(LazyString("cannot set index (3, 1) off ", - lazy"the tridiagonal band to a nonzero value ($y)"))) - end - T.dl .*= x - T.d .*= x - T.du .*= x - return T -end -function lmul!(x::Number, T::Tridiagonal) - if size(T,1) > 2 - # ensure that zeros are preserved on scaling - y = x * T[3,1] - iszero(y) || throw(ArgumentError(LazyString("cannot set index (3, 1) off ", - lazy"the tridiagonal band to a nonzero value ($y)"))) - end - @. T.dl = x * T.dl - @. T.d = x * T.d - @. 
T.du = x * T.du - return T -end -/(A::Tridiagonal, B::Number) = Tridiagonal(A.dl/B, A.d/B, A.du/B) -\(B::Number, A::Tridiagonal) = Tridiagonal(B\A.dl, B\A.d, B\A.du) - -==(A::Tridiagonal, B::Tridiagonal) = (A.dl==B.dl) && (A.d==B.d) && (A.du==B.du) -function ==(A::Tridiagonal, B::SymTridiagonal) - iseq = all(Iterators.map((x, y) -> x == transpose(y), A.du, A.dl)) - iseq = iseq && A.du == _evview(B) - iseq && all(Iterators.map((x, y) -> x == symmetric(y, :U), A.d, B.dv)) -end -==(A::SymTridiagonal, B::Tridiagonal) = B == A - -det(A::Tridiagonal) = det_usmani(A.dl, A.d, A.du) - -AbstractMatrix{T}(M::Tridiagonal) where {T} = Tridiagonal{T}(M) -AbstractMatrix{T}(M::Tridiagonal{T}) where {T} = copy(M) -Tridiagonal{T}(M::SymTridiagonal{T}) where {T} = Tridiagonal(M) -function SymTridiagonal{T}(M::Tridiagonal) where T - if issymmetric(M) - return SymTridiagonal{T}(convert(AbstractVector{T},M.d), convert(AbstractVector{T},M.dl)) - else - throw(ArgumentError("Tridiagonal is not symmetric, cannot convert to SymTridiagonal")) - end -end - -Base._sum(A::Tridiagonal, ::Colon) = sum(A.d) + sum(A.dl) + sum(A.du) -function Base._sum(A::SymTridiagonal, ::Colon) - se = sum(_evview(A)) - symmetric(sum(A.dv), :U) + se + transpose(se) -end - -function Base._sum(A::Tridiagonal, dims::Integer) - res = Base.reducedim_initarray(A, dims, zero(eltype(A))) - n = length(A.d) - if n == 0 - return res - elseif n == 1 - res[1] = A.d[1] - return res - end - @inbounds begin - if dims == 1 - res[1] = A.dl[1] + A.d[1] - for i = 2:n-1 - res[i] = A.dl[i] + A.d[i] + A.du[i-1] - end - res[n] = A.d[n] + A.du[n-1] - elseif dims == 2 - res[1] = A.d[1] + A.du[1] - for i = 2:n-1 - res[i] = A.dl[i-1] + A.d[i] + A.du[i] - end - res[n] = A.dl[n-1] + A.d[n] - elseif dims >= 3 - for i = 1:n-1 - res[i,i+1] = A.du[i] - res[i,i] = A.d[i] - res[i+1,i] = A.dl[i] - end - res[n,n] = A.d[n] - end - end - res -end - -function Base._sum(A::SymTridiagonal, dims::Integer) - res = Base.reducedim_initarray(A, dims, zero(eltype(A))) - n = length(A.dv) - if n == 0 - return res - elseif n == 1 - res[1] = A.dv[1] - return res - end - @inbounds begin - if dims == 1 - res[1] = transpose(A.ev[1]) + symmetric(A.dv[1], :U) - for i = 2:n-1 - res[i] = transpose(A.ev[i]) + symmetric(A.dv[i], :U) + A.ev[i-1] - end - res[n] = symmetric(A.dv[n], :U) + A.ev[n-1] - elseif dims == 2 - res[1] = symmetric(A.dv[1], :U) + A.ev[1] - for i = 2:n-1 - res[i] = transpose(A.ev[i-1]) + symmetric(A.dv[i], :U) + A.ev[i] - end - res[n] = transpose(A.ev[n-1]) + symmetric(A.dv[n], :U) - elseif dims >= 3 - for i = 1:n-1 - res[i,i+1] = A.ev[i] - res[i,i] = symmetric(A.dv[i], :U) - res[i+1,i] = transpose(A.ev[i]) - end - res[n,n] = symmetric(A.dv[n], :U) - end - end - res -end - -function dot(x::AbstractVector, A::Tridiagonal, y::AbstractVector) - require_one_based_indexing(x, y) - nx, ny = length(x), length(y) - (nx == size(A, 1) == ny) || throw(DimensionMismatch()) - if nx ≤ 1 - nx == 0 && return dot(zero(eltype(x)), zero(eltype(A)), zero(eltype(y))) - return dot(x[1], A.d[1], y[1]) - end - @inbounds begin - x₀ = x[1] - x₊ = x[2] - dl, d, du = A.dl, A.d, A.du - r = dot(adjoint(d[1])*x₀ + adjoint(dl[1])*x₊, y[1]) - for j in 2:nx-1 - x₋, x₀, x₊ = x₀, x₊, x[j+1] - r += dot(adjoint(du[j-1])*x₋ + adjoint(d[j])*x₀ + adjoint(dl[j])*x₊, y[j]) - end - r += dot(adjoint(du[nx-1])*x₀ + adjoint(d[nx])*x₊, y[nx]) - end - return r -end - -function cholesky(S::SymTridiagonal, ::NoPivot = NoPivot(); check::Bool = true) - if !ishermitian(S) - check && checkpositivedefinite(-1) - return Cholesky(S, 'U', 
convert(BlasInt, -1)) - end - T = choltype(S) - cholesky!(Hermitian(Bidiagonal{T}(diag(S, 0), diag(S, 1), :U)), NoPivot(); check = check) -end - -# See dgtsv.f -""" - ldiv!(A::Tridiagonal, B::AbstractVecOrMat) -> B - -Compute `A \\ B` in-place by Gaussian elimination with partial pivoting and store the result -in `B`, returning the result. In the process, the diagonals of `A` are overwritten as well. - -!!! compat "Julia 1.11" - `ldiv!` for `Tridiagonal` left-hand sides requires at least Julia 1.11. -""" -function ldiv!(A::Tridiagonal, B::AbstractVecOrMat) - LinearAlgebra.require_one_based_indexing(B) - n = size(A, 1) - if n != size(B,1) - throw(DimensionMismatch(lazy"matrix has dimensions ($n,$n) but right hand side has $(size(B,1)) rows")) - end - nrhs = size(B, 2) - - # Initialize variables - dl = A.dl - d = A.d - du = A.du - - @inbounds begin - for i in 1:n-1 - # pivot or not? - if abs(d[i]) >= abs(dl[i]) - # No interchange - if d[i] != 0 - fact = dl[i]/d[i] - d[i+1] -= fact*du[i] - for j in 1:nrhs - B[i+1,j] -= fact*B[i,j] - end - else - checknonsingular(i) - end - i < n-1 && (dl[i] = 0) - else - # Interchange - fact = d[i]/dl[i] - d[i] = dl[i] - tmp = d[i+1] - d[i+1] = du[i] - fact*tmp - du[i] = tmp - if i < n-1 - dl[i] = du[i+1] - du[i+1] = -fact*dl[i] - end - for j in 1:nrhs - temp = B[i,j] - B[i,j] = B[i+1,j] - B[i+1,j] = temp - fact*B[i+1,j] - end - end - end - iszero(d[n]) && checknonsingular(n) - # backward substitution - for j in 1:nrhs - B[n,j] /= d[n] - if n > 1 - B[n-1,j] = (B[n-1,j] - du[n-1]*B[n,j])/d[n-1] - end - for i in n-2:-1:1 - B[i,j] = (B[i,j] - du[i]*B[i+1,j] - dl[i]*B[i+2,j]) / d[i] - end - end - end - return B -end - -# combinations of Tridiagonal and Symtridiagonal -# copyto! for matching axes -function _copyto_banded!(A::Tridiagonal, B::SymTridiagonal) - Bev = _evview(B) - A.du .= Bev - # Broadcast identity for numbers to access the faster copyto! path - # This uses the fact that transpose(x::Number) = x and symmetric(x::Number) = x - A.dl .= (eltype(B) <: Number ? identity : transpose).(Bev) - A.d .= (eltype(B) <: Number ? identity : symmetric).(B.dv) - return A -end -function _copyto_banded!(A::SymTridiagonal, B::Tridiagonal) - issymmetric(B) || throw(ArgumentError("cannot copy an asymmetric Tridiagonal matrix to a SymTridiagonal")) - A.dv .= B.d - _evview(A) .= B.du - return A -end - -# display -function show(io::IO, T::Tridiagonal) - print(io, "Tridiagonal(") - show(io, T.dl) - print(io, ", ") - show(io, T.d) - print(io, ", ") - show(io, T.du) - print(io, ")") -end -function show(io::IO, S::SymTridiagonal) - print(io, "SymTridiagonal(") - show(io, _diagview(S)) - print(io, ", ") - show(io, S.ev) - print(io, ")") -end diff --git a/stdlib/LinearAlgebra/src/uniformscaling.jl b/stdlib/LinearAlgebra/src/uniformscaling.jl deleted file mode 100644 index 4422799fada85..0000000000000 --- a/stdlib/LinearAlgebra/src/uniformscaling.jl +++ /dev/null @@ -1,448 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -import Base: copy, adjoint, getindex, show, transpose, one, zero, inv, float, - hcat, vcat, hvcat, ^ - -""" - UniformScaling{T<:Number} - -Generically sized uniform scaling operator defined as a scalar times -the identity operator, `λ*I`. Although without an explicit `size`, it -acts similarly to a matrix in many cases and includes support for some -indexing. See also [`I`](@ref). - -!!! compat "Julia 1.6" - Indexing using ranges is available as of Julia 1.6. - -# Examples -```jldoctest -julia> J = UniformScaling(2.) 
-UniformScaling{Float64} -2.0*I - -julia> A = [1. 2.; 3. 4.] -2×2 Matrix{Float64}: - 1.0 2.0 - 3.0 4.0 - -julia> J*A -2×2 Matrix{Float64}: - 2.0 4.0 - 6.0 8.0 - -julia> J[1:2, 1:2] -2×2 Matrix{Float64}: - 2.0 0.0 - 0.0 2.0 -``` -""" -struct UniformScaling{T<:Number} - λ::T -end - -""" - I - -An object of type [`UniformScaling`](@ref), representing an identity matrix of any size. - -# Examples -```jldoctest -julia> fill(1, (5,6)) * I == fill(1, (5,6)) -true - -julia> [1 2im 3; 1im 2 3] * I -2×3 Matrix{Complex{Int64}}: - 1+0im 0+2im 3+0im - 0+1im 2+0im 3+0im -``` -""" -const I = UniformScaling(true) - -""" - (I::UniformScaling)(n::Integer) - -Construct a `Diagonal` matrix from a `UniformScaling`. - -!!! compat "Julia 1.2" - This method is available as of Julia 1.2. - -# Examples -```jldoctest -julia> I(3) -3×3 Diagonal{Bool, Vector{Bool}}: - 1 ⋅ ⋅ - ⋅ 1 ⋅ - ⋅ ⋅ 1 - -julia> (0.7*I)(3) -3×3 Diagonal{Float64, Vector{Float64}}: - 0.7 ⋅ ⋅ - ⋅ 0.7 ⋅ - ⋅ ⋅ 0.7 -``` -""" -(I::UniformScaling)(n::Integer) = Diagonal(fill(I.λ, n)) - -eltype(::Type{UniformScaling{T}}) where {T} = T -ndims(J::UniformScaling) = 2 -Base.has_offset_axes(::UniformScaling) = false -getindex(J::UniformScaling, ind::CartesianIndex{2}) = J[Tuple(ind)...] -getindex(J::UniformScaling, i::Integer,j::Integer) = ifelse(i==j,J.λ,zero(J.λ)) - -getindex(J::UniformScaling, n::Integer, m::AbstractVector{<:Integer}) = getindex(J, m, n) -function getindex(J::UniformScaling{T}, n::AbstractVector{<:Integer}, m::Integer) where T - v = zeros(T, axes(n)) - @inbounds for (i,ii) in pairs(n) - if ii == m - v[i] = J.λ - end - end - return v -end - -function getindex(J::UniformScaling{T}, n::AbstractVector{<:Integer}, m::AbstractVector{<:Integer}) where T - A = zeros(T, axes(n)..., axes(m)...) - @inbounds for (j,jj) in pairs(m), (i,ii) in pairs(n) - if ii == jj - A[i,j] = J.λ - end - end - return A -end - -function show(io::IO, ::MIME"text/plain", J::UniformScaling) - s = "$(J.λ)" - if occursin(r"\w+\s*[\+\-]\s*\w+", s) - s = "($s)" - end - print(io, typeof(J), "\n$s*I") -end -copy(J::UniformScaling) = UniformScaling(J.λ) - -Base.convert(::Type{UniformScaling{T}}, J::UniformScaling) where {T} = UniformScaling(convert(T, J.λ))::UniformScaling{T} - -conj(J::UniformScaling) = UniformScaling(conj(J.λ)) -real(J::UniformScaling) = UniformScaling(real(J.λ)) -imag(J::UniformScaling) = UniformScaling(imag(J.λ)) - -float(J::UniformScaling) = UniformScaling(float(J.λ)) - -transpose(J::UniformScaling) = J -adjoint(J::UniformScaling) = UniformScaling(conj(J.λ)) - -one(::Type{UniformScaling{T}}) where {T} = UniformScaling(one(T)) -one(J::UniformScaling{T}) where {T} = one(UniformScaling{T}) -oneunit(::Type{UniformScaling{T}}) where {T} = UniformScaling(oneunit(T)) -oneunit(J::UniformScaling{T}) where {T} = oneunit(UniformScaling{T}) -zero(::Type{UniformScaling{T}}) where {T} = UniformScaling(zero(T)) -zero(J::UniformScaling{T}) where {T} = zero(UniformScaling{T}) - -isdiag(::UniformScaling) = true -istriu(::UniformScaling) = true -istril(::UniformScaling) = true -issymmetric(::UniformScaling) = true -ishermitian(J::UniformScaling) = isreal(J.λ) -isposdef(J::UniformScaling) = isposdef(J.λ) - -(+)(J::UniformScaling, x::Number) = J.λ + x -(+)(x::Number, J::UniformScaling) = x + J.λ -(-)(J::UniformScaling, x::Number) = J.λ - x -(-)(x::Number, J::UniformScaling) = x - J.λ - -(+)(J::UniformScaling) = UniformScaling(+J.λ) -(+)(J1::UniformScaling, J2::UniformScaling) = UniformScaling(J1.λ+J2.λ) -(+)(B::BitArray{2}, J::UniformScaling) = Array(B) + J -(+)(J::UniformScaling, 
B::BitArray{2}) = J + Array(B) -(+)(J::UniformScaling, A::AbstractMatrix) = A + J - -(-)(J::UniformScaling) = UniformScaling(-J.λ) -(-)(J1::UniformScaling, J2::UniformScaling) = UniformScaling(J1.λ-J2.λ) -(-)(B::BitArray{2}, J::UniformScaling) = Array(B) - J -(-)(J::UniformScaling, B::BitArray{2}) = J - Array(B) -(-)(A::AbstractMatrix, J::UniformScaling) = A + (-J) - -# matrix functions -for f in ( :exp, :log, :cis, - :expm1, :log1p, - :sqrt, :cbrt, - :sin, :cos, :tan, - :asin, :acos, :atan, - :csc, :sec, :cot, - :acsc, :asec, :acot, - :sinh, :cosh, :tanh, - :asinh, :acosh, :atanh, - :csch, :sech, :coth, - :acsch, :asech, :acoth ) - @eval Base.$f(J::UniformScaling) = UniformScaling($f(J.λ)) -end -for f in (:sincos, :sincosd) - @eval Base.$f(J::UniformScaling) = map(UniformScaling, $f(J.λ)) -end - -# Unit{Lower/Upper}Triangular matrices become {Lower/Upper}Triangular under -# addition with a UniformScaling -for (t1, t2) in ((:UnitUpperTriangular, :UpperTriangular), - (:UnitLowerTriangular, :LowerTriangular)) - @eval begin - function (+)(UL::$t1, J::UniformScaling) - ULnew = copymutable_oftype(UL.data, Base.promote_op(+, eltype(UL), typeof(J))) - for i in axes(ULnew, 1) - ULnew[i,i] = one(ULnew[i,i]) + J - end - return ($t2)(ULnew) - end - end -end - -# Adding a complex UniformScaling to the diagonal of a Hermitian -# matrix breaks the hermiticity, if the UniformScaling is non-real. -# However, to preserve type stability, we do not special-case a -# UniformScaling{<:Complex} that happens to be real. -function (+)(A::Hermitian, J::UniformScaling{<:Complex}) - TS = Base.promote_op(+, eltype(A), typeof(J)) - B = copytri!(copymutable_oftype(parent(A), TS), A.uplo, true) - for i in diagind(B, IndexStyle(B)) - B[i] = A[i] + J - end - return B -end - -function (-)(J::UniformScaling{<:Complex}, A::Hermitian) - TS = Base.promote_op(+, eltype(A), typeof(J)) - B = copytri!(copymutable_oftype(parent(A), TS), A.uplo, true) - B .= .-B - for i in diagind(B, IndexStyle(B)) - B[i] = J - A[i] - end - return B -end - -function (+)(A::AbstractMatrix, J::UniformScaling) - checksquare(A) - B = copymutable_oftype(A, Base.promote_op(+, eltype(A), typeof(J))) - for i in intersect(axes(A,1), axes(A,2)) - @inbounds B[i,i] += J - end - return B -end - -function (-)(J::UniformScaling, A::AbstractMatrix) - checksquare(A) - B = convert(AbstractMatrix{Base.promote_op(+, eltype(A), typeof(J))}, -A) - for i in intersect(axes(A,1), axes(A,2)) - @inbounds B[i,i] += J - end - return B -end - -inv(J::UniformScaling) = UniformScaling(inv(J.λ)) -opnorm(J::UniformScaling, p::Real=2) = opnorm(J.λ, p) - -pinv(J::UniformScaling) = ifelse(iszero(J.λ), - UniformScaling(zero(inv(J.λ))), # type stability - UniformScaling(inv(J.λ))) - -function det(J::UniformScaling{T}) where T - if isone(J.λ) - one(T) - elseif iszero(J.λ) - zero(T) - else - throw(ArgumentError("Determinant of UniformScaling is only well-defined when λ = 0 or 1.")) - end -end - -function tr(J::UniformScaling{T}) where T - if iszero(J.λ) - zero(T) - else - throw(ArgumentError("Trace of UniformScaling is only well-defined when λ = 0")) - end -end - -*(J1::UniformScaling, J2::UniformScaling) = UniformScaling(J1.λ*J2.λ) -*(B::BitArray{2}, J::UniformScaling) = *(Array(B), J::UniformScaling) -*(J::UniformScaling, B::BitArray{2}) = *(J::UniformScaling, Array(B)) -*(A::AbstractMatrix, J::UniformScaling) = A*J.λ -*(v::AbstractVector, J::UniformScaling) = reshape(v, length(v), 1) * J -*(J::UniformScaling, A::AbstractVecOrMat) = J.λ*A -*(x::Number, J::UniformScaling) = 
UniformScaling(x*J.λ) -*(J::UniformScaling, x::Number) = UniformScaling(J.λ*x) - -/(J1::UniformScaling, J2::UniformScaling) = J2.λ == 0 ? throw(SingularException(1)) : UniformScaling(J1.λ/J2.λ) -/(J::UniformScaling, A::AbstractMatrix) = - (invA = inv(A); lmul!(J.λ, convert(AbstractMatrix{promote_type(eltype(J),eltype(invA))}, invA))) -/(A::AbstractMatrix, J::UniformScaling) = J.λ == 0 ? throw(SingularException(1)) : A/J.λ -/(v::AbstractVector, J::UniformScaling) = reshape(v, length(v), 1) / J - -/(J::UniformScaling, x::Number) = UniformScaling(J.λ/x) -//(J::UniformScaling, x::Number) = UniformScaling(J.λ//x) - -\(J1::UniformScaling, J2::UniformScaling) = J1.λ == 0 ? throw(SingularException(1)) : UniformScaling(J1.λ\J2.λ) -\(J::UniformScaling, A::AbstractVecOrMat) = J.λ == 0 ? throw(SingularException(1)) : J.λ\A -\(A::AbstractMatrix, J::UniformScaling) = - (invA = inv(A); rmul!(convert(AbstractMatrix{promote_type(eltype(invA),eltype(J))}, invA), J.λ)) -\(F::Factorization, J::UniformScaling) = F \ J(size(F,1)) - -\(x::Number, J::UniformScaling) = UniformScaling(x\J.λ) - -@inline mul!(C::AbstractMatrix, A::AbstractMatrix, J::UniformScaling, alpha::Number, beta::Number) = - mul!(C, A, J.λ, alpha, beta) -@inline mul!(C::AbstractVecOrMat, J::UniformScaling, B::AbstractVecOrMat, alpha::Number, beta::Number) = - mul!(C, J.λ, B, alpha, beta) - -function mul!(out::AbstractMatrix{T}, a::Number, B::UniformScaling, α::Number, β::Number) where {T} - checksquare(out) - if iszero(β) # zero contribution of the out matrix - fill!(out, zero(T)) - elseif !isone(β) - rmul!(out, β) - end - s = convert(T, a*B.λ*α) - if !iszero(s) - @inbounds for i in diagind(out, IndexStyle(out)) - out[i] += s - end - end - return out -end -@inline mul!(out::AbstractMatrix, A::UniformScaling, b::Number, α::Number, β::Number)= - mul!(out, A.λ, UniformScaling(b), α, β) -rmul!(A::AbstractMatrix, J::UniformScaling) = rmul!(A, J.λ) -lmul!(J::UniformScaling, B::AbstractVecOrMat) = lmul!(J.λ, B) -rdiv!(A::AbstractMatrix, J::UniformScaling) = rdiv!(A, J.λ) -ldiv!(J::UniformScaling, B::AbstractVecOrMat) = ldiv!(J.λ, B) -ldiv!(Y::AbstractVecOrMat, J::UniformScaling, B::AbstractVecOrMat) = (Y .= J.λ .\ B) - -Broadcast.broadcasted(::typeof(*), x::Number,J::UniformScaling) = UniformScaling(x*J.λ) -Broadcast.broadcasted(::typeof(*), J::UniformScaling,x::Number) = UniformScaling(J.λ*x) - -Broadcast.broadcasted(::typeof(/), J::UniformScaling,x::Number) = UniformScaling(J.λ/x) - -Broadcast.broadcasted(::typeof(\), x::Number,J::UniformScaling) = UniformScaling(x\J.λ) - -(^)(J::UniformScaling, x::Number) = UniformScaling((J.λ)^x) -Base.literal_pow(::typeof(^), J::UniformScaling, x::Val) = UniformScaling(Base.literal_pow(^, J.λ, x)) - -Broadcast.broadcasted(::typeof(^), J::UniformScaling, x::Number) = UniformScaling(J.λ^x) -function Broadcast.broadcasted(::typeof(Base.literal_pow), ::typeof(^), J::UniformScaling, x::Val) - UniformScaling(Base.literal_pow(^, J.λ, x)) -end - -==(J1::UniformScaling,J2::UniformScaling) = (J1.λ == J2.λ) - -## equality comparison with UniformScaling -==(J::UniformScaling, A::AbstractMatrix) = A == J -function ==(A::AbstractMatrix, J::UniformScaling) - require_one_based_indexing(A) - size(A, 1) == size(A, 2) || return false - iszero(J.λ) && return iszero(A) - isone(J.λ) && return isone(A) - return A == J.λ*one(A) -end -function ==(A::StridedMatrix, J::UniformScaling) - size(A, 1) == size(A, 2) || return false - iszero(J.λ) && return iszero(A) - isone(J.λ) && return isone(A) - for j in axes(A, 2), i in axes(A, 1) - 
ifelse(i == j, A[i, j] == J.λ, iszero(A[i, j])) || return false - end - return true -end - -isequal(A::AbstractMatrix, J::UniformScaling) = false -isequal(J::UniformScaling, A::AbstractMatrix) = false - -function isapprox(J1::UniformScaling{T}, J2::UniformScaling{S}; - atol::Real=0, rtol::Real=Base.rtoldefault(T,S,atol), nans::Bool=false) where {T<:Number,S<:Number} - isapprox(J1.λ, J2.λ, rtol=rtol, atol=atol, nans=nans) -end -function isapprox(J::UniformScaling, A::AbstractMatrix; - atol::Real = 0, - rtol::Real = Base.rtoldefault(promote_leaf_eltypes(A), eltype(J), atol), - nans::Bool = false, norm::Function = norm) - n = checksquare(A) - normJ = norm === opnorm ? abs(J.λ) : - norm === LinearAlgebra.norm ? abs(J.λ) * sqrt(n) : - norm(Diagonal(fill(J.λ, n))) - return norm(A - J) <= max(atol, rtol * max(norm(A), normJ)) -end -isapprox(A::AbstractMatrix, J::UniformScaling; kwargs...) = isapprox(J, A; kwargs...) - -""" - copyto!(dest::AbstractMatrix, src::UniformScaling) - -Copies a [`UniformScaling`](@ref) onto a matrix. - -!!! compat "Julia 1.1" - In Julia 1.0 this method only supported a square destination matrix. Julia 1.1. added - support for a rectangular matrix. -""" -function copyto!(A::AbstractMatrix, J::UniformScaling) - require_one_based_indexing(A) - fill!(A, 0) - λ = J.λ - for i = 1:min(size(A,1),size(A,2)) - @inbounds A[i,i] = λ - end - return A -end - -function copyto!(A::Diagonal, J::UniformScaling) - A.diag .= J.λ - return A -end -function copyto!(A::Union{Bidiagonal, SymTridiagonal}, J::UniformScaling) - A.ev .= 0 - A.dv .= J.λ - return A -end -function copyto!(A::Tridiagonal, J::UniformScaling) - A.dl .= 0 - A.du .= 0 - A.d .= J.λ - return A -end - -""" - copy!(dest::AbstractMatrix, src::UniformScaling) - -Copies a [`UniformScaling`](@ref) onto a matrix. - -!!! compat "Julia 1.12" - This method is available as of Julia 1.12. -""" -Base.copy!(A::AbstractMatrix, J::UniformScaling) = copyto!(A, J) - -function cond(J::UniformScaling{T}) where T - onereal = inv(one(real(J.λ))) - return J.λ ≠ zero(T) ? onereal : oftype(onereal, Inf) -end - -## Matrix construction from UniformScaling -function Matrix{T}(s::UniformScaling, dims::Dims{2}) where {T} - A = zeros(T, dims) - v = T(s.λ) - for i in diagind(dims...) 
- @inbounds A[i] = v - end - return A -end -Matrix{T}(s::UniformScaling, m::Integer, n::Integer) where {T} = Matrix{T}(s, Dims((m, n))) -Matrix(s::UniformScaling, m::Integer, n::Integer) = Matrix(s, Dims((m, n))) -Matrix(s::UniformScaling, dims::Dims{2}) = Matrix{eltype(s)}(s, dims) -Array{T}(s::UniformScaling, dims::Dims{2}) where {T} = Matrix{T}(s, dims) -Array{T}(s::UniformScaling, m::Integer, n::Integer) where {T} = Matrix{T}(s, m, n) -Array(s::UniformScaling, m::Integer, n::Integer) = Matrix(s, m, n) -Array(s::UniformScaling, dims::Dims{2}) = Matrix(s, dims) - -dot(A::AbstractMatrix, J::UniformScaling) = dot(tr(A), J.λ) -dot(J::UniformScaling, A::AbstractMatrix) = dot(J.λ, tr(A)) - -dot(x::AbstractVector, J::UniformScaling, y::AbstractVector) = dot(x, J.λ, y) -dot(x::AbstractVector, a::Number, y::AbstractVector) = sum(t -> dot(t[1], a, t[2]), zip(x, y)) -dot(x::AbstractVector, a::Union{Real,Complex}, y::AbstractVector) = a*dot(x, y) - -# muladd -Base.muladd(A::UniformScaling, B::UniformScaling, z::UniformScaling) = - UniformScaling(A.λ * B.λ + z.λ) diff --git a/stdlib/LinearAlgebra/test/abstractq.jl b/stdlib/LinearAlgebra/test/abstractq.jl deleted file mode 100644 index 5bfd62b467718..0000000000000 --- a/stdlib/LinearAlgebra/test/abstractq.jl +++ /dev/null @@ -1,156 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestAbstractQ - -using Test -using LinearAlgebra -using LinearAlgebra: AbstractQ, AdjointQ -import LinearAlgebra: lmul!, rmul! -import Base: size, convert - -n = 5 - -@testset "custom AbstractQ type" begin - struct MyQ{T,S<:AbstractQ{T}} <: AbstractQ{T} - Q::S - end - MyQ{T}(Q::AbstractQ) where {T} = (P = convert(AbstractQ{T}, Q); MyQ{T,typeof(P)}(P)) - MyQ(Q::MyQ) = Q - - Base.size(Q::MyQ) = size(Q.Q) - LinearAlgebra.lmul!(Q::MyQ, B::AbstractVecOrMat) = lmul!(Q.Q, B) - LinearAlgebra.lmul!(adjQ::AdjointQ{<:Any,<:MyQ}, B::AbstractVecOrMat) = lmul!(parent(adjQ).Q', B) - LinearAlgebra.rmul!(A::AbstractVecOrMat, Q::MyQ) = rmul!(A, Q.Q) - LinearAlgebra.rmul!(A::AbstractVecOrMat, adjQ::AdjointQ{<:Any,<:MyQ}) = rmul!(A, parent(adjQ).Q') - Base.convert(::Type{AbstractQ{T}}, Q::MyQ) where {T} = MyQ{T}(Q.Q) - LinearAlgebra.det(Q::MyQ) = det(Q.Q) - - for T in (Float64, ComplexF64) - A = rand(T, n, n) - F = qr(A) - Q = MyQ(F.Q) - @test ndims(Q) == 2 - T <: Real && @test transpose(Q) == adjoint(Q) - T <: Complex && @test_throws ErrorException transpose(Q) - @test convert(AbstractQ{complex(T)}, Q) isa MyQ{complex(T)} - @test convert(AbstractQ{complex(T)}, Q') isa AdjointQ{<:complex(T),<:MyQ{complex(T)}} - @test *(Q) == Q - @test Q*I ≈ Q.Q*I rtol=2eps(real(T)) - @test Q'*I ≈ Q.Q'*I rtol=2eps(real(T)) - @test I*Q ≈ Q.Q*I rtol=2eps(real(T)) - @test I*Q' ≈ I*Q.Q' rtol=2eps(real(T)) - @test Q^3 ≈ Q*Q*Q - @test Q^2 ≈ Q*Q - @test Q^1 == Q - @test Q^(-1) == Q' - @test (Q')^(-1) == Q - @test (Q')^2 ≈ Q'*Q' - @test abs(det(Q)) ≈ 1 - @test logabsdet(Q)[1] ≈ 0 atol=2n*eps(real(T)) - y = rand(T, n) - @test Q * y ≈ Q.Q * y ≈ Q' \ y ≈ ldiv!(Q', copy(y)) ≈ ldiv!(zero(y), Q', y) - @test Q'y ≈ Q.Q' * y ≈ Q \ y ≈ ldiv!(Q, copy(y)) ≈ ldiv!(zero(y), Q, y) - @test y'Q ≈ y'Q.Q ≈ y' / Q' - @test y'Q' ≈ y'Q.Q' ≈ y' / Q - y = Matrix(y') - @test y*Q ≈ y*Q.Q ≈ y / Q' ≈ rdiv!(copy(y), Q') - @test y*Q' ≈ y*Q.Q' ≈ y / Q ≈ rdiv!(copy(y), Q) - Y = rand(T, n, n); X = similar(Y) - for transQ in (identity, adjoint), transY in (identity, adjoint), Y in (Y, Y') - @test mul!(X, transQ(Q), transY(Y)) ≈ transQ(Q) * transY(Y) ≈ transQ(Q.Q) * transY(Y) - @test mul!(X, transY(Y), 
transQ(Q)) ≈ transY(Y) * transQ(Q) ≈ transY(Y) * transQ(Q.Q) - end - @test convert(Matrix, Q) ≈ Matrix(Q) ≈ Q[:,:] ≈ copyto!(zeros(T, size(Q)), Q) ≈ Q.Q*I - @test convert(Matrix, Q') ≈ Matrix(Q') ≈ (Q')[:,:] ≈ copyto!(zeros(T, size(Q)), Q') ≈ Q.Q'*I - @test Q[1,:] == Q.Q[1,:] == view(Q, 1, :) - @test Q[:,1] == Q.Q[:,1] == view(Q, :, 1) - @test Q[1,1] == Q.Q[1,1] - @test Q[:] == Q.Q[:] - @test Q[:,1:3] == Q.Q[:,1:3] == view(Q, :, 1:3) - @test Q[:,1:3] ≈ Matrix(Q)[:,1:3] - @test Q[2:3,2:3] == view(Q, 2:3, 2:3) ≈ Matrix(Q)[2:3,2:3] - @test_throws BoundsError Q[0,1] - @test_throws BoundsError Q[n+1,1] - @test_throws BoundsError Q[1,0] - @test_throws BoundsError Q[1,n+1] - @test_throws BoundsError Q[:,1:n+1] - @test_throws BoundsError Q[:,0:n] - for perm in ((1, 2), (2, 1)) - P = PermutedDimsArray(zeros(T, size(Q)), perm) - @test copyto!(P, Q) ≈ Matrix(Q) - end - x = randn(T) - @test x * Q ≈ (x*I)*Q ≈ x * Q.Q - @test Q * x ≈ Q*(x*I) ≈ Q.Q * x - @test x * Q' ≈ (x*I)* Q' ≈ x * Q.Q' - @test Q' * x ≈ Q'*(x*I) ≈ Q.Q' * x - x = rand(T, 1) - Q = MyQ(qr(rand(T, 1, 1)).Q) - @test x * Q ≈ x * Q.Q - @test x * Q' ≈ x * Q.Q' - @test Q * x ≈ Q.Q * x - @test Q' * x ≈ Q.Q' * x - end - A = randn(Float64, 5, 3) - F = qr(A) - Q = MyQ(F.Q) - Prect = Matrix(F.Q) - Psquare = collect(F.Q) - @test Q == Prect - @test Q == Psquare - @test Q == F.Q*I - @test Q ≈ Prect - @test Q ≈ Psquare - @test Q ≈ F.Q*I - - @testset "similar" begin - QS = similar(Q) - @test QS isa Matrix{eltype(Q)} - @test size(QS) == size(Q) - - QS = similar(Q, Int8) - @test QS isa Matrix{Int8} - @test size(QS) == size(Q) - - QS = similar(Q, 1) - @test QS isa Vector{eltype(Q)} - @test size(QS) == (1,) - - QS = similar(Q, Int8, 2) - @test QS isa Vector{Int8} - @test size(QS) == (2,) - - QS = similar(Q, Int8, ()) - @test QS isa Array{Int8,0} - - QS = similar(Q, ()) - @test QS isa Array{eltype(Q),0} - end - - # matrix division - q, r = F - R = randn(Float64, 5, 5) - @test q / r ≈ Matrix(q) / r - @test_throws DimensionMismatch MyQ(q) / r # doesn't have size flexibility - @test q / R ≈ collect(q) / R - @test copy(r') \ q' ≈ (q / r)' - @test_throws DimensionMismatch copy(r') \ MyQ(q') - @test r \ q' ≈ r \ Matrix(q)' - @test R \ q' ≈ R \ MyQ(q') ≈ R \ collect(q') - @test R \ q ≈ R \ MyQ(q) ≈ R \ collect(q) - B = copy(A') - G = lq(B) - l, q = G - L = R - @test l \ q ≈ l \ Matrix(q) - @test_throws DimensionMismatch l \ MyQ(q) - @test L \ q ≈ L \ collect(q) - @test q' / copy(l') ≈ (l \ q)' - @test_throws DimensionMismatch MyQ(q') / copy(l') - @test q' / l ≈ Matrix(q)' / l - @test q' / L ≈ MyQ(q') / L ≈ collect(q)' / L - @test q / L ≈ Matrix(q) / L - @test MyQ(q) / L ≈ collect(q) / L -end - -end # module diff --git a/stdlib/LinearAlgebra/test/addmul.jl b/stdlib/LinearAlgebra/test/addmul.jl deleted file mode 100644 index 903e3b17f0ef1..0000000000000 --- a/stdlib/LinearAlgebra/test/addmul.jl +++ /dev/null @@ -1,273 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestAddmul - -using Base: rtoldefault -using Test -using LinearAlgebra -using LinearAlgebra: AbstractTriangular -using Random - -_rand(::Type{T}) where {T <: AbstractFloat} = T(randn()) -_rand(::Type{T}) where {F, T <: Complex{F}} = T(_rand(F), _rand(F)) -_rand(::Type{T}) where {T <: Integer} = - T(rand(max(typemin(T), -10):min(typemax(T), 10))) -_rand(::Type{BigInt}) = BigInt(_rand(Int)) - -function _rand(A::Type{<:Array}, shape) - T = eltype(A) - data = T[_rand(T) for _ in 1:prod(shape)] - return copy(reshape(data, shape)) -end - -constructor_of(::Type{T}) where T = getfield(parentmodule(T), nameof(T)) - -function _rand(A::Type{<: AbstractArray}, shape) - data = _rand(Array{eltype(A)}, shape) - T = constructor_of(A) - if A <: Union{Bidiagonal, Hermitian, Symmetric} - return T(data, rand([:U, :L])) - # Maybe test with both :U and :L? - end - return T(data) -end - -_rand(A::Type{<: SymTridiagonal{T}}, shape) where {T} = - SymTridiagonal(_rand(Symmetric{T}, shape)) - -const FloatOrC = Union{AbstractFloat, Complex{<: AbstractFloat}} -const IntegerOrC = Union{Integer, Complex{<: Integer}} -const LTri = Union{LowerTriangular, UnitLowerTriangular, Diagonal} -const UTri = Union{UpperTriangular, UnitUpperTriangular, Diagonal} - -needsquare(::Type{<:Matrix}) = false -needsquare(::Type) = true - -testdata = [] - -sizecandidates = 1:4 -floattypes = [ - Float64, Float32, ComplexF64, ComplexF32, # BlasFloat - BigFloat, -] -inttypes = [ - Int, - BigInt, -] -# `Bool` can be added to `inttypes` but it's hard to handle -# `InexactError` bug that is mentioned in: -# https://github.com/JuliaLang/julia/issues/30094#issuecomment-440175887 -alleltypes = [floattypes; inttypes] -celtypes = [Float64, ComplexF64, BigFloat, Int] - -mattypes = [ - Matrix, - Bidiagonal, - Diagonal, - Hermitian, - LowerTriangular, - SymTridiagonal, - Symmetric, - Tridiagonal, - UnitLowerTriangular, - UnitUpperTriangular, - UpperTriangular, -] - -isnanfillable(::AbstractArray) = false -isnanfillable(::Array{<:AbstractFloat}) = true -isnanfillable(A::AbstractArray{<:AbstractFloat}) = parent(A) isa Array - -""" -Sample `n` elements from `S` on average but make sure at least one -element is sampled. -""" -function sample(S, n::Real) - length(S) <= n && return S - xs = randsubseq(S, n / length(S)) - return length(xs) > 0 ? xs : rand(S, 1) # sample at least one -end - -function inputeltypes(celt, alleltypes = alleltypes) - # Skip if destination type is "too small" - celt <: Bool && return [] - filter(alleltypes) do aelt - celt <: Real && aelt <: Complex && return false - !(celt <: BigFloat) && aelt <: BigFloat && return false - !(celt <: BigInt) && aelt <: BigInt && return false - celt <: IntegerOrC && aelt <: FloatOrC && return false - if celt <: IntegerOrC && !(celt <: BigInt) - typemin(celt) > typemin(aelt) && return false - typemax(celt) < typemax(aelt) && return false - end - return true - end -end -# Note: using `randsubseq` instead of `rand` to avoid repetition. 
- -function inputmattypes(cmat, mattypes = mattypes) - # Skip if destination type is "too small" - cmat <: Union{Bidiagonal, Tridiagonal, SymTridiagonal, - UnitLowerTriangular, UnitUpperTriangular, - Hermitian, Symmetric} && return [] - filter(mattypes) do amat - cmat <: Diagonal && (amat <: Diagonal || return false) - cmat <: LowerTriangular && (amat <: LTri || return false) - cmat <: UpperTriangular && (amat <: UTri || return false) - return true - end -end - -n_samples = 1.5 -# n_samples = Inf # to try all combinations -for cmat in mattypes, - amat in sample(inputmattypes(cmat), n_samples), - bmat in sample(inputmattypes(cmat), n_samples), - celt in celtypes, - aelt in sample(inputeltypes(celt), n_samples), - belt in sample(inputeltypes(celt), n_samples) - - push!(testdata, (cmat{celt}, amat{aelt}, bmat{belt})) -end - -strongzero(α) = iszero(α) ? false : α -function compare_matmul(C, A, B, α, β, - rtol = max(rtoldefault.(real.(eltype.((C, A, B))))..., - rtoldefault.(real.(typeof.((α, β))))...); - Ac = collect(A), Bc = collect(B), Cc = collect(C)) - @testset let A=A, B=B, C=C, α=α, β=β - Ccopy = copy(C) - returned_mat = mul!(Ccopy, A, B, α, β) - @test returned_mat === Ccopy - atol = max(maximum(eps∘real∘float∘eltype, (C,A,B)), - maximum(eps∘real∘float∘typeof, (α,β))) - exp_val = Ac * Bc * strongzero(α) + Cc * strongzero(β) - @test collect(returned_mat) ≈ exp_val rtol=rtol atol=atol - rtol_match = isapprox(collect(returned_mat), exp_val, rtol=rtol) - if !(rtol_match || β isa Bool || isapprox(β, 0, atol=eps(typeof(β)))) - negβ = -β - returned_mat = mul!(copy(C), A, B, α, negβ) - exp_val = Ac * Bc * strongzero(α) + Cc * negβ - @test collect(returned_mat) ≈ exp_val rtol=rtol atol=atol - end - end -end - -@testset "mul!(::$TC, ::$TA, ::$TB, α, β)" for (TC, TA, TB) in testdata - if needsquare(TA) - na1 = na2 = rand(sizecandidates) - else - na1, na2 = rand(sizecandidates, 2) - end - if needsquare(TB) - nb2 = na2 - elseif needsquare(TC) - nb2 = na1 - else - nb2 = rand(sizecandidates) - end - asize = (na1, na2) - bsize = (na2, nb2) - csize = (na1, nb2) - - C = _rand(TC, csize) - A = _rand(TA, asize) - B = _rand(TB, bsize) - Cc = Matrix(C) - Ac = Matrix(A) - Bc = Matrix(B) - - @testset for α in Any[true, eltype(TC)(1), _rand(eltype(TC))], - β in Any[false, eltype(TC)(0), _rand(eltype(TC))] - - - # This is similar to how `isapprox` choose `rtol` (when - # `atol=0`) but consider all number types involved: - rtol = max(rtoldefault.(real.(eltype.((C, A, B))))..., - rtoldefault.(real.(typeof.((α, β))))...) - - compare_matmul(C, A, B, α, β, rtol; Ac, Bc, Cc) - - y = C[:, 1] - x = B[:, 1] - yc = Vector(y) - xc = Vector(x) - compare_matmul(y, A, x, α, β, rtol; Ac, Bc=xc, Cc=yc) - - if TC <: Matrix - @testset "adjoint and transpose" begin - @testset for fa in [identity, adjoint, transpose], - fb in [identity, adjoint, transpose] - fa === fb === identity && continue - - Af = fa === identity ? A : fa(_rand(TA, reverse(asize))) - Bf = fb === identity ? 
B : fb(_rand(TB, reverse(bsize))) - - compare_matmul(C, Af, Bf, α, β, rtol) - end - end - end - - if isnanfillable(C) - @testset "β = 0 ignores C .= NaN" begin - Ccopy = copy(C) - parent(Ccopy) .= NaN - compare_matmul(Ccopy, A, B, α, zero(eltype(C)), rtol; Ac, Bc, Cc) - end - end - - if isnanfillable(A) - @testset "α = 0 ignores A .= NaN" begin - Acopy = copy(A) - parent(Acopy) .= NaN - compare_matmul(C, Acopy, B, zero(eltype(A)), β, rtol; Ac, Bc, Cc) - end - end - end -end - -@testset "issue #55727" begin - C = zeros(1,1) - @testset "$(nameof(typeof(A)))" for A in Any[Diagonal([NaN]), - Bidiagonal([NaN], Float64[], :U), - Bidiagonal([NaN], Float64[], :L), - SymTridiagonal([NaN], Float64[]), - Tridiagonal(Float64[], [NaN], Float64[]), - ] - @testset "$(nameof(typeof(B)))" for B in Any[ - Diagonal([1.0]), - Bidiagonal([1.0], Float64[], :U), - Bidiagonal([1.0], Float64[], :L), - SymTridiagonal([1.0], Float64[]), - Tridiagonal(Float64[], [1.0], Float64[]), - ] - C .= 0 - @test mul!(C, A, B, 0.0, false)[] === 0.0 - @test mul!(C, B, A, 0.0, false)[] === 0.0 - end - end -end - -@testset "Diagonal scaling of a triangular matrix with a non-triangular destination" begin - for MT in (UpperTriangular, UnitUpperTriangular, LowerTriangular, UnitLowerTriangular) - U = MT(reshape([1:9;],3,3)) - M = Array(U) - D = Diagonal(1:3) - A = reshape([1:9;],3,3) - @test mul!(copy(A), U, D, 2, 3) == M * D * 2 + A * 3 - @test mul!(copy(A), D, U, 2, 3) == D * M * 2 + A * 3 - - # nan values with iszero(alpha) - D = Diagonal(fill(NaN,3)) - @test mul!(copy(A), U, D, 0, 3) == A * 3 - @test mul!(copy(A), D, U, 0, 3) == A * 3 - - # nan values with iszero(beta) - A = fill(NaN,3,3) - D = Diagonal(1:3) - @test mul!(copy(A), U, D, 2, 0) == M * D * 2 - @test mul!(copy(A), D, U, 2, 0) == D * M * 2 - end -end - -end # module diff --git a/stdlib/LinearAlgebra/test/adjtrans.jl b/stdlib/LinearAlgebra/test/adjtrans.jl deleted file mode 100644 index 6cf2ff9ada09c..0000000000000 --- a/stdlib/LinearAlgebra/test/adjtrans.jl +++ /dev/null @@ -1,721 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestAdjointTranspose - -using Test, LinearAlgebra - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") - -isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl")) -using .Main.OffsetArrays - -@testset "Adjoint and Transpose inner constructor basics" begin - intvec, intmat = [1, 2], [1 2; 3 4] - # Adjoint/Transpose eltype must match the type of the Adjoint/Transpose of the input eltype - @test_throws TypeError Adjoint{Float64,Vector{Int}}(intvec)[1,1] - @test_throws TypeError Adjoint{Float64,Matrix{Int}}(intmat)[1,1] - @test_throws TypeError Transpose{Float64,Vector{Int}}(intvec)[1,1] - @test_throws TypeError Transpose{Float64,Matrix{Int}}(intmat)[1,1] - # Adjoint/Transpose wrapped array type must match the input array type - @test_throws TypeError Adjoint{Int,Vector{Float64}}(intvec)[1,1] - @test_throws TypeError Adjoint{Int,Matrix{Float64}}(intmat)[1,1] - @test_throws TypeError Transpose{Int,Vector{Float64}}(intvec)[1,1] - @test_throws TypeError Transpose{Int,Matrix{Float64}}(intmat)[1,1] - # Adjoint/Transpose inner constructor basic functionality, concrete scalar eltype - @test (Adjoint{Int,Vector{Int}}(intvec)::Adjoint{Int,Vector{Int}}).parent === intvec - @test (Adjoint{Int,Matrix{Int}}(intmat)::Adjoint{Int,Matrix{Int}}).parent === intmat - @test (Transpose{Int,Vector{Int}}(intvec)::Transpose{Int,Vector{Int}}).parent === intvec - @test (Transpose{Int,Matrix{Int}}(intmat)::Transpose{Int,Matrix{Int}}).parent === intmat - # Adjoint/Transpose inner constructor basic functionality, abstract scalar eltype - anyvec, anymat = Any[1, 2], Any[1 2; 3 4] - @test (Adjoint{Any,Vector{Any}}(anyvec)::Adjoint{Any,Vector{Any}}).parent === anyvec - @test (Adjoint{Any,Matrix{Any}}(anymat)::Adjoint{Any,Matrix{Any}}).parent === anymat - @test (Transpose{Any,Vector{Any}}(anyvec)::Transpose{Any,Vector{Any}}).parent === anyvec - @test (Transpose{Any,Matrix{Any}}(anymat)::Transpose{Any,Matrix{Any}}).parent === anymat - # Adjoint/Transpose inner constructor basic functionality, concrete array eltype - intvecvec = [[1, 2], [3, 4]] - intmatmat = [[[1 2]] [[3 4]] [[5 6]]; [[7 8]] [[9 10]] [[11 12]]] - @test (X = Adjoint{Adjoint{Int,Vector{Int}},Vector{Vector{Int}}}(intvecvec); - isa(X, Adjoint{Adjoint{Int,Vector{Int}},Vector{Vector{Int}}}) && X.parent === intvecvec) - @test (X = Adjoint{Adjoint{Int,Matrix{Int}},Matrix{Matrix{Int}}}(intmatmat); - isa(X, Adjoint{Adjoint{Int,Matrix{Int}},Matrix{Matrix{Int}}}) && X.parent === intmatmat) - @test (X = Transpose{Transpose{Int,Vector{Int}},Vector{Vector{Int}}}(intvecvec); - isa(X, Transpose{Transpose{Int,Vector{Int}},Vector{Vector{Int}}}) && X.parent === intvecvec) - @test (X = Transpose{Transpose{Int,Matrix{Int}},Matrix{Matrix{Int}}}(intmatmat); - isa(X, Transpose{Transpose{Int,Matrix{Int}},Matrix{Matrix{Int}}}) && X.parent === intmatmat) -end - -@testset "Adjoint and Transpose outer constructor basics" begin - intvec, intmat = [1, 2], [1 2; 3 4] - # the wrapped array's eltype strictly determines the Adjoint/Transpose eltype - # so Adjoint{T}/Transpose{T} constructors are somewhat unnecessary and error-prone - # so ascertain that such calls throw whether or not T and the input eltype are compatible - @test_throws MethodError Adjoint{Int}(intvec) - @test_throws MethodError Adjoint{Int}(intmat) - @test_throws MethodError Adjoint{Float64}(intvec) - @test_throws MethodError Adjoint{Float64}(intmat) - @test_throws MethodError 
Transpose{Int}(intvec) - @test_throws MethodError Transpose{Int}(intmat) - @test_throws MethodError Transpose{Float64}(intvec) - @test_throws MethodError Transpose{Float64}(intmat) - # Adjoint/Transpose outer constructor basic functionality, concrete scalar eltype - @test (Adjoint(intvec)::Adjoint{Int,Vector{Int}}).parent === intvec - @test (Adjoint(intmat)::Adjoint{Int,Matrix{Int}}).parent === intmat - @test (Transpose(intvec)::Transpose{Int,Vector{Int}}).parent === intvec - @test (Transpose(intmat)::Transpose{Int,Matrix{Int}}).parent === intmat - # the tests for the inner constructors exercise abstract scalar and concrete array eltype, forgoing here -end - -@testset "Adjoint and Transpose add additional layers to already-wrapped objects" begin - intvec, intmat = [1, 2], [1 2; 3 4] - @test (A = Adjoint(Adjoint(intvec))::Adjoint{Int,Adjoint{Int,Vector{Int}}}; A.parent.parent === intvec) - @test (A = Adjoint(Adjoint(intmat))::Adjoint{Int,Adjoint{Int,Matrix{Int}}}; A.parent.parent === intmat) - @test (A = Transpose(Transpose(intvec))::Transpose{Int,Transpose{Int,Vector{Int}}}; A.parent.parent === intvec) - @test (A = Transpose(Transpose(intmat))::Transpose{Int,Transpose{Int,Matrix{Int}}}; A.parent.parent === intmat) -end - -@testset "Adjoint and Transpose basic AbstractArray functionality" begin - # vectors and matrices with real scalar eltype, and their adjoints/transposes - intvec, intmat = [1, 2], [1 2 3; 4 5 6] - tintvec, tintmat = [1 2], [1 4; 2 5; 3 6] - @testset "length methods" begin - @test length(Adjoint(intvec)) == length(intvec) - @test length(Adjoint(intmat)) == length(intmat) - @test length(Transpose(intvec)) == length(intvec) - @test length(Transpose(intmat)) == length(intmat) - end - @testset "size methods" begin - @test size(Adjoint(intvec)) == (1, length(intvec)) - @test size(Adjoint(intmat)) == reverse(size(intmat)) - @test size(Transpose(intvec)) == (1, length(intvec)) - @test size(Transpose(intmat)) == reverse(size(intmat)) - end - @testset "axes methods" begin - @test axes(Adjoint(intvec)) == (Base.OneTo(1), Base.OneTo(length(intvec))) - @test axes(Adjoint(intmat)) == reverse(axes(intmat)) - @test axes(Transpose(intvec)) == (Base.OneTo(1), Base.OneTo(length(intvec))) - @test axes(Transpose(intmat)) == reverse(axes(intmat)) - - A = OffsetArray([1,2], 2) - @test (@inferred axes(A')[2]) === axes(A,1) - @test (@inferred axes(A')[1]) === axes(A,2) - end - @testset "IndexStyle methods" begin - @test IndexStyle(Adjoint(intvec)) == IndexLinear() - @test IndexStyle(Adjoint(intmat)) == IndexCartesian() - @test IndexStyle(Transpose(intvec)) == IndexLinear() - @test IndexStyle(Transpose(intmat)) == IndexCartesian() - end - # vectors and matrices with complex scalar eltype, and their adjoints/transposes - complexintvec, complexintmat = [1im, 2im], [1im 2im 3im; 4im 5im 6im] - tcomplexintvec, tcomplexintmat = [1im 2im], [1im 4im; 2im 5im; 3im 6im] - acomplexintvec, acomplexintmat = conj.(tcomplexintvec), conj.(tcomplexintmat) - # vectors and matrices with real-vector and real-matrix eltype, and their adjoints/transposes - intvecvec = [[1, 2], [3, 4]] - tintvecvec = [[[1 2]] [[3 4]]] - intmatmat = [[[1 2]] [[3 4]] [[ 5 6]]; - [[7 8]] [[9 10]] [[11 12]]] - tintmatmat = [[hcat([1, 2])] [hcat([7, 8])]; - [hcat([3, 4])] [hcat([9, 10])]; - [hcat([5, 6])] [hcat([11, 12])]] - # vectors and matrices with complex-vector and complex-matrix eltype, and their adjoints/transposes - complexintvecvec, complexintmatmat = im .* (intvecvec, intmatmat) - tcomplexintvecvec, tcomplexintmatmat = im .* 
(tintvecvec, tintmatmat) - acomplexintvecvec, acomplexintmatmat = conj.(tcomplexintvecvec), conj.(tcomplexintmatmat) - @testset "getindex methods, elementary" begin - # implicitly test elementary definitions, for arrays with concrete real scalar eltype - @test Adjoint(intvec) == tintvec - @test Adjoint(intmat) == tintmat - @test Transpose(intvec) == tintvec - @test Transpose(intmat) == tintmat - # implicitly test elementary definitions, for arrays with concrete complex scalar eltype - @test Adjoint(complexintvec) == acomplexintvec - @test Adjoint(complexintmat) == acomplexintmat - @test Transpose(complexintvec) == tcomplexintvec - @test Transpose(complexintmat) == tcomplexintmat - # implicitly test elementary definitions, for arrays with concrete real-array eltype - @test Adjoint(intvecvec) == tintvecvec - @test Adjoint(intmatmat) == tintmatmat - @test Transpose(intvecvec) == tintvecvec - @test Transpose(intmatmat) == tintmatmat - # implicitly test elementary definitions, for arrays with concrete complex-array type - @test Adjoint(complexintvecvec) == acomplexintvecvec - @test Adjoint(complexintmatmat) == acomplexintmatmat - @test Transpose(complexintvecvec) == tcomplexintvecvec - @test Transpose(complexintmatmat) == tcomplexintmatmat - end - @testset "getindex(::AdjOrTransVec, ::Colon, ::AbstractArray{Int}) methods that preserve wrapper type" begin - # for arrays with concrete scalar eltype - @test Adjoint(intvec)[:, [1, 2]] == Adjoint(intvec) - @test Transpose(intvec)[:, [1, 2]] == Transpose(intvec) - @test Adjoint(complexintvec)[:, [1, 2]] == Adjoint(complexintvec) - @test Transpose(complexintvec)[:, [1, 2]] == Transpose(complexintvec) - # for arrays with concrete array eltype - @test Adjoint(intvecvec)[:, [1, 2]] == Adjoint(intvecvec) - @test Transpose(intvecvec)[:, [1, 2]] == Transpose(intvecvec) - @test Adjoint(complexintvecvec)[:, [1, 2]] == Adjoint(complexintvecvec) - @test Transpose(complexintvecvec)[:, [1, 2]] == Transpose(complexintvecvec) - end - @testset "getindex(::AdjOrTransVec, ::Colon, ::Colon) methods that preserve wrapper type" begin - # for arrays with concrete scalar eltype - @test Adjoint(intvec)[:, :] == Adjoint(intvec) - @test Transpose(intvec)[:, :] == Transpose(intvec) - @test Adjoint(complexintvec)[:, :] == Adjoint(complexintvec) - @test Transpose(complexintvec)[:, :] == Transpose(complexintvec) - # for arrays with concrete array elype - @test Adjoint(intvecvec)[:, :] == Adjoint(intvecvec) - @test Transpose(intvecvec)[:, :] == Transpose(intvecvec) - @test Adjoint(complexintvecvec)[:, :] == Adjoint(complexintvecvec) - @test Transpose(complexintvecvec)[:, :] == Transpose(complexintvecvec) - end - @testset "getindex(::AdjOrTransVec, ::Colon, ::Int) should preserve wrapper type on result entries" begin - # for arrays with concrete scalar eltype - @test Adjoint(intvec)[:, 2] == intvec[2:2] - @test Transpose(intvec)[:, 2] == intvec[2:2] - @test Adjoint(complexintvec)[:, 2] == conj.(complexintvec[2:2]) - @test Transpose(complexintvec)[:, 2] == complexintvec[2:2] - # for arrays with concrete array eltype - @test Adjoint(intvecvec)[:, 2] == Adjoint.(intvecvec[2:2]) - @test Transpose(intvecvec)[:, 2] == Transpose.(intvecvec[2:2]) - @test Adjoint(complexintvecvec)[:, 2] == Adjoint.(complexintvecvec[2:2]) - @test Transpose(complexintvecvec)[:, 2] == Transpose.(complexintvecvec[2:2]) - end - @testset "setindex! 
methods" begin - # for vectors with real scalar eltype - @test (wv = Adjoint(copy(intvec)); - wv === setindex!(wv, 3, 2) && - wv == setindex!(copy(tintvec), 3, 1, 2) ) - @test (wv = Transpose(copy(intvec)); - wv === setindex!(wv, 4, 2) && - wv == setindex!(copy(tintvec), 4, 1, 2) ) - # for matrices with real scalar eltype - @test (wA = Adjoint(copy(intmat)); - wA === setindex!(wA, 7, 3, 1) && - wA == setindex!(copy(tintmat), 7, 3, 1) ) - @test (wA = Transpose(copy(intmat)); - wA === setindex!(wA, 7, 3, 1) && - wA == setindex!(copy(tintmat), 7, 3, 1) ) - # for vectors with complex scalar eltype - @test (wz = Adjoint(copy(complexintvec)); - wz === setindex!(wz, 3im, 2) && - wz == setindex!(copy(acomplexintvec), 3im, 1, 2) ) - @test (wz = Transpose(copy(complexintvec)); - wz === setindex!(wz, 4im, 2) && - wz == setindex!(copy(tcomplexintvec), 4im, 1, 2) ) - # for matrices with complex scalar eltype - @test (wZ = Adjoint(copy(complexintmat)); - wZ === setindex!(wZ, 7im, 3, 1) && - wZ == setindex!(copy(acomplexintmat), 7im, 3, 1) ) - @test (wZ = Transpose(copy(complexintmat)); - wZ === setindex!(wZ, 7im, 3, 1) && - wZ == setindex!(copy(tcomplexintmat), 7im, 3, 1) ) - # for vectors with concrete real-vector eltype - @test (wv = Adjoint(copy(intvecvec)); - wv === setindex!(wv, Adjoint([5, 6]), 2) && - wv == setindex!(copy(tintvecvec), [5 6], 2)) - @test (wv = Transpose(copy(intvecvec)); - wv === setindex!(wv, Transpose([5, 6]), 2) && - wv == setindex!(copy(tintvecvec), [5 6], 2)) - # for matrices with concrete real-matrix eltype - @test (wA = Adjoint(copy(intmatmat)); - wA === setindex!(wA, Adjoint([13 14]), 3, 1) && - wA == setindex!(copy(tintmatmat), hcat([13, 14]), 3, 1)) - @test (wA = Transpose(copy(intmatmat)); - wA === setindex!(wA, Transpose([13 14]), 3, 1) && - wA == setindex!(copy(tintmatmat), hcat([13, 14]), 3, 1)) - # for vectors with concrete complex-vector eltype - @test (wz = Adjoint(copy(complexintvecvec)); - wz === setindex!(wz, Adjoint([5im, 6im]), 2) && - wz == setindex!(copy(acomplexintvecvec), [-5im -6im], 2)) - @test (wz = Transpose(copy(complexintvecvec)); - wz === setindex!(wz, Transpose([5im, 6im]), 2) && - wz == setindex!(copy(tcomplexintvecvec), [5im 6im], 2)) - # for matrices with concrete complex-matrix eltype - @test (wZ = Adjoint(copy(complexintmatmat)); - wZ === setindex!(wZ, Adjoint([13im 14im]), 3, 1) && - wZ == setindex!(copy(acomplexintmatmat), hcat([-13im, -14im]), 3, 1)) - @test (wZ = Transpose(copy(complexintmatmat)); - wZ === setindex!(wZ, Transpose([13im 14im]), 3, 1) && - wZ == setindex!(copy(tcomplexintmatmat), hcat([13im, 14im]), 3, 1)) - end -end - -@testset "Adjoint and Transpose convert methods that convert underlying storage" begin - intvec, intmat = [1, 2], [1 2 3; 4 5 6] - @test convert(Adjoint{Float64,Vector{Float64}}, Adjoint(intvec))::Adjoint{Float64,Vector{Float64}} == Adjoint(intvec) - @test convert(Adjoint{Float64,Matrix{Float64}}, Adjoint(intmat))::Adjoint{Float64,Matrix{Float64}} == Adjoint(intmat) - @test convert(Transpose{Float64,Vector{Float64}}, Transpose(intvec))::Transpose{Float64,Vector{Float64}} == Transpose(intvec) - @test convert(Transpose{Float64,Matrix{Float64}}, Transpose(intmat))::Transpose{Float64,Matrix{Float64}} == Transpose(intmat) -end - -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays - -@testset "Adjoint and Transpose convert methods to AbstractArray" begin - # tests corresponding to #34995 - intvec, intmat = [1, 2], 
[1 2 3; 4 5 6] - statvec = ImmutableArray(intvec) - statmat = ImmutableArray(intmat) - - @test convert(AbstractArray{Float64}, Adjoint(statvec))::Adjoint{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Adjoint(statvec) - @test convert(AbstractArray{Float64}, Adjoint(statmat))::Array{Float64,2} == Adjoint(statmat) - @test convert(AbstractArray{Float64}, Transpose(statvec))::Transpose{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Transpose(statvec) - @test convert(AbstractArray{Float64}, Transpose(statmat))::Array{Float64,2} == Transpose(statmat) - @test convert(AbstractMatrix{Float64}, Adjoint(statvec))::Adjoint{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Adjoint(statvec) - @test convert(AbstractMatrix{Float64}, Adjoint(statmat))::Array{Float64,2} == Adjoint(statmat) - @test convert(AbstractMatrix{Float64}, Transpose(statvec))::Transpose{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Transpose(statvec) - @test convert(AbstractMatrix{Float64}, Transpose(statmat))::Array{Float64,2} == Transpose(statmat) -end - -@testset "Adjoint and Transpose similar methods" begin - intvec, intmat = [1, 2], [1 2 3; 4 5 6] - # similar with no additional specifications, vector (rewrapping) semantics - @test size(similar(Adjoint(intvec))::Adjoint{Int,Vector{Int}}) == size(Adjoint(intvec)) - @test size(similar(Transpose(intvec))::Transpose{Int,Vector{Int}}) == size(Transpose(intvec)) - # similar with no additional specifications, matrix (no-rewrapping) semantics - @test size(similar(Adjoint(intmat))::Matrix{Int}) == size(Adjoint(intmat)) - @test size(similar(Transpose(intmat))::Matrix{Int}) == size(Transpose(intmat)) - # similar with element type specification, vector (rewrapping) semantics - @test size(similar(Adjoint(intvec), Float64)::Adjoint{Float64,Vector{Float64}}) == size(Adjoint(intvec)) - @test size(similar(Transpose(intvec), Float64)::Transpose{Float64,Vector{Float64}}) == size(Transpose(intvec)) - # similar with element type specification, matrix (no-rewrapping) semantics - @test size(similar(Adjoint(intmat), Float64)::Matrix{Float64}) == size(Adjoint(intmat)) - @test size(similar(Transpose(intmat), Float64)::Matrix{Float64}) == size(Transpose(intmat)) - # similar with element type and arbitrary dims specifications - shape = (2, 2, 2) - @test size(similar(Adjoint(intvec), Float64, shape)::Array{Float64,3}) == shape - @test size(similar(Adjoint(intmat), Float64, shape)::Array{Float64,3}) == shape - @test size(similar(Transpose(intvec), Float64, shape)::Array{Float64,3}) == shape - @test size(similar(Transpose(intmat), Float64, shape)::Array{Float64,3}) == shape -end - -@testset "Adjoint and Transpose parent methods" begin - intvec, intmat = [1, 2], [1 2 3; 4 5 6] - @test parent(Adjoint(intvec)) === intvec - @test parent(Adjoint(intmat)) === intmat - @test parent(Transpose(intvec)) === intvec - @test parent(Transpose(intmat)) === intmat -end - -@testset "Adjoint and Transpose vector vec methods" begin - intvec = [1, 2] - @test vec(Adjoint(intvec)) === intvec - @test vec(Transpose(intvec)) === intvec - cvec = [1 + 1im] - @test vec(cvec')[1] == cvec[1]' - mvec = [[1 2; 3 4+5im]]; - @test vec(transpose(mvec))[1] == transpose(mvec[1]) - @test vec(adjoint(mvec))[1] == adjoint(mvec[1]) -end - -@testset "horizontal concatenation of Adjoint/Transpose-wrapped vectors and Numbers" begin - # horizontal concatenation of Adjoint/Transpose-wrapped vectors and Numbers - # should preserve the Adjoint/Transpose-wrapper to preserve semantics downstream - vec, tvec, avec = [1im, 2im], 
[1im 2im], [-1im -2im] - vecvec = [[1im, 2im], [3im, 4im]] - tvecvec = [[[1im 2im]] [[3im 4im]]] - avecvec = [[[-1im -2im]] [[-3im -4im]]] - # for arrays with concrete scalar eltype - @test hcat(Adjoint(vec), Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == hcat(avec, avec) - @test hcat(Adjoint(vec), 1, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == hcat(avec, 1, avec) - @test hcat(Transpose(vec), Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == hcat(tvec, tvec) - @test hcat(Transpose(vec), 1, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == hcat(tvec, 1, tvec) - # for arrays with concrete array eltype - @test hcat(Adjoint(vecvec), Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == hcat(avecvec, avecvec) - @test hcat(Transpose(vecvec), Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == hcat(tvecvec, tvecvec) -end - -@testset "map/broadcast over Adjoint/Transpose-wrapped vectors and Numbers" begin - # map and broadcast over Adjoint/Transpose-wrapped vectors and Numbers - # should preserve the Adjoint/Transpose-wrapper to preserve semantics downstream - vec, tvec, avec = [1im, 2im], [1im 2im], [-1im -2im] - vecvec = [[1im, 2im], [3im, 4im]] - tvecvec = [[[1im 2im]] [[3im 4im]]] - avecvec = [[[-1im -2im]] [[-3im -4im]]] - # unary map over wrapped vectors with concrete scalar eltype - @test map(-, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == -avec - @test map(-, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == -tvec - # unary map over wrapped vectors with concrete array eltype - @test map(-, Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == -avecvec - @test map(-, Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == -tvecvec - # binary map over wrapped vectors with concrete scalar eltype - @test map(+, Adjoint(vec), Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == avec + avec - @test map(+, Transpose(vec), Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == tvec + tvec - # binary map over wrapped vectors with concrete array eltype - @test map(+, Adjoint(vecvec), Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == avecvec + avecvec - @test map(+, Transpose(vecvec), Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == tvecvec + tvecvec - # unary broadcast over wrapped vectors with concrete scalar eltype - @test broadcast(-, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == -avec - @test broadcast(-, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == -tvec - # unary broadcast over wrapped vectors with concrete array eltype - @test broadcast(-, Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == -avecvec - @test broadcast(-, Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == -tvecvec - # binary broadcast over wrapped vectors with concrete scalar eltype - @test broadcast(+, Adjoint(vec), Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == avec + avec - @test broadcast(+, Transpose(vec), Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == tvec + tvec - # binary broadcast over wrapped vectors with concrete array eltype - 
@test broadcast(+, Adjoint(vecvec), Adjoint(vecvec))::Adjoint{Adjoint{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == avecvec + avecvec - @test broadcast(+, Transpose(vecvec), Transpose(vecvec))::Transpose{Transpose{Complex{Int},Vector{Complex{Int}}},Vector{Vector{Complex{Int}}}} == tvecvec + tvecvec - # trinary broadcast over wrapped vectors with concrete scalar eltype and numbers - @test broadcast(+, Adjoint(vec), 1, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == avec + avec .+ 1 - @test broadcast(+, Transpose(vec), 1, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == tvec + tvec .+ 1 - @test broadcast(+, Adjoint(vec), 1im, Adjoint(vec))::Adjoint{Complex{Int},Vector{Complex{Int}}} == avec + avec .+ 1im - @test broadcast(+, Transpose(vec), 1im, Transpose(vec))::Transpose{Complex{Int},Vector{Complex{Int}}} == tvec + tvec .+ 1im -end - -@testset "Adjoint/Transpose-wrapped vector multiplication" begin - realvec, realmat = [1, 2, 3], [1 2 3; 4 5 6; 7 8 9] - complexvec, complexmat = [1im, 2, -3im], [1im 2 3; 4 5 -6im; 7im 8 9] - # Adjoint/Transpose-vector * vector - @test Adjoint(realvec) * realvec == dot(realvec, realvec) - @test Transpose(realvec) * realvec == dot(realvec, realvec) - @test Adjoint(complexvec) * complexvec == dot(complexvec, complexvec) - @test Transpose(complexvec) * complexvec == dot(conj(complexvec), complexvec) - # vector * Adjoint/Transpose-vector - @test realvec * Adjoint(realvec) == broadcast(*, realvec, reshape(realvec, (1, 3))) - @test realvec * Transpose(realvec) == broadcast(*, realvec, reshape(realvec, (1, 3))) - @test complexvec * Adjoint(complexvec) == broadcast(*, complexvec, reshape(conj(complexvec), (1, 3))) - @test complexvec * Transpose(complexvec) == broadcast(*, complexvec, reshape(complexvec, (1, 3))) - # Adjoint/Transpose-vector * matrix - @test (Adjoint(realvec) * realmat)::Adjoint{Int,Vector{Int}} == - reshape(copy(Adjoint(realmat)) * realvec, (1, 3)) - @test (Transpose(realvec) * realmat)::Transpose{Int,Vector{Int}} == - reshape(copy(Transpose(realmat)) * realvec, (1, 3)) - @test (Adjoint(complexvec) * complexmat)::Adjoint{Complex{Int},Vector{Complex{Int}}} == - reshape(conj(copy(Adjoint(complexmat)) * complexvec), (1, 3)) - @test (Transpose(complexvec) * complexmat)::Transpose{Complex{Int},Vector{Complex{Int}}} == - reshape(copy(Transpose(complexmat)) * complexvec, (1, 3)) - # Adjoint/Transpose-vector * Adjoint/Transpose-matrix - @test (Adjoint(realvec) * Adjoint(realmat))::Adjoint{Int,Vector{Int}} == - reshape(realmat * realvec, (1, 3)) - @test (Transpose(realvec) * Transpose(realmat))::Transpose{Int,Vector{Int}} == - reshape(realmat * realvec, (1, 3)) - @test (Adjoint(complexvec) * Adjoint(complexmat))::Adjoint{Complex{Int},Vector{Complex{Int}}} == - reshape(conj(complexmat * complexvec), (1, 3)) - @test (Transpose(complexvec) * Transpose(complexmat))::Transpose{Complex{Int},Vector{Complex{Int}}} == - reshape(complexmat * complexvec, (1, 3)) -end - -@testset "Adjoint/Transpose-wrapped vector pseudoinversion" begin - realvec, complexvec = [1, 2, 3, 4], [1im, 2, 3im, 4] - rowrealvec, rowcomplexvec = reshape(realvec, (1, 4)), reshape(complexvec, (1, 4)) - # pinv(Adjoint/Transpose-vector) should match matrix equivalents - # TODO tighten type asserts once pinv yields Transpose/Adjoint - @test pinv(Adjoint(realvec))::Vector{Float64} ≈ pinv(rowrealvec) - @test pinv(Transpose(realvec))::Vector{Float64} ≈ pinv(rowrealvec) - @test pinv(Adjoint(complexvec))::Vector{ComplexF64} ≈ pinv(conj(rowcomplexvec)) - 
@test pinv(Transpose(complexvec))::Vector{ComplexF64} ≈ pinv(rowcomplexvec) -end - -@testset "Adjoint/Transpose-wrapped vector left-division" begin - realvec, complexvec = [1., 2., 3., 4.,], [1.0im, 2., 3.0im, 4.] - rowrealvec, rowcomplexvec = reshape(realvec, (1, 4)), reshape(complexvec, (1, 4)) - # \(Adjoint/Transpose-vector, Adjoint/Transpose-vector) should mat matrix equivalents - @test Adjoint(realvec)\Adjoint(realvec) ≈ rowrealvec\rowrealvec - @test Transpose(realvec)\Transpose(realvec) ≈ rowrealvec\rowrealvec - @test Adjoint(complexvec)\Adjoint(complexvec) ≈ conj(rowcomplexvec)\conj(rowcomplexvec) - @test Transpose(complexvec)\Transpose(complexvec) ≈ rowcomplexvec\rowcomplexvec -end - -@testset "Adjoint/Transpose-wrapped vector right-division" begin - realvec, realmat = [1, 2, 3], [1 0 0; 0 2 0; 0 0 3] - complexvec, complexmat = [1im, 2, -3im], [2im 0 0; 0 3 0; 0 0 -5im] - rowrealvec, rowcomplexvec = reshape(realvec, (1, 3)), reshape(complexvec, (1, 3)) - # /(Adjoint/Transpose-vector, matrix) - @test (Adjoint(realvec) / realmat)::Adjoint ≈ rowrealvec / realmat - @test (Adjoint(complexvec) / complexmat)::Adjoint ≈ conj(rowcomplexvec) / complexmat - @test (Transpose(realvec) / realmat)::Transpose ≈ rowrealvec / realmat - @test (Transpose(complexvec) / complexmat)::Transpose ≈ rowcomplexvec / complexmat - # /(Adjoint/Transpose-vector, Adjoint matrix) - @test (Adjoint(realvec) / Adjoint(realmat))::Adjoint ≈ rowrealvec / copy(Adjoint(realmat)) - @test (Adjoint(complexvec) / Adjoint(complexmat))::Adjoint ≈ conj(rowcomplexvec) / copy(Adjoint(complexmat)) - @test (Transpose(realvec) / Adjoint(realmat))::Transpose ≈ rowrealvec / copy(Adjoint(realmat)) - @test (Transpose(complexvec) / Adjoint(complexmat))::Transpose ≈ rowcomplexvec / copy(Adjoint(complexmat)) - # /(Adjoint/Transpose-vector, Transpose matrix) - @test (Adjoint(realvec) / Transpose(realmat))::Adjoint ≈ rowrealvec / copy(Transpose(realmat)) - @test (Adjoint(complexvec) / Transpose(complexmat))::Adjoint ≈ conj(rowcomplexvec) / copy(Transpose(complexmat)) - @test (Transpose(realvec) / Transpose(realmat))::Transpose ≈ rowrealvec / copy(Transpose(realmat)) - @test (Transpose(complexvec) / Transpose(complexmat))::Transpose ≈ rowcomplexvec / copy(Transpose(complexmat)) -end - -@testset "norm and opnorm of Adjoint/Transpose-wrapped vectors" begin - # definitions are in base/linalg/generic.jl - realvec, complexvec = [3, -4], [3im, -4im] - # one norm result should be sum(abs.(realvec)) == 7 - # two norm result should be sqrt(sum(abs.(realvec))) == 5 - # inf norm result should be maximum(abs.(realvec)) == 4 - for v in (realvec, complexvec) - @test norm(Adjoint(v)) ≈ 5 - @test norm(Adjoint(v), 1) ≈ 7 - @test norm(Adjoint(v), Inf) ≈ 4 - @test norm(Transpose(v)) ≈ 5 - @test norm(Transpose(v), 1) ≈ 7 - @test norm(Transpose(v), Inf) ≈ 4 - end - # one opnorm result should be maximum(abs.(realvec)) == 4 - # two opnorm result should be sqrt(sum(abs.(realvec))) == 5 - # inf opnorm result should be sum(abs.(realvec)) == 7 - for v in (realvec, complexvec) - @test opnorm(Adjoint(v)) ≈ 5 - @test opnorm(Adjoint(v), 1) ≈ 4 - @test opnorm(Adjoint(v), Inf) ≈ 7 - @test opnorm(Transpose(v)) ≈ 5 - @test opnorm(Transpose(v), 1) ≈ 4 - @test opnorm(Transpose(v), Inf) ≈ 7 - end -end - -@testset "adjoint and transpose of Numbers" begin - @test adjoint(1) == 1 - @test adjoint(1.0) == 1.0 - @test adjoint(1im) == -1im - @test adjoint(1.0im) == -1.0im - @test transpose(1) == 1 - @test transpose(1.0) == 1.0 - @test transpose(1im) == 1im - @test transpose(1.0im) == 
1.0im -end - -@testset "adjoint!(a, b) return a" begin - a = fill(1.0+im, 5) - b = fill(1.0+im, 1, 5) - @test adjoint!(a, b) === a - @test adjoint!(b, a) === b -end - -@testset "copyto! uses adjoint!/transpose!" begin - for T in (Float64, ComplexF64), f in (transpose, adjoint), sz in ((5,4), (5,)) - S = rand(T, sz) - adjS = f(S) - A = similar(S') - copyto!(A, adjS) - @test A == adjS - end -end - -@testset "aliasing with adjoint and transpose" begin - A = collect(reshape(1:25, 5, 5)) .+ rand.().*im - B = copy(A) - B .= B' - @test B == A' - B = copy(A) - B .= transpose(B) - @test B == transpose(A) - B = copy(A) - B .= B .* B' - @test B == A .* A' -end - -@testset "test show methods for $t of Factorizations" for t in (adjoint, transpose) - A = randn(ComplexF64, 4, 4) - F = lu(A) - Fop = t(F) - @test sprint(show, Fop) == - "$t of "*sprint(show, parent(Fop)) - @test sprint((io, t) -> show(io, MIME"text/plain"(), t), Fop) == - "$t of "*sprint((io, t) -> show(io, MIME"text/plain"(), t), parent(Fop)) -end - -@testset "showarg" begin - io = IOBuffer() - - A = ones(Float64, 3,3) - - B = Adjoint(A) - @test summary(B) == "3×3 adjoint(::Matrix{Float64}) with eltype Float64" - @test Base.showarg(io, B, false) === nothing - @test String(take!(io)) == "adjoint(::Matrix{Float64})" - - B = Transpose(A) - @test summary(B) == "3×3 transpose(::Matrix{Float64}) with eltype Float64" - @test Base.showarg(io, B, false) === nothing - @test String(take!(io)) == "transpose(::Matrix{Float64})" -end - -@testset "show" begin - @test repr(adjoint([1,2,3])) == "adjoint([1, 2, 3])" - @test repr(transpose([1f0,2f0])) == "transpose(Float32[1.0, 2.0])" -end - -@testset "strided transposes" begin - for t in (Adjoint, Transpose) - @test strides(t(rand(3))) == (3, 1) - @test strides(t(rand(3,2))) == (3, 1) - @test strides(t(view(rand(3, 2), :))) == (6, 1) - @test strides(t(view(rand(3, 2), :, 1:2))) == (3, 1) - - A = rand(3) - @test pointer(t(A)) === pointer(A) - B = rand(3,1) - @test pointer(t(B)) === pointer(B) - end - @test_throws MethodError strides(Adjoint(rand(3) .+ rand(3).*im)) - @test_throws MethodError strides(Adjoint(rand(3, 2) .+ rand(3, 2).*im)) - @test strides(Transpose(rand(3) .+ rand(3).*im)) == (3, 1) - @test strides(Transpose(rand(3, 2) .+ rand(3, 2).*im)) == (3, 1) - - C = rand(3) .+ rand(3).*im - @test_throws ErrorException pointer(Adjoint(C)) - @test pointer(Transpose(C)) === pointer(C) - D = rand(3,2) .+ rand(3,2).*im - @test_throws ErrorException pointer(Adjoint(D)) - @test pointer(Transpose(D)) === pointer(D) -end - -isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl")) -using .Main.OffsetArrays - -@testset "offset axes" begin - s = Base.Slice(-3:3)' - @test axes(s) === (Base.OneTo(1), Base.IdentityUnitRange(-3:3)) - @test collect(LinearIndices(s)) == reshape(1:7, 1, 7) - @test collect(CartesianIndices(s)) == reshape([CartesianIndex(1,i) for i = -3:3], 1, 7) - @test s[1] == -3 - @test s[7] == 3 - @test s[4] == 0 - @test_throws BoundsError s[0] - @test_throws BoundsError s[8] - @test s[1,-3] == -3 - @test s[1, 3] == 3 - @test s[1, 0] == 0 - @test_throws BoundsError s[1,-4] - @test_throws BoundsError s[1, 4] -end - -@testset "specialized conj of Adjoint/Transpose" begin - realmat = [1 2; 3 4] - complexmat = ComplexF64[1+im 2; 3 4-im] - nested = [[complexmat] [-complexmat]; [0complexmat] [3complexmat]] - @testset "AdjOrTrans{...,$(typeof(i))}" for i in ( - realmat, vec(realmat), - complexmat, vec(complexmat), - nested, vec(nested), - ) - for 
(t,type) in ((transpose, Adjoint), (adjoint, Transpose)) - M = t(i) - @test conj(M) isa type - @test conj(M) == conj(collect(M)) - @test conj(conj(M)) === M - end - end - # test if `conj(transpose(::Hermitian))` is a no-op - hermitian = Hermitian([1 2+im; 2-im 3]) - @test conj(transpose(hermitian)) === hermitian -end - -@testset "empty and mismatched lengths" begin - # issue 36678 - @test_throws DimensionMismatch [1, 2]' * [1,2,3] - @test Int[]' * Int[] == 0 - @test transpose(Int[]) * Int[] == 0 -end - -@testset "reductions: $adjtrans" for adjtrans in (transpose, adjoint) - for (reduction, reduction!, op) in ((sum, sum!, +), (prod, prod!, *), (minimum, minimum!, min), (maximum, maximum!, max)) - T = op in (max, min) ? Float64 : ComplexF64 - mat = rand(T, 3,5) - rd1 = zeros(T, 1, 3) - rd2 = zeros(T, 5, 1) - rd3 = zeros(T, 1, 1) - @test reduction(adjtrans(mat)) ≈ reduction(copy(adjtrans(mat))) - @test reduction(adjtrans(mat), dims=1) ≈ reduction(copy(adjtrans(mat)), dims=1) - @test reduction(adjtrans(mat), dims=2) ≈ reduction(copy(adjtrans(mat)), dims=2) - @test reduction(adjtrans(mat), dims=(1,2)) ≈ reduction(copy(adjtrans(mat)), dims=(1,2)) - - @test reduction!(rd1, adjtrans(mat)) ≈ reduction!(rd1, copy(adjtrans(mat))) - @test reduction!(rd2, adjtrans(mat)) ≈ reduction!(rd2, copy(adjtrans(mat))) - @test reduction!(rd3, adjtrans(mat)) ≈ reduction!(rd3, copy(adjtrans(mat))) - - @test reduction(imag, adjtrans(mat)) ≈ reduction(imag, copy(adjtrans(mat))) - @test reduction(imag, adjtrans(mat), dims=1) ≈ reduction(imag, copy(adjtrans(mat)), dims=1) - @test reduction(imag, adjtrans(mat), dims=2) ≈ reduction(imag, copy(adjtrans(mat)), dims=2) - @test reduction(imag, adjtrans(mat), dims=(1,2)) ≈ reduction(imag, copy(adjtrans(mat)), dims=(1,2)) - - @test Base.mapreducedim!(imag, op, rd1, adjtrans(mat)) ≈ Base.mapreducedim!(imag, op, rd1, copy(adjtrans(mat))) - @test Base.mapreducedim!(imag, op, rd2, adjtrans(mat)) ≈ Base.mapreducedim!(imag, op, rd2, copy(adjtrans(mat))) - @test Base.mapreducedim!(imag, op, rd3, adjtrans(mat)) ≈ Base.mapreducedim!(imag, op, rd3, copy(adjtrans(mat))) - - op in (max, min) && continue - mat = [rand(T,2,2) for _ in 1:3, _ in 1:5] - rd1 = fill(zeros(T, 2, 2), 1, 3) - rd2 = fill(zeros(T, 2, 2), 5, 1) - rd3 = fill(zeros(T, 2, 2), 1, 1) - @test reduction(adjtrans(mat)) ≈ reduction(copy(adjtrans(mat))) - @test reduction(adjtrans(mat), dims=1) ≈ reduction(copy(adjtrans(mat)), dims=1) - @test reduction(adjtrans(mat), dims=2) ≈ reduction(copy(adjtrans(mat)), dims=2) - @test reduction(adjtrans(mat), dims=(1,2)) ≈ reduction(copy(adjtrans(mat)), dims=(1,2)) - - @test reduction(imag, adjtrans(mat)) ≈ reduction(imag, copy(adjtrans(mat))) - @test reduction(x -> x[1,2], adjtrans(mat)) ≈ reduction(x -> x[1,2], copy(adjtrans(mat))) - @test reduction(imag, adjtrans(mat), dims=1) ≈ reduction(imag, copy(adjtrans(mat)), dims=1) - @test reduction(x -> x[1,2], adjtrans(mat), dims=1) ≈ reduction(x -> x[1,2], copy(adjtrans(mat)), dims=1) - end - # see #46605 - Ac = [1 2; 3 4]' - @test mapreduce(identity, (x, y) -> 10x+y, copy(Ac)) == mapreduce(identity, (x, y) -> 10x+y, Ac) == 1234 - @test extrema([3,7,4]') == (3, 7) - @test mapreduce(x -> [x;;;], +, [1, 2, 3]') == sum(x -> [x;;;], [1, 2, 3]') == [6;;;] - @test mapreduce(string, *, [1 2; 3 4]') == mapreduce(string, *, copy([1 2; 3 4]')) == "1234" -end - -@testset "trace" begin - for T in (Float64, ComplexF64), t in (adjoint, transpose) - A = randn(T, 10, 10) - @test tr(t(A)) == tr(copy(t(A))) == t(tr(A)) - end -end - -@testset "structured 
printing" begin - D = Diagonal(1:3) - @test sprint(Base.print_matrix, Adjoint(D)) == sprint(Base.print_matrix, D) - @test sprint(Base.print_matrix, Transpose(D)) == sprint(Base.print_matrix, D) - D = Diagonal((1:3)*im) - D2 = Diagonal((1:3)*(-im)) - @test sprint(Base.print_matrix, Transpose(D)) == sprint(Base.print_matrix, D) - @test sprint(Base.print_matrix, Adjoint(D)) == sprint(Base.print_matrix, D2) - - struct OneHotVecOrMat{N} <: AbstractArray{Bool,N} - inds::NTuple{N,Int} - sz::NTuple{N,Int} - end - Base.size(x::OneHotVecOrMat) = x.sz - function Base.getindex(x::OneHotVecOrMat{N}, inds::Vararg{Int,N}) where {N} - checkbounds(x, inds...) - inds == x.inds - end - Base.replace_in_print_matrix(o::OneHotVecOrMat{1}, i::Integer, j::Integer, s::AbstractString) = - o.inds == (i,) ? s : Base.replace_with_centered_mark(s) - Base.replace_in_print_matrix(o::OneHotVecOrMat{2}, i::Integer, j::Integer, s::AbstractString) = - o.inds == (i,j) ? s : Base.replace_with_centered_mark(s) - - o = OneHotVecOrMat((2,), (4,)) - @test sprint(Base.print_matrix, Transpose(o)) == sprint(Base.print_matrix, OneHotVecOrMat((1,2), (1,4))) - @test sprint(Base.print_matrix, Adjoint(o)) == sprint(Base.print_matrix, OneHotVecOrMat((1,2), (1,4))) -end - -@testset "copy_transpose!" begin - # scalar case - A = [randn() for _ in 1:2, _ in 1:3] - At = copy(transpose(A)) - B = zero.(At) - LinearAlgebra.copy_transpose!(B, axes(B, 1), axes(B, 2), A, axes(A, 1), axes(A, 2)) - @test B == At - # matrix of matrices - A = [randn(2,3) for _ in 1:2, _ in 1:3] - At = copy(transpose(A)) - B = zero.(At) - LinearAlgebra.copy_transpose!(B, axes(B, 1), axes(B, 2), A, axes(A, 1), axes(A, 2)) - @test B == At -end - -@testset "error message in transpose" begin - v = zeros(2) - A = zeros(1,1) - B = zeros(2,3) - for (t1, t2) in Any[(A, v), (v, A), (A, B)] - @test_throws "axes of the destination are incompatible with that of the source" transpose!(t1, t2) - @test_throws "axes of the destination are incompatible with that of the source" adjoint!(t1, t2) - end -end - -end # module TestAdjointTranspose diff --git a/stdlib/LinearAlgebra/test/ambiguous_exec.jl b/stdlib/LinearAlgebra/test/ambiguous_exec.jl deleted file mode 100644 index 7b89c0a457afb..0000000000000 --- a/stdlib/LinearAlgebra/test/ambiguous_exec.jl +++ /dev/null @@ -1,21 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -using Test, LinearAlgebra -let ambig = detect_ambiguities(LinearAlgebra; recursive=true) - @test isempty(ambig) - ambig = Set{Any}(((m1.sig, m2.sig) for (m1, m2) in ambig)) - expect = [] - good = true - while !isempty(ambig) - sigs = pop!(ambig) - i = findfirst(==(sigs), expect) - if i === nothing - println(stderr, "push!(expect, (", sigs[1], ", ", sigs[2], "))") - good = false - continue - end - deleteat!(expect, i) - end - @test isempty(expect) - @test good -end diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl deleted file mode 100644 index df30748e042b5..0000000000000 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ /dev/null @@ -1,1141 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestBidiagonal - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasReal, BlasFloat - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") - -isdefined(Main, :Furlongs) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Furlongs.jl")) -using .Main.Furlongs - -isdefined(Main, :Quaternions) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Quaternions.jl")) -using .Main.Quaternions - -isdefined(Main, :InfiniteArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "InfiniteArrays.jl")) -using .Main.InfiniteArrays - -isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl")) -using .Main.FillArrays - -isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl")) -using .Main.OffsetArrays - -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays - -include("testutils.jl") # test_approx_eq_modphase - -n = 10 #Size of test matrix -Random.seed!(1) - -@testset for relty in (Int, Float32, Float64, BigFloat), elty in (relty, Complex{relty}) - if relty <: AbstractFloat - dv = convert(Vector{elty}, randn(n)) - ev = convert(Vector{elty}, randn(n-1)) - if (elty <: Complex) - dv += im*convert(Vector{elty}, randn(n)) - ev += im*convert(Vector{elty}, randn(n-1)) - end - elseif relty <: Integer - dv = convert(Vector{elty}, rand(1:10, n)) - ev = convert(Vector{elty}, rand(1:10, n-1)) - if (elty <: Complex) - dv += im*convert(Vector{elty}, rand(1:10, n)) - ev += im*convert(Vector{elty}, rand(1:10, n-1)) - end - end - dv0 = zeros(elty, 0) - ev0 = zeros(elty, 0) - - @testset "Constructors" begin - for (x, y) in ((dv0, ev0), (dv, ev), (GenericArray(dv), GenericArray(ev))) - # from vectors - ubd = Bidiagonal(x, y, :U) - lbd = Bidiagonal(x, y, :L) - @test ubd != lbd || x === dv0 - @test ubd.dv === x - @test lbd.ev === y - @test_throws ArgumentError Bidiagonal(x, y, :R) - @test_throws ArgumentError Bidiagonal(x, y, 'R') - x == dv0 || @test_throws DimensionMismatch Bidiagonal(x, x, :U) - @test_throws MethodError Bidiagonal(x, y) - # from matrix - @test Bidiagonal(ubd, :U) == Bidiagonal(Matrix(ubd), :U) == ubd - @test Bidiagonal(lbd, :L) == Bidiagonal(Matrix(lbd), :L) == lbd - # from its own type - @test typeof(ubd)(ubd) === ubd - @test typeof(lbd)(lbd) === lbd - end - @test eltype(Bidiagonal{elty}([1,2,3,4], [1.0f0,2.0f0,3.0f0], :U)) == elty - @test eltype(Bidiagonal([1,2,3,4], [1.0f0,2.0f0,3.0f0], :U)) == Float32 # promotion test - @test isa(Bidiagonal{elty,Vector{elty}}(GenericArray(dv), ev, :U), Bidiagonal{elty,Vector{elty}}) - @test_throws MethodError Bidiagonal(dv, GenericArray(ev), :U) - @test_throws MethodError Bidiagonal(GenericArray(dv), ev, :U) - BI = Bidiagonal([1,2,3,4], [1,2,3], :U) - @test Bidiagonal(BI) === BI - @test isa(Bidiagonal{elty}(BI), Bidiagonal{elty}) - end - - @testset "getindex, setindex!, size, and similar" begin - ubd = Bidiagonal(dv, ev, :U) - lbd = Bidiagonal(dv, ev, :L) - # bidiagonal getindex / upper & lower - @test_throws BoundsError ubd[n + 1, 1] - @test_throws BoundsError ubd[1, n + 1] - @test ubd[2, 2] == dv[2] - # bidiagonal getindex / upper - @test ubd[2, 3] == ev[2] - @test iszero(ubd[3, 2]) - # bidiagonal getindex / lower - @test lbd[3, 2] == ev[2] - @test iszero(lbd[2, 3]) - # bidiagonal setindex! 
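A small standalone sketch of the two Bidiagonal constructors the testset above relies on (example vectors are made up):

using LinearAlgebra

dv = [1.0, 2.0, 3.0, 4.0]          # main diagonal
ev = [5.0, 6.0, 7.0]               # off-diagonal, one element shorter
BU = Bidiagonal(dv, ev, :U)        # upper bidiagonal
BL = Bidiagonal(dv, ev, :L)        # lower bidiagonal
BU == Bidiagonal(Matrix(BU), :U)   # true: reconstructed from a dense matrix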
/ upper - cubd = copy(ubd) - @test_throws ArgumentError ubd[2, 1] = 1 - @test_throws ArgumentError ubd[3, 1] = 1 - @test (cubd[2, 1] = 0; cubd == ubd) - @test ((cubd[1, 2] = 10) == 10; cubd[1, 2] == 10) - # bidiagonal setindex! / lower - clbd = copy(lbd) - @test_throws ArgumentError lbd[1, 2] = 1 - @test_throws ArgumentError lbd[1, 3] = 1 - @test (clbd[1, 2] = 0; clbd == lbd) - @test ((clbd[2, 1] = 10) == 10; clbd[2, 1] == 10) - # bidiagonal setindex! / upper & lower - @test_throws BoundsError ubd[n + 1, 1] = 1 - @test_throws BoundsError ubd[1, n + 1] = 1 - @test ((cubd[2, 2] = 10) == 10; cubd[2, 2] == 10) - # bidiagonal size - @test_throws BoundsError size(ubd, 0) - @test size(ubd, 1) == size(ubd, 2) == n - @test size(ubd, 3) == 1 - # bidiagonal similar - @test isa(similar(ubd), Bidiagonal{elty}) - @test similar(ubd).uplo == ubd.uplo - @test isa(similar(ubd, Int), Bidiagonal{Int}) - @test similar(ubd, Int).uplo == ubd.uplo - @test isa(similar(ubd, (3, 2)), Matrix) - @test isa(similar(ubd, Int, (3, 2)), Matrix{Int}) - - # setindex! when off diagonal is zero bug - Bu = Bidiagonal(rand(elty, 10), zeros(elty, 9), 'U') - Bl = Bidiagonal(rand(elty, 10), zeros(elty, 9), 'L') - @test_throws ArgumentError Bu[5, 4] = 1 - @test_throws ArgumentError Bl[4, 5] = 1 - - # setindex should return the destination - @test setindex!(ubd, 1, 1, 1) === ubd - end - - @testset "isstored" begin - ubd = Bidiagonal(dv, ev, :U) - lbd = Bidiagonal(dv, ev, :L) - # bidiagonal isstored / upper & lower - @test_throws BoundsError Base.isstored(ubd, n + 1, 1) - @test_throws BoundsError Base.isstored(ubd, 1, n + 1) - @test Base.isstored(ubd, 2, 2) - # bidiagonal isstored / upper - @test Base.isstored(ubd, 2, 3) - @test !Base.isstored(ubd, 3, 2) - # bidiagonal isstored / lower - @test Base.isstored(lbd, 3, 2) - @test !Base.isstored(lbd, 2, 3) - end - - @testset "show" begin - BD = Bidiagonal(dv, ev, :U) - @test sprint(show,BD) == "Bidiagonal($(repr(dv)), $(repr(ev)), :U)" - BD = Bidiagonal(dv,ev,:L) - @test sprint(show,BD) == "Bidiagonal($(repr(dv)), $(repr(ev)), :L)" - end - - @testset for uplo in (:U, :L) - T = Bidiagonal(dv, ev, uplo) - - @testset "Constructor and basic properties" begin - @test size(T, 1) == size(T, 2) == n - @test size(T) == (n, n) - @test Array(T) == diagm(0 => dv, (uplo === :U ? 1 : -1) => ev) - @test Bidiagonal(Array(T), uplo) == T - @test big.(T) == T - @test Array(abs.(T)) == abs.(diagm(0 => dv, (uplo === :U ? 1 : -1) => ev)) - @test Array(real(T)) == real(diagm(0 => dv, (uplo === :U ? 1 : -1) => ev)) - @test Array(imag(T)) == imag(diagm(0 => dv, (uplo === :U ? 
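A brief sketch of the setindex! rules checked above: stored entries can be written, a zero may be assigned to a structural zero as a no-op, and any other off-band write throws (made-up example values):

using LinearAlgebra

B = Bidiagonal([1, 2, 3], [4, 5], :U)
B[1, 2] = 10      # stored superdiagonal entry may be updated
B[2, 1] = 0       # zero into a structural zero: accepted, no storage change
# B[2, 1] = 1     # would throw ArgumentError, there is no storage below the band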
1 : -1) => ev)) - end - - @testset for func in (conj, transpose, adjoint) - @test func(func(T)) == T - if func ∈ (transpose, adjoint) - @test func(func(T)) === T - end - end - - @testset "permutedims(::Bidiagonal)" begin - @test permutedims(permutedims(T)) === T - @test permutedims(T) == transpose.(transpose(T)) - @test permutedims(T, [1, 2]) === T - @test permutedims(T, (2, 1)) == permutedims(T) - end - - @testset "triu and tril" begin - zerosdv = zeros(elty, length(dv)) - zerosev = zeros(elty, length(ev)) - bidiagcopy(dv, ev, uplo) = Bidiagonal(copy(dv), copy(ev), uplo) - - @test istril(Bidiagonal(dv,ev,:L)) - @test istril(Bidiagonal(dv,ev,:L), 1) - @test !istril(Bidiagonal(dv,ev,:L), -1) - @test istril(Bidiagonal(zerosdv,ev,:L), -1) - @test !istril(Bidiagonal(zerosdv,ev,:L), -2) - @test istril(Bidiagonal(zerosdv,zerosev,:L), -2) - @test !istril(Bidiagonal(dv,ev,:U)) - @test istril(Bidiagonal(dv,ev,:U), 1) - @test !istril(Bidiagonal(dv,ev,:U), -1) - @test !istril(Bidiagonal(zerosdv,ev,:U), -1) - @test istril(Bidiagonal(zerosdv,zerosev,:U), -1) - @test tril!(bidiagcopy(dv,ev,:U),-1) == Bidiagonal(zerosdv,zerosev,:U) - @test tril!(bidiagcopy(dv,ev,:L),-1) == Bidiagonal(zerosdv,ev,:L) - @test tril!(bidiagcopy(dv,ev,:U),-2) == Bidiagonal(zerosdv,zerosev,:U) - @test tril!(bidiagcopy(dv,ev,:L),-2) == Bidiagonal(zerosdv,zerosev,:L) - @test tril!(bidiagcopy(dv,ev,:U),1) == Bidiagonal(dv,ev,:U) - @test tril!(bidiagcopy(dv,ev,:L),1) == Bidiagonal(dv,ev,:L) - @test tril!(bidiagcopy(dv,ev,:U)) == Bidiagonal(dv,zerosev,:U) - @test tril!(bidiagcopy(dv,ev,:L)) == Bidiagonal(dv,ev,:L) - @test_throws ArgumentError tril!(bidiagcopy(dv, ev, :U), -n - 2) - @test_throws ArgumentError tril!(bidiagcopy(dv, ev, :U), n) - - @test istriu(Bidiagonal(dv,ev,:U)) - @test istriu(Bidiagonal(dv,ev,:U), -1) - @test !istriu(Bidiagonal(dv,ev,:U), 1) - @test istriu(Bidiagonal(zerosdv,ev,:U), 1) - @test !istriu(Bidiagonal(zerosdv,ev,:U), 2) - @test istriu(Bidiagonal(zerosdv,zerosev,:U), 2) - @test !istriu(Bidiagonal(dv,ev,:L)) - @test istriu(Bidiagonal(dv,ev,:L), -1) - @test !istriu(Bidiagonal(dv,ev,:L), 1) - @test !istriu(Bidiagonal(zerosdv,ev,:L), 1) - @test istriu(Bidiagonal(zerosdv,zerosev,:L), 1) - @test triu!(bidiagcopy(dv,ev,:L),1) == Bidiagonal(zerosdv,zerosev,:L) - @test triu!(bidiagcopy(dv,ev,:U),1) == Bidiagonal(zerosdv,ev,:U) - @test triu!(bidiagcopy(dv,ev,:U),2) == Bidiagonal(zerosdv,zerosev,:U) - @test triu!(bidiagcopy(dv,ev,:L),2) == Bidiagonal(zerosdv,zerosev,:L) - @test triu!(bidiagcopy(dv,ev,:U),-1) == Bidiagonal(dv,ev,:U) - @test triu!(bidiagcopy(dv,ev,:L),-1) == Bidiagonal(dv,ev,:L) - @test triu!(bidiagcopy(dv,ev,:L)) == Bidiagonal(dv,zerosev,:L) - @test triu!(bidiagcopy(dv,ev,:U)) == Bidiagonal(dv,ev,:U) - @test_throws ArgumentError triu!(bidiagcopy(dv, ev, :U), -n) - @test_throws ArgumentError triu!(bidiagcopy(dv, ev, :U), n + 2) - @test !isdiag(Bidiagonal(dv,ev,:U)) - @test !isdiag(Bidiagonal(dv,ev,:L)) - @test isdiag(Bidiagonal(dv,zerosev,:U)) - @test isdiag(Bidiagonal(dv,zerosev,:L)) - end - - @testset "iszero and isone" begin - for uplo in (:U, :L) - BDzero = Bidiagonal(zeros(elty, 10), zeros(elty, 9), uplo) - BDone = Bidiagonal(ones(elty, 10), zeros(elty, 9), uplo) - BDmix = Bidiagonal(zeros(elty, 10), zeros(elty, 9), uplo) - BDmix[end,end] = one(elty) - - @test iszero(BDzero) - @test !isone(BDzero) - @test !iszero(BDone) - @test isone(BDone) - @test !iszero(BDmix) - @test !isone(BDmix) - end - end - - @testset "trace" begin - for uplo in (:U, :L) - B = Bidiagonal(dv, ev, uplo) - if relty <: 
Integer - @test tr(B) == tr(Matrix(B)) - else - @test tr(B) ≈ tr(Matrix(B)) rtol=2eps(relty) - end - end - end - - Tfull = Array(T) - @testset "Linear solves" begin - if relty <: AbstractFloat - c = convert(Matrix{elty}, randn(n,n)) - b = convert(Matrix{elty}, randn(n, 2)) - if (elty <: Complex) - b += im*convert(Matrix{elty}, randn(n, 2)) - end - elseif relty <: Integer - c = convert(Matrix{elty}, rand(1:10, n, n)) - b = convert(Matrix{elty}, rand(1:10, n, 2)) - if (elty <: Complex) - b += im*convert(Matrix{elty}, rand(1:10, n, 2)) - end - end - condT = cond(map(ComplexF64,Tfull)) - promty = typeof((zero(relty)*zero(relty) + zero(relty)*zero(relty))/one(relty)) - if relty != BigFloat - x = transpose(T)\transpose(c) - tx = transpose(Tfull) \ transpose(c) - elty <: AbstractFloat && @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - @test_throws DimensionMismatch transpose(T)\transpose(b) - x = T'\copy(transpose(c)) - tx = Tfull'\copy(transpose(c)) - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - @test_throws DimensionMismatch T'\copy(transpose(b)) - x = T\transpose(c) - tx = Tfull\transpose(c) - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - @test_throws DimensionMismatch T\transpose(b) - end - offsizemat = Matrix{elty}(undef, n+1, 2) - @test_throws DimensionMismatch T \ offsizemat - @test_throws DimensionMismatch transpose(T) \ offsizemat - @test_throws DimensionMismatch T' \ offsizemat - - if elty <: BigFloat - @test_throws SingularException ldiv!(Bidiagonal(zeros(elty, n), ones(elty, n-1), :U), rand(elty, n)) - @test_throws SingularException ldiv!(Bidiagonal(zeros(elty, n), ones(elty, n-1), :L), rand(elty, n)) - end - let bb = b, cc = c - for atype in ("Array", "SubArray") - if atype == "Array" - b = bb - c = cc - else - b = view(bb, 1:n) - c = view(cc, 1:n, 1:2) - end - end - x = T \ b - tx = Tfull \ b - @test_throws DimensionMismatch ldiv!(T, Vector{elty}(undef, n+1)) - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - x = transpose(T) \ b - tx = transpose(Tfull) \ b - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - x = copy(transpose(b)) / T - tx = copy(transpose(b)) / Tfull - @test_throws DimensionMismatch rdiv!(Matrix{elty}(undef, 1, n+1), T) - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - x = copy(transpose(b)) / transpose(T) - tx = copy(transpose(b)) / transpose(Tfull) - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - @testset "Generic Mat-vec ops" begin - @test T*b ≈ Tfull*b - @test T'*b ≈ Tfull'*b - if relty != BigFloat # not supported by pivoted QR - @test T/b' ≈ Tfull/b' - end - end - end - zdv = Vector{elty}(undef, 0) - zev = Vector{elty}(undef, 0) - zA = Bidiagonal(zdv, zev, :U) - zb = Vector{elty}(undef, 0) - @test ldiv!(zA, zb) === zb - @testset "linear solves with abstract matrices" begin - diag = b[:,1] - D = Diagonal(diag) - x = T \ D - tx = Tfull \ D - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - x = D / T - tx = D / Tfull - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - x = transpose(T) \ D - tx = transpose(Tfull) \ D - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - x = D / transpose(T) - tx = D / transpose(Tfull) - @test norm(x-tx,Inf) <= 4*condT*max(eps()*norm(tx,Inf), eps(promty)*norm(x,Inf)) - end - @testset 
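A standalone sketch of the structured solves compared against dense ones above (made-up, well-conditioned example):

using LinearAlgebra

T = Bidiagonal([2.0, 3.0, 4.0], [1.0, 1.0], :U)
b = [1.0, 2.0, 3.0]
T \ b ≈ Matrix(T) \ b                          # true: structured solve matches dense
transpose(T) \ b ≈ transpose(Matrix(T)) \ b    # true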
"Specialized multiplication/division" begin - getval(x) = x - getval(x::Furlong) = x.val - function _bidiagdivmultest(T, - x, - typemul=T.uplo == 'U' ? UpperTriangular : Matrix, - typediv=T.uplo == 'U' ? UpperTriangular : Matrix, - typediv2=T.uplo == 'U' ? UpperTriangular : Matrix) - TM = Matrix(T) - @test map(getval, (T*x)::typemul) ≈ map(getval, TM*x) - @test map(getval, (x*T)::typemul) ≈ map(getval, x*TM) - @test map(getval, (x\T)::typediv) ≈ map(getval, x\TM) - @test map(getval, (T/x)::typediv) ≈ map(getval, TM/x) - if !isa(x, Number) - @test map(getval, Array((T\x)::typediv2)) ≈ map(getval, Array(TM\x)) - @test map(getval, Array((x/T)::typediv2)) ≈ map(getval, Array(x/TM)) - end - return nothing - end - A = Matrix(T) - for t in (T, Furlong.(T)), (A, dv, ev) in ((A, dv, ev), (Furlong.(A), Furlong.(dv), Furlong.(ev))) - _bidiagdivmultest(t, 5, Bidiagonal, Bidiagonal) - _bidiagdivmultest(t, 5I, Bidiagonal, Bidiagonal, t.uplo == 'U' ? UpperTriangular : LowerTriangular) - _bidiagdivmultest(t, Diagonal(dv), Bidiagonal, Bidiagonal, t.uplo == 'U' ? UpperTriangular : LowerTriangular) - _bidiagdivmultest(t, UpperTriangular(A)) - _bidiagdivmultest(t, UnitUpperTriangular(A)) - _bidiagdivmultest(t, LowerTriangular(A), t.uplo == 'L' ? LowerTriangular : Matrix, t.uplo == 'L' ? LowerTriangular : Matrix, t.uplo == 'L' ? LowerTriangular : Matrix) - _bidiagdivmultest(t, UnitLowerTriangular(A), t.uplo == 'L' ? LowerTriangular : Matrix, t.uplo == 'L' ? LowerTriangular : Matrix, t.uplo == 'L' ? LowerTriangular : Matrix) - _bidiagdivmultest(t, Bidiagonal(dv, ev, :U), Matrix, Matrix, Matrix) - _bidiagdivmultest(t, Bidiagonal(dv, ev, :L), Matrix, Matrix, Matrix) - end - end - end - - if elty <: BlasReal - @testset "$f" for f in (floor, trunc, round, ceil) - @test (f.(Int, T))::Bidiagonal == Bidiagonal(f.(Int, T.dv), f.(Int, T.ev), T.uplo) - @test (f.(T))::Bidiagonal == Bidiagonal(f.(T.dv), f.(T.ev), T.uplo) - end - end - - @testset "diag" begin - @test (@inferred diag(T))::typeof(dv) == dv - @test (@inferred diag(T, uplo === :U ? 1 : -1))::typeof(dv) == ev - @test (@inferred diag(T,2))::typeof(dv) == zeros(elty, n-2) - @test isempty(@inferred diag(T, -n - 1)) - @test isempty(@inferred diag(T, n + 1)) - # test diag with another wrapped vector type - gdv, gev = GenericArray(dv), GenericArray(ev) - G = Bidiagonal(gdv, gev, uplo) - @test (@inferred diag(G))::typeof(gdv) == gdv - @test (@inferred diag(G, uplo === :U ? 1 : -1))::typeof(gdv) == gev - @test (@inferred diag(G,2))::typeof(gdv) == GenericArray(zeros(elty, n-2)) - end - - @testset "Eigensystems" begin - if relty <: AbstractFloat - d1, v1 = eigen(T) - d2, v2 = eigen(map(elty<:Complex ? ComplexF64 : Float64,Tfull), sortby=nothing) - @test (uplo === :U ? d1 : reverse(d1)) ≈ d2 - if elty <: Real - test_approx_eq_modphase(v1, uplo === :U ? 
v2 : v2[:,n:-1:1]) - end - end - end - - @testset "Singular systems" begin - if (elty <: BlasReal) - @test AbstractArray(svd(T)) ≈ AbstractArray(svd!(copy(Tfull))) - @test svdvals(Tfull) ≈ svdvals(T) - u1, d1, v1 = svd(Tfull) - u2, d2, v2 = svd(T) - @test d1 ≈ d2 - if elty <: Real - test_approx_eq_modphase(u1, u2) - test_approx_eq_modphase(copy(v1), copy(v2)) - end - @test 0 ≈ norm(u2*Diagonal(d2)*v2'-Tfull) atol=n*max(n^2*eps(relty),norm(u1*Diagonal(d1)*v1'-Tfull)) - @inferred svdvals(T) - @inferred svd(T) - end - end - - @testset "Binary operations" begin - @test -T == Bidiagonal(-T.dv,-T.ev,T.uplo) - @test convert(elty,-1.0) * T == Bidiagonal(-T.dv,-T.ev,T.uplo) - @test T / convert(elty,-1.0) == Bidiagonal(-T.dv,-T.ev,T.uplo) - @test T * convert(elty,-1.0) == Bidiagonal(-T.dv,-T.ev,T.uplo) - @testset for uplo2 in (:U, :L) - dv = convert(Vector{elty}, relty <: AbstractFloat ? randn(n) : rand(1:10, n)) - ev = convert(Vector{elty}, relty <: AbstractFloat ? randn(n-1) : rand(1:10, n-1)) - T2 = Bidiagonal(dv, ev, uplo2) - Tfull2 = Array(T2) - for op in (+, -, *) - @test Array(op(T, T2)) ≈ op(Tfull, Tfull2) - end - A = kron(T.dv, T.dv') - @test T * A ≈ lmul!(T, copy(A)) - @test A * T ≈ rmul!(copy(A), T) - end - # test pass-through of mul! for SymTridiagonal*Bidiagonal - TriSym = SymTridiagonal(T.dv, T.ev) - @test Array(TriSym*T) ≈ Array(TriSym)*Array(T) - # test pass-through of mul! for AbstractTriangular*Bidiagonal - Tri = UpperTriangular(diagm(1 => T.ev)) - Dia = Diagonal(T.dv) - @test Array(Tri*T) ≈ Array(Tri)*Array(T) ≈ rmul!(copy(Tri), T) - @test Array(T*Tri) ≈ Array(T)*Array(Tri) ≈ lmul!(T, copy(Tri)) - # test mul! itself for these types - for AA in (Tri, Dia) - for f in (identity, transpose, adjoint) - C = rand(elty, n, n) - D = copy(C) + 2.0 * Array(f(AA) * T) - mul!(C, f(AA), T, 2.0, 1.0) ≈ D - end - end - # test mul! for BiTrySym * adjoint/transpose AbstractMat - for f in (identity, transpose, adjoint) - C = relty == Int ? 
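A short sketch of the singular-value checks above for a BLAS-real Bidiagonal (random example data):

using LinearAlgebra

B = Bidiagonal(rand(5), rand(4), :U)
svdvals(B) ≈ svdvals(Matrix(B))            # true
F = svd(B)
F.U * Diagonal(F.S) * F.Vt ≈ Matrix(B)     # true: the factorization reconstructs B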
rand(float(elty), n, n) : rand(elty, n, n) - B = rand(elty, n, n) - D = C + 2.0 * Array(T*f(B)) - @test mul!(C, T, f(B), 2.0, 1.0) ≈ D - @test lmul!(T, copy(f(B))) ≈ T * f(B) - @test rmul!(copy(f(B)), T) ≈ f(B) * T - end - - # Issue #31870 - # Bi/Tri/Sym times Diagonal - Diag = Diagonal(rand(elty, 10)) - BidiagU = Bidiagonal(rand(elty, 10), rand(elty, 9), 'U') - BidiagL = Bidiagonal(rand(elty, 10), rand(elty, 9), 'L') - Tridiag = Tridiagonal(rand(elty, 9), rand(elty, 10), rand(elty, 9)) - SymTri = SymTridiagonal(rand(elty, 10), rand(elty, 9)) - - mats = Any[Diag, BidiagU, BidiagL, Tridiag, SymTri] - for a in mats - for b in mats - @test a*b ≈ Matrix(a)*Matrix(b) - end - end - - @test typeof(BidiagU*Diag) <: Bidiagonal - @test typeof(BidiagL*Diag) <: Bidiagonal - @test typeof(Tridiag*Diag) <: Tridiagonal - @test typeof(SymTri*Diag) <: Tridiagonal - - @test typeof(BidiagU*Diag) <: Bidiagonal - @test typeof(Diag*BidiagL) <: Bidiagonal - @test typeof(Diag*Tridiag) <: Tridiagonal - @test typeof(Diag*SymTri) <: Tridiagonal - end - - @test inv(T)*Tfull ≈ Matrix(I, n, n) - @test factorize(T) === T - end - BD = Bidiagonal(dv, ev, :U) - @test Matrix{ComplexF64}(BD) == BD -end - -# Issue 10742 and similar -let A = Bidiagonal([1,2,3], [0,0], :U) - @test istril(A) - @test isdiag(A) -end - -# test construct from range -@test Bidiagonal(1:3, 1:2, :U) == [1 1 0; 0 2 2; 0 0 3] - -@testset "promote_rule" begin - A = Bidiagonal(fill(1f0,10),fill(1f0,9),:U) - B = rand(Float64,10,10) - C = Tridiagonal(rand(Float64,9),rand(Float64,10),rand(Float64,9)) - @test promote_rule(Matrix{Float64}, Bidiagonal{Float64}) == Matrix{Float64} - @test promote(B,A) == (B, convert(Matrix{Float64}, A)) - @test promote(B,A) isa Tuple{Matrix{Float64}, Matrix{Float64}} - @test promote(C,A) == (C,Tridiagonal(zeros(Float64,9),convert(Vector{Float64},A.dv),convert(Vector{Float64},A.ev))) - @test promote(C,A) isa Tuple{Tridiagonal, Tridiagonal} -end - -using LinearAlgebra: fillstored!, UnitLowerTriangular -@testset "fill! and fillstored!" begin - let # fillstored! - A = Tridiagonal(randn(2), randn(3), randn(2)) - @test fillstored!(A, 3) == Tridiagonal([3, 3], [3, 3, 3], [3, 3]) - B = Bidiagonal(randn(3), randn(2), :U) - @test fillstored!(B, 2) == Bidiagonal([2,2,2], [2,2], :U) - S = SymTridiagonal(randn(3), randn(2)) - @test fillstored!(S, 1) == SymTridiagonal([1,1,1], [1,1]) - Ult = UnitLowerTriangular(randn(3,3)) - @test fillstored!(Ult, 3) == UnitLowerTriangular([1 0 0; 3 1 0; 3 3 1]) - end - let # fill!(exotic, 0) - exotic_arrays = Any[Tridiagonal(randn(3), randn(4), randn(3)), - Bidiagonal(randn(3), randn(2), rand([:U,:L])), - SymTridiagonal(randn(3), randn(2)), - Diagonal(randn(5)), - # LowerTriangular(randn(3,3)), # AbstractTriangular fill! deprecated, see below - # UpperTriangular(randn(3,3)) # AbstractTriangular fill! deprecated, see below - ] - for A in exotic_arrays - @test iszero(fill!(A, 0)) - end - - # Diagonal fill! is no longer deprecated. See #29780 - # AbstractTriangular fill! was defined as fillstored!, - # not matching the general behavior of fill!, and so it has been deprecated. - # In a future dev cycle, this fill! methods should probably be reintroduced - # with behavior matching that of fill! for other structured matrix types. - # In the interim, equivalently test fillstored! 
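A compact sketch of the structure-preserving products asserted above for issue #31870 (random example data):

using LinearAlgebra

D  = Diagonal(rand(5))
BU = Bidiagonal(rand(5), rand(4), :U)
S  = SymTridiagonal(rand(5), rand(4))
BU * D isa Bidiagonal     # true: bidiagonal structure survives scaling by a Diagonal
D * S  isa Tridiagonal    # true: symmetric tridiagonal widens only to Tridiagonal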
below - @test iszero(fillstored!(Diagonal(fill(1, 3)), 0)) - @test iszero(fillstored!(LowerTriangular(fill(1, 3, 3)), 0)) - @test iszero(fillstored!(UpperTriangular(fill(1, 3, 3)), 0)) - end - let # fill!(small, x) - val = randn() - b = Bidiagonal(randn(1,1), :U) - st = SymTridiagonal(randn(1,1)) - d = Diagonal(rand(1)) - for x in (b, st, d) - @test Array(fill!(x, val)) == fill!(Array(x), val) - end - b = Bidiagonal(randn(2,2), :U) - st = SymTridiagonal(randn(3), randn(2)) - t = Tridiagonal(randn(3,3)) - d = Diagonal(rand(3)) - for x in (b, t, st, d) - @test_throws ArgumentError fill!(x, val) - @test Array(fill!(x, 0)) == fill!(Array(x), 0) - end - end -end - -@testset "pathological promotion (#24707)" begin - @test promote_type(Matrix{Int}, Bidiagonal{Tuple{S}} where S<:Integer) <: Matrix - @test promote_type(Matrix{Tuple{T}} where T<:Integer, Bidiagonal{Tuple{S}} where S<:Integer) <: Matrix - @test promote_type(Matrix{Tuple{T}} where T<:Integer, Bidiagonal{Int}) <: Matrix - @test promote_type(Tridiagonal{Int}, Bidiagonal{Tuple{S}} where S<:Integer) <: Tridiagonal - @test promote_type(Tridiagonal{Tuple{T}} where T<:Integer, Bidiagonal{Tuple{S}} where S<:Integer) <: Tridiagonal - @test promote_type(Tridiagonal{Tuple{T}} where T<:Integer, Bidiagonal{Int}) <: Tridiagonal -end - -@testset "solve with matrix elements" begin - A = triu(tril(randn(9, 9), 3), -3) - b = randn(9) - Alb = Bidiagonal(Any[tril(A[1:3,1:3]), tril(A[4:6,4:6]), tril(A[7:9,7:9])], - Any[triu(A[4:6,1:3]), triu(A[7:9,4:6])], 'L') - Aub = Bidiagonal(Any[triu(A[1:3,1:3]), triu(A[4:6,4:6]), triu(A[7:9,7:9])], - Any[tril(A[1:3,4:6]), tril(A[4:6,7:9])], 'U') - bb = Any[b[1:3], b[4:6], b[7:9]] - @test vcat((Alb\bb)...) ≈ LowerTriangular(A)\b - @test vcat((Aub\bb)...) ≈ UpperTriangular(A)\b - Alb = Bidiagonal([tril(A[1:3,1:3]), tril(A[4:6,4:6]), tril(A[7:9,7:9])], - [triu(A[4:6,1:3]), triu(A[7:9,4:6])], 'L') - Aub = Bidiagonal([triu(A[1:3,1:3]), triu(A[4:6,4:6]), triu(A[7:9,7:9])], - [tril(A[1:3,4:6]), tril(A[4:6,7:9])], 'U') - d = [randn(3,3) for _ in 1:3] - dl = [randn(3,3) for _ in 1:2] - B = [randn(3,3) for _ in 1:3, _ in 1:3] - for W in (UpperTriangular, LowerTriangular), t in (identity, adjoint, transpose) - @test Matrix(t(Alb) \ W(B)) ≈ t(Alb) \ Matrix(W(B)) - @test Matrix(t(Aub) \ W(B)) ≈ t(Aub) \ Matrix(W(B)) - @test Matrix(W(B) / t(Alb)) ≈ Matrix(W(B)) / t(Alb) - @test Matrix(W(B) / t(Aub)) ≈ Matrix(W(B)) / t(Aub) - end -end - -@testset "sum, mapreduce" begin - Bu = Bidiagonal([1,2,3], [1,2], :U) - Budense = Matrix(Bu) - Bl = Bidiagonal([1,2,3], [1,2], :L) - Bldense = Matrix(Bl) - @test sum(Bu) == 9 - @test sum(Bl) == 9 - @test_throws ArgumentError sum(Bu, dims=0) - @test sum(Bu, dims=1) == sum(Budense, dims=1) - @test sum(Bu, dims=2) == sum(Budense, dims=2) - @test sum(Bu, dims=3) == sum(Budense, dims=3) - @test typeof(sum(Bu, dims=1)) == typeof(sum(Budense, dims=1)) - @test mapreduce(one, min, Bu, dims=1) == mapreduce(one, min, Budense, dims=1) - @test mapreduce(one, min, Bu, dims=2) == mapreduce(one, min, Budense, dims=2) - @test mapreduce(one, min, Bu, dims=3) == mapreduce(one, min, Budense, dims=3) - @test typeof(mapreduce(one, min, Bu, dims=1)) == typeof(mapreduce(one, min, Budense, dims=1)) - @test mapreduce(zero, max, Bu, dims=1) == mapreduce(zero, max, Budense, dims=1) - @test mapreduce(zero, max, Bu, dims=2) == mapreduce(zero, max, Budense, dims=2) - @test mapreduce(zero, max, Bu, dims=3) == mapreduce(zero, max, Budense, dims=3) - @test typeof(mapreduce(zero, max, Bu, dims=1)) == typeof(mapreduce(zero, max, 
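A minimal sketch of the internal LinearAlgebra.fillstored! used above; it overwrites only the structurally stored entries (made-up sizes):

using LinearAlgebra
using LinearAlgebra: fillstored!

B = Bidiagonal(zeros(3), zeros(2), :U)
fillstored!(B, 2)
B == Bidiagonal(fill(2.0, 3), fill(2.0, 2), :U)   # true: only dv and ev were touched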
Budense, dims=1)) - @test_throws ArgumentError sum(Bl, dims=0) - @test sum(Bl, dims=1) == sum(Bldense, dims=1) - @test sum(Bl, dims=2) == sum(Bldense, dims=2) - @test sum(Bl, dims=3) == sum(Bldense, dims=3) - @test typeof(sum(Bl, dims=1)) == typeof(sum(Bldense, dims=1)) - @test mapreduce(one, min, Bl, dims=1) == mapreduce(one, min, Bldense, dims=1) - @test mapreduce(one, min, Bl, dims=2) == mapreduce(one, min, Bldense, dims=2) - @test mapreduce(one, min, Bl, dims=3) == mapreduce(one, min, Bldense, dims=3) - @test typeof(mapreduce(one, min, Bl, dims=1)) == typeof(mapreduce(one, min, Bldense, dims=1)) - @test mapreduce(zero, max, Bl, dims=1) == mapreduce(zero, max, Bldense, dims=1) - @test mapreduce(zero, max, Bl, dims=2) == mapreduce(zero, max, Bldense, dims=2) - @test mapreduce(zero, max, Bl, dims=3) == mapreduce(zero, max, Bldense, dims=3) - @test typeof(mapreduce(zero, max, Bl, dims=1)) == typeof(mapreduce(zero, max, Bldense, dims=1)) - - Bu = Bidiagonal([2], Int[], :U) - Budense = Matrix(Bu) - Bl = Bidiagonal([2], Int[], :L) - Bldense = Matrix(Bl) - @test sum(Bu) == 2 - @test sum(Bl) == 2 - @test_throws ArgumentError sum(Bu, dims=0) - @test sum(Bu, dims=1) == sum(Budense, dims=1) - @test sum(Bu, dims=2) == sum(Budense, dims=2) - @test sum(Bu, dims=3) == sum(Budense, dims=3) - @test typeof(sum(Bu, dims=1)) == typeof(sum(Budense, dims=1)) -end - -@testset "empty sub-diagonal" begin - # `mul!` must use non-specialized method when sub-diagonal is empty - A = [1 2 3 4]' - @test A * Tridiagonal(ones(1, 1)) == A -end - -@testset "generalized dot" begin - for elty in (Float64, ComplexF64), n in (5, 1) - dv = randn(elty, n) - ev = randn(elty, n-1) - x = randn(elty, n) - y = randn(elty, n) - for uplo in (:U, :L) - B = Bidiagonal(dv, ev, uplo) - @test dot(x, B, y) ≈ dot(B'x, y) ≈ dot(x, B*y) ≈ dot(x, Matrix(B), y) - end - dv = Vector{elty}(undef, 0) - ev = Vector{elty}(undef, 0) - x = Vector{elty}(undef, 0) - y = Vector{elty}(undef, 0) - for uplo in (:U, :L) - B = Bidiagonal(dv, ev, uplo) - @test dot(x, B, y) === zero(elty) - end - end -end - -@testset "multiplication of bidiagonal and triangular matrix" begin - n = 5 - for eltyB in (Int, ComplexF64) - if eltyB == Int - BU = Bidiagonal(rand(1:7, n), rand(1:7, n - 1), :U) - BL = Bidiagonal(rand(1:7, n), rand(1:7, n - 1), :L) - else - BU = Bidiagonal(randn(eltyB, n), randn(eltyB, n - 1), :U) - BL = Bidiagonal(randn(eltyB, n), randn(eltyB, n - 1), :L) - end - for eltyT in (Int, ComplexF64) - for TriT in (LowerTriangular, UnitLowerTriangular, UpperTriangular, UnitUpperTriangular) - if eltyT == Int - T = TriT(rand(1:7, n, n)) - else - T = TriT(randn(eltyT, n, n)) - end - for B in (BU, BL) - MB = Matrix(B) - MT = Matrix(T) - for transB in (identity, adjoint, transpose), transT in (identity, adjoint, transpose) - @test transB(B) * transT(T) ≈ transB(MB) * transT(MT) - @test transT(T) * transB(B) ≈ transT(MT) * transB(MB) - end - end - end - end - end -end - -struct MyNotANumberType - n::Float64 -end -Base.zero(n::MyNotANumberType) = MyNotANumberType(zero(Float64)) -Base.zero(T::Type{MyNotANumberType}) = MyNotANumberType(zero(Float64)) -Base.copy(n::MyNotANumberType) = MyNotANumberType(copy(n.n)) -Base.transpose(n::MyNotANumberType) = n - -@testset "transpose for a non-numeric eltype" begin - @test !(MyNotANumberType(1.0) isa Number) - a = [MyNotANumberType(1.0), MyNotANumberType(2.0), MyNotANumberType(3.0)] - b = [MyNotANumberType(5.0), MyNotANumberType(6.0)] - B = Bidiagonal(a, b, :U) - tB = transpose(B) - @test tB == Bidiagonal(a, b, :L) - @test 
transpose(copy(tB)) == B -end - -@testset "empty bidiagonal matrices" begin - dv0 = zeros(0) - ev0 = zeros(0) - zm = zeros(0, 0) - ubd = Bidiagonal(dv0, ev0, :U) - lbd = Bidiagonal(dv0, ev0, :L) - @test size(ubd) == (0, 0) - @test_throws BoundsError getindex(ubd, 1, 1) - @test_throws BoundsError setindex!(ubd, 0.0, 1, 1) - @test similar(ubd) == ubd - @test similar(lbd, Int) == zeros(Int, 0, 0) - @test ubd == zm - @test lbd == zm - @test ubd == lbd - @test ubd * ubd == ubd - @test lbd + lbd == lbd - @test lbd' == ubd - @test ubd' == lbd - @test triu(ubd, 1) == ubd - @test triu(lbd, 1) == ubd - @test tril(ubd, -1) == ubd - @test tril(lbd, -1) == ubd - @test_throws ArgumentError triu(ubd) - @test_throws ArgumentError tril(ubd) - @test sum(ubd) == 0.0 - @test reduce(+, ubd) == 0.0 - @test reduce(+, ubd, dims=1) == zeros(1, 0) - @test reduce(+, ubd, dims=2) == zeros(0, 1) - @test hcat(ubd, ubd) == zm - @test vcat(ubd, lbd) == zm - @test hcat(lbd, ones(0, 3)) == ones(0, 3) - @test fill!(copy(ubd), 1.0) == ubd - @test map(abs, ubd) == zm - @test lbd .+ 1 == zm - @test lbd + ubd isa Bidiagonal - @test lbd .+ ubd isa Bidiagonal - @test ubd * 5 == ubd - @test ubd .* 3 == ubd -end - -@testset "non-commutative algebra (#39701)" begin - A = Bidiagonal(Quaternion.(randn(5), randn(5), randn(5), randn(5)), Quaternion.(randn(4), randn(4), randn(4), randn(4)), :U) - c = Quaternion(1,2,3,4) - @test A * c ≈ Matrix(A) * c - @test A / c ≈ Matrix(A) / c - @test c * A ≈ c * Matrix(A) - @test c \ A ≈ c \ Matrix(A) -end - -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays - -@testset "Conversion to AbstractArray" begin - # tests corresponding to #34995 - dv = ImmutableArray([1, 2, 3, 4]) - ev = ImmutableArray([7, 8, 9]) - Bu = Bidiagonal(dv, ev, :U) - Bl = Bidiagonal(dv, ev, :L) - - @test convert(AbstractArray{Float64}, Bu)::Bidiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Bu - @test convert(AbstractMatrix{Float64}, Bu)::Bidiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Bu - @test convert(AbstractArray{Float64}, Bl)::Bidiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Bl - @test convert(AbstractMatrix{Float64}, Bl)::Bidiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Bl -end - -@testset "block-bidiagonal matrix indexing" begin - dv = [ones(4,3), ones(2,2).*2, ones(2,3).*3, ones(4,4).*4] - evu = [ones(4,2), ones(2,3).*2, ones(2,4).*3] - evl = [ones(2,3), ones(2,2).*2, ones(4,3).*3] - BU = Bidiagonal(dv, evu, :U) - BL = Bidiagonal(dv, evl, :L) - # check that all the matrices along a column have the same number of columns, - # and the matrices along a row have the same number of rows - for j in axes(BU, 2), i in 2:size(BU, 1) - @test size(BU[i,j], 2) == size(BU[1,j], 2) - @test size(BU[i,j], 1) == size(BU[i,1], 1) - if j < i || j > i + 1 - @test iszero(BU[i,j]) - end - end - for j in axes(BL, 2), i in 2:size(BL, 1) - @test size(BL[i,j], 2) == size(BL[1,j], 2) - @test size(BL[i,j], 1) == size(BL[i,1], 1) - if j < i-1 || j > i - @test iszero(BL[i,j]) - end - end - - @test diag(BU, -1) == [zeros(size(dv[i+1], 1), size(dv[i],2)) for i in 1:length(dv)-1] - @test diag(BL, 1) == [zeros(size(dv[i], 1), size(dv[i+1],2)) for i in 1:length(dv)-1] - - M = ones(2,2) - for n in 0:1 - dv = fill(M, n) - ev = fill(M, 0) - B = Bidiagonal(dv, ev, :U) - @test B == Matrix{eltype(B)}(B) - end - - @testset "non-standard axes" begin - LinearAlgebra.diagzero(T::Type, 
ax::Tuple{SizedArrays.SOneTo, Vararg{SizedArrays.SOneTo}}) = - zeros(T, ax) - - s = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) - B = Bidiagonal(fill(s,4), fill(s,3), :U) - @test @inferred(B[2,1]) isa typeof(s) - @test all(iszero, B[2,1]) - end -end - -@testset "copyto!" begin - ev, dv = [1:4;], [1:5;] - B = Bidiagonal(dv, ev, :U) - B2 = copyto!(zero(B), B) - @test B2 == B - for (ul1, ul2) in ((:U, :L), (:L, :U)) - B3 = Bidiagonal(dv, zero(ev), ul1) - B2 = Bidiagonal(zero(dv), zero(ev), ul2) - @test copyto!(B2, B3) == B3 - end - - @testset "mismatched sizes" begin - dv2 = [4; @view dv[2:end]] - @test copyto!(B, Bidiagonal([4], Int[], :U)) == Bidiagonal(dv2, ev, :U) - @test copyto!(B, Bidiagonal([4], Int[], :L)) == Bidiagonal(dv2, ev, :U) - @test copyto!(B, Bidiagonal(Int[], Int[], :U)) == Bidiagonal(dv, ev, :U) - @test copyto!(B, Bidiagonal(Int[], Int[], :L)) == Bidiagonal(dv, ev, :U) - end -end - -@testset "copyto! with UniformScaling" begin - @testset "Fill" begin - for len in (4, InfiniteArrays.Infinity()) - d = FillArrays.Fill(1, len) - ud = FillArrays.Fill(0, len-1) - B = Bidiagonal(d, ud, :U) - @test copyto!(B, I) === B - end - end - B = Bidiagonal(fill(2, 4), fill(3, 3), :U) - copyto!(B, I) - @test all(isone, diag(B)) - @test all(iszero, diag(B, 1)) -end - -@testset "diagind" begin - B = Bidiagonal(1:4, 1:3, :U) - M = Matrix(B) - @testset for k in -4:4 - @test B[diagind(B,k)] == M[diagind(M,k)] - end -end - -@testset "custom axes" begin - dv, uv = OffsetArray(1:4), OffsetArray(1:3) - B = Bidiagonal(dv, uv, :U) - ax = axes(dv, 1) - @test axes(B) === (ax, ax) -end - -@testset "avoid matmul ambiguities with ::MyMatrix * ::AbstractMatrix" begin - A = [i+j for i in 1:2, j in 1:2] - S = SizedArrays.SizedArray{(2,2)}(A) - B = Bidiagonal([1:2;], [1;], :U) - @test S * B == A * B - @test B * S == B * A - C1, C2 = zeros(2,2), zeros(2,2) - @test mul!(C1, S, B) == mul!(C2, A, B) - @test mul!(C1, S, B, 1, 2) == mul!(C2, A, B, 1 ,2) - @test mul!(C1, B, S) == mul!(C2, B, A) - @test mul!(C1, B, S, 1, 2) == mul!(C2, B, A, 1 ,2) - - v = [i for i in 1:2] - sv = SizedArrays.SizedArray{(2,)}(v) - @test B * sv == B * v - C1, C2 = zeros(2), zeros(2) - @test mul!(C1, B, sv) == mul!(C2, B, v) - @test mul!(C1, B, sv, 1, 2) == mul!(C2, B, v, 1 ,2) -end - -@testset "Reverse operation on Bidiagonal" begin - n = 5 - d = randn(n) - e = randn(n - 1) - for uplo in (:U, :L) - B = Bidiagonal(d, e, uplo) - @test reverse(B, dims=1) == reverse(Matrix(B), dims=1) - @test reverse(B, dims=2) == reverse(Matrix(B), dims=2) - @test reverse(B)::Bidiagonal == reverse(Matrix(B)) - end -end - -@testset "Matrix conversion for non-numeric" begin - B = Bidiagonal(fill(Diagonal([1,3]), 3), fill(Diagonal([1,3]), 2), :U) - M = Matrix{eltype(B)}(B) - @test M isa Matrix{eltype(B)} - @test M == B -end - -@testset "getindex with Integers" begin - dv, ev = 1:4, 1:3 - B = Bidiagonal(dv, ev, :U) - @test_throws "invalid index" B[3, true] - @test B[1,2] == B[Int8(1),UInt16(2)] == B[big(1), Int16(2)] -end - -@testset "rmul!/lmul! 
with banded matrices" begin - dv, ev = rand(4), rand(3) - for A in (Bidiagonal(dv, ev, :U), Bidiagonal(dv, ev, :L)) - @testset "$(nameof(typeof(B)))" for B in ( - Bidiagonal(dv, ev, :U), - Bidiagonal(dv, ev, :L), - Diagonal(dv) - ) - @test_throws ArgumentError rmul!(B, A) - @test_throws ArgumentError lmul!(A, B) - end - end - @testset "non-commutative" begin - S32 = SizedArrays.SizedArray{(3,2)}(rand(3,2)) - S33 = SizedArrays.SizedArray{(3,3)}(rand(3,3)) - S22 = SizedArrays.SizedArray{(2,2)}(rand(2,2)) - for uplo in (:L, :U) - B = Bidiagonal(fill(S32, 4), fill(S32, 3), uplo) - D = Diagonal(fill(S22, size(B,2))) - @test rmul!(copy(B), D) ≈ B * D - D = Diagonal(fill(S33, size(B,1))) - @test lmul!(D, copy(B)) ≈ D * B - end - - B = Bidiagonal(fill(S33, 4), fill(S33, 3), :U) - D = Diagonal(fill(S32, 4)) - @test lmul!(B, Array(D)) ≈ B * D - B = Bidiagonal(fill(S22, 4), fill(S22, 3), :U) - @test rmul!(Array(D), B) ≈ D * B - end -end - -@testset "rmul!/lmul! with numbers" begin - for T in (Bidiagonal(rand(4), rand(3), :U), Bidiagonal(rand(4), rand(3), :L)) - @test rmul!(copy(T), 0.2) ≈ rmul!(Array(T), 0.2) - @test lmul!(0.2, copy(T)) ≈ lmul!(0.2, Array(T)) - @test_throws ArgumentError rmul!(T, NaN) - @test_throws ArgumentError lmul!(NaN, T) - end - for T in (Bidiagonal(rand(1), rand(0), :U), Bidiagonal(rand(1), rand(0), :L)) - @test all(isnan, rmul!(copy(T), NaN)) - @test all(isnan, lmul!(NaN, copy(T))) - end -end - -@testset "mul with Diagonal" begin - for n in 0:4 - dv, ev = rand(n), rand(max(n-1,0)) - d = rand(n) - for uplo in (:U, :L) - A = Bidiagonal(dv, ev, uplo) - D = Diagonal(d) - M = Matrix(A) - S = similar(A, size(A)) - @test A * D ≈ mul!(S, A, D) ≈ M * D - @test D * A ≈ mul!(S, D, A) ≈ D * M - @test mul!(copy(S), D, A, 2, 2) ≈ D * M * 2 + S * 2 - @test mul!(copy(S), A, D, 2, 2) ≈ M * D * 2 + S * 2 - - A2 = Bidiagonal(dv, zero(ev), uplo) - M2 = Array(A2) - S2 = Bidiagonal(copy(dv), copy(ev), uplo == (:U) ? 
(:L) : (:U)) - MS2 = Array(S2) - @test mul!(copy(S2), D, A2) ≈ D * M2 - @test mul!(copy(S2), A2, D) ≈ M2 * D - @test mul!(copy(S2), A2, D, 2, 2) ≈ M2 * D * 2 + MS2 * 2 - @test mul!(copy(S2), D, A2, 2, 2) ≈ D * M2 * 2 + MS2 * 2 - end - end - - t1 = SizedArrays.SizedArray{(2,3)}([1 2 3; 3 4 5]) - t2 = SizedArrays.SizedArray{(3,2)}([1 2; 3 4; 5 6]) - dv, ev, d = fill(t1, 4), fill(2t1, 3), fill(t2, 4) - for uplo in (:U, :L) - A = Bidiagonal(dv, ev, uplo) - D = Diagonal(d) - @test A * D ≈ Array(A) * Array(D) - @test D * A ≈ Array(D) * Array(A) - end -end - -@testset "conversion to Tridiagonal for immutable bands" begin - n = 4 - dv = FillArrays.Fill(3, n) - ev = FillArrays.Fill(2, n-1) - z = FillArrays.Fill(0, n-1) - dvf = FillArrays.Fill(Float64(3), n) - evf = FillArrays.Fill(Float64(2), n-1) - zf = FillArrays.Fill(Float64(0), n-1) - B = Bidiagonal(dv, ev, :U) - @test Tridiagonal{Int}(B) === Tridiagonal(B) === Tridiagonal(z, dv, ev) - @test Tridiagonal{Float64}(B) === Tridiagonal(zf, dvf, evf) - B = Bidiagonal(dv, ev, :L) - @test Tridiagonal{Int}(B) === Tridiagonal(B) === Tridiagonal(ev, dv, z) - @test Tridiagonal{Float64}(B) === Tridiagonal(evf, dvf, zf) -end - -@testset "off-band indexing error" begin - B = Bidiagonal(Vector{BigInt}(undef, 4), Vector{BigInt}(undef,3), :L) - @test_throws "cannot set entry" B[1,2] = 4 -end - -@testset "mul with empty arrays" begin - A = zeros(5,0) - B = Bidiagonal(zeros(0), zeros(0), :U) - BL = Bidiagonal(zeros(5), zeros(4), :U) - @test size(A * B) == size(A) - @test size(BL * A) == size(A) - @test size(B * B) == size(B) - C = similar(A) - @test mul!(C, A, B) == A * B - @test mul!(C, BL, A) == BL * A - @test mul!(similar(B), B, B) == B * B - @test mul!(similar(B, size(B)), B, B) == B * B - - v = zeros(size(B,2)) - @test size(B * v) == size(v) - @test mul!(similar(v), B, v) == B * v - - D = Diagonal(zeros(size(B,2))) - @test size(B * D) == size(D * B) == size(D) - @test mul!(similar(D), B, D) == mul!(similar(D), D, B) == B * D -end - -@testset "mul for small matrices" begin - @testset for n in 0:6 - D = Diagonal(rand(n)) - v = rand(n) - @testset for uplo in (:L, :U) - B = Bidiagonal(rand(n), rand(max(n-1,0)), uplo) - M = Matrix(B) - - @test B * v ≈ M * v - @test mul!(similar(v), B, v) ≈ M * v - @test mul!(ones(size(v)), B, v, 2, 3) ≈ M * v * 2 .+ 3 - - @test B * B ≈ M * M - @test mul!(similar(B, size(B)), B, B) ≈ M * M - @test mul!(ones(size(B)), B, B, 2, 4) ≈ M * M * 2 .+ 4 - - for m in 0:6 - AL = rand(m,n) - AR = rand(n,m) - @test AL * B ≈ AL * M - @test B * AR ≈ M * AR - @test mul!(similar(AL), AL, B) ≈ AL * M - @test mul!(similar(AR), B, AR) ≈ M * AR - @test mul!(ones(size(AL)), AL, B, 2, 4) ≈ AL * M * 2 .+ 4 - @test mul!(ones(size(AR)), B, AR, 2, 4) ≈ M * AR * 2 .+ 4 - end - - @test B * D ≈ M * D - @test D * B ≈ D * M - @test mul!(similar(B), B, D) ≈ M * D - @test mul!(similar(B), B, D) ≈ M * D - @test mul!(similar(B, size(B)), D, B) ≈ D * M - @test mul!(similar(B, size(B)), B, D) ≈ M * D - @test mul!(ones(size(B)), D, B, 2, 4) ≈ D * M * 2 .+ 4 - @test mul!(ones(size(B)), B, D, 2, 4) ≈ M * D * 2 .+ 4 - end - BL = Bidiagonal(rand(n), rand(max(0, n-1)), :L) - ML = Matrix(BL) - BU = Bidiagonal(rand(n), rand(max(0, n-1)), :U) - MU = Matrix(BU) - T = Tridiagonal(zeros(max(0, n-1)), zeros(n), zeros(max(0, n-1))) - @test mul!(T, BL, BU) ≈ ML * MU - @test mul!(T, BU, BL) ≈ MU * ML - T = Tridiagonal(ones(max(0, n-1)), ones(n), ones(max(0, n-1))) - @test mul!(copy(T), BL, BU, 2, 3) ≈ ML * MU * 2 + T * 3 - @test mul!(copy(T), BU, BL, 2, 3) ≈ MU * ML * 2 + T * 3 - end 
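A standalone sketch of writing a Bidiagonal-times-Bidiagonal product into a Tridiagonal destination, as the small-matrix tests above do (random example data):

using LinearAlgebra

BL = Bidiagonal(rand(4), rand(3), :L)
BU = Bidiagonal(rand(4), rand(3), :U)
C  = Tridiagonal(zeros(3), zeros(4), zeros(3))
mul!(C, BL, BU) ≈ Matrix(BL) * Matrix(BU)   # true: lower*upper bidiagonal is tridiagonal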
- - n = 4 - arr = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) - for B in ( - Bidiagonal(fill(arr,n), fill(arr,n-1), :L), - Bidiagonal(fill(arr,n), fill(arr,n-1), :U), - ) - @test B * B ≈ Matrix(B) * Matrix(B) - BL = Bidiagonal(fill(arr,n), fill(arr,n-1), :L) - BU = Bidiagonal(fill(arr,n), fill(arr,n-1), :U) - @test BL * B ≈ Matrix(BL) * Matrix(B) - @test BU * B ≈ Matrix(BU) * Matrix(B) - @test B * BL ≈ Matrix(B) * Matrix(BL) - @test B * BU ≈ Matrix(B) * Matrix(BU) - D = Diagonal(fill(arr,n)) - @test D * B ≈ Matrix(D) * Matrix(B) - @test B * D ≈ Matrix(B) * Matrix(D) - end -end - -end # module TestBidiagonal diff --git a/stdlib/LinearAlgebra/test/blas.jl b/stdlib/LinearAlgebra/test/blas.jl deleted file mode 100644 index 80494da7babbe..0000000000000 --- a/stdlib/LinearAlgebra/test/blas.jl +++ /dev/null @@ -1,783 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestBLAS - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasReal, BlasComplex -using Libdl: dlsym, dlopen -fabs(x::Real) = abs(x) -fabs(x::Complex) = abs(real(x)) + abs(imag(x)) - -# help function to build packed storage -function pack(A, uplo) - AP = eltype(A)[] - n = size(A, 1) - for j in 1:n, i in (uplo === :L ? (j:n) : (1:j)) - push!(AP, A[i,j]) - end - return AP -end - -@testset "vec_pointer_stride" begin - a = float(rand(1:20,4,4,4)) - @test BLAS.asum(a) == sum(a) # dense case - @test BLAS.asum(view(a,1:2:4,:,:)) == sum(view(a,1:2:4,:,:)) # vector like - @test BLAS.asum(view(a,1:3,2:2,3:3)) == sum(view(a,1:3,2:2,3:3)) - @test BLAS.asum(view(a,1:1,1:3,1:1)) == sum(view(a,1:1,1:3,1:1)) - @test BLAS.asum(view(a,1:1,1:1,1:3)) == sum(view(a,1:1,1:1,1:3)) - @test_throws ArgumentError BLAS.asum(view(a,1:3:4,:,:)) # non-vector like - @test_throws ArgumentError BLAS.asum(view(a,1:2,1:1,1:3)) -end -Random.seed!(100) -## BLAS tests - testing the interface code to BLAS routines -@testset for elty in [Float32, Float64, ComplexF32, ComplexF64] - - @testset "syr2k!" begin - U = randn(elty, 5, 2) - V = randn(elty, 5, 2) - @test tril(LinearAlgebra.BLAS.syr2k('L','N',U,V)) ≈ tril(U*transpose(V) + V*transpose(U)) - @test triu(LinearAlgebra.BLAS.syr2k('U','N',U,V)) ≈ triu(U*transpose(V) + V*transpose(U)) - @test tril(LinearAlgebra.BLAS.syr2k('L','T',U,V)) ≈ tril(transpose(U)*V + transpose(V)*U) - @test triu(LinearAlgebra.BLAS.syr2k('U','T',U,V)) ≈ triu(transpose(U)*V + transpose(V)*U) - end - - if elty in (ComplexF32, ComplexF64) - @testset "her2k!" 
begin - U = randn(elty, 5, 2) - V = randn(elty, 5, 2) - @test tril(LinearAlgebra.BLAS.her2k('L','N',U,V)) ≈ tril(U*V' + V*U') - @test triu(LinearAlgebra.BLAS.her2k('U','N',U,V)) ≈ triu(U*V' + V*U') - @test tril(LinearAlgebra.BLAS.her2k('L','C',U,V)) ≈ tril(U'*V + V'*U) - @test triu(LinearAlgebra.BLAS.her2k('U','C',U,V)) ≈ triu(U'*V + V'*U) - end - end - - o4 = fill(elty(1), 4) - z4 = zeros(elty, 4) - - I4 = Matrix{elty}(I, 4, 4) - I43 = Matrix{elty}(I, 4, 3) - L4 = tril(fill(elty(1), 4,4)) - U4 = triu(fill(elty(1), 4,4)) - Z4 = zeros(elty, (4,4)) - - elm1 = elty(-1) - el2 = elty(2) - v14 = elty[1:4;] - v41 = elty[4:-1:1;] - - let n = 10 - @testset "dot products" begin - if elty <: Real - x1 = randn(elty, n) - x2 = randn(elty, n) - @test BLAS.dot(x1,x2) ≈ sum(x1.*x2) - @test_throws DimensionMismatch BLAS.dot(x1,rand(elty, n + 1)) - else - z1 = randn(elty, n) - z2 = randn(elty, n) - @test BLAS.dotc(z1,z2) ≈ sum(conj(z1).*z2) - @test BLAS.dotu(z1,z2) ≈ sum(z1.*z2) - @test_throws DimensionMismatch BLAS.dotc(z1,rand(elty, n + 1)) - @test_throws DimensionMismatch BLAS.dotu(z1,rand(elty, n + 1)) - end - end - @testset "iamax" begin - x = randn(elty, n) - @test BLAS.iamax(x) == findmax(fabs, x)[2] - end - @testset "rot!" begin - x = randn(elty, n) - y = randn(elty, n) - c = rand(real(elty)) - for sty in unique!([real(elty), elty]) - s = rand(sty) - x2 = copy(x) - y2 = copy(y) - BLAS.rot!(n, x, 1, y, 1, c, s) - @test x ≈ c*x2 + s*y2 - @test y ≈ -conj(s)*x2 + c*y2 - end - end - @testset "axp(b)y" begin - x1 = randn(elty, n) - x2 = randn(elty, n) - α = rand(elty) - β = rand(elty) - for X1 in (x1, view(x1,n:-1:1)), X2 in (x2, view(x2, n:-1:1)) - @test BLAS.axpy!(α,deepcopy(X1),deepcopy(X2)) ≈ α*X1 + X2 - @test BLAS.axpby!(α,deepcopy(X1),β,deepcopy(X2)) ≈ α*X1 + β*X2 - end - for ind1 in (1:n, n:-1:1), ind2 in (1:n, n:-1:1) - @test BLAS.axpy!(α,copy(x1),ind1,copy(x2),ind2) ≈ x2 + α*(ind1 == ind2 ? x1 : reverse(x1)) - end - @test_throws DimensionMismatch BLAS.axpy!(α, copy(x1), rand(elty, n + 1)) - @test_throws DimensionMismatch BLAS.axpby!(α, copy(x1), β, rand(elty, n + 1)) - @test_throws DimensionMismatch BLAS.axpy!(α, copy(x1), 1:div(n,2), copy(x2), 1:n) - @test_throws ArgumentError BLAS.axpy!(α, copy(x1), 0:div(n,2), copy(x2), 1:(div(n, 2) + 1)) - @test_throws ArgumentError BLAS.axpy!(α, copy(x1), 1:div(n,2), copy(x2), 0:(div(n, 2) - 1)) - end - @testset "nrm2, iamax, and asum for StridedVectors" begin - a = rand(elty,n) - for ind in (2:2:n, n:-2:2) - b = view(a, ind, 1) - @test BLAS.nrm2(b) ≈ sqrt(sum(abs2, b)) - @test BLAS.asum(b) ≈ sum(fabs, b) - @test BLAS.iamax(b) == findmax(fabs, b)[2] * (step(ind) >= 0) - end - end - @testset "nrm2 with non-finite elements" begin - # These tests would have caught - # when running on appropriate hardware. - a = zeros(elty,n) - a[begin] = elty(-Inf) - @test BLAS.nrm2(a) === abs2(elty(Inf)) - a[begin] = elty(NaN) - @test BLAS.nrm2(a) === abs2(elty(NaN)) - end - @testset "deterministic mul!" begin - # mul! 
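A brief sketch of the rank-2k update checked above (random example data); only the requested triangle of the result is meaningful:

using LinearAlgebra

U = randn(5, 2); V = randn(5, 2)
S = BLAS.syr2k('L', 'N', U, V)
tril(S) ≈ tril(U * transpose(V) + V * transpose(U))   # true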
should be deterministic, see #53054 - function tester_53054() - C = ComplexF32 - mat = zeros(C, 1, 1) - for _ in 1:100 - v = [C(1-0.2im) C(2+0.3im)] - mul!(mat, v, v', C(1+im), 1) - end - return mat - end - @test allequal(tester_53054() for _ in 1:10000) - end - @testset "scal" begin - α = rand(elty) - a = rand(elty,n) - @test BLAS.scal(n,α,a,1) ≈ α * a - for v in (a, view(a, n:-1:1)) - @test BLAS.scal!(α, deepcopy(v)) ≈ α * v - end - end - - @testset "ger, geru, her, syr" for x in (rand(elty, n), view(rand(elty,2n), 1:2:2n), view(rand(elty,n), n:-1:1)), - y in (rand(elty,n), view(rand(elty,3n), 1:3:3n), view(rand(elty,2n), 2n:-2:2)) - - A = rand(elty,n,n) - α = rand(elty) - - @test BLAS.ger!(α,x,y,copy(A)) ≈ A + α*x*y' - @test_throws DimensionMismatch BLAS.ger!(α,Vector{elty}(undef,n+1),y,copy(A)) - - @test BLAS.geru!(α,x,y,copy(A)) ≈ A + α*x*transpose(y) - @test_throws DimensionMismatch BLAS.geru!(α,Vector{elty}(undef,n+1),y,copy(A)) - - A = rand(elty,n,n) - A = A + transpose(A) - @test issymmetric(A) - @test triu(BLAS.syr!('U',α,x,copy(A))) ≈ triu(A + α*x*transpose(x)) - @test_throws DimensionMismatch BLAS.syr!('U',α,Vector{elty}(undef,n+1),copy(A)) - - if elty <: Complex - A = rand(elty,n,n) - A = A + A' - α = real(α) - @test triu(BLAS.her!('U',α,x,copy(A))) ≈ triu(A + α*x*x') - @test_throws DimensionMismatch BLAS.her!('U',α,Vector{elty}(undef,n+1),copy(A)) - end - end - @testset "copy" begin - x1 = randn(elty, n) - x2 = randn(elty, n) - for ind1 in (1:n, n:-1:1), ind2 in (1:n, n:-1:1) - @test x2 === BLAS.copyto!(x2, ind1, x1, ind2) == (ind1 == ind2 ? x1 : reverse(x1)) - end - @test_throws DimensionMismatch BLAS.copyto!(x2, 1:n, x1, 1:(n - 1)) - @test_throws ArgumentError BLAS.copyto!(x1, 0:div(n, 2), x2, 1:(div(n, 2) + 1)) - @test_throws ArgumentError BLAS.copyto!(x1, 1:(div(n, 2) + 1), x2, 0:div(n, 2)) - end - @testset "trmv and trsv" begin - A = rand(elty,n,n) - x = rand(elty,n) - xerr = Vector{elty}(undef,n+1) - for uplo in ('U', 'L'), diag in ('U','N'), trans in ('N', 'T', 'C') - Wrapper = if uplo == 'U' - diag == 'U' ? UnitUpperTriangular : UpperTriangular - else - diag == 'U' ? UnitLowerTriangular : LowerTriangular - end - fun = trans == 'N' ? identity : trans == 'T' ? 
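A small sketch of the rank-1 update exercised above (random example data):

using LinearAlgebra

A = rand(3, 3); x = rand(3); y = rand(3)
Aref = A + 2.0 * x * y'
BLAS.ger!(2.0, x, y, A)   # in-place A .+= 2 * x * y'
A ≈ Aref                  # true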
transpose : adjoint - fullA = collect(fun(Wrapper(A))) - @testset "trmv" begin - @test BLAS.trmv(uplo,trans,diag,A,x) ≈ fullA * x - @test_throws DimensionMismatch BLAS.trmv(uplo,trans,diag,A,xerr) - for xx in (x, view(x, n:-1:1)) - @test BLAS.trmv!(uplo,trans,diag,A,deepcopy(xx)) ≈ fullA * xx - end - end - @testset "trsv" begin - @test BLAS.trsv(uplo,trans,diag,A,x) ≈ fullA \ x - @test_throws DimensionMismatch BLAS.trsv(uplo,trans,diag,A,xerr) - for xx in (x, view(x, n:-1:1)) - @test BLAS.trsv!(uplo,trans,diag,A,deepcopy(xx)) ≈ fullA \ xx - end - end - end - end - @testset "symmetric/Hermitian multiplication" begin - x = rand(elty,n) - A = rand(elty,n,n) - y = rand(elty, n) - α = randn(elty) - β = randn(elty) - Aherm = A + A' - Asymm = A + transpose(A) - offsizevec, offsizemat = Array{elty}.(undef,(n+1, (n,n+1))) - @testset "symv and hemv" for uplo in ('U', 'L') - @test BLAS.symv(uplo,Asymm,x) ≈ Asymm*x - for xx in (x, view(x, n:-1:1)), yy in (y, view(y, n:-1:1)) - @test BLAS.symv!(uplo,α,Asymm,xx,β,deepcopy(yy)) ≈ α * Asymm * xx + β * yy - end - @test_throws DimensionMismatch BLAS.symv!(uplo,α,Asymm,x,β,offsizevec) - @test_throws DimensionMismatch BLAS.symv(uplo,offsizemat,x) - if elty <: BlasComplex - @test BLAS.hemv(uplo,Aherm,x) ≈ Aherm*x - for xx in (x, view(x, n:-1:1)), yy in (y, view(y, n:-1:1)) - @test BLAS.hemv!(uplo,α,Aherm,xx,β,deepcopy(yy)) ≈ α * Aherm * xx + β * yy - end - @test_throws DimensionMismatch BLAS.hemv(uplo,offsizemat,x) - @test_throws DimensionMismatch BLAS.hemv!(uplo,one(elty),Aherm,x,one(elty),offsizevec) - end - end - - @testset "symm error throwing" begin - Cnn, Cnm, Cmn = Matrix{elty}.(undef,((n,n), (n,n-1), (n-1,n))) - @test_throws DimensionMismatch BLAS.symm('L','U',Cnm,Cnn) - @test_throws DimensionMismatch BLAS.symm('R','U',Cmn,Cnn) - @test_throws DimensionMismatch BLAS.symm!('L','U',one(elty),Asymm,Cnn,one(elty),Cmn) - @test_throws DimensionMismatch BLAS.symm!('L','U',one(elty),Asymm,Cnn,one(elty),Cnm) - @test_throws DimensionMismatch BLAS.symm!('L','U',one(elty),Asymm,Cmn,one(elty),Cnn) - @test_throws DimensionMismatch BLAS.symm!('R','U',one(elty),Asymm,Cnm,one(elty),Cmn) - @test_throws DimensionMismatch BLAS.symm!('R','U',one(elty),Asymm,Cnn,one(elty),Cnm) - @test_throws DimensionMismatch BLAS.symm!('R','U',one(elty),Asymm,Cmn,one(elty),Cnn) - if elty <: BlasComplex - @test_throws DimensionMismatch BLAS.hemm('L','U',Cnm,Cnn) - @test_throws DimensionMismatch BLAS.hemm('R','U',Cmn,Cnn) - @test_throws DimensionMismatch BLAS.hemm!('L','U',one(elty),Aherm,Cnn,one(elty),Cmn) - @test_throws DimensionMismatch BLAS.hemm!('L','U',one(elty),Aherm,Cnn,one(elty),Cnm) - @test_throws DimensionMismatch BLAS.hemm!('L','U',one(elty),Aherm,Cmn,one(elty),Cnn) - @test_throws DimensionMismatch BLAS.hemm!('R','U',one(elty),Aherm,Cnm,one(elty),Cmn) - @test_throws DimensionMismatch BLAS.hemm!('R','U',one(elty),Aherm,Cnn,one(elty),Cnm) - @test_throws DimensionMismatch BLAS.hemm!('R','U',one(elty),Aherm,Cmn,one(elty),Cnn) - end - end - end - @testset "trmm error throwing" begin - Cnn, Cmn, Cnm = Matrix{elty}.(undef,((n,n), (n+1,n), (n,n+1))) - @test_throws DimensionMismatch BLAS.trmm('L','U','N','N',one(elty),triu(Cnn),Cmn) - @test_throws DimensionMismatch BLAS.trmm('R','U','N','N',one(elty),triu(Cnn),Cnm) - end - - # hpmv! - if elty in (ComplexF32, ComplexF64) - @testset "hpmv!" begin - # Both matrix dimensions n coincide, as we have Hermitian matrices. 
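A compact sketch of the triangular solve tested above; trsv reads only the requested triangle of the dense storage (made-up, diagonally dominated example):

using LinearAlgebra

A = rand(4, 4) + 4I       # keep the triangle well conditioned
b = rand(4)
x = BLAS.trsv('U', 'N', 'N', A, b)
UpperTriangular(A) * x ≈ b   # true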
- # Define the inputs and outputs of hpmv!, y = α*A*x+β*y - α = rand(elty) - A = rand(elty, n, n) - x = rand(elty, n) - β = rand(elty) - y = rand(elty, n) - for uplo in (:L, :U) - Cuplo = String(uplo)[1] - AH = Hermitian(A, uplo) - # Create lower/upper triangular packing of AL - AP = pack(AH, uplo) - for xx in (x, view(x,n:-1:1)), yy in (y, view(y,n:-1:1)) - @test BLAS.hpmv!(Cuplo, α, AP, xx, β, deepcopy(yy)) ≈ α*AH*xx + β*yy - end - AP′ = view(zeros(elty, n*(n+1)),1:2:n*(n+1)) - @test_throws ErrorException BLAS.hpmv!(Cuplo, α, AP′, x, β, y) - AP′ = view(AP, 1:length(AP′) - 1) - @test_throws DimensionMismatch BLAS.hpmv!(Cuplo, α, AP′, x, β, y) - @test_throws DimensionMismatch BLAS.hpmv!(Cuplo, α, AP′, x, β, view(y,1:n-1)) - end - end - end - - # spmv! - if elty in (Float32, Float64) - @testset "spmv!" begin - # Both matrix dimensions n coincide, as we have symmetric matrices. - # Define the inputs and outputs of spmv!, y = α*A*x+β*y - α = rand(elty) - A = rand(elty, n, n) - x = rand(elty, n) - β = rand(elty) - y = rand(elty, n) - for uplo in (:L, :U) - Cuplo = String(uplo)[1] - AS = Symmetric(A, uplo) - # Create lower/upper triangular packing of AL - AP = pack(AS, uplo) - for xx in (x, view(x,n:-1:1)), yy in (y, view(y,n:-1:1)) - @test BLAS.spmv!(Cuplo, α, AP, xx, β, deepcopy(yy)) ≈ α*AS*xx + β*yy - end - AP′ = view(zeros(elty, n*(n+1)),1:2:n*(n+1)) - @test_throws ErrorException BLAS.spmv!(Cuplo, α, AP′, x, β, y) - AP′ = view(AP, 1:length(AP′) - 1) - @test_throws DimensionMismatch BLAS.spmv!(Cuplo, α, AP′, x, β, y) - @test_throws DimensionMismatch BLAS.spmv!(Cuplo, α, AP′, x, β, view(y,1:n-1)) - end - end - end - - # spr! - if elty in (Float32, Float64) - @testset "spr! $elty" begin - α = rand(elty) - M = rand(elty, n, n) - AL = Symmetric(M, :L) - AU = Symmetric(M, :U) - for x in (rand(elty, n), view(rand(elty, n), n:-1:1)) - ALP_result_julia_lower = pack(α*x*x' + AL, :L) - ALP_result_blas_lower = pack(AL, :L) - BLAS.spr!('L', α, x, ALP_result_blas_lower) - @test ALP_result_julia_lower ≈ ALP_result_blas_lower - ALP_result_blas_lower = append!(pack(AL, :L), ones(elty, 10)) - BLAS.spr!('L', α, x, ALP_result_blas_lower) - @test ALP_result_julia_lower ≈ ALP_result_blas_lower[1:end-10] - ALP_result_blas_lower = reshape(pack(AL, :L), 1, length(ALP_result_julia_lower), 1) - BLAS.spr!('L', α, x, ALP_result_blas_lower) - @test ALP_result_julia_lower ≈ vec(ALP_result_blas_lower) - - AUP_result_julia_upper = pack(α*x*x' + AU, :U) - AUP_result_blas_upper = pack(AU, :U) - BLAS.spr!('U', α, x, AUP_result_blas_upper) - @test AUP_result_julia_upper ≈ AUP_result_blas_upper - AUP_result_blas_upper = append!(pack(AU, :U), ones(elty, 10)) - BLAS.spr!('U', α, x, AUP_result_blas_upper) - @test AUP_result_julia_upper ≈ AUP_result_blas_upper[1:end-10] - AUP_result_blas_upper = reshape(pack(AU, :U), 1, length(AUP_result_julia_upper), 1) - BLAS.spr!('U', α, x, AUP_result_blas_upper) - @test AUP_result_julia_upper ≈ vec(AUP_result_blas_upper) - end - end - end - - #trsm - A = triu(rand(elty,n,n)) - B = rand(elty,(n,n)) - @test BLAS.trsm('L','U','N','N',one(elty),A,B) ≈ A\B - - #will work for SymTridiagonal,Tridiagonal,Bidiagonal! - @testset "banded matrix mv" begin - @testset "gbmv" begin - TD = Tridiagonal(rand(elty,n-1),rand(elty,n),rand(elty,n-1)) - x = rand(elty, n) - #put TD into the BLAS format! 
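A standalone sketch of the packed symmetric multiply y = α*A*x + β*y tested above, with the upper triangle packed column by column as in the pack helper (random example data):

using LinearAlgebra

n  = 4
A  = Symmetric(rand(n, n), :U)
AP = [A[i, j] for j in 1:n for i in 1:j]   # packed upper triangle, column-major
x  = rand(n); y = rand(n)
yref = 2.0 * A * x + 3.0 * y
BLAS.spmv!('U', 2.0, AP, x, 3.0, y)
y ≈ yref   # true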
- fTD = zeros(elty,3,n) - fTD[1,2:n] = TD.du - fTD[2,:] = TD.d - fTD[3,1:n-1] = TD.dl - @test BLAS.gbmv('N',n,1,1,fTD,x) ≈ TD*x - y = rand(elty, n) - α = randn(elty) - β = randn(elty) - for xx in (x, view(x, n:-1:1)), yy in (y, view(y, n:-1:1)) - @test BLAS.gbmv!('N',n,1,1,α,fTD,xx,β,deepcopy(yy)) ≈ α * TD * xx + β * yy - end - end - #will work for SymTridiagonal only! - @testset "sbmv and hbmv" begin - x = rand(elty,n) - if elty <: BlasReal - ST = SymTridiagonal(rand(elty,n),rand(elty,n-1)) - #put TD into the BLAS format! - fST = zeros(elty,2,n) - fST[1,2:n] = ST.ev - fST[2,:] = ST.dv - @test BLAS.sbmv('U',1,fST,x) ≈ ST*x - y = rand(elty, n) - α = randn(elty) - β = randn(elty) - for xx in (x, view(x, n:-1:1)), yy in (y, view(y, n:-1:1)) - @test BLAS.sbmv!('U',1,α,fST,xx,β,deepcopy(yy)) ≈ α * ST * xx + β * yy - end - else - dv = rand(real(elty),n) - ev = rand(elty,n-1) - bH = zeros(elty,2,n) - bH[1,2:n] = ev - bH[2,:] = dv - fullH = diagm(0 => dv, -1 => conj(ev), 1 => ev) - @test BLAS.hbmv('U',1,bH,x) ≈ fullH*x - y = rand(elty, n) - α = randn(elty) - β = randn(elty) - for xx in (x, view(x, n:-1:1)), yy in (y, view(y, n:-1:1)) - @test BLAS.hbmv!('U',1,α,bH,xx,β,deepcopy(yy)) ≈ α * fullH * xx + β * yy - end - end - end - end - end - - @testset "gemv" begin - @test all(BLAS.gemv('N', I4, o4) .== o4) - @test all(BLAS.gemv('T', I4, o4) .== o4) - @test all(BLAS.gemv('N', el2, I4, o4) .== el2 * o4) - @test all(BLAS.gemv('T', el2, I4, o4) .== el2 * o4) - @test_throws DimensionMismatch BLAS.gemv('N',I43,o4) - o4cp = copy(o4) - @test_throws DimensionMismatch BLAS.gemv!('T',one(elty),I43,o4,one(elty),o4cp) - @test_throws DimensionMismatch BLAS.gemv!('C',one(elty),I43,o4,one(elty),o4cp) - @test all(BLAS.gemv!('N', one(elty), I4, o4, elm1, o4cp) .== z4) - @test all(o4cp .== z4) - o4cp[:] = o4 - @test all(BLAS.gemv!('T', one(elty), I4, o4, elm1, o4cp) .== z4) - @test all(o4cp .== z4) - @test all(BLAS.gemv('N', U4, o4) .== v41) - @test all(BLAS.gemv('N', U4, o4) .== v41) - @testset "non-standard strides" begin - A = rand(elty, 3, 4) - x = rand(elty, 5) - for y = (view(ones(elty, 5), 1:2:5), view(ones(elty, 7), 6:-2:2)) - ycopy = copy(y) - @test BLAS.gemv!('N', elty(2), view(A, :, 2:2:4), view(x, 1:3:4), elty(3), y) ≈ 2*A[:,2:2:4]*x[1:3:4] + 3*ycopy - ycopy = copy(y) - @test BLAS.gemv!('N', elty(2), view(A, :, 4:-2:2), view(x, 1:3:4), elty(3), y) ≈ 2*A[:,4:-2:2]*x[1:3:4] + 3*ycopy - ycopy = copy(y) - @test BLAS.gemv!('N', elty(2), view(A, :, 2:2:4), view(x, 4:-3:1), elty(3), y) ≈ 2*A[:,2:2:4]*x[4:-3:1] + 3*ycopy - ycopy = copy(y) - @test BLAS.gemv!('N', elty(2), view(A, :, 4:-2:2), view(x, 4:-3:1), elty(3), y) ≈ 2*A[:,4:-2:2]*x[4:-3:1] + 3*ycopy - ycopy = copy(y) - @test BLAS.gemv!('N', elty(2), view(A, :, StepRangeLen(1,0,1)), view(x, 1:1), elty(3), y) ≈ 2*A[:,1:1]*x[1:1] + 3*ycopy # stride(A,2) == 0 - end - @test BLAS.gemv!('N', elty(1), zeros(elty, 0, 5), zeros(elty, 5), elty(1), zeros(elty, 0)) == elty[] # empty matrix, stride(A,2) == 0 - @test BLAS.gemv('N', elty(-1), view(A, 2:3, 1:2:3), view(x, 2:-1:1)) ≈ -1*A[2:3,1:2:3]*x[2:-1:1] - @test BLAS.gemv('N', view(A, 2:3, 3:-2:1), view(x, 1:2:3)) ≈ A[2:3,3:-2:1]*x[1:2:3] - for (trans, f) = (('T',transpose), ('C',adjoint)) - for y = (view(ones(elty, 3), 1:2:3), view(ones(elty, 5), 4:-2:2)) - ycopy = copy(y) - @test BLAS.gemv!(trans, elty(2), view(A, :, 2:2:4), view(x, 1:2:5), elty(3), y) ≈ 2*f(A[:,2:2:4])*x[1:2:5] + 3*ycopy - ycopy = copy(y) - @test BLAS.gemv!(trans, elty(2), view(A, :, 4:-2:2), view(x, 1:2:5), elty(3), y) ≈ 2*f(A[:,4:-2:2])*x[1:2:5] + 
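A short sketch of the band storage built above for gbmv: one row per band, superdiagonal first (random example data):

using LinearAlgebra

n  = 5
TD = Tridiagonal(rand(n - 1), rand(n), rand(n - 1))
band = zeros(3, n)                 # kl + ku + 1 = 3 rows
band[1, 2:n]     = TD.du           # superdiagonal
band[2, :]       = TD.d            # main diagonal
band[3, 1:n - 1] = TD.dl           # subdiagonal
x = rand(n)
BLAS.gbmv('N', n, 1, 1, band, x) ≈ TD * x   # true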
-                    ycopy = copy(y)
-                    @test BLAS.gemv!(trans, elty(2), view(A, :, 2:2:4), view(x, 5:-2:1), elty(3), y) ≈ 2*f(A[:,2:2:4])*x[5:-2:1] + 3*ycopy
-                    ycopy = copy(y)
-                    @test BLAS.gemv!(trans, elty(2), view(A, :, 4:-2:2), view(x, 5:-2:1), elty(3), y) ≈ 2*f(A[:,4:-2:2])*x[5:-2:1] + 3*ycopy
-                end
-                @test BLAS.gemv!(trans, elty(2), view(A, :, StepRangeLen(1,0,1)), view(x, 1:2:5), elty(3), elty[1]) ≈ 2*f(A[:,1:1])*x[1:2:5] + elty[3] # stride(A,2) == 0
-            end
-            for trans = ('N', 'T', 'C')
-                @test_throws ErrorException BLAS.gemv(trans, view(A, 1:2:3, 1:2), view(x, 1:2)) # stride(A,1) must be 1
-            end
-        end
-    end
-    @testset "gemmt" begin
-        for (wrapper, uplo) in ((LowerTriangular, 'L'), (UpperTriangular, 'U'))
-            @test wrapper(BLAS.gemmt(uplo, 'N', 'N', I4, I4)) ≈ wrapper(I4)
-            @test wrapper(BLAS.gemmt(uplo, 'N', 'T', I4, I4)) ≈ wrapper(I4)
-            @test wrapper(BLAS.gemmt(uplo, 'T', 'N', I4, I4)) ≈ wrapper(I4)
-            @test wrapper(BLAS.gemmt(uplo, 'T', 'T', I4, I4)) ≈ wrapper(I4)
-            @test wrapper(BLAS.gemmt(uplo, 'N', 'N', el2, I4, I4)) ≈ wrapper(el2 * I4)
-            @test wrapper(BLAS.gemmt(uplo, 'N', 'T', el2, I4, I4)) ≈ wrapper(el2 * I4)
-            @test wrapper(BLAS.gemmt(uplo, 'T', 'N', el2, I4, I4)) ≈ wrapper(el2 * I4)
-            @test wrapper(BLAS.gemmt(uplo, 'T', 'T', el2, I4, I4)) ≈ wrapper(el2 * I4)
-            I4cp = copy(I4)
-            @test wrapper(BLAS.gemmt!(uplo, 'N', 'N', one(elty), I4, I4, elm1, I4cp)) ≈ wrapper(Z4)
-            @test I4cp ≈ Z4
-            I4cp[:] = I4
-            @test wrapper(BLAS.gemmt!(uplo, 'N', 'T', one(elty), I4, I4, elm1, I4cp)) ≈ wrapper(Z4)
-            @test I4cp ≈ Z4
-            I4cp[:] = I4
-            @test wrapper(BLAS.gemmt!(uplo, 'T', 'N', one(elty), I4, I4, elm1, I4cp)) ≈ wrapper(Z4)
-            @test I4cp ≈ Z4
-            I4cp[:] = I4
-            @test wrapper(BLAS.gemmt!(uplo, 'T', 'T', one(elty), I4, I4, elm1, I4cp)) ≈ wrapper(Z4)
-            @test I4cp ≈ Z4
-            M1 = uplo == 'U' ? U4 : I4
-            @test wrapper(BLAS.gemmt(uplo, 'N', 'N', I4, U4)) ≈ wrapper(M1)
-            M2 = uplo == 'U' ?
I4 : U4' - @test wrapper(BLAS.gemmt(uplo, 'N', 'T', I4, U4)) ≈ wrapper(M2) - @test_throws DimensionMismatch BLAS.gemmt!(uplo, 'N', 'N', one(elty), I43, I4, elm1, I43) - @test_throws DimensionMismatch BLAS.gemmt!(uplo, 'N', 'N', one(elty), I4, I4, elm1, Matrix{elty}(I, 5, 5)) - @test_throws DimensionMismatch BLAS.gemmt!(uplo, 'N', 'N', one(elty), I43, I4, elm1, I4) - @test_throws DimensionMismatch BLAS.gemmt!(uplo, 'T', 'N', one(elty), I4, I43, elm1, I43) - @test_throws DimensionMismatch BLAS.gemmt!(uplo, 'N', 'T', one(elty), I43, I43, elm1, I43) - @test_throws DimensionMismatch BLAS.gemmt!(uplo, 'T', 'T', one(elty), I43, I43, elm1, Matrix{elty}(I, 3, 4)) - end - end - @testset "gemm" begin - @test all(BLAS.gemm('N', 'N', I4, I4) .== I4) - @test all(BLAS.gemm('N', 'T', I4, I4) .== I4) - @test all(BLAS.gemm('T', 'N', I4, I4) .== I4) - @test all(BLAS.gemm('T', 'T', I4, I4) .== I4) - @test all(BLAS.gemm('N', 'N', el2, I4, I4) .== el2 * I4) - @test all(BLAS.gemm('N', 'T', el2, I4, I4) .== el2 * I4) - @test all(BLAS.gemm('T', 'N', el2, I4, I4) .== el2 * I4) - @test all(BLAS.gemm('T', 'T', el2, I4, I4) .== el2 * I4) - I4cp = copy(I4) - @test all(BLAS.gemm!('N', 'N', one(elty), I4, I4, elm1, I4cp) .== Z4) - @test all(I4cp .== Z4) - I4cp[:] = I4 - @test all(BLAS.gemm!('N', 'T', one(elty), I4, I4, elm1, I4cp) .== Z4) - @test all(I4cp .== Z4) - I4cp[:] = I4 - @test all(BLAS.gemm!('T', 'N', one(elty), I4, I4, elm1, I4cp) .== Z4) - @test all(I4cp .== Z4) - I4cp[:] = I4 - @test all(BLAS.gemm!('T', 'T', one(elty), I4, I4, elm1, I4cp) .== Z4) - @test all(I4cp .== Z4) - @test all(BLAS.gemm('N', 'N', I4, U4) .== U4) - @test all(BLAS.gemm('N', 'T', I4, U4) .== L4) - @test_throws DimensionMismatch BLAS.gemm!('N','N', one(elty), I4, I4, elm1, Matrix{elty}(I, 5, 5)) - @test_throws DimensionMismatch BLAS.gemm!('N','N', one(elty), I43, I4, elm1, I4) - @test_throws DimensionMismatch BLAS.gemm!('T','N', one(elty), I43, I4, elm1, I43) - @test_throws DimensionMismatch BLAS.gemm!('N','T', one(elty), I43, I43, elm1, I43) - @test_throws DimensionMismatch BLAS.gemm!('T','T', one(elty), I43, I43, elm1, Matrix{elty}(I, 3, 4)) - end - @testset "gemm compared to (sy)(he)rk" begin - if eltype(elm1) <: Complex - @test all(triu(BLAS.herk('U', 'N', U4)) .== triu(BLAS.gemm('N', 'T', U4, U4))) - @test all(tril(BLAS.herk('L', 'N', U4)) .== tril(BLAS.gemm('N', 'T', U4, U4))) - @test all(triu(BLAS.herk('U', 'N', L4)) .== triu(BLAS.gemm('N', 'T', L4, L4))) - @test all(tril(BLAS.herk('L', 'N', L4)) .== tril(BLAS.gemm('N', 'T', L4, L4))) - @test all(triu(BLAS.herk('U', 'C', U4)) .== triu(BLAS.gemm('T', 'N', U4, U4))) - @test all(tril(BLAS.herk('L', 'C', U4)) .== tril(BLAS.gemm('T', 'N', U4, U4))) - @test all(triu(BLAS.herk('U', 'C', L4)) .== triu(BLAS.gemm('T', 'N', L4, L4))) - @test all(tril(BLAS.herk('L', 'C', L4)) .== tril(BLAS.gemm('T', 'N', L4, L4))) - ans = similar(L4) - @test all(tril(BLAS.herk('L','C', L4)) .== tril(BLAS.herk!('L', 'C', real(one(elty)), L4, real(zero(elty)), ans))) - @test all(LinearAlgebra.copytri!(ans, 'L') .== LinearAlgebra.BLAS.gemm('T', 'N', L4, L4)) - @test_throws DimensionMismatch BLAS.herk!('L','N',real(one(elty)),Matrix{elty}(I, 5, 5),real(one(elty)), Matrix{elty}(I, 6, 6)) - else - @test all(triu(BLAS.syrk('U', 'N', U4)) .== triu(BLAS.gemm('N', 'T', U4, U4))) - @test all(tril(BLAS.syrk('L', 'N', U4)) .== tril(BLAS.gemm('N', 'T', U4, U4))) - @test all(triu(BLAS.syrk('U', 'N', L4)) .== triu(BLAS.gemm('N', 'T', L4, L4))) - @test all(tril(BLAS.syrk('L', 'N', L4)) .== tril(BLAS.gemm('N', 'T', L4, L4))) - 
@test all(triu(BLAS.syrk('U', 'T', U4)) .== triu(BLAS.gemm('T', 'N', U4, U4))) - @test all(tril(BLAS.syrk('L', 'T', U4)) .== tril(BLAS.gemm('T', 'N', U4, U4))) - @test all(triu(BLAS.syrk('U', 'T', L4)) .== triu(BLAS.gemm('T', 'N', L4, L4))) - @test all(tril(BLAS.syrk('L', 'T', L4)) .== tril(BLAS.gemm('T', 'N', L4, L4))) - ans = similar(L4) - @test all(tril(BLAS.syrk('L','T', L4)) .== tril(BLAS.syrk!('L', 'T', one(elty), L4, zero(elty), ans))) - @test all(LinearAlgebra.copytri!(ans, 'L') .== BLAS.gemm('T', 'N', L4, L4)) - @test_throws DimensionMismatch BLAS.syrk!('L','N',one(elty), Matrix{elty}(I, 5, 5),one(elty), Matrix{elty}(I, 6, 6)) - end - end -end - -@testset "syr for eltype $elty" for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty, 5, 5) - @test triu(A[1,:] * transpose(A[1,:])) ≈ BLAS.syr!('U', one(elty), A[1,:], zeros(elty, 5, 5)) - @test tril(A[1,:] * transpose(A[1,:])) ≈ BLAS.syr!('L', one(elty), A[1,:], zeros(elty, 5, 5)) - @test triu(A[1,:] * transpose(A[1,:])) ≈ BLAS.syr!('U', one(elty), view(A, 1, :), zeros(elty, 5, 5)) - @test tril(A[1,:] * transpose(A[1,:])) ≈ BLAS.syr!('L', one(elty), view(A, 1, :), zeros(elty, 5, 5)) -end - -@testset "her for eltype $elty" for elty in (ComplexF32, ComplexF64) - A = rand(elty, 5, 5) - @test triu(A[1,:] * A[1,:]') ≈ BLAS.her!('U', one(real(elty)), A[1,:], zeros(elty, 5, 5)) - @test tril(A[1,:] * A[1,:]') ≈ BLAS.her!('L', one(real(elty)), A[1,:], zeros(elty, 5, 5)) - @test triu(A[1,:] * A[1,:]') ≈ BLAS.her!('U', one(real(elty)), view(A, 1, :), zeros(elty, 5, 5)) - @test tril(A[1,:] * A[1,:]') ≈ BLAS.her!('L', one(real(elty)), view(A, 1, :), zeros(elty, 5, 5)) -end - -struct WrappedArray{T,N} <: AbstractArray{T,N} - A::Array{T,N} -end - -Base.size(A::WrappedArray) = size(A.A) -Base.getindex(A::WrappedArray, i::Int) = A.A[i] -Base.getindex(A::WrappedArray{T, N}, I::Vararg{Int, N}) where {T, N} = A.A[I...] -Base.setindex!(A::WrappedArray, v, i::Int) = setindex!(A.A, v, i) -Base.setindex!(A::WrappedArray{T, N}, v, I::Vararg{Int, N}) where {T, N} = setindex!(A.A, v, I...) 
-Base.cconvert(::Type{Ptr{T}}, A::WrappedArray{T}) where T = Base.cconvert(Ptr{T}, A.A) - -Base.strides(A::WrappedArray) = strides(A.A) -Base.elsize(::Type{WrappedArray{T,N}}) where {T,N} = Base.elsize(Array{T,N}) - -@testset "strided interface adjtrans" begin - x = WrappedArray([1, 2, 3, 4]) - @test stride(x,1) == 1 - @test stride(x,2) == stride(x,3) == 4 - @test strides(x') == strides(transpose(x)) == (4,1) - @test pointer(x') == pointer(transpose(x)) == pointer(x) - @test_throws BoundsError stride(x,0) - - A = WrappedArray([1 2; 3 4; 5 6]) - @test stride(A,1) == 1 - @test stride(A,2) == 3 - @test stride(A,3) == stride(A,4) >= 6 - @test strides(A') == strides(transpose(A)) == (3,1) - @test pointer(A') == pointer(transpose(A)) == pointer(A) - @test_throws BoundsError stride(A,0) - - y = WrappedArray([1+im, 2, 3, 4]) - @test strides(transpose(y)) == (4,1) - @test pointer(transpose(y)) == pointer(y) - @test_throws MethodError strides(y') - @test_throws ErrorException pointer(y') - - B = WrappedArray([1+im 2; 3 4; 5 6]) - @test strides(transpose(B)) == (3,1) - @test pointer(transpose(B)) == pointer(B) - @test_throws MethodError strides(B') - @test_throws ErrorException pointer(B') - - @test_throws MethodError stride(1:5,0) - @test_throws MethodError stride(1:5,1) - @test_throws MethodError stride(1:5,2) - @test_throws MethodError strides(transpose(1:5)) - @test_throws MethodError strides((1:5)') - @test_throws ErrorException pointer(transpose(1:5)) - @test_throws ErrorException pointer((1:5)') -end - -@testset "strided interface blas" begin - for elty in (Float32, Float64, ComplexF32, ComplexF64) - # Level 1 - x = WrappedArray(elty[1, 2, 3, 4]) - y = WrappedArray(elty[5, 6, 7, 8]) - BLAS.blascopy!(2, x, 1, y, 2) - @test y == WrappedArray(elty[1, 6, 2, 8]) - BLAS.scal!(2, elty(2), x, 1) - @test x == WrappedArray(elty[2, 4, 3, 4]) - @test BLAS.nrm2(1, x, 2) == elty(2) - @test BLAS.nrm2(x) == BLAS.nrm2(x.A) - BLAS.asum(x) == elty(13) - BLAS.axpy!(4, elty(2), x, 1, y, 1) - @test y == WrappedArray(elty[5, 14, 8, 16]) - BLAS.axpby!(elty(2), x, elty(3), y) - @test y == WrappedArray(elty[19, 50, 30, 56]) - @test BLAS.iamax(x) == 2 - - M = fill(elty(1.0), 3, 3) - @test BLAS.scal!(elty(2), view(M,:,2)) === view(M,:,2) - @test BLAS.scal!(elty(3), view(M,3,:)) === view(M,3,:) - @test M == elty[1. 2. 1.; 1. 2. 1.; 3. 6. 3.] 
- # Level 2 - A = WrappedArray(elty[1 2; 3 4]) - x = WrappedArray(elty[1, 2]) - y = WrappedArray(elty[3, 4]) - @test BLAS.gemv!('N', elty(2), A, x, elty(1), y) isa WrappedArray{elty,1} - @test y == WrappedArray(elty[13, 26]) - @test BLAS.gbmv!('N', 2, 1, 0, elty(2), A, x, elty(1), y) isa WrappedArray{elty,1} - @test y == WrappedArray(elty[15, 40]) - @test BLAS.symv!('U', elty(2), A, x, elty(1), y) isa WrappedArray{elty,1} - @test y == WrappedArray(elty[25, 60]) - @test BLAS.trmv!('U', 'N', 'N', A, y) isa WrappedArray{elty,1} - @test y == WrappedArray(elty[145, 240]) - @test BLAS.trsv!('U', 'N', 'N', A, y) isa WrappedArray{elty,1} - @test y == WrappedArray(elty[25,60]) - @test BLAS.ger!(elty(2), x, y, A) isa WrappedArray{elty,2} - @test A == WrappedArray(elty[51 122; 103 244]) - @test BLAS.syr!('L', elty(2), x, A) isa WrappedArray{elty,2} - @test A == WrappedArray(elty[53 122; 107 252]) - # Level 3 - A = WrappedArray(elty[1 2; 3 4]) - B = WrappedArray(elty[5 6; 7 8]) - C = WrappedArray(elty[9 10; 11 12]) - BLAS.gemm!('N', 'N', elty(2), A, B, elty(1), C) isa WrappedArray{elty,2} - @test C == WrappedArray([47 54; 97 112]) - BLAS.symm!('L', 'U', elty(2), A, B, elty(1), C) isa WrappedArray{elty,2} - @test C == WrappedArray([85 98; 173 200]) - BLAS.syrk!('U', 'N', elty(2), A, elty(1), C) isa WrappedArray{elty,2} - @test C == WrappedArray([95 120; 173 250]) - BLAS.syr2k!('U', 'N', elty(2), A, B, elty(1), C) isa WrappedArray{elty,2} - @test C == WrappedArray([163 244; 173 462]) - BLAS.trmm!('L', 'U', 'N', 'N', elty(2), A, B) isa WrappedArray{elty,2} - @test B == WrappedArray([38 44; 56 64]) - BLAS.trsm!('L', 'U', 'N', 'N', elty(2), A, B) isa WrappedArray{elty,2} - @test B == WrappedArray([20 24; 28 32]) - end - for elty in (Float32, Float64) - # Level 1 - x = WrappedArray(elty[1, 2, 3, 4]) - y = WrappedArray(elty[5, 6, 7, 8]) - @test BLAS.dot(2, x, 1, y, 2) == elty(19) - # Level 2 - A = WrappedArray(elty[1 2; 3 4]) - x = WrappedArray(elty[1, 2]) - y = WrappedArray(elty[3, 4]) - BLAS.sbmv!('U', 1, elty(2), A, x, elty(1), y) isa WrappedArray{elty,1} - @test y == WrappedArray(elty[17,24]) - end - for elty in (ComplexF32, ComplexF64) - # Level 1 - x = WrappedArray(elty[1+im, 2+2im, 3+3im, 4+im]) - y = WrappedArray(elty[5-im, 6-2im, 7-3im, 8-im]) - @test BLAS.dotc(2, x, 1, y, 2) == elty(12-26im) - @test BLAS.dotu(2, x, 1, y, 2) == elty(26+12im) - # Level 2 - A = WrappedArray(elty[1+im 2+2im; 3+3im 4+4im]) - x = WrappedArray(elty[1+im, 2+2im]) - y = WrappedArray(elty[5-im, 6-2im]) - @test BLAS.hemv!('U', elty(2), A, x, elty(1), y) isa WrappedArray{elty,1} - @test y == WrappedArray(elty[7+17im, 30+14im]) - BLAS.hbmv!('U', 1, elty(2), A, x, elty(1), y) isa WrappedArray{elty,1} - @test y == WrappedArray(elty[13+39im, 54+30im]) - @test BLAS.her!('L', real(elty(2)), x, A) isa WrappedArray{elty,2} - @test A == WrappedArray(elty[5 2+2im; 11+3im 20]) - # Level 3 - A = WrappedArray(elty[1+im 2+2im; 3+3im 4+4im]) - B = WrappedArray(elty[1+im 2+2im; 3+3im 4+4im]) - C = WrappedArray(elty[1+im 2+2im; 3+3im 4+4im]) - @test BLAS.hemm!('L', 'U', elty(2), A, B, elty(1), C) isa WrappedArray{elty,2} - @test C == WrappedArray([3+27im 6+38im; 35+27im 52+36im]) - @test BLAS.herk!('U', 'N', real(elty(2)), A, real(elty(1)), C) isa WrappedArray{elty,2} - @test C == WrappedArray([23 50+38im; 35+27im 152]) - @test BLAS.her2k!('U', 'N', elty(2), A, B, real(elty(1)), C) isa WrappedArray{elty,2} - @test C == WrappedArray([63 138+38im; 35+27im 352]) - end -end - -@testset "get_set_num_threads" begin - default = BLAS.get_num_threads() 
- @test default isa Int - @test default > 0 - BLAS.set_num_threads(1) - @test BLAS.get_num_threads() === 1 - BLAS.set_num_threads(default) - @test BLAS.get_num_threads() === default -end - -@testset "test for 0-strides" for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = randn(elty, 10, 10); - a = view([randn(elty)], 1 .+ 0(1:10)) - b = view([randn(elty)], 1 .+ 0(1:10)) - α, β = randn(elty), randn(elty) - @testset "dot/dotc/dotu" begin - if elty <: Real - @test BLAS.dot(a,b) ≈ sum(a.*b) - else - @test BLAS.dotc(a,b) ≈ sum(conj(a).*b) - @test BLAS.dotu(a,b) ≈ sum(a.*b) - end - end - @testset "axp(b)y!" begin - @test BLAS.axpy!(α,a,copy(b)) ≈ α*a + b - @test BLAS.axpby!(α,a,β,copy(b)) ≈ α*a + β*b - @test_throws "dest" BLAS.axpy!(α,a,b) - @test_throws "dest" BLAS.axpby!(α,a,β,b) - end - @test BLAS.iamax(a) == 0 - @test_throws "dest" BLAS.scal!(b[1], a) - @testset "nrm2/asum" begin # OpenBLAS always return 0.0 - @test_throws "input" BLAS.nrm2(a) - @test_throws "input" BLAS.asum(a) - end - # All level2 reject 0-stride array. - @testset "gemv!" begin - @test_throws "input" BLAS.gemv!('N', true, A, a, false, copy(b)) - @test_throws "dest" BLAS.gemv!('N', true, A, copy(a), false, b) - end -end - -# Make sure we can use `Base.libblas_name`. Avoid causing -# https://github.com/JuliaLang/julia/issues/48427 again. -@testset "libblas_name" begin - dot_sym = dlsym(dlopen(Base.libblas_name), "cblas_ddot" * (Sys.WORD_SIZE == 64 ? "64_" : "")) - @test 23.0 === @ccall $(dot_sym)(2::Int, [2.0, 3.0]::Ref{Cdouble}, 1::Int, [4.0, 5.0]::Ref{Cdouble}, 1::Int)::Cdouble -end - -end # module TestBLAS diff --git a/stdlib/LinearAlgebra/test/bunchkaufman.jl b/stdlib/LinearAlgebra/test/bunchkaufman.jl deleted file mode 100644 index 68c519d1197ed..0000000000000 --- a/stdlib/LinearAlgebra/test/bunchkaufman.jl +++ /dev/null @@ -1,260 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestBunchKaufman - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted -using Base: getproperty - -n = 10 - -# Split n into 2 parts for tests needing two matrices -n1 = div(n, 2) -n2 = 2*n1 - -Random.seed!(12343212) - -areal = randn(n,n)/2 -aimg = randn(n,n)/2 -a2real = randn(n,n)/2 -a2img = randn(n,n)/2 -breal = randn(n,2)/2 -bimg = randn(n,2)/2 - -areint = rand(1:7, n, n) -aimint = rand(1:7, n, n) -a2reint = rand(1:7, n, n) -a2imint = rand(1:7, n, n) -breint = rand(1:5, n, 2) -bimint = rand(1:5, n, 2) - -@testset "$eltya argument A" for eltya in (Float32, Float64, ComplexF32, ComplexF64, Int, ### - Float16, Complex{Float16}, BigFloat, Complex{BigFloat}, Complex{Int}, BigInt, - Complex{BigInt}, Rational{BigInt}, Complex{Rational{BigInt}}) - # a = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal) - # a2 = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(a2real, a2img) : a2real) - a = convert(Matrix{eltya}, eltya <: Complex ? (real(eltya) <: AbstractFloat ? - complex.(areal, aimg) : complex.(areint, aimint)) : (eltya <: AbstractFloat ? - areal : areint)) - a2 = convert(Matrix{eltya}, eltya <: Complex ? (real(eltya) <: AbstractFloat ? - complex.(a2real, a2img) : complex.(a2reint, a2imint)) : (eltya <: AbstractFloat ? 
- a2real : a2reint)) - asym = transpose(a) + a # symmetric indefinite - aher = a' + a # Hermitian indefinite - apd = a' * a # Positive-definite - for (a, a2, aher, apd) in ((a, a2, aher, apd), - (view(a, 1:n, 1:n), - view(a2, 1:n, 1:n), - view(aher, 1:n, 1:n), - view(apd , 1:n, 1:n))) - ε = εa = eps(abs(float(one(eltya)))) - - # Inertia tests - @testset "$uplo Bunch-Kaufman factor inertia" for uplo in (:L, :U) - @testset "rook pivoting: $rook" for rook in (false, true) - test_list = eltya <: Complex ? (Hermitian(aher, uplo), Hermitian(apd, uplo)) : - (Symmetric(transpose(a) + a, uplo), Hermitian(aher, uplo), - Hermitian(apd, uplo)) - ελ = n*max(eps(Float64), εa) # zero-eigenvalue threshold - ελ = typeof(Integer(one(real(eltya)))) <: Signed ? Rational{BigInt}(ελ) : - real(eltya(ελ)) - for M in test_list - bc = bunchkaufman(M, rook) - D = bc.D - λ = real(eltya <: Complex ? eigen(ComplexF64.(D)).values : - eigen(Float64.(D)).values) - σ₁ = norm(λ, Inf) - np = sum(λ .> ελ*σ₁) - nn = sum(λ .< -ελ*σ₁) - nz = n - np - nn - if real(eltya) <: AbstractFloat - @test inertia(bc) == (np, nn, nz) - else - @test inertia(bc; rtol=ελ) == (np, nn, nz) - end - end - end - end - - # check that factorize gives a Bunch-Kaufman - if eltya <: Union{Float32, Float64, ComplexF32, ComplexF64, Int} - # Default behaviour only uses Bunch-Kaufman for these types, for now. - @test isa(factorize(asym), LinearAlgebra.BunchKaufman) - @test isa(factorize(aher), LinearAlgebra.BunchKaufman) - end - @testset "$uplo Bunch-Kaufman factor of indefinite matrix" for uplo in (:L, :U) - bc1 = bunchkaufman(Hermitian(aher, uplo)) - @test LinearAlgebra.issuccess(bc1) - @test logabsdet(bc1)[1] ≈ log(abs(det(bc1))) - if eltya <: Real - @test logabsdet(bc1)[2] == sign(det(bc1)) - else - @test logabsdet(bc1)[2] ≈ sign(det(bc1)) - end - @test inv(bc1)*aher ≈ Matrix(I, n, n) - @testset for rook in (false, true) - @test inv(bunchkaufman(Symmetric(transpose(a) + a, uplo), rook))*(transpose(a) + a) ≈ Matrix(I, n, n) - if eltya <: BlasFloat - # test also bunchkaufman! without explicit type tag - # no bunchkaufman! method for Int ... yet - @test inv(bunchkaufman!(transpose(a) + a, rook))*(transpose(a) + a) ≈ Matrix(I, n, n) - end - @test size(bc1) == size(bc1.LD) - @test size(bc1, 1) == size(bc1.LD, 1) - @test size(bc1, 2) == size(bc1.LD, 2) - if eltya <: BlasReal - @test_throws ArgumentError bunchkaufman(a) - end - # Test extraction of factors - if eltya <: Real - @test getproperty(bc1, uplo)*bc1.D*getproperty(bc1, uplo)' ≈ aher[bc1.p, bc1.p] - @test getproperty(bc1, uplo)*bc1.D*getproperty(bc1, uplo)' ≈ bc1.P*aher*bc1.P' - end - - bc1 = bunchkaufman(Symmetric(asym, uplo)) - @test getproperty(bc1, uplo)*bc1.D*transpose(getproperty(bc1, uplo)) ≈ asym[bc1.p, bc1.p] - @test getproperty(bc1, uplo)*bc1.D*transpose(getproperty(bc1, uplo)) ≈ bc1.P*asym*transpose(bc1.P) - @test_throws FieldError bc1.Z - @test_throws ArgumentError uplo === :L ? bc1.U : bc1.L - end - # test Base.iterate - ref_objs = (bc1.D, uplo === :L ? 
bc1.L : bc1.U, bc1.p) - for (bki, bkobj) in enumerate(bc1) - @test bkobj == ref_objs[bki] - end - if eltya <: BlasFloat - @test convert(LinearAlgebra.BunchKaufman{eltya}, bc1) === bc1 - @test convert(LinearAlgebra.Factorization{eltya}, bc1) === bc1 - if eltya <: BlasReal - @test convert(LinearAlgebra.Factorization{Float16}, bc1) == convert(LinearAlgebra.BunchKaufman{Float16}, bc1) - elseif eltya <: BlasComplex - @test convert(LinearAlgebra.Factorization{ComplexF16}, bc1) == convert(LinearAlgebra.BunchKaufman{ComplexF16}, bc1) - end - end - @test Base.propertynames(bc1) == (:p, :P, :L, :U, :D) - end - - @testset "$eltyb argument B" for eltyb in (Float32, Float64, ComplexF32, ComplexF64, Int, ### - Float16, Complex{Float16}, BigFloat, Complex{BigFloat}, Complex{Int}, BigInt, - Complex{BigInt}, Rational{BigInt}, Complex{Rational{BigInt}}) - # b = eltyb == Int ? rand(1:5, n, 2) : convert(Matrix{eltyb}, eltyb <: Complex ? complex.(breal, bimg) : breal) - b = convert(Matrix{eltyb}, eltyb <: Complex ? (real(eltyb) <: AbstractFloat ? - complex.(breal, bimg) : complex.(breint, bimint)) : (eltyb <: AbstractFloat ? - breal : breint)) - for b in (b, view(b, 1:n, 1:2)) - εb = eps(abs(float(one(eltyb)))) - ε = max(εa,εb) - epsc = eltya <: Complex ? sqrt(2)*n : n # tolerance scale - - @testset "$uplo Bunch-Kaufman factor of indefinite matrix" for uplo in (:L, :U) - bc1 = bunchkaufman(Hermitian(aher, uplo)) - # @test aher*(bc1\b) ≈ b atol=1000ε - cda = eltya <: Complex ? cond(ComplexF64.(aher)) : cond(Float64.(aher)) - cda = real(eltya) <: AbstractFloat ? real(eltya(cda)) : cda - @test norm(aher*(bc1\b) - b) <= epsc*sqrt(eps(cda))*max( - norm(aher*(bc1\b)), norm(b)) - end - - @testset "$uplo Bunch-Kaufman factors of a pos-def matrix" for uplo in (:U, :L) - @testset "rook pivoting: $rook" for rook in (false, true) - bc2 = bunchkaufman(Hermitian(apd, uplo), rook) - @test LinearAlgebra.issuccess(bc2) - bks = split(sprint(show, "text/plain", bc2), "\n") - @test bks[1] == summary(bc2) - @test bks[2] == "D factor:" - @test bks[4+n] == "$uplo factor:" - @test bks[6+2n] == "permutation:" - @test logdet(bc2) ≈ log(det(bc2)) - @test logabsdet(bc2)[1] ≈ log(abs(det(bc2))) - @test logabsdet(bc2)[2] == sign(det(bc2)) - # @test inv(bc2)*apd ≈ Matrix(I, n, n) rtol=Base.rtoldefault(real(eltya)) - # @test apd*(bc2\b) ≈ b rtol=eps(cond(apd)) - @test norm(inv(bc2)*apd - Matrix(I, n, n)) <= epsc*Base.rtoldefault( - real(eltya))*max(norm(inv(bc2)*apd), norm(Matrix(I, n, n))) - cda = eltya <: Complex ? cond(ComplexF64.(apd)) : cond(Float64.(apd)) - cda = real(eltya) <: AbstractFloat ? 
real(eltya(cda)) : cda - @test norm(apd*(bc2\b) - b) <= epsc*sqrt(eps(cda))*max( - norm(apd*(bc2\b)), norm(b)) - @test ishermitian(bc2) - @test !issymmetric(bc2) || eltya <: Real - end - end - end - end - end -end - -@testset "Singular matrices" begin - R = Float64[1 0; 0 0] - C = ComplexF64[1 0; 0 0] - for A in (R, Symmetric(R), C, Hermitian(C)) - @test_throws SingularException bunchkaufman(A) - @test_throws SingularException bunchkaufman!(copy(A)) - @test_throws SingularException bunchkaufman(A; check = true) - @test_throws SingularException bunchkaufman!(copy(A); check = true) - @test !issuccess(bunchkaufman(A; check = false)) - @test !issuccess(bunchkaufman!(copy(A); check = false)) - end - F = bunchkaufman(R; check = false) - @test sprint(show, "text/plain", F) == "Failed factorization of type $(typeof(F))" -end - -@testset "test example due to @timholy in PR 15354" begin - A = rand(6,5); A = complex(A'*A) # to avoid calling the real-lhs-complex-rhs method - F = cholesky(A); - v6 = rand(ComplexF64, 6) - v5 = view(v6, 1:5) - @test F\v5 == F\v6[1:5] -end - -@testset "issue #32080" begin - A = Symmetric([-5 -9 9; -9 4 1; 9 1 2]) - B = bunchkaufman(A, true) - @test B.U * B.D * B.U' ≈ A[B.p, B.p] -end - -@test_throws DomainError logdet(bunchkaufman([-1 -1; -1 1])) -@test logabsdet(bunchkaufman([8 4; 4 2]; check = false))[1] == -Inf - -@testset "0x0 matrix" begin - for ul in (:U, :L) - B = bunchkaufman(Symmetric(ones(0, 0), ul)) - @test isa(B, BunchKaufman) - @test B.D == Tridiagonal([], [], []) - @test B.P == ones(0, 0) - @test B.p == [] - if ul === :U - @test B.U == UnitUpperTriangular(ones(0, 0)) - @test_throws ArgumentError B.L - else - @test B.L == UnitLowerTriangular(ones(0, 0)) - @test_throws ArgumentError B.U - end - end -end - -@testset "adjoint of BunchKaufman" begin - Ar = randn(5, 5) - Ar = Ar + Ar' - Actmp = complex.(randn(5, 5), randn(5, 5)) - Ac1 = Actmp + Actmp' - Ac2 = Actmp + transpose(Actmp) - b = ones(size(Ar, 1)) - - F = bunchkaufman(Ar) - @test F\b == F'\b - - F = bunchkaufman(Ac1) - @test F\b == F'\b - - F = bunchkaufman(Ac2) - @test_throws ArgumentError("adjoint not implemented for complex symmetric matrices") F' -end - -@testset "BunchKaufman for AbstractMatrix" begin - S = SymTridiagonal(fill(2.0, 4), ones(3)) - B = bunchkaufman(S) - @test B.U * B.D * B.U' ≈ S -end - -end # module TestBunchKaufman diff --git a/stdlib/LinearAlgebra/test/cholesky.jl b/stdlib/LinearAlgebra/test/cholesky.jl deleted file mode 100644 index 6ba72432048a9..0000000000000 --- a/stdlib/LinearAlgebra/test/cholesky.jl +++ /dev/null @@ -1,661 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestCholesky - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted, - PosDefException, RankDeficientException, chkfullrank - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") - -isdefined(Main, :Quaternions) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Quaternions.jl")) -using .Main.Quaternions - -function unary_ops_tests(a, ca, tol; n=size(a, 1)) - @test inv(ca)*a ≈ Matrix(I, n, n) - @test a*inv(ca) ≈ Matrix(I, n, n) - @test abs((det(ca) - det(a))/det(ca)) <= tol # Ad hoc, but statistically verified, revisit - @test logdet(ca) ≈ logdet(a) broken = eltype(a) <: Quaternion - @test logdet(ca) ≈ log(det(ca)) # logdet is less likely to overflow - logabsdet_ca = logabsdet(ca) - logabsdet_a = logabsdet(a) - @test logabsdet_ca[1] ≈ logabsdet_a[1] - @test logabsdet_ca[2] ≈ logabsdet_a[2] - @test isposdef(ca) - @test_throws FieldError ca.Z - @test size(ca) == size(a) - @test Array(copy(ca)) ≈ a - @test tr(ca) ≈ tr(a) skip=ca isa CholeskyPivoted -end - -function factor_recreation_tests(a_U, a_L) - c_U = cholesky(a_U) - c_L = cholesky(a_L) - cl = c_L.U - ls = c_L.L - @test Array(c_U) ≈ Array(c_L) ≈ a_U - @test ls*ls' ≈ a_U - @test triu(c_U.factors) ≈ c_U.U - @test tril(c_L.factors) ≈ c_L.L - @test istriu(cl) - @test cl'cl ≈ a_U - @test cl'cl ≈ a_L -end - -@testset "core functionality" begin - n = 10 - - # Split n into 2 parts for tests needing two matrices - n1 = div(n, 2) - n2 = 2*n1 - - Random.seed!(12344) - - areal = randn(n,n)/2 - aimg = randn(n,n)/2 - a2real = randn(n,n)/2 - a2img = randn(n,n)/2 - breal = randn(n,2)/2 - bimg = randn(n,2)/2 - - for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Complex{BigFloat}, Quaternion{Float64}, Int) - a = if eltya == Int - rand(1:7, n, n) - elseif eltya <: Real - convert(Matrix{eltya}, areal) - elseif eltya <: Complex - convert(Matrix{eltya}, complex.(areal, aimg)) - else - convert(Matrix{eltya}, Quaternion.(areal, aimg, a2real, a2img)) - end - - ε = εa = eps(abs(float(one(eltya)))) - - # Test of symmetric pos. def. strided matrix - apd = Matrix(Hermitian(a'*a)) - capd = @inferred cholesky(apd) - r = capd.U - κ = cond(apd, 1) #condition number - - unary_ops_tests(apd, capd, ε*κ*n) - if eltya != Int - @test Factorization{eltya}(capd) === capd - if eltya <: Real - @test Array(Factorization{complex(eltya)}(capd)) ≈ Array(cholesky(complex(apd))) - @test eltype(Factorization{complex(eltya)}(capd)) == complex(eltya) - end - end - @testset "throw for non-square input" begin - A = rand(eltya, 2, 3) - @test_throws DimensionMismatch cholesky(A) - @test_throws DimensionMismatch cholesky!(A) - end - - #Test error bound on reconstruction of matrix: LAWNS 14, Lemma 2.1 - - #these tests were failing on 64-bit linux when inside the inner loop - #for eltya = ComplexF32 and eltyb = Int. The E[i,j] had NaN32 elements - #but only with Random.seed!(1234321) set before the loops. 
- E = abs.(apd - r'*r) - for i=1:n, j=1:n - @test E[i,j] <= (n+1)ε/(1-(n+1)ε)*sqrt(real(apd[i,i]*apd[j,j])) - end - E = abs.(apd - Matrix(capd)) - for i=1:n, j=1:n - @test E[i,j] <= (n+1)ε/(1-(n+1)ε)*sqrt(real(apd[i,i]*apd[j,j])) - end - @test LinearAlgebra.issuccess(capd) - @inferred(logdet(capd)) - - apos = real(apd[1,1]) - @test all(x -> x ≈ √apos, cholesky(apos).factors) - - # Test cholesky with Symmetric/Hermitian upper/lower - apds = Symmetric(apd) - apdsL = Symmetric(apd, :L) - apdh = Hermitian(apd) - apdhL = Hermitian(apd, :L) - if eltya <: Real - capds = cholesky(apds) - unary_ops_tests(apds, capds, ε*κ*n) - if eltya <: BlasReal - capds = cholesky!(copy(apds)) - unary_ops_tests(apds, capds, ε*κ*n) - end - ulstring = sprint((t, s) -> show(t, "text/plain", s), capds.UL) - @test sprint((t, s) -> show(t, "text/plain", s), capds) == "$(typeof(capds))\nU factor:\n$ulstring" - else - capdh = cholesky(apdh) - unary_ops_tests(apdh, capdh, ε*κ*n) - capdh = cholesky!(copy(apdh)) - unary_ops_tests(apdh, capdh, ε*κ*n) - capdh = cholesky!(copy(apd)) - unary_ops_tests(apd, capdh, ε*κ*n) - ulstring = sprint((t, s) -> show(t, "text/plain", s), capdh.UL) - @test sprint((t, s) -> show(t, "text/plain", s), capdh) == "$(typeof(capdh))\nU factor:\n$ulstring" - end - - # test cholesky of 2x2 Strang matrix - S = SymTridiagonal{eltya}([2, 2], [-1]) - for uplo in (:U, :L) - @test Matrix(@inferred cholesky(Hermitian(S, uplo))) ≈ S - if eltya <: Real - @test Matrix(@inferred cholesky(Symmetric(S, uplo))) ≈ S - end - end - @test Matrix(cholesky(S).U) ≈ [2 -1; 0 float(eltya)(sqrt(real(eltya)(3)))] / float(eltya)(sqrt(real(eltya)(2))) - @test Matrix(cholesky(S)) ≈ S - - # test extraction of factor and re-creating original matrix - if eltya <: Real - factor_recreation_tests(apds, apdsL) - else - factor_recreation_tests(apdh, apdhL) - end - - #pivoted upper Cholesky - for tol in (0.0, -1.0), APD in (apdh, apdhL) - cpapd = cholesky(APD, RowMaximum(), tol=tol) - unary_ops_tests(APD, cpapd, ε*κ*n) - @test rank(cpapd) == n - @test all(diff(real(diag(cpapd.factors))).<=0.) 
# diagonal should be non-increasing - - @test cpapd.P*cpapd.L*cpapd.U*cpapd.P' ≈ apd - end - - for eltyb in (Float32, Float64, ComplexF32, ComplexF64, Int) - b = if eltya <: Quaternion - convert(Matrix{eltya}, Quaternion.(breal, bimg, bimg, bimg)) - elseif eltyb == Int - rand(1:5, n, 2) - elseif eltyb <: Complex - convert(Matrix{eltyb}, complex.(breal, bimg)) - elseif eltyb <: Real - convert(Matrix{eltyb}, breal) - end - εb = eps(abs(float(one(eltyb)))) - ε = max(εa,εb) - - for b in (b, view(b, 1:n, 1)) # Array and SubArray - - # Test error bound on linear solver: LAWNS 14, Theorem 2.1 - # This is a surprisingly loose bound - x = capd\b - @test norm(x-apd\b,1)/norm(x,1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - @test norm(apd*x-b,1)/norm(b,1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - - @test norm(a*(capd\(a'*b)) - b,1)/norm(b,1) <= ε*κ*n # Ad hoc, revisit - - lapd = cholesky(apdhL) - @test norm(apd * (lapd\b) - b)/norm(b) <= ε*κ*n - @test norm(apd * (lapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n - - cpapd = cholesky(apdh, RowMaximum()) - @test norm(apd * (cpapd\b) - b)/norm(b) <= ε*κ*n # Ad hoc, revisit - @test norm(apd * (cpapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n - - lpapd = cholesky(apdhL, RowMaximum()) - @test norm(apd * (lpapd\b) - b)/norm(b) <= ε*κ*n # Ad hoc, revisit - @test norm(apd * (lpapd\b[1:n]) - b[1:n])/norm(b[1:n]) <= ε*κ*n - end - end - - for eltyb in (Float64, ComplexF64) - Breal = convert(Matrix{BigFloat}, randn(n,n)/2) - Bimg = convert(Matrix{BigFloat}, randn(n,n)/2) - B = if eltya <: Quaternion - Quaternion.(Float64.(Breal), Float64.(Bimg), Float64.(Bimg), Float64.(Bimg)) - elseif eltya <: Complex || eltyb <: Complex - complex.(Breal, Bimg) - else - Breal - end - εb = eps(abs(float(one(eltyb)))) - ε = max(εa,εb) - - for B in (B, view(B, 1:n, 1:n)) # Array and SubArray - - # Test error bound on linear solver: LAWNS 14, Theorem 2.1 - # This is a surprisingly loose bound - BB = copy(B) - ldiv!(capd, BB) - @test norm(apd \ B - BB, 1) / norm(BB, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - @test norm(apd * BB - B, 1) / norm(B, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - cpapd = cholesky(apdh, RowMaximum()) - BB = copy(B) - ldiv!(cpapd, BB) - @test norm(apd \ B - BB, 1) / norm(BB, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - @test norm(apd * BB - B, 1) / norm(B, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - end - end - - @testset "solve with generic Cholesky" begin - Breal = convert(Matrix{BigFloat}, randn(n,n)/2) - Bimg = convert(Matrix{BigFloat}, randn(n,n)/2) - B = if eltya <: Quaternion - eltya.(Breal, Bimg, Bimg, Bimg) - elseif eltya <: Complex - complex.(Breal, Bimg) - else - Breal - end - εb = eps(abs(float(one(eltype(B))))) - ε = max(εa,εb) - - for B in (B, view(B, 1:n, 1:n)) # Array and SubArray - - # Test error bound on linear solver: LAWNS 14, Theorem 2.1 - # This is a surprisingly loose bound - cpapd = cholesky(eltya <: Real ? apds : apdh) - BB = copy(B) - rdiv!(BB, cpapd) - @test norm(B / apd - BB, 1) / norm(BB, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - @test norm(BB * apd - B, 1) / norm(B, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - cpapd = cholesky(eltya <: Real ? apdsL : apdhL) - BB = copy(B) - rdiv!(BB, cpapd) - @test norm(B / apd - BB, 1) / norm(BB, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - @test norm(BB * apd - B, 1) / norm(B, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - cpapd = cholesky(eltya <: Real ? 
apds : apdh, RowMaximum()) - BB = copy(B) - rdiv!(BB, cpapd) - @test norm(B / apd - BB, 1) / norm(BB, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - @test norm(BB * apd - B, 1) / norm(B, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - cpapd = cholesky(eltya <: Real ? apdsL : apdhL, RowMaximum()) - BB = copy(B) - rdiv!(BB, cpapd) - @test norm(B / apd - BB, 1) / norm(BB, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - @test norm(BB * apd - B, 1) / norm(B, 1) <= (3n^2 + n + n^3*ε)*ε/(1-(n+1)*ε)*κ - end - end - if eltya <: BlasFloat - @testset "generic cholesky!" begin - if eltya <: Complex - A = complex.(randn(5,5), randn(5,5)) - else - A = randn(5,5) - end - A = convert(Matrix{eltya}, A'A) - @test Matrix(cholesky(A).L) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{LowerTriangular}}, copy(A), LowerTriangular)[1]) - @test Matrix(cholesky(A).U) ≈ Matrix(invoke(LinearAlgebra._chol!, Tuple{AbstractMatrix, Type{UpperTriangular}}, copy(A), UpperTriangular)[1]) - end - end - end - - @testset "eltype/matrixtype conversions" begin - apd = Matrix(Hermitian(areal'*areal)) - capd = cholesky(apd) - @test convert(Cholesky{Float64}, capd) === capd - @test convert(Cholesky{Float64,Matrix{Float64}}, capd) === convert(typeof(capd), capd) === capd - @test eltype(convert(Cholesky{Float32}, capd)) === Float32 - @test eltype(convert(Cholesky{Float32,Matrix{Float32}}, capd)) === Float32 - - capd = cholesky(apd, RowMaximum()) - @test convert(CholeskyPivoted{Float64}, capd) === capd - @test convert(CholeskyPivoted{Float64,Matrix{Float64}}, capd) === capd - @test convert(CholeskyPivoted{Float64,Matrix{Float64},Vector{Int}}, capd) === convert(typeof(capd), capd) === capd - @test eltype(convert(CholeskyPivoted{Float32}, capd)) === Float32 - @test eltype(convert(CholeskyPivoted{Float32,Matrix{Float32}}, capd)) === Float32 - @test eltype(convert(CholeskyPivoted{Float32,Matrix{Float32},Vector{Int}}, capd)) === Float32 - @test eltype(convert(CholeskyPivoted{Float32,Matrix{Float32},Vector{Int16}}, capd).piv) === Int16 - end -end - -@testset "behavior for non-positive definite matrices" for T in (Float64, ComplexF64, BigFloat) - A = T[1 2; 2 1] - B = T[1 2; 0 1] - C = T[2 0; 0 0] - # check = (true|false) - for M in (A, Hermitian(A), B, C) - @test_throws PosDefException cholesky(M) - @test_throws PosDefException cholesky!(copy(M)) - @test_throws PosDefException cholesky(M; check = true) - @test_throws PosDefException cholesky!(copy(M); check = true) - @test !issuccess(cholesky(M; check = false)) - @test !issuccess(cholesky!(copy(M); check = false)) - end - for M in (A, Hermitian(A)) # hermitian, but not semi-positive definite - @test_throws RankDeficientException cholesky(M, RowMaximum()) - @test_throws RankDeficientException cholesky!(copy(M), RowMaximum()) - @test_throws RankDeficientException cholesky(M, RowMaximum(); check = true) - @test_throws RankDeficientException cholesky!(copy(M), RowMaximum(); check = true) - @test !issuccess(cholesky(M, RowMaximum(); check = false)) - @test !issuccess(cholesky!(copy(M), RowMaximum(); check = false)) - C = cholesky(M, RowMaximum(); check = false) - @test_throws RankDeficientException chkfullrank(C) - C = cholesky!(copy(M), RowMaximum(); check = false) - @test_throws RankDeficientException chkfullrank(C) - end - for M in (B,) # not hermitian - @test_throws PosDefException(-1) cholesky(M, RowMaximum()) - @test_throws PosDefException(-1) cholesky!(copy(M), RowMaximum()) - @test_throws PosDefException(-1) cholesky(M, RowMaximum(); check = true) - @test_throws PosDefException(-1) 
cholesky!(copy(M), RowMaximum(); check = true) - @test !issuccess(cholesky(M, RowMaximum(); check = false)) - @test !issuccess(cholesky!(copy(M), RowMaximum(); check = false)) - C = cholesky(M, RowMaximum(); check = false) - @test_throws RankDeficientException chkfullrank(C) - C = cholesky!(copy(M), RowMaximum(); check = false) - @test_throws RankDeficientException chkfullrank(C) - end - @test !isposdef(A) - str = sprint((io, x) -> show(io, "text/plain", x), cholesky(A; check = false)) -end - -@testset "Cholesky factor of Matrix with non-commutative elements, here 2x2-matrices" begin - X = Matrix{Float64}[0.1*rand(2,2) for i in 1:3, j = 1:3] - L = Matrix(LinearAlgebra._chol!(X*X', LowerTriangular)[1]) - U = Matrix(LinearAlgebra._chol!(X*X', UpperTriangular)[1]) - XX = Matrix(X*X') - - @test sum(sum(norm, L*L' - XX)) < eps() - @test sum(sum(norm, U'*U - XX)) < eps() -end - -@testset "Non-strided Cholesky solves" begin - B = randn(5, 5) - v = rand(5) - @test cholesky(Diagonal(v)) \ B ≈ Diagonal(v) \ B - @test B / cholesky(Diagonal(v)) ≈ B / Diagonal(v) - @test inv(cholesky(Diagonal(v)))::Diagonal ≈ Diagonal(1 ./ v) -end - -struct WrappedVector{T} <: AbstractVector{T} - data::Vector{T} -end -Base.copy(v::WrappedVector) = WrappedVector(copy(v.data)) -Base.size(v::WrappedVector) = size(v.data) -Base.getindex(v::WrappedVector, i::Integer) = getindex(v.data, i) -Base.setindex!(v::WrappedVector, val, i::Integer) = setindex!(v.data, val, i) - -@testset "cholesky up- and downdates" begin - A = complex.(randn(10,5), randn(10, 5)) - v = complex.(randn(5), randn(5)) - w = WrappedVector(v) - for uplo in (:U, :L) - AcA = A'*A - BcB = AcA + v*v' - BcB = (BcB + BcB')/2 - F = cholesky(Hermitian(AcA, uplo)) - G = cholesky(Hermitian(BcB, uplo)) - @test getproperty(lowrankupdate(F, v), uplo) ≈ getproperty(G, uplo) - @test getproperty(lowrankupdate(F, w), uplo) ≈ getproperty(G, uplo) - @test_throws DimensionMismatch lowrankupdate(F, Vector{eltype(v)}(undef,length(v)+1)) - @test getproperty(lowrankdowndate(G, v), uplo) ≈ getproperty(F, uplo) - @test getproperty(lowrankdowndate(G, w), uplo) ≈ getproperty(F, uplo) - @test_throws DimensionMismatch lowrankdowndate(G, Vector{eltype(v)}(undef,length(v)+1)) - end -end - -@testset "issue #13243, unexpected nans in complex cholesky" begin - apd = [5.8525753f0 + 0.0f0im -0.79540455f0 + 0.7066077f0im 0.98274714f0 + 1.3824869f0im 2.619998f0 + 1.8532984f0im -1.8306153f0 - 1.2336911f0im 0.32275113f0 + 0.015575029f0im 2.1968813f0 + 1.0640624f0im 0.27894387f0 + 0.97911835f0im 3.0476584f0 + 0.18548489f0im 0.3842994f0 + 0.7050991f0im - -0.79540455f0 - 0.7066077f0im 8.313246f0 + 0.0f0im -1.8076122f0 - 0.8882447f0im 0.47806996f0 + 0.48494184f0im 0.5096429f0 - 0.5395974f0im -0.7285097f0 - 0.10360408f0im -1.1760061f0 - 2.7146957f0im -0.4271084f0 + 0.042899966f0im -1.7228563f0 + 2.8335886f0im 1.8942566f0 + 0.6389735f0im - 0.98274714f0 - 1.3824869f0im -1.8076122f0 + 0.8882447f0im 9.367975f0 + 0.0f0im -0.1838578f0 + 0.6468568f0im -1.8338387f0 + 0.7064959f0im 0.041852742f0 - 0.6556877f0im 2.5673025f0 + 1.9732997f0im -1.1148382f0 - 0.15693812f0im 2.4704504f0 - 1.0389464f0im 1.0858271f0 - 1.298006f0im - 2.619998f0 - 1.8532984f0im 0.47806996f0 - 0.48494184f0im -0.1838578f0 - 0.6468568f0im 3.1117508f0 + 0.0f0im -1.956626f0 + 0.22825956f0im 0.07081801f0 - 0.31801307f0im 0.3698375f0 - 0.5400855f0im 0.80686307f0 + 1.5315914f0im 1.5649154f0 - 1.6229297f0im -0.112077385f0 + 1.2014246f0im - -1.8306153f0 + 1.2336911f0im 0.5096429f0 + 0.5395974f0im -1.8338387f0 - 0.7064959f0im -1.956626f0 - 
0.22825956f0im 3.6439795f0 + 0.0f0im -0.2594722f0 + 0.48786148f0im -0.47636223f0 - 0.27821827f0im -0.61608654f0 - 2.01858f0im -2.7767487f0 + 1.7693765f0im 0.048102796f0 - 0.9741874f0im - 0.32275113f0 - 0.015575029f0im -0.7285097f0 + 0.10360408f0im 0.041852742f0 + 0.6556877f0im 0.07081801f0 + 0.31801307f0im -0.2594722f0 - 0.48786148f0im 3.624376f0 + 0.0f0im -1.6697118f0 + 0.4017511f0im -1.4397877f0 - 0.7550918f0im -0.31456697f0 - 1.0403451f0im -0.31978557f0 + 0.13701046f0im - 2.1968813f0 - 1.0640624f0im -1.1760061f0 + 2.7146957f0im 2.5673025f0 - 1.9732997f0im 0.3698375f0 + 0.5400855f0im -0.47636223f0 + 0.27821827f0im -1.6697118f0 - 0.4017511f0im 6.8273163f0 + 0.0f0im -0.10051322f0 + 0.24303961f0im 1.4415971f0 + 0.29750675f0im 1.221786f0 - 0.85654986f0im - 0.27894387f0 - 0.97911835f0im -0.4271084f0 - 0.042899966f0im -1.1148382f0 + 0.15693812f0im 0.80686307f0 - 1.5315914f0im -0.61608654f0 + 2.01858f0im -1.4397877f0 + 0.7550918f0im -0.10051322f0 - 0.24303961f0im 3.4057708f0 + 0.0f0im -0.5856801f0 - 1.0203559f0im 0.7103452f0 + 0.8422135f0im - 3.0476584f0 - 0.18548489f0im -1.7228563f0 - 2.8335886f0im 2.4704504f0 + 1.0389464f0im 1.5649154f0 + 1.6229297f0im -2.7767487f0 - 1.7693765f0im -0.31456697f0 + 1.0403451f0im 1.4415971f0 - 0.29750675f0im -0.5856801f0 + 1.0203559f0im 7.005772f0 + 0.0f0im -0.9617417f0 - 1.2486815f0im - 0.3842994f0 - 0.7050991f0im 1.8942566f0 - 0.6389735f0im 1.0858271f0 + 1.298006f0im -0.112077385f0 - 1.2014246f0im 0.048102796f0 + 0.9741874f0im -0.31978557f0 - 0.13701046f0im 1.221786f0 + 0.85654986f0im 0.7103452f0 - 0.8422135f0im -0.9617417f0 + 1.2486815f0im 3.4629636f0 + 0.0f0im] - b = [-0.905011814118756 + 0.2847570854574069im -0.7122162951294634 - 0.630289556702497im - -0.7620356655676837 + 0.15533508334193666im 0.39947219167701153 - 0.4576746001199889im - -0.21782716937787788 - 0.9222220085490986im -0.727775859267237 + 0.50638268521728im - -1.0509472322215125 + 0.5022165705328413im -0.7264975746431271 + 0.31670415674097235im - -0.6650468984506477 - 0.5000967284800251im -0.023682508769195098 + 0.18093440285319276im - -0.20604111555491242 + 0.10570814584017311im 0.562377322638969 - 0.2578030745663871im - -0.3451346708401685 + 1.076948486041297im 0.9870834574024372 - 0.2825689605519449im - 0.25336108035924787 + 0.975317836492159im 0.0628393808469436 - 0.1253397353973715im - 0.11192755545114 - 0.1603741874112385im 0.8439562576196216 + 1.0850814110398734im - -1.0568488936791578 - 0.06025820467086475im 0.12696236014017806 - 0.09853584666755086im] - cholesky(Hermitian(apd, :L), RowMaximum()) \ b - r = cholesky(apd).U - E = abs.(apd - r'*r) - ε = eps(abs(float(one(ComplexF32)))) - n = 10 - for i=1:n, j=1:n - @test E[i,j] <= (n+1)ε/(1-(n+1)ε)*real(sqrt(apd[i,i]*apd[j,j])) - end -end - -@testset "cholesky Diagonal" begin - # real - d = abs.(randn(3)) .+ 0.1 - D = Diagonal(d) - CD = cholesky(D) - CM = cholesky(Matrix(D)) - @test CD isa Cholesky{Float64} - @test CD.U ≈ Diagonal(.√d) ≈ CM.U - @test D ≈ CD.L * CD.U - @test CD.info == 0 - CD = cholesky(D, RowMaximum()) - CM = cholesky(Matrix(D), RowMaximum()) - @test CD isa CholeskyPivoted{Float64} - @test CD.U ≈ Diagonal(.√sort(d, rev=true)) ≈ CM.U - @test D ≈ Matrix(CD) - @test CD.info == 0 - - F = cholesky(Hermitian(I(3))) - @test F isa Cholesky{Float64,<:Diagonal} - @test Matrix(F) ≈ I(3) - F = cholesky(I(3), RowMaximum()) - @test F isa CholeskyPivoted{Float64,<:Diagonal} - @test Matrix(F) ≈ I(3) - - # real, failing - @test_throws PosDefException cholesky(Diagonal([1.0, -2.0])) - @test_throws RankDeficientException 
cholesky(Diagonal([1.0, -2.0]), RowMaximum()) - Dnpd = cholesky(Diagonal([1.0, -2.0]); check = false) - @test Dnpd.info == 2 - Dnpd = cholesky(Diagonal([1.0, -2.0]), RowMaximum(); check = false) - @test Dnpd.info == 1 - @test Dnpd.rank == 1 - - # complex - D = complex(D) - CD = cholesky(Hermitian(D)) - CM = cholesky(Matrix(Hermitian(D))) - @test CD isa Cholesky{ComplexF64,<:Diagonal} - @test CD.U ≈ Diagonal(.√d) ≈ CM.U - @test D ≈ CD.L * CD.U - @test CD.info == 0 - CD = cholesky(D, RowMaximum()) - CM = cholesky(Matrix(D), RowMaximum()) - @test CD isa CholeskyPivoted{ComplexF64,<:Diagonal} - @test CD.U ≈ Diagonal(.√sort(d, by=real, rev=true)) ≈ CM.U - @test D ≈ Matrix(CD) - @test CD.info == 0 - - # complex, failing - D[2, 2] = 0.0 + 0im - @test_throws PosDefException cholesky(D) - @test_throws RankDeficientException cholesky(D, RowMaximum()) - Dnpd = cholesky(D; check = false) - @test Dnpd.info == 2 - Dnpd = cholesky(D, RowMaximum(); check = false) - @test Dnpd.info == 1 - @test Dnpd.rank == 2 - - # InexactError for Int - @test_throws InexactError cholesky!(Diagonal([2, 1])) - - # tolerance - D = Diagonal([0.5, 1]) - @test_throws RankDeficientException cholesky(D, RowMaximum(), tol=nextfloat(0.5)) - CD = cholesky(D, RowMaximum(), tol=nextfloat(0.5), check=false) - @test rank(CD) == 1 - @test !issuccess(CD) - @test Matrix(cholesky(D, RowMaximum(), tol=prevfloat(0.5))) ≈ D -end - -@testset "Cholesky for AbstractMatrix" begin - S = SymTridiagonal(fill(2.0, 4), ones(3)) - C = cholesky(S) - @test C.L * C.U ≈ S -end - -@testset "constructor with non-BlasInt arguments" begin - - x = rand(5,5) - chol = cholesky(x'x) - - factors, uplo, info = chol.factors, chol.uplo, chol.info - - @test Cholesky(factors, uplo, Int32(info)) == chol - @test Cholesky(factors, uplo, Int64(info)) == chol - - cholp = cholesky(x'x, RowMaximum()) - - factors, uplo, piv, rank, tol, info = - cholp.factors, cholp.uplo, cholp.piv, cholp.rank, cholp.tol, cholp.info - - @test CholeskyPivoted(factors, uplo, piv, Int32(rank), tol, info) == cholp - @test CholeskyPivoted(factors, uplo, piv, Int64(rank), tol, info) == cholp - - @test CholeskyPivoted(factors, uplo, piv, rank, tol, Int32(info)) == cholp - @test CholeskyPivoted(factors, uplo, piv, rank, tol, Int64(info)) == cholp - -end - -@testset "issue #33704, casting low-rank CholeskyPivoted to Matrix" begin - A = randn(1,8) - B = A'A - C = cholesky(B, RowMaximum(), check=false) - @test B ≈ Matrix(C) -end - -@testset "CholeskyPivoted and Factorization" begin - A = randn(8,8) - B = A'A - C = cholesky(B, RowMaximum(), check=false) - @test CholeskyPivoted{eltype(C)}(C) === C - @test Factorization{eltype(C)}(C) === C - @test Array(CholeskyPivoted{complex(eltype(C))}(C)) ≈ Array(cholesky(complex(B), RowMaximum(), check=false)) - @test Array(Factorization{complex(eltype(C))}(C)) ≈ Array(cholesky(complex(B), RowMaximum(), check=false)) - @test eltype(Factorization{complex(eltype(C))}(C)) == complex(eltype(C)) -end - -@testset "REPL printing of CholeskyPivoted" begin - A = randn(8,8) - B = A'A - C = cholesky(B, RowMaximum(), check=false) - cholstring = sprint((t, s) -> show(t, "text/plain", s), C) - rankstring = "$(C.uplo) factor with rank $(rank(C)):" - factorstring = sprint((t, s) -> show(t, "text/plain", s), C.uplo == 'U' ? 
C.U : C.L) - permstring = sprint((t, s) -> show(t, "text/plain", s), C.p) - @test cholstring == "$(summary(C))\n$rankstring\n$factorstring\npermutation:\n$permstring" -end - -@testset "destructuring for Cholesky[Pivoted]" begin - for val in (NoPivot(), RowMaximum()) - A = rand(8, 8) - B = A'A - C = cholesky(B, val, check=false) - l, u = C - @test l == C.L - @test u == C.U - end -end - -@testset "issue #37356, diagonal elements of hermitian generic matrix" begin - B = Hermitian(hcat([one(BigFloat) + im])) - @test Matrix(cholesky(B)) ≈ B - C = Hermitian(hcat([one(BigFloat) + im]), :L) - @test Matrix(cholesky(C)) ≈ C -end - -@testset "constructing a Cholesky factor from a triangular matrix" begin - A = [1.0 2.0; 3.0 4.0] - let - U = UpperTriangular(A) - C = Cholesky(U) - @test C isa Cholesky{Float64} - @test C.U == U - @test C.L == U' - end - let - L = LowerTriangular(A) - C = Cholesky(L) - @test C isa Cholesky{Float64} - @test C.L == L - @test C.U == L' - end -end - -@testset "adjoint of Cholesky" begin - A = randn(5, 5) - A = A'A - F = cholesky(A) - b = ones(size(A, 1)) - @test F\b == F'\b -end - -@testset "Float16" begin - A = Float16[4. 12. -16.; 12. 37. -43.; -16. -43. 98.] - B = cholesky(A) - B32 = cholesky(Float32.(A)) - @test B isa Cholesky{Float16, Matrix{Float16}} - @test B.U isa UpperTriangular{Float16, Matrix{Float16}} - @test B.L isa LowerTriangular{Float16, Matrix{Float16}} - @test B.UL isa UpperTriangular{Float16, Matrix{Float16}} - @test B.U ≈ B32.U - @test B.L ≈ B32.L - @test B.UL ≈ B32.UL - @test Matrix(B) ≈ A - B = cholesky(A, RowMaximum()) - B32 = cholesky(Float32.(A), RowMaximum()) - @test B isa CholeskyPivoted{Float16,Matrix{Float16}} - @test B.U isa UpperTriangular{Float16, Matrix{Float16}} - @test B.L isa LowerTriangular{Float16, Matrix{Float16}} - @test B.U ≈ B32.U - @test B.L ≈ B32.L - @test Matrix(B) ≈ A -end - -@testset "det and logdet" begin - A = [4083 3825 5876 2048 4470 5490; - 3825 3575 5520 1920 4200 5140; - 5876 5520 8427 2940 6410 7903; - 2048 1920 2940 1008 2240 2740; - 4470 4200 6410 2240 4875 6015; - 5490 5140 7903 2740 6015 7370] - B = cholesky(A, RowMaximum(), check=false) - @test det(B) == 0.0 - @test det(B) ≈ det(A) atol=eps() - @test logdet(B) == -Inf - @test logabsdet(B)[1] == -Inf -end - -@testset "partly initialized factors" begin - @testset for uplo in ('U', 'L') - M = Matrix{BigFloat}(undef, 2, 2) - M[1,1] = M[2,2] = M[1+(uplo=='L'), 1+(uplo=='U')] = 3 - C = Cholesky(M, uplo, 0) - @test C == C - @test C.L == C.U' - # parameters are arbitrary - C = CholeskyPivoted(M, uplo, [1,2], 2, 0.0, 0) - @test C.L == C.U' - end -end - -@testset "diag" begin - for T in (Float64, ComplexF64), k in (0, 1, -3), uplo in (:U, :L) - A = randn(T, 100, 100) - P = Hermitian(A' * A, uplo) - C = cholesky(P) - @test diag(P, k) ≈ diag(C, k) - end -end - -@testset "cholesky_of_cholesky" begin - for T in (Float64, ComplexF64), uplo in (:U, :L) - A = randn(T, 100, 100) - P = Hermitian(A' * A, uplo) - C = cholesky(P) - CC = cholesky(C) - @test C == CC - end -end - -end # module TestCholesky diff --git a/stdlib/LinearAlgebra/test/dense.jl b/stdlib/LinearAlgebra/test/dense.jl deleted file mode 100644 index a7616e2fc294a..0000000000000 --- a/stdlib/LinearAlgebra/test/dense.jl +++ /dev/null @@ -1,1331 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestDense - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasComplex, BlasFloat, BlasReal - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl")) -import Main.FillArrays - -@testset "Check that non-floats are correctly promoted" begin - @test [1 0 0; 0 1 0]\[1,1] ≈ [1;1;0] -end - -n = 10 - -# Split n into 2 parts for tests needing two matrices -n1 = div(n, 2) -n2 = 2*n1 - -Random.seed!(1234323) - -@testset "Matrix condition number" begin - ainit = rand(n, n) - @testset "for $elty" for elty in (Float32, Float64, ComplexF32, ComplexF64) - ainit = convert(Matrix{elty}, ainit) - for a in (copy(ainit), view(ainit, 1:n, 1:n)) - ainv = inv(a) - @test cond(a, 1) == opnorm(a, 1) *opnorm(ainv, 1) - @test cond(a, Inf) == opnorm(a, Inf)*opnorm(ainv, Inf) - @test cond(a[:, 1:5]) == (\)(extrema(svdvals(a[:, 1:5]))...) - @test_throws ArgumentError cond(a,3) - end - end - @testset "Singular matrices" for p in (1, 2, Inf) - @test cond(zeros(Int, 2, 2), p) == Inf - @test cond(zeros(2, 2), p) == Inf - @test cond([0 0; 1 1], p) == Inf - @test cond([0. 0.; 1. 1.], p) == Inf - end - @testset "Issue #33547, condition number of 2x2 matrix" begin - M = [1.0 -2.0 - -2.0 -1.5] - @test cond(M, 1) ≈ 2.227272727272727 - end - @testset "Condition numbers of a non-random matrix" begin - # To ensure that we detect any regressions in the underlying functions - Mars= [11 24 7 20 3 - 4 12 25 8 16 - 17 5 13 21 9 - 10 18 1 14 22 - 23 6 19 2 15] - @test cond(Mars, 1) ≈ 7.1 - @test cond(Mars, 2) ≈ 6.181867355918493 - @test cond(Mars, Inf) ≈ 7.1 - end -end - -areal = randn(n,n)/2 -aimg = randn(n,n)/2 -a2real = randn(n,n)/2 -a2img = randn(n,n)/2 -breal = randn(n,2)/2 -bimg = randn(n,2)/2 - -@testset "For A containing $eltya" for eltya in (Float32, Float64, ComplexF32, ComplexF64, Int) - ainit = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal) - ainit2 = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(a2real, a2img) : a2real) - ε = εa = eps(abs(float(one(eltya)))) - - apd = ainit'*ainit # symmetric positive-definite - @testset "Positive definiteness" begin - @test !isposdef(ainit) - @test isposdef(apd) - if eltya != Int # cannot perform cholesky! for Matrix{Int} - @test !isposdef!(copy(ainit)) - @test isposdef!(copy(apd)) - end - end - @testset "For b containing $eltyb" for eltyb in (Float32, Float64, ComplexF32, ComplexF64, Int) - binit = eltyb == Int ? rand(1:5, n, 2) : convert(Matrix{eltyb}, eltyb <: Complex ? complex.(breal, bimg) : breal) - εb = eps(abs(float(one(eltyb)))) - ε = max(εa,εb) - for (a, b) in ((copy(ainit), copy(binit)), (view(ainit, 1:n, 1:n), view(binit, 1:n, 1:2))) - @testset "Solve square general system of equations" begin - κ = cond(a,1) - x = a \ b - @test_throws DimensionMismatch b'\b - @test_throws DimensionMismatch b\b' - @test norm(a*x - b, 1)/norm(b) < ε*κ*n*2 # Ad hoc, revisit! 
- @test zeros(eltya,n)\fill(eltya(1),n) ≈ (zeros(eltya,n,1)\fill(eltya(1),n,1))[1,1] - end - - @testset "Test nullspace" begin - a15null = nullspace(a[:,1:n1]') - @test rank([a[:,1:n1] a15null]) == 10 - @test norm(a[:,1:n1]'a15null,Inf) ≈ zero(eltya) atol=300ε - @test norm(a15null'a[:,1:n1],Inf) ≈ zero(eltya) atol=400ε - @test size(nullspace(b), 2) == 0 - @test size(nullspace(b, rtol=0.001), 2) == 0 - @test size(nullspace(b, atol=100*εb), 2) == 0 - @test size(nullspace(b, 100*εb), 2) == 0 - @test nullspace(zeros(eltya,n)) == Matrix(I, 1, 1) - @test nullspace(zeros(eltya,n), 0.1) == Matrix(I, 1, 1) - # test empty cases - @test @inferred(nullspace(zeros(n, 0))) == Matrix(I, 0, 0) - @test @inferred(nullspace(zeros(0, n))) == Matrix(I, n, n) - # test vector cases - @test size(@inferred nullspace(a[:, 1])) == (1, 0) - @test size(@inferred nullspace(zero(a[:, 1]))) == (1, 1) - @test nullspace(zero(a[:, 1]))[1,1] == 1 - # test adjortrans vectors, including empty ones - @test size(@inferred nullspace(a[:, 1]')) == (n, n - 1) - @test @inferred(nullspace(a[1:0, 1]')) == Matrix(I, 0, 0) - @test size(@inferred nullspace(b[1, :]')) == (2, 1) - @test @inferred(nullspace(b[1, 1:0]')) == Matrix(I, 0, 0) - @test size(@inferred nullspace(transpose(a[:, 1]))) == (n, n - 1) - @test size(@inferred nullspace(transpose(b[1, :]))) == (2, 1) - end - end - end # for eltyb - - for (a, a2) in ((copy(ainit), copy(ainit2)), (view(ainit, 1:n, 1:n), view(ainit2, 1:n, 1:n))) - @testset "Test pinv" begin - pinva15 = pinv(a[:,1:n1]) - @test a[:,1:n1]*pinva15*a[:,1:n1] ≈ a[:,1:n1] - @test pinva15*a[:,1:n1]*pinva15 ≈ pinva15 - pinva15 = pinv(a[:,1:n1]') # the Adjoint case - @test a[:,1:n1]'*pinva15*a[:,1:n1]' ≈ a[:,1:n1]' - @test pinva15*a[:,1:n1]'*pinva15 ≈ pinva15 - - @test size(pinv(Matrix{eltya}(undef,0,0))) == (0,0) - end - - @testset "Lyapunov/Sylvester" begin - x = lyap(a, a2) - @test -a2 ≈ a*x + x*a' - y = lyap(a', a2') - @test y ≈ lyap(Array(a'), Array(a2')) - @test -a2' ≈ a'y + y*a - z = lyap(Tridiagonal(a)', Diagonal(a2)) - @test z ≈ lyap(Array(Tridiagonal(a)'), Array(Diagonal(a2))) - @test -Diagonal(a2) ≈ Tridiagonal(a)'*z + z*Tridiagonal(a) - x2 = sylvester(a[1:3, 1:3], a[4:n, 4:n], a2[1:3,4:n]) - @test -a2[1:3, 4:n] ≈ a[1:3, 1:3]*x2 + x2*a[4:n, 4:n] - y2 = sylvester(a[1:3, 1:3]', a[4:n, 4:n]', a2[4:n,1:3]') - @test y2 ≈ sylvester(Array(a[1:3, 1:3]'), Array(a[4:n, 4:n]'), Array(a2[4:n,1:3]')) - @test -a2[4:n, 1:3]' ≈ a[1:3, 1:3]'*y2 + y2*a[4:n, 4:n]' - z2 = sylvester(Tridiagonal(a[1:3, 1:3]), Diagonal(a[4:n, 4:n]), a2[1:3,4:n]) - @test z2 ≈ sylvester(Array(Tridiagonal(a[1:3, 1:3])), Array(Diagonal(a[4:n, 4:n])), Array(a2[1:3,4:n])) - @test -a2[1:3, 4:n] ≈ Tridiagonal(a[1:3, 1:3])*z2 + z2*Diagonal(a[4:n, 4:n]) - end - - @testset "Matrix square root" begin - asq = sqrt(a) - @test asq*asq ≈ a - @test sqrt(transpose(a))*sqrt(transpose(a)) ≈ transpose(a) - @test sqrt(adjoint(a))*sqrt(adjoint(a)) ≈ adjoint(a) - asym = a + a' # symmetric indefinite - asymsq = sqrt(asym) - @test asymsq*asymsq ≈ asym - @test sqrt(transpose(asym))*sqrt(transpose(asym)) ≈ transpose(asym) - @test sqrt(adjoint(asym))*sqrt(adjoint(asym)) ≈ adjoint(asym) - if eltype(a) <: Real # real square root - apos = a * a - @test sqrt(apos)^2 ≈ apos - @test eltype(sqrt(apos)) <: Real - # test that real but Complex input produces Complex output - @test sqrt(complex(apos)) ≈ sqrt(apos) - @test eltype(sqrt(complex(apos))) <: Complex - end - end - - @testset "Powers" begin - if eltya <: AbstractFloat - z = zero(eltya) - t = convert(eltya,2) - r = 
convert(eltya,2.5) - @test a^z ≈ Matrix(I, size(a)) - @test a^t ≈ a^2 - @test Matrix{eltya}(I, n, n)^r ≈ Matrix(I, size(a)) - end - end - end # end for loop over arraytype - - @testset "Factorize" begin - d = rand(eltya,n) - e = rand(eltya,n-1) - e2 = rand(eltya,n-1) - f = rand(eltya,n-2) - A = diagm(0 => d) - @test factorize(A) == Diagonal(d) - A += diagm(-1 => e) - @test factorize(A) == Bidiagonal(d,e,:L) - A += diagm(-2 => f) - @test factorize(A) == LowerTriangular(A) - A = diagm(0 => d, 1 => e) - @test factorize(A) == Bidiagonal(d,e,:U) - if eltya <: Real - A = diagm(0 => d, 1 => e, -1 => e) - @test Matrix(factorize(A)) ≈ Matrix(factorize(SymTridiagonal(d,e))) - A = diagm(0 => d, 1 => e, -1 => e, 2 => f, -2 => f) - @test inv(factorize(A)) ≈ inv(factorize(Symmetric(A))) - end - A = diagm(0 => d, 1 => e, -1 => e2) - @test Matrix(factorize(A)) ≈ Matrix(factorize(Tridiagonal(e2,d,e))) - A = diagm(0 => d, 1 => e, 2 => f) - @test factorize(A) == UpperTriangular(A) - - x = rand(eltya) - @test factorize(x) == x - end -end # for eltya - -@testset "Test diagm for vectors" begin - @test diagm(zeros(50)) == diagm(0 => zeros(50)) - @test diagm(ones(50)) == diagm(0 => ones(50)) - v = randn(500) - @test diagm(v) == diagm(0 => v) - @test diagm(500, 501, v) == diagm(500, 501, 0 => v) -end - -@testset "Non-square diagm" begin - x = [7, 8] - for m=1:4, n=2:4 - if m < 2 || n < 3 - @test_throws DimensionMismatch diagm(m,n, 0 => x, 1 => x) - @test_throws DimensionMismatch diagm(n,m, 0 => x, -1 => x) - else - M = zeros(m,n) - M[1:2,1:3] = [7 7 0; 0 8 8] - @test diagm(m,n, 0 => x, 1 => x) == M - @test diagm(n,m, 0 => x, -1 => x) == M' - end - end -end - -@testset "Test pinv (rtol, atol)" begin - M = [1 0 0; 0 1 0; 0 0 0] - @test pinv(M,atol=1)== zeros(3,3) - @test pinv(M,rtol=0.5)== M -end - -@testset "Test inv of matrix of NaNs" begin - for eltya in (NaN16, NaN32, NaN32) - r = fill(eltya, 2, 2) - @test_throws ArgumentError inv(r) - c = fill(complex(eltya, eltya), 2, 2) - @test_throws ArgumentError inv(c) - end -end - -@testset "test out of bounds triu/tril" begin - local m, n = 5, 7 - ainit = rand(m, n) - for a in (copy(ainit), view(ainit, 1:m, 1:n)) - @test triu(a, -m) == a - @test triu(a, n + 2) == zero(a) - @test tril(a, -m - 2) == zero(a) - @test tril(a, n) == a - end -end - -@testset "triu M > N case bug fix" begin - mat=[1 2; - 3 4; - 5 6; - 7 8] - res=[1 2; - 3 4; - 0 6; - 0 0] - @test triu(mat, -1) == res -end - -@testset "Tests norms" begin - nnorm = 10 - mmat = 10 - nmat = 8 - @testset "For $elty" for elty in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}, Int32, Int64, BigInt) - x = fill(elty(1),10) - @testset "Vector" begin - xs = view(x,1:2:10) - @test norm(x, -Inf) ≈ 1 - @test norm(x, -1) ≈ 1/10 - @test norm(x, 0) ≈ 10 - @test norm(x, 1) ≈ 10 - @test norm(x, 2) ≈ sqrt(10) - @test norm(x, 3) ≈ cbrt(10) - @test norm(x, Inf) ≈ 1 - if elty <: LinearAlgebra.BlasFloat - @test norm(x, 1:4) ≈ 2 - @test_throws BoundsError norm(x,-1:4) - @test_throws BoundsError norm(x,1:11) - end - @test norm(xs, -Inf) ≈ 1 - @test norm(xs, -1) ≈ 1/5 - @test norm(xs, 0) ≈ 5 - @test norm(xs, 1) ≈ 5 - @test norm(xs, 2) ≈ sqrt(5) - @test norm(xs, 3) ≈ cbrt(5) - @test norm(xs, Inf) ≈ 1 - end - - @testset "Issue #12552:" begin - if real(elty) <: AbstractFloat - for p in [-Inf,-1,1,2,3,Inf] - @test isnan(norm(elty[0,NaN],p)) - @test isnan(norm(elty[NaN,0],p)) - end - end - end - - @testset "Number" begin - norm(x[1:1]) === norm(x[1], -Inf) - norm(x[1:1]) === norm(x[1], 0) - norm(x[1:1]) === norm(x[1], 
1) - norm(x[1:1]) === norm(x[1], 2) - norm(x[1:1]) === norm(x[1], Inf) - end - - @testset "Absolute homogeneity, triangle inequality, & vectorized versions" begin - for i = 1:10 - xinit = elty <: Integer ? convert(Vector{elty}, rand(1:10, nnorm)) : - elty <: Complex ? convert(Vector{elty}, complex.(randn(nnorm), randn(nnorm))) : - convert(Vector{elty}, randn(nnorm)) - yinit = elty <: Integer ? convert(Vector{elty}, rand(1:10, nnorm)) : - elty <: Complex ? convert(Vector{elty}, complex.(randn(nnorm), randn(nnorm))) : - convert(Vector{elty}, randn(nnorm)) - α = elty <: Integer ? randn() : - elty <: Complex ? convert(elty, complex(randn(),randn())) : - convert(elty, randn()) - for (x, y) in ((copy(xinit), copy(yinit)), (view(xinit,1:2:nnorm), view(yinit,1:2:nnorm))) - # Absolute homogeneity - @test norm(α*x,-Inf) ≈ abs(α)*norm(x,-Inf) - @test norm(α*x,-1) ≈ abs(α)*norm(x,-1) - @test norm(α*x,1) ≈ abs(α)*norm(x,1) - @test norm(α*x) ≈ abs(α)*norm(x) # two is default - @test norm(α*x,3) ≈ abs(α)*norm(x,3) - @test norm(α*x,Inf) ≈ abs(α)*norm(x,Inf) - - # Triangle inequality - @test norm(x + y,1) <= norm(x,1) + norm(y,1) - @test norm(x + y) <= norm(x) + norm(y) # two is default - @test norm(x + y,3) <= norm(x,3) + norm(y,3) - @test norm(x + y,Inf) <= norm(x,Inf) + norm(y,Inf) - - # Against vectorized versions - @test norm(x,-Inf) ≈ minimum(abs.(x)) - @test norm(x,-1) ≈ inv(sum(1 ./ abs.(x))) - @test norm(x,0) ≈ sum(x .!= 0) - @test norm(x,1) ≈ sum(abs.(x)) - @test norm(x) ≈ sqrt(sum(abs2.(x))) - @test norm(x,3) ≈ cbrt(sum(abs.(x).^3.)) - @test norm(x,Inf) ≈ maximum(abs.(x)) - end - end - end - - @testset "Matrix (Operator) opnorm" begin - A = fill(elty(1),10,10) - As = view(A,1:5,1:5) - @test opnorm(A, 1) ≈ 10 - elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test opnorm(A, 2) ≈ 10 - @test opnorm(A, Inf) ≈ 10 - @test opnorm(As, 1) ≈ 5 - elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test opnorm(As, 2) ≈ 5 - @test opnorm(As, Inf) ≈ 5 - end - - @testset "Absolute homogeneity, triangle inequality, & norm" begin - for i = 1:10 - Ainit = elty <: Integer ? convert(Matrix{elty}, rand(1:10, mmat, nmat)) : - elty <: Complex ? convert(Matrix{elty}, complex.(randn(mmat, nmat), randn(mmat, nmat))) : - convert(Matrix{elty}, randn(mmat, nmat)) - Binit = elty <: Integer ? convert(Matrix{elty}, rand(1:10, mmat, nmat)) : - elty <: Complex ? convert(Matrix{elty}, complex.(randn(mmat, nmat), randn(mmat, nmat))) : - convert(Matrix{elty}, randn(mmat, nmat)) - α = elty <: Integer ? randn() : - elty <: Complex ? convert(elty, complex(randn(),randn())) : - convert(elty, randn()) - for (A, B) in ((copy(Ainit), copy(Binit)), (view(Ainit,1:nmat,1:nmat), view(Binit,1:nmat,1:nmat))) - # Absolute homogeneity - @test norm(α*A,1) ≈ abs(α)*norm(A,1) - elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test norm(α*A) ≈ abs(α)*norm(A) # two is default - @test norm(α*A,Inf) ≈ abs(α)*norm(A,Inf) - - # Triangle inequality - @test norm(A + B,1) <= norm(A,1) + norm(B,1) - elty <: Union{BigFloat,Complex{BigFloat},BigInt} || @test norm(A + B) <= norm(A) + norm(B) # two is default - @test norm(A + B,Inf) <= norm(A,Inf) + norm(B,Inf) - - # norm - for p in (-Inf, Inf, (-2:3)...) - @test norm(A, p) == norm(vec(A), p) - end - end - end - - @testset "issue #10234" begin - if elty <: AbstractFloat || elty <: Complex - z = zeros(elty, 100) - z[1] = -Inf - for p in [-2,-1.5,-1,-0.5,0.5,1,1.5,2,Inf] - @test norm(z, p) == (p < 0 ? 
0 : Inf) - @test norm(elty[Inf],p) == Inf - end - end - end - end - end - - @testset "issue #10234" begin - @test norm(Any[Inf],-2) == norm(Any[Inf],-1) == norm(Any[Inf],1) == norm(Any[Inf],1.5) == norm(Any[Inf],2) == norm(Any[Inf],Inf) == Inf - end - - @testset "overflow/underflow in norms" begin - @test norm(Float64[1e-300, 1], -3)*1e300 ≈ 1 - @test norm(Float64[1e300, 1], 3)*1e-300 ≈ 1 - end -end - -## Issue related tests -@testset "issue #1447" begin - A = [1.0+0.0im 0; 0 1] - B = pinv(A) - for i = 1:4 - @test A[i] ≈ B[i] - end -end - -@testset "issue #2246" begin - A = [1 2 0 0; 0 1 0 0; 0 0 0 0; 0 0 0 0] - Asq = sqrt(A) - @test Asq*Asq ≈ A - A2 = view(A, 1:2, 1:2) - A2sq = sqrt(A2) - @test A2sq*A2sq ≈ A2 - - N = 3 - @test log(det(Matrix(1.0I, N, N))) ≈ logdet(Matrix(1.0I, N, N)) -end - -@testset "issue #2637" begin - a = [1, 2, 3] - b = [4, 5, 6] - @test kron(Matrix(I, 2, 2), Matrix(I, 2, 2)) == Matrix(I, 4, 4) - @test kron(a,b) == [4,5,6,8,10,12,12,15,18] - @test kron(a',b') == [4 5 6 8 10 12 12 15 18] - @test kron(a,b') == [4 5 6; 8 10 12; 12 15 18] - @test kron(a',b) == [4 8 12; 5 10 15; 6 12 18] - @test kron(a, Matrix(1I, 2, 2)) == [1 0; 0 1; 2 0; 0 2; 3 0; 0 3] - @test kron(Matrix(1I, 2, 2), a) == [ 1 0; 2 0; 3 0; 0 1; 0 2; 0 3] - @test kron(Matrix(1I, 2, 2), 2) == Matrix(2I, 2, 2) - @test kron(3, Matrix(1I, 3, 3)) == Matrix(3I, 3, 3) - @test kron(a,2) == [2, 4, 6] - @test kron(b',2) == [8 10 12] -end - -@testset "kron!" begin - a = [1.0, 0.0] - b = [0.0, 1.0] - @test kron!([1.0, 0.0], b, 0.5) == [0.0; 0.5] - @test kron!([1.0, 0.0], 0.5, b) == [0.0; 0.5] - c = Vector{Float64}(undef, 4) - kron!(c, a, b) - @test c == [0.0; 1.0; 0.0; 0.0] - c = Matrix{Float64}(undef, 2, 2) - kron!(c, a, b') - @test c == [0.0 1.0; 0.0 0.0] -end - -@testset "kron adjoint" begin - a = [1+im, 2, 3] - b = [4, 5, 6+7im] - @test kron(a', b') isa Adjoint - @test kron(a', b') == kron(a, b)' - @test kron(transpose(a), b') isa Transpose - @test kron(transpose(a), b') == kron(permutedims(a), collect(b')) - @test kron(transpose(a), transpose(b)) isa Transpose - @test kron(transpose(a), transpose(b)) == transpose(kron(a, b)) -end - -@testset "issue #4796" begin - dim=2 - S=zeros(Complex,dim,dim) - T=zeros(Complex,dim,dim) - fill!(T, 1) - z = 2.5 + 1.5im - S[1] = z - @test S*T == [z z; 0 0] - - # similar issue for Array{Real} - @test Real[1 2] * Real[1.5; 2.0] == Real[5.5] -end - -@testset "Matrix exponential" begin - @testset "Tests for $elty" for elty in (Float32, Float64, ComplexF32, ComplexF64) - A1 = convert(Matrix{elty}, [4 2 0; 1 4 1; 1 1 4]) - eA1 = convert(Matrix{elty}, [147.866622446369 127.781085523181 127.781085523182; - 183.765138646367 183.765138646366 163.679601723179; - 71.797032399996 91.8825693231832 111.968106246371]') - @test exp(A1) ≈ eA1 - @test exp(adjoint(A1)) ≈ adjoint(eA1) - @test exp(transpose(A1)) ≈ transpose(eA1) - for f in (sin, cos, sinh, cosh, tanh, tan) - @test f(adjoint(A1)) ≈ f(copy(adjoint(A1))) - end - - A2 = convert(Matrix{elty}, - [29.87942128909879 0.7815750847907159 -2.289519314033932; - 0.7815750847907159 25.72656945571064 8.680737820540137; - -2.289519314033932 8.680737820540137 34.39400925519054]) - eA2 = convert(Matrix{elty}, - [ 5496313853692458.0 -18231880972009236.0 -30475770808580460.0; - -18231880972009252.0 60605228702221920.0 101291842930249760.0; - -30475770808580480.0 101291842930249728.0 169294411240851968.0]) - @test exp(A2) ≈ eA2 - @test exp(adjoint(A2)) ≈ adjoint(eA2) - @test exp(transpose(A2)) ≈ transpose(eA2) - - A3 = convert(Matrix{elty}, [-131 19 
18;-390 56 54;-387 57 52]) - eA3 = convert(Matrix{elty}, [-1.50964415879218 -5.6325707998812 -4.934938326092; - 0.367879439109187 1.47151775849686 1.10363831732856; - 0.135335281175235 0.406005843524598 0.541341126763207]') - @test exp(A3) ≈ eA3 - @test exp(adjoint(A3)) ≈ adjoint(eA3) - @test exp(transpose(A3)) ≈ transpose(eA3) - - A4 = convert(Matrix{elty}, [0.25 0.25; 0 0]) - eA4 = convert(Matrix{elty}, [1.2840254166877416 0.2840254166877415; 0 1]) - @test exp(A4) ≈ eA4 - @test exp(adjoint(A4)) ≈ adjoint(eA4) - @test exp(transpose(A4)) ≈ transpose(eA4) - - A5 = convert(Matrix{elty}, [0 0.02; 0 0]) - eA5 = convert(Matrix{elty}, [1 0.02; 0 1]) - @test exp(A5) ≈ eA5 - @test exp(adjoint(A5)) ≈ adjoint(eA5) - @test exp(transpose(A5)) ≈ transpose(eA5) - - # Hessenberg - @test hessenberg(A1).H ≈ convert(Matrix{elty}, - [4.000000000000000 -1.414213562373094 -1.414213562373095 - -1.414213562373095 4.999999999999996 -0.000000000000000 - 0 -0.000000000000002 3.000000000000000]) - - # cis always returns a complex matrix - if elty <: Real - eltyim = Complex{elty} - else - eltyim = elty - end - - @test cis(A1) ≈ convert(Matrix{eltyim}, [-0.339938 + 0.000941506im 0.772659 - 0.8469im 0.52745 + 0.566543im; - 0.650054 - 0.140179im -0.0762135 + 0.284213im 0.38633 - 0.42345im ; - 0.650054 - 0.140179im 0.913779 + 0.143093im -0.603663 - 0.28233im ]) rtol=7e-7 - end - - @testset "Additional tests for $elty" for elty in (Float64, ComplexF64) - A4 = convert(Matrix{elty}, [1/2 1/3 1/4 1/5+eps(); - 1/3 1/4 1/5 1/6; - 1/4 1/5 1/6 1/7; - 1/5 1/6 1/7 1/8]) - @test exp(log(A4)) ≈ A4 - @test exp(log(transpose(A4))) ≈ transpose(A4) - @test exp(log(adjoint(A4))) ≈ adjoint(A4) - - A5 = convert(Matrix{elty}, [1 1 0 1; 0 1 1 0; 0 0 1 1; 1 0 0 1]) - @test exp(log(A5)) ≈ A5 - @test exp(log(transpose(A5))) ≈ transpose(A5) - @test exp(log(adjoint(A5))) ≈ adjoint(A5) - - A6 = convert(Matrix{elty}, [-5 2 0 0 ; 1/2 -7 3 0; 0 1/3 -9 4; 0 0 1/4 -11]) - @test exp(log(A6)) ≈ A6 - @test exp(log(transpose(A6))) ≈ transpose(A6) - @test exp(log(adjoint(A6))) ≈ adjoint(A6) - - A7 = convert(Matrix{elty}, [1 0 0 1e-8; 0 1 0 0; 0 0 1 0; 0 0 0 1]) - @test exp(log(A7)) ≈ A7 - @test exp(log(transpose(A7))) ≈ transpose(A7) - @test exp(log(adjoint(A7))) ≈ adjoint(A7) - end - - @testset "Integer promotion tests" begin - for (elty1, elty2) in ((Int64, Float64), (Complex{Int64}, ComplexF64)) - A4int = convert(Matrix{elty1}, [1 2; 3 4]) - A4float = convert(Matrix{elty2}, A4int) - @test exp(A4int) == exp(A4float) - end - end - - @testset "^ tests" for elty in (Float32, Float64, ComplexF32, ComplexF64, Int32, Int64) - # should all be exact as the lhs functions are simple aliases - @test ℯ^(fill(elty(2), (4,4))) == exp(fill(elty(2), (4,4))) - @test 2^(fill(elty(2), (4,4))) == exp(log(2)*fill(elty(2), (4,4))) - @test 2.0^(fill(elty(2), (4,4))) == exp(log(2.0)*fill(elty(2), (4,4))) - end - - A8 = 100 * [-1+1im 0 0 1e-8; 0 1 0 0; 0 0 1 0; 0 0 0 1] - @test exp(log(A8)) ≈ A8 -end - -@testset "Matrix trigonometry" begin - @testset "Tests for $elty" for elty in (Float32, Float64, ComplexF32, ComplexF64) - A1 = convert(Matrix{elty}, [3 2 0; 1 3 1; 1 1 3]) - A2 = convert(Matrix{elty}, - [3.975884257819758 0.15631501695814318 -0.4579038628067864; - 0.15631501695814318 4.545313891142127 1.7361475641080275; - -0.4579038628067864 1.7361475641080275 6.478801851038108]) - A3 = convert(Matrix{elty}, [0.25 0.25; 0 0]) - A4 = convert(Matrix{elty}, [0 0.02; 0 0]) - A5 = convert(Matrix{elty}, [2.0 0; 0 3.0]) - - cosA1 = convert(Matrix{elty},[-0.18287716254368605 
-0.29517205254584633 0.761711400552759; - 0.23326967400345625 0.19797853773269333 -0.14758602627292305; - 0.23326967400345636 0.6141253742798355 -0.5637328628200653]) - sinA1 = convert(Matrix{elty}, [0.2865568596627417 -1.107751980582015 -0.13772915374386513; - -0.6227405671629401 0.2176922827908092 -0.5538759902910078; - -0.6227405671629398 -0.6916051440348725 0.3554214365346742]) - @test @inferred(cos(A1)) ≈ cosA1 - @test @inferred(sin(A1)) ≈ sinA1 - - cosA2 = convert(Matrix{elty}, [-0.6331745163802187 0.12878366262380136 -0.17304181968301532; - 0.12878366262380136 -0.5596234510748788 0.5210483146041339; - -0.17304181968301532 0.5210483146041339 0.002263776356015268]) - sinA2 = convert(Matrix{elty},[-0.6677253518411841 -0.32599318928375437 0.020799609079003523; - -0.32599318928375437 -0.04568726058081066 0.5388748740270427; - 0.020799609079003523 0.5388748740270427 0.6385462428126032]) - @test cos(A2) ≈ cosA2 - @test sin(A2) ≈ sinA2 - - cosA3 = convert(Matrix{elty}, [0.9689124217106446 -0.031087578289355197; 0.0 1.0]) - sinA3 = convert(Matrix{elty}, [0.24740395925452285 0.24740395925452285; 0.0 0.0]) - @test cos(A3) ≈ cosA3 - @test sin(A3) ≈ sinA3 - - cosA4 = convert(Matrix{elty}, [1.0 0.0; 0.0 1.0]) - sinA4 = convert(Matrix{elty}, [0.0 0.02; 0.0 0.0]) - @test cos(A4) ≈ cosA4 - @test sin(A4) ≈ sinA4 - - # Identities - for (i, A) in enumerate((A1, A2, A3, A4, A5)) - @test @inferred(sincos(A)) == (sin(A), cos(A)) - @test cos(A)^2 + sin(A)^2 ≈ Matrix(I, size(A)) - @test cos(A) ≈ cos(-A) - @test sin(A) ≈ -sin(-A) - @test @inferred(tan(A)) ≈ sin(A) / cos(A) - - @test cos(A) ≈ real(exp(im*A)) - @test sin(A) ≈ imag(exp(im*A)) - @test cos(A) ≈ real(cis(A)) - @test sin(A) ≈ imag(cis(A)) - @test @inferred(cis(A)) ≈ cos(A) + im * sin(A) - - @test @inferred(cosh(A)) ≈ 0.5 * (exp(A) + exp(-A)) - @test @inferred(sinh(A)) ≈ 0.5 * (exp(A) - exp(-A)) - @test @inferred(cosh(A)) ≈ cosh(-A) - @test @inferred(sinh(A)) ≈ -sinh(-A) - - # Some of the following identities fail for A3, A4 because the matrices are singular - if i in (1, 2, 5) - @test @inferred(sec(A)) ≈ inv(cos(A)) - @test @inferred(csc(A)) ≈ inv(sin(A)) - @test @inferred(cot(A)) ≈ inv(tan(A)) - @test @inferred(sech(A)) ≈ inv(cosh(A)) - @test @inferred(csch(A)) ≈ inv(sinh(A)) - @test @inferred(coth(A)) ≈ inv(@inferred tanh(A)) - end - # The following identities fail for A1, A2 due to rounding errors; - # probably needs better algorithm for the general case - if i in (3, 4, 5) - @test cosh(A)^2 - sinh(A)^2 ≈ Matrix(I, size(A)) - @test tanh(A) ≈ sinh(A) / cosh(A) - end - end - end - - @testset "Additional tests for $elty" for elty in (ComplexF32, ComplexF64) - A5 = convert(Matrix{elty}, [1im 2; 0.02+0.5im 3]) - - @test sincos(A5) == (sin(A5), cos(A5)) - - @test cos(A5)^2 + sin(A5)^2 ≈ Matrix(I, size(A5)) - @test cosh(A5)^2 - sinh(A5)^2 ≈ Matrix(I, size(A5)) - @test cos(A5)^2 + sin(A5)^2 ≈ Matrix(I, size(A5)) - @test tan(A5) ≈ sin(A5) / cos(A5) - @test tanh(A5) ≈ sinh(A5) / cosh(A5) - - @test sec(A5) ≈ inv(cos(A5)) - @test csc(A5) ≈ inv(sin(A5)) - @test cot(A5) ≈ inv(tan(A5)) - @test sech(A5) ≈ inv(cosh(A5)) - @test csch(A5) ≈ inv(sinh(A5)) - @test coth(A5) ≈ inv(tanh(A5)) - - @test cos(A5) ≈ 0.5 * (exp(im*A5) + exp(-im*A5)) - @test sin(A5) ≈ -0.5im * (exp(im*A5) - exp(-im*A5)) - @test cos(A5) ≈ 0.5 * (cis(A5) + cis(-A5)) - @test sin(A5) ≈ -0.5im * (cis(A5) - cis(-A5)) - - @test cosh(A5) ≈ 0.5 * (exp(A5) + exp(-A5)) - @test sinh(A5) ≈ 0.5 * (exp(A5) - exp(-A5)) - end - - @testset "Additional tests for $elty" for elty in (Int32, Int64, Complex{Int32}, 
Complex{Int64}) - A1 = convert(Matrix{elty}, [1 2; 3 4]) - A2 = convert(Matrix{elty}, [1 2; 2 1]) - - cosA1 = convert(Matrix{float(elty)}, [0.855423165077998 -0.11087638101074865; - -0.16631457151612294 0.689108593561875]) - cosA2 = convert(Matrix{float(elty)}, [-0.22484509536615283 -0.7651474012342925; - -0.7651474012342925 -0.22484509536615283]) - - @test cos(A1) ≈ cosA1 - @test cos(A2) ≈ cosA2 - - sinA1 = convert(Matrix{float(elty)}, [-0.46558148631373036 -0.14842445991317652; - -0.22263668986976476 -0.6882181761834951]) - sinA2 = convert(Matrix{float(elty)}, [-0.3501754883740146 0.4912954964338818; - 0.4912954964338818 -0.3501754883740146]) - - @test sin(A1) ≈ sinA1 - @test sin(A2) ≈ sinA2 - end - - @testset "Inverse functions for $elty" for elty in (Float32, Float64) - A1 = convert(Matrix{elty}, [0.244637 -0.63578; - 0.22002 0.189026]) - A2 = convert(Matrix{elty}, [1.11656 -0.098672 0.158485; - -0.098672 0.100933 -0.107107; - 0.158485 -0.107107 0.612404]) - - for A in (A1, A2) - @test cos(acos(cos(A))) ≈ cos(A) - @test sin(asin(sin(A))) ≈ sin(A) - @test tan(atan(tan(A))) ≈ tan(A) - @test cosh(acosh(cosh(A))) ≈ cosh(A) - @test sinh(asinh(sinh(A))) ≈ sinh(A) - @test tanh(atanh(tanh(A))) ≈ tanh(A) - @test sec(asec(sec(A))) ≈ sec(A) - @test csc(acsc(csc(A))) ≈ csc(A) - @test cot(acot(cot(A))) ≈ cot(A) - @test sech(asech(sech(A))) ≈ sech(A) - @test csch(acsch(csch(A))) ≈ csch(A) - @test coth(acoth(coth(A))) ≈ coth(A) - end - end - - @testset "Inverse functions for $elty" for elty in (ComplexF32, ComplexF64) - A1 = convert(Matrix{elty}, [ 0.143721-0.0im -0.138386-0.106905im; - -0.138386+0.106905im 0.306224-0.0im]) - A2 = convert(Matrix{elty}, [1im 2; 0.02+0.5im 3]) - A3 = convert(Matrix{elty}, [0.138721-0.266836im 0.0971722-0.13715im 0.205046-0.137136im; - -0.0154974-0.00358254im 0.152163-0.445452im 0.0314575-0.536521im; - -0.387488+0.0294059im -0.0448773+0.114305im 0.230684-0.275894im]) - for A in (A1, A2, A3) - @test cos(acos(cos(A))) ≈ cos(A) - @test sin(asin(sin(A))) ≈ sin(A) - @test tan(atan(tan(A))) ≈ tan(A) - @test cosh(acosh(cosh(A))) ≈ cosh(A) - @test sinh(asinh(sinh(A))) ≈ sinh(A) - @test tanh(atanh(tanh(A))) ≈ tanh(A) - @test sec(asec(sec(A))) ≈ sec(A) - @test csc(acsc(csc(A))) ≈ csc(A) - @test cot(acot(cot(A))) ≈ cot(A) - @test sech(asech(sech(A))) ≈ sech(A) - @test csch(acsch(csch(A))) ≈ csch(A) - @test coth(acoth(coth(A))) ≈ coth(A) - - # Definition of principal values (Aprahamian & Higham, 2016, pp. 4-5) - abstol = sqrt(eps(real(elty))) * norm(acosh(A)) - @test all(z -> (0 < real(z) < π || - abs(real(z)) < abstol && imag(z) >= 0 || - abs(real(z) - π) < abstol && imag(z) <= 0), - eigen(acos(A)).values) - @test all(z -> (-π/2 < real(z) < π/2 || - abs(real(z) + π/2) < abstol && imag(z) >= 0 || - abs(real(z) - π/2) < abstol && imag(z) <= 0), - eigen(asin(A)).values) - @test all(z -> (-π < imag(z) < π && real(z) > 0 || - 0 <= imag(z) < π && abs(real(z)) < abstol || - abs(imag(z) - π) < abstol && real(z) >= 0), - eigen(acosh(A)).values) - @test all(z -> (-π/2 < imag(z) < π/2 || - abs(imag(z) + π/2) < abstol && real(z) <= 0 || - abs(imag(z) - π/2) < abstol && real(z) <= 0), - eigen(asinh(A)).values) - end - end -end - -@testset "issue 5116" begin - A9 = [0 10 0 0; -1 0 0 0; 0 0 0 0; -2 0 0 0] - eA9 = [-0.999786072879326 -0.065407069689389 0.0 0.0 - 0.006540706968939 -0.999786072879326 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.013081413937878 -3.999572145758650 0.0 1.0] - @test exp(A9) ≈ eA9 - - A10 = [ 0. 0. 0. 0. ; 0. 0. -im 0.; 0. im 0. 0.; 0. 0. 0. 0.] 
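A note on the expected value `eA10` just below: the nontrivial 2×2 block of `A10` is the Hermitian matrix `[0 -im; im 0]`, which squares to the identity, so its exponential is `cosh(1)*I + sinh(1)*[0 -im; im 0]`; that is where the constants 1.543080634815244 (= cosh(1)) and 1.175201193643801 (= sinh(1)) come from. A minimal sketch of that identity:

```julia
using LinearAlgebra

B = ComplexF64[0 -im; im 0]          # B^2 == I, so exp(B) == cosh(1)*I + sinh(1)*B
exp(B) ≈ cosh(1) * I + sinh(1) * B
```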
- eA10 = [ 1.0+0.0im 0.0+0.0im 0.0+0.0im 0.0+0.0im - 0.0+0.0im 1.543080634815244+0.0im 0.0-1.175201193643801im 0.0+0.0im - 0.0+0.0im 0.0+1.175201193643801im 1.543080634815243+0.0im 0.0+0.0im - 0.0+0.0im 0.0+0.0im 0.0+0.0im 1.0+0.0im] - @test exp(A10) ≈ eA10 -end - -@testset "Additional matrix logarithm tests" for elty in (Float64, ComplexF64) - A11 = convert(Matrix{elty}, [3 2; -5 -3]) - @test exp(log(A11)) ≈ A11 - - A13 = convert(Matrix{elty}, [2 0; 0 2]) - @test typeof(log(A13)) == Array{elty, 2} - - T = elty == Float64 ? Symmetric : Hermitian - @test typeof(log(T(A13))) == T{elty, Array{elty, 2}} - - A1 = convert(Matrix{elty}, [4 2 0; 1 4 1; 1 1 4]) - logA1 = convert(Matrix{elty}, [1.329661349 0.5302876358 -0.06818951543; - 0.2310490602 1.295566591 0.2651438179; - 0.2310490602 0.1969543025 1.363756107]) - @test log(A1) ≈ logA1 - @test exp(log(A1)) ≈ A1 - @test typeof(log(A1)) == Matrix{elty} - - A4 = convert(Matrix{elty}, [1/2 1/3 1/4 1/5+eps(); - 1/3 1/4 1/5 1/6; - 1/4 1/5 1/6 1/7; - 1/5 1/6 1/7 1/8]) - logA4 = convert(Matrix{elty}, [-1.73297159 1.857349738 0.4462766564 0.2414170219; - 1.857349738 -5.335033737 2.994142974 0.5865285289; - 0.4462766564 2.994142974 -7.351095988 3.318413247; - 0.2414170219 0.5865285289 3.318413247 -5.444632124]) - @test log(A4) ≈ logA4 - @test exp(log(A4)) ≈ A4 - @test typeof(log(A4)) == Matrix{elty} - - # real triu matrix - A5 = convert(Matrix{elty}, [1 2 3; 0 4 5; 0 0 6]) # triu - logA5 = convert(Matrix{elty}, [0.0 0.9241962407465937 0.5563245488984037; - 0.0 1.3862943611198906 1.0136627702704109; - 0.0 0.0 1.791759469228055]) - @test log(A5) ≈ logA5 - @test exp(log(A5)) ≈ A5 - @test typeof(log(A5)) == Matrix{elty} - - # real quasitriangular schur form with 2 2x2 blocks, 2 1x1 blocks, and all positive eigenvalues - A6 = convert(Matrix{elty}, [2 3 2 2 3 1; - 1 3 3 2 3 1; - 3 3 3 1 1 2; - 2 1 2 2 2 2; - 1 1 2 2 3 1; - 2 2 2 2 1 3]) - @test exp(log(A6)) ≈ A6 - @test typeof(log(A6)) == Matrix{elty} - - # real quasitriangular schur form with a negative eigenvalue - A7 = convert(Matrix{elty}, [1 3 3 2 2 2; - 1 2 1 3 1 2; - 3 1 2 3 2 1; - 3 1 2 2 2 1; - 3 1 3 1 2 1; - 1 1 3 1 1 3]) - @test exp(log(A7)) ≈ A7 - @test typeof(log(A7)) == Matrix{complex(elty)} - - if elty <: Complex - A8 = convert(Matrix{elty}, [1 + 1im 1 + 1im 1 - 1im; - 1 + 1im -1 + 1im 1 + 1im; - 1 - 1im 1 + 1im -1 - 1im]) - logA8 = convert( - Matrix{elty}, - [0.9478628953131517 + 1.3725201223387407im -0.2547157147532057 + 0.06352318334299434im 0.8560050197863862 - 1.0471975511965979im; - -0.2547157147532066 + 0.06352318334299467im -0.16285783922644065 + 0.2617993877991496im 0.2547157147532063 + 2.1579182857361894im; - 0.8560050197863851 - 1.0471975511965974im 0.25471571475320665 + 2.1579182857361903im 0.9478628953131519 - 0.8489213467404436im], - ) - @test log(A8) ≈ logA8 - @test exp(log(A8)) ≈ A8 - @test typeof(log(A8)) == Matrix{elty} - end -end - -@testset "matrix logarithm is type-inferable" for elty in (Float32,Float64,ComplexF32,ComplexF64) - A1 = randn(elty, 4, 4) - @inferred Union{Matrix{elty},Matrix{complex(elty)}} log(A1) -end - -@testset "Additional matrix square root tests" for elty in (Float64, ComplexF64) - A11 = convert(Matrix{elty}, [3 2; -5 -3]) - @test sqrt(A11)^2 ≈ A11 - - A13 = convert(Matrix{elty}, [2 0; 0 2]) - @test typeof(sqrt(A13)) == Array{elty, 2} - - T = elty == Float64 ? 
Symmetric : Hermitian - @test typeof(sqrt(T(A13))) == T{elty, Array{elty, 2}} - - A1 = convert(Matrix{elty}, [4 2 0; 1 4 1; 1 1 4]) - sqrtA1 = convert(Matrix{elty}, [1.971197119306979 0.5113118387140085 -0.03301921523780871; - 0.23914631173809942 1.9546875116880718 0.2556559193570036; - 0.23914631173810008 0.22263670411919556 1.9877067269258815]) - @test sqrt(A1) ≈ sqrtA1 - @test sqrt(A1)^2 ≈ A1 - @test typeof(sqrt(A1)) == Matrix{elty} - - A4 = convert(Matrix{elty}, [1/2 1/3 1/4 1/5+eps(); - 1/3 1/4 1/5 1/6; - 1/4 1/5 1/6 1/7; - 1/5 1/6 1/7 1/8]) - sqrtA4 = convert( - Matrix{elty}, - [0.590697761556362 0.3055006800405779 0.19525404749300546 0.14007621469988107; - 0.30550068004057784 0.2825388389385975 0.21857572599211642 0.17048692323164674; - 0.19525404749300565 0.21857572599211622 0.21155429252242863 0.18976816626246887; - 0.14007621469988046 0.17048692323164724 0.1897681662624689 0.20075085592778794], - ) - @test sqrt(A4) ≈ sqrtA4 - @test sqrt(A4)^2 ≈ A4 - @test typeof(sqrt(A4)) == Matrix{elty} - - # real triu matrix - A5 = convert(Matrix{elty}, [1 2 3; 0 4 5; 0 0 6]) # triu - sqrtA5 = convert(Matrix{elty}, [1.0 0.6666666666666666 0.6525169217864183; - 0.0 2.0 1.1237243569579454; - 0.0 0.0 2.449489742783178]) - @test sqrt(A5) ≈ sqrtA5 - @test sqrt(A5)^2 ≈ A5 - @test typeof(sqrt(A5)) == Matrix{elty} - - # real quasitriangular schur form with 2 2x2 blocks, 2 1x1 blocks, and all positive eigenvalues - A6 = convert(Matrix{elty}, [2 3 2 2 3 1; - 1 3 3 2 3 1; - 3 3 3 1 1 2; - 2 1 2 2 2 2; - 1 1 2 2 3 1; - 2 2 2 2 1 3]) - @test sqrt(A6)^2 ≈ A6 - @test typeof(sqrt(A6)) == Matrix{elty} - - # real quasitriangular schur form with a negative eigenvalue - A7 = convert(Matrix{elty}, [1 3 3 2 2 2; - 1 2 1 3 1 2; - 3 1 2 3 2 1; - 3 1 2 2 2 1; - 3 1 3 1 2 1; - 1 1 3 1 1 3]) - @test sqrt(A7)^2 ≈ A7 - @test typeof(sqrt(A7)) == Matrix{complex(elty)} - - if elty <: Complex - A8 = convert(Matrix{elty}, [1 + 1im 1 + 1im 1 - 1im; - 1 + 1im -1 + 1im 1 + 1im; - 1 - 1im 1 + 1im -1 - 1im]) - sqrtA8 = convert( - Matrix{elty}, - [1.2559748527474284 + 0.6741878819930323im 0.20910077991005582 + 0.24969165051825476im 0.591784212275146 - 0.6741878819930327im; - 0.2091007799100553 + 0.24969165051825515im 0.3320953202361413 + 0.2915044496279425im 0.33209532023614136 + 1.0568713143581219im; - 0.5917842122751455 - 0.674187881993032im 0.33209532023614147 + 1.0568713143581223im 0.7147787526012315 - 0.6323750828833452im], - ) - @test sqrt(A8) ≈ sqrtA8 - @test sqrt(A8)^2 ≈ A8 - @test typeof(sqrt(A8)) == Matrix{elty} - end -end - -@testset "issue #40141" begin - x = [-1 -eps() 0 0; eps() -1 0 0; 0 0 -1 -eps(); 0 0 eps() -1] - @test sqrt(x)^2 ≈ x - - x2 = [-1 -eps() 0 0; 3eps() -1 0 0; 0 0 -1 -3eps(); 0 0 eps() -1] - @test sqrt(x2)^2 ≈ x2 - - x3 = [-1 -eps() 0 0; eps() -1 0 0; 0 0 -1 -eps(); 0 0 eps() Inf] - @test all(isnan, sqrt(x3)) - - # test overflow/underflow handled - x4 = [0 -1e200; 1e200 0] - @test sqrt(x4)^2 ≈ x4 - - x5 = [0 -1e-200; 1e-200 0] - @test sqrt(x5)^2 ≈ x5 - - x6 = [1.0 1e200; -1e-200 1.0] - @test sqrt(x6)^2 ≈ x6 -end - -@testset "matrix logarithm block diagonal underflow/overflow" begin - x1 = [0 -1e200; 1e200 0] - @test exp(log(x1)) ≈ x1 - - x2 = [0 -1e-200; 1e-200 0] - @test exp(log(x2)) ≈ x2 - - x3 = [1.0 1e200; -1e-200 1.0] - @test exp(log(x3)) ≈ x3 -end - -@testset "issue #7181" begin - A = [ 1 5 9 - 2 6 10 - 3 7 11 - 4 8 12 ] - @test diag(A,-5) == [] - @test diag(A,-4) == [] - @test diag(A,-3) == [4] - @test diag(A,-2) == [3,8] - @test diag(A,-1) == [2,7,12] - @test diag(A, 0) == [1,6,11] - @test 
diag(A, 1) == [5,10] - @test diag(A, 2) == [9] - @test diag(A, 3) == [] - @test diag(A, 4) == [] - - @test diag(zeros(0,0)) == [] - @test diag(zeros(0,0),1) == [] - @test diag(zeros(0,0),-1) == [] - - @test diag(zeros(1,0)) == [] - @test diag(zeros(1,0),-1) == [] - @test diag(zeros(1,0),1) == [] - @test diag(zeros(1,0),-2) == [] - - @test diag(zeros(0,1)) == [] - @test diag(zeros(0,1),1) == [] - @test diag(zeros(0,1),-1) == [] - @test diag(zeros(0,1),2) == [] -end - -@testset "diagview" begin - for sz in ((3,3), (3,5), (5,3)) - A = rand(sz...) - for k in -5:5 - @test diagview(A,k) == diag(A,k) - end - end -end - -@testset "issue #39857" begin - @test lyap(1.0+2.0im, 3.0+4.0im) == -1.5 - 2.0im -end - -@testset "$elty Matrix to real power" for elty in (Float64, ComplexF64) - # Tests proposed at Higham, Deadman: Testing Matrix Function Algorithms Using Identities, March 2014 - #Aa : only positive real eigenvalues - Aa = convert(Matrix{elty}, [5 4 2 1; 0 1 -1 -1; -1 -1 3 0; 1 1 -1 2]) - - #Ab : both positive and negative real eigenvalues - Ab = convert(Matrix{elty}, [1 2 3; 4 7 1; 2 1 4]) - - #Ac : complex eigenvalues - Ac = convert(Matrix{elty}, [5 4 2 1;0 1 -1 -1;-1 -1 3 6;1 1 -1 5]) - - #Ad : defective Matrix - Ad = convert(Matrix{elty}, [3 1; 0 3]) - - #Ah : Hermitian Matrix - Ah = convert(Matrix{elty}, [3 1; 1 3]) - if elty <: LinearAlgebra.BlasComplex - Ah += [0 im; -im 0] - end - - #ADi : Diagonal Matrix - ADi = convert(Matrix{elty}, [3 0; 0 3]) - if elty <: LinearAlgebra.BlasComplex - ADi += [im 0; 0 im] - end - - for A in (Aa, Ab, Ac, Ad, Ah, ADi) - @test A^(1/2) ≈ sqrt(A) - @test A^(-1/2) ≈ inv(sqrt(A)) - @test A^(3/4) ≈ sqrt(A) * sqrt(sqrt(A)) - @test A^(-3/4) ≈ inv(A) * sqrt(sqrt(A)) - @test A^(17/8) ≈ A^2 * sqrt(sqrt(sqrt(A))) - @test A^(-17/8) ≈ inv(A^2 * sqrt(sqrt(sqrt(A)))) - @test (A^0.2)^5 ≈ A - @test (A^(2/3))*(A^(1/3)) ≈ A - @test (A^im)^(-im) ≈ A - end - - Tschurpow = Union{Matrix{real(elty)}, Matrix{complex(elty)}} - @test (@inferred Tschurpow LinearAlgebra.schurpow(Aa, 2.0)) ≈ Aa^2 -end - -@testset "BigFloat triangular real power" begin - A = Float64[3 1; 0 3] - @test A^(3/4) ≈ big.(A)^(3/4) -end - -@testset "diagonal integer matrix to real power" begin - A = Matrix(Diagonal([1, 2, 3])) - @test A^2.3 ≈ float(A)^2.3 -end - -@testset "issue #23366 (Int Matrix to Int power)" begin - @testset "Tests for $elty" for elty in (Int128, Int16, Int32, Int64, Int8, - UInt128, UInt16, UInt32, UInt64, UInt8, - BigInt) - #@info "Testing $elty" - @test elty[1 1;1 0]^-1 == [0 1; 1 -1] - @test elty[1 1;1 0]^-2 == [1 -1; -1 2] - @test (@inferred elty[1 1;1 0]^2) == elty[2 1;1 1] - I_ = elty[1 0;0 1] - @test I_^-1 == I_ - if !(elty<:Unsigned) - @test (@inferred (-I_)^-1) == -I_ - @test (@inferred (-I_)^-2) == I_ - end - # make sure that type promotion for ^(::Matrix{<:Integer}, ::Integer) - # is analogous to type promotion for ^(::Integer, ::Integer) - # e.g. 
[1 1;1 0]^big(10000) should return Matrix{BigInt}, the same - # way as 2^big(10000) returns BigInt - for elty2 = (Int64, BigInt) - TT = Base.promote_op(^, elty, elty2) - @test (@inferred elty[1 1;1 0]^elty2(1))::Matrix{TT} == [1 1;1 0] - end - end -end - -@testset "Least squares solutions" begin - a = [fill(1, 20) 1:20 1:20] - b = reshape(Matrix(1.0I, 8, 5), 20, 2) - @testset "Tests for type $elty" for elty in (Float32, Float64, ComplexF32, ComplexF64) - a = convert(Matrix{elty}, a) - b = convert(Matrix{elty}, b) - - # Vector rhs - x = a[:,1:2]\b[:,1] - @test ((a[:,1:2]*x-b[:,1])'*(a[:,1:2]*x-b[:,1]))[1] ≈ convert(elty, 2.546616541353384) - - # Matrix rhs - x = a[:,1:2]\b - @test det((a[:,1:2]*x-b)'*(a[:,1:2]*x-b)) ≈ convert(elty, 4.437969924812031) - - # Rank deficient - x = a\b - @test det((a*x-b)'*(a*x-b)) ≈ convert(elty, 4.437969924812031) - - # Underdetermined minimum norm - x = convert(Matrix{elty}, [1 0 0; 0 1 -1]) \ convert(Vector{elty}, [1,1]) - @test x ≈ convert(Vector{elty}, [1, 0.5, -0.5]) - - # symmetric, positive definite - @test inv(convert(Matrix{elty}, [6. 2; 2 1])) ≈ convert(Matrix{elty}, [0.5 -1; -1 3]) - - # symmetric, indefinite - @test inv(convert(Matrix{elty}, [1. 2; 2 1])) ≈ convert(Matrix{elty}, [-1. 2; 2 -1]/3) - end -end - -function test_rdiv_pinv_consistency(a, b) - @test (a*b)/b ≈ a*(b/b) ≈ (a*b)*pinv(b) ≈ a*(b*pinv(b)) - @test typeof((a*b)/b) == typeof(a*(b/b)) == typeof((a*b)*pinv(b)) == typeof(a*(b*pinv(b))) -end -function test_ldiv_pinv_consistency(a, b) - @test a\(a*b) ≈ (a\a)*b ≈ (pinv(a)*a)*b ≈ pinv(a)*(a*b) - @test typeof(a\(a*b)) == typeof((a\a)*b) == typeof((pinv(a)*a)*b) == typeof(pinv(a)*(a*b)) -end -function test_div_pinv_consistency(a, b) - test_rdiv_pinv_consistency(a, b) - test_ldiv_pinv_consistency(a, b) -end - -@testset "/ and \\ consistency with pinv for vectors" begin - @testset "Tests for type $elty" for elty in (Float32, Float64, ComplexF32, ComplexF64) - c = rand(elty, 5) - r = (elty <: Complex ? 
adjoint : transpose)(rand(elty, 5)) - cm = rand(elty, 5, 1) - rm = rand(elty, 1, 5) - @testset "dot products" begin - test_div_pinv_consistency(r, c) - test_div_pinv_consistency(rm, c) - test_div_pinv_consistency(r, cm) - test_div_pinv_consistency(rm, cm) - end - @testset "outer products" begin - test_div_pinv_consistency(c, r) - test_div_pinv_consistency(cm, rm) - end - @testset "matrix/vector" begin - m = rand(5, 5) - test_ldiv_pinv_consistency(m, c) - test_rdiv_pinv_consistency(r, m) - end - end -end - -@testset "test ops on Numbers for $elty" for elty in [Float32,Float64,ComplexF32,ComplexF64] - a = rand(elty) - @test isposdef(one(elty)) - @test lyap(one(elty),a) == -a/2 -end - -@testset "strides" begin - a = rand(10) - b = view(a,2:2:10) - @test LinearAlgebra.stride1(a) == 1 - @test LinearAlgebra.stride1(b) == 2 -end - -@testset "inverse of Adjoint" begin - A = randn(n, n) - - @test @inferred(inv(A'))*A' ≈ I - @test @inferred(inv(transpose(A)))*transpose(A) ≈ I - - B = complex.(A, randn(n, n)) - - @test @inferred(inv(B'))*B' ≈ I - @test @inferred(inv(transpose(B)))*transpose(B) ≈ I -end - -@testset "Factorize fallback for Adjoint/Transpose" begin - a = rand(Complex{Int8}, n, n) - @test Array(transpose(factorize(Transpose(a)))) ≈ Array(factorize(a)) - @test transpose(factorize(transpose(a))) == factorize(a) - @test Array(adjoint(factorize(Adjoint(a)))) ≈ Array(factorize(a)) - @test adjoint(factorize(adjoint(a))) == factorize(a) -end - -@testset "Matrix log issue #32313" begin - for A in ([30 20; -50 -30], [10.0im 0; 0 -10.0im], randn(6,6)) - @test exp(log(A)) ≈ A - end -end - -@testset "Matrix log PR #33245" begin - # edge case for divided difference - A1 = triu(ones(3,3),1) + diagm([1.0, -2eps()-1im, -eps()+0.75im]) - @test exp(log(A1)) ≈ A1 - # case where no sqrt is needed (s=0) - A2 = [1.01 0.01 0.01; 0 1.01 0.01; 0 0 1.01] - @test exp(log(A2)) ≈ A2 -end - -@testset "sqrt of empty Matrix of type $T" for T in [Int,Float32,Float64,ComplexF32,ComplexF64] - @test sqrt(Matrix{T}(undef, 0, 0)) == Matrix{T}(undef, 0, 0) - @test_throws DimensionMismatch sqrt(Matrix{T}(undef, 0, 3)) -end - -struct TypeWithoutZero end -Base.zero(::Type{TypeWithoutZero}) = TypeWithZero() -struct TypeWithZero end -Base.promote_rule(::Type{TypeWithoutZero}, ::Type{TypeWithZero}) = TypeWithZero -Base.zero(::Type{<:Union{TypeWithoutZero, TypeWithZero}}) = TypeWithZero() -Base.:+(x::TypeWithZero, ::TypeWithoutZero) = x - -@testset "diagm for type with no zero" begin - @test diagm(0 => [TypeWithoutZero()]) isa Matrix{TypeWithZero} -end - -@testset "cbrt(A::AbstractMatrix{T})" begin - N = 10 - - # Non-square - A = randn(N,N+2) - @test_throws DimensionMismatch cbrt(A) - - # Real valued diagonal - D = Diagonal(randn(N)) - T = cbrt(D) - @test T*T*T ≈ D - @test eltype(D) == eltype(T) - # Real valued triangular - U = UpperTriangular(randn(N,N)) - T = cbrt(U) - @test T*T*T ≈ U - @test eltype(U) == eltype(T) - L = LowerTriangular(randn(N,N)) - T = cbrt(L) - @test T*T*T ≈ L - @test eltype(L) == eltype(T) - # Real valued symmetric - S = (A -> (A+A')/2)(randn(N,N)) - T = cbrt(Symmetric(S,:U)) - @test T*T*T ≈ S - @test eltype(S) == eltype(T) - # Real valued symmetric - S = (A -> (A+A')/2)(randn(N,N)) - T = cbrt(Symmetric(S,:L)) - @test T*T*T ≈ S - @test eltype(S) == eltype(T) - # Real valued Hermitian - S = (A -> (A+A')/2)(randn(N,N)) - T = cbrt(Hermitian(S,:U)) - @test T*T*T ≈ S - @test eltype(S) == eltype(T) - # Real valued Hermitian - S = (A -> (A+A')/2)(randn(N,N)) - T = cbrt(Hermitian(S,:L)) - @test T*T*T ≈ S - @test 
eltype(S) == eltype(T) - # Real valued arbitrary - A = randn(N,N) - T = cbrt(A) - @test T*T*T ≈ A - @test eltype(A) == eltype(T) -end - -@testset "tr" begin - @testset "block matrices" begin - S = [1 2; 3 4] - M = fill(S, 3, 3) - @test tr(M) == 3S - @test tr(view(M, :, :)) == 3S - @test tr(view(M, axes(M)...)) == 3S - end - @testset "avoid promotion" begin - A = Int8[1 3; 2 4] - @test tr(A) === Int8(5) - @test tr(view(A, :, :)) === Int8(5) - @test tr(view(A, axes(A)...)) === Int8(5) - end -end - -@testset "trig functions for non-strided" begin - @testset for T in (Float32,ComplexF32) - A = FillArrays.Fill(T(0.1), 4, 4) # all.(<(1), eigvals(A)) for atanh - M = Matrix(A) - @testset for f in (sin,cos,tan,sincos,sinh,cosh,tanh) - @test f(A) == f(M) - end - @testset for f in (asin,acos,atan,asinh,acosh,atanh) - @test f(A) == f(M) - end - end -end - -end # module TestDense diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl deleted file mode 100644 index 16f3d2287f317..0000000000000 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ /dev/null @@ -1,1455 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestDiagonal - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasFloat, BlasComplex - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :Furlongs) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Furlongs.jl")) -using .Main.Furlongs - -isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl")) -using .Main.OffsetArrays - -isdefined(Main, :InfiniteArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "InfiniteArrays.jl")) -using .Main.InfiniteArrays - -isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl")) -using .Main.FillArrays - -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays - -const n=12 # Size of matrix problem to test -Random.seed!(1) - -@testset for relty in (Float32, Float64, BigFloat), elty in (relty, Complex{relty}) - dd=convert(Vector{elty}, randn(n)) - vv=convert(Vector{elty}, randn(n)) - UU=convert(Matrix{elty}, randn(n,n)) - if elty <: Complex - dd+=im*convert(Vector{elty}, randn(n)) - vv+=im*convert(Vector{elty}, randn(n)) - UU+=im*convert(Matrix{elty}, randn(n,n)) - end - D = Diagonal(dd) - DM = Matrix(Diagonal(dd)) - - @testset "constructor" begin - for x in (dd, GenericArray(dd)) - @test Diagonal(x)::Diagonal{elty,typeof(x)} == DM - @test Diagonal(x).diag === x - @test Diagonal{elty}(x)::Diagonal{elty,typeof(x)} == DM - @test Diagonal{elty}(x).diag === x - @test Diagonal{elty}(D) === D - end - @test eltype(Diagonal{elty}([1,2,3,4])) == elty - @test isa(Diagonal{elty,Vector{elty}}(GenericArray([1,2,3,4])), Diagonal{elty,Vector{elty}}) - @test isa(Diagonal{elty}(rand(Int,n,n)), Diagonal{elty,Vector{elty}}) - DI = Diagonal([1,2,3,4]) - @test Diagonal(DI) === DI - @test isa(Diagonal{elty}(DI), Diagonal{elty}) - - # diagonal matrices may be converted to Diagonal - local A = [1 0; 0 2] - local DA = convert(Diagonal{Float32,Vector{Float32}}, A) - @test DA isa Diagonal{Float32,Vector{Float32}} - @test DA == A - - # issue #26178 - @test_throws MethodError convert(Diagonal, [1,2,3,4]) - @test_throws DimensionMismatch convert(Diagonal, [1 2 3 4]) - @test_throws InexactError convert(Diagonal, ones(2,2)) - - # Test 
reversing - # Test reversing along rows - @test reverse(D, dims=1) == reverse(Matrix(D), dims=1) - - # Test reversing along columns - @test reverse(D, dims=2) == reverse(Matrix(D), dims=2) - - # Test reversing the entire matrix - @test reverse(D)::Diagonal == reverse(Matrix(D)) == reverse!(copy(D)) - end - - @testset "Basic properties" begin - @test_throws BoundsError size(D,0) - @test size(D,1) == size(D,2) == length(dd) - @test size(D,3) == 1 - @test typeof(convert(Diagonal{ComplexF32},D)) <: Diagonal{ComplexF32} - @test typeof(convert(AbstractMatrix{ComplexF32},D)) <: Diagonal{ComplexF32} - - @test Array(real(D)) == real(DM) - @test Array(abs.(D)) == abs.(DM) - @test Array(imag(D)) == imag(DM) - - @test parent(D) == dd - @test D[1,1] == dd[1] - @test D[1,2] == 0 - - @test issymmetric(D) - @test isdiag(D) - @test isdiag(Diagonal([[1 0; 0 1], [1 0; 0 1]])) - @test !isdiag(Diagonal([[1 0; 0 1], [1 0; 1 1]])) - @test istriu(D) - @test istriu(D, -1) - @test !istriu(D, 1) - @test istriu(Diagonal(zero(diag(D))), 1) - @test istril(D) - @test !istril(D, -1) - @test istril(D, 1) - @test istril(Diagonal(zero(diag(D))), -1) - @test Base.isstored(D,1,1) - @test !Base.isstored(D,1,2) - @test_throws BoundsError Base.isstored(D, n + 1, 1) - if elty <: Real - @test ishermitian(D) - end - end - - @testset "diag" begin - @test isempty(@inferred diag(D, n+1)) - @test isempty(@inferred diag(D, -n-1)) - @test (@inferred diag(D))::typeof(dd) == dd - @test (@inferred diag(D, 0))::typeof(dd) == dd - @test (@inferred diag(D, 1))::typeof(dd) == zeros(elty, n-1) - DG = Diagonal(GenericArray(dd)) - @test (@inferred diag(DG))::typeof(GenericArray(dd)) == GenericArray(dd) - @test (@inferred diag(DG, 1))::typeof(GenericArray(dd)) == GenericArray(zeros(elty, n-1)) - end - - - @testset "Simple unary functions" begin - for op in (-,) - @test op(D)==op(DM) - end - - for func in (det, tr) - @test func(D) ≈ func(DM) atol=n^2*eps(relty)*(1+(elty<:Complex)) - end - - if eltype(D) <: Real - @test minimum(D) ≈ minimum(DM) - @test maximum(D) ≈ maximum(DM) - end - - if relty <: BlasFloat - for func in (exp, cis, sinh, cosh, tanh, sech, csch, coth) - @test func(D) ≈ func(DM) atol=n^3*eps(relty) - end - @test log(Diagonal(abs.(D.diag))) ≈ log(abs.(DM)) atol=n^3*eps(relty) - end - if elty <: BlasComplex - for func in (logdet, sqrt, sin, cos, tan, sec, csc, cot, - asin, acos, atan, asec, acsc, acot, - asinh, acosh, atanh, asech, acsch, acoth) - @test func(D) ≈ func(DM) atol=n^2*eps(relty)*2 - end - end - end - - @testset "Two-dimensional Euler formula for Diagonal" begin - @test cis(Diagonal([π, π])) ≈ -I - end - - @testset "Linear solve" begin - for (v, U) in ((vv, UU), (view(vv, 1:n), view(UU, 1:n, 1:2))) - @test D*v ≈ DM*v atol=n*eps(relty)*(1+(elty<:Complex)) - @test D*U ≈ DM*U atol=n^2*eps(relty)*(1+(elty<:Complex)) - - @test transpose(U)*D ≈ transpose(U)*Array(D) - @test U'*D ≈ U'*Array(D) - - if relty != BigFloat - atol_two = 2n^2 * eps(relty) * (1 + (elty <: Complex)) - atol_three = 2n^3 * eps(relty) * (1 + (elty <: Complex)) - @test D\v ≈ DM\v atol=atol_two - @test D\U ≈ DM\U atol=atol_three - @test ldiv!(D, copy(v)) ≈ DM\v atol=atol_two - @test ldiv!(transpose(D), copy(v)) ≈ DM\v atol=atol_two - @test ldiv!(adjoint(conj(D)), copy(v)) ≈ DM\v atol=atol_two - @test ldiv!(D, copy(U)) ≈ DM\U atol=atol_three - @test ldiv!(transpose(D), copy(U)) ≈ DM\U atol=atol_three - @test ldiv!(adjoint(conj(D)), copy(U)) ≈ DM\U atol=atol_three - # this method tests AbstractMatrix/AbstractVec for second arg - Usym_bad = Symmetric(ones(elty, 
n+1, n+1)) - @test_throws DimensionMismatch ldiv!(D, copy(Usym_bad)) - - @test ldiv!(zero(v), D, copy(v)) ≈ DM\v atol=atol_two - @test ldiv!(zero(v), transpose(D), copy(v)) ≈ DM\v atol=atol_two - @test ldiv!(zero(v), adjoint(conj(D)), copy(v)) ≈ DM\v atol=atol_two - @test ldiv!(zero(U), D, copy(U)) ≈ DM\U atol=atol_three - @test ldiv!(zero(U), transpose(D), copy(U)) ≈ DM\U atol=atol_three - @test ldiv!(zero(U), adjoint(conj(D)), copy(U)) ≈ DM\U atol=atol_three - - Uc = copy(U') - target = rmul!(Uc, Diagonal(inv.(D.diag))) - @test rdiv!(Uc, D) ≈ target atol=atol_three - @test_throws DimensionMismatch rdiv!(Matrix{elty}(I, n-1, n-1), D) - @test_throws SingularException rdiv!(Uc, Diagonal(fill!(similar(D.diag), 0))) - @test rdiv!(Uc, transpose(D)) ≈ target atol=atol_three - @test rdiv!(Uc, adjoint(conj(D))) ≈ target atol=atol_three - @test ldiv!(D, Matrix{eltype(D)}(I, size(D))) ≈ D \ Matrix{eltype(D)}(I, size(D)) atol=atol_three - @test_throws DimensionMismatch ldiv!(D, fill(elty(1), n + 1)) - @test_throws SingularException ldiv!(Diagonal(zeros(relty, n)), copy(v)) - b = rand(elty, n, n) - @test ldiv!(D, copy(b)) ≈ Array(D)\Array(b) - @test_throws SingularException ldiv!(Diagonal(zeros(elty, n)), copy(b)) - b = view(rand(elty, n), Vector(1:n)) - b2 = copy(b) - c = ldiv!(D, b) - d = Array(D)\b2 - @test c ≈ d - @test_throws SingularException ldiv!(Diagonal(zeros(elty, n)), b) - b = rand(elty, n+1, n+1) - @test_throws DimensionMismatch ldiv!(D, copy(b)) - b = view(rand(elty, n+1), Vector(1:n+1)) - @test_throws DimensionMismatch ldiv!(D, b) - end - end - end - d = convert(Vector{elty}, randn(n)) - D2 = Diagonal(d) - DM2= Matrix(Diagonal(d)) - @testset "Binary operations" begin - for op in (+, -, *) - @test Array(op(D, D2)) ≈ op(DM, DM2) - end - @testset "with plain numbers" begin - a = rand() - @test Array(a*D) ≈ a*DM - @test Array(D*a) ≈ DM*a - @test Array(D/a) ≈ DM/a - if elty <: Real - @test Array(abs.(D)^a) ≈ abs.(DM)^a - else - @test Array(D^a) ≈ DM^a - end - @test Diagonal(1:100)^2 == Diagonal((1:100).^2) - p = 3 - @test Diagonal(1:100)^p == Diagonal((1:100).^p) - @test Diagonal(1:100)^(-1) == Diagonal(inv.(1:100)) - @test Diagonal(1:100)^2.0 == Diagonal((1:100).^2.0) - @test Diagonal(1:100)^(2.0+0im) == Diagonal((1:100).^(2.0+0im)) - end - - if relty <: BlasFloat - for b in (rand(elty,n,n), rand(elty,n)) - @test lmul!(copy(D), copy(b)) ≈ Array(D)*Array(b) - @test lmul!(transpose(copy(D)), copy(b)) ≈ transpose(Array(D))*Array(b) - @test lmul!(adjoint(copy(D)), copy(b)) ≈ Array(D)'*Array(b) - end - end - - #a few missing mults - bd = Bidiagonal(D2) - @test D*transpose(D2) ≈ Array(D)*transpose(Array(D2)) - @test D2*transpose(D) ≈ Array(D2)*transpose(Array(D)) - @test D2*D' ≈ Array(D2)*Array(D)' - - #division of two Diagonals - @test D/D2 ≈ Diagonal(D.diag./D2.diag) - @test D\D2 ≈ Diagonal(D2.diag./D.diag) - - # QR \ Diagonal - A = rand(elty, n, n) - qrA = qr(A) - @test qrA \ D ≈ A \ D - - # HermOrSym - A = rand(elty, n, n) - Asym = Symmetric(A + transpose(A), :U) - Aherm = Hermitian(A + adjoint(A), :U) - for op in (+, -) - @test op(Asym, D) isa Symmetric - @test Array(op(Asym, D)) ≈ Array(Symmetric(op(Array(Asym), Array(D)))) - @test op(D, Asym) isa Symmetric - @test Array(op(D, Asym)) ≈ Array(Symmetric(op(Array(D), Array(Asym)))) - if !(elty <: Real) - Dr = real(D) - @test op(Aherm, Dr) isa Hermitian - @test Array(op(Aherm, Dr)) ≈ Array(Hermitian(op(Array(Aherm), Array(Dr)))) - @test op(Dr, Aherm) isa Hermitian - @test Array(op(Dr, Aherm)) ≈ Array(Hermitian(op(Array(Dr), Array(Aherm)))) - 
end - end - @test Array(D*transpose(Asym)) ≈ Array(D) * Array(transpose(Asym)) - @test Array(D*adjoint(Asym)) ≈ Array(D) * Array(adjoint(Asym)) - @test Array(D*transpose(Aherm)) ≈ Array(D) * Array(transpose(Aherm)) - @test Array(D*adjoint(Aherm)) ≈ Array(D) * Array(adjoint(Aherm)) - @test Array(transpose(Asym)*transpose(D)) ≈ Array(transpose(Asym)) * Array(transpose(D)) - @test Array(transpose(D)*transpose(Asym)) ≈ Array(transpose(D)) * Array(transpose(Asym)) - @test Array(adjoint(Aherm)*adjoint(D)) ≈ Array(adjoint(Aherm)) * Array(adjoint(D)) - @test Array(adjoint(D)*adjoint(Aherm)) ≈ Array(adjoint(D)) * Array(adjoint(Aherm)) - - # Performance specialisations for A*_mul_B! - vvv = similar(vv) - @test (r = Matrix(D) * vv ; mul!(vvv, D, vv) ≈ r ≈ vvv) - @test (r = Matrix(D)' * vv ; mul!(vvv, adjoint(D), vv) ≈ r ≈ vvv) - @test (r = transpose(Matrix(D)) * vv ; mul!(vvv, transpose(D), vv) ≈ r ≈ vvv) - - UUU = similar(UU) - for transformA in (identity, adjoint, transpose) - for transformD in (identity, adjoint, transpose) - @test mul!(UUU, transformA(UU), transformD(D)) ≈ transformA(UU) * Matrix(transformD(D)) - @test mul!(UUU, transformD(D), transformA(UU)) ≈ Matrix(transformD(D)) * transformA(UU) - end - end - - alpha = elty(randn()) # randn(elty) does not work with BigFloat - beta = elty(randn()) - @test begin - vvv = similar(vv) - vvv .= randn(size(vvv)) # randn!(vvv) does not work with BigFloat - r = alpha * Matrix(D) * vv + beta * vvv - mul!(vvv, D, vv, alpha, beta) ≈ r ≈ vvv - end - @test begin - vvv = similar(vv) - vvv .= randn(size(vvv)) # randn!(vvv) does not work with BigFloat - r = alpha * Matrix(D)' * vv + beta * vvv - mul!(vvv, adjoint(D), vv, alpha, beta) ≈ r ≈ vvv - end - @test begin - vvv = similar(vv) - vvv .= randn(size(vvv)) # randn!(vvv) does not work with BigFloat - r = alpha * transpose(Matrix(D)) * vv + beta * vvv - mul!(vvv, transpose(D), vv, alpha, beta) ≈ r ≈ vvv - end - - @test begin - UUU = similar(UU) - UUU .= randn(size(UUU)) # randn!(UUU) does not work with BigFloat - r = alpha * Matrix(D) * UU + beta * UUU - mul!(UUU, D, UU, alpha, beta) ≈ r ≈ UUU - end - @test begin - UUU = similar(UU) - UUU .= randn(size(UUU)) # randn!(UUU) does not work with BigFloat - r = alpha * Matrix(D)' * UU + beta * UUU - mul!(UUU, adjoint(D), UU, alpha, beta) ≈ r ≈ UUU - end - @test begin - UUU = similar(UU) - UUU .= randn(size(UUU)) # randn!(UUU) does not work with BigFloat - r = alpha * transpose(Matrix(D)) * UU + beta * UUU - mul!(UUU, transpose(D), UU, alpha, beta) ≈ r ≈ UUU - end - - # make sure that mul!(A, {Adj|Trans}(B)) works with B as a Diagonal - VV = Array(D) - DD = copy(D) - r = VV * Matrix(D) - @test Array(rmul!(VV, DD)) ≈ r ≈ Array(D)*Array(D) - DD = copy(D) - r = VV * transpose(Array(D)) - @test Array(rmul!(VV, transpose(DD))) ≈ r - DD = copy(D) - r = VV * Array(D)' - @test Array(rmul!(VV, adjoint(DD))) ≈ r - - # kron - D3 = Diagonal(convert(Vector{elty}, rand(n÷2))) - DM3= Matrix(D3) - @test Matrix(kron(D, D3)) ≈ kron(DM, DM3) - M4 = rand(elty, size(D3,1) + 1, size(D3,2) + 2) # choose a different size from D3 - @test kron(D3, M4) ≈ kron(DM3, M4) - @test kron(M4, D3) ≈ kron(M4, DM3) - X = [ones(1,1) for i in 1:2, j in 1:2] - @test kron(I(2), X)[1,3] == zeros(1,1) - X = [ones(2,2) for i in 1:2, j in 1:2] - @test kron(I(2), X)[1,3] == zeros(2,2) - end - @testset "iszero, isone, triu, tril" begin - Dzero = Diagonal(zeros(elty, 10)) - Done = Diagonal(ones(elty, 10)) - Dmix = Diagonal(zeros(elty, 10)) - Dmix[end,end] = one(elty) - @test iszero(Dzero) - @test !isone(Dzero) - 
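The `iszero`/`isone` and `triu`/`tril` checks in this block all reduce to inspecting the stored diagonal: a `Diagonal` is zero (or one) exactly when every stored entry is, and shifting the band with `triu`/`tril` (for in-range offsets) either leaves the matrix unchanged or zeroes it out entirely. A short sketch of those identities on a throwaway example:

```julia
using LinearAlgebra

D = Diagonal([1.0, 2.0, 3.0])
iszero(Diagonal(zeros(3)))          # true: every stored entry is zero
isone(Diagonal(ones(3)))            # true: unit diagonal, zeros elsewhere
triu(D, 1) == Diagonal(zeros(3))    # nothing lies strictly above the diagonal
tril(D, -1) == Diagonal(zeros(3))   # nothing lies strictly below it
triu(D, 0) == D && tril(D, 0) == D
```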
@test !iszero(Done) - @test isone(Done) - @test !iszero(Dmix) - @test !isone(Dmix) - @test istriu(D) - @test istril(D) - @test iszero(triu(D,1)) - @test triu(D,0) == D - @test triu(D,-1) == D - @test tril(D,1) == D - @test iszero(tril(D,-1)) - @test tril(D,0) == D - @test_throws ArgumentError tril(D, -n - 2) - @test_throws ArgumentError tril(D, n) - @test_throws ArgumentError triu(D, -n) - @test_throws ArgumentError triu(D, n + 2) - end - - # factorize - @test factorize(D) == D - - @testset "Eigensystem" begin - eigD = eigen(D) - @test Diagonal(eigD.values) == D - @test eigD.vectors == Matrix(I, size(D)) - eigsortD = eigen(D, sortby=LinearAlgebra.eigsortby) - @test eigsortD.values !== D.diag - @test eigsortD.values == sort(D.diag, by=LinearAlgebra.eigsortby) - @test Matrix(eigsortD) == D - end - - @testset "ldiv" begin - v = rand(n + 1) - @test_throws DimensionMismatch D\v - v = rand(n) - @test D\v ≈ DM\v - V = rand(n + 1, n) - @test_throws DimensionMismatch D\V - V = rand(n, n) - @test D\V ≈ DM\V - end - - @testset "conj and transpose" begin - @test transpose(D) == D - if elty <: Real - @test transpose(D) === D - @test adjoint(D) === D - elseif elty <: BlasComplex - @test Array(conj(D)) ≈ conj(DM) - @test adjoint(D) == conj(D) - local D2 = copy(D) - local D2adj = adjoint(D2) - D2adj[1,1] = rand(eltype(D2adj)) - @test D2[1,1] == adjoint(D2adj[1,1]) - @test D2adj' === D2 - end - # Translates to Ac/t_mul_B, which is specialized after issue 21286 - @test(D' * vv == conj(D) * vv) - @test(transpose(D) * vv == D * vv) - end - - # logdet and logabsdet - if relty <: Real - lD = Diagonal(convert(Vector{relty}, rand(n))) - lM = Matrix(lD) - @test logdet(lD) ≈ logdet(lM) - d1, s1 = @inferred logabsdet(lD) - d2, s2 = logabsdet(lM) - @test d1 ≈ d2 - @test s1 == s2 - @test logdet(Diagonal(relty[-1,-2])) ≈ log(2) - @test_throws DomainError logdet(Diagonal(relty[-1,-2,-3])) - end - - @testset "similar" begin - @test isa(similar(D), Diagonal{elty}) - @test isa(similar(D, Int), Diagonal{Int}) - @test isa(similar(D, (3,2)), Matrix{elty}) - @test isa(similar(D, Int, (3,2)), Matrix{Int}) - end - - # Issue number 10036 - # make sure issymmetric/ishermitian work for - # non-real diagonal matrices - @testset "issymmetric/hermitian for complex Diagonal" begin - @test issymmetric(D2) - @test ishermitian(D2) - if elty <: Complex - dc = d .+ elty(1im) - D3 = Diagonal(dc) - @test issymmetric(D3) - @test !ishermitian(D3) - end - end - - @testset "svd (#11120/#11247)" begin - U, s, V = svd(D) - @test (U*Diagonal(s))*V' ≈ D - @test svdvals(D) == s - @test svd(D).V == V - end - - @testset "svd/eigen with Diagonal{Furlong}" begin - Du = Furlong.(D) - @test Du isa Diagonal{<:Furlong{1}} - F = svd(Du) - U, s, V = F - @test map(x -> x.val, Matrix(F)) ≈ map(x -> x.val, Du) - @test svdvals(Du) == s - @test U isa AbstractMatrix{<:Furlong{0}} - @test V isa AbstractMatrix{<:Furlong{0}} - @test s isa AbstractVector{<:Furlong{1}} - E = eigen(Du) - vals, vecs = E - @test Matrix(E) == Du - @test vals isa AbstractVector{<:Furlong{1}} - @test vecs isa AbstractMatrix{<:Furlong{0}} - end -end - -@testset "axes" begin - v = OffsetArray(1:3) - D = Diagonal(v) - @test axes(D) isa NTuple{2,typeof(axes(v,1))} -end - -@testset "rdiv! 
(#40887)" begin - @test rdiv!(Matrix(Diagonal([2.0, 3.0])), Diagonal(2:3)) == Diagonal([1.0, 1.0]) - @test rdiv!(fill(3.0, 3, 3), 3.0I(3)) == ones(3,3) -end - -@testset "kron (issue #40595)" begin - # custom array type to test that kron on Diagonal matrices preserves types of the parents if possible - struct KronTestArray{T, N, AT} <: AbstractArray{T, N} - data::AT - end - KronTestArray(data::AbstractArray) = KronTestArray{eltype(data), ndims(data), typeof(data)}(data) - Base.size(A::KronTestArray) = size(A.data) - LinearAlgebra.kron(A::KronTestArray, B::KronTestArray) = KronTestArray(kron(A.data, B.data)) - Base.getindex(K::KronTestArray{<:Any,N}, i::Vararg{Int,N}) where {N} = K.data[i...] - - A = KronTestArray([1, 2, 3]); - @test kron(A, A) isa KronTestArray - Ad = Diagonal(A); - @test kron(Ad, Ad).diag isa KronTestArray - @test kron(Ad, Ad).diag == kron([1, 2, 3], [1, 2, 3]) -end - -# Define a vector type that does not support `deleteat!`, to ensure that `kron` handles this -struct SimpleVector{T} <: AbstractVector{T} - vec::Vector{T} -end -SimpleVector(x::SimpleVector) = SimpleVector(Vector(x.vec)) -SimpleVector{T}(::UndefInitializer, n::Integer) where {T} = SimpleVector(Vector{T}(undef, n)) -Base.:(==)(x::SimpleVector, y::SimpleVector) = x == y -Base.axes(x::SimpleVector) = axes(x.vec) -Base.convert(::Type{Vector{T}}, x::SimpleVector) where {T} = convert(Vector{T}, x.vec) -Base.convert(::Type{Vector}, x::SimpleVector{T}) where {T} = convert(Vector{T}, x) -Base.convert(::Type{Array{T}}, x::SimpleVector) where {T} = convert(Vector{T}, x) -Base.convert(::Type{Array}, x::SimpleVector) = convert(Vector, x) -Base.copyto!(x::SimpleVector, y::SimpleVector) = (copyto!(x.vec, y.vec); x) -Base.eltype(::Type{SimpleVector{T}}) where {T} = T -Base.getindex(x::SimpleVector, ind...) = getindex(x.vec, ind...) -Base.kron(x::SimpleVector, y::SimpleVector) = SimpleVector(kron(x.vec, y.vec)) -Base.promote_rule(::Type{<:AbstractVector{T}}, ::Type{SimpleVector{U}}) where {T,U} = Vector{promote_type(T, U)} -Base.promote_rule(::Type{SimpleVector{T}}, ::Type{SimpleVector{U}}) where {T,U} = SimpleVector{promote_type(T, U)} -Base.setindex!(x::SimpleVector, val, ind...) 
= (setindex!(x.vec, val, ind...), x) -Base.similar(x::SimpleVector, ::Type{T}) where {T} = SimpleVector(similar(x.vec, T)) -Base.similar(x::SimpleVector, ::Type{T}, dims::Dims{1}) where {T} = SimpleVector(similar(x.vec, T, dims)) -Base.size(x::SimpleVector) = size(x.vec) - -@testset "kron (issue #46456)" for repr in Any[identity, SimpleVector] - A = Diagonal(repr(randn(10))) - BL = Bidiagonal(repr(randn(10)), repr(randn(9)), :L) - BU = Bidiagonal(repr(randn(10)), repr(randn(9)), :U) - C = SymTridiagonal(repr(randn(10)), repr(randn(9))) - Cl = SymTridiagonal(repr(randn(10)), repr(randn(10))) - D = Tridiagonal(repr(randn(9)), repr(randn(10)), repr(randn(9))) - @test kron(A, BL)::Bidiagonal == kron(Array(A), Array(BL)) - @test kron(A, BU)::Bidiagonal == kron(Array(A), Array(BU)) - @test kron(A, C)::SymTridiagonal == kron(Array(A), Array(C)) - @test kron(A, Cl)::SymTridiagonal == kron(Array(A), Array(Cl)) - @test kron(A, D)::Tridiagonal == kron(Array(A), Array(D)) -end - -@testset "svdvals and eigvals (#11120/#11247)" begin - D = Diagonal(Matrix{Float64}[randn(3,3), randn(2,2)]) - @test sort([svdvals(D)...;], rev = true) ≈ svdvals([D.diag[1] zeros(3,2); zeros(2,3) D.diag[2]]) - @test sort([eigvals(D)...;], by=LinearAlgebra.eigsortby) ≈ eigvals([D.diag[1] zeros(3,2); zeros(2,3) D.diag[2]]) -end - -@testset "eigvals should return a copy of the diagonal" begin - D = Diagonal([1, 2, 3]) - lam = eigvals(D) - D[3,3] = 4 # should not affect lam - @test lam == [1, 2, 3] -end - -@testset "eigmin (#27847)" begin - for _ in 1:100 - d = randn(rand(1:10)) - D = Diagonal(d) - @test eigmin(D) == minimum(d) - end -end - -@testset "isposdef" begin - @test isposdef(Diagonal(1.0 .+ rand(n))) - @test !isposdef(Diagonal(-1.0 * rand(n))) - @test isposdef(Diagonal(complex(1.0, 0.0) .+ rand(n))) - @test !isposdef(Diagonal(complex(1.0, 1.0) .+ rand(n))) - @test isposdef(Diagonal([[1 0; 0 1], [1 0; 0 1]])) - @test !isposdef(Diagonal([[1 0; 0 1], [1 0; 1 1]])) -end - -@testset "getindex" begin - d = randn(n) - D = Diagonal(d) - # getindex bounds checking - @test_throws BoundsError D[0, 0] - @test_throws BoundsError D[-1, -2] - @test_throws BoundsError D[n, n + 1] - @test_throws BoundsError D[n + 1, n] - @test_throws BoundsError D[n + 1, n + 1] - # getindex on and off the diagonal - for i in 1:n, j in 1:n - @test D[i, j] == (i == j ? d[i] : 0) - end -end - -@testset "setindex!" begin - d = randn(n) - D = Diagonal(d) - # setindex! bounds checking - @test_throws BoundsError D[0, 0] = 0 - @test_throws BoundsError D[-1 , -2] = 0 - @test_throws BoundsError D[n, n + 1] = 0 - @test_throws BoundsError D[n + 1, n] = 0 - @test_throws BoundsError D[n + 1, n + 1] = 0 - for i in 1:n, j in 1:n - if i == j - # setindex on! the diagonal - @test ((D[i, j] = i) == i; D[i, j] == i) - else - # setindex! 
off the diagonal - @test ((D[i, j] = 0) == 0; iszero(D[i, j])) - @test_throws ArgumentError D[i, j] = 1 - end - end - # setindex should return the destination - @test setindex!(D, 1, 1, 1) === D -end - -@testset "Test reverse" begin - D = Diagonal(randn(5)) - @test reverse(D, dims=1) == reverse(Matrix(D), dims=1) - @test reverse(D, dims=2) == reverse(Matrix(D), dims=2) - @test reverse(D)::Diagonal == reverse(Matrix(D)) -end - -@testset "inverse" begin - for d in Any[randn(n), Int[], [1, 2, 3], [1im, 2im, 3im], [1//1, 2//1, 3//1], [1+1im//1, 2//1, 3im//1]] - D = Diagonal(d) - @test inv(D) ≈ inv(Array(D)) - end - @test_throws SingularException inv(Diagonal(zeros(n))) - @test_throws SingularException inv(Diagonal([0, 1, 2])) - @test_throws SingularException inv(Diagonal([0im, 1im, 2im])) -end - -@testset "pseudoinverse" begin - for d in Any[randn(n), zeros(n), Int[], [0, 2, 0.003], [0im, 1+2im, 0.003im], [0//1, 2//1, 3//100], [0//1, 1//1+2im, 3im//100]] - D = Diagonal(d) - @test pinv(D) ≈ pinv(Array(D)) - @test pinv(D, 1.0e-2) ≈ pinv(Array(D), 1.0e-2) - end -end - -# allow construct from range -@test all(Diagonal(range(1, stop=3, length=3)) .== Diagonal([1.0,2.0,3.0])) - -# Issue 12803 -for t in (Float32, Float64, Int, ComplexF64, Rational{Int}) - @test Diagonal(Matrix{t}[fill(t(1), 2, 2), fill(t(1), 3, 3)])[2,1] == zeros(t, 3, 2) -end - -# Issue 15401 -@test Matrix(1.0I, 5, 5) \ Diagonal(fill(1.,5)) == Matrix(I, 5, 5) - -@testset "Triangular and Diagonal" begin - function _test_matrix(type) - if type == Int - return rand(1:9, 5, 5) - else - return randn(type, 5, 5) - end - end - types = (Float64, Int, ComplexF64) - for ta in types - D = Diagonal(_test_matrix(ta)) - for tb in types - B = _test_matrix(tb) - Tmats = (LowerTriangular(B), UnitLowerTriangular(B), UpperTriangular(B), UnitUpperTriangular(B)) - restypes = (LowerTriangular, LowerTriangular, UpperTriangular, UpperTriangular) - for (T, rtype) in zip(Tmats, restypes) - adjtype = (rtype == LowerTriangular) ? 
UpperTriangular : LowerTriangular - - # Triangular * Diagonal - R = T * D - @test R ≈ Array(T) * Array(D) - @test isa(R, rtype) - - # Diagonal * Triangular - R = D * T - @test R ≈ Array(D) * Array(T) - @test isa(R, rtype) - - # Adjoint of Triangular * Diagonal - R = T' * D - @test R ≈ Array(T)' * Array(D) - @test isa(R, adjtype) - - # Diagonal * Adjoint of Triangular - R = D * T' - @test R ≈ Array(D) * Array(T)' - @test isa(R, adjtype) - - # Transpose of Triangular * Diagonal - R = transpose(T) * D - @test R ≈ transpose(Array(T)) * Array(D) - @test isa(R, adjtype) - - # Diagonal * Transpose of Triangular - R = D * transpose(T) - @test R ≈ Array(D) * transpose(Array(T)) - @test isa(R, adjtype) - end - end - end -end - -let D1 = Diagonal(rand(5)), D2 = Diagonal(rand(5)) - @test LinearAlgebra.rmul!(copy(D1),D2) == D1*D2 - @test LinearAlgebra.lmul!(D1,copy(D2)) == D1*D2 - @test LinearAlgebra.rmul!(copy(D1),transpose(D2)) == D1*transpose(D2) - @test LinearAlgebra.lmul!(transpose(D1),copy(D2)) == transpose(D1)*D2 - @test LinearAlgebra.rmul!(copy(D1),adjoint(D2)) == D1*adjoint(D2) - @test LinearAlgebra.lmul!(adjoint(D1),copy(D2)) == adjoint(D1)*D2 -end - -@testset "multiplication of a Diagonal with a Matrix" begin - A = collect(reshape(1:8, 4, 2)); - B = BigFloat.(A); - DL = Diagonal(collect(axes(A, 1))); - DR = Diagonal(Float16.(collect(axes(A, 2)))); - - @test DL * A == collect(DL) * A - @test A * DR == A * collect(DR) - @test DL * B == collect(DL) * B - @test B * DR == B * collect(DR) - - A = reshape([ones(2,2), ones(2,2)*2, ones(2,2)*3, ones(2,2)*4], 2, 2) - Ac = collect(A) - D = Diagonal([collect(reshape(1:4, 2, 2)), collect(reshape(5:8, 2, 2))]) - Dc = collect(D) - @test A * D == Ac * Dc - @test D * A == Dc * Ac - @test D * D == Dc * Dc - - AS = similar(A) - mul!(AS, A, D, true, false) - @test AS == A * D - - D2 = similar(D) - mul!(D2, D, D) - @test D2 == D * D - - copyto!(D2, D) - lmul!(D, D2) - @test D2 == D * D - copyto!(D2, D) - rmul!(D2, D) - @test D2 == D * D -end - -@testset "multiplication of 2 Diagonal and a Matrix (#46400)" begin - A = randn(10, 10) - D = Diagonal(randn(10)) - D2 = Diagonal(randn(10)) - @test D * A * D2 ≈ D * (A * D2) - @test D * A * D2 ≈ (D * A) * D2 - @test_throws DimensionMismatch Diagonal(ones(9)) * A * D2 - @test_throws DimensionMismatch D * A * Diagonal(ones(9)) -end - -@testset "multiplication of QR Q-factor and Diagonal (#16615 spot test)" begin - D = Diagonal(randn(5)) - Q = qr(randn(5, 5)).Q - @test D * Q' == Array(D) * Q' - Q = qr(randn(5, 5), ColumnNorm()).Q - @test_throws ArgumentError lmul!(Q, D) -end - -@testset "block diagonal matrices" begin - D = Diagonal([[1 2; 3 4], [1 2; 3 4]]) - Dherm = Diagonal([[1 1+im; 1-im 1], [1 1+im; 1-im 1]]) - Dsym = Diagonal([[1 1+im; 1+im 1], [1 1+im; 1+im 1]]) - @test adjoint(D) == Diagonal([[1 3; 2 4], [1 3; 2 4]]) - @test transpose(D) == Diagonal([[1 3; 2 4], [1 3; 2 4]]) - @test adjoint(Dherm) == Dherm - @test transpose(Dherm) == Diagonal([[1 1-im; 1+im 1], [1 1-im; 1+im 1]]) - @test adjoint(Dsym) == Diagonal([[1 1-im; 1-im 1], [1 1-im; 1-im 1]]) - @test transpose(Dsym) == Dsym - @test diag(D, 0) == diag(D) == [[1 2; 3 4], [1 2; 3 4]] - @test diag(D, 1) == diag(D, -1) == [zeros(Int,2,2)] - @test diag(D, 2) == diag(D, -2) == [] - - v = [[1, 2], [3, 4]] - @test Dherm' * v == Dherm * v - @test transpose(D) * v == [[7, 10], [15, 22]] - - @test issymmetric(D) == false - @test issymmetric(Dherm) == false - @test issymmetric(Dsym) == true - - @test ishermitian(D) == false - @test ishermitian(Dherm) == true - @test 
ishermitian(Dsym) == false - - @test exp(D) == Diagonal([exp([1 2; 3 4]), exp([1 2; 3 4])]) - @test cis(D) == Diagonal([cis([1 2; 3 4]), cis([1 2; 3 4])]) - @test log(D) == Diagonal([log([1 2; 3 4]), log([1 2; 3 4])]) - @test sqrt(D) == Diagonal([sqrt([1 2; 3 4]), sqrt([1 2; 3 4])]) - - @test tr(D) == 10 - @test det(D) == 4 - - M = [1 2; 3 4] - for n in 0:1 - D = Diagonal(fill(M, n)) - @test D == Matrix{eltype(D)}(D) - end - - S = SizedArray{(2,3)}(reshape([1:6;],2,3)) - D = Diagonal(fill(S,3)) - @test D * fill(S,2,3)' == fill(S * S', 3, 2) - @test fill(S,3,2)' * D == fill(S' * S, 2, 3) - - @testset "indexing with non-standard-axes" begin - s = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) - D = Diagonal(fill(s,3)) - @test @inferred(D[1,2]) isa typeof(s) - @test all(iszero, D[1,2]) - end - - @testset "mul!" begin - D1 = Diagonal(fill(ones(2,3), 2)) - D2 = Diagonal(fill(ones(3,2), 2)) - C = similar(D1, size(D1)) - mul!(C, D1, D2) - @test all(x -> size(x) == (2,2), C) - @test C == D1 * D2 - D = similar(D1) - mul!(D, D1, D2) - @test all(x -> size(x) == (2,2), D) - @test D == D1 * D2 - end -end - -@testset "Eigensystem for block diagonal (issue #30681)" begin - I2 = Matrix(I, 2,2) - D = Diagonal([2.0*I2, 3.0*I2]) - eigD = eigen(D) - evals = [ 2.0, 2.0, 3.0, 3.0 ] - evecs = [ [[ 1.0, 0.0 ]] [[ 0.0, 1.0 ]] [[ 0.0, 0.0 ]] [[ 0.0, 0.0 ]]; - [[ 0.0, 0.0 ]] [[ 0.0, 0.0 ]] [[ 1.0, 0.0 ]] [[ 0.0, 1.0 ]] ] - @test eigD.values == evals - @test eigD.vectors == evecs - @test D * eigD.vectors ≈ eigD.vectors * Diagonal(eigD.values) - - I3 = Matrix(I, 3,3) - D = Diagonal([[0.0 -1.0; 1.0 0.0], 2.0*I3]) - eigD = eigen(D) - evals = [ -1.0im, 1.0im, 2.0, 2.0, 2.0 ] - evecs = [ [[ 1/sqrt(2)+0im, 1/sqrt(2)*im ]] [[ 1/sqrt(2)+0im, -1/sqrt(2)*im ]] [[ 0.0, 0.0 ]] [[ 0.0, 0.0 ]] [[ 0.0, 0.0]]; - [[ 0.0, 0.0, 0.0 ]] [[ 0.0, 0.0, 0.0 ]] [[ 1.0, 0.0, 0.0 ]] [[ 0.0, 1.0, 0.0 ]] [[ 0.0, 0.0, 1.0]] ] - @test eigD.values == evals - @test eigD.vectors ≈ evecs - @test D * eigD.vectors ≈ eigD.vectors * Diagonal(eigD.values) -end - -@testset "linear solve for block diagonal matrices" begin - D = Diagonal([rand(2,2) for _ in 1:5]) - b = [rand(2,2) for _ in 1:5] - B = [rand(2,2) for _ in 1:5, _ in 1:5] - @test ldiv!(D, copy(b)) ≈ Diagonal(inv.(D.diag)) * b - @test ldiv!(D, copy(B)) ≈ Diagonal(inv.(D.diag)) * B - @test rdiv!(copy(B), D) ≈ B * Diagonal(inv.(D.diag)) -end - -@testset "multiplication/division with Symmetric/Hermitian" begin - for T in (Float64, ComplexF64) - D = Diagonal(randn(T, n)) - A = randn(T, n, n); A = A'A - S = Symmetric(A) - H = Hermitian(A) - for (transform1, transform2) in ((identity, identity), - (identity, adjoint ), (adjoint, identity ), (adjoint, adjoint ), - (identity, transpose), (transpose, identity ), (transpose, transpose) ) - @test *(transform1(D), transform2(S)) ≈ *(transform1(Matrix(D)), transform2(Matrix(S))) - @test *(transform1(D), transform2(H)) ≈ *(transform1(Matrix(D)), transform2(Matrix(H))) - @test *(transform1(S), transform2(D)) ≈ *(transform1(Matrix(S)), transform2(Matrix(D))) - @test *(transform1(S), transform2(H)) ≈ *(transform1(Matrix(S)), transform2(Matrix(H))) - @test (transform1(H)/D) * D ≈ transform1(H) - @test (transform1(S)/D) * D ≈ transform1(S) - @test D * (D\transform2(H)) ≈ transform2(H) - @test D * (D\transform2(S)) ≈ transform2(S) - end - end -end - -@testset "multiplication of transposes of Diagonal (#22428)" begin - for T in (Float64, ComplexF64) - D = Diagonal(randn(T, 5, 5)) - B = Diagonal(randn(T, 5, 5)) - DD = Diagonal([randn(T, 2, 2), rand(T, 2, 2)]) - BB = 
Diagonal([randn(T, 2, 2), rand(T, 2, 2)]) - fullDD = copyto!(Matrix{Matrix{T}}(undef, 2, 2), DD) - fullBB = copyto!(Matrix{Matrix{T}}(undef, 2, 2), BB) - for (transform1, transform2) in ((identity, identity), - (identity, adjoint ), (adjoint, identity ), (adjoint, adjoint ), - (identity, transpose), (transpose, identity ), (transpose, transpose)) - @test *(transform1(D), transform2(B))::typeof(D) ≈ *(transform1(Matrix(D)), transform2(Matrix(B))) atol=2 * eps() - @test *(transform1(DD), transform2(BB))::typeof(DD) == *(transform1(fullDD), transform2(fullBB)) - end - M = randn(T, 5, 5) - MM = [randn(T, 2, 2) for _ in 1:2, _ in 1:2] - for transform in (identity, adjoint, transpose) - @test lmul!(transform(D), copy(M)) ≈ *(transform(Matrix(D)), M) - @test rmul!(copy(M), transform(D)) ≈ *(M, transform(Matrix(D))) - @test lmul!(transform(DD), copy(MM)) ≈ *(transform(fullDD), MM) - @test rmul!(copy(MM), transform(DD)) ≈ *(MM, transform(fullDD)) - end - end -end - -@testset "Diagonal of adjoint/transpose vectors (#23649)" begin - @test Diagonal(adjoint([1, 2, 3])) == Diagonal([1 2 3]) - @test Diagonal(transpose([1, 2, 3])) == Diagonal([1 2 3]) -end - -@testset "Multiplication with adjoint and transpose vectors (#26863)" begin - x = collect(1:2) - xt = transpose(x) - A = reshape([[1 2; 3 4], zeros(Int,2,2), zeros(Int, 2, 2), [5 6; 7 8]], 2, 2) - D = Diagonal(A) - @test x'*D == x'*A == collect(x')*D == collect(x')*A - @test xt*D == xt*A == collect(xt)*D == collect(xt)*A - outadjxD = similar(x'*D); outtrxD = similar(xt*D); - mul!(outadjxD, x', D) - @test outadjxD == x'*D - mul!(outtrxD, xt, D) - @test outtrxD == xt*D - - D1 = Diagonal([[1 2; 3 4]]) - @test D1 * x' == D1 * collect(x') == collect(D1) * collect(x') - @test D1 * xt == D1 * collect(xt) == collect(D1) * collect(xt) - outD1adjx = similar(D1 * x'); outD1trx = similar(D1 * xt); - mul!(outadjxD, D1, x') - @test outadjxD == D1*x' - mul!(outtrxD, D1, xt) - @test outtrxD == D1*xt - - y = [x, x] - yt = transpose(y) - @test y'*D*y == (y'*D)*y == (y'*A)*y - @test yt*D*y == (yt*D)*y == (yt*A)*y - outadjyD = similar(y'*D); outtryD = similar(yt*D); - outadjyD2 = similar(collect(y'*D)); outtryD2 = similar(collect(yt*D)); - mul!(outadjyD, y', D) - mul!(outadjyD2, y', D) - @test outadjyD == outadjyD2 == y'*D - mul!(outtryD, yt, D) - mul!(outtryD2, yt, D) - @test outtryD == outtryD2 == yt*D -end - -@testset "Multiplication of single element Diagonal (#36746, #40726)" begin - @test_throws DimensionMismatch Diagonal(randn(1)) * randn(5) - @test_throws DimensionMismatch Diagonal(randn(1)) * Diagonal(randn(3, 3)) - A = [1 0; 0 2] - v = [3, 4] - @test Diagonal(A) * v == A * v - @test Diagonal(A) * Diagonal(A) == A * A - @test_throws DimensionMismatch [1 0;0 1] * Diagonal([2 3]) # Issue #40726 - @test_throws DimensionMismatch lmul!(Diagonal([1]), [1,2,3]) # nearby -end - -@testset "Multiplication of a Diagonal with an OffsetArray" begin - # Offset indices should throw - D = Diagonal(1:4) - A = OffsetArray(rand(4,4), 2, 2) - @test_throws ArgumentError D * A - @test_throws ArgumentError A * D - @test_throws ArgumentError mul!(similar(A, size(A)), A, D) - @test_throws ArgumentError mul!(similar(A, size(A)), D, A) -end - -@testset "Triangular division by Diagonal #27989" begin - K = 5 - for elty in (Float32, Float64, ComplexF32, ComplexF64) - U = UpperTriangular(randn(elty, K, K)) - L = LowerTriangular(randn(elty, K, K)) - D = Diagonal(randn(elty, K)) - @test (U / D)::UpperTriangular{elty} == UpperTriangular(Matrix(U) / Matrix(D)) - @test (L / 
D)::LowerTriangular{elty} == LowerTriangular(Matrix(L) / Matrix(D)) - @test (D \ U)::UpperTriangular{elty} == UpperTriangular(Matrix(D) \ Matrix(U)) - @test (D \ L)::LowerTriangular{elty} == LowerTriangular(Matrix(D) \ Matrix(L)) - end -end - -@testset "(Sym)Tridiagonal division by Diagonal" begin - for K in (5, 1), elty in (Float64, ComplexF32), overlength in (1, 0) - S = SymTridiagonal(randn(elty, K), randn(elty, K-overlength)) - T = Tridiagonal(randn(elty, K-1), randn(elty, K), randn(elty, K-1)) - D = Diagonal(randn(elty, K)) - D0 = Diagonal(zeros(elty, K)) - @test (D \ S)::Tridiagonal{elty} == Tridiagonal(Matrix(D) \ Matrix(S)) - @test (D \ T)::Tridiagonal{elty} == Tridiagonal(Matrix(D) \ Matrix(T)) - @test (S / D)::Tridiagonal{elty} == Tridiagonal(Matrix(S) / Matrix(D)) - @test (T / D)::Tridiagonal{elty} == Tridiagonal(Matrix(T) / Matrix(D)) - @test_throws SingularException D0 \ S - @test_throws SingularException D0 \ T - @test_throws SingularException S / D0 - @test_throws SingularException T / D0 - end - # 0-length case - S = SymTridiagonal(Float64[], Float64[]) - T = Tridiagonal(Float64[], Float64[], Float64[]) - D = Diagonal(Float64[]) - @test (D \ S)::Tridiagonal{Float64} == T - @test (D \ T)::Tridiagonal{Float64} == T - @test (S / D)::Tridiagonal{Float64} == T - @test (T / D)::Tridiagonal{Float64} == T - # matrix eltype case - K = 5 - for elty in (Float64, ComplexF32), overlength in (1, 0) - S = SymTridiagonal([rand(elty, 2, 2) for _ in 1:K], [rand(elty, 2, 2) for _ in 1:K-overlength]) - T = Tridiagonal([rand(elty, 2, 2) for _ in 1:K-1], [rand(elty, 2, 2) for _ in 1:K], [rand(elty, 2, 2) for _ in 1:K-1]) - D = Diagonal(randn(elty, K)) - SM = fill(zeros(elty, 2, 2), K, K) - TM = copy(SM) - SM[1,1] = S[1,1]; TM[1,1] = T[1,1] - for j in 2:K - SM[j,j-1] = S[j,j-1]; SM[j,j] = S[j,j]; SM[j-1,j] = S[j-1,j] - TM[j,j-1] = T[j,j-1]; TM[j,j] = T[j,j]; TM[j-1,j] = T[j-1,j] - end - for (M, Mm) in ((S, SM), (T, TM)) - DS = D \ M - @test DS isa Tridiagonal - DM = D \ Mm - for i in -1:1; @test diag(DS, i) ≈ diag(DM, i) end - DS = M / D - @test DS isa Tridiagonal - DM = Mm / D - for i in -1:1; @test diag(DS, i) ≈ diag(DM, i) end - end - end - # eltype promotion case - S = SymTridiagonal(rand(-20:20, K), rand(-20:20, K-1)) - T = Tridiagonal(rand(-20:20, K-1), rand(-20:20, K), rand(-20:20, K-1)) - D = Diagonal(rand(1:20, K)) - @test (D \ S)::Tridiagonal{Float64} == Tridiagonal(Matrix(D) \ Matrix(S)) - @test (D \ T)::Tridiagonal{Float64} == Tridiagonal(Matrix(D) \ Matrix(T)) - @test (S / D)::Tridiagonal{Float64} == Tridiagonal(Matrix(S) / Matrix(D)) - @test (T / D)::Tridiagonal{Float64} == Tridiagonal(Matrix(T) / Matrix(D)) -end - -@testset "eigenvalue sorting" begin - D = Diagonal([0.4, 0.2, -1.3]) - @test eigvals(D) == eigen(D).values == [0.4, 0.2, -1.3] # not sorted by default - @test eigvals(Matrix(D)) == eigen(Matrix(D)).values == [-1.3, 0.2, 0.4] # sorted even if diagonal special case is detected - E = eigen(D, sortby=abs) # sortby keyword supported for eigen(::Diagonal) - @test E.values == [0.2, 0.4, -1.3] - @test E.vectors == [0 1 0; 1 0 0; 0 0 1] -end - -@testset "sum, mapreduce" begin - D = Diagonal([1,2,3]) - Ddense = Matrix(D) - @test sum(D) == 6 - @test_throws ArgumentError sum(D, dims=0) - @test sum(D, dims=1) == sum(Ddense, dims=1) - @test sum(D, dims=2) == sum(Ddense, dims=2) - @test sum(D, dims=3) == sum(Ddense, dims=3) - @test typeof(sum(D, dims=1)) == typeof(sum(Ddense, dims=1)) - @test mapreduce(one, min, D, dims=1) == mapreduce(one, min, Ddense, dims=1) - @test mapreduce(one, 
min, D, dims=2) == mapreduce(one, min, Ddense, dims=2) - @test mapreduce(one, min, D, dims=3) == mapreduce(one, min, Ddense, dims=3) - @test typeof(mapreduce(one, min, D, dims=1)) == typeof(mapreduce(one, min, Ddense, dims=1)) - @test mapreduce(zero, max, D, dims=1) == mapreduce(zero, max, Ddense, dims=1) - @test mapreduce(zero, max, D, dims=2) == mapreduce(zero, max, Ddense, dims=2) - @test mapreduce(zero, max, D, dims=3) == mapreduce(zero, max, Ddense, dims=3) - @test typeof(mapreduce(zero, max, D, dims=1)) == typeof(mapreduce(zero, max, Ddense, dims=1)) - - D = Diagonal(Int[]) - Ddense = Matrix(D) - @test sum(D) == 0 - @test_throws ArgumentError sum(D, dims=0) - @test sum(D, dims=1) == sum(Ddense, dims=1) - @test sum(D, dims=2) == sum(Ddense, dims=2) - @test sum(D, dims=3) == sum(Ddense, dims=3) - @test typeof(sum(D, dims=1)) == typeof(sum(Ddense, dims=1)) - - D = Diagonal(Int[2]) - Ddense = Matrix(D) - @test sum(D) == 2 - @test_throws ArgumentError sum(D, dims=0) - @test sum(D, dims=1) == sum(Ddense, dims=1) - @test sum(D, dims=2) == sum(Ddense, dims=2) - @test sum(D, dims=3) == sum(Ddense, dims=3) - @test typeof(sum(D, dims=1)) == typeof(sum(Ddense, dims=1)) -end - -@testset "logabsdet for generic eltype" begin - d = Any[1, -2.0, -3.0] - D = Diagonal(d) - d1, s1 = logabsdet(D) - @test d1 ≈ sum(log ∘ abs, d) - @test s1 == prod(sign, d) -end - -@testset "Empty (#35424) & size checks (#47060)" begin - @test zeros(0)'*Diagonal(zeros(0))*zeros(0) === 0.0 - @test transpose(zeros(0))*Diagonal(zeros(Complex{Int}, 0))*zeros(0) === 0.0 + 0.0im - @test dot(zeros(Int32, 0), Diagonal(zeros(Int, 0)), zeros(Int16, 0)) === 0 - @test_throws DimensionMismatch zeros(2)' * Diagonal(zeros(2)) * zeros(3) - @test_throws DimensionMismatch zeros(3)' * Diagonal(zeros(2)) * zeros(2) - @test_throws DimensionMismatch dot(zeros(2), Diagonal(zeros(2)), zeros(3)) - @test_throws DimensionMismatch dot(zeros(3), Diagonal(zeros(2)), zeros(2)) -end - -@testset "Diagonal(undef)" begin - d = Diagonal{Float32}(undef, 2) - @test length(d.diag) == 2 -end - -@testset "permutedims (#39447)" begin - for D in (Diagonal(zeros(5)), Diagonal(zeros(5) .+ 1im), Diagonal([[1,2],[3,4]])) - @test permutedims(D) === permutedims(D,(1,2)) === permutedims(D,(2,1)) === D - @test_throws ArgumentError permutedims(D,(1,3)) - end -end - -@testset "Inner product" begin - A = Diagonal(rand(10) .+ im) - B = Diagonal(rand(10) .+ im) - @test dot(A, B) ≈ dot(Matrix(A), B) - @test dot(A, B) ≈ dot(A, Matrix(B)) - @test dot(A, B) ≈ dot(Matrix(A), Matrix(B)) - @test dot(A, B) ≈ conj(dot(B, A)) -end - -@testset "eltype relaxation(#41015)" begin - A = rand(3,3) - for trans in (identity, adjoint, transpose) - @test ldiv!(trans(I(3)), A) == A - @test rdiv!(A, trans(I(3))) == A - end -end - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays - -@testset "Conversion to AbstractArray" begin - # tests corresponding to #34995 - d = ImmutableArray([1, 2, 3, 4]) - D = Diagonal(d) - - @test convert(AbstractArray{Float64}, D)::Diagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == D - @test convert(AbstractMatrix{Float64}, D)::Diagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == D -end - -@testset "divisions functionality" for elty in (Int, Float64, ComplexF64) - B = Diagonal(rand(elty,5,5)) - x = rand(elty) - @test \(x, B) == /(B, x) -end - -@testset "promotion" begin - for 
(v1, v2) in (([true], [1]), ([zeros(2,2)], [zeros(Int, 2,2)])) - T = promote_type(eltype(v1), eltype(v2)) - V = promote_type(typeof(v1), typeof(v2)) - d1 = Diagonal(v1) - d2 = Diagonal(v2) - v = [d1, d2] - @test (@inferred eltype(v)) == Diagonal{T, V} - end - # test for a type for which promote_type doesn't lead to a concrete eltype - struct MyArrayWrapper{T,N,A<:AbstractArray{T,N}} <: AbstractArray{T,N} - a :: A - end - Base.size(M::MyArrayWrapper) = size(M.a) - Base.axes(M::MyArrayWrapper) = axes(M.a) - Base.length(M::MyArrayWrapper) = length(M.a) - Base.getindex(M::MyArrayWrapper, i::Int...) = M.a[i...] - Base.setindex!(M::MyArrayWrapper, v, i::Int...) = M.a[i...] = v - d1 = Diagonal(MyArrayWrapper(1:3)) - d2 = Diagonal(MyArrayWrapper(1.0:3.0)) - c = [d1, d2] - @test c[1] == d1 - @test c[2] == d2 -end - -@testset "zero and one" begin - D1 = Diagonal(rand(3)) - @test D1 + zero(D1) == D1 - @test D1 * one(D1) == D1 - @test D1 * oneunit(D1) == D1 - @test oneunit(D1) isa typeof(D1) - D2 = Diagonal([collect(reshape(1:4, 2, 2)), collect(reshape(5:8, 2, 2))]) - @test D2 + zero(D2) == D2 - @test D2 * one(D2) == D2 - @test D2 * oneunit(D2) == D2 - @test oneunit(D2) isa typeof(D2) - D3 = Diagonal([D2, D2]); - @test D3 + zero(D3) == D3 - @test D3 * one(D3) == D3 - @test D3 * oneunit(D3) == D3 - @test oneunit(D3) isa typeof(D3) -end - -@testset "$Tri" for (Tri, UTri) in ((UpperTriangular, UnitUpperTriangular), (LowerTriangular, UnitLowerTriangular)) - A = randn(4, 4) - TriA = Tri(A) - UTriA = UTri(A) - D = Diagonal(1.0:4.0) - DM = Matrix(D) - DMF = factorize(DM) - outTri = similar(TriA) - out = similar(A) - # 2 args - for fun in (*, rmul!, rdiv!, /) - @test fun(copy(TriA), D)::Tri == fun(Matrix(TriA), D) - @test fun(copy(UTriA), D)::Tri == fun(Matrix(UTriA), D) - end - for fun in (*, lmul!, ldiv!, \) - @test fun(D, copy(TriA))::Tri == fun(D, Matrix(TriA)) - @test fun(D, copy(UTriA))::Tri == fun(D, Matrix(UTriA)) - end - # 3 args - @test outTri === ldiv!(outTri, D, TriA)::Tri == ldiv!(out, D, Matrix(TriA)) - @test outTri === ldiv!(outTri, D, UTriA)::Tri == ldiv!(out, D, Matrix(UTriA)) - @test outTri === mul!(outTri, D, TriA)::Tri == mul!(out, D, Matrix(TriA)) - @test outTri === mul!(outTri, D, UTriA)::Tri == mul!(out, D, Matrix(UTriA)) - @test outTri === mul!(outTri, TriA, D)::Tri == mul!(out, Matrix(TriA), D) - @test outTri === mul!(outTri, UTriA, D)::Tri == mul!(out, Matrix(UTriA), D) - # 5 args - @test outTri === mul!(outTri, D, TriA, 2, 1)::Tri == mul!(out, D, Matrix(TriA), 2, 1) - @test outTri === mul!(outTri, D, UTriA, 2, 1)::Tri == mul!(out, D, Matrix(UTriA), 2, 1) - @test outTri === mul!(outTri, TriA, D, 2, 1)::Tri == mul!(out, Matrix(TriA), D, 2, 1) - @test outTri === mul!(outTri, UTriA, D, 2, 1)::Tri == mul!(out, Matrix(UTriA), D, 2, 1) - - # we may write to a Unit triangular if the diagonal is preserved - ID = Diagonal(ones(size(UTriA,2))) - @test mul!(copy(UTriA), UTriA, ID) == UTriA - @test mul!(copy(UTriA), ID, UTriA) == UTriA - - @testset "partly filled parents" begin - M = Matrix{BigFloat}(undef, 2, 2) - M[1,1] = M[2,2] = 3 - isupper = Tri == UpperTriangular - M[1+!isupper, 1+isupper] = 3 - D = Diagonal(1:2) - T = Tri(M) - TA = Array(T) - @test T * D == TA * D - @test D * T == D * TA - @test mul!(copy(T), T, D, 2, 3) == 2T * D + 3T - @test mul!(copy(T), D, T, 2, 3) == 2D * T + 3T - - U = UTri(M) - UA = Array(U) - @test U * D == UA * D - @test D * U == D * UA - @test mul!(copy(T), U, D, 2, 3) == 2 * UA * D + 3TA - @test mul!(copy(T), D, U, 2, 3) == 2 * D * UA + 3TA - - M2 = 
Matrix{BigFloat}(undef, 2, 2) - M2[1+!isupper, 1+isupper] = 3 - U = UTri(M2) - UA = Array(U) - @test U * D == UA * D - @test D * U == D * UA - ID = Diagonal(ones(size(U,2))) - @test mul!(copy(U), U, ID) == U - @test mul!(copy(U), ID, U) == U - @test mul!(copy(U), U, ID, 2, -1) == U - @test mul!(copy(U), ID, U, 2, -1) == U - end -end - -struct SMatrix1{T} <: AbstractArray{T,2} - elt::T -end -Base.:(==)(A::SMatrix1, B::SMatrix1) = A.elt == B.elt -Base.zero(::Type{SMatrix1{T}}) where {T} = SMatrix1(zero(T)) -Base.iszero(A::SMatrix1) = iszero(A.elt) -Base.getindex(A::SMatrix1, inds...) = A.elt -Base.size(::SMatrix1) = (1, 1) -@testset "map for Diagonal matrices (#46292)" begin - A = Diagonal([1]) - @test A isa Diagonal{Int,Vector{Int}} - @test 2*A isa Diagonal{Int,Vector{Int}} - @test A.+1 isa Matrix{Int} - # Numeric element types remain diagonal - B = map(SMatrix1, A) - @test B == fill(SMatrix1(1), 1, 1) - @test B isa Diagonal{SMatrix1{Int},Vector{SMatrix1{Int}}} - # Non-numeric element types become dense - C = map(a -> SMatrix1(string(a)), A) - @test C == fill(SMatrix1(string(1)), 1, 1) - @test C isa Matrix{SMatrix1{String}} -end - -@testset "show" begin - @test repr(Diagonal([1,2])) == "Diagonal([1, 2])" # 2-arg show - @test contains(repr(MIME"text/plain"(), Diagonal([1,2])), "⋅ 2") # 3-arg show -end - -@testset "copyto! with UniformScaling" begin - @testset "Fill" begin - for len in (4, InfiniteArrays.Infinity()) - d = FillArrays.Fill(1, len) - D = Diagonal(d) - @test copyto!(D, I) === D - end - end - D = Diagonal(fill(2, 2)) - copyto!(D, I) - @test all(isone, diag(D)) -end - -@testset "diagonal triple multiplication (#49005)" begin - n = 10 - @test *(Diagonal(ones(n)), Diagonal(1:n), Diagonal(ones(n))) isa Diagonal - @test_throws DimensionMismatch (*(Diagonal(ones(n)), Diagonal(1:n), Diagonal(ones(n+1)))) - @test_throws DimensionMismatch (*(Diagonal(ones(n)), Diagonal(1:n+1), Diagonal(ones(n+1)))) - @test_throws DimensionMismatch (*(Diagonal(ones(n+1)), Diagonal(1:n), Diagonal(ones(n)))) - - # currently falls back to two-term * - @test *(Diagonal(ones(n)), Diagonal(1:n), Diagonal(ones(n)), Diagonal(1:n)) isa Diagonal -end - -@testset "triple multiplication with a sandwiched BandedMatrix" begin - D = Diagonal(StepRangeLen(NaN, 0, 4)); - B = Bidiagonal(1:4, 1:3, :U) - C = D * B * D - @test iszero(diag(C, 2)) - # test associativity - C1 = (D * B) * D - C2 = D * (B * D) - @test diag(C,2) == diag(C1,2) == diag(C2,2) -end - -@testset "diagind" begin - D = Diagonal(1:4) - M = Matrix(D) - @testset for k in -4:4 - @test D[diagind(D,k)] == M[diagind(M,k)] - end -end - -@testset "avoid matmul ambiguities with ::MyMatrix * ::AbstractMatrix" begin - A = [i+j for i in 1:2, j in 1:2] - S = SizedArrays.SizedArray{(2,2)}(A) - D = Diagonal([1:2;]) - @test S * D == A * D - @test D * S == D * A - C1, C2 = zeros(2,2), zeros(2,2) - @test mul!(C1, S, D) == mul!(C2, A, D) - @test mul!(C1, S, D, 1, 2) == mul!(C2, A, D, 1 ,2) - @test mul!(C1, D, S) == mul!(C2, D, A) - @test mul!(C1, D, S, 1, 2) == mul!(C2, D, A, 1 ,2) - - v = [i for i in 1:2] - sv = SizedArrays.SizedArray{(2,)}(v) - @test D * sv == D * v - C1, C2 = zeros(2), zeros(2) - @test mul!(C1, D, sv) == mul!(C2, D, v) - @test mul!(C1, D, sv, 1, 2) == mul!(C2, D, v, 1 ,2) -end - -@testset "copy" begin - @test copy(Diagonal(1:5)) === Diagonal(1:5) -end - -@testset "kron! 
for Diagonal" begin - a = Diagonal([2,2]) - b = Diagonal([1,1]) - c = Diagonal([0,0,0,0]) - kron!(c,b,a) - @test c == Diagonal([2,2,2,2]) - c=Diagonal(Vector{Float64}(undef, 4)) - kron!(c,a,b) - @test c == Diagonal([2,2,2,2]) -end - -@testset "uppertriangular/lowertriangular" begin - D = Diagonal([1,2]) - @test LinearAlgebra.uppertriangular(D) === D - @test LinearAlgebra.lowertriangular(D) === D -end - -@testset "mul/div with an adjoint vector" begin - A = [1.0;;] - x = [1.0] - yadj = Diagonal(A) \ x' - @test typeof(yadj) == typeof(x') - @test yadj == x' - yadj = Diagonal(A) * x' - @test typeof(yadj) == typeof(x') - @test yadj == x' -end - -@testset "Matrix conversion for non-numeric" begin - D = Diagonal(fill(Diagonal([1,3]), 2)) - M = Matrix{eltype(D)}(D) - @test M isa Matrix{eltype(D)} - @test M == D -end - -@testset "rmul!/lmul! with banded matrices" begin - @testset "$(nameof(typeof(B)))" for B in ( - Bidiagonal(rand(4), rand(3), :L), - Tridiagonal(rand(3), rand(4), rand(3)) - ) - BA = Array(B) - D = Diagonal(rand(size(B,1))) - DA = Array(D) - @test rmul!(copy(B), D) ≈ B * D ≈ BA * DA - @test lmul!(D, copy(B)) ≈ D * B ≈ DA * BA - end -end - -@testset "rmul!/lmul! with numbers" begin - D = Diagonal(rand(4)) - @test rmul!(copy(D), 0.2) ≈ rmul!(Array(D), 0.2) - @test lmul!(0.2, copy(D)) ≈ lmul!(0.2, Array(D)) - @test_throws ArgumentError rmul!(D, NaN) - @test_throws ArgumentError lmul!(NaN, D) - D = Diagonal(rand(1)) - @test all(isnan, rmul!(copy(D), NaN)) - @test all(isnan, lmul!(NaN, copy(D))) -end - -@testset "+/- with block Symmetric/Hermitian" begin - for p in ([1 2; 3 4], [1 2+im; 2-im 4+2im]) - m = SizedArrays.SizedArray{(2,2)}(p) - D = Diagonal(fill(m, 2)) - for T in (Symmetric, Hermitian) - S = T(fill(m, 2, 2)) - @test D + S == Array(D) + Array(S) - @test S + D == Array(S) + Array(D) - end - end -end - -@testset "bounds-check with CartesianIndex ranges" begin - D = Diagonal(1:typemax(Int)) - @test checkbounds(Bool, D, diagind(D, IndexCartesian())) -end - -@testset "zeros in kron with block matrices" begin - D = Diagonal(1:4) - B = reshape([ones(2,2), ones(3,2), ones(2,3), ones(3,3)], 2, 2) - @test kron(D, B) == kron(Array(D), B) - @test kron(B, D) == kron(B, Array(D)) - D2 = Diagonal([ones(2,2), ones(3,3)]) - @test kron(D, D2) == kron(D, Array{eltype(D2)}(D2)) - @test kron(D2, D) == kron(Array{eltype(D2)}(D2), D) -end - -end # module TestDiagonal diff --git a/stdlib/LinearAlgebra/test/eigen.jl b/stdlib/LinearAlgebra/test/eigen.jl deleted file mode 100644 index a82c745436009..0000000000000 --- a/stdlib/LinearAlgebra/test/eigen.jl +++ /dev/null @@ -1,282 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestEigen - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted, UtiAUi! - -n = 10 - -# Split n into 2 parts for tests needing two matrices -n1 = div(n, 2) -n2 = 2*n1 - -Random.seed!(12343219) - -areal = randn(n,n)/2 -aimg = randn(n,n)/2 - -@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, Int) - aa = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? 
complex.(areal, aimg) : areal) - asym = aa' + aa # symmetric indefinite - apd = aa' * aa # symmetric positive-definite - for (a, asym, apd) in ((aa, asym, apd), - (view(aa, 1:n, 1:n), - view(asym, 1:n, 1:n), - view(apd, 1:n, 1:n))) - ε = εa = eps(abs(float(one(eltya)))) - - α = rand(eltya) - β = rand(eltya) - eab = eigen(α,β) - @test eab.values == eigvals(fill(α,1,1),fill(β,1,1)) - @test eab.vectors == eigvecs(fill(α,1,1),fill(β,1,1)) - - @testset "non-symmetric eigen decomposition" begin - d, v = eigen(a) - for i in 1:size(a,2) - @test a*v[:,i] ≈ d[i]*v[:,i] - end - f = eigen(a) - @test det(a) ≈ det(f) - @test inv(a) ≈ inv(f) - @test isposdef(a) == isposdef(f) - @test eigvals(f) === f.values - @test eigvecs(f) === f.vectors - @test Array(f) ≈ a - - for T in (Tridiagonal(a), Hermitian(Tridiagonal(a), :U), Hermitian(Tridiagonal(a), :L)) - f = eigen(T) - d, v = f - for i in 1:size(a,2) - @test T*v[:,i] ≈ d[i]*v[:,i] - end - @test eigvals(T) ≈ d - @test det(T) ≈ det(f) - @test inv(T) ≈ inv(f) - end - - num_fact = eigen(one(eltya)) - @test num_fact.values[1] == one(eltya) - h = asym - @test minimum(eigvals(h)) ≈ eigmin(h) - @test maximum(eigvals(h)) ≈ eigmax(h) - @test_throws DomainError eigmin(a - a') - @test_throws DomainError eigmax(a - a') - end - @testset "symmetric generalized eigenproblem" begin - if isa(a, Array) - asym_sg = asym[1:n1, 1:n1] - a_sg = a[:,n1+1:n2] - else - asym_sg = view(asym, 1:n1, 1:n1) - a_sg = view(a, 1:n, n1+1:n2) - end - ASG2 = a_sg'a_sg - f = eigen(asym_sg, ASG2) - @test asym_sg*f.vectors ≈ (ASG2*f.vectors) * Diagonal(f.values) - @test f.values ≈ eigvals(asym_sg, ASG2) - @test prod(f.values) ≈ prod(eigvals(asym_sg/(ASG2))) atol=200ε - @test eigvecs(asym_sg, ASG2) == f.vectors - @test eigvals(f) === f.values - @test eigvecs(f) === f.vectors - @test_throws FieldError f.Z - - d,v = eigen(asym_sg, ASG2) - @test d == f.values - @test v == f.vectors - - # solver for in-place U' \ A / U (#14896) - if !(eltya <: Integer) - for atyp in (eltya <: Real ? 
(Symmetric, Hermitian) : (Hermitian,)) - for utyp in (UpperTriangular, Diagonal), uplo in (:L, :U) - A = atyp(asym_sg, uplo) - U = utyp(ASG2) - @test UtiAUi!(copy(A), U) ≈ U' \ A / U - end - end - end - - # matrices of different types (#14896) - D = Diagonal(ASG2) - for uplo in (:L, :U) - if eltya <: Real - fs = eigen(Symmetric(asym_sg, uplo), ASG2) - @test fs.values ≈ f.values - @test abs.(fs.vectors) ≈ abs.(f.vectors) # may change sign - gs = eigen(Symmetric(asym_sg, uplo), D) - @test Symmetric(asym_sg, uplo)*gs.vectors ≈ (D*gs.vectors) * Diagonal(gs.values) - end - fh = eigen(Hermitian(asym_sg, uplo), ASG2) - @test fh.values ≈ f.values - @test abs.(fh.vectors) ≈ abs.(f.vectors) # may change sign - gh = eigen(Hermitian(asym_sg, uplo), D) - @test Hermitian(asym_sg, uplo)*gh.vectors ≈ (D*gh.vectors) * Diagonal(gh.values) - gd = eigen(Matrix(Hermitian(ASG2, uplo)), D) - @test Hermitian(ASG2, uplo) * gd.vectors ≈ D * gd.vectors * Diagonal(gd.values) - gd = eigen(Hermitian(Tridiagonal(ASG2), uplo), D) - @test Hermitian(Tridiagonal(ASG2), uplo) * gd.vectors ≈ D * gd.vectors * Diagonal(gd.values) - end - gd = eigen(D, D) - @test all(≈(1), gd.values) - @test D * gd.vectors ≈ D * gd.vectors * Diagonal(gd.values) - gd = eigen(Matrix(D), D) - @test D * gd.vectors ≈ D * gd.vectors * Diagonal(gd.values) - gd = eigen(D, Matrix(D)) - @test D * gd.vectors ≈ D * gd.vectors * Diagonal(gd.values) - gd = eigen(Tridiagonal(ASG2), Matrix(D)) - @test Tridiagonal(ASG2) * gd.vectors ≈ D * gd.vectors * Diagonal(gd.values) - end - @testset "Non-symmetric generalized eigenproblem" begin - if isa(a, Array) - a1_nsg = a[1:n1, 1:n1] - a2_nsg = a[n1+1:n2, n1+1:n2] - else - a1_nsg = view(a, 1:n1, 1:n1) - a2_nsg = view(a, n1+1:n2, n1+1:n2) - end - sortfunc = x -> real(x) + imag(x) - f = eigen(a1_nsg, a2_nsg; sortby = sortfunc) - @test a1_nsg*f.vectors ≈ (a2_nsg*f.vectors) * Diagonal(f.values) - @test f.values ≈ eigvals(a1_nsg, a2_nsg; sortby = sortfunc) - @test prod(f.values) ≈ prod(eigvals(a1_nsg/a2_nsg, sortby = sortfunc)) atol=50000ε - @test eigvecs(a1_nsg, a2_nsg; sortby = sortfunc) == f.vectors - @test_throws FieldError f.Z - - g = eigen(a1_nsg, Diagonal(1:n1)) - @test a1_nsg*g.vectors ≈ (Diagonal(1:n1)*g.vectors) * Diagonal(g.values) - - d,v = eigen(a1_nsg, a2_nsg; sortby = sortfunc) - @test d == f.values - @test v == f.vectors - end - end -end - -@testset "eigenvalue computations with NaNs" begin - for eltya in (NaN16, NaN32, NaN) - @test_throws(ArgumentError, eigen(fill(eltya, 1, 1))) - @test_throws(ArgumentError, eigen(fill(eltya, 2, 2))) - test_matrix = rand(typeof(eltya),3,3) - test_matrix[1,3] = eltya - @test_throws(ArgumentError, eigen(test_matrix)) - @test_throws(ArgumentError, eigvals(test_matrix)) - @test_throws(ArgumentError, eigvecs(test_matrix)) - @test_throws(ArgumentError, eigen(Symmetric(test_matrix))) - @test_throws(ArgumentError, eigvals(Symmetric(test_matrix))) - @test_throws(ArgumentError, eigvecs(Symmetric(test_matrix))) - @test_throws(ArgumentError, eigen(Hermitian(test_matrix))) - @test_throws(ArgumentError, eigvals(Hermitian(test_matrix))) - @test_throws(ArgumentError, eigvecs(Hermitian(test_matrix))) - @test_throws(ArgumentError, eigen(Hermitian(complex.(test_matrix)))) - @test_throws(ArgumentError, eigvals(Hermitian(complex.(test_matrix)))) - @test_throws(ArgumentError, eigvecs(Hermitian(complex.(test_matrix)))) - @test eigen(Symmetric(test_matrix, :L)) isa Eigen - @test eigen(Hermitian(test_matrix, :L)) isa Eigen - end -end - -# test a matrix larger than 140-by-140 for #14174 -let aa = 
rand(200, 200) - for a in (aa, view(aa, 1:n, 1:n)) - f = eigen(a) - @test a ≈ f.vectors * Diagonal(f.values) / f.vectors - end -end - -@testset "rational promotion: issue #24935" begin - A = [1//2 0//1; 0//1 2//3] - for λ in (eigvals(A), @inferred(eigvals(Symmetric(A)))) - @test λ isa Vector{Float64} - @test λ ≈ [0.5, 2/3] - end -end - -@testset "text/plain (REPL) printing of Eigen and GeneralizedEigen" begin - A, B = randn(5,5), randn(5,5) - e = eigen(A) - ge = eigen(A, B) - valsstring = sprint((t, s) -> show(t, "text/plain", s), e.values) - vecsstring = sprint((t, s) -> show(t, "text/plain", s), e.vectors) - factstring = sprint((t, s) -> show(t, "text/plain", s), e) - @test factstring == "$(summary(e))\nvalues:\n$valsstring\nvectors:\n$vecsstring" -end - -@testset "eigen of an Adjoint" begin - Random.seed!(4) - A = randn(3,3) - @test eigvals(A') == eigvals(copy(A')) - @test eigen(A') == eigen(copy(A')) - @test eigmin(A') == eigmin(copy(A')) - @test eigmax(A') == eigmax(copy(A')) -end - -@testset "equality of eigen factorizations" begin - A1 = Float32[1 0; 0 2] - A2 = Float64[1 0; 0 2] - EA1 = eigen(A1) - EA2 = eigen(A2) - @test EA1 == EA2 - @test hash(EA1) == hash(EA2) - @test isequal(EA1, EA2) - - # trivial RHS to ensure that values match exactly - B1 = Float32[1 0; 0 1] - B2 = Float64[1 0; 0 1] - EA1B1 = eigen(A1, B1) - EA2B2 = eigen(A2, B2) - @test EA1B1 == EA2B2 - @test hash(EA1B1) == hash(EA2B2) - @test isequal(EA1B1, EA2B2) -end - -@testset "Float16" begin - A = Float16[4. 12. -16.; 12. 37. -43.; -16. -43. 98.] - B = eigen(A) - B32 = eigen(Float32.(A)) - C = Float16[3 -2; 4 -1] - D = eigen(C) - D32 = eigen(Float32.(C)) - F = eigen(complex(C)) - F32 = eigen(complex(Float32.(C))) - @test B isa Eigen{Float16, Float16, Matrix{Float16}, Vector{Float16}} - @test B.values isa Vector{Float16} - @test B.vectors isa Matrix{Float16} - @test B.values ≈ B32.values - @test B.vectors ≈ B32.vectors - @test D isa Eigen{ComplexF16, ComplexF16, Matrix{ComplexF16}, Vector{ComplexF16}} - @test D.values isa Vector{ComplexF16} - @test D.vectors isa Matrix{ComplexF16} - @test D.values ≈ D32.values - @test D.vectors ≈ D32.vectors - @test F isa Eigen{ComplexF16, ComplexF16, Matrix{ComplexF16}, Vector{ComplexF16}} - @test F.values isa Vector{ComplexF16} - @test F.vectors isa Matrix{ComplexF16} - @test F.values ≈ F32.values - @test F.vectors ≈ F32.vectors - - for T in (Float16, ComplexF16) - D = Diagonal(T[1,2,4]) - A = Array(D) - B = eigen(A) - @test B isa Eigen{Float16, Float16, Matrix{Float16}, Vector{Float16}} - @test B.values isa Vector{Float16} - @test B.vectors isa Matrix{Float16} - end - D = Diagonal(ComplexF16[im,2,4]) - A = Array(D) - B = eigen(A) - @test B isa Eigen{Float16, ComplexF16, Matrix{Float16}, Vector{ComplexF16}} - @test B.values isa Vector{ComplexF16} - @test B.vectors isa Matrix{Float16} -end - -@testset "complex eigen inference (#52289)" begin - A = ComplexF64[1.0 0.0; 0.0 8.0] - TC = Eigen{ComplexF64, ComplexF64, Matrix{ComplexF64}, Vector{ComplexF64}} - TR = Eigen{ComplexF64, Float64, Matrix{ComplexF64}, Vector{Float64}} - λ, v = @inferred Union{TR,TC} eigen(A) - @test λ == [1.0, 8.0] -end - -end # module TestEigen diff --git a/stdlib/LinearAlgebra/test/factorization.jl b/stdlib/LinearAlgebra/test/factorization.jl deleted file mode 100644 index f80c5197836a1..0000000000000 --- a/stdlib/LinearAlgebra/test/factorization.jl +++ /dev/null @@ -1,94 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestFactorization -using Test, LinearAlgebra - -@testset "equality for factorizations - $f" for f in Any[ - bunchkaufman, - cholesky, - x -> cholesky(x, RowMaximum()), - eigen, - hessenberg, - lq, - lu, - qr, - x -> qr(x, ColumnNorm()), - svd, - schur, -] - A = randn(3, 3) - A = A * A' # ensure A is pos. def. and symmetric - F, G = f(A), f(A) - - @test F == G - @test isequal(F, G) - @test hash(F) == hash(G) - - f === hessenberg && continue - - # change all arrays in F to have eltype Float32 - F = typeof(F).name.wrapper(Base.mapany(1:nfields(F)) do i - x = getfield(F, i) - return x isa AbstractArray{Float64} ? Float32.(x) : x - end...) - # round all arrays in G to the nearest Float64 representable as Float32 - G = typeof(G).name.wrapper(Base.mapany(1:nfields(G)) do i - x = getfield(G, i) - return x isa AbstractArray{Float64} ? Float64.(Float32.(x)) : x - end...) - - @test F == G broken=!(f === eigen || f === qr || f == bunchkaufman || f == cholesky || F isa CholeskyPivoted) - @test isequal(F, G) broken=!(f === eigen || f === qr || f == bunchkaufman || f == cholesky || F isa CholeskyPivoted) - @test hash(F) == hash(G) -end - -@testset "size for factorizations - $f" for f in Any[ - bunchkaufman, - cholesky, - x -> cholesky(x, RowMaximum()), - hessenberg, - lq, - lu, - qr, - x -> qr(x, ColumnNorm()), - svd, -] - A = randn(3, 3) - A = A * A' # ensure A is pos. def. and symmetric - F = f(A) - @test size(F) == size(A) - @test size(F') == size(A') -end - -@testset "size for transpose factorizations - $f" for f in Any[ - bunchkaufman, - cholesky, - x -> cholesky(x, RowMaximum()), - hessenberg, - lq, - lu, - svd, -] - A = randn(3, 3) - A = A * A' # ensure A is pos. def. and symmetric - F = f(A) - @test size(F) == size(A) - @test size(transpose(F)) == size(transpose(A)) -end - -@testset "equality of QRCompactWY" begin - A = rand(100, 100) - F, G = qr(A), qr(A) - - @test F == G - @test isequal(F, G) - @test hash(F) == hash(G) - - G.T[28, 100] = 42 - - @test F != G - @test !isequal(F, G) - @test hash(F) != hash(G) -end - -end diff --git a/stdlib/LinearAlgebra/test/generic.jl b/stdlib/LinearAlgebra/test/generic.jl deleted file mode 100644 index 6d11ec824e538..0000000000000 --- a/stdlib/LinearAlgebra/test/generic.jl +++ /dev/null @@ -1,840 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestGeneric - -using Test, LinearAlgebra, Random -using Test: GenericArray -using LinearAlgebra: isbanded - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") - -isdefined(Main, :Quaternions) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Quaternions.jl")) -using .Main.Quaternions - -isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl")) -using .Main.OffsetArrays - -isdefined(Main, :DualNumbers) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "DualNumbers.jl")) -using .Main.DualNumbers - -isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl")) -using .Main.FillArrays - -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays - -Random.seed!(123) - -n = 5 # should be odd - -@testset for elty in (Int, Rational{BigInt}, Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}) - # In the long run, these tests should step through Strang's - # axiomatic definition of determinants. - # If all axioms are satisfied and all the composition rules work, - # all determinants will be correct except for floating point errors. - if elty != Rational{BigInt} - @testset "det(A::Matrix)" begin - # The determinant of the identity matrix should always be 1. - for i = 1:10 - A = Matrix{elty}(I, i, i) - @test det(A) ≈ one(elty) - end - - # The determinant of a Householder reflection matrix should always be -1. - for i = 1:10 - A = Matrix{elty}(I, 10, 10) - A[i, i] = -one(elty) - @test det(A) ≈ -one(elty) - end - - # The determinant of a rotation matrix should always be 1. 
- if elty != Int - for theta = convert(Vector{elty}, pi ./ [1:4;]) - R = [cos(theta) -sin(theta); - sin(theta) cos(theta)] - @test convert(elty, det(R)) ≈ one(elty) - end - end - end - end - if elty <: Int - A = rand(-n:n, n, n) + 10I - elseif elty <: Rational - A = Rational{BigInt}[rand(-n:n)/rand(1:n) for i = 1:n, j = 1:n] + 10I - elseif elty <: Real - A = convert(Matrix{elty}, randn(n,n)) + 10I - else - A = convert(Matrix{elty}, complex.(randn(n,n), randn(n,n))) - end - - @testset "logdet and logabsdet" begin - @test logdet(A[1,1]) == log(det(A[1,1])) - @test logdet(A) ≈ log(det(A)) - @test logabsdet(A)[1] ≈ log(abs(det(A))) - @test logabsdet(Matrix{elty}(-I, n, n))[2] == -1 - infinity = convert(float(elty), Inf) - @test logabsdet(zeros(elty, n, n)) == (-infinity, zero(elty)) - if elty <: Real - @test logabsdet(A)[2] == sign(det(A)) - @test_throws DomainError logdet(Matrix{elty}(-I, n, n)) - else - @test logabsdet(A)[2] ≈ sign(det(A)) - end - # logabsdet for Number" - x = A[1, 1] # getting a number of type elty - X = fill(x, 1, 1) - @test logabsdet(x)[1] ≈ logabsdet(X)[1] - @test logabsdet(x)[2] ≈ logabsdet(X)[2] - # Diagonal, upper, and lower triangular matrices - chksign(s1, s2) = if elty <: Real s1 == s2 else s1 ≈ s2 end - D = Matrix(Diagonal(A)) - v, s = logabsdet(D) - @test v ≈ log(abs(det(D))) && chksign(s, sign(det(D))) - R = triu(A) - v, s = logabsdet(R) - @test v ≈ log(abs(det(R))) && chksign(s, sign(det(R))) - L = tril(A) - v, s = logabsdet(L) - @test v ≈ log(abs(det(L))) && chksign(s, sign(det(L))) - end - - @testset "det with nonstandard Number type" begin - elty <: Real && @test det(Dual.(triu(A), zero(A))) isa Dual - end -end - -@testset "diag" begin - A = Matrix(1.0I, 4, 4) - @test diag(A) == fill(1, 4) - @test diag(view(A, 1:3, 1:3)) == fill(1, 3) - @test diag(view(A, 1:2, 1:2)) == fill(1, 2) - @test_throws ArgumentError diag(rand(10)) -end - -@testset "generic axpy" begin - x = ['a','b','c','d','e'] - y = ['a','b','c','d','e'] - α, β = 'f', 'g' - @test_throws DimensionMismatch axpy!(α, x, ['g']) - @test_throws DimensionMismatch axpby!(α, x, β, ['g']) - @test_throws BoundsError axpy!(α, x, Vector(-1:5), y, Vector(1:7)) - @test_throws BoundsError axpy!(α, x, Vector(1:7), y, Vector(-1:5)) - @test_throws BoundsError axpy!(α, x, Vector(1:7), y, Vector(1:7)) - @test_throws DimensionMismatch axpy!(α, x, Vector(1:3), y, Vector(1:5)) -end - -@test !issymmetric(fill(1,5,3)) -@test !ishermitian(fill(1,5,3)) -@test (x = fill(1,3); cross(x,x) == zeros(3)) -@test_throws DimensionMismatch cross(fill(1,3), fill(1,4)) -@test_throws DimensionMismatch cross(fill(1,2), fill(1,3)) - -@test tr(Bidiagonal(fill(1,5),fill(0,4),:U)) == 5 - - -@testset "array and subarray" begin - for aa in (reshape([1.:6;], (2,3)), fill(float.(rand(Int8,2,2)), 2,3)) - for a in (aa, view(aa, 1:2, 1:2)) - am, an = size(a) - @testset "Scaling with rmul! and lmul" begin - @test rmul!(copy(a), 5.) == a*5 - @test lmul!(5., copy(a)) == a*5 - b = randn(2048) - subB = view(b, :, :) - @test rmul!(copy(b), 5.) == b*5 - @test rmul!(copy(subB), 5.) == subB*5 - @test lmul!(Diagonal([1.; 2.]), copy(a)) == a.*[1; 2] - @test lmul!(Diagonal([1; 2]), copy(a)) == a.*[1; 2] - @test rmul!(copy(a), Diagonal(1.:an)) == a.*Vector(1:an)' - @test rmul!(copy(a), Diagonal(1:an)) == a.*Vector(1:an)' - @test_throws DimensionMismatch lmul!(Diagonal(Vector{Float64}(undef,am+1)), a) - @test_throws DimensionMismatch rmul!(a, Diagonal(Vector{Float64}(undef,an+1))) - end - - @testset "Scaling with rdiv! and ldiv!" begin - @test rdiv!(copy(a), 5.) 
== a/5 - @test ldiv!(5., copy(a)) == a/5 - @test ldiv!(zero(a), 5., copy(a)) == a/5 - end - - @testset "Scaling with 3-argument mul!" begin - @test mul!(similar(a), 5., a) == a*5 - @test mul!(similar(a), a, 5.) == a*5 - @test mul!(similar(a), Diagonal([1.; 2.]), a) == a.*[1; 2] - @test mul!(similar(a), Diagonal([1; 2]), a) == a.*[1; 2] - @test_throws DimensionMismatch mul!(similar(a), Diagonal(Vector{Float64}(undef, am+1)), a) - @test_throws DimensionMismatch mul!(Matrix{Float64}(undef, 3, 2), a, Diagonal(Vector{Float64}(undef, an+1))) - @test_throws DimensionMismatch mul!(similar(a), a, Diagonal(Vector{Float64}(undef, an+1))) - @test mul!(similar(a), a, Diagonal(1.:an)) == a.*Vector(1:an)' - @test mul!(similar(a), a, Diagonal(1:an)) == a.*Vector(1:an)' - end - - @testset "Scaling with 5-argument mul!" begin - @test mul!(copy(a), 5., a, 10, 100) == a*150 - @test mul!(copy(a), a, 5., 10, 100) == a*150 - @test mul!(vec(copy(a)), 5., a, 10, 100) == vec(a*150) - @test mul!(vec(copy(a)), a, 5., 10, 100) == vec(a*150) - @test_throws DimensionMismatch mul!([vec(copy(a)); 0], 5., a, 10, 100) - @test_throws DimensionMismatch mul!([vec(copy(a)); 0], a, 5., 10, 100) - @test mul!(copy(a), Diagonal([1.; 2.]), a, 10, 100) == 10a.*[1; 2] .+ 100a - @test mul!(copy(a), Diagonal([1; 2]), a, 10, 100) == 10a.*[1; 2] .+ 100a - @test mul!(copy(a), a, Diagonal(1.:an), 10, 100) == 10a.*Vector(1:an)' .+ 100a - @test mul!(copy(a), a, Diagonal(1:an), 10, 100) == 10a.*Vector(1:an)' .+ 100a - end - end - end -end - -@testset "scale real matrix by complex type" begin - @test_throws InexactError rmul!([1.0], 2.0im) - @test isequal([1.0] * 2.0im, ComplexF64[2.0im]) - @test isequal(2.0im * [1.0], ComplexF64[2.0im]) - @test isequal(Float32[1.0] * 2.0f0im, ComplexF32[2.0im]) - @test isequal(Float32[1.0] * 2.0im, ComplexF64[2.0im]) - @test isequal(Float64[1.0] * 2.0f0im, ComplexF64[2.0im]) - @test isequal(Float32[1.0] * big(2.0)im, Complex{BigFloat}[2.0im]) - @test isequal(Float64[1.0] * big(2.0)im, Complex{BigFloat}[2.0im]) - @test isequal(BigFloat[1.0] * 2.0im, Complex{BigFloat}[2.0im]) - @test isequal(BigFloat[1.0] * 2.0f0im, Complex{BigFloat}[2.0im]) -end -@testset "* and mul! for non-commutative scaling" begin - q = Quaternion(0.44567, 0.755871, 0.882548, 0.423612) - qmat = [Quaternion(0.015007, 0.355067, 0.418645, 0.318373)] - @test lmul!(q, copy(qmat)) != rmul!(copy(qmat), q) - @test q*qmat ≉ qmat*q - @test conj(q*qmat) ≈ conj(qmat)*conj(q) - @test q * (q \ qmat) ≈ qmat ≈ (qmat / q) * q - @test q\qmat ≉ qmat/q - alpha = Quaternion(rand(4)...) 
- beta = Quaternion(0, 0, 0, 0) - @test mul!(copy(qmat), qmat, q, alpha, beta) ≈ qmat * q * alpha - @test mul!(copy(qmat), q, qmat, alpha, beta) ≈ q * qmat * alpha -end -@testset "ops on Numbers" begin - @testset for elty in [Float32,Float64,ComplexF32,ComplexF64] - a = rand(elty) - @test tr(a) == a - @test rank(zero(elty)) == 0 - @test rank(one(elty)) == 1 - @test !isfinite(cond(zero(elty))) - @test cond(a) == one(elty) - @test cond(a,1) == one(elty) - @test issymmetric(a) - @test ishermitian(one(elty)) - @test det(a) == a - @test norm(a) == abs(a) - @test norm(a, 0) == 1 - @test norm(0, 0) == 0 - end - - @test !issymmetric(NaN16) - @test !issymmetric(NaN32) - @test !issymmetric(NaN) - @test norm(NaN) === NaN - @test norm(NaN, 0) === NaN -end - -@test rank(zeros(4)) == 0 -@test rank(1:10) == 1 -@test rank(fill(0, 0, 0)) == 0 -@test rank([1.0 0.0; 0.0 0.9],0.95) == 1 -@test rank([1.0 0.0; 0.0 0.9],rtol=0.95) == 1 -@test rank([1.0 0.0; 0.0 0.9],atol=0.95) == 1 -@test rank([1.0 0.0; 0.0 0.9],atol=0.95,rtol=0.95)==1 -@test qr(big.([0 1; 0 0])).R == [0 1; 0 0] - -@test norm([2.4e-322, 4.4e-323]) ≈ 2.47e-322 -@test norm([2.4e-322, 4.4e-323], 3) ≈ 2.4e-322 -@test_throws ArgumentError opnorm(Matrix{Float64}(undef,5,5),5) - -# operator norm for zero-dimensional domain is zero (see #40370) -@testset "opnorm" begin - for m in (0, 1, 2) - @test @inferred(opnorm(fill(1,0,m))) == 0.0 - @test @inferred(opnorm(fill(1,m,0))) == 0.0 - end - for m in (1, 2) - @test @inferred(opnorm(fill(1im,1,m))) ≈ sqrt(m) - @test @inferred(opnorm(fill(1im,m,1))) ≈ sqrt(m) - end - @test @inferred(opnorm(fill(1,2,2))) ≈ 2 -end - -@testset "generic norm for arrays of arrays" begin - x = Vector{Int}[[1,2], [3,4]] - @test @inferred(norm(x)) ≈ sqrt(30) - @test norm(x, 0) == length(x) - @test norm(x, 1) ≈ 5+sqrt(5) - @test norm(x, 3) ≈ cbrt(5^3 +sqrt(5)^3) -end - -@testset "norm of transpose/adjoint equals norm of parent #32739" begin - for t in (transpose, adjoint), elt in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}) - # Vector/matrix of scalars - for sz in ((2,), (2, 3)) - A = rand(elt, sz...) - Aᵀ = t(A) - @test norm(Aᵀ) ≈ norm(Matrix(Aᵀ)) - end - - # Vector/matrix of vectors/matrices - for sz_outer in ((2,), (2, 3)), sz_inner in ((3,), (1, 2)) - A = [rand(elt, sz_inner...) for _ in CartesianIndices(sz_outer)] - Aᵀ = t(A) - @test norm(Aᵀ) ≈ norm(Matrix(Matrix.(Aᵀ))) - end - end -end - -@testset "rotate! and reflect!" begin - x = rand(ComplexF64, 10) - y = rand(ComplexF64, 10) - c = rand(Float64) - s = rand(ComplexF64) - - x2 = copy(x) - y2 = copy(y) - rotate!(x, y, c, s) - @test x ≈ c*x2 + s*y2 - @test y ≈ -conj(s)*x2 + c*y2 - @test_throws DimensionMismatch rotate!([x; x], y, c, s) - - x3 = copy(x) - y3 = copy(y) - reflect!(x, y, c, s) - @test x ≈ c*x3 + s*y3 - @test y ≈ conj(s)*x3 - c*y3 - @test_throws DimensionMismatch reflect!([x; x], y, c, s) -end - -@testset "LinearAlgebra.reflectorApply!" begin - for T in (Float64, ComplexF64) - x = rand(T, 6) - τ = rand(T) - A = rand(T, 6) - B = LinearAlgebra.reflectorApply!(x, τ, copy(A)) - C = LinearAlgebra.reflectorApply!(x, τ, reshape(copy(A), (length(A), 1))) - @test B[1] ≈ C[1] ≈ A[1] - conj(τ)*(A[1] + dot(x[2:end], A[2:end])) - @test B[2:end] ≈ C[2:end] ≈ A[2:end] - conj(τ)*(A[1] + dot(x[2:end], A[2:end]))*x[2:end] - end -end - -@testset "axp(b)y! 
for element type without commutative multiplication" begin - α = [1 2; 3 4] - β = [5 6; 7 8] - x = fill([ 9 10; 11 12], 3) - y = fill([13 14; 15 16], 3) - axpy = axpy!(α, x, deepcopy(y)) - axpby = axpby!(α, x, β, deepcopy(y)) - @test axpy == x .* [α] .+ y - @test axpy != [α] .* x .+ y - @test axpby == x .* [α] .+ y .* [β] - @test axpby != [α] .* x .+ [β] .* y - axpy = axpy!(zero(α), x, deepcopy(y)) - axpby = axpby!(zero(α), x, one(β), deepcopy(y)) - @test axpy == y - @test axpy == y - @test axpby == y - @test axpby == y -end - -@testset "axpy! for x and y of different dimensions" begin - α = 5 - x = 2:5 - y = fill(1, 2, 4) - rx = [1 4] - ry = [2 8] - @test axpy!(α, x, rx, y, ry) == [1 1 1 1; 11 1 1 26] -end - -@testset "axp(b)y! for non strides input" begin - a = rand(5, 5) - @test axpby!(1, Hermitian(a), 1, zeros(size(a))) == Hermitian(a) - @test axpby!(1, 1.:5, 1, zeros(5)) == 1.:5 - @test axpy!(1, Hermitian(a), zeros(size(a))) == Hermitian(a) - @test axpy!(1, 1.:5, zeros(5)) == 1.:5 -end - -@testset "LinearAlgebra.axp(b)y! for stride-vector like input" begin - for T in (Float32, Float64, ComplexF32, ComplexF64) - a = rand(T, 5, 5) - @test axpby!(1, view(a, :, 1:5), 1, zeros(T, size(a))) == a - @test axpy!(1, view(a, :, 1:5), zeros(T, size(a))) == a - b = view(a, 25:-2:1) - @test axpby!(1, b, 1, zeros(T, size(b))) == b - @test axpy!(1, b, zeros(T, size(b))) == b - end -end - -@testset "norm and normalize!" begin - vr = [3.0, 4.0] - for Tr in (Float32, Float64) - for T in (Tr, Complex{Tr}) - v = convert(Vector{T}, vr) - @test norm(v) == 5.0 - w = normalize(v) - @test norm(w - [0.6, 0.8], Inf) < eps(Tr) - @test norm(w) == 1.0 - @test norm(normalize!(copy(v)) - w, Inf) < eps(Tr) - @test isempty(normalize!(T[])) - end - end -end - -@testset "normalize for multidimensional arrays" begin - - for arr in ( - fill(10.0, ()), # 0 dim - [1.0], # 1 dim - [1.0 2.0 3.0; 4.0 5.0 6.0], # 2-dim - rand(1,2,3), # higher dims - rand(1,2,3,4), - Dual.(randn(2,3), randn(2,3)), - OffsetArray([-1,0], (-2,)) # no index 1 - ) - @test normalize(arr) == normalize!(copy(arr)) - @test size(normalize(arr)) == size(arr) - @test axes(normalize(arr)) == axes(arr) - @test vec(normalize(arr)) == normalize(vec(arr)) - end - - @test typeof(normalize([1 2 3; 4 5 6])) == Array{Float64,2} -end - -@testset "normalize for scalars" begin - @test normalize(8.0) == 1.0 - @test normalize(-3.0) == -1.0 - @test normalize(-3.0, 1) == -1.0 - @test isnan(normalize(0.0)) -end - -@testset "Issue #30466" begin - @test norm([typemin(Int), typemin(Int)], Inf) == -float(typemin(Int)) - @test norm([typemin(Int), typemin(Int)], 1) == -2float(typemin(Int)) -end - -@testset "potential overflow in normalize!" begin - δ = inv(prevfloat(typemax(Float64))) - v = [δ, -δ] - - @test norm(v) === 7.866824069956793e-309 - w = normalize(v) - @test w ≈ [1/√2, -1/√2] - @test norm(w) === 1.0 - @test norm(normalize!(v) - w, Inf) < eps() -end - -@testset "normalize with Infs. Issue 29681." 
begin - @test all(isequal.(normalize([1, -1, Inf]), - [0.0, -0.0, NaN])) - @test all(isequal.(normalize([complex(1), complex(0, -1), complex(Inf, -Inf)]), - [0.0 + 0.0im, 0.0 - 0.0im, NaN + NaN*im])) -end - -@testset "Issue 14657" begin - @test det([true false; false true]) == det(Matrix(1I, 2, 2)) -end - -@test_throws ArgumentError LinearAlgebra.char_uplo(:Z) - -@testset "Issue 17650" begin - @test [0.01311489462160816, Inf] ≈ [0.013114894621608135, Inf] -end - -@testset "Issue 19035" begin - @test LinearAlgebra.promote_leaf_eltypes([1, 2, [3.0, 4.0]]) == Float64 - @test LinearAlgebra.promote_leaf_eltypes([[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]]) == ComplexF64 - @test [1, 2, 3] ≈ [1, 2, 3] - @test [[1, 2], [3, 4]] ≈ [[1, 2], [3, 4]] - @test [[1, 2], [3, 4]] ≈ [[1.0-eps(), 2.0+eps()], [3.0+2eps(), 4.0-1e8eps()]] - @test [[1, 2], [3, 4]] ≉ [[1.0-eps(), 2.0+eps()], [3.0+2eps(), 4.0-1e9eps()]] - @test [[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]] ≈ [[1,2, [3,4]], 5.0, [6im, [7.0, 8.0]]] -end - -@testset "Issue 40128" begin - @test det(BigInt[9 1 8 0; 0 0 8 7; 7 6 8 3; 2 9 7 7])::BigInt == -1 - @test det(BigInt[1 big(2)^65+1; 3 4])::BigInt == (4 - 3*(big(2)^65+1)) -end - -# Minimal modulo number type - but not subtyping Number -struct ModInt{n} - k - ModInt{n}(k) where {n} = new(mod(k,n)) - ModInt{n}(k::ModInt{n}) where {n} = k -end -Base.:+(a::ModInt{n}, b::ModInt{n}) where {n} = ModInt{n}(a.k + b.k) -Base.:-(a::ModInt{n}, b::ModInt{n}) where {n} = ModInt{n}(a.k - b.k) -Base.:*(a::ModInt{n}, b::ModInt{n}) where {n} = ModInt{n}(a.k * b.k) -Base.:-(a::ModInt{n}) where {n} = ModInt{n}(-a.k) -Base.inv(a::ModInt{n}) where {n} = ModInt{n}(invmod(a.k, n)) -Base.:/(a::ModInt{n}, b::ModInt{n}) where {n} = a*inv(b) - -Base.isfinite(a::ModInt{n}) where {n} = isfinite(a.k) -Base.zero(::Type{ModInt{n}}) where {n} = ModInt{n}(0) -Base.zero(::ModInt{n}) where {n} = ModInt{n}(0) -Base.one(::Type{ModInt{n}}) where {n} = ModInt{n}(1) -Base.one(::ModInt{n}) where {n} = ModInt{n}(1) -Base.conj(a::ModInt{n}) where {n} = a -LinearAlgebra.lupivottype(::Type{ModInt{n}}) where {n} = RowNonZero() -Base.adjoint(a::ModInt{n}) where {n} = ModInt{n}(conj(a)) -Base.transpose(a::ModInt{n}) where {n} = a # see Issue 20978 -LinearAlgebra.Adjoint(a::ModInt{n}) where {n} = adjoint(a) -LinearAlgebra.Transpose(a::ModInt{n}) where {n} = transpose(a) - -@testset "Issue 22042" begin - A = [ModInt{2}(1) ModInt{2}(0); ModInt{2}(1) ModInt{2}(1)] - b = [ModInt{2}(1), ModInt{2}(0)] - - @test A*(A\b) == b - @test A*(lu(A)\b) == b - @test A*(lu(A, NoPivot())\b) == b - @test A*(lu(A, RowNonZero())\b) == b - @test_throws MethodError lu(A, RowMaximum()) - - # Needed for pivoting: - Base.abs(a::ModInt{n}) where {n} = a - Base.:<(a::ModInt{n}, b::ModInt{n}) where {n} = a.k < b.k - @test A*(lu(A, RowMaximum())\b) == b - - A = [ModInt{2}(0) ModInt{2}(1); ModInt{2}(1) ModInt{2}(1)] - @test A*(A\b) == b - @test A*(lu(A)\b) == b - @test A*(lu(A, RowMaximum())\b) == b - @test A*(lu(A, RowNonZero())\b) == b -end - -@testset "Issue 18742" begin - @test_throws DimensionMismatch ones(4,5)/zeros(3,6) - @test_throws DimensionMismatch ones(4,5)\zeros(3,6) -end -@testset "fallback throws properly for AbstractArrays with dimension > 2" begin - @test_throws ErrorException adjoint(rand(2,2,2,2)) - @test_throws ErrorException transpose(rand(2,2,2,2)) -end - -@testset "generic functions for checking whether matrices have banded structure" begin - pentadiag = [1 2 3; 4 5 6; 7 8 9] - tridiag = [1 2 0; 4 5 6; 0 8 9] - tridiagG = GenericArray([1 2 0; 4 5 6; 0 8 9]) - Tridiag 
= Tridiagonal(tridiag) - ubidiag = [1 2 0; 0 5 6; 0 0 9] - ubidiagG = GenericArray([1 2 0; 0 5 6; 0 0 9]) - uBidiag = Bidiagonal(ubidiag, :U) - lbidiag = [1 0 0; 4 5 0; 0 8 9] - lbidiagG = GenericArray([1 0 0; 4 5 0; 0 8 9]) - lBidiag = Bidiagonal(lbidiag, :L) - adiag = [1 0 0; 0 5 0; 0 0 9] - adiagG = GenericArray([1 0 0; 0 5 0; 0 0 9]) - aDiag = Diagonal(adiag) - @testset "istriu" begin - @test !istriu(pentadiag) - @test istriu(pentadiag, -2) - @test !istriu(tridiag) - @test istriu(tridiag) == istriu(tridiagG) == istriu(Tridiag) - @test istriu(tridiag, -1) - @test istriu(tridiag, -1) == istriu(tridiagG, -1) == istriu(Tridiag, -1) - @test istriu(ubidiag) - @test istriu(ubidiag) == istriu(ubidiagG) == istriu(uBidiag) - @test !istriu(ubidiag, 1) - @test istriu(ubidiag, 1) == istriu(ubidiagG, 1) == istriu(uBidiag, 1) - @test !istriu(lbidiag) - @test istriu(lbidiag) == istriu(lbidiagG) == istriu(lBidiag) - @test istriu(lbidiag, -1) - @test istriu(lbidiag, -1) == istriu(lbidiagG, -1) == istriu(lBidiag, -1) - @test istriu(adiag) - @test istriu(adiag) == istriu(adiagG) == istriu(aDiag) - end - @testset "istril" begin - @test !istril(pentadiag) - @test istril(pentadiag, 2) - @test !istril(tridiag) - @test istril(tridiag) == istril(tridiagG) == istril(Tridiag) - @test istril(tridiag, 1) - @test istril(tridiag, 1) == istril(tridiagG, 1) == istril(Tridiag, 1) - @test !istril(ubidiag) - @test istril(ubidiag) == istril(ubidiagG) == istril(ubidiagG) - @test istril(ubidiag, 1) - @test istril(ubidiag, 1) == istril(ubidiagG, 1) == istril(uBidiag, 1) - @test istril(lbidiag) - @test istril(lbidiag) == istril(lbidiagG) == istril(lBidiag) - @test !istril(lbidiag, -1) - @test istril(lbidiag, -1) == istril(lbidiagG, -1) == istril(lBidiag, -1) - @test istril(adiag) - @test istril(adiag) == istril(adiagG) == istril(aDiag) - end - @testset "isbanded" begin - @test isbanded(pentadiag, -2, 2) - @test !isbanded(pentadiag, -1, 2) - @test !isbanded(pentadiag, -2, 1) - @test isbanded(tridiag, -1, 1) - @test isbanded(tridiag, -1, 1) == isbanded(tridiagG, -1, 1) == isbanded(Tridiag, -1, 1) - @test !isbanded(tridiag, 0, 1) - @test isbanded(tridiag, 0, 1) == isbanded(tridiagG, 0, 1) == isbanded(Tridiag, 0, 1) - @test !isbanded(tridiag, -1, 0) - @test isbanded(tridiag, -1, 0) == isbanded(tridiagG, -1, 0) == isbanded(Tridiag, -1, 0) - @test isbanded(ubidiag, 0, 1) - @test isbanded(ubidiag, 0, 1) == isbanded(ubidiagG, 0, 1) == isbanded(uBidiag, 0, 1) - @test !isbanded(ubidiag, 1, 1) - @test isbanded(ubidiag, 1, 1) == isbanded(ubidiagG, 1, 1) == isbanded(uBidiag, 1, 1) - @test !isbanded(ubidiag, 0, 0) - @test isbanded(ubidiag, 0, 0) == isbanded(ubidiagG, 0, 0) == isbanded(uBidiag, 0, 0) - @test isbanded(lbidiag, -1, 0) - @test isbanded(lbidiag, -1, 0) == isbanded(lbidiagG, -1, 0) == isbanded(lBidiag, -1, 0) - @test !isbanded(lbidiag, 0, 0) - @test isbanded(lbidiag, 0, 0) == isbanded(lbidiagG, 0, 0) == isbanded(lBidiag, 0, 0) - @test !isbanded(lbidiag, -1, -1) - @test isbanded(lbidiag, -1, -1) == isbanded(lbidiagG, -1, -1) == isbanded(lBidiag, -1, -1) - @test isbanded(adiag, 0, 0) - @test isbanded(adiag, 0, 0) == isbanded(adiagG, 0, 0) == isbanded(aDiag, 0, 0) - @test !isbanded(adiag, -1, -1) - @test isbanded(adiag, -1, -1) == isbanded(adiagG, -1, -1) == isbanded(aDiag, -1, -1) - @test !isbanded(adiag, 1, 1) - @test isbanded(adiag, 1, 1) == isbanded(adiagG, 1, 1) == isbanded(aDiag, 1, 1) - end - @testset "isdiag" begin - @test !isdiag(tridiag) - @test isdiag(tridiag) == isdiag(tridiagG) == isdiag(Tridiag) - @test 
!isdiag(ubidiag) - @test isdiag(ubidiag) == isdiag(ubidiagG) == isdiag(uBidiag) - @test !isdiag(lbidiag) - @test isdiag(lbidiag) == isdiag(lbidiagG) == isdiag(lBidiag) - @test isdiag(adiag) - @test isdiag(adiag) ==isdiag(adiagG) == isdiag(aDiag) - end -end - -@testset "isbanded/istril/istriu with rectangular matrices" begin - @testset "$(size(A))" for A in [zeros(0,4), zeros(2,5), zeros(5,2), zeros(4,0)] - @testset for m in -(size(A,1)-1):(size(A,2)-1) - A .= 0 - A[diagind(A, m)] .= 1 - G = GenericArray(A) - @testset for (kl,ku) in Iterators.product(-6:6, -6:6) - @test isbanded(A, kl, ku) == isbanded(G, kl, ku) == isempty(A) || (m in (kl:ku)) - end - @testset for k in -6:6 - @test istriu(A,k) == istriu(G,k) == isempty(A) || (k <= m) - @test istril(A,k) == istril(G,k) == isempty(A) || (k >= m) - end - end - end -end - -@testset "missing values" begin - @test ismissing(norm(missing)) - x = [5, 6, missing] - y = [missing, 5, 6] - for p in (-Inf, -1, 1, 2, 3, Inf) - @test ismissing(norm(x, p)) - @test ismissing(norm(y, p)) - end - @test_broken ismissing(norm(x, 0)) -end - -@testset "avoid stackoverflow of norm on AbstractChar" begin - @test_throws ArgumentError norm('a') - @test_throws ArgumentError norm(['a', 'b']) - @test_throws ArgumentError norm("s") - @test_throws ArgumentError norm(["s", "t"]) -end - -@testset "peakflops" begin - @test LinearAlgebra.peakflops(1024, eltype=Float32, ntrials=2) > 0 -end - -@testset "NaN handling: Issue 28972" begin - @test all(isnan, rmul!([NaN], 0.0)) - @test all(isnan, rmul!(Any[NaN], 0.0)) - @test all(isnan, lmul!(0.0, [NaN])) - @test all(isnan, lmul!(0.0, Any[NaN])) - - @test all(!isnan, rmul!([NaN], false)) - @test all(!isnan, rmul!(Any[NaN], false)) - @test all(!isnan, lmul!(false, [NaN])) - @test all(!isnan, lmul!(false, Any[NaN])) -end - -@testset "adjtrans dot" begin - for t in (transpose, adjoint), T in (ComplexF64, Quaternion{Float64}) - x, y = t(rand(T, 10)), t(rand(T, 10)) - X, Y = copy(x), copy(y) - @test dot(x, y) ≈ dot(X, Y) - x, y = t([rand(T, 2, 2) for _ in 1:5]), t([rand(T, 2, 2) for _ in 1:5]) - X, Y = copy(x), copy(y) - @test dot(x, y) ≈ dot(X, Y) - x, y = t(rand(T, 10, 5)), t(rand(T, 10, 5)) - X, Y = copy(x), copy(y) - @test dot(x, y) ≈ dot(X, Y) - x = t([rand(T, 2, 2) for _ in 1:5, _ in 1:5]) - y = t([rand(T, 2, 2) for _ in 1:5, _ in 1:5]) - X, Y = copy(x), copy(y) - @test dot(x, y) ≈ dot(X, Y) - x, y = t([rand(T, 2, 2) for _ in 1:5]), t([rand(T, 2, 2) for _ in 1:5]) - end -end - -@testset "avoid stackoverflow in dot" begin - @test_throws "cannot evaluate dot recursively" dot('a', 'c') - @test_throws "cannot evaluate dot recursively" dot('a', 'b':'c') - @test_throws "x and y are of different lengths" dot(1, 1:2) -end - -@testset "generalized dot #32739" begin - for elty in (Int, Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}) - n = 10 - if elty <: Int - A = rand(-n:n, n, n) - x = rand(-n:n, n) - y = rand(-n:n, n) - elseif elty <: Real - A = convert(Matrix{elty}, randn(n,n)) - x = rand(elty, n) - y = rand(elty, n) - else - A = convert(Matrix{elty}, complex.(randn(n,n), randn(n,n))) - x = rand(elty, n) - y = rand(elty, n) - end - @test dot(x, A, y) ≈ dot(A'x, y) ≈ *(x', A, y) ≈ (x'A)*y - @test dot(x, A', y) ≈ dot(A*x, y) ≈ *(x', A', y) ≈ (x'A')*y - elty <: Real && @test dot(x, transpose(A), y) ≈ dot(x, transpose(A)*y) ≈ *(x', transpose(A), y) ≈ (x'*transpose(A))*y - B = reshape([A], 1, 1) - x = [x] - y = [y] - @test dot(x, B, y) ≈ dot(B'x, y) - @test dot(x, B', y) ≈ dot(B*x, y) - elty <: Real && @test dot(x, 
transpose(B), y) ≈ dot(x, transpose(B)*y) - end -end - -@testset "condskeel #34512" begin - A = rand(3, 3) - @test condskeel(A) ≈ condskeel(A, [8,8,8]) -end - -@testset "copytrito!" begin - n = 10 - @testset "square" begin - for A in (rand(n, n), rand(Int8, n, n)), uplo in ('L', 'U') - for AA in (A, view(A, reverse.(axes(A))...)) - C = uplo == 'L' ? tril(AA) : triu(AA) - for B in (zeros(n, n), zeros(n+1, n+2)) - copytrito!(B, AA, uplo) - @test view(B, 1:n, 1:n) == C - end - end - end - end - @testset "wide" begin - for A in (rand(n, 2n), rand(Int8, n, 2n)) - for AA in (A, view(A, reverse.(axes(A))...)) - C = tril(AA) - for (M, N) in ((n, n), (n+1, n), (n, n+1), (n+1, n+1)) - B = zeros(M, N) - copytrito!(B, AA, 'L') - @test view(B, 1:n, 1:n) == view(C, 1:n, 1:n) - end - @test_throws DimensionMismatch copytrito!(zeros(n-1, 2n), AA, 'L') - C = triu(AA) - for (M, N) in ((n, 2n), (n+1, 2n), (n, 2n+1), (n+1, 2n+1)) - B = zeros(M, N) - copytrito!(B, AA, 'U') - @test view(B, 1:n, 1:2n) == view(C, 1:n, 1:2n) - end - @test_throws DimensionMismatch copytrito!(zeros(n+1, 2n-1), AA, 'U') - end - end - end - @testset "tall" begin - for A in (rand(2n, n), rand(Int8, 2n, n)) - for AA in (A, view(A, reverse.(axes(A))...)) - C = triu(AA) - for (M, N) in ((n, n), (n+1, n), (n, n+1), (n+1, n+1)) - B = zeros(M, N) - copytrito!(B, AA, 'U') - @test view(B, 1:n, 1:n) == view(C, 1:n, 1:n) - end - @test_throws DimensionMismatch copytrito!(zeros(n-1, n+1), AA, 'U') - C = tril(AA) - for (M, N) in ((2n, n), (2n, n+1), (2n+1, n), (2n+1, n+1)) - B = zeros(M, N) - copytrito!(B, AA, 'L') - @test view(B, 1:2n, 1:n) == view(C, 1:2n, 1:n) - end - @test_throws DimensionMismatch copytrito!(zeros(n-1, n+1), AA, 'L') - end - end - end - @testset "aliasing" begin - M = Matrix(reshape(1:36, 6, 6)) - A = view(M, 1:5, 1:5) - A2 = Matrix(A) - B = view(M, 2:6, 2:6) - copytrito!(B, A, 'U') - @test UpperTriangular(B) == UpperTriangular(A2) - end -end - -@testset "immutable arrays" begin - A = FillArrays.Fill(big(3), (4, 4)) - M = Array(A) - @test triu(A) == triu(M) - @test triu(A, -1) == triu(M, -1) - @test tril(A) == tril(M) - @test tril(A, 1) == tril(M, 1) - @test det(A) == det(M) -end - -@testset "tril/triu" begin - @testset "with partly initialized matrices" begin - function test_triu(M, k=nothing) - M[1,1] = M[2,2] = M[1,2] = M[1,3] = M[2,3] = 3 - if isnothing(k) - MU = triu(M) - else - MU = triu(M, k) - end - @test iszero(MU[2,1]) - @test MU[1,1] == MU[2,2] == MU[1,2] == MU[1,3] == MU[2,3] == 3 - end - test_triu(Matrix{BigInt}(undef, 2, 3)) - test_triu(Matrix{BigInt}(undef, 2, 3), 0) - test_triu(SizedArrays.SizedArray{(2,3)}(Matrix{BigInt}(undef, 2, 3))) - test_triu(SizedArrays.SizedArray{(2,3)}(Matrix{BigInt}(undef, 2, 3)), 0) - - function test_tril(M, k=nothing) - M[1,1] = M[2,2] = M[2,1] = 3 - if isnothing(k) - ML = tril(M) - else - ML = tril(M, k) - end - @test ML[1,2] == ML[1,3] == ML[2,3] == 0 - @test ML[1,1] == ML[2,2] == ML[2,1] == 3 - end - test_tril(Matrix{BigInt}(undef, 2, 3)) - test_tril(Matrix{BigInt}(undef, 2, 3), 0) - test_tril(SizedArrays.SizedArray{(2,3)}(Matrix{BigInt}(undef, 2, 3))) - test_tril(SizedArrays.SizedArray{(2,3)}(Matrix{BigInt}(undef, 2, 3)), 0) - end - - @testset "block arrays" begin - for nrows in 0:3, ncols in 0:3 - M = [randn(2,2) for _ in 1:nrows, _ in 1:ncols] - Mu = triu(M) - for col in axes(M,2) - rowcutoff = min(col, size(M,1)) - @test @views Mu[1:rowcutoff, col] == M[1:rowcutoff, col] - @test @views Mu[rowcutoff+1:end, col] == zero.(M[rowcutoff+1:end, col]) - end - Ml = tril(M) - for col in 
axes(M,2) - @test @views Ml[col:end, col] == M[col:end, col] - rowcutoff = min(col-1, size(M,1)) - @test @views Ml[1:rowcutoff, col] == zero.(M[1:rowcutoff, col]) - end - end - end -end - -end # module TestGeneric diff --git a/stdlib/LinearAlgebra/test/givens.jl b/stdlib/LinearAlgebra/test/givens.jl deleted file mode 100644 index 62d677cf086ad..0000000000000 --- a/stdlib/LinearAlgebra/test/givens.jl +++ /dev/null @@ -1,124 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestGivens - -using Test, LinearAlgebra, Random -using LinearAlgebra: Givens, Rotation, givensAlgorithm - -# Test givens rotations -@testset "Test Givens for $elty" for elty in (Float32, Float64, ComplexF32, ComplexF64) - if elty <: Real - raw_A = convert(Matrix{elty}, randn(10,10)) - else - raw_A = convert(Matrix{elty}, complex.(randn(10,10),randn(10,10))) - end - @testset for A in (raw_A, view(raw_A, 1:10, 1:10)) - Ac = copy(A) - R = Rotation(Givens{elty}[]) - T = Rotation(Givens{elty}[]) - for j = 1:8 - for i = j+2:10 - G, _ = givens(A, j+1, i, j) - lmul!(G, A) - rmul!(A, adjoint(G)) - lmul!(G, R) - rmul!(T, G) - - @test lmul!(G, Matrix{elty}(I, 10, 10)) == [G[i,j] for i=1:10,j=1:10] - - @testset "transposes" begin - @test (@inferred G'*G)*Matrix(elty(1)I, 10, 10) ≈ Matrix(I, 10, 10) - @test (G*Matrix(elty(1)I, 10, 10))*G' ≈ Matrix(I, 10, 10) - @test (@inferred copy(R'))*(R*Matrix(elty(1)I, 10, 10)) ≈ Matrix(I, 10, 10) - @test_throws ErrorException transpose(G) - @test_throws ErrorException transpose(R) - end - end - end - @test (R')' === R - # test products of Givens and Rotations - for r in (R, T, *(R.rotations...), *(R.rotations[1], *(R.rotations[2:end]...))) - @test r * A ≈ (A' * r')' ≈ lmul!(r, copy(A)) - @test A * r ≈ (r' * A')' ≈ rmul!(copy(A), r) - @test r' * A ≈ lmul!(r', copy(A)) - @test A * r' ≈ rmul!(copy(A), r') - end - @test_throws ArgumentError givens(A, 3, 3, 2) - @test_throws ArgumentError givens(one(elty),zero(elty),2,2) - G, _ = givens(one(elty),zero(elty),11,12) - @test_throws DimensionMismatch lmul!(G, A) - @test_throws DimensionMismatch rmul!(A, adjoint(G)) - @test abs.(A) ≈ abs.(hessenberg(Ac).H) - @test opnorm(R*Matrix{elty}(I, 10, 10)) ≈ one(elty) - - I10 = Matrix{elty}(I, 10, 10) - G, _ = givens(one(elty),zero(elty),9,10) - @test (G*I10)' * (G*I10) ≈ I10 - K, _ = givens(zero(elty),one(elty),9,10) - @test (K*I10)' * (K*I10) ≈ I10 - end - - @testset "Givens * vectors" begin - for x in (raw_A[:,1], view(raw_A, :, 1)) - G, r = @inferred givens(x[2], x[4], 2, 4) - @test (G*x)[2] ≈ r - @test abs((G*x)[4]) < eps(real(elty)) - - G, r = @inferred givens(x, 2, 4) - @test (G*x)[2] ≈ r - @test abs((G*x)[4]) < eps(real(elty)) - - G, r = givens(x, 4, 2) - @test (G*x)[4] ≈ r - @test abs((G*x)[2]) < eps(real(elty)) - end - d = rand(4) - l = d[1] - g2, l = givens(l, d[2], 1, 2) - g3, l = givens(l, d[3], 1, 3) - g4, l = givens(l, d[4], 1, 4) - @test g2*(g3*d) ≈ g2*g3*d ≈ (g2*g3)*d - @test g2*g3*g4 isa Rotation - end -end - -# 36430 -# dimensional correctness: -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :Furlongs) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Furlongs.jl")) -using .Main.Furlongs - -@testset "testing dimensions with Furlongs" begin - @test_throws MethodError givens(Furlong(1.0), Furlong(2.0), 1, 2) -end - -const TNumber = Union{Float64,ComplexF64} -struct MockUnitful{T<:TNumber} <: Number - data::T - MockUnitful(data::T) where T<:TNumber = new{T}(data) -end -import Base: *, /, one, 
oneunit -*(a::MockUnitful{T}, b::T) where T<:TNumber = MockUnitful(a.data * b) -*(a::T, b::MockUnitful{T}) where T<:TNumber = MockUnitful(a * b.data) -*(a::MockUnitful{T}, b::MockUnitful{T}) where T<:TNumber = MockUnitful(a.data * b.data) -/(a::MockUnitful{T}, b::MockUnitful{T}) where T<:TNumber = a.data / b.data -one(::Type{<:MockUnitful{T}}) where T = one(T) -oneunit(::Type{<:MockUnitful{T}}) where T = MockUnitful(one(T)) - -@testset "unitful givens rotation unitful $T " for T in (Float64, ComplexF64) - g, r = givens(MockUnitful(T(3)), MockUnitful(T(4)), 1, 2) - @test g.c ≈ 3/5 - @test g.s ≈ 4/5 - @test r.data ≈ 5.0 -end - -# 51554 -# avoid infinite loop on Inf inputs -@testset "givensAlgorithm - Inf inputs" for T in (Float64, ComplexF64) - cs, sn, r = givensAlgorithm(T(Inf), T(1.0)) - @test !isfinite(r) - cs, sn, r = givensAlgorithm(T(1.0), T(Inf)) - @test !isfinite(r) -end - -end # module TestGivens diff --git a/stdlib/LinearAlgebra/test/hessenberg.jl b/stdlib/LinearAlgebra/test/hessenberg.jl deleted file mode 100644 index de58fea9fb27e..0000000000000 --- a/stdlib/LinearAlgebra/test/hessenberg.jl +++ /dev/null @@ -1,308 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestHessenberg - -using Test, LinearAlgebra, Random - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :Furlongs) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Furlongs.jl")) -using .Main.Furlongs - -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays - -# for tuple tests below -≅(x,y) = all(p -> p[1] ≈ p[2], zip(x,y)) - -let n = 10 - Random.seed!(1234321) - - Areal = randn(n,n)/2 - Aimg = randn(n,n)/2 - b_ = randn(n) - B_ = randn(n,3) - - # UpperHessenberg methods not covered by the tests below - @testset "UpperHessenberg" begin - A = Areal - H = UpperHessenberg(A) - AH = triu(A,-1) - for k in -2:2 - @test istril(H, k) == istril(AH, k) - @test istriu(H, k) == istriu(AH, k) - @test (k <= -1 ? 
istriu(H, k) : !istriu(H, k)) - end - @test UpperHessenberg(H) === H - @test parent(H) === A - @test Matrix(H) == Array(H) == H == AH - @test real(H) == real(AH) - @test real(UpperHessenberg{ComplexF64}(A)) == H - @test real(UpperHessenberg{ComplexF64}(H)) == H - sim = similar(H, ComplexF64) - @test sim isa UpperHessenberg{ComplexF64} - @test size(sim) == size(H) - for x in (2,2+3im) - @test x*H == H*x == x*AH - for op in (+,-) - @test op(H,x*I) == op(AH,x*I) == op(op(x*I,H)) - @test op(H,x*I)*x == op(AH,x*I)*x == x*op(H,x*I) - end - end - @test [H[i,j] for i=1:size(H,1), j=1:size(H,2)] == triu(A,-1) - H1 = LinearAlgebra.fillstored!(copy(H), 1) - @test H1 == triu(fill(1, n,n), -1) - @test tril(H1.data,-2) == tril(H.data,-2) - A2, H2 = copy(A), copy(H) - A2[1:4,3]=H2[1:4,3]=1:4 - H2[5,3]=0 - @test H2 == triu(A2,-1) - @test_throws ArgumentError H[5,3]=1 - Hc = UpperHessenberg(Areal + im .* Aimg) - AHc = triu(Areal + im .* Aimg,-1) - @test real(Hc) == real(AHc) - @test imag(Hc) == imag(AHc) - @test Array(copy(adjoint(Hc))) == adjoint(Array(Hc)) - @test Array(copy(transpose(Hc))) == transpose(Array(Hc)) - @test rmul!(copy(Hc), 2.0) == lmul!(2.0, copy(Hc)) - H = UpperHessenberg(Areal) - @test Array(Hc + H) == Array(Hc) + Array(H) - @test Array(Hc - H) == Array(Hc) - Array(H) - @testset "Preserve UpperHessenberg shape (issue #39388)" begin - for H = (UpperHessenberg(Areal), UpperHessenberg(Furlong.(Areal))) - if eltype(H) <: Furlong - A = Furlong.(rand(n,n)) - d = Furlong.(rand(n)) - dl = Furlong.(rand(n-1)) - du = Furlong.(rand(n-1)) - us = Furlong(1)*I - else - A = rand(n,n) - d = rand(n) - dl = rand(n-1) - du = rand(n-1) - us = 1*I - end - @testset "$op" for op = (+,-) - for x = (us, Diagonal(d), Bidiagonal(d,dl,:U), Bidiagonal(d,dl,:L), - Tridiagonal(dl,d,du), SymTridiagonal(d,dl), - UpperTriangular(A), UnitUpperTriangular(A)) - @test op(H,x) == op(Array(H),x) - @test op(x,H) == op(x,Array(H)) - @test op(H,x) isa UpperHessenberg - @test op(x,H) isa UpperHessenberg - end - end - end - H = UpperHessenberg(Areal) - A = randn(n,n) - d = randn(n) - dl = randn(n-1) - @testset "Multiplication/division" begin - for x = (5, 5I, Diagonal(d), Bidiagonal(d,dl,:U), - UpperTriangular(A), UnitUpperTriangular(A)) - @test (H*x)::UpperHessenberg ≈ Array(H)*x - @test (x*H)::UpperHessenberg ≈ x*Array(H) - @test H/x ≈ Array(H)/x# broken = eltype(H) <: Furlong && x isa UpperTriangular - @test x\H ≈ x\Array(H)# broken = eltype(H) <: Furlong && x isa UpperTriangular - @test H/x isa UpperHessenberg - @test x\H isa UpperHessenberg - end - x = Bidiagonal(d, dl, :L) - @test H*x == Array(H)*x - @test x*H == x*Array(H) - @test H/x == Array(H)/x - @test x\H == x\Array(H) - end - H = UpperHessenberg(Furlong.(Areal)) - for A in (A, Furlong.(A)) - @testset "Multiplication/division Furlong" begin - for x = (5, 5I, Diagonal(d), Bidiagonal(d,dl,:U), - UpperTriangular(A), UnitUpperTriangular(A)) - @test map(x -> x.val, (H*x)::UpperHessenberg) ≈ map(x -> x.val, Array(H)*x) - @test map(x -> x.val, (x*H)::UpperHessenberg) ≈ map(x -> x.val, x*Array(H)) - @test map(x -> x.val, (H/x)::UpperHessenberg) ≈ map(x -> x.val, Array(H)/x) - @test map(x -> x.val, (x\H)::UpperHessenberg) ≈ map(x -> x.val, x\Array(H)) - end - x = Bidiagonal(d, dl, :L) - @test H*x == Array(H)*x - @test x*H == x*Array(H) - @test H/x == Array(H)/x - @test x\H == x\Array(H) - end - end - end - end - - @testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, Int), herm in (false, true) - A_ = eltya == Int ? 
- rand(1:7, n, n) : - convert(Matrix{eltya}, eltya <: Complex ? - complex.(Areal, Aimg) : - Areal) - A = herm ? Hermitian(A_ + A_') : A_ - - H = hessenberg(A) - @test Hessenberg(H) === H - eltyh = eltype(H) - @test size(H.Q, 1) == size(A, 1) - @test size(H.Q, 2) == size(A, 2) - @test size(H.Q) == size(A) - @test size(H) == size(A) - @test_throws FieldError H.Z - @test convert(Array, H) ≈ A - @test (H.Q * H.H) * H.Q' ≈ A ≈ (Matrix(H.Q) * Matrix(H.H)) * Matrix(H.Q)' - @test (H.Q' * A) * H.Q ≈ H.H - #getindex for HessenbergQ - @test H.Q[1,1] ≈ Array(H.Q)[1,1] - @test det(H.Q) ≈ det(Matrix(H.Q)) - @test logabsdet(H.Q)[1] ≈ logabsdet(Matrix(H.Q))[1] atol=2n*eps(float(real(eltya))) - - # REPL show - hessstring = sprint((t, s) -> show(t, "text/plain", s), H) - qstring = sprint((t, s) -> show(t, "text/plain", s), H.Q) - hstring = sprint((t, s) -> show(t, "text/plain", s), H.H) - @test hessstring == "$(summary(H))\nQ factor: $qstring\nH factor:\n$hstring" - - #iterate - q,h = H - @test q == H.Q - @test h == H.H - - @test convert(Array, 2 * H) ≈ 2 * A ≈ convert(Array, H * 2) - @test convert(Array, H + 2I) ≈ A + 2I ≈ convert(Array, 2I + H) - @test convert(Array, H + (2+4im)I) ≈ A + (2+4im)I ≈ convert(Array, (2+4im)I + H) - @test convert(Array, H - 2I) ≈ A - 2I ≈ -convert(Array, 2I - H) - @test convert(Array, -H) == -convert(Array, H) - @test convert(Array, 2*(H + (2+4im)I)) ≈ 2A + (4+8im)I - - b = convert(Vector{eltype(H)}, b_) - B = convert(Matrix{eltype(H)}, B_) - @test H \ b ≈ A \ b ≈ H \ complex(b) - @test H \ B ≈ A \ B ≈ H \ complex(B) - @test (H - I) \ B ≈ (A - I) \ B - @test (H - (3+4im)I) \ B ≈ (A - (3+4im)I) \ B - @test b' / H ≈ b' / A ≈ complex(b') / H - @test transpose(b) / H ≈ transpose(b) / A ≈ transpose(complex(b)) / H - @test B' / H ≈ B' / A ≈ complex(B') / H - @test b' / H' ≈ complex(b)' / H' - @test B' / (H - I) ≈ B' / (A - I) - @test B' / (H - (3+4im)I) ≈ B' / (A - (3+4im)I) - @test (H - (3+4im)I)' \ B ≈ (A - (3+4im)I)' \ B - @test B' / (H - (3+4im)I)' ≈ B' / (A - (3+4im)I)' - - for shift in (0,1,3+4im) - @test det(H + shift*I) ≈ det(A + shift*I) - @test logabsdet(H + shift*I) ≅ logabsdet(A + shift*I) - end - - HM = Matrix(h) - @test dot(b, h, b) ≈ dot(h'b, b) ≈ dot(b, HM, b) ≈ dot(HM'b, b) - c = b .+ 1 - @test dot(b, h, c) ≈ dot(h'b, c) ≈ dot(b, HM, c) ≈ dot(HM'b, c) - end -end - -@testset "Reverse operation on UpperHessenberg" begin - A = UpperHessenberg(randn(5, 5)) - @test reverse(A, dims=1) == reverse(Matrix(A), dims=1) - @test reverse(A, dims=2) == reverse(Matrix(A), dims=2) - @test reverse(A) == reverse(Matrix(A)) -end - -@testset "hessenberg(::AbstractMatrix)" begin - n = 10 - A = Tridiagonal(rand(n-1), rand(n), rand(n-1)) - H = hessenberg(A) - @test convert(Array, H) ≈ A -end - -# check logdet on a matrix that has a positive determinant -let A = [0.5 0.1 0.9 0.4; 0.9 0.7 0.5 0.4; 0.3 0.4 0.9 0.0; 0.4 0.0 0.0 0.5] - @test logdet(hessenberg(A)) ≈ logdet(A) ≈ -3.5065578973199822 -end - -@testset "Base.propertynames" begin - F = hessenberg([4. 9. 7.; 4. 4. 1.; 4. 3. 
2.]) - @test Base.propertynames(F) == (:Q, :H, :μ) - @test Base.propertynames(F, true) == (:Q, :H, :μ, :τ, :factors, :uplo) -end - -@testset "adjoint of Hessenberg" begin - Ar = randn(5, 5) - Ac = complex.(randn(5, 5), randn(5, 5)) - b = ones(size(Ar, 1)) - - for A in (Ar, Ac) - F = hessenberg(A) - @test A'\b ≈ F'\b - end -end - -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays - -@testset "Conversion to AbstractArray" begin - # tests corresponding to #34995 - A = ImmutableArray([1 2 3; 4 5 6; 7 8 9]) - H = UpperHessenberg(A) - - @test convert(AbstractArray{Float64}, H)::UpperHessenberg{Float64,ImmutableArray{Float64,2,Array{Float64,2}}} == H - @test convert(AbstractMatrix{Float64}, H)::UpperHessenberg{Float64,ImmutableArray{Float64,2,Array{Float64,2}}} == H -end - -@testset "custom axes" begin - SZA = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) - S = UpperHessenberg(SZA) - r = SizedArrays.SOneTo(2) - @test axes(S) === (r,r) -end - -@testset "copyto! with aliasing (#39460)" begin - M = Matrix(reshape(1:36, 6, 6)) - A = UpperHessenberg(view(M, 1:5, 1:5)) - A2 = copy(A) - B = UpperHessenberg(view(M, 2:6, 2:6)) - @test copyto!(B, A) == A2 -end - -@testset "getindex with Integers" begin - M = reshape(1:9, 3, 3) - S = UpperHessenberg(M) - @test_throws "invalid index" S[3, true] - @test S[1,2] == S[Int8(1),UInt16(2)] == S[big(1), Int16(2)] -end - -@testset "complex Symmetric" begin - D = diagm(0=>ComplexF64[1,2]) - S = Symmetric(D) - H = hessenberg(S) - @test H.H == D -end - -@testset "istriu/istril forwards to parent" begin - n = 10 - @testset "$(nameof(typeof(M)))" for M in [Tridiagonal(rand(n-1), rand(n), rand(n-1)), - Tridiagonal(zeros(n-1), zeros(n), zeros(n-1)), - Diagonal(randn(n)), - Diagonal(zeros(n)), - ] - U = UpperHessenberg(M) - A = Array(U) - for k in -n:n - @test istriu(U, k) == istriu(A, k) - @test istril(U, k) == istril(A, k) - end - end - z = zeros(n,n) - P = Matrix{BigFloat}(undef, n, n) - copytrito!(P, z, 'U') - P[diagind(P,-1)] .= 0 - U = UpperHessenberg(P) - A = Array(U) - @testset for k in -n:n - @test istriu(U, k) == istriu(A, k) - @test istril(U, k) == istril(A, k) - end -end - -end # module TestHessenberg diff --git a/stdlib/LinearAlgebra/test/lapack.jl b/stdlib/LinearAlgebra/test/lapack.jl deleted file mode 100644 index f05d7d99c2437..0000000000000 --- a/stdlib/LinearAlgebra/test/lapack.jl +++ /dev/null @@ -1,902 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestLAPACK - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasInt - -@test_throws ArgumentError LinearAlgebra.LAPACK.chkuplo('Z') -@test_throws ArgumentError LinearAlgebra.LAPACK.chkside('Z') -@test_throws ArgumentError LinearAlgebra.LAPACK.chkdiag('Z') -@test_throws ArgumentError LinearAlgebra.LAPACK.chktrans('Z') -@test_throws ArgumentError LinearAlgebra.LAPACK.chkvalidparam(1, "job", 2, (0,1)) - -@testset "syevr" begin - Random.seed!(123) - Ainit = randn(5,5) - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - if elty == ComplexF32 || elty == ComplexF64 - A = complex.(Ainit, Ainit) - else - A = Ainit - end - A = convert(Array{elty, 2}, A) - Asym = A'A - vals, Z = LAPACK.syevr!('V', copy(Asym)) - @test Z*(Diagonal(vals)*Z') ≈ Asym - @test all(vals .> 0.0) - @test LAPACK.syevr!('N', 'V', 'U', copy(Asym), 0.0, 1.0, 4, 5, -1.0)[1] ≈ vals[vals .< 1.0] - @test LAPACK.syevr!('N', 'I', 'U', copy(Asym), 0.0, 1.0, 4, 5, -1.0)[1] ≈ vals[4:5] - @test vals ≈ LAPACK.syev!('N', 'U', copy(Asym)) - @test vals ≈ LAPACK.syevd!('N', 'U', copy(Asym)) - vals_test, Z_test = LAPACK.syev!('V', 'U', copy(Asym)) - @test vals_test ≈ vals - @test Z_test*(Diagonal(vals)*Z_test') ≈ Asym - vals_test, Z_test = LAPACK.syevd!('V', 'U', copy(Asym)) - @test vals_test ≈ vals - @test Z_test*(Diagonal(vals)*Z_test') ≈ Asym - @test_throws DimensionMismatch LAPACK.sygvd!(1, 'V', 'U', copy(Asym), zeros(elty, 6, 6)) - - @test_throws "jobz must be one of ('N', 'V'), but 'X' was passed" LAPACK.syevr!('X', Asym) - @test_throws "jobz must be one of ('N', 'V'), but 'X' was passed" LAPACK.syev!('X', 'U', Asym) - @test_throws "uplo argument must be 'U' (upper) or 'L' (lower), got 'M'" LAPACK.syev!('N', 'M', Asym) - @test_throws "jobz must be one of ('N', 'V'), but 'X' was passed" LAPACK.syevd!('X', 'U', Asym) - @test_throws "uplo argument must be 'U' (upper) or 'L' (lower), got 'M'" LAPACK.syevd!('N', 'M', Asym) - end -end - -@testset "gglse" begin - let - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = convert(Array{elty, 2}, [1 1 1 1; 1 3 1 1; 1 -1 3 1; 1 1 1 3; 1 1 1 -1]) - c = convert(Array{elty, 1}, [2, 1, 6, 3, 1]) - B = convert(Array{elty, 2}, [1 1 1 -1; 1 -1 1 1; 1 1 -1 1]) - d = convert(Array{elty, 1}, [1, 3, -1]) - @test LAPACK.gglse!(A, c, B, d)[1] ≈ convert(Array{elty}, [0.5, -0.5, 1.5, 0.5]) - end - end -end - -@testset "gebrd, bdsqr, throw for bdsdc" begin - let - n = 10 - @testset for elty in (Float32, Float64) - d, e = convert(Vector{elty}, randn(n)), convert(Vector{elty}, randn(n - 1)) - U, Vt, C = Matrix{elty}(I, n, n), Matrix{elty}(I, n, n), Matrix{elty}(I, n, n) - s, _ = LAPACK.bdsqr!('U', copy(d), copy(e), Vt, U, C) - @test Array(Bidiagonal(d, e, :U)) ≈ U*Diagonal(s)*Vt - - @test_throws ArgumentError LAPACK.bdsqr!('A', d, e, Vt, U, C) - @test_throws DimensionMismatch LAPACK.bdsqr!('U', d, [e; 1], Vt, U, C) - @test_throws DimensionMismatch LAPACK.bdsqr!('U', d, e, Vt[1:end - 1, :], U, C) - @test_throws DimensionMismatch LAPACK.bdsqr!('U', d, e, Vt, U[:,1:end - 1], C) - @test_throws DimensionMismatch LAPACK.bdsqr!('U', d, e, Vt, U, C[1:end - 1, :]) - - @test_throws ArgumentError LAPACK.bdsdc!('U','Z',d,e) - - A = rand(elty,n,n) - B = copy(A) - B, d, e, tauq, taup = LAPACK.gebrd!(B) - U, Vt, C = Matrix{elty}(I, n, n), Matrix{elty}(I, n, n), Matrix{elty}(I, n, n) - s, _ = LAPACK.bdsqr!('U',d,e[1:n-1],Vt, U, C) - @test s ≈ svdvals(A) - end - end -end - -@testset "Issue #7886" begin - let - x, r = LAPACK.gelsy!([0 1; 
0 2; 0 3.], [2, 4, 6.]) - @test x ≈ [0,2] - @test r == 1 - end -end - -@testset "geqrt(3)" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - B = copy(A) - C,T = LAPACK.geqrt!(A,zeros(elty,10,10)) - D,S = LAPACK.geqrt3!(A,zeros(elty,10,10)) - @test C ≈ D - end -end - -@testset "gbtrf and gbtrs" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - d = rand(elty,6) - dl = rand(elty,5) - du = rand(elty,5) - dl2 = rand(elty,4) - AB = zeros(elty,6,6) - AB[6,1:4] = dl2 - AB[5,1:5] = dl - AB[4,:] = d - AB[3,2:6] = du - AB,ipiv = LAPACK.gbtrf!(2,1,6,AB) - C = rand(elty,6,6) - D = copy(C) - D = LAPACK.gbtrs!('N',2,1,6,AB,ipiv,D) - A = diagm(-2 => dl2, -1 => dl, 0 => d, 1 => du) - @test A\C ≈ D - M = Matrix{elty}(undef,7,6) - @test_throws DimensionMismatch LAPACK.gbtrs!('N',2,1,6,AB,ipiv,M) - @test_throws ArgumentError LAPACK.gbtrs!('M',2,1,6,AB,ipiv,M) - @test_throws LinearAlgebra.LAPACKException LAPACK.gbtrf!(2,1,6,zeros(elty,6,6)) - end -end - - -@testset "geqp3, geqrt error handling" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - x10, x11 = Vector{elty}.(undef, (10, 11)) - y10, y11 = Vector{LinearAlgebra.BlasInt}.(undef, (10, 11)) - A10x10, A11x10, A10x11, A11x11 = Matrix{elty}.(undef, ((10,10), (11,10), (10,11), (11,11))) - @test_throws DimensionMismatch LAPACK.geqlf!(A10x10, x11) - @test_throws DimensionMismatch LAPACK.gelqf!(A10x10, x11) - @test_throws DimensionMismatch LAPACK.geqp3!(A10x10, y11, x10) - @test_throws DimensionMismatch LAPACK.geqp3!(A10x10, y10, x11) - @test_throws ArgumentError LAPACK.geqrt!(A10x10, A11x10) - @test_throws DimensionMismatch LAPACK.geqrt3!(A10x10, A11x10) - @test_throws DimensionMismatch LAPACK.geqrt3!(A10x11, A11x11) - @test_throws DimensionMismatch LAPACK.geqrf!(A10x10, x11) - @test_throws DimensionMismatch LAPACK.gerqf!(A10x10, x11) - end -end - -@testset "gels, gesv, getrs, getri error handling" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A10x10, B11x11 = Matrix{elty}.(undef, ((10,10), (11,11))) - x10, x11 = Vector{LinearAlgebra.BlasInt}.(undef, (10, 11)) - @test_throws DimensionMismatch LAPACK.gels!('N',A10x10,B11x11) - @test_throws DimensionMismatch LAPACK.gels!('T',A10x10,B11x11) - @test_throws ArgumentError LAPACK.gels!('X',A10x10,B11x11) - @test_throws DimensionMismatch LAPACK.gesv!(A10x10,B11x11) - @test_throws DimensionMismatch LAPACK.getrs!('N',A10x10,x10,B11x11) - @test_throws DimensionMismatch LAPACK.getrs!('T',A10x10,x10,B11x11) - @test_throws ArgumentError LAPACK.getrs!('X',A10x10,x10,B11x11) - @test_throws DimensionMismatch LAPACK.getri!(A10x10,x11) - end -end - -@testset "gelsy, gelsd" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty, 10, 10) - B = rand(elty, 10, 10) - C, j = LAPACK.gelsd!(copy(A),copy(B)) - D, k = LAPACK.gelsy!(copy(A),copy(B)) - @test C ≈ D rtol=4*eps(cond(A)) - @test_throws DimensionMismatch LAPACK.gelsd!(A,rand(elty,12,10)) - @test_throws DimensionMismatch LAPACK.gelsy!(A,rand(elty,12,10)) - end -end - -@testset "gglse errors" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - @test_throws DimensionMismatch LAPACK.gglse!(A,zeros(elty,10),rand(elty,12,11),zeros(elty,12)) - @test_throws DimensionMismatch LAPACK.gglse!(A,zeros(elty,11),rand(elty,10,10),zeros(elty,10)) - @test_throws DimensionMismatch LAPACK.gglse!(A,zeros(elty,10),rand(elty,10,10),zeros(elty,11)) - end -end - -@testset "gesvd, ggsvd" begin - @testset for 
elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,5) - U,S,V = svd(A) - lU,lS,lVt = LAPACK.gesvd!('S','S',A) - @test U ≈ lU - @test S ≈ lS - @test V' ≈ lVt - @test_throws ArgumentError LAPACK.gesvd!('X','S',A) - @test_throws ArgumentError LAPACK.gesvd!('S','X',A) - B = rand(elty,10,10) - # xggsvd3 replaced xggsvd in LAPACK 3.6.0 - if LAPACK.version() < v"3.6.0" - @test_throws DimensionMismatch LAPACK.ggsvd!('N','N','N',A,B) - @test_throws ArgumentError LAPACK.ggsvd!('X','N','N',A,B) - @test_throws ArgumentError LAPACK.ggsvd!('N','X','N',A,B) - @test_throws ArgumentError LAPACK.ggsvd!('N','N','X',A,B) - else - @test_throws DimensionMismatch LAPACK.ggsvd3!('N','N','N',A,B) - @test_throws ArgumentError LAPACK.ggsvd3!('X','N','N',A,B) - @test_throws ArgumentError LAPACK.ggsvd3!('N','X','N',A,B) - @test_throws ArgumentError LAPACK.ggsvd3!('N','N','X',A,B) - end - end -end - -@testset "geevx, ggev, ggev3 errors" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - B = rand(elty,10,10) - @test_throws ArgumentError LAPACK.geevx!('M','N','N','N',A) - @test_throws ArgumentError LAPACK.geevx!('N','Z','N','N',A) - @test_throws ArgumentError LAPACK.geevx!('N','N','Z','N',A) - @test_throws ArgumentError LAPACK.geevx!('N','N','N','Z',A) - @test_throws ArgumentError LAPACK.ggev!('N','B',A,B) - @test_throws ArgumentError LAPACK.ggev!('B','N',A,B) - @test_throws DimensionMismatch LAPACK.ggev!('N','N',A,zeros(elty,12,12)) - @test_throws ArgumentError LAPACK.ggev3!('N','B',A,B) - @test_throws ArgumentError LAPACK.ggev3!('B','N',A,B) - @test_throws DimensionMismatch LAPACK.ggev3!('N','N',A,zeros(elty,12,12)) - end -end - -@testset "gebal/gebak" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - typescale = log10(eps(real(elty))) / 3 * 2 - A = rand(elty,10,10) * Diagonal(exp10.(range(typescale, stop=-typescale, length=10))) - B = copy(A) - ilo, ihi, scale = LAPACK.gebal!('S',B) - Bvs = eigvecs(B) - Avs = eigvecs(A) - Bvs = LAPACK.gebak!('S','R',ilo,ihi,scale,Bvs) - @test norm(diff(Avs ./ Bvs, dims=1)) < 100 * eps(abs(float(one(elty)))) - end -end - -@testset "gels" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - Random.seed!(913) - A = rand(elty,10,10) - X = rand(elty,10) - B,Y,z = LAPACK.gels!('N',copy(A),copy(X)) - @test A\X ≈ Y - @test_throws ArgumentError LAPACK.gels!('X',A,X) - end -end - -@testset "getrf/getri" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - iA = inv(A) - A, ipiv, info = LAPACK.getrf!(A) - A = LAPACK.getri!(A, ipiv) - @test A ≈ iA - - B = rand(elty,10,10) - iB = inv(B) - ipiv = rand(BlasInt,10) - B, ipiv, info = LAPACK.getrf!(B, ipiv) - B = LAPACK.getri!(B, ipiv) - @test B ≈ iB - end -end - -@testset "geev" begin - # complex is easier for now - @testset for elty in (ComplexF32, ComplexF64) - A = rand(elty,10,10) - Aw, Avl, Avr = LAPACK.geev!('N','V',copy(A)) - fA = eigen(A, sortby=nothing) - @test fA.values ≈ Aw - @test fA.vectors ≈ Avr - - @test_throws ArgumentError LAPACK.geev!('X','V',A) - @test_throws ArgumentError LAPACK.geev!('N','X',A) - end -end - -@testset "gtsv" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - du = rand(elty,9) - d = rand(elty,10) - dl = rand(elty,9) - b = rand(elty,10) - c = Tridiagonal(dl,d,du) \ b - b = LAPACK.gtsv!(dl,d,du,b) - @test b ≈ c - @test_throws DimensionMismatch LAPACK.gtsv!(zeros(elty,11),d,du,b) - @test_throws DimensionMismatch LAPACK.gtsv!(dl,d,zeros(elty,11),b) 
- @test_throws DimensionMismatch LAPACK.gtsv!(dl,d,du,zeros(elty,11)) - @test LAPACK.gtsv!(elty[],elty[],elty[],elty[]) == elty[] - end -end - -@testset "gttrs,gttrf errors" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - du = rand(elty,9) - d = rand(elty,10) - dl = rand(elty,9) - b = rand(elty,10) - y10 = Vector{BlasInt}(undef, 10) - x9, x11 = Vector{elty}.(undef, (9, 11)) - @test_throws DimensionMismatch LAPACK.gttrf!(x11, d, du) - @test_throws DimensionMismatch LAPACK.gttrf!(dl, d, x11) - @test_throws DimensionMismatch LAPACK.gttrs!('N', x11, d, du, x9, y10, b) - @test_throws DimensionMismatch LAPACK.gttrs!('N', dl, d, x11, x9, y10, b) - @test_throws DimensionMismatch LAPACK.gttrs!('N', dl, d, du, x9, y10, x11) - @test_throws ArgumentError LAPACK.gttrs!('X', dl, d, du, x9, y10, x11) - A = lu(Tridiagonal(dl,d,du)) - b = rand(elty,10,5) - c = copy(b) - dl,d,du,du2,ipiv = LAPACK.gttrf!(dl,d,du) - c = LAPACK.gttrs!('N',dl,d,du,du2,ipiv,c) - @test A\b ≈ c - end -end - -@testset "orglq and friends errors" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - A,tau = LAPACK.gelqf!(A) - @test_throws DimensionMismatch LAPACK.orglq!(A,tau,11) - temp = rand(elty,11,11) - @test_throws DimensionMismatch LAPACK.ormlq!('R','N',A,tau,temp) - @test_throws DimensionMismatch LAPACK.ormlq!('L','N',A,tau,temp) - @test_throws ArgumentError LAPACK.ormlq!('X','N',A,tau,temp) - @test_throws ArgumentError LAPACK.ormlq!('R','X',A,tau,temp) - temp = zeros(elty,11) - B = copy(A) - @test_throws DimensionMismatch LAPACK.ormlq!('R','N',A,temp,B) - @test_throws DimensionMismatch LAPACK.ormlq!('L','N',A,temp,B) - @test_throws ArgumentError LAPACK.ormlq!('X','N',A,temp,B) - @test_throws ArgumentError LAPACK.ormlq!('L','X',A,temp,B) - - B = copy(A) - C = LAPACK.orglq!(B,tau) - @test LAPACK.ormlq!('R','N',A,tau, Matrix{elty}(I, 10, 10)) ≈ C - - A = rand(elty,10,10) - A,tau = LAPACK.geqrf!(A) - @test_throws DimensionMismatch LAPACK.orgqr!(A,tau,11) - B = copy(A) - @test LAPACK.orgqr!(B,tau) ≈ LAPACK.ormqr!('R','N',A,tau,Matrix{elty}(I, 10, 10)) - temp = rand(elty,11,11) - @test_throws DimensionMismatch LAPACK.ormqr!('R','N',A,tau,temp) - @test_throws DimensionMismatch LAPACK.ormqr!('L','N',A,tau,temp) - @test_throws ArgumentError LAPACK.ormqr!('X','N',A,tau,temp) - @test_throws ArgumentError LAPACK.ormqr!('L','X',A,tau,temp) - B = copy(A) - temp = zeros(elty,11) - @test_throws DimensionMismatch LAPACK.ormqr!('R','N',A,temp,B) - @test_throws DimensionMismatch LAPACK.ormqr!('L','N',A,temp,B) - @test_throws ArgumentError LAPACK.ormqr!('X','N',A,temp,B) - @test_throws ArgumentError LAPACK.ormqr!('L','X',A,temp,B) - - A = rand(elty,10,10) - A,tau = LAPACK.geqlf!(A) - @test_throws DimensionMismatch LAPACK.orgql!(A,tau,11) - B = copy(A) - @test LAPACK.orgql!(B,tau) ≈ LAPACK.ormql!('R','N',A,tau,Matrix{elty}(I, 10, 10)) - temp = rand(elty,11,11) - @test_throws DimensionMismatch LAPACK.ormql!('R','N',A,tau,temp) - @test_throws DimensionMismatch LAPACK.ormql!('L','N',A,tau,temp) - @test_throws ArgumentError LAPACK.ormql!('X','N',A,tau,temp) - @test_throws ArgumentError LAPACK.ormql!('L','X',A,tau,temp) - temp = zeros(elty,11) - B = copy(A) - @test_throws DimensionMismatch LAPACK.ormql!('R','N',A,temp,B) - @test_throws DimensionMismatch LAPACK.ormql!('L','N',A,temp,B) - @test_throws ArgumentError LAPACK.ormql!('X','N',A,temp,B) - @test_throws ArgumentError LAPACK.ormql!('L','X',A,temp,B) - - A = rand(elty,10,10) - A,tau = LAPACK.gerqf!(A) - @test_throws 
DimensionMismatch LAPACK.orgrq!(A,tau,11) - B = copy(A) - @test LAPACK.orgrq!(B,tau) ≈ LAPACK.ormrq!('R','N',A,tau,Matrix{elty}(I, 10, 10)) - temp = rand(elty,11,11) - @test_throws DimensionMismatch LAPACK.ormrq!('R','N',A,tau,temp) - @test_throws DimensionMismatch LAPACK.ormrq!('L','N',A,tau,temp) - @test_throws ArgumentError LAPACK.ormrq!('X','N',A,tau,temp) - @test_throws ArgumentError LAPACK.ormrq!('L','X',A,tau,temp) - B = copy(A) - temp = zeros(elty,11) - @test_throws DimensionMismatch LAPACK.ormrq!('R','N',A,temp,B) - @test_throws DimensionMismatch LAPACK.ormrq!('L','N',A,temp,B) - @test_throws ArgumentError LAPACK.ormrq!('X','N',A,temp,B) - @test_throws ArgumentError LAPACK.ormrq!('L','X',A,temp,B) - - A = rand(elty,10,11) - Q = copy(A) - Q,tau = LAPACK.gerqf!(Q) - R = triu(Q[:,2:11]) - LAPACK.orgrq!(Q,tau) - @test Q*Q' ≈ Matrix(I, 10, 10) - @test R*Q ≈ A - @test_throws DimensionMismatch LAPACK.orgrq!(zeros(elty,11,10),zeros(elty,10)) - - C = rand(elty,10,10) - V = rand(elty,10,10) - T = zeros(elty,10,11) - @test_throws DimensionMismatch LAPACK.gemqrt!('L','N',V,T,C) - @test_throws DimensionMismatch LAPACK.gemqrt!('R','N',V,T,C) - @test_throws ArgumentError LAPACK.gemqrt!('X','N',V,T,C) - @test_throws ArgumentError LAPACK.gemqrt!('R','X',V,T,C) - - C = rand(elty,10,10) - V = rand(elty,11,10) - T = zeros(elty,10,10) - @test_throws DimensionMismatch LAPACK.gemqrt!('R','N',V,T,C) - @test_throws DimensionMismatch LAPACK.gemqrt!('L','N',V,T,C) - @test_throws ArgumentError LAPACK.gemqrt!('X','N',V,T,C) - @test_throws ArgumentError LAPACK.gemqrt!('L','X',V,T,C) - - # test size(T) = (nb,k) ensures 1 <= nb <= k - T = zeros(elty,10,10) - V = rand(elty,5,10) - @test_throws DimensionMismatch LAPACK.gemqrt!('L','N',V,T,C) - @test_throws ArgumentError LAPACK.gemqrt!('X','N',V,T,C) - @test_throws ArgumentError LAPACK.gemqrt!('L','X',V,T,C) - C = rand(elty,10,10) - V = rand(elty,10,10) - T = zeros(elty,11,10) - @test_throws DimensionMismatch LAPACK.gemqrt!('R','N',V,T,C) - @test_throws ArgumentError LAPACK.gemqrt!('X','N',V,T,C) - @test_throws ArgumentError LAPACK.gemqrt!('R','X',V,T,C) - - @test_throws DimensionMismatch LAPACK.orghr!(1, 10, C, zeros(elty,11)) - end -end - -@testset "sytri, sytrs, and sytrf" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - A = A + transpose(A) #symmetric! - B = copy(A) - B,ipiv = LAPACK.sytrf!('U',B) - @test_throws ArgumentError LAPACK.sytrf!('X',B) - @test triu(inv(A)) ≈ triu(LAPACK.sytri!('U',B,ipiv)) rtol=eps(cond(A)) - @test_throws ArgumentError LAPACK.sytri!('X',B,ipiv) - temp = rand(elty,11,5) - @test_throws DimensionMismatch LAPACK.sytrs!('U',B,ipiv,temp) - @test_throws ArgumentError LAPACK.sytrs!('X',B,ipiv,temp) - @test LAPACK.sytrf!('U',zeros(elty,0,0)) == (zeros(elty,0,0),zeros(BlasInt,0),zero(BlasInt)) - end - - # Rook-pivoting variants - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty, 10, 10) - A = A + transpose(A) #symmetric! - B = copy(A) - B,ipiv = LAPACK.sytrf_rook!('U', B) - @test triu(inv(A)) ≈ triu(LAPACK.sytri_rook!('U', B, ipiv)) rtol=eps(cond(A)) - @test_throws ArgumentError LAPACK.sytri_rook!('X', B, ipiv) - temp = rand(elty, 11, 5) - @test_throws DimensionMismatch LAPACK.sytrs_rook!('U', B, ipiv, temp) - @test_throws ArgumentError LAPACK.sytrs_rook!('X', B, ipiv, temp) - @test LAPACK.sytrf_rook!('U',zeros(elty, 0, 0)) == (zeros(elty, 0, 0),zeros(BlasInt, 0),zero(BlasInt)) - A = rand(elty, 10, 10) - A = A + transpose(A) #symmetric! 
- b = rand(elty, 10) - c = A \ b - cnd = cond(A) - b,A = LAPACK.sysv_rook!('U', A, b) - @test b ≈ c rtol=eps(cnd) - temp = rand(elty,11) - @test_throws DimensionMismatch LAPACK.sysv_rook!('U',A,temp) - @test_throws ArgumentError LAPACK.sysv_rook!('X',A,temp) - - # syconvf_rook error handling - # way argument is wrong - @test_throws ArgumentError LAPACK.syconvf_rook!('U', 'U', A, rand(BlasInt, 10)) - # ipiv has wrong length - @test_throws ArgumentError LAPACK.syconvf_rook!('U', 'R', A, rand(BlasInt, 9)) - # e has wrong length - @test_throws ArgumentError LAPACK.syconvf_rook!('U', 'R', A, rand(BlasInt, 10), rand(elty, 9)) - end -end - -@testset "hetrf, hetrs" begin - @testset for elty in (ComplexF32, ComplexF64) - A = rand(elty,10,10) - A = A + A' #hermitian! - B = copy(A) - B,ipiv = LAPACK.hetrf!('U',B) - temp = rand(elty,11,5) - @test_throws DimensionMismatch LAPACK.hetrs!('U',B,ipiv,temp) - @test_throws ArgumentError LAPACK.hetrs!('X',B,ipiv,temp) - @test_throws DimensionMismatch LAPACK.hetrs_rook!('U',B,ipiv,temp) - @test_throws ArgumentError LAPACK.hetrs_rook!('X',B,ipiv,temp) - end -end - -@testset "stev, stebz, stein, stegr" begin - @testset for elty in (Float32, Float64) - d = rand(elty,10) - e = rand(elty,9) - temp = rand(elty,11) - @test_throws DimensionMismatch LAPACK.stev!('N',d,temp) - @test_throws ArgumentError LAPACK.stev!('X',d,temp) - temp = rand(elty,10) - @test_throws DimensionMismatch LAPACK.stebz!('A','B',zero(elty),zero(elty),0,0,-1.,d,temp) - @test_throws ArgumentError LAPACK.stebz!('X','B',zero(elty),zero(elty),0,0,-1.,d,temp) - @test_throws ArgumentError LAPACK.stebz!('A','X',zero(elty),zero(elty),0,0,-1.,d,temp) - temp11 = rand(elty,11) - @test_throws DimensionMismatch LAPACK.stegr!('N','A',d,temp11,zero(elty),zero(elty),0,0) - @test_throws ArgumentError LAPACK.stegr!('X','A',d,temp11,zero(elty),zero(elty),0,0) - @test_throws ArgumentError LAPACK.stegr!('N','X',d,temp11,zero(elty),zero(elty),0,0) - tempblasint10 = zeros(BlasInt,10) - tempblasint10_2 = zeros(BlasInt,10) - @test_throws DimensionMismatch LAPACK.stein!(d,temp11,temp,tempblasint10,tempblasint10_2) - @test_throws DimensionMismatch LAPACK.stein!(d,e,temp11,tempblasint10,tempblasint10_2) - end -end - -@testset "trtri & trtrs" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - A = triu(A) - B = copy(A) - @test inv(A) ≈ LAPACK.trtri!('U','N',B) - @test_throws ArgumentError LAPACK.trtri!('X','N',B) - @test_throws ArgumentError LAPACK.trtri!('U','X',B) - temp = zeros(elty,11,10) - @test_throws DimensionMismatch LAPACK.trtrs!('U','N','N',B,temp) - @test_throws ArgumentError LAPACK.trtrs!('X','N','N',B,temp) - @test_throws ArgumentError LAPACK.trtrs!('U','X','N',B,temp) - @test_throws ArgumentError LAPACK.trtrs!('U','N','X',B,temp) - end -end - -@testset "larfg & larf" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - ## larfg - Random.seed!(0) - x = rand(elty, 5) - v = copy(x) - τ = LinearAlgebra.LAPACK.larfg!(v) - H = (I - τ*v*v') - # for complex input, LAPACK wants a conjugate transpose of H (check clarfg docs) - y = elty <: Complex ? 
H'*x : H*x - # we have rotated a vector - @test norm(y) ≈ norm(x) - # an annihilated almost all the first column - @test norm(y[2:end], Inf) < 10*eps(real(one(elty))) - - ## larf - C = rand(elty, 5, 5) - C_norm = norm(C, 2) - v = C[1:end, 1] - τ = LinearAlgebra.LAPACK.larfg!(v) - LinearAlgebra.LAPACK.larf!('L', v, conj(τ), C) - # we have applied a unitary transformation - @test norm(C, 2) ≈ C_norm - # an annihilated almost all the first column - @test norm(C[2:end, 1], Inf) < 10*eps(real(one(elty))) - - # apply left and right - C1 = rand(elty, 5, 5) - C2 = rand(elty, 5, 5) - C = C2*C1 - - v = C1[1:end, 1] - τ = LinearAlgebra.LAPACK.larfg!(v) - LinearAlgebra.LAPACK.larf!('L', v, τ, C1) - LinearAlgebra.LAPACK.larf!('R', v, conj(τ), C2) - @test C ≈ C2*C1 - - @test_throws ArgumentError LAPACK.larf!('X', v, τ, C1) - end -end - -@testset "tgsen, tzrzf, & trsyl" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - Z = zeros(elty,10,10) - @test_throws DimensionMismatch LAPACK.tgsen!(zeros(BlasInt,10),Z,zeros(elty,11,11),Z,Z) - @test_throws DimensionMismatch LAPACK.tgsen!(zeros(BlasInt,10),Z,Z,zeros(elty,11,11),Z) - @test_throws DimensionMismatch LAPACK.tgsen!(zeros(BlasInt,10),Z,Z,Z,zeros(elty,11,11)) - @test_throws DimensionMismatch LAPACK.trsyl!('N','N',Z,Z,zeros(elty,11,11)) - @test_throws ArgumentError LAPACK.trsyl!('X','N',Z,Z,zeros(elty,11,11)) - @test_throws ArgumentError LAPACK.trsyl!('N','X',Z,Z,zeros(elty,11,11)) - @test_throws DimensionMismatch LAPACK.tzrzf!(zeros(elty,10,5)) - - A = triu(rand(elty,4,4)) - V = view(A, 1:2, :) - M = Matrix(V) - @test LAPACK.tzrzf!(V) == LAPACK.tzrzf!(M) - end -end - -@testset "sysv" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - Random.seed!(123) - A = rand(elty,10,10) - A = A + transpose(A) #symmetric! - b = rand(elty,10) - c = A \ b - b,A = LAPACK.sysv!('U',A,b) - @test b ≈ c - @test_throws DimensionMismatch LAPACK.sysv!('U',A,rand(elty,11)) - @test_throws ArgumentError LAPACK.sysv!('X',A,rand(elty,11)) - end -end - -@testset "hesv" begin - @testset for elty in (ComplexF32, ComplexF64) - Random.seed!(935) - A = rand(elty,10,10) - A = A + A' #hermitian! - b = rand(elty,10) - c = A \ b - b,A = LAPACK.hesv!('U',A,b) - @test b ≈ c - temp = rand(elty,11) - @test_throws DimensionMismatch LAPACK.hesv!('U',A,temp) - @test_throws ArgumentError LAPACK.hesv!('X',A,temp) - A = rand(elty,10,10) - A = A + A' #hermitian! 
- b = rand(elty,10) - c = A \ b - b,A = LAPACK.hesv_rook!('U',A,b) - @test b ≈ c - @test_throws DimensionMismatch LAPACK.hesv_rook!('U',A,temp) - @test_throws ArgumentError LAPACK.hesv_rook!('X',A,temp) - end -end - -@testset "ptsv" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - dv = fill(elty(1),10) - ev = zeros(elty,9) - rdv = real(dv) - A = SymTridiagonal(dv,ev) - if elty <: Complex - A = Tridiagonal(conj(ev),dv,ev) - end - B = rand(elty,10,10) - C = copy(B) - @test A\B ≈ LAPACK.ptsv!(rdv,ev,C) - @test_throws DimensionMismatch LAPACK.ptsv!(rdv,Vector{elty}(undef,10),C) - @test_throws DimensionMismatch LAPACK.ptsv!(rdv,ev,Matrix{elty}(undef,11,11)) - end -end - -@testset "pttrf and pttrs" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - dv = fill(elty(1),10) - ev = zeros(elty,9) - rdv = real(dv) - A = SymTridiagonal(dv,ev) - if elty <: Complex - A = Tridiagonal(conj(ev),dv,ev) - end - rdv,ev = LAPACK.pttrf!(rdv,ev) - @test_throws DimensionMismatch LAPACK.pttrf!(rdv,dv) - B = rand(elty,10,10) - C = copy(B) - if elty <: Complex - @test A\B ≈ LAPACK.pttrs!('U',rdv,ev,C) - tempvec = Vector{elty}(undef,10) - tempmat = Matrix{elty}(undef,11,11) - @test_throws DimensionMismatch LAPACK.pttrs!('U',rdv,tempvec,C) - @test_throws DimensionMismatch LAPACK.pttrs!('U',rdv,ev,tempmat) - @test_throws ArgumentError LAPACK.pttrs!('X',rdv,tempvec,C) - @test_throws ArgumentError LAPACK.pttrs!('X',rdv,ev,tempmat) - else - @test A\B ≈ LAPACK.pttrs!(rdv,ev,C) - @test_throws DimensionMismatch LAPACK.pttrs!(rdv,Vector{elty}(undef,10),C) - @test_throws DimensionMismatch LAPACK.pttrs!(rdv,ev,Matrix{elty}(undef,11,11)) - end - end -end - -@testset "posv and some errors for friends" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - local n = 10 - A = rand(elty,n,n)/100 - A += real(diagm(0 => n*real(rand(elty,n)))) - if elty <: Complex - A = A + A' - else - A = A + transpose(A) - end - B = rand(elty,n,n) - D = copy(A) - C = copy(B) - D,C = LAPACK.posv!('U',D,C) - @test A\B ≈ C - offsizemat = Matrix{elty}(undef, n+1, n+1) - @test_throws DimensionMismatch LAPACK.posv!('U', D, offsizemat) - @test_throws DimensionMismatch LAPACK.potrs!('U', D, offsizemat) - @test_throws ArgumentError LAPACK.posv!('X', D, offsizemat) - @test_throws ArgumentError LAPACK.potrs!('X', D, offsizemat) - - @test LAPACK.potrs!('U',Matrix{elty}(undef,0,0),elty[]) == elty[] - end -end - -@testset "gesvx" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - B = rand(elty,10,5) - C = copy(A) - D = copy(B) - X, rcond, f, b, r = LAPACK.gesvx!(C,D) - @test X ≈ A\B rtol=inv(rcond)*eps(real(elty)) - end -end - -@testset "gees, gges, gges3 error throwing" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - A = rand(elty,10,10) - B = rand(elty,11,11) - @test_throws DimensionMismatch LAPACK.gges!('V','V',A,B) - @test_throws DimensionMismatch LAPACK.gges3!('V','V',A,B) - @test_throws ArgumentError LAPACK.gges!('X','V',A,B) - @test_throws ArgumentError LAPACK.gges3!('X','V',A,B) - @test_throws ArgumentError LAPACK.gges!('V','X',A,B) - @test_throws ArgumentError LAPACK.gges3!('V','X',A,B) - end -end - -@testset "trrfs & trevc" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - T = triu(rand(elty,10,10)) - v = eigvecs(T, sortby=nothing)[:,1] - select = zeros(LinearAlgebra.BlasInt,10) - select[1] = 1 - select,Vr = LAPACK.trevc!('R','S',select,copy(T)) - @test Vr ≈ v - select = 
zeros(LinearAlgebra.BlasInt,10) - select[1] = 1 - select,Vl = LAPACK.trevc!('L','S',select,copy(T)) - select = zeros(LinearAlgebra.BlasInt,10) - select[1] = 1 - select,Vln,Vrn = LAPACK.trevc!('B','S',select,copy(T)) - @test Vrn ≈ v - @test Vln ≈ Vl - @test_throws ArgumentError LAPACK.trevc!('V','S',select,T) - @test_throws ArgumentError LAPACK.trevc!('R','X',select,T) - temp1010 = rand(elty,10,10) - temp1011 = rand(elty,10,11) - @test_throws DimensionMismatch LAPACK.trrfs!('U','N','N',T,temp1010,temp1011) - @test_throws ArgumentError LAPACK.trrfs!('X','N','N',T,temp1010,temp1011) - @test_throws ArgumentError LAPACK.trrfs!('U','X','N',T,temp1010,temp1011) - @test_throws ArgumentError LAPACK.trrfs!('U','N','X',T,temp1010,temp1011) - end -end - -@testset "laic1" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - @test_throws DimensionMismatch LAPACK.laic1!(1,rand(elty,10),real(rand(elty)),rand(elty,11),rand(elty)) - end -end - -@testset "trsen" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - for job in ('N', 'E', 'V', 'B') - for c in ('V', 'N') - A = convert(Matrix{elty}, [7 2 2 1; 1 5 2 0; 0 3 9 4; 1 1 1 4]) - T,Q,d = schur(A) - s, sep = LinearAlgebra.LAPACK.trsen!(job,c,Array{LinearAlgebra.BlasInt}([0,1,0,0]),T,Q)[4:5] - @test d[1] ≈ T[2,2] - @test d[2] ≈ T[1,1] - if c == 'V' - @test Q*T*Q' ≈ A - end - if job == 'N' || job == 'V' - @test iszero(s) - else - @test s ≈ 0.8080423 atol=1e-6 - end - if job == 'N' || job == 'E' - @test iszero(sep) - else - @test sep ≈ 2. atol=3e-1 - end - end - end - end -end - -@testset "trexc" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - for c in ('V', 'N') - A = convert(Matrix{elty}, [7 2 2 1; 1 5 2 0; 0 3 9 4; 1 1 1 4]) - T,Q,d = schur(A) - LinearAlgebra.LAPACK.trexc!(c,LinearAlgebra.BlasInt(1),LinearAlgebra.BlasInt(2),T,Q) - @test d[1] ≈ T[2,2] - @test d[2] ≈ T[1,1] - if c == 'V' - @test Q*T*Q' ≈ A - end - end - end -end - -@testset "lacpy!" begin - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - n = 10 - A = rand(elty, n, n) - for uplo in ('L', 'U', 'N') - B = zeros(elty, n, n) - LinearAlgebra.LAPACK.lacpy!(B, A, uplo) - C = uplo == 'L' ? tril(A) : (uplo == 'U' ? triu(A) : A) - @test B == C - B = zeros(elty, n+1, n+1) - LinearAlgebra.LAPACK.lacpy!(B, A, uplo) - C = uplo == 'L' ? tril(A) : (uplo == 'U' ? 
triu(A) : A) - @test view(B, 1:n, 1:n) == C - end - A = rand(elty, n, n+1) - B = zeros(elty, n, n) - LinearAlgebra.LAPACK.lacpy!(B, A, 'L') - @test B == view(tril(A), 1:n, 1:n) - B = zeros(elty, n, n+1) - LinearAlgebra.LAPACK.lacpy!(B, A, 'U') - @test B == triu(A) - A = rand(elty, n+1, n) - B = zeros(elty, n, n) - LinearAlgebra.LAPACK.lacpy!(B, A, 'U') - @test B == view(triu(A), 1:n, 1:n) - B = zeros(elty, n+1, n) - LinearAlgebra.LAPACK.lacpy!(B, A, 'L') - @test B == tril(A) - end -end - -@testset "Julia vs LAPACK" begin - # Test our own linear algebra functionality against LAPACK - @testset for elty in (Float32, Float64, ComplexF32, ComplexF64) - for nn in (5,10,15) - if elty <: Real - A = convert(Matrix{elty}, randn(10,nn)) - else - A = convert(Matrix{elty}, complex.(randn(10,nn),randn(10,nn))) - end ## LU (only equal for real because LAPACK uses different absolute value when choosing permutations) - if elty <: Real - FJulia = LinearAlgebra.generic_lufact!(copy(A)) - FLAPACK = LinearAlgebra.LAPACK.getrf!(copy(A)) - @test FJulia.factors ≈ FLAPACK[1] - @test FJulia.ipiv ≈ FLAPACK[2] - @test FJulia.info ≈ FLAPACK[3] - end - - ## QR - FJulia = LinearAlgebra.qrfactUnblocked!(copy(A)) - FLAPACK = LinearAlgebra.LAPACK.geqrf!(copy(A)) - @test FJulia.factors ≈ FLAPACK[1] - @test FJulia.τ ≈ FLAPACK[2] - end - end -end - -# Issue 13976 -let A = [NaN 0.0 NaN; 0 0 0; NaN 0 NaN] - @test_throws ArgumentError exp(A) -end - -# Issue 14065 (and 14220) -let A = [NaN NaN; NaN NaN] - @test_throws ArgumentError eigen(A) -end - -# Issue #42762 https://github.com/JuliaLang/julia/issues/42762 -# Tests geqrf! and gerqf! with null column dimensions -a = zeros(2,0), zeros(0) -@test LinearAlgebra.LAPACK.geqrf!(a...) === a -@test LinearAlgebra.LAPACK.gerqf!(a...) === a - -# Issue #49489: https://github.com/JuliaLang/julia/issues/49489 -# Dimension mismatch between A and ipiv causes segfaults -@testset "issue #49489" begin - A = randn(23,23) - b = randn(23) - ipiv = collect(1:20) - @test_throws DimensionMismatch LinearAlgebra.LAPACK.getrs!('N', A, ipiv, b) -end - -@testset "hetrd ignore non-filled half" begin - A = rand(3,3) - B = copy(A) - B[2,1] = NaN - B[3,1] = Inf - LAPACK.hetrd!('U', A) - LAPACK.hetrd!('U', B) - @test UpperTriangular(A) == UpperTriangular(B) -end - -@testset "inference in syev!/syevd!" begin - for T in (Float32, Float64), CT in (T, Complex{T}) - A = rand(CT, 4,4) - @inferred (A -> LAPACK.syev!('N', 'U', A))(A) - @inferred (A -> LAPACK.syev!('V', 'U', A))(A) - @inferred (A -> LAPACK.syevd!('N', 'U', A))(A) - @inferred (A -> LAPACK.syevd!('V', 'U', A))(A) - end -end - -end # module TestLAPACK diff --git a/stdlib/LinearAlgebra/test/ldlt.jl b/stdlib/LinearAlgebra/test/ldlt.jl deleted file mode 100644 index 51abf31086091..0000000000000 --- a/stdlib/LinearAlgebra/test/ldlt.jl +++ /dev/null @@ -1,41 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestLDLT - -using Test, LinearAlgebra, Random - -Random.seed!(123) - -@testset "Factorization conversions of LDLT" begin - S = SymTridiagonal(randn(5), randn(4)) - F = ldlt(S) - @test Factorization{eltype(S)}(F) === F - @test Array(Factorization{complex(eltype(S))}(F)) ≈ Array(ldlt(complex(S))) - @test eltype(Factorization{complex(eltype(S))}) == complex(eltype(S)) -end - -@testset "eltype conversions of LDLT" begin - S = SymTridiagonal(randn(5), randn(4)) - F = ldlt(S) - Fc = LDLt{ComplexF32}(F.data) - @test Fc isa LDLt{ComplexF32} - @test Array(Fc) ≈ ComplexF32.(Array(S)) -end - -@testset "Accessing fields of LDLT" begin - S = SymTridiagonal(randn(5), randn(4)) - F = ldlt(S) - @test getproperty(F, :L) == transpose(getproperty(F, :Lt)) - @test getproperty(F, :d) == diag(getproperty(F, :D), 0) -end - -@testset "REPL printing of LDLT" begin - S = SymTridiagonal(randn(5), randn(4)) - F = ldlt(S) - ldltstring = sprint((t, s) -> show(t, "text/plain", s), F) - lstring = sprint((t, s) -> show(t, "text/plain", s), F.L) - dstring = sprint((t, s) -> show(t, "text/plain", s), F.D) - @test ldltstring == "$(summary(F))\nL factor:\n$lstring\nD factor:\n$dstring" -end - -end # module TestLDLT diff --git a/stdlib/LinearAlgebra/test/lq.jl b/stdlib/LinearAlgebra/test/lq.jl deleted file mode 100644 index c3499f7f46fa6..0000000000000 --- a/stdlib/LinearAlgebra/test/lq.jl +++ /dev/null @@ -1,237 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestLQ - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, rmul!, lmul! - -m = 10 - -Random.seed!(1234321) - -asquare = randn(ComplexF64, m, m) / 2 -awide = randn(ComplexF64, m, m+3) / 2 -bcomplex = randn(ComplexF64, m, 2) / 2 - -# helper functions to unambiguously recover explicit forms of an LQPackedQ -squareQ(Q::LinearAlgebra.LQPackedQ) = (n = size(Q.factors, 2); lmul!(Q, Matrix{eltype(Q)}(I, n, n))) -rectangularQ(Q::LinearAlgebra.LQPackedQ) = convert(Array, Q) - -@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64), n in (m, size(awide, 2)) - adata = m == n ? asquare : awide - a = convert(Matrix{eltya}, eltya <: Complex ? adata : real(adata)) - ε = εa = eps(abs(float(one(eltya)))) - n1 = n ÷ 2 - - α = rand(eltya) - aα = fill(α,1,1) - @test lq(α).L*lq(α).Q ≈ lq(aα).L*lq(aα).Q - @test abs(lq(α).Q[1,1]) ≈ one(eltya) - - @testset for eltyb in (Float32, Float64, ComplexF32, ComplexF64, Int) - b = eltyb == Int ? rand(1:5, m, 2) : convert(Matrix{eltyb}, eltyb <: Complex ? bcomplex : real(bcomplex)) - εb = eps(abs(float(one(eltyb)))) - ε = max(εa,εb) - - tab = promote_type(eltya,eltyb) - - @testset for isview in (false,true) - let a = isview ? view(a, 1:m - 1, 1:n - 1) : a, b = isview ? 
view(b, 1:m - 1) : b, m = m - isview, n = n - isview - lqa = lq(a) - x = lqa\b - l, q = lqa.L, lqa.Q - qra = qr(a, ColumnNorm()) - @testset "Basic ops" begin - @test size(lqa,1) == size(a,1) - @test size(lqa,3) == 1 - @test size(lqa.Q,3) == 1 - @test Base.propertynames(lqa) == (:L, :Q) - ref_obs = (l, q) - for (ii, lq_obj) in enumerate(lqa) - @test ref_obs[ii] == lq_obj - end - @test_throws FieldError lqa.Z - @test Array(copy(adjoint(lqa))) ≈ a' - @test q*squareQ(q)' ≈ Matrix(I, n, n) - @test l*q ≈ a - @test Array(lqa) ≈ a - @test Array(copy(lqa)) ≈ a - @test LinearAlgebra.Factorization{eltya}(lqa) === lqa - @test Matrix{eltya}(q) isa Matrix{eltya} - # test Array{T}(LQPackedQ{T}) - @test Array{eltya}(q) ≈ Matrix(q) - end - @testset "Binary ops" begin - k = size(a, 2) - T = Tridiagonal(rand(eltya, k-1), rand(eltya, k), rand(eltya, k-1)) - @test lq(T) * T ≈ T * T rtol=3000ε - @test lqa * T ≈ a * T rtol=3000ε - @test a*x ≈ b rtol=3000ε - @test x ≈ qra \ b rtol=3000ε - @test lqa*x ≈ a*x rtol=3000ε - @test (sq = size(q.factors, 2); *(Matrix{eltyb}(I, sq, sq), adjoint(q))*squareQ(q)) ≈ Matrix(I, n, n) rtol=5000ε - if eltya != Int - @test Matrix{eltyb}(I, n, n)*q ≈ Matrix(I, n, n) * convert(LinearAlgebra.AbstractQ{tab}, q) - end - @test q*x ≈ squareQ(q)*x rtol=100ε - @test q'*x ≈ squareQ(q)'*x rtol=100ε - @test a*q ≈ a*squareQ(q) rtol=100ε - @test a*q' ≈ a*squareQ(q)' rtol=100ε - @test q*a'≈ squareQ(q)*a' rtol=100ε - @test q'*a' ≈ squareQ(q)'*a' rtol=100ε - @test_throws DimensionMismatch q*x[1:n1 + 1] - @test_throws DimensionMismatch adjoint(q) * Matrix{eltya}(undef,m+2,m+2) - @test_throws DimensionMismatch Matrix{eltyb}(undef,m+2,m+2)*q - if isa(a, DenseArray) && isa(b, DenseArray) - # use this to test 2nd branch in mult code - pad_a = vcat(I, a) - pad_x = hcat(I, x) - @test pad_a*q ≈ pad_a*squareQ(q) rtol=100ε - @test q'*pad_x ≈ squareQ(q)'*pad_x rtol=100ε - end - end - end - end - - @testset "Matmul with LQ factorizations" begin - lqa = lq(a[:,1:n1]) - l,q = lqa.L, lqa.Q - @test rectangularQ(q)*rectangularQ(q)' ≈ Matrix(I, n1, n1) - @test squareQ(q)'*squareQ(q) ≈ Matrix(I, n1, n1) - @test_throws DimensionMismatch rmul!(Matrix{eltya}(I, n+1, n+1),q) - @test lmul!(adjoint(q), rectangularQ(q)) ≈ Matrix(I, n1, n1) - @test_throws DimensionMismatch rmul!(Matrix{eltya}(I, n+1, n+1), adjoint(q)) - @test_throws BoundsError size(q,-1) - end - end -end - -@testset "getindex on LQPackedQ (#23733)" begin - local m, n - function getqs(F::LinearAlgebra.LQ) - implicitQ = F.Q - sq = size(implicitQ.factors, 2) - explicitQ = lmul!(implicitQ, Matrix{eltype(implicitQ)}(I, sq, sq)) - return implicitQ, explicitQ - end - - m, n = 3, 3 # reduced Q 3-by-3, full Q 3-by-3 - implicitQ, explicitQ = getqs(lq(randn(m, n))) - @test implicitQ[1, 1] == explicitQ[1, 1] - @test implicitQ[m, 1] == explicitQ[m, 1] - @test implicitQ[1, n] == explicitQ[1, n] - @test implicitQ[m, n] == explicitQ[m, n] - - m, n = 3, 4 # reduced Q 3-by-4, full Q 4-by-4 - implicitQ, explicitQ = getqs(lq(randn(m, n))) - @test implicitQ[1, 1] == explicitQ[1, 1] - @test implicitQ[m, 1] == explicitQ[m, 1] - @test implicitQ[1, n] == explicitQ[1, n] - @test implicitQ[m, n] == explicitQ[m, n] - @test implicitQ[m+1, 1] == explicitQ[m+1, 1] - @test implicitQ[m+1, n] == explicitQ[m+1, n] - - m, n = 4, 3 # reduced Q 3-by-3, full Q 3-by-3 - implicitQ, explicitQ = getqs(lq(randn(m, n))) - @test implicitQ[1, 1] == explicitQ[1, 1] - @test implicitQ[n, 1] == explicitQ[n, 1] - @test implicitQ[1, n] == explicitQ[1, n] - @test implicitQ[n, n] == explicitQ[n, n] -end - 
-@testset "size on LQPackedQ (#23780)" begin - # size(Q::LQPackedQ) yields the shape of Q's full/square form - for ((mA, nA), nQ) in ( - ((3, 3), 3), # A 3-by-3 => full/square Q 3-by-3 - ((3, 4), 4), # A 3-by-4 => full/square Q 4-by-4 - ((4, 3), 3) )# A 4-by-3 => full/square Q 3-by-3 - @test size(lq(randn(mA, nA)).Q) == (nQ, nQ) - end -end - -@testset "postmultiplication with / right-application of LQPackedQ (#23779)" begin - function getqs(F::LinearAlgebra.LQ) - implicitQ = F.Q - explicitQ = lmul!(implicitQ, Matrix{eltype(implicitQ)}(I, size(implicitQ)...)) - return implicitQ, explicitQ - end - # for any shape m-by-n of LQ-factored matrix, where Q is an LQPackedQ - # A_mul_B*(C, Q) (Ac_mul_B*(C, Q)) operations should work for - # *-by-n (n-by-*) C, which we test below via n-by-n C - for (mA, nA) in ((3, 3), (3, 4), (4, 3)) - implicitQ, explicitQ = getqs(lq(randn(mA, nA))) - C = randn(nA, nA) - @test *(C, implicitQ) ≈ *(C, explicitQ) - @test *(C, adjoint(implicitQ)) ≈ *(C, adjoint(explicitQ)) - @test *(adjoint(C), implicitQ) ≈ *(adjoint(C), explicitQ) - @test *(adjoint(C), adjoint(implicitQ)) ≈ *(adjoint(C), adjoint(explicitQ)) - end - # where the LQ-factored matrix has at least as many rows m as columns n, - # Q's full/square and reduced/rectangular forms have the same shape (n-by-n). hence we expect - # _only_ *-by-n (n-by-*) C to work in A_mul_B*(C, Q) (Ac_mul_B*(C, Q)) ops. - # and hence the n-by-n C tests above suffice. - # - # where the LQ-factored matrix has more columns n than rows m, - # Q's full/square form is n-by-n whereas its reduced/rectangular form is m-by-n. - # hence we need also test *-by-m C with - # A*_mul_B(C, Q) ops, as below via m-by-m C. - mA, nA = 3, 4 - implicitQ, explicitQ = getqs(lq(randn(mA, nA))) - C = randn(mA, mA) - zeroextCright = hcat(C, zeros(eltype(C), mA)) - zeroextCdown = vcat(C, zeros(eltype(C), (1, mA))) - @test *(C, implicitQ) ≈ *(zeroextCright, explicitQ) - @test *(adjoint(C), implicitQ) ≈ *(adjoint(zeroextCdown), explicitQ) - @test_throws DimensionMismatch C * adjoint(implicitQ) - @test_throws DimensionMismatch adjoint(C) * adjoint(implicitQ) -end - -@testset "det(Q::LQPackedQ)" begin - @testset for n in 1:3, m in 1:3 - @testset "real" begin - _, Q = lq(randn(n, m)) - @test det(Q) ≈ det(Q*I) - @test abs(det(Q)) ≈ 1 - end - @testset "complex" begin - _, Q = lq(randn(ComplexF64, n, m)) - @test det(Q) ≈ det(Q*I) - @test abs(det(Q)) ≈ 1 - end - end -end - -@testset "REPL printing" begin - bf = IOBuffer() - show(bf, "text/plain", lq(Matrix(I, 4, 4))) - seekstart(bf) - @test String(take!(bf)) == """ -$(LinearAlgebra.LQ){Float64, Matrix{Float64}, Vector{Float64}} -L factor: -4×4 Matrix{Float64}: - 1.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 1.0 -Q factor: 4×4 $(LinearAlgebra.LQPackedQ){Float64, Matrix{Float64}, Vector{Float64}}""" -end - -@testset "adjoint of LQ" begin - n = 5 - - for b in (ones(n), ones(n, 2), ones(Complex{Float64}, n, 2)) - for A in ( - randn(n, n), - # Tall problems become least squares problems similarly to QR - randn(n - 2, n), - complex.(randn(n, n), randn(n, n))) - - F = lq(A) - @test A'\b ≈ F'\b - end - @test_throws DimensionMismatch lq(randn(n, n + 2))'\b - end - -end - -end # module TestLQ diff --git a/stdlib/LinearAlgebra/test/lu.jl b/stdlib/LinearAlgebra/test/lu.jl deleted file mode 100644 index 56a402d70493e..0000000000000 --- a/stdlib/LinearAlgebra/test/lu.jl +++ /dev/null @@ -1,502 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestLU - -using Test, LinearAlgebra, Random -using LinearAlgebra: ldiv!, BlasReal, BlasInt, BlasFloat, rdiv! - -n = 10 - -# Split n into 2 parts for tests needing two matrices -n1 = div(n, 2) -n2 = 2*n1 - -Random.seed!(1234324) - -areal = randn(n,n)/2 -aimg = randn(n,n)/2 -breal = randn(n,2)/2 -bimg = randn(n,2)/2 -creal = randn(n)/2 -cimg = randn(n)/2 -dureal = randn(n-1)/2 -duimg = randn(n-1)/2 -dlreal = randn(n-1)/2 -dlimg = randn(n-1)/2 -dreal = randn(n)/2 -dimg = randn(n)/2 - -@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) - a = eltya == Int ? rand(1:7, n, n) : - convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal) - d = if eltya == Int - Tridiagonal(rand(1:7, n-1), rand(1:7, n), rand(1:7, n-1)) - elseif eltya <: Complex - convert(Tridiagonal{eltya}, Tridiagonal( - complex.(dlreal, dlimg), complex.(dreal, dimg), complex.(dureal, duimg))) - else - convert(Tridiagonal{eltya}, Tridiagonal(dlreal, dreal, dureal)) - end - εa = eps(abs(float(one(eltya)))) - - if eltya <: BlasFloat - @testset "LU factorization for Number" begin - num = rand(eltya) - @test (lu(num)...,) == (hcat(one(eltya)), hcat(num), [1]) - @test convert(Array, lu(num)) ≈ eltya[num] - end - @testset "Balancing in eigenvector calculations" begin - A = convert(Matrix{eltya}, [ 3.0 -2.0 -0.9 2*eps(real(one(eltya))); - -2.0 4.0 1.0 -eps(real(one(eltya))); - -eps(real(one(eltya)))/4 eps(real(one(eltya)))/2 -1.0 0; - -0.5 -0.5 0.1 1.0]) - F = eigen(A, permute=false, scale=false) - @test F.vectors*Diagonal(F.values)/F.vectors ≈ A - F = eigen(A) - # @test norm(F.vectors*Diagonal(F.values)/F.vectors - A) > 0.01 - end - end - κ = cond(a,1) - @testset "(Automatic) Square LU decomposition" begin - lua = factorize(a) - @test_throws FieldError lua.Z - l,u,p = lua.L, lua.U, lua.p - ll,ul,pl = @inferred lu(a) - @test ll * ul ≈ a[pl,:] - @test l*u ≈ a[p,:] - @test (l*u)[invperm(p),:] ≈ a - @test a * inv(lua) ≈ Matrix(I, n, n) - @test copy(lua) == lua - if eltya <: BlasFloat - # test conversion of LU factorization's numerical type - bft = eltya <: Real ? LinearAlgebra.LU{BigFloat} : LinearAlgebra.LU{Complex{BigFloat}} - bflua = convert(bft, lua) - @test bflua.L*bflua.U ≈ big.(a)[p,:] rtol=εa*norm(a) - @test Factorization{eltya}(lua) === lua - # test Factorization with different eltype - if eltya <: BlasReal - @test Array(Factorization{Float16}(lua)) ≈ Array(lu(convert(Matrix{Float16}, a))) - @test eltype(Factorization{Float16}(lua)) == Float16 - end - end - # compact printing - lstring = sprint(show,l) - ustring = sprint(show,u) - end - κd = cond(Array(d),1) - @testset "Tridiagonal LU" begin - lud = @inferred lu(d) - @test LinearAlgebra.issuccess(lud) - @test @inferred(lu(lud)) == lud - @test_throws FieldError lud.Z - @test lud.L*lud.U ≈ lud.P*Array(d) - @test lud.L*lud.U ≈ Array(d)[lud.p,:] - @test AbstractArray(lud) ≈ d - @test Array(lud) ≈ d - if eltya != Int - dlu = convert.(eltya, [1, 1]) - dia = convert.(eltya, [-2, -2, -2]) - tri = Tridiagonal(dlu, dia, dlu) - L = lu(tri) - @test lu!(tri) == L - @test UpperTriangular(tri) == L.U - end - end - @testset for eltyb in (Float32, Float64, ComplexF32, ComplexF64, Int) - b = eltyb == Int ? rand(1:5, n, 2) : - convert(Matrix{eltyb}, eltyb <: Complex ? complex.(breal, bimg) : breal) - c = eltyb == Int ? rand(1:5, n) : - convert(Vector{eltyb}, eltyb <: Complex ? 
complex.(creal, cimg) : creal) - εb = eps(abs(float(one(eltyb)))) - ε = max(εa,εb) - @testset "(Automatic) Square LU decomposition" begin - lua = factorize(a) - let Bs = copy(b), Cs = copy(c) - for (bb, cc) in ((Bs, Cs), (view(Bs, 1:n, 1), view(Cs, 1:n))) - @test norm(a*(lua\bb) - bb, 1) < ε*κ*n*2 # Two because the right hand side has two columns - @test norm(a'*(lua'\bb) - bb, 1) < ε*κ*n*2 # Two because the right hand side has two columns - @test norm(a'*(lua'\a') - a', 1) < ε*κ*n^2 - @test norm(a*(lua\cc) - cc, 1) < ε*κ*n # cc is a vector - @test norm(a'*(lua'\cc) - cc, 1) < ε*κ*n # cc is a vector - @test AbstractArray(lua) ≈ a - @test norm(transpose(a)*(transpose(lua)\bb) - bb,1) < ε*κ*n*2 # Two because the right hand side has two columns - @test norm(transpose(a)*(transpose(lua)\cc) - cc,1) < ε*κ*n - end - - # Test whether Ax_ldiv_B!(y, LU, x) indeed overwrites y - resultT = typeof(oneunit(eltyb) / oneunit(eltya)) - - b_dest = similar(b, resultT) - c_dest = similar(c, resultT) - - ldiv!(b_dest, lua, b) - ldiv!(c_dest, lua, c) - @test norm(b_dest - lua \ b, 1) < ε*κ*2n - @test norm(c_dest - lua \ c, 1) < ε*κ*n - - ldiv!(b_dest, transpose(lua), b) - ldiv!(c_dest, transpose(lua), c) - @test norm(b_dest - transpose(lua) \ b, 1) < ε*κ*2n - @test norm(c_dest - transpose(lua) \ c, 1) < ε*κ*n - - ldiv!(b_dest, adjoint(lua), b) - ldiv!(c_dest, adjoint(lua), c) - @test norm(b_dest - lua' \ b, 1) < ε*κ*2n - @test norm(c_dest - lua' \ c, 1) < ε*κ*n - - if eltyb != Int && !(eltya <: Complex) || eltya <: Complex && eltyb <: Complex - p = Matrix(b') - q = Matrix(c') - p_dest = copy(p) - q_dest = copy(q) - rdiv!(p_dest, lua) - rdiv!(q_dest, lua) - @test norm(p_dest - p / lua, 1) < ε*κ*2n - @test norm(q_dest - q / lua, 1) < ε*κ*n - end - end - if eltya <: BlasFloat && eltyb <: BlasFloat - e = rand(eltyb,n,n) - @test norm(e/lua - e/a,1) < ε*κ*n^2 - end - end - @testset "Tridiagonal LU" begin - lud = factorize(d) - f = zeros(eltyb, n+1) - @test_throws DimensionMismatch lud\f - @test_throws DimensionMismatch transpose(lud)\f - @test_throws DimensionMismatch lud'\f - @test_throws DimensionMismatch LinearAlgebra.ldiv!(transpose(lud), f) - let Bs = copy(b) - for bb in (Bs, view(Bs, 1:n, 1)) - @test norm(d*(lud\bb) - bb, 1) < ε*κd*n*2 # Two because the right hand side has two columns - if eltya <: Real - @test norm((transpose(lud)\bb) - Array(transpose(d))\bb, 1) < ε*κd*n*2 # Two because the right hand side has two columns - if eltya != Int && eltyb != Int - @test norm(LinearAlgebra.ldiv!(transpose(lud), copy(bb)) - Array(transpose(d))\bb, 1) < ε*κd*n*2 - end - end - if eltya <: Complex - dummy_factor = 2.5 - # TODO: Remove dummy_factor, this test started failing when the RNG stream changed - # so the factor was added. 
- @test norm((lud'\bb) - Array(d')\bb, 1) < ε*κd*n*2*dummy_factor # Two because the right hand side has two columns - end - end - end - if eltya <: BlasFloat && eltyb <: BlasFloat - e = rand(eltyb,n,n) - @test norm(e/lud - e/d,1) < ε*κ*n^2 - @test norm((transpose(lud)\e') - Array(transpose(d))\e',1) < ε*κd*n^2 - #test singular - du = rand(eltya,n-1) - dl = rand(eltya,n-1) - dd = rand(eltya,n) - dd[1] = zero(eltya) - du[1] = zero(eltya) - dl[1] = zero(eltya) - zT = Tridiagonal(dl,dd,du) - @test !LinearAlgebra.issuccess(lu(zT; check = false)) - end - end - @testset "Thin LU" begin - lua = @inferred lu(a[:,1:n1]) - @test lua.L*lua.U ≈ lua.P*a[:,1:n1] - end - @testset "Fat LU" begin - lua = @inferred lu(a[1:n1,:]) - @test lua.L*lua.U ≈ lua.P*a[1:n1,:] - end - end - - @testset "LU of Symmetric/Hermitian" begin - for HS in (Hermitian(a'a), Symmetric(a'a)) - luhs = @inferred lu(HS) - @test luhs.L*luhs.U ≈ luhs.P*Matrix(HS) - end - end - - @testset "Factorization of symtridiagonal dense matrix with zero ldlt-pivot (#38026)" begin - A = [0.0 -1.0 0.0 0.0 - -1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 -1.0 - 0.0 0.0 -1.0 0.0] - F = factorize(A) - @test all((!isnan).(Matrix(F))) - end -end - -@testset "Small tridiagonal matrices" for T in (Float64, ComplexF64) - A = Tridiagonal(T[], T[1], T[]) - @test inv(A) == A -end - -@testset "Singular matrices" for T in (Float64, ComplexF64) - A = T[1 2; 0 0] - @test_throws SingularException lu(A) - @test_throws SingularException lu!(copy(A)) - @test_throws SingularException lu(A; check = true) - @test_throws SingularException lu!(copy(A); check = true) - @test !issuccess(lu(A; check = false)) - @test !issuccess(lu!(copy(A); check = false)) - @test_throws ZeroPivotException lu(A, NoPivot()) - @test_throws ZeroPivotException lu!(copy(A), NoPivot()) - @test_throws ZeroPivotException lu(A, NoPivot(); check = true) - @test_throws ZeroPivotException lu!(copy(A), NoPivot(); check = true) - @test !issuccess(lu(A, NoPivot(); check = false)) - @test !issuccess(lu!(copy(A), NoPivot(); check = false)) - F = lu(A, NoPivot(); check = false) - @test sprint((io, x) -> show(io, "text/plain", x), F) == - "Failed factorization of type $(typeof(F))" - F2 = lu(A; allowsingular = true) - @test !issuccess(F2) - @test issuccess(F2, allowsingular = true) - @test occursin("U factor (rank-deficient)", sprint((io, x) -> show(io, "text/plain", x), F2)) -end - -@testset "conversion" begin - Random.seed!(4) - a = Tridiagonal(rand(9),rand(10),rand(9)) - fa = Array(a) - falu = lu(fa) - alu = lu(a) - falu = convert(typeof(falu),alu) - @test Array(alu) == fa - @test AbstractArray(alu) == fa -end - -@testset "Rational Matrices" begin - ## Integrate in general tests when more linear algebra is implemented in julia - a = convert(Matrix{Rational{BigInt}}, rand(1:10//1,n,n))/n - b = rand(1:10,n,2) - @inferred lu(a) - lua = factorize(a) - l,u,p = lua.L, lua.U, lua.p - @test l*u ≈ a[p,:] - @test l[invperm(p),:]*u ≈ a - @test a*inv(lua) ≈ Matrix(I, n, n) - let Bs = b - for b in (Bs, view(Bs, 1:n, 1)) - @test a*(lua\b) ≈ b - end - end - @test @inferred(det(a)) ≈ det(Array{Float64}(a)) -end - -@testset "Rational{BigInt} and BigFloat Hilbert Matrix" begin - ## Hilbert Matrix (very ill conditioned) - ## Testing Rational{BigInt} and BigFloat version - nHilbert = 50 - H = Rational{BigInt}[1//(i+j-1) for i = 1:nHilbert,j = 1:nHilbert] - Hinv = Rational{BigInt}[(-1)^(i+j)*(i+j-1)*binomial(nHilbert+i-1,nHilbert-j)* - binomial(nHilbert+j-1,nHilbert-i)*binomial(i+j-2,i-1)^2 - for i = big(1):nHilbert,j=big(1):nHilbert] - @test 
inv(H) == Hinv - setprecision(2^10) do - @test norm(Array{Float64}(inv(float(H)) - float(Hinv))) < 1e-100 - end -end - -@testset "logdet" begin - @test @inferred(logdet(ComplexF32[1.0f0 0.5f0; 0.5f0 -1.0f0])) === 0.22314355f0 + 3.1415927f0im - @test_throws DomainError logdet([1 1; 1 -1]) -end - -@testset "REPL printing" begin - bf = IOBuffer() - show(bf, "text/plain", lu(Matrix(I, 4, 4))) - seekstart(bf) - @test String(take!(bf)) == """ -$(LinearAlgebra.LU){Float64, Matrix{Float64}, Vector{$Int}} -L factor: -4×4 Matrix{Float64}: - 1.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 1.0 -U factor: -4×4 Matrix{Float64}: - 1.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 1.0""" -end - -@testset "propertynames" begin - names = sort!(collect(string.(Base.propertynames(lu(rand(3,3)))))) - @test names == ["L", "P", "U", "p"] - allnames = sort!(collect(string.(Base.propertynames(lu(rand(3,3)), true)))) - @test allnames == ["L", "P", "U", "factors", "info", "ipiv", "p"] -end - -include("trickyarithmetic.jl") - -@testset "lu with type whose sum is another type" begin - A = TrickyArithmetic.A[1 2; 3 4] - ElT = TrickyArithmetic.D{TrickyArithmetic.C,TrickyArithmetic.C} - B = lu(A, NoPivot()) - @test B isa LinearAlgebra.LU{ElT,Matrix{ElT}} -end - -# dimensional correctness: -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :Furlongs) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Furlongs.jl")) -using .Main.Furlongs - -@testset "lu factorization with dimension type" begin - n = 4 - A = Matrix(Furlong(1.0) * I, n, n) - F = lu(A).factors - @test Diagonal(F) == Diagonal(A) - # upper triangular part has a unit Furlong{1} - @test all(x -> typeof(x) == Furlong{1, Float64}, F[i,j] for j=1:n for i=1:j) - # lower triangular part is unitless Furlong{0} - @test all(x -> typeof(x) == Furlong{0, Float64}, F[i,j] for j=1:n for i=j+1:n) -end - -@testset "Issue #30917. Determinant of integer matrix" begin - @test det([1 1 0 0 1 0 0 0 - 1 0 1 0 0 1 0 0 - 1 0 0 1 0 0 1 0 - 0 1 1 1 0 0 0 0 - 0 1 0 0 0 0 1 1 - 0 0 1 0 1 0 0 1 - 0 0 0 1 1 1 0 0 - 0 0 0 0 1 1 0 1]) ≈ 6 -end - -@testset "Issue #33177. No ldiv!(LU, Adjoint)" begin - A = [1 0; 1 1] - B = [1 2; 2 8] - F = lu(B) - @test (A / F') * B == A - @test (A' / F') * B == A' - - a = complex.(randn(2), randn(2)) - @test (a' / F') * B ≈ a' - @test (transpose(a) / F') * B ≈ transpose(a) - - A = complex.(randn(2, 2), randn(2, 2)) - @test (A' / F') * B ≈ A' - @test (transpose(A) / F') * B ≈ transpose(A) -end - -@testset "0x0 matrix" begin - A = ones(0, 0) - F = lu(A) - @test F.U == ones(0, 0) - @test F.L == ones(0, 0) - @test F.P == ones(0, 0) - @test F.p == [] -end - -@testset "more rdiv! 
methods" begin - for elty in (Float16, Float64, ComplexF64), transform in (transpose, adjoint) - A = randn(elty, 5, 5) - C = copy(A) - B = randn(elty, 5, 5) - @test rdiv!(transform(A), transform(lu(B))) ≈ transform(C) / transform(B) - end - for elty in (Float32, Float64, ComplexF64), transF in (identity, transpose), - transB in (transpose, adjoint), transT in (identity, complex) - A = randn(elty, 5, 5) - F = lu(A) - b = randn(transT(elty), 5) - @test rdiv!(transB(copy(b)), transF(F)) ≈ transB(b) / transF(F) ≈ transB(b) / transF(A) - B = randn(transT(elty), 5, 5) - @test rdiv!(copy(B), transF(F)) ≈ B / transF(F) ≈ B / transF(A) - end -end - -@testset "transpose(A) / lu(B)' should not overwrite A (#36657)" begin - for elty in (Float16, Float64, ComplexF64) - A = randn(elty, 5, 5) - B = randn(elty, 5, 5) - C = copy(A) - a = randn(elty, 5) - c = copy(a) - @test transpose(A) / lu(B)' ≈ transpose(A) / B' - @test transpose(a) / lu(B)' ≈ transpose(a) / B' - @test A == C - @test a == c - end -end - -@testset "lu on *diagonal matrices" begin - dl = rand(3) - d = rand(4) - Bl = Bidiagonal(d, dl, :L) - Bu = Bidiagonal(d, dl, :U) - Tri = Tridiagonal(dl, d, dl) - Sym = SymTridiagonal(d, dl) - D = Diagonal(d) - b = ones(4) - B = rand(4,4) - for A in (Bl, Bu, Tri, Sym, D), pivot in (NoPivot(), RowMaximum()) - @test A\b ≈ lu(A, pivot)\b - @test B/A ≈ B/lu(A, pivot) - @test B/A ≈ B/Matrix(A) - @test Matrix(lu(A, pivot)) ≈ A - @test @inferred(lu(A)) isa LU - if A isa Union{Bidiagonal, Diagonal, Tridiagonal, SymTridiagonal} - @test lu(A) isa LU{Float64, Tridiagonal{Float64, Vector{Float64}}} - @test lu(A, pivot) isa LU{Float64, Tridiagonal{Float64, Vector{Float64}}} - @test lu(A, pivot; check = false) isa LU{Float64, Tridiagonal{Float64, Vector{Float64}}} - end - end -end - -@testset "can push to vector after 3-arg ldiv! (#43507)" begin - u = rand(3) - A = rand(3,3) - b = rand(3) - ldiv!(u,lu(A),b) - push!(b,4.0) - @test length(b) == 4 -end - -@testset "NaN matrix should throw error" begin - for eltya in (NaN16, NaN32, NaN64, BigFloat(NaN)) - r = fill(eltya, 2, 3) - c = fill(complex(eltya, eltya), 2, 3) - @test_throws ArgumentError lu(r) - @test_throws ArgumentError lu(c) - end -end - -@testset "more generic ldiv! #35419" begin - A = rand(3, 3) - b = rand(3) - @test A * ldiv!(lu(A), Base.ReshapedArray(copy(b)', (3,), ())) ≈ b -end - -@testset "generic lu!" begin - A = rand(3,3); B = deepcopy(A); C = A[2:3,2:3] - Asub1 = @view(A[2:3,2:3]) - F1 = lu!(Asub1) - Asub2 = @view(B[[2,3],[2,3]]) - F2 = lu!(Asub2) - @test Matrix(F1) ≈ Matrix(F2) ≈ C -end - -@testset "matrix with Nonfinite" begin - lu(fill(NaN, 2, 2), check=false) - lu(fill(Inf, 2, 2), check=false) - LinearAlgebra.generic_lufact!(fill(NaN, 2, 2), check=false) - LinearAlgebra.generic_lufact!(fill(Inf, 2, 2), check=false) -end - -@testset "lu for empty matrices" begin - for T in (Float64, BigFloat) - A = fill(T(0.0), 0, 0) - v = fill(T(1.0), 0, 10) - @test A \ v ≈ lu(A) \ v - vt = permutedims(v) - @test vt / A ≈ vt / lu(A) - B = UpperTriangular(transpose(fill(complex(T(0.0)), 0, 0)')) - @test B \ v ≈ v - @test vt / B ≈ vt - end -end - -end # module TestLU diff --git a/stdlib/LinearAlgebra/test/matmul.jl b/stdlib/LinearAlgebra/test/matmul.jl deleted file mode 100644 index 1294e97c2a30c..0000000000000 --- a/stdlib/LinearAlgebra/test/matmul.jl +++ /dev/null @@ -1,1151 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestMatmul - -using Base: rtoldefault -using Test, LinearAlgebra, Random -using LinearAlgebra: mul!, Symmetric, Hermitian - -## Test Julia fallbacks to BLAS routines - -mul_wrappers = [ - m -> m, - m -> Symmetric(m, :U), - m -> Symmetric(m, :L), - m -> Hermitian(m, :U), - m -> Hermitian(m, :L), - m -> adjoint(m), - m -> transpose(m)] - -@testset "wrap" begin - f(A) = LinearAlgebra.wrap(A, 'N') - A = ones(1,1) - @test @inferred(f(A)) === A - g(A) = LinearAlgebra.wrap(A, 'T') - @test @inferred(g(A)) === transpose(A) - # https://github.com/JuliaLang/julia/issues/52202 - @test Base.infer_return_type((Vector{Float64},)) do v - LinearAlgebra.wrap(v, 'N') - end == Vector{Float64} - h(A) = LinearAlgebra.wrap(LinearAlgebra._unwrap(A), LinearAlgebra.wrapper_char(A)) - @test @inferred(h(transpose(A))) === transpose(A) - @test @inferred(h(adjoint(A))) === transpose(A) - - M = rand(2,2) - for S in (Symmetric(M), Hermitian(M)) - @test @inferred((A -> LinearAlgebra.wrap(parent(A), LinearAlgebra.wrapper_char(A)))(S)) === Symmetric(M) - end - M = rand(ComplexF64,2,2) - for S in (Symmetric(M), Hermitian(M)) - @test @inferred((A -> LinearAlgebra.wrap(parent(A), LinearAlgebra.wrapper_char(A)))(S)) === S - end - - @testset "WrapperChar" begin - @test LinearAlgebra.WrapperChar('c') == 'c' - @test LinearAlgebra.WrapperChar('C') == 'C' - @testset "constant propagation in uppercase/lowercase" begin - v = @inferred (() -> Val(uppercase(LinearAlgebra.WrapperChar('C'))))() - @test v isa Val{'C'} - v = @inferred (() -> Val(uppercase(LinearAlgebra.WrapperChar('s'))))() - @test v isa Val{'S'} - v = @inferred (() -> Val(lowercase(LinearAlgebra.WrapperChar('C'))))() - @test v isa Val{'c'} - v = @inferred (() -> Val(lowercase(LinearAlgebra.WrapperChar('s'))))() - @test v isa Val{'s'} - end - end -end - -@testset "matrices with zero dimensions" begin - for (dimsA, dimsB, dimsC) in ( - ((0, 5), (5, 3), (0, 3)), - ((3, 5), (5, 0), (3, 0)), - ((3, 0), (0, 4), (3, 4)), - ((0, 5), (5, 0), (0, 0)), - ((0, 0), (0, 4), (0, 4)), - ((3, 0), (0, 0), (3, 0)), - ((0, 0), (0, 0), (0, 0))) - @test Matrix{Float64}(undef, dimsA) * Matrix{Float64}(undef, dimsB) == zeros(dimsC) - end - @test Matrix{Float64}(undef, 5, 0) |> t -> t't == zeros(0, 0) - @test Matrix{Float64}(undef, 5, 0) |> t -> t * t' == zeros(5, 5) - @test Matrix{ComplexF64}(undef, 5, 0) |> t -> t't == zeros(0, 0) - @test Matrix{ComplexF64}(undef, 5, 0) |> t -> t * t' == zeros(5, 5) -end -@testset "2x2 matmul" begin - AA = [1 2; 3 4] - BB = [5 6; 7 8] - AAi = AA + (0.5 * im) .* BB - BBi = BB + (2.5 * im) .* AA[[2, 1], [2, 1]] - for A in (copy(AA), view(AA, 1:2, 1:2)), B in (copy(BB), view(BB, 1:2, 1:2)) - @test A * B == [19 22; 43 50] - @test *(transpose(A), B) == [26 30; 38 44] - @test *(A, transpose(B)) == [17 23; 39 53] - @test *(transpose(A), transpose(B)) == [23 31; 34 46] - end - for Ai in (copy(AAi), view(AAi, 1:2, 1:2)), Bi in (copy(BBi), view(BBi, 1:2, 1:2)) - @test Ai * Bi == [-21+53.5im -4.25+51.5im; -12+95.5im 13.75+85.5im] - @test *(adjoint(Ai), Bi) == [68.5-12im 57.5-28im; 88-3im 76.5-25im] - @test *(Ai, adjoint(Bi)) == [64.5+5.5im 43+31.5im; 104-18.5im 80.5+31.5im] - @test *(adjoint(Ai), adjoint(Bi)) == [-28.25-66im 9.75-58im; -26-89im 21-73im] - @test_throws DimensionMismatch [1 2; 0 0; 0 0] * [1 2] - end - for wrapper_a in mul_wrappers, wrapper_b in mul_wrappers - @test wrapper_a(AA) * wrapper_b(BB) == Array(wrapper_a(AA)) * Array(wrapper_b(BB)) - end - @test_throws DimensionMismatch 
mul!(Matrix{Float64}(undef, 3, 3), AA, BB) -end -@testset "3x3 matmul" begin - AA = [1 2 3; 4 5 6; 7 8 9] .- 5 - BB = [1 0 5; 6 -10 3; 2 -4 -1] - AAi = AA + (0.5 * im) .* BB - BBi = BB + (2.5 * im) .* AA[[2, 1, 3], [2, 3, 1]] - for A in (copy(AA), view(AA, 1:3, 1:3)), B in (copy(BB), view(BB, 1:3, 1:3)) - @test A * B == [-26 38 -27; 1 -4 -6; 28 -46 15] - @test *(adjoint(A), B) == [-6 2 -25; 3 -12 -18; 12 -26 -11] - @test *(A, adjoint(B)) == [-14 0 6; 4 -3 -3; 22 -6 -12] - @test *(adjoint(A), adjoint(B)) == [6 -8 -6; 12 -9 -9; 18 -10 -12] - end - for Ai in (copy(AAi), view(AAi, 1:3, 1:3)), Bi in (copy(BBi), view(BBi, 1:3, 1:3)) - @test Ai * Bi == [-44.75+13im 11.75-25im -38.25+30im; -47.75-16.5im -51.5+51.5im -56+6im; 16.75-4.5im -53.5+52im -15.5im] - @test *(adjoint(Ai), Bi) == [-21+2im -1.75+49im -51.25+19.5im; 25.5+56.5im -7-35.5im 22+35.5im; -3+12im -32.25+43im -34.75-2.5im] - @test *(Ai, adjoint(Bi)) == [-20.25+15.5im -28.75-54.5im 22.25+68.5im; -12.25+13im -15.5+75im -23+27im; 18.25+im 1.5+94.5im -27-54.5im] - @test *(adjoint(Ai), adjoint(Bi)) == [1+2im 20.75+9im -44.75+42im; 19.5+17.5im -54-36.5im 51-14.5im; 13+7.5im 11.25+31.5im -43.25-14.5im] - @test_throws DimensionMismatch [1 2 3; 0 0 0; 0 0 0] * [1 2 3] - end - for wrapper_a in mul_wrappers, wrapper_b in mul_wrappers - @test wrapper_a(AA) * wrapper_b(BB) == Array(wrapper_a(AA)) * Array(wrapper_b(BB)) - end - @test_throws DimensionMismatch mul!(Matrix{Float64}(undef, 4, 4), AA, BB) -end - -# Generic AbstractArrays -module MyArray15367 -using Test, Random - -struct MyArray{T,N} <: AbstractArray{T,N} - data::Array{T,N} -end - -Base.size(A::MyArray) = size(A.data) -Base.getindex(A::MyArray, indices...) = A.data[indices...] - -A = MyArray(rand(4, 5)) -b = rand(5) -@test A * b ≈ A.data * b -end - -@testset "Generic integer matrix multiplication" begin - AA = [1 2 3; 4 5 6] .- 3 - BB = [2 -2; 3 -5; -4 7] - for A in (copy(AA), view(AA, 1:2, 1:3)), B in (copy(BB), view(BB, 1:3, 1:2)) - @test A * B == [-7 9; -4 9] - @test *(transpose(A), transpose(B)) == [-6 -11 15; -6 -13 18; -6 -15 21] - end - AA = fill(1, 2, 100) - BB = fill(1, 100, 3) - for A in (copy(AA), view(AA, 1:2, 1:100)), B in (copy(BB), view(BB, 1:100, 1:3)) - @test A * B == [100 100 100; 100 100 100] - end - AA = rand(1:20, 5, 5) .- 10 - BB = rand(1:20, 5, 5) .- 10 - CC = Matrix{Int}(undef, size(AA, 1), size(BB, 2)) - for A in (copy(AA), view(AA, 1:5, 1:5)), B in (copy(BB), view(BB, 1:5, 1:5)), C in (copy(CC), view(CC, 1:5, 1:5)) - @test *(transpose(A), B) == A' * B - @test *(A, transpose(B)) == A * B' - # Preallocated - @test mul!(C, A, B) == A * B - @test mul!(C, transpose(A), B) == A' * B - @test mul!(C, A, transpose(B)) == A * B' - @test mul!(C, transpose(A), transpose(B)) == A' * B' - @test mul!(C, adjoint(A), transpose(B)) == A' * transpose(B) - - # Inplace multiply-add - α = rand(-10:10) - β = rand(-10:10) - rand!(C, -10:10) - βC = β * C - _C0 = copy(C) - C0() = (C .= _C0; C) # reset C but don't change the container type - @test mul!(C0(), A, B, α, β) == α * A * B .+ βC - @test mul!(C0(), transpose(A), B, α, β) == α * A' * B .+ βC - @test mul!(C0(), A, transpose(B), α, β) == α * A * B' .+ βC - @test mul!(C0(), transpose(A), transpose(B), α, β) == α * A' * B' .+ βC - @test mul!(C0(), adjoint(A), transpose(B), α, β) == α * A' * transpose(B) .+ βC - - #test DimensionMismatch for generic_matmatmul - @test_throws DimensionMismatch mul!(C, adjoint(A), transpose(fill(1, 4, 4))) - @test_throws DimensionMismatch mul!(C, adjoint(fill(1, 4, 4)), transpose(B)) - end - vv = [1, 2] 
- CC = Matrix{Int}(undef, 2, 2) - for v in (copy(vv), view(vv, 1:2)), C in (copy(CC), view(CC, 1:2, 1:2)) - @test @inferred(mul!(C, v, adjoint(v))) == [1 2; 2 4] - - C .= [1 0; 0 1] - @test @inferred(mul!(C, v, adjoint(v), 2, 3)) == [5 4; 4 11] - end -end - -@testset "generic_matvecmul" begin - AA = rand(5, 5) - BB = rand(5) - for A in (copy(AA), view(AA, 1:5, 1:5)), B in (copy(BB), view(BB, 1:5)) - @test_throws DimensionMismatch LinearAlgebra.generic_matvecmul!(zeros(6), 'N', A, B) - @test_throws DimensionMismatch LinearAlgebra.generic_matvecmul!(B, 'N', A, zeros(6)) - end - vv = [1, 2, 3] - CC = Matrix{Int}(undef, 3, 3) - for v in (copy(vv), view(vv, 1:3)), C in (copy(CC), view(CC, 1:3, 1:3)) - @test mul!(C, v, transpose(v)) == v * v' - C .= C0 = rand(-10:10, size(C)) - @test mul!(C, v, transpose(v), 2, 3) == 2v * v' .+ 3C0 - end - vvf = map(Float64, vv) - CC = Matrix{Float64}(undef, 3, 3) - for vf in (copy(vvf), view(vvf, 1:3)), C in (copy(CC), view(CC, 1:3, 1:3)) - @test mul!(C, vf, transpose(vf)) == vf * vf' - C .= C0 = rand(eltype(C), size(C)) - @test mul!(C, vf, transpose(vf), 2, 3) ≈ 2vf * vf' .+ 3C0 - end - - @testset "zero stride" begin - for AAv in (view(AA, StepRangeLen(2,0,size(AA,1)), :), - view(AA, StepRangeLen.(2,0,size(AA))...), - view(complex.(AA, AA), StepRangeLen.(2,0,size(AA))...),) - for BB2 in (BB, complex.(BB, BB)) - C = AAv * BB2 - @test allequal(C) - @test C ≈ Array(AAv) * BB2 - end - end - end -end - -@testset "generic_matvecmul for vectors of vectors" begin - @testset "matrix of scalars" begin - u = [[1, 2], [3, 4]] - A = [1 2; 3 4] - v = [[0, 0], [0, 0]] - Au = [[7, 10], [15, 22]] - @test A * u == Au - mul!(v, A, u) - @test v == Au - mul!(v, A, u, 2, -1) - @test v == Au - end - - @testset "matrix of matrices" begin - u = [[1, 2], [3, 4]] - A = Matrix{Matrix{Int}}(undef, 2, 2) - A[1, 1] = [1 2; 3 4] - A[1, 2] = [5 6; 7 8] - A[2, 1] = [9 10; 11 12] - A[2, 2] = [13 14; 15 16] - v = [[0, 0], [0, 0]] - Au = [[44, 64], [124, 144]] - @test A * u == Au - mul!(v, A, u) - @test v == Au - mul!(v, A, u, 2, -1) - @test v == Au - end -end - -@testset "generic_matvecmul for vectors of matrices" begin - x = [1 2 3; 4 5 6] - A = reshape([x,2x,3x,4x],2,2) - b = [x, 2x] - for f in (adjoint, transpose) - c = f(A) * b - for i in eachindex(c) - @test c[i] == sum(f(A)[i, j] * b[j] for j in eachindex(b)) - end - end -end - -@testset "generic_matmatmul for matrices of vectors" begin - B = Matrix{Vector{Int}}(undef, 2, 2) - B[1, 1] = [1, 2] - B[2, 1] = [3, 4] - B[1, 2] = [5, 6] - B[2, 2] = [7, 8] - A = [1 2; 3 4] - C = Matrix{Vector{Int}}(undef, 2, 2) - AB = Matrix{Vector{Int}}(undef, 2, 2) - AB[1, 1] = [7, 10] - AB[2, 1] = [15, 22] - AB[1, 2] = [19, 22] - AB[2, 2] = [43, 50] - @test A * B == AB - mul!(C, A, B) - @test C == AB - mul!(C, A, B, 2, -1) - @test C == AB - LinearAlgebra.generic_matmatmul!(C, 'N', 'N', A, B, LinearAlgebra.MulAddMul(2, -1)) - @test C == AB -end - -@testset "fallbacks & such for BlasFloats" begin - AA = rand(Float64, 6, 6) - BB = rand(Float64, 6, 6) - CC = zeros(Float64, 6, 6) - for A in (copy(AA), view(AA, 1:6, 1:6)), B in (copy(BB), view(BB, 1:6, 1:6)), C in (copy(CC), view(CC, 1:6, 1:6)) - @test mul!(C, transpose(A), transpose(B)) == transpose(A) * transpose(B) - @test mul!(C, A, adjoint(B)) == A * transpose(B) - @test mul!(C, adjoint(A), B) == transpose(A) * B - - # Inplace multiply-add - α = rand(Float64) - β = rand(Float64) - rand!(C) - βC = β * C - _C0 = copy(C) - C0() = (C .= _C0; C) # reset C but don't change the container type - @test mul!(C0(), 
transpose(A), transpose(B), α, β) ≈ α * transpose(A) * transpose(B) .+ βC - @test mul!(C0(), A, adjoint(B), α, β) ≈ α * A * transpose(B) .+ βC - @test mul!(C0(), adjoint(A), B, α, β) ≈ α * transpose(A) * B .+ βC - end -end - -@testset "allocations in BLAS-mul" begin - for n in (2, 3, 6) - A = rand(Float64, n, n) - B = rand(Float64, n, n) - C = zeros(Float64, n, n) - # gemm - for t in (identity, adjoint, transpose) - At = t(A) - Bt = t(B) - mul!(C, At, B) - @test 0 == @allocations mul!(C, At, B) - mul!(C, A, Bt) - @test 0 == @allocations mul!(C, A, Bt) - mul!(C, At, Bt) - @test 0 == @allocations mul!(C, At, Bt) - end - # syrk/herk - @test 0 == @allocations mul!(C, transpose(A), A) - @test 0 == @allocations mul!(C, adjoint(A), A) - @test 0 == @allocations mul!(C, A, transpose(A)) - @test 0 == @allocations mul!(C, A, adjoint(A)) - # complex times real - Cc = complex(C) - Ac = complex(A) - for t in (identity, adjoint, transpose) - Bt = t(B) - @test 0 == @allocations mul!(Cc, Ac, Bt) - end - end -end - -@testset "mixed Blas-non-Blas matmul" begin - AA = rand(-10:10, 6, 6) - BB = ones(Float64, 6, 6) - CC = zeros(Float64, 6, 6) - for A in (copy(AA), view(AA, 1:6, 1:6)), B in (copy(BB), view(BB, 1:6, 1:6)), C in (copy(CC), view(CC, 1:6, 1:6)) - @test mul!(C, A, B) == A * B - @test mul!(C, transpose(A), transpose(B)) == transpose(A) * transpose(B) - @test mul!(C, A, adjoint(B)) == A * transpose(B) - @test mul!(C, adjoint(A), B) == transpose(A) * B - end -end - -@testset "allocations in mixed Blas-non-Blas matmul" begin - for n in (2, 3, 6) - A = rand(-10:10, n, n) - B = ones(Float64, n, n) - C = zeros(Float64, n, n) - @test 0 == @allocations mul!(C, A, B) - @test 0 == @allocations mul!(C, A, transpose(B)) - @test 0 == @allocations mul!(C, adjoint(A), B) - end -end - -@testset "matrix algebra with subarrays of floats (stride != 1)" begin - A = reshape(map(Float64, 1:20), 5, 4) - Aref = A[1:2:end, 1:2:end] - Asub = view(A, 1:2:5, 1:2:4) - b = [1.2, -2.5] - @test (Aref * b) == (Asub * b) - @test *(transpose(Asub), Asub) == *(transpose(Aref), Aref) - @test *(Asub, transpose(Asub)) == *(Aref, transpose(Aref)) - Ai = A .+ im - Aref = Ai[1:2:end, 1:2:end] - Asub = view(Ai, 1:2:5, 1:2:4) - @test *(adjoint(Asub), Asub) == *(adjoint(Aref), Aref) - @test *(Asub, adjoint(Asub)) == *(Aref, adjoint(Aref)) -end - -@testset "matrix x matrix with negative stride" begin - M = reshape(map(Float64, 1:77), 7, 11) - N = reshape(map(Float64, 1:63), 9, 7) - U = view(M, 7:-1:1, 11:-2:1) - V = view(N, 7:-1:2, 7:-1:1) - @test U * V ≈ Matrix(U) * Matrix(V) -end - -@testset "dot product of subarrays of vectors (floats, negative stride, issue #37767)" begin - for T in (Float32, Float64, ComplexF32, ComplexF64) - a = Vector{T}(3:2:7) - b = Vector{T}(1:10) - v = view(b, 7:-2:3) - @test dot(a, Vector(v)) ≈ 67.0 - @test dot(a, v) ≈ 67.0 - @test dot(v, a) ≈ 67.0 - @test dot(Vector(v), Vector(v)) ≈ 83.0 - @test dot(v, v) ≈ 83.0 - end -end - -@testset "dot product of stride-vector like input" begin - for T in (Float32, Float64, ComplexF32, ComplexF64) - a = randn(T, 10) - b = view(a, 1:10) - c = reshape(b, 5, 2) - d = view(c, :, 1:2) - r = sum(abs2, a) - for x in (a,b,c,d), y in (a,b,c,d) - @test dot(x, y) ≈ r - end - end -end - -@testset "Complex matrix x real MatOrVec etc (issue #29224)" for T in (Float32, Float64) - A0 = randn(complex(T), 10, 10) - B0 = randn(T, 10, 10) - @testset "Combination Mat{$(complex(T))} Mat{$T}" for Bax1 in (1:5, 2:2:10), Bax2 in (1:5, 2:2:10) - B = view(A0, Bax1, Bax2) - tB = transpose(B) - Bd, tBd = 
copy(B), copy(tB) - for Aax1 in (1:5, 2:2:10, (:)), Aax2 in (1:5, 2:2:10) - A = view(A0, Aax1, Aax2) - AB_correct = copy(A) * Bd - AtB_correct = copy(A) * tBd - @test A*Bd ≈ AB_correct # view times matrix - @test A*B ≈ AB_correct # view times view - @test A*tBd ≈ AtB_correct # view times transposed matrix - @test A*tB ≈ AtB_correct # view times transposed view - end - end - x = randn(T, 10) - y0 = similar(A0, 20) - @testset "Combination Mat{$(complex(T))} Vec{$T}" for Aax1 in (1:5, 2:2:10, (:)), Aax2 in (1:5, 2:2:10) - A = view(A0, Aax1, Aax2) - Ad = copy(A) - for indx in (1:5, 1:2:10, 6:-1:2) - vx = view(x, indx) - dx = x[indx] - Ax_correct = Ad*dx - @test A*vx ≈ A*dx ≈ Ad*vx ≈ Ax_correct # view/matrix times view/vector - for indy in (1:2:2size(A,1), size(A,1):-1:1) - y = view(y0, indy) - @test mul!(y, A, vx) ≈ mul!(y, A, dx) ≈ mul!(y, Ad, vx) ≈ - mul!(y, Ad, dx) ≈ Ax_correct # test for uncontiguous dest - end - end - end -end - -@testset "real matrix x complex vec" begin - _matmulres(M, v) = [mapreduce(*, +, row, v) for row in eachrow(M)] - testmatmul(M, v) = @test M * v ≈ _matmulres(M, v) - - @testset for T in (Float32, Float64), n = (4, 5) - M1 = reshape(Vector{T}(1:n^2), n, n) - M2 = reinterpret(reshape, T, [Tuple(T(i + j) for j in 1:n) for i in 1:n]) - v = convert(Vector{Complex{T}}, (1:n) .+ im .* (4 .+ (1:n))) - - for M in (M1, M2) - M_view_cont = @view M[:, :] - v_view_cont = @view v[:] - for _M in (M, M_view_cont), _v in (v, v_view_cont) - testmatmul(_M, _v) - end - - # construct a view with strides(M, 1) == 1 and strides(M, 2) != 1 - ax_noncont = 1:2:n - n1 = length(ax_noncont) - M_view_noncont = @view M[1:n1, ax_noncont] - v_view_noncont = @view v[ax_noncont] - testmatmul(M_view_noncont, v_view_noncont) - - @testset for op in (transpose, adjoint) - for _M in (M, M_view_cont), _v in (v, v_view_cont) - _M2 = op(_M) - testmatmul(_M2, _v) - end - _M2 = op(M_view_noncont) - testmatmul(_M2, v_view_noncont) - end - end - end -end - -@testset "matrix x vector with negative lda or 0 stride" for T in (Float32, Float64) - for TA in (T, complex(T)), TB in (T, complex(T)) - A = view(randn(TA, 10, 10), 1:10, 10:-1:1) # negative lda - v = view([randn(TB)], 1 .+ 0(1:10)) # 0 stride - Ad, vd = copy(A), copy(v) - @test Ad * vd ≈ A * vd ≈ Ad * v ≈ A * v - end -end - -@testset "issue #15286" begin - A = reshape(map(Float64, 1:20), 5, 4) - C = zeros(8, 8) - sC = view(C, 1:2:8, 1:2:8) - B = reshape(map(Float64, -9:10), 5, 4) - @test mul!(sC, transpose(A), A) == A' * A - @test mul!(sC, transpose(A), B) == A' * B - - Aim = A .- im - C = zeros(ComplexF64, 8, 8) - sC = view(C, 1:2:8, 1:2:8) - B = reshape(map(Float64, -9:10), 5, 4) .+ im - @test mul!(sC, adjoint(Aim), Aim) == Aim' * Aim - @test mul!(sC, adjoint(Aim), B) == Aim' * B -end - -@testset "syrk & herk" begin - AA = reshape(1:1503, 501, 3) .- 750.0 - res = Float64[135228751 9979252 -115270247; 9979252 10481254 10983256; -115270247 10983256 137236759] - for A in (copy(AA), view(AA, 1:501, 1:3)) - @test *(transpose(A), A) == res - @test *(adjoint(A), transpose(copy(A'))) == res - end - cutoff = 501 - A = reshape(1:6*cutoff, 2 * cutoff, 3) .- (6 * cutoff) / 2 - Asub = view(A, 1:2:2*cutoff, 1:3) - Aref = A[1:2:2*cutoff, 1:3] - @test *(transpose(Asub), Asub) == *(transpose(Aref), Aref) - Ai = A .- im - Asub = view(Ai, 1:2:2*cutoff, 1:3) - Aref = Ai[1:2:2*cutoff, 1:3] - @test *(adjoint(Asub), Asub) == *(adjoint(Aref), Aref) - - A5x5, A6x5 = Matrix{Float64}.(undef, ((5, 5), (6, 5))) - @test_throws DimensionMismatch LinearAlgebra.syrk_wrapper!(A5x5, 
'N', A6x5) - @test_throws DimensionMismatch LinearAlgebra.herk_wrapper!(A5x5, 'N', A6x5) -end - -@testset "matmul for types w/o sizeof (issue #1282)" begin - AA = fill(complex(1, 1), 10, 10) - for A in (copy(AA), view(AA, 1:10, 1:10)) - A2 = A^2 - @test A2[1, 1] == 20im - end -end - -@testset "mul! (scaling)" begin - A5x5, b5, C5x6 = Array{Float64}.(undef, ((5, 5), 5, (5, 6))) - for A in (A5x5, view(A5x5, :, :)), b in (b5, view(b5, :)), C in (C5x6, view(C5x6, :, :)) - @test_throws DimensionMismatch mul!(A, Diagonal(b), C) - end -end - -@testset "muladd" begin - A23 = reshape(1:6, 2, 3) .+ 0 - B34 = reshape(1:12, 3, 4) .+ im - u2 = [10, 20] - v3 = [3, 5, 7] .+ im - w4 = [11, 13, 17, 19im] - - @testset "matrix-matrix" begin - @test muladd(A23, B34, 0) == A23 * B34 - @test muladd(A23, B34, 100) == A23 * B34 .+ 100 - @test muladd(A23, B34, u2) == A23 * B34 .+ u2 - @test muladd(A23, B34, w4') == A23 * B34 .+ w4' - @test_throws DimensionMismatch muladd(B34, A23, 1) - @test muladd(ones(1, 3), ones(3, 4), ones(1, 4)) == fill(4.0, 1, 4) - @test_throws DimensionMismatch muladd(ones(1, 3), ones(3, 4), ones(9, 4)) - - # broadcasting fallback method allows trailing dims - @test muladd(A23, B34, ones(2, 4, 1)) == A23 * B34 + ones(2, 4, 1) - @test_throws DimensionMismatch muladd(ones(1, 3), ones(3, 4), ones(9, 4, 1)) - @test_throws DimensionMismatch muladd(ones(1, 3), ones(3, 4), ones(1, 4, 9)) - # and catches z::Array{T,0} - @test muladd(A23, B34, fill(0)) == A23 * B34 - end - @testset "matrix-vector" begin - @test muladd(A23, v3, 0) == A23 * v3 - @test muladd(A23, v3, 100) == A23 * v3 .+ 100 - @test muladd(A23, v3, u2) == A23 * v3 .+ u2 - @test muladd(A23, v3, im) isa Vector{Complex{Int}} - @test muladd(ones(1, 3), ones(3), ones(1)) == [4] - @test_throws DimensionMismatch muladd(ones(1, 3), ones(3), ones(7)) - - # fallback - @test muladd(A23, v3, ones(2, 1, 1)) == A23 * v3 + ones(2, 1, 1) - @test_throws DimensionMismatch muladd(A23, v3, ones(2, 2)) - @test_throws DimensionMismatch muladd(ones(1, 3), ones(3), ones(7, 1)) - @test_throws DimensionMismatch muladd(ones(1, 3), ones(3), ones(1, 7)) - @test muladd(A23, v3, fill(0)) == A23 * v3 - end - @testset "adjoint-matrix" begin - @test muladd(v3', B34, 0) isa Adjoint - @test muladd(v3', B34, 2im) == v3' * B34 .+ 2im - @test muladd(v3', B34, w4') == v3' * B34 .+ w4' - - # via fallback - @test muladd(v3', B34, ones(1, 4)) == (B34' * v3 + ones(4, 1))' - @test_throws DimensionMismatch muladd(v3', B34, ones(7, 4)) - @test_throws DimensionMismatch muladd(v3', B34, ones(1, 4, 7)) - @test muladd(v3', B34, fill(0)) == v3' * B34 # does not make an Adjoint - end - @testset "vector-adjoint" begin - @test muladd(u2, v3', 0) isa Matrix - @test muladd(u2, v3', 99) == u2 * v3' .+ 99 - @test muladd(u2, v3', A23) == u2 * v3' .+ A23 - - @test muladd(u2, v3', ones(2, 3, 1)) == u2 * v3' + ones(2, 3, 1) - @test_throws DimensionMismatch muladd(u2, v3', ones(2, 3, 4)) - @test_throws DimensionMismatch muladd([1], v3', ones(7, 3)) - @test muladd(u2, v3', fill(0)) == u2 * v3' - end - @testset "dot" begin # all use muladd(::Any, ::Any, ::Any) - @test muladd(u2', u2, 0) isa Number - @test muladd(v3', v3, im) == dot(v3, v3) + im - @test muladd(u2', u2, [1]) == [dot(u2, u2) + 1] - @test_throws DimensionMismatch muladd(u2', u2, [1, 1]) == [dot(u2, u2) + 1] - @test muladd(u2', u2, fill(0)) == dot(u2, u2) - end - @testset "arrays of arrays" begin - vofm = [rand(1:9, 2, 2) for _ in 1:3] - Mofm = [rand(1:9, 2, 2) for _ in 1:3, _ in 1:3] - - @test muladd(vofm', vofm, vofm[1]) == vofm' * vofm 
.+ vofm[1] # inner - @test muladd(vofm, vofm', Mofm) == vofm * vofm' .+ Mofm # outer - @test muladd(vofm', Mofm, vofm') == vofm' * Mofm .+ vofm' # bra-mat - @test muladd(Mofm, Mofm, vofm) == Mofm * Mofm .+ vofm # mat-mat - @test muladd(Mofm, vofm, vofm) == Mofm * vofm .+ vofm # mat-vec - end -end - -@testset "muladd & structured matrices" begin - A33 = reshape(1:9, 3, 3) .+ im - v3 = [3, 5, 7im] - - # no special treatment - @test muladd(Symmetric(A33), Symmetric(A33), 1) == Symmetric(A33) * Symmetric(A33) .+ 1 - @test muladd(Hermitian(A33), Hermitian(A33), v3) == Hermitian(A33) * Hermitian(A33) .+ v3 - @test muladd(adjoint(A33), transpose(A33), A33) == A33' * transpose(A33) .+ A33 - - u1 = muladd(UpperTriangular(A33), UpperTriangular(A33), Diagonal(v3)) - @test u1 isa UpperTriangular - @test u1 == UpperTriangular(A33) * UpperTriangular(A33) + Diagonal(v3) - - # diagonal - @test muladd(Diagonal(v3), Diagonal(A33), Diagonal(v3)).diag == ([1, 5, 9] .+ im .+ 1) .* v3 - - # uniformscaling - @test muladd(Diagonal(v3), I, I).diag == v3 .+ 1 - @test muladd(2 * I, 3 * I, I).λ == 7 - @test muladd(A33, A33', I) == A33 * A33' + I - - # https://github.com/JuliaLang/julia/issues/38426 - @test @evalpoly(A33, 1.0 * I, 1.0 * I) == I + A33 - @test @evalpoly(A33, 1.0 * I, 1.0 * I, 1.0 * I) == I + A33 + A33^2 -end - -# issue #6450 -@test dot(Any[1.0, 2.0], Any[3.5, 4.5]) === 12.5 - -@testset "dot" for elty in (Float32, Float64, ComplexF32, ComplexF64) - x = convert(Vector{elty}, [1.0, 2.0, 3.0]) - y = convert(Vector{elty}, [3.5, 4.5, 5.5]) - @test_throws DimensionMismatch dot(x, 1:2, y, 1:3) - @test_throws BoundsError dot(x, 1:4, y, 1:4) - @test_throws BoundsError dot(x, 1:3, y, 2:4) - @test dot(x, 1:2, y, 1:2) == convert(elty, 12.5) - @test transpose(x) * y == convert(elty, 29.0) - X = convert(Matrix{elty}, [1.0 2.0; 3.0 4.0]) - Y = convert(Matrix{elty}, [1.5 2.5; 3.5 4.5]) - @test dot(X, Y) == convert(elty, 35.0) - Z = Matrix{elty}[reshape(1:4, 2, 2), fill(1, 2, 2)] - @test dot(Z, Z) == convert(elty, 34.0) -end - -dot1(x, y) = invoke(dot, Tuple{Any,Any}, x, y) -dot2(x, y) = invoke(dot, Tuple{AbstractArray,AbstractArray}, x, y) -@testset "generic dot" begin - AA = [1+2im 3+4im; 5+6im 7+8im] - BB = [2+7im 4+1im; 3+8im 6+5im] - for A in (copy(AA), view(AA, 1:2, 1:2)), B in (copy(BB), view(BB, 1:2, 1:2)) - @test dot(A, B) == dot(vec(A), vec(B)) == dot1(A, B) == dot2(A, B) == dot(float.(A), float.(B)) - @test dot(Int[], Int[]) == 0 == dot1(Int[], Int[]) == dot2(Int[], Int[]) - @test_throws MethodError dot(Any[], Any[]) - @test_throws MethodError dot1(Any[], Any[]) - @test_throws MethodError dot2(Any[], Any[]) - for n1 = 0:2, n2 = 0:2, d in (dot, dot1, dot2) - if n1 != n2 - @test_throws DimensionMismatch d(1:n1, 1:n2) - else - @test d(1:n1, 1:n2) ≈ norm(1:n1)^2 - end - end - end -end - -@testset "Issue 11978" begin - A = Matrix{Matrix{Float64}}(undef, 2, 2) - A[1, 1] = Matrix(1.0I, 3, 3) - A[2, 2] = Matrix(1.0I, 2, 2) - A[1, 2] = Matrix(1.0I, 3, 2) - A[2, 1] = Matrix(1.0I, 2, 3) - b = Vector{Vector{Float64}}(undef, 2) - b[1] = fill(1.0, 3) - b[2] = fill(1.0, 2) - @test A * b == Vector{Float64}[[2, 2, 1], [2, 2]] -end - -@test_throws ArgumentError LinearAlgebra.copytri!(Matrix{Float64}(undef, 10, 10), 'Z') - -@testset "Issue 30055" begin - B = [1+im 2+im 3+im; 4+im 5+im 6+im; 7+im 9+im im] - A = UpperTriangular(B) - @test copy(transpose(A)) == transpose(A) - @test copy(A') == A' - A = LowerTriangular(B) - @test copy(transpose(A)) == transpose(A) - @test copy(A') == A' - B = Matrix{Matrix{Complex{Int}}}(undef, 2, 
2) - B[1, 1] = [1+im 2+im; 3+im 4+im] - B[2, 1] = [1+2im 1+3im; 1+3im 1+4im] - B[1, 2] = [7+im 8+2im; 9+3im 4im] - B[2, 2] = [9+im 8+im; 7+im 6+im] - A = UpperTriangular(B) - @test copy(transpose(A)) == transpose(A) - @test copy(A') == A' - A = LowerTriangular(B) - @test copy(transpose(A)) == transpose(A) - @test copy(A') == A' -end - -@testset "gemv! and gemm_wrapper for $elty" for elty in [Float32, Float64, ComplexF64, ComplexF32] - A10x10, x10, x11 = Array{elty}.(undef, ((10, 10), 10, 11)) - @test_throws DimensionMismatch LinearAlgebra.gemv!(x10, 'N', A10x10, x11) - @test_throws DimensionMismatch LinearAlgebra.gemv!(x11, 'N', A10x10, x10) - @test LinearAlgebra.gemv!(elty[], 'N', Matrix{elty}(undef, 0, 0), elty[]) == elty[] - @test LinearAlgebra.gemv!(x10, 'N', Matrix{elty}(undef, 10, 0), elty[]) == zeros(elty, 10) - - I0x0 = Matrix{elty}(I, 0, 0) - I10x10 = Matrix{elty}(I, 10, 10) - I10x11 = Matrix{elty}(I, 10, 11) - @test LinearAlgebra.gemm_wrapper('N', 'N', I10x10, I10x10) == I10x10 - @test_throws DimensionMismatch LinearAlgebra.gemm_wrapper!(I10x10, 'N', 'N', I10x11, I10x10) - @test_throws DimensionMismatch LinearAlgebra.gemm_wrapper!(I10x10, 'N', 'N', I0x0, I0x0) - - A = rand(elty, 3, 3) - @test LinearAlgebra.matmul3x3('T', 'N', A, Matrix{elty}(I, 3, 3)) == transpose(A) -end - -@testset "#13593, #13488" begin - aa = rand(3, 3) - bb = rand(3, 3) - for a in (copy(aa), view(aa, 1:3, 1:3)), b in (copy(bb), view(bb, 1:3, 1:3)) - @test_throws ArgumentError mul!(a, a, b) - @test_throws ArgumentError mul!(a, b, a) - @test_throws ArgumentError mul!(a, a, a) - end -end - -@testset "#35163" begin - # typemax(Int32) * Int32(1) + Int32(1) * Int32(1) should wrap around - # not promote to Int64, convert to Int32 and throw inexacterror - val = mul!(Int32[1], fill(typemax(Int32), 1, 1), Int32[1], Int32(1), Int32(1)) - @test val[1] == typemin(Int32) -end - -# Number types that lack conversion to the destination type -struct RootInt - i::Int -end -import Base: *, adjoint, transpose -import LinearAlgebra: Adjoint, Transpose -(*)(x::RootInt, y::RootInt) = x.i * y.i -(*)(x::RootInt, y::Integer) = x.i * y -adjoint(x::RootInt) = x -transpose(x::RootInt) = x - -@test Base.promote_op(*, RootInt, RootInt) === Int - -@testset "#14293" begin - a = [RootInt(3)] - C = [0;;] - mul!(C, a, transpose(a)) - @test C[1] == 9 - C = [1;;] - mul!(C, a, transpose(a), 2, 3) - @test C[1] == 21 - a = [RootInt(2), RootInt(10)] - @test a * adjoint(a) == [4 20; 20 100] - A = [RootInt(3) RootInt(5)] - @test A * a == [56] -end - -function test_mul(C, A, B, S) - mul!(C, A, B) - @test Array(A) * Array(B) ≈ C - @test A * B ≈ C - - # This is similar to how `isapprox` choose `rtol` (when `atol=0`) - # but consider all number types involved: - rtol = max(rtoldefault.(real.(eltype.((C, A, B))))...) - - rand!(C, S) - T = promote_type(eltype.((A, B))...) - α = T <: AbstractFloat ? rand(T) : rand(T(-10):T(10)) - β = T <: AbstractFloat ? rand(T) : rand(T(-10):T(10)) - βArrayC = β * Array(C) - βC = β * C - mul!(C, A, B, α, β) - @test α * Array(A) * Array(B) .+ βArrayC ≈ C rtol = rtol - @test α * A * B .+ βC ≈ C rtol = rtol -end - -@testset "mul! 
vs * for special types" begin - eltypes = [Float32, Float64, Int64(-100):Int64(100)] - for k in [3, 4, 10] - T = rand(eltypes) - bi1 = Bidiagonal(rand(T, k), rand(T, k - 1), rand([:U, :L])) - bi2 = Bidiagonal(rand(T, k), rand(T, k - 1), rand([:U, :L])) - tri1 = Tridiagonal(rand(T, k - 1), rand(T, k), rand(T, k - 1)) - tri2 = Tridiagonal(rand(T, k - 1), rand(T, k), rand(T, k - 1)) - stri1 = SymTridiagonal(rand(T, k), rand(T, k - 1)) - stri2 = SymTridiagonal(rand(T, k), rand(T, k - 1)) - C = rand(T, k, k) - specialmatrices = (bi1, bi2, tri1, tri2, stri1, stri2) - for A in specialmatrices - B = specialmatrices[rand(1:length(specialmatrices))] - test_mul(C, A, B, T) - end - for S in specialmatrices - l = rand(1:6) - B = randn(k, l) - C = randn(k, l) - test_mul(C, S, B, T) - A = randn(l, k) - C = randn(l, k) - test_mul(C, A, S, T) - end - end - for T in eltypes - A = Bidiagonal(rand(T, 2), rand(T, 1), rand([:U, :L])) - B = Bidiagonal(rand(T, 2), rand(T, 1), rand([:U, :L])) - C = randn(2, 2) - test_mul(C, A, B, T) - B = randn(2, 9) - C = randn(2, 9) - test_mul(C, A, B, T) - end - let - tri44 = Tridiagonal(randn(3), randn(4), randn(3)) - tri33 = Tridiagonal(randn(2), randn(3), randn(2)) - full43 = randn(4, 3) - full24 = randn(2, 4) - full33 = randn(3, 3) - full44 = randn(4, 4) - @test_throws DimensionMismatch mul!(full43, tri44, tri33) - @test_throws DimensionMismatch mul!(full44, tri44, tri33) - @test_throws DimensionMismatch mul!(full44, tri44, full43) - @test_throws DimensionMismatch mul!(full43, tri33, full43) - @test_throws DimensionMismatch mul!(full43, full43, tri44) - end -end - -# #18218 -module TestPR18218 -using Test -import Base.*, Base.+, Base.zero -struct TypeA - x::Int -end -Base.convert(::Type{TypeA}, x::Int) = TypeA(x) -struct TypeB - x::Int -end -struct TypeC - x::Int -end -Base.convert(::Type{TypeC}, x::Int) = TypeC(x) -zero(c::TypeC) = TypeC(0) -zero(::Type{TypeC}) = TypeC(0) -(*)(x::Int, a::TypeA) = TypeB(x * a.x) -(*)(a::TypeA, x::Int) = TypeB(a.x * x) -(+)(a::Union{TypeB,TypeC}, b::Union{TypeB,TypeC}) = TypeC(a.x + b.x) -A = TypeA[1 2; 3 4] -b = [1, 2] -d = A * b -@test typeof(d) == Vector{TypeC} -@test d == TypeC[5, 11] -end - -@testset "VecOrMat of Vectors" begin - X = rand(ComplexF64, 3, 3) - Xv1 = [X[:, j] for i in 1:1, j in 1:3] - Xv2 = [transpose(X[i, :]) for i in 1:3] - Xv3 = [transpose(X[i, :]) for i in 1:3, j in 1:1] - - XX = X * X - XtX = transpose(X) * X - XcX = X' * X - XXt = X * transpose(X) - XtXt = transpose(XX) - XcXt = X' * transpose(X) - XXc = X * X' - XtXc = transpose(X) * X' - XcXc = X' * X' - - @test (Xv1*Xv2)[1] ≈ XX - @test (Xv1*Xv3)[1] ≈ XX - @test transpose(Xv1) * Xv1 ≈ XtX - @test transpose(Xv2) * Xv2 ≈ XtX - @test (transpose(Xv3)*Xv3)[1] ≈ XtX - @test Xv1' * Xv1 ≈ XcX - @test Xv2' * Xv2 ≈ XcX - @test (Xv3'*Xv3)[1] ≈ XcX - @test (Xv1*transpose(Xv1))[1] ≈ XXt - @test Xv2 * transpose(Xv2) ≈ XXt - @test Xv3 * transpose(Xv3) ≈ XXt - @test transpose(Xv1) * transpose(Xv2) ≈ XtXt - @test transpose(Xv1) * transpose(Xv3) ≈ XtXt - @test Xv1' * transpose(Xv2) ≈ XcXt - @test Xv1' * transpose(Xv3) ≈ XcXt - @test (Xv1*Xv1')[1] ≈ XXc - @test Xv2 * Xv2' ≈ XXc - @test Xv3 * Xv3' ≈ XXc - @test transpose(Xv1) * Xv2' ≈ XtXc - @test transpose(Xv1) * Xv3' ≈ XtXc - @test Xv1' * Xv2' ≈ XcXc - @test Xv1' * Xv3' ≈ XcXc -end - -@testset "copyto! 
for matrices of matrices" begin - A = [randn(ComplexF64, 2,3) for _ in 1:2, _ in 1:3] - for (tfun, tM) in ((identity, 'N'), (transpose, 'T'), (adjoint, 'C')) - At = copy(tfun(A)) - B = zero.(At) - copyto!(B, axes(B, 1), axes(B, 2), tM, A, axes(A, tM == 'N' ? 1 : 2), axes(A, tM == 'N' ? 2 : 1)) - @test B == At - end -end - -@testset "method ambiguity" begin - # Ambiguity test is run inside a clean process. - # https://github.com/JuliaLang/julia/issues/28804 - script = joinpath(@__DIR__, "ambiguous_exec.jl") - cmd = `$(Base.julia_cmd()) --startup-file=no $script` - @test success(pipeline(cmd; stdout = stdout, stderr = stderr)) -end - -struct A32092 - x::Float64 -end -Base.:+(x::Float64, a::A32092) = x + a.x -Base.:*(x::Float64, a::A32092) = x * a.x -@testset "Issue #32092" begin - @test ones(2, 2) * [A32092(1.0), A32092(2.0)] == fill(3.0, (2,)) -end - -@testset "strong zero" begin - @testset for α in Any[false, 0.0, 0], n in 1:4 - C = ones(n, n) - A = fill!(zeros(n, n), NaN) - B = ones(n, n) - @test mul!(copy(C), A, B, α, 1.0) == C - end -end - -@testset "CartesianIndex handling in _modify!" begin - C = rand(10, 10) - A = rand(10, 10) - @test mul!(view(C, 1:10, 1:10), A, 0.5) == A * 0.5 -end - -@testset "Issue #33214: tiled generic mul!" begin - n = 100 - A = rand(n, n) - B = rand(n, n) - C = zeros(n, n) - mul!(C, A, B, -1 + 0im, 0) - D = -A * B - @test D ≈ C - - # Just in case dispatching on the surface API `mul!` is changed in the future, - # let's test the function where the tiled multiplication is defined. - fill!(C, 0) - LinearAlgebra.generic_matmatmul!(C, 'N', 'N', A, B, LinearAlgebra.MulAddMul(-1, 0)) - @test D ≈ C -end - -@testset "size zero types in matrix mult (see issue 39362)" begin - A = [missing missing; missing missing] - v = [missing, missing] - @test (A * v == v) === missing - M = fill(1.0, 2, 2) - a = fill(missing, 2, 1) - @test (a' * M * a == fill(missing, 1, 1)) === missing -end - - -@testset "multiplication of empty matrices without calling zero" begin - r, c = rand(0:9, 2) - A = collect(Number, rand(r, c)) - B = rand(c, 0) - C = A * B - @test size(C) == (r, 0) - @test_throws MethodError zero(eltype(C)) -end - -@testset "Issue #33873: genmatmul! 
with empty operands" begin - @test Matrix{Any}(undef, 0, 2) * Matrix{Any}(undef, 2, 3) == Matrix{Any}(undef, 0, 3) - @test_throws MethodError Matrix{Any}(undef, 2, 0) * Matrix{Any}(undef, 0, 3) - @test Matrix{Int}(undef, 2, 0) * Matrix{Int}(undef, 0, 3) == zeros(Int, 2, 3) -end - -@testset "3-arg *, order by type" begin - x = [1, 2im] - y = [im, 20, 30 + 40im] - z = [-1, 200 + im, -3] - A = [1 2 3im; 4 5 6+im] - B = [-10 -20; -30 -40] - a = 3 + im * round(Int, 10^6 * (pi - 3)) - b = 123 - - @test x' * A * y == (x' * A) * y == x' * (A * y) - @test y' * A' * x == (y' * A') * x == y' * (A' * x) - @test y' * transpose(A) * x == (y' * transpose(A)) * x == y' * (transpose(A) * x) - - @test B * A * y == (B * A) * y == B * (A * y) - - @test a * A * y == (a * A) * y == a * (A * y) - @test A * y * a == (A * y) * a == A * (y * a) - - @test a * B * A == (a * B) * A == a * (B * A) - @test B * A * a == (B * A) * a == B * (A * a) - - @test a * y' * z == (a * y') * z == a * (y' * z) - @test y' * z * a == (y' * z) * a == y' * (z * a) - - @test a * y * z' == (a * y) * z' == a * (y * z') - @test y * z' * a == (y * z') * a == y * (z' * a) - - @test a * x' * A == (a * x') * A == a * (x' * A) - @test x' * A * a == (x' * A) * a == x' * (A * a) - @test a * x' * A isa Adjoint{<:Any,<:Vector} - - @test a * transpose(x) * A == (a * transpose(x)) * A == a * (transpose(x) * A) - @test transpose(x) * A * a == (transpose(x) * A) * a == transpose(x) * (A * a) - @test a * transpose(x) * A isa Transpose{<:Any,<:Vector} - - @test x' * B * A == (x' * B) * A == x' * (B * A) - @test x' * B * A isa Adjoint{<:Any,<:Vector} - - @test y * x' * A == (y * x') * A == y * (x' * A) - y31 = reshape(y, 3, 1) - @test y31 * x' * A == (y31 * x') * A == y31 * (x' * A) - - vm = [rand(1:9, 2, 2) for _ in 1:3] - Mm = [rand(1:9, 2, 2) for _ in 1:3, _ in 1:3] - - @test vm' * Mm * vm == (vm' * Mm) * vm == vm' * (Mm * vm) - @test Mm * Mm' * vm == (Mm * Mm') * vm == Mm * (Mm' * vm) - @test vm' * Mm * Mm == (vm' * Mm) * Mm == vm' * (Mm * Mm) - @test Mm * Mm' * Mm == (Mm * Mm') * Mm == Mm * (Mm' * Mm) -end - -@testset "3-arg *, order by size" begin - M44 = randn(4, 4) - M24 = randn(2, 4) - M42 = randn(4, 2) - @test M44 * M44 * M44 ≈ (M44 * M44) * M44 ≈ M44 * (M44 * M44) - @test M42 * M24 * M44 ≈ (M42 * M24) * M44 ≈ M42 * (M24 * M44) - @test M44 * M42 * M24 ≈ (M44 * M42) * M24 ≈ M44 * (M42 * M24) -end - -@testset "4-arg *, by type" begin - y = [im, 20, 30 + 40im] - z = [-1, 200 + im, -3] - a = 3 + im * round(Int, 10^6 * (pi - 3)) - b = 123 - M = rand(vcat(1:9, im .* [1, 2, 3]), 3, 3) - N = rand(vcat(1:9, im .* [1, 2, 3]), 3, 3) - - @test a * b * M * y == (a * b) * (M * y) - @test a * b * M * N == (a * b) * (M * N) - @test a * M * N * y == (a * M) * (N * y) - @test a * y' * M * z == (a * y') * (M * z) - @test a * y' * M * N == (a * y') * (M * N) - - @test M * y * a * b == (M * y) * (a * b) - @test M * N * a * b == (M * N) * (a * b) - @test M * N * y * a == (a * M) * (N * y) - @test y' * M * z * a == (a * y') * (M * z) - @test y' * M * N * a == (a * y') * (M * N) - - @test M * N * conj(M) * y == (M * N) * (conj(M) * y) - @test y' * M * N * conj(M) == (y' * M) * (N * conj(M)) - @test y' * M * N * z == (y' * M) * (N * z) -end - -@testset "4-arg *, by size" begin - for shift in 1:5 - s1, s2, s3, s4, s5 = circshift(3:7, shift) - a = randn(s1, s2) - b = randn(s2, s3) - c = randn(s3, s4) - d = randn(s4, s5) - - # _quad_matmul - @test *(a, b, c, d) ≈ (a * b) * (c * d) - - # _tri_matmul(A,B,B,δ) - @test *(11.1, b, c, d) ≈ (11.1 * b) * (c * d) - @test *(a, b, c, 
99.9) ≈ (a * b) * (c * 99.9) - end -end - -#46865 -@testset "mul!() with non-const alpha, beta" begin - f!(C,A,B,alphas,betas) = mul!(C, A, B, alphas[1], betas[1]) - alphas = [1.0] - betas = [0.5] - for d in [2,3,4] # test native small-matrix cases as well as BLAS - A = rand(d,d) - B = copy(A) - C = copy(A) - f!(C, A, B, alphas, betas) - @test (@allocated f!(C, A, B, alphas, betas)) == 0 - end -end - -@testset "vector-matrix multiplication" begin - a = [1,2] - A = reshape([1,2], 2, 1) - B = [1 2] - @test a * B ≈ A * B - B = reshape([1,2], 2, 1) - @test a * B' ≈ A * B' - @test a * transpose(B) ≈ A * transpose(B) -end - -@testset "issue #56085" begin - struct Thing - data::Float64 - end - - Base.zero(::Type{Thing}) = Thing(0.) - Base.zero(::Thing) = Thing(0.) - Base.one(::Type{Thing}) = Thing(1.) - Base.one(::Thing) = Thing(1.) - Base.:+(t1::Thing, t::Thing...) = +(getfield.((t1, t...), :data)...) - Base.:*(t1::Thing, t::Thing...) = *(getfield.((t1, t...), :data)...) - - M = Float64[1 2; 3 4] - A = Thing.(M) - - @test A * A ≈ M * M -end - -end # module TestMatmul diff --git a/stdlib/LinearAlgebra/test/pinv.jl b/stdlib/LinearAlgebra/test/pinv.jl deleted file mode 100644 index c7268865a0505..0000000000000 --- a/stdlib/LinearAlgebra/test/pinv.jl +++ /dev/null @@ -1,186 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestPinv - -using Test, LinearAlgebra, Random - -Random.seed!(12345) - -function hilb(T::Type, n::Integer) - a = Matrix{T}(undef, n, n) - for i=1:n - for j=1:n - a[j,i]=one(T)/(i+j-one(T)) - end - end - return a -end -hilb(n::Integer) = hilb(Float64,n) - -function hilb(T::Type, m::Integer, n::Integer) - a = Matrix{T}(undef, m, n) - for i=1:n - for j=1:m - a[j,i]=one(T)/(i+j-one(T)) - end - end - return a -end -hilb(m::Integer, n::Integer) = hilb(Float64,m,n) - -function onediag(T::Type, m::Integer, n::Integer) - a=zeros(T,m,n) - for i=1:min(n,m) - a[i,i]=one(T)/(float(i)^5) - end - a[1,1] = 0 - a[min(m,n),min(m,n)] = 0 - return a -end -onediag(m::Integer, n::Integer) = onediag(Float64, m::Integer, n::Integer) - -function onediag_sparse(T::Type, n::Integer) - a=zeros(T,n) - for i=1:n - a[i]=one(T)/(float(i)^5) - end - a[1] = 0 - a[n] = 0 - return Diagonal(a) -end -onediag_sparse(n::Integer) = onediag_sparse(Float64, n::Integer) - -function tridiag(T::Type, m::Integer, n::Integer) - a=zeros(T,m,n) - for i=1:min(n,m) - a[i,i]=one(T)/(float(i)^5) - end - for i=1:min(n,m)-1 - a[i+1,i]=2*one(T)/(float(i)^5) - a[1,i+1]=2*one(T)/(float(i)^5) - end - return a -end -tridiag(m::Integer, n::Integer) = tridiag(Float64, m::Integer, n::Integer) - -function test_pinv(a,tol1,tol2) - m,n = size(a) - - apinv = @inferred pinv(a) - @test size(apinv) == (n,m) - @test norm(a*apinv*a-a)/norm(a) ≈ 0 atol=tol1 - @test norm(apinv*a*apinv-apinv)/norm(apinv) ≈ 0 atol=tol1 - b = a*randn(n) - x = apinv*b - @test norm(a*x-b)/norm(b) ≈ 0 atol=tol1 - - apinv = @inferred pinv(a,sqrt(eps(real(one(eltype(a)))))) - @test size(apinv) == (n,m) - @test norm(a*apinv*a-a)/norm(a) ≈ 0 atol=tol2 - @test norm(apinv*a*apinv-apinv)/norm(apinv) ≈ 0 atol=tol2 - b = a*randn(n) - x = apinv*b - @test norm(a*x-b)/norm(b) ≈ 0 atol=tol2 -end - -@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64) - @testset for (m, n) in [(1000, 100), (100, 100), (100, 1000)] - default_tol = (real(one(eltya))) * max(m,n) * 10 - tol1 = 1e-2 - tol2 = 1e-5 - if real(eltya) == Float32 - tol1 = 1e0 - tol2 = 1e-2 - end - @testset "dense/ill-conditioned matrix" begin - a = hilb(eltya, m, n) - 
test_pinv(a, tol1, tol2) - end - @testset "dense/diagonal matrix" begin - a = onediag(eltya, m, n) - test_pinv(a, default_tol, default_tol) - end - @testset "dense/tri-diagonal matrix" begin - a = tridiag(eltya, m, n) - test_pinv(a, default_tol, tol2) - end - @testset "Diagonal matrix" begin - a = onediag_sparse(eltya, m) - test_pinv(a, default_tol, default_tol) - end - @testset "Vector" begin - a = rand(eltya, m) - apinv = @inferred pinv(a) - @test pinv(hcat(a)) ≈ apinv - @test isa(apinv, eltya <: Complex ? Adjoint{eltya} : Transpose{eltya}) - end - @testset "Adjoint/Transpose vector" begin - a = rand(eltya, m)' - apinv = @inferred pinv(a) - @test pinv(vcat(a)) ≈ apinv - @test apinv isa Vector{eltya} - end - end - - @testset "zero valued numbers/vectors/matrices" begin - a = pinv(zero(eltya)) - @test a ≈ 0.0 - - a = pinv([zero(eltya); zero(eltya)]) - @test a[1] ≈ 0.0 - @test a[2] ≈ 0.0 - - a = pinv([zero(eltya); zero(eltya)]') - @test a[1] ≈ 0.0 - @test a[2] ≈ 0.0 - - a = pinv(Diagonal([zero(eltya); zero(eltya)])) - @test a.diag[1] ≈ 0.0 - @test a.diag[2] ≈ 0.0 - end - - @testset "hermitian matrices" begin - Q = ones(2,2) - C = pinv(Hermitian(Q))/0.25 - @test C ≈ ones(2,2) - end - - @testset "non-square diagonal matrices" begin - A = eltya[1 0 ; 0 1 ; 0 0] - B = pinv(A) - @test A*B*A ≈ A - @test B*A*B ≈ B - - A = eltya[1 0 0 ; 0 1 0] - B = pinv(A) - @test A*B*A ≈ A - @test B*A*B ≈ B - end - - if eltya <: LinearAlgebra.BlasReal - @testset "sub-normal numbers/vectors/matrices" begin - a = pinv(floatmin(eltya)/100) - @test a ≈ 0.0 - # Complex subnormal - a = pinv(floatmin(eltya)/100*(1+1im)) - @test a ≈ 0.0 - - a = pinv([floatmin(eltya); floatmin(eltya)]/100) - @test a[1] ≈ 0.0 - @test a[2] ≈ 0.0 - # Complex subnormal - a = pinv([floatmin(eltya); floatmin(eltya)]/100*(1+1im)) - @test a[1] ≈ 0.0 - @test a[2] ≈ 0.0 - a = pinv(Diagonal([floatmin(eltya); floatmin(eltya)]/100)) - @test a.diag[1] ≈ 0.0 - @test a.diag[2] ≈ 0.0 - # Complex subnormal - a = pinv(Diagonal([floatmin(eltya); floatmin(eltya)]/100*(1+1im))) - @test a.diag[1] ≈ 0.0 - @test a.diag[2] ≈ 0.0 - end - end -end - -end # module TestPinv diff --git a/stdlib/LinearAlgebra/test/qr.jl b/stdlib/LinearAlgebra/test/qr.jl deleted file mode 100644 index b6e9ce3a82743..0000000000000 --- a/stdlib/LinearAlgebra/test/qr.jl +++ /dev/null @@ -1,543 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestQR - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted, rmul!, lmul! - -n = 10 - -# Split n into 2 parts for tests needing two matrices -n1 = div(n, 2) -n2 = 2*n1 - -Random.seed!(1234325) - -areal = randn(n,n)/2 -aimg = randn(n,n)/2 -a2real = randn(n,n)/2 -a2img = randn(n,n)/2 -breal = randn(n,2)/2 -bimg = randn(n,2)/2 - -# helper functions to unambiguously recover explicit forms of an implicit QR Q -squareQ(Q::LinearAlgebra.AbstractQ) = Q*I -rectangularQ(Q::LinearAlgebra.AbstractQ) = Matrix(Q) - -@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) - raw_a = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal) - raw_a2 = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(a2real, a2img) : a2real) - asym = raw_a' + raw_a # symmetric indefinite - apd = raw_a' * raw_a # symmetric positive-definite - ε = εa = eps(abs(float(one(eltya)))) - - @testset for eltyb in (Float32, Float64, ComplexF32, ComplexF64, Int) - raw_b = eltyb == Int ? 
rand(1:5, n, 2) : convert(Matrix{eltyb}, eltyb <: Complex ? complex.(breal, bimg) : breal) - εb = eps(abs(float(one(eltyb)))) - ε = max(εa, εb) - tab = promote_type(eltya, eltyb) - - @testset "QR decomposition of a Number" begin - α = rand(eltyb) - aα = fill(α, 1, 1) - @test qr(α).Q * qr(α).R ≈ qr(aα).Q * qr(aα).R - @test abs(qr(α).Q[1,1]) ≈ one(eltyb) - end - - for (a, b) in ((raw_a, raw_b), - (view(raw_a, 1:n-1, 1:n-1), view(raw_b, 1:n-1, 1))) - a_1 = size(a, 1) - @testset "QR decomposition (without pivoting)" begin - qra = @inferred qr(a) - q, r = qra.Q, qra.R - @test_throws FieldError qra.Z - @test q'*squareQ(q) ≈ Matrix(I, a_1, a_1) - @test q*squareQ(q)' ≈ Matrix(I, a_1, a_1) - @test q'*Matrix(1.0I, a_1, a_1)' ≈ squareQ(q)' - @test squareQ(q)'q ≈ Matrix(I, a_1, a_1) - @test Matrix(1.0I, a_1, a_1)'q' ≈ squareQ(q)' - @test q*r ≈ a - @test a*(qra\b) ≈ b atol=3000ε - @test Array(qra) ≈ a - sq = size(q.factors, 2) - @test *(Matrix{eltyb}(I, sq, sq), adjoint(q)) * squareQ(q) ≈ Matrix(I, sq, sq) atol=5000ε - if eltya != Int - @test Matrix{eltyb}(I, a_1, a_1)*q ≈ squareQ(convert(LinearAlgebra.AbstractQ{tab}, q)) - ac = copy(a) - @test qr!(a[:, 1:5])\b == qr!(view(ac, :, 1:5))\b - end - qrstring = sprint((t, s) -> show(t, "text/plain", s), qra) - rstring = sprint((t, s) -> show(t, "text/plain", s), r) - qstring = sprint((t, s) -> show(t, "text/plain", s), q) - @test qrstring == "$(summary(qra))\nQ factor: $qstring\nR factor:\n$rstring" - # iterate - q, r = qra - @test q*r ≈ a - # property names - @test Base.propertynames(qra) == (:R, :Q) - end - @testset "Thin QR decomposition (without pivoting)" begin - qra = @inferred qr(a[:, 1:n1], NoPivot()) - q,r = qra.Q, qra.R - @test_throws FieldError qra.Z - @test q'*squareQ(q) ≈ Matrix(I, a_1, a_1) - @test q'*rectangularQ(q) ≈ Matrix(I, a_1, n1) - @test q*r ≈ a[:, 1:n1] - @test q*b[1:n1] ≈ rectangularQ(q)*b[1:n1] atol=100ε - @test q*b ≈ squareQ(q)*b atol=100ε - if eltya != Int - @test Array{eltya}(q) ≈ rectangularQ(q) - end - @test_throws DimensionMismatch q*b[1:n1 + 1] - @test_throws DimensionMismatch b[1:n1 + 1]*q' - sq = size(q.factors, 2) - @test *(UpperTriangular(Matrix{eltyb}(I, sq, sq)), adjoint(q))*squareQ(q) ≈ Matrix(I, n1, a_1) atol=5000ε - if eltya != Int - @test Matrix{eltyb}(I, a_1, a_1)*q ≈ squareQ(convert(LinearAlgebra.AbstractQ{tab},q)) - end - # iterate - q, r = qra - @test q*r ≈ a[:, 1:n1] - # property names - @test Base.propertynames(qra) == (:R, :Q) - end - @testset "(Automatic) Fat (pivoted) QR decomposition" begin - @inferred qr(a, ColumnNorm()) - - qrpa = factorize(a[1:n1,:]) - q,r = qrpa.Q, qrpa.R - @test_throws FieldError qrpa.Z - p = qrpa.p - @test q'*squareQ(q) ≈ Matrix(I, n1, n1) - @test q*squareQ(q)' ≈ Matrix(I, n1, n1) - sq = size(q, 2); - @test (UpperTriangular(Matrix{eltya}(I, sq, sq))*q')*squareQ(q) ≈ Matrix(I, n1, n1) - @test q*r ≈ (isa(qrpa,QRPivoted) ? 
a[1:n1,p] : a[1:n1,:]) - @test q*r[:,invperm(p)] ≈ a[1:n1,:] - @test q*r*transpose(qrpa.P) ≈ a[1:n1,:] - @test a[1:n1,:]*(qrpa\b[1:n1]) ≈ b[1:n1] atol=5000ε - @test Array(qrpa) ≈ a[1:5,:] - if eltya != Int - @test Array{eltya}(q) ≈ Matrix(q) - end - @test_throws DimensionMismatch q*b[1:n1+1] - @test_throws DimensionMismatch b[1:n1+1]*q' - if eltya != Int - @test Matrix{eltyb}(I, n1, n1)*q ≈ squareQ(convert(LinearAlgebra.AbstractQ{tab},q)) - end - # iterate - q, r, p = qrpa - @test q*r[:,invperm(p)] ≈ a[1:n1,:] - # property names - @test Base.propertynames(qrpa) == (:R, :Q, :p, :P) - end - @testset "(Automatic) Thin (pivoted) QR decomposition" begin - qrpa = factorize(a[:,1:n1]) - q,r = qrpa.Q, qrpa.R - @test_throws FieldError qrpa.Z - p = qrpa.p - @test q'*squareQ(q) ≈ Matrix(I, a_1, a_1) - @test q*squareQ(q)' ≈ Matrix(I, a_1, a_1) - @test q*r ≈ a[:,p] - @test q*r[:,invperm(p)] ≈ a[:,1:n1] - @test Array(qrpa) ≈ a[:,1:5] - if eltya != Int - @test Array{eltya}(q) ≈ Matrix(q) - end - @test_throws DimensionMismatch q*b[1:n1+1] - @test_throws DimensionMismatch b[1:n1+1]*q' - sq = size(q.factors, 2) - @test *(UpperTriangular(Matrix{eltyb}(I, sq, sq)), adjoint(q))*squareQ(q) ≈ Matrix(I, n1, a_1) atol=5000ε - if eltya != Int - @test Matrix{eltyb}(I, a_1, a_1)*q ≈ squareQ(convert(LinearAlgebra.AbstractQ{tab},q)) - end - qrstring = sprint((t, s) -> show(t, "text/plain", s), qrpa) - rstring = sprint((t, s) -> show(t, "text/plain", s), r) - qstring = sprint((t, s) -> show(t, "text/plain", s), q) - pstring = sprint((t, s) -> show(t, "text/plain", s), p) - @test qrstring == "$(summary(qrpa))\nQ factor: $qstring\nR factor:\n$rstring\npermutation:\n$pstring" - # iterate - q, r, p = qrpa - @test q*r[:,invperm(p)] ≈ a[:,1:n1] - # property names - @test Base.propertynames(qrpa) == (:R, :Q, :p, :P) - end - end - if eltya != Int - @testset "Matmul with QR factorizations" begin - a = raw_a - qrpa = factorize(a[:,1:n1]) - q, r = qrpa.Q, qrpa.R - @test rmul!(copy(squareQ(q)'), q) ≈ Matrix(I, n, n) - @test_throws DimensionMismatch rmul!(Matrix{eltya}(I, n+1, n+1),q) - @test rmul!(squareQ(q), adjoint(q)) ≈ Matrix(I, n, n) - @test_throws DimensionMismatch rmul!(Matrix{eltya}(I, n+1, n+1), adjoint(q)) - @test_throws ErrorException size(q,-1) - @test_throws DimensionMismatch LinearAlgebra.lmul!(q,zeros(eltya,n1+1)) - @test_throws DimensionMismatch LinearAlgebra.lmul!(adjoint(q), zeros(eltya,n1+1)) - - b = similar(a); rand!(b) - c = similar(a) - d = similar(a[:,1:n1]) - @test mul!(c, q, b) ≈ q*b - @test mul!(d, q, r) ≈ q*r ≈ a[:,qrpa.p] - @test mul!(c, q', b) ≈ q'*b - @test mul!(d, q', a[:,qrpa.p])[1:n1,:] ≈ r - @test all(x -> abs(x) < ε*norm(a), d[n1+1:end,:]) - @test mul!(c, b, q) ≈ b*q - @test mul!(c, b, q') ≈ b*q' - @test_throws DimensionMismatch mul!(Matrix{eltya}(I, n+1, n), q, b) - - qra = qr(a[:,1:n1], NoPivot()) - q, r = qra.Q, qra.R - @test rmul!(copy(squareQ(q)'), q) ≈ Matrix(I, n, n) - @test_throws DimensionMismatch rmul!(Matrix{eltya}(I, n+1, n+1),q) - @test rmul!(squareQ(q), adjoint(q)) ≈ Matrix(I, n, n) - @test_throws DimensionMismatch rmul!(Matrix{eltya}(I, n+1, n+1),adjoint(q)) - @test_throws ErrorException size(q,-1) - @test_throws DimensionMismatch q * Matrix{Int8}(I, n+4, n+4) - - @test mul!(c, q, b) ≈ q*b - @test mul!(d, q, r) ≈ a[:,1:n1] - @test mul!(c, q', b) ≈ q'*b - @test mul!(d, q', a[:,1:n1])[1:n1,:] ≈ r - @test all(x -> abs(x) < ε*norm(a), d[n1+1:end,:]) - @test mul!(c, b, q) ≈ b*q - @test mul!(c, b, q') ≈ b*q' - @test_throws DimensionMismatch mul!(Matrix{eltya}(I, n+1, n), q, b) - - b = 
similar(a[:,1]); rand!(b) - c = similar(a[:,1]) - d = similar(a[:,1]) - @test mul!(c, q, b) ≈ q*b - @test mul!(c, q', b) ≈ q'*b - @test_throws DimensionMismatch mul!(Vector{eltya}(undef, n+1), q, b) - end - end - end -end - -@testset "transpose errors" begin - @test_throws ArgumentError transpose(qr(randn(ComplexF64,3,3))) - @test_throws ArgumentError transpose(qr(randn(ComplexF64,3,3), NoPivot())) - @test_throws ArgumentError transpose(qr(big.(randn(ComplexF64,3,3)))) -end - -@testset "Issue 7304" begin - A = [-√.5 -√.5; -√.5 √.5] - Q = rectangularQ(qr(A).Q) - @test norm(A-Q) < eps() -end - -@testset "qr on AbstractVector" begin - vr = [3.0, 4.0] - for Tr in (Float32, Float64) - for T in (Tr, Complex{Tr}) - v = convert(Vector{T}, vr) - nv, nm = qr(v) - @test norm(nv*Matrix(I, (2,2)) - [-0.6 -0.8; -0.8 0.6], Inf) < eps(Tr) - @test nm == fill(-5.0, 1, 1) - end - end -end - -@testset "QR on Ints" begin - # not sure what to do about this edge case now that we build decompositions - # for qr(...), so for now just commenting this out - # @test qr(Int[]) == (Int[],1) - - B = rand(7,2) - @test (1:7)\B ≈ Vector(1:7)\B -end - -@testset "Issue 16520" begin - @test_throws DimensionMismatch rand(3,2)\(1:5) -end - -@testset "Issue 22810" begin - A = zeros(1, 2) - B = zeros(1, 1) - @test A \ B == zeros(2, 1) - @test qr(A, ColumnNorm()) \ B == zeros(2, 1) -end - -@testset "Issue 24107" begin - A = rand(200,2) - @test A \ range(0, stop=1, length=200) == A \ Vector(range(0, stop=1, length=200)) -end - -@testset "Issue 24589. Promotion of rational matrices" begin - A = rand(1//1:5//5, 4,3) - @test Matrix(first(qr(A))) == Matrix(first(qr(float(A)))) -end - -@testset "Issue Test Factorization fallbacks for rectangular problems" begin - A = randn(3,2) - Ac = copy(A') - b = randn(3) - b0 = copy(b) - c = randn(2) - B = randn(3,3) - B0 = copy(B) - C = randn(2,3) - @test A \b ≈ ldiv!(c, qr(A ), b) - @test b == b0 - @test A \B ≈ ldiv!(C, qr(A ), B) - @test B == B0 - c0 = copy(c) - C0 = copy(C) - @test Ac\c ≈ ldiv!(b, qr(Ac, ColumnNorm()), c) - @test c0 == c - @test Ac\C ≈ ldiv!(B, qr(Ac, ColumnNorm()), C) - @test C0 == C -end - -@testset "Issue reflector of zero-length vector" begin - a = [2.0] - x = view(a,1:0) - τ = LinearAlgebra.reflector!(view(x,1:0)) - @test τ == 0.0 - - b = reshape([3.0],1,1) - @test isempty(LinearAlgebra.reflectorApply!(x, τ, view(b,1:0,:))) - @test b[1] == 3.0 -end - -@testset "det(Q::Union{QRCompactWYQ, QRPackedQ})" begin - # 40 is the number larger than the default block size 36 of QRCompactWY - @testset for n in [1:3; 40], m in [1:3; 40], pivot in (NoPivot(), ColumnNorm()) - @testset "real" begin - @testset for k in 0:min(n, m, 5) - A = cat(Array(I(k)), randn(n - k, m - k); dims=(1, 2)) - Q, = qr(A, pivot) - @test det(Q) ≈ det(Q*Matrix(I, size(Q, 1), size(Q, 1))) - @test abs(det(Q)) ≈ 1 - end - end - @testset "complex" begin - @testset for k in 0:min(n, m, 5) - A = cat(Array(I(k)), randn(ComplexF64, n - k, m - k); dims=(1, 2)) - Q, = qr(A, pivot) - @test det(Q) ≈ det(Q*Matrix(I, size(Q, 1), size(Q, 1))) - @test abs(det(Q)) ≈ 1 - end - end - end -end - -@testset "inv(::AbstractQ)" begin - for T in (Float64, ComplexF64) - Q = qr(randn(T,5,5)).Q - @test inv(Q) === Q' - @test inv(Q)' === inv(Q') === Q - end -end - -@testset "QR factorization of Q" begin - for T in (Float32, Float64, ComplexF32, ComplexF64) - Q1, R1 = qr(randn(T,5,5)) - Q2, R2 = qr(Q1) - @test Matrix(Q1) ≈ Matrix(Q2) - @test R2 ≈ I - end -end - -@testset "Generation of orthogonal matrices" begin - for T in (Float32, Float64) 
- n = 5 - Q, R = qr(randn(T,n,n)) - O = Q * Diagonal(sign.(diag(R))) - @test O' * O ≈ I - end -end - -@testset "Multiplication of Q by special matrices" begin - for T in (Float32, Float64, ComplexF32, ComplexF64) - n = 5 - Q, R = qr(randn(T,n,n)) - Qmat = Matrix(Q) - D = Diagonal(randn(T,n)) - @test Q * D ≈ Qmat * D - @test D * Q ≈ D * Qmat - J = 2*I - @test Q * J ≈ Qmat * J - @test J * Q ≈ J * Qmat - end -end - -@testset "copyto! for Q" begin - for T in (Float32, Float64, ComplexF32, ComplexF64) - n = 5 - Q, R = qr(randn(T,n,n)) - Qmat = Matrix(Q) - dest1 = Matrix{T}(undef, size(Q)) - copyto!(dest1, Q) - @test dest1 ≈ Qmat - dest2 = PermutedDimsArray(Matrix{T}(undef, size(Q)), (1, 2)) - copyto!(dest2, Q) - @test dest2 ≈ Qmat - dest3 = PermutedDimsArray(Matrix{T}(undef, size(Q)), (2, 1)) - copyto!(dest3, Q) - @test dest3 ≈ Qmat - end -end - -@testset "adjoint of QR" begin - n = 5 - B = randn(5, 2) - - @testset "size(b)=$(size(b))" for b in (B[:, 1], B) - @testset "size(A)=$(size(A))" for A in ( - randn(n, n), - # Wide problems become minimum norm (in x) problems similarly to LQ - randn(n + 2, n), - complex.(randn(n, n), randn(n, n))) - - @testset "QRCompactWY" begin - F = qr(A) - x = F'\b - @test x ≈ A'\b - @test length(size(x)) == length(size(b)) - end - - @testset "QR" begin - F = LinearAlgebra.qrfactUnblocked!(copy(A)) - x = F'\b - @test x ≈ A'\b - @test length(size(x)) == length(size(b)) - end - - @testset "QRPivoted" begin - F = LinearAlgebra.qr(A, ColumnNorm()) - x = F'\b - @test x ≈ A'\b - @test length(size(x)) == length(size(b)) - end - end - @test_throws DimensionMismatch("overdetermined systems are not supported") qr(randn(n - 2, n))'\b - @test_throws DimensionMismatch("arguments must have the same number of rows") qr(randn(n, n + 1))'\b - @test_throws DimensionMismatch("overdetermined systems are not supported") LinearAlgebra.qrfactUnblocked!(randn(n - 2, n))'\b - @test_throws DimensionMismatch("arguments must have the same number of rows") LinearAlgebra.qrfactUnblocked!(randn(n, n + 1))'\b - @test_throws DimensionMismatch("overdetermined systems are not supported") qr(randn(n - 2, n), ColumnNorm())'\b - @test_throws DimensionMismatch("arguments must have the same number of rows") qr(randn(n, n + 1), ColumnNorm())'\b - end -end - -@testset "issue #38974" begin - A = qr(ones(3, 1)) - B = I(3) - C = B*A.Q' - @test C ≈ A.Q * Matrix(I, 3, 3) - @test A.Q' * B ≈ A.Q * Matrix(I, 3, 3) -end - -@testset "convert between eltypes" begin - a = rand(Float64, 10, 5) - qra = qr(a) - qrwy = LinearAlgebra.QRCompactWY{Float32}(qra.factors, qra.T) - @test Array(qrwy) ≈ Array(qr(Float32.(a))) - @test eltype(qrwy.factors) == eltype(qrwy.T) == Float32 - qra = qr(a, ColumnNorm()) - qrp = QRPivoted{Float32}(qra.factors, qra.τ, qra.jpvt) - @test Array(qrp) ≈ Array(qr(Float32.(a), ColumnNorm())) - @test eltype(qrp.factors) == eltype(qrp.τ) == Float32 - a = rand(Float16, 10, 5) - qra = qr(a) - qrnonblas = QR{ComplexF16}(qra.factors, qra.τ) - @test Array(qrnonblas) ≈ Array(qr(ComplexF16.(a))) - @test eltype(qrnonblas.factors) == eltype(qrnonblas.τ) == ComplexF16 -end - -# We use approximate equals to get MKL.jl tests to pass. 
-@testset "optimized getindex for an AbstractQ" begin - for T in [Float64, ComplexF64] - Q = qr(rand(T, 4, 4)) - Q2 = Q.Q - M = Matrix(Q2) - for j in axes(M, 2) - @test Q2[:, j] ≈ M[:, j] - for i in axes(M, 1) - @test Q2[i, :] ≈ M[i, :] - @test Q2[i, j] ≈ M[i, j] - end - end - @test Q2[:] ≈ M[:] - @test Q2[:, :] ≈ M[:, :] - @test Q2[:, :, :] ≈ M[:, :, :] - end - # Check that getindex works if copy returns itself (#44729) - struct MyIdentity{T} <: LinearAlgebra.AbstractQ{T} end - Base.size(::MyIdentity, dim::Integer) = dim in (1,2) ? 2 : 1 - Base.size(::MyIdentity) = (2, 2) - Base.copy(J::MyIdentity) = J - LinearAlgebra.lmul!(::MyIdentity{T}, M::Array{T}) where {T} = M - @test MyIdentity{Float64}()[1,:] == [1.0, 0.0] -end - -@testset "issue #48911" begin - # testcase in the original issue - # test ldiv!(::QRPivoted, ::AbstractVector) - A = Complex{BigFloat}[1+im 1-im] - b = Complex{BigFloat}[3+im] - x = A\b - AF = Complex{Float64}[1+im 1-im] - bf = Complex{Float64}[3+im] - xf = AF\bf - @test x ≈ xf - - # test ldiv!(::QRPivoted, ::AbstractVector) - A = Complex{BigFloat}[1+im 2-2im 3+3im; 4-4im 5+5im 6-6im] - b = Complex{BigFloat}[1+im; 0] - x = A\b - AF = Complex{Float64}[1+im 2-2im 3+3im; 4-4im 5+5im 6-6im] - bf = Complex{Float64}[1+im; 0] - xf = AF\bf - @test x ≈ xf - - # test ldiv!(::QRPivoted, ::AbstractMatrix) - C = Complex{BigFloat}[1+im 2-2im 3+3im; 4-4im 5+5im 6-6im] - D = Complex{BigFloat}[1+im 1-im; 0 0] - x = C\D - CF = Complex{Float64}[1+im 2-2im 3+3im; 4-4im 5+5im 6-6im] - DF = Complex{Float64}[1+im 1-im; 0 0] - xf = CF\DF - @test x ≈ xf -end - -@testset "issue #53451" begin - # in the issue it was noted that QR factorizations of zero-column matrices - # were possible, but zero row-matrices errored, because LAPACK does not - # accept these empty matrices. now, the `geqrt!` call should be forwarded only - # if both matrix dimensions are positive. - - for dimA in (0, 1, 2, 4) - for F in (Float32, Float64, ComplexF32, ComplexF64, BigFloat) - # this should have worked before, Q is square, and R is 0 × 0: - A_zero_cols = rand(F, dimA, 0) - qr_zero_cols = qr(A_zero_cols) - @test size(qr_zero_cols.Q) == (dimA, dimA) - @test size(qr_zero_cols.R) == (0, 0) - @test qr_zero_cols.Q == LinearAlgebra.I(dimA) - - # this should work now, Q is 0 × 0, and R has `dimA` columns: - A_zero_rows = rand(F, 0, dimA) - qr_zero_rows = qr(A_zero_rows) - @test size(qr_zero_rows.Q) == (0, 0) - @test size(qr_zero_rows.R) == (0, dimA) - end - end -end - -@testset "issue #53214" begin - # Test that the rank of a QRPivoted matrix is computed correctly - @test rank(qr([1.0 0.0; 0.0 1.0], ColumnNorm())) == 2 - @test rank(qr([1.0 0.0; 0.0 0.9], ColumnNorm()), rtol=0.95) == 1 - @test rank(qr([1.0 0.0; 0.0 0.9], ColumnNorm()), atol=0.95) == 1 - @test rank(qr([1.0 0.0; 0.0 1.0], ColumnNorm()), rtol=1.01) == 0 - @test rank(qr([1.0 0.0; 0.0 1.0], ColumnNorm()), atol=1.01) == 0 - - @test rank(qr([1.0 2.0; 2.0 4.0], ColumnNorm())) == 1 - @test rank(qr([1.0 2.0 3.0; 4.0 5.0 6.0 ; 7.0 8.0 9.0], ColumnNorm())) == 2 -end - -end # module TestQR diff --git a/stdlib/LinearAlgebra/test/runtests.jl b/stdlib/LinearAlgebra/test/runtests.jl deleted file mode 100644 index d64da9899ca86..0000000000000 --- a/stdlib/LinearAlgebra/test/runtests.jl +++ /dev/null @@ -1,10 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license
-using Test, LinearAlgebra
-
-for file in readlines(joinpath(@__DIR__, "testgroups"))
-    include(file * ".jl")
-end
-
-@testset "Docstrings" begin
-    @test isempty(Docs.undocumented_names(LinearAlgebra))
-end
diff --git a/stdlib/LinearAlgebra/test/schur.jl b/stdlib/LinearAlgebra/test/schur.jl
deleted file mode 100644
index f3d494fba7942..0000000000000
--- a/stdlib/LinearAlgebra/test/schur.jl
+++ /dev/null
@@ -1,221 +0,0 @@
-# This file is a part of Julia. License is MIT: https://julialang.org/license
-
-module TestSchur
-
-using Test, LinearAlgebra, Random
-using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted
-
-n = 10
-
-# Split n into 2 parts for tests needing two matrices
-n1 = div(n, 2)
-n2 = 2*n1
-
-Random.seed!(1234321)
-
-areal = randn(n,n)/2
-aimg = randn(n,n)/2
-
-@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, Int)
-    a = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(areal, aimg) : areal)
-    asym = a' + a # symmetric indefinite
-    apd = a' * a # symmetric positive-definite
-    for (a, asym, apd) in ((a, asym, apd),
-                           (view(a, 1:n, 1:n),
-                            view(asym, 1:n, 1:n),
-                            view(apd, 1:n, 1:n)))
-        ε = εa = eps(abs(float(one(eltya))))
-
-        d,v = eigen(a)
-        f = schur(a)
-        @test f.vectors*f.Schur*f.vectors' ≈ a
-        @test sort(real(f.values)) ≈ sort(real(d))
-        @test sort(imag(f.values)) ≈ sort(imag(d))
-        @test istriu(f.Schur) || eltype(a)<:Real
-        @test convert(Array, f) ≈ a
-        @test_throws FieldError f.A
-
-        sch, vecs, vals = schur(UpperTriangular(triu(a)))
-        @test vecs*sch*vecs' ≈ triu(a)
-        sch, vecs, vals = schur(UnitUpperTriangular(triu(a)))
-        @test vecs*sch*vecs' ≈ UnitUpperTriangular(triu(a))
-        sch, vecs, vals = schur(LowerTriangular(tril(a)))
-        @test vecs*sch*vecs' ≈ tril(a)
-        sch, vecs, vals = schur(UnitLowerTriangular(tril(a)))
-        @test vecs*sch*vecs' ≈ UnitLowerTriangular(tril(a))
-        sch, vecs, vals = schur(Hermitian(asym))
-        @test vecs*sch*vecs' ≈ asym
-        sch, vecs, vals = schur(Symmetric(a + transpose(a)))
-        @test vecs*sch*vecs' ≈ a + transpose(a)
-        sch, vecs, vals = schur(Tridiagonal(a + transpose(a)))
-        @test vecs*sch*vecs' ≈ Tridiagonal(a + transpose(a))
-        sch, vecs, vals = schur(Bidiagonal(a, :U))
-        @test vecs*sch*vecs' ≈ Bidiagonal(a, :U)
-        sch, vecs, vals = schur(Bidiagonal(a, :L))
-        @test vecs*sch*vecs' ≈ Bidiagonal(a, :L)
-
-        tstring = sprint((t, s) -> show(t, "text/plain", s), f.T)
-        zstring = sprint((t, s) -> show(t, "text/plain", s), f.Z)
-        vstring = sprint((t, s) -> show(t, "text/plain", s), f.values)
-        fstring = sprint((t, s) -> show(t, "text/plain", s), f)
-        @test fstring == "$(summary(f))\nT factor:\n$tstring\nZ factor:\n$(zstring)\neigenvalues:\n$vstring"
-        @testset "Reorder Schur" begin
-            # use asym for real schur to enforce tridiag structure
-            # avoiding partly selection of conj. eigenvalues
-            ordschura = eltya <: Complex ? 
a : asym - S = schur(ordschura) - select = bitrand(n) - O = ordschur(S, select) - sum(select) != 0 && @test S.values[findall(select)] ≈ O.values[1:sum(select)] - @test O.vectors*O.Schur*O.vectors' ≈ ordschura - @test_throws FieldError f.A - Snew = LinearAlgebra.Schur(S.T, S.Z, S.values) - SchurNew = ordschur!(copy(Snew), select) - @test O.vectors ≈ SchurNew.vectors - @test O.Schur ≈ SchurNew.Schur - end - - if isa(a, Array) - a1_sf = a[1:n1, 1:n1] - a2_sf = a[n1+1:n2, n1+1:n2] - else - a1_sf = view(a, 1:n1, 1:n1) - a2_sf = view(a, n1+1:n2, n1+1:n2) - end - @testset "Generalized Schur" begin - f = schur(a1_sf, a2_sf) - @test f.Q*f.S*f.Z' ≈ a1_sf - @test f.Q*f.T*f.Z' ≈ a2_sf - @test istriu(f.S) || eltype(a)<:Real - @test istriu(f.T) || eltype(a)<:Real - @test_throws FieldError f.A - - sstring = sprint((t, s) -> show(t, "text/plain", s), f.S) - tstring = sprint((t, s) -> show(t, "text/plain", s), f.T) - qstring = sprint((t, s) -> show(t, "text/plain", s), f.Q) - zstring = sprint((t, s) -> show(t, "text/plain", s), f.Z) - αstring = sprint((t, s) -> show(t, "text/plain", s), f.α) - βstring = sprint((t, s) -> show(t, "text/plain", s), f.β) - fstring = sprint((t, s) -> show(t, "text/plain", s), f) - @test fstring == "$(summary(f))\nS factor:\n$sstring\nT factor:\n$(tstring)\nQ factor:\n$(qstring)\nZ factor:\n$(zstring)\nα:\n$αstring\nβ:\n$βstring" - end - @testset "Reorder Generalized Schur" begin - NS = schur(a1_sf, a2_sf) - # Currently just testing with selecting gen eig values < 1 - select = abs2.(NS.values) .< 1 - m = sum(select) - S = ordschur(NS, select) - # Make sure that the new factorization still factors matrix - @test S.Q*S.S*S.Z' ≈ a1_sf - @test S.Q*S.T*S.Z' ≈ a2_sf - # Make sure that we have sorted it correctly - @test NS.values[findall(select)] ≈ S.values[1:m] - - Snew = LinearAlgebra.GeneralizedSchur(NS.S, NS.T, NS.alpha, NS.beta, NS.Q, NS.Z) - SchurNew = ordschur!(copy(Snew), select) - @test S.Q ≈ SchurNew.Q - @test S.S ≈ SchurNew.S - @test S.T ≈ SchurNew.T - @test S.Z ≈ SchurNew.Z - @test S.alpha ≈ SchurNew.alpha - @test S.beta ≈ SchurNew.beta - sS,sT,sQ,sZ = schur(a1_sf,a2_sf) - @test NS.Q ≈ sQ - @test NS.T ≈ sT - @test NS.S ≈ sS - @test NS.Z ≈ sZ - end - end - @testset "0x0 matrix" for A in (zeros(eltya, 0, 0), view(rand(eltya, 2, 2), 1:0, 1:0)) - T, Z, λ = LinearAlgebra.schur(A) - @test T == A - @test Z == A - @test λ == zeros(0) - end - - if eltya <: Real - @testset "quasitriangular to triangular" begin - S = schur(a) - SC = Schur{Complex}(S) - @test eltype(SC) == complex(eltype(S)) - @test istriu(SC.T) - @test SC.Z*SC.Z' ≈ I - @test SC.Z*SC.T*SC.Z' ≈ a - @test sort(SC.values,by=LinearAlgebra.eigsortby) ≈ sort(S.values,by=LinearAlgebra.eigsortby) - @test Schur{Complex}(SC) === SC === Schur{eltype(SC)}(SC) - @test Schur{eltype(S)}(S) === S - if eltype(S) === Float32 - S64 = Schur{Float64}(S) - @test eltype(S64) == Float64 - @test S64.Z == S.Z - @test S64.T == S.T - @test S64.values == S.values - end - end - end - - @testset "0x0 $eltya matrices" begin - A = zeros(eltya, 0, 0) - B = zeros(eltya, 0, 0) - S = LinearAlgebra.schur(A, B) - @test S.S == A - @test S.T == A - @test S.Q == A - @test S.Z == A - @test S.alpha == zeros(0) - @test S.beta == zeros(0) - end -end - -@testset "Generalized Schur convergence" begin - # Check for convergence issues, #40279 - problematic_pencils = [ - ( ComplexF64[0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 
0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 3.7796350217469814 -3.3125635598133054 0.0 0.0 0.0 0.0 0.0 0.0 6.418270043493963 -6.625127119626611 0.0 0.0 0.0 0.0 0.0 -1.0; -3.312563559813306 3.779635021746982 0.0 0.0 0.0 0.0 0.0 0.0 -6.625127119626612 6.418270043493964 -1.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 3.7796350217469814 0.0 0.0 -3.3125635598133054 0.0 0.0 0.0 -1.0 6.418270043493963 0.0 0.0 -6.625127119626611 0.0 0.0; 0.0 0.0 0.0 3.779635021746982 -3.312563559813306 0.0 0.0 0.0 0.0 0.0 0.0 6.418270043493964 -6.625127119626612 0.0 -1.0 0.0; 0.0 0.0 0.0 -3.3125635598133054 3.7796350217469814 0.0 0.0 0.0 0.0 0.0 0.0 -6.625127119626611 6.418270043493963 -1.0 0.0 0.0; 0.0 0.0 -3.312563559813306 0.0 0.0 3.779635021746982 0.0 0.0 0.0 0.0 -6.625127119626612 0.0 -1.0 6.418270043493964 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 3.7796350217469814 -3.3125635598133054 0.0 0.0 0.0 -1.0 0.0 0.0 6.418270043493963 -6.625127119626611; 0.0 0.0 0.0 0.0 0.0 0.0 -3.312563559813306 3.779635021746982 -1.0 0.0 0.0 0.0 0.0 0.0 -6.625127119626612 6.418270043493964], - ComplexF64[1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -3.7796350217469814 3.312563559813306 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.3125635598133054 -3.779635021746982 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -3.7796350217469814 0.0 0.0 3.312563559813306 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -3.779635021746982 3.3125635598133054 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.312563559813306 -3.7796350217469814 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.3125635598133054 0.0 0.0 -3.779635021746982 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -3.7796350217469814 3.312563559813306; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 3.3125635598133054 -3.779635021746982] - ), - ( ComplexF64[0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -2.62 -1.0 0.0 0.0 0.0 0.0 -1.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 -1.0 -2.62 0.0 0.0 0.0 0.0 0.0; 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0 0.0 0.0; 0.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0 0.0 0.0 0.0 -2.62 0.0; 0.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0 -1.0 0.0 0.0 0.0 0.0 0.0 -2.62], - ComplexF64[1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0] - ), - ( ComplexF64[0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 0.33748484079831426 -0.10323794456968927 0.0 0.0 0.0 0.0 0.0 0.0 -2.5940303184033713 -0.20647588913937853 0.0 0.0 0.0 0.0 0.0 -1.0; -0.10323794456968927 0.3374848407983142 0.0 0.0 0.0 0.0 0.0 0.0 -0.20647588913937853 -2.5940303184033713 -1.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.33748484079831426 0.0 0.0 -0.10323794456968927 0.0 0.0 0.0 -1.0 -2.5940303184033713 0.0 0.0 -0.20647588913937853 0.0 0.0; 0.0 0.0 0.0 0.3374848407983142 -0.10323794456968927 0.0 0.0 0.0 0.0 0.0 0.0 -2.5940303184033713 -0.20647588913937853 0.0 -1.0 0.0; 0.0 0.0 0.0 -0.10323794456968927 0.33748484079831426 0.0 0.0 0.0 0.0 0.0 0.0 -0.20647588913937853 -2.5940303184033713 -1.0 0.0 0.0; 0.0 0.0 -0.10323794456968927 0.0 0.0 0.3374848407983142 0.0 0.0 0.0 0.0 -0.20647588913937853 0.0 -1.0 -2.5940303184033713 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.33748484079831426 -0.10323794456968927 0.0 0.0 0.0 -1.0 0.0 0.0 -2.5940303184033713 -0.20647588913937853; 0.0 0.0 0.0 0.0 0.0 0.0 -0.10323794456968927 0.3374848407983142 -1.0 0.0 0.0 0.0 0.0 0.0 -0.20647588913937853 -2.5940303184033713], - ComplexF64[1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.33748484079831426 0.10323794456968927 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.10323794456968927 -0.3374848407983142 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.33748484079831426 0.0 0.0 0.10323794456968927 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.3374848407983142 0.10323794456968927 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.10323794456968927 -0.33748484079831426 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.10323794456968927 0.0 0.0 -0.3374848407983142 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -0.33748484079831426 0.10323794456968927; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.10323794456968927 -0.3374848407983142] - ), - ( ComplexF64[0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 
0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 1.7391668762048442 -1.309613611600033 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.150333752409688 -2.619227223200066 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 0.0; -1.3096136116000332 1.739166876204844 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -2.6192272232000664 2.150333752409688 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 1.739166876204844 0.0 0.0 -1.3096136116000332 0.0 0.0 0.0 0.0 0.0 -1.0 2.150333752409688 0.0 0.0 -2.6192272232000664 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 1.739166876204844 0.0 0.0 0.0 0.0 -1.3096136116000332 0.0 -1.0 0.0 0.0 2.150333752409688 0.0 0.0 0.0 0.0 -2.6192272232000664 0.0; 0.0 0.0 0.0 0.0 1.7391668762048442 0.0 0.0 0.0 0.0 -1.309613611600033 0.0 0.0 0.0 0.0 2.150333752409688 -1.0 0.0 0.0 0.0 -2.619227223200066; 0.0 0.0 -1.309613611600033 0.0 0.0 1.7391668762048442 0.0 0.0 0.0 0.0 0.0 0.0 -2.619227223200066 0.0 -1.0 2.150333752409688 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 1.739166876204844 -1.3096136116000332 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 2.150333752409688 -2.6192272232000664 0.0 -1.0; 0.0 0.0 0.0 0.0 0.0 0.0 -1.309613611600033 1.7391668762048442 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -2.619227223200066 2.150333752409688 -1.0 0.0; 0.0 0.0 0.0 -1.309613611600033 0.0 0.0 0.0 0.0 1.7391668762048442 0.0 0.0 0.0 0.0 -2.619227223200066 0.0 0.0 0.0 -1.0 2.150333752409688 0.0; 0.0 0.0 0.0 0.0 -1.3096136116000332 0.0 0.0 0.0 0.0 1.739166876204844 0.0 0.0 0.0 0.0 -2.6192272232000664 0.0 -1.0 0.0 0.0 2.150333752409688], - ComplexF64[1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.7391668762048442 1.3096136116000332 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.309613611600033 -1.739166876204844 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.739166876204844 0.0 0.0 1.309613611600033 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.739166876204844 0.0 0.0 0.0 0.0 1.309613611600033 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.7391668762048442 0.0 0.0 0.0 0.0 1.3096136116000332; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.3096136116000332 0.0 0.0 -1.7391668762048442 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.739166876204844 1.309613611600033 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.3096136116000332 
-1.7391668762048442 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.3096136116000332 0.0 0.0 0.0 0.0 -1.7391668762048442 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.309613611600033 0.0 0.0 0.0 0.0 -1.739166876204844] - ), - ( ComplexF64[0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0; 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 
-6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.90076923076925 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230788 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007; 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.90076923076925 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230788 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.90076923076925 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230788 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769246 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230784 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.90076923076925 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230788 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.90076923076925 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230788 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.90076923076925 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230788 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769246 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230784 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.90076923076925 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230788 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.90076923076925 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -1.0000000000000007 -12.019230769230788; -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769248 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769248 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769248 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 -6.009615384615393 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384622 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769244 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769248 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 
0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769248 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769248 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615393 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384622 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769244 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769248 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.490384615384624 -1.0000000000000007 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -12.019230769230784 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 11.900769230769248], - ComplexF64[1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 
0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615393 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615393 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384622 0.0 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 
0.0 0.0 6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615392 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384622 0.0 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624 0.0; 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 6.009615384615394 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 -6.490384615384624] - )] - - for (A, B) in problematic_pencils - f = schur(A, B) - @test f.Q*f.S*f.Z' ≈ A - @test f.Q*f.T*f.Z' ≈ B - end -end - -@testset "adjoint and transpose for schur (#40941)" begin - A = rand(3, 3) - B = schur(A', A) - C = B.left*B.S*B.right' - D = schur(transpose(A), A) - E = D.left*D.S*D.right' - @test A' ≈ C ≈ E -end - -@testset "UpperHessenberg schur" begin - A = UpperHessenberg(rand(ComplexF64, 100, 100)) - B = Array(A) - fact1 = schur(A) - fact2 = schur(B) - @test fact1.values ≈ fact2.values - @test fact1.Z * fact1.T * fact1.Z' ≈ B - - A = UpperHessenberg(rand(Int32, 50, 50)) - B = Array(A) - fact1 = schur(A) - fact2 = schur(B) - @test fact1.values ≈ fact2.values - @test fact1.Z * fact1.T * fact1.Z' ≈ B -end - -end # module TestSchur diff --git a/stdlib/LinearAlgebra/test/special.jl b/stdlib/LinearAlgebra/test/special.jl deleted file mode 100644 index 4b91bcfc1a4d5..0000000000000 --- a/stdlib/LinearAlgebra/test/special.jl +++ /dev/null @@ -1,862 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestSpecial - -using Test, LinearAlgebra, Random -using LinearAlgebra: rmul!, BandIndex - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays - -n= 10 #Size of matrix to test -Random.seed!(1) - -@testset "Interconversion between special matrix types" begin - a = [1.0:n;] - A = Diagonal(a) - @testset for newtype in [Diagonal, Bidiagonal, SymTridiagonal, Tridiagonal, Matrix] - @test Matrix(convert(newtype, A)) == Matrix(A) - @test Matrix(convert(newtype, Diagonal(GenericArray(a)))) == Matrix(A) - end - - @testset for isupper in (true, false) - A = Bidiagonal(a, [1.0:n-1;], ifelse(isupper, :U, :L)) - for newtype in [Bidiagonal, Tridiagonal, Matrix] - @test Matrix(convert(newtype, A)) == Matrix(A) - @test Matrix(newtype(A)) == Matrix(A) - end - @test_throws ArgumentError convert(SymTridiagonal, A) - tritype = isupper ? 
UpperTriangular : LowerTriangular
-        @test Matrix(tritype(A)) == Matrix(A)
-
-        A = Bidiagonal(a, zeros(n-1), ifelse(isupper, :U, :L)) #morally Diagonal
-        for newtype in [Diagonal, Bidiagonal, SymTridiagonal, Tridiagonal, Matrix]
-            @test Matrix(convert(newtype, A)) == Matrix(A)
-            @test Matrix(newtype(A)) == Matrix(A)
-        end
-        @test Matrix(tritype(A)) == Matrix(A)
-    end
-
-    A = SymTridiagonal(a, [1.0:n-1;])
-    for newtype in [Tridiagonal, Matrix]
-        @test Matrix(convert(newtype, A)) == Matrix(A)
-    end
-    for newtype in [Diagonal, Bidiagonal]
-        @test_throws ArgumentError convert(newtype,A)
-    end
-    A = SymTridiagonal(a, zeros(n-1))
-    @test Matrix(convert(Bidiagonal,A)) == Matrix(A)
-
-    A = Tridiagonal(zeros(n-1), [1.0:n;], zeros(n-1)) #morally Diagonal
-    for newtype in [Diagonal, Bidiagonal, SymTridiagonal, Matrix]
-        @test Matrix(convert(newtype, A)) == Matrix(A)
-    end
-    A = Tridiagonal(fill(1., n-1), [1.0:n;], fill(1., n-1)) #not morally Diagonal
-    for newtype in [SymTridiagonal, Matrix]
-        @test Matrix(convert(newtype, A)) == Matrix(A)
-    end
-    for newtype in [Diagonal, Bidiagonal]
-        @test_throws ArgumentError convert(newtype,A)
-    end
-    A = Tridiagonal(zeros(n-1), [1.0:n;], fill(1., n-1)) #not morally Diagonal
-    @test Matrix(convert(Bidiagonal, A)) == Matrix(A)
-    A = UpperTriangular(Tridiagonal(zeros(n-1), [1.0:n;], fill(1., n-1)))
-    @test Matrix(convert(Bidiagonal, A)) == Matrix(A)
-    A = Tridiagonal(fill(1., n-1), [1.0:n;], zeros(n-1)) #not morally Diagonal
-    @test Matrix(convert(Bidiagonal, A)) == Matrix(A)
-    A = LowerTriangular(Tridiagonal(fill(1., n-1), [1.0:n;], zeros(n-1)))
-    @test Matrix(convert(Bidiagonal, A)) == Matrix(A)
-    @test_throws ArgumentError convert(SymTridiagonal,A)
-
-    A = LowerTriangular(Matrix(Diagonal(a))) #morally Diagonal
-    for newtype in [Diagonal, Bidiagonal, SymTridiagonal, LowerTriangular, Matrix]
-        @test Matrix(convert(newtype, A)) == Matrix(A)
-    end
-    A = UpperTriangular(Matrix(Diagonal(a))) #morally Diagonal
-    for newtype in [Diagonal, Bidiagonal, SymTridiagonal, UpperTriangular, Matrix]
-        @test Matrix(convert(newtype, A)) == Matrix(A)
-    end
-    A = UpperTriangular(triu(rand(n,n)))
-    for newtype in [Diagonal, Bidiagonal, Tridiagonal, SymTridiagonal]
-        @test_throws ArgumentError convert(newtype,A)
-    end
-
-
-    # test operations/constructors (not conversions) permitted in the docs
-    dl = [1., 1.]
-    d = [-2., -2., -2.]
-    T = Tridiagonal(dl, d, -dl)
-    S = SymTridiagonal(d, dl)
-    Bu = Bidiagonal(d, dl, :U)
-    Bl = Bidiagonal(d, dl, :L)
-    D = Diagonal(d)
-    M = [-2. 0. 0.; 1. -2. 0.; -1. 1. -2.]
-    U = UpperTriangular(M)
-    L = LowerTriangular(Matrix(M'))
-
-    for A in (T, S, Bu, Bl, D, U, L, M)
-        Adense = Matrix(A)
-        B = Symmetric(A)
-        Bdense = Matrix(B)
-        for (C,Cdense) in ((A,Adense), (B,Bdense))
-            @test Diagonal(C) == Diagonal(Cdense)
-            @test Bidiagonal(C, :U) == Bidiagonal(Cdense, :U)
-            @test Bidiagonal(C, :L) == Bidiagonal(Cdense, :L)
-            @test Tridiagonal(C) == Tridiagonal(Cdense)
-            @test UpperTriangular(C) == UpperTriangular(Cdense)
-            @test LowerTriangular(C) == LowerTriangular(Cdense)
-        end
-    end
-
-    @testset "Matrix constructor for !isa(zero(T), T)" begin
-        # the following models JuMP.jl's VariableRef and AffExpr, resp.
- struct TypeWithoutZero end - struct TypeWithZero end - Base.promote_rule(::Type{TypeWithoutZero}, ::Type{TypeWithZero}) = TypeWithZero - Base.convert(::Type{TypeWithZero}, ::TypeWithoutZero) = TypeWithZero() - Base.zero(x::Union{TypeWithoutZero, TypeWithZero}) = zero(typeof(x)) - Base.zero(::Type{<:Union{TypeWithoutZero, TypeWithZero}}) = TypeWithZero() - LinearAlgebra.symmetric(::TypeWithoutZero, ::Symbol) = TypeWithoutZero() - LinearAlgebra.symmetric_type(::Type{TypeWithoutZero}) = TypeWithoutZero - Base.copy(A::TypeWithoutZero) = A - Base.transpose(::TypeWithoutZero) = TypeWithoutZero() - d = fill(TypeWithoutZero(), 3) - du = fill(TypeWithoutZero(), 2) - dl = fill(TypeWithoutZero(), 2) - D = Diagonal(d) - Bu = Bidiagonal(d, du, :U) - Bl = Bidiagonal(d, dl, :L) - Tri = Tridiagonal(dl, d, du) - Sym = SymTridiagonal(d, dl) - for M in (D, Bu, Bl, Tri, Sym) - @test Matrix(M) == zeros(TypeWithZero, 3, 3) - end - - mutable struct MTypeWithZero end - Base.convert(::Type{MTypeWithZero}, ::TypeWithoutZero) = MTypeWithZero() - Base.convert(::Type{MTypeWithZero}, ::TypeWithZero) = MTypeWithZero() - Base.zero(x::MTypeWithZero) = zero(typeof(x)) - Base.zero(::Type{MTypeWithZero}) = MTypeWithZero() - U = UpperTriangular(Symmetric(fill(TypeWithoutZero(), 2, 2))) - M = Matrix{MTypeWithZero}(U) - @test all(x -> x isa MTypeWithZero, M) - end -end - -@testset "Binary ops among special types" begin - a=[1.0:n;] - A=Diagonal(a) - Spectypes = [Diagonal, Bidiagonal, Tridiagonal, Matrix] - for (idx, type1) in enumerate(Spectypes) - for type2 in Spectypes - B = convert(type1,A) - C = convert(type2,A) - @test Matrix(B + C) ≈ Matrix(A + A) - @test Matrix(B - C) ≈ Matrix(A - A) - end - end - B = SymTridiagonal(a, fill(1., n-1)) - for Spectype in [Diagonal, Bidiagonal, Tridiagonal, Matrix] - @test Matrix(B + convert(Spectype,A)) ≈ Matrix(B + A) - @test Matrix(convert(Spectype,A) + B) ≈ Matrix(B + A) - @test Matrix(B - convert(Spectype,A)) ≈ Matrix(B - A) - @test Matrix(convert(Spectype,A) - B) ≈ Matrix(A - B) - end - - C = rand(n,n) - for TriType in [LinearAlgebra.UnitLowerTriangular, LinearAlgebra.UnitUpperTriangular, UpperTriangular, LowerTriangular] - D = TriType(C) - for Spectype in [Diagonal, Bidiagonal, Tridiagonal, Matrix] - @test Matrix(D + convert(Spectype,A)) ≈ Matrix(D + A) - @test Matrix(convert(Spectype,A) + D) ≈ Matrix(A + D) - @test Matrix(D - convert(Spectype,A)) ≈ Matrix(D - A) - @test Matrix(convert(Spectype,A) - D) ≈ Matrix(A - D) - end - end - - UpTri = UpperTriangular(rand(20,20)) - LoTri = LowerTriangular(rand(20,20)) - Diag = Diagonal(rand(20,20)) - Tridiag = Tridiagonal(rand(20, 20)) - UpBi = Bidiagonal(rand(20,20), :U) - LoBi = Bidiagonal(rand(20,20), :L) - Sym = SymTridiagonal(rand(20), rand(19)) - Dense = rand(20, 20) - mats = Any[UpTri, LoTri, Diag, Tridiag, UpBi, LoBi, Sym, Dense] - - for op in (+,-,*) - for A in mats - for B in mats - @test (op)(A, B) ≈ (op)(Matrix(A), Matrix(B)) ≈ Matrix((op)(A, B)) - end - end - end -end - -@testset "+ and - among structured matrices with different container types" begin - diag = 1:5 - offdiag = 1:4 - uniformscalingmats = [UniformScaling(3), UniformScaling(1.0), UniformScaling(3//5), UniformScaling(ComplexF64(1.3, 3.5))] - mats = Any[Diagonal(diag), Bidiagonal(diag, offdiag, 'U'), Bidiagonal(diag, offdiag, 'L'), Tridiagonal(offdiag, diag, offdiag), SymTridiagonal(diag, offdiag)] - for T in [ComplexF64, Int64, Rational{Int64}, Float64] - push!(mats, Diagonal(Vector{T}(diag))) - push!(mats, Bidiagonal(Vector{T}(diag), Vector{T}(offdiag), 'U')) - 
push!(mats, Bidiagonal(Vector{T}(diag), Vector{T}(offdiag), 'L')) - push!(mats, Tridiagonal(Vector{T}(offdiag), Vector{T}(diag), Vector{T}(offdiag))) - push!(mats, SymTridiagonal(Vector{T}(diag), Vector{T}(offdiag))) - end - - for op in (+,-,*) - for A in mats - for B in mats - @test (op)(A, B) ≈ (op)(Matrix(A), Matrix(B)) ≈ Matrix((op)(A, B)) - end - end - end - for op in (+,-) - for A in mats - for B in uniformscalingmats - @test (op)(A, B) ≈ (op)(Matrix(A), B) ≈ Matrix((op)(A, B)) - @test (op)(B, A) ≈ (op)(B, Matrix(A)) ≈ Matrix((op)(B, A)) - end - end - end - diag = [randn(ComplexF64, 2, 2) for _ in 1:3] - odiag = [randn(ComplexF64, 2, 2) for _ in 1:2] - for A in (Diagonal(diag), - Bidiagonal(diag, odiag, :U), - Bidiagonal(diag, odiag, :L), - Tridiagonal(odiag, diag, odiag), - SymTridiagonal(diag, odiag)), B in uniformscalingmats - @test (A + B)::typeof(A) == (B + A)::typeof(A) - @test (A - B)::typeof(A) == ((A + (-B))::typeof(A)) - @test (B - A)::typeof(A) == ((B + (-A))::typeof(A)) - end -end - - -@testset "Triangular Types and QR" begin - for typ in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - a = rand(n,n) - atri = typ(a) - matri = Matrix(atri) - b = rand(n,n) - for pivot in (ColumnNorm(), NoPivot()) - qrb = qr(b, pivot) - @test atri * qrb.Q ≈ matri * qrb.Q - @test atri * qrb.Q' ≈ matri * qrb.Q' - @test qrb.Q * atri ≈ qrb.Q * matri - @test qrb.Q' * atri ≈ qrb.Q' * matri - end - end -end - -@testset "Multiplication of Qs" begin - for pivot in (ColumnNorm(), NoPivot()), A in (rand(5, 3), rand(5, 5), rand(3, 5)) - Q = qr(A, pivot).Q - m = size(A, 1) - C = Matrix{Float64}(undef, (m, m)) - @test Q*Q ≈ (Q*I) * (Q*I) ≈ mul!(C, Q, Q) - @test size(Q*Q) == (m, m) - @test Q'Q ≈ (Q'*I) * (Q*I) ≈ mul!(C, Q', Q) - @test size(Q'Q) == (m, m) - @test Q*Q' ≈ (Q*I) * (Q'*I) ≈ mul!(C, Q, Q') - @test size(Q*Q') == (m, m) - @test Q'Q' ≈ (Q'*I) * (Q'*I) ≈ mul!(C, Q', Q') - @test size(Q'Q') == (m, m) - end -end - -@testset "concatenations of combinations of special and other matrix types" begin - N = 4 - # Test concatenating pairwise combinations of special matrices - diagmat = Diagonal(1:N) - bidiagmat = Bidiagonal(1:N, 1:(N-1), :U) - tridiagmat = Tridiagonal(1:(N-1), 1:N, 1:(N-1)) - symtridiagmat = SymTridiagonal(1:N, 1:(N-1)) - abstractq = qr(tridiagmat).Q - specialmats = (diagmat, bidiagmat, tridiagmat, symtridiagmat, abstractq, zeros(Int,N,N)) - for specialmata in specialmats, specialmatb in specialmats - MA = collect(specialmata); MB = collect(specialmatb) - @test hcat(specialmata, specialmatb) == hcat(MA, MB) - @test vcat(specialmata, specialmatb) == vcat(MA, MB) - @test hvcat((1,1), specialmata, specialmatb) == hvcat((1,1), MA, MB) - @test cat(specialmata, specialmatb; dims=(1,2)) == cat(MA, MB; dims=(1,2)) - end - # Test concatenating pairwise combinations of special matrices with dense matrices or dense vectors - densevec = fill(1., N) - densemat = diagm(0 => densevec) - for specialmat in specialmats - SM = Matrix(specialmat) - # --> Tests applicable only to pairs of matrices - @test vcat(specialmat, densemat) == vcat(SM, densemat) - @test vcat(densemat, specialmat) == vcat(densemat, SM) - # --> Tests applicable also to pairs including vectors - for specialmat in specialmats, othermatorvec in (densemat, densevec) - SM = Matrix(specialmat); OM = Array(othermatorvec) - @test hcat(specialmat, othermatorvec) == hcat(SM, OM) - @test hcat(othermatorvec, specialmat) == hcat(OM, SM) - @test hvcat((2,), specialmat, othermatorvec) == hvcat((2,), SM, OM) - @test 
hvcat((2,), othermatorvec, specialmat) == hvcat((2,), OM, SM) - @test cat(specialmat, othermatorvec; dims=(1,2)) == cat(SM, OM; dims=(1,2)) - @test cat(othermatorvec, specialmat; dims=(1,2)) == cat(OM, SM; dims=(1,2)) - end - end -end - -@testset "concatenations of annotated types" begin - N = 4 - # The tested annotation types - testfull = Base.get_bool_env("JULIA_TESTFULL", false) - utriannotations = (UpperTriangular, UnitUpperTriangular) - ltriannotations = (LowerTriangular, UnitLowerTriangular) - triannotations = (utriannotations..., ltriannotations...) - symannotations = (Symmetric, Hermitian) - annotations = testfull ? (triannotations..., symannotations...) : (LowerTriangular, Symmetric) - # Concatenations involving these types, un/annotated - diagmat = Diagonal(1:N) - bidiagmat = Bidiagonal(1:N, 1:(N-1), :U) - tridiagmat = Tridiagonal(1:(N-1), 1:N, 1:(N-1)) - symtridiagmat = SymTridiagonal(1:N, 1:(N-1)) - specialconcatmats = testfull ? (diagmat, bidiagmat, tridiagmat, symtridiagmat) : (diagmat,) - # Concatenations involving strictly these types, un/annotated - densevec = fill(1., N) - densemat = fill(1., N, N) - # Annotated collections - annodmats = [annot(densemat) for annot in annotations] - annospcmats = [annot(spcmat) for annot in annotations, spcmat in specialconcatmats] - # Test concatenations of pairwise combinations of annotated special matrices - for annospcmata in annospcmats, annospcmatb in annospcmats - AM = Array(annospcmata); BM = Array(annospcmatb) - @test vcat(annospcmata, annospcmatb) == vcat(AM, BM) - @test hcat(annospcmata, annospcmatb) == hcat(AM, BM) - @test hvcat((2,), annospcmata, annospcmatb) == hvcat((2,), AM, BM) - @test cat(annospcmata, annospcmatb; dims=(1,2)) == cat(AM, BM; dims=(1,2)) - end - # Test concatenations of pairwise combinations of annotated special matrices and other matrix/vector types - for annospcmat in annospcmats - AM = Array(annospcmat) - # --> Tests applicable to pairs including only matrices - for othermat in (densemat, annodmats..., specialconcatmats...) - OM = Array(othermat) - @test vcat(annospcmat, othermat) == vcat(AM, OM) - @test vcat(othermat, annospcmat) == vcat(OM, AM) - end - # --> Tests applicable to pairs including other vectors or matrices - for other in (densevec, densemat, annodmats..., specialconcatmats...) - OM = Array(other) - @test hcat(annospcmat, other) == hcat(AM, OM) - @test hcat(other, annospcmat) == hcat(OM, AM) - @test hvcat((2,), annospcmat, other) == hvcat((2,), AM, OM) - @test hvcat((2,), other, annospcmat) == hvcat((2,), OM, AM) - @test cat(annospcmat, other; dims=(1,2)) == cat(AM, OM; dims=(1,2)) - @test cat(other, annospcmat; dims=(1,2)) == cat(OM, AM; dims=(1,2)) - end - end - # Test concatenations strictly involving un/annotated dense matrices/vectors - for densemata in (densemat, annodmats...) - AM = Array(densemata) - # --> Tests applicable to pairs including only matrices - for densematb in (densemat, annodmats...) - BM = Array(densematb) - @test vcat(densemata, densematb) == vcat(AM, BM) - @test vcat(densematb, densemata) == vcat(BM, AM) - end - # --> Tests applicable to pairs including vectors or matrices - for otherdense in (densevec, densemat, annodmats...) 
- OM = Array(otherdense) - @test hcat(densemata, otherdense) == hcat(AM, OM) - @test hcat(otherdense, densemata) == hcat(OM, AM) - @test hvcat((2,), densemata, otherdense) == hvcat((2,), AM, OM) - @test hvcat((2,), otherdense, densemata) == hvcat((2,), OM, AM) - @test cat(densemata, otherdense; dims=(1,2)) == cat(AM, OM; dims=(1,2)) - @test cat(otherdense, densemata; dims=(1,2)) == cat(OM, AM; dims=(1,2)) - end - end -end - -# for testing types with a dimension -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :Furlongs) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Furlongs.jl")) -using .Main.Furlongs - -@testset "zero and one for structured matrices" begin - for elty in (Int64, Float64, ComplexF64) - D = Diagonal(rand(elty, 10)) - Bu = Bidiagonal(rand(elty, 10), rand(elty, 9), 'U') - Bl = Bidiagonal(rand(elty, 10), rand(elty, 9), 'L') - T = Tridiagonal(rand(elty, 9),rand(elty, 10), rand(elty, 9)) - S = SymTridiagonal(rand(elty, 10), rand(elty, 9)) - mats = Any[D, Bu, Bl, T, S] - for A in mats - @test iszero(zero(A)) - @test isone(one(A)) - @test zero(A) == zero(Matrix(A)) - @test one(A) == one(Matrix(A)) - end - - @test zero(D) isa Diagonal - @test one(D) isa Diagonal - - @test zero(Bu) isa Bidiagonal - @test one(Bu) isa Bidiagonal - @test zero(Bl) isa Bidiagonal - @test one(Bl) isa Bidiagonal - @test zero(Bu).uplo == one(Bu).uplo == Bu.uplo - @test zero(Bl).uplo == one(Bl).uplo == Bl.uplo - - @test zero(T) isa Tridiagonal - @test one(T) isa Tridiagonal - @test zero(S) isa SymTridiagonal - @test one(S) isa SymTridiagonal - end - - # ranges - D = Diagonal(1:10) - Bu = Bidiagonal(1:10, 1:9, 'U') - Bl = Bidiagonal(1:10, 1:9, 'L') - T = Tridiagonal(1:9, 1:10, 1:9) - S = SymTridiagonal(1:10, 1:9) - mats = [D, Bu, Bl, T, S] - for A in mats - @test iszero(zero(A)) - @test isone(one(A)) - @test zero(A) == zero(Matrix(A)) - @test one(A) == one(Matrix(A)) - end - - @test zero(D) isa Diagonal - @test one(D) isa Diagonal - - @test zero(Bu) isa Bidiagonal - @test one(Bu) isa Bidiagonal - @test zero(Bl) isa Bidiagonal - @test one(Bl) isa Bidiagonal - @test zero(Bu).uplo == one(Bu).uplo == Bu.uplo - @test zero(Bl).uplo == one(Bl).uplo == Bl.uplo - - @test zero(T) isa Tridiagonal - @test one(T) isa Tridiagonal - @test zero(S) isa SymTridiagonal - @test one(S) isa SymTridiagonal - - # eltype with dimensions - D0 = Diagonal{Furlong{0, Int64}}([1, 2, 3, 4]) - Bu0 = Bidiagonal{Furlong{0, Int64}}([1, 2, 3, 4], [1, 2, 3], 'U') - Bl0 = Bidiagonal{Furlong{0, Int64}}([1, 2, 3, 4], [1, 2, 3], 'L') - T0 = Tridiagonal{Furlong{0, Int64}}([1, 2, 3], [1, 2, 3, 4], [1, 2, 3]) - S0 = SymTridiagonal{Furlong{0, Int64}}([1, 2, 3, 4], [1, 2, 3]) - F2 = Furlongs.Furlong{2}(1) - D2 = Diagonal{Furlong{2, Int64}}([1, 2, 3, 4].*F2) - Bu2 = Bidiagonal{Furlong{2, Int64}}([1, 2, 3, 4].*F2, [1, 2, 3].*F2, 'U') - Bl2 = Bidiagonal{Furlong{2, Int64}}([1, 2, 3, 4].*F2, [1, 2, 3].*F2, 'L') - T2 = Tridiagonal{Furlong{2, Int64}}([1, 2, 3].*F2, [1, 2, 3, 4].*F2, [1, 2, 3].*F2) - S2 = SymTridiagonal{Furlong{2, Int64}}([1, 2, 3, 4].*F2, [1, 2, 3].*F2) - mats = Any[D0, Bu0, Bl0, T0, S0, D2, Bu2, Bl2, T2, S2] - for A in mats - @test iszero(zero(A)) - @test isone(one(A)) - @test zero(A) == zero(Matrix(A)) - @test one(A) == one(Matrix(A)) - @test eltype(one(A)) == typeof(one(eltype(A))) - end -end - -@testset "== for structured matrices" begin - diag = rand(10) - offdiag = rand(9) - D = Diagonal(rand(10)) - Bup = Bidiagonal(diag, offdiag, 'U') - Blo = Bidiagonal(diag, offdiag, 'L') - 
Bupd = Bidiagonal(diag, zeros(9), 'U') - Blod = Bidiagonal(diag, zeros(9), 'L') - T = Tridiagonal(offdiag, diag, offdiag) - Td = Tridiagonal(zeros(9), diag, zeros(9)) - Tu = Tridiagonal(zeros(9), diag, offdiag) - Tl = Tridiagonal(offdiag, diag, zeros(9)) - S = SymTridiagonal(diag, offdiag) - Sd = SymTridiagonal(diag, zeros(9)) - - mats = [D, Bup, Blo, Bupd, Blod, T, Td, Tu, Tl, S, Sd] - - for a in mats - for b in mats - @test (a == b) == (Matrix(a) == Matrix(b)) == (b == a) == (Matrix(b) == Matrix(a)) - end - end -end - -@testset "BiTriSym*Q' and Q'*BiTriSym" begin - dl = [1, 1, 1] - d = [1, 1, 1, 1] - D = Diagonal(d) - Bi = Bidiagonal(d, dl, :L) - Tri = Tridiagonal(dl, d, dl) - Sym = SymTridiagonal(d, dl) - F = qr(ones(4, 1)) - A = F.Q' - for A in (F.Q, F.Q'), B in (D, Bi, Tri, Sym) - @test B*A ≈ Matrix(B)*A - @test A*B ≈ A*Matrix(B) - end -end - -@testset "Ops on SymTridiagonal ev has the same length as dv" begin - x = rand(3) - y = rand(3) - z = rand(2) - - S = SymTridiagonal(x, y) - T = Tridiagonal(z, x, z) - Bu = Bidiagonal(x, z, :U) - Bl = Bidiagonal(x, z, :L) - - Ms = Matrix(S) - Mt = Matrix(T) - Mbu = Matrix(Bu) - Mbl = Matrix(Bl) - - @test S + T ≈ Ms + Mt - @test T + S ≈ Mt + Ms - @test S + Bu ≈ Ms + Mbu - @test Bu + S ≈ Mbu + Ms - @test S + Bl ≈ Ms + Mbl - @test Bl + S ≈ Mbl + Ms -end - -@testset "Ensure Strided * (Sym)Tridiagonal is Dense" begin - x = rand(3) - y = rand(3) - z = rand(2) - - l = rand(12, 12) - # strided but not a Matrix - v = @view l[1:4:end, 1:4:end] - M_v = Matrix(v) - m = rand(3, 3) - - S = SymTridiagonal(x, y) - T = Tridiagonal(z, x, z) - M_S = Matrix(S) - M_T = Matrix(T) - - @test m * T ≈ m * M_T - @test m * S ≈ m * M_S - @test v * T ≈ M_v * T - @test v * S ≈ M_v * S - - @test m * T isa Matrix - @test m * S isa Matrix - @test v * T isa Matrix - @test v * S isa Matrix -end - -@testset "copyto! 
between matrix types" begin - dl, d, du = zeros(Int,4), [1:5;], zeros(Int,4) - d_ones = ones(Int,size(du)) - - @testset "from Diagonal" begin - D = Diagonal(d) - @testset "to Bidiagonal" begin - BU = Bidiagonal(similar(d, BigInt), similar(du, BigInt), :U) - BL = Bidiagonal(similar(d, BigInt), similar(dl, BigInt), :L) - for B in (BL, BU) - copyto!(B, D) - @test B == D - end - - @testset "mismatched size" begin - for B in (BU, BL) - B .= 0 - copyto!(B, Diagonal(Int[1])) - @test B[1,1] == 1 - B[1,1] = 0 - @test iszero(B) - end - end - end - @testset "to Tridiagonal" begin - T = Tridiagonal(similar(dl, BigInt), similar(d, BigInt), similar(du, BigInt)) - copyto!(T, D) - @test T == D - - @testset "mismatched size" begin - T .= 0 - copyto!(T, Diagonal([1])) - @test T[1,1] == 1 - T[1,1] = 0 - @test iszero(T) - end - end - @testset "to SymTridiagonal" begin - for du2 in (similar(du, BigInt), similar(d, BigInt)) - S = SymTridiagonal(similar(d), du2) - copyto!(S, D) - @test S == D - end - - @testset "mismatched size" begin - S = SymTridiagonal(zero(d), zero(du)) - copyto!(S, Diagonal([1])) - @test S[1,1] == 1 - S[1,1] = 0 - @test iszero(S) - end - end - end - - @testset "from Bidiagonal" begin - BU = Bidiagonal(d, du, :U) - BUones = Bidiagonal(d, oneunit.(du), :U) - BL = Bidiagonal(d, dl, :L) - BLones = Bidiagonal(d, oneunit.(dl), :L) - @testset "to Diagonal" begin - D = Diagonal(zero(d)) - for B in (BL, BU) - @test copyto!(D, B) == B - D .= 0 - end - for B in (BLones, BUones) - errmsg = "cannot copy a Bidiagonal with a non-zero off-diagonal band to a Diagonal" - @test_throws errmsg copyto!(D, B) - @test iszero(D) - end - - @testset "mismatched size" begin - for uplo in (:L, :U) - D .= 0 - copyto!(D, Bidiagonal(Int[1], Int[], uplo)) - @test D[1,1] == 1 - D[1,1] = 0 - @test iszero(D) - end - end - end - @testset "to Tridiagonal" begin - T = Tridiagonal(similar(dl, BigInt), similar(d, BigInt), similar(du, BigInt)) - for B in (BL, BU, BLones, BUones) - copyto!(T, B) - @test T == B - end - - @testset "mismatched size" begin - T = Tridiagonal(oneunit.(dl), zero(d), oneunit.(du)) - for uplo in (:L, :U) - T .= 0 - copyto!(T, Bidiagonal([1], Int[], uplo)) - @test T[1,1] == 1 - T[1,1] = 0 - @test iszero(T) - end - end - end - @testset "to SymTridiagonal" begin - for du2 in (similar(du, BigInt), similar(d, BigInt)) - S = SymTridiagonal(similar(d, BigInt), du2) - for B in (BL, BU) - copyto!(S, B) - @test S == B - end - errmsg = "cannot copy a non-symmetric Bidiagonal matrix to a SymTridiagonal" - @test_throws errmsg copyto!(S, BUones) - @test_throws errmsg copyto!(S, BLones) - end - - @testset "mismatched size" begin - S = SymTridiagonal(zero(d), zero(du)) - for uplo in (:L, :U) - copyto!(S, Bidiagonal([1], Int[], uplo)) - @test S[1,1] == 1 - S[1,1] = 0 - @test iszero(S) - end - end - end - end - - @testset "from Tridiagonal" begin - T = Tridiagonal(dl, d, du) - TU = Tridiagonal(dl, d, d_ones) - TL = Tridiagonal(d_ones, d, dl) - @testset "to Diagonal" begin - D = Diagonal(zero(d)) - @test copyto!(D, T) == Diagonal(d) - errmsg = "cannot copy a Tridiagonal with a non-zero off-diagonal band to a Diagonal" - D .= 0 - @test_throws errmsg copyto!(D, TU) - @test iszero(D) - errmsg = "cannot copy a Tridiagonal with a non-zero off-diagonal band to a Diagonal" - @test_throws errmsg copyto!(D, TL) - @test iszero(D) - - @testset "mismatched size" begin - D .= 0 - copyto!(D, Tridiagonal(Int[], Int[1], Int[])) - @test D[1,1] == 1 - D[1,1] = 0 - @test iszero(D) - end - end - @testset "to Bidiagonal" begin - BU = 
Bidiagonal(zero(d), zero(du), :U) - BL = Bidiagonal(zero(d), zero(du), :L) - @test copyto!(BU, T) == Bidiagonal(d, du, :U) - @test copyto!(BL, T) == Bidiagonal(d, du, :L) - - BU .= 0 - BL .= 0 - errmsg = "cannot copy a Tridiagonal with a non-zero superdiagonal to a Bidiagonal with uplo=:L" - @test_throws errmsg copyto!(BL, TU) - @test iszero(BL) - @test copyto!(BU, TU) == Bidiagonal(d, d_ones, :U) - - BU .= 0 - BL .= 0 - @test copyto!(BL, TL) == Bidiagonal(d, d_ones, :L) - errmsg = "cannot copy a Tridiagonal with a non-zero subdiagonal to a Bidiagonal with uplo=:U" - @test_throws errmsg copyto!(BU, TL) - @test iszero(BU) - - @testset "mismatched size" begin - for B in (BU, BL) - B .= 0 - copyto!(B, Tridiagonal(Int[], Int[1], Int[])) - @test B[1,1] == 1 - B[1,1] = 0 - @test iszero(B) - end - end - end - end - - @testset "from SymTridiagonal" begin - S2 = SymTridiagonal(d, ones(Int,size(d))) - for S in (SymTridiagonal(d, du), SymTridiagonal(d, zero(d))) - @testset "to Diagonal" begin - D = Diagonal(zero(d)) - @test copyto!(D, S) == Diagonal(d) - D .= 0 - errmsg = "cannot copy a SymTridiagonal with a non-zero off-diagonal band to a Diagonal" - @test_throws errmsg copyto!(D, S2) - @test iszero(D) - - @testset "mismatched size" begin - D .= 0 - copyto!(D, SymTridiagonal(Int[1], Int[])) - @test D[1,1] == 1 - D[1,1] = 0 - @test iszero(D) - end - end - @testset "to Bidiagonal" begin - BU = Bidiagonal(zero(d), zero(du), :U) - BL = Bidiagonal(zero(d), zero(du), :L) - @test copyto!(BU, S) == Bidiagonal(d, du, :U) - @test copyto!(BL, S) == Bidiagonal(d, du, :L) - - BU .= 0 - BL .= 0 - errmsg = "cannot copy a SymTridiagonal with a non-zero off-diagonal band to a Bidiagonal" - @test_throws errmsg copyto!(BU, S2) - @test iszero(BU) - @test_throws errmsg copyto!(BL, S2) - @test iszero(BL) - - @testset "mismatched size" begin - for B in (BU, BL) - B .= 0 - copyto!(B, SymTridiagonal(Int[1], Int[])) - @test B[1,1] == 1 - B[1,1] = 0 - @test iszero(B) - end - end - end - end - end -end - -@testset "BandIndex indexing" begin - for D in (Diagonal(1:3), Bidiagonal(1:3, 2:3, :U), Bidiagonal(1:3, 2:3, :L), - Tridiagonal(2:3, 1:3, 1:2), SymTridiagonal(1:3, 2:3)) - M = Matrix(D) - for band in -size(D,1)+1:size(D,1)-1 - for idx in 1:size(D,1)-abs(band) - @test D[BandIndex(band, idx)] == M[BandIndex(band, idx)] - end - end - @test_throws BoundsError D[BandIndex(size(D,1),1)] - end -end - -@testset "Partly filled Hermitian and Diagonal algebra" begin - D = Diagonal([1,2]) - for S in (Symmetric, Hermitian), uplo in (:U, :L) - M = Matrix{BigInt}(undef, 2, 2) - M[1,1] = M[2,2] = M[1+(uplo == :L), 1 + (uplo == :U)] = 3 - H = S(M, uplo) - HM = Matrix(H) - @test H + D == D + H == HM + D - @test H - D == HM - D - @test D - H == D - HM - end -end - -@testset "block SymTridiagonal" begin - m = SizedArrays.SizedArray{(2,2)}(reshape([1:4;;],2,2)) - S = SymTridiagonal(fill(m,4), fill(m,3)) - SA = Array(S) - D = Diagonal(fill(m,4)) - DA = Array(D) - BU = Bidiagonal(fill(m,4), fill(m,3), :U) - BUA = Array(BU) - BL = Bidiagonal(fill(m,4), fill(m,3), :L) - BLA = Array(BL) - T = Tridiagonal(fill(m,3), fill(m,4), fill(m,3)) - TA = Array(T) - IA = Array(Diagonal(fill(one(m), 4))) - @test S + D == D + S == SA + DA - @test S - D == -(D - S) == SA - DA - @test S + BU == SA + BUA - @test S - BU == -(BU - S) == SA - BUA - @test S + BL == SA + BLA - @test S - BL == -(BL - S) == SA - BLA - @test S + T == SA + TA - @test S - T == -(T - S) == SA - TA - @test S + S == SA + SA - @test S - S == -(S - S) == SA - SA - @test S + I == I + S == SA + IA - 
@test S - I == -(I - S) == SA - IA - - @test S == S - @test S != D - @test S != BL - @test S != BU - @test S != T - - @test_throws ArgumentError fill!(S, m) - S_small = SymTridiagonal(fill(m,2), fill(m,1)) - @test_throws "cannot fill a SymTridiagonal with an asymmetric value" fill!(S, m) - fill!(S_small, Symmetric(m)) - @test all(==(Symmetric(m)), S_small) - - @testset "diag" begin - m = SizedArrays.SizedArray{(2,2)}([1 3; 3 4]) - D = Diagonal(fill(m,4)) - z = fill(zero(m),3) - d = fill(m,4) - BU = Bidiagonal(d, z, :U) - BL = Bidiagonal(d, z, :L) - T = Tridiagonal(z, d, z) - for ev in (fill(zero(m),3), fill(zero(m),4)) - SD = SymTridiagonal(fill(m,4), ev) - @test SD == D == SD - @test SD == BU == SD - @test SD == BL == SD - @test SD == T == SD - end - end -end - -end # module TestSpecial diff --git a/stdlib/LinearAlgebra/test/structuredbroadcast.jl b/stdlib/LinearAlgebra/test/structuredbroadcast.jl deleted file mode 100644 index 71494aedcbef5..0000000000000 --- a/stdlib/LinearAlgebra/test/structuredbroadcast.jl +++ /dev/null @@ -1,379 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestStructuredBroadcast -using Test, LinearAlgebra - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays - -@testset "broadcast[!] over combinations of scalars, structured matrices, and dense vectors/matrices" begin - N = 10 - s = rand() - fV = rand(N) - fA = rand(N, N) - Z = copy(fA) - D = Diagonal(rand(N)) - B = Bidiagonal(rand(N), rand(N - 1), :U) - T = Tridiagonal(rand(N - 1), rand(N), rand(N - 1)) - S = SymTridiagonal(rand(N), rand(N - 1)) - U = UpperTriangular(rand(N,N)) - L = LowerTriangular(rand(N,N)) - M = Matrix(rand(N,N)) - structuredarrays = (D, B, T, U, L, M, S) - fstructuredarrays = map(Array, structuredarrays) - for (X, fX) in zip(structuredarrays, fstructuredarrays) - @test (Q = broadcast(sin, X); typeof(Q) == typeof(X) && Q == broadcast(sin, fX)) - @test broadcast!(sin, Z, X) == broadcast(sin, fX) - @test (Q = broadcast(cos, X); Q isa Matrix && Q == broadcast(cos, fX)) - @test broadcast!(cos, Z, X) == broadcast(cos, fX) - @test (Q = broadcast(*, s, X); typeof(Q) == typeof(X) && Q == broadcast(*, s, fX)) - @test broadcast!(*, Z, s, X) == broadcast(*, s, fX) - @test (Q = broadcast(+, fV, fA, X); Q isa Matrix && Q == broadcast(+, fV, fA, fX)) - @test broadcast!(+, Z, fV, fA, X) == broadcast(+, fV, fA, fX) - @test (Q = broadcast(*, s, fV, fA, X); Q isa Matrix && Q == broadcast(*, s, fV, fA, fX)) - @test broadcast!(*, Z, s, fV, fA, X) == broadcast(*, s, fV, fA, fX) - - @test X .* 2.0 == X .* (2.0,) == fX .* 2.0 - @test X .* 2.0 isa typeof(X) - @test X .* (2.0,) isa typeof(X) - @test isequal(X .* Inf, fX .* Inf) - - two = 2 - @test X .^ 2 == X .^ (2,) == fX .^ 2 == X .^ two - @test X .^ 2 isa typeof(X) - @test X .^ (2,) isa typeof(X) - @test X .^ two isa typeof(X) - @test X .^ 0 == fX .^ 0 - @test X .^ -1 == fX .^ -1 - - for (Y, fY) in zip(structuredarrays, fstructuredarrays) - @test broadcast(+, X, Y) == broadcast(+, fX, fY) - @test broadcast!(+, Z, X, Y) == broadcast(+, fX, fY) - @test broadcast(*, X, Y) == broadcast(*, fX, fY) - @test broadcast!(*, Z, X, Y) == broadcast(*, fX, fY) - end - end - diagonals = (D, B, T) - fdiagonals = map(Array, diagonals) - for (X, fX) in zip(diagonals, fdiagonals) - for (Y, fY) in zip(diagonals, fdiagonals) - @test broadcast(+, X, 
Y)::Union{Diagonal,Bidiagonal,Tridiagonal} == broadcast(+, fX, fY) - @test broadcast!(+, Z, X, Y) == broadcast(+, fX, fY) - @test broadcast(*, X, Y)::Union{Diagonal,Bidiagonal,Tridiagonal} == broadcast(*, fX, fY) - @test broadcast!(*, Z, X, Y) == broadcast(*, fX, fY) - end - end - UU = UnitUpperTriangular(rand(N,N)) - UL = UnitLowerTriangular(rand(N,N)) - unittriangulars = (UU, UL) - Ttris = typeof.((UpperTriangular(parent(UU)), LowerTriangular(parent(UU)))) - funittriangulars = map(Array, unittriangulars) - for (X, fX, Ttri) in zip(unittriangulars, funittriangulars, Ttris) - @test (Q = broadcast(sin, X); typeof(Q) == Ttri && Q == broadcast(sin, fX)) - @test broadcast!(sin, Z, X) == broadcast(sin, fX) - @test (Q = broadcast(cos, X); Q isa Matrix && Q == broadcast(cos, fX)) - @test broadcast!(cos, Z, X) == broadcast(cos, fX) - @test (Q = broadcast(*, s, X); typeof(Q) == Ttri && Q == broadcast(*, s, fX)) - @test broadcast!(*, Z, s, X) == broadcast(*, s, fX) - @test (Q = broadcast(+, fV, fA, X); Q isa Matrix && Q == broadcast(+, fV, fA, fX)) - @test broadcast!(+, Z, fV, fA, X) == broadcast(+, fV, fA, fX) - @test (Q = broadcast(*, s, fV, fA, X); Q isa Matrix && Q == broadcast(*, s, fV, fA, fX)) - @test broadcast!(*, Z, s, fV, fA, X) == broadcast(*, s, fV, fA, fX) - - @test X .* 2.0 == X .* (2.0,) == fX .* 2.0 - @test X .* 2.0 isa Ttri - @test X .* (2.0,) isa Ttri - @test isequal(X .* Inf, fX .* Inf) - - two = 2 - @test X .^ 2 == X .^ (2,) == fX .^ 2 == X .^ two - @test X .^ 2 isa typeof(X) # special cased, as isstructurepreserving - @test X .^ (2,) isa Ttri - @test X .^ two isa Ttri - @test X .^ 0 == fX .^ 0 - @test X .^ -1 == fX .^ -1 - - for (Y, fY) in zip(unittriangulars, funittriangulars) - @test broadcast(+, X, Y) == broadcast(+, fX, fY) - @test broadcast!(+, Z, X, Y) == broadcast(+, fX, fY) - @test broadcast(*, X, Y) == broadcast(*, fX, fY) - @test broadcast!(*, Z, X, Y) == broadcast(*, fX, fY) - end - end - - @testset "type-stability in Bidiagonal" begin - B2 = @inferred (B -> .- B)(B) - @test B2 isa Bidiagonal - @test B2 == -1 * B - B2 = @inferred (B -> B .* 2)(B) - @test B2 isa Bidiagonal - @test B2 == B + B - B2 = @inferred (B -> 2 .* B)(B) - @test B2 isa Bidiagonal - @test B2 == B + B - B2 = @inferred (B -> B ./ 1)(B) - @test B2 isa Bidiagonal - @test B2 == B - B2 = @inferred (B -> 1 .\ B)(B) - @test B2 isa Bidiagonal - @test B2 == B - end -end - -@testset "broadcast! 
where the destination is a structured matrix" begin - N = 5 - A = rand(N, N) - sA = A + copy(A') - D = Diagonal(rand(N)) - Bu = Bidiagonal(rand(N), rand(N - 1), :U) - Bl = Bidiagonal(rand(N), rand(N - 1), :L) - T = Tridiagonal(rand(N - 1), rand(N), rand(N - 1)) - ◣ = LowerTriangular(rand(N,N)) - ◥ = UpperTriangular(rand(N,N)) - M = Matrix(rand(N,N)) - - @test broadcast!(sin, copy(D), D) == Diagonal(sin.(D)) - @test broadcast!(sin, copy(Bu), Bu) == Bidiagonal(sin.(Bu), :U) - @test broadcast!(sin, copy(Bl), Bl) == Bidiagonal(sin.(Bl), :L) - @test broadcast!(sin, copy(T), T) == Tridiagonal(sin.(T)) - @test broadcast!(sin, copy(◣), ◣) == LowerTriangular(sin.(◣)) - @test broadcast!(sin, copy(◥), ◥) == UpperTriangular(sin.(◥)) - @test broadcast!(sin, copy(M), M) == Matrix(sin.(M)) - @test broadcast!(*, copy(D), D, A) == Diagonal(broadcast(*, D, A)) - @test broadcast!(*, copy(Bu), Bu, A) == Bidiagonal(broadcast(*, Bu, A), :U) - @test broadcast!(*, copy(Bl), Bl, A) == Bidiagonal(broadcast(*, Bl, A), :L) - @test broadcast!(*, copy(T), T, A) == Tridiagonal(broadcast(*, T, A)) - @test broadcast!(*, copy(◣), ◣, A) == LowerTriangular(broadcast(*, ◣, A)) - @test broadcast!(*, copy(◥), ◥, A) == UpperTriangular(broadcast(*, ◥, A)) - @test broadcast!(*, copy(M), M, A) == Matrix(broadcast(*, M, A)) - - @test_throws ArgumentError broadcast!(cos, copy(D), D) == Diagonal(sin.(D)) - @test_throws ArgumentError broadcast!(cos, copy(Bu), Bu) == Bidiagonal(sin.(Bu), :U) - @test_throws ArgumentError broadcast!(cos, copy(Bl), Bl) == Bidiagonal(sin.(Bl), :L) - @test_throws ArgumentError broadcast!(cos, copy(T), T) == Tridiagonal(sin.(T)) - @test_throws ArgumentError broadcast!(cos, copy(◣), ◣) == LowerTriangular(sin.(◣)) - @test_throws ArgumentError broadcast!(cos, copy(◥), ◥) == UpperTriangular(sin.(◥)) - @test_throws ArgumentError broadcast!(+, copy(D), D, A) == Diagonal(broadcast(*, D, A)) - @test_throws ArgumentError broadcast!(+, copy(Bu), Bu, A) == Bidiagonal(broadcast(*, Bu, A), :U) - @test_throws ArgumentError broadcast!(+, copy(Bl), Bl, A) == Bidiagonal(broadcast(*, Bl, A), :L) - @test_throws ArgumentError broadcast!(+, copy(T), T, A) == Tridiagonal(broadcast(*, T, A)) - @test_throws ArgumentError broadcast!(+, copy(◣), ◣, A) == LowerTriangular(broadcast(*, ◣, A)) - @test_throws ArgumentError broadcast!(+, copy(◥), ◥, A) == UpperTriangular(broadcast(*, ◥, A)) - @test_throws ArgumentError broadcast!(*, copy(◥), ◣, 2) - @test_throws ArgumentError broadcast!(*, copy(Bu), Bl, 2) -end - -@testset "map[!] 
over combinations of structured matrices" begin - N = 10 - fA = rand(N, N) - Z = copy(fA) - D = Diagonal(rand(N)) - B = Bidiagonal(rand(N), rand(N - 1), :U) - T = Tridiagonal(rand(N - 1), rand(N), rand(N - 1)) - S = SymTridiagonal(rand(N), rand(N - 1)) - U = UpperTriangular(rand(N,N)) - L = LowerTriangular(rand(N,N)) - M = Matrix(rand(N,N)) - structuredarrays = (M, D, B, T, S, U, L) - fstructuredarrays = map(Array, structuredarrays) - for (X, fX) in zip(structuredarrays, fstructuredarrays) - @test (Q = map(sin, X); typeof(Q) == typeof(X) && Q == map(sin, fX)) - @test map!(sin, Z, X) == map(sin, fX) - @test (Q = map(cos, X); Q isa Matrix && Q == map(cos, fX)) - @test map!(cos, Z, X) == map(cos, fX) - @test (Q = map(+, fA, X); Q isa Matrix && Q == map(+, fA, fX)) - @test map!(+, Z, fA, X) == map(+, fA, fX) - for (Y, fY) in zip(structuredarrays, fstructuredarrays) - @test map(+, X, Y) == map(+, fX, fY) - @test map!(+, Z, X, Y) == map(+, fX, fY) - @test map(*, X, Y) == map(*, fX, fY) - @test map!(*, Z, X, Y) == map(*, fX, fY) - @test map(+, X, fA, Y) == map(+, fX, fA, fY) - @test map!(+, Z, X, fA, Y) == map(+, fX, fA, fY) - end - end - diagonals = (D, B, T) - fdiagonals = map(Array, diagonals) - for (X, fX) in zip(diagonals, fdiagonals) - for (Y, fY) in zip(diagonals, fdiagonals) - @test map(+, X, Y)::Union{Diagonal,Bidiagonal,Tridiagonal} == broadcast(+, fX, fY) - @test map!(+, Z, X, Y) == broadcast(+, fX, fY) - @test map(*, X, Y)::Union{Diagonal,Bidiagonal,Tridiagonal} == broadcast(*, fX, fY) - @test map!(*, Z, X, Y) == broadcast(*, fX, fY) - end - end - # these would be valid for broadcast, but not for map - @test_throws DimensionMismatch map(+, D, Diagonal(rand(1))) - @test_throws DimensionMismatch map(+, D, Diagonal(rand(1)), D) - @test_throws DimensionMismatch map(+, D, D, Diagonal(rand(1))) - @test_throws DimensionMismatch map(+, Diagonal(rand(1)), D, D) -end - -@testset "Issue #33397" begin - N = 5 - U = UpperTriangular(rand(N, N)) - L = LowerTriangular(rand(N, N)) - UnitU = UnitUpperTriangular(rand(N, N)) - UnitL = UnitLowerTriangular(rand(N, N)) - D = Diagonal(rand(N)) - @test U .+ L .+ D == U + L + D - @test L .+ U .+ D == L + U + D - @test UnitU .+ UnitL .+ D == UnitU + UnitL + D - @test UnitL .+ UnitU .+ D == UnitL + UnitU + D - @test U .+ UnitL .+ D == U + UnitL + D - @test L .+ UnitU .+ D == L + UnitU + D - @test L .+ U .+ L .+ U == L + U + L + U - @test U .+ L .+ U .+ L == U + L + U + L - @test L .+ UnitL .+ UnitU .+ U .+ D == L + UnitL + UnitU + U + D - @test L .+ U .+ D .+ D .+ D .+ D == L + U + D + D + D + D -end -@testset "Broadcast Returned Types" begin - # Issue 35245 - N = 3 - dV = rand(N) - evu = rand(N-1) - evl = rand(N-1) - - Bu = Bidiagonal(dV, evu, :U) - Bl = Bidiagonal(dV, evl, :L) - T = Tridiagonal(evl, dV * 2, evu) - - @test typeof(Bu .+ Bl) <: Tridiagonal - @test typeof(Bl .+ Bu) <: Tridiagonal - @test typeof(Bu .+ Bu) <: Bidiagonal - @test typeof(Bl .+ Bl) <: Bidiagonal - @test Bu .+ Bl == T - @test Bl .+ Bu == T - @test Bu .+ Bu == Bidiagonal(dV * 2, evu * 2, :U) - @test Bl .+ Bl == Bidiagonal(dV * 2, evl * 2, :L) - - - @test typeof(Bu .* Bl) <: Tridiagonal - @test typeof(Bl .* Bu) <: Tridiagonal - @test typeof(Bu .* Bu) <: Bidiagonal - @test typeof(Bl .* Bl) <: Bidiagonal - - @test Bu .* Bl == Tridiagonal(zeros(N-1), dV .* dV, zeros(N-1)) - @test Bl .* Bu == Tridiagonal(zeros(N-1), dV .* dV, zeros(N-1)) - @test Bu .* Bu == Bidiagonal(dV .* dV, evu .* evu, :U) - @test Bl .* Bl == Bidiagonal(dV .* dV, evl .* evl, :L) - - Bu2 = Bu .* 2 - @test typeof(Bu2) <: 
Bidiagonal && Bu2.uplo == 'U' - Bu2 = 2 .* Bu - @test typeof(Bu2) <: Bidiagonal && Bu2.uplo == 'U' - Bl2 = Bl .* 2 - @test typeof(Bl2) <: Bidiagonal && Bl2.uplo == 'L' - Bu2 = 2 .* Bl - @test typeof(Bl2) <: Bidiagonal && Bl2.uplo == 'L' - - # Example of Nested Broadcasts - tmp = (1 .* 2) .* (Bidiagonal(1:3, 1:2, 'U') .* (3 .* 4)) .* (5 .* Bidiagonal(1:3, 1:2, 'L')) - @test typeof(tmp) <: Tridiagonal - -end - -struct Zero36193 end -Base.iszero(::Zero36193) = true -LinearAlgebra.iszerodefined(::Type{Zero36193}) = true -@testset "PR #36193" begin - f(::Union{Int, Zero36193}) = Zero36193() - function test(el) - M = [el el - el el] - v = [el, el] - U = UpperTriangular(M) - L = LowerTriangular(M) - D = Diagonal(v) - for (T, A) in [(UpperTriangular, U), (LowerTriangular, L), (Diagonal, D)] - @test identity.(A) isa typeof(A) - @test map(identity, A) isa typeof(A) - @test f.(A) isa T{Zero36193} - @test map(f, A) isa T{Zero36193} - end - end - # This should not need `zero(::Type{Zero36193})` to be defined - test(1) - Base.zero(::Type{Zero36193}) = Zero36193() - # This should not need `==(::Zero36193, ::Int)` to be defined as `iszerodefined` - # returns true. - test(Zero36193()) -end - -# structured broadcast with function returning non-number type -@test tuple.(Diagonal([1, 2])) == [(1,) (0,); (0,) (2,)] - -@testset "Broadcast with missing (#54467)" begin - select_first(x, y) = x - diag = Diagonal([1,2]) - @test select_first.(diag, missing) == diag - @test select_first.(diag, missing) isa Diagonal{Int} - @test isequal(select_first.(missing, diag), fill(missing, 2, 2)) - @test select_first.(missing, diag) isa Matrix{Missing} -end - -@testset "broadcast over structured matrices with matrix elements" begin - function standardbroadcastingtests(D, T) - M = [x for x in D] - Dsum = D .+ D - @test Dsum isa T - @test Dsum == M .+ M - Dcopy = copy.(D) - @test Dcopy isa T - @test Dcopy == D - Df = float.(D) - @test Df isa T - @test Df == D - @test eltype(eltype(Df)) <: AbstractFloat - @test (x -> (x,)).(D) == (x -> (x,)).(M) - @test (x -> 1).(D) == ones(Int,size(D)) - @test all(==(2), ndims.(D)) - @test_throws MethodError size.(D) - end - @testset "Diagonal" begin - @testset "square" begin - A = [1 3; 2 4] - D = Diagonal([A, A]) - standardbroadcastingtests(D, Diagonal) - @test sincos.(D) == sincos.(Matrix{eltype(D)}(D)) - M = [x for x in D] - @test cos.(D) == cos.(M) - end - - @testset "different-sized square blocks" begin - D = Diagonal([ones(3,3), fill(3.0,2,2)]) - standardbroadcastingtests(D, Diagonal) - end - - @testset "rectangular blocks" begin - D = Diagonal([ones(Bool,3,4), ones(Bool,2,3)]) - standardbroadcastingtests(D, Diagonal) - end - - @testset "incompatible sizes" begin - A = reshape(1:12, 4, 3) - B = reshape(1:12, 3, 4) - D1 = Diagonal(fill(A, 2)) - D2 = Diagonal(fill(B, 2)) - @test_throws DimensionMismatch D1 .+ D2 - end - end - @testset "Bidiagonal" begin - A = [1 3; 2 4] - B = Bidiagonal(fill(A,3), fill(A,2), :U) - standardbroadcastingtests(B, Bidiagonal) - end - @testset "UpperTriangular" begin - A = [1 3; 2 4] - U = UpperTriangular([(i+j)*A for i in 1:3, j in 1:3]) - standardbroadcastingtests(U, UpperTriangular) - end - @testset "SymTridiagonal" begin - m = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) - S = SymTridiagonal(fill(m,4), fill(m,3)) - standardbroadcastingtests(S, SymTridiagonal) - end -end - -end diff --git a/stdlib/LinearAlgebra/test/svd.jl b/stdlib/LinearAlgebra/test/svd.jl deleted file mode 100644 index 9e8b5d5cda7d2..0000000000000 --- a/stdlib/LinearAlgebra/test/svd.jl +++ 
/dev/null @@ -1,297 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestSVD - -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasComplex, BlasFloat, BlasReal, QRPivoted - -@testset "Simple svdvals / svd tests" begin - ≊(x,y) = isapprox(x,y,rtol=1e-15) - - m = [2, 0] - @test @inferred(svdvals(m)) ≊ [2] - @test @inferred(svdvals!(float(m))) ≊ [2] - for sf in (@inferred(svd(m)), @inferred(svd!(float(m)))) - @test sf.S ≊ [2] - @test sf.U'sf.U ≊ [1] - @test sf.Vt'sf.Vt ≊ [1] - @test sf.U*Diagonal(sf.S)*sf.Vt' ≊ m - end - F = @inferred svd(m, full=true) - @test size(F.U) == (2, 2) - @test F.S ≊ [2] - @test F.U'F.U ≊ Matrix(I, 2, 2) - @test F.Vt'*F.Vt ≊ [1] - @test @inferred(svdvals(3:4)) ≊ [5] - A = Matrix(1.0I, 2, 2) - Z = svd(Hermitian(A); full=true) - @test Z.S ≈ ones(2) - @test Z.U'Z.U ≈ I(2) - - m1 = [2 0; 0 0] - m2 = [2 -2; 1 1]/sqrt(2) - m2c = Complex.([2 -2; 1 1]/sqrt(2)) - @test @inferred(svdvals(m1)) ≊ [2, 0] - @test @inferred(svdvals(m2)) ≊ [2, 1] - @test @inferred(svdvals(m2c)) ≊ [2, 1] - - sf1 = @inferred svd(m1) - sf2 = @inferred svd(m2) - @test sf1.S ≊ [2, 0] - @test sf2.S ≊ [2, 1] - # U & Vt are unitary - I22 = Matrix(I, 2, 2) - @test sf1.U*sf1.U' ≊ I22 - @test sf1.Vt*sf1.Vt' ≊ I22 - @test sf2.U*sf2.U' ≊ I22 - @test sf2.Vt*sf2.Vt' ≊ I22 - # SVD not uniquely determined, so just test we can reconstruct the - # matrices from the factorization as expected. - @test sf1.U*Diagonal(sf1.S)*sf1.Vt' ≊ m1 - @test sf2.U*Diagonal(sf2.S)*sf2.Vt' ≊ m2 - - @test ldiv!([0., 0.], svd(Matrix(I, 2, 2)), [1., 1.]) ≊ [1., 1.] - @test inv(svd(Matrix(I, 2, 2))) ≈ I - @test inv(svd([1 2; 3 4])) ≈ [-2.0 1.0; 1.5 -0.5] - @test inv(svd([1 0 1; 0 1 0])) ≈ [0.5 0.0; 0.0 1.0; 0.5 0.0] - @test_throws SingularException inv(svd([0 0; 0 0])) - @test inv(svd([1+2im 3+4im; 5+6im 7+8im])) ≈ [-0.5 + 0.4375im 0.25 - 0.1875im; 0.375 - 0.3125im -0.125 + 0.0625im] -end - -n = 10 - -Random.seed!(1234321) - -areal = randn(n,n)/2 -aimg = randn(n,n)/2 - -@testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, Int) - aa = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? 
complex.(areal, aimg) : areal) - asym = aa' + aa # symmetric indefinite - for a in (aa, view(aa, 1:n, 1:n)) - usv = svd(a) - @testset "singular value decomposition" begin - @test usv.S === svdvals(usv) - @test usv.U * (Diagonal(usv.S) * usv.Vt) ≈ a - @test convert(Array, usv) ≈ a - @test usv.Vt' ≈ usv.V - @test_throws FieldError usv.Z - b = rand(eltya,n) - @test usv\b ≈ a\b - @test Base.propertynames(usv) == (:U, :S, :V, :Vt) - @test size(usv) == size(a) - if eltya <: BlasFloat - svdz = svd!(Matrix{eltya}(undef,0,0)) - @test svdz.U ≈ Matrix{eltya}(I, 0, 0) - @test svdz.S ≈ real(zeros(eltya,0)) - @test svdz.Vt ≈ Matrix{eltya}(I, 0, 0) - end - end - @testset "singular value decomposition of adjoint/transpose" begin - for transform in (adjoint, transpose) - usv = svd(transform(a)) - @test usv.S === svdvals(usv) - @test usv.U * (Diagonal(usv.S) * usv.Vt) ≈ transform(a) - @test convert(Array, usv) ≈ transform(a) - @test usv.Vt' ≈ usv.V - @test_throws FieldError usv.Z - b = rand(eltya,n) - @test usv\b ≈ transform(a)\b - end - end - @testset "Generalized svd" begin - a_svd = a[1:div(n, 2), :] - gsvd = svd(a,a_svd) - @test Base.propertynames(gsvd) == (:alpha, :beta, :vals, :S, :D1, :D2, :R0, :U, :V, :Q, :a, :b, :k, :l, :R) - @test gsvd.U*gsvd.D1*gsvd.R*gsvd.Q' ≈ a - @test gsvd.V*gsvd.D2*gsvd.R*gsvd.Q' ≈ a_svd - @test usv.Vt' ≈ usv.V - @test_throws FieldError usv.Z - @test_throws FieldError gsvd.Z - @test gsvd.vals ≈ svdvals(a,a_svd) - α = eltya == Int ? -1 : rand(eltya) - β = svd(α) - @test β.S == [abs(α)] - @test svdvals(α) == abs(α) - u,v,q,d1,d2,r0 = svd(a,a_svd) - @test u ≈ gsvd.U - @test v ≈ gsvd.V - @test d1 ≈ gsvd.D1 - @test d2 ≈ gsvd.D2 - @test q ≈ gsvd.Q - @test gsvd.a.^2 + gsvd.b.^2 ≈ fill(1, length(gsvd.a)) - @test gsvd.alpha.^2 + gsvd.beta.^2 ≈ ones(eltya, length(gsvd.a)) - #testing the other layout for D1 & D2 - b = rand(eltya,n,2*n) - c = rand(eltya,n,2*n) - gsvd = svd(b,c) - @test gsvd.U*gsvd.D1*gsvd.R*gsvd.Q' ≈ b - @test gsvd.V*gsvd.D2*gsvd.R*gsvd.Q' ≈ c - # AbstractMatrix svd - T = Tridiagonal(a) - asvd = svd(T, a) - @test asvd.U*asvd.D1*asvd.R*asvd.Q' ≈ T - @test asvd.V*asvd.D2*asvd.R*asvd.Q' ≈ a - @test all(≈(1), svdvals(T, T)) - end - end - @testset "singular value decomposition of AbstractMatrix" begin - A = Tridiagonal(aa) - F = svd(A) - @test Matrix(F) ≈ A - @test svdvals(A) ≈ F.S - end - @testset "singular value decomposition of Hermitian/real-Symmetric" begin - for T in (eltya <: Real ? 
(Symmetric, Hermitian) : (Hermitian,)) - usv = svd(T(asym)) - @test usv.S === svdvals(usv) - @test usv.U * (Diagonal(usv.S) * usv.Vt) ≈ T(asym) - @test convert(Array, usv) ≈ T(asym) - @test usv.Vt' ≈ usv.V - @test_throws FieldError usv.Z - b = rand(eltya,n) - @test usv\b ≈ T(asym)\b - end - end - if eltya <: LinearAlgebra.BlasReal - @testset "Number input" begin - x, y = randn(eltya, 2) - @test svd(x) == svd(fill(x, 1, 1)) - @test svdvals(x) == first(svdvals(fill(x, 1, 1))) - @test svd(x, y) == svd(fill(x, 1, 1), fill(y, 1, 1)) - @test svdvals(x, y) ≈ first(svdvals(fill(x, 1, 1), fill(y, 1, 1))) - end - end - if eltya != Int - @testset "isequal, ==, and hash" begin - x, y = rand(eltya), convert(eltya, NaN) - Fx, Fy = svd(x), svd(y) - @test Fx == Fx - @test !(Fy == Fy) - @test isequal(Fy, Fy) - @test hash(Fx) == hash(Fx) - @test hash(Fx, UInt(1)) == hash(Fx, UInt(1)) - @test hash(Fy) == hash(Fy) - @test hash(Fy, UInt(1)) == hash(Fy, UInt(1)) - end - end -end - - - -@testset "SVD Algorithms" begin - ≊(x,y) = isapprox(x,y,rtol=1e-15) - - x = [0.1 0.2; 0.3 0.4] - - for alg in [LinearAlgebra.QRIteration(), LinearAlgebra.DivideAndConquer()] - sx1 = svd(x, alg = alg) - @test sx1.U * Diagonal(sx1.S) * sx1.Vt ≊ x - @test sx1.V * sx1.Vt ≊ I - @test sx1.U * sx1.U' ≊ I - @test all(sx1.S .≥ 0) - - sx2 = svd!(copy(x), alg = alg) - @test sx2.U * Diagonal(sx2.S) * sx2.Vt ≊ x - @test sx2.V * sx2.Vt ≊ I - @test sx2.U * sx2.U' ≊ I - @test all(sx2.S .≥ 0) - end -end - -@testset "REPL printing of SVD" begin - svdd = svd(randn(3, 3)) - svdstring = sprint((t, s) -> show(t, "text/plain", s), svdd) - ustring = sprint((t, s) -> show(t, "text/plain", s), svdd.U) - sstring = sprint((t, s) -> show(t, "text/plain", s), svdd.S) - vtstring = sprint((t, s) -> show(t, "text/plain", s), svdd.Vt) - @test svdstring == "$(summary(svdd))\nU factor:\n$ustring\nsingular values:\n$sstring\nVt factor:\n$vtstring" -end - -@testset "REPL printing of Generalized SVD" begin - a = randn(3, 3) - b = randn(3, 3) - svdd = svd(a, b) - svdstring = sprint((t, s) -> show(t, "text/plain", s), svdd) - ustring = sprint((t, s) -> show(t, "text/plain", s), svdd.U) - qstring = sprint((t, s) -> show(t, "text/plain", s), svdd.Q) - vstring = sprint((t, s) -> show(t, "text/plain", s), svdd.V) - d1string = sprint((t, s) -> show(t, "text/plain", s), svdd.D1) - d2string = sprint((t, s) -> show(t, "text/plain", s), svdd.D2) - r0string = sprint((t, s) -> show(t, "text/plain", s), svdd.R0) - @test svdstring == "$(summary(svdd))\nU factor:\n$ustring\nV factor:\n$vstring\nQ factor:\n$qstring\nD1 factor:\n$d1string\nD2 factor:\n$d2string\nR0 factor:\n$r0string" -end - -@testset "c-tor with varying input eltypes" begin - A = randn(Float64, 10, 10) - U, S, V = svd(A) - Ut = convert.(Float16, U) - Vt = convert.(Float32, V) - svdc = SVD{ComplexF32}(Ut, S, Vt) - @test svdc isa SVD{ComplexF32} - Uc, Sc, Vc = svdc - @test Uc * diagm(0=>Sc) * transpose(V) ≈ complex.(A) rtol=1e-3 -end - -@testset "Issue 40944. 
ldiV!(SVD) should update rhs" begin - F = svd(randn(2, 2)) - b = randn(2) - x = ldiv!(F, b) - @test x === b -end - -@testset "adjoint of SVD" begin - n = 5 - B = randn(5, 2) - - @testset "size(b)=$(size(b))" for b in (B[:, 1], B) - @testset "size(A)=$(size(A))" for A in ( - randn(n, n), - # Wide problems become minimum norm (in x) problems similarly to LQ - randn(n + 2, n), - randn(n - 2, n), - complex.(randn(n, n), randn(n, n))) - - F = svd(A) - x = F'\b - @test x ≈ A'\b - @test length(size(x)) == length(size(b)) - end - end -end - -@testset "Float16" begin - A = Float16[4. 12. -16.; 12. 37. -43.; -16. -43. 98.] - B = svd(A) - B32 = svd(Float32.(A)) - @test B isa SVD{Float16, Float16, Matrix{Float16}} - @test B.U isa Matrix{Float16} - @test B.Vt isa Matrix{Float16} - @test B.S isa Vector{Float16} - @test B.U ≈ B32.U - @test B.Vt ≈ B32.Vt - @test B.S ≈ B32.S - C = Symmetric(A'A) - D = svd(C) - D32 = svd(Symmetric(Float32.(C))) - @test D isa SVD{Float16, Float16, Matrix{Float16}} - @test D.U isa Matrix{Float16} - @test D.Vt isa Matrix{Float16} - @test D.S isa Vector{Float16} - @test D.U ≈ D32.U - @test D.Vt ≈ D32.Vt - @test D.S ≈ D32.S - A = randn(ComplexF16, 3, 3) - E = Hermitian(A'A) - F = svd(E) - F32 = svd(Hermitian(ComplexF32.(E))) - @test F isa SVD{ComplexF16, Float16, Matrix{ComplexF16}, Vector{Float16}} - @test F.U isa Matrix{ComplexF16} - @test F.Vt isa Matrix{ComplexF16} - @test F.S isa Vector{Float16} - @test F.U ≈ F32.U - @test F.Vt ≈ F32.Vt - @test F.S ≈ F32.S -end - -end # module TestSVD diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl deleted file mode 100644 index edd3af483b5f6..0000000000000 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ /dev/null @@ -1,1181 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestSymmetric - -using Test, LinearAlgebra, Random - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") - -isdefined(Main, :Quaternions) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Quaternions.jl")) -using .Main.Quaternions - -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays - -Random.seed!(1010) - -@testset "Pauli σ-matrices: $σ" for σ in map(Hermitian, - Any[ [1 0; 0 1], [0 1; 1 0], [0 -im; im 0], [1 0; 0 -1] ]) - @test ishermitian(σ) -end - -@testset "Two-dimensional Euler formula for Hermitian" begin - @test cis(Hermitian([π 0; 0 π])) ≈ -I -end - -@testset "Hermitian matrix exponential/log" begin - A1 = randn(4,4) + im*randn(4,4) - A2 = A1 + A1' - @test exp(A2) ≈ exp(Hermitian(A2)) - @test cis(A2) ≈ cis(Hermitian(A2)) - @test log(A2) ≈ log(Hermitian(A2)) - A3 = A1 * A1' # posdef - @test exp(A3) ≈ exp(Hermitian(A3)) - @test cis(A3) ≈ cis(Hermitian(A3)) - @test log(A3) ≈ log(Hermitian(A3)) - - A1 = randn(4,4) - A3 = A1 * A1' - A4 = A1 + transpose(A1) - @test exp(A4) ≈ exp(Symmetric(A4)) - @test log(A3) ≈ log(Symmetric(A3)) - @test log(A3) ≈ log(Hermitian(A3)) -end - -@testset "Core functionality" begin - n = 10 - areal = randn(n,n)/2 - aimg = randn(n,n)/2 - @testset for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) - a = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? 
complex.(areal, aimg) : areal) - asym = transpose(a) + a # symmetric indefinite - aherm = a' + a # Hermitian indefinite - apos = a' * a # Hermitian positive definite - aposs = apos + transpose(apos) # Symmetric positive definite - ε = εa = eps(abs(float(one(eltya)))) - - x = randn(n) - y = randn(n) - b = randn(n,n)/2 - x = eltya == Int ? rand(1:7, n) : convert(Vector{eltya}, eltya <: Complex ? complex.(x, zeros(n)) : x) - y = eltya == Int ? rand(1:7, n) : convert(Vector{eltya}, eltya <: Complex ? complex.(y, zeros(n)) : y) - b = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(b, zeros(n,n)) : b) - @testset "basic ops" begin - @testset "constructor" begin - @test Symmetric(Symmetric(asym, :U)) === Symmetric(asym, :U) - @test Hermitian(Hermitian(aherm, :U)) === Hermitian(aherm, :U) - @test Symmetric(Symmetric(asym, :U), :U) === Symmetric(asym, :U) - @test Hermitian(Hermitian(aherm, :U), :U) === Hermitian(aherm, :U) - @test_throws ArgumentError Symmetric(Symmetric(asym, :U), :L) - @test_throws ArgumentError Hermitian(Hermitian(aherm, :U), :L) - - @test_throws ArgumentError Symmetric(asym, :R) - @test_throws ArgumentError Hermitian(asym, :R) - - @test_throws MethodError Symmetric{eltya,typeof(asym)}(asym, :L) - @test_throws MethodError Hermitian{eltya,typeof(aherm)}(aherm, :L) - - # mixed cases with Hermitian/Symmetric - if eltya <: Real - @test Symmetric(Hermitian(aherm, :U)) === Symmetric(aherm, :U) - @test Hermitian(Symmetric(asym, :U)) === Hermitian(asym, :U) - @test Symmetric(Hermitian(aherm, :U), :U) === Symmetric(aherm, :U) - @test Hermitian(Symmetric(asym, :U), :U) === Hermitian(asym, :U) - @test_throws ArgumentError Symmetric(Hermitian(aherm, :U), :L) - @test_throws ArgumentError Hermitian(Symmetric(aherm, :U), :L) - end - end - @testset "diag" begin - D = Diagonal(x) - DM = Matrix(D) - B = diagm(-1 => x, 1 => x) - for uplo in (:U, :L) - @test diag(Symmetric(D, uplo))::Vector == x - @test diag(Hermitian(D, uplo))::Vector == real(x) - @test isdiag(Symmetric(DM, uplo)) - @test isdiag(Hermitian(DM, uplo)) - @test !isdiag(Symmetric(B, uplo)) - @test !isdiag(Hermitian(B, uplo)) - end - end - @testset "similar" begin - @test isa(similar(Symmetric(asym)), Symmetric{eltya}) - @test isa(similar(Hermitian(aherm)), Hermitian{eltya}) - @test isa(similar(Symmetric(asym), Int), Symmetric{Int}) - @test isa(similar(Hermitian(aherm), Int), Hermitian{Int}) - @test isa(similar(Symmetric(asym), (3,2)), Matrix{eltya}) - @test isa(similar(Hermitian(aherm), (3,2)), Matrix{eltya}) - @test isa(similar(Symmetric(asym), Int, (3,2)), Matrix{Int}) - @test isa(similar(Hermitian(aherm), Int, (3,2)), Matrix{Int}) - end - - @testset "Array/Matrix constructor from Symmetric/Hermitian" begin - @test asym == Matrix(Symmetric(asym)) == Array(Symmetric(asym)) - @test aherm == Matrix(Hermitian(aherm)) == Array(Hermitian(aherm)) - end - - @testset "parent" begin - @test asym === parent(Symmetric(asym)) - @test aherm === parent(Hermitian(aherm)) - end - # Unary minus for Symmetric/Hermitian matrices - @testset "Unary minus for Symmetric/Hermitian matrices" begin - @test (-Symmetric(asym))::typeof(Symmetric(asym)) == -asym - @test (-Hermitian(aherm))::typeof(Hermitian(aherm)) == -aherm - @test (-Symmetric([true true; false false]))::Symmetric{Int,Matrix{Int}} == [-1 -1; -1 0] - @test (-Hermitian([true false; true false]))::Hermitian{Int,Matrix{Int}} == [-1 0; 0 0] - end - - @testset "Addition and subtraction for Symmetric/Hermitian matrices" begin - for f in (+, -) - @test 
(f(Symmetric(asym), Symmetric(aposs)))::typeof(Symmetric(asym)) == f(asym, aposs) - @test (f(Hermitian(aherm), Hermitian(apos)))::typeof(Hermitian(aherm)) == f(aherm, apos) - @test (f(Symmetric(real(asym)), Hermitian(aherm)))::typeof(Hermitian(aherm)) == f(real(asym), aherm) - @test (f(Hermitian(aherm), Symmetric(real(asym))))::typeof(Hermitian(aherm)) == f(aherm, real(asym)) - @test (f(Symmetric(asym), Hermitian(aherm))) == f(asym, aherm) - @test (f(Hermitian(aherm), Symmetric(asym))) == f(aherm, asym) - end - end - - @testset "getindex and unsafe_getindex" begin - @test aherm[1,1] == Hermitian(aherm)[1,1] - @test asym[1,1] == Symmetric(asym)[1,1] - @test Symmetric(asym)[1:2,1:2] == asym[1:2,1:2] - @test Hermitian(aherm)[1:2,1:2] == aherm[1:2,1:2] - end - - @testset "conversion" begin - @test Symmetric(asym) == convert(Symmetric,Symmetric(asym)) - if eltya <: Real - typs = [Float16,Float32,Float64] - for typ in typs - @test Symmetric(convert(Matrix{typ},asym)) == convert(Symmetric{typ,Matrix{typ}},Symmetric(asym)) - end - end - if eltya <: Complex - typs = [ComplexF32,ComplexF64] - for typ in typs - @test Symmetric(convert(Matrix{typ},asym)) == convert(Symmetric{typ,Matrix{typ}},Symmetric(asym)) - @test Hermitian(convert(Matrix{typ},aherm)) == convert(Hermitian{typ,Matrix{typ}},Hermitian(aherm)) - end - end - @test Symmetric{eltya, Matrix{eltya}}(Symmetric(asym, :U)) === Symmetric(asym, :U) - @test Hermitian{eltya, Matrix{eltya}}(Hermitian(aherm, :U)) === Hermitian(aherm, :U) - end - - @testset "issymmetric, ishermitian" begin - @test issymmetric(Symmetric(asym)) - @test ishermitian(Hermitian(aherm)) - if eltya <: Real - @test ishermitian(Symmetric(asym)) - @test issymmetric(Hermitian(asym)) - elseif eltya <: Complex - # test that zero imaginary component is - # handled properly - @test ishermitian(Symmetric(b + b')) - end - end - - @testset "tril/triu" begin - for (op, validks) in ( - (triu, (-n + 1):(n + 1)), - (tril, (-n - 1):(n - 1)) ) - for di in validks - @test op(Symmetric(asym), di) == op(asym, di) - @test op(Hermitian(aherm), di) == op(aherm, di) - @test op(Symmetric(asym, :L), di) == op(asym, di) - @test op(Hermitian(aherm, :L), di) == op(aherm, di) - end - end - end - - @testset "transpose, adjoint" begin - S = Symmetric(asym) - H = Hermitian(aherm) - @test transpose(S) === S == asym - @test adjoint(H) === H == aherm - if eltya <: Real - @test adjoint(S) === S == asym - @test transpose(H) === H == aherm - else - @test adjoint(S) == Symmetric(conj(asym)) - @test transpose(H) == Hermitian(copy(transpose(aherm))) - end - @test copy(adjoint(H)) == copy(aherm) - @test copy(transpose(S)) == copy(asym) - end - - @testset "real, imag" begin - S = Symmetric(asym) - H = Hermitian(aherm) - @test issymmetric(real(S)) - @test ishermitian(real(H)) - if eltya <: Real - @test real(S) === S == asym - @test real(H) === H == aherm - elseif eltya <: Complex - @test issymmetric(imag(S)) - @test !ishermitian(imag(H)) - end - end - - end - - @testset "linalg unary ops" begin - @testset "tr" begin - @test tr(asym) ≈ tr(Symmetric(asym)) - @test tr(aherm) ≈ tr(Hermitian(aherm)) - end - - @testset "isposdef[!]" begin - @test isposdef(Symmetric(asym)) == isposdef(asym) - @test isposdef(Symmetric(aposs)) == isposdef(aposs) == true - @test isposdef(Hermitian(aherm)) == isposdef(aherm) - @test isposdef(Hermitian(apos)) == isposdef(apos) == true - if eltya != Int #chol! 
won't work with Int - @test isposdef!(Symmetric(copy(asym))) == isposdef(asym) - @test isposdef!(Symmetric(copy(aposs))) == isposdef(aposs) == true - @test isposdef!(Hermitian(copy(aherm))) == isposdef(aherm) - @test isposdef!(Hermitian(copy(apos))) == isposdef(apos) == true - end - end - - @testset "$f" for f in (det, logdet, logabsdet) - for uplo in (:U, :L) - @test all(f(apos) .≈ f(Hermitian(apos, uplo))) - @test all(f(aposs) .≈ f(Symmetric(aposs, uplo))) - if f != logdet - @test all(f(aherm) .≈ f(Hermitian(aherm, uplo))) - @test all(f(asym) .≈ f(Symmetric(asym, uplo))) - end - end - end - - @testset "inversion" begin - for uplo in (:U, :L) - @test inv(Symmetric(asym, uplo))::Symmetric ≈ inv(asym) - @test inv(Hermitian(aherm, uplo))::Hermitian ≈ inv(aherm) - @test inv(Symmetric(a, uplo))::Symmetric ≈ inv(Matrix(Symmetric(a, uplo))) - if eltya <: Real - @test inv(Hermitian(a, uplo))::Hermitian ≈ inv(Matrix(Hermitian(a, uplo))) - end - end - if eltya <: LinearAlgebra.BlasComplex - @testset "inverse edge case with complex Hermitian" begin - # Hermitian matrix, where inv(lu(A)) generates non-real diagonal elements - for T in (ComplexF32, ComplexF64) - # data should have nonvanishing imaginary parts on the diagonal - M = T[0.279982+0.988074im 0.770011+0.870555im - 0.138001+0.889728im 0.177242+0.701413im] - H = Hermitian(M) - A = Matrix(H) - @test inv(H) ≈ inv(A) - @test ishermitian(Matrix(inv(H))) - end - end - end - if eltya <: AbstractFloat - @testset "inv should error with NaNs/Infs" begin - h = Hermitian(fill(eltya(NaN), 2, 2)) - @test_throws ArgumentError inv(h) - s = Symmetric(fill(eltya(NaN), 2, 2)) - @test_throws ArgumentError inv(s) - end - end - end - - # Revisit when implemented in julia - if eltya != BigFloat - @testset "cond" begin - if eltya <: Real #svdvals! has no method for Symmetric{Complex} - @test cond(Symmetric(asym)) ≈ cond(asym) - end - @test cond(Hermitian(aherm)) ≈ cond(aherm) - end - - @testset "symmetric eigendecomposition" begin - if eltya <: Real # the eigenvalues are only real and ordered for Hermitian matrices - d, v = eigen(asym) - @test asym*v[:,1] ≈ d[1]*v[:,1] - @test v*Diagonal(d)*transpose(v) ≈ asym - @test isequal(eigvals(asym[1]), eigvals(asym[1:1,1:1])[1]) - @test abs.(eigen(Symmetric(asym), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) - @test abs.(eigen(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) - @test eigvals(Symmetric(asym), 1:2) ≈ d[1:2] - @test eigvals(Symmetric(asym), sortby= x -> -x) ≈ eigvals(eigen(Symmetric(asym), sortby = x -> -x)) - @test eigvals(Symmetric(asym), d[1] - 1, (d[2] + d[3])/2) ≈ d[1:2] - # eigen doesn't support Symmetric{Complex} - @test Matrix(eigen(asym)) ≈ asym - @test eigvecs(Symmetric(asym)) ≈ eigvecs(asym) - end - - d, v = eigen(aherm) - @test aherm*v[:,1] ≈ d[1]*v[:,1] - @test v*Diagonal(d)*v' ≈ aherm - @test isequal(eigvals(aherm[1]), eigvals(aherm[1:1,1:1])[1]) - @test abs.(eigen(Hermitian(aherm), 1:2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) - @test abs.(eigen(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2).vectors'v[:,1:2]) ≈ Matrix(I, 2, 2) - @test eigvals(Hermitian(aherm), 1:2) ≈ d[1:2] - @test eigvals(Hermitian(aherm), sortby= x -> -x) ≈ eigvals(eigen(Hermitian(aherm), sortby = x -> -x)) - @test eigvals(Hermitian(aherm), d[1] - 1, (d[2] + d[3])/2) ≈ d[1:2] - @test Matrix(eigen(aherm)) ≈ aherm - @test eigvecs(Hermitian(aherm)) ≈ eigvecs(aherm) - - # relation to svdvals - if eltya <: Real #svdvals! 
has no method for Symmetric{Complex} - @test sum(sort(abs.(eigvals(Symmetric(asym))))) == sum(sort(svdvals(Symmetric(asym)))) - end - @test sum(sort(abs.(eigvals(Hermitian(aherm))))) == sum(sort(svdvals(Hermitian(aherm)))) - end - - @testset "rank" begin - let A = a[:,1:5]*a[:,1:5]' - # Make sure A is Hermitian even in the presence of rounding error - # xianyi/OpenBLAS#729 - A = (A + A') / 2 - @test rank(A) == rank(Hermitian(A)) - end - end - - @testset "pow" begin - # Integer power - @test (asym)^2 ≈ (Symmetric(asym)^2)::Symmetric - @test (asym)^-2 ≈ (Symmetric(asym)^-2)::Symmetric - @test (aposs)^2 ≈ (Symmetric(aposs)^2)::Symmetric - @test (aherm)^2 ≈ (Hermitian(aherm)^2)::Hermitian - @test (aherm)^-2 ≈ (Hermitian(aherm)^-2)::Hermitian - @test (apos)^2 ≈ (Hermitian(apos)^2)::Hermitian - # integer floating point power - @test (asym)^2.0 ≈ (Symmetric(asym)^2.0)::Symmetric - @test (asym)^-2.0 ≈ (Symmetric(asym)^-2.0)::Symmetric - @test (aposs)^2.0 ≈ (Symmetric(aposs)^2.0)::Symmetric - @test (aherm)^2.0 ≈ (Hermitian(aherm)^2.0)::Hermitian - @test (aherm)^-2.0 ≈ (Hermitian(aherm)^-2.0)::Hermitian - @test (apos)^2.0 ≈ (Hermitian(apos)^2.0)::Hermitian - # non-integer floating point power - @test (asym)^2.5 ≈ (Symmetric(asym)^2.5)::Symmetric - @test (asym)^-2.5 ≈ (Symmetric(asym)^-2.5)::Symmetric - @test (aposs)^2.5 ≈ (Symmetric(aposs)^2.5)::Symmetric - @test (aherm)^2.5 ≈ (Hermitian(aherm)^2.5)#::Hermitian - @test (aherm)^-2.5 ≈ (Hermitian(aherm)^-2.5)#::Hermitian - @test (apos)^2.5 ≈ (Hermitian(apos)^2.5)::Hermitian - end - end - end - - @testset "linalg binary ops" begin - @testset "mat * vec" begin - @test Symmetric(asym)*x+y ≈ asym*x+y - # testing fallbacks for transpose-vector * transpose(SymHerm) - xadj = transpose(x) - @test xadj * transpose(Symmetric(asym)) ≈ xadj * asym - @test x' * Symmetric(asym) ≈ x' * asym - - @test Hermitian(aherm)*x+y ≈ aherm*x+y - # testing fallbacks for adjoint-vector * SymHerm' - xadj = x' - @test x' * Hermitian(aherm) ≈ x' * aherm - @test xadj * Hermitian(aherm)' ≈ xadj * aherm - end - - @testset "mat * mat" begin - C = zeros(eltya,n,n) - @test Hermitian(aherm) * a ≈ aherm * a - @test a * Hermitian(aherm) ≈ a * aherm - # rectangular multiplication - @test [a; a] * Hermitian(aherm) ≈ [a; a] * aherm - @test Hermitian(aherm) * [a a] ≈ aherm * [a a] - @test Hermitian(aherm) * Hermitian(aherm) ≈ aherm*aherm - @test_throws DimensionMismatch Hermitian(aherm) * Vector{eltya}(undef, n+1) - LinearAlgebra.mul!(C,a,Hermitian(aherm)) - @test C ≈ a*aherm - - @test Symmetric(asym) * Symmetric(asym) ≈ asym*asym - @test Symmetric(asym) * a ≈ asym * a - @test a * Symmetric(asym) ≈ a * asym - # rectangular multiplication - @test Symmetric(asym) * [a a] ≈ asym * [a a] - @test [a; a] * Symmetric(asym) ≈ [a; a] * asym - @test_throws DimensionMismatch Symmetric(asym) * Vector{eltya}(undef, n+1) - LinearAlgebra.mul!(C,a,Symmetric(asym)) - @test C ≈ a*asym - - tri_b = UpperTriangular(triu(b)) - @test Array(transpose(Hermitian(aherm)) * tri_b) ≈ transpose(aherm) * Array(tri_b) - @test Array(tri_b * transpose(Hermitian(aherm))) ≈ Array(tri_b) * transpose(aherm) - @test Array(Hermitian(aherm)' * tri_b) ≈ aherm' * Array(tri_b) - @test Array(tri_b * Hermitian(aherm)') ≈ Array(tri_b) * aherm' - - @test Array(transpose(Symmetric(asym)) * tri_b) ≈ transpose(asym) * Array(tri_b) - @test Array(tri_b * transpose(Symmetric(asym))) ≈ Array(tri_b) * transpose(asym) - @test Array(Symmetric(asym)' * tri_b) ≈ asym' * Array(tri_b) - @test Array(tri_b * Symmetric(asym)') ≈ Array(tri_b) * asym' - end - 
@testset "solver" begin - @test Hermitian(aherm)\x ≈ aherm\x - @test Hermitian(aherm)\b ≈ aherm\b - @test Symmetric(asym)\x ≈ asym\x - @test Symmetric(asym)\b ≈ asym\b - @test Hermitian(Diagonal(aherm))\x ≈ Diagonal(aherm)\x - @test Hermitian(Matrix(Diagonal(aherm)))\b ≈ Diagonal(aherm)\b - @test Symmetric(Diagonal(asym))\x ≈ Diagonal(asym)\x - @test Symmetric(Matrix(Diagonal(asym)))\b ≈ Diagonal(asym)\b - end - end - @testset "generalized dot product" begin - for uplo in (:U, :L) - @test dot(x, Hermitian(aherm, uplo), y) ≈ dot(x, Hermitian(aherm, uplo)*y) ≈ dot(x, Matrix(Hermitian(aherm, uplo)), y) - @test dot(x, Hermitian(aherm, uplo), x) ≈ dot(x, Hermitian(aherm, uplo)*x) ≈ dot(x, Matrix(Hermitian(aherm, uplo)), x) - end - @test dot(x, Hermitian(Diagonal(a)), y) ≈ dot(x, Hermitian(Diagonal(a))*y) ≈ dot(x, Matrix(Hermitian(Diagonal(a))), y) - @test dot(x, Hermitian(Diagonal(a)), x) ≈ dot(x, Hermitian(Diagonal(a))*x) ≈ dot(x, Matrix(Hermitian(Diagonal(a))), x) - if eltya <: Real - for uplo in (:U, :L) - @test dot(x, Symmetric(aherm, uplo), y) ≈ dot(x, Symmetric(aherm, uplo)*y) ≈ dot(x, Matrix(Symmetric(aherm, uplo)), y) - @test dot(x, Symmetric(aherm, uplo), x) ≈ dot(x, Symmetric(aherm, uplo)*x) ≈ dot(x, Matrix(Symmetric(aherm, uplo)), x) - end - end - end - - @testset "dot product of symmetric and Hermitian matrices" begin - for mtype in (Symmetric, Hermitian) - symau = mtype(a, :U) - symal = mtype(a, :L) - msymau = Matrix(symau) - msymal = Matrix(symal) - @test_throws DimensionMismatch dot(symau, mtype(zeros(eltya, n-1, n-1))) - for eltyc in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) - creal = randn(n, n)/2 - cimag = randn(n, n)/2 - c = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(creal, cimag) : creal) - symcu = mtype(c, :U) - symcl = mtype(c, :L) - msymcu = Matrix(symcu) - msymcl = Matrix(symcl) - @test dot(symau, symcu) ≈ dot(msymau, msymcu) - @test dot(symau, symcl) ≈ dot(msymau, msymcl) - @test dot(symal, symcu) ≈ dot(msymal, msymcu) - @test dot(symal, symcl) ≈ dot(msymal, msymcl) - end - - # block matrices - blockm = [eltya == Int ? rand(1:7, 3, 3) : convert(Matrix{eltya}, eltya <: Complex ? complex.(randn(3, 3)/2, randn(3, 3)/2) : randn(3, 3)/2) for _ in 1:3, _ in 1:3] - symblockmu = mtype(blockm, :U) - symblockml = mtype(blockm, :L) - msymblockmu = Matrix(symblockmu) - msymblockml = Matrix(symblockml) - @test dot(symblockmu, symblockmu) ≈ dot(msymblockmu, msymblockmu) - @test dot(symblockmu, symblockml) ≈ dot(msymblockmu, msymblockml) - @test dot(symblockml, symblockmu) ≈ dot(msymblockml, msymblockmu) - @test dot(symblockml, symblockml) ≈ dot(msymblockml, msymblockml) - end - end - - @testset "kronecker product of symmetric and Hermitian matrices" begin - for mtype in (Symmetric, Hermitian) - symau = mtype(a, :U) - symal = mtype(a, :L) - msymau = Matrix(symau) - msymal = Matrix(symal) - for eltyc in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) - creal = randn(n, n)/2 - cimag = randn(n, n)/2 - c = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? 
complex.(creal, cimag) : creal) - symcu = mtype(c, :U) - symcl = mtype(c, :L) - msymcu = Matrix(symcu) - msymcl = Matrix(symcl) - @test kron(symau, symcu) ≈ kron(msymau, msymcu) - @test kron(symau, symcl) ≈ kron(msymau, msymcl) - @test kron(symal, symcu) ≈ kron(msymal, msymcu) - @test kron(symal, symcl) ≈ kron(msymal, msymcl) - end - end - end - end -end - -@testset "non-isbits algebra" begin - for ST in (Symmetric, Hermitian), uplo in (:L, :U) - M = Matrix{Complex{BigFloat}}(undef,2,2) - M[1,1] = rand() - M[2,2] = rand() - M[1+(uplo==:L), 1+(uplo==:U)] = rand(ComplexF64) - S = ST(M, uplo) - MS = Matrix(S) - @test real(S) == real(MS) - @test imag(S) == imag(MS) - @test conj(S) == conj(MS) - @test conj!(copy(S)) == conj(MS) - @test -S == -MS - @test S + S == MS + MS - @test S - S == MS - MS - @test S*2 == 2*S == 2*MS - @test S/2 == MS/2 - @test kron(S,S) == kron(MS,MS) - end - @testset "mixed uplo" begin - Mu = Matrix{Complex{BigFloat}}(undef,2,2) - Mu[1,1] = Mu[2,2] = 3 - Mu[1,2] = 2 + 3im - Ml = Matrix{Complex{BigFloat}}(undef,2,2) - Ml[1,1] = Ml[2,2] = 4 - Ml[2,1] = 4 + 5im - for ST in (Symmetric, Hermitian) - Su = ST(Mu, :U) - MSu = Matrix(Su) - Sl = ST(Ml, :L) - MSl = Matrix(Sl) - @test Su + Sl == Sl + Su == MSu + MSl - @test Su - Sl == -(Sl - Su) == MSu - MSl - @test kron(Su,Sl) == kron(MSu,MSl) - @test kron(Sl,Su) == kron(MSl,MSu) - end - end - @testset "non-strided" begin - @testset "diagonal" begin - for ST1 in (Symmetric, Hermitian), uplo1 in (:L, :U) - m = ST1(Matrix{BigFloat}(undef,2,2), uplo1) - m.data[1,1] = 1 - m.data[2,2] = 3 - m.data[1+(uplo1==:L), 1+(uplo1==:U)] = 2 - A = Array(m) - for ST2 in (Symmetric, Hermitian), uplo2 in (:L, :U) - id = ST2(I(2), uplo2) - @test m + id == id + m == A + id - end - end - end - @testset "unit triangular" begin - for ST1 in (Symmetric, Hermitian), uplo1 in (:L, :U) - H1 = ST1(UnitUpperTriangular(big.(rand(Int8,4,4))), uplo1) - M1 = Matrix(H1) - for ST2 in (Symmetric, Hermitian), uplo2 in (:L, :U) - H2 = ST2(UnitUpperTriangular(big.(rand(Int8,4,4))), uplo2) - @test H1 + H2 == M1 + Matrix(H2) - end - end - end - end -end - -@testset "Reverse operation on Symmetric" begin - for uplo in (:U, :L) - A = Symmetric(randn(5, 5), uplo) - @test reverse(A, dims=1) == reverse(Matrix(A), dims=1) - @test reverse(A, dims=2) == reverse(Matrix(A), dims=2) - @test reverse(A)::Symmetric == reverse(Matrix(A)) - end -end - -@testset "Reverse operation on Hermitian" begin - for uplo in (:U, :L) - A = Hermitian(randn(ComplexF64, 5, 5), uplo) - @test reverse(A, dims=1) == reverse(Matrix(A), dims=1) - @test reverse(A, dims=2) == reverse(Matrix(A), dims=2) - @test reverse(A)::Hermitian == reverse(Matrix(A)) - end -end - - -# bug identified in PR #52318: dot products of quaternionic Hermitian matrices, -# or any number type where conj(a)*conj(b) ≠ conj(a*b): -@testset "dot Hermitian quaternion #52318" begin - A, B = [Quaternion.(randn(3,3), randn(3, 3), randn(3, 3), randn(3,3)) |> t -> t + t' for i in 1:2] - @test A == Hermitian(A) && B == Hermitian(B) - @test dot(A, B) ≈ dot(Hermitian(A), Hermitian(B)) - A, B = [Quaternion.(randn(3,3), randn(3, 3), randn(3, 3), randn(3,3)) |> t -> t + transpose(t) for i in 1:2] - @test A == Symmetric(A) && B == Symmetric(B) - @test dot(A, B) ≈ dot(Symmetric(A), Symmetric(B)) -end - -# let's make sure the analogous bug will not show up with kronecker products -@testset "kron Hermitian quaternion #52318" begin - A, B = [Quaternion.(randn(3,3), randn(3, 3), randn(3, 3), randn(3,3)) |> t -> t + t' for i in 1:2] - @test A == Hermitian(A) 
&& B == Hermitian(B) - @test kron(A, B) ≈ kron(Hermitian(A), Hermitian(B)) - A, B = [Quaternion.(randn(3,3), randn(3, 3), randn(3, 3), randn(3,3)) |> t -> t + transpose(t) for i in 1:2] - @test A == Symmetric(A) && B == Symmetric(B) - @test kron(A, B) ≈ kron(Symmetric(A), Symmetric(B)) -end - -@testset "kron with symmetric/hermitian matrices of matrices" begin - M = fill(ones(2,2), 2, 2) - for W in (Symmetric, Hermitian) - for (t1, t2) in ((W(M, :U), W(M, :U)), (W(M, :U), W(M, :L)), (W(M, :L), W(M, :L))) - @test kron(t1, t2) ≈ kron(Matrix(t1), Matrix(t2)) - end - end -end - -#Issue #7647: test xsyevr, xheevr, xstevr drivers. -@testset "Eigenvalues in interval for $(typeof(Mi7647))" for Mi7647 in - (Symmetric(diagm(0 => 1.0:3.0)), - Hermitian(diagm(0 => 1.0:3.0)), - Hermitian(diagm(0 => complex(1.0:3.0))), - SymTridiagonal([1.0:3.0;], zeros(2))) - @test eigmin(Mi7647) == eigvals(Mi7647, 0.5, 1.5)[1] == 1.0 - @test eigmax(Mi7647) == eigvals(Mi7647, 2.5, 3.5)[1] == 3.0 - @test eigvals(Mi7647) == eigvals(Mi7647, 0.5, 3.5) == [1.0:3.0;] -end - -@testset "Hermitian wrapper ignores imaginary parts on diagonal" begin - A = [1.0+im 2.0; 2.0 0.0] - @test !ishermitian(A) - @test Hermitian(A)[1,1] == 1 -end - -@testset "Issue #7933" begin - A7933 = [1 2; 3 4] - B7933 = copy(A7933) - C7933 = Matrix(Symmetric(A7933)) - @test A7933 == B7933 -end - -@testset "Issues #8057 and #8058. f=$f, A=$A" for f in - (eigen, eigvals), - A in (Symmetric([0 1; 1 0]), Hermitian([0 im; -im 0])) - @test_throws ArgumentError f(A, 3, 2) - @test_throws ArgumentError f(A, 1:4) -end - -@testset "Ignore imaginary part of Hermitian diagonal" begin - A = [1.0+im 2.0; 2.0 0.0] - @test !ishermitian(A) - @test diag(Hermitian(A)) == real(diag(A)) -end - -@testset "Issue #17780" begin - a = randn(2,2) - a = a'a - b = complex.(a,a) - c = Symmetric(b) - @test conj(c) == conj(Array(c)) - cc = copy(c) - @test conj!(c) == conj(Array(cc)) - c = Hermitian(b + b') - @test conj(c) == conj(Array(c)) - cc = copy(c) - @test conj!(c) == conj(Array(cc)) -end - -@testset "Issue # 19225" begin - X = [1 -1; -1 1] - for T in (Symmetric, Hermitian) - Y = T(copy(X)) - _Y = similar(Y) - copyto!(_Y, Y) - @test _Y == Y - - W = T(copy(X), :L) - copyto!(W, Y) - @test W.data == Y.data - @test W.uplo != Y.uplo - - W[1,1] = 4 - @test W == T([4 -1; -1 1]) - @test_throws ArgumentError (W[1,2] = 2) - if T == Hermitian - @test_throws ArgumentError (W[2,2] = 3+4im) - end - - @test Y + I == T([2 -1; -1 2]) - @test Y - I == T([0 -1; -1 0]) - @test Y * I == Y - - @test Y .+ 1 == T([2 0; 0 2]) - @test Y .- 1 == T([0 -2; -2 0]) - @test Y * 2 == T([2 -2; -2 2]) - @test Y / 1 == Y - - @test T([true false; false true]) .+ true == T([2 1; 1 2]) - end -end - -@testset "Issue #21981" begin - B = complex(rand(4,4)) - B[4,1] += 1im; - @test ishermitian(Symmetric(B, :U)) - @test issymmetric(Hermitian(B, :U)) - B[4,1] = real(B[4,1]) - B[1,4] += 1im - @test ishermitian(Symmetric(B, :L)) - @test issymmetric(Hermitian(B, :L)) -end - -@testset "$HS solver with $RHS RHS - $T" for HS in (Hermitian, Symmetric), - RHS in (Hermitian, Symmetric, Diagonal, UpperTriangular, LowerTriangular), - T in (Float64, ComplexF64) - D = rand(T, 10, 10); D = D'D - A = HS(D) - B = RHS(D) - @test A\B ≈ Matrix(A)\Matrix(B) -end - -@testset "inversion of Hilbert matrix" begin - for T in (Float64, ComplexF64) - H = T[1/(i + j - 1) for i in 1:8, j in 1:8] - @test norm(inv(Symmetric(H))*(H*fill(1., 8)) .- 1) ≈ 0 atol = 1e-5 - @test norm(inv(Hermitian(H))*(H*fill(1., 8)) .- 1) ≈ 0 atol = 1e-5 - end -end - 
-@testset "eigendecomposition Algorithms" begin - using LinearAlgebra: DivideAndConquer, QRIteration, RobustRepresentations - for T in (Float64, ComplexF64, Float32, ComplexF32) - n = 4 - A = T <: Real ? Symmetric(randn(T, n, n)) : Hermitian(randn(T, n, n)) - d, v = eigen(A) - for alg in (DivideAndConquer(), QRIteration(), RobustRepresentations()) - @test (@inferred eigvals(A, alg)) ≈ d - d2, v2 = @inferred eigen(A, alg) - @test d2 ≈ d - @test A * v2 ≈ v2 * Diagonal(d2) - end - end -end - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays - -@testset "Conversion to AbstractArray" begin - # tests corresponding to #34995 - immutablemat = ImmutableArray([1 2 3; 4 5 6; 7 8 9]) - for SymType in (Symmetric, Hermitian) - S = Float64 - symmat = SymType(immutablemat) - @test convert(AbstractArray{S}, symmat).data isa ImmutableArray{S} - @test convert(AbstractMatrix{S}, symmat).data isa ImmutableArray{S} - @test AbstractArray{S}(symmat).data isa ImmutableArray{S} - @test AbstractMatrix{S}(symmat).data isa ImmutableArray{S} - @test convert(AbstractArray{S}, symmat) == symmat - @test convert(AbstractMatrix{S}, symmat) == symmat - end -end - - -@testset "#24572: eltype(A::HermOrSym) === eltype(parent(A))" begin - A = rand(Float32, 3, 3) - @test_throws TypeError Symmetric{Float64,Matrix{Float32}}(A, 'U') - @test_throws TypeError Hermitian{Float64,Matrix{Float32}}(A, 'U') -end - -@testset "fill[stored]!" begin - for uplo in (:U, :L) - # Hermitian - A = Hermitian(fill(1.0+0im, 2, 2), uplo) - @test fill!(A, 2) == fill(2, 2, 2) - @test A.data == (uplo === :U ? [2 2; 1.0+0im 2] : [2 1.0+0im; 2 2]) - @test_throws ArgumentError fill!(A, 2+im) - - # Symmetric - A = Symmetric(fill(1.0+im, 2, 2), uplo) - @test fill!(A, 2) == fill(2, 2, 2) - @test A.data == (uplo === :U ? 
[2 2; 1.0+im 2] : [2 1.0+im; 2 2]) - end -end - -@testset "#25625 recursive transposition" begin - A = Matrix{Matrix{Int}}(undef, 2, 2) - A[1,1] = [1 2; 2 3] - A[1,2] = [4 5 6; 7 8 9] - A[2,1] = [4 7; 5 8; 6 9] - A[2,2] = [1 2; 3 4] - for uplo in (:U, :L) - S = Symmetric(A, uplo) - @test S[1,1] == A[1,1] - @test S[1,2] == transpose(S[2,1]) == A[1,2] - @test S[2,2] == Symmetric(A[2,2], uplo) - @test S == transpose(S) == Matrix(S) == Matrix(transpose(S)) == transpose(Matrix(S)) - end - - B = Matrix{Matrix{Complex{Int}}}(undef, 2, 2) - B[1,1] = [1 2+im; 2-im 3] - B[1,2] = [4 5+1im 6-2im; 7+3im 8-4im 9+5im] - B[2,1] = [4 7-3im; 5-1im 8+4im; 6+2im 9-5im] - B[2,2] = [1+1im 2+2im; 3-3im 4-2im] - for uplo in (:U, :L) - H = Hermitian(B, uplo) - @test H[1,1] == Hermitian(B[1,1], uplo) - @test H[1,2] == adjoint(H[2,1]) == B[1,2] - @test H[2,1] == adjoint(H[1,2]) == B[2,1] - @test H[2,2] == Hermitian(B[2,2], uplo) - @test H == adjoint(H) == Matrix(H) == Matrix(adjoint(H)) == adjoint(Matrix(H)) - end -end - -@testset "getindex of diagonal element (#25972)" begin - A = rand(ComplexF64, 2, 2) - @test Hermitian(A, :U)[1,1] == Hermitian(A, :L)[1,1] == real(A[1,1]) -end - -@testset "issue #29392: SymOrHerm scaled with Number" begin - R = rand(Float64, 2, 2); C = rand(ComplexF64, 2, 2) - # Symmetric * Real, Real * Symmetric - A = Symmetric(R); x = 2.0 - @test (A * x)::Symmetric == (x * A)::Symmetric - A = Symmetric(C); x = 2.0 - @test (A * x)::Symmetric == (x * A)::Symmetric - # Symmetric * Complex, Complex * Symmetrics - A = Symmetric(R); x = 2.0im - @test (A * x)::Symmetric == (x * A)::Symmetric - A = Symmetric(C); x = 2.0im - @test (A * x)::Symmetric == (x * A)::Symmetric - # Hermitian * Real, Real * Hermitian - A = Hermitian(R); x = 2.0 - @test (A * x)::Hermitian == (x * A)::Hermitian - A = Hermitian(C); x = 2.0 - @test (A * x)::Hermitian == (x * A)::Hermitian - # Hermitian * Complex, Complex * Hermitian - A = Hermitian(R); x = 2.0im - @test (A * x)::Matrix == (x * A)::Matrix - A = Hermitian(C); x = 2.0im - @test (A * x)::Matrix == (x * A)::Matrix - # Symmetric / Real - A = Symmetric(R); x = 2.0 - @test (A / x)::Symmetric == Matrix(A) / x - A = Symmetric(C); x = 2.0 - @test (A / x)::Symmetric == Matrix(A) / x - # Symmetric / Complex - A = Symmetric(R); x = 2.0im - @test (A / x)::Symmetric == Matrix(A) / x - A = Symmetric(C); x = 2.0im - @test (A / x)::Symmetric == Matrix(A) / x - # Hermitian / Real - A = Hermitian(R); x = 2.0 - @test (A / x)::Hermitian == Matrix(A) / x - A = Hermitian(C); x = 2.0 - @test (A / x)::Hermitian == Matrix(A) / x - # Hermitian / Complex - A = Hermitian(R); x = 2.0im - @test (A / x)::Matrix == Matrix(A) / x - A = Hermitian(C); x = 2.0im - @test (A / x)::Matrix == Matrix(A) / x -end - -@testset "issue #30814: Symmetric of Hermitian if diag is not real" begin - A = [1 2; 3 4] * (1 + im) - B = Hermitian(A) - @test_throws ArgumentError Symmetric(B) == Symmetric(Matrix(B)) - A[1,1] = 1; A[2,2] = 4 - @test Symmetric(B) == Symmetric(Matrix(B)) -end - -@testset "issue #32079: det for singular Symmetric matrix" begin - A = ones(Float64, 3, 3) - @test det(Symmetric(A))::Float64 == det(A) == 0.0 - @test det(Hermitian(A))::Float64 == det(A) == 0.0 - A = ones(ComplexF64, 3, 3) - @test det(Symmetric(A))::ComplexF64 == det(A) == 0.0 - @test det(Hermitian(A))::Float64 == det(A) == 0.0 -end - -@testset "symmetric()/hermitian() for Numbers" begin - @test LinearAlgebra.symmetric(1) == LinearAlgebra.symmetric(1, :U) == 1 - @test LinearAlgebra.symmetric_type(Int) == Int - @test 
LinearAlgebra.hermitian(1) == LinearAlgebra.hermitian(1, :U) == 1 - @test LinearAlgebra.hermitian_type(Int) == Int -end - -@testset "sqrt(nearly semidefinite)" begin - let A = [0.9999999999999998 4.649058915617843e-16 -1.3149405273715513e-16 9.9959579317056e-17; -8.326672684688674e-16 1.0000000000000004 2.9280733590254494e-16 -2.9993900031619594e-16; 9.43689570931383e-16 -1.339206523454095e-15 1.0000000000000007 -8.550505126287743e-16; -6.245004513516506e-16 -2.0122792321330962e-16 1.183061278035052e-16 1.0000000000000002], - B = [0.09648289218436859 0.023497875751503007 0.0 0.0; 0.023497875751503007 0.045787575150300804 0.0 0.0; 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0], - C = Symmetric(A*B*A'), # semidefinite up to roundoff - Csqrt = sqrt(C) - @test Csqrt isa Symmetric{Float64} - @test Csqrt*Csqrt ≈ C rtol=1e-14 - end - let D = Symmetric(Matrix(Diagonal([1 0; 0 -1e-14]))) - @test sqrt(D) ≈ [1 0; 0 1e-7im] rtol=1e-14 - @test sqrt(D, rtol=1e-13) ≈ [1 0; 0 0] rtol=1e-14 - @test sqrt(D, rtol=1e-13)^2 ≈ D rtol=1e-13 - end -end - -@testset "Multiplications symmetric/hermitian for $T and $S" for T in - (Float16, Float32, Float64, BigFloat), S in (ComplexF16, ComplexF32, ComplexF64) - let A = transpose(Symmetric(rand(S, 3, 3))), Bv = Vector(rand(T, 3)), Bm = Matrix(rand(T, 3,3)) - @test A * Bv ≈ Matrix(A) * Bv - @test A * Bm ≈ Matrix(A) * Bm - @test Bm * A ≈ Bm * Matrix(A) - end - let A = adjoint(Hermitian(rand(S, 3,3))), Bv = Vector(rand(T, 3)), Bm = Matrix(rand(T, 3,3)) - @test A * Bv ≈ Matrix(A) * Bv - @test A * Bm ≈ Matrix(A) * Bm - @test Bm * A ≈ Bm * Matrix(A) - end - let Ahrs = transpose(Hermitian(Symmetric(rand(T, 3, 3)))), - Acs = transpose(Symmetric(rand(S, 3, 3))), - Ahcs = transpose(Hermitian(Symmetric(rand(S, 3, 3)))) - - @test Ahrs * Ahrs ≈ Ahrs * Matrix(Ahrs) - @test Ahrs * Acs ≈ Ahrs * Matrix(Acs) - @test Acs * Acs ≈ Matrix(Acs) * Matrix(Acs) - @test Acs * Ahrs ≈ Matrix(Acs) * Ahrs - @test Ahrs * Ahcs ≈ Matrix(Ahrs) * Ahcs - @test Ahcs * Ahrs ≈ Ahcs * Matrix(Ahrs) - end - let Ahrs = adjoint(Hermitian(Symmetric(rand(T, 3, 3)))), - Acs = adjoint(Symmetric(rand(S, 3, 3))), - Ahcs = adjoint(Hermitian(Symmetric(rand(S, 3, 3)))) - - @test Ahrs * Ahrs ≈ Ahrs * Matrix(Ahrs) - @test Ahcs * Ahcs ≈ Matrix(Ahcs) * Matrix(Ahcs) - @test Ahrs * Ahcs ≈ Ahrs * Matrix(Ahcs) - @test Acs * Ahcs ≈ Acs * Matrix(Ahcs) - @test Ahcs * Ahrs ≈ Matrix(Ahcs) * Ahrs - @test Ahcs * Acs ≈ Matrix(Ahcs) * Acs - end -end - -@testset "Addition/subtraction with SymTridiagonal" begin - TR = SymTridiagonal(randn(Float64,5), randn(Float64,4)) - TC = SymTridiagonal(randn(ComplexF64,5), randn(ComplexF64,4)) - SR = Symmetric(randn(Float64,5,5)) - SC = Symmetric(randn(ComplexF64,5,5)) - HR = Hermitian(randn(Float64,5,5)) - HC = Hermitian(randn(ComplexF64,5,5)) - for op = (+,-) - for T = (TR, TC), S = (SR, SC) - @test op(T, S) == op(Array(T), S) - @test op(S, T) == op(S, Array(T)) - @test op(T, S) isa Symmetric - @test op(S, T) isa Symmetric - end - for H = (HR, HC) - for T = (TR, TC) - @test op(T, H) == op(Array(T), H) - @test op(H, T) == op(H, Array(T)) - end - @test op(TR, H) isa Hermitian - @test op(H, TR) isa Hermitian - end - end -end - -@testset "hermitian part" begin - for T in [Float32, Complex{Float32}, Int32, Rational{Int32}, - Complex{Int32}, Complex{Rational{Int32}}] - f, f!, t = hermitianpart, hermitianpart!, T <: Real ? 
transpose : adjoint - X = T[1 2 3; 4 5 6; 7 8 9] - T <: Complex && (X .+= im .* X) - Xc = copy(X) - Y = (X + t(X)) / 2 - U = f(X) - L = f(X, :L) - @test U isa Hermitian - @test L isa Hermitian - @test U.uplo == 'U' - @test L.uplo == 'L' - @test U == L == Y - if T <: AbstractFloat || real(T) <: AbstractFloat - HU = f!(X) - @test HU == Y - @test triu(X) == triu(Y) - HL = f!(Xc, :L) - @test HL == Y - @test tril(Xc) == tril(Y) - end - end - @test_throws DimensionMismatch hermitianpart(ones(1,2)) - for T in (Float64, ComplexF64), uplo in (:U, :L) - A = [randn(T, 2, 2) for _ in 1:2, _ in 1:2] - Aherm = hermitianpart(A, uplo) - @test Aherm == Aherm.data == (A + A')/2 - @test Aherm isa Hermitian - @test Aherm.uplo == LinearAlgebra.char_uplo(uplo) - end -end - -@testset "Structured display" begin - @testset "Diagonal" begin - d = 10:13 - D = Diagonal(d) - for uplo in (:L, :U), SymHerm in (Symmetric, Hermitian) - S = SymHerm(D, uplo) - @test sprint(Base.print_matrix, S) == sprint(Base.print_matrix, D) - end - - d = (10:13) .+ 2im - D = Diagonal(d) - DR = Diagonal(complex.(real.(d))) - for uplo in (:L, :U) - H = Hermitian(D, uplo) - @test sprint(Base.print_matrix, H) == sprint(Base.print_matrix, DR) - - S = Symmetric(D, uplo) - @test sprint(Base.print_matrix, S) == sprint(Base.print_matrix, D) - end - end - @testset "Bidiagonal" begin - dv, ev = 1:4, 1:3 - ST = SymTridiagonal(dv, ev) - D = Diagonal(dv) - for B_uplo in (:L, :U) - B = Bidiagonal(dv, ev, B_uplo) - for Sym_uplo in (:L, :U), SymHerm in (Symmetric, Hermitian) - SB = SymHerm(B, Sym_uplo) - teststr = sprint(Base.print_matrix, Sym_uplo == B_uplo ? ST : D) - @test sprint(Base.print_matrix, SB) == teststr - SB = SymHerm(Transpose(B), Sym_uplo) - teststr = sprint(Base.print_matrix, Sym_uplo == B_uplo ? D : ST) - @test sprint(Base.print_matrix, SB) == teststr - end - end - end - @testset "Tridiagonal" begin - superd, d, subd = 3:5, 10:13, 1:3 - for uplo in (:U, :L), SymHerm in (Symmetric, Hermitian) - S = SymHerm(Tridiagonal(subd, d, superd), uplo) - ST = SymTridiagonal(d, uplo == :U ? superd : subd) - @test sprint(Base.print_matrix, S) == sprint(Base.print_matrix, ST) - end - - superd, d, subd = collect((3:5)*im), collect(Complex{Int}, 10:13), collect((1:3)*im) - for uplo in (:U, :L) - S = Symmetric(Tridiagonal(subd, d, superd), uplo) - ST = SymTridiagonal(d, uplo == :U ? superd : subd) - @test sprint(Base.print_matrix, S) == sprint(Base.print_matrix, ST) - - H = Hermitian(Tridiagonal(subd, d, superd), uplo) - T = Tridiagonal(uplo == :L ? subd : conj(superd), d, uplo == :U ? 
superd : conj(subd)) - @test sprint(Base.print_matrix, H) == sprint(Base.print_matrix, T) - end - end -end - -@testset "symmetric/hermitian for matrices" begin - A = [1 2; 3 4] - @test LinearAlgebra.symmetric(A) === Symmetric(A) - @test LinearAlgebra.symmetric(A, :L) === Symmetric(A, :L) - @test LinearAlgebra.hermitian(A) === Hermitian(A) - @test LinearAlgebra.hermitian(A, :L) === Hermitian(A, :L) -end - -@testset "custom axes" begin - SZA = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) - for T in (Symmetric, Hermitian) - S = T(SZA) - r = SizedArrays.SOneTo(2) - @test axes(S) === (r,r) - end -end - -@testset "Matrix elements" begin - M = [UpperTriangular([1 2; 3 4]) for i in 1:2, j in 1:2] - for T in (Symmetric, Hermitian) - H = T(M) - A = Array(H) - @test A isa Matrix - @test A == H - A = Array{Matrix{Int}}(H) - @test A isa Matrix{Matrix{Int}} - @test A == H - end -end - -@testset "conj for immutable" begin - S = Symmetric(reshape((1:16)*im, 4, 4)) - @test conj(S) == conj(Array(S)) - H = Hermitian(reshape((1:16)*im, 4, 4)) - @test conj(H) == conj(Array(H)) -end - -@testset "copyto! with aliasing (#39460)" begin - M = Matrix(reshape(1:36, 6, 6)) - @testset for T in (Symmetric, Hermitian), uploA in (:U, :L), uploB in (:U, :L) - A = T(view(M, 1:5, 1:5), uploA) - A2 = copy(A) - B = T(view(M, 2:6, 2:6), uploB) - @test copyto!(B, A) == A2 - - A = view(M, 2:4, 2:4) - B = T(view(M, 1:3, 1:3), uploB) - B2 = copy(B) - @test copyto!(A, B) == B2 - end -end - -@testset "copyto with incompatible sizes" begin - A = zeros(3,3); B = zeros(2,2) - @testset "copyto with incompatible sizes" begin - for T in (Symmetric, Hermitian) - @test_throws BoundsError copyto!(T(B), T(A)) - @test_throws "Cannot set a non-diagonal index" copyto!(T(A), T(B)) - end - end -end - -@testset "getindex with Integers" begin - M = reshape(1:4,2,2) - for ST in (Symmetric, Hermitian) - S = ST(M) - @test_throws "invalid index" S[true, true] - @test S[1,2] == S[Int8(1),UInt16(2)] == S[big(1), Int16(2)] - end -end - -@testset "tr for block matrices" begin - m = [1 2; 3 4] - for b in (m, m * (1 + im)) - M = fill(b, 3, 3) - for ST in (Symmetric, Hermitian) - S = ST(M) - @test tr(S) == sum(diag(S)) - end - end -end - -@testset "setindex! 
returns the destination" begin - M = rand(2,2) - for T in (Symmetric, Hermitian) - S = T(M) - @test setindex!(S, 0, 2, 2) === S - end -end - -@testset "partly iniitalized matrices" begin - a = Matrix{BigFloat}(undef, 2,2) - a[1] = 1; a[3] = 1; a[4] = 1 - h = Hermitian(a) - s = Symmetric(a) - d = Diagonal([1,1]) - symT = SymTridiagonal([1 1;1 1]) - @test h+d == Array(h) + Array(d) - @test h+symT == Array(h) + Array(symT) - @test s+d == Array(s) + Array(d) - @test s+symT == Array(s) + Array(symT) - @test h-d == Array(h) - Array(d) - @test h-symT == Array(h) - Array(symT) - @test s-d == Array(s) - Array(d) - @test s-symT == Array(s) - Array(symT) - @test d+h == Array(d) + Array(h) - @test symT+h == Array(symT) + Array(h) - @test d+s == Array(d) + Array(s) - @test symT+s == Array(symT) + Array(s) - @test d-h == Array(d) - Array(h) - @test symT-h == Array(symT) - Array(h) - @test d-s == Array(d) - Array(s) - @test symT-s == Array(symT) - Array(s) -end - -@testset "issue #56283" begin - a = 1.0 - D = Diagonal(randn(10)) - H = Hermitian(D*D') - @test a*H == H -end - -@testset "trigonometric functions for Integer matrices" begin - A = diagm(0=>1:4, 1=>1:3, -1=>1:3) - for B in (Symmetric(A), Symmetric(complex.(A))) - SC = @inferred(sincos(B)) - @test SC[1] ≈ sin(B) - @test SC[2] ≈ cos(B) - @test cos(A) ≈ real(exp(im*A)) - @test sin(A) ≈ imag(exp(im*A)) - end -end - -end # module TestSymmetric diff --git a/stdlib/LinearAlgebra/test/symmetriceigen.jl b/stdlib/LinearAlgebra/test/symmetriceigen.jl deleted file mode 100644 index 71087ae4d8d24..0000000000000 --- a/stdlib/LinearAlgebra/test/symmetriceigen.jl +++ /dev/null @@ -1,187 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestSymmetricEigen - -using Test, LinearAlgebra - -@testset "chol-eigen-eigvals" begin - ## Cholesky decomposition based - - # eigenvalue sorting - sf = x->(imag(x),real(x)) - - ## Real valued - A = Float64[1 1 0 0; 1 2 1 0; 0 1 3 1; 0 0 1 4] - H = (A+A')/2 - B = Float64[2 1 4 3; 0 3 1 3; 3 1 0 0; 0 1 3 1] - BH = (B+B')/2 - # PD matrix - BPD = B*B' - # eigen - C = cholesky(BPD) - e,v = eigen(A, C; sortby=sf) - @test A*v ≈ BPD*v*Diagonal(e) - # eigvals - @test eigvals(A, BPD; sortby=sf) ≈ eigvals(A, C; sortby=sf) - - ## Complex valued - A = [1.0+im 1.0+1.0im 0 0; 1.0+1.0im 2.0+3.0im 1.0+1.0im 0; 0 1.0+2.0im 3.0+4.0im 1.0+5.0im; 0 0 1.0+1.0im 4.0+4.0im] - AH = (A+A')/2 - B = [2.0+2.0im 1.0+1.0im 4.0+4.0im 3.0+3.0im; 0 3.0+2.0im 1.0+1.0im 3.0+4.0im; 3.0+3.0im 1.0+4.0im 0 0; 0 1.0+2.0im 3.0+1.0im 1.0+1.0im] - BH = (B+B')/2 - # PD matrix - BPD = B*B' - # eigen - C = cholesky(BPD) - e,v = eigen(A, C; sortby=sf) - @test A*v ≈ BPD*v*Diagonal(e) - # eigvals - @test eigvals(A, BPD; sortby=sf) ≈ eigvals(A, C; sortby=sf) -end - -@testset "issue #49533" begin - # eigenvalue sorting - sf = x->(imag(x),real(x)) - - ## Real valued - A = Float64[1 1 0 0; 1 2 1 0; 0 1 3 1; 0 0 1 4] - B = Matrix(Diagonal(Float64[1:4;])) - # eigen - e0,v0 = eigen(A, B) - e1,v1 = eigen(A, Symmetric(B)) - e2,v2 = eigen(Symmetric(A), B) - e3,v3 = eigen(Symmetric(A), Symmetric(B)) - @test e0 ≈ e1 && v0 ≈ v1 - @test e0 ≈ e2 && v0 ≈ v2 - @test e0 ≈ e3 && v0 ≈ v3 - # eigvals - @test eigvals(A, B) ≈ eigvals(A, Symmetric(B)) - @test eigvals(A, B) ≈ eigvals(Symmetric(A), B) - @test eigvals(A, B) ≈ eigvals(Symmetric(A), Symmetric(B)) - - ## Complex valued - A = [1.0+im 1.0+1.0im 0 0; 1.0+1.0im 2.0+3.0im 1.0+1.0im 0; 0 1.0+2.0im 3.0+4.0im 1.0+5.0im; 0 0 1.0+1.0im 4.0+4.0im] - AH = A'A - B = [2.0+2.0im 1.0+1.0im 4.0+4.0im 
3.0+3.0im; 0 3.0+2.0im 1.0+1.0im 3.0+4.0im; 3.0+3.0im 1.0+4.0im 0 0; 0 1.0+2.0im 3.0+1.0im 1.0+1.0im] - BH = B'B - # eigen - e1,v1 = eigen(A, Hermitian(BH)) - @test A*v1 ≈ Hermitian(BH)*v1*Diagonal(e1) - e2,v2 = eigen(Hermitian(AH), B) - @test Hermitian(AH)*v2 ≈ B*v2*Diagonal(e2) - e3,v3 = eigen(Hermitian(AH), Hermitian(BH)) - @test Hermitian(AH)*v3 ≈ Hermitian(BH)*v3*Diagonal(e3) - # eigvals - @test eigvals(A, BH; sortby=sf) ≈ eigvals(A, Hermitian(BH); sortby=sf) - @test eigvals(AH, B; sortby=sf) ≈ eigvals(Hermitian(AH), B; sortby=sf) - @test eigvals(AH, BH; sortby=sf) ≈ eigvals(Hermitian(AH), Hermitian(BH); sortby=sf) -end - -@testset "bk-lu-eigen-eigvals" begin - # Bunchkaufman decomposition based - - # eigenvalue sorting - sf = x->(imag(x),real(x)) - - # Real-valued random matrix - N = 10 - A = randn(N,N) - B = randn(N,N) - BH = (B+B')/2 - # eigen - e0 = eigvals(A,BH; sortby=sf) - e,v = eigen(A,bunchkaufman(Hermitian(BH,:L)); sortby=sf) - @test e0 ≈ e - @test A*v ≈ BH*v*Diagonal(e) - e,v = eigen(A,bunchkaufman(Hermitian(BH,:U)); sortby=sf) - @test e0 ≈ e - @test A*v ≈ BH*v*Diagonal(e) - e,v = eigen(A,lu(Hermitian(BH,:L)); sortby=sf) - @test e0 ≈ e - @test A*v ≈ BH*v*Diagonal(e) - e,v = eigen(A,lu(Hermitian(BH,:U)); sortby=sf) - @test e0 ≈ e - @test A*v ≈ BH*v*Diagonal(e) - # eigvals - e0 = eigvals(A,BH; sortby=sf) - el = eigvals(A,bunchkaufman(Hermitian(BH,:L)); sortby=sf) - eu = eigvals(A,bunchkaufman(Hermitian(BH,:U)); sortby=sf) - @test e0 ≈ el - @test e0 ≈ eu - el = eigvals(A,lu(Hermitian(BH,:L)); sortby=sf) - eu = eigvals(A,lu(Hermitian(BH,:U)); sortby=sf) - @test e0 ≈ el - @test e0 ≈ eu - - # Complex-valued random matrix - N = 10 - A = complex.(randn(N,N),randn(N,N)) - B = complex.(randn(N,N),randn(N,N)) - BH = (B+B')/2 - # eigen - e0 = eigvals(A,BH; sortby=sf) - e,v = eigen(A,bunchkaufman(Hermitian(BH,:L)); sortby=sf) - @test e0 ≈ e - @test A*v ≈ BH*v*Diagonal(e) - e,v = eigen(A,bunchkaufman(Hermitian(BH,:U)); sortby=sf) - @test e0 ≈ e - @test A*v ≈ BH*v*Diagonal(e) - e,v = eigen(A,lu(Hermitian(BH,:L)); sortby=sf) - @test e0 ≈ e - @test A*v ≈ BH*v*Diagonal(e) - e,v = eigen(A,lu(Hermitian(BH,:U)); sortby=sf) - @test e0 ≈ e - @test A*v ≈ BH*v*Diagonal(e) - # eigvals - e0 = eigvals(A,BH; sortby=sf) - el = eigvals(A,bunchkaufman(Hermitian(BH,:L)); sortby=sf) - eu = eigvals(A,bunchkaufman(Hermitian(BH,:U)); sortby=sf) - @test e0 ≈ el - @test e0 ≈ eu - el = eigvals(A,lu(Hermitian(BH,:L)); sortby=sf) - eu = eigvals(A,lu(Hermitian(BH,:U)); sortby=sf) - @test e0 ≈ el - @test e0 ≈ eu -end - -@testset "Hermitian tridiagonal eigen with Complex{Int} elements (#52801)" begin - dv, ev = fill(complex(2), 4), fill(3-4im, 3) - HT = Hermitian(Tridiagonal(ev, dv, ev)) - λ, V = eigen(HT) - @test HT * V ≈ V * Diagonal(λ) - HT = Hermitian(Tridiagonal(ComplexF16.(ev), ComplexF16.(dv), ComplexF16.(ev))) - F = eigen(HT) - @test F isa Eigen{ComplexF16, Float16, Matrix{ComplexF16}, Vector{Float16}} - λ, V = F - @test HT * V ≈ V * Diagonal(λ) -end - -@testset "Float16" begin - A = rand(Float16, 3, 3) - A = Symmetric(A*A') - B = eigen(A) - B32 = eigen(Symmetric(Float32.(A))) - @test B isa Eigen{Float16, Float16, Matrix{Float16}, Vector{Float16}} - @test B.values ≈ B32.values - @test B.vectors ≈ B32.vectors - C = randn(ComplexF16, 3, 3) - C = Hermitian(C*C') - D = eigen(C) - D32 = eigen(Hermitian(ComplexF32.(C))) - @test D isa Eigen{ComplexF16, Float16, Matrix{ComplexF16}, Vector{Float16}} - @test D.values ≈ D32.values - @test D.vectors ≈ D32.vectors - - # ensure that different algorithms dispatch correctly - 
λ, V = eigen(C, LinearAlgebra.QRIteration()) - @test λ isa Vector{Float16} - @test C * V ≈ V * Diagonal(λ) -end - -@testset "complex Symmetric" begin - S = Symmetric(rand(ComplexF64,2,2)) - λ, v = eigen(S) - @test S * v ≈ v * Diagonal(λ) -end - -end # module TestSymmetricEigen diff --git a/stdlib/LinearAlgebra/test/testgroups b/stdlib/LinearAlgebra/test/testgroups deleted file mode 100644 index 0f2f4f4af8708..0000000000000 --- a/stdlib/LinearAlgebra/test/testgroups +++ /dev/null @@ -1,30 +0,0 @@ -triangular -addmul -bidiag -matmul -dense -symmetric -diagonal -special -qr -cholesky -blas -lu -uniformscaling -structuredbroadcast -hessenberg -svd -eigen -tridiag -lapack -lq -adjtrans -generic -schur -bunchkaufman -givens -pinv -factorization -abstractq -ldlt -symmetriceigen diff --git a/stdlib/LinearAlgebra/test/testutils.jl b/stdlib/LinearAlgebra/test/testutils.jl deleted file mode 100644 index 33eff29765c70..0000000000000 --- a/stdlib/LinearAlgebra/test/testutils.jl +++ /dev/null @@ -1,27 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# Test approximate equality of vectors or columns of matrices modulo floating -# point roundoff and phase (sign) differences. -# -# This function is designed to test for equality between vectors of floating point -# numbers when the vectors are defined only up to a global phase or sign, such as -# normalized eigenvectors or singular vectors. The global phase is usually -# defined consistently, but may occasionally change due to small differences in -# floating point rounding noise or rounding modes, or through the use of -# different conventions in different algorithms. As a result, most tests checking -# such vectors have to detect and discard such overall phase differences. -# -# Inputs: -# a, b:: StridedVecOrMat to be compared -# err :: Default: m^3*(eps(S)+eps(T)), where m is the number of rows -# -# Raises an error if any columnwise vector norm exceeds err. Otherwise, returns -# nothing. -function test_approx_eq_modphase(a::StridedVecOrMat{S}, b::StridedVecOrMat{T}, - err = length(axes(a,1))^3*(eps(S)+eps(T))) where {S<:Real,T<:Real} - @test axes(a,1) == axes(b,1) && axes(a,2) == axes(b,2) - for i in axes(a,2) - v1, v2 = a[:, i], b[:, i] - @test min(abs(norm(v1-v2)),abs(norm(v1+v2))) ≈ 0.0 atol=err - end -end diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl deleted file mode 100644 index e69c86cc93663..0000000000000 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ /dev/null @@ -1,1419 +0,0 @@ -# This file is a part of Julia. 
License is MIT: https://julialang.org/license - -module TestTriangular - -debug = false -using Test, LinearAlgebra, Random -using LinearAlgebra: BlasFloat, errorbounds, full!, transpose!, - UnitUpperTriangular, UnitLowerTriangular, - mul!, rdiv!, rmul!, lmul!, BandIndex - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") - -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays - -isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl")) -using .Main.FillArrays - -debug && println("Triangular matrices") - -n = 9 -Random.seed!(123) - -debug && println("Test basic type functionality") -@test_throws DimensionMismatch LowerTriangular(randn(5, 4)) -@test LowerTriangular(randn(3, 3)) |> t -> [size(t, i) for i = 1:3] == [size(Matrix(t), i) for i = 1:3] - -struct MyTriangular{T, A<:LinearAlgebra.AbstractTriangular{T}} <: LinearAlgebra.AbstractTriangular{T} - data :: A -end -Base.size(A::MyTriangular) = size(A.data) -Base.getindex(A::MyTriangular, i::Int, j::Int) = A.data[i,j] - -# The following test block tries to call all methods in base/linalg/triangular.jl in order for a combination of input element types. Keep the ordering when adding code. -@testset for elty1 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}, Int) - # Begin loop for first Triangular matrix - @testset for (t1, uplo1) in ((UpperTriangular, :U), - (UnitUpperTriangular, :U), - (LowerTriangular, :L), - (UnitLowerTriangular, :L)) - - # Construct test matrix - A1 = t1(elty1 == Int ? rand(1:7, n, n) : convert(Matrix{elty1}, (elty1 <: Complex ? complex.(randn(n, n), randn(n, n)) : randn(n, n)) |> t -> cholesky(t't).U |> t -> uplo1 === :U ? t : copy(t'))) - M1 = Matrix(A1) - @test t1(A1) === A1 - @test t1{elty1}(A1) === A1 - # test the ctor works for AbstractMatrix - symm = Symmetric(rand(Int8, n, n)) - t1s = t1{elty1}(symm) - @test typeof(t1s) == t1{elty1, Symmetric{elty1, Matrix{elty1}}} - t1t = t1{elty1}(t1(rand(Int8, n, n))) - @test typeof(t1t) == t1{elty1, Matrix{elty1}} - - debug && println("elty1: $elty1, A1: $t1") - - # Convert - @test convert(AbstractMatrix{elty1}, A1) == A1 - @test convert(Matrix, A1) == A1 - @test t1{elty1}(convert(AbstractMatrix{elty1}, A1)) == A1 - - # full! - @test full!(copy(A1)) == A1 - - # similar - @test isa(similar(A1), t1) - @test eltype(similar(A1)) == elty1 - @test isa(similar(A1, Int), t1) - @test eltype(similar(A1, Int)) == Int - @test isa(similar(A1, (3,2)), Matrix{elty1}) - @test isa(similar(A1, Int, (3,2)), Matrix{Int}) - - #copyto! - simA1 = similar(A1) - copyto!(simA1, A1) - @test simA1 == A1 - - # getindex - let mA1 = M1 - # linear indexing - for i in 1:length(A1) - @test A1[i] == mA1[i] - end - # cartesian indexing - for i in 1:size(A1, 1), j in 1:size(A1, 2) - @test A1[i,j] == mA1[i,j] - end - end - @test isa(A1[2:4,1], Vector) - - - # setindex! 
(and copy) - A1c = copy(A1) - for i = 1:size(A1, 1) - for j = 1:size(A1, 2) - if uplo1 === :U - if i > j - A1c[i,j] = 0 - @test_throws ArgumentError A1c[i,j] = 1 - elseif i == j && t1 == UnitUpperTriangular - A1c[i,j] = 1 - @test_throws ArgumentError A1c[i,j] = 0 - else - A1c[i,j] = 0 - @test A1c[i,j] == 0 - end - else - if i < j - A1c[i,j] = 0 - @test_throws ArgumentError A1c[i,j] = 1 - elseif i == j && t1 == UnitLowerTriangular - A1c[i,j] = 1 - @test_throws ArgumentError A1c[i,j] = 0 - else - A1c[i,j] = 0 - @test A1c[i,j] == 0 - end - end - end - end - - # istril/istriu - if uplo1 === :L - @test istril(A1) - @test !istriu(A1) - @test istriu(A1') - @test istriu(transpose(A1)) - @test !istril(A1') - @test !istril(transpose(A1)) - else - @test istriu(A1) - @test !istril(A1) - @test istril(A1') - @test istril(transpose(A1)) - @test !istriu(A1') - @test !istriu(transpose(A1)) - end - M = copy(parent(A1)) - for trans in (adjoint, transpose), k in -1:1 - triu!(M, k) - @test istril(trans(M), -k) == istril(copy(trans(M)), -k) == true - end - M = copy(parent(A1)) - for trans in (adjoint, transpose), k in 1:-1:-1 - tril!(M, k) - @test istriu(trans(M), -k) == istriu(copy(trans(M)), -k) == true - end - - #tril/triu - if uplo1 === :L - @test tril(A1,0) == A1 - @test tril(A1,-1) == LowerTriangular(tril(M1, -1)) - @test tril(A1,1) == t1(tril(tril(M1, 1))) - @test tril(A1, -n - 2) == zeros(size(A1)) - @test tril(A1, n) == A1 - @test triu(A1,0) == t1(diagm(0 => diag(A1))) - @test triu(A1,-1) == t1(tril(triu(A1.data,-1))) - @test triu(A1,1) == zeros(size(A1)) # or just @test iszero(triu(A1,1))? - @test triu(A1, -n) == A1 - @test triu(A1, n + 2) == zeros(size(A1)) - else - @test triu(A1,0) == A1 - @test triu(A1,1) == UpperTriangular(triu(M1, 1)) - @test triu(A1,-1) == t1(triu(triu(M1, -1))) - @test triu(A1, -n) == A1 - @test triu(A1, n + 2) == zeros(size(A1)) - @test tril(A1,0) == t1(diagm(0 => diag(A1))) - @test tril(A1,1) == t1(triu(tril(A1.data,1))) - @test tril(A1,-1) == zeros(size(A1)) # or just @test iszero(tril(A1,-1))? - @test tril(A1, -n - 2) == zeros(size(A1)) - @test tril(A1, n) == A1 - end - - # factorize - @test factorize(A1) == A1 - - # [c]transpose[!] (test views as well, see issue #14317) - let vrange = 1:n-1, viewA1 = t1(view(A1.data, vrange, vrange)) - # transpose - @test copy(transpose(A1)) == transpose(M1) - @test copy(transpose(viewA1)) == transpose(Matrix(viewA1)) - # adjoint - @test copy(A1') == M1' - @test copy(viewA1') == Matrix(viewA1)' - # transpose! - @test transpose!(copy(A1)) == transpose(A1) - @test typeof(transpose!(copy(A1))).name == typeof(transpose(A1)).name - @test transpose!(t1(view(copy(A1).data, vrange, vrange))) == transpose(viewA1) - # adjoint! - @test adjoint!(copy(A1)) == adjoint(A1) - @test typeof(adjoint!(copy(A1))).name == typeof(adjoint(A1)).name - @test adjoint!(t1(view(copy(A1).data, vrange, vrange))) == adjoint(viewA1) - end - - # diag - @test diag(A1) == diag(M1) - - # tr - @test tr(A1)::elty1 == tr(M1) - - # real - @test real(A1) == real(M1) - @test imag(A1) == imag(M1) - @test abs.(A1) == abs.(M1) - - # zero - if A1 isa UpperTriangular || A1 isa LowerTriangular - @test zero(A1) == zero(parent(A1)) - end - - # Unary operations - @test -A1 == -M1 - - # copy and copyto! (test views as well, see issue #14317) - let vrange = 1:n-1, viewA1 = t1(view(A1.data, vrange, vrange)) - # copy - @test copy(A1) == copy(M1) - @test copy(viewA1) == copy(Matrix(viewA1)) - # copyto! 
- B = similar(A1) - copyto!(B, A1) - @test B == A1 - B = similar(copy(transpose(A1))) - copyto!(B, copy(transpose(A1))) - @test B == copy(transpose(A1)) - B = similar(viewA1) - copyto!(B, viewA1) - @test B == viewA1 - B = similar(copy(transpose(viewA1))) - copyto!(B, copy(transpose(viewA1))) - @test B == transpose(viewA1) - end - - #exp/log - if elty1 ∈ (Float32,Float64,ComplexF32,ComplexF64) - @test exp(Matrix(log(A1))) ≈ A1 - end - - # scale - if (t1 == UpperTriangular || t1 == LowerTriangular) - unitt = istriu(A1) ? UnitUpperTriangular : UnitLowerTriangular - if elty1 == Int - cr = 2 - else - cr = 0.5 - end - ci = cr * im - if elty1 <: Real - A1tmp = copy(A1) - rmul!(A1tmp, cr) - @test A1tmp == cr*A1 - A1tmp = copy(A1) - lmul!(cr, A1tmp) - @test A1tmp == cr*A1 - A1tmp = copy(A1) - A2tmp = unitt(A1) - mul!(A1tmp, A2tmp, cr) - @test A1tmp == cr * A2tmp - A1tmp = copy(A1) - A2tmp = unitt(A1) - mul!(A1tmp, cr, A2tmp) - @test A1tmp == cr * A2tmp - - A1tmp .= A1 - @test mul!(A1tmp, A2tmp, cr, 0, 2) == 2A1 - A1tmp .= A1 - @test mul!(A1tmp, cr, A2tmp, 0, 2) == 2A1 - else - A1tmp = copy(A1) - rmul!(A1tmp, ci) - @test A1tmp == ci*A1 - A1tmp = copy(A1) - lmul!(ci, A1tmp) - @test A1tmp == ci*A1 - A1tmp = copy(A1) - A2tmp = unitt(A1) - mul!(A1tmp, ci, A2tmp) - @test A1tmp == ci * A2tmp - A1tmp = copy(A1) - A2tmp = unitt(A1) - mul!(A1tmp, A2tmp, ci) - @test A1tmp == A2tmp*ci - end - end - - # generalized dot - for eltyb in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}) - b1 = convert(Vector{eltyb}, (elty1 <: Complex ? real(A1) : A1)*fill(1., n)) - b2 = convert(Vector{eltyb}, (elty1 <: Complex ? real(A1) : A1)*randn(n)) - @test dot(b1, A1, b2) ≈ dot(A1'b1, b2) atol=sqrt(max(eps(real(float(one(elty1)))),eps(real(float(one(eltyb))))))*n*n - end - - # Binary operations - @test A1*0.5 == M1*0.5 - @test 0.5*A1 == 0.5*M1 - @test A1/0.5 == M1/0.5 - @test 0.5\A1 == 0.5\M1 - - # inversion - @test inv(A1) ≈ inv(lu(M1)) - inv(M1) # issue #11298 - @test isa(inv(A1), t1) - # make sure the call to LAPACK works right - if elty1 <: BlasFloat - @test LinearAlgebra.inv!(copy(A1)) ≈ inv(lu(M1)) - end - - # Determinant - @test det(A1) ≈ det(lu(M1)) atol=sqrt(eps(real(float(one(elty1)))))*n*n - @test logdet(A1) ≈ logdet(lu(M1)) atol=sqrt(eps(real(float(one(elty1)))))*n*n - lada, ladb = logabsdet(A1) - flada, fladb = logabsdet(lu(M1)) - @test lada ≈ flada atol=sqrt(eps(real(float(one(elty1)))))*n*n - @test ladb ≈ fladb atol=sqrt(eps(real(float(one(elty1)))))*n*n - - # Matrix square root - @test sqrt(A1) |> (t -> (t*t)::typeof(t)) ≈ A1 - - # naivesub errors - @test_throws DimensionMismatch ldiv!(A1, Vector{elty1}(undef, n+1)) - - # eigenproblems - if !(elty1 in (BigFloat, Complex{BigFloat})) # Not handled yet - vals, vecs = eigen(A1) - if (t1 == UpperTriangular || t1 == LowerTriangular) && elty1 != Int # Cannot really handle degenerate eigen space and Int matrices will probably have repeated eigenvalues. 
- @test vecs*diagm(0 => vals)/vecs ≈ A1 atol=sqrt(eps(float(real(one(vals[1])))))*(opnorm(A1,Inf)*n)^2 - end - end - - # Condition number tests - can be VERY approximate - if elty1 <:BlasFloat - for p in (1.0, Inf) - @test cond(A1,p) ≈ cond(A1,p) atol=(cond(A1,p)+cond(A1,p)) - end - @test cond(A1,2) == cond(M1,2) - end - - if !(elty1 in (BigFloat, Complex{BigFloat})) # Not implemented yet - svd(A1) - elty1 <: BlasFloat && svd!(copy(A1)) - svdvals(A1) - end - - @test ((A1*A1)::t1) ≈ M1 * M1 - @test ((A1/A1)::t1) ≈ M1 / M1 - @test ((A1\A1)::t1) ≈ M1 \ M1 - - # Begin loop for second Triangular matrix - @testset for elty2 in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}, Int) - @testset for (t2, uplo2) in ((UpperTriangular, :U), - (UnitUpperTriangular, :U), - (LowerTriangular, :L), - (UnitLowerTriangular, :L)) - - debug && println("elty1: $elty1, A1: $t1, elty2: $elty2, A2: $t2") - - A2 = t2(elty2 == Int ? rand(1:7, n, n) : convert(Matrix{elty2}, (elty2 <: Complex ? complex.(randn(n, n), randn(n, n)) : randn(n, n)) |> t -> cholesky(t't).U |> t -> uplo2 === :U ? t : copy(t'))) - M2 = Matrix(A2) - # Convert - if elty1 <: Real && !(elty2 <: Integer) - @test convert(AbstractMatrix{elty2}, A1) == t1(convert(Matrix{elty2}, A1.data)) - elseif elty2 <: Real && !(elty1 <: Integer) - @test_throws InexactError convert(AbstractMatrix{elty2}, A1) == t1(convert(Matrix{elty2}, A1.data)) - end - - # Binary operations - @test A1 + A2 == M1 + M2 - @test A1 - A2 == M1 - M2 - @test kron(A1,A2) == kron(M1,M2) - - # Triangular-Triangular multiplication and division - @test A1*A2 ≈ M1*M2 - @test transpose(A1)*A2 ≈ transpose(M1)*M2 - @test transpose(A1)*adjoint(A2) ≈ transpose(M1)*adjoint(M2) - @test adjoint(A1)*transpose(A2) ≈ adjoint(M1)*transpose(M2) - @test A1'A2 ≈ M1'M2 - @test A1*transpose(A2) ≈ M1*transpose(M2) - @test A1*A2' ≈ M1*M2' - @test transpose(A1)*transpose(A2) ≈ transpose(M1)*transpose(M2) - @test A1'A2' ≈ M1'M2' - @test A1/A2 ≈ M1/M2 - @test A1\A2 ≈ M1\M2 - if uplo1 === :U && uplo2 === :U - if t1 === UnitUpperTriangular && t2 === UnitUpperTriangular - @test A1*A2 isa UnitUpperTriangular - @test A1/A2 isa UnitUpperTriangular - elty1 == Int && elty2 == Int && @test eltype(A1/A2) == Int - @test A1\A2 isa UnitUpperTriangular - elty1 == Int && elty2 == Int && @test eltype(A1\A2) == Int - else - @test A1*A2 isa UpperTriangular - @test A1/A2 isa UpperTriangular - elty1 == Int && elty2 == Int && t2 === UnitUpperTriangular && @test eltype(A1/A2) == Int - @test A1\A2 isa UpperTriangular - elty1 == Int && elty2 == Int && t1 === UnitUpperTriangular && @test eltype(A1\A2) == Int - end - elseif uplo1 === :L && uplo2 === :L - if t1 === UnitLowerTriangular && t2 === UnitLowerTriangular - @test A1*A2 isa UnitLowerTriangular - @test A1/A2 isa UnitLowerTriangular - elty1 == Int && elty2 == Int && @test eltype(A1/A2) == Int - @test A1\A2 isa UnitLowerTriangular - elty1 == Int && elty2 == Int && @test eltype(A1\A2) == Int - else - @test A1*A2 isa LowerTriangular - @test A1/A2 isa LowerTriangular - elty1 == Int && elty2 == Int && t2 === UnitLowerTriangular && @test eltype(A1/A2) == Int - @test A1\A2 isa LowerTriangular - elty1 == Int && elty2 == Int && t1 === UnitLowerTriangular && @test eltype(A1\A2) == Int - end - end - offsizeA = Matrix{Float64}(I, n+1, n+1) - @test_throws DimensionMismatch offsizeA / A2 - @test_throws DimensionMismatch offsizeA / transpose(A2) - @test_throws DimensionMismatch offsizeA / A2' - @test_throws DimensionMismatch offsizeA * A2 - @test_throws DimensionMismatch 
offsizeA * transpose(A2) - @test_throws DimensionMismatch offsizeA * A2' - @test_throws DimensionMismatch transpose(A2) * offsizeA - @test_throws DimensionMismatch A2' * offsizeA - @test_throws DimensionMismatch A2 * offsizeA - if (uplo1 == uplo2 && elty1 == elty2 != Int && t1 != UnitLowerTriangular && t1 != UnitUpperTriangular) - @test rdiv!(copy(A1), A2)::t1 ≈ A1/A2 ≈ M1/M2 - @test ldiv!(A2, copy(A1))::t1 ≈ A2\A1 ≈ M2\M1 - end - if (uplo1 != uplo2 && elty1 == elty2 != Int && t2 != UnitLowerTriangular && t2 != UnitUpperTriangular) - @test lmul!(adjoint(A1), copy(A2)) ≈ A1'*A2 ≈ M1'*M2 - @test lmul!(transpose(A1), copy(A2)) ≈ transpose(A1)*A2 ≈ transpose(M1)*M2 - @test ldiv!(adjoint(A1), copy(A2)) ≈ A1'\A2 ≈ M1'\M2 - @test ldiv!(transpose(A1), copy(A2)) ≈ transpose(A1)\A2 ≈ transpose(M1)\M2 - end - if (uplo1 != uplo2 && elty1 == elty2 != Int && t1 != UnitLowerTriangular && t1 != UnitUpperTriangular) - @test rmul!(copy(A1), adjoint(A2)) ≈ A1*A2' ≈ M1*M2' - @test rmul!(copy(A1), transpose(A2)) ≈ A1*transpose(A2) ≈ M1*transpose(M2) - @test rdiv!(copy(A1), adjoint(A2)) ≈ A1/A2' ≈ M1/M2' - @test rdiv!(copy(A1), transpose(A2)) ≈ A1/transpose(A2) ≈ M1/transpose(M2) - end - end - end - - for eltyB in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}) - B = convert(Matrix{eltyB}, (elty1 <: Complex ? real(A1) : A1)*fill(1., n, n)) - - debug && println("elty1: $elty1, A1: $t1, B: $eltyB") - - Tri = Tridiagonal(rand(eltyB,n-1),rand(eltyB,n),rand(eltyB,n-1)) - C = Matrix{promote_type(elty1,eltyB)}(undef, n, n) - mul!(C, Tri, A1) - @test C ≈ Tri*M1 - Tri = Tridiagonal(rand(eltyB,n-1),rand(eltyB,n),rand(eltyB,n-1)) - mul!(C, A1, Tri) - @test C ≈ M1*Tri - - # Triangular-dense Matrix/vector multiplication - @test A1*B[:,1] ≈ M1*B[:,1] - @test A1*B ≈ M1*B - @test transpose(A1)*B[:,1] ≈ transpose(M1)*B[:,1] - @test A1'B[:,1] ≈ M1'B[:,1] - @test transpose(A1)*B ≈ transpose(M1)*B - @test A1'B ≈ M1'B - @test A1*transpose(B) ≈ M1*transpose(B) - @test adjoint(A1)*transpose(B) ≈ M1'*transpose(B) - @test transpose(A1)*adjoint(B) ≈ transpose(M1)*adjoint(B) - @test A1*B' ≈ M1*B' - @test B*A1 ≈ B*M1 - @test transpose(B[:,1])*A1 ≈ transpose(B[:,1])*M1 - @test B[:,1]'A1 ≈ B[:,1]'M1 - @test transpose(B)*A1 ≈ transpose(B)*M1 - @test transpose(B)*adjoint(A1) ≈ transpose(B)*M1' - @test adjoint(B)*transpose(A1) ≈ adjoint(B)*transpose(M1) - @test B'A1 ≈ B'M1 - @test B*transpose(A1) ≈ B*transpose(M1) - @test B*A1' ≈ B*M1' - @test transpose(B[:,1])*transpose(A1) ≈ transpose(B[:,1])*transpose(M1) - @test B[:,1]'A1' ≈ B[:,1]'M1' - @test transpose(B)*transpose(A1) ≈ transpose(B)*transpose(M1) - @test B'A1' ≈ B'M1' - - if eltyB == elty1 - @test mul!(similar(B), A1, B) ≈ M1*B - @test mul!(similar(B), A1, adjoint(B)) ≈ M1*B' - @test mul!(similar(B), A1, transpose(B)) ≈ M1*transpose(B) - @test mul!(similar(B), adjoint(A1), adjoint(B)) ≈ M1'*B' - @test mul!(similar(B), transpose(A1), transpose(B)) ≈ transpose(M1)*transpose(B) - @test mul!(similar(B), transpose(A1), adjoint(B)) ≈ transpose(M1)*B' - @test mul!(similar(B), adjoint(A1), transpose(B)) ≈ M1'*transpose(B) - @test mul!(similar(B), adjoint(A1), B) ≈ M1'*B - @test mul!(similar(B), transpose(A1), B) ≈ transpose(M1)*B - # test also vector methods - B1 = vec(B[1,:]) - @test mul!(similar(B1), A1, B1) ≈ M1*B1 - @test mul!(similar(B1), adjoint(A1), B1) ≈ M1'*B1 - @test mul!(similar(B1), transpose(A1), B1) ≈ transpose(M1)*B1 - end - #error handling - Ann, Bmm, bm = A1, Matrix{eltyB}(undef, n+1, n+1), Vector{eltyB}(undef, n+1) - @test_throws DimensionMismatch 
lmul!(Ann, bm) - @test_throws DimensionMismatch rmul!(Bmm, Ann) - @test_throws DimensionMismatch lmul!(transpose(Ann), bm) - @test_throws DimensionMismatch lmul!(adjoint(Ann), bm) - @test_throws DimensionMismatch rmul!(Bmm, adjoint(Ann)) - @test_throws DimensionMismatch rmul!(Bmm, transpose(Ann)) - - # ... and division - @test A1\B[:,1] ≈ M1\B[:,1] - @test A1\B ≈ M1\B - @test transpose(A1)\B[:,1] ≈ transpose(M1)\B[:,1] - @test A1'\B[:,1] ≈ M1'\B[:,1] - @test transpose(A1)\B ≈ transpose(M1)\B - @test A1'\B ≈ M1'\B - @test A1\transpose(B) ≈ M1\transpose(B) - @test A1\B' ≈ M1\B' - @test transpose(A1)\transpose(B) ≈ transpose(M1)\transpose(B) - @test A1'\B' ≈ M1'\B' - Ann, bm = A1, Vector{elty1}(undef,n+1) - @test_throws DimensionMismatch Ann\bm - @test_throws DimensionMismatch Ann'\bm - @test_throws DimensionMismatch transpose(Ann)\bm - if t1 == UpperTriangular || t1 == LowerTriangular - @test_throws SingularException ldiv!(t1(zeros(elty1, n, n)), fill(eltyB(1), n)) - end - @test B/A1 ≈ B/M1 - @test B/transpose(A1) ≈ B/transpose(M1) - @test B/A1' ≈ B/M1' - @test transpose(B)/A1 ≈ transpose(B)/M1 - @test B'/A1 ≈ B'/M1 - @test transpose(B)/transpose(A1) ≈ transpose(B)/transpose(M1) - @test B'/A1' ≈ B'/M1' - - # Error bounds - !(elty1 in (BigFloat, Complex{BigFloat})) && !(eltyB in (BigFloat, Complex{BigFloat})) && errorbounds(A1, A1\B, B) - - end - end -end - -@testset "non-strided arithmetic" begin - for (T,T1) in ((UpperTriangular, UnitUpperTriangular), (LowerTriangular, UnitLowerTriangular)) - U = T(reshape(1:16, 4, 4)) - M = Matrix(U) - @test -U == -M - U1 = T1(reshape(1:16, 4, 4)) - M1 = Matrix(U1) - @test -U1 == -M1 - for op in (+, -) - for (A, MA) in ((U, M), (U1, M1)), (B, MB) in ((U, M), (U1, M1)) - @test op(A, B) == op(MA, MB) - end - end - @test imag(U) == zero(U) - end -end - -# Matrix square root -Atn = UpperTriangular([-1 1 2; 0 -2 2; 0 0 -3]) -Atp = UpperTriangular([1 1 2; 0 2 2; 0 0 3]) -Atu = UnitUpperTriangular([1 1 2; 0 1 2; 0 0 1]) -@test sqrt(Atn) |> t->t*t ≈ Atn -@test sqrt(Atn) isa UpperTriangular -@test typeof(sqrt(Atn)[1,1]) <: Complex -@test sqrt(Atp) |> t->t*t ≈ Atp -@test sqrt(Atp) isa UpperTriangular -@test typeof(sqrt(Atp)[1,1]) <: Real -@test typeof(sqrt(complex(Atp))[1,1]) <: Complex -@test sqrt(Atu) |> t->t*t ≈ Atu -@test sqrt(Atu) isa UnitUpperTriangular -@test typeof(sqrt(Atu)[1,1]) <: Real -@test typeof(sqrt(complex(Atu))[1,1]) <: Complex - -@testset "matrix square root quasi-triangular blockwise" begin - @testset for T in (Float32, Float64, ComplexF32, ComplexF64) - A = schur(rand(T, 100, 100)^2).T - @test LinearAlgebra.sqrt_quasitriu(A; blockwidth=16)^2 ≈ A - end - n = 256 - A = rand(ComplexF64, n, n) - U = schur(A).T - Ubig = Complex{BigFloat}.(U) - @test LinearAlgebra.sqrt_quasitriu(U; blockwidth=64) ≈ LinearAlgebra.sqrt_quasitriu(Ubig; blockwidth=64) -end - -@testset "sylvester quasi-triangular blockwise" begin - @testset for T in (Float32, Float64, ComplexF32, ComplexF64), m in (15, 40), n in (15, 45) - A = schur(rand(T, m, m)).T - B = schur(rand(T, n, n)).T - C = randn(T, m, n) - Ccopy = copy(C) - X = LinearAlgebra._sylvester_quasitriu!(A, B, C; blockwidth=16) - @test X === C - @test A * X + X * B ≈ -Ccopy - - @testset "test raise=false does not break recursion" begin - Az = zero(A) - Bz = zero(B) - C2 = copy(Ccopy) - @test_throws LAPACKException LinearAlgebra._sylvester_quasitriu!(Az, Bz, C2; blockwidth=16) - m == n || @test any(C2 .== Ccopy) # recursion broken - C3 = copy(Ccopy) - X3 = LinearAlgebra._sylvester_quasitriu!(Az, Bz, C3; blockwidth=16, 
raise=false) - @test !any(X3 .== Ccopy) # recursion not broken - end - end -end - -@testset "check matrix logarithm type-inferable" for elty in (Float32,Float64,ComplexF32,ComplexF64) - A = UpperTriangular(exp(triu(randn(elty, n, n)))) - @inferred Union{typeof(A),typeof(complex(A))} log(A) - @test exp(Matrix(log(A))) ≈ A - if elty <: Real - @test typeof(log(A)) <: UpperTriangular{elty} - @test typeof(log(complex(A))) <: UpperTriangular{complex(elty)} - @test isreal(log(complex(A))) - @test log(complex(A)) ≈ log(A) - end - - Au = UnitUpperTriangular(exp(triu(randn(elty, n, n), 1))) - @inferred Union{typeof(A),typeof(complex(A))} log(Au) - @test exp(Matrix(log(Au))) ≈ Au - if elty <: Real - @test typeof(log(Au)) <: UpperTriangular{elty} - @test typeof(log(complex(Au))) <: UpperTriangular{complex(elty)} - @test isreal(log(complex(Au))) - @test log(complex(Au)) ≈ log(Au) - end -end - -Areal = randn(n, n)/2 -Aimg = randn(n, n)/2 -A2real = randn(n, n)/2 -A2img = randn(n, n)/2 - -for eltya in (Float32, Float64, ComplexF32, ComplexF64, BigFloat, Int) - A = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(Areal, Aimg) : Areal) - # a2 = eltya == Int ? rand(1:7, n, n) : convert(Matrix{eltya}, eltya <: Complex ? complex.(a2real, a2img) : a2real) - εa = eps(abs(float(one(eltya)))) - - for eltyb in (Float32, Float64, ComplexF32, ComplexF64) - εb = eps(abs(float(one(eltyb)))) - ε = max(εa,εb) - - debug && println("\ntype of A: ", eltya, " type of b: ", eltyb, "\n") - - debug && println("Solve upper triangular system") - Atri = UpperTriangular(lu(A).U) |> t -> eltya <: Complex && eltyb <: Real ? real(t) : t # Here the triangular matrix can't be too badly conditioned - b = convert(Matrix{eltyb}, Matrix(Atri)*fill(1., n, 2)) - x = Matrix(Atri) \ b - - debug && println("Test error estimates") - if eltya != BigFloat && eltyb != BigFloat - for i = 1:2 - @test norm(x[:,1] .- 1) <= errorbounds(UpperTriangular(A), x, b)[1][i] - end - end - debug && println("Test forward error [JIN 5705] if this is not a BigFloat") - - x = Atri \ b - γ = n*ε/(1 - n*ε) - if eltya != BigFloat - bigA = big.(Atri) - x̂ = fill(1., n, 2) - for i = 1:size(b, 2) - @test norm(x̂[:,i] - x[:,i], Inf)/norm(x̂[:,i], Inf) <= condskeel(bigA, x̂[:,i])*γ/(1 - condskeel(bigA)*γ) - end - end - - debug && println("Test backward error [JIN 5705]") - for i = 1:size(b, 2) - @test norm(abs.(b[:,i] - Atri*x[:,i]), Inf) <= γ * norm(Atri, Inf) * norm(x[:,i], Inf) - end - - debug && println("Solve lower triangular system") - Atri = UpperTriangular(lu(A).U) |> t -> eltya <: Complex && eltyb <: Real ? real(t) : t # Here the triangular matrix can't be too badly conditioned - b = convert(Matrix{eltyb}, Matrix(Atri)*fill(1., n, 2)) - x = Matrix(Atri)\b - - debug && println("Test error estimates") - if eltya != BigFloat && eltyb != BigFloat - for i = 1:2 - @test norm(x[:,1] .- 1) <= errorbounds(UpperTriangular(A), x, b)[1][i] - end - end - - debug && println("Test forward error [JIN 5705] if this is not a BigFloat") - b = (b0 = Atri*fill(1, n, 2); convert(Matrix{eltyb}, eltyb == Int ? 
trunc.(b0) : b0)) - x = Atri \ b - γ = n*ε/(1 - n*ε) - if eltya != BigFloat - bigA = big.(Atri) - x̂ = fill(1., n, 2) - for i = 1:size(b, 2) - @test norm(x̂[:,i] - x[:,i], Inf)/norm(x̂[:,i], Inf) <= condskeel(bigA, x̂[:,i])*γ/(1 - condskeel(bigA)*γ) - end - end - - debug && println("Test backward error [JIN 5705]") - for i = 1:size(b, 2) - @test norm(abs.(b[:,i] - Atri*x[:,i]), Inf) <= γ * norm(Atri, Inf) * norm(x[:,i], Inf) - end - end -end - -# Issue 10742 and similar -@test istril(UpperTriangular(diagm(0 => [1,2,3,4]))) -@test istriu(LowerTriangular(diagm(0 => [1,2,3,4]))) -@test isdiag(UpperTriangular(diagm(0 => [1,2,3,4]))) -@test isdiag(LowerTriangular(diagm(0 => [1,2,3,4]))) -@test !isdiag(UpperTriangular(rand(4, 4))) -@test !isdiag(LowerTriangular(rand(4, 4))) - -# Test throwing in fallbacks for non BlasFloat/BlasComplex in A_rdiv_Bx! -let n = 5 - A = rand(Float16, n, n) - B = rand(Float16, n-1, n-1) - @test_throws DimensionMismatch rdiv!(A, LowerTriangular(B)) - @test_throws DimensionMismatch rdiv!(A, UpperTriangular(B)) - @test_throws DimensionMismatch rdiv!(A, UnitLowerTriangular(B)) - @test_throws DimensionMismatch rdiv!(A, UnitUpperTriangular(B)) - - @test_throws DimensionMismatch rdiv!(A, adjoint(LowerTriangular(B))) - @test_throws DimensionMismatch rdiv!(A, adjoint(UpperTriangular(B))) - @test_throws DimensionMismatch rdiv!(A, adjoint(UnitLowerTriangular(B))) - @test_throws DimensionMismatch rdiv!(A, adjoint(UnitUpperTriangular(B))) - - @test_throws DimensionMismatch rdiv!(A, transpose(LowerTriangular(B))) - @test_throws DimensionMismatch rdiv!(A, transpose(UpperTriangular(B))) - @test_throws DimensionMismatch rdiv!(A, transpose(UnitLowerTriangular(B))) - @test_throws DimensionMismatch rdiv!(A, transpose(UnitUpperTriangular(B))) -end - -@test isdiag(LowerTriangular(UpperTriangular(randn(3,3)))) -@test isdiag(UpperTriangular(LowerTriangular(randn(3,3)))) - -# Issue 16196 -@test UpperTriangular(Matrix(1.0I, 3, 3)) \ view(fill(1., 3), [1,2,3]) == fill(1., 3) - -@testset "reverse" begin - A = randn(5, 5) - for (T, Trev) in ((UpperTriangular, LowerTriangular), - (UnitUpperTriangular, UnitLowerTriangular), - (LowerTriangular, UpperTriangular), - (UnitLowerTriangular, UnitUpperTriangular)) - A = T(randn(5, 5)) - AM = Matrix(A) - @test reverse(A, dims=1) == reverse(AM, dims=1) - @test reverse(A, dims=2) == reverse(AM, dims=2) - @test reverse(A)::Trev == reverse(AM) - end -end - -# dimensional correctness: -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :Furlongs) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Furlongs.jl")) -using .Main.Furlongs -LinearAlgebra.sylvester(a::Furlong,b::Furlong,c::Furlong) = -c / (a + b) - -@testset "dimensional correctness" begin - A = UpperTriangular([Furlong(1) Furlong(4); Furlong(0) Furlong(1)]) - @test sqrt(A)::UpperTriangular == Furlong{1//2}.(UpperTriangular([1 2; 0 1])) - @test inv(A)::UpperTriangular == Furlong{-1}.(UpperTriangular([1 -4; 0 1])) - B = UnitUpperTriangular([Furlong(1) Furlong(4); Furlong(0) Furlong(1)]) - @test sqrt(B)::UnitUpperTriangular == Furlong{1//2}.(UpperTriangular([1 2; 0 1])) - @test inv(B)::UnitUpperTriangular == Furlong{-1}.(UpperTriangular([1 -4; 0 1])) - b = [Furlong(5), Furlong(8)] - @test (A \ b)::Vector{<:Furlong{0}} == (B \ b)::Vector{<:Furlong{0}} == Furlong{0}.([-27, 8]) - C = LowerTriangular([Furlong(1) Furlong(0); Furlong(4) Furlong(1)]) - @test sqrt(C)::LowerTriangular == Furlong{1//2}.(LowerTriangular([1 0; 2 1])) - @test 
inv(C)::LowerTriangular == Furlong{-1}.(LowerTriangular([1 0; -4 1])) - D = UnitLowerTriangular([Furlong(1) Furlong(0); Furlong(4) Furlong(1)]) - @test sqrt(D)::UnitLowerTriangular == Furlong{1//2}.(UnitLowerTriangular([1 0; 2 1])) - @test inv(D)::UnitLowerTriangular == Furlong{-1}.(UnitLowerTriangular([1 0; -4 1])) - b = [Furlong(5), Furlong(8)] - @test (C \ b)::Vector{<:Furlong{0}} == (D \ b)::Vector{<:Furlong{0}} == Furlong{0}.([5, -12]) -end - -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays - -@testset "AbstractArray constructor should preserve underlying storage type" begin - # tests corresponding to #34995 - local m = 4 - local T, S = Float32, Float64 - immutablemat = ImmutableArray(randn(T,m,m)) - for TriType in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - trimat = TriType(immutablemat) - @test convert(AbstractArray{S}, trimat).data isa ImmutableArray{S} - @test convert(AbstractMatrix{S}, trimat).data isa ImmutableArray{S} - @test AbstractArray{S}(trimat).data isa ImmutableArray{S} - @test AbstractMatrix{S}(trimat).data isa ImmutableArray{S} - @test convert(AbstractArray{S}, trimat) == trimat - @test convert(AbstractMatrix{S}, trimat) == trimat - end -end - -@testset "inplace mul of appropriate types should preserve triagular structure" begin - for elty1 in (Float64, ComplexF32), elty2 in (Float64, ComplexF32) - T = promote_type(elty1, elty2) - M1 = rand(elty1, 5, 5) - M2 = rand(elty2, 5, 5) - A = UpperTriangular(M1) - A2 = UpperTriangular(M2) - Au = UnitUpperTriangular(M1) - Au2 = UnitUpperTriangular(M2) - B = LowerTriangular(M1) - B2 = LowerTriangular(M2) - Bu = UnitLowerTriangular(M1) - Bu2 = UnitLowerTriangular(M2) - - @test mul!(similar(A), A, A)::typeof(A) == A*A - @test mul!(similar(A, T), A, A2) ≈ A*A2 - @test mul!(similar(A, T), A2, A) ≈ A2*A - @test mul!(typeof(similar(A, T))(A), A, A2, 2.0, 3.0) ≈ 2.0*A*A2 + 3.0*A - @test mul!(typeof(similar(A2, T))(A2), A2, A, 2.0, 3.0) ≈ 2.0*A2*A + 3.0*A2 - - @test mul!(similar(A), A, Au)::typeof(A) == A*Au - @test mul!(similar(A), Au, A)::typeof(A) == Au*A - @test mul!(similar(Au), Au, Au)::typeof(Au) == Au*Au - @test mul!(similar(A, T), A, Au2) ≈ A*Au2 - @test mul!(similar(A, T), Au2, A) ≈ Au2*A - @test mul!(similar(Au2), Au2, Au2) == Au2*Au2 - - @test mul!(similar(B), B, B)::typeof(B) == B*B - @test mul!(similar(B, T), B, B2) ≈ B*B2 - @test mul!(similar(B, T), B2, B) ≈ B2*B - @test mul!(typeof(similar(B, T))(B), B, B2, 2.0, 3.0) ≈ 2.0*B*B2 + 3.0*B - @test mul!(typeof(similar(B2, T))(B2), B2, B, 2.0, 3.0) ≈ 2.0*B2*B + 3.0*B2 - - @test mul!(similar(B), B, Bu)::typeof(B) == B*Bu - @test mul!(similar(B), Bu, B)::typeof(B) == Bu*B - @test mul!(similar(Bu), Bu, Bu)::typeof(Bu) == Bu*Bu - @test mul!(similar(B, T), B, Bu2) ≈ B*Bu2 - @test mul!(similar(B, T), Bu2, B) ≈ Bu2*B - end -end - -@testset "indexing partly initialized matrices" begin - M = Matrix{BigFloat}(undef, 2, 2) - U = UpperTriangular(M) - @test iszero(U[2,1]) - L = LowerTriangular(M) - @test iszero(L[1,2]) -end - -@testset "special printing of Lower/UpperTriangular" begin - @test occursin(r"3×3 (LinearAlgebra\.)?LowerTriangular{Int64, Matrix{Int64}}:\n 2 ⋅ ⋅\n 2 2 ⋅\n 2 2 2", - sprint(show, MIME"text/plain"(), LowerTriangular(2ones(Int64,3,3)))) - @test occursin(r"3×3 (LinearAlgebra\.)?UnitLowerTriangular{Int64, Matrix{Int64}}:\n 1 ⋅ ⋅\n 2 1 ⋅\n 2 2 1", - sprint(show, MIME"text/plain"(), UnitLowerTriangular(2ones(Int64,3,3)))) - @test 
occursin(r"3×3 (LinearAlgebra\.)?UpperTriangular{Int64, Matrix{Int64}}:\n 2 2 2\n ⋅ 2 2\n ⋅ ⋅ 2", - sprint(show, MIME"text/plain"(), UpperTriangular(2ones(Int64,3,3)))) - @test occursin(r"3×3 (LinearAlgebra\.)?UnitUpperTriangular{Int64, Matrix{Int64}}:\n 1 2 2\n ⋅ 1 2\n ⋅ ⋅ 1", - sprint(show, MIME"text/plain"(), UnitUpperTriangular(2ones(Int64,3,3)))) - - # don't access non-structural elements while displaying - M = Matrix{BigFloat}(undef, 2, 2) - @test sprint(show, UpperTriangular(M)) == "BigFloat[#undef #undef; 0.0 #undef]" - @test sprint(show, LowerTriangular(M)) == "BigFloat[#undef 0.0; #undef #undef]" -end - -@testset "adjoint/transpose triangular/vector multiplication" begin - for elty in (Float64, ComplexF64), trity in (UpperTriangular, LowerTriangular) - A1 = trity(rand(elty, 1, 1)) - b1 = rand(elty, 1) - A4 = trity(rand(elty, 4, 4)) - b4 = rand(elty, 4) - @test A1 * b1' ≈ Matrix(A1) * b1' - @test_throws DimensionMismatch A4 * b4' - @test A1 * transpose(b1) ≈ Matrix(A1) * transpose(b1) - @test_throws DimensionMismatch A4 * transpose(b4) - @test A1' * b1' ≈ Matrix(A1') * b1' - @test_throws DimensionMismatch A4' * b4' - @test A1' * transpose(b1) ≈ Matrix(A1') * transpose(b1) - @test_throws DimensionMismatch A4' * transpose(b4) - @test transpose(A1) * transpose(b1) ≈ Matrix(transpose(A1)) * transpose(b1) - @test_throws DimensionMismatch transpose(A4) * transpose(b4) - @test transpose(A1) * b1' ≈ Matrix(transpose(A1)) * b1' - @test_throws DimensionMismatch transpose(A4) * b4' - @test b1' * transpose(A1) ≈ b1' * Matrix(transpose(A1)) - @test b4' * transpose(A4) ≈ b4' * Matrix(transpose(A4)) - @test transpose(b1) * A1' ≈ transpose(b1) * Matrix(A1') - @test transpose(b4) * A4' ≈ transpose(b4) * Matrix(A4') - end -end - -@testset "Error condition for powm" begin - A = UpperTriangular(rand(ComplexF64, 10, 10)) - @test_throws ArgumentError LinearAlgebra.powm!(A, 2.2) - A = LowerTriangular(rand(ComplexF64, 10, 10)) - At = copy(transpose(A)) - p = rand() - @test LinearAlgebra.powm(A, p) == transpose(LinearAlgebra.powm!(At, p)) - @test_throws ArgumentError LinearAlgebra.powm(A, 2.2) -end - -# Issue 35058 -let A = [0.9999999999999998 4.649058915617843e-16 -1.3149405273715513e-16 9.9959579317056e-17; -8.326672684688674e-16 1.0000000000000004 2.9280733590254494e-16 -2.9993900031619594e-16; 9.43689570931383e-16 -1.339206523454095e-15 1.0000000000000007 -8.550505126287743e-16; -6.245004513516506e-16 -2.0122792321330962e-16 1.183061278035052e-16 1.0000000000000002], - B = [0.09648289218436859 0.023497875751503007 0.0 0.0; 0.023497875751503007 0.045787575150300804 0.0 0.0; 0.0 0.0 0.0 0.0; 0.0 0.0 0.0 0.0] - @test sqrt(A*B*A')^2 ≈ A*B*A' -end - -@testset "one and oneunit for triangular" begin - m = rand(4,4) - function test_one_oneunit_triangular(a) - b = Matrix(a) - @test (@inferred a^1) == b^1 - @test (@inferred a^-1) ≈ b^-1 - @test one(a) == one(b) - @test one(a)*a == a - @test a*one(a) == a - @test oneunit(a) == oneunit(b) - @test oneunit(a) isa typeof(a) - end - for T in [UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular] - a = T(m) - test_one_oneunit_triangular(a) - end - # more complicated examples - b = UpperTriangular(LowerTriangular(m)) - test_one_oneunit_triangular(b) - c = UpperTriangular(Diagonal(rand(2))) - test_one_oneunit_triangular(c) -end - -@testset "LowerTriangular(Diagonal(...)) and friends (issue #28869)" begin - for elty in (Float32, Float64, BigFloat, ComplexF32, ComplexF64, Complex{BigFloat}, Int) - V = elty ≡ Int ? 
rand(1:10, 5) : elty.(randn(5)) - D = Diagonal(V) - for dty in (UpperTriangular, LowerTriangular) - A = dty(D) - @test A * A' == D * D' - end - end -end - -@testset "tril!/triu! for non-bitstype matrices" begin - @testset "numeric" begin - M = Matrix{BigFloat}(undef, 3, 3) - tril!(M) - L = LowerTriangular(ones(3,3)) - copytrito!(M, L, 'L') - @test M == L - - M = Matrix{BigFloat}(undef, 3, 3) - triu!(M) - U = UpperTriangular(ones(3,3)) - copytrito!(M, U, 'U') - @test M == U - end - @testset "array elements" begin - M = fill(ones(2,2), 4, 4) - tril!(M) - L = LowerTriangular(fill(fill(2,2,2),4,4)) - copytrito!(M, L, 'L') - @test M == L - - M = fill(ones(2,2), 4, 4) - triu!(M) - U = UpperTriangular(fill(fill(2,2,2),4,4)) - copytrito!(M, U, 'U') - @test M == U - end -end - -@testset "avoid matmul ambiguities with ::MyMatrix * ::AbstractMatrix" begin - A = [i+j for i in 1:2, j in 1:2] - S = SizedArrays.SizedArray{(2,2)}(A) - U = UpperTriangular(ones(2,2)) - @test S * U == A * U - @test U * S == U * A - C1, C2 = zeros(2,2), zeros(2,2) - @test mul!(C1, S, U) == mul!(C2, A, U) - @test mul!(C1, S, U, 1, 2) == mul!(C2, A, U, 1 ,2) - @test mul!(C1, U, S) == mul!(C2, U, A) - @test mul!(C1, U, S, 1, 2) == mul!(C2, U, A, 1 ,2) - - v = [i for i in 1:2] - sv = SizedArrays.SizedArray{(2,)}(v) - @test U * sv == U * v - C1, C2 = zeros(2), zeros(2) - @test mul!(C1, U, sv) == mul!(C2, U, v) - @test mul!(C1, U, sv, 1, 2) == mul!(C2, U, v, 1 ,2) -end - -@testset "custom axes" begin - SZA = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) - for T in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - S = T(SZA) - r = SizedArrays.SOneTo(2) - @test axes(S) === (r,r) - end -end - -@testset "immutable and non-strided parent" begin - F = FillArrays.Fill(2, (4,4)) - for UT in (UnitUpperTriangular, UnitLowerTriangular) - U = UT(F) - @test -U == -Array(U) - end - - F = FillArrays.Fill(3im, (4,4)) - for U in (UnitUpperTriangular(F), UnitLowerTriangular(F)) - @test imag(F) == imag(collect(F)) - end - - @testset "copyto!" begin - for T in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - @test Matrix(T(F)) == T(F) - end - @test copyto!(zeros(eltype(F), length(F)), UpperTriangular(F)) == vec(UpperTriangular(F)) - end -end - -@testset "error paths" begin - A = zeros(1,1); B = zeros(2,2) - @testset "inplace mul scaling with incompatible sizes" begin - for T in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - @test_throws DimensionMismatch mul!(T(A), T(B), 3) - @test_throws DimensionMismatch mul!(T(A), 3, T(B)) - end - end - @testset "copyto with incompatible sizes" begin - for T in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - @test_throws BoundsError copyto!(T(A), T(B)) - end - end -end - -@testset "uppertriangular/lowertriangular" begin - M = rand(2,2) - @test LinearAlgebra.uppertriangular(M) === UpperTriangular(M) - @test LinearAlgebra.lowertriangular(M) === LowerTriangular(M) - @test LinearAlgebra.uppertriangular(UnitUpperTriangular(M)) === UnitUpperTriangular(M) - @test LinearAlgebra.lowertriangular(UnitLowerTriangular(M)) === UnitLowerTriangular(M) -end - -@testset "arithmetic with partly uninitialized matrices" begin - @testset "$(typeof(A))" for A in (Matrix{BigFloat}(undef,2,2), Matrix{Complex{BigFloat}}(undef,2,2)') - A[2,1] = eltype(A) <: Complex ? 
4 + 3im : 4 - B = Matrix{eltype(A)}(undef, size(A)) - for MT in (LowerTriangular, UnitLowerTriangular) - if MT == LowerTriangular - A[1,1] = A[2,2] = eltype(A) <: Complex ? 4 + 3im : 4 - end - L = MT(A) - B .= 0 - copyto!(B, L) - @test copy(L) == B - @test L * 2 == 2 * L == 2B - @test L/2 == B/2 - @test 2\L == 2\B - @test real(L) == real(B) - @test imag(L) == imag(B) - if MT == LowerTriangular - @test isa(kron(L,L), MT) - end - @test kron(L,L) == kron(B,B) - @test transpose!(MT(copy(A))) == transpose(L) broken=!(A isa Matrix) - @test adjoint!(MT(copy(A))) == adjoint(L) broken=!(A isa Matrix) - end - end - - @testset "$(typeof(A))" for A in (Matrix{BigFloat}(undef,2,2), Matrix{Complex{BigFloat}}(undef,2,2)') - A[1,2] = eltype(A) <: Complex ? 4 + 3im : 4 - B = Matrix{eltype(A)}(undef, size(A)) - for MT in (UpperTriangular, UnitUpperTriangular) - if MT == UpperTriangular - A[1,1] = A[2,2] = eltype(A) <: Complex ? 4 + 3im : 4 - end - U = MT(A) - B .= 0 - copyto!(B, U) - @test copy(U) == B - @test U * 2 == 2 * U == 2B - @test U/2 == B/2 - @test 2\U == 2\B - @test real(U) == real(B) - @test imag(U) == imag(B) - if MT == UpperTriangular - @test isa(kron(U,U), MT) - end - @test kron(U,U) == kron(B,B) - @test transpose!(MT(copy(A))) == transpose(U) broken=!(A isa Matrix) - @test adjoint!(MT(copy(A))) == adjoint(U) broken=!(A isa Matrix) - end - end -end - -@testset "kron with triangular matrices of matrices" begin - for T in (UpperTriangular, LowerTriangular) - t = T(fill(ones(2,2), 2, 2)) - m = Matrix(t) - @test isa(kron(t,t), T) - @test kron(t, t) ≈ kron(m, m) - end -end - -@testset "kron with triangular matrices of mixed eltypes" begin - for T in (UpperTriangular, LowerTriangular) - U = T(Matrix{Union{Missing,Int}}(fill(2, 2, 2))) - U[1, 1] = missing - @test kron(U, U)[2, 3] == 0 - @test kron(U, U)[3, 2] == 0 - end -end - -@testset "copyto! tests" begin - @testset "copyto! with aliasing (#39460)" begin - M = Matrix(reshape(1:36, 6, 6)) - @testset for T in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - A = T(view(M, 1:5, 1:5)) - A2 = copy(A) - B = T(view(M, 2:6, 2:6)) - @test copyto!(B, A) == A2 - end - end - - @testset "copyto! with different matrix types" begin - M1 = Matrix(reshape(1:36, 6, 6)) - M2 = similar(M1) - # these copies always work - @testset for (Tdest, Tsrc) in ( - (UpperTriangular, UnitUpperTriangular), - (UpperTriangular, UpperTriangular), - (LowerTriangular, UnitLowerTriangular), - (LowerTriangular, LowerTriangular), - (UnitUpperTriangular, UnitUpperTriangular), - (UnitLowerTriangular, UnitLowerTriangular) - ) - - M2 .= 0 - copyto!(Tdest(M2), Tsrc(M1)) - @test Tdest(M2) == Tsrc(M1) - end - # these copies only work if the source has a unit diagonal - M3 = copy(M1) - M3[diagind(M3)] .= 1 - @testset for (Tdest, Tsrc) in ( - (UnitUpperTriangular, UpperTriangular), - (UnitLowerTriangular, LowerTriangular), - ) - - M2 .= 0 - copyto!(Tdest(M2), Tsrc(M3)) - @test Tdest(M2) == Tsrc(M3) - @test_throws ArgumentError copyto!(Tdest(M2), Tsrc(M1)) - end - # these copies work even when the parent of the source isn't initialized along the diagonal - @testset for (T, TU) in ((UpperTriangular, UnitUpperTriangular), - (LowerTriangular, UnitLowerTriangular)) - M1 = Matrix{BigFloat}(undef, 3, 3) - M2 = similar(M1) - if TU == UnitUpperTriangular - M1[1,2] = M1[1,3] = M1[2,3] = 2 - else - M1[2,1] = M1[3,1] = M1[3,2] = 2 - end - for TD in (T, TU) - M2 .= 0 - copyto!(T(M2), TU(M1)) - @test T(M2) == TU(M1) - end - end - end - - @testset "copyto! 
with different sizes" begin - Ap = zeros(3,3) - Bp = rand(2,2) - @testset for T in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - A = T(Ap) - B = T(Bp) - @test_throws ArgumentError copyto!(A, B) - end - @testset "error message" begin - A = UpperTriangular(Ap) - B = UpperTriangular(Bp) - @test_throws "cannot set index in the lower triangular part" copyto!(A, B) - - A = LowerTriangular(Ap) - B = LowerTriangular(Bp) - @test_throws "cannot set index in the upper triangular part" copyto!(A, B) - end - end -end - -@testset "getindex with Integers" begin - M = reshape(1:4,2,2) - for Ttype in (UpperTriangular, UnitUpperTriangular) - T = Ttype(M) - @test_throws "invalid index" T[2, true] - @test T[1,2] == T[Int8(1),UInt16(2)] == T[big(1), Int16(2)] - end - for Ttype in (LowerTriangular, UnitLowerTriangular) - T = Ttype(M) - @test_throws "invalid index" T[true, 2] - @test T[2,1] == T[Int8(2),UInt16(1)] == T[big(2), Int16(1)] - end -end - -@testset "type-stable eigvecs" begin - D = Float64[1 0; 0 2] - V = @inferred eigvecs(UpperTriangular(D)) - @test V == Diagonal([1, 1]) -end - -@testset "preserve structure in scaling by NaN" begin - M = rand(Int8,2,2) - for (Ts, TD) in (((UpperTriangular, UnitUpperTriangular), UpperTriangular), - ((LowerTriangular, UnitLowerTriangular), LowerTriangular)) - for T in Ts - U = T(M) - for V in (U * NaN, NaN * U, U / NaN, NaN \ U) - @test V isa TD{Float64, Matrix{Float64}} - @test all(isnan, diag(V)) - end - end - end -end - -@testset "eigvecs for AbstractTriangular" begin - S = SizedArrays.SizedArray{(3,3)}(reshape(1:9,3,3)) - for T in (UpperTriangular, UnitUpperTriangular, - LowerTriangular, UnitLowerTriangular) - U = T(S) - V = eigvecs(U) - λ = eigvals(U) - @test U * V ≈ V * Diagonal(λ) - - MU = MyTriangular(U) - V = eigvecs(U) - λ = eigvals(U) - @test MU * V ≈ V * Diagonal(λ) - end -end - -@testset "(l/r)mul! and (l/r)div! for generic triangular" begin - @testset for T in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) - M = MyTriangular(T(rand(4,4))) - A = rand(4,4) - Ac = similar(A) - @testset "lmul!" begin - Ac .= A - lmul!(M, Ac) - @test Ac ≈ M * A - end - @testset "rmul!" begin - Ac .= A - rmul!(Ac, M) - @test Ac ≈ A * M - end - @testset "ldiv!" begin - Ac .= A - ldiv!(M, Ac) - @test Ac ≈ M \ A - end - @testset "rdiv!" begin - Ac .= A - rdiv!(Ac, M) - @test Ac ≈ A / M - end - end -end - -@testset "istriu/istril forwards to parent" begin - @testset "$(nameof(typeof(M)))" for M in [Tridiagonal(rand(n-1), rand(n), rand(n-1)), - Tridiagonal(zeros(n-1), zeros(n), zeros(n-1)), - Diagonal(randn(n)), - Diagonal(zeros(n)), - ] - @testset for TriT in (UpperTriangular, UnitUpperTriangular, LowerTriangular, UnitLowerTriangular) - U = TriT(M) - A = Array(U) - for k in -n:n - @test istriu(U, k) == istriu(A, k) - @test istril(U, k) == istril(A, k) - end - end - end - z = zeros(n,n) - @testset for TriT in (UpperTriangular, UnitUpperTriangular, LowerTriangular, UnitLowerTriangular) - P = Matrix{BigFloat}(undef, n, n) - copytrito!(P, z, TriT <: Union{UpperTriangular, UnitUpperTriangular} ? 
'U' : 'L') - U = TriT(P) - A = Array(U) - @testset for k in -n:n - @test istriu(U, k) == istriu(A, k) - @test istril(U, k) == istril(A, k) - end - end - - @testset "Union eltype" begin - M = Matrix{Union{Int,Missing}}(missing,2,2) - U = triu(M) - @test iszero(U[2,1]) - U = tril(M) - @test iszero(U[1,2]) - end -end - -@testset "indexing with a BandIndex" begin - # these tests should succeed even if the linear index along - # the band isn't a constant, or type-inferred at all - M = rand(Int,2,2) - f(A,j, v::Val{n}) where {n} = Val(A[BandIndex(n,j)]) - function common_tests(M, ind) - j = ind[] - @test @inferred(f(UpperTriangular(M), j, Val(-1))) == Val(0) - @test @inferred(f(UnitUpperTriangular(M), j, Val(-1))) == Val(0) - @test @inferred(f(UnitUpperTriangular(M), j, Val(0))) == Val(1) - @test @inferred(f(LowerTriangular(M), j, Val(1))) == Val(0) - @test @inferred(f(UnitLowerTriangular(M), j, Val(1))) == Val(0) - @test @inferred(f(UnitLowerTriangular(M), j, Val(0))) == Val(1) - end - common_tests(M, Any[1]) - - M = Diagonal([1,2]) - common_tests(M, Any[1]) - # extra tests for banded structure of the parent - for T in (UpperTriangular, UnitUpperTriangular) - @test @inferred(f(T(M), 1, Val(1))) == Val(0) - end - for T in (LowerTriangular, UnitLowerTriangular) - @test @inferred(f(T(M), 1, Val(-1))) == Val(0) - end - - M = Tridiagonal([1,2], [1,2,3], [1,2]) - common_tests(M, Any[1]) - for T in (UpperTriangular, UnitUpperTriangular) - @test @inferred(f(T(M), 1, Val(2))) == Val(0) - end - for T in (LowerTriangular, UnitLowerTriangular) - @test @inferred(f(T(M), 1, Val(-2))) == Val(0) - end -end - -@testset "indexing uses diagzero" begin - @testset "block matrix" begin - M = reshape([zeros(2,2), zeros(4,2), zeros(2,3), zeros(4,3)],2,2) - U = UpperTriangular(M) - @test [size(x) for x in U] == [size(x) for x in M] - end - @testset "Union eltype" begin - M = Matrix{Union{Int,Missing}}(missing,4,4) - U = UpperTriangular(M) - @test iszero(U[3,1]) - end -end - -@testset "addition/subtraction of mixed triangular" begin - for A in (Hermitian(rand(4, 4)), Diagonal(rand(5))) - for T in (UpperTriangular, LowerTriangular, - UnitUpperTriangular, UnitLowerTriangular) - B = T(A) - M = Matrix(B) - R = B - B' - if A isa Diagonal - @test R isa Diagonal - end - @test R == M - M' - R = B + B' - if A isa Diagonal - @test R isa Diagonal - end - @test R == M + M' - C = MyTriangular(B) - @test C - C' == M - M' - @test C + C' == M + M' - end - end - @testset "unfilled parent" begin - @testset for T in (UpperTriangular, LowerTriangular, - UnitUpperTriangular, UnitLowerTriangular) - F = Matrix{BigFloat}(undef, 2, 2) - B = T(F) - isupper = B isa Union{UpperTriangular, UnitUpperTriangular} - B[1+!isupper, 1+isupper] = 2 - if !(B isa Union{UnitUpperTriangular, UnitLowerTriangular}) - B[1,1] = B[2,2] = 3 - end - M = Matrix(B) - @test B - B' == M - M' - @test B + B' == M + M' - @test B - copy(B') == M - M' - @test B + copy(B') == M + M' - C = MyTriangular(B) - @test C - C' == M - M' - @test C + C' == M + M' - end - end -end - -@testset "log_quasitriu with internal scaling s=0 (issue #54833)" begin - M = [0.9949357359852791 -0.015567763143324862 -0.09091193493947397 -0.03994428739762443 0.07338356301650806; - 0.011813655598647289 0.9968988574699793 -0.06204555000202496 0.04694097614450692 0.09028834462782365; - 0.092737943594701 0.059546719185135925 0.9935850721633324 0.025348893985651405 -0.018530261590167685; - 0.0369187299165628 -0.04903571106913449 -0.025962938675946543 0.9977767446862031 0.12901494726320517; - 0.0 0.0 0.0 0.0 
1.0]
-
-    @test exp(log(M)) ≈ M
-end
-
-@testset "copytrito!" begin
-    for T in (UpperTriangular, LowerTriangular)
-        M = Matrix{BigFloat}(undef, 2, 2)
-        M[1,1] = M[2,2] = 3
-        U = T(M)
-        isupper = U isa UpperTriangular
-        M[1+!isupper, 1+isupper] = 4
-        uplo, loup = U isa UpperTriangular ? ('U', 'L') : ('L', 'U' )
-        @test copytrito!(similar(U), U, uplo) == U
-        @test copytrito!(zero(M), U, uplo) == U
-        @test copytrito!(similar(U), Array(U), uplo) == U
-        @test copytrito!(zero(U), U, loup) == Diagonal(U)
-        @test copytrito!(similar(U), MyTriangular(U), uplo) == U
-        @test copytrito!(zero(M), MyTriangular(U), uplo) == U
-        Ubig = T(similar(M, (3,3)))
-        copytrito!(Ubig, U, uplo)
-        @test Ubig[axes(U)...] == U
-    end
-end
-
-end # module TestTriangular
diff --git a/stdlib/LinearAlgebra/test/trickyarithmetic.jl b/stdlib/LinearAlgebra/test/trickyarithmetic.jl
deleted file mode 100644
index ad04ac89c2761..0000000000000
--- a/stdlib/LinearAlgebra/test/trickyarithmetic.jl
+++ /dev/null
@@ -1,66 +0,0 @@
-# This file is a part of Julia. License is MIT: https://julialang.org/license
-
-module TrickyArithmetic
-    struct A
-        x::Int
-    end
-    A(a::A) = a
-    Base.convert(::Type{A}, i::Int) = A(i)
-    Base.zero(::Union{A, Type{A}}) = A(0)
-    Base.one(::Union{A, Type{A}}) = A(1)
-    Base.isfinite(a::A) = isfinite(a.x)
-    struct B
-        x::Int
-    end
-    struct C
-        x::Int
-    end
-    Base.isfinite(b::B) = isfinite(b.x)
-    Base.isfinite(c::C) = isfinite(c.x)
-    C(a::A) = C(a.x)
-    Base.zero(::Union{C, Type{C}}) = C(0)
-    Base.one(::Union{C, Type{C}}) = C(1)
-
-    Base.:(*)(x::Int, a::A) = B(x*a.x)
-    Base.:(*)(a::A, x::Int) = B(a.x*x)
-    Base.:(*)(a::Union{A,B}, b::Union{A,B}) = B(a.x*b.x)
-    Base.:(*)(a::Union{A,B,C}, b::Union{A,B,C}) = C(a.x*b.x)
-    Base.:(+)(a::Union{A,B,C}, b::Union{A,B,C}) = C(a.x+b.x)
-    Base.:(-)(a::Union{A,B,C}, b::Union{A,B,C}) = C(a.x-b.x)
-
-    struct D{NT, DT}
-        n::NT
-        d::DT
-    end
-    D{NT, DT}(d::D{NT, DT}) where {NT, DT} = d # called by oneunit
-    Base.zero(::Union{D{NT, DT}, Type{D{NT, DT}}}) where {NT, DT} = zero(NT) / one(DT)
-    Base.one(::Union{D{NT, DT}, Type{D{NT, DT}}}) where {NT, DT} = one(NT) / one(DT)
-    Base.convert(::Type{D{NT, DT}}, a::Union{A, B, C}) where {NT, DT} = NT(a) / one(DT)
-    #Base.convert(::Type{D{NT, DT}}, a::D) where {NT, DT} = NT(a.n) / DT(a.d)
-
-    Base.:(*)(a::D, b::D) = (a.n*b.n) / (a.d*b.d)
-    Base.:(*)(a::D, b::Union{A,B,C}) = (a.n * b) / a.d
-    Base.:(*)(a::Union{A,B,C}, b::D) = b * a
-    Base.inv(a::Union{A,B,C}) = A(1) / a
-    Base.inv(a::D) = a.d / a.n
-    Base.isfinite(a::D) = isfinite(a.n) && isfinite(a.d)
-    Base.:(/)(a::Union{A,B,C}, b::Union{A,B,C}) = D(a, b)
-    Base.:(/)(a::D, b::Union{A,B,C}) = a.n / (a.d*b)
-    Base.:(/)(a::Union{A,B,C,D}, b::D) = a * inv(b)
-    Base.:(+)(a::Union{A,B,C}, b::D) = (a*b.d+b.n) / b.d
-    Base.:(+)(a::D, b::Union{A,B,C}) = b + a
-    Base.:(+)(a::D, b::D) = (a.n*b.d+a.d*b.n) / (a.d*b.d)
-    Base.:(-)(a::Union{A,B,C}) = typeof(a)(a.x)
-    Base.:(-)(a::D) = (-a.n) / a.d
-    Base.:(-)(a::Union{A,B,C,D}, b::Union{A,B,C,D}) = a + (-b)
-
-    Base.promote_rule(::Type{A}, ::Type{B}) = B
-    Base.promote_rule(::Type{B}, ::Type{A}) = B
-    Base.promote_rule(::Type{A}, ::Type{C}) = C
-    Base.promote_rule(::Type{C}, ::Type{A}) = C
-    Base.promote_rule(::Type{B}, ::Type{C}) = C
-    Base.promote_rule(::Type{C}, ::Type{B}) = C
-    Base.promote_rule(::Type{D{NT,DT}}, T::Type{<:Union{A,B,C}}) where {NT,DT} = D{promote_type(NT,T),DT}
-    Base.promote_rule(T::Type{<:Union{A,B,C}}, ::Type{D{NT,DT}}) where {NT,DT} = D{promote_type(NT,T),DT}
-    Base.promote_rule(::Type{D{NS,DS}}, ::Type{D{NT,DT}}) where {NS,DS,NT,DT} = D{promote_type(NS,NT),promote_type(DS,DT)}
-end
diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl
deleted file mode 100644
index dc14ddb1d1b27..0000000000000
--- a/stdlib/LinearAlgebra/test/tridiag.jl
+++ /dev/null
@@ -1,1078 +0,0 @@
-# This file is a part of Julia. License is MIT: https://julialang.org/license
-
-module TestTridiagonal
-
-using Test, LinearAlgebra, Random
-
-const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test")
-
-isdefined(Main, :Quaternions) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Quaternions.jl"))
-using .Main.Quaternions
-
-isdefined(Main, :InfiniteArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "InfiniteArrays.jl"))
-using .Main.InfiniteArrays
-
-isdefined(Main, :FillArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "FillArrays.jl"))
-using .Main.FillArrays
-
-isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl"))
-using .Main.OffsetArrays
-
-isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl"))
-using .Main.SizedArrays
-
-include("testutils.jl") # test_approx_eq_modphase
-
-#Test equivalence of eigenvectors/singular vectors taking into account possible phase (sign) differences
-function test_approx_eq_vecs(a::StridedVecOrMat{S}, b::StridedVecOrMat{T}, error=nothing) where {S<:Real,T<:Real}
-    n = size(a, 1)
-    @test n==size(b,1) && size(a,2)==size(b,2)
-    error===nothing && (error=n^3*(eps(S)+eps(T)))
-    for i=1:n
-        ev1, ev2 = a[:,i], b[:,i]
-        deviation = min(abs(norm(ev1-ev2)),abs(norm(ev1+ev2)))
-        if !isnan(deviation)
-            @test deviation ≈ 0.0 atol=error
-        end
-    end
-end
-
-@testset for elty in (Float32, Float64, ComplexF32, ComplexF64, Int)
-    n = 12 #Size of matrix problem to test
-    Random.seed!(123)
-    if elty == Int
-        Random.seed!(61516384)
-        d = rand(1:100, n)
-        dl = -rand(0:10, n-1)
-        du = -rand(0:10, n-1)
-        v = rand(1:100, n)
-        B = rand(1:100, n, 2)
-        a = rand(1:100, n-1)
-        b = rand(1:100, n)
-        c = rand(1:100, n-1)
-    else
-        d = convert(Vector{elty}, 1 .+ randn(n))
-        dl = convert(Vector{elty}, randn(n - 1))
-        du = convert(Vector{elty}, randn(n - 1))
-        v = convert(Vector{elty}, randn(n))
-        B = convert(Matrix{elty}, randn(n, 2))
-        a = convert(Vector{elty}, randn(n - 1))
-        b = convert(Vector{elty}, randn(n))
-        c = convert(Vector{elty}, randn(n - 1))
-        if elty <: Complex
-            a += im*convert(Vector{elty}, randn(n - 1))
-            b += im*convert(Vector{elty}, randn(n))
-            c += im*convert(Vector{elty}, randn(n - 1))
-        end
-    end
-    @test_throws DimensionMismatch SymTridiagonal(dl, fill(elty(1), n+1))
-    @test_throws ArgumentError SymTridiagonal(rand(n, n))
-    @test_throws ArgumentError Tridiagonal(dl, dl, dl)
-    @test_throws ArgumentError convert(SymTridiagonal{elty}, Tridiagonal(dl, d, du))
-
-    if elty != Int
-        @testset "issue #1490" begin
-            @test det(fill(elty(1),3,3)) ≈ zero(elty) atol=3*eps(real(one(elty)))
-            @test det(SymTridiagonal(elty[],elty[])) == one(elty)
-        end
-    end
-
-    @testset "constructor" begin
-        for (x, y) in ((d, dl), (GenericArray(d), GenericArray(dl)))
-            ST = (SymTridiagonal(x, y))::SymTridiagonal{elty, typeof(x)}
-            @test ST == Matrix(ST)
-            @test ST.dv === x
-            @test ST.ev === y
-            @test typeof(ST)(ST) === ST
-            TT = (Tridiagonal(y, x, y))::Tridiagonal{elty, typeof(x)}
-            @test TT == Matrix(TT)
-            @test TT.dl === y
-            @test TT.d === x
-            @test TT.du == y
-            @test typeof(TT)(TT) === TT
-        end
-        ST =
SymTridiagonal{elty}([1,2,3,4], [1,2,3]) - @test eltype(ST) == elty - @test SymTridiagonal{elty, Vector{elty}}(ST) === ST - @test SymTridiagonal{Int64, Vector{Int64}}(ST) isa SymTridiagonal{Int64, Vector{Int64}} - TT = Tridiagonal{elty}([1,2,3], [1,2,3,4], [1,2,3]) - @test eltype(TT) == elty - ST = SymTridiagonal{elty,Vector{elty}}(d, GenericArray(dl)) - @test isa(ST, SymTridiagonal{elty,Vector{elty}}) - TT = Tridiagonal{elty,Vector{elty}}(GenericArray(dl), d, GenericArray(dl)) - @test isa(TT, Tridiagonal{elty,Vector{elty}}) - @test_throws ArgumentError SymTridiagonal(d, GenericArray(dl)) - @test_throws ArgumentError SymTridiagonal(GenericArray(d), dl) - @test_throws ArgumentError Tridiagonal(GenericArray(dl), d, GenericArray(dl)) - @test_throws ArgumentError Tridiagonal(dl, GenericArray(d), dl) - @test_throws ArgumentError SymTridiagonal{elty}(d, GenericArray(dl)) - @test_throws ArgumentError Tridiagonal{elty}(GenericArray(dl), d,GenericArray(dl)) - STI = SymTridiagonal([1,2,3,4], [1,2,3]) - TTI = Tridiagonal([1,2,3], [1,2,3,4], [1,2,3]) - TTI2 = Tridiagonal([1,2,3], [1,2,3,4], [1,2,3], [1,2]) - @test SymTridiagonal(STI) === STI - @test Tridiagonal(TTI) === TTI - @test Tridiagonal(TTI2) === TTI2 - @test isa(SymTridiagonal{elty}(STI), SymTridiagonal{elty}) - @test isa(Tridiagonal{elty}(TTI), Tridiagonal{elty}) - TTI2y = Tridiagonal{elty}(TTI2) - @test isa(TTI2y, Tridiagonal{elty}) - @test TTI2y.du2 == convert(Vector{elty}, [1,2]) - end - @testset "interconversion of Tridiagonal and SymTridiagonal" begin - @test Tridiagonal(dl, d, dl) == SymTridiagonal(d, dl) - @test SymTridiagonal(d, dl) == Tridiagonal(dl, d, dl) - @test Tridiagonal(dl, d, du) + Tridiagonal(du, d, dl) == SymTridiagonal(2d, dl+du) - @test SymTridiagonal(d, dl) + Tridiagonal(dl, d, du) == Tridiagonal(dl + dl, d+d, dl+du) - @test convert(SymTridiagonal,Tridiagonal(SymTridiagonal(d, dl))) == SymTridiagonal(d, dl) - @test Array(convert(SymTridiagonal{ComplexF32},Tridiagonal(SymTridiagonal(d, dl)))) == convert(Matrix{ComplexF32}, SymTridiagonal(d, dl)) - end - @testset "tril/triu" begin - zerosd = fill!(similar(d), 0) - zerosdl = fill!(similar(dl), 0) - zerosdu = fill!(similar(du), 0) - @test_throws ArgumentError tril!(SymTridiagonal(d, dl), -n - 2) - @test_throws ArgumentError tril!(SymTridiagonal(d, dl), n) - @test_throws ArgumentError tril!(Tridiagonal(dl, d, du), -n - 2) - @test_throws ArgumentError tril!(Tridiagonal(dl, d, du), n) - @test @inferred(tril(SymTridiagonal(d,dl))) == Tridiagonal(dl,d,zerosdl) - @test @inferred(tril(SymTridiagonal(d,dl),1)) == Tridiagonal(dl,d,dl) - @test @inferred(tril(SymTridiagonal(d,dl),-1)) == Tridiagonal(dl,zerosd,zerosdl) - @test @inferred(tril(SymTridiagonal(d,dl),-2)) == Tridiagonal(zerosdl,zerosd,zerosdl) - @test @inferred(tril(Tridiagonal(dl,d,du))) == Tridiagonal(dl,d,zerosdu) - @test @inferred(tril(Tridiagonal(dl,d,du),1)) == Tridiagonal(dl,d,du) - @test @inferred(tril(Tridiagonal(dl,d,du),-1)) == Tridiagonal(dl,zerosd,zerosdu) - @test @inferred(tril(Tridiagonal(dl,d,du),-2)) == Tridiagonal(zerosdl,zerosd,zerosdu) - @test @inferred(tril!(copy(SymTridiagonal(d,dl)))) == Tridiagonal(dl,d,zerosdl) - @test @inferred(tril!(copy(SymTridiagonal(d,dl)),1)) == Tridiagonal(dl,d,dl) - @test @inferred(tril!(copy(SymTridiagonal(d,dl)),-1)) == Tridiagonal(dl,zerosd,zerosdl) - @test @inferred(tril!(copy(SymTridiagonal(d,dl)),-2)) == Tridiagonal(zerosdl,zerosd,zerosdl) - @test @inferred(tril!(copy(Tridiagonal(dl,d,du)))) == Tridiagonal(dl,d,zerosdu) - @test 
@inferred(tril!(copy(Tridiagonal(dl,d,du)),1)) == Tridiagonal(dl,d,du) - @test @inferred(tril!(copy(Tridiagonal(dl,d,du)),-1)) == Tridiagonal(dl,zerosd,zerosdu) - @test @inferred(tril!(copy(Tridiagonal(dl,d,du)),-2)) == Tridiagonal(zerosdl,zerosd,zerosdu) - - @test_throws ArgumentError triu!(SymTridiagonal(d, dl), -n) - @test_throws ArgumentError triu!(SymTridiagonal(d, dl), n + 2) - @test_throws ArgumentError triu!(Tridiagonal(dl, d, du), -n) - @test_throws ArgumentError triu!(Tridiagonal(dl, d, du), n + 2) - @test @inferred(triu(SymTridiagonal(d,dl))) == Tridiagonal(zerosdl,d,dl) - @test @inferred(triu(SymTridiagonal(d,dl),-1)) == Tridiagonal(dl,d,dl) - @test @inferred(triu(SymTridiagonal(d,dl),1)) == Tridiagonal(zerosdl,zerosd,dl) - @test @inferred(triu(SymTridiagonal(d,dl),2)) == Tridiagonal(zerosdl,zerosd,zerosdl) - @test @inferred(triu(Tridiagonal(dl,d,du))) == Tridiagonal(zerosdl,d,du) - @test @inferred(triu(Tridiagonal(dl,d,du),-1)) == Tridiagonal(dl,d,du) - @test @inferred(triu(Tridiagonal(dl,d,du),1)) == Tridiagonal(zerosdl,zerosd,du) - @test @inferred(triu(Tridiagonal(dl,d,du),2)) == Tridiagonal(zerosdl,zerosd,zerosdu) - @test @inferred(triu!(copy(SymTridiagonal(d,dl)))) == Tridiagonal(zerosdl,d,dl) - @test @inferred(triu!(copy(SymTridiagonal(d,dl)),-1)) == Tridiagonal(dl,d,dl) - @test @inferred(triu!(copy(SymTridiagonal(d,dl)),1)) == Tridiagonal(zerosdl,zerosd,dl) - @test @inferred(triu!(copy(SymTridiagonal(d,dl)),2)) == Tridiagonal(zerosdl,zerosd,zerosdl) - @test @inferred(triu!(copy(Tridiagonal(dl,d,du)))) == Tridiagonal(zerosdl,d,du) - @test @inferred(triu!(copy(Tridiagonal(dl,d,du)),-1)) == Tridiagonal(dl,d,du) - @test @inferred(triu!(copy(Tridiagonal(dl,d,du)),1)) == Tridiagonal(zerosdl,zerosd,du) - @test @inferred(triu!(copy(Tridiagonal(dl,d,du)),2)) == Tridiagonal(zerosdl,zerosd,zerosdu) - - @test !istril(SymTridiagonal(d,dl)) - @test istril(SymTridiagonal(d,zerosdl)) - @test !istril(SymTridiagonal(d,dl),-2) - @test !istriu(SymTridiagonal(d,dl)) - @test istriu(SymTridiagonal(d,zerosdl)) - @test !istriu(SymTridiagonal(d,dl),2) - @test istriu(Tridiagonal(zerosdl,d,du)) - @test !istriu(Tridiagonal(dl,d,zerosdu)) - @test istriu(Tridiagonal(zerosdl,zerosd,du),1) - @test !istriu(Tridiagonal(dl,d,zerosdu),2) - @test istril(Tridiagonal(dl,d,zerosdu)) - @test !istril(Tridiagonal(zerosdl,d,du)) - @test istril(Tridiagonal(dl,zerosd,zerosdu),-1) - @test !istril(Tridiagonal(dl,d,zerosdu),-2) - - @test isdiag(SymTridiagonal(d,zerosdl)) - @test !isdiag(SymTridiagonal(d,dl)) - @test isdiag(Tridiagonal(zerosdl,d,zerosdu)) - @test !isdiag(Tridiagonal(dl,d,zerosdu)) - @test !isdiag(Tridiagonal(zerosdl,d,du)) - @test !isdiag(Tridiagonal(dl,d,du)) - - # Test methods that could fail due to dv and ev having the same length - # see #41089 - - badev = zero(d) - badev[end] = 1 - S = SymTridiagonal(d, badev) - - @test istriu(S, -2) - @test istriu(S, 0) - @test !istriu(S, 2) - - @test isdiag(S) - end - - @testset "iszero and isone" begin - Tzero = Tridiagonal(zeros(elty, 9), zeros(elty, 10), zeros(elty, 9)) - Tone = Tridiagonal(zeros(elty, 9), ones(elty, 10), zeros(elty, 9)) - Tmix = Tridiagonal(zeros(elty, 9), zeros(elty, 10), zeros(elty, 9)) - Tmix[end, end] = one(elty) - - Szero = SymTridiagonal(zeros(elty, 10), zeros(elty, 9)) - Sone = SymTridiagonal(ones(elty, 10), zeros(elty, 9)) - Smix = SymTridiagonal(zeros(elty, 10), zeros(elty, 9)) - Smix[end, end] = one(elty) - - @test iszero(Tzero) - @test !isone(Tzero) - @test !iszero(Tone) - @test isone(Tone) - @test !iszero(Tmix) - @test !isone(Tmix) 
- - @test iszero(Szero) - @test !isone(Szero) - @test !iszero(Sone) - @test isone(Sone) - @test !iszero(Smix) - @test !isone(Smix) - - badev = zeros(elty, 3) - badev[end] = 1 - - @test isone(SymTridiagonal(ones(elty, 3), badev)) - @test iszero(SymTridiagonal(zeros(elty, 3), badev)) - end - - @testset for mat_type in (Tridiagonal, SymTridiagonal) - A = mat_type == Tridiagonal ? mat_type(dl, d, du) : mat_type(d, dl) - fA = map(elty <: Complex ? ComplexF64 : Float64, Array(A)) - @testset "similar, size, and copyto!" begin - B = similar(A) - @test size(B) == size(A) - copyto!(B, A) - @test B == A - @test isa(similar(A), mat_type{elty}) - @test isa(similar(A, Int), mat_type{Int}) - @test isa(similar(A, (3, 2)), Matrix) - @test isa(similar(A, Int, (3, 2)), Matrix{Int}) - @test size(A, 3) == 1 - @test size(A, 1) == n - @test size(A) == (n, n) - @test_throws BoundsError size(A, 0) - end - @testset "getindex" begin - @test_throws BoundsError A[n + 1, 1] - @test_throws BoundsError A[1, n + 1] - @test A[1, n] == convert(elty, 0.0) - @test A[1, 1] == d[1] - end - @testset "setindex!" begin - @test_throws BoundsError A[n + 1, 1] = 0 # test bounds check - @test_throws BoundsError A[1, n + 1] = 0 # test bounds check - @test_throws ArgumentError A[1, 3] = 1 # test assignment off the main/sub/super diagonal - if mat_type == Tridiagonal - @test (A[3, 3] = A[3, 3]; A == fA) # test assignment on the main diagonal - @test (A[3, 2] = A[3, 2]; A == fA) # test assignment on the subdiagonal - @test (A[2, 3] = A[2, 3]; A == fA) # test assignment on the superdiagonal - @test ((A[1, 3] = 0) == 0; A == fA) # test zero assignment off the main/sub/super diagonal - else # mat_type is SymTridiagonal - @test ((A[3, 3] = A[3, 3]) == A[3, 3]; A == fA) # test assignment on the main diagonal - @test_throws ArgumentError A[3, 2] = 1 # test assignment on the subdiagonal - @test_throws ArgumentError A[2, 3] = 1 # test assignment on the superdiagonal - end - # setindex! should return the destination - @test setindex!(A, A[2,2], 2, 2) === A - end - @testset "diag" begin - @test (@inferred diag(A))::typeof(d) == d - @test (@inferred diag(A, 0))::typeof(d) == d - @test (@inferred diag(A, 1))::typeof(d) == (mat_type == Tridiagonal ? du : dl) - @test (@inferred diag(A, -1))::typeof(d) == dl - @test (@inferred diag(A, n-1))::typeof(d) == zeros(elty, 1) - @test isempty(@inferred diag(A, -n - 1)) - @test isempty(@inferred diag(A, n + 1)) - GA = mat_type == Tridiagonal ? mat_type(GenericArray.((dl, d, du))...) : mat_type(GenericArray.((d, dl))...) - @test (@inferred diag(GA))::typeof(GenericArray(d)) == GenericArray(d) - @test (@inferred diag(GA, -1))::typeof(GenericArray(d)) == GenericArray(dl) - end - @testset "trace" begin - if real(elty) <: Integer - @test tr(A) == tr(fA) - else - @test tr(A) ≈ tr(fA) rtol=2eps(real(elty)) - end - end - @testset "Idempotent tests" begin - for func in (conj, transpose, adjoint) - @test func(func(A)) == A - if func ∈ (transpose, adjoint) - @test func(func(A)) === A - end - end - end - @testset "permutedims(::[Sym]Tridiagonal)" begin - @test permutedims(permutedims(A)) === A - @test permutedims(A) == transpose.(transpose(A)) - @test permutedims(A, [1, 2]) === A - @test permutedims(A, (2, 1)) == permutedims(A) - end - if elty != Int - @testset "Simple unary functions" begin - for func in (det, inv) - @test func(A) ≈ func(fA) atol=n^2*sqrt(eps(real(one(elty)))) - end - end - end - ds = mat_type == Tridiagonal ? (dl, d, du) : (d, dl) - for f in (real, imag) - @test f(A)::mat_type == mat_type(map(f, ds)...) 
- end - if elty <: Real - for f in (round, trunc, floor, ceil) - fds = [f.(d) for d in ds] - @test f.(A)::mat_type == mat_type(fds...) - @test f.(Int, A)::mat_type == f.(Int, fA) - end - end - fds = [abs.(d) for d in ds] - @test abs.(A)::mat_type == mat_type(fds...) - @testset "Multiplication with strided matrix/vector" begin - @test (x = fill(1.,n); A*x ≈ Array(A)*x) - @test (X = fill(1.,n,2); A*X ≈ Array(A)*X) - end - @testset "Binary operations" begin - B = mat_type == Tridiagonal ? mat_type(a, b, c) : mat_type(b, a) - fB = map(elty <: Complex ? ComplexF64 : Float64, Array(B)) - for op in (+, -, *) - @test Array(op(A, B)) ≈ op(fA, fB) - end - α = rand(elty) - @test Array(α*A) ≈ α*Array(A) - @test Array(A*α) ≈ Array(A)*α - @test Array(A/α) ≈ Array(A)/α - - @testset "Matmul with Triangular types" begin - @test A*LinearAlgebra.UnitUpperTriangular(Matrix(1.0I, n, n)) ≈ fA - @test A*LinearAlgebra.UnitLowerTriangular(Matrix(1.0I, n, n)) ≈ fA - @test A*UpperTriangular(Matrix(1.0I, n, n)) ≈ fA - @test A*LowerTriangular(Matrix(1.0I, n, n)) ≈ fA - end - @testset "mul! errors" begin - Cnn, Cnm, Cmn = Matrix{elty}.(undef, ((n,n), (n,n+1), (n+1,n))) - @test_throws DimensionMismatch LinearAlgebra.mul!(Cnn,A,Cnm) - @test_throws DimensionMismatch LinearAlgebra.mul!(Cnn,A,Cmn) - @test_throws DimensionMismatch LinearAlgebra.mul!(Cnn,B,Cmn) - @test_throws DimensionMismatch LinearAlgebra.mul!(Cmn,B,Cnn) - @test_throws DimensionMismatch LinearAlgebra.mul!(Cnm,B,Cnn) - end - end - @testset "Negation" begin - mA = -A - @test mA isa mat_type - @test -mA == A - end - if mat_type == SymTridiagonal - @testset "Tridiagonal/SymTridiagonal mixing ops" begin - B = convert(Tridiagonal{elty}, A) - @test B == A - @test B + A == A + B - @test B - A == A - B - end - if elty <: LinearAlgebra.BlasReal - @testset "Eigensystems" begin - zero, infinity = convert(elty, 0), convert(elty, Inf) - @testset "stebz! and stein!" begin - w, iblock, isplit = LAPACK.stebz!('V', 'B', -infinity, infinity, 0, 0, zero, b, a) - evecs = LAPACK.stein!(b, a, w) - - (e, v) = eigen(SymTridiagonal(b, a)) - @test e ≈ w - test_approx_eq_vecs(v, evecs) - end - @testset "stein! call using iblock and isplit" begin - w, iblock, isplit = LAPACK.stebz!('V', 'B', -infinity, infinity, 0, 0, zero, b, a) - evecs = LAPACK.stein!(b, a, w, iblock, isplit) - test_approx_eq_vecs(v, evecs) - end - @testset "stegr! call with index range" begin - F = eigen(SymTridiagonal(b, a),1:2) - fF = eigen(Symmetric(Array(SymTridiagonal(b, a))),1:2) - test_approx_eq_modphase(F.vectors, fF.vectors) - @test F.values ≈ fF.values - end - @testset "stegr! call with value range" begin - F = eigen(SymTridiagonal(b, a),0.0,1.0) - fF = eigen(Symmetric(Array(SymTridiagonal(b, a))),0.0,1.0) - test_approx_eq_modphase(F.vectors, fF.vectors) - @test F.values ≈ fF.values - end - @testset "eigenvalues/eigenvectors of symmetric tridiagonal" begin - if elty === Float32 || elty === Float64 - DT, VT = @inferred eigen(A) - @inferred eigen(A, 2:4) - @inferred eigen(A, 1.0, 2.0) - D, Vecs = eigen(fA) - @test DT ≈ D - @test abs.(VT'Vecs) ≈ Matrix(elty(1)I, n, n) - test_approx_eq_modphase(eigvecs(A), eigvecs(fA)) - #call to LAPACK.stein here - test_approx_eq_modphase(eigvecs(A,eigvals(A)),eigvecs(A)) - elseif elty != Int - # check that undef is determined accurately even if type inference - # bails out due to the number of try/catch blocks in this code. 
- @test_throws UndefVarError fA - end - end - end - end - if elty <: Real - Ts = SymTridiagonal(d, dl) - Fs = Array(Ts) - Tldlt = factorize(Ts) - @testset "symmetric tridiagonal" begin - @test_throws DimensionMismatch Tldlt\rand(elty,n+1) - @test size(Tldlt) == size(Ts) - if elty <: AbstractFloat - @test LinearAlgebra.LDLt{elty,SymTridiagonal{elty,Vector{elty}}}(Tldlt) === Tldlt - @test LinearAlgebra.LDLt{elty}(Tldlt) === Tldlt - @test typeof(convert(LinearAlgebra.LDLt{Float32,Matrix{Float32}},Tldlt)) == - LinearAlgebra.LDLt{Float32,Matrix{Float32}} - @test typeof(convert(LinearAlgebra.LDLt{Float32},Tldlt)) == - LinearAlgebra.LDLt{Float32,SymTridiagonal{Float32,Vector{Float32}}} - end - for vv in (copy(v), view(v, 1:n)) - invFsv = Fs\vv - x = Ts\vv - @test x ≈ invFsv - @test Array(Tldlt) ≈ Fs - end - - @testset "similar" begin - @test isa(similar(Ts), SymTridiagonal{elty}) - @test isa(similar(Ts, Int), SymTridiagonal{Int}) - @test isa(similar(Ts, (3, 2)), Matrix) - @test isa(similar(Ts, Int, (3, 2)), Matrix{Int}) - end - - @test first(logabsdet(Tldlt)) ≈ first(logabsdet(Fs)) - @test last(logabsdet(Tldlt)) ≈ last(logabsdet(Fs)) - # just test that the det method exists. The numerical value of the - # determinant is unreliable - det(Tldlt) - end - end - else # mat_type is Tridiagonal - @testset "tridiagonal linear algebra" begin - for vv in (copy(v), view(copy(v), 1:n)) - @test A*vv ≈ fA*vv - invFv = fA\vv - @test A\vv ≈ invFv - Tlu = factorize(A) - x = Tlu\vv - @test x ≈ invFv - end - elty != Int && @test A \ v ≈ ldiv!(copy(A), copy(v)) - end - F = lu(A) - L1, U1, p1 = F - G = lu!(F, 2A) - L2, U2, p2 = F - @test L1 ≈ L2 - @test 2U1 ≈ U2 - @test p1 == p2 - end - @testset "generalized dot" begin - x = fill(convert(elty, 1), n) - y = fill(convert(elty, 1), n) - @test dot(x, A, y) ≈ dot(A'x, y) ≈ dot(x, A*y) - @test dot([1], SymTridiagonal([1], Int[]), [1]) == 1 - @test dot([1], Tridiagonal(Int[], [1], Int[]), [1]) == 1 - @test dot(Int[], SymTridiagonal(Int[], Int[]), Int[]) === 0 - @test dot(Int[], Tridiagonal(Int[], Int[], Int[]), Int[]) === 0 - end - end -end - -@testset "SymTridiagonal/Tridiagonal block matrix" begin - M = [1 2; 3 4] - n = 5 - A = SymTridiagonal(fill(M, n), fill(M, n-1)) - @test @inferred A[1,1] == Symmetric(M) - @test @inferred A[1,2] == M - @test @inferred A[2,1] == transpose(M) - @test @inferred diag(A, 1) == fill(M, n-1) - @test @inferred diag(A, 0) == fill(Symmetric(M), n) - @test @inferred diag(A, -1) == fill(transpose(M), n-1) - @test_broken diag(A, -2) == fill(M, n-2) - @test_broken diag(A, 2) == fill(M, n-2) - @test isempty(@inferred diag(A, n+1)) - @test isempty(@inferred diag(A, -n-1)) - - A[1,1] = Symmetric(2M) - @test A[1,1] == Symmetric(2M) - @test_throws ArgumentError A[1,1] = M - - @test tr(A) == sum(diag(A)) - @test issymmetric(tr(A)) - - A = Tridiagonal(fill(M, n-1), fill(M, n), fill(M, n-1)) - @test @inferred A[1,1] == M - @test @inferred A[1,2] == M - @test @inferred A[2,1] == M - @test @inferred diag(A, 1) == fill(M, n-1) - @test @inferred diag(A, 0) == fill(M, n) - @test @inferred diag(A, -1) == fill(M, n-1) - @test_broken diag(A, -2) == fill(M, n-2) - @test_broken diag(A, 2) == fill(M, n-2) - @test isempty(@inferred diag(A, n+1)) - @test isempty(@inferred diag(A, -n-1)) - - for n in 0:2 - dv, ev = fill(M, n), fill(M, max(n-1,0)) - A = SymTridiagonal(dv, ev) - @test A == Matrix{eltype(A)}(A) - - A = Tridiagonal(ev, dv, ev) - @test A == Matrix{eltype(A)}(A) - end - - M = SizedArrays.SizedArray{(2,2)}([1 2; 3 4]) - S = SymTridiagonal(fill(M,4), fill(M,3)) 
- @test diag(S,2) == fill(zero(M), 2) - @test diag(S,-2) == fill(zero(M), 2) - @test isempty(diag(S,4)) - @test isempty(diag(S,-4)) -end - -@testset "Issue 12068" begin - @test SymTridiagonal([1, 2], [0])^3 == [1 0; 0 8] -end - -@testset "Issue #48505" begin - @test SymTridiagonal([1,2,3],[4,5.0]) == [1.0 4.0 0.0; 4.0 2.0 5.0; 0.0 5.0 3.0] - @test Tridiagonal([1, 2], [4, 5, 1], [6.0, 7]) == [4.0 6.0 0.0; 1.0 5.0 7.0; 0.0 2.0 1.0] -end - -@testset "convert for SymTridiagonal" begin - STF32 = SymTridiagonal{Float32}(fill(1f0, 5), fill(1f0, 4)) - @test convert(SymTridiagonal{Float64}, STF32)::SymTridiagonal{Float64} == STF32 - @test convert(AbstractMatrix{Float64}, STF32)::SymTridiagonal{Float64} == STF32 -end - -@testset "constructors from matrix" begin - @test SymTridiagonal([1 2 3; 2 5 6; 0 6 9]) == [1 2 0; 2 5 6; 0 6 9] - @test Tridiagonal([1 2 3; 4 5 6; 7 8 9]) == [1 2 0; 4 5 6; 0 8 9] -end - -@testset "constructors with range and other abstract vectors" begin - @test SymTridiagonal(1:3, 1:2) == [1 1 0; 1 2 2; 0 2 3] - @test Tridiagonal(4:5, 1:3, 1:2) == [1 1 0; 4 2 2; 0 5 3] -end - -@testset "Prevent off-diagonal aliasing in Tridiagonal" begin - e = ones(4) - f = e[1:end-1] - T = Tridiagonal(f, 2e, f) - T ./= 10 - @test all(==(0.1), f) -end - -@testset "Issue #26994 (and the empty case)" begin - T = SymTridiagonal([1.0],[3.0]) - x = ones(1) - @test T*x == ones(1) - @test SymTridiagonal(ones(0), ones(0)) * ones(0, 2) == ones(0, 2) -end - -@testset "Issue 29630" begin - function central_difference_discretization(N; dfunc = x -> 12x^2 - 2N^2, - dufunc = x -> N^2 + 4N*x, - dlfunc = x -> N^2 - 4N*x, - bfunc = x -> 114ℯ^-x * (1 + 3x), - b0 = 0, bf = 57/ℯ, - x0 = 0, xf = 1) - h = 1/N - d, du, dl, b = map(dfunc, (x0+h):h:(xf-h)), map(dufunc, (x0+h):h:(xf-2h)), - map(dlfunc, (x0+2h):h:(xf-h)), map(bfunc, (x0+h):h:(xf-h)) - b[1] -= dlfunc(x0)*b0 # subtract the boundary term - b[end] -= dufunc(xf)*bf # subtract the boundary term - Tridiagonal(dl, d, du), b - end - - A90, b90 = central_difference_discretization(90) - - @test A90\b90 ≈ inv(A90)*b90 -end - -@testset "singular values of SymTridiag" begin - @test svdvals(SymTridiagonal([-4,2,3], [0,0])) ≈ [4,3,2] - @test svdvals(SymTridiagonal(collect(0.:10.), zeros(10))) ≈ reverse(0:10) - @test svdvals(SymTridiagonal([1,2,1], [1,1])) ≈ [3,1,0] - # test that dependent methods such as `cond` also work - @test cond(SymTridiagonal([1,2,3], [0,0])) ≈ 3 -end - -@testset "sum, mapreduce" begin - T = Tridiagonal([1,2], [1,2,3], [7,8]) - Tdense = Matrix(T) - S = SymTridiagonal([1,2,3], [1,2]) - Sdense = Matrix(S) - @test sum(T) == 24 - @test sum(S) == 12 - @test_throws ArgumentError sum(T, dims=0) - @test sum(T, dims=1) == sum(Tdense, dims=1) - @test sum(T, dims=2) == sum(Tdense, dims=2) - @test sum(T, dims=3) == sum(Tdense, dims=3) - @test typeof(sum(T, dims=1)) == typeof(sum(Tdense, dims=1)) - @test mapreduce(one, min, T, dims=1) == mapreduce(one, min, Tdense, dims=1) - @test mapreduce(one, min, T, dims=2) == mapreduce(one, min, Tdense, dims=2) - @test mapreduce(one, min, T, dims=3) == mapreduce(one, min, Tdense, dims=3) - @test typeof(mapreduce(one, min, T, dims=1)) == typeof(mapreduce(one, min, Tdense, dims=1)) - @test mapreduce(zero, max, T, dims=1) == mapreduce(zero, max, Tdense, dims=1) - @test mapreduce(zero, max, T, dims=2) == mapreduce(zero, max, Tdense, dims=2) - @test mapreduce(zero, max, T, dims=3) == mapreduce(zero, max, Tdense, dims=3) - @test typeof(mapreduce(zero, max, T, dims=1)) == typeof(mapreduce(zero, max, Tdense, dims=1)) - @test_throws 
ArgumentError sum(S, dims=0) - @test sum(S, dims=1) == sum(Sdense, dims=1) - @test sum(S, dims=2) == sum(Sdense, dims=2) - @test sum(S, dims=3) == sum(Sdense, dims=3) - @test typeof(sum(S, dims=1)) == typeof(sum(Sdense, dims=1)) - @test mapreduce(one, min, S, dims=1) == mapreduce(one, min, Sdense, dims=1) - @test mapreduce(one, min, S, dims=2) == mapreduce(one, min, Sdense, dims=2) - @test mapreduce(one, min, S, dims=3) == mapreduce(one, min, Sdense, dims=3) - @test typeof(mapreduce(one, min, S, dims=1)) == typeof(mapreduce(one, min, Sdense, dims=1)) - @test mapreduce(zero, max, S, dims=1) == mapreduce(zero, max, Sdense, dims=1) - @test mapreduce(zero, max, S, dims=2) == mapreduce(zero, max, Sdense, dims=2) - @test mapreduce(zero, max, S, dims=3) == mapreduce(zero, max, Sdense, dims=3) - @test typeof(mapreduce(zero, max, S, dims=1)) == typeof(mapreduce(zero, max, Sdense, dims=1)) - - T = Tridiagonal(Int[], Int[], Int[]) - Tdense = Matrix(T) - S = SymTridiagonal(Int[], Int[]) - Sdense = Matrix(S) - @test sum(T) == 0 - @test sum(S) == 0 - @test_throws ArgumentError sum(T, dims=0) - @test sum(T, dims=1) == sum(Tdense, dims=1) - @test sum(T, dims=2) == sum(Tdense, dims=2) - @test sum(T, dims=3) == sum(Tdense, dims=3) - @test typeof(sum(T, dims=1)) == typeof(sum(Tdense, dims=1)) - @test_throws ArgumentError sum(S, dims=0) - @test sum(S, dims=1) == sum(Sdense, dims=1) - @test sum(S, dims=2) == sum(Sdense, dims=2) - @test sum(S, dims=3) == sum(Sdense, dims=3) - @test typeof(sum(S, dims=1)) == typeof(sum(Sdense, dims=1)) - - T = Tridiagonal(Int[], Int[2], Int[]) - Tdense = Matrix(T) - S = SymTridiagonal(Int[2], Int[]) - Sdense = Matrix(S) - @test sum(T) == 2 - @test sum(S) == 2 - @test_throws ArgumentError sum(T, dims=0) - @test sum(T, dims=1) == sum(Tdense, dims=1) - @test sum(T, dims=2) == sum(Tdense, dims=2) - @test sum(T, dims=3) == sum(Tdense, dims=3) - @test typeof(sum(T, dims=1)) == typeof(sum(Tdense, dims=1)) - @test_throws ArgumentError sum(S, dims=0) - @test sum(S, dims=1) == sum(Sdense, dims=1) - @test sum(S, dims=2) == sum(Sdense, dims=2) - @test sum(S, dims=3) == sum(Sdense, dims=3) - @test typeof(sum(S, dims=1)) == typeof(sum(Sdense, dims=1)) -end - -@testset "Issue #28994 (sum of Tridigonal and UniformScaling)" begin - dl = [1., 1.] - d = [-2., -2., -2.] 
- T = Tridiagonal(dl, d, dl) - S = SymTridiagonal(T) - - @test diag(T + 2I) == zero(d) - @test diag(S + 2I) == zero(d) -end - -@testset "convert Tridiagonal to SymTridiagonal error" begin - du = rand(Float64, 4) - d = rand(Float64, 5) - dl = rand(Float64, 4) - T = Tridiagonal(dl, d, du) - @test_throws ArgumentError SymTridiagonal{Float32}(T) -end - -# Issue #38765 -@testset "Eigendecomposition with different lengths" begin - # length(A.ev) can be either length(A.dv) or length(A.dv) - 1 - A = SymTridiagonal(fill(1.0, 3), fill(-1.0, 3)) - F = eigen(A) - A2 = SymTridiagonal(fill(1.0, 3), fill(-1.0, 2)) - F2 = eigen(A2) - test_approx_eq_modphase(F.vectors, F2.vectors) - @test F.values ≈ F2.values ≈ eigvals(A) ≈ eigvals(A2) - @test eigvecs(A) ≈ eigvecs(A2) - @test eigvecs(A, eigvals(A)[1:1]) ≈ eigvecs(A2, eigvals(A2)[1:1]) -end - -@testset "non-commutative algebra (#39701)" begin - for A in (SymTridiagonal(Quaternion.(randn(5), randn(5), randn(5), randn(5)), Quaternion.(randn(4), randn(4), randn(4), randn(4))), - Tridiagonal(Quaternion.(randn(4), randn(4), randn(4), randn(4)), Quaternion.(randn(5), randn(5), randn(5), randn(5)), Quaternion.(randn(4), randn(4), randn(4), randn(4)))) - c = Quaternion(1,2,3,4) - @test A * c ≈ Matrix(A) * c - @test A / c ≈ Matrix(A) / c - @test c * A ≈ c * Matrix(A) - @test c \ A ≈ c \ Matrix(A) - end -end - -@testset "adjoint of LDLt" begin - Sr = SymTridiagonal(randn(5), randn(4)) - Sc = SymTridiagonal(complex.(randn(5)) .+ 1im, complex.(randn(4), randn(4))) - b = ones(size(Sr, 1)) - - F = ldlt(Sr) - @test F\b == F'\b - - F = ldlt(Sc) - @test copy(Sc')\b == F'\b -end - -@testset "symmetric and hermitian tridiagonals" begin - A = [im 0; 0 -im] - @test issymmetric(A) - @test !ishermitian(A) - - # real - A = SymTridiagonal(randn(5), randn(4)) - @test issymmetric(A) - @test ishermitian(A) - - A = Tridiagonal(A.ev, A.dv, A.ev .+ 1) - @test !issymmetric(A) - @test !ishermitian(A) - - # complex - # https://github.com/JuliaLang/julia/pull/41037#discussion_r645524081 - S = SymTridiagonal(randn(5) .+ 0im, randn(5) .+ 0im) - S.ev[end] = im - @test issymmetric(S) - @test ishermitian(S) - - S = SymTridiagonal(randn(5) .+ 1im, randn(4) .+ 1im) - @test issymmetric(S) - @test !ishermitian(S) - - S = Tridiagonal(S.ev, S.dv, adjoint.(S.ev)) - @test !issymmetric(S) - @test !ishermitian(S) - - S = Tridiagonal(S.dl, real.(S.d) .+ 0im, S.du) - @test !issymmetric(S) - @test ishermitian(S) -end - -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays - -@testset "Conversion to AbstractArray" begin - # tests corresponding to #34995 - v1 = ImmutableArray([1, 2]) - v2 = ImmutableArray([3, 4, 5]) - v3 = ImmutableArray([6, 7]) - T = Tridiagonal(v1, v2, v3) - Tsym = SymTridiagonal(v2, v1) - - @test convert(AbstractArray{Float64}, T)::Tridiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == T - @test convert(AbstractMatrix{Float64}, T)::Tridiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == T - @test convert(AbstractArray{Float64}, Tsym)::SymTridiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Tsym - @test convert(AbstractMatrix{Float64}, Tsym)::SymTridiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Tsym -end - -@testset "dot(x,A,y) for A::Tridiagonal or SymTridiagonal" begin - for elty in (Float32, Float64, ComplexF32, ComplexF64, Int) - x = fill(convert(elty, 1), 0) - T = Tridiagonal(x, x, x) - Tsym = SymTridiagonal(x, x) - @test dot(x, T, x) == 
0.0 - @test dot(x, Tsym, x) == 0.0 - end -end - -isdefined(Main, :SizedArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SizedArrays.jl")) -using .Main.SizedArrays -@testset "non-number eltype" begin - @testset "sum for SymTridiagonal" begin - dv = [SizedArray{(2,2)}(rand(1:2048,2,2)) for i in 1:10] - ev = [SizedArray{(2,2)}(rand(1:2048,2,2)) for i in 1:10] - S = SymTridiagonal(dv, ev) - Sdense = Matrix(S) - @test Sdense == collect(S) - @test sum(S) == sum(Sdense) - @test sum(S, dims = 1) == sum(Sdense, dims = 1) - @test sum(S, dims = 2) == sum(Sdense, dims = 2) - end - @testset "issymmetric/ishermitian for Tridiagonal" begin - @test !issymmetric(Tridiagonal([[1 2;3 4]], [[1 2;2 3], [1 2;2 3]], [[1 2;3 4]])) - @test !issymmetric(Tridiagonal([[1 3;2 4]], [[1 2;3 4], [1 2;3 4]], [[1 2;3 4]])) - @test issymmetric(Tridiagonal([[1 3;2 4]], [[1 2;2 3], [1 2;2 3]], [[1 2;3 4]])) - - @test ishermitian(Tridiagonal([[1 3;2 4].+im], [[1 2;2 3].+0im, [1 2;2 3].+0im], [[1 2;3 4].-im])) - @test !ishermitian(Tridiagonal([[1 3;2 4].+im], [[1 2;2 3].+0im, [1 2;2 3].+0im], [[1 2;3 4].+im])) - @test !ishermitian(Tridiagonal([[1 3;2 4].+im], [[1 2;2 3].+im, [1 2;2 3].+0im], [[1 2;3 4].-im])) - end - @testset "== between Tridiagonal and SymTridiagonal" begin - dv = [SizedArray{(2,2)}([1 2;3 4]) for i in 1:4] - ev = [SizedArray{(2,2)}([3 4;1 2]) for i in 1:4] - S = SymTridiagonal(dv, ev) - Sdense = Matrix(S) - @test S == Tridiagonal(diag(Sdense, -1), diag(Sdense), diag(Sdense, 1)) == S - @test S !== Tridiagonal(diag(Sdense, 1), diag(Sdense), diag(Sdense, 1)) !== S - end -end - -@testset "copyto! between SymTridiagonal and Tridiagonal" begin - ev, dv = [1:4;], [1:5;] - S = SymTridiagonal(dv, ev) - T = Tridiagonal(zero(ev), zero(dv), zero(ev)) - @test copyto!(T, S) == S - @test copyto!(zero(S), T) == T - - ev2 = [1:5;] - S = SymTridiagonal(dv, ev2) - T = Tridiagonal(zeros(length(ev2)-1), zero(dv), zeros(length(ev2)-1)) - @test copyto!(T, S) == S - @test copyto!(zero(S), T) == T - - T2 = Tridiagonal(ones(length(ev)), zero(dv), zero(ev)) - @test_throws "cannot copy an asymmetric Tridiagonal matrix to a SymTridiagonal" copyto!(zero(S), T2) - - @testset "mismatched sizes" begin - dv2 = [4; @view dv[2:end]] - @test copyto!(S, SymTridiagonal([4], Int[])) == SymTridiagonal(dv2, ev) - @test copyto!(T, SymTridiagonal([4], Int[])) == Tridiagonal(ev, dv2, ev) - @test copyto!(S, Tridiagonal(Int[], [4], Int[])) == SymTridiagonal(dv2, ev) - @test copyto!(T, Tridiagonal(Int[], [4], Int[])) == Tridiagonal(ev, dv2, ev) - @test copyto!(S, SymTridiagonal(Int[], Int[])) == SymTridiagonal(dv, ev) - @test copyto!(T, SymTridiagonal(Int[], Int[])) == Tridiagonal(ev, dv, ev) - @test copyto!(S, Tridiagonal(Int[], Int[], Int[])) == SymTridiagonal(dv, ev) - @test copyto!(T, Tridiagonal(Int[], Int[], Int[])) == Tridiagonal(ev, dv, ev) - end -end - -@testset "copyto! 
with UniformScaling" begin - @testset "Tridiagonal" begin - @testset "Fill" begin - for len in (4, InfiniteArrays.Infinity()) - d = FillArrays.Fill(1, len) - ud = FillArrays.Fill(0, len-1) - T = Tridiagonal(ud, d, ud) - @test copyto!(T, I) === T - end - end - T = Tridiagonal(fill(3, 3), fill(2, 4), fill(3, 3)) - copyto!(T, I) - @test all(isone, diag(T)) - @test all(iszero, diag(T, 1)) - @test all(iszero, diag(T, -1)) - end - @testset "SymTridiagonal" begin - @testset "Fill" begin - for len in (4, InfiniteArrays.Infinity()) - d = FillArrays.Fill(1, len) - ud = FillArrays.Fill(0, len-1) - ST = SymTridiagonal(d, ud) - @test copyto!(ST, I) === ST - end - end - ST = SymTridiagonal(fill(2, 4), fill(3, 3)) - copyto!(ST, I) - @test all(isone, diag(ST)) - @test all(iszero, diag(ST, 1)) - @test all(iszero, diag(ST, -1)) - end -end - -@testset "custom axes" begin - dv, uv = OffsetArray(1:4), OffsetArray(1:3) - B = Tridiagonal(uv, dv, uv) - ax = axes(dv, 1) - @test axes(B) === (ax, ax) - B = SymTridiagonal(dv, uv) - @test axes(B) === (ax, ax) -end - -@testset "Reverse operation on Tridiagonal" begin - for n in 5:6 - d = randn(n) - dl = randn(n - 1) - du = randn(n - 1) - T = Tridiagonal(dl, d, du) - @test reverse(T, dims=1) == reverse(Matrix(T), dims=1) - @test reverse(T, dims=2) == reverse(Matrix(T), dims=2) - @test reverse(T)::Tridiagonal == reverse(Matrix(T)) == reverse!(copy(T)) - end -end - -@testset "Reverse operation on SymTridiagonal" begin - n = 5 - d = randn(n) - dl = randn(n - 1) - ST = SymTridiagonal(d, dl) - @test reverse(ST, dims=1) == reverse(Matrix(ST), dims=1) - @test reverse(ST, dims=2) == reverse(Matrix(ST), dims=2) - @test reverse(ST)::SymTridiagonal == reverse(Matrix(ST)) -end - -@testset "getindex with Integers" begin - dv, ev = 1:4, 1:3 - for S in (Tridiagonal(ev, dv, ev), SymTridiagonal(dv, ev)) - @test_throws "invalid index" S[3, true] - @test S[1,2] == S[Int8(1),UInt16(2)] == S[big(1), Int16(2)] - end -end - -@testset "rmul!/lmul! with banded matrices" begin - dl, d, du = rand(3), rand(4), rand(3) - A = Tridiagonal(dl, d, du) - D = Diagonal(d) - @test rmul!(copy(A), D) ≈ A * D - @test lmul!(D, copy(A)) ≈ D * A - - @testset "non-commutative" begin - S32 = SizedArrays.SizedArray{(3,2)}(rand(3,2)) - S33 = SizedArrays.SizedArray{(3,3)}(rand(3,3)) - S22 = SizedArrays.SizedArray{(2,2)}(rand(2,2)) - T = Tridiagonal(fill(S32,3), fill(S32, 4), fill(S32, 3)) - D = Diagonal(fill(S22, size(T,2))) - @test rmul!(copy(T), D) ≈ T * D - D = Diagonal(fill(S33, size(T,1))) - @test lmul!(D, copy(T)) ≈ D * T - end -end - -@testset "rmul!/lmul! 
with numbers" begin - for T in (SymTridiagonal(rand(4), rand(3)), Tridiagonal(rand(3), rand(4), rand(3))) - @test rmul!(copy(T), 0.2) ≈ rmul!(Array(T), 0.2) - @test lmul!(0.2, copy(T)) ≈ lmul!(0.2, Array(T)) - @test_throws ArgumentError rmul!(T, NaN) - @test_throws ArgumentError lmul!(NaN, T) - end - for T in (SymTridiagonal(rand(2), rand(1)), Tridiagonal(rand(1), rand(2), rand(1))) - @test all(isnan, rmul!(copy(T), NaN)) - @test all(isnan, lmul!(NaN, copy(T))) - end -end - -@testset "mul with empty arrays" begin - A = zeros(5,0) - T = Tridiagonal(zeros(0), zeros(0), zeros(0)) - TL = Tridiagonal(zeros(4), zeros(5), zeros(4)) - @test size(A * T) == size(A) - @test size(TL * A) == size(A) - @test size(T * T) == size(T) - C = similar(A) - @test mul!(C, A, T) == A * T - @test mul!(C, TL, A) == TL * A - @test mul!(similar(T), T, T) == T * T - @test mul!(similar(T, size(T)), T, T) == T * T - - v = zeros(size(T,2)) - @test size(T * v) == size(v) - @test mul!(similar(v), T, v) == T * v - - D = Diagonal(zeros(size(T,2))) - @test size(T * D) == size(D * T) == size(D) - @test mul!(similar(D), T, D) == mul!(similar(D), D, T) == T * D -end - -@testset "show" begin - T = Tridiagonal(1:3, 1:4, 1:3) - @test sprint(show, T) == "Tridiagonal(1:3, 1:4, 1:3)" - S = SymTridiagonal(1:4, 1:3) - @test sprint(show, S) == "SymTridiagonal(1:4, 1:3)" - - m = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) - T = Tridiagonal(fill(m,2), fill(m,3), fill(m,2)) - @test sprint(show, T) == "Tridiagonal($(repr(diag(T,-1))), $(repr(diag(T))), $(repr(diag(T,1))))" - S = SymTridiagonal(fill(m,3), fill(m,2)) - @test sprint(show, S) == "SymTridiagonal($(repr(diag(S))), $(repr(diag(S,1))))" -end - -@testset "mul for small matrices" begin - @testset for n in 0:6 - for T in ( - Tridiagonal(rand(max(n-1,0)), rand(n), rand(max(n-1,0))), - SymTridiagonal(rand(n), rand(max(n-1,0))), - ) - M = Matrix(T) - @test T * T ≈ M * M - @test mul!(similar(T, size(T)), T, T) ≈ M * M - @test mul!(ones(size(T)), T, T, 2, 4) ≈ M * M * 2 .+ 4 - - for m in 0:6 - AR = rand(n,m) - AL = rand(m,n) - @test AL * T ≈ AL * M - @test T * AR ≈ M * AR - @test mul!(similar(AL), AL, T) ≈ AL * M - @test mul!(similar(AR), T, AR) ≈ M * AR - @test mul!(ones(size(AL)), AL, T, 2, 4) ≈ AL * M * 2 .+ 4 - @test mul!(ones(size(AR)), T, AR, 2, 4) ≈ M * AR * 2 .+ 4 - end - - v = rand(n) - @test T * v ≈ M * v - @test mul!(similar(v), T, v) ≈ M * v - - D = Diagonal(rand(n)) - @test T * D ≈ M * D - @test D * T ≈ D * M - @test mul!(Tridiagonal(similar(T)), D, T) ≈ D * M - @test mul!(Tridiagonal(similar(T)), T, D) ≈ M * D - @test mul!(similar(T, size(T)), D, T) ≈ D * M - @test mul!(similar(T, size(T)), T, D) ≈ M * D - @test mul!(ones(size(T)), D, T, 2, 4) ≈ D * M * 2 .+ 4 - @test mul!(ones(size(T)), T, D, 2, 4) ≈ M * D * 2 .+ 4 - - for uplo in (:U, :L) - B = Bidiagonal(rand(n), rand(max(0, n-1)), uplo) - @test T * B ≈ M * B - @test B * T ≈ B * M - if n <= 2 - @test mul!(Tridiagonal(similar(T)), B, T) ≈ B * M - @test mul!(Tridiagonal(similar(T)), T, B) ≈ M * B - end - @test mul!(similar(T, size(T)), B, T) ≈ B * M - @test mul!(similar(T, size(T)), T, B) ≈ M * B - @test mul!(ones(size(T)), B, T, 2, 4) ≈ B * M * 2 .+ 4 - @test mul!(ones(size(T)), T, B, 2, 4) ≈ M * B * 2 .+ 4 - end - end - end - - n = 4 - arr = SizedArrays.SizedArray{(2,2)}(reshape([1:4;],2,2)) - for T in ( - SymTridiagonal(fill(arr,n), fill(arr,n-1)), - Tridiagonal(fill(arr,n-1), fill(arr,n), fill(arr,n-1)), - ) - @test T * T ≈ Matrix(T) * Matrix(T) - BL = Bidiagonal(fill(arr,n), fill(arr,n-1), :L) - BU = 
Bidiagonal(fill(arr,n), fill(arr,n-1), :U) - @test BL * T ≈ Matrix(BL) * Matrix(T) - @test BU * T ≈ Matrix(BU) * Matrix(T) - @test T * BL ≈ Matrix(T) * Matrix(BL) - @test T * BU ≈ Matrix(T) * Matrix(BU) - D = Diagonal(fill(arr,n)) - @test D * T ≈ Matrix(D) * Matrix(T) - @test T * D ≈ Matrix(T) * Matrix(D) - end -end - -@testset "diagview" begin - A = Tridiagonal(rand(3), rand(4), rand(3)) - for k in -5:5 - @test diagview(A,k) == diag(A,k) - end - v = diagview(A,1) - v .= 0 - @test all(iszero, diag(A,1)) -end - -end # module TestTridiagonal diff --git a/stdlib/LinearAlgebra/test/uniformscaling.jl b/stdlib/LinearAlgebra/test/uniformscaling.jl deleted file mode 100644 index 10d427d1dc6c4..0000000000000 --- a/stdlib/LinearAlgebra/test/uniformscaling.jl +++ /dev/null @@ -1,577 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -module TestUniformscaling - -using Test, LinearAlgebra, Random - -const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :Quaternions) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "Quaternions.jl")) -using .Main.Quaternions -isdefined(Main, :OffsetArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "OffsetArrays.jl")) -using .Main.OffsetArrays - -Random.seed!(1234543) - -@testset "basic functions" begin - @test I === I' # transpose - @test ndims(I) == 2 - @test one(UniformScaling{Float32}) == UniformScaling(one(Float32)) - @test zero(UniformScaling{Float32}) == UniformScaling(zero(Float32)) - @test eltype(one(UniformScaling{Float32})) == Float32 - @test zero(UniformScaling(rand(ComplexF64))) == zero(UniformScaling{ComplexF64}) - @test one(UniformScaling(rand(ComplexF64))) == one(UniformScaling{ComplexF64}) - @test eltype(one(UniformScaling(rand(ComplexF64)))) == ComplexF64 - @test -one(UniformScaling(2)) == UniformScaling(-1) - @test opnorm(UniformScaling(1+im)) ≈ sqrt(2) - @test convert(UniformScaling{Float64}, 2I) === 2.0I - @test float(2I) === 2.0*I -end - -@testset "getindex" begin - @test I[1,1] == I[CartesianIndex(1,1)] == 1 - @test I[1,2] == I[CartesianIndex(1,2)] == 0 - - J = I(15) - for (a, b) in [ - # indexing that returns a Vector - (1:10, 1), - (4, 1:10), - (11, 1:10), - # indexing that returns a Matrix - (1:2, 1:2), - (1:2:3, 1:2:3), - (1:2:8, 2:2:9), - (1:2:8, 9:-4:1), - (9:-4:1, 1:2:8), - (2:3, 1:2), - (2:-1:1, 1:2), - (1:2:9, 5:2:13), - (1, [1,2,5]), - (1, [1,10,5,2]), - (10, [10]), - ([1], 1), - ([15,1,5,2], 6), - ([2], [2]), - ([2,9,8,2,1], [2,8,4,3,1]), - ([8,3,5,3], 2:9), - ] - @test I[a,b] == J[a,b] - ndims(a) == 1 && @test I[OffsetArray(a,-10),b] == J[OffsetArray(a,-10),b] - ndims(b) == 1 && @test I[a,OffsetArray(b,-9)] == J[a,OffsetArray(b,-9)] - ndims(a) == ndims(b) == 1 && @test I[OffsetArray(a,-7),OffsetArray(b,-8)] == J[OffsetArray(a,-7),OffsetArray(b,-8)] - end -end - -@testset "sqrt, exp, log, and trigonometric functions" begin - # convert to a dense matrix with random size - M(J) = (N = rand(1:10); Matrix(J, N, N)) - - # on complex plane - J = UniformScaling(randn(ComplexF64)) - for f in ( exp, log, cis, - sqrt, - sin, cos, tan, - asin, acos, atan, - csc, sec, cot, - acsc, asec, acot, - sinh, cosh, tanh, - asinh, acosh, atanh, - csch, sech, coth, - acsch, asech, acoth ) - @test f(J) ≈ f(M(J)) - end - - for f in (sincos, sincosd) - @test all(splat(≈), zip(f(J), f(M(J)))) - end - - # on real axis - for (λ, fs) in ( - # functions defined for x ∈ ℝ - (()->randn(), (exp, - sin, cos, tan, - csc, sec, cot, - atan, acot, - sinh, cosh, 
tanh, - csch, sech, coth, - asinh, acsch)), - # functions defined for x ≥ 0 - (()->abs(randn()), (log, sqrt)), - # functions defined for -1 ≤ x ≤ 1 - (()->2rand()-1, (asin, acos, atanh)), - # functions defined for x ≤ -1 or x ≥ 1 - (()->1/(2rand()-1), (acsc, asec, acoth)), - # functions defined for 0 ≤ x ≤ 1 - (()->rand(), (asech,)), - # functions defined for x ≥ 1 - (()->1/rand(), (acosh,)) - ) - for f in fs - J = UniformScaling(λ()) - @test f(J) ≈ f(M(J)) - end - end -end - -@testset "conjugation of UniformScaling" begin - @test conj(UniformScaling(1))::UniformScaling{Int} == UniformScaling(1) - @test conj(UniformScaling(1.0))::UniformScaling{Float64} == UniformScaling(1.0) - @test conj(UniformScaling(1+1im))::UniformScaling{Complex{Int}} == UniformScaling(1-1im) - @test conj(UniformScaling(1.0+1.0im))::UniformScaling{ComplexF64} == UniformScaling(1.0-1.0im) -end - -@testset "isdiag, istriu, istril, issymmetric, ishermitian, isposdef, isapprox" begin - @test isdiag(I) - @test istriu(I) - @test istril(I) - @test issymmetric(I) - @test issymmetric(UniformScaling(complex(1.0,1.0))) - @test ishermitian(I) - @test !ishermitian(UniformScaling(complex(1.0,1.0))) - @test isposdef(UniformScaling(rand())) - @test !isposdef(UniformScaling(-rand())) - @test !isposdef(UniformScaling(randn(ComplexF64))) - @test !isposdef(UniformScaling(NaN)) - @test isposdef(I) - @test !isposdef(-I) - @test isposdef(UniformScaling(complex(1.0, 0.0))) - @test !isposdef(UniformScaling(complex(1.0, 1.0))) - @test UniformScaling(4.00000000000001) ≈ UniformScaling(4.0) - @test UniformScaling(4.32) ≈ UniformScaling(4.3) rtol=0.1 atol=0.01 - @test UniformScaling(4.32) ≈ 4.3 * [1 0; 0 1] rtol=0.1 atol=0.01 - @test UniformScaling(4.32) ≈ 4.3 * [1 0; 0 1] rtol=0.1 atol=0.01 norm=norm - @test 4.3 * [1 0; 0 1] ≈ UniformScaling(4.32) rtol=0.1 atol=0.01 - @test [4.3201 0.002;0.001 4.32009] ≈ UniformScaling(4.32) rtol=0.1 atol=0. 
- @test UniformScaling(4.32) ≉ fill(4.3,2,2) rtol=0.1 atol=0.01 - @test UniformScaling(4.32) ≈ 4.32 * [1 0; 0 1] -end - -@testset "arithmetic with Number" begin - α = rand() - @test α + I == α + 1 - @test I + α == α + 1 - @test α - I == α - 1 - @test I - α == 1 - α - @test α .* UniformScaling(1.0) == UniformScaling(1.0) .* α - @test UniformScaling(α)./α == UniformScaling(1.0) - @test α.\UniformScaling(α) == UniformScaling(1.0) - @test α * UniformScaling(1.0) == UniformScaling(1.0) * α - @test UniformScaling(α)/α == UniformScaling(1.0) - @test 2I//3 == (2//3)*I - @test (2I)^α == (2I).^α == (2^α)I - - β = rand() - @test (α*I)^2 == UniformScaling(α^2) - @test (α*I)^(-2) == UniformScaling(α^(-2)) - @test (α*I)^(.5) == UniformScaling(α^(.5)) - @test (α*I)^β == UniformScaling(α^β) - - @test (α * I) .^ 2 == UniformScaling(α^2) - @test (α * I) .^ β == UniformScaling(α^β) -end - -@testset "unary" begin - @test +I === +1*I - @test -I === -1*I -end - -@testset "tr, det and logdet" begin - for T in (Int, Float64, ComplexF64, Bool) - @test tr(UniformScaling(zero(T))) === zero(T) - end - @test_throws ArgumentError tr(UniformScaling(1)) - @test det(I) === true - @test det(1.0I) === 1.0 - @test det(0I) === 0 - @test det(0.0I) === 0.0 - @test logdet(I) == 0 - @test_throws ArgumentError det(2I) -end - -@test copy(UniformScaling(one(Float64))) == UniformScaling(one(Float64)) -@test sprint(show,MIME"text/plain"(),UniformScaling(one(ComplexF64))) == "$(LinearAlgebra.UniformScaling){ComplexF64}\n(1.0 + 0.0im)*I" -@test sprint(show,MIME"text/plain"(),UniformScaling(one(Float32))) == "$(LinearAlgebra.UniformScaling){Float32}\n1.0*I" -@test sprint(show,UniformScaling(one(ComplexF64))) == "$(LinearAlgebra.UniformScaling){ComplexF64}(1.0 + 0.0im)" -@test sprint(show,UniformScaling(one(Float32))) == "$(LinearAlgebra.UniformScaling){Float32}(1.0f0)" - -let - λ = complex(randn(),randn()) - J = UniformScaling(λ) - @testset "transpose, conj, inv, pinv, cond" begin - @test ndims(J) == 2 - @test transpose(J) == J - @test J * [1 0; 0 1] == conj(*(adjoint(J), [1 0; 0 1])) # ctranpose (and A(c)_mul_B) - @test I + I === UniformScaling(2) # + - @test inv(I) == I - @test inv(J) == UniformScaling(inv(λ)) - @test pinv(J) == UniformScaling(inv(λ)) - @test @inferred(pinv(0.0I)) == 0.0I - @test @inferred(pinv(0I)) == 0.0I - @test @inferred(pinv(false*I)) == 0.0I - @test @inferred(pinv(0im*I)) == 0im*I - @test cond(I) == 1 - @test cond(J) == (λ ≠ zero(λ) ? one(real(λ)) : oftype(real(λ), Inf)) - end - - @testset "real, imag, reim" begin - @test real(J) == UniformScaling(real(λ)) - @test imag(J) == UniformScaling(imag(λ)) - @test reim(J) == (UniformScaling(real(λ)), UniformScaling(imag(λ))) - end - - @testset "copyto!" begin - A = Matrix{Int}(undef, (3,3)) - @test copyto!(A, I) == one(A) - B = Matrix{ComplexF64}(undef, (1,2)) - @test copyto!(B, J) == [λ zero(λ)] - end - - @testset "copy!" 
begin - A = Matrix{Int}(undef, (3,3)) - @test copy!(A, I) == one(A) - B = Matrix{ComplexF64}(undef, (1,2)) - @test copy!(B, J) == [λ zero(λ)] - end - - @testset "binary ops with vectors" begin - v = complex.(randn(3), randn(3)) - # As shown in #20423@GitHub, vector acts like x1 matrix when participating in linear algebra - @test v * J ≈ v * λ - @test v' * J ≈ v' * λ - @test J * v ≈ λ * v - @test J * v' ≈ λ * v' - @test v / J ≈ v / λ - @test v' / J ≈ v' / λ - @test J \ v ≈ λ \ v - @test J \ v' ≈ λ \ v' - end - - @testset "binary ops with matrices" begin - B = bitrand(2, 2) - @test B + I == B + Matrix(I, size(B)) - @test I + B == B + Matrix(I, size(B)) - AA = randn(2, 2) - for A in (AA, view(AA, 1:2, 1:2)) - I22 = Matrix(I, size(A)) - @test @inferred(A + I) == A + I22 - @test @inferred(I + A) == A + I22 - @test @inferred(I - I) === UniformScaling(0) - @test @inferred(B - I) == B - I22 - @test @inferred(I - B) == I22 - B - @test @inferred(A - I) == A - I22 - @test @inferred(I - A) == I22 - A - @test @inferred(I*J) === UniformScaling(λ) - @test @inferred(B*J) == B*λ - @test @inferred(J*B) == B*λ - @test @inferred(I*A) !== A # Don't alias - @test @inferred(A*I) !== A # Don't alias - - @test @inferred(A*J) == A*λ - @test @inferred(J*A) == A*λ - @test @inferred(J*fill(1, 3)) == fill(λ, 3) - @test @inferred(λ*J) === UniformScaling(λ*J.λ) - @test @inferred(J*λ) === UniformScaling(λ*J.λ) - @test @inferred(J/I) === J - @test @inferred(I/A) == inv(A) - @test @inferred(A/I) == A - @test @inferred(I/λ) === UniformScaling(1/λ) - @test @inferred(I\J) === J - - if isa(A, Array) - T = LowerTriangular(randn(3,3)) - else - T = LowerTriangular(view(randn(3,3), 1:3, 1:3)) - end - @test @inferred(T + J) == Array(T) + J - @test @inferred(J + T) == J + Array(T) - @test @inferred(T - J) == Array(T) - J - @test @inferred(J - T) == J - Array(T) - @test @inferred(T\I) == inv(T) - - if isa(A, Array) - T = LinearAlgebra.UnitLowerTriangular(randn(3,3)) - else - T = LinearAlgebra.UnitLowerTriangular(view(randn(3,3), 1:3, 1:3)) - end - @test @inferred(T + J) == Array(T) + J - @test @inferred(J + T) == J + Array(T) - @test @inferred(T - J) == Array(T) - J - @test @inferred(J - T) == J - Array(T) - @test @inferred(T\I) == inv(T) - - if isa(A, Array) - T = UpperTriangular(randn(3,3)) - else - T = UpperTriangular(view(randn(3,3), 1:3, 1:3)) - end - @test @inferred(T + J) == Array(T) + J - @test @inferred(J + T) == J + Array(T) - @test @inferred(T - J) == Array(T) - J - @test @inferred(J - T) == J - Array(T) - @test @inferred(T\I) == inv(T) - - if isa(A, Array) - T = LinearAlgebra.UnitUpperTriangular(randn(3,3)) - else - T = LinearAlgebra.UnitUpperTriangular(view(randn(3,3), 1:3, 1:3)) - end - @test @inferred(T + J) == Array(T) + J - @test @inferred(J + T) == J + Array(T) - @test @inferred(T - J) == Array(T) - J - @test @inferred(J - T) == J - Array(T) - @test @inferred(T\I) == inv(T) - - for elty in (Float64, ComplexF64) - if isa(A, Array) - T = Hermitian(randn(elty, 3,3)) - else - T = Hermitian(view(randn(elty, 3,3), 1:3, 1:3)) - end - @test @inferred(T + J) == Array(T) + J - @test @inferred(J + T) == J + Array(T) - @test @inferred(T - J) == Array(T) - J - @test @inferred(J - T) == J - Array(T) - end - - @test @inferred(I\A) == A - @test @inferred(A\I) == inv(A) - @test @inferred(λ\I) === UniformScaling(1/λ) - end - end -end - -@testset "hcat and vcat" begin - @test_throws ArgumentError hcat(I) - @test_throws ArgumentError [I I] - @test_throws ArgumentError vcat(I) - @test_throws ArgumentError [I; I] - @test_throws 
ArgumentError [I I; I] - - A = rand(3,4) - B = rand(3,3) - C = rand(0,3) - D = rand(2,0) - E = rand(1,3) - F = rand(3,1) - α = rand() - @test (hcat(A, 2I))::Matrix == hcat(A, Matrix(2I, 3, 3)) - @test (hcat(E, α))::Matrix == hcat(E, [α]) - @test (hcat(E, α, 2I))::Matrix == hcat(E, [α], fill(2, 1, 1)) - @test (vcat(A, 2I))::Matrix == vcat(A, Matrix(2I, 4, 4)) - @test (vcat(F, α))::Matrix == vcat(F, [α]) - @test (vcat(F, α, 2I))::Matrix == vcat(F, [α], fill(2, 1, 1)) - @test (hcat(C, 2I))::Matrix == C - @test_throws DimensionMismatch hcat(C, α) - @test (vcat(D, 2I))::Matrix == D - @test_throws DimensionMismatch vcat(D, α) - @test (hcat(I, 3I, A, 2I))::Matrix == hcat(Matrix(I, 3, 3), Matrix(3I, 3, 3), A, Matrix(2I, 3, 3)) - @test (vcat(I, 3I, A, 2I))::Matrix == vcat(Matrix(I, 4, 4), Matrix(3I, 4, 4), A, Matrix(2I, 4, 4)) - @test (hvcat((2,1,2), B, 2I, I, 3I, 4I))::Matrix == - hvcat((2,1,2), B, Matrix(2I, 3, 3), Matrix(I, 6, 6), Matrix(3I, 3, 3), Matrix(4I, 3, 3)) - @test hvcat((3,1), C, C, I, 3I)::Matrix == hvcat((2,1), C, C, Matrix(3I, 6,6)) - @test hvcat((2,2,2), I, 2I, 3I, 4I, C, C)::Matrix == - hvcat((2,2,2), Matrix(I, 3, 3), Matrix(2I, 3,3 ), Matrix(3I, 3,3), Matrix(4I, 3,3), C, C) - @test hvcat((2,2,4), C, C, I, 2I, 3I, 4I, 5I, D)::Matrix == - hvcat((2,2,4), C, C, Matrix(I, 3, 3), Matrix(2I,3,3), - Matrix(3I, 2, 2), Matrix(4I, 2, 2), Matrix(5I,2,2), D) - @test (hvcat((2,3,2), B, 2I, C, C, I, 3I, 4I))::Matrix == - hvcat((2,2,2), B, Matrix(2I, 3, 3), C, C, Matrix(3I, 3, 3), Matrix(4I, 3, 3)) - @test hvcat((3,2,1), C, C, I, B ,3I, 2I)::Matrix == - hvcat((2,2,1), C, C, B, Matrix(3I,3,3), Matrix(2I,6,6)) - @test (hvcat((1,2), A, E, α))::Matrix == hvcat((1,2), A, E, [α]) == hvcat((1,2), A, E, α*I) - @test (hvcat((2,2), α, E, F, 3I))::Matrix == hvcat((2,2), [α], E, F, Matrix(3I, 3, 3)) - @test (hvcat((2,2), 3I, F, E, α))::Matrix == hvcat((2,2), Matrix(3I, 3, 3), F, E, [α]) -end - -@testset "Matrix/Array construction from UniformScaling" begin - I2_33 = [2 0 0; 0 2 0; 0 0 2] - I2_34 = [2 0 0 0; 0 2 0 0; 0 0 2 0] - I2_43 = [2 0 0; 0 2 0; 0 0 2; 0 0 0] - for ArrType in (Matrix, Array) - @test ArrType(2I, 3, 3)::Matrix{Int} == I2_33 - @test ArrType(2I, 3, 4)::Matrix{Int} == I2_34 - @test ArrType(2I, 4, 3)::Matrix{Int} == I2_43 - @test ArrType(2.0I, 3, 3)::Matrix{Float64} == I2_33 - @test ArrType{Real}(2I, 3, 3)::Matrix{Real} == I2_33 - @test ArrType{Float64}(2I, 3, 3)::Matrix{Float64} == I2_33 - end -end - -@testset "Diagonal construction from UniformScaling" begin - @test Diagonal(2I, 3)::Diagonal{Int} == Matrix(2I, 3, 3) - @test Diagonal(2.0I, 3)::Diagonal{Float64} == Matrix(2I, 3, 3) - @test Diagonal{Real}(2I, 3)::Diagonal{Real} == Matrix(2I, 3, 3) - @test Diagonal{Float64}(2I, 3)::Diagonal{Float64} == Matrix(2I, 3, 3) -end - -@testset "equality comparison of matrices with UniformScaling" begin - # AbstractMatrix methods - diagI = Diagonal(fill(1, 3)) - rdiagI = view(diagI, 1:2, 1:3) - bidiag = Bidiagonal(fill(2, 3), fill(2, 2), :U) - @test diagI == I == diagI # test isone(I) path / equality - @test 2diagI != I != 2diagI # test isone(I) path / inequality - @test 0diagI == 0I == 0diagI # test iszero(I) path / equality - @test 2diagI != 0I != 2diagI # test iszero(I) path / inequality - @test 2diagI == 2I == 2diagI # test generic path / equality - @test 0diagI != 2I != 0diagI # test generic path / inequality on diag - @test bidiag != 2I != bidiag # test generic path / inequality off diag - @test rdiagI != I != rdiagI # test square matrix check - # StridedMatrix specialization - denseI = [1 0 0; 0 
1 0; 0 0 1] - rdenseI = [1 0 0 0; 0 1 0 0; 0 0 1 0] - alltwos = fill(2, (3, 3)) - @test denseI == I == denseI # test isone(I) path / equality - @test 2denseI != I != 2denseI # test isone(I) path / inequality - @test 0denseI == 0I == 0denseI # test iszero(I) path / equality - @test 2denseI != 0I != 2denseI # test iszero(I) path / inequality - @test 2denseI == 2I == 2denseI # test generic path / equality - @test 0denseI != 2I != 0denseI # test generic path / inequality on diag - @test alltwos != 2I != alltwos # test generic path / inequality off diag - @test rdenseI != I != rdenseI # test square matrix check - - # isequal - @test !isequal(I, I(3)) - @test !isequal(I(1), I) - @test !isequal([1], I) - @test isequal(I, 1I) - @test !isequal(2I, 3I) -end - -@testset "operations involving I should preserve eltype" begin - @test isa(Int8(1) + I, Int8) - @test isa(Float16(1) + I, Float16) - @test eltype(Int8(1)I) == Int8 - @test eltype(Float16(1)I) == Float16 - @test eltype(fill(Int8(1), 2, 2)I) == Int8 - @test eltype(fill(Float16(1), 2, 2)I) == Float16 - @test eltype(fill(Int8(1), 2, 2) + I) == Int8 - @test eltype(fill(Float16(1), 2, 2) + I) == Float16 -end - -@testset "test that UniformScaling is applied correctly for matrices of matrices" begin - LL = Bidiagonal(fill(0*I, 3), fill(1*I, 2), :L) - @test (I - LL')\[[0], [0], [1]] == (I - LL)'\[[0], [0], [1]] == fill([1], 3) -end - -# Ensure broadcasting of I is an error (could be made to work in the future) -@testset "broadcasting of I (#23197)" begin - @test_throws MethodError I .+ 1 - @test_throws MethodError I .+ [1 1; 1 1] -end - -@testset "in-place mul! and div! methods" begin - J = randn()*I - A = randn(4, 3) - C = similar(A) - target_mul = J * A - target_div = A / J - @test mul!(C, J, A) == target_mul - @test mul!(C, A, J) == target_mul - @test lmul!(J, copyto!(C, A)) == target_mul - @test rmul!(copyto!(C, A), J) == target_mul - @test ldiv!(J, copyto!(C, A)) == target_div - @test ldiv!(C, J, A) == target_div - @test rdiv!(copyto!(C, A), J) == target_div - - A = randn(4, 3) - C = randn!(similar(A)) - alpha = randn() - beta = randn() - target = J * A * alpha + C * beta - @test mul!(copy(C), J, A, alpha, beta) ≈ target - @test mul!(copy(C), A, J, alpha, beta) ≈ target - - a = randn() - C = randn(3, 3) - target_5mul = a*alpha*J + beta*C - @test mul!(copy(C), a, J, alpha, beta) ≈ target_5mul - @test mul!(copy(C), J, a, alpha, beta) ≈ target_5mul - target_5mul = beta*C # alpha = 0 - @test mul!(copy(C), a, J, 0, beta) ≈ target_5mul - target_5mul = a*alpha*Matrix(J, 3, 3) # beta = 0 - @test mul!(copy(C), a, J, alpha, 0) ≈ target_5mul - -end - -@testset "Construct Diagonal from UniformScaling" begin - @test size(I(3)) === (3,3) - @test I(3) isa Diagonal - @test I(3) == [1 0 0; 0 1 0; 0 0 1] -end - -@testset "dot" begin - A = randn(3, 3) - λ = randn() - J = UniformScaling(λ) - @test dot(A, J) ≈ dot(J, A) - @test dot(A, J) ≈ tr(A' * J) - - A = rand(ComplexF64, 3, 3) - λ = randn() + im * randn() - J = UniformScaling(λ) - @test dot(A, J) ≈ conj(dot(J, A)) - @test dot(A, J) ≈ tr(A' * J) -end - -@testset "generalized dot" begin - x = rand(-10:10, 3) - y = rand(-10:10, 3) - λ = rand(-10:10) - J = UniformScaling(λ) - @test dot(x, J, y) == λ*dot(x, y) - λ = Quaternion(0.44567, 0.755871, 0.882548, 0.423612) - x, y = Quaternion(rand(4)...), Quaternion(rand(4)...) 
- @test dot([x], λ*I, [y]) ≈ dot(x, λ, y) ≈ dot(x, λ*y) -end - -@testset "Factorization solutions" begin - J = complex(randn(),randn()) * I - qrp = A -> qr(A, ColumnNorm()) - - # thin matrices - X = randn(3,2) - Z = pinv(X) - for fac in (qr,qrp,svd) - F = fac(X) - @test @inferred(F \ I) ≈ Z - @test @inferred(F \ J) ≈ Z * J - end - - # square matrices - X = randn(3,3) - X = X'X + rand()I # make positive definite for cholesky - Z = pinv(X) - for fac in (bunchkaufman,cholesky,lu,qr,qrp,svd) - F = fac(X) - @test @inferred(F \ I) ≈ Z - @test @inferred(F \ J) ≈ Z * J - end - - # fat matrices - only rank-revealing variants - X = randn(2,3) - Z = pinv(X) - for fac in (qrp,svd) - F = fac(X) - @test @inferred(F \ I) ≈ Z - @test @inferred(F \ J) ≈ Z * J - end -end - -@testset "offset arrays" begin - A = OffsetArray(zeros(4,4), -1:2, 0:3) - @test sum(I + A) ≈ 3.0 - @test sum(A + I) ≈ 3.0 - @test sum(I - A) ≈ 3.0 - @test sum(A - I) ≈ -3.0 -end - -@testset "type promotion when dividing UniformScaling by matrix" begin - A = randn(5,5) - cA = complex(A) - J = (5+2im)*I - @test J/A ≈ J/cA - @test A\J ≈ cA\J -end - -end # module TestUniformscaling diff --git a/stdlib/Makefile b/stdlib/Makefile index aacf7ca30e146..a10503a3566c6 100644 --- a/stdlib/Makefile +++ b/stdlib/Makefile @@ -40,14 +40,14 @@ endef $(foreach jll,$(JLLS),$(eval $(call download-artifacts-toml,$(jll)))) STDLIBS = Artifacts Base64 CRC32c Dates FileWatching \ - Future InteractiveUtils Libdl LibGit2 LinearAlgebra Logging \ + Future InteractiveUtils Libdl LibGit2 Logging \ Markdown Mmap Printf Profile Random REPL Serialization \ SharedArrays Sockets Test TOML Unicode UUIDs \ $(JLL_NAMES) STDLIBS_EXT = Pkg Statistics LazyArtifacts LibCURL DelimitedFiles Downloads ArgTools \ Tar NetworkOptions SuiteSparse SparseArrays StyledStrings SHA Distributed \ - JuliaSyntaxHighlighting + JuliaSyntaxHighlighting LinearAlgebra $(foreach module, $(STDLIBS_EXT), $(eval $(call stdlib-external,$(module),$(shell echo $(module) | tr a-z A-Z)))) From 81568a6dc1bc9bf27feed4f36460c60fcf2f4fa8 Mon Sep 17 00:00:00 2001 From: Zentrik Date: Mon, 25 Nov 2024 13:17:51 +0000 Subject: [PATCH 502/537] Bump LLVMLibUnwind to v19.1.4 (#56674) --- deps/checksums/llvm | 70 +++++++-------- deps/llvmunwind.version | 2 +- deps/patches/llvm-libunwind-force-dwarf.patch | 87 ++++++++++--------- .../llvm-libunwind-prologue-epilogue.patch | 2 +- deps/unwind.mk | 16 +++- stdlib/LLVMLibUnwind_jll/Project.toml | 2 +- 6 files changed, 95 insertions(+), 84 deletions(-) diff --git a/deps/checksums/llvm b/deps/checksums/llvm index 1b375e6e72c5d..fbbb34480d893 100644 --- a/deps/checksums/llvm +++ b/deps/checksums/llvm @@ -110,38 +110,40 @@ LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/md5/0a4ce LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.asserts.tar.gz/sha512/7fd5c69bfde6264ae4e548ec9c399dd09b1a5fe4b9cced23d6bc4257f0f67874b838d53ee8d6eef7fc01ee9d086758e06f00bb0a0388b97de2eb85143a47192a LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/da2430483844823d31bcc5f302252ac2 LLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/19e9168b44d40acdc0d924e16f93c315237207a4441ae78997c511135872e557f654236bc859453069671145e81e961ac93c9dfa601d1b6631b9ccfa09b929b3 -LLVMLibUnwind.v14.0.6+0.aarch64-apple-darwin.tar.gz/md5/d8584e0e3dc26ea7404d3719cea9e233 -LLVMLibUnwind.v14.0.6+0.aarch64-apple-darwin.tar.gz/sha512/7a0396eaace91b9b4d013c209605d468a7ff9b99ede9fdd57602539a6fa6f3ea84a440f32840056a1234df3ef1896739ea0820fee72b4f208096c553fc54adb9 
-LLVMLibUnwind.v14.0.6+0.aarch64-linux-gnu.tar.gz/md5/d6edea561b61173d05aa79936e49f6b7 -LLVMLibUnwind.v14.0.6+0.aarch64-linux-gnu.tar.gz/sha512/9fbe29ec6a33c719bc9a4dd19911ceded9622269c042192d339a6cf45aa8209ad64c424167c094ca01293438af5930f091acba0538b3fe640a746297f5cc8cb3 -LLVMLibUnwind.v14.0.6+0.aarch64-linux-musl.tar.gz/md5/3ec68c87e4bddd024ee0ca6adc2b3b96 -LLVMLibUnwind.v14.0.6+0.aarch64-linux-musl.tar.gz/sha512/be3cd9d5510c2693dee1494c36c479d32311ff83f5b2d31c08508a3dd370788961ce46e9025afe148a0febd05942fd294370a357dd717bee353d8a108617f6de -LLVMLibUnwind.v14.0.6+0.armv6l-linux-gnueabihf.tar.gz/md5/8ca5a926d69124225d485d679232a54f -LLVMLibUnwind.v14.0.6+0.armv6l-linux-gnueabihf.tar.gz/sha512/353f540b342bc54877e7a41fe65c9eeac525fd91bf4cddbe1b3ec2ed93c3751beaf8316a4d31530502b067100b160301262e10cbe4407db3abf1ceb5d9a74eb2 -LLVMLibUnwind.v14.0.6+0.armv6l-linux-musleabihf.tar.gz/md5/4e5b576958f2a2e708eb5918ceef0de0 -LLVMLibUnwind.v14.0.6+0.armv6l-linux-musleabihf.tar.gz/sha512/2e98c472d3ee25c2e062efa4eb21ac9cfc49b26ea9d99ad4a8e7660c4c09f121d31193bd161f54ea332ce94785d601897311e9e6668adb1e25e2b666e0d5bb3f -LLVMLibUnwind.v14.0.6+0.armv7l-linux-gnueabihf.tar.gz/md5/1c81a886e799663ce8d04400c5b516a9 -LLVMLibUnwind.v14.0.6+0.armv7l-linux-gnueabihf.tar.gz/sha512/236b78b9a17eaae74ab07349ac8dde16c3abbd48e0d075abd1c195d60efff48e2fbf799554df114ea3d3dba937e0369430a2788bde2a1201126e026ef6cdac42 -LLVMLibUnwind.v14.0.6+0.armv7l-linux-musleabihf.tar.gz/md5/0371f43ebcb571d0a635739252b88986 -LLVMLibUnwind.v14.0.6+0.armv7l-linux-musleabihf.tar.gz/sha512/605318ae3737e26ff89d6291311a7db3bc3ec7c8d1f2e72ae40fd3d9df0754ee2ebfb77687122605f26d76d62effb85157bc39982814920d5af46c124e71a5ff -LLVMLibUnwind.v14.0.6+0.i686-linux-gnu.tar.gz/md5/cd3f1cdf404b6102754ced4bd3a890f6 -LLVMLibUnwind.v14.0.6+0.i686-linux-gnu.tar.gz/sha512/65fe2c5b1e04da1e1d8111a0b0083fa0fa9447eaea7af7a018c09fe6d5506566c491bbad296a7be8c488ca3495016ae16a6879d69f057f8866d94910147dee03 -LLVMLibUnwind.v14.0.6+0.i686-linux-musl.tar.gz/md5/abac9b416d2ba5abcf5ce849f43ffa96 -LLVMLibUnwind.v14.0.6+0.i686-linux-musl.tar.gz/sha512/fed677ed6f103c56eb9dd4578fa37a56ed2a4bc803aa1997c5af19762a623d2f82db1f72f429448d66fcef3b37af2104e6cb782f023aaabef086a921a862b042 -LLVMLibUnwind.v14.0.6+0.i686-w64-mingw32.tar.gz/md5/4c71ffd7c8cabb1c0ed6290b193883c5 -LLVMLibUnwind.v14.0.6+0.i686-w64-mingw32.tar.gz/sha512/6b1421a3268170467225112167cdb33fec962181993a2dad5594d4ee0623ac88ee0588cdc7d0656dc1cb9129ef96f621a97a224731cd161134d7d63c8fd32c16 -LLVMLibUnwind.v14.0.6+0.powerpc64le-linux-gnu.tar.gz/md5/06faf505f0dc354afcd01113cfc57af2 -LLVMLibUnwind.v14.0.6+0.powerpc64le-linux-gnu.tar.gz/sha512/1f9dfbd403e2ce121e126c217baede178cb1323012bb5e3cd1f778ff51e4216aed9dd69036e2baffbd60a6f5ae438ddaba6c13809459e94bb00be3f7bfc8c30e -LLVMLibUnwind.v14.0.6+0.x86_64-apple-darwin.tar.gz/md5/516a11d99306e3f214968a7951b07a06 -LLVMLibUnwind.v14.0.6+0.x86_64-apple-darwin.tar.gz/sha512/885738599bbd96f20083f9b9368ce3f243bd5868d3ac9a45189de6cb40b6664a6dcdaece159989e504670231db8c2addfa8d544003eb0cdabba960e4ab6a4470 -LLVMLibUnwind.v14.0.6+0.x86_64-linux-gnu.tar.gz/md5/d851b90ea3f9664774316169fc494e21 -LLVMLibUnwind.v14.0.6+0.x86_64-linux-gnu.tar.gz/sha512/a1f529454f0881baaa508481ba97ecffb040fa92141b4cbc72278adcf8b84f0766fa918aea7fb99ce690c4fd80c36fec365987625db42f4e7bb36ad24ce177d0 -LLVMLibUnwind.v14.0.6+0.x86_64-linux-musl.tar.gz/md5/dc4e86eb2effe1f6cb0d0ceda635f226 
-LLVMLibUnwind.v14.0.6+0.x86_64-linux-musl.tar.gz/sha512/c52de384853890f9df81aa9e422c1ba3fde12b2ae9c7b60b9ecdc6d0c88eab495dd336af2b6cd2c31d6eddcd0a213954eadbc7884bc39ce2039cec672eac32fe -LLVMLibUnwind.v14.0.6+0.x86_64-unknown-freebsd.tar.gz/md5/8477e3624c73a820d8ab82a53e1e10fa -LLVMLibUnwind.v14.0.6+0.x86_64-unknown-freebsd.tar.gz/sha512/32ce031245a5b59a779cd77fa3c9bf05ee59e48c913b75d4964bea49f37da232c59a42ad993f7b5edc88322148c1d7394984349682bfce3b69d33a51756ac8e3 -LLVMLibUnwind.v14.0.6+0.x86_64-w64-mingw32.tar.gz/md5/7be93eccbdb0aff427c43af651073d66 -LLVMLibUnwind.v14.0.6+0.x86_64-w64-mingw32.tar.gz/sha512/89a61a81ec664c72107ac09e717200b00434350bf77064267180bc0c101a59e0ee8c8af4dd6fe75eacdeb14e82743c138b2fc558ca08550d8796b8db93f89da4 +LLVMLibUnwind.v19.1.4+0.aarch64-apple-darwin.tar.gz/md5/aace388fc1ece82ea524c582506ae931 +LLVMLibUnwind.v19.1.4+0.aarch64-apple-darwin.tar.gz/sha512/c0211340a05630bcfcf9e3bab97da3e9f07e596e8d391427fa919c99502ab0a09878eda379254f379511884347f7e742872e8589f9b6ccbc2d126a5dfe0a350f +LLVMLibUnwind.v19.1.4+0.aarch64-linux-gnu.tar.gz/md5/942d0b4ffb8bfd743cdafebf5bdfdbb3 +LLVMLibUnwind.v19.1.4+0.aarch64-linux-gnu.tar.gz/sha512/ec68df054c6694d17cb7f5c389adc4b8b855023f9ca03713d21f1f0c58de2b90166a9f3981b81da5f817f6b09f85fb11e85732d6c78f1d115d6aecf326dc20a1 +LLVMLibUnwind.v19.1.4+0.aarch64-linux-musl.tar.gz/md5/2c27d3c130f54e38e6639ebf7095f743 +LLVMLibUnwind.v19.1.4+0.aarch64-linux-musl.tar.gz/sha512/d348cc1f87927a3d36cd3f2587cf4161dbdc9f3555900ee338857d806384c0cff8fbe67bef97cad0d3098cc8c7f149aac699f3defe87db70fffcc94d681810b6 +LLVMLibUnwind.v19.1.4+0.aarch64-unknown-freebsd.tar.gz/md5/6bb1466d45159193407f27201a443ddc +LLVMLibUnwind.v19.1.4+0.aarch64-unknown-freebsd.tar.gz/sha512/da6da450e6fba5d501be13d83bc9133796b92e1b3a6cc7cb97470cc7476a369fcd8ddbc9267f03fa4cbe1f2484359eeb70fb629b26c9a1d7ea0065c5a671e1b9 +LLVMLibUnwind.v19.1.4+0.armv6l-linux-gnueabihf.tar.gz/md5/2cdf57d34b1db677498dfc5d89501599 +LLVMLibUnwind.v19.1.4+0.armv6l-linux-gnueabihf.tar.gz/sha512/217c15e1bfdc72014dd26321eb46ae9cfadb7839c693caf3c974989ee2036781cf7e62bb7175766f5171bf32de53a95598ef463c70a0ac64ec012ca9bc19e6df +LLVMLibUnwind.v19.1.4+0.armv6l-linux-musleabihf.tar.gz/md5/110c80b549d1f80faa36a3e0b39a11b4 +LLVMLibUnwind.v19.1.4+0.armv6l-linux-musleabihf.tar.gz/sha512/b9151aaaaae4adf5da5701ee5962d712def509f85101dae485b905f73391d8658b5a0a58ea1a4c68cc3bc68d7e17d557c05c98d33d907cdb512513ffff75765b +LLVMLibUnwind.v19.1.4+0.armv7l-linux-gnueabihf.tar.gz/md5/bf50011ce9e4c82d49e61e868b27ea23 +LLVMLibUnwind.v19.1.4+0.armv7l-linux-gnueabihf.tar.gz/sha512/d08faae71010e4a7d25a16374249ff1740ed7883e260e544e4fb0f0d3758d2eb76fea93433cb1987850f54f1ae6528b6336fc2e1db9b46f49defd870e97f8a94 +LLVMLibUnwind.v19.1.4+0.armv7l-linux-musleabihf.tar.gz/md5/142118a84c1b959b0b202d51072168f9 +LLVMLibUnwind.v19.1.4+0.armv7l-linux-musleabihf.tar.gz/sha512/71ac937417f5f2226b8952c925fff94b553de8a29fc45fee6c0fef53a9cf8c07979c60408c8efcf827b260bc3a287059aefa24e050393f2e09b65af45b60d07f +LLVMLibUnwind.v19.1.4+0.i686-linux-gnu.tar.gz/md5/1bcd011ba209cc840647c684dcad9631 +LLVMLibUnwind.v19.1.4+0.i686-linux-gnu.tar.gz/sha512/8309c3d82d0a94c4c7a8b72720702f5cb0c97f316492217f1eebfc0dc33b4e9c7c8af5c6ee3700ea0c1cc0fd66c90a52389c2aaaaeb67f6278e53e33a476abc1 +LLVMLibUnwind.v19.1.4+0.i686-linux-musl.tar.gz/md5/8db27a7ab4a23febfd6a8eb2f65cd611 +LLVMLibUnwind.v19.1.4+0.i686-linux-musl.tar.gz/sha512/dc7839d2c9a258b122985eb35096e0000561598c54fbd1c5f269921146e6e85589c6f60a0fb964ebfc78af703045373999163253ad2c8f09475bf6bdb923a59f 
+LLVMLibUnwind.v19.1.4+0.i686-w64-mingw32.tar.gz/md5/7de74ebac40c9425f619c7f8b309de00 +LLVMLibUnwind.v19.1.4+0.i686-w64-mingw32.tar.gz/sha512/f28f4e8c25cdc06c8d363735e1914c748c150a962c37dfa8a45a3ba514d3fa1b6c551809b8d7f668b258c3165674f012ee6a18f36421e624f38ece27db755a3f +LLVMLibUnwind.v19.1.4+0.powerpc64le-linux-gnu.tar.gz/md5/c5277c6c127ccc5fa66867ddeb6f93a2 +LLVMLibUnwind.v19.1.4+0.powerpc64le-linux-gnu.tar.gz/sha512/b3d61aee2187c185be1b1b26edaccea66da750931c1216db1f3e89393c1d2c101335d791f0124282320084e697386f395951035e5071da23ecd55133fad472fc +LLVMLibUnwind.v19.1.4+0.x86_64-apple-darwin.tar.gz/md5/64d459ec7cb7d70b89f5ed62a1261425 +LLVMLibUnwind.v19.1.4+0.x86_64-apple-darwin.tar.gz/sha512/861130348376c8a54b2aa8c86d9d338a4b5fb88d3d2745578dcf15e0f477f518c07a505ce86c898c87142a7c5bf2e1ce43daedecc386a7f3bde67af8e6a56e64 +LLVMLibUnwind.v19.1.4+0.x86_64-linux-gnu.tar.gz/md5/2702948c4171ad35f521e15ee4ebcc8e +LLVMLibUnwind.v19.1.4+0.x86_64-linux-gnu.tar.gz/sha512/306759ae9064a9746474c53b674eb0b9da7cef6271094009c3244542295ef7a86cb77096b4a18dc2e50628c6ab02e2f1c6e39a1401e86fe4743410ae8d782126 +LLVMLibUnwind.v19.1.4+0.x86_64-linux-musl.tar.gz/md5/a7f9ea5dfbd4760b5a33c97581ad4b95 +LLVMLibUnwind.v19.1.4+0.x86_64-linux-musl.tar.gz/sha512/08add6b1a4e90f50fbceea6d72a476fba3a2b271f44bf64f06b53f35dfecc756f71843d54d0895a2f62d56df24f3675619cf3220215acb2e0a574696c6fa630c +LLVMLibUnwind.v19.1.4+0.x86_64-unknown-freebsd.tar.gz/md5/05f5b916fa639a68096cc73fb82007f8 +LLVMLibUnwind.v19.1.4+0.x86_64-unknown-freebsd.tar.gz/sha512/0a137168c466861fdbdbef86dec96ece0d4c10f87fdc2dd729b445deb0fd59b214241b62644da77581a0100826e07dacf81fa060e67e35ff38df0d6807cb618b +LLVMLibUnwind.v19.1.4+0.x86_64-w64-mingw32.tar.gz/md5/bb073cb86c821a70b845bd5de0edc2d9 +LLVMLibUnwind.v19.1.4+0.x86_64-w64-mingw32.tar.gz/sha512/24d206c65c7be34485a1492250a9ca958e70be7057b981940bc24c4822e50e3963c9f88f42892ba2ea6df17fedb2783ace1693aeac74f200a5ca6033a14d6cb9 libLLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/md5/f7ce9539d0802dd4b5e5e673d36d1a99 libLLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.asserts.tar.gz/sha512/7a54be16ccc327731c802380d29f2c9ee5e635cd6af0b7eb6b69e9d3b0b4fecb74147359af182def3b016ec4445891bdb91eb0d541b783e451e8263968c25161 libLLVM.v18.1.7+3.aarch64-apple-darwin-llvm_version+18.tar.gz/md5/cd946ab46745ce71ad7438cf0f30cfd0 @@ -256,5 +258,5 @@ libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/md5/0e21a6d22d libLLVM.v18.1.7+3.x86_64-w64-mingw32-cxx11-llvm_version+18.tar.gz/sha512/efbbad538c6f8b773d7ef1019a9b754e1ce7da59ea5f00f452fa7f7cc93c40f248762eb7f708e3d2fa7f9bdbc0b680d6e6502a07bbca0d4e701b51b0565d625e llvm-julia-18.1.7-2.tar.gz/md5/5c0ae4abc4ce31a86d5d6d4ecabc2683 llvm-julia-18.1.7-2.tar.gz/sha512/b4d1dde929a8670eec1a9b25abe23fbc926a922e61b60ed99b52b440cd07cb026e7f746878292db4cd0cb422d9b87ecc4ee4b2b141f8e9411855d18da51facb9 -llvm-project-14.0.6.tar.xz/md5/0b3373eded268dc27e2e874872fed4eb -llvm-project-14.0.6.tar.xz/sha512/6fc6eeb60fac698702d1aac495fc0161eb7216a1f8db2020af8fccec5837831f7cc20dc2a169bf4f0b5f520748280b4a86621f3697d622aa58faaa45dbfaad13 +llvm-project-19.1.4.tar.xz/md5/1e13043b18558e4346ea3769094c9737 +llvm-project-19.1.4.tar.xz/sha512/a586f8a41dde5e0d9ca6d8c58e9ef2a2e59b70a86d2e2c46106dc31b5c096bb80af0cdbdb486179e9cc676a540099f49a1c2db9e5e84c50362db1f72e9af6906 diff --git a/deps/llvmunwind.version b/deps/llvmunwind.version index 9c2a91c566ba2..666cae54025b4 100644 --- a/deps/llvmunwind.version +++ b/deps/llvmunwind.version @@ -2,4 +2,4 @@ LLVMUNWIND_JLL_NAME := LLVMLibUnwind ## 
source build -LLVMUNWIND_VER := 14.0.6 +LLVMUNWIND_VER := 19.1.4 diff --git a/deps/patches/llvm-libunwind-force-dwarf.patch b/deps/patches/llvm-libunwind-force-dwarf.patch index 2f4d31acb8a4a..494c5e77e187b 100644 --- a/deps/patches/llvm-libunwind-force-dwarf.patch +++ b/deps/patches/llvm-libunwind-force-dwarf.patch @@ -6,22 +6,23 @@ Date: Tue Aug 27 15:01:22 2013 -0400 Add option to step with DWARF --- -diff -pur a/libunwind/include/libunwind.h b/libunwind/include/libunwind.h ---- a/libunwind/include/libunwind.h 2021-06-28 18:23:38.000000000 +0200 -+++ b/libunwind/include/libunwind.h 2022-05-04 18:44:24.000000000 +0200 +diff --git a/libunwind/include/libunwind.h b/libunwind/include/libunwind.h +index b2dae8f..fc37afb 100644 +--- a/libunwind/include/libunwind.h ++++ b/libunwind/include/libunwind.h @@ -108,6 +108,7 @@ extern "C" { - + extern int unw_getcontext(unw_context_t *) LIBUNWIND_AVAIL; extern int unw_init_local(unw_cursor_t *, unw_context_t *) LIBUNWIND_AVAIL; +extern int unw_init_local_dwarf(unw_cursor_t *, unw_context_t *) LIBUNWIND_AVAIL; extern int unw_step(unw_cursor_t *) LIBUNWIND_AVAIL; extern int unw_get_reg(unw_cursor_t *, unw_regnum_t, unw_word_t *) LIBUNWIND_AVAIL; extern int unw_get_fpreg(unw_cursor_t *, unw_regnum_t, unw_fpreg_t *) LIBUNWIND_AVAIL; -Only in b/libunwind/include: libunwind.h.orig -diff -pur a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp ---- a/libunwind/src/UnwindCursor.hpp 2021-06-28 18:23:38.000000000 +0200 -+++ b/libunwind/src/UnwindCursor.hpp 2022-05-04 18:45:11.000000000 +0200 -@@ -437,6 +437,9 @@ public: +diff --git a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp +index 7753936..26ca486 100644 +--- a/libunwind/src/UnwindCursor.hpp ++++ b/libunwind/src/UnwindCursor.hpp +@@ -453,6 +453,9 @@ public: virtual bool isSignalFrame() { _LIBUNWIND_ABORT("isSignalFrame not implemented"); } @@ -31,7 +32,7 @@ diff -pur a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp virtual bool getFunctionName(char *, size_t, unw_word_t *) { _LIBUNWIND_ABORT("getFunctionName not implemented"); } -@@ -894,6 +897,7 @@ public: +@@ -944,6 +947,7 @@ public: virtual void getInfo(unw_proc_info_t *); virtual void jumpto(); virtual bool isSignalFrame(); @@ -39,24 +40,23 @@ diff -pur a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp virtual bool getFunctionName(char *buf, size_t len, unw_word_t *off); virtual void setInfoBasedOnIPRegister(bool isReturnAddress = false); virtual const char *getRegisterName(int num); -@@ -963,7 +967,7 @@ private: +@@ -1031,7 +1035,7 @@ private: const UnwindInfoSections §s); - int stepWithCompactEncoding() { - #if defined(_LIBUNWIND_SUPPORT_DWARF_UNWIND) + int stepWithCompactEncoding(bool stage2 = false) { + #if defined(_LIBUNWIND_SUPPORT_DWARF_UNWIND) - if ( compactSaysUseDwarf() ) + if ( _forceDwarf || compactSaysUseDwarf() ) - return stepWithDwarfFDE(); - #endif + return stepWithDwarfFDE(stage2); + #endif R dummy; -@@ -1198,6 +1202,7 @@ private: - unw_proc_info_t _info; - bool _unwindInfoMissing; - bool _isSignalFrame; -+ bool _forceDwarf; - #if defined(_LIBUNWIND_TARGET_LINUX) && defined(_LIBUNWIND_TARGET_AARCH64) +@@ -1317,13 +1321,14 @@ private: + #if defined(_LIBUNWIND_CHECK_LINUX_SIGRETURN) bool _isSigReturn = false; #endif -@@ -1207,7 +1212,7 @@ private: ++ bool _forceDwarf; + }; + + template UnwindCursor::UnwindCursor(unw_context_t *context, A &as) : _addressSpace(as), _registers(context), _unwindInfoMissing(false), @@ -65,8 +65,8 @@ diff -pur a/libunwind/src/UnwindCursor.hpp 
b/libunwind/src/UnwindCursor.hpp static_assert((check_fit, unw_cursor_t>::does_fit), "UnwindCursor<> does not fit in unw_cursor_t"); static_assert((alignof(UnwindCursor) <= alignof(unw_cursor_t)), -@@ -1217,7 +1222,8 @@ UnwindCursor::UnwindCursor(unw_con - +@@ -1333,7 +1338,8 @@ UnwindCursor::UnwindCursor(unw_context_t *context, A &as) + template UnwindCursor::UnwindCursor(A &as, void *) - : _addressSpace(as), _unwindInfoMissing(false), _isSignalFrame(false) { @@ -75,18 +75,18 @@ diff -pur a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp memset(&_info, 0, sizeof(_info)); // FIXME // fill in _registers from thread arg -@@ -1273,6 +1279,10 @@ template bool U +@@ -1396,6 +1402,10 @@ template bool UnwindCursor::isSignalFrame() { return _isSignalFrame; } - + +template void UnwindCursor::setForceDWARF(bool force) { + _forceDwarf = force; +} + #endif // defined(_LIBUNWIND_SUPPORT_SEH_UNWIND) - + #if defined(_LIBUNWIND_ARM_EHABI) -@@ -1941,7 +1951,13 @@ void UnwindCursor::setInfoBasedOnI +@@ -2611,7 +2621,12 @@ void UnwindCursor::setInfoBasedOnIPRegister(bool isReturnAddress) { // record that we have no unwind info. if (_info.format == 0) _unwindInfoMissing = true; @@ -96,14 +96,14 @@ diff -pur a/libunwind/src/UnwindCursor.hpp b/libunwind/src/UnwindCursor.hpp + #else return; + #endif -+ } } #endif // defined(_LIBUNWIND_SUPPORT_COMPACT_UNWIND) -diff -pur a/libunwind/src/libunwind.cpp b/libunwind/src/libunwind.cpp ---- a/libunwind/src/libunwind.cpp 2021-06-28 18:23:38.000000000 +0200 -+++ b/libunwind/src/libunwind.cpp 2022-05-04 18:44:24.000000000 +0200 -@@ -71,6 +71,7 @@ _LIBUNWIND_HIDDEN int __unw_init_local(u +diff --git a/libunwind/src/libunwind.cpp b/libunwind/src/libunwind.cpp +index 217dde9..8e9a77a 100644 +--- a/libunwind/src/libunwind.cpp ++++ b/libunwind/src/libunwind.cpp +@@ -86,6 +86,7 @@ _LIBUNWIND_HIDDEN int __unw_init_local(unw_cursor_t *cursor, new (reinterpret_cast *>(cursor)) UnwindCursor( context, LocalAddressSpace::sThisAddressSpace); @@ -111,10 +111,10 @@ diff -pur a/libunwind/src/libunwind.cpp b/libunwind/src/libunwind.cpp #undef REGISTER_KIND AbstractUnwindCursor *co = (AbstractUnwindCursor *)cursor; co->setInfoBasedOnIPRegister(); -@@ -79,6 +80,54 @@ _LIBUNWIND_HIDDEN int __unw_init_local(u +@@ -109,6 +110,54 @@ _LIBUNWIND_HIDDEN int __unw_get_reg(unw_cursor_t *cursor, unw_regnum_t regNum, } - _LIBUNWIND_WEAK_ALIAS(__unw_init_local, unw_init_local) - + _LIBUNWIND_WEAK_ALIAS(__unw_get_reg, unw_get_reg) + +_LIBUNWIND_HIDDEN int __unw_init_local_dwarf(unw_cursor_t *cursor, + unw_context_t *context) { + _LIBUNWIND_TRACE_API("__unw_init_local_dwarf(cursor=%p, context=%p)", @@ -163,14 +163,15 @@ diff -pur a/libunwind/src/libunwind.cpp b/libunwind/src/libunwind.cpp +} +_LIBUNWIND_WEAK_ALIAS(__unw_init_local_dwarf, unw_init_local_dwarf) + - /// Get value of specified register at cursor position in stack frame. - _LIBUNWIND_HIDDEN int __unw_get_reg(unw_cursor_t *cursor, unw_regnum_t regNum, - unw_word_t *value) { -diff -pur a/libunwind/src/libunwind_ext.h b/libunwind/src/libunwind_ext.h ---- a/libunwind/src/libunwind_ext.h 2021-06-28 18:23:38.000000000 +0200 -+++ b/libunwind/src/libunwind_ext.h 2022-05-04 18:44:24.000000000 +0200 + /// Set value of specified register at cursor position in stack frame. 
+ _LIBUNWIND_HIDDEN int __unw_set_reg(unw_cursor_t *cursor, unw_regnum_t regNum, + unw_word_t value) { +diff --git a/libunwind/src/libunwind_ext.h b/libunwind/src/libunwind_ext.h +index 28db43a..c4f9767 100644 +--- a/libunwind/src/libunwind_ext.h ++++ b/libunwind/src/libunwind_ext.h @@ -25,6 +25,7 @@ extern "C" { - + extern int __unw_getcontext(unw_context_t *); extern int __unw_init_local(unw_cursor_t *, unw_context_t *); +extern int __unw_init_local_dwarf(unw_cursor_t *, unw_context_t *); diff --git a/deps/patches/llvm-libunwind-prologue-epilogue.patch b/deps/patches/llvm-libunwind-prologue-epilogue.patch index 7dadca728f9cf..b2618998905e4 100644 --- a/deps/patches/llvm-libunwind-prologue-epilogue.patch +++ b/deps/patches/llvm-libunwind-prologue-epilogue.patch @@ -14,7 +14,7 @@ index 1c3175dff50a..78a658ccbc27 100644 @@ -310,6 +310,50 @@ int CompactUnwinder_x86_64::stepWithCompactEncodingRBPFrame( uint32_t savedRegistersLocations = EXTRACT_BITS(compactEncoding, UNWIND_X86_64_RBP_FRAME_REGISTERS); - + + // If we have not stored EBP yet + if (functionStart == registers.getIP()) { + uint64_t rsp = registers.getSP(); diff --git a/deps/unwind.mk b/deps/unwind.mk index 3951bbf36e22f..c934c382a23e7 100644 --- a/deps/unwind.mk +++ b/deps/unwind.mk @@ -85,7 +85,7 @@ check-unwind: $(BUILDDIR)/libunwind-$(UNWIND_VER)/build-checked ## LLVM libunwind ## -LLVMUNWIND_OPTS := $(CMAKE_COMMON) \ +LLVMUNWIND_OPTS := $(CMAKE_GENERATOR_COMMAND) $(CMAKE_COMMON) \ -DCMAKE_BUILD_TYPE=MinSizeRel \ -DLIBUNWIND_ENABLE_PEDANTIC=OFF \ -DLIBUNWIND_INCLUDE_DOCS=OFF \ @@ -93,6 +93,7 @@ LLVMUNWIND_OPTS := $(CMAKE_COMMON) \ -DLIBUNWIND_INSTALL_HEADERS=ON \ -DLIBUNWIND_ENABLE_ASSERTIONS=OFF \ -DLLVM_CONFIG_PATH=$(build_depsbindir)/llvm-config \ + -DLLVM_ENABLE_RUNTIMES="libunwind" \ -DLLVM_PATH=$(SRCCACHE)/llvm-project-$(LLVMUNWIND_VER)/llvm $(SRCCACHE)/llvm-project-$(LLVMUNWIND_VER).tar.xz: | $(SRCCACHE) @@ -122,16 +123,23 @@ checksum-llvmunwind: $(SRCCACHE)/llvm-project-$(LLVMUNWIND_VER).tar.xz $(BUILDDIR)/llvmunwind-$(LLVMUNWIND_VER)/build-configured: $(SRCCACHE)/llvm-project-$(LLVMUNWIND_VER)/source-extracted $(SRCCACHE)/llvm-project-$(LLVMUNWIND_VER)/libunwind/llvm-libunwind-freebsd-libgcc-api-compat.patch-applied mkdir -p $(dir $@) cd $(dir $@) && \ - $(CMAKE) $(dir $<)/libunwind $(LLVMUNWIND_OPTS) + $(CMAKE) $(dir $<) -S $(dir $<)/runtimes $(LLVMUNWIND_OPTS) echo 1 > $@ $(BUILDDIR)/llvmunwind-$(LLVMUNWIND_VER)/build-compiled: $(BUILDDIR)/llvmunwind-$(LLVMUNWIND_VER)/build-configured - $(MAKE) -C $(dir $<) + cd $(dir $<) && \ + $(if $(filter $(CMAKE_GENERATOR),make), \ + $(MAKE), \ + $(CMAKE) --build . 
--target unwind) echo 1 > $@ +LIBUNWIND_INSTALL = \ + cd $1 && mkdir -p $2$$(build_depsbindir) && \ + $$(CMAKE) -DCMAKE_INSTALL_PREFIX="$2$$(build_prefix)" -P libunwind/cmake_install.cmake + $(eval $(call staged-install, \ llvmunwind,llvmunwind-$(LLVMUNWIND_VER), \ - MAKE_INSTALL,,, \ + LIBUNWIND_INSTALL,,, \ cp -fR $(SRCCACHE)/llvm-project-$(LLVMUNWIND_VER)/libunwind/* $(build_includedir))) clean-llvmunwind: diff --git a/stdlib/LLVMLibUnwind_jll/Project.toml b/stdlib/LLVMLibUnwind_jll/Project.toml index 0cb0fe5440066..e102af311abec 100644 --- a/stdlib/LLVMLibUnwind_jll/Project.toml +++ b/stdlib/LLVMLibUnwind_jll/Project.toml @@ -1,6 +1,6 @@ name = "LLVMLibUnwind_jll" uuid = "47c5dbc3-30ba-59ef-96a6-123e260183d9" -version = "14.0.6+0" +version = "19.1.4+0" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" From 7df1dfa2f1a51755eb35a2b5e51c810674dc10ee Mon Sep 17 00:00:00 2001 From: N5N3 <2642243996@qq.com> Date: Tue, 26 Nov 2024 08:16:32 +0800 Subject: [PATCH 503/537] subtype: fast path for Type == TypeVar (#56640) close #56606 --- src/subtype.c | 42 ++++++++++++++++++++++++++++++++++++++++++ test/subtype.jl | 16 ++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/src/subtype.c b/src/subtype.c index 8de5b3514ef2f..a0b7bff4006ce 100644 --- a/src/subtype.c +++ b/src/subtype.c @@ -1660,6 +1660,42 @@ static int local_forall_exists_subtype(jl_value_t *x, jl_value_t *y, jl_stenv_t return sub; } +static int equal_var(jl_tvar_t *v, jl_value_t *x, jl_stenv_t *e) +{ + assert(e->Loffset == 0); + // Theoretically bounds change would be merged for union inputs. + // But intersection is not happy as splitting helps to avoid circular env. + assert(!e->intersection || !jl_is_uniontype(x)); + jl_varbinding_t *vb = lookup(e, v); + if (e->intersection && vb != NULL && vb->lb == vb->ub && jl_is_typevar(vb->lb)) + return equal_var((jl_tvar_t *)vb->lb, x, e); + record_var_occurrence(vb, e, 2); + if (vb == NULL) + return e->ignore_free || ( + local_forall_exists_subtype(x, v->lb, e, 2, !jl_has_free_typevars(x)) && + local_forall_exists_subtype(v->ub, x, e, 0, 0)); + if (!vb->right) + return local_forall_exists_subtype(x, vb->lb, e, 2, !jl_has_free_typevars(x)) && + local_forall_exists_subtype(vb->ub, x, e, 0, 0); + if (vb->lb == x) + return var_lt(v, x, e, 0); + if (!subtype_ccheck(x, vb->ub, e)) + return 0; + jl_value_t *lb = simple_join(vb->lb, x); + JL_GC_PUSH1(&lb); + if (!e->intersection || !jl_is_typevar(lb) || !reachable_var(lb, v, e)) + vb->lb = lb; + JL_GC_POP(); + if (vb->ub == x) + return 1; + if (!subtype_ccheck(vb->lb, x, e)) + return 0; + // skip `simple_meet` here as we have proven `x <: vb->ub` + if (!e->intersection || !reachable_var(x, v, e)) + vb->ub = x; + return 1; +} + static int forall_exists_equal(jl_value_t *x, jl_value_t *y, jl_stenv_t *e) { if (obviously_egal(x, y)) return 1; @@ -1690,6 +1726,12 @@ static int forall_exists_equal(jl_value_t *x, jl_value_t *y, jl_stenv_t *e) } } + if (e->Loffset == 0 && jl_is_typevar(y) && jl_is_type(x) && (!e->intersection || !jl_is_uniontype(x))) { + // Fastpath for Type == TypeVar. 
+ // Avoid duplicated `<:` check between adjacent `var_gt` and `var_lt` + return equal_var((jl_tvar_t *)y, x, e); + } + jl_saved_unionstate_t oldLunions; push_unionstate(&oldLunions, &e->Lunions); int sub = local_forall_exists_subtype(x, y, e, 2, -1); diff --git a/test/subtype.jl b/test/subtype.jl index dfa1487eaa55d..ba7f86bb86a14 100644 --- a/test/subtype.jl +++ b/test/subtype.jl @@ -2730,3 +2730,19 @@ let S = Dict{V,V} where {V}, @test A <: typeintersect(S, T) @test A <: typeintersect(T, S) end + +#issue 56606 +let + A = Tuple{Val{1}} + B = Tuple{Val} + for _ in 1:30 + A = Tuple{Val{A}} + B = Tuple{Val{<:B}} + end + @test A <: B +end +@testintersect( + Val{Tuple{Int,S,T}} where {S<:Any,T<:Vector{Vector{Int}}}, + Val{Tuple{T,R,S}} where {T,R<:Vector{T},S<:Vector{R}}, + Val{Tuple{Int, Vector{Int}, T}} where T<:Vector{Vector{Int}}, +) From ea825388134b356210d74b13e75326c7d2b636f1 Mon Sep 17 00:00:00 2001 From: "Steven G. Johnson" Date: Tue, 26 Nov 2024 05:27:29 -0500 Subject: [PATCH 504/537] attach binomial(n::Integer, k::Integer) method to docstring (#56679) Slight tweak to #54307: attach the `binomial(n::Integer, k::Integer)` method to the corresponding docstring, rather than the narrower `binomial(n::T, k::T) where {T<:Integer}` method. --- base/intfuncs.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/base/intfuncs.jl b/base/intfuncs.jl index e8d4b65305be7..db3b1fdeeb521 100644 --- a/base/intfuncs.jl +++ b/base/intfuncs.jl @@ -1205,6 +1205,8 @@ julia> binomial(-5, 3) # External links * [Binomial coefficient](https://en.wikipedia.org/wiki/Binomial_coefficient) on Wikipedia. """ +binomial(n::Integer, k::Integer) = binomial(promote(n, k)...) + Base.@assume_effects :terminates_locally function binomial(n::T, k::T) where T<:Integer n0, k0 = n, k k < 0 && return zero(T) @@ -1233,7 +1235,6 @@ Base.@assume_effects :terminates_locally function binomial(n::T, k::T) where T<: end copysign(x, sgn) end -binomial(n::Integer, k::Integer) = binomial(promote(n, k)...) """ binomial(x::Number, k::Integer) From a17db2b138b0e042329d2a3fb2efd53c68e2563f Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Tue, 26 Nov 2024 12:38:49 -0600 Subject: [PATCH 505/537] Make DefaultStable and DefaultUnstable dispatchable and display without internals (#56661) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, `DEFAULT_STABLE` was a giant chain of algorithms reflecting the full sorting dispatch system. Now, it's simply `DefaultStable()`. This has a few minor impacts: Previously, the public binding `Base.Sort.DEFAULT_STABLE` documented non-public implementation details of sorting dispatch in its extended help with a caviat that they are internal. Now, `Base.Sort.DEFAULT_STABLE` refers to the non-public binding `Base.Sort.DefaultStable` and implementation details are documented there with a warning that they are non-public. Previously, dispatching on `Base.Sort.DEFAULT_STABLE` required writing `::typeof(Base.Sort.DEFAULT_STABLE)` whereas now one could alternatively dispatch on the (internal) type `Base.Sort.DefaultStable`. Previously `Base.Sort.DEFAULT_STABLE === Base.Sort.DEFAULT_UNSTABLE` so when writing sorting algorithms for custom collections it was impossible to determine if the user asked for a stable algorithm. Now `DEFAULT_STABLE` is `DefaultStable()` and `DEFAULT_UNSTABLE` is `DefaultUnstable()`. 
Both the algorithms expand to the same large chain of algorithms `_DEFAULT_ALGORITHMS_FOR_VECTORS` but it is possible to intercept them before that happens. `Base.Sort.DEFAULT_STABLE` now prints as `DefaultStable()` instead of ```julia-repl julia> Base.Sort.DEFAULT_STABLE Base.Sort.SubArrayOptimization( Base.Sort.MissingOptimization( Base.Sort.BoolOptimization( Base.Sort.Small{10}( Base.Sort.InsertionSortAlg(), Base.Sort.IEEEFloatOptimization( Base.Sort.IsUIntMappable( Base.Sort.Small{40}( Base.Sort.InsertionSortAlg(), Base.Sort.CheckSorted( Base.Sort.ComputeExtrema( Base.Sort.ConsiderCountingSort( Base.Sort.CountingSort(), Base.Sort.ConsiderRadixSort( Base.Sort.RadixSort(), Base.Sort.Small{80}( Base.Sort.InsertionSortAlg(), Base.Sort.ScratchQuickSort(missing, missing, Base.Sort.InsertionSortAlg()))))))), Base.Sort.StableCheckSorted( Base.Sort.ScratchQuickSort(missing, missing, Base.Sort.InsertionSortAlg())))))))) ``` Factored out of #54494 at Triage's request (the git history reflects this history). --------- Co-authored-by: Lars Göttgens --- base/sort.jl | 61 +++++++++++++++++++++++++++++++++---------------- test/sorting.jl | 15 +++++++----- 2 files changed, 50 insertions(+), 26 deletions(-) diff --git a/base/sort.jl b/base/sort.jl index 6991f12551ab4..29e67a3eb8d8c 100644 --- a/base/sort.jl +++ b/base/sort.jl @@ -1475,21 +1475,15 @@ InitialOptimizations(next) = SubArrayOptimization( Small{10}( IEEEFloatOptimization( next))))) -""" - DEFAULT_STABLE -The default sorting algorithm. - -This algorithm is guaranteed to be stable (i.e. it will not reorder elements that compare -equal). It makes an effort to be fast for most inputs. - -The algorithms used by `DEFAULT_STABLE` are an implementation detail. See extended help -for the current dispatch system. +""" + struct DefaultStable <: Algorithm end -# Extended Help +`DefaultStable` is an algorithm which indicates that a fast, general purpose sorting +algorithm should be used, but does not specify exactly which algorithm. -`DEFAULT_STABLE` is composed of two parts: the [`InitialOptimizations`](@ref) and a hybrid -of Radix, Insertion, Counting, Quick sorts. +Currently, it is composed of two parts: the [`InitialOptimizations`](@ref) and a hybrid of +Radix, Insertion, Counting, Quick sorts. We begin with MissingOptimization because it has no runtime cost when it is not triggered and can enable other optimizations to be applied later. For example, @@ -1549,7 +1543,39 @@ stage. Finally, if the input has length less than 80, we dispatch to [`InsertionSort`](@ref) and otherwise we dispatch to [`ScratchQuickSort`](@ref). """ -const DEFAULT_STABLE = InitialOptimizations( +struct DefaultStable <: Algorithm end + +""" + DEFAULT_STABLE + +The default sorting algorithm. + +This algorithm is guaranteed to be stable (i.e. it will not reorder elements that compare +equal). It makes an effort to be fast for most inputs. + +The algorithms used by `DEFAULT_STABLE` are an implementation detail. See the docstring +of `Base.Sort.DefaultStable` for the current dispatch system. +""" +const DEFAULT_STABLE = DefaultStable() + +""" + DefaultUnstable <: Algorithm + +Like [`DefaultStable`](@ref), but does not guarantee stability. +""" +struct DefaultUnstable <: Algorithm end + +""" + DEFAULT_UNSTABLE + +An efficient sorting algorithm which may or may not be stable. + +The algorithms used by `DEFAULT_UNSTABLE` are an implementation detail. They are currently +the same as those used by [`DEFAULT_STABLE`](@ref), but this is subject to change in future. 
+""" +const DEFAULT_UNSTABLE = DefaultUnstable() + +const _DEFAULT_ALGORITHMS_FOR_VECTORS = InitialOptimizations( IsUIntMappable( Small{40}( CheckSorted( @@ -1560,15 +1586,10 @@ const DEFAULT_STABLE = InitialOptimizations( ScratchQuickSort())))))), StableCheckSorted( ScratchQuickSort()))) -""" - DEFAULT_UNSTABLE -An efficient sorting algorithm. +_sort!(v::AbstractVector, ::Union{DefaultStable, DefaultUnstable}, o::Ordering, kw) = + _sort!(v, _DEFAULT_ALGORITHMS_FOR_VECTORS, o, kw) -The algorithms used by `DEFAULT_UNSTABLE` are an implementation detail. They are currently -the same as those used by [`DEFAULT_STABLE`](@ref), but this is subject to change in future. -""" -const DEFAULT_UNSTABLE = DEFAULT_STABLE const SMALL_THRESHOLD = 20 function Base.show(io::IO, alg::Algorithm) diff --git a/test/sorting.jl b/test/sorting.jl index f12486b9c9b40..71af50429027a 100644 --- a/test/sorting.jl +++ b/test/sorting.jl @@ -819,9 +819,9 @@ end let requires_uint_mappable = Union{Base.Sort.RadixSort, Base.Sort.ConsiderRadixSort, Base.Sort.CountingSort, Base.Sort.ConsiderCountingSort, - typeof(Base.Sort.DEFAULT_STABLE.next.next.next.big.next.yes), - typeof(Base.Sort.DEFAULT_STABLE.next.next.next.big.next.yes.big), - typeof(Base.Sort.DEFAULT_STABLE.next.next.next.big.next.yes.big.next)} + typeof(Base.Sort._DEFAULT_ALGORITHMS_FOR_VECTORS.next.next.next.big.next.yes), + typeof(Base.Sort._DEFAULT_ALGORITHMS_FOR_VECTORS.next.next.next.big.next.yes.big), + typeof(Base.Sort._DEFAULT_ALGORITHMS_FOR_VECTORS.next.next.next.big.next.yes.big.next)} function test_alg(kw, alg, float=true) for order in [Base.Forward, Base.Reverse, Base.By(x -> x^2)] @@ -861,15 +861,18 @@ end end end - test_alg_rec(Base.DEFAULT_STABLE) + test_alg_rec(Base.Sort._DEFAULT_ALGORITHMS_FOR_VECTORS) end end @testset "show(::Algorithm)" begin - @test eval(Meta.parse(string(Base.DEFAULT_STABLE))) === Base.DEFAULT_STABLE - lines = split(string(Base.DEFAULT_STABLE), '\n') + @test eval(Meta.parse(string(Base.Sort._DEFAULT_ALGORITHMS_FOR_VECTORS))) === Base.Sort._DEFAULT_ALGORITHMS_FOR_VECTORS + lines = split(string(Base.Sort._DEFAULT_ALGORITHMS_FOR_VECTORS), '\n') @test 10 < maximum(length, lines) < 100 @test 1 < length(lines) < 30 + + @test eval(Meta.parse(string(Base.DEFAULT_STABLE))) === Base.DEFAULT_STABLE + @test string(Base.DEFAULT_STABLE) == "Base.Sort.DefaultStable()" end @testset "Extensibility" begin From f6ebc4b2e87db68490a5c756dd4671c92ef8e789 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Wed, 27 Nov 2024 14:45:16 +0900 Subject: [PATCH 506/537] optimizer: handle `EnterNode` with `catch_dest == 0` (#56686) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In some parts of the optimizer code, such as `cfg_simplify!` and irinterp, it is assumed that `EnterNode` always has `catch_dest ≠ 0`, but this assumption is incorrect. This commit fixes those cases. 
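
For reference, the fixed path can be exercised with IR coming from a `ScopedValues.@with`
block that contains a `try`/`catch`, along the lines of the new tests below. This is only a
sketch, not part of the change itself: it assumes `Compiler` refers to the bundled compiler
module (`Core.Compiler`), and that the `@with`/`try` combination lowers to an `EnterNode`
whose `catch_dest` is 0, as in the `f53521` tests.

```julia
using Base.ScopedValues: ScopedValue, @with
const Compiler = Core.Compiler  # assumption: use the bundled compiler module

function with_scope_no_catch()
    v = ScopedValue(1)
    # The scoped-value block introduces an EnterNode without a catch destination.
    @with v => 2 begin
        try
            sin(v[])
        catch
            nothing
        end
    end
end

let (ir, rt) = only(Base.code_ircode(with_scope_no_catch, ()))
    Compiler.verify_ir(ir)
    Compiler.cfg_simplify!(ir)  # before this change, cfg_simplify! assumed a nonzero catch_dest
    Compiler.verify_ir(ir)
end
```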
--- Compiler/src/ssair/irinterp.jl | 6 ++++-- Compiler/src/ssair/passes.jl | 6 ++++-- Compiler/test/irpasses.jl | 29 ++++++++++++++++++++++++++++- 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/Compiler/src/ssair/irinterp.jl b/Compiler/src/ssair/irinterp.jl index e96d27a85bc37..a4969e81828cc 100644 --- a/Compiler/src/ssair/irinterp.jl +++ b/Compiler/src/ssair/irinterp.jl @@ -249,8 +249,10 @@ function process_terminator!(@nospecialize(stmt), bb::Int, bb_ip::BitSetBoundedM return backedge elseif isa(stmt, EnterNode) dest = stmt.catch_dest - @assert dest > bb - push!(bb_ip, dest) + if dest ≠ 0 + @assert dest > bb + push!(bb_ip, dest) + end push!(bb_ip, bb+1) return false else diff --git a/Compiler/src/ssair/passes.jl b/Compiler/src/ssair/passes.jl index e61f3207fc07a..ff333b9b0a129 100644 --- a/Compiler/src/ssair/passes.jl +++ b/Compiler/src/ssair/passes.jl @@ -2393,8 +2393,10 @@ function cfg_simplify!(ir::IRCode) end elseif isa(terminator, EnterNode) catchbb = terminator.catch_dest - if bb_rename_succ[catchbb] == 0 - push!(worklist, catchbb) + if catchbb ≠ 0 + if bb_rename_succ[catchbb] == 0 + push!(worklist, catchbb) + end end elseif isa(terminator, GotoNode) || isa(terminator, ReturnNode) # No implicit fall through. Schedule from work list. diff --git a/Compiler/test/irpasses.jl b/Compiler/test/irpasses.jl index 412ff3b98cb19..e9d6f57337530 100644 --- a/Compiler/test/irpasses.jl +++ b/Compiler/test/irpasses.jl @@ -1816,7 +1816,34 @@ function f53521() end end end -@test code_typed(f53521)[1][2] === Nothing +let (ir,rt) = only(Base.code_ircode(f53521, ())) + @test rt == Nothing + Compiler.verify_ir(ir) + Compiler.cfg_simplify!(ir) + Compiler.verify_ir(ir) +end + +Base.@assume_effects :foldable Base.@constprop :aggressive function f53521(x::Int, ::Int) + VALUE = ScopedValue(x) + @with VALUE => 2 begin + for i = 1 + @with VALUE => 3 begin + local v + try + v = sin(VALUE[]) + catch + v = nothing + end + return v + end + end + end +end +let (ir,rt) = only(Base.code_ircode((Int,)) do y + f53521(1, y) + end) + @test rt == Union{Nothing,Float64} +end # Test that adce_pass! sets Refined on PhiNode values let code = Any[ From 42807311db112b0116c3e0923845808efec0fed4 Mon Sep 17 00:00:00 2001 From: Gabriel Baraldi Date: Wed, 27 Nov 2024 10:59:32 -0300 Subject: [PATCH 507/537] Pass `JULIA_NUM_THREADS=1` to the sysimage builder (#56695) Having multiple threads isn't suported during the build since the scheduler doesn't exist at this point. Fixes https://github.com/JuliaLang/julia/issues/56533 --- sysimage.mk | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sysimage.mk b/sysimage.mk index 5371fbd975025..a74aace4dd11c 100644 --- a/sysimage.mk +++ b/sysimage.mk @@ -65,14 +65,14 @@ RELDATADIR := $(call rel_path,$(JULIAHOME)/base,$(build_datarootdir))/ # <-- mak $(build_private_libdir)/basecompiler.ji: $(COMPILER_SRCS) @$(call PRINT_JULIA, cd $(JULIAHOME)/base && \ - $(call spawn,$(JULIA_EXECUTABLE)) -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp \ + JULIA_NUM_THREADS=1 $(call spawn,$(JULIA_EXECUTABLE)) -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp \ --startup-file=no --warn-overwrite=yes -g$(BOOTSTRAP_DEBUG_LEVEL) -O1 Base_compiler.jl --buildroot $(RELBUILDROOT) --dataroot $(RELDATADIR)) @mv $@.tmp $@ $(build_private_libdir)/sys.ji: $(build_private_libdir)/basecompiler.ji $(JULIAHOME)/VERSION $(BASE_SRCS) $(STDLIB_SRCS) @$(call PRINT_JULIA, cd $(JULIAHOME)/base && \ if ! 
JULIA_BINDIR=$(call cygpath_w,$(build_bindir)) WINEPATH="$(call cygpath_w,$(build_bindir));$$WINEPATH" \ - $(call spawn, $(JULIA_EXECUTABLE)) -g1 -O1 -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp $(JULIA_SYSIMG_BUILD_FLAGS) \ + JULIA_NUM_THREADS=1 $(call spawn, $(JULIA_EXECUTABLE)) -g1 -O1 -C "$(JULIA_CPU_TARGET)" $(HEAPLIM) --output-ji $(call cygpath_w,$@).tmp $(JULIA_SYSIMG_BUILD_FLAGS) \ --startup-file=no --warn-overwrite=yes --sysimage $(call cygpath_w,$<) sysimg.jl --buildroot $(RELBUILDROOT) --dataroot $(RELDATADIR); then \ echo '*** This error might be fixed by running `make clean`. If the error persists$(COMMA) try `make cleanall`. ***'; \ false; \ From 68fa4a94421d4cb6ee406d31697f05b2def6aeef Mon Sep 17 00:00:00 2001 From: Erik Schnetter Date: Wed, 27 Nov 2024 19:01:18 -0500 Subject: [PATCH 508/537] deps/cacert: Update to 2024-11-26 (#56697) --- deps/checksums/cacert-2024-03-11.pem/md5 | 1 - deps/checksums/cacert-2024-03-11.pem/sha512 | 1 - deps/checksums/cacert-2024-11-26.pem/md5 | 1 + deps/checksums/cacert-2024-11-26.pem/sha512 | 1 + deps/libgit2.version | 2 +- stdlib/MozillaCACerts_jll/Project.toml | 2 +- 6 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 deps/checksums/cacert-2024-03-11.pem/md5 delete mode 100644 deps/checksums/cacert-2024-03-11.pem/sha512 create mode 100644 deps/checksums/cacert-2024-11-26.pem/md5 create mode 100644 deps/checksums/cacert-2024-11-26.pem/sha512 diff --git a/deps/checksums/cacert-2024-03-11.pem/md5 b/deps/checksums/cacert-2024-03-11.pem/md5 deleted file mode 100644 index 618b6c74efdd4..0000000000000 --- a/deps/checksums/cacert-2024-03-11.pem/md5 +++ /dev/null @@ -1 +0,0 @@ -594084120d27f482b1dc48f558d12d48 diff --git a/deps/checksums/cacert-2024-03-11.pem/sha512 b/deps/checksums/cacert-2024-03-11.pem/sha512 deleted file mode 100644 index 441b8e84707b0..0000000000000 --- a/deps/checksums/cacert-2024-03-11.pem/sha512 +++ /dev/null @@ -1 +0,0 @@ -31f03cc19566d007c4cffdad2ada71d99b4734ad7b13bc4f30d73d321f40cbe13b87a801aa61d9788207a851cc1f95a8af8ac732a372d45edb932f204bce3744 diff --git a/deps/checksums/cacert-2024-11-26.pem/md5 b/deps/checksums/cacert-2024-11-26.pem/md5 new file mode 100644 index 0000000000000..865c6abf3e77a --- /dev/null +++ b/deps/checksums/cacert-2024-11-26.pem/md5 @@ -0,0 +1 @@ +92c13373d7dbe43bdc167479274a43e2 diff --git a/deps/checksums/cacert-2024-11-26.pem/sha512 b/deps/checksums/cacert-2024-11-26.pem/sha512 new file mode 100644 index 0000000000000..d51605348faf4 --- /dev/null +++ b/deps/checksums/cacert-2024-11-26.pem/sha512 @@ -0,0 +1 @@ +26c6fa1ac7bcfd523f9ab9e6c2d971103ccfc610ad0df504d4e9b064dad74576d77240c052b808f4c37c9240302a7e973a20f79ee39ac7bf3201a6fa9f0dfa96 diff --git a/deps/libgit2.version b/deps/libgit2.version index d51beb34c27f5..ae475f0b3644f 100644 --- a/deps/libgit2.version +++ b/deps/libgit2.version @@ -11,4 +11,4 @@ LIBGIT2_SHA1=d74d491481831ddcd23575d376e56d2197e95910 # The versions of cacert.pem are identified by the date (YYYY-MM-DD) of their changes. # See https://curl.haxx.se/docs/caextract.html for more details. # Keep in sync with `stdlib/MozillaCACerts_jll/Project.toml`. 
-MOZILLA_CACERT_VERSION := 2024-03-11 +MOZILLA_CACERT_VERSION := 2024-11-26 diff --git a/stdlib/MozillaCACerts_jll/Project.toml b/stdlib/MozillaCACerts_jll/Project.toml index 181171a4c04c1..5df9bd5949972 100644 --- a/stdlib/MozillaCACerts_jll/Project.toml +++ b/stdlib/MozillaCACerts_jll/Project.toml @@ -1,7 +1,7 @@ name = "MozillaCACerts_jll" uuid = "14a3606d-f60d-562e-9121-12d972cd8159" # Keep in sync with `deps/libgit2.version`. -version = "2024.03.11" +version = "2024.11.26" [extras] Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" From e50f1ce0f12ab4f6e6bbb719251ae678fb1f3665 Mon Sep 17 00:00:00 2001 From: Jakob Nybo Nissen Date: Thu, 28 Nov 2024 04:13:07 +0100 Subject: [PATCH 509/537] Make Threads.Condition public (#56503) Co-authored-by: Dilum Aluthge --- base/threadingconstructs.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/base/threadingconstructs.jl b/base/threadingconstructs.jl index a21d708b4a077..07ff814af1570 100644 --- a/base/threadingconstructs.jl +++ b/base/threadingconstructs.jl @@ -3,6 +3,8 @@ export threadid, nthreads, @threads, @spawn, threadpool, nthreadpools +public Condition + """ Threads.threadid([t::Task]) -> Int From 79d8d3f764e7fde6da323d1063293855612691fd Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Wed, 27 Nov 2024 22:13:34 -0500 Subject: [PATCH 510/537] support passing a specific Method to invoke (#56692) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Mosè Giordano <765740+giordano@users.noreply.github.com> --- Compiler/src/abstractinterpretation.jl | 57 +++++++++++++++----------- Compiler/src/abstractlattice.jl | 2 +- Compiler/src/utilities.jl | 4 +- NEWS.md | 1 + base/docs/basedocs.jl | 15 ++++++- src/builtins.c | 30 +++++++++----- test/core.jl | 7 ++++ 7 files changed, 77 insertions(+), 39 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index a3abbf814165a..5946adf80ad52 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -856,8 +856,7 @@ end struct InvokeCall types # ::Type - lookupsig # ::Type - InvokeCall(@nospecialize(types), @nospecialize(lookupsig)) = new(types, lookupsig) + InvokeCall(@nospecialize(types)) = new(types) end struct ConstCallResult @@ -2218,26 +2217,38 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt ft′ = argtype_by_index(argtypes, 2) ft = widenconst(ft′) ft === Bottom && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) - (types, isexact, isconcrete, istype) = instanceof_tfunc(argtype_by_index(argtypes, 3), false) - isexact || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) - unwrapped = unwrap_unionall(types) - types === Bottom && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) - if !(unwrapped isa DataType && unwrapped.name === Tuple.name) - return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) - end - argtype = argtypes_to_type(argtype_tail(argtypes, 4)) - nargtype = typeintersect(types, argtype) - nargtype === Bottom && return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) - nargtype isa DataType || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # other cases are not implemented below - isdispatchelem(ft) || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # check that we might not have a subtype of `ft` at runtime, before doing supertype lookup below - ft = ft::DataType - lookupsig = rewrap_unionall(Tuple{ft, 
unwrapped.parameters...}, types)::Type - nargtype = Tuple{ft, nargtype.parameters...} - argtype = Tuple{ft, argtype.parameters...} - matched, valid_worlds = findsup(lookupsig, method_table(interp)) - matched === nothing && return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) - update_valid_age!(sv, valid_worlds) - method = matched.method + types = argtype_by_index(argtypes, 3) + if types isa Const && types.val isa Method + method = types.val::Method + types = method # argument value + lookupsig = method.sig # edge kind + argtype = argtypes_to_type(pushfirst!(argtype_tail(argtypes, 4), ft)) + nargtype = typeintersect(lookupsig, argtype) + nargtype === Bottom && return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) + nargtype isa DataType || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # other cases are not implemented below + else + widenconst(types) >: Method && return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) + (types, isexact, isconcrete, istype) = instanceof_tfunc(argtype_by_index(argtypes, 3), false) + isexact || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) + unwrapped = unwrap_unionall(types) + types === Bottom && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) + if !(unwrapped isa DataType && unwrapped.name === Tuple.name) + return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) + end + argtype = argtypes_to_type(argtype_tail(argtypes, 4)) + nargtype = typeintersect(types, argtype) + nargtype === Bottom && return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) + nargtype isa DataType || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # other cases are not implemented below + isdispatchelem(ft) || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # check that we might not have a subtype of `ft` at runtime, before doing supertype lookup below + ft = ft::DataType + lookupsig = rewrap_unionall(Tuple{ft, unwrapped.parameters...}, types)::Type + nargtype = Tuple{ft, nargtype.parameters...} + argtype = Tuple{ft, argtype.parameters...} + matched, valid_worlds = findsup(lookupsig, method_table(interp)) + matched === nothing && return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) + update_valid_age!(sv, valid_worlds) + method = matched.method + end tienv = ccall(:jl_type_intersection_with_env, Any, (Any, Any), nargtype, method.sig)::SimpleVector ti = tienv[1] env = tienv[2]::SimpleVector @@ -2245,7 +2256,7 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt match = MethodMatch(ti, env, method, argtype <: method.sig) ft′_box = Core.Box(ft′) lookupsig_box = Core.Box(lookupsig) - invokecall = InvokeCall(types, lookupsig) + invokecall = InvokeCall(types) return Future{CallMeta}(mresult, interp, sv) do result, interp, sv (; rt, exct, effects, edge, volatile_inf_result) = result local ft′ = ft′_box.contents diff --git a/Compiler/src/abstractlattice.jl b/Compiler/src/abstractlattice.jl index 645c865d085b3..c1f3050739170 100644 --- a/Compiler/src/abstractlattice.jl +++ b/Compiler/src/abstractlattice.jl @@ -229,7 +229,7 @@ end if isa(t, Const) # don't consider mutable values useful constants val = t.val - return isa(val, Symbol) || isa(val, Type) || !ismutable(val) + return isa(val, Symbol) || isa(val, Type) || isa(val, Method) || !ismutable(val) end isa(t, PartialTypeVar) && return false # this isn't forwardable return is_const_prop_profitable_arg(widenlattice(𝕃), t) diff --git a/Compiler/src/utilities.jl 
b/Compiler/src/utilities.jl index 11d926f0c9d4e..29f3dfa4afd4a 100644 --- a/Compiler/src/utilities.jl +++ b/Compiler/src/utilities.jl @@ -54,8 +54,8 @@ function count_const_size(@nospecialize(x), count_self::Bool = true) # No definite size (isa(x, GenericMemory) || isa(x, String) || isa(x, SimpleVector)) && return MAX_INLINE_CONST_SIZE + 1 - if isa(x, Module) - # We allow modules, because we already assume they are externally + if isa(x, Module) || isa(x, Method) + # We allow modules and methods, because we already assume they are externally # rooted, so we count their contents as 0 size. return sizeof(Ptr{Cvoid}) end diff --git a/NEWS.md b/NEWS.md index 535d14208f0b8..61bad831e261c 100644 --- a/NEWS.md +++ b/NEWS.md @@ -119,6 +119,7 @@ New library features * `Base.require_one_based_indexing` and `Base.has_offset_axes` are now public ([#56196]) * New `ltruncate`, `rtruncate` and `ctruncate` functions for truncating strings to text width, accounting for char widths ([#55351]) * `isless` (and thus `cmp`, sorting, etc.) is now supported for zero-dimensional `AbstractArray`s ([#55772]) +* `invoke` now supports passing a Method instead of a type signature making this interface somewhat more flexible for certain uncommon use cases ([#56692]). Standard library changes ------------------------ diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index c872244964160..5119ceaf2164a 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -2030,21 +2030,32 @@ applicable """ invoke(f, argtypes::Type, args...; kwargs...) + invoke(f, argtypes::Method, args...; kwargs...) Invoke a method for the given generic function `f` matching the specified types `argtypes` on the specified arguments `args` and passing the keyword arguments `kwargs`. The arguments `args` must conform with the specified types in `argtypes`, i.e. conversion is not automatically performed. This method allows invoking a method other than the most specific matching method, which is useful when the behavior of a more general definition is explicitly needed (often as part of the -implementation of a more specific method of the same function). +implementation of a more specific method of the same function). However, because this means +the runtime must do more work, `invoke` is generally also slower--sometimes significantly +so--than doing normal dispatch with a regular call. -Be careful when using `invoke` for functions that you don't write. What definition is used +Be careful when using `invoke` for functions that you don't write. What definition is used for given `argtypes` is an implementation detail unless the function is explicitly states that calling with certain `argtypes` is a part of public API. For example, the change between `f1` and `f2` in the example below is usually considered compatible because the change is invisible by the caller with a normal (non-`invoke`) call. However, the change is visible if you use `invoke`. +# Passing a `Method` instead of a signature +The `argtypes` argument may be a `Method`, in which case the ordinary method table lookup is +bypassed entirely and the given method is invoked directly. Needing this feature is uncommon. +Note in particular that the specified `Method` may be entirely unreachable from ordinary dispatch +(or ordinary invoke), e.g. because it was replaced or fully covered by more specific methods. +If the method is part of the ordinary method table, this call behaves similar +to `invoke(f, method.sig, args...)`. 
+ # Examples ```jldoctest julia> f(x::Real) = x^2; diff --git a/src/builtins.c b/src/builtins.c index b129cca0ee71d..c6b0bf130550b 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -931,22 +931,27 @@ JL_CALLABLE(jl_f__call_in_world_total) // tuples --------------------------------------------------------------------- -JL_CALLABLE(jl_f_tuple) +static jl_value_t *arg_tuple(jl_value_t *a1, jl_value_t **args, size_t nargs) { size_t i; - if (nargs == 0) - return (jl_value_t*)jl_emptytuple; - jl_datatype_t *tt = jl_inst_arg_tuple_type(args[0], &args[1], nargs, 0); + jl_datatype_t *tt = jl_inst_arg_tuple_type(a1, args, nargs, 0); JL_GC_PROMISE_ROOTED(tt); // it is a concrete type if (tt->instance != NULL) return tt->instance; jl_task_t *ct = jl_current_task; jl_value_t *jv = jl_gc_alloc(ct->ptls, jl_datatype_size(tt), tt); for (i = 0; i < nargs; i++) - set_nth_field(tt, jv, i, args[i], 0); + set_nth_field(tt, jv, i, i == 0 ? a1 : args[i - 1], 0); return jv; } +JL_CALLABLE(jl_f_tuple) +{ + if (nargs == 0) + return (jl_value_t*)jl_emptytuple; + return arg_tuple(args[0], &args[1], nargs); +} + JL_CALLABLE(jl_f_svec) { size_t i; @@ -1577,14 +1582,17 @@ JL_CALLABLE(jl_f_invoke) { JL_NARGSV(invoke, 2); jl_value_t *argtypes = args[1]; - JL_GC_PUSH1(&argtypes); - if (!jl_is_tuple_type(jl_unwrap_unionall(args[1]))) - jl_type_error("invoke", (jl_value_t*)jl_anytuple_type_type, args[1]); + if (jl_is_method(argtypes)) { + jl_method_t *m = (jl_method_t*)argtypes; + if (!jl_tuple1_isa(args[0], &args[2], nargs - 1, (jl_datatype_t*)m->sig)) + jl_type_error("invoke: argument type error", argtypes, arg_tuple(args[0], &args[2], nargs - 1)); + return jl_gf_invoke_by_method(m, args[0], &args[2], nargs - 1); + } + if (!jl_is_tuple_type(jl_unwrap_unionall(argtypes))) + jl_type_error("invoke", (jl_value_t*)jl_anytuple_type_type, argtypes); if (!jl_tuple_isa(&args[2], nargs - 2, (jl_datatype_t*)argtypes)) jl_type_error("invoke: argument type error", argtypes, jl_f_tuple(NULL, &args[2], nargs - 2)); - jl_value_t *res = jl_gf_invoke(argtypes, args[0], &args[2], nargs - 1); - JL_GC_POP(); - return res; + return jl_gf_invoke(argtypes, args[0], &args[2], nargs - 1); } // Expr constructor for internal use ------------------------------------------ diff --git a/test/core.jl b/test/core.jl index 836532d661638..39d02d5d567c9 100644 --- a/test/core.jl +++ b/test/core.jl @@ -8352,3 +8352,10 @@ macro define_call(sym) end @test eval(Expr(:toplevel, :(@define_call(f_macro_defined1)))) == 1 @test @define_call(f_macro_defined2) == 1 + +let m = which(+, (Int, Int)) + @eval f56692(i) = invoke(+, $m, i, 4) + global g56692() = f56692(5) == 9 ? 
"true" : false +end +@test @inferred(f56692(3)) == 7 +@test @inferred(g56692()) == "true" From 7e2e0aeca207e865c72c42282fb86db949854839 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Wed, 27 Nov 2024 22:15:24 -0500 Subject: [PATCH 511/537] Add mechanism to disable creating new worlds (#56639) --- base/experimental.jl | 9 +++ src/gf.c | 152 ++++++++++++++++++++++++++++------------- src/staticdata.c | 32 ++++++--- src/staticdata_utils.c | 142 +++++++++++++++++++++----------------- src/toplevel.c | 2 +- test/misc.jl | 36 +++++++--- 6 files changed, 243 insertions(+), 130 deletions(-) diff --git a/base/experimental.jl b/base/experimental.jl index 31238d4015b3b..411bb2407cdc5 100644 --- a/base/experimental.jl +++ b/base/experimental.jl @@ -494,4 +494,13 @@ function entrypoint(@nospecialize(argt::Type)) nothing end +""" + Base.Experimental.disable_new_worlds() + +Mark that no new worlds (methods additions, deletions, etc) are permitted to be created at +any future time, allowing for lower latencies for some operations and slightly lower memory +usage, by eliminating the tracking of those possible invalidation. +""" +disable_new_worlds() = ccall(:jl_disable_new_worlds, Cvoid, ()) + end diff --git a/src/gf.c b/src/gf.c index 90b874d614b0c..bbf065a4fac0d 100644 --- a/src/gf.c +++ b/src/gf.c @@ -24,6 +24,7 @@ extern "C" { #endif +_Atomic(int) allow_new_worlds = 1; JL_DLLEXPORT _Atomic(size_t) jl_world_counter = 1; // uses atomic acquire/release jl_mutex_t world_counter_lock; JL_DLLEXPORT size_t jl_get_world_counter(void) JL_NOTSAFEPOINT @@ -1819,38 +1820,42 @@ static void invalidate_backedges(jl_method_instance_t *replaced_mi, size_t max_w // add a backedge from callee to caller JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, jl_value_t *invokesig, jl_code_instance_t *caller) { + if (!jl_atomic_load_relaxed(&allow_new_worlds)) + return; if (invokesig == jl_nothing) invokesig = NULL; // julia uses `nothing` but C uses NULL (#undef) assert(jl_is_method_instance(callee)); assert(jl_is_code_instance(caller)); assert(invokesig == NULL || jl_is_type(invokesig)); JL_LOCK(&callee->def.method->writelock); - int found = 0; - // TODO: use jl_cache_type_(invokesig) like cache_method does to save memory - if (!callee->backedges) { - // lazy-init the backedges array - callee->backedges = jl_alloc_vec_any(0); - jl_gc_wb(callee, callee->backedges); - } - else { - size_t i = 0, l = jl_array_nrows(callee->backedges); - for (i = 0; i < l; i++) { - // optimized version of while (i < l) i = get_next_edge(callee->backedges, i, &invokeTypes, &mi); - jl_value_t *mi = jl_array_ptr_ref(callee->backedges, i); - if (mi != (jl_value_t*)caller) - continue; - jl_value_t *invokeTypes = i > 0 ? 
jl_array_ptr_ref(callee->backedges, i - 1) : NULL; - if (invokeTypes && jl_is_method_instance(invokeTypes)) - invokeTypes = NULL; - if ((invokesig == NULL && invokeTypes == NULL) || - (invokesig && invokeTypes && jl_types_equal(invokesig, invokeTypes))) { - found = 1; - break; + if (jl_atomic_load_relaxed(&allow_new_worlds)) { + int found = 0; + // TODO: use jl_cache_type_(invokesig) like cache_method does to save memory + if (!callee->backedges) { + // lazy-init the backedges array + callee->backedges = jl_alloc_vec_any(0); + jl_gc_wb(callee, callee->backedges); + } + else { + size_t i = 0, l = jl_array_nrows(callee->backedges); + for (i = 0; i < l; i++) { + // optimized version of while (i < l) i = get_next_edge(callee->backedges, i, &invokeTypes, &mi); + jl_value_t *mi = jl_array_ptr_ref(callee->backedges, i); + if (mi != (jl_value_t*)caller) + continue; + jl_value_t *invokeTypes = i > 0 ? jl_array_ptr_ref(callee->backedges, i - 1) : NULL; + if (invokeTypes && jl_is_method_instance(invokeTypes)) + invokeTypes = NULL; + if ((invokesig == NULL && invokeTypes == NULL) || + (invokesig && invokeTypes && jl_types_equal(invokesig, invokeTypes))) { + found = 1; + break; + } } } + if (!found) + push_edge(callee->backedges, invokesig, caller); } - if (!found) - push_edge(callee->backedges, invokesig, caller); JL_UNLOCK(&callee->def.method->writelock); } @@ -1858,37 +1863,41 @@ JL_DLLEXPORT void jl_method_instance_add_backedge(jl_method_instance_t *callee, JL_DLLEXPORT void jl_method_table_add_backedge(jl_methtable_t *mt, jl_value_t *typ, jl_code_instance_t *caller) { assert(jl_is_code_instance(caller)); + if (!jl_atomic_load_relaxed(&allow_new_worlds)) + return; JL_LOCK(&mt->writelock); - if (!mt->backedges) { - // lazy-init the backedges array - mt->backedges = jl_alloc_vec_any(2); - jl_gc_wb(mt, mt->backedges); - jl_array_ptr_set(mt->backedges, 0, typ); - jl_array_ptr_set(mt->backedges, 1, caller); - } - else { - // check if the edge is already present and avoid adding a duplicate - size_t i, l = jl_array_nrows(mt->backedges); - for (i = 1; i < l; i += 2) { - if (jl_array_ptr_ref(mt->backedges, i) == (jl_value_t*)caller) { - if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) { - JL_UNLOCK(&mt->writelock); - return; + if (jl_atomic_load_relaxed(&allow_new_worlds)) { + if (!mt->backedges) { + // lazy-init the backedges array + mt->backedges = jl_alloc_vec_any(2); + jl_gc_wb(mt, mt->backedges); + jl_array_ptr_set(mt->backedges, 0, typ); + jl_array_ptr_set(mt->backedges, 1, caller); + } + else { + // check if the edge is already present and avoid adding a duplicate + size_t i, l = jl_array_nrows(mt->backedges); + for (i = 1; i < l; i += 2) { + if (jl_array_ptr_ref(mt->backedges, i) == (jl_value_t*)caller) { + if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) { + JL_UNLOCK(&mt->writelock); + return; + } } } - } - // reuse an already cached instance of this type, if possible - // TODO: use jl_cache_type_(tt) like cache_method does, instead of this linear scan? - for (i = 1; i < l; i += 2) { - if (jl_array_ptr_ref(mt->backedges, i) != (jl_value_t*)caller) { - if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) { - typ = jl_array_ptr_ref(mt->backedges, i - 1); - break; + // reuse an already cached instance of this type, if possible + // TODO: use jl_cache_type_(tt) like cache_method does, instead of this linear scan? 
+ for (i = 1; i < l; i += 2) { + if (jl_array_ptr_ref(mt->backedges, i) != (jl_value_t*)caller) { + if (jl_types_equal(jl_array_ptr_ref(mt->backedges, i - 1), typ)) { + typ = jl_array_ptr_ref(mt->backedges, i - 1); + break; + } } } + jl_array_ptr_1d_push(mt->backedges, typ); + jl_array_ptr_1d_push(mt->backedges, (jl_value_t*)caller); } - jl_array_ptr_1d_push(mt->backedges, typ); - jl_array_ptr_1d_push(mt->backedges, (jl_value_t*)caller); } JL_UNLOCK(&mt->writelock); } @@ -2024,10 +2033,55 @@ static void jl_method_table_invalidate(jl_methtable_t *mt, jl_method_t *replaced } } +static int erase_method_backedges(jl_typemap_entry_t *def, void *closure) +{ + jl_method_t *method = def->func.method; + JL_LOCK(&method->writelock); + jl_value_t *specializations = jl_atomic_load_relaxed(&method->specializations); + if (jl_is_svec(specializations)) { + size_t i, l = jl_svec_len(specializations); + for (i = 0; i < l; i++) { + jl_method_instance_t *mi = (jl_method_instance_t*)jl_svecref(specializations, i); + if ((jl_value_t*)mi != jl_nothing) { + mi->backedges = NULL; + } + } + } + else { + jl_method_instance_t *mi = (jl_method_instance_t*)specializations; + mi->backedges = NULL; + } + JL_UNLOCK(&method->writelock); + return 1; +} + +static int erase_all_backedges(jl_methtable_t *mt, void *env) +{ + // removes all method caches + // this might not be entirely safe (GC or MT), thus we only do it very early in bootstrapping + JL_LOCK(&mt->writelock); + mt->backedges = NULL; + JL_UNLOCK(&mt->writelock); + jl_typemap_visitor(jl_atomic_load_relaxed(&mt->defs), erase_method_backedges, env); + return 1; +} + +JL_DLLEXPORT void jl_disable_new_worlds(void) +{ + if (jl_generating_output()) + jl_error("Disabling Method changes is not possible when generating output."); + JL_LOCK(&world_counter_lock); + jl_atomic_store_relaxed(&allow_new_worlds, 0); + JL_UNLOCK(&world_counter_lock); + jl_foreach_reachable_mtable(erase_all_backedges, (void*)NULL); +} + JL_DLLEXPORT void jl_method_table_disable(jl_methtable_t *mt, jl_method_t *method) { jl_typemap_entry_t *methodentry = do_typemap_search(mt, method); JL_LOCK(&world_counter_lock); + if (!jl_atomic_load_relaxed(&allow_new_worlds)) + jl_error("Method changes have been disabled via a call to disable_new_worlds."); JL_LOCK(&mt->writelock); // Narrow the world age on the method to make it uncallable size_t world = jl_atomic_load_relaxed(&jl_world_counter); @@ -2341,6 +2395,8 @@ JL_DLLEXPORT void jl_method_table_insert(jl_methtable_t *mt, jl_method_t *method jl_typemap_entry_t *newentry = jl_method_table_add(mt, method, simpletype); JL_GC_PUSH1(&newentry); JL_LOCK(&world_counter_lock); + if (!jl_atomic_load_relaxed(&allow_new_worlds)) + jl_error("Method changes have been disabled via a call to disable_new_worlds."); size_t world = jl_atomic_load_relaxed(&jl_world_counter) + 1; jl_atomic_store_relaxed(&method->primary_world, world); jl_atomic_store_relaxed(&method->deleted_world, ~(size_t)0); diff --git a/src/staticdata.c b/src/staticdata.c index c2e8d7e3956ea..f5fe088ce1c98 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -89,6 +89,7 @@ External links: #include "julia_assert.h" static const size_t WORLD_AGE_REVALIDATION_SENTINEL = 0x1; +size_t jl_require_world = ~(size_t)0; #include "staticdata_utils.c" #include "precompile_utils.c" @@ -2678,7 +2679,6 @@ jl_genericmemory_t *jl_global_roots_list; jl_genericmemory_t *jl_global_roots_keyset; jl_mutex_t global_roots_lock; extern jl_mutex_t world_counter_lock; -extern size_t jl_require_world; jl_mutex_t 
precompile_field_replace_lock; jl_svec_t *precompile_field_replace JL_GLOBALLY_ROOTED; @@ -4044,16 +4044,30 @@ static jl_value_t *jl_restore_package_image_from_stream(void* pkgimage_handle, i // Add roots to methods jl_copy_roots(method_roots_list, jl_worklist_key((jl_array_t*)restored)); // Insert method extensions and handle edges + int new_methods = jl_array_nrows(extext_methods) > 0; + if (!new_methods) { + size_t i, l = jl_array_nrows(internal_methods); + for (i = 0; i < l; i++) { + jl_value_t *obj = jl_array_ptr_ref(internal_methods, i); + if (jl_is_method(obj)) { + new_methods = 1; + break; + } + } + } JL_LOCK(&world_counter_lock); - // allocate a world for the new methods, and insert them there, invalidating content as needed - size_t world = jl_atomic_load_relaxed(&jl_world_counter) + 1; - jl_activate_methods(extext_methods, internal_methods, world); - // TODO: inject new_ext_cis into caches here, so the system can see them immediately as potential candidates (before validation) - // allow users to start running in this updated world - jl_atomic_store_release(&jl_world_counter, world); - // now permit more methods to be added again + // allocate a world for the new methods, and insert them there, invalidating content as needed + size_t world = jl_atomic_load_relaxed(&jl_world_counter); + if (new_methods) + world += 1; + jl_activate_methods(extext_methods, internal_methods, world, pkgname); + // TODO: inject new_ext_cis into caches here, so the system can see them immediately as potential candidates (before validation) + // allow users to start running in this updated world + if (new_methods) + jl_atomic_store_release(&jl_world_counter, world); + // now permit more methods to be added again JL_UNLOCK(&world_counter_lock); - // but one of those immediate users is going to be our cache insertions + // but one of those immediate users is going to be our cache insertions jl_insert_backedges((jl_array_t*)edges, (jl_array_t*)new_ext_cis); // restore existing caches (needs to be last) // reinit ccallables jl_reinit_ccallable(&ccallable_list, base, pkgimage_handle); diff --git a/src/staticdata_utils.c b/src/staticdata_utils.c index 32e59d7d7c641..ea0b7216155bd 100644 --- a/src/staticdata_utils.c +++ b/src/staticdata_utils.c @@ -704,10 +704,12 @@ static void jl_add_methods(jl_array_t *external) } } -static void jl_activate_methods(jl_array_t *external, jl_array_t *internal, size_t world) +extern _Atomic(int) allow_new_worlds; +static void jl_activate_methods(jl_array_t *external, jl_array_t *internal, size_t world, const char *pkgname) { size_t i, l = jl_array_nrows(internal); for (i = 0; i < l; i++) { + // allow_new_worlds doesn't matter here, since we aren't actually changing anything external jl_value_t *obj = jl_array_ptr_ref(internal, i); if (jl_typetagis(obj, jl_typemap_entry_type)) { jl_typemap_entry_t *entry = (jl_typemap_entry_t*)obj; @@ -735,11 +737,17 @@ static void jl_activate_methods(jl_array_t *external, jl_array_t *internal, size } } l = jl_array_nrows(external); - for (i = 0; i < l; i++) { - jl_typemap_entry_t *entry = (jl_typemap_entry_t*)jl_array_ptr_ref(external, i); - jl_methtable_t *mt = jl_method_get_table(entry->func.method); - assert((jl_value_t*)mt != jl_nothing); - jl_method_table_activate(mt, entry); + if (l) { + if (!jl_atomic_load_relaxed(&allow_new_worlds)) { + jl_printf(JL_STDERR, "WARNING: Method changes for %s have been disabled via a call to disable_new_worlds.\n", pkgname); + return; + } + for (i = 0; i < l; i++) { + jl_typemap_entry_t *entry = 
(jl_typemap_entry_t*)jl_array_ptr_ref(external, i); + jl_methtable_t *mt = jl_method_get_table(entry->func.method); + assert((jl_value_t*)mt != jl_nothing); + jl_method_table_activate(mt, entry); + } } } @@ -864,71 +872,80 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t *minworld, size size_t depth = stack->len; *bp = (char*)HT_NOTFOUND + depth; JL_TIMING(VERIFY_IMAGE, VERIFY_Methods); - jl_value_t *loctag = NULL; - jl_value_t *sig = NULL; - jl_value_t *matches = NULL; - JL_GC_PUSH3(&loctag, &matches, &sig); jl_svec_t *callees = jl_atomic_load_relaxed(&codeinst->edges); assert(jl_is_svec((jl_value_t*)callees)); // verify current edges - for (size_t j = 0; j < jl_svec_len(callees); ) { - jl_value_t *edge = jl_svecref(callees, j); - size_t min_valid2; - size_t max_valid2; - assert(!jl_is_method(edge)); // `Method`-edge isn't allowed for the optimized one-edge format - if (jl_is_code_instance(edge)) - edge = (jl_value_t*)((jl_code_instance_t*)edge)->def; - if (jl_is_method_instance(edge)) { - jl_method_instance_t *mi = (jl_method_instance_t*)edge; - sig = jl_type_intersection(mi->def.method->sig, (jl_value_t*)mi->specTypes); // TODO: ?? - verify_call(sig, callees, j, 1, world, &min_valid2, &max_valid2, &matches); - sig = NULL; - j += 1; - } - else if (jl_is_long(edge)) { - jl_value_t *sig = jl_svecref(callees, j + 1); - size_t nedges = jl_unbox_long(edge); - verify_call(sig, callees, j + 2, nedges, world, &min_valid2, &max_valid2, &matches); - j += 2 + nedges; - edge = sig; - } - else { - jl_method_instance_t *callee = (jl_method_instance_t*)jl_svecref(callees, j + 1); - jl_method_t *meth; - if (jl_is_mtable(callee)) { - // skip the legacy edge (missing backedge) - j += 2; - continue; + if (callees == jl_emptysvec) { + // quick return: no edges to verify (though we probably shouldn't have gotten here from WORLD_AGE_REVALIDATION_SENTINEL) + } + else if (*maxworld == jl_require_world) { + // if no new worlds were allocated since serializing the base module, then no new validation is worth doing right now either + *minworld = *maxworld; + } + else { + jl_value_t *loctag = NULL; + jl_value_t *sig = NULL; + jl_value_t *matches = NULL; + JL_GC_PUSH3(&loctag, &matches, &sig); + for (size_t j = 0; j < jl_svec_len(callees); ) { + jl_value_t *edge = jl_svecref(callees, j); + size_t min_valid2; + size_t max_valid2; + assert(!jl_is_method(edge)); // `Method`-edge isn't allowed for the optimized one-edge format + if (jl_is_code_instance(edge)) + edge = (jl_value_t*)((jl_code_instance_t*)edge)->def; + if (jl_is_method_instance(edge)) { + jl_method_instance_t *mi = (jl_method_instance_t*)edge; + sig = jl_type_intersection(mi->def.method->sig, (jl_value_t*)mi->specTypes); // TODO: ?? 
+ verify_call(sig, callees, j, 1, world, &min_valid2, &max_valid2, &matches); + sig = NULL; + j += 1; } - if (jl_is_code_instance(callee)) - callee = ((jl_code_instance_t*)callee)->def; - if (jl_is_method_instance(callee)) { - meth = callee->def.method; + else if (jl_is_long(edge)) { + jl_value_t *sig = jl_svecref(callees, j + 1); + size_t nedges = jl_unbox_long(edge); + verify_call(sig, callees, j + 2, nedges, world, &min_valid2, &max_valid2, &matches); + j += 2 + nedges; + edge = sig; } else { - assert(jl_is_method(callee)); - meth = (jl_method_t*)callee; + jl_method_instance_t *callee = (jl_method_instance_t*)jl_svecref(callees, j + 1); + jl_method_t *meth; + if (jl_is_mtable(callee)) { + // skip the legacy edge (missing backedge) + j += 2; + continue; + } + if (jl_is_code_instance(callee)) + callee = ((jl_code_instance_t*)callee)->def; + if (jl_is_method_instance(callee)) { + meth = callee->def.method; + } + else { + assert(jl_is_method(callee)); + meth = (jl_method_t*)callee; + } + verify_invokesig(edge, meth, world, &min_valid2, &max_valid2); + j += 2; } - verify_invokesig(edge, meth, world, &min_valid2, &max_valid2); - j += 2; - } - if (*minworld < min_valid2) - *minworld = min_valid2; - if (*maxworld > max_valid2) - *maxworld = max_valid2; - if (max_valid2 != ~(size_t)0 && _jl_debug_method_invalidation) { - jl_array_ptr_1d_push(_jl_debug_method_invalidation, edge); - loctag = jl_cstr_to_string("insert_backedges_callee"); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)codeinst); - jl_array_ptr_1d_push(_jl_debug_method_invalidation, matches); + if (*minworld < min_valid2) + *minworld = min_valid2; + if (*maxworld > max_valid2) + *maxworld = max_valid2; + if (max_valid2 != ~(size_t)0 && _jl_debug_method_invalidation) { + jl_array_ptr_1d_push(_jl_debug_method_invalidation, edge); + loctag = jl_cstr_to_string("insert_backedges_callee"); + jl_array_ptr_1d_push(_jl_debug_method_invalidation, loctag); + jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)codeinst); + jl_array_ptr_1d_push(_jl_debug_method_invalidation, matches); + } + //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)edge); + //ios_puts(max_valid2 == ~(size_t)0 ? "valid\n" : "INVALID\n", ios_stderr); + if (max_valid2 == 0 && !_jl_debug_method_invalidation) + break; } - //jl_static_show((JL_STREAM*)ios_stderr, (jl_value_t*)edge); - //ios_puts(max_valid2 == ~(size_t)0 ? 
"valid\n" : "INVALID\n", ios_stderr); - if (max_valid2 == 0 && !_jl_debug_method_invalidation) - break; + JL_GC_POP(); } - JL_GC_POP(); // verify recursive edges (if valid, or debugging) size_t cycle = depth; jl_code_instance_t *cause = codeinst; @@ -979,6 +996,7 @@ static int jl_verify_method(jl_code_instance_t *codeinst, size_t *minworld, size assert(*bp == (char*)HT_NOTFOUND + stack->len + 1); *bp = HT_NOTFOUND; if (_jl_debug_method_invalidation && *maxworld < current_world) { + jl_value_t *loctag; jl_array_ptr_1d_push(_jl_debug_method_invalidation, (jl_value_t*)child); loctag = jl_cstr_to_string("verify_methods"); JL_GC_PUSH1(&loctag); diff --git a/src/toplevel.c b/src/toplevel.c index 56a5f21f43661..e8286a584b119 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -496,7 +496,7 @@ static void body_attributes(jl_array_t *body, int *has_ccall, int *has_defs, int *forced_compile = jl_has_meta(body, jl_force_compile_sym); } -size_t jl_require_world = ~(size_t)0; +extern size_t jl_require_world; static jl_module_t *call_require(jl_module_t *mod, jl_sym_t *var) JL_GLOBALLY_ROOTED { JL_TIMING(LOAD_IMAGE, LOAD_Require); diff --git a/test/misc.jl b/test/misc.jl index 7f9992e22a3d7..5bbc2c3c65fa2 100644 --- a/test/misc.jl +++ b/test/misc.jl @@ -1211,10 +1211,7 @@ include("testenv.jl") let flags = Cmd(filter(a->!occursin("depwarn", a), collect(test_exeflags))) local cmd = `$test_exename $flags --depwarn=yes deprecation_exec.jl` - - if !success(pipeline(cmd; stdout=stdout, stderr=stderr)) - error("Deprecation test failed, cmd : $cmd") - end + run(cmd, devnull) end # PR #23664, make sure names don't get added to the default `Main` workspace @@ -1489,7 +1486,7 @@ end # Test that read fault on a prot-none region does not incorrectly give # ReadOnlyMemoryError, but rather crashes the program const MAP_ANONYMOUS_PRIVATE = Sys.isbsd() ? 0x1002 : 0x22 -let script = :( +let script = """ let ptr = Ptr{Cint}(ccall(:jl_mmap, Ptr{Cvoid}, (Ptr{Cvoid}, Csize_t, Cint, Cint, Cint, Int), C_NULL, 16*1024, 0, $MAP_ANONYMOUS_PRIVATE, -1, 0)) @@ -1499,19 +1496,24 @@ let script = :( println(e) end end - ) + """ cmd = if Sys.isunix() # Set the maximum core dump size to 0 to keep this expected crash from # producing a (and potentially overwriting an existing) core dump file - `sh -c "ulimit -c 0; $(Base.shell_escape(Base.julia_cmd())) -e '$script'"` + `sh -c "ulimit -c 0; $(Base.shell_escape(Base.julia_cmd())) -e $(Base.shell_escape(script))"` else - `$(Base.julia_cmd()) -e '$script'` + `$(Base.julia_cmd()) -e $script` + end + p = run(ignorestatus(cmd), devnull, stdout, devnull) + if p.termsignal == 0 + Sys.isunix() ? 
@test(p.exitcode ∈ (128+7, 128+10, 128+11)) : @test(p.exitcode != 0) # expect SIGBUS (7 on BSDs or 10 on Linux) or SIGSEGV (11) + else + @test(p.termsignal ∈ (7, 10, 11)) end - @test !success(cmd) end # issue #41656 -@test success(`$(Base.julia_cmd()) -e 'isempty(x) = true'`) +run(`$(Base.julia_cmd()) -e 'isempty(x) = true'`) @testset "Base/timing.jl" begin @test Base.jit_total_bytes() >= 0 @@ -1596,3 +1598,17 @@ end end @test !occursin("loop not unrolled", out_err) end + +let errs = IOBuffer() + run(`$(Base.julia_cmd()) -e ' + using Test + @test isdefined(DataType.name.mt, :backedges) + Base.Experimental.disable_new_worlds() + @test_throws "disable_new_worlds" @eval f() = 1 + @test !isdefined(DataType.name.mt, :backedges) + @test_throws "disable_new_worlds" Base.delete_method(which(+, (Int, Int))) + @test 1+1 == 2 + using Dates + '`, devnull, stdout, errs) + @test occursin("disable_new_worlds", String(take!(errs))) +end From 680803c33b45864c14b4f55a1cbda65a6a52b9e0 Mon Sep 17 00:00:00 2001 From: Jeff Bezanson Date: Wed, 27 Nov 2024 22:15:52 -0500 Subject: [PATCH 512/537] get trimming test working again (#56689) --- contrib/juliac.jl | 19 ++++++++----------- src/staticdata.c | 1 + test/trimming/Makefile | 2 +- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/contrib/juliac.jl b/contrib/juliac.jl index 0f008976d2b4f..20d56615c6357 100644 --- a/contrib/juliac.jl +++ b/contrib/juliac.jl @@ -4,7 +4,6 @@ cmd = Base.julia_cmd() cmd = `$cmd --startup-file=no --history-file=no` output_type = nothing # exe, sharedlib, sysimage -trim = nothing outname = nothing file = nothing add_ccallables = false @@ -15,13 +14,16 @@ if help !== nothing println( """ Usage: julia juliac.jl [--output-exe | --output-lib | --output-sysimage] [options] - --trim= Only output code statically determined to be reachable + --experimental --trim= Only output code statically determined to be reachable --compile-ccallable Include all methods marked `@ccallable` in output --verbose Request verbose output """) exit(0) end +# arguments to forward to julia compilation process +julia_args = [] + let i = 1 while i <= length(ARGS) arg = ARGS[i] @@ -31,17 +33,13 @@ let i = 1 i == length(ARGS) && error("Output specifier requires an argument") global outname = ARGS[i+1] i += 1 - elseif startswith(arg, "--trim") - arg = split(arg, '=') - if length(arg) == 1 - global trim = "safe" - else - global trim = arg[2] - end elseif arg == "--compile-ccallable" global add_ccallables = true elseif arg == "--verbose" global verbose = true + elseif startswith(arg, "--trim") || arg == "--experimental" + # forwarded args + push!(julia_args, arg) else if arg[1] == '-' || !isnothing(file) println("Unexpected argument `$arg`") @@ -79,8 +77,7 @@ open(initsrc_path, "w") do io """) end -static_call_graph_arg() = isnothing(trim) ? 
`` : `--trim=$(trim)` -cmd = addenv(`$cmd --project=$(Base.active_project()) --output-o $img_path --output-incremental=no --strip-ir --strip-metadata $(static_call_graph_arg()) $(joinpath(@__DIR__,"juliac-buildscript.jl")) $absfile $output_type $add_ccallables`, "OPENBLAS_NUM_THREADS" => 1, "JULIA_NUM_THREADS" => 1) +cmd = addenv(`$cmd --project=$(Base.active_project()) --output-o $img_path --output-incremental=no --strip-ir --strip-metadata $julia_args $(joinpath(@__DIR__,"juliac-buildscript.jl")) $absfile $output_type $add_ccallables`, "OPENBLAS_NUM_THREADS" => 1, "JULIA_NUM_THREADS" => 1) verbose && println("Running: $cmd") if !success(pipeline(cmd; stdout, stderr)) println(stderr, "\nFailed to compile $file") diff --git a/src/staticdata.c b/src/staticdata.c index f5fe088ce1c98..78cfa85695076 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -2578,6 +2578,7 @@ static void strip_specializations_(jl_method_instance_t *mi) if (inferred && inferred != jl_nothing) { if (jl_options.strip_ir) { record_field_change((jl_value_t**)&codeinst->inferred, jl_nothing); + record_field_change((jl_value_t**)&codeinst->edges, (jl_value_t*)jl_emptysvec); } else if (jl_options.strip_metadata) { jl_value_t *stripped = strip_codeinfo_meta(mi->def.method, inferred, codeinst); diff --git a/test/trimming/Makefile b/test/trimming/Makefile index c6e105d637013..d2da21eb71a88 100644 --- a/test/trimming/Makefile +++ b/test/trimming/Makefile @@ -33,7 +33,7 @@ LDFLAGS_ADD = -lm $(shell $(JULIA_CONFIG) --ldflags --ldlibs) -ljulia-internal release: hello$(EXE) hello.o: $(SRCDIR)/hello.jl $(BUILDSCRIPT) - $(JULIA) -t 1 -J $(BIN)/../lib/julia/sys.so --startup-file=no --history-file=no --output-o $@ --output-incremental=no --strip-ir --strip-metadata --trim $(BUILDSCRIPT) $(SRCDIR)/hello.jl --output-exe true + $(JULIA) -t 1 -J $(BIN)/../lib/julia/sys.so --startup-file=no --history-file=no --output-o $@ --output-incremental=no --strip-ir --strip-metadata --experimental --trim $(BUILDSCRIPT) $(SRCDIR)/hello.jl --output-exe true init.o: $(SRCDIR)/init.c $(CC) -c -o $@ $< $(CPPFLAGS_ADD) $(CPPFLAGS) $(CFLAGS_ADD) $(CFLAGS) From 60f0057e03dbf5158c29c8fe6b4bc8e59fbde735 Mon Sep 17 00:00:00 2001 From: CY Han Date: Thu, 28 Nov 2024 11:19:15 +0800 Subject: [PATCH 513/537] doc: fix docstring for `Dates.format` (#56682) --- stdlib/Dates/src/io.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stdlib/Dates/src/io.jl b/stdlib/Dates/src/io.jl index 388edb693d76f..aa7019566093c 100644 --- a/stdlib/Dates/src/io.jl +++ b/stdlib/Dates/src/io.jl @@ -713,7 +713,7 @@ except that it does not truncate values longer than the width. When creating a `format` you can use any non-code characters as a separator. For example to generate the string "1996-01-15T00:00:00" you could use `format`: "yyyy-mm-ddTHH:MM:SS". Note that if you need to use a code character as a literal you can use the escape character -backslash. The string "1996y01m" can be produced with the format "yyyy\\ymm\\m". +backslash. The string "1996y01m" can be produced with the format raw"yyyy\\ymm\\m". 
""" function format(dt::TimeType, f::AbstractString; locale::Locale=ENGLISH) format(dt, DateFormat(f, locale)) From f5dc26b9d30ab4dcd52e0ff29e6b469f1f0c007c Mon Sep 17 00:00:00 2001 From: Cody Tapscott <84105208+topolarity@users.noreply.github.com> Date: Wed, 27 Nov 2024 22:24:22 -0500 Subject: [PATCH 514/537] Make a failed extension load throw an error during pre-compilation (#56668) Co-authored-by: Ian Butterworth --- base/loading.jl | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/base/loading.jl b/base/loading.jl index 0a70564077692..8ed43e4539c20 100644 --- a/base/loading.jl +++ b/base/loading.jl @@ -1587,9 +1587,14 @@ function run_extension_callbacks(extid::ExtensionId) true catch # Try to continue loading if loading an extension errors - errs = current_exceptions() - @error "Error during loading of extension $(extid.id.name) of $(extid.parentid.name), \ + if JLOptions().incremental != 0 + # during incremental precompilation, this should be fail-fast + rethrow() + else + errs = current_exceptions() + @error "Error during loading of extension $(extid.id.name) of $(extid.parentid.name), \ use `Base.retry_load_extensions()` to retry." exception=errs + end false finally global loading_extension = false From 9162b141b7638ffe6db577ae4a91761fc33eb0a5 Mon Sep 17 00:00:00 2001 From: Tim Besard Date: Thu, 28 Nov 2024 08:51:16 +0100 Subject: [PATCH 515/537] Remove arraylist_t from external native code APIs. (#56693) This makes them usable for external consumers like GPUCompiler.jl. --- src/aotcompile.cpp | 56 ++++++++++++++++++++++++++++++-------------- src/julia_internal.h | 8 ++++--- src/staticdata.c | 18 ++++++++++---- 3 files changed, 58 insertions(+), 24 deletions(-) diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index 6af5227aafd92..7b3c771f9dc12 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -95,33 +95,55 @@ void jl_get_function_id_impl(void *native_code, jl_code_instance_t *codeinst, } } -extern "C" JL_DLLEXPORT_CODEGEN -void jl_get_llvm_mis_impl(void *native_code, arraylist_t* MIs) +extern "C" JL_DLLEXPORT_CODEGEN void +jl_get_llvm_mis_impl(void *native_code, size_t *num_elements, jl_method_instance_t **data) { - jl_native_code_desc_t *data = (jl_native_code_desc_t*)native_code; - auto map = data->jl_fvar_map; + jl_native_code_desc_t *desc = (jl_native_code_desc_t *)native_code; + auto &map = desc->jl_fvar_map; + + if (data == NULL) { + *num_elements = map.size(); + return; + } + + assert(*num_elements == map.size()); + size_t i = 0; for (auto &ci : map) { - jl_method_instance_t *mi = ci.first->def; - arraylist_push(MIs, mi); + data[i++] = ci.first->def; } } -extern "C" JL_DLLEXPORT_CODEGEN -void jl_get_llvm_gvs_impl(void *native_code, arraylist_t *gvs) +extern "C" JL_DLLEXPORT_CODEGEN void jl_get_llvm_gvs_impl(void *native_code, + size_t *num_elements, void **data) { // map a memory location (jl_value_t or jl_binding_t) to a GlobalVariable - jl_native_code_desc_t *data = (jl_native_code_desc_t*)native_code; - arraylist_grow(gvs, data->jl_value_to_llvm.size()); - memcpy(gvs->items, data->jl_value_to_llvm.data(), gvs->len * sizeof(void*)); + jl_native_code_desc_t *desc = (jl_native_code_desc_t *)native_code; + auto &value_map = desc->jl_value_to_llvm; + + if (data == NULL) { + *num_elements = value_map.size(); + return; + } + + assert(*num_elements == value_map.size()); + memcpy(data, value_map.data(), *num_elements * sizeof(void *)); } -extern "C" JL_DLLEXPORT_CODEGEN -void jl_get_llvm_external_fns_impl(void *native_code, arraylist_t 
*external_fns) +extern "C" JL_DLLEXPORT_CODEGEN void jl_get_llvm_external_fns_impl(void *native_code, + size_t *num_elements, + jl_code_instance_t *data) { - jl_native_code_desc_t *data = (jl_native_code_desc_t*)native_code; - arraylist_grow(external_fns, data->jl_external_to_llvm.size()); - memcpy(external_fns->items, data->jl_external_to_llvm.data(), - external_fns->len * sizeof(jl_code_instance_t*)); + jl_native_code_desc_t *desc = (jl_native_code_desc_t *)native_code; + auto &external_map = desc->jl_external_to_llvm; + + if (data == NULL) { + *num_elements = external_map.size(); + return; + } + + assert(*num_elements == external_map.size()); + memcpy((void *)data, (const void *)external_map.data(), + *num_elements * sizeof(jl_code_instance_t *)); } extern "C" JL_DLLEXPORT_CODEGEN diff --git a/src/julia_internal.h b/src/julia_internal.h index ca3f63b274968..e081c94329deb 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -1951,13 +1951,15 @@ JL_DLLIMPORT void *jl_create_native(jl_array_t *methods, LLVMOrcThreadSafeModule JL_DLLIMPORT void jl_dump_native(void *native_code, const char *bc_fname, const char *unopt_bc_fname, const char *obj_fname, const char *asm_fname, ios_t *z, ios_t *s, jl_emission_params_t *params); -JL_DLLIMPORT void jl_get_llvm_gvs(void *native_code, arraylist_t *gvs); -JL_DLLIMPORT void jl_get_llvm_external_fns(void *native_code, arraylist_t *gvs); +JL_DLLIMPORT void jl_get_llvm_gvs(void *native_code, size_t *num_els, void **gvs); +JL_DLLIMPORT void jl_get_llvm_external_fns(void *native_code, size_t *num_els, + jl_code_instance_t *gvs); JL_DLLIMPORT void jl_get_function_id(void *native_code, jl_code_instance_t *ncode, int32_t *func_idx, int32_t *specfunc_idx); JL_DLLIMPORT void jl_register_fptrs(uint64_t image_base, const struct _jl_image_fptrs_t *fptrs, jl_method_instance_t **linfos, size_t n); -JL_DLLIMPORT void jl_get_llvm_mis(void *native_code, arraylist_t* MIs); +JL_DLLIMPORT void jl_get_llvm_mis(void *native_code, size_t *num_els, + jl_method_instance_t *MIs); JL_DLLIMPORT void jl_init_codegen(void); JL_DLLIMPORT void jl_teardown_codegen(void) JL_NOTSAFEPOINT; JL_DLLIMPORT int jl_getFunctionInfo(jl_frame_t **frames, uintptr_t pointer, int skipC, int noInline) JL_NOTSAFEPOINT; diff --git a/src/staticdata.c b/src/staticdata.c index 78cfa85695076..65584c015e228 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -2897,10 +2897,20 @@ static void jl_save_system_image_to_stream(ios_t *f, jl_array_t *mod_array, int en = jl_gc_enable(0); if (native_functions) { - jl_get_llvm_gvs(native_functions, &gvars); - jl_get_llvm_external_fns(native_functions, &external_fns); - if (jl_options.trim) - jl_get_llvm_mis(native_functions, &MIs); + size_t num_gvars, num_external_fns; + jl_get_llvm_gvs(native_functions, &num_gvars, NULL); + arraylist_grow(&gvars, num_gvars); + jl_get_llvm_gvs(native_functions, &num_gvars, gvars.items); + jl_get_llvm_external_fns(native_functions, &num_external_fns, NULL); + arraylist_grow(&external_fns, num_external_fns); + jl_get_llvm_external_fns(native_functions, &num_external_fns, + (jl_code_instance_t *)external_fns.items); + if (jl_options.trim) { + size_t num_mis; + jl_get_llvm_mis(native_functions, &num_mis, NULL); + arraylist_grow(&MIs, num_mis); + jl_get_llvm_mis(native_functions, &num_mis, (jl_method_instance_t *)MIs.items); + } } if (jl_options.trim) { jl_rebuild_methtables(&MIs, &new_methtables); From 447dc2d8e9a71669c6a3f62ff8ac71054c811a94 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 28 Nov 2024 07:34:29 -0500 
Subject: [PATCH 516/537] fix world_age_at_entry in codegen (#56700) --- src/ccall.cpp | 18 ++++-------- src/codegen.cpp | 73 ++++++++++++++++++++++++++----------------------- src/toplevel.c | 5 +--- 3 files changed, 46 insertions(+), 50 deletions(-) diff --git a/src/ccall.cpp b/src/ccall.cpp index 952625a71287b..707203bd13506 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -1712,18 +1712,12 @@ static jl_cgval_t emit_ccall(jl_codectx_t &ctx, jl_value_t **args, size_t nargs) return ghostValue(ctx, jl_nothing_type); } else if (is_libjulia_func(jl_get_tls_world_age)) { - bool toplevel = !(ctx.linfo && jl_is_method(ctx.linfo->def.method)); - if (!toplevel) { // top level code does not see a stable world age during execution - ++CCALL_STAT(jl_get_tls_world_age); - assert(lrt == ctx.types().T_size); - assert(!isVa && !llvmcall && nccallargs == 0); - JL_GC_POP(); - Instruction *world_age = cast(ctx.world_age_at_entry); - setName(ctx.emission_context, world_age, "task_world_age"); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); - ai.decorateInst(world_age); - return mark_or_box_ccall_result(ctx, world_age, retboxed, rt, unionall, static_rt); - } + ++CCALL_STAT(jl_get_tls_world_age); + assert(lrt == ctx.types().T_size); + assert(!isVa && !llvmcall && nccallargs == 0); + JL_GC_POP(); + Value *world_age = get_tls_world_age(ctx); + return mark_or_box_ccall_result(ctx, world_age, retboxed, rt, unionall, static_rt); } else if (is_libjulia_func(jl_get_world_counter)) { ++CCALL_STAT(jl_get_world_counter); diff --git a/src/codegen.cpp b/src/codegen.cpp index 3645a0b25827e..ebb96837f4db5 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -1985,7 +1985,7 @@ class jl_codectx_t { Value *pgcstack = NULL; Instruction *topalloca = NULL; - Value *world_age_at_entry = NULL; // Not valid to use in toplevel code + Value *world_age_at_entry = NULL; bool use_cache = false; bool external_linkage = false; @@ -2115,6 +2115,7 @@ static jl_cgval_t emit_sparam(jl_codectx_t &ctx, size_t i); static Value *emit_condition(jl_codectx_t &ctx, const jl_cgval_t &condV, const Twine &msg); static Value *get_current_task(jl_codectx_t &ctx); static Value *get_current_ptls(jl_codectx_t &ctx); +static Value *get_tls_world_age(jl_codectx_t &ctx); static Value *get_scope_field(jl_codectx_t &ctx); static Value *get_tls_world_age_field(jl_codectx_t &ctx); static void CreateTrap(IRBuilder<> &irbuilder, bool create_new_block = true); @@ -7044,11 +7045,7 @@ static jl_cgval_t emit_expr(jl_codectx_t &ctx, jl_value_t *expr, ssize_t ssaidx_ std::tie(F, specF) = get_oc_function(ctx, (jl_method_t*)source.constant, (jl_tupletype_t*)env_t, argt_typ, ub.constant); if (F) { jl_cgval_t jlcall_ptr = mark_julia_type(ctx, F, false, jl_voidpointer_type); - jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); - bool not_toplevel = (ctx.linfo && jl_is_method(ctx.linfo->def.method)); - Instruction *I = not_toplevel ? 
cast(ctx.world_age_at_entry) : - ctx.builder.CreateAlignedLoad(ctx.types().T_size, get_tls_world_age_field(ctx), ctx.types().alignof_ptr); - jl_cgval_t world_age = mark_julia_type(ctx, ai.decorateInst(I), false, jl_long_type); + jl_cgval_t world_age = mark_julia_type(ctx, get_tls_world_age(ctx), false, jl_long_type); jl_cgval_t fptr; if (specF) fptr = mark_julia_type(ctx, specF, false, jl_voidpointer_type); @@ -7207,6 +7204,25 @@ static Value *get_tls_world_age_field(jl_codectx_t &ctx) return emit_ptrgep(ctx, ct, offsetof(jl_task_t, world_age), "world_age"); } +// Get the value of the world age of the current task +static Value *get_tls_world_age(jl_codectx_t &ctx) +{ + if (ctx.world_age_at_entry) + return ctx.world_age_at_entry; + IRBuilderBase::InsertPointGuard IP(ctx.builder); + bool toplevel = !jl_is_method(ctx.linfo->def.method); + if (!toplevel) { + ctx.builder.SetInsertPoint(ctx.topalloca->getParent(), ++ctx.topalloca->getIterator()); + ctx.builder.SetCurrentDebugLocation(ctx.topalloca->getStableDebugLoc()); + } + jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); + auto *world = ctx.builder.CreateAlignedLoad(ctx.types().T_size, get_tls_world_age_field(ctx), ctx.types().alignof_ptr); + ai.decorateInst(world); + if (!toplevel) + ctx.world_age_at_entry = world; + return world; +} + static Value *get_scope_field(jl_codectx_t &ctx) { Value *ct = get_current_task(ctx); @@ -7524,9 +7540,8 @@ static Function* gen_cfun_wrapper( auto world_age_field = get_tls_world_age_field(ctx); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); - Value *last_age = ai.decorateInst( + ctx.world_age_at_entry = ai.decorateInst( ctx.builder.CreateAlignedLoad(ctx.types().T_size, world_age_field, ctx.types().alignof_ptr)); - ctx.world_age_at_entry = last_age; Value *world_v = ctx.builder.CreateAlignedLoad(ctx.types().T_size, prepare_global_in(jl_Module, jlgetworld_global), ctx.types().alignof_ptr); cast(world_v)->setOrdering(AtomicOrdering::Acquire); @@ -7808,7 +7823,7 @@ static Function* gen_cfun_wrapper( r = NULL; } - ctx.builder.CreateStore(last_age, world_age_field); + ctx.builder.CreateStore(ctx.world_age_at_entry, world_age_field); ctx.builder.CreateRet(r); ctx.builder.SetCurrentDebugLocation(noDbg); @@ -8418,7 +8433,6 @@ static jl_llvm_functions_t ctx.source = src; std::map labels; - bool toplevel = false; ctx.module = jl_is_method(lam->def.method) ? lam->def.method->module : lam->def.module; ctx.linfo = lam; ctx.name = TSM.getModuleUnlocked()->getModuleIdentifier().data(); @@ -8438,7 +8452,6 @@ static jl_llvm_functions_t if (vn != jl_unused_sym) ctx.vaSlot = ctx.nargs - 1; } - toplevel = !jl_is_method(lam->def.method); ctx.rettype = jlrettype; ctx.funcName = ctx.name; ctx.spvals_ptr = NULL; @@ -8776,12 +8789,12 @@ static jl_llvm_functions_t // step 6. set up GC frame allocate_gc_frame(ctx, b0); Value *last_age = NULL; - auto world_age_field = get_tls_world_age_field(ctx); - { // scope + Value *world_age_field = NULL; + if (ctx.is_opaque_closure) { + world_age_field = get_tls_world_age_field(ctx); jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, ctx.tbaa().tbaa_gcframe); last_age = ai.decorateInst(ctx.builder.CreateAlignedLoad( ctx.types().T_size, world_age_field, ctx.types().alignof_ptr)); - ctx.world_age_at_entry = last_age; // Load world age for use in get_tls_world_age } // step 7. 
allocate local variables slots @@ -9005,6 +9018,7 @@ static jl_llvm_functions_t Value *worldaddr = emit_ptrgep(ctx, oc_this, offsetof(jl_opaque_closure_t, world)); jl_cgval_t closure_world = typed_load(ctx, worldaddr, NULL, (jl_value_t*)jl_long_type, nullptr, nullptr, false, AtomicOrdering::NotAtomic, false, ctx.types().alignof_ptr.value()); + assert(ctx.world_age_at_entry == nullptr); ctx.world_age_at_entry = closure_world.V; // The tls world in a OC is the world of the closure emit_unbox_store(ctx, closure_world, world_age_field, ctx.tbaa().tbaa_gcframe, ctx.types().alignof_ptr); @@ -9282,19 +9296,11 @@ static jl_llvm_functions_t Instruction &prologue_end = ctx.builder.GetInsertBlock()->back(); - // step 11a. For top-level code, load the world age - if (toplevel && !ctx.is_opaque_closure) { - LoadInst *world = ctx.builder.CreateAlignedLoad(ctx.types().T_size, - prepare_global_in(jl_Module, jlgetworld_global), ctx.types().alignof_ptr); - world->setOrdering(AtomicOrdering::Acquire); - ctx.builder.CreateAlignedStore(world, world_age_field, ctx.types().alignof_ptr); - } - - // step 11b. Emit the entry safepoint + // step 11a. Emit the entry safepoint if (JL_FEAT_TEST(ctx, safepoint_on_entry)) emit_gc_safepoint(ctx.builder, ctx.types().T_size, get_current_ptls(ctx), ctx.tbaa().tbaa_const); - // step 11c. Do codegen in control flow order + // step 11b. Do codegen in control flow order SmallVector workstack; std::map BB; std::map come_from_bb; @@ -9966,8 +9972,7 @@ static jl_llvm_functions_t Instruction *root = cast_or_null(ctx.slots[ctx.vaSlot].boxroot); if (root) { bool have_real_use = false; - for (Use &U : root->uses()) { - User *RU = U.getUser(); + for (User *RU : root->users()) { if (StoreInst *SRU = dyn_cast(RU)) { assert(isa(SRU->getValueOperand()) || SRU->getValueOperand() == restTuple); (void)SRU; @@ -9986,21 +9991,21 @@ static jl_llvm_functions_t } } if (!have_real_use) { - Instruction *use = NULL; - for (Use &U : root->uses()) { - if (use) // erase after the iterator moves on - use->eraseFromParent(); - User *RU = U.getUser(); - use = cast(RU); + for (User *RU : make_early_inc_range(root->users())) { + // This is safe because it checked above that each User is known and has at most one Use of root + cast(RU)->eraseFromParent(); } - if (use) - use->eraseFromParent(); root->eraseFromParent(); restTuple->eraseFromParent(); } } } + if (ctx.topalloca->use_empty()) { + ctx.topalloca->eraseFromParent(); + ctx.topalloca = nullptr; + } + // link the dependent llvmcall modules, but switch their function's linkage to internal // so that they don't conflict when they show up in the execution engine. Linker L(*jl_Module); diff --git a/src/toplevel.c b/src/toplevel.c index e8286a584b119..21737119af9a6 100644 --- a/src/toplevel.c +++ b/src/toplevel.c @@ -1050,10 +1050,7 @@ JL_DLLEXPORT jl_value_t *jl_toplevel_eval_flex(jl_module_t *JL_NONNULL m, jl_val // use codegen mfunc = jl_method_instance_for_thunk(thk, m); jl_resolve_globals_in_ir((jl_array_t*)thk->code, m, NULL, 0); - // Don't infer blocks containing e.g. method definitions, since it's probably not - // worthwhile and also unsound (see #24316). - // TODO: This is still not correct since an `eval` can happen elsewhere, but it - // helps in common cases. + // Don't infer blocks containing e.g. method definitions, since it's probably not worthwhile. 
size_t world = jl_atomic_load_acquire(&jl_world_counter); ct->world_age = world; if (!has_defs && jl_get_module_infer(m) != 0) { From 1ed2b98131cb7074cec5f556f222e8ad023285f9 Mon Sep 17 00:00:00 2001 From: Lilith Orion Hafner Date: Thu, 28 Nov 2024 07:43:03 -0600 Subject: [PATCH 517/537] Fix typo in `@cmd` docstring (#56664) I'm not sure what `` `cmd` `` could refer to, but it would make sense to refer to `` `str` `` in this case. I'm assuming it's a typo. --- base/cmd.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/base/cmd.jl b/base/cmd.jl index 84ec52f865e98..b46c8293cdf3c 100644 --- a/base/cmd.jl +++ b/base/cmd.jl @@ -491,7 +491,7 @@ end """ @cmd str -Similar to `cmd`, generate a `Cmd` from the `str` string which represents the shell command(s) to be executed. +Similar to ``` `str` ```, generate a `Cmd` from the `str` string which represents the shell command(s) to be executed. The [`Cmd`](@ref) object can be run as a process and can outlive the spawning julia process (see `Cmd` for more). # Examples From 5053a175274fc973e7d0479f4ee4452cb2865299 Mon Sep 17 00:00:00 2001 From: Erik Schnetter Date: Thu, 28 Nov 2024 14:40:10 -0500 Subject: [PATCH 518/537] deps/pcre: Update to version 10.44 (#56704) --- deps/checksums/pcre | 72 +++++++++++++++---------------- deps/pcre.version | 2 +- stdlib/PCRE2_jll/Project.toml | 2 +- stdlib/PCRE2_jll/test/runtests.jl | 2 +- 4 files changed, 39 insertions(+), 39 deletions(-) diff --git a/deps/checksums/pcre b/deps/checksums/pcre index 018ffd5201653..0c2732f8cc2b5 100644 --- a/deps/checksums/pcre +++ b/deps/checksums/pcre @@ -1,36 +1,36 @@ -PCRE2.v10.43.0+1.aarch64-apple-darwin.tar.gz/md5/f1bee27b8d9465c14eaf9362701fb795 -PCRE2.v10.43.0+1.aarch64-apple-darwin.tar.gz/sha512/33b8f6e3703f0a52cd2d57897c28e35fb3c63af459296a2fef4e414dc99239617833b2ab176068d6aab690122a34a9ab9b6042dfff54b5a30ad60429a809818d -PCRE2.v10.43.0+1.aarch64-linux-gnu.tar.gz/md5/c55a569260e302f315f4a1bd185346ab -PCRE2.v10.43.0+1.aarch64-linux-gnu.tar.gz/sha512/be4d2883e69d562898a157424b2baa146fe79545a8c10935cf25b54e498ca2c14fae026fa0d958d175895fe2cb695d0f96ef7f09fecbf54e1cee4a55b81a382b -PCRE2.v10.43.0+1.aarch64-linux-musl.tar.gz/md5/fb041ccace415ccc26263968c6435a47 -PCRE2.v10.43.0+1.aarch64-linux-musl.tar.gz/sha512/06672ebe18e0f6bfa1dd2d5c02e10d9fd67236a73fd38ee2e8f4496d98f297f7866760f0be3b9cebeca348a5d748a3719e416b84cec96a90c71eac55afbbd905 -PCRE2.v10.43.0+1.aarch64-unknown-freebsd.tar.gz/md5/8c73fe6faa94102616cfafcc6cc1bf9d -PCRE2.v10.43.0+1.aarch64-unknown-freebsd.tar.gz/sha512/464a892e646fb5aa028d2e96e6f8beaa0c15f0ef56a6ba3388cba4ce85151448b0dfd51357a3e8dea4505957394ffbab14ceb29b9fc73a67e2b2f54dd28a7aed -PCRE2.v10.43.0+1.armv6l-linux-gnueabihf.tar.gz/md5/4f303a4cbf26abb7bf4ffb8bfe3d636d -PCRE2.v10.43.0+1.armv6l-linux-gnueabihf.tar.gz/sha512/dddb3b227ee48d8329f6c65c5d0fce9f460eccaec98594a05bf28d1d9af01397cf7ef86c96e88b0e96030a7f6d8406461f78dd5fa558db8fc8f7bfb3b522ed54 -PCRE2.v10.43.0+1.armv6l-linux-musleabihf.tar.gz/md5/eade1fff90404bf3584fd15b62be0cfa -PCRE2.v10.43.0+1.armv6l-linux-musleabihf.tar.gz/sha512/351f6fa11c39b90fcc4086bd00b1b1126ed92272595f0b745757ca4e7e360c84d244446a871029245c3bcf838b23f42d908f858e44fae7deb9002a36cb76753c -PCRE2.v10.43.0+1.armv7l-linux-gnueabihf.tar.gz/md5/daa0a34b2cf0b71a6f8e1f9456cd4b06 -PCRE2.v10.43.0+1.armv7l-linux-gnueabihf.tar.gz/sha512/ae72956ae7a9a5f315bfc816fdbb500937a170dfea306a28289ec9eac57d883cf2fa5a467ce9406eea80546b632a272c63bbb48b89ebe6d9f69d30366fd84180 
-PCRE2.v10.43.0+1.armv7l-linux-musleabihf.tar.gz/md5/90bfb9e4efd7c92a2bb6a1a48fd88ecb -PCRE2.v10.43.0+1.armv7l-linux-musleabihf.tar.gz/sha512/147ac98d82fec4695de0c43c87d3d9242b9c024bc6df7ad7504d17ef6a12a029ed703c4deade0e2b24faf5283d66309f880d62f8c4834f27b2cc8889587d7abe -PCRE2.v10.43.0+1.i686-linux-gnu.tar.gz/md5/6fde649bf449c4122438fff32c0706ab -PCRE2.v10.43.0+1.i686-linux-gnu.tar.gz/sha512/edfaa15490497723c095eaa5df26194637b0606e9dce7b89b400024ef8ac42e21f010bb31c2cee5c735ce82fc8de0c42bf2b35b095a1e70a9a111d3bfba6da64 -PCRE2.v10.43.0+1.i686-linux-musl.tar.gz/md5/73aa8d13cc48338a5071e30b3a899109 -PCRE2.v10.43.0+1.i686-linux-musl.tar.gz/sha512/200e2d3ffd68f49b76c70a5be80cb0ae9703049214674485a2ab24abaaea7aefd6dec2042a14bd48cc52b04379f57322ec1e1788dc8c00896e1074921725d9cc -PCRE2.v10.43.0+1.i686-w64-mingw32.tar.gz/md5/4ddf0f31c97463e5216ed71afc4fb014 -PCRE2.v10.43.0+1.i686-w64-mingw32.tar.gz/sha512/75903d81668a66a5c4d830e31657391d507883943d86245998f224655406dcc6a95ba4f5fad20dcf608a98d6ccf49abe50107993448669b03c42a878d8466611 -PCRE2.v10.43.0+1.powerpc64le-linux-gnu.tar.gz/md5/64cb71080da1c97eba3a440ff53d298c -PCRE2.v10.43.0+1.powerpc64le-linux-gnu.tar.gz/sha512/16348b96a45c7a7d86775cb1d082b4d1c060e5a8acfb37554885d8da0db87430d8a40f834f008a90f4a7b1c07b8329df96836ba0430ecec506a143b7347bb101 -PCRE2.v10.43.0+1.x86_64-apple-darwin.tar.gz/md5/31bbb2485f5e06c3616fb061ffb2f022 -PCRE2.v10.43.0+1.x86_64-apple-darwin.tar.gz/sha512/3284ee63ed1e5631267efacb354a1d90bd1b7db0bc81d7233c9580eee4a9af06093c1c4f240786c34299df89a36a17ed92598fc302074f5a200c56cc96081bf1 -PCRE2.v10.43.0+1.x86_64-linux-gnu.tar.gz/md5/2fb7e0e9bbc32dddf543f4d395b50d3f -PCRE2.v10.43.0+1.x86_64-linux-gnu.tar.gz/sha512/5a533a3a01f817689077377835dc88edf914459ed0df7323f8f4dba602a47fd6af700075feb1f448221366b1cf7e2d717c615a5c506eb4ca2db9c600fd290fb0 -PCRE2.v10.43.0+1.x86_64-linux-musl.tar.gz/md5/b432063c93aa477dd0883428191041f8 -PCRE2.v10.43.0+1.x86_64-linux-musl.tar.gz/sha512/36475e90e29d7324046fe1da669fb37f667245a680df23f3978394964e14eb9bda3fd56703ad62cd56e27a5af77d8b6b9612516457ae803cef0627bd919e4628 -PCRE2.v10.43.0+1.x86_64-unknown-freebsd.tar.gz/md5/6124870a991e70c2ed8a64d8f3258760 -PCRE2.v10.43.0+1.x86_64-unknown-freebsd.tar.gz/sha512/4645a2d05af149467f2e4ce5e48853b57c585d6a5950c70726d04bc71a5d82f50809af141ad98e99671e764ac74965651ecad1c49a849caa8fd077c7f4911c7c -PCRE2.v10.43.0+1.x86_64-w64-mingw32.tar.gz/md5/cc4e9f45471f538c1fefa657ab99b878 -PCRE2.v10.43.0+1.x86_64-w64-mingw32.tar.gz/sha512/eed45e621263cb307b6e8ab42e2c12cf9e1d61ad523760fd721a85765c359b74d580752ca7c3d222e0cba26a74e872a6d43dbf2dbf08e4733a3e709417e48651 -pcre2-10.43.tar.bz2/md5/c8e2043cbc4abb80e76dba323f7c409f -pcre2-10.43.tar.bz2/sha512/8ac1520c32e9e5672404aaf6104e23c9ee5c3c28ad28ff101435599d813cbb20e0491a3fd34e012b4411b3e0366a4c6dfa3f02d093acaa6ff0ab25478bb7ade9 +PCRE2.v10.44.0+0.aarch64-apple-darwin.tar.gz/md5/14de26cfc0f6ff7635fac39e81e81a27 +PCRE2.v10.44.0+0.aarch64-apple-darwin.tar.gz/sha512/45079ecca5f4966a32895fcc63585f1dd60f306dc1cb5c098d42452fcff67f7f6b405c200a15747af4680151bb6a6374832a0119b8ddd743d2ed13d0beaef7c9 +PCRE2.v10.44.0+0.aarch64-linux-gnu.tar.gz/md5/3cf179ed36d37bff698ab81cf3d5797b +PCRE2.v10.44.0+0.aarch64-linux-gnu.tar.gz/sha512/db93e5a5c0c46b5536ed49515682d9bfe1d23f6ba8ae2468289ec8f2160140f39f5606a3c7095f45251f3663d8ccf2d6d7e5e8b1efb21c39bbf9a13b6ec60ef9 +PCRE2.v10.44.0+0.aarch64-linux-musl.tar.gz/md5/02baa415218f581a5ceeb7bf7fc0a090 
+PCRE2.v10.44.0+0.aarch64-linux-musl.tar.gz/sha512/1685f37ed8f465ecc2f738fdf65d20bb1806934ff2c50194882282fb6c3900121c61c39210e4c0b89847493bfc3e15bb7b9136b0d968103b47c8662a78b412fe +PCRE2.v10.44.0+0.aarch64-unknown-freebsd.tar.gz/md5/4de065ea59ab4f622b46079df1d9d941 +PCRE2.v10.44.0+0.aarch64-unknown-freebsd.tar.gz/sha512/aa6df9edfb690d155a8b5a9390db7ca11622ac0020174cf070a33a075801bfe43bd4c80b8e28017989a8b7374d39897cdcf72ab0e1962e3e234239975f7ac0b4 +PCRE2.v10.44.0+0.armv6l-linux-gnueabihf.tar.gz/md5/f8a0907fbb20a06507fce849db098c4f +PCRE2.v10.44.0+0.armv6l-linux-gnueabihf.tar.gz/sha512/3f5bcc1742380a31683a81740d55e198d7ec8d8ea5a13d6d0556d6603e4fadbf0dc648093c44e36dd6d3793c52a5e3dae6f2f459c73e3d3b5a005f3395d26772 +PCRE2.v10.44.0+0.armv6l-linux-musleabihf.tar.gz/md5/8854c24183441aa6fd21989c00888904 +PCRE2.v10.44.0+0.armv6l-linux-musleabihf.tar.gz/sha512/a74d9378f071dc4cb021e5171d66cd4ac5de3b348e993fc90d824ce5d2f554f7c8af7af55ec31d874d302aaba7d542b6505cc5963e53656c28026a06a53ed48b +PCRE2.v10.44.0+0.armv7l-linux-gnueabihf.tar.gz/md5/04960309ee7cf69a53e280878d5880ef +PCRE2.v10.44.0+0.armv7l-linux-gnueabihf.tar.gz/sha512/a1644daf036daa3799368598427c87c23bcfdddac55a0d06adca08a2e9d617c893285855af562101b05129d0ed0d84d22f5a8a1703316ecd09aa1752b8330eef +PCRE2.v10.44.0+0.armv7l-linux-musleabihf.tar.gz/md5/1335defc6090be76c509840633f7cdfb +PCRE2.v10.44.0+0.armv7l-linux-musleabihf.tar.gz/sha512/9595052eeae4da413b930b14d7e89359a29220cd9e908325e0b7788c8f4a2feb2134e78a0d8f56007787f0fefadc9de31750db6104bbdd048fa50e1d785c2a8c +PCRE2.v10.44.0+0.i686-linux-gnu.tar.gz/md5/e2d6be1d19566c965c2afeb995aba52f +PCRE2.v10.44.0+0.i686-linux-gnu.tar.gz/sha512/4a9d981bb6aa9150b670db7c5d4d188c8391fcb2a16bc710ede7a84bf7ec546fc5fd9096a339720579d25b6dcb5674b2b5b28e9664e5ef589b1a5044ce38b6a7 +PCRE2.v10.44.0+0.i686-linux-musl.tar.gz/md5/23cf857bd3daea4f094fcec48a7712dc +PCRE2.v10.44.0+0.i686-linux-musl.tar.gz/sha512/534f0cfab0cd60db9498eff387f7280a8baaf893a98dd2e7a737e68ba6473ed8236e9da85116eefb9812ec5323c705a00fcaff010b1900f752de8bdff65ef3ad +PCRE2.v10.44.0+0.i686-w64-mingw32.tar.gz/md5/3d05764df2305f16e4ffab60031ad40c +PCRE2.v10.44.0+0.i686-w64-mingw32.tar.gz/sha512/3e21cc6b71849c1a361373de30567990dba13dfd8812e7a7b5e2734b572bf1d45aeb730289d329975e76932c4c40e476824be2ab8e80a40fb7a7e2f46159235a +PCRE2.v10.44.0+0.powerpc64le-linux-gnu.tar.gz/md5/596d7c29d1417ed8959ea3ae3b4df453 +PCRE2.v10.44.0+0.powerpc64le-linux-gnu.tar.gz/sha512/89e03bfd6890150e2c8dddc4e7d024f2e09421c25a3d0fef3b5cd7f6bab7d6402ec1e82b02ecb5d26d01dfa2fb6068d050513894c374b7f2244c8fcbf00d69e2 +PCRE2.v10.44.0+0.x86_64-apple-darwin.tar.gz/md5/18f13c78ff6388c601bd36788e526b31 +PCRE2.v10.44.0+0.x86_64-apple-darwin.tar.gz/sha512/7b43a289f54064fc3c292de98173ec91cde2e49402c99c7848cbdc0e6d90a23a86d41f521e3986fcc8d941ee070d09e29ddc89a4e23009b8e9333e577ae4a09c +PCRE2.v10.44.0+0.x86_64-linux-gnu.tar.gz/md5/9f45feca0955f81ceb898208b9c74e15 +PCRE2.v10.44.0+0.x86_64-linux-gnu.tar.gz/sha512/eac215838306f7b5adb2166c3f620a69ed52fbd752ef3673a887507963a826c305d9b078dbb5236dc9a45eaca0d34f77325aab41703745701a077c84822ec0d0 +PCRE2.v10.44.0+0.x86_64-linux-musl.tar.gz/md5/79f092c6e8e971027ac6c1f0987376fb +PCRE2.v10.44.0+0.x86_64-linux-musl.tar.gz/sha512/2c5655b0f719a7d442c89f1040f2973b03f8becd855a0cfd6c0a985a07b25de351a84e3b9daaebd952b62628db0d937de08a8d05ee4bcace7e72d6b5ce6b8435 +PCRE2.v10.44.0+0.x86_64-unknown-freebsd.tar.gz/md5/a0bc32a099a584d453458a76c892fe47 
+PCRE2.v10.44.0+0.x86_64-unknown-freebsd.tar.gz/sha512/6649c1b9e9569a9decccf6ebaa61d44acdb9069208ec796777d8e70a908210f775be2142053f6a5762ebaa321e297f6d8b51db99629766bc702c498b5f772492 +PCRE2.v10.44.0+0.x86_64-w64-mingw32.tar.gz/md5/eeffb6164fba08b0d5c7f50afa081475 +PCRE2.v10.44.0+0.x86_64-w64-mingw32.tar.gz/sha512/f06db992a2070a88559c15224972aeb098d4291a4325970fc0fbbb7cdd539f4a2fd4f90c0de90a34fe454da6c38290f9e0c7fdf2fe8c441f687fe4491d652adc +pcre2-10.44.tar.bz2/md5/9d1fe11e2e919c7b395e3e8f0a5c3eec +pcre2-10.44.tar.bz2/sha512/ee91cc10a2962bc7818b03d368df3dd31f42ea9a7260ae51483ea8cd331b7431e36e63256b0adc213cc6d6741e7c90414fd420622308c0ae3fcb5dd878591be2 diff --git a/deps/pcre.version b/deps/pcre.version index e3ea507376105..681e97e197f51 100644 --- a/deps/pcre.version +++ b/deps/pcre.version @@ -2,4 +2,4 @@ PCRE_JLL_NAME := PCRE2 ## source build -PCRE_VER := 10.43 +PCRE_VER := 10.44 diff --git a/stdlib/PCRE2_jll/Project.toml b/stdlib/PCRE2_jll/Project.toml index ae1fb74922d79..fee83c7ce552c 100644 --- a/stdlib/PCRE2_jll/Project.toml +++ b/stdlib/PCRE2_jll/Project.toml @@ -1,6 +1,6 @@ name = "PCRE2_jll" uuid = "efcefdf7-47ab-520b-bdef-62a2eaa19f15" -version = "10.43.0+1" +version = "10.44.0+0" [deps] Libdl = "8f399da3-3557-5675-b5ff-fb832c97cbdb" diff --git a/stdlib/PCRE2_jll/test/runtests.jl b/stdlib/PCRE2_jll/test/runtests.jl index af0ed9434d2b6..21df2ec430e0e 100644 --- a/stdlib/PCRE2_jll/test/runtests.jl +++ b/stdlib/PCRE2_jll/test/runtests.jl @@ -6,5 +6,5 @@ using Test, Libdl, PCRE2_jll vstr = zeros(UInt8, 32) @test ccall((:pcre2_config_8, libpcre2_8), Cint, (UInt32, Ref{UInt8}), 11, vstr) > 0 vn = VersionNumber(split(unsafe_string(pointer(vstr)), " ")[1]) - @test vn == v"10.43.0" + @test vn == v"10.44.0" end From c1f806dfc168fa2a3dd426f0c003a843bd01aaad Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Thu, 28 Nov 2024 22:20:40 -0500 Subject: [PATCH 519/537] compile: make more efficient by discarding internal names (#56702) These are not user-visible, so this makes the compiler faster and more efficient with no effort on our part, and avoids duplicating the debug_level parameter. 
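For reference, the mechanism this relies on is LLVM's per-context flag `LLVMContext::setDiscardValueNames(true)`, which makes `Value::setName` a no-op for anything that is not a `GlobalValue`, so IR built under such a context never stores the internal instruction names in the first place. The sketch below is not part of this patch; it is only a minimal standalone illustration of that LLVM behavior, assuming LLVM development headers are available to compile against:

```cpp
// Hypothetical demo (not Julia code): names given to instructions and blocks
// are dropped when the owning context is set to discard value names.
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/LLVMContext.h>
#include <llvm/IR/Module.h>
#include <llvm/Support/raw_ostream.h>

int main() {
    llvm::LLVMContext ctx;
    ctx.setDiscardValueNames(true);                  // the flag codegen now enables
    llvm::Module mod("demo", ctx);
    auto *i32 = llvm::Type::getInt32Ty(ctx);
    auto *fty = llvm::FunctionType::get(i32, {i32}, /*isVarArg=*/false);
    auto *fn  = llvm::Function::Create(fty, llvm::Function::ExternalLinkage, "f", &mod);
    auto *bb  = llvm::BasicBlock::Create(ctx, "entry", fn);  // "entry" is discarded
    llvm::IRBuilder<> builder(bb);
    llvm::Value *sum = builder.CreateAdd(fn->getArg(0), builder.getInt32(1), "sum");
    builder.CreateRet(sum);
    // The global name "f" survives, but the requested name "sum" does not:
    llvm::outs() << "add has a name: " << (sum->hasName() ? "yes" : "no") << "\n";
    mod.print(llvm::outs(), nullptr);                // temporaries print as %0, %1, ...
    return 0;
}
```

Building it with something like `clang++ demo.cpp $(llvm-config --cxxflags --ldflags --libs core)` and running it should print unnamed temporaries in the IR, which is exactly the (user-invisible) information this change chooses to drop to make compilation cheaper.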
--- src/aotcompile.cpp | 7 +++++-- src/ccall.cpp | 10 ++++++++-- src/codegen.cpp | 27 ++++++++++++--------------- src/jitlayers.cpp | 7 ++++--- src/jitlayers.h | 1 - src/llvm-late-gc-lowering.cpp | 21 ++++++--------------- src/llvm-pass-helpers.cpp | 7 ------- 7 files changed, 35 insertions(+), 45 deletions(-) diff --git a/src/aotcompile.cpp b/src/aotcompile.cpp index 7b3c771f9dc12..1d1e48efc8c6c 100644 --- a/src/aotcompile.cpp +++ b/src/aotcompile.cpp @@ -593,9 +593,10 @@ void *jl_create_native_impl(jl_array_t *methods, LLVMOrcThreadSafeModuleRef llvm }); egal_set method_roots; jl_codegen_params_t params(ctxt, std::move(target_info.first), std::move(target_info.second)); + if (!llvmmod) + params.getContext().setDiscardValueNames(true); params.params = cgparams; params.imaging_mode = imaging; - params.debug_level = cgparams->debug_info_level; params.external_linkage = _external_linkage; params.temporary_roots = jl_alloc_array_1d(jl_array_any_type, 0); JL_GC_PUSH3(¶ms.temporary_roots, &method_roots.list, &method_roots.keyset); @@ -1719,6 +1720,7 @@ static SmallVector add_output(Module &M, TargetMachine &TM, Stri for (unsigned i = 0; i < threads; i++) { std::function func = [&, i]() { LLVMContext ctx; + ctx.setDiscardValueNames(true); #if JL_LLVM_VERSION < 170000 SetOpaquePointer(ctx); #endif @@ -1930,6 +1932,7 @@ void jl_dump_native_impl(void *native_code, if (z) { JL_TIMING(NATIVE_AOT, NATIVE_Sysimg); LLVMContext Context; + Context.setDiscardValueNames(true); #if JL_LLVM_VERSION < 170000 SetOpaquePointer(Context); #endif @@ -2077,6 +2080,7 @@ void jl_dump_native_impl(void *native_code, { JL_TIMING(NATIVE_AOT, NATIVE_Metadata); LLVMContext Context; + Context.setDiscardValueNames(true); #if JL_LLVM_VERSION < 170000 SetOpaquePointer(Context); #endif @@ -2278,7 +2282,6 @@ void jl_get_llvmf_defn_impl(jl_llvmf_dump_t* dump, jl_method_instance_t *mi, jl_ // output.imaging = true; // This would also be nice, but it seems to cause OOMs on the windows32 builder // To get correct names in the IR this needs to be at least 2 - output.debug_level = params.debug_info_level; output.temporary_roots = jl_alloc_array_1d(jl_array_any_type, 0); JL_GC_PUSH1(&output.temporary_roots); auto decls = jl_emit_code(m, mi, src, output); diff --git a/src/ccall.cpp b/src/ccall.cpp index 707203bd13506..52f8f807132e5 100644 --- a/src/ccall.cpp +++ b/src/ccall.cpp @@ -851,6 +851,7 @@ static jl_cgval_t emit_llvmcall(jl_codectx_t &ctx, jl_value_t **args, size_t nar // generate a temporary module that contains our IR std::unique_ptr Mod; + bool shouldDiscardValueNames = ctx.builder.getContext().shouldDiscardValueNames(); Function *f; if (entry == NULL) { // we only have function IR, which we should put in a function @@ -878,7 +879,9 @@ static jl_cgval_t emit_llvmcall(jl_codectx_t &ctx, jl_value_t **args, size_t nar << jl_string_data(ir) << "\n}"; SMDiagnostic Err = SMDiagnostic(); + ctx.builder.getContext().setDiscardValueNames(false); Mod = parseAssemblyString(ir_stream.str(), Err, ctx.builder.getContext()); + ctx.builder.getContext().setDiscardValueNames(shouldDiscardValueNames); // backwards compatibility: support for IR with integer pointers if (!Mod) { @@ -911,8 +914,9 @@ static jl_cgval_t emit_llvmcall(jl_codectx_t &ctx, jl_value_t **args, size_t nar << jl_string_data(ir) << "\n}"; SMDiagnostic Err = SMDiagnostic(); - Mod = - parseAssemblyString(compat_ir_stream.str(), Err, ctx.builder.getContext()); + ctx.builder.getContext().setDiscardValueNames(false); + Mod = parseAssemblyString(compat_ir_stream.str(), Err, 
ctx.builder.getContext()); + ctx.builder.getContext().setDiscardValueNames(shouldDiscardValueNames); } if (!Mod) { @@ -932,7 +936,9 @@ static jl_cgval_t emit_llvmcall(jl_codectx_t &ctx, jl_value_t **args, size_t nar if (jl_is_string(ir)) { SMDiagnostic Err = SMDiagnostic(); + ctx.builder.getContext().setDiscardValueNames(false); Mod = parseAssemblyString(jl_string_data(ir), Err, ctx.builder.getContext()); + ctx.builder.getContext().setDiscardValueNames(shouldDiscardValueNames); if (!Mod) { std::string message = "Failed to parse LLVM assembly: \n"; raw_string_ostream stream(message); diff --git a/src/codegen.cpp b/src/codegen.cpp index ebb96837f4db5..0b483696567ad 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -167,7 +167,7 @@ void setName(jl_codegen_params_t ¶ms, Value *V, const Twine &Name) // is not checking that setName is only called for non-folded instructions (e.g. folded bitcasts // and 0-byte geps), which can result in information loss on the renamed instruction. assert((isa(V) || isa(V)) && "Should only set names on instructions!"); - if (params.debug_level >= 2 && !isa(V)) { + if (!isa(V)) { V->setName(Name); } } @@ -175,23 +175,21 @@ void setName(jl_codegen_params_t ¶ms, Value *V, const Twine &Name) void maybeSetName(jl_codegen_params_t ¶ms, Value *V, const Twine &Name) { // To be used when we may get an Instruction or something that is not an instruction i.e Constants/Arguments - if (params.debug_level >= 2 && isa(V)) { + if (isa(V)) V->setName(Name); - } } void setName(jl_codegen_params_t ¶ms, Value *V, std::function GetName) { assert((isa(V) || isa(V)) && "Should only set names on instructions!"); - if (params.debug_level >= 2 && !isa(V)) { + if (!params.getContext().shouldDiscardValueNames() && !isa(V)) V->setName(Twine(GetName())); - } } void setNameWithField(jl_codegen_params_t ¶ms, Value *V, std::function GetObjName, jl_datatype_t *jt, unsigned idx, const Twine &suffix) { assert((isa(V) || isa(V)) && "Should only set names on instructions!"); - if (params.debug_level >= 2 && !isa(V)) { + if (!params.getContext().shouldDiscardValueNames() && !isa(V)) { if (jl_is_tuple_type(jt)){ V->setName(Twine(GetObjName()) + "[" + Twine(idx + 1) + "]"+ suffix); return; @@ -8327,7 +8325,7 @@ static jl_returninfo_t get_specsig_function(jl_codectx_t &ctx, Module *M, Value if (f == NULL) { f = Function::Create(ftype, GlobalVariable::ExternalLinkage, name, M); jl_init_function(f, ctx.emission_context.TargetTriple); - if (ctx.emission_context.debug_level >= 2) { + if (ctx.emission_context.params->debug_info_level >= 2) { ios_t sigbuf; ios_mem(&sigbuf, 0); jl_static_show_func_sig((JL_STREAM*) &sigbuf, sig); @@ -8435,7 +8433,7 @@ static jl_llvm_functions_t std::map labels; ctx.module = jl_is_method(lam->def.method) ? lam->def.method->module : lam->def.module; ctx.linfo = lam; - ctx.name = TSM.getModuleUnlocked()->getModuleIdentifier().data(); + ctx.name = name_from_method_instance(lam); size_t nreq = src->nargs; int va = src->isva; ctx.nargs = nreq; @@ -8488,7 +8486,7 @@ static jl_llvm_functions_t // jl_printf(JL_STDERR, "\n*** compiling %s at %s:%d\n\n", // jl_symbol_name(ctx.name), ctx.file.str().c_str(), toplineno); - bool debug_enabled = ctx.emission_context.debug_level != 0; + bool debug_enabled = ctx.emission_context.params->debug_info_level != 0; if (dbgFuncName.empty()) // Should never happen anymore? 
debug_enabled = false; @@ -8564,7 +8562,6 @@ static jl_llvm_functions_t // allocate Function declarations and wrapper objects //Safe because params holds ctx lock Module *M = TSM.getModuleUnlocked(); - M->addModuleFlag(Module::Warning, "julia.debug_level", ctx.emission_context.debug_level); jl_debugcache_t debugcache; debugcache.initialize(M); jl_returninfo_t returninfo = {}; @@ -8572,7 +8569,7 @@ static jl_llvm_functions_t bool has_sret = false; if (specsig) { // assumes !va and !needsparams SmallVector ArgNames(0); - if (ctx.emission_context.debug_level >= 2) { + if (!M->getContext().shouldDiscardValueNames()) { ArgNames.resize(ctx.nargs, ""); for (int i = 0; i < ctx.nargs; i++) { jl_sym_t *argname = slot_symbol(ctx, i); @@ -8639,7 +8636,7 @@ static jl_llvm_functions_t declarations.functionObject = needsparams ? "jl_fptr_sparam" : "jl_fptr_args"; } - if (ctx.emission_context.debug_level >= 2 && lam->def.method && jl_is_method(lam->def.method) && lam->specTypes != (jl_value_t*)jl_emptytuple_type) { + if (!params.getContext().shouldDiscardValueNames() && ctx.emission_context.params->debug_info_level >= 2 && lam->def.method && jl_is_method(lam->def.method) && lam->specTypes != (jl_value_t*)jl_emptytuple_type) { ios_t sigbuf; ios_mem(&sigbuf, 0); jl_static_show_func_sig((JL_STREAM*) &sigbuf, (jl_value_t*)lam->specTypes); @@ -8694,7 +8691,7 @@ static jl_llvm_functions_t if (debug_enabled) { topfile = dbuilder.createFile(ctx.file, "."); DISubroutineType *subrty; - if (ctx.emission_context.debug_level <= 1) + if (ctx.emission_context.params->debug_info_level <= 1) subrty = debugcache.jl_di_func_null_sig; else if (!specsig) subrty = debugcache.jl_di_func_sig; @@ -8715,7 +8712,7 @@ static jl_llvm_functions_t ); topdebugloc = DILocation::get(ctx.builder.getContext(), toplineno, 0, SP, NULL); f->setSubprogram(SP); - if (ctx.emission_context.debug_level >= 2) { + if (ctx.emission_context.params->debug_info_level >= 2) { const bool AlwaysPreserve = true; // Go over all arguments and local variables and initialize their debug information for (i = 0; i < nreq; i++) { @@ -10161,7 +10158,7 @@ jl_llvm_functions_t jl_emit_codeinst( if (// keep code when keeping everything !(JL_DELETE_NON_INLINEABLE) || // aggressively keep code when debugging level >= 2 - // note that this uses the global jl_options.debug_level, not the local emission_ctx.debug_level + // note that this uses the global jl_options.debug_level, not the local emission_ctx.debug_info_level jl_options.debug_level > 1) { // update the stored code if (inferred != (jl_value_t*)src) { diff --git a/src/jitlayers.cpp b/src/jitlayers.cpp index d7e8ca4a4850a..42ddfb688af39 100644 --- a/src/jitlayers.cpp +++ b/src/jitlayers.cpp @@ -690,9 +690,9 @@ static void jl_emit_codeinst_to_jit( JL_TIMING(CODEINST_COMPILE, CODEINST_COMPILE); // emit the code in LLVM IR form to the new context jl_codegen_params_t params(std::make_unique(), jl_ExecutionEngine->getDataLayout(), jl_ExecutionEngine->getTargetTriple()); // Locks the context + params.getContext().setDiscardValueNames(true); params.cache = true; params.imaging_mode = imaging_default(); - params.debug_level = jl_options.debug_level; orc::ThreadSafeModule result_m = jl_create_ts_module(name_from_method_instance(codeinst->def), params.tsctx, params.DL, params.TargetTriple); params.temporary_roots = jl_alloc_array_1d(jl_array_any_type, 0); @@ -795,9 +795,10 @@ int jl_compile_extern_c_impl(LLVMOrcThreadSafeModuleRef llvmmod, void *p, void * Module *M = into->getModuleUnlocked(); jl_codegen_params_t 
params(into->getContext(), M->getDataLayout(), Triple(M->getTargetTriple())); params.imaging_mode = imaging_default(); - params.debug_level = jl_options.debug_level; - if (pparams == NULL) + if (pparams == NULL) { + M->getContext().setDiscardValueNames(true); pparams = ¶ms; + } assert(pparams->tsctx.getContext() == into->getContext().getContext()); const char *name = jl_generate_ccallable(wrap(into), sysimg, declrt, sigt, *pparams); if (!sysimg) { diff --git a/src/jitlayers.h b/src/jitlayers.h index d5fa878211200..baba5412226e3 100644 --- a/src/jitlayers.h +++ b/src/jitlayers.h @@ -259,7 +259,6 @@ struct jl_codegen_params_t { bool cache = false; bool external_linkage = false; bool imaging_mode; - int debug_level; bool use_swiftcc = true; jl_codegen_params_t(orc::ThreadSafeContext ctx, DataLayout DL, Triple triple) : tsctx(std::move(ctx)), diff --git a/src/llvm-late-gc-lowering.cpp b/src/llvm-late-gc-lowering.cpp index 3e372ec9884e7..d95cc9c49b698 100644 --- a/src/llvm-late-gc-lowering.cpp +++ b/src/llvm-late-gc-lowering.cpp @@ -1944,28 +1944,19 @@ void LateLowerGCFrame::CleanupWriteBarriers(Function &F, State *S, const SmallVe if (CFGModified) { *CFGModified = true; } - auto DebugInfoMeta = F.getParent()->getModuleFlag("julia.debug_level"); - int debug_info = 1; - if (DebugInfoMeta != nullptr) { - debug_info = cast(cast(DebugInfoMeta)->getValue())->getZExtValue(); - } IRBuilder<> builder(CI); builder.SetCurrentDebugLocation(CI->getDebugLoc()); - auto parBits = builder.CreateAnd(EmitLoadTag(builder, T_size, parent), GC_OLD_MARKED); - setName(parBits, "parent_bits", debug_info); - auto parOldMarked = builder.CreateICmpEQ(parBits, ConstantInt::get(T_size, GC_OLD_MARKED)); - setName(parOldMarked, "parent_old_marked", debug_info); + auto parBits = builder.CreateAnd(EmitLoadTag(builder, T_size, parent), GC_OLD_MARKED, "parent_bits"); + auto parOldMarked = builder.CreateICmpEQ(parBits, ConstantInt::get(T_size, GC_OLD_MARKED), "parent_old_marked"); auto mayTrigTerm = SplitBlockAndInsertIfThen(parOldMarked, CI, false); builder.SetInsertPoint(mayTrigTerm); - setName(mayTrigTerm->getParent(), "may_trigger_wb", debug_info); + mayTrigTerm->getParent()->setName("may_trigger_wb"); Value *anyChldNotMarked = NULL; for (unsigned i = 1; i < CI->arg_size(); i++) { Value *child = CI->getArgOperand(i); - Value *chldBit = builder.CreateAnd(EmitLoadTag(builder, T_size, child), GC_MARKED); - setName(chldBit, "child_bit", debug_info); - Value *chldNotMarked = builder.CreateICmpEQ(chldBit, ConstantInt::get(T_size, 0),"child_not_marked"); - setName(chldNotMarked, "child_not_marked", debug_info); + Value *chldBit = builder.CreateAnd(EmitLoadTag(builder, T_size, child), GC_MARKED, "child_bit"); + Value *chldNotMarked = builder.CreateICmpEQ(chldBit, ConstantInt::get(T_size, 0), "child_not_marked"); anyChldNotMarked = anyChldNotMarked ? 
builder.CreateOr(anyChldNotMarked, chldNotMarked) : chldNotMarked; } assert(anyChldNotMarked); // handled by all_of test above @@ -1973,7 +1964,7 @@ void LateLowerGCFrame::CleanupWriteBarriers(Function &F, State *S, const SmallVe SmallVector Weights{1, 9}; auto trigTerm = SplitBlockAndInsertIfThen(anyChldNotMarked, mayTrigTerm, false, MDB.createBranchWeights(Weights)); - setName(trigTerm->getParent(), "trigger_wb", debug_info); + trigTerm->getParent()->setName("trigger_wb"); builder.SetInsertPoint(trigTerm); if (CI->getCalledOperand() == write_barrier_func) { builder.CreateCall(getOrDeclare(jl_intrinsics::queueGCRoot), parent); diff --git a/src/llvm-pass-helpers.cpp b/src/llvm-pass-helpers.cpp index cc6c73161968d..4e9e4826b4f75 100644 --- a/src/llvm-pass-helpers.cpp +++ b/src/llvm-pass-helpers.cpp @@ -335,10 +335,3 @@ namespace jl_well_known { return addGCAllocAttributes(allocTypedFunc); }); } - -void setName(llvm::Value *V, const llvm::Twine &Name, int debug_info) -{ - if (debug_info >= 2 && !llvm::isa(V)) { - V->setName(Name); - } -} From d32843b606bdb59957662d267f258d3dc7bf27b5 Mon Sep 17 00:00:00 2001 From: Valentin Churavy Date: Sat, 30 Nov 2024 16:34:42 +0100 Subject: [PATCH 520/537] Automatically enable JITPROFILING with ITTAPI (#55598) This helps when profiling remotely since VTunes doesn't support setting environment variables on remote systems. Will still respect `ENABLE_JITPROFILING=0`. --- src/codegen.cpp | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/src/codegen.cpp b/src/codegen.cpp index 0b483696567ad..b8bed0793730b 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -81,6 +81,10 @@ #include #include +#ifdef USE_ITTAPI +#include "ittapi/ittnotify.h" +#endif + using namespace llvm; static bool jl_fpo_disabled(const Triple &TT) { @@ -10427,8 +10431,16 @@ extern "C" void jl_init_llvm(void) const char *jit_profiling = getenv("ENABLE_JITPROFILING"); #if defined(JL_USE_INTEL_JITEVENTS) - if (jit_profiling && atoi(jit_profiling)) { - jl_using_intel_jitevents = 1; + if (jit_profiling) { + if (atoi(jit_profiling)) { + jl_using_intel_jitevents = 1; + } + } else { +#ifdef USE_ITTAPI + __itt_collection_state state = __itt_get_collection_state(); + jl_using_intel_jitevents = state == __itt_collection_init_successful || + state == __itt_collection_collector_exists; +#endif } #endif From ef328064a96fb143c28e2765f5a84b3ab8c43c87 Mon Sep 17 00:00:00 2001 From: Zentrik Date: Sun, 1 Dec 2024 04:35:39 +0000 Subject: [PATCH 521/537] Fix string handling in jlchecksum (#56720) A `TAGGED_RELEASE_BANNER` with spaces such as `Official https://julialang.org release` produces the error `/cache/build/builder-amdci4-5/julialang/julia-master/deps/tools/jlchecksum: 66: [: Official: unexpected operator`. --- deps/tools/jlchecksum | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deps/tools/jlchecksum b/deps/tools/jlchecksum index 329d3a2a845d4..9945ec89e6bda 100755 --- a/deps/tools/jlchecksum +++ b/deps/tools/jlchecksum @@ -63,7 +63,7 @@ find_checksum() fi done if [ ! -f "$DEPSDIR/checksums/$BASENAME/$CHECKSUM_TYPE" ]; then - if [ ${TAGGED_RELEASE_BANNER:-} ]; then + if [ "${TAGGED_RELEASE_BANNER:-}" ]; then echo "WARNING: $CHECKSUM_TYPE checksum for $BASENAME not found in deps/checksums/, failing release build." 
>&2 exit 3 fi From ea421126d831b18fa00408794247317b544b18d8 Mon Sep 17 00:00:00 2001 From: Priynsh <119518987+Priynsh@users.noreply.github.com> Date: Mon, 2 Dec 2024 00:29:52 +0530 Subject: [PATCH 522/537] Clarifying ispunct behavior difference between Julia and C in documentation (#56727) Fixes #56680. This PR updates the documentation for the ispunct function in Julia to explicitly note its differing behavior from the similarly named function in C. --------- Co-authored-by: Lilith Orion Hafner --- base/strings/unicode.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/base/strings/unicode.jl b/base/strings/unicode.jl index ad047514c85a6..fcb4a371e9898 100644 --- a/base/strings/unicode.jl +++ b/base/strings/unicode.jl @@ -534,11 +534,17 @@ iscntrl(c::AbstractChar) = c <= '\x1f' || '\x7f' <= c <= '\u9f' Tests whether a character belongs to the Unicode general category Punctuation, i.e. a character whose category code begins with 'P'. +!!! note + This behavior is different from the `ispunct` function in C. + # Examples ```jldoctest julia> ispunct('α') false +julia> ispunct('=') +false + julia> ispunct('/') true From 8ce7d0fce419746e36556d561fe7d1c89704e291 Mon Sep 17 00:00:00 2001 From: Christian Guinard <28689358+christiangnrd@users.noreply.github.com> Date: Mon, 2 Dec 2024 11:13:51 -0400 Subject: [PATCH 523/537] [NEWS.md] Add PR numbers and remove some 1.11 changes that accidentally came back. (#56722) --- NEWS.md | 29 ++++++----------------------- 1 file changed, 6 insertions(+), 23 deletions(-) diff --git a/NEWS.md b/NEWS.md index 61bad831e261c..c1d5f38f337b0 100644 --- a/NEWS.md +++ b/NEWS.md @@ -55,11 +55,11 @@ Command-line option changes --------------------------- * The `-m/--module` flag can be passed to run the `main` function inside a package with a set of arguments. - This `main` function should be declared using `@main` to indicate that it is an entry point. + This `main` function should be declared using `@main` to indicate that it is an entry point. ([#52103]) * Enabling or disabling color text in Julia can now be controlled with the [`NO_COLOR`](https://no-color.org/) or [`FORCE_COLOR`](https://force-color.org/) environment variables. These variables are also honored by Julia's build system ([#53742], [#56346]). -* `--project=@temp` starts Julia with a temporary environment. +* `--project=@temp` starts Julia with a temporary environment. ([#51149]) * New `--trace-compile-timing` option to report how long each method reported by `--trace-compile` took to compile, in ms. ([#54662]) * `--trace-compile` now prints recompiled methods in yellow or with a trailing comment if color is not supported ([#55763]) @@ -72,7 +72,7 @@ Multi-threading changes a `OncePerProcess{T}` type, which allows defining a function that should be run exactly once the first time it is called, and then always return the same result value of type `T` every subsequent time afterwards. There are also `OncePerThread{T}` and `OncePerTask{T}` types for - similar usage with threads or tasks. ([#TBD]) + similar usage with threads or tasks. ([#55793]) Build system changes -------------------- @@ -86,32 +86,15 @@ New library functions * The new `isfull(c::Channel)` function can be used to check if `put!(c, some_value)` will block. ([#53159]) * `waitany(tasks; throw=false)` and `waitall(tasks; failfast=false, throw=false)` which wait multiple tasks at once ([#53341]). * `uuid7()` creates an RFC 9652 compliant UUID with version 7 ([#54834]). 
-* `insertdims(array; dims)` allows to insert singleton dimensions into an array which is the inverse operation to `dropdims` +* `insertdims(array; dims)` allows to insert singleton dimensions into an array which is the inverse operation to `dropdims`. ([#45793]) * The new `Fix` type is a generalization of `Fix1/Fix2` for fixing a single argument ([#54653]). New library features -------------------- -* `invmod(n, T)` where `T` is a native integer type now computes the modular inverse of `n` in the modular integer ring that `T` defines ([#52180]). -* `invmod(n)` is an abbreviation for `invmod(n, typeof(n))` for native integer types ([#52180]). -* `replace(string, pattern...)` now supports an optional `IO` argument to - write the output to a stream rather than returning a string ([#48625]). -* `sizehint!(s, n)` now supports an optional `shrink` argument to disable shrinking ([#51929]). -* New function `Docs.hasdoc(module, symbol)` tells whether a name has a docstring ([#52139]). -* New function `Docs.undocumented_names(module)` returns a module's undocumented public names ([#52413]). -* Passing an `IOBuffer` as a stdout argument for `Process` spawn now works as - expected, synchronized with `wait` or `success`, so a `Base.BufferStream` is - no longer required there for correctness to avoid data races ([#52461]). -* After a process exits, `closewrite` will no longer be automatically called on - the stream passed to it. Call `wait` on the process instead to ensure the - content is fully written, then call `closewrite` manually to avoid - data-races. Or use the callback form of `open` to have all that handled - automatically. -* `@timed` now additionally returns the elapsed compilation and recompilation time ([#52889]) * `escape_string` takes additional keyword arguments `ascii=true` (to escape all non-ASCII characters) and `fullhex=true` (to require full 4/8-digit hex numbers - for u/U escapes, e.g. for C compatibility) [#55099]). -* `filter` can now act on a `NamedTuple` ([#50795]). + for u/U escapes, e.g. for C compatibility) ([#55099]). * `tempname` can now take a suffix string to allow the file name to include a suffix and include that suffix in the uniquing checking ([#53474]) * `RegexMatch` objects can now be used to construct `NamedTuple`s and `Dict`s ([#50988]) @@ -133,7 +116,7 @@ Standard library changes * A new standard library for applying syntax highlighting to Julia code, this uses `JuliaSyntax` and `StyledStrings` to implement a `highlight` function - that creates an `AnnotatedString` with syntax highlighting applied. + that creates an `AnnotatedString` with syntax highlighting applied. 
([#51810]) #### Package Manager From 6a0de347469abe38c970b9efbf0244f2d60806ca Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Sat, 30 Nov 2024 18:06:26 -0500 Subject: [PATCH 524/537] ircode: cleanup code crud - support gc running - don't duplicate field 4 - remove some unused code only previously needed for handling cycles (which are not valid in IR) --- src/clangsa/GCChecker.cpp | 3 +- src/ircode.c | 399 +++++++++++++++++++++++++------------- src/serialize.h | 69 ------- 3 files changed, 263 insertions(+), 208 deletions(-) diff --git a/src/clangsa/GCChecker.cpp b/src/clangsa/GCChecker.cpp index cac89a6761d01..40093ca15859b 100644 --- a/src/clangsa/GCChecker.cpp +++ b/src/clangsa/GCChecker.cpp @@ -847,7 +847,8 @@ bool GCChecker::isGCTrackedType(QualType QT) { Name.ends_with_insensitive("jl_vararg_t") || Name.ends_with_insensitive("jl_opaque_closure_t") || Name.ends_with_insensitive("jl_globalref_t") || - // Probably not technically true for these, but let's allow it + // Probably not technically true for these, but let's allow it as a root + Name.ends_with_insensitive("jl_ircode_state") || Name.ends_with_insensitive("typemap_intersection_env") || Name.ends_with_insensitive("interpreter_state") || Name.ends_with_insensitive("jl_typeenv_t") || diff --git a/src/ircode.c b/src/ircode.c index bec8d46513eef..d3137ce26edef 100644 --- a/src/ircode.c +++ b/src/ircode.c @@ -10,17 +10,76 @@ #include "julia_internal.h" #include "serialize.h" -#ifndef _OS_WINDOWS_ -#include -#endif - -#include "valgrind.h" #include "julia_assert.h" #ifdef __cplusplus extern "C" { #endif +#define TAG_SYMBOL 2 +#define TAG_SSAVALUE 3 +#define TAG_DATATYPE 4 +#define TAG_SLOTNUMBER 5 +#define TAG_SVEC 6 +// #define TAG_UNUSED 7 +#define TAG_NULL 8 +#define TAG_EXPR 9 +#define TAG_PHINODE 10 +#define TAG_PHICNODE 11 +#define TAG_LONG_SYMBOL 12 +#define TAG_LONG_SVEC 13 +#define TAG_LONG_EXPR 14 +#define TAG_LONG_PHINODE 15 +#define TAG_LONG_PHICNODE 16 +#define TAG_METHODROOT 17 +#define TAG_EDGE 18 +#define TAG_STRING 19 +#define TAG_SHORT_INT64 20 +//#define TAG_UNUSED 21 +#define TAG_CNULL 22 +#define TAG_ARRAY1D 23 +#define TAG_SINGLETON 24 +#define TAG_MODULE 25 +#define TAG_TVAR 26 +#define TAG_METHOD_INSTANCE 27 +#define TAG_METHOD 28 +#define TAG_CODE_INSTANCE 29 +#define TAG_COMMONSYM 30 +#define TAG_NEARBYGLOBAL 31 +#define TAG_GLOBALREF 32 +#define TAG_CORE 33 +#define TAG_BASE 34 +#define TAG_BITYPENAME 35 +#define TAG_NEARBYMODULE 36 +#define TAG_INT32 37 +#define TAG_INT64 38 +#define TAG_UINT8 39 +#define TAG_VECTORTY 40 +#define TAG_PTRTY 41 +#define TAG_LONG_SSAVALUE 42 +#define TAG_LONG_METHODROOT 43 +#define TAG_LONG_EDGE 44 +#define TAG_SHORTER_INT64 45 +#define TAG_SHORT_INT32 46 +#define TAG_CALL1 47 +#define TAG_CALL2 48 +#define TAG_SHORT_BACKREF 49 +#define TAG_BACKREF 50 +#define TAG_UNIONALL 51 +#define TAG_GOTONODE 52 +#define TAG_QUOTENODE 53 +#define TAG_GENERAL 54 +#define TAG_GOTOIFNOT 55 +#define TAG_RETURNNODE 56 +#define TAG_ARGUMENT 57 +#define TAG_RELOC_METHODROOT 58 +#define TAG_BINDING 59 +#define TAG_MEMORYT 60 +#define TAG_ENTERNODE 61 + +#define LAST_TAG 61 + + typedef struct { ios_t *s; // method we're compressing for @@ -38,29 +97,29 @@ static jl_value_t *deser_tag[256]; static htable_t common_symbol_tag; static jl_value_t *deser_symbols[256]; -void *jl_lookup_ser_tag(jl_value_t *v) +static void *jl_lookup_ser_tag(jl_value_t *v) { return ptrhash_get(&ser_tag, v); } -void *jl_lookup_common_symbol(jl_value_t *v) +static void *jl_lookup_common_symbol(jl_value_t *v) { return 
ptrhash_get(&common_symbol_tag, v); } -jl_value_t *jl_deser_tag(uint8_t tag) +static jl_value_t *jl_deser_tag(uint8_t tag) { return deser_tag[tag]; } -jl_value_t *jl_deser_symbol(uint8_t tag) +static jl_value_t *jl_deser_symbol(uint8_t tag) { return deser_symbols[tag]; } // --- encoding --- -static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) JL_GC_DISABLED; +static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal); #define jl_encode_value(s, v) jl_encode_value_((s), (jl_value_t*)(v), 0) static void tagged_root(rle_reference *rr, jl_ircode_state *s, int i) @@ -69,7 +128,7 @@ static void tagged_root(rle_reference *rr, jl_ircode_state *s, int i) s->relocatability = 0; } -static void literal_val_id(rle_reference *rr, jl_ircode_state *s, jl_value_t *v) JL_GC_DISABLED +static void literal_val_id(rle_reference *rr, jl_ircode_state *s, jl_value_t *v) { jl_array_t *rs = s->method->roots; int i, l = jl_array_nrows(rs); @@ -142,7 +201,7 @@ static void jl_encode_as_indexed_root(jl_ircode_state *s, jl_value_t *v) } } -static void jl_encode_memory_slice(jl_ircode_state *s, jl_genericmemory_t *mem, size_t offset, size_t len) JL_GC_DISABLED +static void jl_encode_memory_slice(jl_ircode_state *s, jl_genericmemory_t *mem, size_t offset, size_t len) { jl_datatype_t *t = (jl_datatype_t*)jl_typetagof(mem); size_t i; @@ -180,7 +239,7 @@ static void jl_encode_memory_slice(jl_ircode_state *s, jl_genericmemory_t *mem, } } -static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) JL_GC_DISABLED +static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) { size_t i; @@ -306,8 +365,11 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) } for (i = 0; i < l; i++) { int32_t e = jl_array_data(edges, int32_t)[i]; - if (e <= 20) - jl_encode_value(s, jl_box_int32(e)); + if (e <= 0 && e <= 20) { // 1-byte encodings + jl_value_t *ebox = jl_box_int32(e); + JL_GC_PROMISE_ROOTED(ebox); + jl_encode_value(s, ebox); + } else jl_encode_int32(s, e); } @@ -333,25 +395,39 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) } else if (jl_is_gotonode(v)) { write_uint8(s->s, TAG_GOTONODE); - jl_encode_value(s, jl_get_nth_field(v, 0)); + jl_value_t *f = jl_get_nth_field(v, 0); + JL_GC_PUSH1(&f); + jl_encode_value(s, f); + JL_GC_POP(); } else if (jl_is_gotoifnot(v)) { write_uint8(s->s, TAG_GOTOIFNOT); - jl_encode_value(s, jl_get_nth_field(v, 0)); - jl_encode_value(s, jl_get_nth_field(v, 1)); + jl_value_t *f = jl_get_nth_field_noalloc(v, 0); + JL_GC_PUSH1(&f); + jl_encode_value(s, f); + f = jl_get_nth_field(v, 1); + jl_encode_value(s, f); + JL_GC_POP(); } else if (jl_is_enternode(v)) { write_uint8(s->s, TAG_ENTERNODE); - jl_encode_value(s, jl_get_nth_field(v, 0)); - jl_encode_value(s, jl_get_nth_field(v, 1)); + jl_value_t *f = jl_get_nth_field(v, 0); + JL_GC_PUSH1(&f); + jl_encode_value(s, f); + f = jl_get_nth_field_noalloc(v, 1); + jl_encode_value(s, f); + JL_GC_POP(); } else if (jl_is_argument(v)) { write_uint8(s->s, TAG_ARGUMENT); - jl_encode_value(s, jl_get_nth_field(v, 0)); + jl_value_t *f = jl_get_nth_field(v, 0); + JL_GC_PUSH1(&f); + jl_encode_value(s, f); + JL_GC_POP(); } else if (jl_is_returnnode(v)) { write_uint8(s->s, TAG_RETURNNODE); - jl_encode_value(s, jl_get_nth_field(v, 0)); + jl_encode_value(s, jl_returnnode_value(v)); } else if (jl_is_quotenode(v)) { write_uint8(s->s, TAG_QUOTENODE); @@ -394,19 +470,15 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, 
int as_literal) write_int32(s->s, jl_string_len(v)); ios_write(s->s, jl_string_data(v), jl_string_len(v)); } - else if (as_literal && jl_is_array(v)) { + else if (as_literal && jl_is_array(v) && jl_array_ndims(v)) { jl_array_t *ar = (jl_array_t*)v; - if (jl_array_ndims(ar) == 1) { - write_uint8(s->s, TAG_ARRAY1D); - } - else { - write_uint8(s->s, TAG_ARRAY); - write_uint16(s->s, jl_array_ndims(ar)); - } - for (i = 0; i < jl_array_ndims(ar); i++) - jl_encode_value(s, jl_box_long(jl_array_dim(ar, i))); + write_uint8(s->s, TAG_ARRAY1D); + size_t l = jl_array_dim0(ar); + jl_value_t *lbox = jl_box_long(l); + JL_GC_PUSH1(&lbox); + jl_encode_value(s, lbox); + JL_GC_POP(); jl_encode_value(s, jl_typeof(ar)); - size_t l = jl_array_len(ar); const jl_datatype_layout_t *layout = ((jl_datatype_t*)jl_typetagof(ar->ref.mem))->layout; size_t offset; if (layout->flags.arrayelem_isunion || layout->size == 0) @@ -419,7 +491,10 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) jl_genericmemory_t* m = (jl_genericmemory_t*)v; write_uint8(s->s, TAG_MEMORYT); jl_encode_value(s, (jl_datatype_t*)jl_typetagof(v)); - jl_encode_value(s, jl_box_long(m->length)); + jl_value_t *lbox = jl_box_long(m->length); + JL_GC_PUSH1(&lbox); + jl_encode_value(s, lbox); + JL_GC_POP(); jl_encode_memory_slice(s, m, 0, m->length); } else if (as_literal && jl_is_layout_opaque(((jl_datatype_t*)jl_typeof(v))->layout)) { @@ -428,16 +503,8 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) else if (as_literal || jl_is_uniontype(v) || jl_is_newvarnode(v) || jl_is_linenode(v) || jl_is_upsilonnode(v) || jl_is_pinode(v) || jl_is_slotnumber(v) || jl_is_ssavalue(v) || (jl_isbits(jl_typeof(v)) && jl_datatype_size(jl_typeof(v)) <= 64)) { + write_uint8(s->s, TAG_GENERAL); jl_datatype_t *t = (jl_datatype_t*)jl_typeof(v); - size_t tsz = jl_datatype_size(t); - if (tsz <= 255) { - write_uint8(s->s, TAG_SHORT_GENERAL); - write_uint8(s->s, tsz); - } - else { - write_uint8(s->s, TAG_GENERAL); - write_int32(s->s, tsz); - } jl_encode_value(s, t); char *data = (char*)jl_data_ptr(v); @@ -492,34 +559,35 @@ static jl_code_info_flags_t code_info_flags(uint8_t propagate_inbounds, uint8_t // --- decoding --- -static jl_value_t *jl_decode_value(jl_ircode_state *s) JL_GC_DISABLED; +static jl_value_t *jl_decode_value(jl_ircode_state *s); -static jl_value_t *jl_decode_value_svec(jl_ircode_state *s, uint8_t tag) JL_GC_DISABLED +static jl_value_t *jl_decode_value_svec(jl_ircode_state *s, uint8_t tag) { size_t i, len; if (tag == TAG_SVEC) len = read_uint8(s->s); else len = read_int32(s->s); - jl_svec_t *sv = jl_alloc_svec_uninit(len); - jl_value_t **data = jl_svec_data(sv); - for (i = 0; i < len; i++) { - data[i] = jl_decode_value(s); - } + jl_svec_t *sv = jl_alloc_svec(len); + JL_GC_PUSH1(&sv); + for (i = 0; i < len; i++) + jl_svecset(sv, i, jl_decode_value(s)); + JL_GC_POP(); return (jl_value_t*)sv; } -static jl_value_t *jl_decode_value_memory(jl_ircode_state *s, jl_value_t *mty, size_t nel) JL_GC_DISABLED +static jl_genericmemory_t *jl_decode_value_memory(jl_ircode_state *s, jl_value_t *mty, size_t nel) { jl_genericmemory_t *m = jl_alloc_genericmemory(mty, nel); + JL_GC_PUSH1(&m); const jl_datatype_layout_t *layout = ((jl_datatype_t*)mty)->layout; if (layout->flags.arrayelem_isboxed) { jl_value_t **data = (jl_value_t**)m->ptr; size_t i, numel = m->length; for (i = 0; i < numel; i++) { data[i] = jl_decode_value(s); + jl_gc_wb(m, data[i]); } - assert(jl_astaggedvalue(m)->bits.gc == GC_CLEAN); // gc is 
disabled } else if (layout->first_ptr >= 0) { size_t i, numel = m->length; @@ -534,49 +602,48 @@ static jl_value_t *jl_decode_value_memory(jl_ircode_state *s, jl_value_t *mty, s if ((char*)fld != start) ios_readall(s->s, start, (const char*)fld - start); *fld = jl_decode_value(s); + jl_gc_wb(m, fld); start = (char*)&fld[1]; } data += elsz; if (data != start) ios_readall(s->s, start, data - start); } - assert(jl_astaggedvalue(m)->bits.gc == GC_CLEAN); // gc is disabled } else { size_t extra = jl_genericmemory_isbitsunion(m) ? m->length : 0; size_t tot = m->length * layout->size + extra; ios_readall(s->s, (char*)m->ptr, tot); } - return (jl_value_t*)m; + JL_GC_POP(); + return m; } JL_DLLEXPORT jl_array_t *jl_alloc_array_nd(jl_value_t *atype, size_t *dims, size_t ndims); -static jl_value_t *jl_decode_value_array(jl_ircode_state *s, uint8_t tag) JL_GC_DISABLED +static jl_value_t *jl_decode_value_array1d(jl_ircode_state *s, uint8_t tag) { - int16_t i, ndims; - if (tag == TAG_ARRAY1D) - ndims = 1; - else - ndims = read_uint16(s->s); - size_t *dims = (size_t*)alloca(ndims * sizeof(size_t)); - size_t len = 1; - for (i = 0; i < ndims; i++) { - dims[i] = jl_unbox_long(jl_decode_value(s)); - len *= dims[i]; - } + int16_t ndims = 1; + size_t dim0 = jl_unbox_long(jl_decode_value(s)); + size_t len = dim0; jl_value_t *aty = jl_decode_value(s); - jl_array_t *a = jl_alloc_array_nd(aty, dims, ndims); - a->ref.mem = (jl_genericmemory_t*)jl_decode_value_memory(s, jl_field_type_concrete((jl_datatype_t*)jl_field_type_concrete((jl_datatype_t*)aty, 0), 1), len); + JL_GC_PROMISE_ROOTED(aty); // (JL_ALWAYS_LEAFTYPE) + jl_genericmemory_t *mem = jl_decode_value_memory(s, jl_field_type_concrete((jl_datatype_t*)jl_field_type_concrete((jl_datatype_t*)aty, 0), 1), len); + JL_GC_PUSH1(&mem); + int tsz = sizeof(jl_array_t) + ndims*sizeof(size_t); + jl_array_t *a = (jl_array_t*)jl_gc_alloc(s->ptls, tsz, aty); + a->ref.mem = mem; const jl_datatype_layout_t *layout = ((jl_datatype_t*)jl_typetagof(a->ref.mem))->layout; if (layout->flags.arrayelem_isunion || layout->size == 0) a->ref.ptr_or_offset = (void*)0; else a->ref.ptr_or_offset = a->ref.mem->ptr; + a->dimsize[0] = dim0; + JL_GC_POP(); return (jl_value_t*)a; } -static jl_value_t *jl_decode_value_expr(jl_ircode_state *s, uint8_t tag) JL_GC_DISABLED +static jl_value_t *jl_decode_value_expr(jl_ircode_state *s, uint8_t tag) { size_t i, len; jl_sym_t *head = NULL; @@ -597,14 +664,18 @@ static jl_value_t *jl_decode_value_expr(jl_ircode_state *s, uint8_t tag) JL_GC_D if (head == NULL) head = (jl_sym_t*)jl_decode_value(s); jl_expr_t *e = jl_exprn(head, len); + JL_GC_PUSH1(&e); jl_value_t **data = jl_array_ptr_data(e->args); + jl_value_t *owner = jl_array_owner(e->args); for (i = 0; i < len; i++) { data[i] = jl_decode_value(s); + jl_gc_wb(owner, data[i]); } + JL_GC_POP(); return (jl_value_t*)e; } -static jl_value_t *jl_decode_value_phi(jl_ircode_state *s, uint8_t tag) JL_GC_DISABLED +static jl_value_t *jl_decode_value_phi(jl_ircode_state *s, uint8_t tag) { size_t i, len_e, len_v; if (tag == TAG_PHINODE) { @@ -614,9 +685,13 @@ static jl_value_t *jl_decode_value_phi(jl_ircode_state *s, uint8_t tag) JL_GC_DI len_e = read_int32(s->s); len_v = read_int32(s->s); } - jl_array_t *e = jl_alloc_array_1d(jl_array_int32_type, len_e); - jl_array_t *v = jl_alloc_vec_any(len_v); - jl_value_t *phi = jl_new_struct(jl_phinode_type, e, v); + jl_array_t *e = NULL; + jl_array_t *v = NULL; + jl_value_t *phi = NULL; + JL_GC_PUSH3(&e, &v, &phi); + e = jl_alloc_array_1d(jl_array_int32_type, len_e); + v = 
jl_alloc_vec_any(len_v); + phi = jl_new_struct(jl_phinode_type, e, v); int32_t *data_e = jl_array_data(e, int32_t); for (i = 0; i < len_e; i++) { data_e[i] = jl_unbox_int32(jl_decode_value(s)); @@ -624,11 +699,13 @@ static jl_value_t *jl_decode_value_phi(jl_ircode_state *s, uint8_t tag) JL_GC_DI jl_value_t **data_v = jl_array_ptr_data(v); for (i = 0; i < len_v; i++) { data_v[i] = jl_decode_value(s); + jl_gc_wb(jl_array_owner(v), data_v[i]); } + JL_GC_POP(); return phi; } -static jl_value_t *jl_decode_value_phic(jl_ircode_state *s, uint8_t tag) JL_GC_DISABLED +static jl_value_t *jl_decode_value_phic(jl_ircode_state *s, uint8_t tag) { size_t i, len; if (tag == TAG_PHICNODE) @@ -636,41 +713,53 @@ static jl_value_t *jl_decode_value_phic(jl_ircode_state *s, uint8_t tag) JL_GC_D else len = read_int32(s->s); jl_array_t *v = jl_alloc_vec_any(len); - jl_value_t *phic = jl_new_struct(jl_phicnode_type, v); + jl_value_t *phic = (jl_value_t*)v; + JL_GC_PUSH1(&phic); + phic = jl_new_struct(jl_phicnode_type, v); jl_value_t **data = jl_array_ptr_data(v); for (i = 0; i < len; i++) { data[i] = jl_decode_value(s); + jl_gc_wb(jl_array_owner(v), data[i]); } + JL_GC_POP(); return phic; } -static jl_value_t *jl_decode_value_globalref(jl_ircode_state *s) JL_GC_DISABLED +static jl_value_t *jl_decode_value_globalref(jl_ircode_state *s) { - jl_value_t *mod = jl_decode_value(s); - jl_value_t *var = jl_decode_value(s); - return jl_module_globalref((jl_module_t*)mod, (jl_sym_t*)var); + jl_module_t *mod = (jl_module_t*)jl_decode_value(s); + JL_GC_PROMISE_ROOTED(mod); + jl_sym_t *var = (jl_sym_t*)jl_decode_value(s); + JL_GC_PROMISE_ROOTED(var); + return jl_module_globalref(mod, var); } -static jl_value_t *jl_decode_value_any(jl_ircode_state *s, uint8_t tag) JL_GC_DISABLED +static jl_value_t *jl_decode_value_any(jl_ircode_state *s) { - int32_t sz = (tag == TAG_SHORT_GENERAL ? read_uint8(s->s) : read_int32(s->s)); - jl_value_t *v = jl_gc_alloc(s->ptls, sz, NULL); - jl_set_typeof(v, (void*)(intptr_t)0xf50); jl_datatype_t *dt = (jl_datatype_t*)jl_decode_value(s); - if (dt->smalltag) + JL_GC_PROMISE_ROOTED(dt); // (JL_ALWAYS_LEAFTYPE) + // jl_new_struct_uninit + size_t sz = jl_datatype_size(dt); + jl_value_t *v = jl_gc_alloc(s->ptls, sz, dt); + if (dt->smalltag) // TODO: do we need this? 
jl_set_typetagof(v, dt->smalltag, 0); - else - jl_set_typeof(v, dt); char *data = (char*)jl_data_ptr(v); size_t i, np = dt->layout->npointers; char *start = data; - for (i = 0; i < np; i++) { - uint32_t ptr = jl_ptr_offset(dt, i); - jl_value_t **fld = &((jl_value_t**)data)[ptr]; - if ((char*)fld != start) - ios_readall(s->s, start, (const char*)fld - start); - *fld = jl_decode_value(s); - start = (char*)&fld[1]; + if (np) { + if (sz > 0) + memset(v, 0, sz); + JL_GC_PUSH1(&v); + for (i = 0; i < np; i++) { + uint32_t ptr = jl_ptr_offset(dt, i); + jl_value_t **fld = &((jl_value_t**)data)[ptr]; + if ((char*)fld != start) + ios_readall(s->s, start, (const char*)fld - start); + *fld = jl_decode_value(s); + jl_gc_wb(v, *fld); + start = (char*)&fld[1]; + } + JL_GC_POP(); } data += jl_datatype_size(dt); if (data != start) @@ -678,7 +767,7 @@ static jl_value_t *jl_decode_value_any(jl_ircode_state *s, uint8_t tag) JL_GC_DI return v; } -static jl_value_t *jl_decode_value(jl_ircode_state *s) JL_GC_DISABLED +static jl_value_t *jl_decode_value(jl_ircode_state *s) { assert(!ios_eof(s->s)); jl_value_t *v; @@ -724,10 +813,12 @@ static jl_value_t *jl_decode_value(jl_ircode_state *s) JL_GC_DISABLED case TAG_SLOTNUMBER: v = jl_box_slotnumber(read_uint16(s->s)); return v; - case TAG_ARRAY: JL_FALLTHROUGH; case TAG_ARRAY1D: - return jl_decode_value_array(s, tag); + case TAG_ARRAY1D: + return jl_decode_value_array1d(s, tag); case TAG_MEMORYT: - return jl_decode_value_memory(s, jl_decode_value(s), jl_unbox_long(jl_decode_value(s))); + v = jl_decode_value(s); + JL_GC_PROMISE_ROOTED(v); // (JL_ALWAYS_LEAFTYPE) + return (jl_value_t*)jl_decode_value_memory(s, v, jl_unbox_long(jl_decode_value(s))); case TAG_EXPR: JL_FALLTHROUGH; case TAG_LONG_EXPR: JL_FALLTHROUGH; case TAG_CALL1: JL_FALLTHROUGH; @@ -738,27 +829,47 @@ static jl_value_t *jl_decode_value(jl_ircode_state *s) JL_GC_DISABLED case TAG_PHICNODE: JL_FALLTHROUGH; case TAG_LONG_PHICNODE: return jl_decode_value_phic(s, tag); case TAG_GOTONODE: JL_FALLTHROUGH; case TAG_QUOTENODE: + { v = jl_new_struct_uninit(tag == TAG_GOTONODE ? jl_gotonode_type : jl_quotenode_type); + JL_GC_PUSH1(&v); set_nth_field(tag == TAG_GOTONODE ? 
jl_gotonode_type : jl_quotenode_type, v, 0, jl_decode_value(s), 0); + JL_GC_POP(); return v; + } case TAG_GOTOIFNOT: + { v = jl_new_struct_uninit(jl_gotoifnot_type); + JL_GC_PUSH1(&v); set_nth_field(jl_gotoifnot_type, v, 0, jl_decode_value(s), 0); set_nth_field(jl_gotoifnot_type, v, 1, jl_decode_value(s), 0); + JL_GC_POP(); return v; + } case TAG_ENTERNODE: + { v = jl_new_struct_uninit(jl_enternode_type); + JL_GC_PUSH1(&v); set_nth_field(jl_enternode_type, v, 0, jl_decode_value(s), 0); set_nth_field(jl_enternode_type, v, 1, jl_decode_value(s), 0); + JL_GC_POP(); return v; + } case TAG_ARGUMENT: + { v = jl_new_struct_uninit(jl_argument_type); + JL_GC_PUSH1(&v); set_nth_field(jl_argument_type, v, 0, jl_decode_value(s), 0); + JL_GC_POP(); return v; + } case TAG_RETURNNODE: + { v = jl_new_struct_uninit(jl_returnnode_type); + JL_GC_PUSH1(&v); set_nth_field(jl_returnnode_type, v, 0, jl_decode_value(s), 0); + JL_GC_POP(); return v; + } case TAG_SHORTER_INT64: v = jl_box_int64((int16_t)read_uint16(s->s)); return v; @@ -777,9 +888,14 @@ static jl_value_t *jl_decode_value(jl_ircode_state *s) JL_GC_DISABLED case TAG_UINT8: return jl_box_uint8(read_uint8(s->s)); case TAG_NEARBYGLOBAL: - assert(s->method != NULL); + { + jl_method_t *m = s->method; + assert(m != NULL); + JL_GC_PROMISE_ROOTED(m); v = jl_decode_value(s); - return jl_module_globalref(s->method->module, (jl_sym_t*)v); + JL_GC_PROMISE_ROOTED(v); // symbol + return jl_module_globalref(m->module, (jl_sym_t*)v); + } case TAG_NEARBYMODULE: assert(s->method != NULL); return (jl_value_t*)s->method->module; @@ -792,19 +908,29 @@ static jl_value_t *jl_decode_value(jl_ircode_state *s) JL_GC_DISABLED case TAG_BASE: return (jl_value_t*)jl_base_module; case TAG_VECTORTY: + { v = jl_decode_value(s); - return jl_apply_type2((jl_value_t*)jl_array_type, v, jl_box_long(1)); + JL_GC_PUSH1(&v); + v = jl_apply_type2((jl_value_t*)jl_array_type, v, jl_box_long(1)); + JL_GC_POP(); + return v; + } case TAG_PTRTY: + { v = jl_decode_value(s); - return jl_apply_type1((jl_value_t*)jl_pointer_type, v); + JL_GC_PUSH1(&v); + v = jl_apply_type1((jl_value_t*)jl_pointer_type, v); + JL_GC_POP(); + return v; + } case TAG_STRING: n = read_int32(s->s); v = jl_alloc_string(n); ios_readall(s->s, jl_string_data(v), n); return v; default: - assert(tag == TAG_GENERAL || tag == TAG_SHORT_GENERAL); - return jl_decode_value_any(s, tag); + assert(tag == TAG_GENERAL); + return jl_decode_value_any(s); } } @@ -880,8 +1006,6 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) assert(jl_array_nrows(code->code) == codelocs_nstmts(code->debuginfo->codelocs) || jl_string_len(code->debuginfo->codelocs) == 0); ios_t dest; ios_mem(&dest, 0); - int en = jl_gc_enable(0); // Might GC - size_t i; if (m->roots == NULL) { m->roots = jl_alloc_vec_any(0); @@ -919,38 +1043,35 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) write_int32(s.s, (int32_t)nargs); } - for (i = 0; i < 5; i++) { - int copy = 1; - if (i == 1) { // skip debuginfo - assert(jl_field_offset(jl_code_info_type, i) == offsetof(jl_code_info_t, debuginfo)); - continue; - } - jl_encode_value_(&s, jl_get_nth_field((jl_value_t*)code, i), copy); - } + jl_encode_value_(&s, (jl_value_t*)code->code, 1); + jl_encode_value_(&s, (jl_value_t*)code->ssavaluetypes, 1); + jl_encode_value_(&s, (jl_value_t*)code->ssaflags, 1); // For opaque closure, also save the slottypes. We technically only need the first slot type, // but this is simpler for now. 
We may want to refactor where this gets stored in the future. if (m->is_for_opaque_closure) jl_encode_value_(&s, code->slottypes, 1); + jl_string_t *v = NULL; + JL_GC_PUSH1(&v); // Slotnames. For regular methods, we require that m->slot_syms matches the // CodeInfo's slotnames, so we do not need to save it here. - if (m->generator) + if (m->generator) { // can't optimize generated functions - jl_encode_value_(&s, (jl_value_t*)jl_compress_argnames(code->slotnames), 1); - else + v = jl_compress_argnames(code->slotnames); + jl_encode_value_(&s, (jl_value_t*)v, 1); + } + else { jl_encode_value(&s, jl_nothing); + } write_uint8(s.s, s.relocatability); ios_flush(s.s); - jl_string_t *v = jl_pchar_to_string(s.s->buf, s.s->size); + v = jl_pchar_to_string(s.s->buf, s.s->size); ios_close(s.s); - if (jl_array_nrows(m->roots) == 0) { + if (jl_array_nrows(m->roots) == 0) m->roots = NULL; - } - JL_GC_PUSH1(&v); - jl_gc_enable(en); JL_UNLOCK(&m->writelock); // Might GC JL_GC_POP(); @@ -965,12 +1086,10 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t JL_LOCK(&m->writelock); // protect the roots array (Might GC) assert(jl_is_method(m)); assert(jl_is_string(data)); - size_t i; ios_t src; ios_mem(&src, 0); ios_setbuf(&src, (char*)jl_string_data(data), jl_string_len(data), 0); src.size = jl_string_len(data); - int en = jl_gc_enable(0); // Might GC jl_ircode_state s = { &src, m, @@ -978,8 +1097,10 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t jl_current_task->ptls, 1 }; - jl_code_info_t *code = jl_new_code_info_uninit(); + jl_value_t *slotnames = NULL; + JL_GC_PUSH2(&code, &slotnames); + jl_code_info_flags_t flags; flags.packed = read_uint16(s.s); code->inlining = flags.bits.inlining; @@ -991,9 +1112,9 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t code->purity.bits = read_uint16(s.s); code->inlining_cost = read_uint16(s.s); - size_t nslots = read_int32(s.s); code->slotflags = jl_alloc_array_1d(jl_array_uint8_type, nslots); + jl_gc_wb(code, code->slotflags); ios_readall(s.s, jl_array_data(code->slotflags, char), nslots); if (flags.bits.nargsmatchesmethod) { @@ -1002,25 +1123,29 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t code->nargs = read_int32(s.s); } - for (i = 0; i < 5; i++) { - if (i == 1) // skip debuginfo - continue; - assert(jl_field_isptr(jl_code_info_type, i)); - jl_value_t **fld = (jl_value_t**)((char*)jl_data_ptr(code) + jl_field_offset(jl_code_info_type, i)); - *fld = jl_decode_value(&s); - } - if (m->is_for_opaque_closure) + code->code = (jl_array_t*)jl_decode_value(&s); + jl_gc_wb(code, code->code); + code->ssavaluetypes = jl_decode_value(&s); + jl_gc_wb(code, code->ssavaluetypes); + code->ssaflags = (jl_array_t*)jl_decode_value(&s); + jl_gc_wb(code, code->ssaflags); + + if (m->is_for_opaque_closure) { code->slottypes = jl_decode_value(&s); + jl_gc_wb(code, code->slottypes); + } - jl_value_t *slotnames = jl_decode_value(&s); + slotnames = jl_decode_value(&s); if (!jl_is_string(slotnames)) slotnames = m->slot_syms; code->slotnames = jl_uncompress_argnames(slotnames); + jl_gc_wb(code, code->slotnames); if (metadata) code->debuginfo = jl_atomic_load_relaxed(&metadata->debuginfo); else code->debuginfo = m->debuginfo; + jl_gc_wb(code, code->debuginfo); assert(code->debuginfo); assert(jl_array_nrows(code->code) == codelocs_nstmts(code->debuginfo->codelocs) || jl_string_len(code->debuginfo->codelocs) == 0); @@ -1029,10 +1154,7 @@ JL_DLLEXPORT jl_code_info_t 
*jl_uncompress_ir(jl_method_t *m, jl_code_instance_t assert(ios_getc(s.s) == -1); ios_close(s.s); - JL_GC_PUSH1(&code); - jl_gc_enable(en); JL_UNLOCK(&m->writelock); // Might GC - JL_GC_POP(); if (metadata) { code->parent = metadata->def; jl_gc_wb(code, code->parent); @@ -1043,6 +1165,7 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t code->edges = (jl_value_t*)s.edges; jl_gc_wb(code, s.edges); } + JL_GC_POP(); return code; } @@ -1470,7 +1593,7 @@ void jl_init_serializer(void) deser_tag[TAG_DATATYPE] = (jl_value_t*)jl_datatype_type; deser_tag[TAG_SLOTNUMBER] = (jl_value_t*)jl_slotnumber_type; deser_tag[TAG_SVEC] = (jl_value_t*)jl_simplevector_type; - deser_tag[TAG_ARRAY] = (jl_value_t*)jl_array_type; + deser_tag[TAG_ARRAY1D] = (jl_value_t*)jl_array_type; deser_tag[TAG_MEMORYT] = (jl_value_t*)jl_genericmemory_type; deser_tag[TAG_EXPR] = (jl_value_t*)jl_expr_type; deser_tag[TAG_PHINODE] = (jl_value_t*)jl_phinode_type; diff --git a/src/serialize.h b/src/serialize.h index 3aa82a1d09a9b..549c1588073ff 100644 --- a/src/serialize.h +++ b/src/serialize.h @@ -7,69 +7,6 @@ extern "C" { #endif -#define TAG_SYMBOL 2 -#define TAG_SSAVALUE 3 -#define TAG_DATATYPE 4 -#define TAG_SLOTNUMBER 5 -#define TAG_SVEC 6 -#define TAG_ARRAY 7 -#define TAG_NULL 8 -#define TAG_EXPR 9 -#define TAG_PHINODE 10 -#define TAG_PHICNODE 11 -#define TAG_LONG_SYMBOL 12 -#define TAG_LONG_SVEC 13 -#define TAG_LONG_EXPR 14 -#define TAG_LONG_PHINODE 15 -#define TAG_LONG_PHICNODE 16 -#define TAG_METHODROOT 17 -#define TAG_EDGE 18 -#define TAG_STRING 19 -#define TAG_SHORT_INT64 20 -#define TAG_SHORT_GENERAL 21 -#define TAG_CNULL 22 -#define TAG_ARRAY1D 23 -#define TAG_SINGLETON 24 -#define TAG_MODULE 25 -#define TAG_TVAR 26 -#define TAG_METHOD_INSTANCE 27 -#define TAG_METHOD 28 -#define TAG_CODE_INSTANCE 29 -#define TAG_COMMONSYM 30 -#define TAG_NEARBYGLOBAL 31 -#define TAG_GLOBALREF 32 -#define TAG_CORE 33 -#define TAG_BASE 34 -#define TAG_BITYPENAME 35 -#define TAG_NEARBYMODULE 36 -#define TAG_INT32 37 -#define TAG_INT64 38 -#define TAG_UINT8 39 -#define TAG_VECTORTY 40 -#define TAG_PTRTY 41 -#define TAG_LONG_SSAVALUE 42 -#define TAG_LONG_METHODROOT 43 -#define TAG_LONG_EDGE 44 -#define TAG_SHORTER_INT64 45 -#define TAG_SHORT_INT32 46 -#define TAG_CALL1 47 -#define TAG_CALL2 48 -#define TAG_SHORT_BACKREF 49 -#define TAG_BACKREF 50 -#define TAG_UNIONALL 51 -#define TAG_GOTONODE 52 -#define TAG_QUOTENODE 53 -#define TAG_GENERAL 54 -#define TAG_GOTOIFNOT 55 -#define TAG_RETURNNODE 56 -#define TAG_ARGUMENT 57 -#define TAG_RELOC_METHODROOT 58 -#define TAG_BINDING 59 -#define TAG_MEMORYT 60 -#define TAG_ENTERNODE 61 - -#define LAST_TAG 61 - #define write_uint8(s, n) ios_putc((n), (s)) #define read_uint8(s) ((uint8_t)ios_getc((s))) #define write_int8(s, n) write_uint8((s), (n)) @@ -137,12 +74,6 @@ static inline uint32_t read_uint32(ios_t *s) JL_NOTSAFEPOINT #define read_uint(s) read_uint32(s) #endif - -void *jl_lookup_ser_tag(jl_value_t *v); -void *jl_lookup_common_symbol(jl_value_t *v); -jl_value_t *jl_deser_tag(uint8_t tag); -jl_value_t *jl_deser_symbol(uint8_t tag); - #ifdef __cplusplus } #endif From 42e14d6983ac855726b31b753adba05aaefcf884 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 2 Dec 2024 12:32:16 -0500 Subject: [PATCH 525/537] ircode: small optimization for nearby ssavalue Since most ssavalue are used just after their def, this gives a small memory savings on compressed IR (a fraction of a percent). 
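As an illustration of the encoding choice (a Julia sketch with made-up names, not the C code in `src/ircode.c` that this patch actually changes), the idea is to prefer a one-byte backwards offset whenever the use sits within 255 statements of its definition, falling back to the existing absolute encodings otherwise; the tag values are the ones defined by this patch series:

```julia
# Hedged sketch: `ssaid` stands in for the index of the statement currently being
# encoded (s->ssaid in the C code) and `id` for the referenced SSAValue id.
function encode_ssavalue(io::IO, ssaid::Int, id::Int)
    delta = ssaid - id
    if 0 <= delta < 256
        write(io, UInt8(7))    # TAG_NEARBYSSAVALUE: one-byte backwards offset
        write(io, UInt8(delta))
    elseif 0 <= id < 256
        write(io, UInt8(3))    # TAG_SSAVALUE: one-byte absolute id
        write(io, UInt8(id))
    else
        write(io, UInt8(42))   # TAG_LONG_SSAVALUE: two-byte absolute id
        write(io, UInt16(id))
    end
end
```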
--- src/ircode.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/src/ircode.c b/src/ircode.c index d3137ce26edef..de27f8dbeefca 100644 --- a/src/ircode.c +++ b/src/ircode.c @@ -21,7 +21,7 @@ extern "C" { #define TAG_DATATYPE 4 #define TAG_SLOTNUMBER 5 #define TAG_SVEC 6 -// #define TAG_UNUSED 7 +#define TAG_NEARBYSSAVALUE 7 #define TAG_NULL 8 #define TAG_EXPR 9 #define TAG_PHINODE 10 @@ -82,6 +82,7 @@ extern "C" { typedef struct { ios_t *s; + size_t ssaid; // method we're compressing for jl_method_t *method; jl_svec_t *edges; @@ -307,6 +308,10 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) jl_encode_value(s, jl_globalref_name(v)); } } + else if (jl_is_ssavalue(v) && s->ssaid - ((jl_ssavalue_t*)v)->id < 256) { + write_uint8(s->s, TAG_NEARBYSSAVALUE); + write_uint8(s->s, s->ssaid - ((jl_ssavalue_t*)v)->id); + } else if (jl_is_ssavalue(v) && ((jl_ssavalue_t*)v)->id < 256 && ((jl_ssavalue_t*)v)->id >= 0) { write_uint8(s->s, TAG_SSAVALUE); write_uint8(s->s, ((jl_ssavalue_t*)v)->id); @@ -807,6 +812,9 @@ static jl_value_t *jl_decode_value(jl_ircode_state *s) case TAG_SSAVALUE: v = jl_box_ssavalue(read_uint8(s->s)); return v; + case TAG_NEARBYSSAVALUE: + v = jl_box_ssavalue(s->ssaid - read_uint8(s->s)); + return v; case TAG_LONG_SSAVALUE: v = jl_box_ssavalue(read_uint16(s->s)); return v; @@ -1014,6 +1022,7 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) jl_value_t *edges = code->edges; jl_ircode_state s = { &dest, + 0, m, (!isdef && jl_is_svec(edges)) ? (jl_svec_t*)edges : jl_emptysvec, jl_current_task->ptls, @@ -1043,7 +1052,13 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) write_int32(s.s, (int32_t)nargs); } - jl_encode_value_(&s, (jl_value_t*)code->code, 1); + size_t i, l = jl_array_dim0(code->code); + write_uint64(s.s, l); + for (i = 0; i < l; i++) { + s.ssaid = i; + jl_encode_value(&s, jl_array_ptr_ref(code->code, i)); + } + s.ssaid = 0; jl_encode_value_(&s, (jl_value_t*)code->ssavaluetypes, 1); jl_encode_value_(&s, (jl_value_t*)code->ssaflags, 1); @@ -1092,6 +1107,7 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t src.size = jl_string_len(data); jl_ircode_state s = { &src, + 0, m, metadata == NULL ? NULL : jl_atomic_load_relaxed(&metadata->edges), jl_current_task->ptls, @@ -1123,8 +1139,14 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t code->nargs = read_int32(s.s); } - code->code = (jl_array_t*)jl_decode_value(&s); + size_t i, n = read_uint64(s.s); + code->code = jl_alloc_array_1d(jl_array_any_type, n); jl_gc_wb(code, code->code); + for (i = 0; i < n; i++) { + s.ssaid = i; + jl_array_ptr_set(code->code, i, jl_decode_value(&s)); + } + s.ssaid = 0; code->ssavaluetypes = jl_decode_value(&s); jl_gc_wb(code, code->ssavaluetypes); code->ssaflags = (jl_array_t*)jl_decode_value(&s); From f1b0b010dd20591a013c71d8f3f7a09503e55baf Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 2 Dec 2024 18:02:41 -0500 Subject: [PATCH 526/537] Fix scope of hoisted signature-local variables (#56712) When we declare inner methods, e.g. the `f` in ``` function fs() f(lhs::Integer) = 1 f(lhs::Integer, rhs::(local x=Integer; x)) = 2 return f end ``` we must hoist the definition of the (appropriately mangled) generic function `f` to top-level, including all variables that were used in the signature definition of `f`. 
This situation is a bit unique in the language because it uses inner function scope, but gets executed in toplevel scope. For example, you're not allowed to use a local of the inner function in the signature definition: ``` julia> function fs() local x=Integer f(lhs::Integer, rhs::x) = 2 return f end ERROR: syntax: local variable x cannot be used in closure declaration Stacktrace: [1] top-level scope @ REPL[3]:1 ``` In particular, the restriction is signature-local: ``` julia> function fs() f(rhs::(local x=Integer; x)) = 1 f(lhs::Integer, rhs::x) = 2 return f end ERROR: syntax: local variable x cannot be used in closure declaration Stacktrace: [1] top-level scope @ REPL[4]:1 ``` There's a special intermediate form `moved-local` that gets generated for this definition. In c6c3d72d1cbddb3d27e0df0e739bb27dd709a413, this form stopped getting generated for certain inner methods. I suspect this happened because of the incorrect assumption that the set of moved locals is being computed over all signatures, rather than being a per-signature property. The result of all of this was that this is one of the few places where lowering still generated a symbol as the lhs of an assignment for a global (instead of globalref), because the code that generates the assignment assumes it's a local, but the later pass doesn't know this. Because we still retain the code for this from before we started using globalref consistently, this wasn't generally causing a problems, except possibly leaking a global (or potentially assigning to a global when this wasn't intended). However, in follow on work, I want to make use of knowing whether the LHS is a global or local in lowering, so this was causing me trouble. Fix all of this by putting back the `moved-local` where it was dropped. Fixes #56711 --- src/julia-syntax.scm | 1 + test/syntax.jl | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index 72e97da3c2daa..852d5eb4d6f86 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -4279,6 +4279,7 @@ f(x) = yt(x) (if (or exists (and short (pair? alldefs))) `(toplevel-butfirst (null) + ,@(map (lambda (v) `(moved-local ,v)) moved-vars) ,@sp-inits ,@mk-method (latestworld)) diff --git a/test/syntax.jl b/test/syntax.jl index d9d311ac6615d..0fb752bae480f 100644 --- a/test/syntax.jl +++ b/test/syntax.jl @@ -4033,3 +4033,11 @@ end @test isa(create_inner_f_no_methods(), Function) @test length(methods(create_inner_f_no_methods())) == 0 @test Base.invoke_in_world(first(methods(create_inner_f_one_method)).primary_world, create_inner_f_one_method()) == 1 + +# Issue 56711 - Scope of signature hoisting +function fs56711() + f(lhs::Integer) = 1 + f(lhs::Integer, rhs::(local x_should_not_be_defined=Integer; x_should_not_be_defined)) = 2 + return f +end +@test !@isdefined(x_should_not_be_defined) From 1b37a2f9e0afc5f684e0b2a0af17cd9526aae529 Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Mon, 2 Dec 2024 20:09:26 -0500 Subject: [PATCH 527/537] ircode: avoid serializing ssaflags in the common case when they are all zero When not all-zero, run-length encoding would also probably be great here for lowered code (before inference). 
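To make the format change concrete, here is a rough Julia sketch of the scheme (illustrative only, with made-up function names; in the patch the `has_ssaflags` bit actually lives in the packed code-info flags word rather than in a byte of its own): a single bit records whether any statement carries a nonzero flag, and the 4-bytes-per-statement array is written and read back only when that bit is set.

```julia
# Hedged sketch; `nstmts` is the statement count, which the decoder already knows.
function encode_ssaflags(io::IO, ssaflags::Vector{UInt32})
    has_ssaflags = any(!iszero, ssaflags)
    write(io, UInt8(has_ssaflags))          # a single flag bit in the real format
    has_ssaflags && write(io, ssaflags)     # skipped entirely in the common all-zero case
    return has_ssaflags
end

function decode_ssaflags(io::IO, nstmts::Int)
    has_ssaflags = read(io, UInt8) != 0
    has_ssaflags || return zeros(UInt32, nstmts)
    return UInt32[read(io, UInt32) for _ in 1:nstmts]
end
```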
--- Compiler/test/interpreter_exec.jl | 6 +++--- src/ircode.c | 30 +++++++++++++++++++++++------- src/julia_internal.h | 1 + 3 files changed, 27 insertions(+), 10 deletions(-) diff --git a/Compiler/test/interpreter_exec.jl b/Compiler/test/interpreter_exec.jl index 4972df1a27202..b1d450f8f4286 100644 --- a/Compiler/test/interpreter_exec.jl +++ b/Compiler/test/interpreter_exec.jl @@ -23,7 +23,7 @@ let m = Meta.@lower 1 + 1 ] nstmts = length(src.code) src.ssavaluetypes = nstmts - src.ssaflags = fill(UInt8(0x00), nstmts) + src.ssaflags = fill(zero(UInt32), nstmts) src.debuginfo = Core.DebugInfo(:none) Compiler.verify_ir(Compiler.inflate_ir(src)) global test29262 = true @@ -63,7 +63,7 @@ let m = Meta.@lower 1 + 1 ] nstmts = length(src.code) src.ssavaluetypes = nstmts - src.ssaflags = fill(UInt8(0x00), nstmts) + src.ssaflags = fill(zero(UInt32), nstmts) src.debuginfo = Core.DebugInfo(:none) m.args[1] = copy(src) Compiler.verify_ir(Compiler.inflate_ir(src)) @@ -103,7 +103,7 @@ let m = Meta.@lower 1 + 1 ] nstmts = length(src.code) src.ssavaluetypes = nstmts - src.ssaflags = fill(UInt8(0x00), nstmts) + src.ssaflags = fill(zero(UInt32), nstmts) src.debuginfo = Core.DebugInfo(:none) Compiler.verify_ir(Compiler.inflate_ir(src)) global test29262 = true diff --git a/src/ircode.c b/src/ircode.c index de27f8dbeefca..9e64e3fe2b574 100644 --- a/src/ircode.c +++ b/src/ircode.c @@ -549,7 +549,8 @@ static void jl_encode_value_(jl_ircode_state *s, jl_value_t *v, int as_literal) static jl_code_info_flags_t code_info_flags(uint8_t propagate_inbounds, uint8_t has_fcall, uint8_t nospecializeinfer, uint8_t isva, - uint8_t inlining, uint8_t constprop, uint8_t nargsmatchesmethod) + uint8_t inlining, uint8_t constprop, uint8_t nargsmatchesmethod, + jl_array_t *ssaflags) { jl_code_info_flags_t flags; flags.bits.propagate_inbounds = propagate_inbounds; @@ -559,6 +560,11 @@ static jl_code_info_flags_t code_info_flags(uint8_t propagate_inbounds, uint8_t flags.bits.inlining = inlining; flags.bits.constprop = constprop; flags.bits.nargsmatchesmethod = nargsmatchesmethod; + flags.bits.has_ssaflags = 0; + const uint32_t *ssaflag_data = jl_array_data(ssaflags, uint32_t); + for (size_t i = 0, l = jl_array_dim0(ssaflags); i < l; i++) + if (ssaflag_data[i]) + flags.bits.has_ssaflags = 1; return flags; } @@ -1033,7 +1039,8 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) jl_code_info_flags_t flags = code_info_flags(code->propagate_inbounds, code->has_fcall, code->nospecializeinfer, code->isva, code->inlining, code->constprop, - nargsmatchesmethod); + nargsmatchesmethod, + code->ssaflags); write_uint16(s.s, checked_size(flags.packed, IR_DATASIZE_FLAGS)); write_uint16(s.s, checked_size(code->purity.bits, IR_DATASIZE_PURITY)); write_uint16(s.s, checked_size(code->inlining_cost, IR_DATASIZE_INLINING_COST)); @@ -1060,7 +1067,11 @@ JL_DLLEXPORT jl_string_t *jl_compress_ir(jl_method_t *m, jl_code_info_t *code) } s.ssaid = 0; jl_encode_value_(&s, (jl_value_t*)code->ssavaluetypes, 1); - jl_encode_value_(&s, (jl_value_t*)code->ssaflags, 1); + assert(jl_typetagis(code->ssaflags, jl_array_uint32_type)); + assert(jl_array_dim0(code->ssaflags) == l); + const uint32_t *ssaflags_data = jl_array_data(code->ssaflags, uint32_t); + if (flags.bits.has_ssaflags) + ios_write(s.s, (const char*)ssaflags_data, l * sizeof(*ssaflags_data)); // For opaque closure, also save the slottypes. We technically only need the first slot type, // but this is simpler for now. We may want to refactor where this gets stored in the future. 
@@ -1139,18 +1150,23 @@ JL_DLLEXPORT jl_code_info_t *jl_uncompress_ir(jl_method_t *m, jl_code_instance_t code->nargs = read_int32(s.s); } - size_t i, n = read_uint64(s.s); - code->code = jl_alloc_array_1d(jl_array_any_type, n); + size_t i, l = read_uint64(s.s); + code->code = jl_alloc_array_1d(jl_array_any_type, l); jl_gc_wb(code, code->code); - for (i = 0; i < n; i++) { + for (i = 0; i < l; i++) { s.ssaid = i; jl_array_ptr_set(code->code, i, jl_decode_value(&s)); } s.ssaid = 0; code->ssavaluetypes = jl_decode_value(&s); jl_gc_wb(code, code->ssavaluetypes); - code->ssaflags = (jl_array_t*)jl_decode_value(&s); + code->ssaflags = jl_alloc_array_1d(jl_array_uint32_type, l); jl_gc_wb(code, code->ssaflags); + uint32_t *ssaflags_data = jl_array_data(code->ssaflags, uint32_t); + if (flags.bits.has_ssaflags) + ios_readall(s.s, (char*)ssaflags_data, l * sizeof(*ssaflags_data)); + else + memset(ssaflags_data, 0, l * sizeof(*ssaflags_data)); if (m->is_for_opaque_closure) { code->slottypes = jl_decode_value(&s); diff --git a/src/julia_internal.h b/src/julia_internal.h index e081c94329deb..2178f603441e0 100644 --- a/src/julia_internal.h +++ b/src/julia_internal.h @@ -651,6 +651,7 @@ typedef struct { uint16_t nargsmatchesmethod:1; uint16_t inlining:2; // 0 = use heuristic; 1 = aggressive; 2 = none uint16_t constprop:2; // 0 = use heuristic; 1 = aggressive; 2 = none + uint16_t has_ssaflags:1; } jl_code_info_flags_bitfield_t; typedef union { From efa917e8775cd40fdd74b657d1e5d2db2342cd07 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Mon, 2 Dec 2024 20:28:05 -0500 Subject: [PATCH 528/537] Extend `invoke` to accept CodeInstance (#56660) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is an alternative mechanism to #56650 that largely achieves the same result, but by hooking into `invoke` rather than a generated function. They are orthogonal mechanisms, and its possible we want both. However, in #56650, both Jameson and Valentin were skeptical of the generated function signature bottleneck. This PR is sort of a hybrid of mechanism in #52964 and what I proposed in https://github.com/JuliaLang/julia/pull/56650#issuecomment-2493800877. In particular, this PR: 1. Extends `invoke` to support a CodeInstance in place of its usual `types` argument. 2. Adds a new `typeinf` optimized generic. The semantics of this optimized generic allow the compiler to instead call a companion `typeinf_edge` function, allowing a mid-inference interpreter switch (like #52964), without being forced through a concrete signature bottleneck. However, if calling `typeinf_edge` does not work (e.g. because the compiler version is mismatched), this still has well defined semantics, you just don't get inference support. The additional benefit of the `typeinf` optimized generic is that it lets custom cache owners tell the runtime how to "cure" code instances that have lost their native code. Currently the runtime only knows how to do that for `owner == nothing` CodeInstances (by re-running inference). This extension is not implemented, but the idea is that the runtime would be permitted to call the `typeinf` optimized generic on the dead CodeInstance's `owner` and `def` fields to obtain a cured CodeInstance (or a user-actionable error from the plugin). This PR includes an implementation of `with_new_compiler` from #56650. 
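For a sense of how this is meant to be driven from user code: the `CompilerDevTools` package added below wraps the new functionality, and a minimal usage sketch (assuming only that the package has been made available in the active environment) is just:

```julia
using CompilerDevTools

# Computes sin(1.0), but inference and caching go through SplitCacheInterp under the
# SplitCacheOwner() cache owner; internally the helper resolves a CodeInstance via the
# CompilerPlugins.typeinf optimized generic and enters it with invoke(f, ci, args...).
with_new_compiler(sin, 1.0)
```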
This PR includes just enough compiler support to make the compiler optimize this to the same code that #56650 produced: ``` julia> @code_typed with_new_compiler(sin, 1.0) CodeInfo( 1 ─ $(Expr(:foreigncall, :(:jl_get_tls_world_age), UInt64, svec(), 0, :(:ccall)))::UInt64 │ %2 = builtin Core.getfield(args, 1)::Float64 │ %3 = invoke sin(%2::Float64)::Float64 └── return %3 ) => Float64 ``` However, the implementation here is extremely incomplete. I'm putting it up only as a directional sketch to see if people prefer it over #56650. If so, I would prepare a cleaned up version of this PR that has the optimized generics as well as the curing support, but not the full inference integration (which needs a fair bit more work). --- .../extras/CompilerDevTools/Manifest.toml | 15 +++++ Compiler/extras/CompilerDevTools/Project.toml | 5 ++ .../CompilerDevTools/src/CompilerDevTools.jl | 56 +++++++++++++++++++ Compiler/src/abstractinterpretation.jl | 48 +++++++++++++--- Compiler/src/abstractlattice.jl | 2 +- Compiler/src/bootstrap.jl | 10 +++- Compiler/src/stmtinfo.jl | 11 ++++ Compiler/src/utilities.jl | 4 +- NEWS.md | 2 + base/docs/basedocs.jl | 17 ++++++ base/optimized_generics.jl | 27 +++++++++ src/builtins.c | 22 ++++++++ src/interpreter.c | 24 +++++++- test/core.jl | 14 +++++ 14 files changed, 242 insertions(+), 15 deletions(-) create mode 100644 Compiler/extras/CompilerDevTools/Manifest.toml create mode 100644 Compiler/extras/CompilerDevTools/Project.toml create mode 100644 Compiler/extras/CompilerDevTools/src/CompilerDevTools.jl diff --git a/Compiler/extras/CompilerDevTools/Manifest.toml b/Compiler/extras/CompilerDevTools/Manifest.toml new file mode 100644 index 0000000000000..bcc78f1ded34a --- /dev/null +++ b/Compiler/extras/CompilerDevTools/Manifest.toml @@ -0,0 +1,15 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.12.0-DEV" +manifest_format = "2.0" +project_hash = "84f495a1bf065c95f732a48af36dd0cd2cefb9d5" + +[[deps.Compiler]] +path = "../.." +uuid = "807dbc54-b67e-4c79-8afb-eafe4df6f2e1" +version = "0.0.2" + +[[deps.CompilerDevTools]] +path = "." 
+uuid = "92b2d91f-d2bd-4c05-9214-4609ac33433f" +version = "0.0.0" diff --git a/Compiler/extras/CompilerDevTools/Project.toml b/Compiler/extras/CompilerDevTools/Project.toml new file mode 100644 index 0000000000000..a2749a9a56a84 --- /dev/null +++ b/Compiler/extras/CompilerDevTools/Project.toml @@ -0,0 +1,5 @@ +name = "CompilerDevTools" +uuid = "92b2d91f-d2bd-4c05-9214-4609ac33433f" + +[deps] +Compiler = "807dbc54-b67e-4c79-8afb-eafe4df6f2e1" diff --git a/Compiler/extras/CompilerDevTools/src/CompilerDevTools.jl b/Compiler/extras/CompilerDevTools/src/CompilerDevTools.jl new file mode 100644 index 0000000000000..cd3f7b7b4bdac --- /dev/null +++ b/Compiler/extras/CompilerDevTools/src/CompilerDevTools.jl @@ -0,0 +1,56 @@ +module CompilerDevTools + +using Compiler +using Core.IR + +struct SplitCacheOwner; end +struct SplitCacheInterp <: Compiler.AbstractInterpreter + world::UInt + inf_params::Compiler.InferenceParams + opt_params::Compiler.OptimizationParams + inf_cache::Vector{Compiler.InferenceResult} + function SplitCacheInterp(; + world::UInt = Base.get_world_counter(), + inf_params::Compiler.InferenceParams = Compiler.InferenceParams(), + opt_params::Compiler.OptimizationParams = Compiler.OptimizationParams(), + inf_cache::Vector{Compiler.InferenceResult} = Compiler.InferenceResult[]) + new(world, inf_params, opt_params, inf_cache) + end +end + +Compiler.InferenceParams(interp::SplitCacheInterp) = interp.inf_params +Compiler.OptimizationParams(interp::SplitCacheInterp) = interp.opt_params +Compiler.get_inference_world(interp::SplitCacheInterp) = interp.world +Compiler.get_inference_cache(interp::SplitCacheInterp) = interp.inf_cache +Compiler.cache_owner(::SplitCacheInterp) = SplitCacheOwner() + +import Core.OptimizedGenerics.CompilerPlugins: typeinf, typeinf_edge +@eval @noinline typeinf(::SplitCacheOwner, mi::MethodInstance, source_mode::UInt8) = + Base.invoke_in_world(which(typeinf, Tuple{SplitCacheOwner, MethodInstance, UInt8}).primary_world, Compiler.typeinf_ext, SplitCacheInterp(; world=Base.tls_world_age()), mi, source_mode) + +@eval @noinline function typeinf_edge(::SplitCacheOwner, mi::MethodInstance, parent_frame::Compiler.InferenceState, world::UInt, source_mode::UInt8) + # TODO: This isn't quite right, we're just sketching things for now + interp = SplitCacheInterp(; world) + Compiler.typeinf_edge(interp, mi.def, mi.specTypes, Core.svec(), parent_frame, false, false) +end + +# TODO: This needs special compiler support to properly case split for multiple +# method matches, etc. +@noinline function mi_for_tt(tt, world=Base.tls_world_age()) + interp = SplitCacheInterp(; world) + match, _ = Compiler.findsup(tt, Compiler.method_table(interp)) + Base.specialize_method(match) +end + +function with_new_compiler(f, args...) + tt = Base.signature_type(f, typeof(args)) + world = Base.tls_world_age() + new_compiler_ci = Core.OptimizedGenerics.CompilerPlugins.typeinf( + SplitCacheOwner(), mi_for_tt(tt), Compiler.SOURCE_MODE_ABI + ) + invoke(f, new_compiler_ci, args...) 
+end + +export with_new_compiler + +end diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index 5946adf80ad52..ffb4f4312cdcf 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -2218,16 +2218,46 @@ function abstract_invoke(interp::AbstractInterpreter, arginfo::ArgInfo, si::Stmt ft = widenconst(ft′) ft === Bottom && return Future(CallMeta(Bottom, Any, EFFECTS_THROWS, NoCallInfo())) types = argtype_by_index(argtypes, 3) - if types isa Const && types.val isa Method - method = types.val::Method - types = method # argument value - lookupsig = method.sig # edge kind - argtype = argtypes_to_type(pushfirst!(argtype_tail(argtypes, 4), ft)) - nargtype = typeintersect(lookupsig, argtype) - nargtype === Bottom && return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) - nargtype isa DataType || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # other cases are not implemented below + if types isa Const && types.val isa Union{Method, CodeInstance} + method_or_ci = types.val + if isa(method_or_ci, CodeInstance) + our_world = sv.world.this + argtype = argtypes_to_type(pushfirst!(argtype_tail(argtypes, 4), ft)) + sig = method_or_ci.def.specTypes + exct = method_or_ci.exctype + if !hasintersect(argtype, sig) + return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) + elseif !(argtype <: sig) + exct = Union{exct, TypeError} + end + callee_valid_range = WorldRange(method_or_ci.min_world, method_or_ci.max_world) + if !(our_world in callee_valid_range) + if our_world < first(callee_valid_range) + update_valid_age!(sv, WorldRange(first(sv.world.valid_worlds), first(callee_valid_range)-1)) + else + update_valid_age!(sv, WorldRange(last(callee_valid_range)+1, last(sv.world.valid_worlds))) + end + return Future(CallMeta(Bottom, ErrorException, EFFECTS_THROWS, NoCallInfo())) + end + # TODO: When we add curing, we may want to assume this is nothrow + if (method_or_ci.owner === Nothing && method_ir_ci.def.def isa Method) + exct = Union{exct, ErrorException} + end + update_valid_age!(sv, callee_valid_range) + return Future(CallMeta(method_or_ci.rettype, exct, Effects(decode_effects(method_or_ci.ipo_purity_bits), nothrow=(exct===Bottom)), + InvokeCICallInfo(method_or_ci))) + else + method = method_or_ci::Method + types = method # argument value + lookupsig = method.sig # edge kind + argtype = argtypes_to_type(pushfirst!(argtype_tail(argtypes, 4), ft)) + nargtype = typeintersect(lookupsig, argtype) + nargtype === Bottom && return Future(CallMeta(Bottom, TypeError, EFFECTS_THROWS, NoCallInfo())) + nargtype isa DataType || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) # other cases are not implemented below + # Fall through to generic invoke handling + end else - widenconst(types) >: Method && return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) + widenconst(types) >: Union{Method, CodeInstance} && return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) (types, isexact, isconcrete, istype) = instanceof_tfunc(argtype_by_index(argtypes, 3), false) isexact || return Future(CallMeta(Any, Any, Effects(), NoCallInfo())) unwrapped = unwrap_unionall(types) diff --git a/Compiler/src/abstractlattice.jl b/Compiler/src/abstractlattice.jl index c1f3050739170..7a9cff8918175 100644 --- a/Compiler/src/abstractlattice.jl +++ b/Compiler/src/abstractlattice.jl @@ -229,7 +229,7 @@ end if isa(t, Const) # don't consider mutable values useful constants val = t.val - return isa(val, Symbol) 
|| isa(val, Type) || isa(val, Method) || !ismutable(val) + return isa(val, Symbol) || isa(val, Type) || isa(val, Method) || isa(val, CodeInstance) || !ismutable(val) end isa(t, PartialTypeVar) && return false # this isn't forwardable return is_const_prop_profitable_arg(widenlattice(𝕃), t) diff --git a/Compiler/src/bootstrap.jl b/Compiler/src/bootstrap.jl index 7ee439cc7ac67..475c53e317152 100644 --- a/Compiler/src/bootstrap.jl +++ b/Compiler/src/bootstrap.jl @@ -5,7 +5,15 @@ # especially try to make sure any recursive and leaf functions have concrete signatures, # since we won't be able to specialize & infer them at runtime -activate_codegen!() = ccall(:jl_set_typeinf_func, Cvoid, (Any,), typeinf_ext_toplevel) +function activate_codegen!() + ccall(:jl_set_typeinf_func, Cvoid, (Any,), typeinf_ext_toplevel) + Core.eval(Compiler, quote + let typeinf_world_age = Base.tls_world_age() + @eval Core.OptimizedGenerics.CompilerPlugins.typeinf(::Nothing, mi::MethodInstance, source_mode::UInt8) = + Base.invoke_in_world($(Expr(:$, :typeinf_world_age)), typeinf_ext_toplevel, mi, Base.tls_world_age(), source_mode) + end + end) +end function bootstrap!() let time() = ccall(:jl_clock_now, Float64, ()) diff --git a/Compiler/src/stmtinfo.jl b/Compiler/src/stmtinfo.jl index 830bfa02d2d99..9f0f1f38d4c8a 100644 --- a/Compiler/src/stmtinfo.jl +++ b/Compiler/src/stmtinfo.jl @@ -268,6 +268,17 @@ end add_edges_impl(edges::Vector{Any}, info::UnionSplitApplyCallInfo) = for split in info.infos; add_edges!(edges, split); end +""" + info::InvokeCICallInfo + +Represents a resolved call to `Core.invoke` targeting a `Core.CodeInstance` +""" +struct InvokeCICallInfo <: CallInfo + edge::CodeInstance +end +add_edges_impl(edges::Vector{Any}, info::InvokeCICallInfo) = + add_one_edge!(edges, info.edge) + """ info::InvokeCallInfo diff --git a/Compiler/src/utilities.jl b/Compiler/src/utilities.jl index 29f3dfa4afd4a..da20f9aafbfb2 100644 --- a/Compiler/src/utilities.jl +++ b/Compiler/src/utilities.jl @@ -54,8 +54,8 @@ function count_const_size(@nospecialize(x), count_self::Bool = true) # No definite size (isa(x, GenericMemory) || isa(x, String) || isa(x, SimpleVector)) && return MAX_INLINE_CONST_SIZE + 1 - if isa(x, Module) || isa(x, Method) - # We allow modules and methods, because we already assume they are externally + if isa(x, Module) || isa(x, Method) || isa(x, CodeInstance) + # We allow modules, methods and CodeInstance, because we already assume they are externally # rooted, so we count their contents as 0 size. return sizeof(Ptr{Cvoid}) end diff --git a/NEWS.md b/NEWS.md index c1d5f38f337b0..b77a786c24823 100644 --- a/NEWS.md +++ b/NEWS.md @@ -103,6 +103,8 @@ New library features * New `ltruncate`, `rtruncate` and `ctruncate` functions for truncating strings to text width, accounting for char widths ([#55351]) * `isless` (and thus `cmp`, sorting, etc.) is now supported for zero-dimensional `AbstractArray`s ([#55772]) * `invoke` now supports passing a Method instead of a type signature making this interface somewhat more flexible for certain uncommon use cases ([#56692]). +* `invoke` now supports passing a CodeInstance instead of a type, which can enable +certain compiler plugin workflows ([#56660]). Standard library changes ------------------------ diff --git a/base/docs/basedocs.jl b/base/docs/basedocs.jl index 5119ceaf2164a..141950f5e92ff 100644 --- a/base/docs/basedocs.jl +++ b/base/docs/basedocs.jl @@ -2031,6 +2031,7 @@ applicable """ invoke(f, argtypes::Type, args...; kwargs...) 
invoke(f, argtypes::Method, args...; kwargs...) + invoke(f, argtypes::CodeInstance, args...; kwargs...) Invoke a method for the given generic function `f` matching the specified types `argtypes` on the specified arguments `args` and passing the keyword arguments `kwargs`. The arguments `args` must @@ -2056,6 +2057,22 @@ Note in particular that the specified `Method` may be entirely unreachable from If the method is part of the ordinary method table, this call behaves similar to `invoke(f, method.sig, args...)`. +!!! compat "Julia 1.12" + Passing a `Method` requires Julia 1.12. + +# Passing a `CodeInstance` instead of a signature +The `argtypes` argument may be a `CodeInstance`, bypassing both method lookup and specialization. +The semantics of this invocation are similar to a function pointer call of the `CodeInstance`'s +`invoke` pointer. It is an error to invoke a `CodeInstance` with arguments that do not match its +parent MethodInstance or from a world age not included in the `min_world`/`max_world` range. +It is undefined behavior to invoke a CodeInstance whose behavior does not match the constraints +specified in its fields. For some code instances with `owner !== nothing` (i.e. those generated +by external compilers), it may be an error to invoke them after passing through precompilation. +This is an advanced interface intended for use with external compiler plugins. + +!!! compat "Julia 1.12" + Passing a `CodeInstance` requires Julia 1.12. + # Examples ```jldoctest julia> f(x::Real) = x^2; diff --git a/base/optimized_generics.jl b/base/optimized_generics.jl index 86b54a294564d..c0b953777ca94 100644 --- a/base/optimized_generics.jl +++ b/base/optimized_generics.jl @@ -54,4 +54,31 @@ module KeyValue function get end end +# Compiler-recognized intrinsics for compiler plugins +""" + module CompilerPlugins + +Implements a pair of functions `typeinf`/`typeinf_edge`. When the optimizer sees +a call to `typeinf`, it has license to instead call `typeinf_edge`, supplying the +current inference stack in `parent_frame` (but otherwise supplying the arguments +to `typeinf`). typeinf_edge will return the `CodeInstance` that `typeinf` would +have returned at runtime. The optimizer may perform a non-IPO replacement of +the call to `typeinf` by the result of `typeinf_edge`. In addition, the IPO-safe +fields of the `CodeInstance` may be propagated in IPO mode. +""" +module CompilerPlugins + """ + typeinf(owner, mi, source_mode)::CodeInstance + + Return a `CodeInstance` for the given `mi` whose valid results include at + the least current tls world and satisfies the requirements of `source_mode`. 
+ """ + function typeinf end + + """ + typeinf_edge(owner, mi, parent_frame, world, abi_mode)::CodeInstance + """ + function typeinf_edge end +end + end diff --git a/src/builtins.c b/src/builtins.c index c6b0bf130550b..3f555da9d2a83 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -1587,6 +1587,28 @@ JL_CALLABLE(jl_f_invoke) if (!jl_tuple1_isa(args[0], &args[2], nargs - 1, (jl_datatype_t*)m->sig)) jl_type_error("invoke: argument type error", argtypes, arg_tuple(args[0], &args[2], nargs - 1)); return jl_gf_invoke_by_method(m, args[0], &args[2], nargs - 1); + } else if (jl_is_code_instance(argtypes)) { + jl_code_instance_t *codeinst = (jl_code_instance_t*)args[1]; + jl_callptr_t invoke = jl_atomic_load_acquire(&codeinst->invoke); + if (jl_tuple1_isa(args[0], &args[2], nargs - 2, (jl_datatype_t*)codeinst->def->specTypes)) { + jl_type_error("invoke: argument type error", codeinst->def->specTypes, arg_tuple(args[0], &args[2], nargs - 2)); + } + if (jl_atomic_load_relaxed(&codeinst->min_world) > jl_current_task->world_age || + jl_current_task->world_age > jl_atomic_load_relaxed(&codeinst->max_world)) { + jl_error("invoke: CodeInstance not valid for this world"); + } + if (!invoke) { + jl_compile_codeinst(codeinst); + invoke = jl_atomic_load_acquire(&codeinst->invoke); + } + if (invoke) { + return invoke(args[0], &args[2], nargs - 2, codeinst); + } else { + if (codeinst->owner != jl_nothing || !jl_is_method(codeinst->def->def.value)) { + jl_error("Failed to invoke or compile external codeinst"); + } + return jl_gf_invoke_by_method(codeinst->def->def.method, args[0], &args[2], nargs - 1); + } } if (!jl_is_tuple_type(jl_unwrap_unionall(argtypes))) jl_type_error("invoke", (jl_value_t*)jl_anytuple_type_type, argtypes); diff --git a/src/interpreter.c b/src/interpreter.c index 49a3afed14f0c..2dc1c9ed5a0c4 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -137,8 +137,28 @@ static jl_value_t *do_invoke(jl_value_t **args, size_t nargs, interpreter_state argv[i-1] = eval_value(args[i], s); jl_value_t *c = args[0]; assert(jl_is_code_instance(c) || jl_is_method_instance(c)); - jl_method_instance_t *meth = jl_is_method_instance(c) ? (jl_method_instance_t*)c : ((jl_code_instance_t*)c)->def; - jl_value_t *result = jl_invoke(argv[0], nargs == 2 ? NULL : &argv[1], nargs - 2, meth); + jl_value_t *result = NULL; + if (jl_is_code_instance(c)) { + jl_code_instance_t *codeinst = (jl_code_instance_t*)c; + assert(jl_atomic_load_relaxed(&codeinst->min_world) <= jl_current_task->world_age && + jl_current_task->world_age <= jl_atomic_load_relaxed(&codeinst->max_world)); + jl_callptr_t invoke = jl_atomic_load_acquire(&codeinst->invoke); + if (!invoke) { + jl_compile_codeinst(codeinst); + invoke = jl_atomic_load_acquire(&codeinst->invoke); + } + if (invoke) { + result = invoke(argv[0], nargs == 2 ? NULL : &argv[1], nargs - 2, codeinst); + + } else { + if (codeinst->owner != jl_nothing) { + jl_error("Failed to invoke or compile external codeinst"); + } + result = jl_invoke(argv[0], nargs == 2 ? NULL : &argv[1], nargs - 2, codeinst->def); + } + } else { + result = jl_invoke(argv[0], nargs == 2 ? 
NULL : &argv[1], nargs - 2, (jl_method_instance_t*)c); + } JL_GC_POP(); return result; } diff --git a/test/core.jl b/test/core.jl index 39d02d5d567c9..63952e8728e1e 100644 --- a/test/core.jl +++ b/test/core.jl @@ -8353,9 +8353,23 @@ end @test eval(Expr(:toplevel, :(@define_call(f_macro_defined1)))) == 1 @test @define_call(f_macro_defined2) == 1 +# `invoke` of `Method` let m = which(+, (Int, Int)) @eval f56692(i) = invoke(+, $m, i, 4) global g56692() = f56692(5) == 9 ? "true" : false end @test @inferred(f56692(3)) == 7 @test @inferred(g56692()) == "true" + +# `invoke` of `CodeInstance` +f_invalidate_me() = return 1 +f_invoke_me() = return f_invalidate_me() +@test f_invoke_me() == 1 +const f_invoke_me_ci = Base.specialize_method(Base._which(Tuple{typeof(f_invoke_me)})).cache +f_call_me() = invoke(f_invoke_me, f_invoke_me_ci) +@test invoke(f_invoke_me, f_invoke_me_ci) == 1 +@test f_call_me() == 1 +@test_throws TypeError invoke(f_invoke_me, f_invoke_me_ci, 1) +f_invalidate_me() = 2 +@test_throws ErrorException invoke(f_invoke_me, f_invoke_me_ci) +@test_throws ErrorException f_call_me() From ba8290ea4cf47abd69ff45d2e011259dca161ed1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?P=C3=A1ll=20Haraldsson?= Date: Tue, 3 Dec 2024 01:46:24 +0000 Subject: [PATCH 529/537] Update references to LTS from v1.6 to v1.10 (#56729) --- CONTRIBUTING.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fd7f1c89420d6..9a3fe2cd441b3 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -278,8 +278,8 @@ Be sure to change the UUID value back before making the pull request. The process of [creating a patch release](https://docs.julialang.org/en/v1/devdocs/build/distributing/#Point-releasing-101) is roughly as follows: -1. Create a new branch (e.g. `backports-release-1.6`) against the relevant minor release - branch (e.g. `release-1.6`). Usually a corresponding pull request is created as well. +1. Create a new branch (e.g. `backports-release-1.10`) against the relevant minor release + branch (e.g. `release-1.10`). Usually a corresponding pull request is created as well. 2. Add commits, nominally from `master` (hence "backports"), to that branch. See below for more information on this process. @@ -291,8 +291,8 @@ The process of [creating a patch release](https://docs.julialang.org/en/v1/devdo the pull request associated with the backports branch. Fix any issues. 4. Once all test and benchmark reports look good, merge the backports branch into - the corresponding release branch (e.g. merge `backports-release-1.6` into - `release-1.6`). + the corresponding release branch (e.g. merge `backports-release-1.10` into + `release-1.10`). 5. Open a pull request that bumps the version of the relevant minor release to the next patch version, e.g. as in [this pull request](https://github.com/JuliaLang/julia/pull/37718). From 2c87290f2e7d5c057d1f4bdce9c5568c01f31d69 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Tue, 3 Dec 2024 01:19:03 -0500 Subject: [PATCH 530/537] lowering: Canonicalize to builtins for global assignment (#56713) This adjusts lowering to emit `setglobal!` for assignment to globals, thus making the `=` expr head used only for slots in `CodeInfo` and entirely absent in `IRCode`. The primary reason for this is just to reduce the number of special cases that compiler passes have to reason about. In IRCode, `=` was already essentially equivalent to `setglobal!`, so there's no good reason not to canonicalize. 
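For intuition, here is a minimal sketch of the equivalence this relies on (a throwaway module defined only for illustration, not the exact lowered IR, which also threads through the binding declaration and the `convert` step mentioned below):

```julia
# Throwaway module, only for illustration.
module GlobalStoreDemo end

# Syntactic assignment to a global ...
Core.eval(GlobalStoreDemo, :(global g = 1))

# ... and the builtin that the canonicalized lowering emits for the same store.
setglobal!(GlobalStoreDemo, :g, 2)

@assert getglobal(GlobalStoreDemo, :g) == 2
```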
Finally, the `=` syntax form for globals already gets recognized specially to insert `convert` calls to their declared binding type, so this doesn't impose any additional requirements on lowering to distinguish local from global assignments. In general, I'd also like to separate syntax and intermediate forms as much as possible where their semantics differ, which this accomplises by just using the builtin. This change is mostly semantically invisible, except that spliced-in GlobalRefs now declare their binding because they are indistinguishable from ordinary assignments at the stage where I inserted the lowering. If we want to, we can preserve the difference, but it'd be a bit more annoying for not much benefit, because: 1. The spliced in version was only recently made to work anyway, and 2. The semantics of when exactly bindings are declared is still messy on master anyway and will get tweaked shortly in further binding partitions work. --- Compiler/src/abstractinterpretation.jl | 8 +------- Compiler/src/optimize.jl | 11 +---------- Compiler/src/ssair/EscapeAnalysis.jl | 12 ------------ Compiler/src/ssair/verify.jl | 10 ++-------- Compiler/test/inline.jl | 2 +- src/interpreter.c | 24 +++++------------------- src/julia-syntax.scm | 14 ++++++++++---- test/syntax.jl | 5 +++-- 8 files changed, 23 insertions(+), 63 deletions(-) diff --git a/Compiler/src/abstractinterpretation.jl b/Compiler/src/abstractinterpretation.jl index ffb4f4312cdcf..24daaf1e6f626 100644 --- a/Compiler/src/abstractinterpretation.jl +++ b/Compiler/src/abstractinterpretation.jl @@ -4050,13 +4050,7 @@ function typeinf_local(interp::AbstractInterpreter, frame::InferenceState, nextr end effects === nothing || merge_override_effects!(interp, effects, frame) if lhs !== nothing && rt !== Bottom - if isa(lhs, SlotNumber) - changes = StateUpdate(lhs, VarState(rt, false)) - elseif isa(lhs, GlobalRef) - handle_global_assignment!(interp, frame, currsaw_latestworld, lhs, rt) - else - merge_effects!(interp, frame, EFFECTS_UNKNOWN) - end + changes = StateUpdate(lhs::SlotNumber, VarState(rt, false)) end end if !has_curr_ssaflag(frame, IR_FLAG_NOTHROW) diff --git a/Compiler/src/optimize.jl b/Compiler/src/optimize.jl index d2dfd26bfa00d..856e64e404388 100644 --- a/Compiler/src/optimize.jl +++ b/Compiler/src/optimize.jl @@ -1408,16 +1408,7 @@ function statement_cost(ex::Expr, line::Int, src::Union{CodeInfo, IRCode}, sptyp extyp = line == -1 ? Any : argextype(SSAValue(line), src, sptypes) return extyp === Union{} ? 
0 : UNKNOWN_CALL_COST elseif head === :(=) - if ex.args[1] isa GlobalRef - cost = UNKNOWN_CALL_COST - else - cost = 0 - end - a = ex.args[2] - if a isa Expr - cost = plus_saturate(cost, statement_cost(a, -1, src, sptypes, params)) - end - return cost + return statement_cost(ex.args[2], -1, src, sptypes, params) elseif head === :copyast return 100 end diff --git a/Compiler/src/ssair/EscapeAnalysis.jl b/Compiler/src/ssair/EscapeAnalysis.jl index 47a7840628bb5..4f32550d056b2 100644 --- a/Compiler/src/ssair/EscapeAnalysis.jl +++ b/Compiler/src/ssair/EscapeAnalysis.jl @@ -642,13 +642,6 @@ function analyze_escapes(ir::IRCode, nargs::Int, 𝕃ₒ::AbstractLattice, get_e escape_invoke!(astate, pc, stmt.args) elseif head === :new || head === :splatnew escape_new!(astate, pc, stmt.args) - elseif head === :(=) - lhs, rhs = stmt.args - if isa(lhs, GlobalRef) # global store - add_escape_change!(astate, rhs, ⊤) - else - unexpected_assignment!(ir, pc) - end elseif head === :foreigncall escape_foreigncall!(astate, pc, stmt.args) elseif head === :throw_undef_if_not # XXX when is this expression inserted ? @@ -981,11 +974,6 @@ function escape_unanalyzable_obj!(astate::AnalysisState, @nospecialize(obj), obj return objinfo end -@noinline function unexpected_assignment!(ir::IRCode, pc::Int) - @eval Main (ir = $ir; pc = $pc) - error("unexpected assignment found: inspect `Main.pc` and `Main.pc`") -end - is_nothrow(ir::IRCode, pc::Int) = has_flag(ir[SSAValue(pc)], IR_FLAG_NOTHROW) # NOTE if we don't maintain the alias set that is separated from the lattice state, we can do diff --git a/Compiler/src/ssair/verify.jl b/Compiler/src/ssair/verify.jl index 59051058e1750..072a564a31f78 100644 --- a/Compiler/src/ssair/verify.jl +++ b/Compiler/src/ssair/verify.jl @@ -363,14 +363,8 @@ function verify_ir(ir::IRCode, print::Bool=true, isforeigncall = false if isa(stmt, Expr) if stmt.head === :(=) - if stmt.args[1] isa SSAValue - @verify_error "SSAValue as assignment LHS" - raise_error() - end - if stmt.args[2] isa GlobalRef - # undefined GlobalRef as assignment RHS is OK - continue - end + @verify_error "Assignment should have been removed during SSA conversion" + raise_error() elseif stmt.head === :isdefined if length(stmt.args) > 2 || (length(stmt.args) == 2 && !isa(stmt.args[2], Bool)) @verify_error "malformed isdefined" diff --git a/Compiler/test/inline.jl b/Compiler/test/inline.jl index 46b78db3b781c..5f95fb761859e 100644 --- a/Compiler/test/inline.jl +++ b/Compiler/test/inline.jl @@ -2111,7 +2111,7 @@ for run_finalizer_escape_test in (run_finalizer_escape_test1, run_finalizer_esca global finalizer_escape::Int = 0 let src = code_typed1(run_finalizer_escape_test, Tuple{Bool, Bool}) - @test any(x->isexpr(x, :(=)), src.code) + @test any(iscall((src, Core.setglobal!)), src.code) end let diff --git a/src/interpreter.c b/src/interpreter.c index 2dc1c9ed5a0c4..fa4fba94a60a5 100644 --- a/src/interpreter.c +++ b/src/interpreter.c @@ -589,25 +589,11 @@ static jl_value_t *eval_body(jl_array_t *stmts, interpreter_state *s, size_t ip, s->locals[n - 1] = rhs; } else { - jl_module_t *modu; - jl_sym_t *sym; - // Plain assignment is allowed to create bindings at - // toplevel and only for the current module - int alloc = toplevel; - if (jl_is_globalref(lhs)) { - modu = jl_globalref_mod(lhs); - sym = jl_globalref_name(lhs); - alloc &= modu == s->module; - } - else { - assert(jl_is_symbol(lhs)); - modu = s->module; - sym = (jl_sym_t*)lhs; - } - JL_GC_PUSH1(&rhs); - jl_binding_t *b = jl_get_binding_wr(modu, sym, alloc); - 
jl_checked_assignment(b, modu, sym, rhs); - JL_GC_POP(); + // This is an unmodeled error. Our frontend only generates + // legal `=` expressions, but since GlobalRef used to be legal + // here, give a loud error in case any package is modifying + // internals. + jl_error("Invalid IR: Assignment LHS not a Slot"); } } else if (head == jl_leave_sym) { diff --git a/src/julia-syntax.scm b/src/julia-syntax.scm index 852d5eb4d6f86..c7ca5d553bb31 100644 --- a/src/julia-syntax.scm +++ b/src/julia-syntax.scm @@ -4607,14 +4607,20 @@ f(x) = yt(x) (cdr cnd) (list cnd)))))) tests)) + (define (emit-assignment-or-setglobal lhs rhs) + (if (globalref? lhs) + (begin + (emit `(global ,lhs)) + (emit `(call (top setglobal!) ,(cadr lhs) (inert ,(caddr lhs)) ,rhs))) + (emit `(= ,lhs ,rhs)))) (define (emit-assignment lhs rhs) (if rhs (if (valid-ir-rvalue? lhs rhs) - (emit `(= ,lhs ,rhs)) + (emit-assignment-or-setglobal lhs rhs) (let ((rr (make-ssavalue))) (emit `(= ,rr ,rhs)) - (emit `(= ,lhs ,rr)))) - (emit `(= ,lhs (null)))) ; in unreachable code (such as after return), still emit the assignment so that the structure of those uses is preserved + (emit-assignment-or-setglobal lhs rr))) + (emit-assignment-or-setglobal lhs `(null))) ; in unreachable code (such as after return), still emit the assignment so that the structure of those uses is preserved #f) ;; the interpreter loop. `break-labels` keeps track of the labels to jump to ;; for all currently closing break-blocks. @@ -4693,7 +4699,7 @@ f(x) = yt(x) rhs (make-ssavalue)))) (if (not (eq? rr rhs)) (emit `(= ,rr ,rhs))) - (emit `(= ,lhs ,rr)) + (emit-assignment-or-setglobal lhs rr) (if tail (emit-return tail rr)) rr) (emit-assignment lhs rhs)))))) diff --git a/test/syntax.jl b/test/syntax.jl index 0fb752bae480f..315f2d8b0f38b 100644 --- a/test/syntax.jl +++ b/test/syntax.jl @@ -3713,7 +3713,7 @@ end module Foreign54607 # Syntactic, not dynamic try_to_create_binding1() = (Foreign54607.foo = 2) - # GlobalRef is allowed for same-module assignment + # GlobalRef is allowed for same-module assignment and declares the binding @eval try_to_create_binding2() = ($(GlobalRef(Foreign54607, :foo2)) = 2) function global_create_binding() global bar @@ -3728,7 +3728,7 @@ module Foreign54607 end @test_throws ErrorException (Foreign54607.foo = 1) @test_throws ErrorException Foreign54607.try_to_create_binding1() -@test_throws ErrorException Foreign54607.try_to_create_binding2() +Foreign54607.try_to_create_binding2() function assign_in_foreign_module() (Foreign54607.foo = 1) nothing @@ -3744,6 +3744,7 @@ Foreign54607.global_create_binding() @test isdefined(Foreign54607, :baz) @test isdefined(Foreign54607, :compiled_assign) @test isdefined(Foreign54607, :gr_assign) +@test isdefined(Foreign54607, :foo2) Foreign54607.bar = 8 @test Foreign54607.bar == 8 begin From 9acf1129c91cddd9194f529ad9cc82afd2694190 Mon Sep 17 00:00:00 2001 From: Julius Krumbiegel <22495855+jkrumbiegel@users.noreply.github.com> Date: Tue, 3 Dec 2024 08:45:28 +0100 Subject: [PATCH 531/537] Actually show glyphs for latex or emoji shortcodes being suggested in the REPL (#54800) When a user requests a completion for a backslash shortcode, this PR adds the glyphs for all the suggestions to the output. This makes it much easier to find the result one is looking for, especially if the user doesn't know all latex and emoji specifiers by heart. 
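As a rough standalone sketch of the idea (the types this PR actually adds are `LineEdit.NamedCompletion` and the reworked `BslashCompletion`; `GlyphCompletion` below is a made-up stand-in, and the snippet assumes the REPL stdlib is loadable): keep the text that gets inserted separate from the richer text shown in the preview list, and build the display string from the glyph plus its shortcode.

```julia
using REPL.REPLCompletions: latex_symbols   # Dict mapping "\\alpha" => "α", etc.

# Made-up stand-in for the NamedCompletion idea: what gets inserted vs. what gets shown.
struct GlyphCompletion
    completion::String   # inserted into the buffer, e.g. "\alpha"
    name::String         # shown in the completion list, e.g. "α \alpha"
end

prefix = "\\alp"
names = sort!(collect(Iterators.filter(k -> startswith(k, prefix), keys(latex_symbols))))
entries = [GlyphCompletion(k, string(latex_symbols[k], ' ', k)) for k in names]
foreach(e -> println(e.name), entries)   # e.g. prints "α \alpha"
```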
Before: image After: image --------- Co-authored-by: Dilum Aluthge --- stdlib/REPL/src/LineEdit.jl | 38 +++++++++++++++++++++++++---- stdlib/REPL/src/REPL.jl | 6 ++--- stdlib/REPL/src/REPLCompletions.jl | 34 +++++++++++++++++--------- stdlib/REPL/test/replcompletions.jl | 33 +++++++++++++++++-------- 4 files changed, 81 insertions(+), 30 deletions(-) diff --git a/stdlib/REPL/src/LineEdit.jl b/stdlib/REPL/src/LineEdit.jl index e881a65ca6b1c..e15807f645119 100644 --- a/stdlib/REPL/src/LineEdit.jl +++ b/stdlib/REPL/src/LineEdit.jl @@ -181,7 +181,18 @@ struct EmptyHistoryProvider <: HistoryProvider end reset_state(::EmptyHistoryProvider) = nothing -complete_line(c::EmptyCompletionProvider, s; hint::Bool=false) = String[], "", true +# Before, completions were always given as strings. But at least for backslash +# completions, it's nice to see what glyphs are available in the completion preview. +# To separate between what's shown in the preview list of possible matches, and what's +# actually completed, we introduce this struct. +struct NamedCompletion + completion::String # what is actually completed, for example "\trianglecdot" + name::String # what is displayed in lists of possible completions, for example "◬ \trianglecdot" +end + +NamedCompletion(completion::String) = NamedCompletion(completion, completion) + +complete_line(c::EmptyCompletionProvider, s; hint::Bool=false) = NamedCompletion[], "", true # complete_line can be specialized for only two arguments, when the active module # doesn't matter (e.g. Pkg does this) @@ -308,6 +319,7 @@ end set_action!(s, command::Symbol) = nothing +common_prefix(completions::Vector{NamedCompletion}) = common_prefix(map(x -> x.completion, completions)) function common_prefix(completions::Vector{String}) ret = "" c1 = completions[1] @@ -330,6 +342,8 @@ end # does not restrict column length when multiple columns are used. const MULTICOLUMN_THRESHOLD = 5 +show_completions(s::PromptState, completions::Vector{NamedCompletion}) = show_completions(s, map(x -> x.name, completions)) + # Show available completions function show_completions(s::PromptState, completions::Vector{String}) # skip any lines of input after the cursor @@ -374,6 +388,18 @@ function complete_line(s::MIState) end end +# due to close coupling of the Pkg ReplExt `complete_line` can still return a vector of strings, +# so we convert those in this helper +function complete_line_named(args...; kwargs...)::Tuple{Vector{NamedCompletion},String,Bool} + result = complete_line(args...; kwargs...)::Union{Tuple{Vector{NamedCompletion},String,Bool},Tuple{Vector{String},String,Bool}} + if result isa Tuple{Vector{NamedCompletion},String,Bool} + return result + else + completions, partial, should_complete = result + return map(NamedCompletion, completions), partial, should_complete + end +end + function check_for_hint(s::MIState) st = state(s) if !options(st).hint_tab_completes || !eof(buffer(st)) @@ -383,12 +409,14 @@ function check_for_hint(s::MIState) return clear_hint(st) end - completions, partial, should_complete = try - complete_line(st.p.complete, st, s.active_module; hint = true)::Tuple{Vector{String},String,Bool} + named_completions, partial, should_complete = try + complete_line_named(st.p.complete, st, s.active_module; hint = true) catch @debug "error completing line for hint" exception=current_exceptions() return clear_hint(st) end + completions = map(x -> x.completion, named_completions) + isempty(completions) && return clear_hint(st) # Don't complete for single chars, given e.g. 
`x` completes to `xor` if length(partial) > 1 && should_complete @@ -425,7 +453,7 @@ function clear_hint(s::ModeState) end function complete_line(s::PromptState, repeats::Int, mod::Module; hint::Bool=false) - completions, partial, should_complete = complete_line(s.p.complete, s, mod; hint)::Tuple{Vector{String},String,Bool} + completions, partial, should_complete = complete_line_named(s.p.complete, s, mod; hint) isempty(completions) && return false if !should_complete # should_complete is false for cases where we only want to show @@ -435,7 +463,7 @@ function complete_line(s::PromptState, repeats::Int, mod::Module; hint::Bool=fal # Replace word by completion prev_pos = position(s) push_undo(s) - edit_splice!(s, (prev_pos - sizeof(partial)) => prev_pos, completions[1]) + edit_splice!(s, (prev_pos - sizeof(partial)) => prev_pos, completions[1].completion) else p = common_prefix(completions) if !isempty(p) && p != partial diff --git a/stdlib/REPL/src/REPL.jl b/stdlib/REPL/src/REPL.jl index 50f610ff3b3e8..6c3f4bd4ba73a 100644 --- a/stdlib/REPL/src/REPL.jl +++ b/stdlib/REPL/src/REPL.jl @@ -843,7 +843,7 @@ function complete_line(c::REPLCompletionProvider, s::PromptState, mod::Module; h full = LineEdit.input_string(s) ret, range, should_complete = completions(full, lastindex(partial), mod, c.modifiers.shift, hint) c.modifiers = LineEdit.Modifiers() - return unique!(String[completion_text(x) for x in ret]), partial[range], should_complete + return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), partial[range], should_complete end function complete_line(c::ShellCompletionProvider, s::PromptState; hint::Bool=false) @@ -851,14 +851,14 @@ function complete_line(c::ShellCompletionProvider, s::PromptState; hint::Bool=fa partial = beforecursor(s.input_buffer) full = LineEdit.input_string(s) ret, range, should_complete = shell_completions(full, lastindex(partial), hint) - return unique!(String[completion_text(x) for x in ret]), partial[range], should_complete + return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), partial[range], should_complete end function complete_line(c::LatexCompletions, s; hint::Bool=false) partial = beforecursor(LineEdit.buffer(s)) full = LineEdit.input_string(s)::String ret, range, should_complete = bslash_completions(full, lastindex(partial), hint)[2] - return unique!(String[completion_text(x) for x in ret]), partial[range], should_complete + return unique!(LineEdit.NamedCompletion[named_completion(x) for x in ret]), partial[range], should_complete end with_repl_linfo(f, repl) = f(outstream(repl)) diff --git a/stdlib/REPL/src/REPLCompletions.jl b/stdlib/REPL/src/REPLCompletions.jl index 1f2c0cabbdb38..0bffb1a1015cd 100644 --- a/stdlib/REPL/src/REPLCompletions.jl +++ b/stdlib/REPL/src/REPLCompletions.jl @@ -2,7 +2,7 @@ module REPLCompletions -export completions, shell_completions, bslash_completions, completion_text +export completions, shell_completions, bslash_completions, completion_text, named_completion using Core: Const # We want to insulate the REPLCompletion module from any changes the user may @@ -13,6 +13,8 @@ using Base.Meta using Base: propertynames, something, IdSet using Base.Filesystem: _readdirx +using ..REPL.LineEdit: NamedCompletion + abstract type Completion end struct TextCompletion <: Completion @@ -57,8 +59,10 @@ struct MethodCompletion <: Completion end struct BslashCompletion <: Completion - bslash::String + completion::String # what is actually completed, for example "\trianglecdot" + name::String # what is 
displayed, for example "◬ \trianglecdot" end +BslashCompletion(completion::String) = BslashCompletion(completion, completion) struct ShellCompletion <: Completion text::String @@ -114,13 +118,21 @@ _completion_text(c::PackageCompletion) = c.package _completion_text(c::PropertyCompletion) = sprint(Base.show_sym, c.property) _completion_text(c::FieldCompletion) = sprint(Base.show_sym, c.field) _completion_text(c::MethodCompletion) = repr(c.method) -_completion_text(c::BslashCompletion) = c.bslash _completion_text(c::ShellCompletion) = c.text _completion_text(c::DictCompletion) = c.key _completion_text(c::KeywordArgumentCompletion) = c.kwarg*'=' completion_text(c) = _completion_text(c)::String +named_completion(c::BslashCompletion) = NamedCompletion(c.completion, c.name) + +function named_completion(c) + text = completion_text(c)::String + return NamedCompletion(text, text) +end + +named_completion_completion(c) = named_completion(c).completion::String + const Completions = Tuple{Vector{Completion}, UnitRange{Int}, Bool} function completes_global(x, name) @@ -984,12 +996,10 @@ function bslash_completions(string::String, pos::Int, hint::Bool=false) end # return possible matches; these cannot be mixed with regular # Julian completions as only latex / emoji symbols contain the leading \ - if startswith(s, "\\:") # emoji - namelist = Iterators.filter(k -> startswith(k, s), keys(emoji_symbols)) - else # latex - namelist = Iterators.filter(k -> startswith(k, s), keys(latex_symbols)) - end - return (true, (Completion[BslashCompletion(name) for name in sort!(collect(namelist))], slashpos:pos, true)) + symbol_dict = startswith(s, "\\:") ? emoji_symbols : latex_symbols + namelist = Iterators.filter(k -> startswith(k, s), keys(symbol_dict)) + completions = Completion[BslashCompletion(name, "$(symbol_dict[name]) $name") for name in sort!(collect(namelist))] + return (true, (completions, slashpos:pos, true)) end return (false, (Completion[], 0:-1, false)) end @@ -1099,7 +1109,7 @@ function complete_keyword_argument(partial::String, last_idx::Int, context_modul complete_keyval!(suggestions, last_word) end - return sort!(suggestions, by=completion_text), wordrange + return sort!(suggestions, by=named_completion_completion), wordrange end function get_loading_candidates(pkgstarts::String, project_file::String) @@ -1298,7 +1308,7 @@ function completions(string::String, pos::Int, context_module::Module=Main, shif complete_identifiers!(suggestions, context_module, string, name, pos, separatorpos, startpos; shift) - return sort!(unique!(completion_text, suggestions), by=completion_text), (separatorpos+1):pos, true + return sort!(unique!(named_completion, suggestions), by=named_completion_completion), (separatorpos+1):pos, true elseif inc_tag === :cmd # TODO: should this call shell_completions instead of partially reimplementing it? 
let m = match(r"[\t\n\r\"`><=*?|]| (?!\\)", reverse(partial)) # fuzzy shell_parse in reverse @@ -1496,7 +1506,7 @@ function completions(string::String, pos::Int, context_module::Module=Main, shif complete_identifiers!(suggestions, context_module, string, name, pos, separatorpos, startpos; comp_keywords, complete_modules_only, shift) - return sort!(unique!(completion_text, suggestions), by=completion_text), namepos:pos, true + return sort!(unique!(named_completion, suggestions), by=named_completion_completion), namepos:pos, true end function shell_completions(string, pos, hint::Bool=false) diff --git a/stdlib/REPL/test/replcompletions.jl b/stdlib/REPL/test/replcompletions.jl index b259567884486..2c8d48cc232cf 100644 --- a/stdlib/REPL/test/replcompletions.jl +++ b/stdlib/REPL/test/replcompletions.jl @@ -170,17 +170,23 @@ end function map_completion_text(completions) c, r, res = completions - return map(completion_text, c), r, res + return map(x -> named_completion(x).completion, c), r, res +end + +function map_named_completion(completions) + c, r, res = completions + return map(named_completion, c), r, res end test_complete(s) = map_completion_text(@inferred(completions(s, lastindex(s)))) test_scomplete(s) = map_completion_text(@inferred(shell_completions(s, lastindex(s)))) -test_bslashcomplete(s) = map_completion_text(@inferred(bslash_completions(s, lastindex(s)))[2]) test_complete_context(s, m=@__MODULE__; shift::Bool=true) = map_completion_text(@inferred(completions(s,lastindex(s), m, shift))) test_complete_foo(s) = test_complete_context(s, Main.CompletionFoo) test_complete_noshift(s) = map_completion_text(@inferred(completions(s, lastindex(s), Main, false))) +test_bslashcomplete(s) = map_named_completion(@inferred(bslash_completions(s, lastindex(s)))[2]) + test_methods_list(@nospecialize(f), tt) = map(x -> string(x.method), Base._methods_by_ftype(Base.signature_type(f, tt), 10, Base.get_world_counter())) @@ -350,7 +356,8 @@ end # test latex symbol completions let s = "\\alpha" c, r = test_bslashcomplete(s) - @test c[1] == "α" + @test c[1].completion == "α" + @test c[1].name == "α" @test r == 1:lastindex(s) @test length(c) == 1 end @@ -358,7 +365,8 @@ end # test latex symbol completions after unicode #9209 let s = "α\\alpha" c, r = test_bslashcomplete(s) - @test c[1] == "α" + @test c[1].completion == "α" + @test c[1].name == "α" @test r == 3:sizeof(s) @test length(c) == 1 end @@ -366,20 +374,25 @@ end # test emoji symbol completions let s = "\\:koala:" c, r = test_bslashcomplete(s) - @test c[1] == "🐨" + @test c[1].completion == "🐨" + @test c[1].name == "🐨" @test r == 1:sizeof(s) @test length(c) == 1 end let s = "\\:ko" c, r = test_bslashcomplete(s) - @test "\\:koala:" in c + ko = only(filter(c) do namedcompletion + namedcompletion.completion == "\\:koala:" + end) + @test ko.name == "🐨 \\:koala:" end # test emoji symbol completions after unicode #9209 let s = "α\\:koala:" c, r = test_bslashcomplete(s) - @test c[1] == "🐨" + @test c[1].name == "🐨" + @test c[1].completion == "🐨" @test r == 3:sizeof(s) @test length(c) == 1 end @@ -1069,8 +1082,8 @@ let s, c, r # Issue #8047 s = "@show \"/dev/nul\"" c,r = completions(s, 15) - c = map(completion_text, c) - @test "null\"" in c + c = map(named_completion, c) + @test "null\"" in [_c.completion for _c in c] @test r == 13:15 @test s[r] == "nul" @@ -1476,7 +1489,7 @@ function test_dict_completion(dict_name) @test c == Any["\"abcd\"]"] s = "$dict_name[\"abcd]" # trailing close bracket c, r = completions(s, lastindex(s) - 1) - c = map(completion_text, 
c) + c = map(x -> named_completion(x).completion, c) @test c == Any["\"abcd\""] s = "$dict_name[:b" c, r = test_complete(s) From 2590e675885b97579a7531c343a546f6f5bbcbe5 Mon Sep 17 00:00:00 2001 From: Daniel Wennberg Date: Tue, 3 Dec 2024 05:36:39 -0800 Subject: [PATCH 532/537] Update annotated.jl docstrings according to #55741 (#56736) Annotations now use a NamedTuple --- base/strings/annotated.jl | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/base/strings/annotated.jl b/base/strings/annotated.jl index c5c330fe0dfcd..814ee2afa9d55 100644 --- a/base/strings/annotated.jl +++ b/base/strings/annotated.jl @@ -55,9 +55,9 @@ like [`string`](@ref) but preserves any annotations present in the arguments. # Examples -```julia-repl +```jldoctest; setup=:(using Base: AnnotatedString) julia> AnnotatedString("this is an example annotated string", - [(1:18, :A => 1), (12:28, :B => 2), (18:35, :C => 3)]) + [(1:18, :A, 1), (12:28, :B, 2), (18:35, :C, 3)]) "this is an example annotated string" ``` """ @@ -87,8 +87,8 @@ AnnotatedChar(s::S, annotations::Vector{$Annotation}) # Examples -```julia-repl -julia> AnnotatedChar('j', :label => 1) +```jldoctest; setup=:(using Base: AnnotatedChar) +julia> AnnotatedChar('j', [(:label, 1)]) 'j': ASCII/Unicode U+006A (category Ll: Letter, lowercase) ``` """ @@ -232,11 +232,11 @@ See also [`AnnotatedString`](@ref) and [`AnnotatedChar`](@ref). ## Examples -```julia-repl +```jldoctest; setup=:(using Base: AnnotatedString, annotatedstring) julia> annotatedstring("now a AnnotatedString") "now a AnnotatedString" -julia> annotatedstring(AnnotatedString("annotated", [(1:9, :label => 1)]), ", and unannotated") +julia> annotatedstring(AnnotatedString("annotated", [(1:9, :label, 1)]), ", and unannotated") "annotated, and unannotated" ``` """ @@ -344,7 +344,7 @@ end annotate!(str::AnnotatedString, [range::UnitRange{Int}], label::Symbol, value) annotate!(str::SubString{AnnotatedString}, [range::UnitRange{Int}], label::Symbol, value) -Annotate a `range` of `str` (or the entire string) with a labeled value (`label` => `value`). +Annotate a `range` of `str` (or the entire string) with a labeled value `(label, value)`. To remove existing `label` annotations, use a value of `nothing`. The order in which annotations are applied to `str` is semantically meaningful, @@ -365,7 +365,7 @@ annotate!(s::SubString{<:AnnotatedString}, label::Symbol, @nospecialize(val::Any """ annotate!(char::AnnotatedChar, label::Symbol, value::Any) -Annotate `char` with the pair `label => value`. +Annotate `char` with the labeled value `(label, value)`. """ annotate!(c::AnnotatedChar, label::Symbol, @nospecialize(val::Any)) = (push!(c.annotations, Annotation((; label, val))); c) From 5ae26276c1a1834f7b2ebdaf03696278df59b11b Mon Sep 17 00:00:00 2001 From: Jameson Nash Date: Tue, 3 Dec 2024 19:50:25 -0500 Subject: [PATCH 533/537] effects: pack bits better (#56737) There is no reason to preserve duplicates of the bits for the value before and after inference, and many of the numbers in the comments had gotten incorrect. Now we are able to pack all 16 bits of currently defined bitflags into 20 bits, instead of 25 bits (although either case still rounds up to 32). There was also no reason for InferenceState to be mutating of CodeInfo during execution, so remove that mutation. 
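For readers less familiar with the flag machinery, here is a self-contained sketch of how these per-statement properties pack into a single `UInt32`. It uses the bit positions this commit assigns, but standalone helper functions rather than the Compiler's internal `has_flag`/`add_flag!` variants that operate on `Instruction`s:

```julia
# Bit positions follow the new layout in optimize.jl: analysis bits in the low
# range, inference-reserved bits starting at bit 16.
const FLAG_CONSISTENT  = one(UInt32) << 3
const FLAG_EFFECT_FREE = one(UInt32) << 4
const FLAG_NOTHROW     = one(UInt32) << 5
const FLAG_REFINED     = one(UInt32) << 16

has_flag(flags::UInt32, f::UInt32) = (flags & f) == f
add_flag(flags::UInt32, f::UInt32) = flags | f
sub_flag(flags::UInt32, f::UInt32) = flags & ~f

flags = add_flag(add_flag(zero(UInt32), FLAG_NOTHROW), FLAG_REFINED)
@assert has_flag(flags, FLAG_NOTHROW)
@assert !has_flag(flags, FLAG_EFFECT_FREE)
@assert sub_flag(flags, FLAG_REFINED) == FLAG_NOTHROW
```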
--- Compiler/src/inferencestate.jl | 25 +++++++++---------- Compiler/src/optimize.jl | 45 +++++++++++++++++++--------------- Compiler/src/typeinfer.jl | 3 ++- src/julia.h | 13 +++------- 4 files changed, 42 insertions(+), 44 deletions(-) diff --git a/Compiler/src/inferencestate.jl b/Compiler/src/inferencestate.jl index 9eb929b725fbf..6988e74310fc5 100644 --- a/Compiler/src/inferencestate.jl +++ b/Compiler/src/inferencestate.jl @@ -267,6 +267,7 @@ mutable struct InferenceState bb_vartables::Vector{Union{Nothing,VarTable}} # nothing if not analyzed yet bb_saw_latestworld::Vector{Bool} ssavaluetypes::Vector{Any} + ssaflags::Vector{UInt32} edges::Vector{Any} stmt_info::Vector{CallInfo} @@ -343,6 +344,7 @@ mutable struct InferenceState bb_vartable1[i] = VarState(argtyp, i > nargtypes) end src.ssavaluetypes = ssavaluetypes = Any[ NOT_FOUND for i = 1:nssavalues ] + ssaflags = copy(src.ssaflags) unreachable = BitSet() pclimitations = IdSet{InferenceState}() @@ -374,7 +376,7 @@ mutable struct InferenceState this = new( mi, WorldWithRange(world, valid_worlds), mod, sptypes, slottypes, src, cfg, spec_info, - currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, bb_saw_latestworld, ssavaluetypes, edges, stmt_info, + currbb, currpc, ip, handler_info, ssavalue_uses, bb_vartables, bb_saw_latestworld, ssavaluetypes, ssaflags, edges, stmt_info, tasks, pclimitations, limitations, cycle_backedges, callstack, parentid, frameid, cycleid, result, unreachable, bestguess, exc_bestguess, ipo_effects, restrict_abstract_call_sites, cache_mode, insert_coverage, @@ -1004,25 +1006,22 @@ function callers_in_cycle(sv::InferenceState) end callers_in_cycle(sv::IRInterpretationState) = AbsIntCycle(sv.callstack::Vector{AbsIntState}, 0, 0) -get_curr_ssaflag(sv::InferenceState) = sv.src.ssaflags[sv.currpc] +get_curr_ssaflag(sv::InferenceState) = sv.ssaflags[sv.currpc] get_curr_ssaflag(sv::IRInterpretationState) = sv.ir.stmts[sv.curridx][:flag] -has_curr_ssaflag(sv::InferenceState, flag::UInt32) = has_flag(sv.src.ssaflags[sv.currpc], flag) +has_curr_ssaflag(sv::InferenceState, flag::UInt32) = has_flag(sv.ssaflags[sv.currpc], flag) has_curr_ssaflag(sv::IRInterpretationState, flag::UInt32) = has_flag(sv.ir.stmts[sv.curridx][:flag], flag) function set_curr_ssaflag!(sv::InferenceState, flag::UInt32, mask::UInt32=typemax(UInt32)) - curr_flag = sv.src.ssaflags[sv.currpc] - sv.src.ssaflags[sv.currpc] = (curr_flag & ~mask) | flag -end -function set_curr_ssaflag!(sv::IRInterpretationState, flag::UInt32, mask::UInt32=typemax(UInt32)) - curr_flag = sv.ir.stmts[sv.curridx][:flag] - sv.ir.stmts[sv.curridx][:flag] = (curr_flag & ~mask) | flag + curr_flag = sv.ssaflags[sv.currpc] + sv.ssaflags[sv.currpc] = (curr_flag & ~mask) | flag + nothing end -add_curr_ssaflag!(sv::InferenceState, flag::UInt32) = sv.src.ssaflags[sv.currpc] |= flag +add_curr_ssaflag!(sv::InferenceState, flag::UInt32) = sv.ssaflags[sv.currpc] |= flag add_curr_ssaflag!(sv::IRInterpretationState, flag::UInt32) = add_flag!(sv.ir.stmts[sv.curridx], flag) -sub_curr_ssaflag!(sv::InferenceState, flag::UInt32) = sv.src.ssaflags[sv.currpc] &= ~flag +sub_curr_ssaflag!(sv::InferenceState, flag::UInt32) = sv.ssaflags[sv.currpc] &= ~flag sub_curr_ssaflag!(sv::IRInterpretationState, flag::UInt32) = sub_flag!(sv.ir.stmts[sv.curridx], flag) function merge_effects!(::AbstractInterpreter, caller::InferenceState, effects::Effects) @@ -1035,8 +1034,8 @@ function merge_effects!(::AbstractInterpreter, caller::InferenceState, effects:: end merge_effects!(::AbstractInterpreter, 
::IRInterpretationState, ::Effects) = return -decode_statement_effects_override(sv::AbsIntState) = - decode_statement_effects_override(get_curr_ssaflag(sv)) +decode_statement_effects_override(sv::InferenceState) = decode_statement_effects_override(sv.src.ssaflags[sv.currpc]) +decode_statement_effects_override(sv::IRInterpretationState) = decode_statement_effects_override(UInt32(0)) struct InferenceLoopState rt diff --git a/Compiler/src/optimize.jl b/Compiler/src/optimize.jl index 856e64e404388..1c02bd67b5bd4 100644 --- a/Compiler/src/optimize.jl +++ b/Compiler/src/optimize.jl @@ -17,37 +17,41 @@ const SLOT_USEDUNDEF = 32 # slot has uses that might raise UndefVarError const IR_FLAG_NULL = zero(UInt32) # This statement is marked as @inbounds by user. -# Ff replaced by inlining, any contained boundschecks may be removed. +# If replaced by inlining, any contained boundschecks may be removed. const IR_FLAG_INBOUNDS = one(UInt32) << 0 # This statement is marked as @inline by user const IR_FLAG_INLINE = one(UInt32) << 1 # This statement is marked as @noinline by user const IR_FLAG_NOINLINE = one(UInt32) << 2 -# An optimization pass has updated this statement in a way that may -# have exposed information that inference did not see. Re-running -# inference on this statement may be profitable. -const IR_FLAG_REFINED = one(UInt32) << 3 # This statement is proven :consistent -const IR_FLAG_CONSISTENT = one(UInt32) << 4 +const IR_FLAG_CONSISTENT = one(UInt32) << 3 # This statement is proven :effect_free -const IR_FLAG_EFFECT_FREE = one(UInt32) << 5 +const IR_FLAG_EFFECT_FREE = one(UInt32) << 4 # This statement is proven :nothrow -const IR_FLAG_NOTHROW = one(UInt32) << 6 -# This statement is proven :terminates -const IR_FLAG_TERMINATES = one(UInt32) << 7 -# This statement is proven :noub -const IR_FLAG_NOUB = one(UInt32) << 8 -# TODO: Both of these should eventually go away once -# This statement is :effect_free == EFFECT_FREE_IF_INACCESSIBLEMEMONLY -const IR_FLAG_EFIIMO = one(UInt32) << 9 -# This statement is :inaccessiblememonly == INACCESSIBLEMEM_OR_ARGMEMONLY -const IR_FLAG_INACCESSIBLEMEM_OR_ARGMEM = one(UInt32) << 10 +const IR_FLAG_NOTHROW = one(UInt32) << 5 +# This statement is proven :terminates_globally +const IR_FLAG_TERMINATES = one(UInt32) << 6 +#const IR_FLAG_TERMINATES_LOCALLY = one(UInt32) << 7 +#const IR_FLAG_NOTASKSTATE = one(UInt32) << 8 +#const IR_FLAG_INACCESSIBLEMEM = one(UInt32) << 9 +const IR_FLAG_NOUB = one(UInt32) << 10 +#const IR_FLAG_NOUBINIB = one(UInt32) << 11 +#const IR_FLAG_CONSISTENTOVERLAY = one(UInt32) << 12 # This statement is :nortcall -const IR_FLAG_NORTCALL = one(UInt32) << 11 +const IR_FLAG_NORTCALL = one(UInt32) << 13 +# An optimization pass has updated this statement in a way that may +# have exposed information that inference did not see. Re-running +# inference on this statement may be profitable. 
+const IR_FLAG_REFINED = one(UInt32) << 16 # This statement has no users and may be deleted if flags get refined to IR_FLAGS_REMOVABLE -const IR_FLAG_UNUSED = one(UInt32) << 12 +const IR_FLAG_UNUSED = one(UInt32) << 17 +# TODO: Both of these next two should eventually go away once +# This statement is :effect_free == EFFECT_FREE_IF_INACCESSIBLEMEMONLY +const IR_FLAG_EFIIMO = one(UInt32) << 18 +# This statement is :inaccessiblememonly == INACCESSIBLEMEM_OR_ARGMEMONLY +const IR_FLAG_INACCESSIBLEMEM_OR_ARGMEM = one(UInt32) << 19 -const NUM_IR_FLAGS = 13 # sync with julia.h +const NUM_IR_FLAGS = 3 # sync with julia.h const IR_FLAGS_EFFECTS = IR_FLAG_CONSISTENT | IR_FLAG_EFFECT_FREE | IR_FLAG_NOTHROW | @@ -815,6 +819,7 @@ function scan_non_dataflow_flags!(inst::Instruction, sv::PostOptAnalysisState) sv.nortcall = false end end + nothing end function scan_inconsistency!(inst::Instruction, sv::PostOptAnalysisState) diff --git a/Compiler/src/typeinfer.jl b/Compiler/src/typeinfer.jl index 83ec0271ea474..20c0a5000bd39 100644 --- a/Compiler/src/typeinfer.jl +++ b/Compiler/src/typeinfer.jl @@ -413,6 +413,7 @@ function finishinfer!(me::InferenceState, interp::AbstractInterpreter) ipo_effects = result.ipo_effects = me.ipo_effects = adjust_effects(me) result.exc_result = me.exc_bestguess = refine_exception_type(me.exc_bestguess, ipo_effects) me.src.rettype = widenconst(ignorelimited(bestguess)) + me.src.ssaflags = me.ssaflags me.src.min_world = first(me.world.valid_worlds) me.src.max_world = last(me.world.valid_worlds) istoplevel = !(me.linfo.def isa Method) @@ -936,7 +937,7 @@ function codeinfo_for_const(interp::AbstractInterpreter, mi::MethodInstance, @no tree.slotflags = fill(0x00, nargs) tree.ssavaluetypes = 1 tree.debuginfo = DebugInfo(mi) - tree.ssaflags = UInt32[0] + tree.ssaflags = [IR_FLAG_NULL] tree.rettype = Core.Typeof(val) tree.edges = Core.svec() set_inlineable!(tree, true) diff --git a/src/julia.h b/src/julia.h index 944fd3c43a297..6c0dd700f9472 100644 --- a/src/julia.h +++ b/src/julia.h @@ -279,7 +279,7 @@ typedef union __jl_purity_overrides_t { } _jl_purity_overrides_t; #define NUM_EFFECTS_OVERRIDES 11 -#define NUM_IR_FLAGS 13 +#define NUM_IR_FLAGS 3 // This type describes a single function body typedef struct _jl_code_info_t { @@ -292,15 +292,8 @@ typedef struct _jl_code_info_t { // 1 << 0 = inbounds region // 1 << 1 = callsite inline region // 1 << 2 = callsite noinline region - // 1 << 3 = refined statement - // 1 << 4 = :consistent - // 1 << 5 = :effect_free - // 1 << 6 = :nothrow - // 1 << 7 = :terminates - // 1 << 8 = :noub - // 1 << 9 = :effect_free_if_inaccessiblememonly - // 1 << 10 = :inaccessiblemem_or_argmemonly - // 1 << 11-19 = callsite effects overrides + // 1 << 3-14 = purity + // 1 << 16+ = reserved for inference // miscellaneous data: jl_array_t *slotnames; // names of local variables jl_array_t *slotflags; // local var bit flags From e48bf8cba9a9305050704e3f21b53431ae123e93 Mon Sep 17 00:00:00 2001 From: DilumAluthgeBot <43731525+DilumAluthgeBot@users.noreply.github.com> Date: Wed, 4 Dec 2024 07:04:39 -0500 Subject: [PATCH 534/537] =?UTF-8?q?=F0=9F=A4=96=20[master]=20Bump=20the=20?= =?UTF-8?q?Pkg=20stdlib=20from=207b759d7f0=20to=20d84a1a38b=20(#56743)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stdlib: Pkg URL: https://github.com/JuliaLang/Pkg.jl.git Stdlib branch: master Julia branch: master Old commit: 7b759d7f0 New commit: d84a1a38b Julia version: 1.12.0-DEV Pkg version: 1.12.0 Bump invoked by: @KristofferC Powered by: 
[BumpStdlibs.jl](https://github.com/JuliaLang/BumpStdlibs.jl) Diff: https://github.com/JuliaLang/Pkg.jl/compare/7b759d7f0af56c5ad01f2289bbad71284a556970...d84a1a38b6466fa7400e9ad2874a0ef963a10456 ``` $ git log --oneline 7b759d7f0..d84a1a38b d84a1a38b Allow use of a url and subdir in [sources] (#4039) cd75456a8 Fix heading (#4102) b61066120 rename FORMER_STDLIBS -> UPGRADABLE_STDLIBS (#4070) 814949ed2 Increase version of `StaticArrays` in `why` tests (#4077) 83e13631e Run CI on backport branch too (#4094) ``` Co-authored-by: Dilum Aluthge --- .../Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 | 1 - .../Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 | 1 - .../Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/md5 | 1 + .../Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/sha512 | 1 + stdlib/Pkg.version | 2 +- 5 files changed, 3 insertions(+), 3 deletions(-) delete mode 100644 deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 delete mode 100644 deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 create mode 100644 deps/checksums/Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/md5 create mode 100644 deps/checksums/Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/sha512 diff --git a/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 b/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 deleted file mode 100644 index e55e74562d717..0000000000000 --- a/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/md5 +++ /dev/null @@ -1 +0,0 @@ -20d63322fc5b547d4c2464c27e9a6a0e diff --git a/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 b/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 deleted file mode 100644 index 5094dddb8142a..0000000000000 --- a/deps/checksums/Pkg-7b759d7f0af56c5ad01f2289bbad71284a556970.tar.gz/sha512 +++ /dev/null @@ -1 +0,0 @@ -93dd178af474c76cce9368416d34570b66cc44c7c311e4dc14569d3f9deed70afcae8a2b1976535ed0732ed305c6d8d1b0ef04cbeeaa3af2891e97650d51467d diff --git a/deps/checksums/Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/md5 b/deps/checksums/Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/md5 new file mode 100644 index 0000000000000..f5f87b3f2fa9e --- /dev/null +++ b/deps/checksums/Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/md5 @@ -0,0 +1 @@ +1a5c995237815e0d7d5ee1ec50006c1c diff --git a/deps/checksums/Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/sha512 b/deps/checksums/Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/sha512 new file mode 100644 index 0000000000000..839cd4704a135 --- /dev/null +++ b/deps/checksums/Pkg-d84a1a38b6466fa7400e9ad2874a0ef963a10456.tar.gz/sha512 @@ -0,0 +1 @@ +2e0b984c8272fe4468e0b527698a58d5010ef3f18d38d862665902e5a0e0b7ba65d3085b3d9de367a7b48a216e71d1611687804254503b3905b7b4d217a00f2f diff --git a/stdlib/Pkg.version b/stdlib/Pkg.version index 8b40c45c4366f..9ef9ca4b04376 100644 --- a/stdlib/Pkg.version +++ b/stdlib/Pkg.version @@ -1,4 +1,4 @@ PKG_BRANCH = master -PKG_SHA1 = 7b759d7f0af56c5ad01f2289bbad71284a556970 +PKG_SHA1 = d84a1a38b6466fa7400e9ad2874a0ef963a10456 PKG_GIT_URL := https://github.com/JuliaLang/Pkg.jl.git PKG_TAR_URL = https://api.github.com/repos/JuliaLang/Pkg.jl/tarball/$1 From 8aac4cc282004e5f4c7fc70810207956fde98164 Mon Sep 17 00:00:00 2001 From: Keno Fischer Date: Thu, 5 Dec 2024 00:33:33 -0700 Subject: [PATCH 535/537] Hide IRShow include from Revise (#56756) Revise in theory wants to re-evaluate this include, but it 
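The next paragraph describes the fix; as a rough standalone sketch of that pattern (all names and paths here are hypothetical, and the real code additionally branches on whether bootstrap has finished), the include is wrapped in a function and resolved against the current source file so that a later re-evaluation, e.g. by Revise, still finds it:

```julia
# Hypothetical stand-ins, not the Compiler's actual module or file layout.
module IRShowLike end

function load_irshowlike!()
    # Resolve relative to this source file's directory rather than the
    # current working directory, so the call also works when re-run later.
    Base.include(IRShowLike, joinpath(@__DIR__, "ssair", "show.jl"))
end
```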
fails at doing so, because the include call no longer works after bootstrap. It happens to work right now on master, because the lowering of `Compiler.include` happens to hide the include call from Revise, but that's a Revise bug I'm about to fix. Address this by moving the include call into the package and using an absolute include if necessary. --- Compiler/src/Compiler.jl | 16 +++++++++++++++- base/show.jl | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index 2cf7e5508196c..ddcea8a6c5cbb 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -181,12 +181,26 @@ include("bootstrap.jl") include("reflection_interface.jl") include("opaque_closure.jl") +macro __SOURCE_FILE__() + __source__.file === nothing && return nothing + return __source__.file::Symbol +end + module IRShow end +function load_irshow!() + if isdefined(Base, :end_base_include) + # This code path is exclusively for Revise, which may want to re-run this + # after bootstrap. + include(IRShow, Base.joinpath(Base.dirname(Base.String(@__SOURCE_FILE__)), "ssair/show.jl")) + else + include(IRShow, "ssair/show.jl") + end +end if !isdefined(Base, :end_base_include) # During bootstrap, skip including this file and defer it to base/show.jl to include later else # When this module is loaded as the standard library, include this file as usual - include(IRShow, "ssair/show.jl") + load_irshow!() end end # baremodule Compiler diff --git a/base/show.jl b/base/show.jl index 23957d6e29b2d..cb36488b92bc1 100644 --- a/base/show.jl +++ b/base/show.jl @@ -2821,7 +2821,7 @@ function show(io::IO, vm::Core.TypeofVararg) end end -Compiler.include(Compiler.IRShow, "ssair/show.jl") # define `show` for the compiler types +Compiler.load_irshow!() const IRShow = Compiler.IRShow # an alias for compatibility function show(io::IO, src::CodeInfo; debuginfo::Symbol=:source) From e572d2316fb5bd6cb5e57c0d4300edddb4eb2062 Mon Sep 17 00:00:00 2001 From: Shuhei Kadowaki <40514306+aviatesk@users.noreply.github.com> Date: Thu, 5 Dec 2024 18:12:23 +0900 Subject: [PATCH 536/537] fixup!: JuliaLang/julia#56756 (#56758) We need to quote it, otherwise it would result in `UnderVarError`. --- Compiler/src/Compiler.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Compiler/src/Compiler.jl b/Compiler/src/Compiler.jl index ddcea8a6c5cbb..8dd15462ed998 100644 --- a/Compiler/src/Compiler.jl +++ b/Compiler/src/Compiler.jl @@ -183,7 +183,7 @@ include("opaque_closure.jl") macro __SOURCE_FILE__() __source__.file === nothing && return nothing - return __source__.file::Symbol + return QuoteNode(__source__.file::Symbol) end module IRShow end From 5835c3b69e4e0f47eeb9a512d91622b50ad3423c Mon Sep 17 00:00:00 2001 From: Jishnu Bhattacharya Date: Thu, 5 Dec 2024 17:38:12 +0530 Subject: [PATCH 537/537] Accept more general Integer sizes in reshape (#55521) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR generalizes the `reshape` methods to accept `Integer`s instead of `Int`s, and adds a `_reshape_uncolon` method for `Integer` arguments. The current `_reshape_uncolon` method that accepts `Int`s is left unchanged to ensure that the inferred types are not impacted. I've also tried to ensure that most `Integer` subtypes in `Base` that may be safely converted to `Int`s pass through that method. 
The call sequence would now go like this: ```julia reshape(A, ::Tuple{Vararg{Union{Integer, Colon}}}) -> reshape(A, ::Tuple{Vararg{Integer}}) -> reshape(A, ::Tuple{Vararg{Int}}) (fallback) ``` This lets packages define `reshape(A::CustomArray, ::Tuple{Integer, Vararg{Integer}})` without having to implement `_reshape_uncolon` by themselves (or having to call internal `Base` functions, as in https://github.com/JuliaArrays/FillArrays.jl/issues/373). `reshape` calls involving a `Colon` would convert this to an `Integer` in `Base`, and then pass the `Integer` sizes to the custom method defined in the package. This PR does not resolve issues like https://github.com/JuliaLang/julia/issues/40076 because this still converts `Integer`s to `Int`s in the actual reshaping step. However, `BigInt` sizes that may be converted to `Int`s will work now: ```julia julia> reshape(1:4, big(2), big(2)) 2×2 reshape(::UnitRange{Int64}, 2, 2) with eltype Int64: 1 3 2 4 julia> reshape(1:4, big(1), :) 1×4 reshape(::UnitRange{Int64}, 1, 4) with eltype Int64: 1 2 3 4 ``` Note that the reshape method with `Integer` sizes explicitly converts these to `Int`s to avoid self-recursion (as opposed to calling `to_shape` to carry out the conversion implicitly). In the future, we may want to decide what to do with types or values that can't be converted to an `Int`. --------- Co-authored-by: Neven Sajko --- base/reshapedarray.jl | 57 +++++++++++++++++++++----------- test/abstractarray.jl | 20 +++++++++++ test/offsetarray.jl | 11 ++++++ test/testhelpers/OffsetArrays.jl | 4 ++- 4 files changed, 72 insertions(+), 20 deletions(-) diff --git a/base/reshapedarray.jl b/base/reshapedarray.jl index 07f608588837b..f65a7d8c9561a 100644 --- a/base/reshapedarray.jl +++ b/base/reshapedarray.jl @@ -121,37 +121,56 @@ reshape reshape(parent::AbstractArray, dims::IntOrInd...) = reshape(parent, dims) reshape(parent::AbstractArray, shp::Tuple{Union{Integer,OneTo}, Vararg{Union{Integer,OneTo}}}) = reshape(parent, to_shape(shp)) +reshape(parent::AbstractArray, dims::Tuple{Integer, Vararg{Integer}}) = reshape(parent, map(Int, dims)) reshape(parent::AbstractArray, dims::Dims) = _reshape(parent, dims) # Allow missing dimensions with Colon(): reshape(parent::AbstractVector, ::Colon) = parent reshape(parent::AbstractVector, ::Tuple{Colon}) = parent reshape(parent::AbstractArray, dims::Int...) = reshape(parent, dims) -reshape(parent::AbstractArray, dims::Union{Int,Colon}...) = reshape(parent, dims) -reshape(parent::AbstractArray, dims::Tuple{Vararg{Union{Int,Colon}}}) = reshape(parent, _reshape_uncolon(parent, dims)) -@inline function _reshape_uncolon(A, dims) - @noinline throw1(dims) = throw(DimensionMismatch(string("new dimensions $(dims) ", - "may have at most one omitted dimension specified by `Colon()`"))) - @noinline throw2(A, dims) = throw(DimensionMismatch(string("array size $(length(A)) ", - "must be divisible by the product of the new dimensions $dims"))) - pre = _before_colon(dims...)::Tuple{Vararg{Int}} +reshape(parent::AbstractArray, dims::Integer...) = reshape(parent, dims) +reshape(parent::AbstractArray, dims::Union{Integer,Colon}...) 
+reshape(parent::AbstractArray, dims::Tuple{Vararg{Union{Integer,Colon}}}) = reshape(parent, _reshape_uncolon(parent, dims))
+
+@noinline throw1(dims) = throw(DimensionMismatch(LazyString("new dimensions ", dims,
+    " may have at most one omitted dimension specified by `Colon()`")))
+@noinline throw2(lenA, dims) = throw(DimensionMismatch(string("array size ", lenA,
+    " must be divisible by the product of the new dimensions ", dims)))
+
+@inline function _reshape_uncolon(A, _dims::Tuple{Vararg{Union{Integer, Colon}}})
+    # promote the dims to `Int` at least
+    dims = map(x -> x isa Colon ? x : promote_type(typeof(x), Int)(x), _dims)
+    pre = _before_colon(dims...)
     post = _after_colon(dims...)
     _any_colon(post...) && throw1(dims)
-    post::Tuple{Vararg{Int}}
     len = length(A)
-    sz, is_exact = if iszero(len)
-        (0, true)
+    _reshape_uncolon_computesize(len, dims, pre, post)
+end
+@inline function _reshape_uncolon_computesize(len::Int, dims, pre::Tuple{Vararg{Int}}, post::Tuple{Vararg{Int}})
+    sz = if iszero(len)
+        0
     else
         let pr = Core.checked_dims(pre..., post...) # safe product
-            if iszero(pr)
-                throw2(A, dims)
-            end
-            (quo, rem) = divrem(len, pr)
-            (Int(quo), iszero(rem))
+            quo = _reshape_uncolon_computesize_nonempty(len, dims, pr)
+            convert(Int, quo)
         end
-    end::Tuple{Int,Bool}
-    is_exact || throw2(A, dims)
-    (pre..., sz, post...)::Tuple{Int,Vararg{Int}}
+    end
+    (pre..., sz, post...)
+end
+@inline function _reshape_uncolon_computesize(len, dims, pre, post)
+    pr = prod((pre..., post...))
+    sz = if iszero(len)
+        promote(len, pr)[1] # zero of the correct type
+    else
+        _reshape_uncolon_computesize_nonempty(len, dims, pr)
+    end
+    (pre..., sz, post...)
+end
+@inline function _reshape_uncolon_computesize_nonempty(len, dims, pr)
+    iszero(pr) && throw2(len, dims)
+    (quo, rem) = divrem(len, pr)
+    iszero(rem) || throw2(len, dims)
+    quo
 end
 @inline _any_colon() = false
 @inline _any_colon(dim::Colon, tail...) = true
diff --git a/test/abstractarray.jl b/test/abstractarray.jl
index 2a2ec8e8e432c..4af4099eced45 100644
--- a/test/abstractarray.jl
+++ b/test/abstractarray.jl
@@ -2186,3 +2186,23 @@ end
     copyto!(A, 1, x, 1)
     @test A == axes(A,1)
 end
+
+@testset "reshape with Integer sizes" begin
+    @test reshape(1:4, big(2), big(2)) == reshape(1:4, 2, 2)
+    a = [1 2 3; 4 5 6]
+    reshaped_arrays = (
+        reshape(a, 3, 2),
+        reshape(a, (3, 2)),
+        reshape(a, big(3), big(2)),
+        reshape(a, (big(3), big(2))),
+        reshape(a, :, big(2)),
+        reshape(a, (:, big(2))),
+        reshape(a, big(3), :),
+        reshape(a, (big(3), :)),
+    )
+    @test allequal(reshaped_arrays)
+    for b ∈ reshaped_arrays
+        @test b isa Matrix{Int}
+        @test b.ref === a.ref
+    end
+end
diff --git a/test/offsetarray.jl b/test/offsetarray.jl
index fb5855dfbaa0d..8e2ee33c49ed6 100644
--- a/test/offsetarray.jl
+++ b/test/offsetarray.jl
@@ -914,3 +914,14 @@ end
     b = sum(a, dims=1)
     @test b[begin] == sum(r)
 end
+
+@testset "reshape" begin
+    A0 = [1 3; 2 4]
+    A = reshape(A0, 2:3, 4:5)
+    @test axes(A) == Base.IdentityUnitRange.((2:3, 4:5))
+
+    B = reshape(A0, -10:-9, 9:10)
+    @test isa(B, OffsetArray{Int,2})
+    @test parent(B) == A0
+    @test axes(B) == Base.IdentityUnitRange.((-10:-9, 9:10))
+end
diff --git a/test/testhelpers/OffsetArrays.jl b/test/testhelpers/OffsetArrays.jl
index 17b2d8c28680a..e895372a34974 100644
--- a/test/testhelpers/OffsetArrays.jl
+++ b/test/testhelpers/OffsetArrays.jl
@@ -529,7 +529,7 @@ _similar_axes_or_length(AT, ax::I, ::I) where {I} = similar(AT, map(_indexlength
 
 # reshape accepts a single colon
 Base.reshape(A::AbstractArray, inds::OffsetAxis...) = reshape(A, inds)
-function Base.reshape(A::AbstractArray, inds::Tuple{OffsetAxis,Vararg{OffsetAxis}})
+function Base.reshape(A::AbstractArray, inds::Tuple{Vararg{OffsetAxis}})
     AR = reshape(no_offset_view(A), map(_indexlength, inds))
     O = OffsetArray(AR, map(_offset, axes(AR), inds))
     return _popreshape(O, axes(AR), _filterreshapeinds(inds))
@@ -557,6 +557,8 @@ Base.reshape(A::OffsetArray, inds::Tuple{OffsetAxis,Vararg{OffsetAxis}}) =
     OffsetArray(_reshape(parent(A), inds), map(_toaxis, inds))
 # And for non-offset axes, we can just return a reshape of the parent directly
 Base.reshape(A::OffsetArray, inds::Tuple{Union{Integer,Base.OneTo},Vararg{Union{Integer,Base.OneTo}}}) = _reshape_nov(A, inds)
+Base.reshape(A::OffsetArray, inds::Tuple{Integer,Vararg{Integer}}) = _reshape_nov(A, inds)
+Base.reshape(A::OffsetArray, inds::Tuple{Union{Colon, Integer}, Vararg{Union{Colon, Integer}}}) = _reshape_nov(A, inds)
 Base.reshape(A::OffsetArray, inds::Dims) = _reshape_nov(A, inds)
 Base.reshape(A::OffsetVector, ::Colon) = A
 Base.reshape(A::OffsetVector, ::Tuple{Colon}) = A