diff --git a/base/abstractarray.jl b/base/abstractarray.jl index 98ac697ab4d12..01116e7c26037 100644 --- a/base/abstractarray.jl +++ b/base/abstractarray.jl @@ -1084,6 +1084,7 @@ function copy(a::AbstractArray) @_propagate_inbounds_meta copymutable(a) end +copy(a::Core.ImmutableArray) = a function copyto!(B::AbstractVecOrMat{R}, ir_dest::AbstractRange{Int}, jr_dest::AbstractRange{Int}, A::AbstractVecOrMat{S}, ir_src::AbstractRange{Int}, jr_src::AbstractRange{Int}) where {R,S} diff --git a/base/array.jl b/base/array.jl index 2fc1ccfdf7dda..81a2187707552 100644 --- a/base/array.jl +++ b/base/array.jl @@ -118,6 +118,42 @@ Union type of [`DenseVector{T}`](@ref) and [`DenseMatrix{T}`](@ref). """ const DenseVecOrMat{T} = Union{DenseVector{T}, DenseMatrix{T}} +""" + ImmutableArray{T,N} <: AbstractArray{T,N} + +Dynamically allocated, immutable array. + +""" +const ImmutableArray = Core.ImmutableArray + +""" + ImmutableVector{T} <: AbstractVector{T} + +Dynamically allocated, immutable vector. +""" +const ImmutableVector{T} = ImmutableArray{T,1} + +""" + IMArray{T,N} + +Union type of [`Array{T,N}`](@ref) and [`ImmutableArray{T,N}`](@ref) +""" +const IMArray{T,N} = Union{Array{T, N}, ImmutableArray{T,N}} + +""" + IMVector{T} + +One-dimensional [`ImmutableArray`](@ref) or [`Array`](@ref) with elements of type `T`. Alias for `IMArray{T, 1}`. +""" +const IMVector{T} = IMArray{T, 1} + +""" + IMMatrix{T} + +Two-dimensional [`ImmutableArray`](@ref) or [`Array`](@ref) with elements of type `T`. Alias for `IMArray{T,2}`. +""" +const IMMatrix{T} = IMArray{T, 2} + ## Basic functions ## import Core: arraysize, arrayset, arrayref, const_arrayref @@ -147,12 +183,13 @@ function vect(X...) return copyto!(Vector{T}(undef, length(X)), X) end -size(a::Array, d::Integer) = arraysize(a, convert(Int, d)) -size(a::Vector) = (arraysize(a,1),) -size(a::Matrix) = (arraysize(a,1), arraysize(a,2)) -size(a::Array{<:Any,N}) where {N} = (@inline; ntuple(M -> size(a, M), Val(N))::Dims) +# Size functions for arrays, both mutable and immutable +size(a::IMArray, d::Integer) = arraysize(a, convert(Int, d)) +size(a::IMVector) = (arraysize(a,1),) +size(a::IMMatrix) = (arraysize(a,1), arraysize(a,2)) +size(a::IMArray{<:Any,N}) where {N} = (@inline; ntuple(M -> size(a, M), Val(N))::Dims) -asize_from(a::Array, n) = n > ndims(a) ? () : (arraysize(a,n), asize_from(a, n+1)...) +asize_from(a::IMArray, n) = n > ndims(a) ? () : (arraysize(a,n), asize_from(a, n+1)...) allocatedinline(T::Type) = (@_pure_meta; ccall(:jl_stored_inline, Cint, (Any,), T) != Cint(0)) @@ -216,7 +253,7 @@ length(a::Array) = arraylen(a) elsize(::Type{<:Array{T}}) where {T} = aligned_sizeof(T) sizeof(a::Array) = Core.sizeof(a) -function isassigned(a::Array, i::Int...) +function isassigned(a::IMArray, i::Int...) @inline ii = (_sub2ind(size(a), i...) % UInt) - 1 @boundscheck ii < length(a) % UInt || return false @@ -611,8 +648,8 @@ oneunit(x::AbstractMatrix{T}) where {T} = _one(oneunit(T), x) ## Conversions ## -convert(::Type{T}, a::AbstractArray) where {T<:Array} = a isa T ? a : T(a) convert(::Type{Union{}}, a::AbstractArray) = throw(MethodError(convert, (Union{}, a))) +convert(T::Type{<:IMArray}, a::AbstractArray) = a isa T ? 
a : T(a) promote_rule(a::Type{Array{T,n}}, b::Type{Array{S,n}}) where {T,n,S} = el_same(promote_type(T,S), a, b) @@ -622,6 +659,7 @@ if nameof(@__MODULE__) === :Base # avoid method overwrite # constructors should make copies Array{T,N}(x::AbstractArray{S,N}) where {T,N,S} = copyto_axcheck!(Array{T,N}(undef, size(x)), x) AbstractArray{T,N}(A::AbstractArray{S,N}) where {T,N,S} = copyto_axcheck!(similar(A,T), A) +ImmutableArray{T,N}(Ar::AbstractArray{S,N}) where {T,N,S} = Core.arrayfreeze(copyto_axcheck!(Array{T,N}(undef, size(Ar)), Ar)) end ## copying iterators to containers @@ -921,6 +959,9 @@ function getindex end @eval getindex(A::Array, i1::Int) = arrayref($(Expr(:boundscheck)), A, i1) @eval getindex(A::Array, i1::Int, i2::Int, I::Int...) = (@inline; arrayref($(Expr(:boundscheck)), A, i1, i2, I...)) +@eval getindex(A::ImmutableArray, i1::Int) = arrayref($(Expr(:boundscheck)), A, i1) +@eval getindex(A::ImmutableArray, i1::Int, i2::Int, I::Int...) = (@inline; arrayref($(Expr(:boundscheck)), A, i1, i2, I...)) + # Faster contiguous indexing using copyto! for AbstractUnitRange and Colon function getindex(A::Array, I::AbstractUnitRange{<:Integer}) @inline diff --git a/base/broadcast.jl b/base/broadcast.jl index fb9ba9555cfd9..85b9057e8ceef 100644 --- a/base/broadcast.jl +++ b/base/broadcast.jl @@ -1345,4 +1345,17 @@ function Base.show(io::IO, op::BroadcastFunction) end Base.show(io::IO, ::MIME"text/plain", op::BroadcastFunction) = show(io, op) +struct IMArrayStyle <: Broadcast.AbstractArrayStyle{Any} end +BroadcastStyle(::Type{<:Core.ImmutableArray}) = IMArrayStyle() + +#similar has to return mutable array +function Base.similar(bc::Broadcasted{IMArrayStyle}, ::Type{ElType}) where ElType + similar(Array{ElType}, axes(bc)) +end + +@inline function copy(bc::Broadcasted{IMArrayStyle}) + ElType = combine_eltypes(bc.f, bc.args) + return Core.ImmutableArray(copyto!(similar(bc, ElType), bc)) +end + end # module diff --git a/base/compiler/EscapeAnalysis/EAUtils.jl b/base/compiler/EscapeAnalysis/EAUtils.jl new file mode 100644 index 0000000000000..cde0c8257a7d3 --- /dev/null +++ b/base/compiler/EscapeAnalysis/EAUtils.jl @@ -0,0 +1,403 @@ +const EA_AS_PKG = Symbol(@__MODULE__) !== :Base # develop EA as an external package + +module EAUtils + +import ..EA_AS_PKG +if EA_AS_PKG + import ..EscapeAnalysis +else + import Core.Compiler.EscapeAnalysis: EscapeAnalysis + Base.getindex(estate::EscapeAnalysis.EscapeState, @nospecialize(x)) = + Core.Compiler.getindex(estate, x) +end +const EA = EscapeAnalysis +const CC = Core.Compiler + +# entries +# ------- + +@static if EA_AS_PKG +import InteractiveUtils: gen_call_with_extracted_types_and_kwargs + +@doc """ + @code_escapes [options...] f(args...) + +Evaluates the arguments to the function call, determines its types, and then calls +[`code_escapes`](@ref) on the resulting expression. +As with `@code_typed` and its family, any of `code_escapes` keyword arguments can be given +as the optional arguments like `@code_escapes interp=myinterp myfunc(myargs...)`. +""" +macro code_escapes(ex0...) + return gen_call_with_extracted_types_and_kwargs(__module__, :code_escapes, ex0) +end +end # @static if EA_AS_PKG + +""" + code_escapes(f, argtypes=Tuple{}; [world], [interp]) -> result::EscapeResult + code_escapes(tt::Type{<:Tuple}; [world], [interp]) -> result::EscapeResult + +Runs the escape analysis on optimized IR of a generic function call with the given type signature. +Note that the escape analysis runs after inlining, but before any other optimizations. 
+
+```julia
+julia> mutable struct SafeRef{T}
+           x::T
+       end
+
+julia> Base.getindex(x::SafeRef) = x.x;
+
+julia> Base.isassigned(x::SafeRef) = true;
+
+julia> get′(x) = isassigned(x) ? x[] : throw(x);
+
+julia> result = code_escapes((String,String,String)) do s1, s2, s3
+           r1 = Ref(s1)
+           r2 = Ref(s2)
+           r3 = SafeRef(s3)
+           try
+               s1 = get′(r1)
+               ret = sizeof(s1)
+           catch err
+               global g = err # will definitely escape `r1`
+           end
+           s2 = get′(r2) # still `r2` doesn't escape fully
+           s3 = get′(r3) # still `r3` doesn't escape fully
+           return s2, s3
+       end
+#3(X _2::String, ↑ _3::String, ↑ _4::String) in Main at REPL[7]:2
+2  X  1 ── %1 = %new(Base.RefValue{String}, _2)::Base.RefValue{String} │╻╷╷ Ref
+3  *′ │    %2 = %new(Base.RefValue{String}, _3)::Base.RefValue{String} │╻╷╷ Ref
+4  ✓′ └─── %3 = %new(SafeRef{String}, _4)::SafeRef{String}             │╻╷  SafeRef
+5  ◌  2 ── %4 = \$(Expr(:enter, #8))                                   │
+   ✓′ │    %5 = ϒ (%3)::SafeRef{String}                                │
+   *′ └─── %6 = ϒ (%2)::Base.RefValue{String}                          │
+6  ◌  3 ── %7 = Base.isdefined(%1, :x)::Bool                           │╻╷ get′
+   ◌  └─── goto #5 if not %7                                           ││
+   X  4 ── Base.getfield(%1, :x)::String                               ││╻ getindex
+   ◌  └─── goto #6                                                     ││
+   ◌  5 ── Main.throw(%1)::Union{}                                     ││
+   ◌  └─── unreachable                                                 ││
+7  ◌  6 ── nothing::typeof(Core.sizeof)                                │╻ sizeof
+   ◌  │    nothing::Int64                                              ││
+   ◌  └─── \$(Expr(:leave, 1))                                         │
+   ◌  7 ── goto #10                                                    │
+   ✓′ 8 ── %17 = φᶜ (%5)::SafeRef{String}                              │
+   *′ │    %18 = φᶜ (%6)::Base.RefValue{String}                        │
+   ◌  └─── \$(Expr(:leave, 1))                                         │
+   X  9 ── %20 = \$(Expr(:the_exception))::Any                         │
+9  ◌  │    (Main.g = %20)::Any                                         │
+   ◌  └─── \$(Expr(:pop_exception, :(%4)))::Any                        │
+11 ✓′ 10 ┄ %23 = φ (#7 => %3, #9 => %17)::SafeRef{String}              │
+   *′ │    %24 = φ (#7 => %2, #9 => %18)::Base.RefValue{String}        │
+   ◌  │    %25 = Base.isdefined(%24, :x)::Bool                         ││╻ isassigned
+   ◌  └─── goto #12 if not %25                                         ││
+   ↑  11 ─ %27 = Base.getfield(%24, :x)::String                        │││╻ getproperty
+   ◌  └─── goto #13                                                    ││
+   ◌  12 ─ Main.throw(%24)::Union{}                                    ││
+   ◌  └─── unreachable                                                 ││
+12 ↑  13 ─ %31 = Base.getfield(%23, :x)::String                        │╻╷╷ get′
+13 ↑  │    %32 = Core.tuple(%27, %31)::Tuple{String, String}           │
+   ◌  └─── return %32                                                  │
+```
+
+The symbols beside each call argument and SSA statement represent the following meanings:
+- `◌`: this value is not analyzed because its escape information won't be used anyway (e.g. when the object is `isbitstype`)
+- `✓`: this value never escapes (`has_no_escape(result.state[x])` holds)
+- `↑`: this value can escape to the caller via return (`has_return_escape(result.state[x])` holds)
+- `X`: this value can escape to somewhere the escape analysis can't reason about, e.g. a global memory location (`has_all_escape(result.state[x])` holds)
+- `*`: this value's escape state is between `ReturnEscape` and `AllEscape` in the `EscapeLattice`, e.g. it has unhandled `ThrownEscape`
+
+An additional `′` indicates that field analysis has been done successfully on that value.
+ +For testing, escape information of each call argument and SSA value can be inspected programmatically as like: +```julia +julia> result.state[Core.Argument(3)] +ReturnEscape + +julia> result.state[Core.SSAValue(3)] +NoEscape′ +``` +""" +function code_escapes(@nospecialize(args...); + world = get_world_counter(), + interp = Core.Compiler.NativeInterpreter(world)) + interp = EscapeAnalyzer(interp) + results = code_typed(args...; optimize=true, world, interp) + isone(length(results)) || throw(ArgumentError("`code_escapes` only supports single analysis result")) + return EscapeResult(interp.ir, interp.state, interp.linfo) +end + +# AbstractInterpreter +# ------------------- + +# imports +import .CC: + AbstractInterpreter, + NativeInterpreter, + WorldView, + WorldRange, + InferenceParams, + OptimizationParams, + get_world_counter, + get_inference_cache, + lock_mi_inference, + unlock_mi_inference, + add_remark!, + may_optimize, + may_compress, + may_discard_trees, + verbose_stmt_info, + code_cache, + @timeit, + get_inference_cache, + convert_to_ircode, + slot2reg, + compact!, + ssa_inlining_pass!, + sroa_pass!, + adce_pass!, + type_lift_pass!, + JLOptions, + verify_ir, + verify_linetable +# usings +import Core: + CodeInstance, MethodInstance, CodeInfo +import .CC: + OptimizationState, IRCode +import .EA: + analyze_escapes, cache_escapes! + +mutable struct EscapeAnalyzer{State} <: AbstractInterpreter + native::NativeInterpreter + ir::IRCode + state::State + linfo::MethodInstance + EscapeAnalyzer(native::NativeInterpreter) = new{EscapeState}(native) +end + +CC.InferenceParams(interp::EscapeAnalyzer) = InferenceParams(interp.native) +CC.OptimizationParams(interp::EscapeAnalyzer) = OptimizationParams(interp.native) +CC.get_world_counter(interp::EscapeAnalyzer) = get_world_counter(interp.native) + +CC.lock_mi_inference(::EscapeAnalyzer, ::MethodInstance) = nothing +CC.unlock_mi_inference(::EscapeAnalyzer, ::MethodInstance) = nothing + +CC.add_remark!(interp::EscapeAnalyzer, sv, s) = add_remark!(interp.native, sv, s) + +CC.may_optimize(interp::EscapeAnalyzer) = may_optimize(interp.native) +CC.may_compress(interp::EscapeAnalyzer) = may_compress(interp.native) +CC.may_discard_trees(interp::EscapeAnalyzer) = may_discard_trees(interp.native) +CC.verbose_stmt_info(interp::EscapeAnalyzer) = verbose_stmt_info(interp.native) + +CC.get_inference_cache(interp::EscapeAnalyzer) = get_inference_cache(interp.native) + +const GLOBAL_CODE_CACHE = IdDict{MethodInstance,CodeInstance}() +__clear_code_cache!() = empty!(GLOBAL_CODE_CACHE) + +function CC.code_cache(interp::EscapeAnalyzer) + worlds = WorldRange(get_world_counter(interp)) + return WorldView(GlobalCache(), worlds) +end + +struct GlobalCache end + +CC.haskey(wvc::WorldView{GlobalCache}, mi::MethodInstance) = haskey(GLOBAL_CODE_CACHE, mi) + +CC.get(wvc::WorldView{GlobalCache}, mi::MethodInstance, default) = get(GLOBAL_CODE_CACHE, mi, default) + +CC.getindex(wvc::WorldView{GlobalCache}, mi::MethodInstance) = getindex(GLOBAL_CODE_CACHE, mi) + +function CC.setindex!(wvc::WorldView{GlobalCache}, ci::CodeInstance, mi::MethodInstance) + GLOBAL_CODE_CACHE[mi] = ci + add_callback!(mi) # register the callback on invalidation + return nothing +end + +function add_callback!(linfo) + if !isdefined(linfo, :callbacks) + linfo.callbacks = Any[invalidate_cache!] + else + if !any(@nospecialize(cb)->cb===invalidate_cache!, linfo.callbacks) + push!(linfo.callbacks, invalidate_cache!) 
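+            # the registered `invalidate_cache!` (defined below) evicts this entry
+            # and its backedges from `GLOBAL_CODE_CACHE` on method invalidation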
+        end
+    end
+    return nothing
+end
+
+function invalidate_cache!(replaced, max_world, depth = 0)
+    delete!(GLOBAL_CODE_CACHE, replaced)
+
+    if isdefined(replaced, :backedges)
+        for mi in replaced.backedges
+            mi = mi::MethodInstance
+            if !haskey(GLOBAL_CODE_CACHE, mi)
+                continue # otherwise fall into infinite loop
+            end
+            invalidate_cache!(mi, max_world, depth+1)
+        end
+    end
+    return nothing
+end
+
+function CC.optimize(interp::EscapeAnalyzer, opt::OptimizationState, params::OptimizationParams, @nospecialize(result))
+    ir = run_passes_with_ea(interp, opt.src, opt)
+    return CC.finish(interp, opt, params, ir, result)
+end
+
+function run_passes_with_ea(interp::EscapeAnalyzer, ci::CodeInfo, sv::OptimizationState)
+    @timeit "convert"   ir = convert_to_ircode(ci, sv)
+    @timeit "slot2reg"  ir = slot2reg(ir, ci, sv)
+    # TODO: Domsorting can produce an updated domtree - no need to recompute here
+    @timeit "compact 1" ir = compact!(ir)
+    @timeit "Inlining"  ir = ssa_inlining_pass!(ir, ir.linetable, sv.inlining, ci.propagate_inbounds)
+    # @timeit "verify 2" verify_ir(ir)
+    @timeit "compact 2" ir = compact!(ir)
+    nargs = let def = sv.linfo.def; isa(def, Method) ? Int(def.nargs) : 0; end
+    local state
+    try
+        @timeit "collect escape information" state = analyze_escapes(ir, nargs)
+    catch err
+        @info "error happened within `analyze_escapes`, inspect `Main.ir` and `Main.nargs`"
+        @eval Main (ir = $ir; nargs = $nargs)
+        rethrow(err)
+    end
+    cacheir = Core.Compiler.copy(ir)
+    # cache this result
+    cache_escapes!(sv.linfo, state, cacheir)
+    # return the result
+    interp.ir = cacheir
+    interp.state = state
+    interp.linfo = sv.linfo
+    @timeit "SROA"      ir = sroa_pass!(ir)
+    @timeit "ADCE"      ir = adce_pass!(ir)
+    @timeit "type lift" ir = type_lift_pass!(ir)
+    @timeit "compact 3" ir = compact!(ir)
+    if JLOptions().debug_level == 2
+        @timeit "verify 3" (verify_ir(ir); verify_linetable(ir.linetable))
+    end
+    return ir
+end
+
+# printing
+# --------
+
+import Core: Argument, SSAValue
+import .CC: widenconst, singleton_type
+import .EA: EscapeLattice, EscapeState, ⊑, ⊏
+
+# in order to run a whole analysis from ground zero (e.g. for benchmarking, etc.)
+__clear_caches!() = (__clear_code_cache!(); EA.__clear_escape_cache!())
+
+function get_name_color(x::EscapeLattice, symbol::Bool = false)
+    getname(x) = string(nameof(x))
+    if x === EA.⊥
+        name, color = (getname(EA.NotAnalyzed), "◌"), :plain
+    elseif EA.has_no_escape(x)
+        name, color = (getname(EA.NoEscape), "✓"), :green
+    elseif EA.has_all_escape(x)
+        name, color = (getname(EA.AllEscape), "X"), :red
+    elseif EA.NoEscape() ⊏ (EA.ignore_thrownescapes ∘ EA.ignore_aliasinfo)(x) ⊑ EA.AllReturnEscape()
+        name = (getname(EA.ReturnEscape), "↑")
+        color = EA.has_thrown_escape(x) ? :yellow : :cyan
+    else
+        name = (nothing, "*")
+        color = EA.has_thrown_escape(x) ? :yellow : :bold
+    end
+    name = symbol ?
last(name) : first(name)
+    if name !== nothing && !isa(x.AliasInfo, Bool)
+        name = string(name, "′")
+    end
+    return name, color
+end
+
+# pcs = sprint(show, collect(x.EscapeSites); context=:limit=>true)
+function Base.show(io::IO, x::EscapeLattice)
+    name, color = get_name_color(x)
+    if isnothing(name)
+        Base.@invoke show(io::IO, x::Any)
+    else
+        printstyled(io, name; color)
+    end
+end
+function Base.show(io::IO, ::MIME"application/prs.juno.inline", x::EscapeLattice)
+    name, color = get_name_color(x)
+    if isnothing(name)
+        return x # use fancy tree-view
+    else
+        printstyled(io, name; color)
+    end
+end
+
+struct EscapeResult
+    ir::IRCode
+    state::EscapeState
+    linfo::Union{Nothing,MethodInstance}
+    EscapeResult(ir::IRCode, state::EscapeState, linfo::Union{Nothing,MethodInstance} = nothing) =
+        new(ir, state, linfo)
+end
+Base.show(io::IO, result::EscapeResult) = print_with_info(io, result.ir, result.state, result.linfo)
+@eval Base.iterate(res::EscapeResult, state=1) =
+    return state > $(fieldcount(EscapeResult)) ? nothing : (getfield(res, state), state+1)
+
+# adapted from https://github.com/JuliaDebug/LoweredCodeUtils.jl/blob/4612349432447e868cf9285f647108f43bd0a11c/src/codeedges.jl#L881-L897
+function print_with_info(io::IO,
+    ir::IRCode, state::EscapeState, linfo::Union{Nothing,MethodInstance})
+    # print escape information on SSA values
+    function preprint(io::IO)
+        ft = ir.argtypes[1]
+        f = singleton_type(ft)
+        if f === nothing
+            f = widenconst(ft)
+        end
+        print(io, f, '(')
+        for i in 1:state.nargs
+            arg = state[Argument(i)]
+            i == 1 && continue
+            c, color = get_name_color(arg, true)
+            printstyled(io, c, ' ', '_', i, "::", ir.argtypes[i]; color)
+            i ≠ state.nargs && print(io, ", ")
+        end
+        print(io, ')')
+        if !isnothing(linfo)
+            def = linfo.def
+            printstyled(io, " in ", (isa(def, Module) ?
(def,) : (def.module, " at ", def.file, ':', def.line))...; color=:bold) + end + println(io) + end + + # print escape information on SSA values + # nd = ndigits(length(ssavalues)) + function preprint(io::IO, idx::Int) + c, color = get_name_color(state[SSAValue(idx)], true) + # printstyled(io, lpad(idx, nd), ' ', c, ' '; color) + printstyled(io, rpad(c, 2), ' '; color) + end + + print_with_info(preprint, (args...)->nothing, io, ir) +end + +function print_with_info(preprint, postprint, io::IO, ir::IRCode) + io = IOContext(io, :displaysize=>displaysize(io)) + used = Base.IRShow.stmts_used(io, ir) + # line_info_preprinter = Base.IRShow.lineinfo_disabled + line_info_preprinter = function (io::IO, indent::String, idx::Int) + r = Base.IRShow.inline_linfo_printer(ir)(io, indent, idx) + idx ≠ 0 && preprint(io, idx) + return r + end + line_info_postprinter = Base.IRShow.default_expr_type_printer + preprint(io) + bb_idx_prev = bb_idx = 1 + for idx = 1:length(ir.stmts) + preprint(io, idx) + bb_idx = Base.IRShow.show_ir_stmt(io, ir, idx, line_info_preprinter, line_info_postprinter, used, ir.cfg, bb_idx) + postprint(io, idx, bb_idx != bb_idx_prev) + bb_idx_prev = bb_idx + end + max_bb_idx_size = ndigits(length(ir.cfg.blocks)) + line_info_preprinter(io, " "^(max_bb_idx_size + 2), 0) + postprint(io) + return nothing +end + +end # module EAUtils diff --git a/base/compiler/EscapeAnalysis/EscapeAnalysis.jl b/base/compiler/EscapeAnalysis/EscapeAnalysis.jl new file mode 100644 index 0000000000000..d314c33c9004b --- /dev/null +++ b/base/compiler/EscapeAnalysis/EscapeAnalysis.jl @@ -0,0 +1,1499 @@ +baremodule EscapeAnalysis + +export + analyze_escapes, + cache_escapes!, + has_no_escape, + has_return_escape, + has_thrown_escape, + has_all_escape + +# analysis +# ======== + +const _TOP_MOD = ccall(:jl_base_relative_to, Any, (Any,), EscapeAnalysis)::Module + +# imports +import ._TOP_MOD: ==, getindex, setindex! +# usings +import Core: + MethodInstance, Const, Argument, SSAValue, PiNode, PhiNode, UpsilonNode, PhiCNode, + ReturnNode, GotoNode, GotoIfNot, SimpleVector, sizeof, ifelse, arrayset, arrayref, + arraysize, ImmutableArray, arrayfreeze, mutating_arrayfreeze, arraythaw +import ._TOP_MOD: # Base definitions + @__MODULE__, @eval, @assert, @nospecialize, @inbounds, @inline, @noinline, @label, @goto, + !, !==, !=, ≠, +, -, ≤, <, ≥, >, &, |, include, error, missing, copy, + Vector, BitSet, IdDict, IdSet, UnitRange, ∪, ⊆, ∩, :, ∈, ∉, in, length, get, first, last, + isempty, isassigned, pop!, push!, pushfirst!, empty!, max, min, Csize_t +import Core.Compiler: # Core.Compiler specific definitions + isbitstype, isexpr, is_meta_expr_head, println, + IRCode, IR_FLAG_EFFECT_FREE, widenconst, argextype, singleton_type, fieldcount_noerror, + try_compute_field, try_compute_fieldidx, hasintersect, ⊑ as ⊑ₜ, intrinsic_nothrow, + array_builtin_common_typecheck, arrayset_typecheck, setfield!_nothrow, compute_trycatch + +if _TOP_MOD !== Core.Compiler + include(@__MODULE__, "disjoint_set.jl") +else + include(@__MODULE__, "compiler/EscapeAnalysis/disjoint_set.jl") +end + +const AInfo = BitSet # XXX better to be IdSet{Int}? 
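+# An `AInfo` set records the SSA statement numbers at which a field or element is
+# used or defined: code below pushes `pc` to record a use and `-pc` to record a def
+# (`BitSet` can store negative `Int`s, so both kinds of sites fit in a single set)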
+struct Indexable
+    array::Bool
+    infos::Vector{AInfo}
+end
+struct Unindexable
+    array::Bool
+    info::AInfo
+end
+function merge_to_unindexable(info::AInfo, infos::Vector{AInfo})
+    for i = 1:length(infos)
+        info = info ∪ infos[i]
+    end
+    return info
+end
+merge_to_unindexable(infos::Vector{AInfo}) = merge_to_unindexable(AInfo(), infos)
+
+"""
+    x::EscapeLattice
+
+A lattice for escape information, which holds the following properties:
+- `x.Analyzed::Bool`: not formally part of the lattice, only indicates whether `x` has been analyzed
+- `x.ReturnEscape::BitSet`: records SSA statements where `x` can escape to the caller via return,
+  where `0 ∈ x.ReturnEscape` has the special meaning that it's visible to the caller
+  simply because it's passed as call argument
+- `x.ThrownEscape::BitSet`: records SSA statements where `x` can be thrown as exception:
+  this information will be used by `escape_exception!` to propagate potential escapes via exception
+- `x.AliasInfo::Union{Indexable,Unindexable,Bool}`: maintains all possible values
+  that can be aliased to fields or array elements of `x`:
+  * `x.AliasInfo === false` indicates the fields/elements of `x` aren't analyzed yet
+  * `x.AliasInfo === true` indicates the fields/elements of `x` can't be analyzed,
+    e.g. the type of `x` is not known or is not concrete and thus its fields/elements
+    can't be known precisely
+  * `x.AliasInfo::Indexable` records all the possible values that can be aliased to fields/elements of `x` with precise index information
+  * `x.AliasInfo::Unindexable` records all the possible values that can be aliased to fields/elements of `x` without precise index information
+- `x.ArgEscape::Int` (not implemented yet): indicates it will escape to the caller through
+  `setfield!` on argument(s)
+  * `-1` : no escape
+  * `0` : unknown or multiple
+  * `n` : through argument N
+
+There are utility constructors to create common `EscapeLattice`s, e.g.,
+- `NoEscape()`: the bottom(-like) element of this lattice, meaning it won't escape to anywhere
+- `AllEscape()`: the topmost element of this lattice, meaning it will escape to everywhere
+
+`analyze_escapes` will transition these elements from the bottom to the top,
+in the same direction as Julia's native type inference routine.
+An abstract state will be initialized with the bottom(-like) elements:
+- the call arguments are initialized as `ArgumentReturnEscape()`, because they're visible from a caller immediately
+- the other states are initialized as `NotAnalyzed()`, which is a special lattice element that
+  is slightly lower than `NoEscape`, but at the same time doesn't represent any meaning
+  other than it's not analyzed yet (thus it's not formally part of the lattice).
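+
+For example (a small sketch using the constructors and queries defined below in this file):
+```julia
+x = NoEscape()
+has_no_escape(x)                        # true
+x ⊑ AllEscape()                         # true: `AllEscape` is the lattice top
+has_return_escape(ReturnEscape(42), 42) # true: may return at statement 42
+```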
+""" +struct EscapeLattice + Analyzed::Bool + ReturnEscape::BitSet + ThrownEscape::BitSet + AliasInfo #::Union{Indexable,Unindexable,Bool} + # TODO: ArgEscape::Int + + function EscapeLattice( + Analyzed::Bool, + ReturnEscape::BitSet, + ThrownEscape::BitSet, + AliasInfo#=::Union{Indexable,Unindexable,Bool}=#, + ) + @nospecialize AliasInfo + return new( + Analyzed, + ReturnEscape, + ThrownEscape, + AliasInfo, + ) + end + function EscapeLattice( + x::EscapeLattice, + # non-concrete fields should be passed as default arguments + # in order to avoid allocating non-concrete `NamedTuple`s + AliasInfo#=::Union{Indexable,Unindexable,Bool}=# = x.AliasInfo; + Analyzed::Bool = x.Analyzed, + ReturnEscape::BitSet = x.ReturnEscape, + ThrownEscape::BitSet = x.ThrownEscape, + ) + @nospecialize AliasInfo + return new( + Analyzed, + ReturnEscape, + ThrownEscape, + AliasInfo, + ) + end +end + +# precomputed default values in order to eliminate computations at each callsite +const BOT_RETURN_ESCAPE = const BOT_THROWN_ESCAPE = BitSet() +const TOP_RETURN_ESCAPE = const TOP_THROWN_ESCAPE = BitSet(0:100_000) +const ARG_RETURN_ESCAPE = BitSet(0) + +const BOT_ALIAS_INFO = false +const TOP_ALIAS_INFO = true + +# the constructors +NotAnalyzed() = EscapeLattice(false, BOT_RETURN_ESCAPE, BOT_THROWN_ESCAPE, BOT_ALIAS_INFO) # not formally part of the lattice +NoEscape() = EscapeLattice(true, BOT_RETURN_ESCAPE, BOT_THROWN_ESCAPE, BOT_ALIAS_INFO) +ReturnEscape(pc::Int) = EscapeLattice(true, BitSet(pc), BOT_THROWN_ESCAPE, BOT_ALIAS_INFO) +ArgumentReturnEscape() = EscapeLattice(true, ARG_RETURN_ESCAPE, BOT_THROWN_ESCAPE, TOP_ALIAS_INFO) # TODO allow interprocedural field analysis? +AllReturnEscape() = EscapeLattice(true, TOP_RETURN_ESCAPE, BOT_THROWN_ESCAPE, BOT_ALIAS_INFO) +ThrownEscape(pc::Int) = EscapeLattice(true, BOT_RETURN_ESCAPE, BitSet(pc), BOT_ALIAS_INFO) +ThrownEscape(pcs::BitSet) = EscapeLattice(true, BOT_RETURN_ESCAPE, pcs, BOT_ALIAS_INFO) +AllEscape() = EscapeLattice(true, TOP_RETURN_ESCAPE, TOP_THROWN_ESCAPE, TOP_ALIAS_INFO) + +const ⊥, ⊤ = NotAnalyzed(), AllEscape() + +# Convenience names for some ⊑ queries +has_no_escape(x::EscapeLattice) = ignore_aliasinfo(x) ⊑ NoEscape() +has_return_escape(x::EscapeLattice) = !isempty(x.ReturnEscape) +has_return_escape(x::EscapeLattice, pc::Int) = pc in x.ReturnEscape +has_thrown_escape(x::EscapeLattice) = !isempty(x.ThrownEscape) +has_thrown_escape(x::EscapeLattice, pc::Int) = pc in x.ThrownEscape +has_all_escape(x::EscapeLattice) = ⊤ ⊑ x + +# utility lattice constructors +ignore_thrownescapes(x::EscapeLattice) = EscapeLattice(x; ThrownEscape=BOT_THROWN_ESCAPE) +ignore_aliasinfo(x::EscapeLattice) = EscapeLattice(x, BOT_ALIAS_INFO) + +# we need to make sure this `==` operator corresponds to lattice equality rather than object equality, +# otherwise `propagate_changes` can't detect the convergence +x::EscapeLattice == y::EscapeLattice = begin + # fast pass: better to avoid top comparison + x === y && return true + x.Analyzed === y.Analyzed || return false + xr, yr = x.ReturnEscape, y.ReturnEscape + if xr === TOP_RETURN_ESCAPE + yr === TOP_RETURN_ESCAPE || return false + elseif yr === TOP_RETURN_ESCAPE + return false # x.ReturnEscape === TOP_RETURN_ESCAPE + else + xr == yr || return false + end + xt, yt = x.ThrownEscape, y.ThrownEscape + if xt === TOP_THROWN_ESCAPE + yt === TOP_THROWN_ESCAPE || return false + elseif yt === TOP_THROWN_ESCAPE + return false # x.ThrownEscape === TOP_THROWN_ESCAPE + else + xt == yt || return false + end + xa, ya = x.AliasInfo, y.AliasInfo + if 
isa(xa, Bool)
+        xa === ya || return false
+    elseif isa(xa, Indexable)
+        isa(ya, Indexable) || return false
+        xa.array === ya.array || return false
+        xa.infos == ya.infos || return false
+    else
+        xa = xa::Unindexable
+        isa(ya, Unindexable) || return false
+        xa.array === ya.array || return false
+        xa.info == ya.info || return false
+    end
+    return true
+end
+
+"""
+    x::EscapeLattice ⊑ y::EscapeLattice -> Bool
+
+The non-strict partial order over `EscapeLattice`.
+"""
+x::EscapeLattice ⊑ y::EscapeLattice = begin
+    # fast pass: better to avoid top comparison
+    if y === ⊤
+        return true
+    elseif x === ⊤
+        return false # return y === ⊤
+    elseif x === ⊥
+        return true
+    elseif y === ⊥
+        return false # return x === ⊥
+    end
+    x.Analyzed ≤ y.Analyzed || return false
+    xr, yr = x.ReturnEscape, y.ReturnEscape
+    if xr === TOP_RETURN_ESCAPE
+        yr !== TOP_RETURN_ESCAPE && return false
+    elseif yr !== TOP_RETURN_ESCAPE
+        xr ⊆ yr || return false
+    end
+    xt, yt = x.ThrownEscape, y.ThrownEscape
+    if xt === TOP_THROWN_ESCAPE
+        yt !== TOP_THROWN_ESCAPE && return false
+    elseif yt !== TOP_THROWN_ESCAPE
+        xt ⊆ yt || return false
+    end
+    xa, ya = x.AliasInfo, y.AliasInfo
+    if isa(xa, Bool)
+        xa && ya !== true && return false
+    elseif isa(xa, Indexable)
+        if isa(ya, Indexable)
+            xa.array === ya.array || return false
+            xinfos, yinfos = xa.infos, ya.infos
+            xn, yn = length(xinfos), length(yinfos)
+            xn > yn && return false
+            for i in 1:xn
+                xinfos[i] ⊆ yinfos[i] || return false
+            end
+        elseif isa(ya, Unindexable)
+            xa.array === ya.array || return false
+            xinfos, yinfo = xa.infos, ya.info
+            for i in 1:length(xinfos)
+                xinfos[i] ⊆ yinfo || return false
+            end
+        else
+            ya === true || return false
+        end
+    else
+        xa = xa::Unindexable
+        if isa(ya, Unindexable)
+            xa.array === ya.array || return false
+            xinfo, yinfo = xa.info, ya.info
+            xinfo ⊆ yinfo || return false
+        else
+            ya === true || return false
+        end
+    end
+    return true
+end
+
+"""
+    x::EscapeLattice ⊏ y::EscapeLattice -> Bool
+
+The strict partial order over `EscapeLattice`.
+This is defined as the irreflexive kernel of `⊑`.
+"""
+x::EscapeLattice ⊏ y::EscapeLattice = x ⊑ y && !(y ⊑ x)
+
+"""
+    x::EscapeLattice ⋤ y::EscapeLattice -> Bool
+
+This order could be used as a slightly more efficient version of the strict order `⊏`,
+where we can safely assume `x ⊑ y` holds.
+"""
+x::EscapeLattice ⋤ y::EscapeLattice = !(y ⊑ x)
+
+"""
+    x::EscapeLattice ⊔ y::EscapeLattice -> EscapeLattice
+
+Computes the join of `x` and `y` in the partial order defined by `EscapeLattice`.
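+
+For example, a join keeps the union of the recorded escape sites (a small sketch):
+```julia
+x = ReturnEscape(1) ⊔ ThrownEscape(2)
+has_return_escape(x, 1) # true
+has_thrown_escape(x, 2) # true
+```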
+"""
+x::EscapeLattice ⊔ y::EscapeLattice = begin
+    # fast pass: better to avoid top join
+    if x === ⊤ || y === ⊤
+        return ⊤
+    elseif x === ⊥
+        return y
+    elseif y === ⊥
+        return x
+    end
+    xr, yr = x.ReturnEscape, y.ReturnEscape
+    if xr === TOP_RETURN_ESCAPE || yr === TOP_RETURN_ESCAPE
+        ReturnEscape = TOP_RETURN_ESCAPE
+    elseif xr === BOT_RETURN_ESCAPE
+        ReturnEscape = yr
+    elseif yr === BOT_RETURN_ESCAPE
+        ReturnEscape = xr
+    else
+        ReturnEscape = xr ∪ yr
+    end
+    xt, yt = x.ThrownEscape, y.ThrownEscape
+    if xt === TOP_THROWN_ESCAPE || yt === TOP_THROWN_ESCAPE
+        ThrownEscape = TOP_THROWN_ESCAPE
+    elseif xt === BOT_THROWN_ESCAPE
+        ThrownEscape = yt
+    elseif yt === BOT_THROWN_ESCAPE
+        ThrownEscape = xt
+    else
+        ThrownEscape = xt ∪ yt
+    end
+    xa, ya = x.AliasInfo, y.AliasInfo
+    if xa === true || ya === true
+        AliasInfo = true
+    elseif xa === false
+        AliasInfo = ya
+    elseif ya === false
+        AliasInfo = xa
+    elseif isa(xa, Indexable)
+        if isa(ya, Indexable) && xa.array === ya.array
+            xinfos, yinfos = xa.infos, ya.infos
+            xn, yn = length(xinfos), length(yinfos)
+            nmax, nmin = max(xn, yn), min(xn, yn)
+            infos = Vector{AInfo}(undef, nmax)
+            for i in 1:nmax
+                if i > nmin
+                    infos[i] = (xn > yn ? xinfos : yinfos)[i]
+                else
+                    infos[i] = xinfos[i] ∪ yinfos[i]
+                end
+            end
+            AliasInfo = Indexable(xa.array, infos)
+        elseif isa(ya, Unindexable) && xa.array === ya.array
+            xinfos, yinfo = xa.infos, ya.info
+            info = merge_to_unindexable(yinfo, xinfos)
+            AliasInfo = Unindexable(xa.array, info)
+        else
+            AliasInfo = true # handle conflicting case conservatively
+        end
+    else
+        xa = xa::Unindexable
+        if isa(ya, Indexable) && xa.array === ya.array
+            xinfo, yinfos = xa.info, ya.infos
+            info = merge_to_unindexable(xinfo, yinfos)
+            AliasInfo = Unindexable(xa.array, info)
+        elseif isa(ya, Unindexable) && xa.array === ya.array
+            xinfo, yinfo = xa.info, ya.info
+            info = xinfo ∪ yinfo
+            AliasInfo = Unindexable(xa.array, info)
+        else
+            AliasInfo = true # handle conflicting case conservatively
+        end
+    end
+    return EscapeLattice(
+        x.Analyzed | y.Analyzed,
+        ReturnEscape,
+        ThrownEscape,
+        AliasInfo,
+        )
+end
+
+# TODO set up a more efficient struct for caching,
+# which can discard escape information on SSA values and arguments that aren't part of the dispatch signature
+
+const AliasSet = IntDisjointSet{Int}
+
+"""
+    estate::EscapeState
+
+Extended lattice that maps arguments and SSA values to escape information represented as `EscapeLattice`.
+Escape information imposed on SSA IR element `x` can be retrieved by `estate[x]`.
+"""
+struct EscapeState
+    escapes::Vector{EscapeLattice}
+    aliasset::AliasSet
+    nargs::Int
+end
+function EscapeState(nargs::Int, nstmts::Int)
+    escapes = EscapeLattice[
+        1 ≤ i ≤ nargs ? ArgumentReturnEscape() : ⊥ for i in 1:(nargs+nstmts)]
+    aliasset = AliasSet(nargs+nstmts)
+    return EscapeState(escapes, aliasset, nargs)
+end
+function getindex(estate::EscapeState, @nospecialize(x))
+    if isa(x, Argument) || isa(x, SSAValue)
+        return estate.escapes[iridx(x, estate)]
+    else
+        return nothing
+    end
+end
+function setindex!(estate::EscapeState, v::EscapeLattice, @nospecialize(x))
+    if isa(x, Argument) || isa(x, SSAValue)
+        estate.escapes[iridx(x, estate)] = v
+    end
+    return estate
+end
+
+"""
+    iridx(x, estate::EscapeState) -> xidx::Union{Int,Nothing}
+
+Tries to convert analyzable IR element `x::Union{Argument,SSAValue}` to
+its unique identifier number `xidx` that is valid in the analysis context of `estate`.
+Returns `nothing` if `x` isn't maintained by `estate` and thus unanalyzable (e.g. `x::GlobalRef`).
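+
+For example, with `estate = EscapeState(2, 10)` (a sketch: 2 arguments, 10 statements):
+```julia
+iridx(Argument(2), estate)          # 2
+iridx(SSAValue(1), estate)          # 3 (== estate.nargs + 1)
+iridx(GlobalRef(Main, :x), estate)  # nothing
+```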
+ +`irval` is the inverse function of `iridx` (not formally), i.e. +`irval(iridx(x::Union{Argument,SSAValue}, state), state) === x`. +""" +function iridx(@nospecialize(x), estate::EscapeState) + if isa(x, Argument) + xidx = x.n + @assert 1 ≤ xidx ≤ estate.nargs "invalid Argument" + elseif isa(x, SSAValue) + xidx = x.id + estate.nargs + else + return nothing + end + return xidx +end + +""" + irval(xidx::Int, estate::EscapeState) -> x::Union{Argument,SSAValue} + +Converts its unique identifier number `xidx` to the original IR element `x::Union{Argument,SSAValue}` +that is analyzable in the context of `estate`. + +`iridx` is the inverse function of `irval` (not formally), i.e. +`iridx(irval(xidx, state), state) === xidx`. +""" +function irval(xidx::Int, estate::EscapeState) + x = xidx > estate.nargs ? SSAValue(xidx-estate.nargs) : Argument(xidx) + return x +end + +function getaliases(xidx::Int, estate::EscapeState) + aliasset = estate.aliasset + root = find_root!(aliasset, xidx) + if xidx ≠ root || aliasset.ranks[xidx] > 0 + # the size of this alias set containing `key` is larger than 1, + # collect the entire alias set + aliases = Int[] + for aidx in 1:length(aliasset.parents) + if aliasset.parents[aidx] == root + push!(aliases, aidx) + end + end + return aliases + else + return nothing + end +end + +""" + EscapeLatticeCache(x::EscapeLattice) -> x′::EscapeLatticeCache + +The data structure for caching `x::EscapeLattice` for interprocedural propagation, +which is slightly more efficient than the original `x` object. +""" +struct EscapeLatticeCache + AllEscape::Bool + ReturnEscape::Bool + ThrownEscape::Bool + function EscapeLatticeCache(x::EscapeLattice) + x === ⊤ && return new(true, true, true) + ReturnEscape = x.ReturnEscape === ARG_RETURN_ESCAPE ? false : true + ThrownEscape = isempty(x.ThrownEscape) ? false : true + return new(false, ReturnEscape, ThrownEscape) + end +end + +""" + cache_escapes!(linfo::MethodInstance, estate::EscapeState, _::IRCode) + +Transforms escape information of `estate` for interprocedural propagation, +and caches it in a global cache that can then be looked up later when +`linfo` callsite is seen again. +""" +function cache_escapes! 
end + +# when working outside of Core.Compiler, cache as much as information for later inspection and debugging +if _TOP_MOD !== Core.Compiler + struct EscapeCache + cache::Vector{EscapeLatticeCache} + state::EscapeState # preserved just for debugging purpose + ir::IRCode # preserved just for debugging purpose + end + const GLOBAL_ESCAPE_CACHE = IdDict{MethodInstance,EscapeCache}() + function cache_escapes!(linfo::MethodInstance, estate::EscapeState, cacheir::IRCode) + cache = EscapeCache(to_interprocedural(estate), estate, cacheir) + GLOBAL_ESCAPE_CACHE[linfo] = cache + return cache + end + argescapes_from_cache(cache::EscapeCache) = cache.cache +else + const GLOBAL_ESCAPE_CACHE = IdDict{MethodInstance,Vector{EscapeLatticeCache}}() + function cache_escapes!(linfo::MethodInstance, estate::EscapeState, _::IRCode) + cache = to_interprocedural(estate) + GLOBAL_ESCAPE_CACHE[linfo] = cache + return cache + end + argescapes_from_cache(cache::Vector{EscapeLatticeCache}) = cache +end + +function to_interprocedural(estate::EscapeState) + cache = Vector{EscapeLatticeCache}(undef, estate.nargs) + for i = 1:estate.nargs + cache[i] = EscapeLatticeCache(estate.escapes[i]) + end + return cache +end + +__clear_escape_cache!() = empty!(GLOBAL_ESCAPE_CACHE) + +const EscapeChange = Pair{Int,EscapeLattice} +const AliasChange = Pair{Int,Int} +const Changes = Vector{Union{EscapeChange,AliasChange}} + +struct AnalysisState + ir::IRCode + estate::EscapeState + changes::Changes +end + +function getinst(ir::IRCode, idx::Int) + nstmts = length(ir.stmts) + if idx ≤ nstmts + return ir.stmts[idx] + else + return ir.new_nodes.stmts[idx - nstmts] + end +end + +""" + analyze_escapes(ir::IRCode, nargs::Int) -> estate::EscapeState + +Analyzes escape information in `ir`. +`nargs` is the number of actual arguments of the analyzed call. +""" +function analyze_escapes(ir::IRCode, nargs::Int) + stmts = ir.stmts + nstmts = length(stmts) + length(ir.new_nodes.stmts) + + # only manage a single state, some flow-sensitivity is encoded as `EscapeLattice` properties + estate = EscapeState(nargs, nstmts) + changes = Changes() # stashes changes that happen at current statement + tryregions = compute_tryregions(ir) + astate = AnalysisState(ir, estate, changes) + + local debug_itr_counter = 0 + while true + local anyupdate = false + + for pc in nstmts:-1:1 + stmt = getinst(ir, pc)[:inst] + + # collect escape information + if isa(stmt, Expr) + head = stmt.head + if head === :call + escape_call!(astate, pc, stmt.args) + elseif head === :invoke + escape_invoke!(astate, pc, stmt.args) + elseif head === :new || head === :splatnew + escape_new!(astate, pc, stmt.args) + elseif head === :(=) + lhs, rhs = stmt.args + if isa(lhs, GlobalRef) # global store + add_escape_change!(astate, rhs, ⊤) + else + unexpected_assignment!(ir, pc) + end + elseif head === :foreigncall + escape_foreigncall!(astate, pc, stmt.args) + elseif head === :throw_undef_if_not # XXX when is this expression inserted ? 
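+                    # this check can throw its argument: impose `ThrownEscape` on it at this statement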
+ add_escape_change!(astate, stmt.args[1], ThrownEscape(pc)) + elseif is_meta_expr_head(head) + # meta expressions doesn't account for any usages + continue + elseif head === :enter || head === :leave || head === :the_exception || head === :pop_exception + # ignore these expressions since escapes via exceptions are handled by `escape_exception!` + # `escape_exception!` conservatively propagates `AllEscape` anyway, + # and so escape information imposed on `:the_exception` isn't computed + continue + elseif head === :static_parameter || # this exists statically, not interested in its escape + head === :copyast || # XXX can this account for some escapes? + head === :undefcheck || # XXX can this account for some escapes? + head === :isdefined || # just returns `Bool`, nothing accounts for any escapes + head === :gc_preserve_begin || # `GC.@preserve` expressions themselves won't be used anywhere + head === :gc_preserve_end # `GC.@preserve` expressions themselves won't be used anywhere + continue + else + for x in stmt.args + add_escape_change!(astate, x, ⊤) + end + end + elseif isa(stmt, ReturnNode) + if isdefined(stmt, :val) + add_escape_change!(astate, stmt.val, ReturnEscape(pc)) + end + elseif isa(stmt, PhiNode) + escape_edges!(astate, pc, stmt.values) + elseif isa(stmt, PiNode) + escape_val_ifdefined!(astate, pc, stmt) + elseif isa(stmt, PhiCNode) + escape_edges!(astate, pc, stmt.values) + elseif isa(stmt, UpsilonNode) + escape_val_ifdefined!(astate, pc, stmt) + elseif isa(stmt, GlobalRef) # global load + add_escape_change!(astate, SSAValue(pc), ⊤) + elseif isa(stmt, SSAValue) + escape_val!(astate, pc, stmt) + elseif isa(stmt, Argument) + escape_val!(astate, pc, stmt) + else # otherwise `stmt` can be GotoNode, GotoIfNot, and inlined values etc. + continue + end + + isempty(changes) && continue + + anyupdate |= propagate_changes!(estate, changes) + + empty!(changes) + end + + tryregions !== nothing && escape_exception!(astate, tryregions) + + debug_itr_counter += 1 + + anyupdate || break + end + + # if debug_itr_counter > 2 + # println("[EA] excessive iteration count found ", debug_itr_counter, " (", singleton_type(ir.argtypes[1]), ")") + # end + + return estate +end + +# propagate changes, and check convergence +function propagate_changes!(estate::EscapeState, changes::Changes) + local anychanged = false + for change in changes + if isa(change, EscapeChange) + anychanged |= propagate_escape_change!(estate, change) + else + anychanged |= propagate_alias_change!(estate, change) + end + end + return anychanged +end + +@inline propagate_escape_change!(estate::EscapeState, change::EscapeChange) = + propagate_escape_change!(⊔, estate, change) + +# allows this to work as lattice join as well as lattice meet +@inline function propagate_escape_change!(@nospecialize(op), + estate::EscapeState, change::EscapeChange) + xidx, info = change + anychanged = _propagate_escape_change!(op, estate, xidx, info) + aliases = getaliases(xidx, estate) + if aliases !== nothing + for aidx in aliases + anychanged |= _propagate_escape_change!(op, estate, aidx, info) + end + end + return anychanged +end + +@inline function _propagate_escape_change!(@nospecialize(op), + estate::EscapeState, xidx::Int, info::EscapeLattice) + old = estate.escapes[xidx] + new = op(old, info) + if old ≠ new + estate.escapes[xidx] = new + return true + end + return false +end + +@inline function propagate_alias_change!(estate::EscapeState, change::AliasChange) + xidx, yidx = change + xroot = find_root!(estate.aliasset, xidx) + yroot = 
find_root!(estate.aliasset, yidx)
+    if xroot ≠ yroot
+        union!(estate.aliasset, xroot, yroot)
+        return true
+    end
+    return false
+end
+
+function add_escape_change!(astate::AnalysisState, @nospecialize(x), info::EscapeLattice)
+    info === ⊥ && return nothing # performance optimization
+    xidx = iridx(x, astate.estate)
+    if xidx !== nothing
+        if !isbitstype(widenconst(argextype(x, astate.ir)))
+            push!(astate.changes, EscapeChange(xidx, info))
+        end
+    end
+    return nothing
+end
+
+function add_alias_change!(astate::AnalysisState, @nospecialize(x), @nospecialize(y))
+    if isa(x, GlobalRef)
+        return add_escape_change!(astate, y, ⊤)
+    elseif isa(y, GlobalRef)
+        return add_escape_change!(astate, x, ⊤)
+    end
+    estate = astate.estate
+    xidx = iridx(x, estate)
+    yidx = iridx(y, estate)
+    if xidx !== nothing && yidx !== nothing
+        pushfirst!(astate.changes, AliasChange(xidx, yidx)) # propagate `AliasChange` first for faster convergence
+        xinfo = estate.escapes[xidx]
+        yinfo = estate.escapes[yidx]
+        xyinfo = xinfo ⊔ yinfo
+        add_escape_change!(astate, x, xyinfo)
+        add_escape_change!(astate, y, xyinfo)
+    end
+    return nothing
+end
+
+function escape_edges!(astate::AnalysisState, pc::Int, edges::Vector{Any})
+    ret = SSAValue(pc)
+    for i in 1:length(edges)
+        if isassigned(edges, i)
+            v = edges[i]
+            add_alias_change!(astate, ret, v)
+        end
+    end
+end
+
+escape_val_ifdefined!(astate::AnalysisState, pc::Int, x) =
+    isdefined(x, :val) && escape_val!(astate, pc, x.val)
+
+function escape_val!(astate::AnalysisState, pc::Int, @nospecialize(val))
+    ret = SSAValue(pc)
+    add_alias_change!(astate, ret, val)
+end
+
+# NOTE if we don't maintain the alias set that is separated from the lattice state, we can do
+# something like below: it essentially incorporates forward escape propagation in our default
+# backward propagation, and leads to inefficient convergence that requires more iterations
+# # lhs = rhs: propagate escape information of `rhs` to `lhs`
+# function escape_alias!(astate::AnalysisState, @nospecialize(lhs), @nospecialize(rhs))
+#     if isa(rhs, SSAValue) || isa(rhs, Argument)
+#         vinfo = astate.estate[rhs]
+#     else
+#         return
+#     end
+#     add_escape_change!(astate, lhs, vinfo)
+# end
+
+# linear scan to find regions in which potential throws will be caught
+function compute_tryregions(ir::IRCode)
+    tryregions = nothing
+    for idx in 1:length(ir.stmts)
+        stmt = ir.stmts[idx][:inst]
+        if isexpr(stmt, :enter)
+            tryregions === nothing && (tryregions = UnitRange{Int}[])
+            leave_block = stmt.args[1]::Int
+            leave_pc = first(ir.cfg.blocks[leave_block].stmts)
+            push!(tryregions, idx:leave_pc)
+        end
+    end
+    for idx in 1:length(ir.new_nodes.stmts)
+        stmt = ir.new_nodes.stmts[idx][:inst]
+        @assert !isexpr(stmt, :enter) "try/catch inside new_nodes unsupported"
+    end
+    return tryregions
+end
+
+"""
+    escape_exception!(astate::AnalysisState, tryregions::Vector{UnitRange{Int}})
+
+Propagates escapes via exceptions that can happen in `tryregions`.
+
+Naively it seems enough to propagate escape information imposed on the `:the_exception` object,
+but actually there are several other ways to access the exception object, such as
+`Base.current_exceptions` and manual catch of a `rethrow`n object.
+For example, escape analysis needs to account for potential escape of the allocated object
+via the `rethrow_escape!()` call in the example below:
+```julia
+const Gx = Ref{Any}()
+@noinline function rethrow_escape!()
+    try
+        rethrow()
+    catch err
+        Gx[] = err
+    end
+end
+unsafeget(x) = isassigned(x) ?
x[] : throw(x) + +code_escapes() do + r = Ref{String}() + try + t = unsafeget(r) + catch err + t = typeof(err) # `err` (which `r` may alias to) doesn't escape here + rethrow_escape!() # `r` can escape here + end + return t +end +``` + +As indicated by the above example, it requires a global analysis in addition to a base escape +analysis to reason about all possible escapes via existing exception interfaces correctly. +For now we conservatively always propagate `AllEscape` to all potentially thrown objects, +since such an additional analysis might not be worthwhile to do given that exception handlings +and error paths usually don't need to be very performance sensitive, and optimizations of +error paths might be very ineffective anyway since they are sometimes "unoptimized" +intentionally for latency reasons. +""" +function escape_exception!(astate::AnalysisState, tryregions::Vector{UnitRange{Int}}) + estate = astate.estate + # NOTE if `:the_exception` is the only way to access the exception, we can do: + # exc = SSAValue(pc) + # excinfo = estate[exc] + excinfo = ⊤ + escapes = estate.escapes + for i in 1:length(escapes) + x = escapes[i] + xt = x.ThrownEscape + xt === TOP_THROWN_ESCAPE && @goto propagate_exception_escape # fast pass + for pc in x.ThrownEscape + for region in tryregions + pc in region && @goto propagate_exception_escape # early break because of AllEscape + end + end + continue + @label propagate_exception_escape + xval = irval(i, estate) + add_escape_change!(astate, xval, excinfo) + end +end + +function escape_invoke!(astate::AnalysisState, pc::Int, args::Vector{Any}) + linfo = first(args)::MethodInstance + cache = get(GLOBAL_ESCAPE_CACHE, linfo, nothing) + if cache === nothing + for i in 2:length(args) + x = args[i] + add_escape_change!(astate, x, ⊤) + end + else + argescapes = argescapes_from_cache(cache) + retinfo = astate.estate[SSAValue(pc)] # escape information imposed on the call statement + method = linfo.def::Method + nargs = Int(method.nargs) + for i in 2:length(args) + arg = args[i] + if i-1 ≤ nargs + argi = i-1 + else # handle isva signature: COMBAK will this be invalid once we take alias information into account ? + argi = nargs + end + arginfo = argescapes[argi] + info = from_interprocedural(arginfo, retinfo, pc) + add_escape_change!(astate, arg, info) + end + end +end + +""" + from_interprocedural(arginfo::EscapeLatticeCache, retinfo::EscapeLattice, pc::Int) -> x::EscapeLattice + +Reinterprets the escape information imposed on the call argument which is cached as `arginfo` +in the context of the caller frame, where `retinfo` is the escape information imposed on +the return value and `pc` is the SSA statement number of the return value. +""" +function from_interprocedural(arginfo::EscapeLatticeCache, retinfo::EscapeLattice, pc::Int) + arginfo.AllEscape && return ⊤ + + ThrownEscape = arginfo.ThrownEscape ? BitSet(pc) : BOT_THROWN_ESCAPE + + newarginfo = EscapeLattice( + #=Analyzed=#true, #=ReturnEscape=#BOT_RETURN_ESCAPE, ThrownEscape, + # FIXME implement interprocedural memory effect-analysis + # currently, this essentially disables the entire field analysis + # it might be okay from the SROA point of view, since we can't remove the allocation + # as far as it's passed to a callee anyway, but still we may want some field analysis + # for e.g. 
stack allocation or some other IPO optimizations
+        #=AliasInfo=#TOP_ALIAS_INFO)
+
+    if !arginfo.ReturnEscape
+        # if this is simply passed as the call argument, we can discard the `ReturnEscape`
+        # information and just propagate the other escape information
+        return newarginfo
+    end
+
+    # if this argument can be "returned", we have to merge its escape information with that imposed on the return value
+    return newarginfo ⊔ retinfo
+end
+
+@noinline function unexpected_assignment!(ir::IRCode, pc::Int)
+    @eval Main (ir = $ir; pc = $pc)
+    error("unexpected assignment found: inspect `Main.ir` and `Main.pc`")
+end
+
+function escape_new!(astate::AnalysisState, pc::Int, args::Vector{Any})
+    obj = SSAValue(pc)
+    objinfo = astate.estate[obj]
+    AliasInfo = objinfo.AliasInfo
+    nargs = length(args)
+    if isa(AliasInfo, Bool)
+        @goto conservative_propagation
+    elseif isa(AliasInfo, Indexable) && !AliasInfo.array
+        # fields are known precisely: propagate escape information imposed on recorded possibilities to the exact field values
+        infos = AliasInfo.infos
+        nf = length(infos)
+        for i in 2:nargs
+            i-1 > nf && break # may happen when e.g. ϕ-node merges values with different types
+            escape_field!(astate, args[i], infos[i-1])
+            push!(infos[i-1], -pc) # record def
+            # propagate the escape information of this object ignoring field information
+            add_escape_change!(astate, args[i], ignore_aliasinfo(objinfo))
+        end
+    elseif isa(AliasInfo, Unindexable) && !AliasInfo.array
+        # fields are known partially: propagate escape information imposed on recorded possibilities to all field values
+        info = AliasInfo.info
+        for i in 2:nargs
+            escape_field!(astate, args[i], info)
+            push!(info, -pc) # record def
+            # propagate the escape information of this object ignoring field information
+            add_escape_change!(astate, args[i], ignore_aliasinfo(objinfo))
+        end
+    else
+        # this object has been used as array, but it is allocated as struct here (i.e.
should throw) + # update obj's field information and just handle this case conservatively + objinfo = escape_unanalyzable_obj!(astate, obj, objinfo) + @label conservative_propagation + # the fields couldn't be analyzed precisely: propagate the entire escape information + # of this object to all its fields as the most conservative propagation + for i in 2:nargs + add_escape_change!(astate, args[i], objinfo) + end + end + if !(getinst(astate.ir, pc)[:flag] & IR_FLAG_EFFECT_FREE ≠ 0) + add_thrown_escapes!(astate, pc, args) + end +end + +function escape_field!(astate::AnalysisState, @nospecialize(v), xf::AInfo) + estate = astate.estate + for xidx in xf + xidx < 0 && continue # ignore def + x = SSAValue(xidx) # obviously this won't be true once we implement ArgEscape + add_alias_change!(astate, v, x) + end +end + +function escape_unanalyzable_obj!(astate::AnalysisState, @nospecialize(obj), objinfo::EscapeLattice) + objinfo = EscapeLattice(objinfo, TOP_ALIAS_INFO) + add_escape_change!(astate, obj, objinfo) + return objinfo +end + +function add_thrown_escapes!(astate::AnalysisState, pc::Int, args::Vector{Any}, + first_idx::Int = 1, last_idx::Int = length(args)) + for i in first_idx:last_idx + add_escape_change!(astate, args[i], ThrownEscape(pc)) + end +end + +# escape every argument `(args[6:length(args[3])])` and the name `args[1]` +# TODO: we can apply a similar strategy like builtin calls to specialize some foreigncalls +function escape_foreigncall!(astate::AnalysisState, pc::Int, args::Vector{Any}) + nargs = length(args) + if nargs < 6 + # invalid foreigncall, just escape everything + for i = 1:length(args) + add_escape_change!(astate, args[i], ⊤) + end + return + end + argtypes = args[3]::SimpleVector + nargs = length(argtypes) + name = args[1] + nn = normalize(name) + if isa(nn, Symbol) + boundserror_ninds = array_resize_info(nn) + if boundserror_ninds !== nothing + boundserror, ninds = boundserror_ninds + escape_array_resize!(boundserror, ninds, astate, pc, args) + return + end + if is_array_copy(nn) + escape_array_copy!(astate, pc, args) + return + elseif is_array_isassigned(nn) + escape_array_isassigned!(astate, pc, args) + return + end + # if nn === :jl_gc_add_finalizer_th + # # TODO add `FinalizerEscape` ? + # end + end + # NOTE array allocations might have been proven as nothrow (https://github.com/JuliaLang/julia/pull/43565) + nothrow = astate.ir.stmts[pc][:flag] & IR_FLAG_EFFECT_FREE ≠ 0 + if nothrow + name_info = NoEscape() + else + name_info = ThrownEscape(pc) + end + add_escape_change!(astate, name, name_info) + for i = 1:nargs + # we should escape this argument if it is directly called, + # otherwise just impose ThrownEscape if not nothrow + if argtypes[i] === Any + arg_info = ⊤ + else + if nothrow + arg_info = NoEscape() + else + arg_info = ThrownEscape(pc) + end + end + add_escape_change!(astate, args[5+i], arg_info) + end + preserve_info = NoEscape() # TODO encode liveness + for i = (5+nargs):length(args) + add_escape_change!(astate, args[i], preserve_info) + end +end + +normalize(@nospecialize x) = isa(x, QuoteNode) ? 
x.value : x + +function escape_call!(astate::AnalysisState, pc::Int, args::Vector{Any}) + ir = astate.ir + ft = argextype(first(args), ir, ir.sptypes, ir.argtypes) + f = singleton_type(ft) + if isa(f, Core.IntrinsicFunction) + # XXX somehow `:call` expression can creep in here, ideally we should be able to do: + # argtypes = Any[argextype(args[i], astate.ir) for i = 2:length(args)] + argtypes = Any[] + for i = 2:length(args) + arg = args[i] + push!(argtypes, isexpr(arg, :call) ? Any : argextype(arg, ir)) + end + intrinsic_nothrow(f, argtypes) || add_thrown_escapes!(astate, pc, args, 2) + return # TODO accounts for pointer operations? + end + result = escape_builtin!(f, astate, pc, args) + if result === missing + # if this call hasn't been handled by any of pre-defined handlers, + # we escape this call conservatively + for i in 2:length(args) + add_escape_change!(astate, args[i], ⊤) + end + return + elseif result === true + return # ThrownEscape is already checked + end + # we escape statements with the `ThrownEscape` property using the effect-freeness + # computed by `stmt_effect_free` invoked within inlining + # TODO throwness ≠ "effect-free-ness" + if !(getinst(astate.ir, pc)[:flag] & IR_FLAG_EFFECT_FREE ≠ 0) + add_thrown_escapes!(astate, pc, args, 2) + end +end + +escape_builtin!(@nospecialize(f), _...) = return missing + +# safe builtins +escape_builtin!(::typeof(isa), _...) = return false +escape_builtin!(::typeof(typeof), _...) = return false +escape_builtin!(::typeof(sizeof), _...) = return false +escape_builtin!(::typeof(===), _...) = return false +# not really safe, but `ThrownEscape` will be imposed later +escape_builtin!(::typeof(isdefined), _...) = return false +escape_builtin!(::typeof(throw), _...) = return false + +function escape_builtin!(::typeof(ifelse), astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) == 4 || return false + f, cond, th, el = args + ret = SSAValue(pc) + condt = argextype(cond, astate.ir) + if isa(condt, Const) && (cond = condt.val; isa(cond, Bool)) + if cond + add_alias_change!(astate, th, ret) + else + add_alias_change!(astate, el, ret) + end + else + add_alias_change!(astate, th, ret) + add_alias_change!(astate, el, ret) + end + return false +end + +function escape_builtin!(::typeof(typeassert), astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) == 3 || return false + f, obj, typ = args + ret = SSAValue(pc) + add_alias_change!(astate, ret, obj) + return false +end + +function escape_builtin!(::typeof(tuple), astate::AnalysisState, pc::Int, args::Vector{Any}) + escape_new!(astate, pc, args) + return false +end + +function analyze_fields(ir::IRCode, @nospecialize(typ), @nospecialize(fld)) + nfields = fieldcount_noerror(typ) + if nfields === nothing + return Unindexable(false, AInfo()), 0 + end + if isa(typ, DataType) + fldval = try_compute_field(ir, fld) + fidx = try_compute_fieldidx(typ, fldval) + else + fidx = nothing + end + if fidx === nothing + return Unindexable(false, AInfo()), 0 + end + return Indexable(false, AInfo[AInfo() for _ in 1:nfields]), fidx +end + +function reanalyze_fields(ir::IRCode, AliasInfo::Indexable, @nospecialize(typ), @nospecialize(fld)) + infos = AliasInfo.infos + nfields = fieldcount_noerror(typ) + if nfields === nothing + return Unindexable(false, merge_to_unindexable(infos)), 0 + end + if isa(typ, DataType) + fldval = try_compute_field(ir, fld) + fidx = try_compute_fieldidx(typ, fldval) + else + fidx = nothing + end + if fidx === nothing + return Unindexable(false, 
merge_to_unindexable(infos)), 0 + end + ninfos = length(infos) + if nfields > ninfos + for _ in 1:(nfields-ninfos) + push!(infos, AInfo()) + end + end + return AliasInfo, fidx +end + +function escape_builtin!(::typeof(getfield), astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) ≥ 3 || return false + ir, estate = astate.ir, astate.estate + obj = args[2] + typ = widenconst(argextype(obj, ir)) + if hasintersect(typ, Module) # global load + add_escape_change!(astate, SSAValue(pc), ⊤) + end + if isa(obj, SSAValue) || isa(obj, Argument) + objinfo = estate[obj] + else + return false + end + AliasInfo = objinfo.AliasInfo + if isa(AliasInfo, Bool) + AliasInfo && @goto conservative_propagation + # the fields of this object haven't been analyzed yet: analyze them now + AliasInfo, fidx = analyze_fields(ir, typ, args[3]) + if isa(AliasInfo, Indexable) + @goto record_indexable_use + else + @goto record_unindexable_use + end + elseif isa(AliasInfo, Indexable) && !AliasInfo.array + AliasInfo, fidx = reanalyze_fields(ir, AliasInfo, typ, args[3]) + isa(AliasInfo, Unindexable) && @goto record_unindexable_use + @label record_indexable_use + push!(AliasInfo.infos[fidx], pc) # record use + objinfo = EscapeLattice(objinfo, AliasInfo) + add_escape_change!(astate, obj, objinfo) + elseif isa(AliasInfo, Unindexable) && !AliasInfo.array + @label record_unindexable_use + push!(AliasInfo.info, pc) # record use + objinfo = EscapeLattice(objinfo, AliasInfo) + add_escape_change!(astate, obj, objinfo) + else + # this object has been used as array, but it is used as struct here (i.e. should throw) + # update obj's field information and just handle this case conservatively + objinfo = escape_unanalyzable_obj!(astate, obj, objinfo) + @label conservative_propagation + # the field couldn't be analyzed precisely: propagate the escape information + # imposed on the return value of this `getfield` call to the object itself + # as the most conservative propagation + ssainfo = estate[SSAValue(pc)] + add_escape_change!(astate, obj, ssainfo) + end + return false +end + +function escape_builtin!(::typeof(setfield!), astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) ≥ 4 || return false + ir, estate = astate.ir, astate.estate + obj = args[2] + val = args[4] + if isa(obj, SSAValue) || isa(obj, Argument) + objinfo = estate[obj] + else + # unanalyzable object (e.g. 
obj::GlobalRef): escape field value conservatively + add_escape_change!(astate, val, ⊤) + @goto add_thrown_escapes + end + AliasInfo = objinfo.AliasInfo + if isa(AliasInfo, Bool) + AliasInfo && @goto conservative_propagation + # the fields of this object haven't been analyzed yet: analyze them now + typ = widenconst(argextype(obj, ir)) + AliasInfo, fidx = analyze_fields(ir, typ, args[3]) + if isa(AliasInfo, Indexable) + @goto escape_indexable_def + else + @goto escape_unindexable_def + end + elseif isa(AliasInfo, Indexable) && !AliasInfo.array + typ = widenconst(argextype(obj, ir)) + AliasInfo, fidx = reanalyze_fields(ir, AliasInfo, typ, args[3]) + isa(AliasInfo, Unindexable) && @goto escape_unindexable_def + @label escape_indexable_def + escape_field!(astate, val, AliasInfo.infos[fidx]) + push!(AliasInfo.infos[fidx], -pc) # record def + objinfo = EscapeLattice(objinfo, AliasInfo) + add_escape_change!(astate, obj, objinfo) + # propagate the escape information of this object ignoring field information + add_escape_change!(astate, val, ignore_aliasinfo(objinfo)) + elseif isa(AliasInfo, Unindexable) && !AliasInfo.array + info = AliasInfo.info + @label escape_unindexable_def + escape_field!(astate, val, AliasInfo.info) + push!(AliasInfo.info, -pc) # record def + objinfo = EscapeLattice(objinfo, AliasInfo) + add_escape_change!(astate, obj, objinfo) + # propagate the escape information of this object ignoring field information + add_escape_change!(astate, val, ignore_aliasinfo(objinfo)) + else + # this object has been used as array, but it is "used" as struct here (i.e. should throw) + # update obj's field information and just handle this case conservatively + objinfo = escape_unanalyzable_obj!(astate, obj, objinfo) + @label conservative_propagation + # the field couldn't be analyzed: propagate the entire escape information + # of this object to the value being assigned as the most conservative propagation + add_escape_change!(astate, val, objinfo) + end + # also propagate escape information imposed on the return value of this `setfield!` + ssainfo = estate[SSAValue(pc)] + add_escape_change!(astate, val, ssainfo) + # compute the throwness of this setfield! call here since builtin_nothrow doesn't account for that + @label add_thrown_escapes + argtypes = Any[] + for i = 2:length(args) + push!(argtypes, argextype(args[i], ir)) + end + setfield!_nothrow(argtypes) || add_thrown_escapes!(astate, pc, args, 2) + return true +end + +const Arrayish = Union{Array,Core.ImmutableArray} + +function escape_builtin!(::typeof(arrayref), astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) ≥ 4 || return false + # check potential thrown escapes from this arrayref call + argtypes = Any[argextype(args[i], astate.ir) for i in 2:length(args)] + boundcheckt = argtypes[1] + aryt = argtypes[2] + if !array_builtin_common_typecheck(Arrayish, boundcheckt, aryt, argtypes, 3) + add_thrown_escapes!(astate, pc, args, 2) + end + ary = args[3] + inbounds = isa(boundcheckt, Const) && !boundcheckt.val::Bool + inbounds || add_escape_change!(astate, ary, ThrownEscape(pc)) + # we don't track precise index information about this array and thus don't know what values + # can be referenced here: directly propagate the escape information imposed on the return + # value of this `arrayref` call to the array itself as the most conservative propagation + # but also with updated index information + # TODO enable index analysis when constant values are available? 
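+    # A hedged illustration of why the return value's escape must flow back to the
+    # array (comment only, not executed; `f` is a hypothetical example function):
+    #   function f(a::Vector{String})
+    #       s = a[1]    # lowered to `arrayref`; `s` may alias an element of `a`
+    #       return s    # so ReturnEscape imposed on `s` must also reach `a`
+    #   end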
+ estate = astate.estate + if isa(ary, SSAValue) || isa(ary, Argument) + aryinfo = estate[ary] + else + return true + end + AliasInfo = aryinfo.AliasInfo + if isa(AliasInfo, Bool) + AliasInfo && @goto conservative_propagation + # the elements of this array haven't been analyzed yet: set AliasInfo now + AliasInfo = Unindexable(true, AInfo()) + @goto record_unindexable_use + elseif isa(AliasInfo, Indexable) && AliasInfo.array + throw("array index analysis unsupported") + elseif isa(AliasInfo, Unindexable) && AliasInfo.array + # record the return value of this `arrayref` call as a possibility that imposes escape + @label record_unindexable_use + push!(AliasInfo.info, pc) # record use + add_escape_change!(astate, ary, EscapeLattice(aryinfo, AliasInfo)) + else + # this object has been used as struct, but it is used as array here (thus should throw) + # update ary's element information and just handle this case conservatively + aryinfo = escape_unanalyzable_obj!(astate, ary, aryinfo) + @label conservative_propagation + ssainfo = estate[SSAValue(pc)] + add_escape_change!(astate, ary, ssainfo) + end + return true +end + +function escape_builtin!(::typeof(arrayset), astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) ≥ 5 || return false + # check potential escapes from this arrayset call + # NOTE here we essentially only need to account for TypeError, assuming that + # UndefRefError or BoundsError don't capture any of the arguments here + argtypes = Any[argextype(args[i], astate.ir) for i in 2:length(args)] + boundcheckt = argtypes[1] + aryt = argtypes[2] + valt = argtypes[3] + if !(array_builtin_common_typecheck(Array, boundcheckt, aryt, argtypes, 4) && + arrayset_typecheck(aryt, valt)) + add_thrown_escapes!(astate, pc, args, 2) + end + ary = args[3] + val = args[4] + inbounds = isa(boundcheckt, Const) && !boundcheckt.val::Bool + inbounds || add_escape_change!(astate, ary, ThrownEscape(pc)) + # we don't track precise index information about this array and won't record what value + # is being assigned here: directly propagate the escape information of this array to + # the value being assigned as the most conservative propagation + # TODO enable index analysis when constant values are available? + estate = astate.estate + if isa(ary, SSAValue) || isa(ary, Argument) + aryinfo = estate[ary] + else + # unanalyzable object (e.g. 
obj::GlobalRef): escape field value conservatively + add_escape_change!(astate, val, ⊤) + return true + end + AliasInfo = aryinfo.AliasInfo + if isa(AliasInfo, Bool) + AliasInfo && @goto conservative_propagation + # the elements of this array haven't been analyzed yet: set AliasInfo now + AliasInfo = Unindexable(true, AInfo()) + @goto escape_unindexable_def + elseif isa(AliasInfo, Indexable) && AliasInfo.array + throw("array index analysis unsupported") + elseif isa(AliasInfo, Unindexable) && AliasInfo.array + @label escape_unindexable_def + escape_elements!(astate, val, AliasInfo.info) + push!(AliasInfo.info, -pc) # record def + add_escape_change!(astate, ary, EscapeLattice(aryinfo, AliasInfo)) + # propagate the escape information of this array ignoring elements information + add_escape_change!(astate, val, ignore_aliasinfo(aryinfo)) + else + # this object has been used as struct, but it is "used" as array here (thus should throw) + # update ary's element information and just handle this case conservatively + aryinfo = escape_unanalyzable_obj!(astate, ary, aryinfo) + @label conservative_propagation + add_escape_change!(astate, val, aryinfo) + end + # also propagate escape information imposed on the return value of this `arrayset` + ssainfo = estate[SSAValue(pc)] + add_escape_change!(astate, ary, ssainfo) + return true +end + +function escape_elements!(astate::AnalysisState, @nospecialize(v), info::AInfo) + estate = astate.estate + for xidx in info + xidx < 0 && continue # ignore def + x = SSAValue(xidx) # obviously this won't be true once we implement ArgEscape + add_alias_change!(astate, v, x) + end +end + +function escape_builtin!(::typeof(arraysize), astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) == 3 || return false + ary = args[2] + dim = args[3] + if !arraysize_typecheck(ary, dim, astate.ir) + add_escape_change!(astate, ary, ThrownEscape(pc)) + add_escape_change!(astate, dim, ThrownEscape(pc)) + end + # NOTE we may still see "arraysize: dimension out of range", but it doesn't capture anything + return true +end + +function arraysize_typecheck(@nospecialize(ary), @nospecialize(dim), ir::IRCode) + aryt = argextype(ary, ir) + aryt ⊑ₜ Array || return false + dimt = argextype(dim, ir) + dimt ⊑ₜ Int || return false + return true +end + +# returns nothing if this isn't array resizing operation, +# otherwise returns true if it can throw BoundsError and false if not +function array_resize_info(name::Symbol) + if name === :jl_array_grow_beg || name === :jl_array_grow_end + return false, 1 + elseif name === :jl_array_del_beg || name === :jl_array_del_end + return true, 1 + elseif name === :jl_array_grow_at || name === :jl_array_del_at + return true, 2 + else + return nothing + end +end + +# NOTE may potentially throw "cannot resize array with shared data" error, +# but just ignore it since it doesn't capture anything +function escape_array_resize!(boundserror::Bool, ninds::Int, + astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) ≥ 6+ninds || return add_thrown_escapes!(astate, pc, args) + ary = args[6] + aryt = argextype(ary, astate.ir) + aryt ⊑ₜ Array || return add_thrown_escapes!(astate, pc, args) + for i in 1:ninds + ind = args[i+6] + indt = argextype(ind, astate.ir) + indt ⊑ₜ Integer || return add_thrown_escapes!(astate, pc, args) + end + if boundserror + # this array resizing can potentially throw `BoundsError`, impose it now + add_escape_change!(astate, ary, ThrownEscape(pc)) + end +end + +is_array_copy(name::Symbol) = name === :jl_array_copy + +# FIXME 
this implementation is very conservative, improve the accuracy and solve broken test cases +function escape_array_copy!(astate::AnalysisState, pc::Int, args::Vector{Any}) + length(args) ≥ 6 || return add_thrown_escapes!(astate, pc, args) + ary = args[6] + aryt = argextype(ary, astate.ir) + aryt ⊑ₜ Array || return add_thrown_escapes!(astate, pc, args) + if isa(ary, SSAValue) || isa(ary, Argument) + newary = SSAValue(pc) + aryinfo = astate.estate[ary] + newaryinfo = astate.estate[newary] + add_escape_change!(astate, newary, aryinfo) + add_escape_change!(astate, ary, newaryinfo) + end +end + +is_array_isassigned(name::Symbol) = name === :jl_array_isassigned + +function escape_array_isassigned!(astate::AnalysisState, pc::Int, args::Vector{Any}) + if !array_isassigned_nothrow(args, astate.ir) + add_thrown_escapes!(astate, pc, args) + end +end + +function array_isassigned_nothrow(args::Vector{Any}, src::IRCode) + # if !validate_foreigncall_args(args, + # :jl_array_isassigned, Cint, svec(Any,Csize_t), 0, :ccall) + # return false + # end + length(args) ≥ 7 || return false + arytype = argextype(args[6], src) + arytype ⊑ₜ Array || return false + idxtype = argextype(args[7], src) + idxtype ⊑ₜ Csize_t || return false + return true +end + +# # COMBAK do we want to enable this (and also backport this to Base for array allocations?) +# import Core.Compiler: Cint, svec +# function validate_foreigncall_args(args::Vector{Any}, +# name::Symbol, @nospecialize(rt), argtypes::SimpleVector, nreq::Int, convension::Symbol) +# length(args) ≥ 5 || return false +# normalize(args[1]) === name || return false +# args[2] === rt || return false +# args[3] === argtypes || return false +# args[4] === vararg || return false +# normalize(args[5]) === convension || return false +# return true +# end + +escape_builtin!(::typeof(arrayfreeze), astate::AnalysisState, pc::Int, args::Vector{Any}) = + is_safe_immutable_array_op(Array, astate, args) +escape_builtin!(::typeof(mutating_arrayfreeze), astate::AnalysisState, pc::Int, args::Vector{Any}) = + is_safe_immutable_array_op(Array, astate, args) +escape_builtin!(::typeof(arraythaw), astate::AnalysisState, pc::Int, args::Vector{Any}) = + is_safe_immutable_array_op(ImmutableArray, astate, args) +function is_safe_immutable_array_op(@nospecialize(arytype), astate::AnalysisState, args::Vector{Any}) + length(args) == 2 || return false + argextype(args[2], astate.ir) ⊑ₜ arytype || return false + return true +end + +# NOTE define fancy package utilities when developing EA as an external package +if _TOP_MOD !== Core.Compiler + include(@__MODULE__, "EAUtils.jl") + using .EAUtils: code_escapes, @code_escapes + export code_escapes, @code_escapes +end + +end # baremodule EscapeAnalysis diff --git a/base/compiler/EscapeAnalysis/disjoint_set.jl b/base/compiler/EscapeAnalysis/disjoint_set.jl new file mode 100644 index 0000000000000..915bc214d7c3c --- /dev/null +++ b/base/compiler/EscapeAnalysis/disjoint_set.jl @@ -0,0 +1,143 @@ +# A disjoint set implementation adapted from +# https://github.com/JuliaCollections/DataStructures.jl/blob/f57330a3b46f779b261e6c07f199c88936f28839/src/disjoint_set.jl +# under the MIT license: https://github.com/JuliaCollections/DataStructures.jl/blob/master/License.md + +# imports +import ._TOP_MOD: + length, + eltype, + union!, + push! 
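+# An illustrative sketch of the union-find API defined below (hedged, comment only;
+# `IntDisjointSet`, `union!`, `in_same_set`, `find_root!` and `num_groups` are all
+# defined later in this file):
+#   s = IntDisjointSet(4)                  # four singleton sets: {1} {2} {3} {4}
+#   union!(s, 1, 2)                        # merge the sets containing 1 and 2
+#   in_same_set(s, 1, 2)                   # -> true
+#   find_root!(s, 1) == find_root!(s, 2)   # -> true (with path compression applied)
+#   num_groups(s)                          # -> 3
+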
+# usings
+import ._TOP_MOD:
+    OneTo, collect, zero, zeros, one, typemax
+
+# Disjoint-Set
+
+############################################################
+#
+# A forest of disjoint sets of integers
+#
+# Since each element is an integer, we can use arrays
+# instead of a dictionary (for efficiency)
+#
+# Disjoint sets over other key types can be implemented
+# based on an IntDisjointSet through a map from the key
+# to an integer index
+#
+############################################################
+
+_intdisjointset_bounds_err_msg(T) = "the maximum number of elements in IntDisjointSet{$T} is $(typemax(T))"
+
+"""
+    IntDisjointSet{T<:Integer}(n::Integer)
+
+A forest of disjoint sets of integers, which is a data structure
+(also called a union–find data structure or merge–find set)
+that tracks a set of elements partitioned
+into a number of disjoint (non-overlapping) subsets.
+"""
+mutable struct IntDisjointSet{T<:Integer}
+    parents::Vector{T}
+    ranks::Vector{T}
+    ngroups::T
+end
+
+IntDisjointSet(n::T) where {T<:Integer} = IntDisjointSet{T}(collect(OneTo(n)), zeros(T, n), n)
+IntDisjointSet{T}(n::Integer) where {T<:Integer} = IntDisjointSet{T}(collect(OneTo(T(n))), zeros(T, T(n)), T(n))
+length(s::IntDisjointSet) = length(s.parents)
+
+"""
+    num_groups(s::IntDisjointSet)
+
+Get the number of groups.
+"""
+num_groups(s::IntDisjointSet) = s.ngroups
+eltype(::Type{IntDisjointSet{T}}) where {T<:Integer} = T
+
+# find the root element of the subset that contains x
+# path compression is implemented here
+function find_root_impl!(parents::Vector{T}, x::Integer) where {T<:Integer}
+    p = parents[x]
+    @inbounds if parents[p] != p
+        parents[x] = p = _find_root_impl!(parents, p)
+    end
+    return p
+end
+
+# unsafe version of the above
+function _find_root_impl!(parents::Vector{T}, x::Integer) where {T<:Integer}
+    @inbounds p = parents[x]
+    @inbounds if parents[p] != p
+        parents[x] = p = _find_root_impl!(parents, p)
+    end
+    return p
+end
+
+"""
+    find_root!(s::IntDisjointSet{T}, x::T)
+
+Find the root element of the subset that contains a member `x`.
+Path compression happens here.
+"""
+find_root!(s::IntDisjointSet{T}, x::T) where {T<:Integer} = find_root_impl!(s.parents, x)
+
+"""
+    in_same_set(s::IntDisjointSet{T}, x::T, y::T)
+
+Return `true` if `x` and `y` belong to the same subset in `s`, and `false` otherwise.
+"""
+in_same_set(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer} = find_root!(s, x) == find_root!(s, y)
+
+"""
+    union!(s::IntDisjointSet{T}, x::T, y::T)
+
+Merge the subset containing `x` and that containing `y` into one
+and return the root of the new set.
+"""
+function union!(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer}
+    parents = s.parents
+    xroot = find_root_impl!(parents, x)
+    yroot = find_root_impl!(parents, y)
+    return xroot != yroot ? root_union!(s, xroot, yroot) : xroot
+end
+
+"""
+    root_union!(s::IntDisjointSet{T}, x::T, y::T)
+
+Form a new set that is the union of the two sets whose root elements are
+`x` and `y` and return the root of the new set.
+Assume `x ≠ y` (unsafe).
+"""
+function root_union!(s::IntDisjointSet{T}, x::T, y::T) where {T<:Integer}
+    parents = s.parents
+    rks = s.ranks
+    @inbounds xrank = rks[x]
+    @inbounds yrank = rks[y]
+
+    if xrank < yrank
+        x, y = y, x
+    elseif xrank == yrank
+        rks[x] += one(T)
+    end
+    @inbounds parents[y] = x
+    s.ngroups -= one(T)
+    return x
+end
+
+"""
+    push!(s::IntDisjointSet{T})
+
+Make a new subset with an automatically chosen new element `x`.
+Return the new element.
Throw an `ArgumentError` if the +capacity of the set would be exceeded. +""" +function push!(s::IntDisjointSet{T}) where {T<:Integer} + l = length(s) + l < typemax(T) || throw(ArgumentError(_intdisjointset_bounds_err_msg(T))) + x = l + one(T) + push!(s.parents, x) + push!(s.ranks, zero(T)) + s.ngroups += one(T) + return x +end diff --git a/base/compiler/bootstrap.jl b/base/compiler/bootstrap.jl index 2517b181d2804..75ec987656509 100644 --- a/base/compiler/bootstrap.jl +++ b/base/compiler/bootstrap.jl @@ -14,7 +14,7 @@ let fs = Any[ # we first create caches for the optimizer, because they contain many loop constructions # and they're better to not run in interpreter even during bootstrapping - run_passes, + analyze_escapes, run_passes, # then we create caches for inference entries typeinf_ext, typeinf, typeinf_edge, ] diff --git a/base/compiler/compiler.jl b/base/compiler/compiler.jl index 5f2f5614ba209..bf4f7ca7dbb42 100644 --- a/base/compiler/compiler.jl +++ b/base/compiler/compiler.jl @@ -132,6 +132,8 @@ include("compiler/stmtinfo.jl") include("compiler/abstractinterpretation.jl") include("compiler/typeinfer.jl") include("compiler/optimize.jl") # TODO: break this up further + extract utilities +include("compiler/EscapeAnalysis/EscapeAnalysis.jl") +using .EscapeAnalysis # required for bootstrap # TODO: find why this is needed and remove it. diff --git a/base/compiler/optimize.jl b/base/compiler/optimize.jl index 56f23ca7c2b39..1a6ff0c00498e 100644 --- a/base/compiler/optimize.jl +++ b/base/compiler/optimize.jl @@ -518,6 +518,12 @@ function run_passes(ci::CodeInfo, sv::OptimizationState) @timeit "ADCE" ir = adce_pass!(ir) @timeit "type lift" ir = type_lift_pass!(ir) @timeit "compact 3" ir = compact!(ir) + nargs = let def = sv.linfo.def + isa(def, Method) ? 
Int(def.nargs) : 0
+    end
+    estate = analyze_escapes(ir, nargs)
+    cache_escapes!(sv.linfo, estate, ir)
+    @timeit "memory opt" ir = memory_opt!(ir, estate)
     if JLOptions().debug_level == 2
         @timeit "verify 3" (verify_ir(ir); verify_linetable(ir.linetable))
     end
diff --git a/base/compiler/ssair/ir.jl b/base/compiler/ssair/ir.jl
index a86e125fcb307..41a69b6d25dd4 100644
--- a/base/compiler/ssair/ir.jl
+++ b/base/compiler/ssair/ir.jl
@@ -319,6 +319,13 @@ function setindex!(x::IRCode, repl::Instruction, s::SSAValue)
     return x
 end
 
+function ssadominates(ir::IRCode, domtree::DomTree, ssa1::Int, ssa2::Int)
+    bb1 = block_for_inst(ir.cfg, ssa1)
+    bb2 = block_for_inst(ir.cfg, ssa2)
+    bb1 == bb2 && return ssa1 < ssa2
+    return dominates(domtree, bb1, bb2)
+end
+
 # SSA values that need renaming
 struct OldSSAValue
     id::Int
diff --git a/base/compiler/ssair/passes.jl b/base/compiler/ssair/passes.jl
index a0ff86a218412..827a4d755e7e5 100644
--- a/base/compiler/ssair/passes.jl
+++ b/base/compiler/ssair/passes.jl
@@ -1420,3 +1420,26 @@ function cfg_simplify!(ir::IRCode)
     compact.active_result_bb = length(bb_starts)
     return finish(compact)
 end
+
+# Inspect calls to arrayfreeze to determine if mutating_arrayfreeze can be safely used instead
+function memory_opt!(ir::IRCode, estate)
+    estate = estate::EscapeAnalysis.EscapeState
+    for idx in 1:length(ir.stmts)
+        stmt = ir.stmts[idx][:inst]
+        isexpr(stmt, :call) || continue
+        if is_known_call(stmt, Core.arrayfreeze, ir)
+            # an array that is an SSA value might have been initialized within this frame
+            # (and thus potentially doesn't escape anywhere)
+            length(stmt.args) ≥ 2 || continue
+            ary = stmt.args[2]
+            if isa(ary, SSAValue)
+                # we can change this arrayfreeze call (which incurs an allocation) to mutating_arrayfreeze
+                # so that it just changes the type tag of the array and avoids the allocation,
+                # as long as the array doesn't escape at this point (meaning we can ignore ThrownEscape here)
+                has_return_escape(estate[ary]) && continue
+                stmt.args[1] = GlobalRef(Core, :mutating_arrayfreeze)
+            end
+        end
+    end
+    return ir
+end
diff --git a/base/compiler/tfuncs.jl b/base/compiler/tfuncs.jl
index 177f33bd227f8..6f8b81677fa0e 100644
--- a/base/compiler/tfuncs.jl
+++ b/base/compiler/tfuncs.jl
@@ -461,8 +461,10 @@ add_tfunc(Core._typevar, 3, 3, typevar_tfunc, 100)
 add_tfunc(applicable, 1, INT_INF, (@nospecialize(f), args...)->Bool, 100)
 add_tfunc(Core.Intrinsics.arraylen, 1, 1, @nospecialize(x)->Int, 4)
 
+const Arrayish = Union{Array,ImmutableArray}
+
 function arraysize_tfunc(@nospecialize(ary), @nospecialize(dim))
-    hasintersect(widenconst(ary), Array) || return Bottom
+    hasintersect(widenconst(ary), Arrayish) || return Bottom
     hasintersect(widenconst(dim), Int) || return Bottom
     return Int
 end
@@ -472,7 +474,7 @@ function arraysize_nothrow(argtypes::Vector{Any})
     length(argtypes) == 2 || return false
     ary = argtypes[1]
     dim = argtypes[2]
-    ary ⊑ Array || return false
+    widenconst(ary) <: Arrayish || return false
     if isa(dim, Const)
         dimval = dim.val
         return isa(dimval, Int) && dimval > 0
@@ -1524,27 +1526,27 @@ function tuple_tfunc(argtypes::Vector{Any})
 end
 
 arrayref_tfunc(@nospecialize(boundscheck), @nospecialize(ary), @nospecialize idxs...)
= - _arrayref_tfunc(boundscheck, ary, idxs) -function _arrayref_tfunc(@nospecialize(boundscheck), @nospecialize(ary), - @nospecialize idxs::Tuple) + _arrayref_tfunc(Arrayish, boundscheck, ary, idxs) +function _arrayref_tfunc(@nospecialize(Arytype), + @nospecialize(boundscheck), @nospecialize(ary), @nospecialize idxs::Tuple) isempty(idxs) && return Bottom - array_builtin_common_errorcheck(boundscheck, ary, idxs) || return Bottom - return array_elmtype(ary) + array_builtin_common_errorcheck(Arytype, boundscheck, ary, idxs) || return Bottom + return array_elmtype(Arytype, ary) end add_tfunc(arrayref, 3, INT_INF, arrayref_tfunc, 20) add_tfunc(const_arrayref, 3, INT_INF, arrayref_tfunc, 20) function arrayset_tfunc(@nospecialize(boundscheck), @nospecialize(ary), @nospecialize(item), @nospecialize idxs...) - hasintersect(widenconst(item), _arrayref_tfunc(boundscheck, ary, idxs)) || return Bottom + hasintersect(widenconst(item), _arrayref_tfunc(Array, boundscheck, ary, idxs)) || return Bottom return ary end add_tfunc(arrayset, 4, INT_INF, arrayset_tfunc, 20) -function array_builtin_common_errorcheck(@nospecialize(boundscheck), @nospecialize(ary), - @nospecialize idxs::Tuple) +function array_builtin_common_errorcheck(@nospecialize(Arytype), + @nospecialize(boundscheck), @nospecialize(ary), @nospecialize idxs::Tuple) hasintersect(widenconst(boundscheck), Bool) || return false - hasintersect(widenconst(ary), Array) || return false + hasintersect(widenconst(ary), Arytype) || return false for i = 1:length(idxs) idx = getfield(idxs, i) idx = isvarargtype(idx) ? unwrapva(idx) : widenconst(idx) @@ -1553,9 +1555,9 @@ function array_builtin_common_errorcheck(@nospecialize(boundscheck), @nospeciali return true end -function array_elmtype(@nospecialize ary) +function array_elmtype(@nospecialize(Arytype), @nospecialize(ary)) a = widenconst(ary) - if !has_free_typevars(a) && a <: Array + if !has_free_typevars(a) && a <: Arytype a0 = a if isa(a, UnionAll) a = unwrap_unionall(a0) @@ -1569,6 +1571,32 @@ function array_elmtype(@nospecialize ary) return Any end +# the ImmutableArray operations might involve copies and so their computation costs can be high, +# nevertheless we assign smaller inlining costs to them here, since the escape analysis +# at this moment isn't able to propagate array escapes interprocedurally +# and it will fail to optimize most cases without inlining + +arrayfreeze_tfunc(@nospecialize a) = immutable_array_tfunc(Array, ImmutableArray, a) +add_tfunc(Core.arrayfreeze, 1, 1, arrayfreeze_tfunc, 20) + +mutating_arrayfreeze_tfunc(@nospecialize a) = immutable_array_tfunc(Array, ImmutableArray, a) +add_tfunc(Core.mutating_arrayfreeze, 1, 1, mutating_arrayfreeze_tfunc, 10) + +arraythaw_tfunc(@nospecialize a) = immutable_array_tfunc(ImmutableArray, Array, a) +add_tfunc(Core.arraythaw, 1, 1, arraythaw_tfunc, 20) + +function immutable_array_tfunc(@nospecialize(at), @nospecialize(rt), @nospecialize(a)) + a = widenconst(a) + hasintersect(a, at) || return Bottom + if a <: at + unw = unwrap_unionall(a) + if isa(unw, DataType) + return rewrap_unionall(rt{unw.parameters[1], unw.parameters[2]}, a) + end + end + return rt +end + function _opaque_closure_tfunc(@nospecialize(arg), @nospecialize(isva), @nospecialize(lb), @nospecialize(ub), @nospecialize(source), env::Vector{Any}, linfo::MethodInstance) @@ -1602,11 +1630,12 @@ function array_type_undefable(@nospecialize(arytype)) end end -function array_builtin_common_nothrow(argtypes::Vector{Any}, first_idx_idx::Int) +function 
array_builtin_common_nothrow(@nospecialize(Arytype), + argtypes::Vector{Any}, first_idx_idx::Int) length(argtypes) >= 4 || return false boundscheck = argtypes[1] arytype = argtypes[2] - array_builtin_common_typecheck(boundscheck, arytype, argtypes, first_idx_idx) || return false + array_builtin_common_typecheck(Arytype, boundscheck, arytype, argtypes, first_idx_idx) || return false # If we could potentially throw undef ref errors, bail out now. arytype = widenconst(arytype) array_type_undefable(arytype) && return false @@ -1621,12 +1650,12 @@ function array_builtin_common_nothrow(argtypes::Vector{Any}, first_idx_idx::Int) return false end -function array_builtin_common_typecheck( +function array_builtin_common_typecheck(@nospecialize(Arytype), @nospecialize(boundscheck), @nospecialize(arytype), argtypes::Vector{Any}, first_idx_idx::Int) - (boundscheck ⊑ Bool && arytype ⊑ Array) || return false + (widenconst(boundscheck) <: Bool && widenconst(arytype) <: Arytype) || return false for i = first_idx_idx:length(argtypes) - argtypes[i] ⊑ Int || return false + widenconst(argtypes[i]) <: Int || return false end return true end @@ -1645,11 +1674,11 @@ end # Query whether the given builtin is guaranteed not to throw given the argtypes function _builtin_nothrow(@nospecialize(f), argtypes::Array{Any,1}, @nospecialize(rt)) if f === arrayset - array_builtin_common_nothrow(argtypes, 4) || return true + array_builtin_common_nothrow(Array, argtypes, 4) || return true # Additionally check element type compatibility return arrayset_typecheck(argtypes[2], argtypes[3]) elseif f === arrayref || f === const_arrayref - return array_builtin_common_nothrow(argtypes, 3) + return array_builtin_common_nothrow(Arrayish, argtypes, 3) elseif f === arraysize return arraysize_nothrow(argtypes) elseif f === Core._expr @@ -1709,8 +1738,7 @@ function builtin_tfunction(interp::AbstractInterpreter, @nospecialize(f), argtyp sv::Union{InferenceState,Nothing}) if f === tuple return tuple_tfunc(argtypes) - end - if isa(f, IntrinsicFunction) + elseif isa(f, IntrinsicFunction) if is_pure_intrinsic_infer(f) && _all(@nospecialize(a) -> isa(a, Const), argtypes) argvals = anymap(@nospecialize(a) -> (a::Const).val, argtypes) try diff --git a/base/compiler/types.jl b/base/compiler/types.jl index c72896b61b0e5..6236acd13bc95 100644 --- a/base/compiler/types.jl +++ b/base/compiler/types.jl @@ -164,7 +164,7 @@ struct NativeInterpreter <: AbstractInterpreter end # If they didn't pass typemax(UInt) but passed something more subtly - # incorrect, fail out loudly. + # incorrect, fail out loud. 
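+        # (e.g. passing `world = get_world_counter() + 1` is one such subtly
+        # incorrect value; it would trip this assertion)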
@assert world <= get_world_counter() diff --git a/base/compiler/utilities.jl b/base/compiler/utilities.jl index e97441495f16b..9b1106e964919 100644 --- a/base/compiler/utilities.jl +++ b/base/compiler/utilities.jl @@ -19,6 +19,8 @@ function _any(@nospecialize(f), a) end return false end +any(@nospecialize(f), itr) = _any(f, itr) +any(itr) = _any(identity, itr) function _all(@nospecialize(f), a) for x in a @@ -26,6 +28,8 @@ function _all(@nospecialize(f), a) end return true end +all(@nospecialize(f), itr) = _all(f, itr) +all(itr) = _all(identity, itr) function contains_is(itr, @nospecialize(x)) for y in itr diff --git a/base/dict.jl b/base/dict.jl index dabdfa5c34773..83ef7f423f7e7 100644 --- a/base/dict.jl +++ b/base/dict.jl @@ -373,7 +373,7 @@ end function setindex!(h::Dict{K,V}, v0, key0) where V where K key = convert(K, key0) if !isequal(key, key0) - throw(ArgumentError("$(limitrepr(key0)) is not a valid key for type $K")) + throw(KeyTypeError(K, key0)) end setindex!(h, v0, key) end diff --git a/base/experimental.jl b/base/experimental.jl index d5af876cbbb23..12fcb9273b4b6 100644 --- a/base/experimental.jl +++ b/base/experimental.jl @@ -11,6 +11,8 @@ module Experimental using Base: Threads, sync_varname using Base.Meta +using Base: ImmutableArray + """ Const(A::Array) diff --git a/base/exports.jl b/base/exports.jl index c43e66eecb74c..30ad9c8c8c0f6 100644 --- a/base/exports.jl +++ b/base/exports.jl @@ -22,6 +22,7 @@ export AbstractVector, AbstractVecOrMat, Array, + ImmutableArray, AbstractMatch, AbstractPattern, AbstractDict, @@ -96,6 +97,7 @@ export Val, VecOrMat, Vector, + ImmutableVector, VersionNumber, WeakKeyDict, diff --git a/base/indices.jl b/base/indices.jl index 28028f23c72a3..0b7d7d7212940 100644 --- a/base/indices.jl +++ b/base/indices.jl @@ -95,6 +95,7 @@ IndexStyle(A::AbstractArray) = IndexStyle(typeof(A)) IndexStyle(::Type{Union{}}) = IndexLinear() IndexStyle(::Type{<:AbstractArray}) = IndexCartesian() IndexStyle(::Type{<:Array}) = IndexLinear() +IndexStyle(::Type{<:Core.ImmutableArray}) = IndexLinear() IndexStyle(::Type{<:AbstractRange}) = IndexLinear() IndexStyle(A::AbstractArray, B::AbstractArray) = IndexStyle(IndexStyle(A), IndexStyle(B)) diff --git a/base/pointer.jl b/base/pointer.jl index b9475724f7637..334d160bc92fa 100644 --- a/base/pointer.jl +++ b/base/pointer.jl @@ -63,6 +63,7 @@ cconvert(::Type{Ptr{UInt8}}, s::AbstractString) = String(s) cconvert(::Type{Ptr{Int8}}, s::AbstractString) = String(s) unsafe_convert(::Type{Ptr{T}}, a::Array{T}) where {T} = ccall(:jl_array_ptr, Ptr{T}, (Any,), a) +unsafe_convert(::Type{Ptr{T}}, a::Core.ImmutableArray{T}) where {T} = ccall(:jl_array_ptr, Ptr{T}, (Any,), a) unsafe_convert(::Type{Ptr{S}}, a::AbstractArray{T}) where {S,T} = convert(Ptr{S}, unsafe_convert(Ptr{T}, a)) unsafe_convert(::Type{Ptr{T}}, a::AbstractArray{T}) where {T} = error("conversion to pointer not defined for $(typeof(a))") diff --git a/src/builtin_proto.h b/src/builtin_proto.h index e0b328e664d6c..b6332329e8ca7 100644 --- a/src/builtin_proto.h +++ b/src/builtin_proto.h @@ -53,6 +53,10 @@ DECLARE_BUILTIN(typeassert); DECLARE_BUILTIN(_typebody); DECLARE_BUILTIN(typeof); DECLARE_BUILTIN(_typevar); +DECLARE_BUILTIN(arrayfreeze); +DECLARE_BUILTIN(arraythaw); +DECLARE_BUILTIN(mutating_arrayfreeze); +DECLARE_BUILTIN(maybecopy); JL_CALLABLE(jl_f_invoke_kwsorter); #ifdef DEFINE_BUILTIN_GLOBALS diff --git a/src/builtins.c b/src/builtins.c index 900a3c44dc4ec..bf3977358c3a2 100644 --- a/src/builtins.c +++ b/src/builtins.c @@ -1336,7 +1336,9 @@ 
JL_CALLABLE(jl_f__typevar) JL_CALLABLE(jl_f_arraysize) { JL_NARGS(arraysize, 2, 2); - JL_TYPECHK(arraysize, array, args[0]); + if (!jl_is_arrayish(args[0])) { + jl_type_error("arraysize", (jl_value_t*)jl_array_type, args[0]); + } jl_array_t *a = (jl_array_t*)args[0]; size_t nd = jl_array_ndims(a); JL_TYPECHK(arraysize, long, args[1]); @@ -1375,7 +1377,9 @@ JL_CALLABLE(jl_f_arrayref) { JL_NARGSV(arrayref, 3); JL_TYPECHK(arrayref, bool, args[0]); - JL_TYPECHK(arrayref, array, args[1]); + if (!jl_is_arrayish(args[1])) { + jl_type_error("arrayref", (jl_value_t*)jl_array_type, args[1]); + } jl_array_t *a = (jl_array_t*)args[1]; size_t i = array_nd_index(a, &args[2], nargs - 2, "arrayref"); return jl_arrayref(a, i); @@ -1397,6 +1401,16 @@ JL_CALLABLE(jl_f_arrayset) return args[1]; } +JL_CALLABLE(jl_f_maybecopy) +{ + // calls to this builtin are potentially replaced with a call to copy + // if not replaced, the default behavior is to typecheck and return the array it was called on + JL_NARGS(maybecopy, 1, 1); + JL_TYPECHK(maybecopy, array, args[0]); + jl_array_t *a = (jl_array_t*)args[0]; + return (jl_value_t*)a; +} + // type definition ------------------------------------------------------------ JL_CALLABLE(jl_f__structtype) @@ -1655,6 +1669,54 @@ JL_CALLABLE(jl_f__equiv_typedef) return equiv_type(args[0], args[1]) ? jl_true : jl_false; } +JL_CALLABLE(jl_f_arrayfreeze) +{ + JL_NARGS(arrayfreeze, 1, 1); + JL_TYPECHK(arrayfreeze, array, args[0]); + jl_array_t *a = (jl_array_t*)args[0]; + jl_datatype_t *it = (jl_datatype_t *)jl_apply_type2((jl_value_t*)jl_immutable_array_type, + jl_tparam0(jl_typeof(a)), jl_tparam1(jl_typeof(a))); + JL_GC_PUSH1(&it); + // The idea is to elide this copy if the compiler or runtime can prove that + // doing so is safe to do. + jl_array_t *na = jl_array_copy(a); + jl_set_typeof(na, it); + JL_GC_POP(); + return (jl_value_t*)na; +} + +JL_CALLABLE(jl_f_mutating_arrayfreeze) +{ + // N.B.: These error checks pretend to be arrayfreeze since this is a drop + // in replacement and we don't want to change the visible error type in the + // optimizer + JL_NARGS(arrayfreeze, 1, 1); + JL_TYPECHK(arrayfreeze, array, args[0]); + jl_array_t *a = (jl_array_t*)args[0]; + jl_datatype_t *it = (jl_datatype_t *)jl_apply_type2((jl_value_t*)jl_immutable_array_type, + jl_tparam0(jl_typeof(a)), jl_tparam1(jl_typeof(a))); + jl_set_typeof(a, it); + return (jl_value_t*)a; +} + +JL_CALLABLE(jl_f_arraythaw) +{ + JL_NARGS(arraythaw, 1, 1); + if (((jl_datatype_t*)jl_typeof(args[0]))->name != jl_immutable_array_typename) { + jl_type_error("arraythaw", (jl_value_t*)jl_immutable_array_type, args[0]); + } + jl_array_t *a = (jl_array_t*)args[0]; + jl_datatype_t *it = (jl_datatype_t *)jl_apply_type2((jl_value_t*)jl_array_type, + jl_tparam0(jl_typeof(a)), jl_tparam1(jl_typeof(a))); + JL_GC_PUSH1(&it); + // The idea is to elide this copy if the compiler or runtime can prove that + // doing so is safe to do. 
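+    // (Note: other live references to this ImmutableArray may rely on it never
+    // mutating, so thawing copies by default; the copy may only be skipped when
+    // the source can be proven dead afterwards.)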
+ jl_array_t *na = jl_array_copy(a); + jl_set_typeof(na, it); + JL_GC_POP(); + return (jl_value_t*)na; +} + // IntrinsicFunctions --------------------------------------------------------- static void (*runtime_fp[num_intrinsics])(void); @@ -1809,6 +1871,10 @@ void jl_init_primitives(void) JL_GC_DISABLED jl_builtin_arrayset = add_builtin_func("arrayset", jl_f_arrayset); jl_builtin_arraysize = add_builtin_func("arraysize", jl_f_arraysize); + jl_builtin_arrayfreeze = add_builtin_func("arrayfreeze", jl_f_arrayfreeze); + jl_builtin_mutating_arrayfreeze = add_builtin_func("mutating_arrayfreeze", jl_f_mutating_arrayfreeze); + jl_builtin_arraythaw = add_builtin_func("arraythaw", jl_f_arraythaw); + // method table utils jl_builtin_applicable = add_builtin_func("applicable", jl_f_applicable); jl_builtin_invoke = add_builtin_func("invoke", jl_f_invoke); @@ -1833,6 +1899,7 @@ void jl_init_primitives(void) JL_GC_DISABLED add_builtin_func("_setsuper!", jl_f__setsuper); jl_builtin__typebody = add_builtin_func("_typebody!", jl_f__typebody); add_builtin_func("_equiv_typedef", jl_f__equiv_typedef); + jl_builtin_maybecopy = add_builtin_func("maybecopy", jl_f_maybecopy); // builtin types add_builtin("Any", (jl_value_t*)jl_any_type); @@ -1880,6 +1947,7 @@ void jl_init_primitives(void) JL_GC_DISABLED add_builtin("AbstractArray", (jl_value_t*)jl_abstractarray_type); add_builtin("DenseArray", (jl_value_t*)jl_densearray_type); add_builtin("Array", (jl_value_t*)jl_array_type); + add_builtin("ImmutableArray", (jl_value_t*)jl_immutable_array_type); add_builtin("Expr", (jl_value_t*)jl_expr_type); add_builtin("LineNumberNode", (jl_value_t*)jl_linenumbernode_type); diff --git a/src/cgutils.cpp b/src/cgutils.cpp index ce64f3242e774..02fb7ed1e97c6 100644 --- a/src/cgutils.cpp +++ b/src/cgutils.cpp @@ -491,7 +491,7 @@ static Type *_julia_type_to_llvm(jl_codegen_params_t *ctx, LLVMContext &ctxt, jl if (isboxed) *isboxed = false; if (jt == (jl_value_t*)jl_bottom_type) return getVoidTy(ctxt); - if (jl_is_concrete_immutable(jt)) { + if (jl_is_concrete_immutable(jt) && !jl_is_arrayish_type(jt)) { if (jl_datatype_nbits(jt) == 0) return getVoidTy(ctxt); Type *t = _julia_struct_to_llvm(ctx, ctxt, jt, isboxed); diff --git a/src/codegen.cpp b/src/codegen.cpp index 07b935b1449a1..6cb3de20f84b1 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -739,12 +739,13 @@ static const auto jl_newbits_func = new JuliaFunction{ // `julia.typeof` does read memory, but it is effectively readnone before we lower // the allocation function. This is OK as long as we lower `julia.typeof` no later than // `julia.gc_alloc_obj`. 
+// Updated to argmemonly due to C++ deconstructor style usage in jl_f_arrayfreeze / mutating_arrayfreeze static const auto jl_typeof_func = new JuliaFunction{ "julia.typeof", [](LLVMContext &C) { return FunctionType::get(T_prjlvalue, {T_prjlvalue}, false); }, [](LLVMContext &C) { return AttributeList::get(C, - Attributes(C, {Attribute::ReadNone, Attribute::NoUnwind, Attribute::NoRecurse}), + Attributes(C, {Attribute::ArgMemOnly, Attribute::NoUnwind, Attribute::NoRecurse}), Attributes(C, {Attribute::NonNull}), None); }, }; @@ -936,6 +937,15 @@ static const auto pointer_from_objref_func = new JuliaFunction{ Attributes(C, {Attribute::NonNull}), None); }, }; +static const auto mutating_arrayfreeze_func = new JuliaFunction{ + "julia.mutating_arrayfreeze", + [](LLVMContext &C) { return FunctionType::get(T_prjlvalue, + {T_prjlvalue, T_prjlvalue}, false); }, + [](LLVMContext &C) { return AttributeList::get(C, + Attributes(C, {Attribute::NoUnwind, Attribute::NoRecurse}), + Attributes(C, {Attribute::NonNull}), + None); }, +}; static const auto jltuple_func = new JuliaFunction{XSTR(jl_f_tuple), get_func_sig, get_func_attrs}; static std::map builtin_func_map; @@ -1007,7 +1017,7 @@ static bool deserves_retbox(jl_value_t* t) static bool deserves_sret(jl_value_t *dt, Type *T) { assert(jl_is_datatype(dt)); - return (size_t)jl_datatype_size(dt) > sizeof(void*) && !T->isFloatingPointTy() && !T->isVectorTy(); + return (size_t)jl_datatype_size(dt) > sizeof(void*) && !T->isFloatingPointTy() && !T->isVectorTy() && !jl_is_arrayish_type(dt); } @@ -2871,6 +2881,21 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f, } } + else if (f == jl_builtin_mutating_arrayfreeze && nargs == 1) { + const jl_cgval_t &ary = argv[1]; + jl_value_t *aty_dt = jl_unwrap_unionall(ary.typ); + if (jl_is_array_type(aty_dt)) { + jl_datatype_t *it = (jl_datatype_t *)jl_apply_type2((jl_value_t*)jl_immutable_array_type, + jl_tparam0(aty_dt), jl_tparam1(aty_dt)); + *ret = mark_julia_type(ctx, + ctx.builder.CreateCall(prepare_call(mutating_arrayfreeze_func), + { boxed(ctx, ary), + track_pjlvalue(ctx, literal_pointer_val(ctx, (jl_value_t*)it)) }), true, it); + return true; + } + return false; + } + else if (f == jl_builtin_arrayset && nargs >= 4) { const jl_cgval_t &ary = argv[2]; jl_cgval_t val = argv[3]; @@ -8017,37 +8042,41 @@ void jl_init_debuginfo(void); extern "C" void jl_init_llvm(void) { builtin_func_map = - { { jl_f_is_addr, new JuliaFunction{XSTR(jl_f_is), get_func_sig, get_func_attrs} }, - { jl_f_typeof_addr, new JuliaFunction{XSTR(jl_f_typeof), get_func_sig, get_func_attrs} }, - { jl_f_sizeof_addr, new JuliaFunction{XSTR(jl_f_sizeof), get_func_sig, get_func_attrs} }, - { jl_f_issubtype_addr, new JuliaFunction{XSTR(jl_f_issubtype), get_func_sig, get_func_attrs} }, - { jl_f_isa_addr, new JuliaFunction{XSTR(jl_f_isa), get_func_sig, get_func_attrs} }, - { jl_f_typeassert_addr, new JuliaFunction{XSTR(jl_f_typeassert), get_func_sig, get_func_attrs} }, - { jl_f_ifelse_addr, new JuliaFunction{XSTR(jl_f_ifelse), get_func_sig, get_func_attrs} }, - { jl_f__apply_iterate_addr, new JuliaFunction{XSTR(jl_f__apply_iterate), get_func_sig, get_func_attrs} }, - { jl_f__apply_pure_addr, new JuliaFunction{XSTR(jl_f__apply_pure), get_func_sig, get_func_attrs} }, - { jl_f__call_latest_addr, new JuliaFunction{XSTR(jl_f__call_latest), get_func_sig, get_func_attrs} }, - { jl_f__call_in_world_addr, new JuliaFunction{XSTR(jl_f__call_in_world), get_func_sig, get_func_attrs} }, - { jl_f_throw_addr, new 
JuliaFunction{XSTR(jl_f_throw), get_func_sig, get_func_attrs} }, - { jl_f_tuple_addr, jltuple_func }, - { jl_f_svec_addr, new JuliaFunction{XSTR(jl_f_svec), get_func_sig, get_func_attrs} }, - { jl_f_applicable_addr, new JuliaFunction{XSTR(jl_f_applicable), get_func_sig, get_func_attrs} }, - { jl_f_invoke_addr, new JuliaFunction{XSTR(jl_f_invoke), get_func_sig, get_func_attrs} }, - { jl_f_invoke_kwsorter_addr, new JuliaFunction{XSTR(jl_f_invoke_kwsorter), get_func_sig, get_func_attrs} }, - { jl_f_isdefined_addr, new JuliaFunction{XSTR(jl_f_isdefined), get_func_sig, get_func_attrs} }, - { jl_f_getfield_addr, new JuliaFunction{XSTR(jl_f_getfield), get_func_sig, get_func_attrs} }, - { jl_f_setfield_addr, new JuliaFunction{XSTR(jl_f_setfield), get_func_sig, get_func_attrs} }, - { jl_f_swapfield_addr, new JuliaFunction{XSTR(jl_f_swapfield), get_func_sig, get_func_attrs} }, - { jl_f_modifyfield_addr, new JuliaFunction{XSTR(jl_f_modifyfield), get_func_sig, get_func_attrs} }, - { jl_f_fieldtype_addr, new JuliaFunction{XSTR(jl_f_fieldtype), get_func_sig, get_func_attrs} }, - { jl_f_nfields_addr, new JuliaFunction{XSTR(jl_f_nfields), get_func_sig, get_func_attrs} }, - { jl_f__expr_addr, new JuliaFunction{XSTR(jl_f__expr), get_func_sig, get_func_attrs} }, - { jl_f__typevar_addr, new JuliaFunction{XSTR(jl_f__typevar), get_func_sig, get_func_attrs} }, - { jl_f_arrayref_addr, new JuliaFunction{XSTR(jl_f_arrayref), get_func_sig, get_func_attrs} }, - { jl_f_const_arrayref_addr, new JuliaFunction{XSTR(jl_f_const_arrayref), get_func_sig, get_func_attrs} }, - { jl_f_arrayset_addr, new JuliaFunction{XSTR(jl_f_arrayset), get_func_sig, get_func_attrs} }, - { jl_f_arraysize_addr, new JuliaFunction{XSTR(jl_f_arraysize), get_func_sig, get_func_attrs} }, - { jl_f_apply_type_addr, new JuliaFunction{XSTR(jl_f_apply_type), get_func_sig, get_func_attrs} }, + { { jl_f_is_addr, new JuliaFunction{XSTR(jl_f_is), get_func_sig, get_func_attrs} }, + { jl_f_typeof_addr, new JuliaFunction{XSTR(jl_f_typeof), get_func_sig, get_func_attrs} }, + { jl_f_sizeof_addr, new JuliaFunction{XSTR(jl_f_sizeof), get_func_sig, get_func_attrs} }, + { jl_f_issubtype_addr, new JuliaFunction{XSTR(jl_f_issubtype), get_func_sig, get_func_attrs} }, + { jl_f_isa_addr, new JuliaFunction{XSTR(jl_f_isa), get_func_sig, get_func_attrs} }, + { jl_f_typeassert_addr, new JuliaFunction{XSTR(jl_f_typeassert), get_func_sig, get_func_attrs} }, + { jl_f_ifelse_addr, new JuliaFunction{XSTR(jl_f_ifelse), get_func_sig, get_func_attrs} }, + { jl_f__apply_iterate_addr, new JuliaFunction{XSTR(jl_f__apply_iterate), get_func_sig, get_func_attrs} }, + { jl_f__apply_pure_addr, new JuliaFunction{XSTR(jl_f__apply_pure), get_func_sig, get_func_attrs} }, + { jl_f__call_latest_addr, new JuliaFunction{XSTR(jl_f__call_latest), get_func_sig, get_func_attrs} }, + { jl_f__call_in_world_addr, new JuliaFunction{XSTR(jl_f__call_in_world), get_func_sig, get_func_attrs} }, + { jl_f_throw_addr, new JuliaFunction{XSTR(jl_f_throw), get_func_sig, get_func_attrs} }, + { jl_f_tuple_addr, jltuple_func }, + { jl_f_svec_addr, new JuliaFunction{XSTR(jl_f_svec), get_func_sig, get_func_attrs} }, + { jl_f_applicable_addr, new JuliaFunction{XSTR(jl_f_applicable), get_func_sig, get_func_attrs} }, + { jl_f_invoke_addr, new JuliaFunction{XSTR(jl_f_invoke), get_func_sig, get_func_attrs} }, + { jl_f_invoke_kwsorter_addr, new JuliaFunction{XSTR(jl_f_invoke_kwsorter), get_func_sig, get_func_attrs} }, + { jl_f_isdefined_addr, new JuliaFunction{XSTR(jl_f_isdefined), get_func_sig, get_func_attrs} }, + { 
jl_f_getfield_addr, new JuliaFunction{XSTR(jl_f_getfield), get_func_sig, get_func_attrs} }, + { jl_f_setfield_addr, new JuliaFunction{XSTR(jl_f_setfield), get_func_sig, get_func_attrs} }, + { jl_f_swapfield_addr, new JuliaFunction{XSTR(jl_f_swapfield), get_func_sig, get_func_attrs} }, + { jl_f_modifyfield_addr, new JuliaFunction{XSTR(jl_f_modifyfield), get_func_sig, get_func_attrs} }, + { jl_f_fieldtype_addr, new JuliaFunction{XSTR(jl_f_fieldtype), get_func_sig, get_func_attrs} }, + { jl_f_nfields_addr, new JuliaFunction{XSTR(jl_f_nfields), get_func_sig, get_func_attrs} }, + { jl_f__expr_addr, new JuliaFunction{XSTR(jl_f__expr), get_func_sig, get_func_attrs} }, + { jl_f__typevar_addr, new JuliaFunction{XSTR(jl_f__typevar), get_func_sig, get_func_attrs} }, + { jl_f_arrayref_addr, new JuliaFunction{XSTR(jl_f_arrayref), get_func_sig, get_func_attrs} }, + { jl_f_const_arrayref_addr, new JuliaFunction{XSTR(jl_f_const_arrayref), get_func_sig, get_func_attrs} }, + { jl_f_arrayset_addr, new JuliaFunction{XSTR(jl_f_arrayset), get_func_sig, get_func_attrs} }, + { jl_f_arraysize_addr, new JuliaFunction{XSTR(jl_f_arraysize), get_func_sig, get_func_attrs} }, + { jl_f_apply_type_addr, new JuliaFunction{XSTR(jl_f_apply_type), get_func_sig, get_func_attrs} }, + { jl_f_arrayfreeze_addr, new JuliaFunction{XSTR(jl_f_arrayfreeze), get_func_sig, get_func_attrs} }, + { jl_f_arraythaw_addr, new JuliaFunction{XSTR(jl_f_arraythaw), get_func_sig, get_func_attrs} }, + { jl_f_mutating_arrayfreeze_addr, new JuliaFunction{XSTR(jl_f_mutating_arrayfreeze), get_func_sig, get_func_attrs} }, + { jl_f_maybecopy_addr, new JuliaFunction{XSTR(jl_f_maybecopy), get_func_sig, get_func_attrs} }, }; jl_default_debug_info_kind = (int) DICompileUnit::DebugEmissionKind::FullDebug; diff --git a/src/datatype.c b/src/datatype.c index e7f1ab22365b8..36ea0e08bced1 100644 --- a/src/datatype.c +++ b/src/datatype.c @@ -223,7 +223,8 @@ unsigned jl_special_vector_alignment(size_t nfields, jl_value_t *t) STATIC_INLINE int jl_is_datatype_make_singleton(jl_datatype_t *d) JL_NOTSAFEPOINT { - return (!d->name->abstract && jl_datatype_size(d) == 0 && d != jl_symbol_type && d->name != jl_array_typename && + return (!d->name->abstract && jl_datatype_size(d) == 0 && d != jl_symbol_type && + d->name != jl_array_typename && d->name != jl_immutable_array_typename && d->isconcretetype && !d->name->mutabl); } @@ -395,7 +396,9 @@ void jl_compute_field_offsets(jl_datatype_t *st) st->layout = &opaque_byte_layout; return; } - else if (st == jl_simplevector_type || st == jl_module_type || st->name == jl_array_typename) { + else if (st == jl_simplevector_type || st == jl_module_type || + st->name == jl_array_typename || + st->name == jl_immutable_array_typename) { static const jl_datatype_layout_t opaque_ptr_layout = {0, 1, -1, sizeof(void*), 0, 0}; st->layout = &opaque_ptr_layout; return; diff --git a/src/gc.c b/src/gc.c index 56b2c31cbe7f1..d73f84096f2e0 100644 --- a/src/gc.c +++ b/src/gc.c @@ -859,7 +859,8 @@ void jl_gc_force_mark_old(jl_ptls_t ptls, jl_value_t *v) JL_NOTSAFEPOINT size_t l = jl_svec_len(v); dtsz = l * sizeof(void*) + sizeof(jl_svec_t); } - else if (dt->name == jl_array_typename) { + else if (dt->name == jl_array_typename || + dt->name == jl_immutable_array_typename) { jl_array_t *a = (jl_array_t*)v; if (!a->flags.pooled) dtsz = GC_MAX_SZCLASS + 1; @@ -2526,7 +2527,8 @@ mark: { objary = (gc_mark_objarray_t*)sp.data; goto objarray_loaded; } - else if (vt->name == jl_array_typename) { + else if (vt->name == jl_array_typename || + vt->name == 
jl_immutable_array_typename) { jl_array_t *a = (jl_array_t*)new_obj; jl_array_flags_t flags = a->flags; if (update_meta) { diff --git a/src/intrinsics.cpp b/src/intrinsics.cpp index adbc832cc6553..b5bc349640a56 100644 --- a/src/intrinsics.cpp +++ b/src/intrinsics.cpp @@ -1081,7 +1081,7 @@ static jl_cgval_t emit_intrinsic(jl_codectx_t &ctx, intrinsic f, jl_value_t **ar assert(nargs == 1); const jl_cgval_t &x = argv[0]; jl_value_t *typ = jl_unwrap_unionall(x.typ); - if (!jl_is_datatype(typ) || ((jl_datatype_t*)typ)->name != jl_array_typename) + if (!jl_is_arrayish_type(typ)) return emit_runtime_call(ctx, f, argv, nargs); return mark_julia_type(ctx, emit_arraylen(ctx, x), false, jl_long_type); } diff --git a/src/jl_exported_data.inc b/src/jl_exported_data.inc index 09d2949c22489..bf2e9ac0e4bd0 100644 --- a/src/jl_exported_data.inc +++ b/src/jl_exported_data.inc @@ -17,6 +17,8 @@ XX(jl_array_symbol_type) \ XX(jl_array_type) \ XX(jl_array_typename) \ + XX(jl_immutable_array_type) \ + XX(jl_immutable_array_typename) \ XX(jl_array_uint8_type) \ XX(jl_array_uint64_type) \ XX(jl_atomicerror_type) \ diff --git a/src/jltypes.c b/src/jltypes.c index 0042388660362..f8165e831d669 100644 --- a/src/jltypes.c +++ b/src/jltypes.c @@ -2258,6 +2258,15 @@ void jl_init_types(void) JL_GC_DISABLED jl_atomic_store_relaxed(&jl_nonfunction_mt->leafcache, (jl_array_t*)jl_an_empty_vec_any); jl_atomic_store_relaxed(&jl_type_type_mt->leafcache, (jl_array_t*)jl_an_empty_vec_any); + tv = jl_svec2(tvar("T"), tvar("N")); + jl_immutable_array_type = (jl_unionall_t*) + jl_new_datatype(jl_symbol("ImmutableArray"), core, + (jl_datatype_t*) + jl_apply_type((jl_value_t*)jl_densearray_type, jl_svec_data(tv), 2), + tv, jl_emptysvec, jl_emptysvec, jl_emptysvec, 0, 0, 0)->name->wrapper; + jl_immutable_array_typename = ((jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_immutable_array_type))->name; + jl_compute_field_offsets((jl_datatype_t*)jl_unwrap_unionall((jl_value_t*)jl_immutable_array_type)); + jl_expr_type = jl_new_datatype(jl_symbol("Expr"), core, jl_any_type, jl_emptysvec, @@ -2672,6 +2681,7 @@ void jl_init_types(void) JL_GC_DISABLED // override the preferred layout for a couple types jl_lineinfonode_type->name->mayinlinealloc = 0; // FIXME: assumed to be a pointer by codegen + jl_immutable_array_typename->mayinlinealloc = 0; // It seems like we probably usually end up needing the box for kinds (used in an Any context)--but is that true? 
jl_uniontype_type->name->mayinlinealloc = 0; jl_unionall_type->name->mayinlinealloc = 0; diff --git a/src/julia.h b/src/julia.h index 1f1186606d8cc..d2a90f650b27f 100644 --- a/src/julia.h +++ b/src/julia.h @@ -655,6 +655,8 @@ extern JL_DLLIMPORT jl_unionall_t *jl_abstractarray_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_unionall_t *jl_densearray_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_unionall_t *jl_array_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_typename_t *jl_array_typename JL_GLOBALLY_ROOTED; +extern JL_DLLEXPORT jl_unionall_t *jl_immutable_array_type JL_GLOBALLY_ROOTED; +extern JL_DLLEXPORT jl_typename_t *jl_immutable_array_typename JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_weakref_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_abstractstring_type JL_GLOBALLY_ROOTED; extern JL_DLLIMPORT jl_datatype_t *jl_string_type JL_GLOBALLY_ROOTED; @@ -1208,11 +1210,25 @@ STATIC_INLINE int jl_is_primitivetype(void *v) JL_NOTSAFEPOINT jl_datatype_size(v) > 0); } +STATIC_INLINE int jl_is_array_type(void *t) JL_NOTSAFEPOINT +{ + return (jl_is_datatype(t) && + (((jl_datatype_t*)(t))->name == jl_array_typename)); +} + +STATIC_INLINE int jl_is_arrayish_type(void *t) JL_NOTSAFEPOINT +{ + return (jl_is_datatype(t) && + (((jl_datatype_t*)(t))->name == jl_array_typename || + ((jl_datatype_t*)(t))->name == jl_immutable_array_typename)); +} + STATIC_INLINE int jl_is_structtype(void *v) JL_NOTSAFEPOINT { return (jl_is_datatype(v) && !((jl_datatype_t*)(v))->name->abstract && - !jl_is_primitivetype(v)); + !jl_is_primitivetype(v) && + !jl_is_arrayish_type(v)); } STATIC_INLINE int jl_isbits(void *t) JL_NOTSAFEPOINT // corresponding to isbits() in julia @@ -1230,16 +1246,16 @@ STATIC_INLINE int jl_is_abstracttype(void *v) JL_NOTSAFEPOINT return (jl_is_datatype(v) && ((jl_datatype_t*)(v))->name->abstract); } -STATIC_INLINE int jl_is_array_type(void *t) JL_NOTSAFEPOINT +STATIC_INLINE int jl_is_array(void *v) JL_NOTSAFEPOINT { - return (jl_is_datatype(t) && - ((jl_datatype_t*)(t))->name == jl_array_typename); + jl_value_t *t = jl_typeof(v); + return jl_is_array_type(t); } -STATIC_INLINE int jl_is_array(void *v) JL_NOTSAFEPOINT +STATIC_INLINE int jl_is_arrayish(void *v) JL_NOTSAFEPOINT { jl_value_t *t = jl_typeof(v); - return jl_is_array_type(t); + return jl_is_arrayish_type(t); } @@ -1510,6 +1526,7 @@ JL_DLLEXPORT jl_value_t *jl_array_to_string(jl_array_t *a); JL_DLLEXPORT jl_array_t *jl_alloc_vec_any(size_t n); JL_DLLEXPORT jl_value_t *jl_arrayref(jl_array_t *a, size_t i); // 0-indexed JL_DLLEXPORT jl_value_t *jl_ptrarrayref(jl_array_t *a JL_PROPAGATES_ROOT, size_t i) JL_NOTSAFEPOINT; // 0-indexed +JL_DLLEXPORT jl_array_t *jl_array_copy(jl_array_t *ary); JL_DLLEXPORT void jl_arrayset(jl_array_t *a JL_ROOTING_ARGUMENT, jl_value_t *v JL_ROOTED_ARGUMENT JL_MAYBE_UNROOTED, size_t i); // 0-indexed JL_DLLEXPORT void jl_arrayunset(jl_array_t *a, size_t i); // 0-indexed JL_DLLEXPORT int jl_array_isassigned(jl_array_t *a, size_t i); // 0-indexed diff --git a/src/llvm-late-gc-lowering.cpp b/src/llvm-late-gc-lowering.cpp index 3586527668135..30f9ecd67bc3e 100644 --- a/src/llvm-late-gc-lowering.cpp +++ b/src/llvm-late-gc-lowering.cpp @@ -359,6 +359,7 @@ struct LateLowerGCFrame: public FunctionPass, private JuliaPassContext { void RefineLiveSet(BitVector &LS, State &S, const std::vector &CalleeRoots); Value *EmitTagPtr(IRBuilder<> &builder, Type *T, Value *V); Value *EmitLoadTag(IRBuilder<> &builder, Value *V); + Value *EmitStoreTag(IRBuilder<> &builder, Value *V, Value *Typ); }; 
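+// (Note: EmitStoreTag, declared above, is the store-side counterpart of EmitLoadTag;
+// it overwrites an object's type tag in place. Callers are responsible for splicing
+// the low GC mark bits back into the new tag, as CleanupIR does below for
+// julia.mutating_arrayfreeze.)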
static unsigned getValueAddrSpace(Value *V) { @@ -2184,6 +2185,16 @@ Value *LateLowerGCFrame::EmitLoadTag(IRBuilder<> &builder, Value *V) return load; } +Value *LateLowerGCFrame::EmitStoreTag(IRBuilder<> &builder, Value *V, Value *Typ) +{ + auto addr = EmitTagPtr(builder, T_size, V); + StoreInst *store = builder.CreateAlignedStore(Typ, addr, Align(sizeof(size_t))); + store->setOrdering(AtomicOrdering::Unordered); + store->setMetadata(LLVMContext::MD_tbaa, tbaa_tag); + return store; +} + + // Enable this optimization only on LLVM 4.0+ since this cause LLVM to optimize // constant store loop to produce a `memset_pattern16` with a global variable // that's initialized by `addrspacecast`. Such a global variable is not supported by the backend. @@ -2370,6 +2381,19 @@ bool LateLowerGCFrame::CleanupIR(Function &F, State *S) { typ->takeName(CI); CI->replaceAllUsesWith(typ); UpdatePtrNumbering(CI, typ, S); + } else if (mutating_arrayfreeze_func && callee == mutating_arrayfreeze_func) { + assert(CI->getNumArgOperands() == 2); + IRBuilder<> builder(CI); + builder.SetCurrentDebugLocation(CI->getDebugLoc()); + auto array = CI->getArgOperand(0); + auto tag = EmitLoadTag(builder, array); + auto mark_bits = builder.CreateAnd(tag, ConstantInt::get(T_size, (uintptr_t)15)); + auto new_typ = builder.CreateAddrSpaceCast(CI->getArgOperand(1), + T_pjlvalue); + auto new_typ_marked = builder.CreateOr(builder.CreatePtrToInt(new_typ, T_size), mark_bits); + EmitStoreTag(builder, array, new_typ_marked); + CI->replaceAllUsesWith(array); + UpdatePtrNumbering(CI, array, S); } else if (write_barrier_func && callee == write_barrier_func) { // The replacement for this requires creating new BasicBlocks // which messes up the loop. Queue all of them to be replaced later. diff --git a/src/llvm-pass-helpers.cpp b/src/llvm-pass-helpers.cpp index 2821f9838a0a7..7ba412081a786 100644 --- a/src/llvm-pass-helpers.cpp +++ b/src/llvm-pass-helpers.cpp @@ -29,7 +29,8 @@ JuliaPassContext::JuliaPassContext() pgcstack_getter(nullptr), gc_flush_func(nullptr), gc_preserve_begin_func(nullptr), gc_preserve_end_func(nullptr), pointer_from_objref_func(nullptr), alloc_obj_func(nullptr), - typeof_func(nullptr), write_barrier_func(nullptr), module(nullptr) + typeof_func(nullptr), mutating_arrayfreeze_func(nullptr), + write_barrier_func(nullptr), module(nullptr) { } @@ -50,6 +51,7 @@ void JuliaPassContext::initFunctions(Module &M) gc_preserve_end_func = M.getFunction("llvm.julia.gc_preserve_end"); pointer_from_objref_func = M.getFunction("julia.pointer_from_objref"); typeof_func = M.getFunction("julia.typeof"); + mutating_arrayfreeze_func = M.getFunction("julia.mutating_arrayfreeze"); write_barrier_func = M.getFunction("julia.write_barrier"); alloc_obj_func = M.getFunction("julia.gc_alloc_obj"); } diff --git a/src/llvm-pass-helpers.h b/src/llvm-pass-helpers.h index f80786d1e7149..9352d01e2fbe9 100644 --- a/src/llvm-pass-helpers.h +++ b/src/llvm-pass-helpers.h @@ -67,6 +67,7 @@ struct JuliaPassContext { llvm::Function *pointer_from_objref_func; llvm::Function *alloc_obj_func; llvm::Function *typeof_func; + llvm::Function *mutating_arrayfreeze_func; llvm::Function *write_barrier_func; // Creates a pass context. 
Type and function pointers diff --git a/src/rtutils.c b/src/rtutils.c index b4432d8af3d0c..b79970ba47f47 100644 --- a/src/rtutils.c +++ b/src/rtutils.c @@ -1001,8 +1001,8 @@ static size_t jl_static_show_x_(JL_STREAM *out, jl_value_t *v, jl_datatype_t *vt n += jl_printf(out, ")"); } } - else if (jl_array_type && jl_is_array_type(vt)) { - n += jl_printf(out, "Array{"); + else if (jl_array_type && jl_is_arrayish_type(vt)) { + n += jl_printf(out, jl_is_array_type(vt) ? "Array{" : "ImmutableArray{"); n += jl_static_show_x(out, (jl_value_t*)jl_tparam0(vt), depth); n += jl_printf(out, ", ("); size_t i, ndims = jl_array_ndims(v); diff --git a/src/staticdata.c b/src/staticdata.c index 9291d2ba90f09..3787b17b94122 100644 --- a/src/staticdata.c +++ b/src/staticdata.c @@ -26,7 +26,7 @@ extern "C" { // TODO: put WeakRefs on the weak_refs list during deserialization // TODO: handle finalizers -#define NUM_TAGS 152 +#define NUM_TAGS 158 // An array of references that need to be restored from the sysimg // This is a manually constructed dual of the gvars array, which would be produced by codegen for Julia code, for C. @@ -46,6 +46,7 @@ jl_value_t **const*const get_tags(void) { INSERT_TAG(jl_slotnumber_type); INSERT_TAG(jl_simplevector_type); INSERT_TAG(jl_array_type); + INSERT_TAG(jl_immutable_array_type); INSERT_TAG(jl_typedslot_type); INSERT_TAG(jl_expr_type); INSERT_TAG(jl_globalref_type); @@ -131,6 +132,7 @@ jl_value_t **const*const get_tags(void) { INSERT_TAG(jl_pointer_typename); INSERT_TAG(jl_llvmpointer_typename); INSERT_TAG(jl_array_typename); + INSERT_TAG(jl_immutable_array_typename); INSERT_TAG(jl_type_typename); INSERT_TAG(jl_namedtuple_typename); INSERT_TAG(jl_vecelement_typename); @@ -198,6 +200,10 @@ jl_value_t **const*const get_tags(void) { INSERT_TAG(jl_builtin__expr); INSERT_TAG(jl_builtin_ifelse); INSERT_TAG(jl_builtin__typebody); + INSERT_TAG(jl_builtin_arrayfreeze); + INSERT_TAG(jl_builtin_mutating_arrayfreeze); + INSERT_TAG(jl_builtin_arraythaw); + INSERT_TAG(jl_builtin_maybecopy); // All optional tags must be placed at the end, so that we // don't accidentally have a `NULL` in the middle @@ -252,7 +258,9 @@ static const jl_fptr_args_t id_to_fptrs[] = { &jl_f_applicable, &jl_f_invoke, &jl_f_sizeof, &jl_f__expr, &jl_f__typevar, &jl_f_ifelse, &jl_f__structtype, &jl_f__abstracttype, &jl_f__primitivetype, &jl_f__typebody, &jl_f__setsuper, &jl_f__equiv_typedef, &jl_f_opaque_closure_call, - NULL }; + &jl_f_arrayfreeze, &jl_f_mutating_arrayfreeze, &jl_f_arraythaw, &jl_f_maybecopy, + NULL +}; typedef struct { ios_t *s; diff --git a/stdlib/LinearAlgebra/test/adjtrans.jl b/stdlib/LinearAlgebra/test/adjtrans.jl index 7b782d463768d..ae2946a68809a 100644 --- a/stdlib/LinearAlgebra/test/adjtrans.jl +++ b/stdlib/LinearAlgebra/test/adjtrans.jl @@ -241,22 +241,22 @@ end @test convert(Transpose{Float64,Matrix{Float64}}, Transpose(intmat))::Transpose{Float64,Matrix{Float64}} == Transpose(intmat) end -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays +isdefined(Main, :SimpleImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SimpleImmutableArrays.jl")) +using .Main.SimpleImmutableArrays @testset "Adjoint and Transpose convert methods to AbstractArray" begin # tests corresponding to #34995 intvec, intmat = [1, 2], [1 2 3; 4 5 6] - statvec = ImmutableArray(intvec) - statmat = ImmutableArray(intmat) + statvec = SimpleImmutableArray(intvec) + statmat = 
SimpleImmutableArray(intmat) - @test convert(AbstractArray{Float64}, Adjoint(statvec))::Adjoint{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Adjoint(statvec) + @test convert(AbstractArray{Float64}, Adjoint(statvec))::Adjoint{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Adjoint(statvec) @test convert(AbstractArray{Float64}, Adjoint(statmat))::Array{Float64,2} == Adjoint(statmat) - @test convert(AbstractArray{Float64}, Transpose(statvec))::Transpose{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Transpose(statvec) + @test convert(AbstractArray{Float64}, Transpose(statvec))::Transpose{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Transpose(statvec) @test convert(AbstractArray{Float64}, Transpose(statmat))::Array{Float64,2} == Transpose(statmat) - @test convert(AbstractMatrix{Float64}, Adjoint(statvec))::Adjoint{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Adjoint(statvec) + @test convert(AbstractMatrix{Float64}, Adjoint(statvec))::Adjoint{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Adjoint(statvec) @test convert(AbstractMatrix{Float64}, Adjoint(statmat))::Array{Float64,2} == Adjoint(statmat) - @test convert(AbstractMatrix{Float64}, Transpose(statvec))::Transpose{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Transpose(statvec) + @test convert(AbstractMatrix{Float64}, Transpose(statvec))::Transpose{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Transpose(statvec) @test convert(AbstractMatrix{Float64}, Transpose(statmat))::Array{Float64,2} == Transpose(statmat) end diff --git a/stdlib/LinearAlgebra/test/bidiag.jl b/stdlib/LinearAlgebra/test/bidiag.jl index 58de45e9e525c..182b1dbe02ede 100644 --- a/stdlib/LinearAlgebra/test/bidiag.jl +++ b/stdlib/LinearAlgebra/test/bidiag.jl @@ -668,20 +668,20 @@ end @test c \ A ≈ c \ Matrix(A) end -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays +isdefined(Main, :SimpleImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SimpleImmutableArrays.jl")) +using .Main.SimpleImmutableArrays @testset "Conversion to AbstractArray" begin # tests corresponding to #34995 - dv = ImmutableArray([1, 2, 3, 4]) - ev = ImmutableArray([7, 8, 9]) + dv = SimpleImmutableArray([1, 2, 3, 4]) + ev = SimpleImmutableArray([7, 8, 9]) Bu = Bidiagonal(dv, ev, :U) Bl = Bidiagonal(dv, ev, :L) - @test convert(AbstractArray{Float64}, Bu)::Bidiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Bu - @test convert(AbstractMatrix{Float64}, Bu)::Bidiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Bu - @test convert(AbstractArray{Float64}, Bl)::Bidiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Bl - @test convert(AbstractMatrix{Float64}, Bl)::Bidiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Bl + @test convert(AbstractArray{Float64}, Bu)::Bidiagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Bu + @test convert(AbstractMatrix{Float64}, Bu)::Bidiagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Bu + @test convert(AbstractArray{Float64}, Bl)::Bidiagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Bl + @test convert(AbstractMatrix{Float64}, Bl)::Bidiagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Bl end @testset "block-bidiagonal matrix indexing" begin diff --git a/stdlib/LinearAlgebra/test/diagonal.jl b/stdlib/LinearAlgebra/test/diagonal.jl index 
6f4aae5358a39..f2c5c619ab66d 100644 --- a/stdlib/LinearAlgebra/test/diagonal.jl +++ b/stdlib/LinearAlgebra/test/diagonal.jl @@ -910,16 +910,16 @@ end end const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays +isdefined(Main, :SimpleImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SimpleImmutableArrays.jl")) +using .Main.SimpleImmutableArrays @testset "Conversion to AbstractArray" begin # tests corresponding to #34995 - d = ImmutableArray([1, 2, 3, 4]) + d = SimpleImmutableArray([1, 2, 3, 4]) D = Diagonal(d) - @test convert(AbstractArray{Float64}, D)::Diagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == D - @test convert(AbstractMatrix{Float64}, D)::Diagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == D + @test convert(AbstractArray{Float64}, D)::Diagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == D + @test convert(AbstractMatrix{Float64}, D)::Diagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == D end @testset "divisions functionality" for elty in (Int, Float64, ComplexF64) diff --git a/stdlib/LinearAlgebra/test/hessenberg.jl b/stdlib/LinearAlgebra/test/hessenberg.jl index 9b623273666c2..1c93359bad6bb 100644 --- a/stdlib/LinearAlgebra/test/hessenberg.jl +++ b/stdlib/LinearAlgebra/test/hessenberg.jl @@ -194,16 +194,16 @@ end end end -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays +isdefined(Main, :SimpleImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SimpleImmutableArrays.jl")) +using .Main.SimpleImmutableArrays @testset "Conversion to AbstractArray" begin # tests corresponding to #34995 - A = ImmutableArray([1 2 3; 4 5 6; 7 8 9]) + A = SimpleImmutableArray([1 2 3; 4 5 6; 7 8 9]) H = UpperHessenberg(A) - @test convert(AbstractArray{Float64}, H)::UpperHessenberg{Float64,ImmutableArray{Float64,2,Array{Float64,2}}} == H - @test convert(AbstractMatrix{Float64}, H)::UpperHessenberg{Float64,ImmutableArray{Float64,2,Array{Float64,2}}} == H + @test convert(AbstractArray{Float64}, H)::UpperHessenberg{Float64,SimpleImmutableArray{Float64,2,Array{Float64,2}}} == H + @test convert(AbstractMatrix{Float64}, H)::UpperHessenberg{Float64,SimpleImmutableArray{Float64,2,Array{Float64,2}}} == H end end # module TestHessenberg diff --git a/stdlib/LinearAlgebra/test/symmetric.jl b/stdlib/LinearAlgebra/test/symmetric.jl index 47a36df5e7883..55af4fe456ff2 100644 --- a/stdlib/LinearAlgebra/test/symmetric.jl +++ b/stdlib/LinearAlgebra/test/symmetric.jl @@ -544,19 +544,19 @@ end end const BASE_TEST_PATH = joinpath(Sys.BINDIR, "..", "share", "julia", "test") -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays +isdefined(Main, :SimpleImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SimpleImmutableArrays.jl")) +using .Main.SimpleImmutableArrays @testset "Conversion to AbstractArray" begin # tests corresponding to #34995 - immutablemat = ImmutableArray([1 2 3; 4 5 6; 7 8 9]) + immutablemat = SimpleImmutableArray([1 2 3; 4 5 6; 7 8 9]) for SymType in (Symmetric, Hermitian) S = Float64 symmat = SymType(immutablemat) - @test convert(AbstractArray{S}, symmat).data isa ImmutableArray{S} - @test 
convert(AbstractMatrix{S}, symmat).data isa ImmutableArray{S} - @test AbstractArray{S}(symmat).data isa ImmutableArray{S} - @test AbstractMatrix{S}(symmat).data isa ImmutableArray{S} + @test convert(AbstractArray{S}, symmat).data isa SimpleImmutableArray{S} + @test convert(AbstractMatrix{S}, symmat).data isa SimpleImmutableArray{S} + @test AbstractArray{S}(symmat).data isa SimpleImmutableArray{S} + @test AbstractMatrix{S}(symmat).data isa SimpleImmutableArray{S} @test convert(AbstractArray{S}, symmat) == symmat @test convert(AbstractMatrix{S}, symmat) == symmat end diff --git a/stdlib/LinearAlgebra/test/triangular.jl b/stdlib/LinearAlgebra/test/triangular.jl index b53100c6fc654..72959910dab1a 100644 --- a/stdlib/LinearAlgebra/test/triangular.jl +++ b/stdlib/LinearAlgebra/test/triangular.jl @@ -689,20 +689,20 @@ let A = UpperTriangular([Furlong(1) Furlong(4); Furlong(0) Furlong(1)]) @test sqrt(A) == Furlong{1//2}.(UpperTriangular([1 2; 0 1])) end -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays +isdefined(Main, :SimpleImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SimpleImmutableArrays.jl")) +using .Main.SimpleImmutableArrays @testset "AbstractArray constructor should preserve underlying storage type" begin # tests corresponding to #34995 local m = 4 local T, S = Float32, Float64 - immutablemat = ImmutableArray(randn(T,m,m)) + immutablemat = SimpleImmutableArray(randn(T,m,m)) for TriType in (UpperTriangular, LowerTriangular, UnitUpperTriangular, UnitLowerTriangular) trimat = TriType(immutablemat) - @test convert(AbstractArray{S}, trimat).data isa ImmutableArray{S} - @test convert(AbstractMatrix{S}, trimat).data isa ImmutableArray{S} - @test AbstractArray{S}(trimat).data isa ImmutableArray{S} - @test AbstractMatrix{S}(trimat).data isa ImmutableArray{S} + @test convert(AbstractArray{S}, trimat).data isa SimpleImmutableArray{S} + @test convert(AbstractMatrix{S}, trimat).data isa SimpleImmutableArray{S} + @test AbstractArray{S}(trimat).data isa SimpleImmutableArray{S} + @test AbstractMatrix{S}(trimat).data isa SimpleImmutableArray{S} @test convert(AbstractArray{S}, trimat) == trimat @test convert(AbstractMatrix{S}, trimat) == trimat end diff --git a/stdlib/LinearAlgebra/test/tridiag.jl b/stdlib/LinearAlgebra/test/tridiag.jl index ecdf6b416baa5..fa6ce93fc296c 100644 --- a/stdlib/LinearAlgebra/test/tridiag.jl +++ b/stdlib/LinearAlgebra/test/tridiag.jl @@ -667,21 +667,21 @@ end @test ishermitian(S) end -isdefined(Main, :ImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "ImmutableArrays.jl")) -using .Main.ImmutableArrays +isdefined(Main, :SimpleImmutableArrays) || @eval Main include(joinpath($(BASE_TEST_PATH), "testhelpers", "SimpleImmutableArrays.jl")) +using .Main.SimpleImmutableArrays @testset "Conversion to AbstractArray" begin # tests corresponding to #34995 - v1 = ImmutableArray([1, 2]) - v2 = ImmutableArray([3, 4, 5]) - v3 = ImmutableArray([6, 7]) + v1 = SimpleImmutableArray([1, 2]) + v2 = SimpleImmutableArray([3, 4, 5]) + v3 = SimpleImmutableArray([6, 7]) T = Tridiagonal(v1, v2, v3) Tsym = SymTridiagonal(v2, v1) - @test convert(AbstractArray{Float64}, T)::Tridiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == T - @test convert(AbstractMatrix{Float64}, T)::Tridiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == T - @test convert(AbstractArray{Float64}, 
Tsym)::SymTridiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Tsym - @test convert(AbstractMatrix{Float64}, Tsym)::SymTridiagonal{Float64,ImmutableArray{Float64,1,Array{Float64,1}}} == Tsym + @test convert(AbstractArray{Float64}, T)::Tridiagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == T + @test convert(AbstractMatrix{Float64}, T)::Tridiagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == T + @test convert(AbstractArray{Float64}, Tsym)::SymTridiagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Tsym + @test convert(AbstractMatrix{Float64}, Tsym)::SymTridiagonal{Float64,SimpleImmutableArray{Float64,1,Array{Float64,1}}} == Tsym end @testset "dot(x,A,y) for A::Tridiagonal or SymTridiagonal" begin diff --git a/test/choosetests.jl b/test/choosetests.jl index e00aedffdd42e..66d87232d02d0 100644 --- a/test/choosetests.jl +++ b/test/choosetests.jl @@ -23,7 +23,7 @@ const TESTNAMES = [ "errorshow", "sets", "goto", "llvmcall", "llvmcall2", "ryu", "some", "meta", "stacktraces", "docs", "misc", "threads", "stress", "binaryplatforms", "atexit", - "enums", "cmdlineargs", "int", "interpreter", + "enums", "cmdlineargs", "immutablearray", "int", "interpreter", "checked", "bitset", "floatfuncs", "precompile", "boundscheck", "error", "ambiguous", "cartesian", "osutils", "channels", "iostream", "secretbuffer", "specificity", @@ -142,7 +142,7 @@ function choosetests(choices = []) filtertests!(tests, "subarray") filtertests!(tests, "compiler", ["compiler/inference", "compiler/validation", "compiler/ssair", "compiler/irpasses", "compiler/codegen", - "compiler/inline", "compiler/contextual"]) + "compiler/inline", "compiler/contextual", "compiler/immutablearray"]) filtertests!(tests, "stdlib", STDLIBS) # do ambiguous first to avoid failing if ambiguities are introduced by other tests filtertests!(tests, "ambiguous") diff --git a/test/compiler/immutablearray.jl b/test/compiler/immutablearray.jl new file mode 100644 index 0000000000000..9beb24df1dc41 --- /dev/null +++ b/test/compiler/immutablearray.jl @@ -0,0 +1,422 @@ +using Test +import Core: arrayfreeze, mutating_arrayfreeze, arraythaw +import Core.Compiler: arrayfreeze_tfunc, mutating_arrayfreeze_tfunc, arraythaw_tfunc + +@testset "ImmutableArray tfuncs" begin + @test arrayfreeze_tfunc(Vector{Int}) === ImmutableVector{Int} + @test arrayfreeze_tfunc(Vector) === ImmutableVector + @test arrayfreeze_tfunc(Array) === ImmutableArray + @test arrayfreeze_tfunc(Any) === ImmutableArray + @test arrayfreeze_tfunc(ImmutableVector{Int}) === Union{} + @test arrayfreeze_tfunc(ImmutableVector) === Union{} + @test arrayfreeze_tfunc(ImmutableArray) === Union{} + @test mutating_arrayfreeze_tfunc(Vector{Int}) === ImmutableVector{Int} + @test mutating_arrayfreeze_tfunc(Vector) === ImmutableVector + @test mutating_arrayfreeze_tfunc(Array) === ImmutableArray + @test mutating_arrayfreeze_tfunc(Any) === ImmutableArray + @test mutating_arrayfreeze_tfunc(ImmutableVector{Int}) === Union{} + @test mutating_arrayfreeze_tfunc(ImmutableVector) === Union{} + @test mutating_arrayfreeze_tfunc(ImmutableArray) === Union{} + @test arraythaw_tfunc(ImmutableVector{Int}) === Vector{Int} + @test arraythaw_tfunc(ImmutableVector) === Vector + @test arraythaw_tfunc(ImmutableArray) === Array + @test arraythaw_tfunc(Any) === Array + @test arraythaw_tfunc(Vector{Int}) === Union{} + @test arraythaw_tfunc(Vector) === Union{} + @test arraythaw_tfunc(Array) === Union{} +end + +# mutating_arrayfreeze optimization +# ================================= 
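+#
+# `ImmutableArray(a)` is implemented by the `arrayfreeze(a)` builtin, which
+# makes a defensive copy of `a`. When escape analysis can prove that the
+# source array never escapes, the optimizer may replace that call with
+# `mutating_arrayfreeze`, which retags the existing allocation in place and
+# elides the copy. A sketch of the pattern these tests target (illustrative
+# only):
+#
+#   function f()
+#       a = [1, 2, 3]             # allocation local to `f`
+#       return ImmutableArray(a)  # `a` does not escape, so no copy is needed
+#   end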
+
+import Core.Compiler: argextype, singleton_type
+const EMPTY_SPTYPES = Any[]
+
+code_typed1(args...; kwargs...) = first(only(code_typed(args...; kwargs...)))::Core.CodeInfo
+
+# check if `x` is a `:new` expression
+isnew(@nospecialize x) = Meta.isexpr(x, :new)
+
+# check if `x` is a dynamic call of a given function
+iscall(y) = @nospecialize(x) -> iscall(y, x)
+function iscall((src, f)::Tuple{Core.CodeInfo,Base.Callable}, @nospecialize(x))
+    return iscall(x) do @nospecialize x
+        singleton_type(argextype(x, src, EMPTY_SPTYPES)) === f
+    end
+end
+iscall(pred::Base.Callable, @nospecialize(x)) = Meta.isexpr(x, :call) && pred(x.args[1])
+
+# check if `x` is a statically-resolved call of a function whose name is `sym`
+isinvoke(y) = @nospecialize(x) -> isinvoke(y, x)
+isinvoke(sym::Symbol, @nospecialize(x)) = isinvoke(mi->mi.def.name===sym, x)
+isinvoke(pred::Function, @nospecialize(x)) = Meta.isexpr(x, :invoke) && pred(x.args[1]::Core.MethodInstance)
+
+function is_array_alloc(@nospecialize x)
+    Meta.isexpr(x, :foreigncall) || return false
+    args = x.args
+    name = args[1]
+    isa(name, QuoteNode) && (name = name.value)
+    isa(name, Symbol) || return false
+    return Core.Compiler.alloc_array_ndims(name) !== nothing
+end
+
+# optimizable examples
+# --------------------
+
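+# Each `let` block below checks the optimized IR for a `mutating_arrayfreeze`
+# call (and the absence of `arrayfreeze`), then compares `@allocated` against
+# an equivalent `identity` call to confirm the freeze added no allocation.
+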
+let # simplest -- vector
+    function optimizable(gen)
+        a = [1,2,3,4,5]
+        return gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity)
+        allocated = @allocated optimizable(identity)
+        optimizable(ImmutableArray)
+        @test allocated == @allocated optimizable(ImmutableArray)
+    end
+end
+
+let # handle matrix etc. (actually this example also requires inter-procedural escape handling)
+    function optimizable(gen)
+        a = [1 2 3; 4 5 6]
+        b = [1 2 3 4 5 6]
+        return gen(a), gen(b)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},))
+        # @test count(is_array_alloc, src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 2
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity)
+        allocated = @allocated optimizable(identity)
+        optimizable(ImmutableArray)
+        @test allocated == @allocated optimizable(ImmutableArray)
+    end
+end
+
+let # multiple returns don't matter
+    function optimizable(gen)
+        a = [1,2,3,4,5]
+        return gen(a), gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 2
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity)
+        allocated = @allocated optimizable(identity)
+        optimizable(ImmutableArray)
+        @test allocated == @allocated optimizable(ImmutableArray)
+    end
+end
+
+let # arrayset
+    function optimizable1(gen)
+        a = Vector{Int}(undef, 5)
+        for i = 1:5
+            a[i] = i
+        end
+        return gen(a)
+    end
+    let src = code_typed1(optimizable1, (Type{ImmutableArray},))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable1(identity)
+        allocated = @allocated optimizable1(identity)
+        optimizable1(ImmutableArray)
+        @test allocated == @allocated optimizable1(ImmutableArray)
+    end
+
+    function optimizable2(gen)
+        a = Matrix{Float64}(undef, 5, 2)
+        for i = 1:5
+            for j = 1:2
+                a[i, j] = i + j
+            end
+        end
+        return gen(a)
+    end
+    let src = code_typed1(optimizable2, (Type{ImmutableArray},))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable2(identity)
+        allocated = @allocated optimizable2(identity)
+        optimizable2(ImmutableArray)
+        @test allocated == @allocated optimizable2(ImmutableArray)
+    end
+end
+
+let # arrayref
+    function optimizable(gen)
+        a = [1,2,3]
+        b = getindex(a, 2)
+        return gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity)
+        allocated = @allocated optimizable(identity)
+        optimizable(ImmutableArray)
+        @test allocated == @allocated optimizable(ImmutableArray)
+    end
+end
+
+let # array resize
+    function optimizable(gen, n)
+        a = Int[]
+        for i = 1:n
+            push!(a, i)
+        end
+        return gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},Int,))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity, 42)
+        allocated = @allocated optimizable(identity, 42)
+        optimizable(ImmutableArray, 42)
+        @test allocated == @allocated optimizable(ImmutableArray, 42)
+    end
+end
+
+@noinline function same′(a)
+    return reverse(reverse(a))
+end
+let # inter-procedural
+    function optimizable(gen)
+        a = ones(5)
+        a = same′(a)
+        return gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(isinvoke(:same′), src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity)
+        allocated = @allocated optimizable(identity)
+        optimizable(ImmutableArray)
+        @test allocated == @allocated optimizable(ImmutableArray)
+    end
+end
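+
+# The next two examples check that a `ThrownEscape` which provably cannot
+# occur by the time `arrayfreeze` is reached does not inhibit the optimization.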
+
+let # ignore ThrownEscape if it never happens when `arrayfreeze` is called
+    function optimizable(gen, n)
+        a = Int[]
+        for i = 1:n
+            push!(a, i)
+        end
+        n > 100 && throw(a)
+        return gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},Int,))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity, 42)
+        allocated = @allocated optimizable(identity, 42)
+        optimizable(ImmutableArray, 42)
+        @test allocated == @allocated optimizable(ImmutableArray, 42)
+    end
+end
+@noinline function ipo_getindex′(a, n)
+    ele = getindex(a, n)
+    return ele
+end
+let # ignore ThrownEscape if it never happens when `arrayfreeze` is called (interprocedural)
+    function optimizable(gen)
+        a = [1,2,3]
+        b = ipo_getindex′(a, 2)
+        return gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},))
+        @test count(is_array_alloc, src.code) == 1
+        @test count(isinvoke(:ipo_getindex′), src.code) == 1
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity)
+        allocated = @allocated optimizable(identity)
+        optimizable(ImmutableArray)
+        @test allocated == @allocated optimizable(ImmutableArray)
+    end
+end
+
+let # nested case
+    function optimizable(gen, n)
+        a = [collect(1:m) for m in 1:n]
+        for i = 1:n
+            a[i][1] = i
+        end
+        return gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},Int))
+        @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity, 100)
+        allocated = @allocated optimizable(identity, 100)
+        optimizable(ImmutableArray, 100)
+        @test allocated == @allocated optimizable(ImmutableArray, 100)
+    end
+end
+
+# demonstrate alias analysis
+broadcast_identity(a) = broadcast(identity, a)
+function optimizable_aa(gen, n) # NB: must be a top-level function; for some reason this is not optimized when written as a closure
+    return collect(1:n) |>
+           Ref |> Ref |> Ref |>
+           broadcast_identity |> broadcast_identity |> broadcast_identity |>
+           gen
+end
+let src = code_typed1(optimizable_aa, (Type{ImmutableArray},Int))
+    @test count(is_array_alloc, src.code) == 1
+    @test count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+    @test count(iscall((src, arrayfreeze)), src.code) == 0
+    optimizable_aa(identity, 100)
+    allocated = @allocated optimizable_aa(identity, 100)
+    optimizable_aa(ImmutableArray, 100)
+    @test allocated == @allocated optimizable_aa(ImmutableArray, 100)
+end
+
+let # should be possible if we change BoundsError semantics (so that it doesn't capture the indexed array)
+    function optimizable(gen)
+        a = [1,2,3]
+        try
+            getindex(a, 4)
+        catch
+        end
+        return gen(a)
+    end
+    let src = code_typed1(optimizable, (Type{ImmutableArray},))
+        @test count(is_array_alloc, src.code) == 1
+        @test_broken count(iscall((src, mutating_arrayfreeze)), src.code) == 1
+        @test_broken count(iscall((src, arrayfreeze)), src.code) == 0
+        optimizable(identity)
+        allocated = @allocated optimizable(identity)
+        optimizable(ImmutableArray)
+        @test_broken allocated == @allocated optimizable(ImmutableArray)
+    end
+end
+
+# unoptimizable examples
+# ----------------------
+
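+# In each example below the array escapes before it is frozen (an extra return,
+# a store to global memory, or an exception), so the conservative, copying
+# `arrayfreeze` must remain. E.g. (illustrative only):
+#
+#   function g()
+#       a = [1, 2, 3]
+#       return a, ImmutableArray(a)  # `a` escapes via the first return value,
+#   end                              # so freezing in place would be unsound
+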
+const Rx = Ref{Any}() # global memory + +let # return escape + function unoptimizable(gen) + a = [1,2,3,4,5] + return a, gen(a) + end + let src = code_typed1(unoptimizable, (Type{ImmutableArray},)) + @test count(is_array_alloc, src.code) == 1 + @test count(iscall((src, mutating_arrayfreeze)), src.code) == 0 + @test count(iscall((src, arrayfreeze)), src.code) == 1 + unoptimizable(ImmutableArray) + a, b = unoptimizable(ImmutableArray) + @test a !== b + @test !(a isa ImmutableArray) + end +end + +let # arg escape + unoptimizable(a, gen) = gen(a) + let src = code_typed1(unoptimizable, (Vector{Int}, Type{ImmutableArray},)) + @test count(iscall((src, mutating_arrayfreeze)), src.code) == 0 + @test count(iscall((src, arrayfreeze)), src.code) == 1 + a = [1,2,3] + unoptimizable(a, ImmutableArray) + b = unoptimizable(a, ImmutableArray) + @test a !== b + @test !(a isa ImmutableArray) + @test b isa ImmutableArray + end +end + +let # global escape + function unoptimizable(gen) + a = [1,2,3,4,5] + global global_array = a + return gen(a) + end + let src = code_typed1(unoptimizable, (Type{ImmutableArray},)) + @test count(is_array_alloc, src.code) == 1 + @test count(iscall((src, mutating_arrayfreeze)), src.code) == 0 + @test count(iscall((src, arrayfreeze)), src.code) == 1 + unoptimizable(identity) + unoptimizable(ImmutableArray) + a = unoptimizable(ImmutableArray) + @test global_array !== a + @test !(global_array isa ImmutableArray) + end +end + +let # global escape + function unoptimizable(gen) + a = [1,2,3,4,5] + Rx[] = a + return gen(a) + end + let src = code_typed1(unoptimizable, (Type{ImmutableArray},)) + @test count(is_array_alloc, src.code) == 1 + @test count(iscall((src, mutating_arrayfreeze)), src.code) == 0 + @test count(iscall((src, arrayfreeze)), src.code) == 1 + unoptimizable(identity) + unoptimizable(ImmutableArray) + a = unoptimizable(ImmutableArray) + @test Rx[] !== a + @test !(Rx[] isa ImmutableArray) + end +end + +let # escapes via exception + function unoptimizable(gen) + a = [1,2,3,4,5] + try + throw(a) + catch err + global global_array = err + end + return gen(a) + end + let src = code_typed1(unoptimizable, (Type{ImmutableArray},)) + @test count(is_array_alloc, src.code) == 1 + @test count(iscall((src, mutating_arrayfreeze)), src.code) == 0 + @test count(iscall((src, arrayfreeze)), src.code) == 1 + unoptimizable(identity) + allocated = @allocated unoptimizable(identity) + unoptimizable(ImmutableArray) + local a + @test allocated < @allocated a = unoptimizable(ImmutableArray) + @test global_array !== a + @test !(global_array isa ImmutableArray) + end +end + +const g = Ref{Any}() +let # escapes via BoundsError + function unoptimizable(gen) + a = [1,2,3] + try + getindex(a, 4) + catch e + g[] = e.a + end + return gen(a) + end + let src = code_typed1(unoptimizable, (Type{ImmutableArray},)) + @test count(is_array_alloc, src.code) == 1 + @test count(iscall((src, arrayfreeze)), src.code) == 1 + @test count(iscall((src, mutating_arrayfreeze)), src.code) == 0 + unoptimizable(identity) + unoptimizable(ImmutableArray) + ia = unoptimizable(ImmutableArray) + @test g[] !== ia + end +end \ No newline at end of file diff --git a/test/compiler/inference.jl b/test/compiler/inference.jl index 9549f4ab5ff1d..174f317488089 100644 --- a/test/compiler/inference.jl +++ b/test/compiler/inference.jl @@ -1542,9 +1542,17 @@ import Core.Compiler: Const, arrayref_tfunc, arrayset_tfunc, arraysize_tfunc @test arrayref_tfunc(Const(true), Vector{Int}, Int, Vararg{Int}) === Int @test arrayref_tfunc(Const(true), 
Vector{Int}, Vararg{Int}) === Int @test arrayref_tfunc(Const(true), Vector{Int}) === Union{} +@test arrayref_tfunc(Const(true), Core.ImmutableArray{Int,1}, Int) === Int +@test arrayref_tfunc(Const(true), Core.ImmutableArray{<:Integer,1}, Int) === Integer +@test arrayref_tfunc(Const(true), Core.ImmutableArray, Int) === Any +@test arrayref_tfunc(Const(true), Core.ImmutableArray{Int,1}, Int, Vararg{Int}) === Int +@test arrayref_tfunc(Const(true), Core.ImmutableArray{Int,1}, Vararg{Int}) === Int +@test arrayref_tfunc(Const(true), Core.ImmutableArray{Int,1}) === Union{} @test arrayref_tfunc(Const(true), String, Int) === Union{} @test arrayref_tfunc(Const(true), Vector{Int}, Float64) === Union{} @test arrayref_tfunc(Int, Vector{Int}, Int) === Union{} +@test arrayref_tfunc(Const(true), Core.ImmutableArray{Int,1}, Float64) === Union{} +@test arrayref_tfunc(Int, Core.ImmutableArray{Int,1}, Int) === Union{} @test arrayset_tfunc(Const(true), Vector{Int}, Int, Int) === Vector{Int} let ua = Vector{<:Integer} @test arrayset_tfunc(Const(true), ua, Int, Int) === ua @@ -1553,13 +1561,22 @@ end @test arrayset_tfunc(Const(true), Any, Int, Int) === Any @test arrayset_tfunc(Const(true), Vector{String}, String, Int, Vararg{Int}) === Vector{String} @test arrayset_tfunc(Const(true), Vector{String}, String, Vararg{Int}) === Vector{String} +@test arrayset_tfunc(Const(true), Core.ImmutableArray{Int,1}, Int, Int) === Union{} +let ua = Core.ImmutableArray{<:Integer,1} + @test arrayset_tfunc(Const(true), ua, Int, Int) === Union{} +end +@test arrayset_tfunc(Const(true), Core.ImmutableArray, Int, Int) === Union{} +@test arrayset_tfunc(Const(true), Core.ImmutableArray{String,1}, String, Int, Vararg{Int}) === Union{} +@test arrayset_tfunc(Const(true), Core.ImmutableArray{String,1}, String, Vararg{Int}) === Union{} @test arrayset_tfunc(Const(true), Vector{String}, String) === Union{} @test arrayset_tfunc(Const(true), String, Char, Int) === Union{} @test arrayset_tfunc(Const(true), Vector{Int}, Int, Float64) === Union{} @test arrayset_tfunc(Int, Vector{Int}, Int, Int) === Union{} @test arrayset_tfunc(Const(true), Vector{Int}, Float64, Int) === Union{} @test arraysize_tfunc(Vector, Int) === Int +@test arraysize_tfunc(Core.ImmutableArray, Int) === Int @test arraysize_tfunc(Vector, Float64) === Union{} +@test arraysize_tfunc(Core.ImmutableArray, Float64) === Union{} @test arraysize_tfunc(String, Int) === Union{} function f23024(::Type{T}, ::Int) where T diff --git a/test/core.jl b/test/core.jl index 391a13e3784f2..ffa6062b9ef26 100644 --- a/test/core.jl +++ b/test/core.jl @@ -2641,7 +2641,8 @@ end # pull request #9534 @test_throws BoundsError((1, 2), 3) begin; a, b, c = 1, 2; end let a = [] - @test try; a[]; catch ex; (ex::BoundsError).a === a && ex.i == (); end + # no longer passes because BoundsError now copies arrays + # @test try; a[]; catch ex; (ex::BoundsError).a === a && ex.i == (); end @test_throws BoundsError(a, (1, 2)) a[1, 2] @test_throws BoundsError(a, (10,)) a[10] end diff --git a/test/immutablearray.jl b/test/immutablearray.jl new file mode 100644 index 0000000000000..dcff145b263a5 --- /dev/null +++ b/test/immutablearray.jl @@ -0,0 +1,182 @@ +using Test +import Core: arrayfreeze, mutating_arrayfreeze, arraythaw + +@testset "basic ImmutableArray functionality" begin + eltypes = (Float16, Float32, Float64, Int8, UInt8, Int16, UInt16, Int32, UInt32, Int64, UInt64, Int128, UInt128) + for t in eltypes + a = rand(t, rand(1:100), rand(1:10)) + b = ImmutableArray(a) + @test a == b + @test a !== b + @test length(a) == 
length(b)
+        for i in 1:length(a)
+            @test getindex(a, i) == getindex(b, i)
+        end
+        @test size(a) == size(b)
+        if !(t in (Float16, Float32, Float64))
+            @test sum(a) == sum(b)
+        end
+        @test reverse(a) == reverse(b)
+        @test ndims(a) == ndims(b)
+        for d in 1:ndims(a)
+            @test axes(a, d) == axes(b, d)
+        end
+        @test strides(a) == strides(b)
+        @test keys(a) == keys(b)
+        @test IndexStyle(a) == IndexStyle(b)
+        @test eachindex(a) == eachindex(b)
+        @test isempty(a) == isempty(b)
+        # Check that broadcast precedence is working correctly
+        @test typeof(a .+ b) <: ImmutableArray
+    end
+
+end
+
+@testset "ImmutableArray builtins" begin
+    a = [1,2,3]
+    b = ImmutableArray(a)
+    # errors
+    @test_throws ArgumentError arrayfreeze()
+    @test_throws ArgumentError arrayfreeze([1,2,3], nothing)
+    @test_throws TypeError arrayfreeze(b)
+    @test_throws TypeError arrayfreeze("not an array")
+    @test_throws ArgumentError mutating_arrayfreeze()
+    @test_throws ArgumentError mutating_arrayfreeze([1,2,3], nothing)
+    @test_throws TypeError mutating_arrayfreeze(b)
+    @test_throws TypeError mutating_arrayfreeze("not an array")
+    @test_throws ArgumentError arraythaw()
+    @test_throws ArgumentError arraythaw([1,2,3], nothing)
+    @test_throws TypeError arraythaw(a)
+    @test_throws TypeError arraythaw("not an array")
+
+    @test arrayfreeze(a) === b
+    @test arraythaw(b) !== a # arraythaw copies so not ===
+    @test arraythaw(arrayfreeze(a)) == a
+    @test arraythaw(arrayfreeze(a)) !== a
+    @test arrayfreeze(arraythaw(b)) === b
+    @test arraythaw(arrayfreeze(arraythaw(b))) == b
+    @test arraythaw(arrayfreeze(arraythaw(b))) !== b
+
+    mutating_arrayfreeze(a) # last because this mutates a
+    @test isa(a, ImmutableArray)
+    @test a === b
+    @test arraythaw(a) !== a
+    @test !isa(arraythaw(a), ImmutableArray)
+end
+
+A = ImmutableArray(rand(5,4,3))
+@testset "Bounds checking" begin
+    @test checkbounds(Bool, A, 1, 1, 1) == true
+    @test checkbounds(Bool, A, 5, 4, 3) == true
+    @test checkbounds(Bool, A, 0, 1, 1) == false
+    @test checkbounds(Bool, A, 1, 0, 1) == false
+    @test checkbounds(Bool, A, 1, 1, 0) == false
+    @test checkbounds(Bool, A, 6, 4, 3) == false
+    @test checkbounds(Bool, A, 5, 5, 3) == false
+    @test checkbounds(Bool, A, 5, 4, 4) == false
+    @test checkbounds(Bool, A, 1) == true # linear indexing
+    @test checkbounds(Bool, A, 60) == true
+    @test checkbounds(Bool, A, 61) == false
+    @test checkbounds(Bool, A, 2, 2, 2, 1) == true # extra indices
+    @test checkbounds(Bool, A, 2, 2, 2, 2) == false
+    @test checkbounds(Bool, A, 1, 1) == false
+    @test checkbounds(Bool, A, 1, 12) == false
+    @test checkbounds(Bool, A, 5, 12) == false
+    @test checkbounds(Bool, A, 1, 13) == false
+    @test checkbounds(Bool, A, 6, 12) == false
+end
+
+@testset "single CartesianIndex" begin
+    @test checkbounds(Bool, A, CartesianIndex((1, 1, 1))) == true
+    @test checkbounds(Bool, A, CartesianIndex((5, 4, 3))) == true
+    @test checkbounds(Bool, A, CartesianIndex((0, 1, 1))) == false
+    @test checkbounds(Bool, A, CartesianIndex((1, 0, 1))) == false
+    @test checkbounds(Bool, A, CartesianIndex((1, 1, 0))) == false
+    @test checkbounds(Bool, A, CartesianIndex((6, 4, 3))) == false
+    @test checkbounds(Bool, A, CartesianIndex((5, 5, 3))) == false
+    @test checkbounds(Bool, A, CartesianIndex((5, 4, 4))) == false
+    @test checkbounds(Bool, A, CartesianIndex((1,))) == false
+    @test checkbounds(Bool, A, CartesianIndex((60,))) == false
+    @test checkbounds(Bool, A, CartesianIndex((61,))) == false
+    @test checkbounds(Bool, A, CartesianIndex((2, 2, 2, 1,))) == true
+    @test checkbounds(Bool, A, 
CartesianIndex((2, 2, 2, 2,))) == false + @test checkbounds(Bool, A, CartesianIndex((1, 1,))) == false + @test checkbounds(Bool, A, CartesianIndex((1, 12,))) == false + @test checkbounds(Bool, A, CartesianIndex((5, 12,))) == false + @test checkbounds(Bool, A, CartesianIndex((1, 13,))) == false + @test checkbounds(Bool, A, CartesianIndex((6, 12,))) == false +end + +@testset "mix of CartesianIndex and Int" begin + @test checkbounds(Bool, A, CartesianIndex((1,)), 1, CartesianIndex((1,))) == true + @test checkbounds(Bool, A, CartesianIndex((5, 4)), 3) == true + @test checkbounds(Bool, A, CartesianIndex((0, 1)), 1) == false + @test checkbounds(Bool, A, 1, CartesianIndex((0, 1))) == false + @test checkbounds(Bool, A, 1, 1, CartesianIndex((0,))) == false + @test checkbounds(Bool, A, 6, CartesianIndex((4, 3))) == false + @test checkbounds(Bool, A, 5, CartesianIndex((5,)), 3) == false + @test checkbounds(Bool, A, CartesianIndex((5,)), CartesianIndex((4,)), CartesianIndex((4,))) == false +end + +@testset "vector indices" begin + @test checkbounds(Bool, A, 1:5, 1:4, 1:3) == true + @test checkbounds(Bool, A, 0:5, 1:4, 1:3) == false + @test checkbounds(Bool, A, 1:5, 0:4, 1:3) == false + @test checkbounds(Bool, A, 1:5, 1:4, 0:3) == false + @test checkbounds(Bool, A, 1:6, 1:4, 1:3) == false + @test checkbounds(Bool, A, 1:5, 1:5, 1:3) == false + @test checkbounds(Bool, A, 1:5, 1:4, 1:4) == false + @test checkbounds(Bool, A, 1:60) == true + @test checkbounds(Bool, A, 1:61) == false + @test checkbounds(Bool, A, 2, 2, 2, 1:1) == true # extra indices + @test checkbounds(Bool, A, 2, 2, 2, 1:2) == false + @test checkbounds(Bool, A, 1:5, 1:4) == false + @test checkbounds(Bool, A, 1:5, 1:12) == false + @test checkbounds(Bool, A, 1:5, 1:13) == false + @test checkbounds(Bool, A, 1:6, 1:12) == false +end + +@testset "logical" begin + @test checkbounds(Bool, A, trues(5), trues(4), trues(3)) == true + @test checkbounds(Bool, A, trues(6), trues(4), trues(3)) == false + @test checkbounds(Bool, A, trues(5), trues(5), trues(3)) == false + @test checkbounds(Bool, A, trues(5), trues(4), trues(4)) == false + @test checkbounds(Bool, A, trues(60)) == true + @test checkbounds(Bool, A, trues(61)) == false + @test checkbounds(Bool, A, 2, 2, 2, trues(1)) == true # extra indices + @test checkbounds(Bool, A, 2, 2, 2, trues(2)) == false + @test checkbounds(Bool, A, trues(5), trues(12)) == false + @test checkbounds(Bool, A, trues(5), trues(13)) == false + @test checkbounds(Bool, A, trues(6), trues(12)) == false + @test checkbounds(Bool, A, trues(5, 4, 3)) == true + @test checkbounds(Bool, A, trues(5, 4, 2)) == false + @test checkbounds(Bool, A, trues(5, 12)) == false + @test checkbounds(Bool, A, trues(1, 5), trues(1, 4, 1), trues(1, 1, 3)) == false + @test checkbounds(Bool, A, trues(1, 5), trues(1, 4, 1), trues(1, 1, 2)) == false + @test checkbounds(Bool, A, trues(1, 5), trues(1, 5, 1), trues(1, 1, 3)) == false + @test checkbounds(Bool, A, trues(1, 5), :, 2) == false + @test checkbounds(Bool, A, trues(5, 4), trues(3)) == true + @test checkbounds(Bool, A, trues(4, 4), trues(3)) == true + @test checkbounds(Bool, A, trues(5, 4), trues(2)) == false + @test checkbounds(Bool, A, trues(6, 4), trues(3)) == false + @test checkbounds(Bool, A, trues(5, 4), trues(4)) == false +end + +@testset "array of CartesianIndex" begin + @test checkbounds(Bool, A, [CartesianIndex((1, 1, 1))]) == true + @test checkbounds(Bool, A, [CartesianIndex((5, 4, 3))]) == true + @test checkbounds(Bool, A, [CartesianIndex((0, 1, 1))]) == false + @test checkbounds(Bool, 
A, [CartesianIndex((1, 0, 1))]) == false + @test checkbounds(Bool, A, [CartesianIndex((1, 1, 0))]) == false + @test checkbounds(Bool, A, [CartesianIndex((6, 4, 3))]) == false + @test checkbounds(Bool, A, [CartesianIndex((5, 5, 3))]) == false + @test checkbounds(Bool, A, [CartesianIndex((5, 4, 4))]) == false + @test checkbounds(Bool, A, [CartesianIndex((1, 1))], 1) == true + @test checkbounds(Bool, A, [CartesianIndex((5, 4))], 3) == true + @test checkbounds(Bool, A, [CartesianIndex((0, 1))], 1) == false + @test checkbounds(Bool, A, [CartesianIndex((1, 0))], 1) == false + @test checkbounds(Bool, A, [CartesianIndex((1, 1))], 0) == false + @test checkbounds(Bool, A, [CartesianIndex((6, 4))], 3) == false + @test checkbounds(Bool, A, [CartesianIndex((5, 5))], 3) == false + @test checkbounds(Bool, A, [CartesianIndex((5, 4))], 4) == false +end \ No newline at end of file diff --git a/test/testhelpers/ImmutableArrays.jl b/test/testhelpers/ImmutableArrays.jl deleted file mode 100644 index df2a78387e07b..0000000000000 --- a/test/testhelpers/ImmutableArrays.jl +++ /dev/null @@ -1,28 +0,0 @@ -# This file is a part of Julia. License is MIT: https://julialang.org/license - -# ImmutableArrays (arrays that implement getindex but not setindex!) - -# This test file defines an array wrapper that is immutable. It can be used to -# test the action of methods on immutable arrays. - -module ImmutableArrays - -export ImmutableArray - -"An immutable wrapper type for arrays." -struct ImmutableArray{T,N,A<:AbstractArray} <: AbstractArray{T,N} - data::A -end - -ImmutableArray(data::AbstractArray{T,N}) where {T,N} = ImmutableArray{T,N,typeof(data)}(data) - -# Minimal AbstractArray interface -Base.size(A::ImmutableArray) = size(A.data) -Base.size(A::ImmutableArray, d) = size(A.data, d) -Base.getindex(A::ImmutableArray, i...) = getindex(A.data, i...) - -# The immutable array remains immutable after conversion to AbstractArray -AbstractArray{T}(A::ImmutableArray) where {T} = ImmutableArray(AbstractArray{T}(A.data)) -AbstractArray{T,N}(A::ImmutableArray{S,N}) where {S,T,N} = ImmutableArray(AbstractArray{T,N}(A.data)) - -end diff --git a/test/testhelpers/SimpleImmutableArrays.jl b/test/testhelpers/SimpleImmutableArrays.jl new file mode 100644 index 0000000000000..7276020310b89 --- /dev/null +++ b/test/testhelpers/SimpleImmutableArrays.jl @@ -0,0 +1,28 @@ +# This file is a part of Julia. License is MIT: https://julialang.org/license + +# SimpleImmutableArrays (arrays that implement getindex but not setindex!) + +# This test file defines an array wrapper that is immutable. It can be used to +# test the action of methods on immutable arrays. + +module SimpleImmutableArrays + +export SimpleImmutableArray + +"An immutable wrapper type for arrays." +struct SimpleImmutableArray{T,N,A<:AbstractArray} <: AbstractArray{T,N} + data::A +end + +SimpleImmutableArray(data::AbstractArray{T,N}) where {T,N} = SimpleImmutableArray{T,N,typeof(data)}(data) + +# Minimal AbstractArray interface +Base.size(A::SimpleImmutableArray) = size(A.data) +Base.size(A::SimpleImmutableArray, d) = size(A.data, d) +Base.getindex(A::SimpleImmutableArray, i...) = getindex(A.data, i...) + +# The immutable array remains immutable after conversion to AbstractArray +AbstractArray{T}(A::SimpleImmutableArray) where {T} = SimpleImmutableArray(AbstractArray{T}(A.data)) +AbstractArray{T,N}(A::SimpleImmutableArray{S,N}) where {S,T,N} = SimpleImmutableArray(AbstractArray{T,N}(A.data)) + +end
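+
+# Usage sketch (illustrative): the wrapper reads like the array it wraps but
+# rejects mutation, since no `setindex!` method is defined for it:
+#
+#   using .SimpleImmutableArrays
+#   A = SimpleImmutableArray([1 2; 3 4])
+#   A[1, 2]      # 2, forwarded to the wrapped array
+#   A[1, 2] = 5  # errors: `setindex!` is not implemented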