Add LinuxPerf extension for branch + instruction counts
This updates the core BenchmarkTools types to include `instructions` and
`branches` fields. If the extension is not available, these are `NaN`.

No support is included for measuring overhead or making judgements based
on these fields, but Serialization, Statistics, etc. are all supported
with their usual functionality for Trial / TrialEstimate / etc.
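
For illustration (not part of the commit), this is how the new fields surface to users once LinuxPerf.jl is installed and loaded. The accessors on Trial / TrialEstimate come from trials.jl, which this excerpt does not show, so the exact call pattern below is an assumption based on the existing `time`/`gctime` accessors:

    using BenchmarkTools
    using LinuxPerf  # loading this package activates the LinuxPerfExt extension

    t = @benchmark sum($(rand(1000)))

    # `instructions` and `branches` are exported alongside `time`, `gctime`, etc.
    instructions(minimum(t))  # estimated instructions retired per evaluation
    branches(minimum(t))      # estimated branches per evaluation

Without the extension (or on non-Linux systems), both accessors return NaN, as the commit message notes.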
topolarity committed Sep 30, 2024
1 parent cb09e40 commit 0b21870
Showing 14 changed files with 418 additions and 64 deletions.
8 changes: 1 addition & 7 deletions .github/workflows/CI.yml
@@ -16,20 +16,14 @@ jobs:
       fail-fast: false
       matrix:
         version:
-          - '1.6'
+          - '1.10'
           - '1'
           - 'nightly'
         arch:
           - x64
         os:
           - ubuntu-latest
         include:
-          - version: '1.7'
-            arch: x64
-            os: ubuntu-20.04
-          - version: '1.8'
-            arch: x64
-            os: ubuntu-22.04
           - version: '1.9'
             arch: x64
             os: ubuntu-22.04
11 changes: 9 additions & 2 deletions Project.toml
@@ -11,6 +11,12 @@ Profile = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
 Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
 
+[weakdeps]
+LinuxPerf = "b4c46c6c-4fb0-484d-a11a-41bc3392d094"
+
+[extensions]
+LinuxPerfExt = "LinuxPerf"
+
 [compat]
 Aqua = "0.8"
 Compat = ">= 4.11.0"
@@ -22,7 +28,8 @@ Profile = "<0.0.1, 1"
 Statistics = "<0.0.1, 1"
 Test = "<0.0.1, 1"
 UUIDs = "<0.0.1, 1"
-julia = "1.6"
+julia = "1.9"
+LinuxPerf = ">= 0.4"
 
 [extras]
 Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
@@ -31,4 +38,4 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["Aqua", "JuliaFormatter", "Statistics", "Test"]
+test = ["Aqua", "JuliaFormatter", "Statistics", "Test", "LinuxPerf"]
24 changes: 24 additions & 0 deletions ext/LinuxPerfExt/LinuxPerfExt.jl
@@ -0,0 +1,24 @@
+module LinuxPerfExt
+
+import BenchmarkTools: PerfInterface
+import LinuxPerf: LinuxPerf, PerfBench, EventGroup, EventType
+import LinuxPerf: enable!, disable!, enable_all!, disable_all!, close, read!
+
+function interface()
+    return PerfInterface(;
+        setup=() -> PerfBench(
+            0, [EventGroup([EventType(:hw, :instructions), EventType(:hw, :branches)])]
+        ),
+        start=(bench) -> enable_all!(),
+        stop=(bench) -> disable_all!(),
+        # start=(bench) -> enable!(bench),
+        # stop=(bench) -> disable!(bench),
+        teardown=(bench) -> close(bench),
+        read=(bench) -> let g = only(bench.groups)
+            (insts, branches) = read!(g.leader_io, Vector{UInt64}(undef, 5))
+            return (insts, branches)
+        end,
+    )
+end
+
+end
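
For context (not part of the commit): LinuxPerf opens the two hardware events as a single perf event group, and reading the group leader's fd with the usual group read format (PERF_FORMAT_GROUP plus the total-time fields) yields five 64-bit words for a two-event group — which is why the buffer above has length 5. A hypothetical decoder under that assumed layout, including the standard correction for event multiplexing:

    # Hypothetical decoder (not in the commit), assuming the group read layout
    #   [nr_events, time_enabled, time_running, value_1, value_2]
    function decode_group_read(words::Vector{UInt64})
        nr, time_enabled, time_running, insts, branches = words
        time_running == 0 && return (-1, -1)  # group was never scheduled; no data
        k = time_enabled / time_running       # > 1.0 when the kernel multiplexed the group
        return (round(Int64, insts * k), round(Int64, branches * k))
    end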
2 changes: 2 additions & 0 deletions src/BenchmarkTools.jl
@@ -25,6 +25,8 @@ export loadparams!
 include("trials.jl")
 
 export gctime,
+    instructions,
+    branches,
     memory,
     allocs,
     params,
65 changes: 56 additions & 9 deletions src/execution.jl
@@ -506,6 +506,24 @@ macro benchmarkable(args...)
     end
 end
 
+struct PerfInterface
+    setup::Function
+    start::Function
+    stop::Function
+    read::Function
+    teardown::Function
+
+    function PerfInterface(;
+        setup=Returns(nothing),
+        start=Returns(nothing),
+        stop=Returns(nothing),
+        read=Returns((-1, -1)),
+        teardown=Returns(nothing),
+    )
+        return new(setup, start, stop, read, teardown)
+    end
+end
+
 # `eval` an expression that forcibly defines the specified benchmark at
 # top-level in order to allow transfer of locally-scoped variables into
 # benchmark scope.
@@ -553,6 +571,8 @@ function generate_benchmark_definition(
             end
         )
     end
+    ext = Base.get_extension(BenchmarkTools, :LinuxPerfExt)
+    LinuxPerf = isnothing(ext) ? PerfInterface() : ext.interface()
     return Core.eval(
         eval_module,
         quote
@@ -563,17 +583,42 @@
                 $(Expr(:tuple, quote_vars...)), __params::$BenchmarkTools.Parameters
             )
                 $(setup)
+                __perf_bench = $(LinuxPerf.setup)()
+                __gcdiff = nothing
+                __return_val = nothing
+                __sample_time::Int64 = 0
+                __sample_instructions::Int64 = 0
+                __sample_branches::Int64 = 0
                 __evals = __params.evals
-                __gc_start = Base.gc_num()
-                __start_time = time_ns()
-                __return_val = $(invocation)
-                for __iter in 2:__evals
-                    $(invocation)
-                end
-                __sample_time = time_ns() - __start_time
-                __gcdiff = Base.GC_Diff(Base.gc_num(), __gc_start)
-                $(teardown)
+                try
+                    __gc_start = Base.gc_num()
+                    $(LinuxPerf.start)(__perf_bench)
+                    __start_time = time_ns()
+                    __return_val = $(invocation)
+                    for __iter in 2:__evals
+                        $(invocation)
+                    end
+                    __sample_time = time_ns() - __start_time
+                    $(LinuxPerf.stop)(__perf_bench)
+                    __gcdiff = Base.GC_Diff(Base.gc_num(), __gc_start)
+                    __sample_instructions, __sample_branches = $(LinuxPerf.read)(
+                        __perf_bench
+                    )
+                finally
+                    $(LinuxPerf.teardown)(__perf_bench)
+                    $(teardown)
+                end
                 __time = max((__sample_time / __evals) - __params.overhead, 0.001)
+                __instructions = if (__sample_instructions == -1)
+                    NaN
+                else
+                    max((__sample_instructions / __evals) - __params.insts_overhead, 0.0)
+                end
+                __branches = if (__sample_branches == -1)
+                    NaN
+                else
+                    max((__sample_branches / __evals) - 0.0, 0.0)
+                end
                 __gctime = max((__gcdiff.total_time / __evals) - __params.overhead, 0.0)
                 __memory = Int(Base.fld(__gcdiff.allocd, __evals))
                 __allocs = Int(
@@ -585,7 +630,9 @@
                         __evals,
                     ),
                 )
-                return __time, __gctime, __memory, __allocs, __return_val
+                return __time,
+                __instructions, __branches, __gctime, __memory, __allocs,
+                __return_val
             end
             $BenchmarkTools.Benchmark($(samplefunc), $(quote_vals), $(params))
         end,
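
Stripped of the quoting machinery, the generated sample function now follows this shape (a simplified sketch; `sample` and its argument names are illustrative, with `iface` standing in for the resolved PerfInterface):

    function sample(f::Function, evals::Integer, iface)
        bench = iface.setup()
        local elapsed, insts, brs
        try
            iface.start(bench)              # no-op when the extension is absent
            t0 = time_ns()
            for _ in 1:evals
                f()
            end
            elapsed = time_ns() - t0
            iface.stop(bench)
            insts, brs = iface.read(bench)  # (-1, -1) from the no-op interface
        finally
            iface.teardown(bench)           # always release the perf file descriptors
        end
        return elapsed / evals, insts / evals, brs / evals
    end

The default `PerfInterface()`, whose hooks are all `Returns(nothing)`, makes every call a no-op, so the hot loop pays only a few trivial function calls when LinuxPerf is not loaded; the -1 sentinel from `read` is what the generated code later turns into NaN.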
2 changes: 2 additions & 0 deletions src/groups.jl
@@ -113,6 +113,8 @@ Base.min(groups::BenchmarkGroup...) = mapvals(min, groups...)
 Base.max(groups::BenchmarkGroup...) = mapvals(max, groups...)
 
 Base.time(group::BenchmarkGroup) = mapvals(time, group)
+instructions(group::BenchmarkGroup) = mapvals(instructions, group)
+branches(group::BenchmarkGroup) = mapvals(branches, group)
 gctime(group::BenchmarkGroup) = mapvals(gctime, group)
 memory(group::BenchmarkGroup) = mapvals(memory, group)
 allocs(group::BenchmarkGroup) = mapvals(allocs, group)
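
As with `time` and `gctime`, the new accessors map over whole suites. A small example (assuming the corresponding Trial accessors from trials.jl, which this excerpt does not show):

    suite = BenchmarkGroup()
    suite["sum"] = @benchmarkable sum($(rand(100)))
    suite["sort"] = @benchmarkable sort($(rand(100)))
    results = run(suite)

    instructions(results)  # BenchmarkGroup mapping each key to an instruction
                           # estimate; NaN everywhere without the extension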
6 changes: 5 additions & 1 deletion src/parameters.jl
@@ -109,7 +109,11 @@ end
 
 @noinline function overhead_sample(evals)
     start_time = time_ns()
-    for _ in 1:evals
+    try
+        for _ in 1:evals
+            nullfunc()
+        end
+    finally
         nullfunc()
     end
     sample_time = time_ns() - start_time
20 changes: 20 additions & 0 deletions src/serialization.jl
@@ -55,6 +55,26 @@ function recover(x::Vector)
     else
         xsi = if fn == "evals_set" && !haskey(fields, fn)
             false
+        elseif fn in ("instructions", "branches")
+            # JSON spec doesn't support NaN, so handle it specially here
+            if !haskey(fields, fn)
+                if ft === Vector{Float64}
+                    Float64[NaN for _ in 1:length(fields["time"])]
+                elseif ft === Float64
+                    NaN
+                else
+                    @assert false
+                end
+            else
+                if ft === Vector{Float64}
+                    Float64[
+                        elem === nothing ? NaN : convert(Float64, elem) for
+                        elem in fields[fn]
+                    ]
+                else
+                    fields[fn] === nothing ? NaN : convert(ft, fields[fn])
+                end
+            end
         elseif fn in ("seconds", "overhead", "time_tolerance", "memory_tolerance") &&
                fields[fn] === nothing
             # JSON spec doesn't support Inf
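
A round-trip sketch of the behavior this branch guards (file name hypothetical): NaN cannot be represented in JSON, so the new fields are written as null and restored to NaN on load, while files written before this commit simply lack the fields and are back-filled with NaN vectors sized to match the "time" samples:

    using BenchmarkTools, LinuxPerf
    t = @benchmark sin(1.0)
    BenchmarkTools.save("bench.json", t)

    t2 = only(BenchmarkTools.load("bench.json"))
    # either real counts survived the round trip, or every entry is NaN
    @assert all(x -> isnan(x) || x >= 0, t2.instructions)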