Add FixedPointDecimal benchmark. #42

Open
wants to merge 16 commits into master
104 changes: 104 additions & 0 deletions benchmark/Manifest.toml
@@ -0,0 +1,104 @@
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"

[[BenchmarkTools]]
deps = ["JSON", "Printf", "Statistics", "Test"]
git-tree-sha1 = "e686f1754227e4748259f400839b83a1e8773e02"
uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
version = "0.4.1"

[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"

[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"

[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"

[[JSON]]
deps = ["Dates", "Distributed", "Mmap", "Sockets", "Test", "Unicode"]
git-tree-sha1 = "1f7a25b53ec67f5e9422f1f551ee216503f4a0fa"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.20.0"

[[LibGit2]]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"

[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"

[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"

[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"

[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"

[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"

[[Pkg]]
deps = ["Dates", "LibGit2", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"

[[PkgBenchmark]]
deps = ["BenchmarkTools", "Dates", "InteractiveUtils", "JSON", "LibGit2", "Pkg", "Printf", "ProgressMeter", "Random", "Statistics", "Test"]
git-tree-sha1 = "717f93d4cfcbd5b5dadbbb74071541c725ba6552"
repo-rev = "master"
repo-url = "https://github.com/JuliaCI/PkgBenchmark.jl.git"
uuid = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
version = "0.2.0+"

[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"

[[ProgressMeter]]
deps = ["Distributed", "Printf", "Random", "Test"]
git-tree-sha1 = "48058bc11607676e5bbc0b974af79106c6200787"
uuid = "92933f4c-e287-5a05-a399-4b506db050ca"
version = "0.9.0"

[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"

[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"

[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"

[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"

[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"

[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"

[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"

[[Test]]
deps = ["Distributed", "InteractiveUtils", "Logging", "Random"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"

[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
3 changes: 3 additions & 0 deletions benchmark/Project.toml
@@ -0,0 +1,3 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
PkgBenchmark = "32113eaa-f34f-5b0d-bd6c-c81e245fc73d"
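
A sketch of how this benchmark environment would typically be prepared before running
(standard Pkg API; nothing below is specific to this PR):

    using Pkg
    Pkg.activate("benchmark")  # activate benchmark/Project.toml
    Pkg.instantiate()          # install the exact versions pinned in Manifest.toml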
1 change: 1 addition & 0 deletions benchmark/REQUIRE
@@ -0,0 +1 @@
BenchmarkTools
90 changes: 90 additions & 0 deletions benchmark/benchmarks.jl
@@ -0,0 +1,90 @@
# Decimal Representation Comparisons
#
# This benchmark compares the performance of several numeric representations, over various
# numeric operations (+, -, *, /, ÷, ...) applied many times in a tight loop, in order to
# guide decision-making about how to represent fixed-decimal numbers.
#
# It is intended to compare fixed-decimal types against the builtin Int and Float types of
# various sizes (currently only the FixedDecimal types are enabled in `benchtypes` below).
# Results are exported as markdown files in this directory by runbench.jl.

# TODO: remove this file once BenchmarkTools has a built-in solution for diffing two
# @benchmarkable runs
using Pkg
Pkg.activate(@__DIR__)

using FixedPointDecimals
using Random
using BenchmarkTools, Statistics

# Define a parent BenchmarkGroup to contain our suite
const SUITE = BenchmarkGroup()
# Iterations of the inner benchmark loop per evaluation (overridable via BENCH_NUM_ITERS).
const N = parse(Int, get(ENV, "BENCH_NUM_ITERS", "1000"))

benchtypes = [
    FixedPointDecimals.FixedDecimal{Int32, 2},
    FixedPointDecimals.FixedDecimal{Int64, 2},
    FixedPointDecimals.FixedDecimal{Int128, 2},
]

# A two-argument identity: a no-op "operation" used to measure pure loop overhead.
identity1(a, _) = a
allops = (*, /, +, ÷, identity1)

prettytype(::Type{FixedPointDecimals.FixedDecimal{T,f}}) where {T,f} = "FD{$T,$f}"
# The extra space pads the shorter int types so the printed names line up.
prettytype(::Type{FixedPointDecimals.FixedDecimal{T,f}}) where {T<:Union{Int32,Int64},f} = "FD{ $T,$f}"
opname(f) = string(Symbol(f))
opname(f::typeof(identity1)) = "identity"

# --------- Define benchmark functions -------------
# Some care is taken here to prevent the compiler from optimizing away the operations:
# - Marked @noinline so the constants we pass in aren't available to the optimizer.
# - We take `a` and `out` as parameters so that their values aren't available when
# compiling this function.
# - `out` is a Ref{T} so that this function will have side effects. We use an output
# parameter instead of returning the value directly so that it will play nicely with
# the `@benchmark` macro which returns the benchmark results as an object.
# - `T` and `op` _should_ be available as compile-time constants, since we don't want to be
# measuring the time it takes to read from global variables.
@noinline function benchmark(::Type{T}, op, a::T, n, out::Ref{T}) where {T}
    for _ in 1:n
        tmp = op(a, a)
        out[] += tmp
        a += one(T)
    end
end

# Identical to `benchmark` but without applying `op`: it measures only the loop,
# accumulation, and increment overhead, which postprocessing subtracts back out.
@noinline function baseline(::Type{T}, a::T, n, out::Ref{T}) where {T}
    for _ in 1:n
        tmp = a
        out[] += tmp
        a += one(T)
    end
end

# Define the benchmark structure
for op in allops
    SUITE[opname(op)] = BenchmarkGroup()
    for T in benchtypes
        SUITE[opname(op)][prettytype(T)] = BenchmarkGroup(["base", "bench"])
    end
end

for op in allops
    println()
    println("$op")
    for T in benchtypes
        print("$T ")

        initial_value = zero(T)
        a = one(T)

        # For some reason this is necessary to eliminate mysterious "1 allocation"
        fbase = @eval (out::Ref{$T}) -> baseline($T, $a, $N, out)
        fbench = @eval (out::Ref{$T}) -> benchmark($T, $op, $a, $N, out)

        # Run the benchmark
        outbase = Ref(initial_value)
        SUITE[opname(op)][prettytype(T)]["base"] = @benchmarkable $fbase($outbase) evals=1 setup=($outbase[] = $initial_value)
        outbench = Ref(initial_value)
        SUITE[opname(op)][prettytype(T)]["bench"] = @benchmarkable $fbench($outbench) evals=1 setup=($outbench[] = $initial_value)
    end
end
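
A minimal sketch of running this suite directly, without PkgBenchmark (run and median
are standard BenchmarkTools/Statistics API; the file path refers to the file added above):

    using BenchmarkTools, Statistics
    include("benchmark/benchmarks.jl")   # defines and populates SUITE
    results = run(SUITE; verbose=true)   # run every (op, type) benchmark
    median(results)                      # summarize each trial by its median estimate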
85 changes: 85 additions & 0 deletions benchmark/runbench.jl
@@ -0,0 +1,85 @@
module FixedPointDecimals_RunBench

using Pkg

Pkg.activate(@__DIR__)
using PkgBenchmark, BenchmarkTools, Statistics

const N = 1_000  # Iterations per benchmark loop; forwarded to benchmarks.jl via BENCH_NUM_ITERS.

import Base: -, /

# Arithmetic on TrialEstimates, so that a baseline estimate can be subtracted from a
# benchmark estimate, and the result divided by the iteration count N.
function -(a::BenchmarkTools.TrialEstimate, b::BenchmarkTools.TrialEstimate)
    ttol = max(params(a).time_tolerance, params(b).time_tolerance)
    mtol = max(params(a).memory_tolerance, params(b).memory_tolerance)
    p = BenchmarkTools.Parameters(params(a); time_tolerance = ttol, memory_tolerance = mtol)
    return BenchmarkTools.TrialEstimate(p, -(time(a), time(b)), -(gctime(a), gctime(b)),
                                        -(memory(a), memory(b)), -(allocs(a), allocs(b)))
end
function /(a::BenchmarkTools.TrialEstimate, b::Int)
    ttol = params(a).time_tolerance / b
    mtol = params(a).memory_tolerance / b
    p = BenchmarkTools.Parameters(params(a); time_tolerance = ttol, memory_tolerance = mtol)
    return BenchmarkTools.TrialEstimate(p, time(a) / b, gctime(a) / b,
                                        round(memory(a) / b), round(allocs(a) / b))
end

# Convert raw grouped results into per-operation estimates: subtract the baseline
# (empty-loop) cost from each benchmark and divide by the N loop iterations.
function postprocess(results::BenchmarkGroup)
    for (op, op_group) in results.data
        op_results = op_group.data
        for (type, type_group) in op_results
            benchresults = type_group.data
            if op == "identity"
                # For :identity, bench and base are identical so we don't want to subtract.
                op_results[type] = median(benchresults["bench"]) / N
            else
                op_results[type] = median(benchresults["bench"]) / N - median(benchresults["base"]) / N
            end
        end
    end
    results
end

# Same as `postprocess`, but leaves the totals unscaled by N (used when judging two runs).
function postprocess_no_div(results::BenchmarkGroup)
    for (op, op_group) in results.data
        op_results = op_group.data
        for (type, type_group) in op_results
            benchresults = type_group.data
            if op == "identity"
                # For :identity, bench and base are identical so we don't want to subtract.
                op_results[type] = median(benchresults["bench"])
            else
                op_results[type] = median(benchresults["bench"]) - median(benchresults["base"])
            end
        end
    end
    results
end
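
# Worked example of the arithmetic above (hypothetical numbers): with N = 1000, if the
# median "bench" trial took 2500ns and the median "base" (empty-loop) trial took 1000ns,
# postprocess reports 2500/1000 - 1000/1000 = 1.5ns per operation.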


function runbench()
    rm(joinpath(@__DIR__, "tune.json"); force=true)  # Remove any existing tune.json file.
    bench_results = withenv("BENCH_NUM_ITERS" => string(N)) do
        benchmarkpkg("FixedPointDecimals"; postprocess=postprocess)
    end

    export_markdown(joinpath(@__DIR__, "results.md"), bench_results)
    return bench_results
end

function judgebench(target::Union{String, BenchmarkConfig}, baseline::Union{String, BenchmarkConfig},
                    postprocess_fn=postprocess_no_div)
    rm(joinpath(@__DIR__, "tune.json"); force=true)  # Remove any existing tune.json file.
    bench_results = withenv("BENCH_NUM_ITERS" => string(N)) do
        if postprocess_fn !== nothing
            judge("FixedPointDecimals", target, baseline; f=identity, postprocess=postprocess_fn)
        else
            judge("FixedPointDecimals", target, baseline)
        end
    end
    export_markdown(joinpath(@__DIR__, "judge.md"), bench_results)
    return bench_results
end
function judgebench(baseline::Union{String, BenchmarkConfig})
    # Compare the current state of the package (BenchmarkConfig()) against `baseline`.
    judgebench(BenchmarkConfig(), baseline)
end

end
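
A minimal usage sketch (the ref name is a hypothetical example; judgebench with one
argument compares the current working copy against the given baseline):

    include("benchmark/runbench.jl")
    FixedPointDecimals_RunBench.runbench()            # writes benchmark/results.md
    FixedPointDecimals_RunBench.judgebench("master")  # writes benchmark/judge.md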
10 changes: 10 additions & 0 deletions benchmark/subtract-benchmarks.jl
@@ -0,0 +1,10 @@
# Monkey-patch PkgBenchmark._run with a method for BenchmarkDiff, so that diffed
# benchmarks report progress like regular ones. The BenchmarkTools and ProgressMeter
# bindings are reached through PkgBenchmark, since they are not direct deps here.
import PkgBenchmark
const BenchmarkTools = PkgBenchmark.BenchmarkTools
const ProgressMeter = PkgBenchmark.ProgressMeter

function PkgBenchmark._run(b::BenchmarkTools.BenchmarkDiff, p::BenchmarkTools.Parameters = b.params;
                           prog = nothing, verbose::Bool = false, pad = "", hierarchy = [], kwargs...)
    res = BenchmarkTools.run_result(b, p; kwargs...)[1]
    if prog !== nothing
        indent = 0
        ProgressMeter.next!(prog; showvalues = [map(id -> (" "^(indent += 1) * "[$(id[2])/$(id[3])]", id[1]), hierarchy)...])
    end
    return res
end