
Commit 4fae8b0

Add MOI.Benchmarks submodule to facilitate solver benchmarking (#769)
1 parent c7a3152 commit 4fae8b0

6 files changed: +355 -0 lines changed

docs/src/apimanual.md

Lines changed: 43 additions & 0 deletions
@@ -1084,3 +1084,46 @@ const cache = MOIU.UniversalFallback(ModelData{Float64}())
const cached = MOIU.CachingOptimizer(cache, optimizer)
const bridged = MOIB.full_bridge_optimizer(cached, Float64)
```

### Benchmarking

To aid the development of efficient solver wrappers, MathOptInterface provides
benchmarking functionality. Benchmarking a wrapper follows a two-step process.

First, prior to making changes, run and save the benchmark results on a given
benchmark suite as follows:

```julia
using SolverPackage, MathOptInterface

const MOI = MathOptInterface

suite = MOI.Benchmarks.suite() do
    SolverPackage.Optimizer()
end

MOI.Benchmarks.create_baseline(
    suite, "current"; directory = "/tmp", verbose = true
)
```

Use the `exclude` argument to [`Benchmarks.suite`](@ref) to exclude benchmarks
that the solver doesn't support.
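
For example, to skip the deletion benchmarks, pass a regular expression via
`exclude` (a sketch reusing the placeholder `SolverPackage` optimizer from
above):

```julia
suite = MOI.Benchmarks.suite(exclude = [r"delete"]) do
    SolverPackage.Optimizer()
end
```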

Second, after making changes to the package, re-run the benchmark suite and
compare to the prior saved results:

```julia
using SolverPackage, MathOptInterface

const MOI = MathOptInterface

suite = MOI.Benchmarks.suite() do
    SolverPackage.Optimizer()
end

MOI.Benchmarks.compare_against_baseline(
    suite, "current"; directory = "/tmp", verbose = true
)
```

This comparison will create a report detailing improvements and regressions.
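
The report is also written to `directory` as a plain-text file, `report.txt` by
default (the `report_filename` keyword of `Benchmarks.compare_against_baseline`
controls the name), so it can be inspected after the run. A minimal sketch,
assuming the `directory = "/tmp"` used above and the default filename:

```julia
# Print the plain-text comparison report written by compare_against_baseline.
print(read("/tmp/report.txt", String))
```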

docs/src/apireference.md

Lines changed: 11 additions & 0 deletions
@@ -546,3 +546,14 @@ Utilities.load_variables
Utilities.load
Utilities.load_constraint
```

## Benchmarks

Functions to help benchmark the performance of solver wrappers. See
[Benchmarking](@ref) for more details.

```@docs
Benchmarks.suite
Benchmarks.create_baseline
Benchmarks.compare_against_baseline
```

src/Benchmarks/Benchmarks.jl

Lines changed: 236 additions & 0 deletions
@@ -0,0 +1,236 @@
module Benchmarks

using BenchmarkTools, MathOptInterface

const MOI = MathOptInterface
const BENCHMARKS = Dict{String, Function}()

"""
    suite(
        new_model::Function;
        exclude::Vector{Regex} = Regex[]
    )

Create a suite of benchmarks. `new_model` should be a function that takes no
arguments, and returns a new instance of the optimizer you wish to benchmark.

Use `exclude` to exclude a subset of benchmarks.

### Examples

```julia
suite() do
    GLPK.Optimizer()
end

suite(exclude = [r"delete"]) do
    Gurobi.Optimizer(OutputFlag=0)
end
```
"""
function suite(new_model::Function; exclude::Vector{Regex} = Regex[])
    group = BenchmarkGroup()
    for (name, func) in BENCHMARKS
        any(occursin.(exclude, Ref(name))) && continue
        group[name] = @benchmarkable $func($new_model)
    end
    return group
end

"""
    create_baseline(suite, name::String; directory::String = "", kwargs...)

Run all benchmarks in `suite` and save to files called `name` in `directory`.

Extra `kwargs` are passed to `BenchmarkTools.run`.

### Examples

```julia
my_suite = suite(() -> GLPK.Optimizer())
create_baseline(my_suite, "glpk_master"; directory = "/tmp", verbose = true)
```
"""
function create_baseline(
    suite::BenchmarkTools.BenchmarkGroup, name::String; directory::String = "",
    kwargs...
)
    tune!(suite)
    BenchmarkTools.save(joinpath(directory, name * "_params.json"), params(suite))
    results = run(suite; kwargs...)
    BenchmarkTools.save(joinpath(directory, name * "_baseline.json"), results)
    return
end

"""
    compare_against_baseline(
        suite, name::String; directory::String = "",
        report_filename::String = "report.txt", kwargs...
    )

Run all benchmarks in `suite` and compare against files called `name` in
`directory` that were created by a call to `create_baseline`.

A report summarizing the comparison is written to `report_filename` in
`directory`.

Extra `kwargs` are passed to `BenchmarkTools.run`.

### Examples

```julia
my_suite = suite(() -> GLPK.Optimizer())
compare_against_baseline(
    my_suite, "glpk_master"; directory = "/tmp", verbose = true
)
```
"""
function compare_against_baseline(
    suite::BenchmarkTools.BenchmarkGroup, name::String;
    directory::String = "", report_filename::String = "report.txt", kwargs...
)
    params_filename = joinpath(directory, name * "_params.json")
    baseline_filename = joinpath(directory, name * "_baseline.json")
    if !isfile(params_filename) || !isfile(baseline_filename)
        error("You must create a baseline with `create_baseline` first.")
    end
    loadparams!(
        suite, BenchmarkTools.load(params_filename)[1], :evals, :samples
    )
    new_results = run(suite; kwargs...)
    old_results = BenchmarkTools.load(baseline_filename)[1]
    open(joinpath(directory, report_filename), "w") do io
        println(stdout, "\n========== Results ==========")
        println(io, "\n========== Results ==========")
        for key in keys(new_results)
            judgement = judge(
                BenchmarkTools.median(new_results[key]),
                BenchmarkTools.median(old_results[key])
            )
            println(stdout, "\n", key)
            println(io, "\n", key)
            show(stdout, MIME"text/plain"(), judgement)
            show(io, MIME"text/plain"(), judgement)
        end
    end
    return
end

###
### Benchmarks
###

macro add_benchmark(f)
    name = f.args[1].args[1]
    return quote
        $(esc(f))
        BENCHMARKS[String($(Base.Meta.quot(name)))] = $(esc(name))
    end
end

@add_benchmark function add_variable(new_model)
    model = new_model()
    for i in 1:10_000
        MOI.add_variable(model)
    end
    return model
end

@add_benchmark function add_variables(new_model)
    model = new_model()
    MOI.add_variables(model, 10_000)
    return model
end

@add_benchmark function add_variable_constraint(new_model)
    model = new_model()
    x = MOI.add_variables(model, 10_000)
    for (i, xi) in enumerate(x)
        MOI.add_constraint(model, MOI.SingleVariable(xi), MOI.LessThan(1.0 * i))
    end
    return model
end

@add_benchmark function add_variable_constraints(new_model)
    model = new_model()
    x = MOI.add_variables(model, 10_000)
    MOI.add_constraints(
        model,
        MOI.SingleVariable.(x),
        MOI.LessThan.(1.0:10_000.0)
    )
    return model
end

@add_benchmark function delete_variable(new_model)
    model = new_model()
    x = MOI.add_variables(model, 1_000)
    MOI.add_constraint.(model, MOI.SingleVariable.(x), Ref(MOI.LessThan(1.0)))
    MOI.delete.(model, x)
    return model
end

@add_benchmark function delete_variable_constraint(new_model)
    model = new_model()
    x = MOI.add_variables(model, 1_000)
    cons = MOI.add_constraint.(model, MOI.SingleVariable.(x), Ref(MOI.LessThan(1.0)))
    for con in cons
        MOI.delete(model, con)
    end
    cons = MOI.add_constraint.(model, MOI.SingleVariable.(x), Ref(MOI.LessThan(1.0)))
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    MOI.set(model,
        MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
        MOI.ScalarAffineFunction(MOI.ScalarAffineTerm.(1.0, x), 0.0)
    )
    MOI.optimize!(model)
    for con in cons
        MOI.delete(model, con)
    end
    return model
end

@add_benchmark function add_constraint(new_model)
    model = new_model()
    index = MOI.add_variables(model, 10_000)
    for (i, x) in enumerate(index)
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1.0, x)], 0.0),
            MOI.LessThan(1.0 * i)
        )
    end
    return model
end

@add_benchmark function add_constraints(new_model)
    model = new_model()
    x = MOI.add_variables(model, 10_000)
    MOI.add_constraints(
        model,
        [MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1.0, xi)], 0.0) for xi in x],
        MOI.LessThan.(1:1.0:10_000)
    )
    return model
end

@add_benchmark function delete_constraint(new_model)
    model = new_model()
    index = MOI.add_variables(model, 1_000)
    cons = Vector{
        MOI.ConstraintIndex{MOI.ScalarAffineFunction{Float64}, MOI.LessThan{Float64}}
    }(undef, 1_000)
    for (i, x) in enumerate(index)
        cons[i] = MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1.0, x)], 0.0),
            MOI.LessThan(1.0 * i)
        )
    end
    for con in cons
        MOI.delete(model, con)
    end
    return model
end

end

src/MathOptInterface.jl

Lines changed: 1 addition & 0 deletions
@@ -121,5 +121,6 @@ include("nlp.jl")
include("Utilities/Utilities.jl") # MOI.Utilities
include("Test/Test.jl") # MOI.Test
include("Bridges/Bridges.jl") # MOI.Bridges
include("Benchmarks/Benchmarks.jl") # MOI.Benchmarks

end

test/Benchmarks/Benchmarks.jl

Lines changed: 60 additions & 0 deletions
@@ -0,0 +1,60 @@
using MathOptInterface, Test

const MOI = MathOptInterface
const MOIU = MOI.Utilities

MOIU.@model(
    BenchmarkModel,
    (), (MOI.LessThan, ), (), (),
    (), (MOI.ScalarAffineFunction, ), (), ()
)

const NUM_BENCHMARKS = length(MOI.Benchmarks.BENCHMARKS)

@testset "suite" begin
    suite = MOI.Benchmarks.suite() do
        MOIU.MockOptimizer(BenchmarkModel{Float64}())
    end
    @test length(suite.data) == NUM_BENCHMARKS

    suite = MOI.Benchmarks.suite(
        exclude = [r"delete_"]
    ) do
        MOIU.MockOptimizer(BenchmarkModel{Float64}())
    end
    # Note: update this value whenever more benchmarks are added to
    # `src/Benchmarks/Benchmarks.jl`.
    @test 6 <= length(suite.data) <= NUM_BENCHMARKS - 3
end

@testset "Perform benchmark" begin
    params = joinpath(@__DIR__, "baseline_params.json")
    baseline = joinpath(@__DIR__, "baseline_baseline.json")
    @test !isfile(params)
    @test !isfile(baseline)
    @testset "create_baseline" begin
        suite = MOI.Benchmarks.suite() do
            MOIU.MockOptimizer(BenchmarkModel{Float64}())
        end
        MOI.Benchmarks.create_baseline(
            suite, "baseline"; directory=@__DIR__, seconds = 2, verbose = true
        )
    end
    @test isfile(params)
    @test isfile(baseline)
    @testset "compare_against_baseline" begin
        suite = MOI.Benchmarks.suite() do
            MOIU.MockOptimizer(BenchmarkModel{Float64}())
        end
        MOI.Benchmarks.compare_against_baseline(
            suite, "baseline"; directory=@__DIR__, seconds = 2, verbose = true
        )
    end
    rm(params)
    rm(baseline)
    @testset "Report" begin
        report = read(joinpath(@__DIR__, "report.txt"), String)
        @test occursin("=> invariant", report)
    end
    rm(joinpath(@__DIR__, "report.txt"))
end

test/runtests.jl

Lines changed: 4 additions & 0 deletions
@@ -37,5 +37,9 @@ end
    include("Bridges/Bridges.jl")
end

@testset "MOI.Benchmarks" begin
    include("Benchmarks/Benchmarks.jl")
end

# Test hygiene of @model macro
include("hygiene.jl")
