|  | 
|  | 1 | +module Benchmarks | 
|  | 2 | + | 
|  | 3 | +using BenchmarkTools, MathOptInterface | 
|  | 4 | + | 
|  | 5 | +const MOI = MathOptInterface | 
|  | 6 | +const BENCHMARKS = Dict{String, Function}() | 
|  | 7 | + | 
"""
    suite(
        new_model::Function;
        exclude::Vector{Regex} = Regex[]
    )

Create a suite of benchmarks. `new_model` should be a function that takes no
arguments, and returns a new instance of the optimizer you wish to benchmark.

Use `exclude` to exclude a subset of benchmarks.

### Examples

```julia
suite() do
    GLPK.Optimizer()
end

suite(exclude = [r"delete"]) do
    Gurobi.Optimizer(OutputFlag=0)
end
```
"""
function suite(new_model::Function; exclude::Vector{Regex} = Regex[])
    group = BenchmarkGroup()
    for (name, func) in BENCHMARKS
        # Skip benchmarks whose name matches one of the `exclude` patterns.
        # `any` with a predicate short-circuits and avoids allocating the
        # intermediate Bool array a broadcast would create.
        any(pattern -> occursin(pattern, name), exclude) && continue
        group[name] = @benchmarkable $func($new_model)
    end
    return group
end
|  | 39 | + | 
"""
    create_baseline(suite, name::String; directory::String = "", kwargs...)

Run all benchmarks in `suite` and save to files called `name` in `directory`.

Extra `kwargs` are passed to `BenchmarkTools.run`.

### Examples

```julia
my_suite = suite(() -> GLPK.Optimizer())
create_baseline(my_suite, "glpk_master"; directory = "/tmp", verbose = true)
```
"""
function create_baseline(
    suite::BenchmarkTools.BenchmarkGroup, name::String; directory::String = "",
    kwargs...
)
    # Tune and save the parameters so `compare_against_baseline` can re-use
    # them, making later runs directly comparable to this baseline.
    tune!(suite)
    BenchmarkTools.save(joinpath(directory, name * "_params.json"), params(suite))
    results = run(suite; kwargs...)
    BenchmarkTools.save(joinpath(directory, name * "_baseline.json"), results)
    return
end
|  | 64 | + | 
"""
    compare_against_baseline(
        suite, name::String; directory::String = "",
        report_filename::String = "report.txt"
    )

Run all benchmarks in `suite` and compare against files called `name` in
`directory` that were created by a call to `create_baseline`.

A report summarizing the comparison is written to `report_filename` in
`directory`.

Extra `kwargs` are passed to `BenchmarkTools.run`.

### Examples

```julia
my_suite = suite(() -> GLPK.Optimizer())
compare_against_baseline(
    my_suite, "glpk_master"; directory = "/tmp", verbose = true
)
```
"""
function compare_against_baseline(
    suite::BenchmarkTools.BenchmarkGroup, name::String;
    directory::String = "", report_filename::String = "report.txt", kwargs...
)
    params_filename = joinpath(directory, name * "_params.json")
    baseline_filename = joinpath(directory, name * "_baseline.json")
    if !isfile(params_filename) || !isfile(baseline_filename)
        error("You must create a baseline with `create_baseline` first.")
    end
    # Load the tuned parameters saved by `create_baseline` so this run uses
    # the same evals/samples as the baseline run.
    loadparams!(
        suite, BenchmarkTools.load(params_filename)[1], :evals, :samples
    )
    new_results = run(suite; kwargs...)
    old_results = BenchmarkTools.load(baseline_filename)[1]
    open(joinpath(directory, report_filename), "w") do io
        println(stdout, "\n========== Results ==========")
        println(io,     "\n========== Results ==========")
        for key in keys(new_results)
            # A benchmark may have been added since the baseline was created;
            # skip it with a note instead of throwing a KeyError.
            if !haskey(old_results, key)
                println(stdout, "\n", key, ": not found in baseline. Skipping.")
                println(io,     "\n", key, ": not found in baseline. Skipping.")
                continue
            end
            judgement = judge(
                BenchmarkTools.median(new_results[key]),
                BenchmarkTools.median(old_results[key])
            )
            println(stdout, "\n", key)
            println(io,     "\n", key)
            show(stdout, MIME"text/plain"(), judgement)
            show(io, MIME"text/plain"(), judgement)
        end
    end
    return
end
|  | 118 | + | 
|  | 119 | +### | 
|  | 120 | +### Benchmarks | 
|  | 121 | +### | 
|  | 122 | + | 
"""
    @add_benchmark(f)

Evaluate the function definition `f` and register it in the global
`BENCHMARKS` dictionary under the function's name (as a `String`).
"""
macro add_benchmark(f)
    # `f` is a `function name(new_model) ... end` expression; the function
    # name is the first argument of the call expression in the signature.
    name = f.args[1].args[1]
    return quote
        $(esc(f))
        BENCHMARKS[String($(Base.Meta.quot(name)))] = $(esc(name))
    end
end
|  | 130 | + | 
# Benchmark: add 10,000 variables one at a time with `MOI.add_variable`.
@add_benchmark function add_variable(new_model)
    model = new_model()
    # The loop counter is unused; only the call count matters.
    for _ in 1:10_000
        MOI.add_variable(model)
    end
    return model
end
|  | 138 | + | 
# Benchmark: add 10,000 variables in one vectorized `MOI.add_variables` call.
@add_benchmark function add_variables(new_model)
    model = new_model()
    _ = MOI.add_variables(model, 10_000)
    return model
end
|  | 144 | + | 
# Benchmark: add 10,000 `SingleVariable`-in-`LessThan` constraints, one
# `MOI.add_constraint` call per constraint.
@add_benchmark function add_variable_constraint(new_model)
    model = new_model()
    variables = MOI.add_variables(model, 10_000)
    for i in eachindex(variables)
        MOI.add_constraint(
            model,
            MOI.SingleVariable(variables[i]),
            MOI.LessThan(Float64(i)),
        )
    end
    return model
end
|  | 153 | + | 
# Benchmark: add 10,000 `SingleVariable`-in-`LessThan` constraints in one
# vectorized `MOI.add_constraints` call.
@add_benchmark function add_variable_constraints(new_model)
    model = new_model()
    x = MOI.add_variables(model, 10_000)
    functions = MOI.SingleVariable.(x)
    sets = [MOI.LessThan(Float64(i)) for i in 1:10_000]
    MOI.add_constraints(model, functions, sets)
    return model
end
|  | 164 | + | 
# Benchmark: delete 1,000 bounded variables one at a time.
@add_benchmark function delete_variable(new_model)
    model = new_model()
    variables = MOI.add_variables(model, 1_000)
    for v in variables
        MOI.add_constraint(model, MOI.SingleVariable(v), MOI.LessThan(1.0))
    end
    for v in variables
        MOI.delete(model, v)
    end
    return model
end
|  | 172 | + | 
# Benchmark: delete 1,000 variable bound constraints, both before and after a
# solve, to exercise deletion in an unsolved and a solved model.
@add_benchmark function delete_variable_constraint(new_model)
    model = new_model()
    variables = MOI.add_variables(model, 1_000)
    constraints = [
        MOI.add_constraint(model, MOI.SingleVariable(v), MOI.LessThan(1.0))
        for v in variables
    ]
    for c in constraints
        MOI.delete(model, c)
    end
    # Re-add the same bounds, solve, then delete again so deletion after an
    # `optimize!` call is also measured.
    constraints = [
        MOI.add_constraint(model, MOI.SingleVariable(v), MOI.LessThan(1.0))
        for v in variables
    ]
    MOI.set(model, MOI.ObjectiveSense(), MOI.MAX_SENSE)
    objective = MOI.ScalarAffineFunction(
        [MOI.ScalarAffineTerm(1.0, v) for v in variables], 0.0
    )
    MOI.set(
        model,
        MOI.ObjectiveFunction{MOI.ScalarAffineFunction{Float64}}(),
        objective,
    )
    MOI.optimize!(model)
    for c in constraints
        MOI.delete(model, c)
    end
    return model
end
|  | 192 | + | 
# Benchmark: add 10,000 `ScalarAffineFunction`-in-`LessThan` constraints, one
# `MOI.add_constraint` call per constraint.
@add_benchmark function add_constraint(new_model)
    model = new_model()
    variables = MOI.add_variables(model, 10_000)
    for i in eachindex(variables)
        f = MOI.ScalarAffineFunction(
            [MOI.ScalarAffineTerm(1.0, variables[i])], 0.0
        )
        MOI.add_constraint(model, f, MOI.LessThan(Float64(i)))
    end
    return model
end
|  | 205 | + | 
# Benchmark: add 10,000 `ScalarAffineFunction`-in-`LessThan` constraints in
# one vectorized `MOI.add_constraints` call.
@add_benchmark function add_constraints(new_model)
    model = new_model()
    x = MOI.add_variables(model, 10_000)
    MOI.add_constraints(
        model,
        [MOI.ScalarAffineFunction([MOI.ScalarAffineTerm(1.0, xi)], 0.0) for xi in x],
        # Written as `1.0:10_000.0` for consistency with
        # `add_variable_constraints`; the values (1.0, 2.0, …, 10_000.0) are
        # identical to the previous `1:1.0:10_000` form.
        MOI.LessThan.(1.0:10_000.0)
    )
    return model
end
|  | 216 | + | 
# Benchmark: delete 1,000 `ScalarAffineFunction`-in-`LessThan` constraints one
# at a time.
@add_benchmark function delete_constraint(new_model)
    model = new_model()
    variables = MOI.add_variables(model, 1_000)
    # Collect the constraint indices with a comprehension rather than filling
    # a pre-allocated vector.
    constraints = [
        MOI.add_constraint(
            model,
            MOI.ScalarAffineFunction(
                [MOI.ScalarAffineTerm(1.0, variables[i])], 0.0
            ),
            MOI.LessThan(Float64(i)),
        )
        for i in eachindex(variables)
    ]
    for c in constraints
        MOI.delete(model, c)
    end
    return model
end
|  | 235 | + | 
|  | 236 | +end | 
0 commit comments