66
77module Benchmarks
88
9- import BenchmarkTools
109import MathOptInterface as MOI
1110
1211const BENCHMARKS = Dict {String,Function} ()
@@ -22,26 +21,24 @@ arguments, and returns a new instance of the optimizer you wish to benchmark.
2221
2322Use `exclude` to exclude a subset of benchmarks.
2423
24+ ## BenchmarkTools
25+
26+ To use this function you must first install and load the `BenchmarkTools.jl`
27+ package.
28+
2529## Example
2630
2731```julia
28- julia> MOI.Benchmarks.suite() do
29- return GLPK.Optimizer()
30- end
32+ julia> import BenchmarkTools, GLPK, Gurobi
33+
34+ julia> MOI.Benchmarks.suite(GLPK.Optimizer)
3135
3236julia> MOI.Benchmarks.suite(; exclude = [r"delete"]) do
3337 return Gurobi.Optimizer()
3438 end
3539```
3640"""
37- function suite (new_model:: Function ; exclude:: Vector{Regex} = Regex[])
38- group = BenchmarkTools. BenchmarkGroup ()
39- for (name, func) in BENCHMARKS
40- any (occursin .(exclude, Ref (name))) && continue
41- group[name] = BenchmarkTools. @benchmarkable $ func ($ new_model)
42- end
43- return group
44- end
41+ function suite end
4542
4643"""
4744    create_baseline(suite, name::String; directory::String = "", kwargs...)
@@ -50,12 +47,17 @@ Run all benchmarks in `suite` and save to files called `name` in `directory`.
5047
5148Extra `kwargs` are passed to `BenchmarkTools.run`.
5249
50+ ## BenchmarkTools
51+
52+ To use this function you must first install and load the `BenchmarkTools.jl`
53+ package.
54+
5355## Example
5456
5557```julia
56- julia> import GLPK
58+ julia> import BenchmarkTools, GLPK
5759
58- julia> my_suite = MOI.Benchmarks.suite(() -> GLPK.Optimizer() );
60+ julia> my_suite = MOI.Benchmarks.suite(GLPK.Optimizer);
5961
6062julia> MOI.Benchmarks.create_baseline(
6163 my_suite,
@@ -65,21 +67,7 @@ julia> MOI.Benchmarks.create_baseline(
6567 )
6668```
6769"""
68- function create_baseline (
69- suite:: BenchmarkTools.BenchmarkGroup ,
70- name:: String ;
71- directory:: String = " " ,
72- kwargs... ,
73- )
74- BenchmarkTools. tune! (suite)
75- BenchmarkTools. save (
76- joinpath (directory, name * " _params.json" ),
77- BenchmarkTools. params (suite),
78- )
79- results = BenchmarkTools. run (suite; kwargs... )
80- BenchmarkTools. save (joinpath (directory, name * " _baseline.json" ), results)
81- return
82- end
70+ function create_baseline end
8371
8472"""
8573 compare_against_baseline(
@@ -95,12 +83,17 @@ A report summarizing the comparison is written to `report_filename` in
9583
9786Extra `kwargs` are passed to `BenchmarkTools.run`.
9785
86+ ## BenchmarkTools
87+
88+ To use this function you must first install and load the `BenchmarkTools.jl`
89+ package.
90+
9891## Example
9992
10093```julia
101- julia> import GLPK
94+ julia> import BenchmarkTools, GLPK
10295
103- julia> my_suite = MOI.Benchmarks.suite(() -> GLPK.Optimizer() );
96+ julia> my_suite = MOI.Benchmarks.suite(GLPK.Optimizer);
10497
10598julia> MOI.Benchmarks.compare_against_baseline(
10699 my_suite,
@@ -110,42 +103,7 @@ julia> MOI.Benchmarks.compare_against_baseline(
110103 )
111104```
112105"""
113- function compare_against_baseline (
114- suite:: BenchmarkTools.BenchmarkGroup ,
115- name:: String ;
116- directory:: String = " " ,
117- report_filename:: String = " report.txt" ,
118- kwargs... ,
119- )
120- params_filename = joinpath (directory, name * " _params.json" )
121- baseline_filename = joinpath (directory, name * " _baseline.json" )
122- if ! isfile (params_filename) || ! isfile (baseline_filename)
123-         error (" You must create a baseline with `create_baseline` first." )
124- end
125- BenchmarkTools. loadparams! (
126- suite,
127- BenchmarkTools. load (params_filename)[1 ],
128- :evals ,
129- :samples ,
130- )
131- new_results = BenchmarkTools. run (suite; kwargs... )
132- old_results = BenchmarkTools. load (baseline_filename)[1 ]
133- open (joinpath (directory, report_filename), " w" ) do io
134- println (stdout , " \n ========== Results ==========" )
135- println (io, " \n ========== Results ==========" )
136- for key in keys (new_results)
137- judgement = BenchmarkTools. judge (
138- BenchmarkTools. median (new_results[key]),
139- BenchmarkTools. median (old_results[key]),
140- )
141- println (stdout , " \n " , key)
142- println (io, " \n " , key)
143- show (stdout , MIME " text/plain" (), judgement)
144- show (io, MIME " text/plain" (), judgement)
145- end
146- end
147- return
148- end
106+ function compare_against_baseline end
149107
150108# ##
151109# ## Benchmarks
0 commit comments