diff --git a/benchmark/Manifest.toml b/benchmark/Manifest.toml
index 2ccc280..8e66266 100644
--- a/benchmark/Manifest.toml
+++ b/benchmark/Manifest.toml
@@ -2,13 +2,19 @@
julia_version = "1.9.3"
manifest_format = "2.0"
-project_hash = "f006ef060ddbf480ce33ee7b47ad003b561e8a33"
+project_hash = "7e3bfad4520897296adf2d24a1291fe14f898463"
 
[[deps.AbstractTrees]]
git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.4.4"
+[[deps.ArgParse]]
+deps = ["Logging", "TextWrap"]
+git-tree-sha1 = "3102bce13da501c9104df33549f511cd25264d7d"
+uuid = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
+version = "1.1.4"
+
[[deps.ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
version = "1.1.1"
@@ -571,6 +577,11 @@ version = "0.1.7"
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+[[deps.TextWrap]]
+git-tree-sha1 = "9250ef9b01b66667380cf3275b3f7488d0e25faf"
+uuid = "b718987f-49a8-5099-9789-dcd902bef87d"
+version = "1.0.1"
+
[[deps.UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
diff --git a/benchmark/Project.toml b/benchmark/Project.toml
index 67822c4..ab58144 100644
--- a/benchmark/Project.toml
+++ b/benchmark/Project.toml
@@ -1,4 +1,5 @@
[deps]
+ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63"
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Dictionaries = "85a47980-9c8c-11e8-2b9f-f7ca1fa99fb4"
Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f"
diff --git a/benchmark/run.jl b/benchmark/run.jl
index 9f81e6d..af6022e 100644
--- a/benchmark/run.jl
+++ b/benchmark/run.jl
@@ -4,8 +4,11 @@
# > julia benchmark/run.jl
#
# By default it will run all of the benchmark suites.
-# To select a subset, pass them in as ARGS:
-# > julia benchmark/run.jl mttkrp
+# To select a subset of the suites, pass the suite name via the --suite command-line argument:
+# > julia benchmark/run.jl --suite mttkrp
+#
+# To compare against a previous commit, pass its git id via the --compare command-line argument:
+# > julia benchmark/run.jl --compare eca7cb4
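+#
+# The two options can be combined to compare a single suite against a previous commit:
+# > julia benchmark/run.jl --suite mttkrp --compare eca7cb4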
#
# This script produces/overwrites two files:
# + `benchmark/results.json` : results from the benchmark run
@@ -14,16 +17,42 @@
## Make sure the benchmark environment is activated
import Pkg
Pkg.activate(@__DIR__)
+Pkg.instantiate()
 
## Run benchmarks
-using GCPDecompositions, PkgBenchmark
-results =
- isempty(ARGS) ? benchmarkpkg(GCPDecompositions) :
- benchmarkpkg(
- GCPDecompositions,
- BenchmarkConfig(; env = Dict("GCP_BENCHMARK_SUITES" => join(ARGS, ' '))),
- )
-writeresults(joinpath(@__DIR__, "results.json"), results)
+using GCPDecompositions, PkgBenchmark, ArgParse
+
+settings = ArgParseSettings()
+@add_arg_table! settings begin
+ "--suite"
+ help = "which suite to run benchmarks for"
+ arg_type = String
+ default = "all"
+ "--compare"
+ help = "git id for previous commit to compare current version against"
+ arg_type = String
+ default = "none"
+end
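+
+# parse_args returns a Dict keyed by option name,
+# e.g. Dict("suite" => "mttkrp", "compare" => "none")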
+parsed_args = parse_args(settings)
+
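+# benchmarkpkg returns a BenchmarkResults for the current version, while judge
+# returns a BenchmarkJudgement comparing against the given baseline commit.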
+if parsed_args["compare"] == "none"
+ results =
+ parsed_args["suite"] == "all" ? benchmarkpkg(GCPDecompositions) :
+ benchmarkpkg(
+ GCPDecompositions,
+ BenchmarkConfig(; env = Dict("GCP_BENCHMARK_SUITES" => parsed_args["suite"])),
+ )
+ writeresults(joinpath(@__DIR__, "results.json"), results)
+else
+    # judge takes the target config first and the baseline second; set the
+    # suite filter on both so the two runs benchmark the same suite
+    suite_env = Dict("GCP_BENCHMARK_SUITES" => parsed_args["suite"])
+    results =
+        parsed_args["suite"] == "all" ? judge(GCPDecompositions, parsed_args["compare"]) :
+        judge(
+            GCPDecompositions,
+            BenchmarkConfig(; env = suite_env),
+            BenchmarkConfig(; id = parsed_args["compare"], env = suite_env),
+        )
+ writeresults(joinpath(@__DIR__, "results.json"), results)
+end
 
## Generate report and save
using BenchmarkTools, Dictionaries, SplitApplyCombine, UnicodePlots
@@ -59,6 +88,8 @@ if haskey(PkgBenchmark.benchmarkgroup(results), "mttkrp")
return key => result
end
+ plot_vars = ["size"]
+
# Runtime vs. size (for square tensors)
size_sweeps = (sortkeys ∘ group)(
((key, _),) -> (; ndims = length(key.size), rank = key.rank, mode = key.mode),
@@ -80,6 +111,7 @@ if haskey(PkgBenchmark.benchmarkgroup(results), "mttkrp")
end
size_report = """
## Runtime vs. size (for square tensors)
+    Below are plots showing the runtime in milliseconds of MTTKRP as a function of the size of the square tensor, for varying ranks and modes:
$(join(["$(string(key)[begin+1:end-1]) | " for key in keys(size_plts)], '\n'))
@@ -111,6 +143,7 @@ if haskey(PkgBenchmark.benchmarkgroup(results), "mttkrp")
end
rank_report = """
## Runtime vs. rank
+    Below are plots showing the runtime in milliseconds of MTTKRP as a function of the rank, for varying sizes and modes:
$(join(["$(string(key)[begin+1:end-1]) | " for key in keys(rank_plts)], '\n'))
@@ -141,6 +174,7 @@ if haskey(PkgBenchmark.benchmarkgroup(results), "mttkrp")
end
mode_report = """
## Runtime vs. mode
+    Below are plots showing the runtime in milliseconds of MTTKRP as a function of the mode, for varying sizes and ranks:
$(join(["$(string(key)[begin+1:end-1]) | " for key in keys(mode_plts)], '\n'))
diff --git a/benchmark/suites/mttkrp.jl b/benchmark/suites/mttkrp.jl
index 3b66aac..bb03021 100644
--- a/benchmark/suites/mttkrp.jl
+++ b/benchmark/suites/mttkrp.jl
@@ -16,7 +16,8 @@ szs = [
(200, 200, 200),
]
ns = 1:3
-rs = 20:20:200
+# NOTE: currently benchmarks a single rank; the full sweep is 20:20:200
+rs = 20
for sz in szs, r in rs, n in ns
Random.seed!(0)