Add authoring benchmarks section
dianaclarke committed Jun 21, 2021
1 parent 7a92164 commit 5ccd9c3
Showing 4 changed files with 158 additions and 8 deletions.
59 changes: 56 additions & 3 deletions README.md
@@ -240,15 +240,68 @@ class SimpleBenchmark(conbench.runner.Benchmark):
### Example external benchmarks

An "external benchmark" records results that were obtained from some other
-benchmarking tool (like executing the Arrow C++ micro benchmarks from command
-line, parsing the resulting JSON, and recording those results).
+benchmarking tool (like executing an R benchmark from the command line, parsing
+the resulting JSON, and recording those results).

Implementation details: Note that the following benchmark sets
`external = True`, and calls `record()` rather than `benchmark()` as the
example above does.

```python
@conbench.runner.register_benchmark
class ExternalBenchmark(conbench.runner.Benchmark):
    """Example benchmark that just records external results.

    Usage: conbench external [OPTIONS]

      Run external benchmark.

    Options:
      --iterations INTEGER   [default: 1]
      --drop-caches BOOLEAN  [default: False]
      --gc-collect BOOLEAN   [default: True]
      --gc-disable BOOLEAN   [default: True]
      --show-result BOOLEAN  [default: True]
      --show-output BOOLEAN  [default: False]
      --run-id TEXT          Group executions together with a run id.
      --run-name TEXT        Name of run (commit, pull request, etc).
      --help                 Show this message and exit.
    """

    external = True
    name = "external"

    def __init__(self):
        self.conbench = conbench.runner.Conbench()

    def run(self, **kwargs):
        tags = {"year": "2020"}
        context = {"benchmark_language": "Python"}
        github_info = {
            "commit": "02addad336ba19a654f9c857ede546331be7b631",
            "repository": "https://github.com/apache/arrow",
        }

        # external results from somewhere
        # (an API call, command line execution, etc)
        result = {
            "data": [100, 200, 300],
            "unit": "i/s",
            "times": [0.100, 0.200, 0.300],
            "time_unit": "s",
        }

        benchmark, output = self.conbench.record(
            result,
            self.name,
            tags,
            context,
            github_info,
            kwargs,
            output=result["data"],
        )
        self.conbench.publish(benchmark)
        yield benchmark, output
```
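
As a quick orientation, here is a minimal sketch of driving this example directly from Python rather than through the CLI, mirroring the `test_runner_external_benchmark` test added in `conbench/tests/benchmark/test_runner.py` below; the import path is illustrative, and `publish()` is mocked out so no Conbench server is required:

```python
# Illustrative sketch: run the example ExternalBenchmark and inspect what it
# records, mirroring the new test. The import path is an assumption about
# where the example module lives (conbench/tests/benchmark/).
import unittest.mock

from conbench.tests.benchmark._example_benchmarks import ExternalBenchmark

benchmark = ExternalBenchmark()

# Patch out publishing so the recorded result is not POSTed anywhere.
with unittest.mock.patch("conbench.util.Connection.publish"):
    [(result, output)] = benchmark.run()

print(output)                   # [100, 200, 300]
print(result["tags"])           # {"name": "external", "year": "2020"}
print(result["stats"]["data"])  # the three externally recorded values
```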

### Example case benchmarks
40 changes: 40 additions & 0 deletions conbench/tests/benchmark/_example_benchmarks.py
@@ -45,6 +45,46 @@ def func():
        yield benchmark, output


@conbench.runner.register_benchmark
class ExternalBenchmark(conbench.runner.Benchmark):
    """Example benchmark that just records external results."""

    external = True
    name = "external"

    def __init__(self):
        self.conbench = conbench.runner.Conbench()

    def run(self, **kwargs):
        tags = {"year": "2020"}
        context = {"benchmark_language": "Python"}
        github_info = {
            "commit": "02addad336ba19a654f9c857ede546331be7b631",
            "repository": "https://github.com/apache/arrow",
        }

        # external results from somewhere
        # (an API call, command line execution, etc)
        result = {
            "data": [100, 200, 300],
            "unit": "i/s",
            "times": [0.100, 0.200, 0.300],
            "time_unit": "s",
        }

        benchmark, output = self.conbench.record(
            result,
            self.name,
            tags,
            context,
            github_info,
            kwargs,
            output=result["data"],
        )
        self.conbench.publish(benchmark)
        yield benchmark, output


@conbench.runner.register_benchmark
class CasesBenchmark(conbench.runner.Benchmark):
    """Example benchmark with cases, an option, and an argument."""
40 changes: 40 additions & 0 deletions conbench/tests/benchmark/test_cli.py
@@ -14,6 +14,7 @@
Commands:
  addition     Run addition benchmark.
  external     Run external benchmark.
  list         List of benchmarks (for orchestration).
  subtraction  Run subtraction benchmark(s).
"""
@@ -23,6 +24,9 @@
    {
        "command": "addition --iterations=2"
    },
    {
        "command": "external --iterations=2"
    },
    {
        "command": "subtraction --all=true --iterations=2"
    }
@@ -94,6 +98,26 @@
"""


CONBENCH_EXTERNAL = """
Benchmark output:
[100, 200, 300]
"""


CONBENCH_EXTERNAL_HELP = """
Usage: conbench external [OPTIONS]

  Run external benchmark.

Options:
  --show-result BOOLEAN  [default: True]
  --show-output BOOLEAN  [default: False]
  --run-id TEXT          Group executions together with a run id.
  --run-name TEXT        Name of run (commit, pull request, etc).
  --help                 Show this message and exit.
"""


this_dir = os.path.dirname(os.path.abspath(__file__))
register_benchmarks(this_dir)

@@ -156,6 +180,22 @@ def test_conbench_command_with_cases_help(runner):
    assert_command_output(result, CONBENCH_SUBTRACTION_HELP)


def test_conbench_command_external(runner):
    from conbench.cli import conbench

    command = "external --show-result=false --show-output=true"
    with unittest.mock.patch("conbench.util.Connection.publish"):
        result = runner.invoke(conbench, command)
    assert_command_output(result, CONBENCH_EXTERNAL)


def test_conbench_command_external_help(runner):
    from conbench.cli import conbench

    result = runner.invoke(conbench, "external --help")
    assert_command_output(result, CONBENCH_EXTERNAL_HELP)


def test_conbench_list(runner):
    from conbench.cli import conbench

27 changes: 22 additions & 5 deletions conbench/tests/benchmark/test_runner.py
@@ -1,4 +1,4 @@
-from ._example_benchmarks import CasesBenchmark, SimpleBenchmark
+from ._example_benchmarks import CasesBenchmark, ExternalBenchmark, SimpleBenchmark
from ...entities.summary import BenchmarkFacadeSchema

example = {
@@ -77,7 +77,7 @@ def assert_keys_equal(a, b):
    assert set(a.keys()) == set(b.keys())


-def test_runner_without_cases():
+def test_runner_simple_benchmark():
    benchmark = SimpleBenchmark()
    [(result, output)] = benchmark.run(iterations=10)
    assert not BenchmarkFacadeSchema.create.validate(result)
@@ -89,15 +89,14 @@ def test_runner_without_cases():
    assert_keys_equal(result, example)
    assert_keys_equal(result["tags"], expected_tags)
    assert_keys_equal(result["stats"], example["stats"])
    assert_keys_equal(result["context"], example["context"])
    assert_keys_equal(result["machine_info"], example["machine_info"])
    assert result["tags"] == expected_tags
    assert result["stats"]["iterations"] == 10
    assert len(result["stats"]["data"]) == 10
    assert result["context"]["benchmark_language"] == "Python"


-def test_runner_with_cases():
+def test_runner_case_benchmark():
    benchmark = CasesBenchmark()
    case = ("pink", "apple")
    [(result, output)] = benchmark.run("sample", case=case, iterations=10)
@@ -113,9 +112,27 @@ def test_runner_with_cases():
    assert_keys_equal(result, example)
    assert_keys_equal(result["tags"], expected_tags)
    assert_keys_equal(result["stats"], example["stats"])
    assert_keys_equal(result["context"], example["context"])
    assert_keys_equal(result["machine_info"], example["machine_info"])
    assert result["tags"] == expected_tags
    assert result["stats"]["iterations"] == 10
    assert len(result["stats"]["data"]) == 10
    assert result["context"]["benchmark_language"] == "Python"


def test_runner_external_benchmark():
    benchmark = ExternalBenchmark()
    [(result, output)] = benchmark.run()
    assert not BenchmarkFacadeSchema.create.validate(result)
    expected_tags = {
        "year": "2020",
        "name": "external",
    }
    assert output == [100, 200, 300]
    assert_keys_equal(result, example)
    assert_keys_equal(result["tags"], expected_tags)
    assert_keys_equal(result["stats"], example["stats"])
    assert_keys_equal(result["machine_info"], example["machine_info"])
    assert result["tags"] == expected_tags
    assert result["stats"]["iterations"] == 3
    assert len(result["stats"]["data"]) == 3
    assert result["context"]["benchmark_language"] == "Python"
