Commit d4a624f
Add run_command benchcomp visualization (rust-lang#2542)
This allows users to write their own custom visualization script to run after the benchmarks complete. Prior to this commit, visualizations had to be checked into the Kani repository. When `run_command` is specified as a visualization, benchcomp runs the specified command and passes the result of the run as JSON on stdin; the command can then process the result however it likes. This resolves rust-lang#2518.
1 parent 987c9ce commit d4a624f
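
As an illustration only (not part of this commit), a script invoked through `run_command` might look like the following minimal sketch. It assumes nothing beyond what the commit establishes: the result arrives as JSON on stdin, with top-level "benchmarks" and "metrics" keys as exercised by the regression tests below; the filename `my_visualization.py` echoes the placeholder used in the new docstring.

```python
#!/usr/bin/env python3
# my_visualization.py -- hypothetical consumer script for the new
# run_command visualization. benchcomp pipes the combined benchmark
# result to this process as JSON on stdin.

import json
import sys


def main():
    # Decode the result document that benchcomp writes to our stdin.
    result = json.load(sys.stdin)

    # Top-level "benchmarks" and "metrics" keys are the shape the
    # regression tests below check for.
    print(f"benchmarks: {len(result.get('benchmarks', {}))}")
    print(f"metrics:    {len(result.get('metrics', {}))}")


if __name__ == "__main__":
    main()
```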

File tree

4 files changed: +138 -4 lines changed

docs/src/SUMMARY.md

Lines changed: 0 additions & 1 deletion

````diff
@@ -35,7 +35,6 @@
 - [Performance comparisons](./performance-comparisons.md)
 - [`benchcomp` command line](./benchcomp-cli.md)
 - [`benchcomp` configuration file](./benchcomp-conf.md)
-- [Custom visualizations](./benchcomp-viz.md)
 - [Custom parsers](./benchcomp-parse.md)

 - [Limitations](./limitations.md)
````

docs/src/benchcomp-conf.md

Lines changed: 1 addition & 1 deletion

````diff
@@ -1,7 +1,7 @@
 # `benchcomp` configuration file

 `benchcomp`'s operation is controlled through a YAML file---`benchcomp.yaml` by default or a file passed to the `-c/--config` option.
-This page describes the file's schema and lists the different parsers and visualizations that are available.
+This page lists the different visualizations that are available.


 ## Built-in visualizations
````
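
For reference, enabling the new visualization from `benchcomp.yaml` takes an entry like the one below; this is the sample configuration from the docstring added in the next file, with `./my_visualization.py` standing in for any executable.

```yaml
visualize:
- type: run_command
  command: ./my_visualization.py
```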

tools/benchcomp/benchcomp/visualizers/__init__.py

Lines changed: 40 additions & 2 deletions

````diff
@@ -3,6 +3,8 @@


 import dataclasses
+import json
+import subprocess
 import textwrap

 import jinja2
@@ -12,8 +14,44 @@
 import benchcomp.visualizers.utils as viz_utils


-# TODO The doc comment should appear in the help output, which should list all
-# available checks.
+
+@dataclasses.dataclass
+class run_command:
+    """Run an executable command, passing the performance metrics as JSON on stdin.
+
+    This allows you to write your own visualization, which reads a result file
+    on stdin and does something with it, e.g. writing out a graph or other
+    output file.
+
+    Sample configuration:
+
+    ```
+    visualize:
+    - type: run_command
+      command: ./my_visualization.py
+    ```
+    """
+
+    command: str
+
+
+    def __call__(self, results):
+        results = json.dumps(results, indent=2)
+        try:
+            proc = subprocess.Popen(
+                self.command, shell=True, text=True, stdin=subprocess.PIPE)
+            _, _ = proc.communicate(input=results)
+        except (OSError, subprocess.SubprocessError) as exe:
+            logging.error(
+                "visualization command '%s' failed: %s", self.command, str(exe))
+            viz_utils.EXIT_CODE = 1
+        if proc.returncode:
+            logging.error(
+                "visualization command '%s' exited with code %d",
+                self.command, proc.returncode)
+            viz_utils.EXIT_CODE = 1
+
+

 @dataclasses.dataclass
 class error_on_regression:
````
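
To make the control flow above concrete, here is a hypothetical direct use of the new class (benchcomp itself constructs the instance from the `visualize` config entry rather than by hand):

```python
from benchcomp.visualizers import run_command

# The dataclass takes the shell command as its only field.
viz = run_command(command="cat - > /tmp/results.json")

# __call__ serializes the results dict to JSON, pipes it to the
# command's stdin, and sets viz_utils.EXIT_CODE = 1 if the command
# fails to start or exits non-zero.
viz({"benchmarks": {}, "metrics": {}})
```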

tools/benchcomp/test/test_regression.py

Lines changed: 97 additions & 0 deletions

````diff
@@ -10,6 +10,7 @@
 import tempfile
 import textwrap
 import unittest
+import uuid

 import yaml

@@ -737,3 +738,99 @@ def test_command_parser(self):

         for item in ["benchmarks", "metrics"]:
             self.assertIn(item, result)
+
+
+    def test_run_command_visualization(self):
+        """Ensure that the run_command visualization can execute a command"""
+
+        with tempfile.TemporaryDirectory() as tmp:
+            out_file = pathlib.Path(tmp) / str(uuid.uuid4())
+            run_bc = Benchcomp({
+                "variants": {
+                    "v1": {
+                        "config": {
+                            "command_line": "true",
+                            "directory": tmp,
+                        }
+                    },
+                    "v2": {
+                        "config": {
+                            "command_line": "true",
+                            "directory": tmp,
+                        }
+                    }
+                },
+                "run": {
+                    "suites": {
+                        "suite_1": {
+                            "parser": {
+                                "command": """
+                                    echo '{
+                                        "benchmarks": {},
+                                        "metrics": {}
+                                    }'
+                                    """
+                            },
+                            "variants": ["v2", "v1"]
+                        }
+                    }
+                },
+                "visualize": [{
+                    "type": "run_command",
+                    "command": f"cat - > {out_file}"
+                }],
+            })
+            run_bc()
+            self.assertEqual(
+                run_bc.proc.returncode, 0, msg=run_bc.stderr)
+
+            with open(out_file) as handle:
+                result = yaml.safe_load(handle)
+
+            for item in ["benchmarks", "metrics"]:
+                self.assertIn(item, result)
+
+
+    def test_run_failing_command_visualization(self):
+        """Ensure that benchcomp terminates with a non-zero return code when run_command visualization fails"""
+
+        with tempfile.TemporaryDirectory() as tmp:
+            out_file = pathlib.Path(tmp) / str(uuid.uuid4())
+            run_bc = Benchcomp({
+                "variants": {
+                    "v1": {
+                        "config": {
+                            "command_line": "true",
+                            "directory": tmp,
+                        }
+                    },
+                    "v2": {
+                        "config": {
+                            "command_line": "true",
+                            "directory": tmp,
+                        }
+                    }
+                },
+                "run": {
+                    "suites": {
+                        "suite_1": {
+                            "parser": {
+                                "command": """
+                                    echo '{
+                                        "benchmarks": {},
+                                        "metrics": {}
+                                    }'
+                                    """
+                            },
+                            "variants": ["v2", "v1"]
+                        }
+                    }
+                },
+                "visualize": [{
+                    "type": "run_command",
+                    "command": f"cat - > {out_file}; false"
+                }],
+            })
+            run_bc()
+            self.assertNotEqual(
+                run_bc.proc.returncode, 0, msg=run_bc.stderr)
````
