Skip to content

Commit e1dcd91

Browse files
committed
Add new cprofile options.
1 parent 61229eb commit e1dcd91

File tree

5 files changed

+88
-45
lines changed

5 files changed

+88
-45
lines changed

CHANGELOG.rst

+6
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,12 @@ Changelog
1212
Contributed by Tony Kuo in `#257 <https://github.com/ionelmc/pytest-benchmark/pull/257>`_.
1313
* Fixes spelling in some help texts.
1414
Contributed by Eugeniy in `#267 <https://github.com/ionelmc/pytest-benchmark/pull/267>`_.
15+
* Added new cprofile options:
16+
17+
- ``--benchmark-cprofile-loops=LOOPS`` - previously profiling only ran the function once, this allows customization.
18+
- ``--benchmark-cprofile-top=COUNT`` - allows showing more rows.
19+
- ``--benchmark-cprofile-dump=[FILENAME-PREFIX]`` - allows saving to a file (that you can load in `snakeviz <https://pypi.org/project/snakeviz/>`_ or other tools).
20+
1521

1622
4.0.0 (2022-10-26)
1723
------------------

docs/usage.rst

+49-37
Original file line numberDiff line numberDiff line change
@@ -77,20 +77,20 @@ Commandline options
7777
--benchmark-max-time=SECONDS
7878
Maximum run time per test - it will be repeated until
7979
this total time is reached. It may be exceeded if test
80-
function is very slow or --benchmark-min-rounds is
81-
large (it takes precedence). Default: '1.0'
80+
function is very slow or --benchmark-min-rounds is large
81+
(it takes precedence). Default: '1.0'
8282
--benchmark-min-rounds=NUM
83-
Minimum rounds, even if total time would exceed
84-
`--max-time`. Default: 5
83+
Minimum rounds, even if total time would exceed `--max-
84+
time`. Default: 5
8585
--benchmark-timer=FUNC
8686
Timer to use when measuring time. Default:
8787
'time.perf_counter'
8888
--benchmark-calibration-precision=NUM
89-
Precision to use when calibrating number of
90-
iterations. Precision of 10 will make the timer look
91-
10 times more accurate, at a cost of less precise
92-
measure of deviations. Default: 10
93-
--benchmark-warmup=KIND
89+
Precision to use when calibrating number of iterations.
90+
Precision of 10 will make the timer look 10 times more
91+
accurate, at a cost of less precise measure of
92+
deviations. Default: 10
93+
--benchmark-warmup=[KIND]
9494
Activates warmup. Will run the test function up to
9595
number of times in the calibration phase. See
9696
`--benchmark-warmup-iterations`. Note: Even the warmup
@@ -104,11 +104,11 @@ Commandline options
104104
Disable GC during benchmarks.
105105
--benchmark-skip Skip running any tests that contain benchmarks.
106106
--benchmark-disable Disable benchmarks. Benchmarked functions are only ran
107-
once and no stats are reported. Use this if you want
108-
to run the test but don't do any benchmarking.
109-
--benchmark-enable Forcibly enable benchmarks. Use this option to
110-
override --benchmark-disable (in case you have it in
111-
pytest configuration).
107+
once and no stats are reported. Use this if you want to
108+
run the test but don't do any benchmarking.
109+
--benchmark-enable Forcibly enable benchmarks. Use this option to override
110+
--benchmark-disable (in case you have it in pytest
111+
configuration).
112112
--benchmark-only Only run benchmarks. This overrides --benchmark-skip.
113113
--benchmark-save=NAME
114114
Save the current run into 'STORAGE-PATH/counter-
@@ -123,49 +123,61 @@ Commandline options
123123
stats.
124124
--benchmark-json=PATH
125125
Dump a JSON report into PATH. Note that this will
126-
include the complete data (all the timings, not just
127-
the stats).
128-
--benchmark-compare=NUM
126+
include the complete data (all the timings, not just the
127+
stats).
128+
--benchmark-compare=[NUM|_ID]
129129
Compare the current run against run NUM (or prefix of
130130
_id in elasticsearch) or the latest saved run if
131131
unspecified.
132-
--benchmark-compare-fail=EXPR
132+
--benchmark-compare-fail=EXPR [EXPR ...]
133133
Fail test if performance regresses according to given
134134
EXPR (eg: min:5% or mean:0.001 for number of seconds).
135135
Can be used multiple times.
136136
--benchmark-cprofile=COLUMN
137-
If specified measure one run with cProfile and stores
138-
10 top functions. Argument is a column to sort by.
139-
Available columns: 'ncalls_recursion', 'ncalls',
140-
'tottime', 'tottime_per', 'cumtime', 'cumtime_per',
141-
'function_name'.
137+
If specified cProfile will be enabled. Top functions
138+
will be stored for the given column. Available columns:
139+
'ncalls_recursion', 'ncalls', 'tottime', 'tottime_per',
140+
'cumtime', 'cumtime_per', 'function_name'.
141+
--benchmark-cprofile-loops=LOOPS
142+
How many times to run the function in cprofile.
143+
Available options: 'auto', or an integer.
144+
--benchmark-cprofile-top=COUNT
145+
How many rows to display.
146+
--benchmark-cprofile-dump=[FILENAME-PREFIX]
147+
Save cprofile dumps as FILENAME-PREFIX-test_name.prof.
148+
If FILENAME-PREFIX contains slashes ('/') then
149+
directories will be created. Default:
150+
'benchmark_20241028_160327'
151+
--benchmark-time-unit=COLUMN
152+
Unit to scale the results to. Available units: 'ns',
153+
'us', 'ms', 's'. Default: 'auto'.
142154
--benchmark-storage=URI
143155
Specify a path to store the runs as uri in form
144-
file\:\/\/path or elasticsearch+http[s]\:\/\/host1,host2/[in
145-
dex/doctype?project_name=Project] (when --benchmark-
146-
save or --benchmark-autosave are used). For backwards
156+
file://path or elasticsearch+http[s]://host1,host2/[inde
157+
x/doctype?project_name=Project] (when --benchmark-save
158+
or --benchmark-autosave are used). For backwards
147159
compatibility unexpected values are converted to
148-
file\:\/\/<value>. Default: 'file\:\/\/./.benchmarks'.
149-
--benchmark-netrc=BENCHMARK_NETRC
160+
file://<value>. Default: 'file://./.benchmarks'.
161+
--benchmark-netrc=[BENCHMARK_NETRC]
150162
Load elasticsearch credentials from a netrc file.
151163
Default: ''.
152164
--benchmark-verbose Dump diagnostic and progress information.
153-
--benchmark-sort=COL Column to sort on. Can be one of: 'min', 'max',
154-
'mean', 'stddev', 'name', 'fullname'. Default: 'min'
155-
--benchmark-group-by=LABELS
156-
Comma-separated list of categories by which to
157-
group tests. Can be one or more of: 'group', 'name',
158-
'fullname', 'func', 'fullfunc', 'param' or
159-
'param:NAME', where NAME is the name passed to
160-
@pytest.parametrize. Default: 'group'
165+
--benchmark-quiet Disable reporting. Verbose mode takes precedence.
166+
--benchmark-sort=COL Column to sort on. Can be one of: 'min', 'max', 'mean',
167+
'stddev', 'name', 'fullname'. Default: 'min'
168+
--benchmark-group-by=LABEL
169+
How to group tests. Can be one of: 'group', 'name',
170+
'fullname', 'func', 'fullfunc', 'param' or 'param:NAME',
171+
where NAME is the name passed to @pytest.parametrize.
172+
Default: 'group'
161173
--benchmark-columns=LABELS
162174
Comma-separated list of columns to show in the result
163175
table. Default: 'min, max, mean, stddev, median, iqr,
164176
outliers, ops, rounds, iterations'
165177
--benchmark-name=FORMAT
166178
How to format names in results. Can be one of 'short',
167179
'normal', 'long', or 'trial'. Default: 'normal'
168-
--benchmark-histogram=FILENAME-PREFIX
180+
--benchmark-histogram=[FILENAME-PREFIX]
169181
Plot graphs of min/max/avg/stddev over time in
170182
FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX
171183
contains slashes ('/') then directories will be

src/pytest_benchmark/fixture.py

+15-2
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,12 @@
66
import traceback
77
import typing
88
from math import ceil
9+
from pathlib import Path
910

1011
from .timers import compute_timer_precision
1112
from .utils import NameWrapper
1213
from .utils import format_time
14+
from .utils import slugify
1315

1416
try:
1517
import statistics
@@ -45,6 +47,7 @@ def __init__(
4547
disabled,
4648
cprofile,
4749
cprofile_loops,
50+
cprofile_dump,
4851
group=None,
4952
):
5053
self.name = node.name
@@ -75,6 +78,7 @@ def __init__(
7578
self._mode = None
7679
self.cprofile = cprofile
7780
self.cprofile_loops = cprofile_loops
81+
self.cprofile_dump = cprofile_dump
7882
self.cprofile_stats = None
7983
self.stats = None
8084

@@ -134,6 +138,15 @@ def _make_stats(self, iterations):
134138
self.stats = bench_stats
135139
return bench_stats
136140

141+
def _save_cprofile(self, profile: cProfile.Profile):
142+
stats = pstats.Stats(profile)
143+
self.stats.cprofile_stats = stats
144+
if self.cprofile_dump:
145+
output_file = Path(f'{self.cprofile_dump}-{slugify(self.name)}.prof')
146+
output_file.parent.mkdir(parents=True, exist_ok=True)
147+
stats.dump_stats(output_file)
148+
self._logger.info(f'Saved profile: {output_file}', bold=True)
149+
137150
def __call__(self, function_to_benchmark, *args, **kwargs):
138151
if self._mode:
139152
self.has_error = True
@@ -191,7 +204,7 @@ def _raw(self, function_to_benchmark, *args, **kwargs):
191204
profile = cProfile.Profile()
192205
for _ in cprofile_loops:
193206
function_result = profile.runcall(function_to_benchmark, *args, **kwargs)
194-
self.stats.cprofile_stats = pstats.Stats(profile)
207+
self._save_cprofile(profile)
195208
else:
196209
function_result = function_to_benchmark(*args, **kwargs)
197210
return function_result
@@ -260,7 +273,7 @@ def make_arguments(args=args, kwargs=kwargs):
260273
args, kwargs = make_arguments()
261274
for _ in cprofile_loops:
262275
profile.runcall(target, *args, **kwargs)
263-
self.stats.cprofile_stats = pstats.Stats(profile)
276+
self._save_cprofile(profile)
264277

265278
return result
266279

src/pytest_benchmark/plugin.py

+11
Original file line numberDiff line numberDiff line change
@@ -287,6 +287,17 @@ def pytest_addoption(parser):
287287
type=int,
288288
help='How many rows to display.',
289289
)
290+
cprofile_dump_prefix = f'benchmark_{get_current_time()}'
291+
group.addoption(
292+
'--benchmark-cprofile-dump',
293+
action='append',
294+
metavar='FILENAME-PREFIX',
295+
nargs='?',
296+
default=[],
297+
const=cprofile_dump_prefix,
298+
help='Save cprofile dumps as FILENAME-PREFIX-test_name.prof. If FILENAME-PREFIX contains'
299+
f" slashes ('/') then directories will be created. Default: {cprofile_dump_prefix!r}",
300+
)
290301
group.addoption(
291302
'--benchmark-time-unit',
292303
metavar='COLUMN',

src/pytest_benchmark/session.py

+7-6
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,10 @@ def __init__(self, config):
4343
default_machine_id=self.machine_id,
4444
netrc=config.getoption('benchmark_netrc'),
4545
)
46+
self.cprofile_sort_by = config.getoption('benchmark_cprofile')
47+
self.cprofile_loops = config.getoption('benchmark_cprofile_loops')
48+
self.cprofile_top = config.getoption('benchmark_cprofile_top')
49+
self.cprofile_dump = first_or_value(config.getoption('benchmark_cprofile_dump'), False)
4650
self.options = {
4751
'min_time': SecondsDecimal(config.getoption('benchmark_min_time')),
4852
'min_rounds': config.getoption('benchmark_min_rounds'),
@@ -52,14 +56,12 @@ def __init__(self, config):
5256
'disable_gc': config.getoption('benchmark_disable_gc'),
5357
'warmup': config.getoption('benchmark_warmup'),
5458
'warmup_iterations': config.getoption('benchmark_warmup_iterations'),
55-
'cprofile': bool(config.getoption('benchmark_cprofile')),
56-
'cprofile_loops': config.getoption('benchmark_cprofile_loops'),
59+
'cprofile': bool(self.cprofile_sort_by),
60+
'cprofile_loops': self.cprofile_loops,
61+
'cprofile_dump': self.cprofile_dump,
5762
}
5863
self.skip = config.getoption('benchmark_skip')
5964
self.disabled = config.getoption('benchmark_disable') and not config.getoption('benchmark_enable')
60-
self.cprofile_sort_by = config.getoption('benchmark_cprofile')
61-
self.cprofile_loops = config.getoption('benchmark_cprofile_loops')
62-
self.cprofile_top = config.getoption('benchmark_cprofile_top')
6365

6466
if config.getoption('dist', 'no') != 'no' and not self.skip and not self.disabled:
6567
self.logger.warning(
@@ -93,7 +95,6 @@ def __init__(self, config):
9395
self.compare = config.getoption('benchmark_compare')
9496
self.compare_fail = config.getoption('benchmark_compare_fail')
9597
self.name_format = NAME_FORMATTERS[config.getoption('benchmark_name')]
96-
9798
self.histogram = first_or_value(config.getoption('benchmark_histogram'), False)
9899

99100
def get_machine_info(self):

0 commit comments

Comments
 (0)