
Add a feature for using the same number of loops as a previous run #327

Merged (9 commits) on Feb 2, 2024
3 changes: 3 additions & 0 deletions doc/changelog.rst
@@ -1,6 +1,9 @@
Changelog
=========

* Add a --same-loops option to the run command to use the exact same number of
  loops as a previous run (without recalibrating).

Version 1.10.0 (2023-10-22)
---------------------------
* Add benchmark for asyncio_websockets
4 changes: 4 additions & 0 deletions doc/usage.rst
@@ -140,6 +140,10 @@ options::
  -p PYTHON, --python PYTHON
                        Python executable (default: use running
                        Python)
  --same-loops SAME_LOOPS
                        Use the same number of loops as a previous run
                        (i.e., don't recalibrate). Should be a path to a
                        .json file from a previous run.
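
The intended two-step workflow, sketched with illustrative file names (baseline.json and patched.json are assumptions, not from this diff):

    # First run: calibrate loop counts and record them in the output JSON
    pyperformance run -o baseline.json

    # Later runs: reuse the recorded loop counts instead of recalibrating
    pyperformance run --same-loops baseline.json -o patched.json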

show
----
4 changes: 4 additions & 0 deletions pyperformance/cli.py
@@ -75,6 +75,10 @@ def parse_args():
cmd.add_argument("--min-time", metavar="MIN_TIME",
help="Minimum duration in seconds of a single "
"value, used to calibrate the number of loops")
cmd.add_argument("--same-loops",
help="Use the same number of loops as a previous run "
"(i.e., don't recalibrate). Should be a path to a "
".json file from a previous run.")
filter_opts(cmd)

# show
3 changes: 3 additions & 0 deletions pyperformance/compile.py
@@ -543,6 +543,8 @@ def run_benchmark(self, python=None):
            cmd.extend(('--affinity', self.conf.affinity))
        if self.conf.debug:
            cmd.append('--debug-single-value')
        if self.conf.same_loops:
            cmd.append('--same-loops=%s' % self.conf.same_loops)
        exitcode = self.run_nocheck(*cmd)

        if os.path.exists(self.filename):
@@ -812,6 +814,7 @@ def getint(section, key, default=None):
    conf.benchmarks = getstr('run_benchmark', 'benchmarks', default='')
    conf.affinity = getstr('run_benchmark', 'affinity', default='')
    conf.upload = getboolean('run_benchmark', 'upload', False)
    conf.same_loops = getfile('run_benchmark', 'same_loops', default='')

    # paths
    conf.build_dir = os.path.join(conf.directory, 'build')
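
Since same_loops is read from the [run_benchmark] section via getfile() above, a compile configuration file could plausibly carry the option like this (a hedged sketch; the path is illustrative):

    [run_benchmark]
    # Reuse the loop counts recorded in a previous run's JSON output
    same_loops = /path/to/baseline.json
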
3 changes: 3 additions & 0 deletions pyperformance/data-files/benchmarks/MANIFEST
@@ -77,6 +77,9 @@ spectral_norm <local>
sqlalchemy_declarative <local>
sqlalchemy_imperative <local>
sqlglot <local>
sqlglot_parse <local:sqlglot>
sqlglot_transpile <local:sqlglot>
sqlglot_optimize <local:sqlglot>
sqlite_synth <local>
sympy <local>
telco <local>
3 changes: 3 additions & 0 deletions (new file: sqlglot_optimize variant metadata; path not shown)
@@ -0,0 +1,3 @@
[tool.pyperformance]
name = "sqlglot_optimize"
extra_opts = ["optimize"]
3 changes: 3 additions & 0 deletions (new file: sqlglot_parse variant metadata; path not shown)
@@ -0,0 +1,3 @@
[tool.pyperformance]
name = "sqlglot_parse"
extra_opts = ["parse"]
3 changes: 3 additions & 0 deletions (new file: sqlglot_transpile variant metadata; path not shown)
@@ -0,0 +1,3 @@
[tool.pyperformance]
name = "sqlglot_transpile"
extra_opts = ["transpile"]
1 change: 1 addition & 0 deletions pyperformance/data-files/benchmarks/bm_sqlglot/pyproject.toml
@@ -10,3 +10,4 @@ dynamic = ["version"]

[tool.pyperformance]
name = "sqlglot"
extra_opts = ["normalize"]
31 changes: 26 additions & 5 deletions pyperformance/data-files/benchmarks/bm_sqlglot/run_benchmark.py
@@ -164,10 +164,31 @@ def bench_normalize(loops):
    return elapsed


BENCHMARKS = {
    "parse": bench_parse,
    "transpile": bench_transpile,
    "optimize": bench_optimize,
    "normalize": bench_normalize
}


def add_cmdline_args(cmd, args):
    cmd.append(args.benchmark)


def add_parser_args(parser):
    parser.add_argument(
        "benchmark",
        choices=BENCHMARKS,
        help="Which benchmark to run."
    )


if __name__ == "__main__":
-    runner = pyperf.Runner()
+    runner = pyperf.Runner(add_cmdline_args=add_cmdline_args)
     runner.metadata['description'] = "SQLGlot benchmark"
-    runner.bench_time_func("sqlglot_parse", bench_parse)
-    runner.bench_time_func("sqlglot_transpile", bench_transpile)
-    runner.bench_time_func("sqlglot_optimize", bench_optimize)
-    runner.bench_time_func("sqlglot_normalize", bench_normalize)
+    add_parser_args(runner.argparser)
+    args = runner.parse_args()
+    benchmark = args.benchmark
+
+    runner.bench_time_func(f"sqlglot_{benchmark}", BENCHMARKS[benchmark])
25 changes: 25 additions & 0 deletions pyperformance/run.py
@@ -1,5 +1,6 @@
from collections import namedtuple
import hashlib
import json
import sys
import time
import traceback
@@ -50,7 +51,28 @@ def get_run_id(python, bench=None):
    return RunID(py_id, compat_id, bench, ts)


def get_loops_from_file(filename):
    with open(filename) as fd:
        data = json.load(fd)

    loops = {}
    for benchmark in data["benchmarks"]:
        metadata = benchmark.get("metadata", data["metadata"])
        name = metadata["name"]
        if name.endswith("_none"):
            name = name[:-len("_none")]
        if "loops" in metadata:
            loops[name] = metadata["loops"]

    return loops
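
get_loops_from_file() reads pyperf's JSON output format; a trimmed sketch of the structure it relies on (field values illustrative):

    {
      "metadata": {"perf_version": "2.6.1"},
      "benchmarks": [
        {"metadata": {"name": "sqlglot_parse", "loops": 16}},
        {"metadata": {"name": "nbody", "loops": 32}}
      ]
    }

A benchmark entry without its own "metadata" falls back to the top-level "metadata", which is why the function passes data["metadata"] as the default; the "_none" handling appears to normalize variant names recorded with a _none suffix.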


def run_benchmarks(should_run, python, options):
    if options.same_loops is not None:
        loops = get_loops_from_file(options.same_loops)
    else:
        loops = {}

    to_run = sorted(should_run)

    info = _pythoninfo.get_info(python)
@@ -136,6 +158,9 @@ def add_bench(dest_suite, obj):

        return dest_suite

        if name in loops:
            pyperf_opts.append(f"--loops={loops[name]}")

        bench_venv, bench_runid = benchmarks.get(bench)
        if bench_venv is None:
            print("ERROR: Benchmark %s failed: could not install requirements" % name)
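
Net effect: for any benchmark whose name matches one recorded in the previous run, pyperformance appends pyperf's --loops option, which pins the loop count and skips calibration. The benchmark worker's command line then looks roughly like this (illustrative):

    python run_benchmark.py --loops=16 parse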