From ff2fc8f14c0e6dd160027feb8741dd7af0d70525 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Thu, 18 Mar 2021 17:10:29 -0600 Subject: [PATCH 01/13] Add a benchmark for the Azure CLI. --- doc/benchmarks.rst | 11 ++ pyperformance/.gitignore | 2 + pyperformance/benchmarks/__init__.py | 19 +- pyperformance/benchmarks/bm_azure_cli.py | 236 +++++++++++++++++++++++ pyperformance/requirements.in | 1 + pyperformance/requirements.txt | 232 +++++++++++++++++++--- pyperformance/run.py | 5 + 7 files changed, 482 insertions(+), 24 deletions(-) create mode 100644 pyperformance/.gitignore create mode 100644 pyperformance/benchmarks/bm_azure_cli.py diff --git a/doc/benchmarks.rst b/doc/benchmarks.rst index 06104a69..9f145b43 100644 --- a/doc/benchmarks.rst +++ b/doc/benchmarks.rst @@ -54,6 +54,17 @@ depending on the Python version. them, and more generally to not modify them. +azure_cli +--------- + +Exercise the `Azure CLI `_ in a very +rough approximation of a regular usage workload. (At the moment we run +a small subset of the azure-cli test suite.) + +Note that ``azure_cli_tests`` and ``azure_cli_verify`` are similar, but +take a lot longer to run (on the order of 10-20 minutes). + + chameleon --------- diff --git a/pyperformance/.gitignore b/pyperformance/.gitignore new file mode 100644 index 00000000..ee649ae1 --- /dev/null +++ b/pyperformance/.gitignore @@ -0,0 +1,2 @@ + +azure-cli diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index 5afd82f5..b82dc762 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -7,6 +7,8 @@ # specified. DEFAULT_GROUP = [ '2to3', + 'azure_cli', + # Note that we leave azure_cli_* out. (They're really slow.) 'chameleon', 'chaos', 'crypto_pyaes', @@ -73,7 +75,7 @@ "pickle", "unpickle", "xml_etree", "json_dumps", "json_loads"], - "apps": ["2to3", "chameleon", "html5lib", "tornado_http"], + "apps": ["2to3", "chameleon", "html5lib", "tornado_http", "azure_cli"], "math": ["float", "nbody", "pidigits"], "template": ["django_template", "mako"], } @@ -83,6 +85,20 @@ def BM_2to3(python, options): return run_perf_script(python, options, "2to3") +def BM_azure_cli(python, options): + return run_perf_script(python, options, "azure_cli") + + +def BM_azure_cli_tests(python, options): + return run_perf_script(python, options, "azure_cli", + extra_args=["--kind", "tests"]) + + +def BM_azure_cli_verify(python, options): + return run_perf_script(python, options, "azure_cli", + extra_args=["--kind", "verify"]) + + # def BM_hg_startup(python, options): # return run_perf_script(python, options, "hg_startup") @@ -127,7 +143,6 @@ def BM_unpickle(python, options): def BM_pickle_list(python, options): return pickle_benchmark(python, options, "pickle_list") - def BM_pickle_dict(python, options): return pickle_benchmark(python, options, "pickle_dict") diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py new file mode 100644 index 00000000..1c61c9d3 --- /dev/null +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -0,0 +1,236 @@ +"""Test the performance of the Azure CLI. + +The test suite is an adequate proxy for regular usage of the CLI. +""" + +# The code for this benchmark is based on the manual steps defined +# for the azure-cli repo. 
+ +# See: +# - azure-pipelines.yml +# - https://github.com/Azure/azure-cli-dev-tools +# +# sudo apt install python3.8 +# sudo apt install python3.8-venv +# sudo apt install python3.8-devel +# git clone https://github.com/Azure/azure-cli +# cd azure-cli +# python3.8 -m venv .venv +# source .venv/bin/activate +# python3 -m pip install azdev +# azdev setup --cli . +# +# azdev test +# (PYTHONPATH=tools python3 -m automation test --cli .) +# PYTHONPATH=tools python3 -m automation verify commands +# (./scripts/ci/unittest.sh) +# (./scripts/ci/test_automation.sh) +# (./scripts/ci/test_integration.sh) + +import os +import os.path +import pyperf +import subprocess +import sys + + +AZURE_CLI_UPSTREAM = "https://github.com/Azure/azure-cli" +AZURE_CLI_REPO = os.path.join(os.path.dirname(__file__), 'data', 'azure-cli') + + +def _run_bench_command_env(runner, name, command, env): + if runner.args.inherit_environ: + runner.args.inherit_environ.extend(env) + else: + runner.args.inherit_environ = list(env) + + env_before = dict(os.environ) + os.environ.update(env) + try: + return runner.bench_command(name, command) + finally: + os.environ.clear() + os.environ.update(env_before) + + +def _resolve_virtual_env(pypath=None): + # This is roughly equivalent to ensuring the env is activated. + if sys.prefix == sys.base_prefix: + raise NotImplementedError("not in a virtual environment") + env = {} + + env["VIRTUAL_ENV"] = os.environ.get("VIRTUAL_ENV", sys.prefix) + + bindir = os.path.dirname(sys.executable) + PATH = os.environ.get("PATH") + if not PATH: + PATH = bindir + elif bindir not in PATH.split(os.pathsep): + PATH = os.pathsep.join([bindir, PATH]) + env["PATH"] = PATH + + if pypath: + if not isinstance(pypath, str): + pypath = os.pathsep.join(pypath) + env["PYTHONPATH"] = pypath + + return env + + +def _run(argv, **kwargs): + proc = subprocess.run(argv, **kwargs) + proc.check_returncode() + + +################### +# azure-cli helpers + +def install(): + print("installing for the azure_cli benchmark...") + if os.path.exists(AZURE_CLI_REPO): + print("local repo already exists (skipping)") + else: + _run(["git", "clone", AZURE_CLI_UPSTREAM, AZURE_CLI_REPO]) + + print("...setting up...") + # XXX Do not run this again if already done. + #_run( + # ["azdev", "setup", "--cli", AZURE_CLI_REPO], + # env=_resolve_virtual_env(), + #) + + print("...done") + + +TESTS_FAST = [ + # XXX Is this a good sample of tests (to ~ represent the workload)? + ("src/azure-cli/azure/cli/command_modules/ams/tests/latest/test_ams_account_scenarios.py", + "AmsAccountTests.test_ams_check_name"), +] +TESTS_MEDIUM = [ +] + + +def _get_tests_cmd(tests=''): + if not tests: + tests = '' + if isinstance(tests, str): + if tests == '': + tests = [] # slow + elif tests == '': + tests = [] # slow + elif tests == '': + tests = TESTS_MEDIUM + elif tests == '': + tests = TESTS_FAST + else: + if tests.startswith('<'): + raise ValueError('unsupported "test" ({!r})'.format(tests)) + raise NotImplementedError + else: + raise NotImplementedError + testargs = [file + ":" + name for file, name in tests] + + cmd = ["azdev", "test"] + testargs + return cmd + + +################### +# benchmarks + +def run_sample(runner): + # For now we run just a small subset of azure-cli test suite. + env = _resolve_virtual_env() + cmd = _get_tests_cmd(tests='') + return _run_bench_command_env( + runner, + "azure_cli", + cmd, + env, + ) + + # XXX It may make sense for this test to instead manually invoke + # the Azure CLI in 3-5 different ways. 
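    # A hedged sketch of that idea (the subcommands below are
    # assumptions picked for illustration, not a vetted workload):
    #
    #   env = _resolve_virtual_env()
    #   for extra in (["--version"], ["--help"], ["account", "--help"]):
    #       name = "azure_cli_" + extra[0].lstrip("-")
    #       _run_bench_command_env(runner, name, ["az"] + extra, env)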
+ #def func(): + # raise NotImplementedError + #return runner.bench_func("azure_cli", func) + + +def run_tests(runner): + env = _resolve_virtual_env() + tests = '' if runner.args.fast else '' + cmd = _get_tests_cmd(tests) + return _run_bench_command_env( + runner, + "azure_cli", + cmd, + env, + ) + + +def run_verify(runner): + pypath = os.path.join(AZURE_CLI_REPO, 'tools') + env = _resolve_virtual_env(pypath) + cmd = [ + sys.executable, + "-m", "automation", + "verify", + "commands", + ] + if runner.args.fast: + cmd.extend([ + # XXX Is this a good enough proxy? + "--prefix", "account", + ]) + return _run_bench_command_env( + runner, + "azure_cli_verify", + cmd, + env, + ) + + +################### +# the script + +def get_runner(): + def add_cmdline_args(cmd, args): + # Preserve --kind. + kind = getattr(args, 'kind', 'sample') + cmd.extend(["--kind", kind]) + if args.fast: + cmd.append('--fast') + + runner = pyperf.Runner( + add_cmdline_args=add_cmdline_args, + metadata={ + "description": "Performance of the Azure CLI", + }, + ) + + runner.argparser.add_argument( + "--kind", + choices=["sample", "tests", "verify"], + default="sample", + ) + + return runner + + +if __name__ == '__main__': + runner = get_runner() + args = runner.parse_args() + + if args.kind == "sample": + # fast(er) + run_sample(runner) + elif args.kind == "tests": + # slow + #runner.values = 1 + run_tests(runner) + elif args.kind == "verify": + # slow + #runner.values = 1 + run_verify(runner) + else: + raise NotImplementedError(args.kind) diff --git a/pyperformance/requirements.in b/pyperformance/requirements.in index c5e60535..651b29c2 100644 --- a/pyperformance/requirements.in +++ b/pyperformance/requirements.in @@ -22,6 +22,7 @@ html5lib # bm_html5lib pyaes # bm_crypto_pyaes sympy # bm_sympy tornado # bm_tornado_http +azdev # bm_azure_cli # Optional dependencies diff --git a/pyperformance/requirements.txt b/pyperformance/requirements.txt index b7a5dc8f..39082d6b 100644 --- a/pyperformance/requirements.txt +++ b/pyperformance/requirements.txt @@ -2,26 +2,214 @@ # This file is autogenerated by pip-compile # To update, run: # -# pip-compile requirements.in +# pip-compile pyperformance/requirements.in # -asgiref==3.2.10 # via django -certifi==2020.6.20 # via dulwich -chameleon==3.7.4 # via -r requirements.in -django==3.0.7 # via -r requirements.in -dulwich==0.20.5 # via -r requirements.in -genshi==0.7.3 # via -r requirements.in -html5lib==1.1 # via -r requirements.in -mako==1.1.3 # via -r requirements.in -markupsafe==1.1.1 # via mako -mpmath==1.1.0 # via sympy -psutil==5.7.0 # via -r requirements.in -pyaes==1.6.1 # via -r requirements.in -pyperf==2.0.0 # via -r requirements.in -pytz==2020.1 # via django -six==1.15.0 # via html5lib -sqlalchemy==1.3.17 # via -r requirements.in -sqlparse==0.3.1 # via django -sympy==1.6 # via -r requirements.in -tornado==6.0.4 # via -r requirements.in -urllib3==1.25.9 # via dulwich -webencodings==0.5.1 # via html5lib +alabaster==0.7.12 + # via sphinx +apipkg==1.5 + # via execnet +appdirs==1.4.4 + # via virtualenv +argcomplete==1.12.2 + # via knack +asgiref==3.3.1 + # via django +astroid==2.5.1 + # via pylint +attrs==20.3.0 + # via pytest +azdev==0.1.29 + # via -r pyperformance/requirements.in +azure-common==1.1.26 + # via + # azure-storage-blob + # azure-storage-common +azure-storage-blob==1.5.0 + # via azdev +azure-storage-common==1.4.2 + # via azure-storage-blob +babel==2.9.0 + # via sphinx +certifi==2020.12.5 + # via + # dulwich + # requests +cffi==1.14.5 + # via cryptography 
+chameleon==3.9.0 + # via -r pyperformance/requirements.in +chardet==4.0.0 + # via requests +colorama==0.4.4 + # via knack +cryptography==3.4.6 + # via azure-storage-common +distlib==0.3.1 + # via virtualenv +django==3.1.7 + # via -r pyperformance/requirements.in +docutils==0.16 + # via + # azdev + # sphinx +dulwich==0.20.20 + # via -r pyperformance/requirements.in +execnet==1.8.0 + # via pytest-xdist +filelock==3.0.12 + # via + # tox + # virtualenv +flake8==3.9.0 + # via azdev +genshi==0.7.5 + # via -r pyperformance/requirements.in +gitdb==4.0.5 + # via gitpython +gitpython==3.1.14 + # via azdev +greenlet==1.0.0 + # via sqlalchemy +html5lib==1.1 + # via -r pyperformance/requirements.in +idna==2.10 + # via requests +imagesize==1.2.0 + # via sphinx +iniconfig==1.1.1 + # via pytest +isort==4.3.21 + # via + # azdev + # pylint +jinja2==2.11.3 + # via + # azdev + # sphinx +jmespath==0.10.0 + # via knack +knack==0.7.2 + # via azdev +lazy-object-proxy==1.5.2 + # via astroid +mako==1.1.4 + # via -r pyperformance/requirements.in +markupsafe==1.1.1 + # via + # jinja2 + # mako +mccabe==0.6.1 + # via + # flake8 + # pylint +mock==4.0.3 + # via azdev +mpmath==1.2.1 + # via sympy +packaging==20.9 + # via + # pytest + # tox +pluggy==0.13.1 + # via + # pytest + # tox +psutil==5.8.0 + # via -r pyperformance/requirements.in +py==1.10.0 + # via + # pytest + # pytest-forked + # tox +pyaes==1.6.1 + # via -r pyperformance/requirements.in +pycodestyle==2.7.0 + # via flake8 +pycparser==2.20 + # via cffi +pyflakes==2.3.0 + # via flake8 +pygments==2.8.1 + # via + # knack + # sphinx +pylint==2.3.0 + # via azdev +pyparsing==2.4.7 + # via packaging +pyperf==2.1.0 + # via -r pyperformance/requirements.in +pytest-forked==1.3.0 + # via pytest-xdist +pytest-xdist==2.2.1 + # via azdev +pytest==6.2.2 + # via + # azdev + # pytest-forked + # pytest-xdist +python-dateutil==2.8.1 + # via azure-storage-common +pytz==2021.1 + # via + # babel + # django +pyyaml==5.4.1 + # via + # azdev + # knack +requests==2.25.1 + # via + # azdev + # azure-storage-common + # sphinx +six==1.15.0 + # via + # genshi + # html5lib + # knack + # python-dateutil + # sphinx + # tox + # virtualenv +smmap==3.0.5 + # via gitdb +snowballstemmer==2.1.0 + # via sphinx +sphinx==1.6.7 + # via azdev +sphinxcontrib-serializinghtml==1.1.4 + # via sphinxcontrib-websupport +sphinxcontrib-websupport==1.2.4 + # via sphinx +sqlalchemy==1.4.1 + # via -r pyperformance/requirements.in +sqlparse==0.4.1 + # via django +sympy==1.7.1 + # via -r pyperformance/requirements.in +tabulate==0.8.9 + # via knack +toml==0.10.2 + # via + # pytest + # tox +tornado==6.1 + # via -r pyperformance/requirements.in +tox==3.23.0 + # via azdev +urllib3==1.26.4 + # via + # dulwich + # requests +virtualenv==20.4.3 + # via tox +webencodings==0.5.1 + # via html5lib +wheel==0.30.0 + # via azdev +wrapt==1.12.1 + # via astroid + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/pyperformance/run.py b/pyperformance/run.py index 88007dc5..62fac695 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -103,6 +103,11 @@ def run_benchmarks(bench_funcs, should_run, cmd_prefix, options): run_count = str(len(to_run)) errors = [] + # XXX Do this in a more generic way. + if any(name.startswith("azure_cli") for name in to_run): + from .benchmarks import bm_azure_cli + bm_azure_cli.install() + for index, name in enumerate(to_run): func = bench_funcs[name] print("[%s/%s] %s..." 
% From dab3dfc2df0534e524f9c22ac28feb8aaa92ce11 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 19 Mar 2021 11:13:14 -0600 Subject: [PATCH 02/13] During tests, use the created venv when appropriate. --- runtests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtests.py b/runtests.py index 34ac702a..0fcc7267 100755 --- a/runtests.py +++ b/runtests.py @@ -54,7 +54,7 @@ def run_bench(*cmd): os.path.join('pyperformance', 'tests', 'data', 'py36.json'), os.path.join('pyperformance', 'tests', 'data', 'mem1.json'), ): - run_cmd((python, script, 'show', filename)) + run_cmd((venv_python, script, 'show', filename)) run_bench(python, script, 'list') run_bench(python, script, 'list_groups') From 4a9de738836003a2e8131734651c1e8738156945 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 19 Mar 2021 11:18:25 -0600 Subject: [PATCH 03/13] Only "install" when necessary. --- pyperformance/benchmarks/__init__.py | 15 +++++-- pyperformance/benchmarks/bm_azure_cli.py | 57 +++++++++++++++++++----- pyperformance/run.py | 5 --- 3 files changed, 59 insertions(+), 18 deletions(-) diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index b82dc762..1cd6b0d1 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -86,17 +86,26 @@ def BM_2to3(python, options): def BM_azure_cli(python, options): - return run_perf_script(python, options, "azure_cli") + return run_perf_script(python, options, "azure_cli", + extra_args=[ + "--install", + ]) def BM_azure_cli_tests(python, options): return run_perf_script(python, options, "azure_cli", - extra_args=["--kind", "tests"]) + extra_args=[ + "--install", + "--kind", "tests", + ]) def BM_azure_cli_verify(python, options): return run_perf_script(python, options, "azure_cli", - extra_args=["--kind", "verify"]) + extra_args=[ + "--install", + "--kind", "verify", + ]) # def BM_hg_startup(python, options): diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index 1c61c9d3..fa41e42b 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -85,7 +85,32 @@ def _run(argv, **kwargs): ################### # azure-cli helpers -def install(): +AZDEV_MARKER = os.path.join(AZURE_CLI_REPO, ".AZDEV_READY") +# This global allows us to only check the install once per proc. +INSTALL_ENSURED = False + + +def install(force=False): + global INSTALL_ENSURED + + if not force: + if INSTALL_ENSURED: + print("already checked") + return + if os.path.exists(AZDEV_MARKER): + print("marker exists") + INSTALL_ENSURED = True + return + + _install() + + INSTALL_ENSURED = True + # Touch the file. + with open(AZDEV_MARKER, "a"): + pass + + +def _install(): print("installing for the azure_cli benchmark...") if os.path.exists(AZURE_CLI_REPO): print("local repo already exists (skipping)") @@ -94,10 +119,10 @@ def install(): print("...setting up...") # XXX Do not run this again if already done. - #_run( - # ["azdev", "setup", "--cli", AZURE_CLI_REPO], - # env=_resolve_virtual_env(), - #) + _run( + ["azdev", "setup", "--cli", AZURE_CLI_REPO], + env=_resolve_virtual_env(), + ) print("...done") @@ -198,6 +223,8 @@ def add_cmdline_args(cmd, args): # Preserve --kind. kind = getattr(args, 'kind', 'sample') cmd.extend(["--kind", kind]) + # Note that we do not preserve --install. We don't need + # the worker to duplicate the work. 
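+        # (pyperf re-runs this script in fresh worker processes and
+        # builds each worker's command line through this hook; the
+        # parent process has already done any install work by the
+        # time workers start, so repeating it would only add noise.)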
if args.fast: cmd.append('--fast') @@ -208,11 +235,14 @@ def add_cmdline_args(cmd, args): }, ) - runner.argparser.add_argument( - "--kind", - choices=["sample", "tests", "verify"], - default="sample", - ) + runner.argparser.add_argument("--kind", + choices=["sample", "tests", "verify"], + default="sample") + runner.argparser.add_argument("--install", action="store_const", const="") + runner.argparser.add_argument("--force-install", dest="install", + action="store_const", const="") + runner.argparser.add_argument("--no-install", dest="install", + action="store_const", const=None) return runner @@ -221,6 +251,13 @@ def add_cmdline_args(cmd, args): runner = get_runner() args = runner.parse_args() + if args.install == "": + install(force=False) + elif args.install == "": + install(force=True) + elif args.install: + raise NotImplementedError(args.install) + if args.kind == "sample": # fast(er) run_sample(runner) diff --git a/pyperformance/run.py b/pyperformance/run.py index 62fac695..88007dc5 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -103,11 +103,6 @@ def run_benchmarks(bench_funcs, should_run, cmd_prefix, options): run_count = str(len(to_run)) errors = [] - # XXX Do this in a more generic way. - if any(name.startswith("azure_cli") for name in to_run): - from .benchmarks import bm_azure_cli - bm_azure_cli.install() - for index, name in enumerate(to_run): func = bench_funcs[name] print("[%s/%s] %s..." % From 1caa4d4afd50cfff626de8b17777b98f9c57ea6e Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 19 Mar 2021 11:32:49 -0600 Subject: [PATCH 04/13] Mark some benchmarks as "slow". --- pyperformance/benchmarks/__init__.py | 20 ++++++++++++++++++++ pyperformance/cli.py | 4 ++++ 2 files changed, 24 insertions(+) diff --git a/pyperformance/benchmarks/__init__.py b/pyperformance/benchmarks/__init__.py index 1cd6b0d1..59aecc5d 100644 --- a/pyperformance/benchmarks/__init__.py +++ b/pyperformance/benchmarks/__init__.py @@ -1,5 +1,6 @@ import logging +from pyperformance.cli import fast_requested from pyperformance.run import run_perf_script @@ -78,9 +79,23 @@ "apps": ["2to3", "chameleon", "html5lib", "tornado_http", "azure_cli"], "math": ["float", "nbody", "pidigits"], "template": ["django_template", "mako"], + "slow": [], } +def slow(func): + """A decorator to mark a benchmark as slow.""" + if not func.__name__.startswith("BM_"): + raise NotImplementedError(func) + name = func.__name__[3:].lower() + BENCH_GROUPS["slow"].append(name) + return func + + +def maybe_slow(func): + return func if fast_requested() else slow(func) + + def BM_2to3(python, options): return run_perf_script(python, options, "2to3") @@ -92,6 +107,7 @@ def BM_azure_cli(python, options): ]) +@maybe_slow def BM_azure_cli_tests(python, options): return run_perf_script(python, options, "azure_cli", extra_args=[ @@ -100,6 +116,7 @@ def BM_azure_cli_tests(python, options): ]) +@maybe_slow def BM_azure_cli_verify(python, options): return run_perf_script(python, options, "azure_cli", extra_args=[ @@ -320,6 +337,9 @@ def get_benchmarks(): # create the 'all' group bench_groups["all"] = sorted(bench_funcs) + bench_groups["fast"] = [name + for name in bench_groups["all"] + if name not in bench_groups["slow"]] return (bench_funcs, bench_groups) diff --git a/pyperformance/cli.py b/pyperformance/cli.py index 46d72361..8f2d3fb4 100644 --- a/pyperformance/cli.py +++ b/pyperformance/cli.py @@ -177,6 +177,10 @@ def parse_args(): return (parser, options) +def fast_requested(): + return '--fast' in sys.argv + + def _main(): parser, 
options = parse_args() From a5d583e8583d62a2ce3352764d22ba0a287f1c05 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 19 Mar 2021 11:33:16 -0600 Subject: [PATCH 05/13] Only run "fast" benchmarks in the functional tests. --- runtests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtests.py b/runtests.py index 0fcc7267..85d57a83 100755 --- a/runtests.py +++ b/runtests.py @@ -65,7 +65,7 @@ def run_bench(*cmd): # # --debug-single-value: benchmark results don't matter, we only # check that running benchmarks don't fail. - run_bench(python, script, 'run', '-b', 'all', '--debug-single-value', + run_bench(python, script, 'run', '-b', 'fast', '--debug-single-value', '-o', json) # Display slowest benchmarks From 38b6fb1d94eecfcb23c83daddf23546f8299cf77 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 19 Mar 2021 11:48:17 -0600 Subject: [PATCH 06/13] Look for the "already installed" marker in the venv instead of data dir. --- pyperformance/benchmarks/bm_azure_cli.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index fa41e42b..0aedbc46 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -85,7 +85,6 @@ def _run(argv, **kwargs): ################### # azure-cli helpers -AZDEV_MARKER = os.path.join(AZURE_CLI_REPO, ".AZDEV_READY") # This global allows us to only check the install once per proc. INSTALL_ENSURED = False @@ -97,17 +96,23 @@ def install(force=False): if INSTALL_ENSURED: print("already checked") return - if os.path.exists(AZDEV_MARKER): - print("marker exists") + if _already_installed(): + print("already ready") INSTALL_ENSURED = True return _install() INSTALL_ENSURED = True - # Touch the file. - with open(AZDEV_MARKER, "a"): - pass + + +def _already_installed(): + try: + import azure.cli + except ImportError: + return False + else: + return True def _install(): From 04140211f4f604f0b65b0ecc2721f29ad89e9690 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Fri, 19 Mar 2021 12:48:50 -0600 Subject: [PATCH 07/13] Do non-pip setup for benchmarks before running them. --- pyperformance/benchmarks/bm_azure_cli.py | 25 ++++++++++++++++-------- pyperformance/run.py | 9 +++++++++ runtests.py | 4 ++++ 3 files changed, 30 insertions(+), 8 deletions(-) diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index 0aedbc46..dbfc7b86 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -97,11 +97,15 @@ def install(force=False): print("already checked") return if _already_installed(): - print("already ready") + print("already installed") INSTALL_ENSURED = True return + print("=========================================") + print("installing for the azure_cli benchmark...") _install() + print("...done") + print("=========================================") INSTALL_ENSURED = True @@ -116,7 +120,6 @@ def _already_installed(): def _install(): - print("installing for the azure_cli benchmark...") if os.path.exists(AZURE_CLI_REPO): print("local repo already exists (skipping)") else: @@ -125,12 +128,10 @@ def _install(): print("...setting up...") # XXX Do not run this again if already done. 
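+    # (Roughly: "azdev setup --cli <repo>" pip-installs the checked-out
+    # CLI and its dependencies into the active virtual environment, so
+    # it only needs to succeed once per venv.)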
_run( - ["azdev", "setup", "--cli", AZURE_CLI_REPO], + [sys.executable, "-m", "azdev", "setup", "--cli", AZURE_CLI_REPO], env=_resolve_virtual_env(), ) - print("...done") - TESTS_FAST = [ # XXX Is this a good sample of tests (to ~ represent the workload)? @@ -241,9 +242,11 @@ def add_cmdline_args(cmd, args): ) runner.argparser.add_argument("--kind", - choices=["sample", "tests", "verify"], + choices=["sample", "tests", "verify", "install"], default="sample") - runner.argparser.add_argument("--install", action="store_const", const="") + runner.argparser.add_argument("--install", + action="store_const", const="", + default="") runner.argparser.add_argument("--force-install", dest="install", action="store_const", const="") runner.argparser.add_argument("--no-install", dest="install", @@ -252,7 +255,7 @@ def add_cmdline_args(cmd, args): return runner -if __name__ == '__main__': +def main(): runner = get_runner() args = runner.parse_args() @@ -274,5 +277,11 @@ def add_cmdline_args(cmd, args): # slow #runner.values = 1 run_verify(runner) + elif args.kind == "install": + return else: raise NotImplementedError(args.kind) + + +if __name__ == '__main__': + main() diff --git a/pyperformance/run.py b/pyperformance/run.py index 88007dc5..f2d2af4f 100644 --- a/pyperformance/run.py +++ b/pyperformance/run.py @@ -97,12 +97,21 @@ def run_perf_script(python, options, name, extra_args=[]): return pyperf.BenchmarkSuite.load(tmp) +def _prepare_benchmarks(names): + # XXX Do this in a more generic way. + if any(name.startswith("azure_cli") for name in names): + from .benchmarks import bm_azure_cli + bm_azure_cli.install() + + def run_benchmarks(bench_funcs, should_run, cmd_prefix, options): suite = None to_run = sorted(should_run) run_count = str(len(to_run)) errors = [] + _prepare_benchmarks(to_run) + for index, name in enumerate(to_run): func = bench_funcs[name] print("[%s/%s] %s..." % diff --git a/runtests.py b/runtests.py index 85d57a83..6dea6e27 100755 --- a/runtests.py +++ b/runtests.py @@ -50,6 +50,10 @@ def run_bench(*cmd): run_bench(python, script, 'venv') + # Pre-install for the azure_cli benchmark. + azurecli = os.path.join('pyperformance', 'benchmarks', 'bm_azure_cli.py') + run_cmd([venv_python, azurecli, '--kind', 'install']) + for filename in ( os.path.join('pyperformance', 'tests', 'data', 'py36.json'), os.path.join('pyperformance', 'tests', 'data', 'mem1.json'), From 54b4ee5e3b187c1a6aaaa160afba330f048ea44e Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 22 Mar 2021 09:38:43 -0600 Subject: [PATCH 08/13] Clean up install() a little. 
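
The force/already-checked/already-installed checks collapse into a
single if/elif chain, and the start/done banner now brackets every
path instead of only an actual install.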
--- pyperformance/benchmarks/bm_azure_cli.py | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index dbfc7b86..31363227 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -92,18 +92,16 @@ def _run(argv, **kwargs): def install(force=False): global INSTALL_ENSURED - if not force: - if INSTALL_ENSURED: - print("already checked") - return - if _already_installed(): - print("already installed") - INSTALL_ENSURED = True - return - print("=========================================") print("installing for the azure_cli benchmark...") - _install() + if force: + _install() + elif INSTALL_ENSURED: + print("already checked") + elif _already_installed(): + print("already installed") + else: + _install() print("...done") print("=========================================") From 64b269bdcd4c7597ee72ee98adc4f871892d4ee2 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 22 Mar 2021 09:46:08 -0600 Subject: [PATCH 09/13] Add a "log" entry. --- pyperformance/benchmarks/bm_azure_cli.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index 31363227..db8e8f5d 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -30,6 +30,7 @@ import os import os.path import pyperf +import shlex import subprocess import sys @@ -78,6 +79,8 @@ def _resolve_virtual_env(pypath=None): def _run(argv, **kwargs): + cmd_str = ' '.join(map(shlex.quote, argv)) + print("Execute: %s" % cmd_str) proc = subprocess.run(argv, **kwargs) proc.check_returncode() From 1c9dc6c674a6567fe9902aff4cc93640eeb9b0e4 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 22 Mar 2021 10:21:13 -0600 Subject: [PATCH 10/13] Flush stdout before running subprocesses. --- pyperformance/benchmarks/bm_azure_cli.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index db8e8f5d..78e98087 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -81,6 +81,8 @@ def _resolve_virtual_env(pypath=None): def _run(argv, **kwargs): cmd_str = ' '.join(map(shlex.quote, argv)) print("Execute: %s" % cmd_str) + sys.stdout.flush() + sys.stderr.flush() proc = subprocess.run(argv, **kwargs) proc.check_returncode() From 0cc1974402199a386d998e77dd32e52450f4e512 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 22 Mar 2021 12:00:03 -0600 Subject: [PATCH 11/13] Set $VIRTUAL_ENV and $PATH when running in a venv. --- pyperformance/benchmarks/bm_azure_cli.py | 18 +++++----------- pyperformance/venv.py | 26 +++++++++++++++++++++++- 2 files changed, 30 insertions(+), 14 deletions(-) diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index 78e98087..689face3 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -34,6 +34,8 @@ import subprocess import sys +import pyperformance.venv + AZURE_CLI_UPSTREAM = "https://github.com/Azure/azure-cli" AZURE_CLI_REPO = os.path.join(os.path.dirname(__file__), 'data', 'azure-cli') @@ -56,19 +58,7 @@ def _run_bench_command_env(runner, name, command, env): def _resolve_virtual_env(pypath=None): # This is roughly equivalent to ensuring the env is activated. 
- if sys.prefix == sys.base_prefix: - raise NotImplementedError("not in a virtual environment") - env = {} - - env["VIRTUAL_ENV"] = os.environ.get("VIRTUAL_ENV", sys.prefix) - - bindir = os.path.dirname(sys.executable) - PATH = os.environ.get("PATH") - if not PATH: - PATH = bindir - elif bindir not in PATH.split(os.pathsep): - PATH = os.pathsep.join([bindir, PATH]) - env["PATH"] = PATH + env = pyperformance.venv.resolve_env_vars() if pypath: if not isinstance(pypath, str): @@ -129,6 +119,8 @@ def _install(): _run(["git", "clone", AZURE_CLI_UPSTREAM, AZURE_CLI_REPO]) print("...setting up...") + if not os.environ.get("VIRTUAL_ENV"): + raise Exception("the target venv is not activated") # XXX Do not run this again if already done. _run( [sys.executable, "-m", "azdev", "setup", "--cli", AZURE_CLI_REPO], diff --git a/pyperformance/venv.py b/pyperformance/venv.py index 216553c5..fdfeb091 100644 --- a/pyperformance/venv.py +++ b/pyperformance/venv.py @@ -125,6 +125,25 @@ def create_environ(inherit_environ): return env +def resolve_env_vars(executable=sys.executable, os_environ=os.environ): + bindir = os.path.dirname(executable) + prefix = os.path.dirname(bindir) + if prefix == sys.base_prefix: + raise NotImplementedError("not a virtual environment") + env = {} + + env["VIRTUAL_ENV"] = os_environ.get("VIRTUAL_ENV") or prefix + + PATH = os_environ.get("PATH") + if not PATH: + PATH = bindir + elif bindir not in PATH.split(os.pathsep): + PATH = os.pathsep.join([bindir, PATH]) + env["PATH"] = PATH + + return env + + def download(url, filename): response = urllib.request.urlopen(url) with response: @@ -162,6 +181,9 @@ def run_cmd_nocheck(self, cmd, verbose=True): sys.stderr.flush() env = create_environ(self.options.inherit_environ) + env.update( + resolve_env_vars(self.get_python_program(), env), + ) try: proc = subprocess.Popen(cmd, env=env) except OSError as exc: @@ -467,7 +489,9 @@ def exec_in_virtualenv(options): venv.run_cmd(args, verbose=False) sys.exit(0) else: - os.execv(args[0], args) + env = dict(os.environ) + env.update(resolve_env_vars(venv_python)) + os.execve(args[0], args, env) def cmd_venv(options): From 10afb763b47aaf3ddb195180ad7a585a67ed6df8 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Mon, 22 Mar 2021 13:09:30 -0600 Subject: [PATCH 12/13] Drop the restriction about not running in a venv. --- pyperformance/benchmarks/bm_azure_cli.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index 689face3..0bc68864 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -119,8 +119,6 @@ def _install(): _run(["git", "clone", AZURE_CLI_UPSTREAM, AZURE_CLI_REPO]) print("...setting up...") - if not os.environ.get("VIRTUAL_ENV"): - raise Exception("the target venv is not activated") # XXX Do not run this again if already done. _run( [sys.executable, "-m", "azdev", "setup", "--cli", AZURE_CLI_REPO], From f2d05efe8941dd4fcf25248149bcb14315aae315 Mon Sep 17 00:00:00 2001 From: Eric Snow Date: Tue, 23 Mar 2021 17:00:12 -0600 Subject: [PATCH 13/13] Disable hash randomization when installing for azure-cli. 
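
PYTHONHASHSEED=0 turns off hash randomization in the child process,
so anything in the setup that depends on dict/set iteration order
behaves the same way from one install to the next.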
--- pyperformance/benchmarks/bm_azure_cli.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyperformance/benchmarks/bm_azure_cli.py b/pyperformance/benchmarks/bm_azure_cli.py index 0bc68864..1733daa5 100644 --- a/pyperformance/benchmarks/bm_azure_cli.py +++ b/pyperformance/benchmarks/bm_azure_cli.py @@ -119,10 +119,12 @@ def _install(): _run(["git", "clone", AZURE_CLI_UPSTREAM, AZURE_CLI_REPO]) print("...setting up...") + env = _resolve_virtual_env() + env['PYTHONHASHSEED'] = '0' # XXX Do not run this again if already done. _run( [sys.executable, "-m", "azdev", "setup", "--cli", AZURE_CLI_REPO], - env=_resolve_virtual_env(), + env=env, )
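
A closing note on the pyperf pattern this series leans on:
Runner.bench_command() times one complete child-process invocation per
measurement, and inherit_environ controls which environment variables
pyperf forwards to its worker processes. A minimal self-contained
sketch (the benchmark name and command are illustrative, not taken
from the patches):

    import sys

    import pyperf

    runner = pyperf.Runner()
    # bench_command() spawns the command once per measurement and times
    # the whole process, so interpreter startup and imports dominate --
    # the same shape of workload that bm_azure_cli.py measures through
    # "azdev test".
    runner.bench_command("python_startup", [sys.executable, "-c", "pass"])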