diff --git a/benchmarks/MANIFEST b/benchmarks/MANIFEST
index a99f58a..5229652 100644
--- a/benchmarks/MANIFEST
+++ b/benchmarks/MANIFEST
@@ -8,7 +8,7 @@ gevent_hub
 gunicorn
 json
 kinto
-mypy
+mypy2
 pycparser
 pylint
 pytorch_alexnet_inference
diff --git a/benchmarks/bm_mypy/data/mypy_target.py b/benchmarks/bm_mypy2/data/mypy_target.py
similarity index 100%
rename from benchmarks/bm_mypy/data/mypy_target.py
rename to benchmarks/bm_mypy2/data/mypy_target.py
diff --git a/benchmarks/bm_mypy/legacyutils.py b/benchmarks/bm_mypy2/legacyutils.py
similarity index 100%
rename from benchmarks/bm_mypy/legacyutils.py
rename to benchmarks/bm_mypy2/legacyutils.py
diff --git a/benchmarks/bm_mypy/pyproject.toml b/benchmarks/bm_mypy2/pyproject.toml
similarity index 84%
rename from benchmarks/bm_mypy/pyproject.toml
rename to benchmarks/bm_mypy2/pyproject.toml
index 5da0cd8..7ac33ed 100644
--- a/benchmarks/bm_mypy/pyproject.toml
+++ b/benchmarks/bm_mypy2/pyproject.toml
@@ -1,5 +1,5 @@
 [project]
-name = "bm_mypy"
+name = "bm_mypy2"
 dependencies = [
     "mypy",
 ]
diff --git a/benchmarks/bm_mypy/requirements.txt b/benchmarks/bm_mypy2/requirements.txt
similarity index 100%
rename from benchmarks/bm_mypy/requirements.txt
rename to benchmarks/bm_mypy2/requirements.txt
diff --git a/benchmarks/bm_mypy/run_benchmark.py b/benchmarks/bm_mypy2/run_benchmark.py
similarity index 84%
rename from benchmarks/bm_mypy/run_benchmark.py
rename to benchmarks/bm_mypy2/run_benchmark.py
index d64aa74..535b9af 100644
--- a/benchmarks/bm_mypy/run_benchmark.py
+++ b/benchmarks/bm_mypy2/run_benchmark.py
@@ -41,8 +41,6 @@ def _bench_mypy(loops=20, *, legacy=False):
     times = []
     with open(os.devnull, "w") as devnull:
         for i in range(loops):
-            if legacy:
-                print(i)
             # This is a macro benchmark for a Python implementation
             # so "elapsed" covers more than just how long main() takes.
             t0 = pyperf.perf_counter()
@@ -52,8 +50,12 @@ def _bench_mypy(loops=20, *, legacy=False):
                 pass
             t1 = pyperf.perf_counter()
 
-            elapsed += t1 - t0
-            times.append(t0)
+            # Don't include results from the first run, since it loads the
+            # files from disk. Subsequent runs will use the file contents in an
+            # in-memory cache.
+            if i > 0:
+                elapsed += t1 - t0
+                times.append(t0)
     times.append(pyperf.perf_counter())
     return elapsed, times
 
@@ -67,4 +69,4 @@ def _bench_mypy(loops=20, *, legacy=False):
 
     runner = pyperf.Runner()
     runner.metadata['description'] = "Test the performance of mypy types"
-    runner.bench_time_func("mypy", bench_mypy)
+    runner.bench_time_func("mypy2", bench_mypy)
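
For reference, below is a minimal standalone sketch of the warm-cache timing pattern this patch introduces in run_benchmark.py: each iteration is timed individually, and the first iteration is excluded from the total because it pays the one-time cost of loading files from disk, while later iterations hit the in-memory cache. Here run_workload is a hypothetical placeholder for mypy's main(), and time.perf_counter stands in for pyperf.perf_counter so the sketch has no dependencies; it illustrates the technique and is not part of the patch.

import time

def run_workload():
    # Placeholder work; the real benchmark type-checks a target file.
    sum(i * i for i in range(200_000))

def bench(loops=20):
    elapsed = 0.0
    times = []
    for i in range(loops):
        t0 = time.perf_counter()
        run_workload()
        t1 = time.perf_counter()
        # Skip the cold first run (i == 0), which includes disk I/O;
        # only warm runs contribute to the reported total.
        if i > 0:
            elapsed += t1 - t0
            times.append(t0)
    times.append(time.perf_counter())
    return elapsed, times

if __name__ == "__main__":
    total, _ = bench()
    print(f"elapsed over warm runs: {total:.3f}s")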