diff --git a/pyperformance/data-files/benchmarks/bm_pickle_opt/pyproject.toml b/pyperformance/data-files/benchmarks/bm_pickle_opt/pyproject.toml
new file mode 100644
index 00000000..07235075
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_pickle_opt/pyproject.toml
@@ -0,0 +1,9 @@
+[project]
+name = "pyperformance_bm_pickle_opt"
+requires-python = ">=3.8"
+dependencies = ["pyperf"]
+urls = {repository = "https://github.com/python/pyperformance"}
+dynamic = ["version"]
+
+[tool.pyperformance]
+name = "pickle_opt"
diff --git a/pyperformance/data-files/benchmarks/bm_pickle_opt/run_benchmark.py b/pyperformance/data-files/benchmarks/bm_pickle_opt/run_benchmark.py
new file mode 100644
index 00000000..775fc090
--- /dev/null
+++ b/pyperformance/data-files/benchmarks/bm_pickle_opt/run_benchmark.py
@@ -0,0 +1,28 @@
+"""The background for this benchmark is that the garbage collection in
+Python 3.14.0 had a performance regression, see
+
+* https://github.com/python/cpython/issues/140175
+* https://github.com/python/cpython/issues/139951
+
+"""
+
+import pickle
+import pickletools
+import pyperf
+
+
+def setup(N: int) -> bytes:
+    x = {i: f"ii{i:>07}" for i in range(N)}
+    return pickle.dumps(x, protocol=4)
+
+
+def run(p: bytes) -> None:
+    pickletools.optimize(p)
+
+
+if __name__ == "__main__":
+    runner = pyperf.Runner()
+    runner.metadata["description"] = "Pickletools optimize"
+    N = 100_000
+    payload = setup(N)
+    runner.bench_func("pickle_opt", run, payload)