From 70248e8c50b8a9beb4663f7a3f3168581fe8f463 Mon Sep 17 00:00:00 2001
From: Richard Giliam
Date: Wed, 22 Jan 2025 16:37:20 -0800
Subject: [PATCH] Update perf regression workflow to assume pyproject use in
 repo HEAD

---
 .github/workflows/performance-regression.yml | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/.github/workflows/performance-regression.yml b/.github/workflows/performance-regression.yml
index b320f814f..6950a2437 100644
--- a/.github/workflows/performance-regression.yml
+++ b/.github/workflows/performance-regression.yml
@@ -14,8 +14,7 @@ env:
   spec_defaults: '{warmups:100,iterations:100}'
   specs: '{command:read,format:ion_text} {command:write,format:ion_text} {command:read,format:ion_binary} {command:write,format:ion_binary}'
   test_data_id: 'generated-test-data'
-  run_cli: 'python amazon/ionbenchmark/ion_benchmark_cli.py'
-  run_cli_new: 'python src-python/amazon/ionbenchmark/ion_benchmark_cli.py'
+  run_cli: 'python src-python/amazon/ionbenchmark/ion_benchmark_cli.py'
 
 jobs:
 
@@ -104,10 +103,7 @@ jobs:
       # Generates performance results for the previous commit
       - name: Create a virtual environment for baseline
         working-directory: ./baseline
-        run: |
-          pip install -r requirements.txt
-          [ -e "requirements_benchmark.txt" ] && pip install -r requirements_benchmark.txt # include benchmark requirements if they exist.
-          pip install .
+        run: pip install '.[test,benchmarking]'
 
       - name: Run baseline performance benchmark
         id: 'baseline'
@@ -130,7 +126,7 @@
         id: 'new'
         working-directory: ./new
         run: |
-          ${{env.run_cli_new}} spec '${{env.specs}}' -d '${{env.spec_defaults}}' \
+          ${{env.run_cli}} spec '${{env.specs}}' -d '${{env.spec_defaults}}' \
             -O '{input_file:"${{steps.download.outputs.download-path}}/${{ matrix.test-data }}.10n"}' \
             -o "$PWD/report.ion" -r '${{env.report_statistics}}'
           echo "::group::Ion Report"
@@ -141,4 +137,4 @@ jobs:
       # Compare results and identify regression
       - name: Detect performance regression
         working-directory: ./new
-        run: ${{env.run_cli_new}} compare --fail ${{steps.baseline.outputs.report}} ${{steps.new.outputs.report}} -c '${{env.compare_statistics}}'
+        run: ${{env.run_cli}} compare --fail ${{steps.baseline.outputs.report}} ${{steps.new.outputs.report}} -c '${{env.compare_statistics}}'