Skip to content

Commit 3bf5eb2

Browse files
authored
Update perf regression workflow to assume pyproject use in repo HEAD (#401)
1 parent 85ee6ab commit 3bf5eb2

File tree

1 file changed

+4
-8
lines changed

1 file changed

+4
-8
lines changed

.github/workflows/performance-regression.yml

Lines changed: 4 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -14,8 +14,7 @@ env:
1414
spec_defaults: '{warmups:100,iterations:100}'
1515
specs: '{command:read,format:ion_text} {command:write,format:ion_text} {command:read,format:ion_binary} {command:write,format:ion_binary}'
1616
test_data_id: 'generated-test-data'
17-
run_cli: 'python amazon/ionbenchmark/ion_benchmark_cli.py'
18-
run_cli_new: 'python src-python/amazon/ionbenchmark/ion_benchmark_cli.py'
17+
run_cli: 'python src-python/amazon/ionbenchmark/ion_benchmark_cli.py'
1918

2019

2120
jobs:
@@ -104,10 +103,7 @@ jobs:
104103
# Generates performance results for the previous commit
105104
- name: Create a virtual environment for baseline
106105
working-directory: ./baseline
107-
run: |
108-
pip install -r requirements.txt
109-
[ -e "requirements_benchmark.txt" ] && pip install -r requirements_benchmark.txt # include benchmark requirements if they exist.
110-
pip install .
106+
run: pip install '.[test,benchmarking]'
111107

112108
- name: Run baseline performance benchmark
113109
id: 'baseline'
@@ -130,7 +126,7 @@ jobs:
130126
id: 'new'
131127
working-directory: ./new
132128
run: |
133-
${{env.run_cli_new}} spec '${{env.specs}}' -d '${{env.spec_defaults}}' \
129+
${{env.run_cli}} spec '${{env.specs}}' -d '${{env.spec_defaults}}' \
134130
-O '{input_file:"${{steps.download.outputs.download-path}}/${{ matrix.test-data }}.10n"}' \
135131
-o "$PWD/report.ion" -r '${{env.report_statistics}}'
136132
echo "::group::Ion Report"
@@ -141,4 +137,4 @@ jobs:
141137
# Compare results and identify regression
142138
- name: Detect performance regression
143139
working-directory: ./new
144-
run: ${{env.run_cli_new}} compare --fail ${{steps.baseline.outputs.report}} ${{steps.new.outputs.report}} -c '${{env.compare_statistics}}'
140+
run: ${{env.run_cli}} compare --fail ${{steps.baseline.outputs.report}} ${{steps.new.outputs.report}} -c '${{env.compare_statistics}}'

0 commit comments

Comments (0)