Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
105 changes: 105 additions & 0 deletions .github/workflows/tox-self-hosted.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,105 @@
---
# Run the tox test suite on a self-hosted EC2 runner (Ubuntu 22.04).
# Three-job pattern: start the runner, run tox on it, always stop it.
name: Run tox on Ubuntu 22.04

on:
  push:
    # Documentation-only changes do not need a full tox run.
    paths-ignore:
      - 'docs/**'
      - '**/*.rst'
      - '**/*.md'
    branches:
      - master
      - main
      - '[0-9].[0-9]'
  pull_request:
    branches:
      - master
      - main

jobs:
  start-runner:
    name: Start self-hosted EC2 runner
    runs-on: ubuntu-latest
    outputs:
      label: ${{ steps.start-ec2-runner.outputs.label }}
      ec2-instance-id: ${{ steps.start-ec2-runner.outputs.ec2-instance-id }}
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}
      - name: Start EC2 runner
        id: start-ec2-runner
        uses: machulav/ec2-github-runner@v2
        with:
          mode: start
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          # Ubuntu 22.04 region AMI
          ec2-image-id: ami-050b6fdb9f05d7b32
          ec2-instance-type: c5.9xlarge
          subnet-id: ${{ secrets.AWS_EC2_SUBNET_ID }}
          security-group-id: ${{ secrets.AWS_EC2_SG_ID }}

  tox:
    name: Run tox on the runner
    needs: start-runner  # required to start the main job when the runner is ready
    runs-on: ${{ needs.start-runner.outputs.label }}  # run the job on the newly created runner
    steps:
      - name: checkout
        uses: actions/checkout@v3

      - name: Print runner info
        run: |
          printf "Runner lscpu:\n$(lscpu)\n"
          printf "Runner lsmem:\n$(lsmem)\n"
          printf "Runner nproc:\n$(nproc)\n"
          printf "Runner uname:\n$(uname -a)\n"

      # System-level dependencies only; Python dev requirements are installed
      # once in the dedicated "Install Dev requirements" step below
      # (previously dev_requirements.txt was installed twice).
      - name: Install benchmark dependencies
        run: |
          sudo apt update -y
          sudo apt install python3-pip -y
          sudo pip3 install --upgrade pip
          sudo apt install docker.io -y

      - name: Install Poetry
        run: |
          curl -sSL https://install.python-poetry.org | python3 -

      - name: Install Dev requirements
        run: |
          pip install -U setuptools wheel
          pip install -r dev_requirements.txt

      - name: Run tox
        run: |
          tox

      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v2
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          fail_ci_if_error: true

  stop-runner:
    name: Stop self-hosted EC2 runner
    needs:
      - start-runner  # required to get output from the start-runner job
      - tox  # required to wait when the main job is done
    runs-on: ubuntu-latest
    if: ${{ always() }}  # required to stop the runner even if the error happened in the previous jobs
    steps:
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: ${{ secrets.AWS_REGION }}
      - name: Stop EC2 runner
        uses: machulav/ec2-github-runner@v2
        with:
          mode: stop
          github-token: ${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}
          label: ${{ needs.start-runner.outputs.label }}
          ec2-instance-id: ${{ needs.start-runner.outputs.ec2-instance-id }}
49 changes: 0 additions & 49 deletions .github/workflows/tox.yml

This file was deleted.

2 changes: 1 addition & 1 deletion Readme.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@


[![codecov](https://codecov.io/gh/redis/redis-benchmarks-specification/branch/main/graph/badge.svg?token=GS64MV1H4W)](https://codecov.io/gh/redis/redis-benchmarks-specification)
[![CI tests](https://github.com/redis/redis-benchmarks-specification/actions/workflows/tox.yml/badge.svg)](https://github.com/redis/redis-benchmarks-specification/actions/workflows/tox.yml)
[![Run tox on Ubuntu 22.04](https://github.com/redis/redis-benchmarks-specification/actions/workflows/tox-self-hosted.yml/badge.svg)](https://github.com/redis/redis-benchmarks-specification/actions/workflows/tox-self-hosted.yml)
[![PyPI version](https://badge.fury.io/py/redis-benchmarks-specification.svg)](https://pypi.org/project/redis-benchmarks-specification)

<!-- toc -->
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[tool.poetry]
name = "redis-benchmarks-specification"
version = "0.1.63"
version = "0.1.64"
description = "The Redis benchmarks specification describes the cross-language/tools requirements and expectations to foster performance and observability standards around redis related technologies. Members from both industry and academia, including organizations and individuals are encouraged to contribute."
authors = ["filipecosta90 <[email protected]>","Redis Performance Group <[email protected]>"]
readme = "Readme.md"
Expand Down
63 changes: 34 additions & 29 deletions redis_benchmarks_specification/__runner__/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -459,36 +459,12 @@ def process_self_contained_coordinator_stream(
logging.info("Sending FLUSHALL to the DB")
r.flushall()

benchmark_required_memory = 0
maxmemory = 0
if "resources" in benchmark_config["dbconfig"]:
resources = benchmark_config["dbconfig"]["resources"]
if "requests" in resources:
resources_requests = benchmark_config["dbconfig"][
"resources"
]["requests"]
if "memory" in resources_requests:
benchmark_required_memory = resources_requests["memory"]
benchmark_required_memory = parse_size(
benchmark_required_memory
)
logging.info(
"Benchmark required memory: {} Bytes".format(
benchmark_required_memory
)
)

maxmemory = r.info("memory")["maxmemory"]
if maxmemory == 0:
total_system_memory = r.info("memory")["total_system_memory"]
logging.info(
" Using total system memory as max {}".format(
total_system_memory
)
)
maxmemory = total_system_memory
benchmark_required_memory = get_benchmark_required_memory(
benchmark_config
)
maxmemory = get_maxmemory(r)
if benchmark_required_memory > maxmemory:
logging.WARN(
logging.warning(
"Skipping test {} given maxmemory of server is bellow the benchmark required memory: {} < {}".format(
test_name, maxmemory, benchmark_required_memory
)
Expand Down Expand Up @@ -878,6 +854,35 @@ def process_self_contained_coordinator_stream(
)


def get_maxmemory(r):
    """Return the memory ceiling, in bytes, to assume for the server behind `r`.

    Uses the server's configured `maxmemory` when it is set; a value of 0
    means "unlimited", in which case the host's total system memory (as
    reported by INFO MEMORY) is used as the effective cap instead.
    """
    configured_limit = int(r.info("memory")["maxmemory"])
    if configured_limit != 0:
        logging.info(" Detected redis maxmemory config value {}".format(configured_limit))
        return configured_limit
    # maxmemory == 0: fall back to the machine's physical memory.
    system_total = int(r.info("memory")["total_system_memory"])
    logging.info(" Using total system memory as max {}".format(system_total))
    return system_total


def get_benchmark_required_memory(benchmark_config):
    """Return the memory, in bytes, that the benchmark spec declares it needs.

    Walks dbconfig -> resources -> requests -> memory in `benchmark_config`
    and converts the human-readable size string to bytes via parse_size.
    Returns 0 when no memory request is declared.

    NOTE(review): assumes the "dbconfig" key is always present, as the
    original did — a missing key raises KeyError for the caller to handle.
    """
    dbconfig = benchmark_config["dbconfig"]
    if "resources" not in dbconfig:
        return 0
    requests_section = dbconfig["resources"].get("requests", {})
    if "memory" not in requests_section:
        return 0
    required_bytes = int(parse_size(requests_section["memory"]))
    logging.info(
        "Benchmark required memory: {} Bytes".format(
            required_bytes
        )
    )
    return required_bytes


def used_memory_check(test_name, benchmark_required_memory, r, stage):
used_memory = r.info("memory")["used_memory"]
used_memory_gb = int(math.ceil(float(used_memory) / 1024.0 / 1024.0 / 1024.0))
Expand Down