From 4b650ab9e07cab28b9623ba908495f1dd188cd27 Mon Sep 17 00:00:00 2001
From: Jean Schmidt
Date: Thu, 25 Jul 2024 19:58:38 +0200
Subject: [PATCH] Prefix runner labels with amz2023.

---
 .github/workflows/fbgemm_ci.yml               | 6 +++---
 .github/workflows/fbgemm_gpu_ci_cpu.yml       | 8 ++++----
 .github/workflows/fbgemm_gpu_ci_cuda.yml      | 6 +++---
 .github/workflows/fbgemm_gpu_ci_rocm.yml      | 2 +-
 .github/workflows/fbgemm_gpu_docs.yml         | 2 +-
 .github/workflows/fbgemm_gpu_pip.yml          | 6 +++---
 .github/workflows/fbgemm_gpu_release_cpu.yml  | 8 ++++----
 .github/workflows/fbgemm_gpu_release_cuda.yml | 4 ++--
 8 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/.github/workflows/fbgemm_ci.yml b/.github/workflows/fbgemm_ci.yml
index 3bde37d4f0..513fba331f 100644
--- a/.github/workflows/fbgemm_ci.yml
+++ b/.github/workflows/fbgemm_ci.yml
@@ -35,7 +35,7 @@ jobs:
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.12xlarge" },
+          { arch: x86, instance: "amz2023.linux.12xlarge" },
         ]
         library-type: [ static, shared ]
         compiler: [ "gcc", "clang" ]
@@ -105,7 +105,7 @@


   build-bazel:
-    runs-on: linux.12xlarge
+    runs-on: amz2023.linux.12xlarge
     container:
       image: amazonlinux:2023
       options: --user root
@@ -119,7 +119,7 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.12xlarge" },
+          { arch: x86, instance: "amz2023.linux.12xlarge" },
         ]
         compiler: [ "gcc", "clang" ]

diff --git a/.github/workflows/fbgemm_gpu_ci_cpu.yml b/.github/workflows/fbgemm_gpu_ci_cpu.yml
index 9997b3b47f..665b9d1540 100644
--- a/.github/workflows/fbgemm_gpu_ci_cpu.yml
+++ b/.github/workflows/fbgemm_gpu_ci_cpu.yml
@@ -64,8 +64,8 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.4xlarge" },
-          { arch: arm, instance: "linux.arm64.2xlarge" },
+          { arch: x86, instance: "amz2023.linux.4xlarge" },
+          { arch: arm, instance: "amz2023.linux.arm64.2xlarge" },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
         compiler: [ "gcc", "clang" ]
@@ -132,8 +132,8 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.4xlarge", timeout: 20 },
-          { arch: arm, instance: "linux.arm64.2xlarge", timeout: 30 },
+          { arch: x86, instance: "amz2023.linux.4xlarge", timeout: 20 },
+          { arch: arm, instance: "amz2023.linux.arm64.2xlarge", timeout: 30 },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
         compiler: [ "gcc", "clang" ]
diff --git a/.github/workflows/fbgemm_gpu_ci_cuda.yml b/.github/workflows/fbgemm_gpu_ci_cuda.yml
index e35c0f1b4f..9a8d6007d5 100644
--- a/.github/workflows/fbgemm_gpu_ci_cuda.yml
+++ b/.github/workflows/fbgemm_gpu_ci_cuda.yml
@@ -63,7 +63,7 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.24xlarge" },
+          { arch: x86, instance: "amz2023.linux.24xlarge" },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
         cuda-version: [ "11.8.0", "12.1.1", "12.4.1" ]
@@ -126,7 +126,7 @@

   # Download the built artifact from GHA, test on GPU, and push to PyPI
   test_and_publish_artifact:
-    # runs-on: linux.4xlarge.nvidia.gpu
+    # runs-on: amz2023.linux.4xlarge.nvidia.gpu
     # Use available instance types - https://github.com/pytorch/test-infra/blob/main/.github/scale-config.yml
     runs-on: ${{ matrix.host-machine.instance }}
     defaults:
@@ -141,7 +141,7 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.g5.4xlarge.nvidia.gpu" },
+          { arch: x86, instance: "amz2023.linux.g5.4xlarge.nvidia.gpu" },
           # TODO: Enable when A100 machine queues are reasonably small enough for doing per-PR CI
           # https://hud.pytorch.org/metrics
           # { arch: x86, instance: "linux.gcp.a100" },
diff --git a/.github/workflows/fbgemm_gpu_ci_rocm.yml b/.github/workflows/fbgemm_gpu_ci_rocm.yml
index 85ea1ba6a6..fab1be5e59 100644
--- a/.github/workflows/fbgemm_gpu_ci_rocm.yml
+++ b/.github/workflows/fbgemm_gpu_ci_rocm.yml
@@ -61,7 +61,7 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.24xlarge" },
+          { arch: x86, instance: "amz2023.linux.24xlarge" },
         ]
         container-image: [ "ubuntu:20.04" ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
diff --git a/.github/workflows/fbgemm_gpu_docs.yml b/.github/workflows/fbgemm_gpu_docs.yml
index 5654cae455..7964629208 100644
--- a/.github/workflows/fbgemm_gpu_docs.yml
+++ b/.github/workflows/fbgemm_gpu_docs.yml
@@ -27,7 +27,7 @@
     permissions:
       # Grant write permission here so that the generated docs can be pushed to `gh-pages` branch
      contents: write
-    runs-on: linux.2xlarge
+    runs-on: amz2023.linux.2xlarge
     container:
       image: amazonlinux:2023
       options: --user root
diff --git a/.github/workflows/fbgemm_gpu_pip.yml b/.github/workflows/fbgemm_gpu_pip.yml
index f0c07ced3e..b10db2ee9c 100644
--- a/.github/workflows/fbgemm_gpu_pip.yml
+++ b/.github/workflows/fbgemm_gpu_pip.yml
@@ -61,8 +61,8 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.4xlarge", timeout: 20 },
-          { arch: arm, instance: "linux.arm64.2xlarge", timeout: 30 },
+          { arch: x86, instance: "amz2023.linux.4xlarge", timeout: 20 },
+          { arch: arm, instance: "amz2023.linux.arm64.2xlarge", timeout: 30 },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]

@@ -118,7 +118,7 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { instance: "linux.g5.4xlarge.nvidia.gpu" },
+          { instance: "amz2023.linux.g5.4xlarge.nvidia.gpu" },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
         cuda-version: [ "11.8.0", "12.1.1", "12.4.1" ]
diff --git a/.github/workflows/fbgemm_gpu_release_cpu.yml b/.github/workflows/fbgemm_gpu_release_cpu.yml
index ed3472dbc8..93a9621b2a 100644
--- a/.github/workflows/fbgemm_gpu_release_cpu.yml
+++ b/.github/workflows/fbgemm_gpu_release_cpu.yml
@@ -61,8 +61,8 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.4xlarge" },
-          { arch: arm, instance: "linux.arm64.2xlarge" },
+          { arch: x86, instance: "amz2023.linux.4xlarge" },
+          { arch: arm, instance: "amz2023.linux.arm64.2xlarge" },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]

@@ -128,8 +128,8 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.4xlarge", timeout: 20 },
-          { arch: arm, instance: "linux.arm64.2xlarge", timeout: 30 },
+          { arch: x86, instance: "amz2023.linux.4xlarge", timeout: 20 },
+          { arch: arm, instance: "amz2023.linux.arm64.2xlarge", timeout: 30 },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
     needs: build_artifact
diff --git a/.github/workflows/fbgemm_gpu_release_cuda.yml b/.github/workflows/fbgemm_gpu_release_cuda.yml
index 60681ec7d6..e0cdf3b421 100644
--- a/.github/workflows/fbgemm_gpu_release_cuda.yml
+++ b/.github/workflows/fbgemm_gpu_release_cuda.yml
@@ -67,7 +67,7 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.24xlarge" },
+          { arch: x86, instance: "amz2023.linux.24xlarge" },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
         cuda-version: [ "11.8.0", "12.1.1", "12.4.1" ]
@@ -138,7 +138,7 @@
       fail-fast: false
       matrix:
         host-machine: [
-          { arch: x86, instance: "linux.g5.4xlarge.nvidia.gpu" },
+          { arch: x86, instance: "amz2023.linux.g5.4xlarge.nvidia.gpu" },
         ]
         python-version: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
         cuda-version: [ "11.8.0", "12.1.1", "12.4.1" ]