
Commit f3e876c

Put back GPU linux tests (#3993)
1 parent 94d5b4f commit f3e876c

File tree

1 file changed (+115, -111 lines)
Lines changed: 115 additions & 111 deletions
@@ -1,123 +1,127 @@
-# name: Unit-tests on Linux GPU
+name: Unit-tests on Linux GPU

-# on:
-#   pull_request:
-#   push:
-#     branches:
-#       - nightly
-#       - main
-#       - release/*
-#   workflow_dispatch:
+on:
+  pull_request:
+  push:
+    branches:
+      - nightly
+      - main
+      - release/*
+  workflow_dispatch:

-# jobs:
-#   tests:
-#     strategy:
-#       matrix:
-#         # TODO add up to 3.13
-#         python_version: ["3.9", "3.10"]
-#         cuda_arch_version: ["12.6"]
-#       fail-fast: false
-#     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
-#     permissions:
-#       id-token: write
-#       contents: read
-#     with:
-#       runner: linux.g5.4xlarge.nvidia.gpu
-#       repository: pytorch/audio
-#       gpu-arch-type: cuda
-#       gpu-arch-version: ${{ matrix.cuda_arch_version }}
-#       timeout: 120
+jobs:
+  tests:
+    strategy:
+      matrix:
+        # TODO add up to 3.13
+        python_version: ["3.10"]
+        cuda_arch_version: ["12.6"]
+      fail-fast: false
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+    permissions:
+      id-token: write
+      contents: read
+    with:
+      runner: linux.g5.4xlarge.nvidia.gpu
+      repository: pytorch/audio
+      gpu-arch-type: cuda
+      gpu-arch-version: ${{ matrix.cuda_arch_version }}
+      timeout: 120

-#       script: |
-#         set -ex
-#         # Set up Environment Variables
-#         export PYTHON_VERSION="${{ matrix.python_version }}"
-#         export CU_VERSION="${{ matrix.cuda_arch_version }}"
-#         export CUDATOOLKIT="pytorch-cuda=${CU_VERSION}"
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_APPLY_CMVN_SLIDING=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_FBANK_FEATS=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_KALDI_PITCH_FEATS=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_MFCC_FEATS=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_SPECTROGRAM_FEATS=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_TEMPORARY_DISABLED=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX_DECODER=true
-#         export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX_ENCODER=true
+      script: |
+        set -ex
+        # Set up Environment Variables
+        export PYTHON_VERSION="${{ matrix.python_version }}"
+        export CU_VERSION="${{ matrix.cuda_arch_version }}"
+        export CUDATOOLKIT="pytorch-cuda=${CU_VERSION}"
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_APPLY_CMVN_SLIDING=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_FBANK_FEATS=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_KALDI_PITCH_FEATS=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_MFCC_FEATS=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_CMD_COMPUTE_SPECTROGRAM_FEATS=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_CUDA_SMALL_MEMORY=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_ON_PYTHON_310=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_TEMPORARY_DISABLED=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX_DECODER=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_SOX_ENCODER=true
+        export TORCHAUDIO_TEST_ALLOW_SKIP_IF_NO_FFMPEG=true
+        # Avoid reproducibility errors with CUBLAS: https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility
+        export CUBLAS_WORKSPACE_CONFIG=:4096:8

-#         # Set CHANNEL
-#         if [[(${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then
-#           export CHANNEL=test
-#         else
-#           export CHANNEL=nightly
-#         fi
+        # Set UPLOAD_CHANNEL
+        if [[(${GITHUB_EVENT_NAME} = 'pull_request' && (${GITHUB_BASE_REF} = 'release'*)) || (${GITHUB_REF} = 'refs/heads/release'*) ]]; then
+          export UPLOAD_CHANNEL=test
+        else
+          export UPLOAD_CHANNEL=nightly
+        fi

-#         echo "::group::Create conda env"
-#         # Mark Build Directory Safe
-#         git config --global --add safe.directory /__w/audio/audio
-#         conda create --quiet -y --prefix ci_env python="${PYTHON_VERSION}"
-#         conda activate ./ci_env
+        echo "::group::Create conda env"
+        # Mark Build Directory Safe
+        git config --global --add safe.directory /__w/audio/audio
+        conda create --quiet -y --prefix ci_env python="${PYTHON_VERSION}"
+        conda activate ./ci_env

-#         echo "::endgroup::"
-#         echo "::group::Install PyTorch"
-#         conda install \
-#           --yes \
-#           --quiet \
-#           -c "pytorch-${CHANNEL}" \
-#           -c nvidia "pytorch-${CHANNEL}"::pytorch[build="*${CU_VERSION}*"] \
-#           "${CUDATOOLKIT}"
+        echo "::endgroup::"
+        echo "::group::Install PyTorch"
+        export GPU_ARCH_ID="cu126" # TODO this is currently hardcoded, should depend on matrix's value.
+        PYTORCH_WHEEL_INDEX="https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${GPU_ARCH_ID}"
+        pip install --progress-bar=off --pre torch torchcodec --index-url="${PYTORCH_WHEEL_INDEX}"
+        echo "::endgroup::"

-#         echo "::endgroup::"
-#         echo "::group::Install TorchAudio"
-#         conda install --quiet --yes 'cmake>=3.18.0' ninja
-#         pip3 install --progress-bar off -v -e . --no-use-pep517
+        echo "::group::Install TorchAudio"
+        conda install --quiet --yes cmake ninja
+        pip3 install --progress-bar off -v -e . --no-use-pep517

-#         echo "::endgroup::"
-#         echo "::group::Build FFmpeg"
-#         .github/scripts/ffmpeg/build_gpu.sh
+        echo "::endgroup::"
+        echo "::group::Build FFmpeg"
+        .github/scripts/ffmpeg/build_gpu.sh

-#         echo "::endgroup::"
-#         echo "::group::Install other Dependencies"
-#         conda install \
-#           --quiet --yes \
-#           -c conda-forge \
-#           -c numba/label/dev \
-#           sox libvorbis 'librosa==0.10.0' parameterized 'requests>=2.20'
-#         pip3 install --progress-bar off \
-#           kaldi-io \
-#           SoundFile \
-#           coverage \
-#           pytest \
-#           pytest-cov \
-#           'scipy==1.7.3' \
-#           transformers \
-#           expecttest \
-#           unidecode \
-#           inflect \
-#           Pillow \
-#           sentencepiece \
-#           pytorch-lightning \
-#           'protobuf<4.21.0' \
-#           demucs \
-#           tinytag \
-#           flashlight-text \
-#           git+https://github.com/kpu/kenlm/ \
-#           git+https://github.com/pytorch/fairseq.git@e47a4c8
+        echo "::endgroup::"
+        echo "::group::Install other Dependencies"
+        # conda install \
+        #   --quiet --yes \
+        #   -c conda-forge \
+        #   -c numba/label/dev \
+        #   sox libvorbis 'librosa==0.10.0' parameterized 'requests>=2.20'
+        # pip3 install --progress-bar off \
+        #   kaldi-io \
+        #   SoundFile \
+        #   coverage \
+        #   pytest \
+        #   pytest-cov \
+        #   scipy \
+        #   transformers \
+        #   expecttest \
+        #   unidecode \
+        #   inflect \
+        #   Pillow \
+        #   sentencepiece \
+        #   pytorch-lightning \
+        #   'protobuf<4.21.0' \
+        #   demucs \
+        #   tinytag \
+        #   flashlight-text \
+        #   git+https://github.com/kpu/kenlm/ \
+        #   git+https://github.com/pytorch/fairseq.git@e47a4c8
+
+        pip3 install parameterized requests
+        pip3 install kaldi-io SoundFile librosa coverage pytest pytest-cov scipy expecttest unidecode inflect Pillow sentencepiece pytorch-lightning 'protobuf<4.21.0' demucs tinytag
+        pip3 install "pillow<10.0" "scipy<1.10" "numpy<2.0"

-#         echo "::endgroup::"
-#         echo "::group::Run tests"
-#         export PATH="${PWD}/third_party/install/bin/:${PATH}"
+        echo "::endgroup::"
+        echo "::group::Run tests"
+        export PATH="${PWD}/third_party/install/bin/:${PATH}"

-#         declare -a args=(
-#           '-v'
-#           '--cov=torchaudio'
-#           "--junitxml=${RUNNER_TEST_RESULTS_DIR}/junit.xml"
-#           '--durations' '100'
-#           '-k' 'cuda or gpu'
-#         )
+        declare -a args=(
+          '-v'
+          '--cov=torchaudio'
+          "--junitxml=${RUNNER_TEST_RESULTS_DIR}/junit.xml"
+          '--durations' '100'
+          '-k' 'cuda or gpu'
+        )

-#         cd test
-#         python3 -m torch.utils.collect_env
-#         env | grep TORCHAUDIO || true
-#         pytest "${args[@]}" torchaudio_unittest
-#         coverage html
+        cd test
+        python3 -m torch.utils.collect_env
+        env | grep TORCHAUDIO || true
+        pytest "${args[@]}" torchaudio_unittest
+        coverage html
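
The key change in the restored script is that PyTorch is now installed from the download.pytorch.org wheel index instead of conda, with the channel chosen from the git ref. As a hedged, standalone sketch (not part of the commit), the following shell snippet reproduces just that channel and index selection so it can be checked locally; the default values for GITHUB_EVENT_NAME, GITHUB_BASE_REF, and GITHUB_REF are assumptions for running outside CI, and it only echoes the pip command rather than running it.

#!/usr/bin/env bash
# Hypothetical local sketch of the workflow's wheel-index selection -- not part of the commit.
set -eu

# Outside of GitHub Actions these variables are unset; give them defaults so the sketch runs anywhere.
GITHUB_EVENT_NAME="${GITHUB_EVENT_NAME:-push}"
GITHUB_BASE_REF="${GITHUB_BASE_REF:-}"
GITHUB_REF="${GITHUB_REF:-refs/heads/main}"

# Same branching rule as the workflow script: release PRs and release branches
# use the "test" channel, everything else uses "nightly".
if [[ ( ${GITHUB_EVENT_NAME} = 'pull_request' && ${GITHUB_BASE_REF} = 'release'* ) || ${GITHUB_REF} = 'refs/heads/release'* ]]; then
  UPLOAD_CHANNEL=test
else
  UPLOAD_CHANNEL=nightly
fi

# The workflow hardcodes the CUDA wheel suffix for now (see the TODO in the diff).
GPU_ARCH_ID="cu126"
PYTORCH_WHEEL_INDEX="https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${GPU_ARCH_ID}"

# Print the install command the CI job would run, instead of executing it.
echo pip install --progress-bar=off --pre torch torchcodec --index-url="${PYTORCH_WHEEL_INDEX}"

With the defaults above the printed --index-url points at the nightly channel; setting GITHUB_REF to a refs/heads/release/* branch should switch it to the test channel.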
