.azure-pipelines/gpu-tests.yml: 65 changes (33 additions, 32 deletions)
@@ -21,9 +21,9 @@ jobs:
strategy:
matrix:
'PyTorch - LTS':
image: "pytorchlightning/pytorch_lightning:base-cuda-py3.7-torch1.8"
image: "pytorchlightning/pytorch_lightning:base-cuda-py3.7-torch1.8@sha256:fe3f5560d539135cdd6bdfe9e2a87b9a7b6955b928cd2780a9bff565688bf618"
'PyTorch - stable':
image: "pytorchlightning/pytorch_lightning:base-cuda-py3.9-torch1.11"
image: "pytorchlightning/pytorch_lightning:base-cuda-py3.9-torch1.11@sha256:e51b268be67096fa0942e6f395d85ec26ede493f35a7636d8a56b569cf65ddad"
# how long to run the job before automatically cancelling
timeoutInMinutes: "100"
# how much time to give 'run always even if cancelled tasks' before stopping them
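
Both container images above are now pinned by tag plus an immutable sha256 digest, so the job keeps pulling the exact image build that was validated even if the tag is later repushed. A minimal local sketch of how such a digest can be resolved, assuming Docker is available on the machine (the tag comes from the 'PyTorch - LTS' entry above, and the printed value should match the sha256:fe3f5560... digest used in the pipeline):

# bash sketch, not part of the PR: resolve the repository digest for a tagged image
docker pull pytorchlightning/pytorch_lightning:base-cuda-py3.7-torch1.8
docker image inspect --format '{{index .RepoDigests 0}}' \
  pytorchlightning/pytorch_lightning:base-cuda-py3.7-torch1.8
# prints: pytorchlightning/pytorch_lightning@sha256:<digest>
# the image is then referenced as <repo>:<tag>@sha256:<digest>; when both are given,
# the digest is authoritative and the tag is informational only.
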
@@ -72,30 +72,31 @@ jobs:
- bash: bash .actions/pull_legacy_checkpoints.sh
displayName: 'Get legacy checkpoints'

- bash: |
python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests --ignore tests/benchmarks -v --junitxml=$(Build.StagingDirectory)/test-results.xml --durations=50
displayName: 'Testing: standard'
# - bash: |
# python -m coverage run --source pytorch_lightning -m pytest pytorch_lightning tests --ignore tests/benchmarks -v --junitxml=$(Build.StagingDirectory)/test-results.xml --durations=50
# displayName: 'Testing: standard'

- bash: |
bash tests/standalone_tests.sh
env:
PL_USE_MOCKED_MNIST: "1"
CUDA_LAUNCH_BLOCKING: "1"
displayName: 'Testing: standalone'

- bash: |
python -m coverage report
python -m coverage xml
python -m coverage html
python -m codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
ls -l
displayName: 'Statistics'

- task: PublishTestResults@2
displayName: 'Publish test results'
inputs:
testResultsFiles: '$(Build.StagingDirectory)/test-results.xml'
testRunTitle: '$(Agent.OS) - $(Build.DefinitionName) - Python $(python.version)'
condition: succeededOrFailed()
# - bash: |
# python -m coverage report
# python -m coverage xml
# python -m coverage html
# python -m codecov --token=$(CODECOV_TOKEN) --commit=$(Build.SourceVersion) --flags=gpu,pytest --name="GPU-coverage" --env=linux,azure
# ls -l
# displayName: 'Statistics'

# - task: PublishTestResults@2
# displayName: 'Publish test results'
# inputs:
# testResultsFiles: '$(Build.StagingDirectory)/test-results.xml'
# testRunTitle: '$(Agent.OS) - $(Build.DefinitionName) - Python $(python.version)'
# condition: succeededOrFailed()

# todo: re-enable after schema check pass, also atm it seems does not have any effect
#- task: PublishCodeCoverageResults@2
@@ -107,16 +108,16 @@ jobs:
# testRunTitle: '$(Agent.OS) - $(Build.BuildNumber)[$(Agent.JobName)] - Python $(python.version)'
# condition: succeededOrFailed()

- script: |
set -e
python -m pytest pl_examples -v --maxfail=2 --durations=0
bash pl_examples/run_examples.sh --trainer.accelerator=gpu --trainer.devices=1
bash pl_examples/run_examples.sh --trainer.accelerator=gpu --trainer.devices=2 --trainer.strategy=ddp
bash pl_examples/run_examples.sh --trainer.accelerator=gpu --trainer.devices=2 --trainer.strategy=ddp --trainer.precision=16
env:
PL_USE_MOCKED_MNIST: "1"
displayName: 'Testing: examples'

- bash: |
python -m pytest tests/benchmarks -v --maxfail=2 --durations=0
displayName: 'Testing: benchmarks'
# - script: |
# set -e
# python -m pytest pl_examples -v --maxfail=2 --durations=0
# bash pl_examples/run_examples.sh --trainer.accelerator=gpu --trainer.devices=1
# bash pl_examples/run_examples.sh --trainer.accelerator=gpu --trainer.devices=2 --trainer.strategy=ddp
# bash pl_examples/run_examples.sh --trainer.accelerator=gpu --trainer.devices=2 --trainer.strategy=ddp --trainer.precision=16
# env:
# PL_USE_MOCKED_MNIST: "1"
# displayName: 'Testing: examples'

# - bash: |
# python -m pytest tests/benchmarks -v --maxfail=2 --durations=0
# displayName: 'Testing: benchmarks'
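
Within the hunks shown, the legacy-checkpoint download and the 'Testing: standalone' step appear to be the only test steps left enabled; the standard tests, coverage statistics, result publishing, examples, and benchmarks are all commented out. As a rough, hedged sketch (not part of this PR), the PublishCodeCoverageResults@2 task mentioned in the todo comment above could consume the coverage.xml produced by the disabled `python -m coverage xml` step, assuming the v2 task's summaryFileLocation input:

# hedged sketch only; requires the disabled 'Statistics' step to be restored first
- task: PublishCodeCoverageResults@2
  displayName: 'Publish coverage'
  inputs:
    summaryFileLocation: 'coverage.xml'  # default output path of `coverage xml`
  condition: succeededOrFailed()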